ghc-lib-parser-9.12.2.20250421/0000755000000000000000000000000007346545000013473 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/LICENSE0000644000000000000000000000311307346545000014476 0ustar0000000000000000The Glasgow Haskell Compiler License Copyright 2002, The University Court of the University of Glasgow. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ghc-lib-parser-9.12.2.20250421/compiler/0000755000000000000000000000000007346545000015305 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/Bytecodes.h0000644000000000000000000001160107346545000017376 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2009 * * Bytecode definitions. * * ---------------------------------------------------------------------------*/ /* -------------------------------------------------------------------------- * Instructions * * Notes: * o CASEFAIL is generated by the compiler whenever it tests an "irrefutable" * pattern which fails. If we don't see too many of these, we could * optimise out the redundant test. * ------------------------------------------------------------------------*/ /* NOTE: THIS FILE IS INCLUDED IN HASKELL SOURCES (ghc/compiler/GHC/ByteCode/Asm.hs). DO NOT PUT C-SPECIFIC STUFF IN HERE! I hope that's clear :-) */ #define bci_STKCHECK 1 #define bci_PUSH_L 2 #define bci_PUSH_LL 3 #define bci_PUSH_LLL 4 #define bci_PUSH8 5 #define bci_PUSH16 6 #define bci_PUSH32 7 #define bci_PUSH8_W 8 #define bci_PUSH16_W 9 #define bci_PUSH32_W 10 #define bci_PUSH_G 11 #define bci_PUSH_ALTS_P 13 #define bci_PUSH_ALTS_N 14 #define bci_PUSH_ALTS_F 15 #define bci_PUSH_ALTS_D 16 #define bci_PUSH_ALTS_L 17 #define bci_PUSH_ALTS_V 18 #define bci_PUSH_PAD8 19 #define bci_PUSH_PAD16 20 #define bci_PUSH_PAD32 21 #define bci_PUSH_UBX8 22 #define bci_PUSH_UBX16 23 #define bci_PUSH_UBX32 24 #define bci_PUSH_UBX 25 #define bci_PUSH_APPLY_N 26 #define bci_PUSH_APPLY_F 27 #define bci_PUSH_APPLY_D 28 #define bci_PUSH_APPLY_L 29 #define bci_PUSH_APPLY_V 30 #define bci_PUSH_APPLY_P 31 #define bci_PUSH_APPLY_PP 32 #define bci_PUSH_APPLY_PPP 33 #define bci_PUSH_APPLY_PPPP 34 #define bci_PUSH_APPLY_PPPPP 35 #define bci_PUSH_APPLY_PPPPPP 36 /* #define bci_PUSH_APPLY_PPPPPPP 37 */ #define bci_SLIDE 38 #define bci_ALLOC_AP 39 #define bci_ALLOC_AP_NOUPD 40 #define 
bci_ALLOC_PAP 41 #define bci_MKAP 42 #define bci_MKPAP 43 #define bci_UNPACK 44 #define bci_PACK 45 #define bci_TESTLT_I 46 #define bci_TESTEQ_I 47 #define bci_TESTLT_F 48 #define bci_TESTEQ_F 49 #define bci_TESTLT_D 50 #define bci_TESTEQ_D 51 #define bci_TESTLT_P 52 #define bci_TESTEQ_P 53 #define bci_CASEFAIL 54 #define bci_JMP 55 #define bci_CCALL 56 #define bci_SWIZZLE 57 #define bci_ENTER 58 #define bci_RETURN_P 60 #define bci_RETURN_N 61 #define bci_RETURN_F 62 #define bci_RETURN_D 63 #define bci_RETURN_L 64 #define bci_RETURN_V 65 #define bci_BRK_FUN 66 #define bci_TESTLT_W 67 #define bci_TESTEQ_W 68 #define bci_RETURN_T 69 #define bci_PUSH_ALTS_T 70 #define bci_TESTLT_I64 71 #define bci_TESTEQ_I64 72 #define bci_TESTLT_I32 73 #define bci_TESTEQ_I32 74 #define bci_TESTLT_I16 75 #define bci_TESTEQ_I16 76 #define bci_TESTLT_I8 77 #define bci_TESTEQ_I8 78 #define bci_TESTLT_W64 79 #define bci_TESTEQ_W64 80 #define bci_TESTLT_W32 81 #define bci_TESTEQ_W32 82 #define bci_TESTLT_W16 83 #define bci_TESTEQ_W16 84 #define bci_TESTLT_W8 85 #define bci_TESTEQ_W8 86 #define bci_PRIMCALL 87 /* If you need to go past 255 then you will run into the flags */ /* If you need to go below 0x0100 then you will run into the instructions */ #define bci_FLAG_LARGE_ARGS 0x8000 /* If a BCO definitely requires less than this many words of stack, don't include an explicit STKCHECK insn in it. The interpreter will check for this many words of stack before running each BCO, rendering an explicit check unnecessary in the majority of cases. */ #define INTERP_STACK_CHECK_THRESH 50 /*-------------------------------------------------------------------------*/ ghc-lib-parser-9.12.2.20250421/compiler/ClosureTypes.h0000644000000000000000000000670107346545000020123 0ustar0000000000000000/* ---------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2005 * * Closure Type Constants: out here because the native code generator * needs to get at them. 
* * -------------------------------------------------------------------------- */ #pragma once /* * WARNING WARNING WARNING * * If you add or delete any closure types, don't forget to update the following, * - the closure flags table in rts/ClosureFlags.c * - isRetainer in rts/RetainerProfile.c * - the closure_type_names list in rts/Printer.c * - the ClosureType sum type in libraries/ghc-internal/src/GHC/Internal/ClosureTypes.hs */ /* CONSTR/THUNK/FUN_$A_$B mean they have $A pointers followed by $B * non-pointers in their payloads. */ /* Object tag 0 raises an internal error */ #define INVALID_OBJECT 0 #define CONSTR 1 #define CONSTR_1_0 2 #define CONSTR_0_1 3 #define CONSTR_2_0 4 #define CONSTR_1_1 5 #define CONSTR_0_2 6 #define CONSTR_NOCAF 7 #define FUN 8 #define FUN_1_0 9 #define FUN_0_1 10 #define FUN_2_0 11 #define FUN_1_1 12 #define FUN_0_2 13 #define FUN_STATIC 14 #define THUNK 15 #define THUNK_1_0 16 #define THUNK_0_1 17 #define THUNK_2_0 18 #define THUNK_1_1 19 #define THUNK_0_2 20 #define THUNK_STATIC 21 #define THUNK_SELECTOR 22 #define BCO 23 #define AP 24 #define PAP 25 #define AP_STACK 26 #define IND 27 #define IND_STATIC 28 #define RET_BCO 29 #define RET_SMALL 30 #define RET_BIG 31 #define RET_FUN 32 #define UPDATE_FRAME 33 #define CATCH_FRAME 34 #define UNDERFLOW_FRAME 35 #define STOP_FRAME 36 #define BLOCKING_QUEUE 37 #define BLACKHOLE 38 #define MVAR_CLEAN 39 #define MVAR_DIRTY 40 #define TVAR 41 #define ARR_WORDS 42 #define MUT_ARR_PTRS_CLEAN 43 #define MUT_ARR_PTRS_DIRTY 44 #define MUT_ARR_PTRS_FROZEN_DIRTY 45 #define MUT_ARR_PTRS_FROZEN_CLEAN 46 #define MUT_VAR_CLEAN 47 #define MUT_VAR_DIRTY 48 #define WEAK 49 #define PRIM 50 #define MUT_PRIM 51 #define TSO 52 #define STACK 53 #define TREC_CHUNK 54 #define ATOMICALLY_FRAME 55 #define CATCH_RETRY_FRAME 56 #define CATCH_STM_FRAME 57 #define WHITEHOLE 58 #define SMALL_MUT_ARR_PTRS_CLEAN 59 #define SMALL_MUT_ARR_PTRS_DIRTY 60 #define SMALL_MUT_ARR_PTRS_FROZEN_DIRTY 61 #define 
SMALL_MUT_ARR_PTRS_FROZEN_CLEAN 62 #define COMPACT_NFDATA 63 #define CONTINUATION 64 #define N_CLOSURE_TYPES 65 ghc-lib-parser-9.12.2.20250421/compiler/CodeGen.Platform.h0000644000000000000000000006247507346545000020563 0ustar0000000000000000 import GHC.Cmm.Expr #if !(defined(MACHREGS_i386) || defined(MACHREGS_x86_64) \ || defined(MACHREGS_powerpc) || defined(MACHREGS_aarch64) \ || defined(MACHREGS_riscv64)) import GHC.Utils.Panic.Plain #endif import GHC.Platform.Reg #include "MachRegs.h" #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) # if defined(MACHREGS_i386) # define eax 0 # define ebx 1 # define ecx 2 # define edx 3 # define esi 4 # define edi 5 # define ebp 6 # define esp 7 # endif # if defined(MACHREGS_x86_64) # define rax 0 # define rbx 1 # define rcx 2 # define rdx 3 # define rsi 4 # define rdi 5 # define rbp 6 # define rsp 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # endif -- N.B. XMM, YMM, and ZMM are all aliased to the same hardware registers hence -- being assigned the same RegNos. 
# define xmm0 16 # define xmm1 17 # define xmm2 18 # define xmm3 19 # define xmm4 20 # define xmm5 21 # define xmm6 22 # define xmm7 23 # define xmm8 24 # define xmm9 25 # define xmm10 26 # define xmm11 27 # define xmm12 28 # define xmm13 29 # define xmm14 30 # define xmm15 31 # define ymm0 16 # define ymm1 17 # define ymm2 18 # define ymm3 19 # define ymm4 20 # define ymm5 21 # define ymm6 22 # define ymm7 23 # define ymm8 24 # define ymm9 25 # define ymm10 26 # define ymm11 27 # define ymm12 28 # define ymm13 29 # define ymm14 30 # define ymm15 31 # define zmm0 16 # define zmm1 17 # define zmm2 18 # define zmm3 19 # define zmm4 20 # define zmm5 21 # define zmm6 22 # define zmm7 23 # define zmm8 24 # define zmm9 25 # define zmm10 26 # define zmm11 27 # define zmm12 28 # define zmm13 29 # define zmm14 30 # define zmm15 31 -- Note: these are only needed for ARM/AArch64 because globalRegMaybe is now used in CmmSink.hs. -- Since it's only used to check 'isJust', the actual values don't matter, thus -- I'm not sure if these are the correct numberings. -- Normally, the register names are just stringified as part of the REG() macro #elif defined(MACHREGS_powerpc) || defined(MACHREGS_arm) \ || defined(MACHREGS_aarch64) # define r0 0 # define r1 1 # define r2 2 # define r3 3 # define r4 4 # define r5 5 # define r6 6 # define r7 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # define r16 16 # define r17 17 # define r18 18 # define r19 19 # define r20 20 # define r21 21 # define r22 22 # define r23 23 # define r24 24 # define r25 25 # define r26 26 # define r27 27 # define r28 28 # define r29 29 # define r30 30 # define r31 31 -- See note above. These aren't actually used for anything except satisfying the compiler for globalRegMaybe -- so I'm unsure if they're the correct numberings, should they ever be attempted to be used in the NCG. 
#if defined(MACHREGS_aarch64) || defined(MACHREGS_arm) # define s0 32 # define s1 33 # define s2 34 # define s3 35 # define s4 36 # define s5 37 # define s6 38 # define s7 39 # define s8 40 # define s9 41 # define s10 42 # define s11 43 # define s12 44 # define s13 45 # define s14 46 # define s15 47 # define s16 48 # define s17 49 # define s18 50 # define s19 51 # define s20 52 # define s21 53 # define s22 54 # define s23 55 # define s24 56 # define s25 57 # define s26 58 # define s27 59 # define s28 60 # define s29 61 # define s30 62 # define s31 63 # define d0 32 # define d1 33 # define d2 34 # define d3 35 # define d4 36 # define d5 37 # define d6 38 # define d7 39 # define d8 40 # define d9 41 # define d10 42 # define d11 43 # define d12 44 # define d13 45 # define d14 46 # define d15 47 # define d16 48 # define d17 49 # define d18 50 # define d19 51 # define d20 52 # define d21 53 # define d22 54 # define d23 55 # define d24 56 # define d25 57 # define d26 58 # define d27 59 # define d28 60 # define d29 61 # define d30 62 # define d31 63 # define q0 32 # define q1 33 # define q2 34 # define q3 35 # define q4 36 # define q5 37 # define q6 38 # define q7 39 # define q8 40 # define q9 41 # define q10 42 # define q11 43 # define q12 44 # define q13 45 # define q14 46 # define q15 47 # define q16 48 # define q17 49 # define q18 50 # define q19 51 # define q20 52 # define q21 53 # define q22 54 # define q23 55 # define q24 56 # define q25 57 # define q26 58 # define q27 59 # define q28 60 # define q29 61 # define q30 62 # define q31 63 #endif # if defined(MACHREGS_darwin) # define f0 32 # define f1 33 # define f2 34 # define f3 35 # define f4 36 # define f5 37 # define f6 38 # define f7 39 # define f8 40 # define f9 41 # define f10 42 # define f11 43 # define f12 44 # define f13 45 # define f14 46 # define f15 47 # define f16 48 # define f17 49 # define f18 50 # define f19 51 # define f20 52 # define f21 53 # define f22 54 # define f23 55 # define f24 56 # define 
f25 57 # define f26 58 # define f27 59 # define f28 60 # define f29 61 # define f30 62 # define f31 63 # else # define fr0 32 # define fr1 33 # define fr2 34 # define fr3 35 # define fr4 36 # define fr5 37 # define fr6 38 # define fr7 39 # define fr8 40 # define fr9 41 # define fr10 42 # define fr11 43 # define fr12 44 # define fr13 45 # define fr14 46 # define fr15 47 # define fr16 48 # define fr17 49 # define fr18 50 # define fr19 51 # define fr20 52 # define fr21 53 # define fr22 54 # define fr23 55 # define fr24 56 # define fr25 57 # define fr26 58 # define fr27 59 # define fr28 60 # define fr29 61 # define fr30 62 # define fr31 63 # endif #elif defined(MACHREGS_s390x) # define r0 0 # define r1 1 # define r2 2 # define r3 3 # define r4 4 # define r5 5 # define r6 6 # define r7 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # define f0 16 # define f1 17 # define f2 18 # define f3 19 # define f4 20 # define f5 21 # define f6 22 # define f7 23 # define f8 24 # define f9 25 # define f10 26 # define f11 27 # define f12 28 # define f13 29 # define f14 30 # define f15 31 #elif defined(MACHREGS_riscv64) # define zero 0 # define ra 1 # define sp 2 # define gp 3 # define tp 4 # define t0 5 # define t1 6 # define t2 7 # define s0 8 # define s1 9 # define a0 10 # define a1 11 # define a2 12 # define a3 13 # define a4 14 # define a5 15 # define a6 16 # define a7 17 # define s2 18 # define s3 19 # define s4 20 # define s5 21 # define s6 22 # define s7 23 # define s8 24 # define s9 25 # define s10 26 # define s11 27 # define t3 28 # define t4 29 # define t5 30 # define t6 31 # define ft0 32 # define ft1 33 # define ft2 34 # define ft3 35 # define ft4 36 # define ft5 37 # define ft6 38 # define ft7 39 # define fs0 40 # define fs1 41 # define fa0 42 # define fa1 43 # define fa2 44 # define fa3 45 # define fa4 46 # define fa5 47 # define fa6 48 # define fa7 49 # define fs2 50 # define fs3 51 # define 
fs4 52 # define fs5 53 # define fs6 54 # define fs7 55 # define fs8 56 # define fs9 57 # define fs10 58 # define fs11 59 # define ft8 60 # define ft9 61 # define ft10 62 # define ft11 63 #elif defined(MACHREGS_loongarch64) # define zero 0 # define ra 1 # define tp 2 # define sp 3 # define a0 4 # define a1 5 # define a2 6 # define a3 7 # define a4 8 # define a5 9 # define a6 10 # define a7 11 # define t0 12 # define t1 13 # define t2 14 # define t3 15 # define t4 16 # define t5 17 # define t6 18 # define t7 19 # define t8 20 # define u0 21 # define fp 22 # define s0 23 # define s1 24 # define s2 25 # define s3 26 # define s4 27 # define s5 28 # define s6 29 # define s7 30 # define s8 31 # define fa0 32 # define fa1 33 # define fa2 34 # define fa3 35 # define fa4 36 # define fa5 37 # define fa6 38 # define fa7 39 # define ft0 40 # define ft1 41 # define ft2 42 # define ft3 43 # define ft4 44 # define ft5 45 # define ft6 46 # define ft7 47 # define ft8 48 # define ft9 49 # define ft10 50 # define ft11 51 # define ft12 52 # define ft13 53 # define ft14 54 # define ft15 55 # define fs0 56 # define fs1 57 # define fs2 58 # define fs3 59 # define fs4 60 # define fs5 61 # define fs6 62 # define fs7 63 #endif -- See also Note [Caller saves and callee-saves regs.] 
callerSaves :: GlobalReg -> Bool #if defined(CALLER_SAVES_Base) callerSaves BaseReg = True #endif #if defined(CALLER_SAVES_R1) callerSaves (VanillaReg 1) = True #endif #if defined(CALLER_SAVES_R2) callerSaves (VanillaReg 2) = True #endif #if defined(CALLER_SAVES_R3) callerSaves (VanillaReg 3) = True #endif #if defined(CALLER_SAVES_R4) callerSaves (VanillaReg 4) = True #endif #if defined(CALLER_SAVES_R5) callerSaves (VanillaReg 5) = True #endif #if defined(CALLER_SAVES_R6) callerSaves (VanillaReg 6) = True #endif #if defined(CALLER_SAVES_R7) callerSaves (VanillaReg 7) = True #endif #if defined(CALLER_SAVES_R8) callerSaves (VanillaReg 8) = True #endif #if defined(CALLER_SAVES_R9) callerSaves (VanillaReg 9) = True #endif #if defined(CALLER_SAVES_R10) callerSaves (VanillaReg 10) = True #endif #if defined(CALLER_SAVES_F1) callerSaves (FloatReg 1) = True #endif #if defined(CALLER_SAVES_F2) callerSaves (FloatReg 2) = True #endif #if defined(CALLER_SAVES_F3) callerSaves (FloatReg 3) = True #endif #if defined(CALLER_SAVES_F4) callerSaves (FloatReg 4) = True #endif #if defined(CALLER_SAVES_F5) callerSaves (FloatReg 5) = True #endif #if defined(CALLER_SAVES_F6) callerSaves (FloatReg 6) = True #endif #if defined(CALLER_SAVES_D1) callerSaves (DoubleReg 1) = True #endif #if defined(CALLER_SAVES_D2) callerSaves (DoubleReg 2) = True #endif #if defined(CALLER_SAVES_D3) callerSaves (DoubleReg 3) = True #endif #if defined(CALLER_SAVES_D4) callerSaves (DoubleReg 4) = True #endif #if defined(CALLER_SAVES_D5) callerSaves (DoubleReg 5) = True #endif #if defined(CALLER_SAVES_D6) callerSaves (DoubleReg 6) = True #endif #if defined(CALLER_SAVES_L1) callerSaves (LongReg 1) = True #endif #if defined(CALLER_SAVES_Sp) callerSaves Sp = True #endif #if defined(CALLER_SAVES_SpLim) callerSaves SpLim = True #endif #if defined(CALLER_SAVES_Hp) callerSaves Hp = True #endif #if defined(CALLER_SAVES_HpLim) callerSaves HpLim = True #endif #if defined(CALLER_SAVES_CCCS) callerSaves CCCS = True #endif #if 
defined(CALLER_SAVES_CurrentTSO) callerSaves CurrentTSO = True #endif #if defined(CALLER_SAVES_CurrentNursery) callerSaves CurrentNursery = True #endif callerSaves _ = False activeStgRegs :: [GlobalReg] activeStgRegs = [ #if defined(REG_Base) BaseReg #endif #if defined(REG_Sp) ,Sp #endif #if defined(REG_Hp) ,Hp #endif #if defined(REG_R1) ,VanillaReg 1 #endif #if defined(REG_R2) ,VanillaReg 2 #endif #if defined(REG_R3) ,VanillaReg 3 #endif #if defined(REG_R4) ,VanillaReg 4 #endif #if defined(REG_R5) ,VanillaReg 5 #endif #if defined(REG_R6) ,VanillaReg 6 #endif #if defined(REG_R7) ,VanillaReg 7 #endif #if defined(REG_R8) ,VanillaReg 8 #endif #if defined(REG_R9) ,VanillaReg 9 #endif #if defined(REG_R10) ,VanillaReg 10 #endif #if defined(REG_SpLim) ,SpLim #endif #if MAX_REAL_XMM_REG != 0 #if defined(REG_F1) ,FloatReg 1 #endif #if defined(REG_D1) ,DoubleReg 1 #endif #if defined(REG_XMM1) ,XmmReg 1 #endif #if defined(REG_YMM1) ,YmmReg 1 #endif #if defined(REG_ZMM1) ,ZmmReg 1 #endif #if defined(REG_F2) ,FloatReg 2 #endif #if defined(REG_D2) ,DoubleReg 2 #endif #if defined(REG_XMM2) ,XmmReg 2 #endif #if defined(REG_YMM2) ,YmmReg 2 #endif #if defined(REG_ZMM2) ,ZmmReg 2 #endif #if defined(REG_F3) ,FloatReg 3 #endif #if defined(REG_D3) ,DoubleReg 3 #endif #if defined(REG_XMM3) ,XmmReg 3 #endif #if defined(REG_YMM3) ,YmmReg 3 #endif #if defined(REG_ZMM3) ,ZmmReg 3 #endif #if defined(REG_F4) ,FloatReg 4 #endif #if defined(REG_D4) ,DoubleReg 4 #endif #if defined(REG_XMM4) ,XmmReg 4 #endif #if defined(REG_YMM4) ,YmmReg 4 #endif #if defined(REG_ZMM4) ,ZmmReg 4 #endif #if defined(REG_F5) ,FloatReg 5 #endif #if defined(REG_D5) ,DoubleReg 5 #endif #if defined(REG_XMM5) ,XmmReg 5 #endif #if defined(REG_YMM5) ,YmmReg 5 #endif #if defined(REG_ZMM5) ,ZmmReg 5 #endif #if defined(REG_F6) ,FloatReg 6 #endif #if defined(REG_D6) ,DoubleReg 6 #endif #if defined(REG_XMM6) ,XmmReg 6 #endif #if defined(REG_YMM6) ,YmmReg 6 #endif #if defined(REG_ZMM6) ,ZmmReg 6 #endif #else /* MAX_REAL_XMM_REG == 
0 */ #if defined(REG_F1) ,FloatReg 1 #endif #if defined(REG_F2) ,FloatReg 2 #endif #if defined(REG_F3) ,FloatReg 3 #endif #if defined(REG_F4) ,FloatReg 4 #endif #if defined(REG_F5) ,FloatReg 5 #endif #if defined(REG_F6) ,FloatReg 6 #endif #if defined(REG_D1) ,DoubleReg 1 #endif #if defined(REG_D2) ,DoubleReg 2 #endif #if defined(REG_D3) ,DoubleReg 3 #endif #if defined(REG_D4) ,DoubleReg 4 #endif #if defined(REG_D5) ,DoubleReg 5 #endif #if defined(REG_D6) ,DoubleReg 6 #endif #endif /* MAX_REAL_XMM_REG == 0 */ ] haveRegBase :: Bool #if defined(REG_Base) haveRegBase = True #else haveRegBase = False #endif -- | Returns 'Nothing' if this global register is not stored -- in a real machine register, otherwise returns @'Just' reg@, where -- reg is the machine register it is stored in. globalRegMaybe :: GlobalReg -> Maybe RealReg #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) \ || defined(MACHREGS_powerpc) \ || defined(MACHREGS_arm) || defined(MACHREGS_aarch64) \ || defined(MACHREGS_s390x) || defined(MACHREGS_riscv64) \ || defined(MACHREGS_wasm32) \ || defined(MACHREGS_loongarch64) # if defined(REG_Base) globalRegMaybe BaseReg = Just (RealRegSingle REG_Base) # endif # if defined(REG_R1) globalRegMaybe (VanillaReg 1) = Just (RealRegSingle REG_R1) # endif # if defined(REG_R2) globalRegMaybe (VanillaReg 2) = Just (RealRegSingle REG_R2) # endif # if defined(REG_R3) globalRegMaybe (VanillaReg 3) = Just (RealRegSingle REG_R3) # endif # if defined(REG_R4) globalRegMaybe (VanillaReg 4) = Just (RealRegSingle REG_R4) # endif # if defined(REG_R5) globalRegMaybe (VanillaReg 5) = Just (RealRegSingle REG_R5) # endif # if defined(REG_R6) globalRegMaybe (VanillaReg 6) = Just (RealRegSingle REG_R6) # endif # if defined(REG_R7) globalRegMaybe (VanillaReg 7) = Just (RealRegSingle REG_R7) # endif # if defined(REG_R8) globalRegMaybe (VanillaReg 8) = Just (RealRegSingle REG_R8) # endif # if defined(REG_R9) globalRegMaybe (VanillaReg 9) = Just (RealRegSingle REG_R9) # endif # if 
defined(REG_R10) globalRegMaybe (VanillaReg 10) = Just (RealRegSingle REG_R10) # endif # if defined(REG_F1) globalRegMaybe (FloatReg 1) = Just (RealRegSingle REG_F1) # endif # if defined(REG_F2) globalRegMaybe (FloatReg 2) = Just (RealRegSingle REG_F2) # endif # if defined(REG_F3) globalRegMaybe (FloatReg 3) = Just (RealRegSingle REG_F3) # endif # if defined(REG_F4) globalRegMaybe (FloatReg 4) = Just (RealRegSingle REG_F4) # endif # if defined(REG_F5) globalRegMaybe (FloatReg 5) = Just (RealRegSingle REG_F5) # endif # if defined(REG_F6) globalRegMaybe (FloatReg 6) = Just (RealRegSingle REG_F6) # endif # if defined(REG_D1) globalRegMaybe (DoubleReg 1) = Just (RealRegSingle REG_D1) # endif # if defined(REG_D2) globalRegMaybe (DoubleReg 2) = Just (RealRegSingle REG_D2) # endif # if defined(REG_D3) globalRegMaybe (DoubleReg 3) = Just (RealRegSingle REG_D3) # endif # if defined(REG_D4) globalRegMaybe (DoubleReg 4) = Just (RealRegSingle REG_D4) # endif # if defined(REG_D5) globalRegMaybe (DoubleReg 5) = Just (RealRegSingle REG_D5) # endif # if defined(REG_D6) globalRegMaybe (DoubleReg 6) = Just (RealRegSingle REG_D6) # endif # if MAX_REAL_XMM_REG != 0 # if defined(REG_XMM1) globalRegMaybe (XmmReg 1) = Just (RealRegSingle REG_XMM1) # endif # if defined(REG_XMM2) globalRegMaybe (XmmReg 2) = Just (RealRegSingle REG_XMM2) # endif # if defined(REG_XMM3) globalRegMaybe (XmmReg 3) = Just (RealRegSingle REG_XMM3) # endif # if defined(REG_XMM4) globalRegMaybe (XmmReg 4) = Just (RealRegSingle REG_XMM4) # endif # if defined(REG_XMM5) globalRegMaybe (XmmReg 5) = Just (RealRegSingle REG_XMM5) # endif # if defined(REG_XMM6) globalRegMaybe (XmmReg 6) = Just (RealRegSingle REG_XMM6) # endif # endif # if defined(MAX_REAL_YMM_REG) && MAX_REAL_YMM_REG != 0 # if defined(REG_YMM1) globalRegMaybe (YmmReg 1) = Just (RealRegSingle REG_YMM1) # endif # if defined(REG_YMM2) globalRegMaybe (YmmReg 2) = Just (RealRegSingle REG_YMM2) # endif # if defined(REG_YMM3) globalRegMaybe (YmmReg 3) = Just 
(RealRegSingle REG_YMM3) # endif # if defined(REG_YMM4) globalRegMaybe (YmmReg 4) = Just (RealRegSingle REG_YMM4) # endif # if defined(REG_YMM5) globalRegMaybe (YmmReg 5) = Just (RealRegSingle REG_YMM5) # endif # if defined(REG_YMM6) globalRegMaybe (YmmReg 6) = Just (RealRegSingle REG_YMM6) # endif # endif # if defined(MAX_REAL_ZMM_REG) && MAX_REAL_ZMM_REG != 0 # if defined(REG_ZMM1) globalRegMaybe (ZmmReg 1) = Just (RealRegSingle REG_ZMM1) # endif # if defined(REG_ZMM2) globalRegMaybe (ZmmReg 2) = Just (RealRegSingle REG_ZMM2) # endif # if defined(REG_ZMM3) globalRegMaybe (ZmmReg 3) = Just (RealRegSingle REG_ZMM3) # endif # if defined(REG_ZMM4) globalRegMaybe (ZmmReg 4) = Just (RealRegSingle REG_ZMM4) # endif # if defined(REG_ZMM5) globalRegMaybe (ZmmReg 5) = Just (RealRegSingle REG_ZMM5) # endif # if defined(REG_ZMM6) globalRegMaybe (ZmmReg 6) = Just (RealRegSingle REG_ZMM6) # endif # endif # if defined(REG_Sp) globalRegMaybe Sp = Just (RealRegSingle REG_Sp) # endif # if defined(REG_Lng1) globalRegMaybe (LongReg 1) = Just (RealRegSingle REG_Lng1) # endif # if defined(REG_Lng2) globalRegMaybe (LongReg 2) = Just (RealRegSingle REG_Lng2) # endif # if defined(REG_SpLim) globalRegMaybe SpLim = Just (RealRegSingle REG_SpLim) # endif # if defined(REG_Hp) globalRegMaybe Hp = Just (RealRegSingle REG_Hp) # endif # if defined(REG_HpLim) globalRegMaybe HpLim = Just (RealRegSingle REG_HpLim) # endif # if defined(REG_CurrentTSO) globalRegMaybe CurrentTSO = Just (RealRegSingle REG_CurrentTSO) # endif # if defined(REG_CurrentNursery) globalRegMaybe CurrentNursery = Just (RealRegSingle REG_CurrentNursery) # endif # if defined(REG_MachSp) globalRegMaybe MachSp = Just (RealRegSingle REG_MachSp) # endif globalRegMaybe _ = Nothing #elif defined(MACHREGS_NO_REGS) globalRegMaybe _ = Nothing #else globalRegMaybe = panic "globalRegMaybe not defined for this platform" #endif freeReg :: RegNo -> Bool #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) # if defined(MACHREGS_i386) freeReg 
esp = False -- %esp is the C stack pointer freeReg esi = False -- See Note [esi/edi/ebp not allocatable] freeReg edi = False freeReg ebp = False # endif # if defined(MACHREGS_x86_64) freeReg rsp = False -- %rsp is the C stack pointer # endif {- Note [esi/edi/ebp not allocatable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %esi is mapped to R1, so %esi would normally be allocatable while it is not being used for R1. However, %esi has no 8-bit version on x86, and the linear register allocator is not sophisticated enough to handle this irregularity (we need more RegClasses). The graph-colouring allocator also cannot handle this - it was designed with more flexibility in mind, but the current implementation is restricted to the same set of classes as the linear allocator. Hence, on x86 esi, edi and ebp are treated as not allocatable. -} -- split patterns in two functions to prevent overlaps freeReg r = freeRegBase r freeRegBase :: RegNo -> Bool # if defined(REG_Base) freeRegBase REG_Base = False # endif # if defined(REG_Sp) freeRegBase REG_Sp = False # endif # if defined(REG_SpLim) freeRegBase REG_SpLim = False # endif # if defined(REG_Hp) freeRegBase REG_Hp = False # endif # if defined(REG_HpLim) freeRegBase REG_HpLim = False # endif -- All other regs are considered to be "free", because we can track -- their liveness accurately. freeRegBase _ = True #elif defined(MACHREGS_powerpc) freeReg 0 = False -- Used by code setting the back chain pointer -- in stack reallocations on Linux. -- Moreover r0 is not usable in all insns. freeReg 1 = False -- The Stack Pointer -- most ELF PowerPC OSes use r2 as a TOC pointer freeReg 2 = False freeReg 13 = False -- reserved for system thread ID on 64 bit -- at least linux in -fPIC relies on r30 in PLT stubs freeReg 30 = False {- TODO: reserve r13 on 64 bit systems only and r30 on 32 bit respectively. For now we use r30 on 64 bit and r13 on 32 bit as a temporary register in stack handling code. See compiler/GHC/CmmToAsm/PPC/Instr.hs. 
Later we might want to reserve r13 and r30 only where it is required. Then use r12 as temporary register, which is also what the C ABI does. -} # if defined(REG_Base) freeReg REG_Base = False # endif # if defined(REG_Sp) freeReg REG_Sp = False # endif # if defined(REG_SpLim) freeReg REG_SpLim = False # endif # if defined(REG_Hp) freeReg REG_Hp = False # endif # if defined(REG_HpLim) freeReg REG_HpLim = False # endif freeReg _ = True #elif defined(MACHREGS_aarch64) -- stack pointer / zero reg freeReg 31 = False -- link register freeReg 30 = False -- frame pointer freeReg 29 = False -- ip0 -- used for spill offset computations freeReg 16 = False #if defined(darwin_HOST_OS) || defined(ios_HOST_OS) -- x18 is reserved by the platform on Darwin/iOS, and can not be used -- More about ARM64 ABI that Apple platforms support: -- https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms -- https://github.com/Siguza/ios-resources/blob/master/bits/arm64.md freeReg 18 = False #endif # if defined(REG_Base) freeReg REG_Base = False # endif # if defined(REG_Sp) freeReg REG_Sp = False # endif # if defined(REG_SpLim) freeReg REG_SpLim = False # endif # if defined(REG_Hp) freeReg REG_Hp = False # endif # if defined(REG_HpLim) freeReg REG_HpLim = False # endif # if defined(REG_R1) freeReg REG_R1 = False # endif # if defined(REG_R2) freeReg REG_R2 = False # endif # if defined(REG_R3) freeReg REG_R3 = False # endif # if defined(REG_R4) freeReg REG_R4 = False # endif # if defined(REG_R5) freeReg REG_R5 = False # endif # if defined(REG_R6) freeReg REG_R6 = False # endif # if defined(REG_R7) freeReg REG_R7 = False # endif # if defined(REG_R8) freeReg REG_R8 = False # endif # if defined(REG_F1) freeReg REG_F1 = False # endif # if defined(REG_F2) freeReg REG_F2 = False # endif # if defined(REG_F3) freeReg REG_F3 = False # endif # if defined(REG_F4) freeReg REG_F4 = False # endif # if defined(REG_F5) freeReg REG_F5 = False # endif # if defined(REG_F6) freeReg 
REG_F6 = False # endif # if defined(REG_D1) freeReg REG_D1 = False # endif # if defined(REG_D2) freeReg REG_D2 = False # endif # if defined(REG_D3) freeReg REG_D3 = False # endif # if defined(REG_D4) freeReg REG_D4 = False # endif # if defined(REG_D5) freeReg REG_D5 = False # endif # if defined(REG_D6) freeReg REG_D6 = False # endif freeReg _ = True #elif defined(MACHREGS_riscv64) -- zero reg freeReg 0 = False -- link register freeReg 1 = False -- stack pointer freeReg 2 = False -- global pointer freeReg 3 = False -- thread pointer freeReg 4 = False -- frame pointer freeReg 8 = False -- made-up inter-procedural (ip) register -- See Note [The made-up RISCV64 TMP (IP) register] freeReg 31 = False # if defined(REG_Base) freeReg REG_Base = False # endif # if defined(REG_Sp) freeReg REG_Sp = False # endif # if defined(REG_SpLim) freeReg REG_SpLim = False # endif # if defined(REG_Hp) freeReg REG_Hp = False # endif # if defined(REG_HpLim) freeReg REG_HpLim = False # endif # if defined(REG_R1) freeReg REG_R1 = False # endif # if defined(REG_R2) freeReg REG_R2 = False # endif # if defined(REG_R3) freeReg REG_R3 = False # endif # if defined(REG_R4) freeReg REG_R4 = False # endif # if defined(REG_R5) freeReg REG_R5 = False # endif # if defined(REG_R6) freeReg REG_R6 = False # endif # if defined(REG_R7) freeReg REG_R7 = False # endif # if defined(REG_R8) freeReg REG_R8 = False # endif # if defined(REG_F1) freeReg REG_F1 = False # endif # if defined(REG_F2) freeReg REG_F2 = False # endif # if defined(REG_F3) freeReg REG_F3 = False # endif # if defined(REG_F4) freeReg REG_F4 = False # endif # if defined(REG_F5) freeReg REG_F5 = False # endif # if defined(REG_F6) freeReg REG_F6 = False # endif # if defined(REG_D1) freeReg REG_D1 = False # endif # if defined(REG_D2) freeReg REG_D2 = False # endif # if defined(REG_D3) freeReg REG_D3 = False # endif # if defined(REG_D4) freeReg REG_D4 = False # endif # if defined(REG_D5) freeReg REG_D5 = False # endif # if defined(REG_D6) freeReg 
REG_D6 = False # endif freeReg _ = True #else freeReg = panic "freeReg not defined for this platform" #endif ghc-lib-parser-9.12.2.20250421/compiler/FunTypes.h0000644000000000000000000000257207346545000017241 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 2002 * * Things for functions. * * ---------------------------------------------------------------------------*/ #pragma once /* generic - function comes with a small bitmap */ #define ARG_GEN 0 /* generic - function comes with a large bitmap */ #define ARG_GEN_BIG 1 /* BCO - function is really a BCO */ #define ARG_BCO 2 /* * Specialised function types: bitmaps and calling sequences * for these functions are pre-generated: see ghc/utils/genapply and * generated code in ghc/rts/AutoApply.cmm. * * NOTE: other places to change if you change this table: * - utils/genapply/Main.hs: stackApplyTypes * - GHC.StgToCmm.Layout: stdPattern */ #define ARG_NONE 3 #define ARG_N 4 #define ARG_P 5 #define ARG_F 6 #define ARG_D 7 #define ARG_L 8 #define ARG_V16 9 #define ARG_V32 10 #define ARG_V64 11 #define ARG_NN 12 #define ARG_NP 13 #define ARG_PN 14 #define ARG_PP 15 #define ARG_NNN 16 #define ARG_NNP 17 #define ARG_NPN 18 #define ARG_NPP 19 #define ARG_PNN 20 #define ARG_PNP 21 #define ARG_PPN 22 #define ARG_PPP 23 #define ARG_PPPP 24 #define ARG_PPPPP 25 #define ARG_PPPPPP 26 #define ARG_PPPPPPP 27 #define ARG_PPPPPPPP 28 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/0000755000000000000000000000000007346545000017314 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Names.hs0000644000000000000000000036301607346545000020724 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[GHC.Builtin.Names]{Definitions of prelude modules and names} Nota Bene: all Names defined in here should come from the base package, the big-num package or (for plugins) the ghc package. 
- ModuleNames for prelude modules, e.g. pRELUDE_NAME :: ModuleName - Modules for prelude modules e.g. pRELUDE :: Module - Uniques for Ids, DataCons, TyCons and Classes that the compiler "knows about" in some way e.g. orderingTyConKey :: Unique minusClassOpKey :: Unique - Names for Ids, DataCons, TyCons and Classes that the compiler "knows about" in some way e.g. orderingTyConName :: Name minusName :: Name One of these Names contains (a) the module and occurrence name of the thing (b) its Unique The way the compiler "knows about" one of these things is where the type checker or desugarer needs to look it up. For example, when desugaring list comprehensions the desugarer needs to conjure up 'foldr'. It does this by looking up foldrName in the environment. - RdrNames for Ids, DataCons etc that the compiler may emit into generated code (e.g. for deriving). e.g. and_RDR :: RdrName It's not necessary to know the uniques for these guys, only their names Note [Known-key names] ~~~~~~~~~~~~~~~~~~~~~~ It is *very* important that the compiler gives wired-in things and things with "known-key" names the correct Uniques wherever they occur. We have to be careful about this in exactly two places: 1. When we parse some source code, renaming the AST better yield an AST whose Names have the correct uniques 2. When we read an interface file, the read-in gubbins better have the right uniques This is accomplished through a combination of mechanisms: 1. When parsing source code, the RdrName-decorated AST has some RdrNames which are Exact. These are wired-in RdrNames where we could directly tell from the parsed syntax what Name to use. For example, when we parse a [] in a type we can just insert an Exact RdrName Name with the listTyConKey. Currently, I believe this is just an optimisation: it would be equally valid to just output Orig RdrNames that correctly record the module etc we expect the final Name to come from. 
However, were we to eliminate isBuiltInOcc_maybe it would become essential (see point 3). 2. The knownKeyNames (which consist of the basicKnownKeyNames from the module, and those names reachable via the wired-in stuff from GHC.Builtin.Types) are used to initialise the "OrigNameCache" in GHC.Iface.Env. This initialization ensures that when the type checker or renamer (both of which use GHC.Iface.Env) look up an original name (i.e. a pair of a Module and an OccName) for a known-key name they get the correct Unique. This is the most important mechanism for ensuring that known-key stuff gets the right Unique, and is why it is so important to place your known-key names in the appropriate lists. 3. For "infinite families" of known-key names (i.e. tuples and sums), we have to be extra careful. Because there are an infinite number of these things, we cannot add them to the list of known-key names used to initialise the OrigNameCache. Instead, we have to rely on never having to look them up in that cache. See Note [Infinite families of known-key names] for details. Note [Infinite families of known-key names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Infinite families of known-key things (e.g. tuples and sums) pose a tricky problem: we can't add them to the knownKeyNames finite map which we use to ensure that, e.g., a reference to (,) gets assigned the right unique (if this doesn't sound familiar see Note [Known-key names] above). We instead handle tuples and sums separately from the "vanilla" known-key things, a) The parser recognises them specially and generates an Exact Name (hence not looked up in the orig-name cache) b) The known infinite families of names are specially serialised by GHC.Iface.Binary.putName, with that special treatment detected when we read back to ensure that we get back to the correct uniques. See Note [Symbol table representation of names] in GHC.Iface.Binary and Note [How tuples work] in GHC.Builtin.Types. 
Most of the infinite families cannot occur in source code, so mechanisms (a) and (b) suffice to ensure that they always have the right Unique. In particular, implicit param TyCon names, constraint tuples and Any TyCons cannot be mentioned by the user. For those things that *can* appear in source programs, c) GHC.Iface.Env.lookupOrigNameCache uses isBuiltInOcc_maybe to map built-in syntax directly onto the corresponding name, rather than trying to find it in the original-name cache. See also Note [Built-in syntax and the OrigNameCache] Note that one-tuples are an exception to the rule, as they do get assigned known keys. See Note [One-tuples] (Wrinkle: Make boxed one-tuple names have known keys) in GHC.Builtin.Types. -} {-# LANGUAGE CPP #-} module GHC.Builtin.Names ( Unique, Uniquable(..), hasKey, -- Re-exported for convenience ----------------------------------------------------------- module GHC.Builtin.Names, -- A huge bunch of (a) Names, e.g. intTyConName -- (b) Uniques e.g. intTyConKey -- (c) Groups of classes and types -- (d) miscellaneous things -- So many that we export them all ) where import GHC.Prelude import GHC.Unit.Types import GHC.Types.Name.Occurrence import GHC.Types.Name.Reader import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Data.FastString import GHC.Data.List.Infinite (Infinite (..)) import qualified GHC.Data.List.Infinite as Inf import Language.Haskell.Syntax.Module.Name {- ************************************************************************ * * allNameStrings * * ************************************************************************ -} allNameStrings :: Infinite String -- Infinite list of a,b,c...z, aa, ab, ac, ... etc allNameStrings = Inf.allListsOf ['a'..'z'] allNameStringList :: [String] -- Infinite list of a,b,c...z, aa, ab, ac, ... 
etc allNameStringList = Inf.toList allNameStrings {- ************************************************************************ * * \subsection{Local Names} * * ************************************************************************ This *local* name is used by the interactive stuff -} itName :: Unique -> SrcSpan -> Name itName uniq loc = mkInternalName uniq (mkOccNameFS varName (fsLit "it")) loc -- mkUnboundName makes a place-holder Name; it shouldn't be looked at except possibly -- during compiler debugging. mkUnboundName :: OccName -> Name mkUnboundName occ = mkInternalName unboundKey occ noSrcSpan isUnboundName :: Name -> Bool isUnboundName name = name `hasKey` unboundKey {- ************************************************************************ * * \subsection{Known key Names} * * ************************************************************************ This section tells what the compiler knows about the association of names with uniques. These ones are the *non* wired-in ones. The wired in ones are defined in GHC.Builtin.Types etc. -} basicKnownKeyNames :: [Name] -- See Note [Known-key names] basicKnownKeyNames = genericTyConNames ++ [ -- Classes. 
*Must* include: -- classes that are grabbed by key (e.g., eqClassKey) -- classes in "Class.standardClassKeys" (quite a few) eqClassName, -- mentioned, derivable ordClassName, -- derivable boundedClassName, -- derivable numClassName, -- mentioned, numeric enumClassName, -- derivable monadClassName, functorClassName, realClassName, -- numeric integralClassName, -- numeric fractionalClassName, -- numeric floatingClassName, -- numeric realFracClassName, -- numeric realFloatClassName, -- numeric dataClassName, isStringClassName, applicativeClassName, alternativeClassName, foldableClassName, traversableClassName, semigroupClassName, sappendName, monoidClassName, memptyName, mappendName, mconcatName, -- The IO type ioTyConName, ioDataConName, runMainIOName, runRWName, -- Type representation types trModuleTyConName, trModuleDataConName, trNameTyConName, trNameSDataConName, trNameDDataConName, trTyConTyConName, trTyConDataConName, -- Typeable typeableClassName, typeRepTyConName, someTypeRepTyConName, someTypeRepDataConName, kindRepTyConName, kindRepTyConAppDataConName, kindRepVarDataConName, kindRepAppDataConName, kindRepFunDataConName, kindRepTYPEDataConName, kindRepTypeLitSDataConName, kindRepTypeLitDDataConName, typeLitSortTyConName, typeLitSymbolDataConName, typeLitNatDataConName, typeLitCharDataConName, typeRepIdName, mkTrTypeName, mkTrConName, mkTrAppName, mkTrFunName, typeSymbolTypeRepName, typeNatTypeRepName, typeCharTypeRepName, trGhcPrimModuleName, -- KindReps for common cases starKindRepName, starArrStarKindRepName, starArrStarArrStarKindRepName, constraintKindRepName, -- WithDict withDictClassName, -- DataToTag dataToTagClassName, -- seq# seqHashName, -- Dynamic toDynName, -- Numeric stuff negateName, minusName, geName, eqName, mkRationalBase2Name, mkRationalBase10Name, -- Conversion functions rationalTyConName, ratioTyConName, ratioDataConName, fromRationalName, fromIntegerName, toIntegerName, toRationalName, fromIntegralName, realToFracName, -- Int# stuff 
divIntName, modIntName, -- String stuff fromStringName, -- Enum stuff enumFromName, enumFromThenName, enumFromThenToName, enumFromToName, -- Applicative stuff pureAName, apAName, thenAName, -- Functor stuff fmapName, -- Monad stuff thenIOName, bindIOName, returnIOName, failIOName, bindMName, thenMName, returnMName, joinMName, -- MonadFail monadFailClassName, failMName, -- MonadFix monadFixClassName, mfixName, -- Arrow stuff arrAName, composeAName, firstAName, appAName, choiceAName, loopAName, -- Ix stuff ixClassName, -- Show stuff showClassName, -- Read stuff readClassName, -- Stable pointers newStablePtrName, -- GHC Extensions considerAccessibleName, -- Strings and lists unpackCStringName, unpackCStringUtf8Name, unpackCStringAppendName, unpackCStringAppendUtf8Name, unpackCStringFoldrName, unpackCStringFoldrUtf8Name, cstringLengthName, -- Overloaded lists isListClassName, fromListName, fromListNName, toListName, -- Non-empty lists nonEmptyTyConName, -- Overloaded record dot, record update getFieldName, setFieldName, -- List operations concatName, filterName, mapName, zipName, foldrName, buildName, augmentName, appendName, -- FFI primitive types that are not wired-in. 
stablePtrTyConName, ptrTyConName, funPtrTyConName, constPtrConName, int8TyConName, int16TyConName, int32TyConName, int64TyConName, word8TyConName, word16TyConName, word32TyConName, word64TyConName, jsvalTyConName, -- Others otherwiseIdName, inlineIdName, eqStringName, assertName, assertErrorName, traceName, printName, dollarName, -- ghc-bignum integerFromNaturalName, integerToNaturalClampName, integerToNaturalThrowName, integerToNaturalName, integerToWordName, integerToIntName, integerToWord64Name, integerToInt64Name, integerFromWordName, integerFromWord64Name, integerFromInt64Name, integerAddName, integerMulName, integerSubName, integerNegateName, integerAbsName, integerPopCountName, integerQuotName, integerRemName, integerDivName, integerModName, integerDivModName, integerQuotRemName, integerEncodeFloatName, integerEncodeDoubleName, integerGcdName, integerLcmName, integerAndName, integerOrName, integerXorName, integerComplementName, integerBitName, integerTestBitName, integerShiftLName, integerShiftRName, naturalToWordName, naturalPopCountName, naturalShiftRName, naturalShiftLName, naturalAddName, naturalSubName, naturalSubThrowName, naturalSubUnsafeName, naturalMulName, naturalQuotRemName, naturalQuotName, naturalRemName, naturalAndName, naturalAndNotName, naturalOrName, naturalXorName, naturalTestBitName, naturalBitName, naturalGcdName, naturalLcmName, naturalLog2Name, naturalLogBaseWordName, naturalLogBaseName, naturalPowModName, naturalSizeInBaseName, bignatEqName, -- Float/Double integerToFloatName, integerToDoubleName, naturalToFloatName, naturalToDoubleName, rationalToFloatName, rationalToDoubleName, -- Other classes monadPlusClassName, -- Type-level naturals knownNatClassName, knownSymbolClassName, knownCharClassName, -- Overloaded labels fromLabelClassOpName, -- Implicit Parameters ipClassName, -- Overloaded record fields hasFieldClassName, -- ExceptionContext exceptionContextTyConName, emptyExceptionContextName, -- Call Stacks callStackTyConName, 
emptyCallStackName, pushCallStackName, -- Source Locations srcLocDataConName, -- Annotation type checking toAnnotationWrapperName -- The SPEC type for SpecConstr , specTyConName -- The Either type , eitherTyConName, leftDataConName, rightDataConName -- The Void type , voidTyConName -- Plugins , pluginTyConName , frontendPluginTyConName -- Generics , genClassName, gen1ClassName , datatypeClassName, constructorClassName, selectorClassName -- Monad comprehensions , guardMName , liftMName , mzipName -- GHCi Sandbox , ghciIoClassName, ghciStepIoMName -- StaticPtr , makeStaticName , staticPtrTyConName , staticPtrDataConName, staticPtrInfoDataConName , fromStaticPtrName -- Fingerprint , fingerprintDataConName -- Custom type errors , errorMessageTypeErrorFamName , typeErrorTextDataConName , typeErrorAppendDataConName , typeErrorVAppendDataConName , typeErrorShowTypeDataConName -- "Unsatisfiable" constraint , unsatisfiableClassName , unsatisfiableIdName -- Unsafe coercion proofs , unsafeEqualityProofName , unsafeEqualityTyConName , unsafeReflDataConName , unsafeCoercePrimName ] genericTyConNames :: [Name] genericTyConNames = [ v1TyConName, u1TyConName, par1TyConName, rec1TyConName, k1TyConName, m1TyConName, sumTyConName, prodTyConName, compTyConName, rTyConName, dTyConName, cTyConName, sTyConName, rec0TyConName, d1TyConName, c1TyConName, s1TyConName, repTyConName, rep1TyConName, uRecTyConName, uAddrTyConName, uCharTyConName, uDoubleTyConName, uFloatTyConName, uIntTyConName, uWordTyConName, prefixIDataConName, infixIDataConName, leftAssociativeDataConName, rightAssociativeDataConName, notAssociativeDataConName, sourceUnpackDataConName, sourceNoUnpackDataConName, noSourceUnpackednessDataConName, sourceLazyDataConName, sourceStrictDataConName, noSourceStrictnessDataConName, decidedLazyDataConName, decidedStrictDataConName, decidedUnpackDataConName, metaDataDataConName, metaConsDataConName, metaSelDataConName ] {- 
************************************************************************ * * \subsection{Module names} * * ************************************************************************ --MetaHaskell Extension Add a new module here -} gHC_PRIM, gHC_PRIM_PANIC, gHC_TYPES, gHC_INTERNAL_DATA_DATA, gHC_MAGIC, gHC_MAGIC_DICT, gHC_CLASSES, gHC_PRIMOPWRAPPERS :: Module gHC_PRIM = mkPrimModule (fsLit "GHC.Prim") -- Primitive types and values gHC_PRIM_PANIC = mkPrimModule (fsLit "GHC.Prim.Panic") gHC_TYPES = mkPrimModule (fsLit "GHC.Types") gHC_MAGIC = mkPrimModule (fsLit "GHC.Magic") gHC_MAGIC_DICT = mkPrimModule (fsLit "GHC.Magic.Dict") gHC_CSTRING = mkPrimModule (fsLit "GHC.CString") gHC_CLASSES = mkPrimModule (fsLit "GHC.Classes") gHC_PRIMOPWRAPPERS = mkPrimModule (fsLit "GHC.PrimopWrappers") gHC_INTERNAL_TUPLE = mkPrimModule (fsLit "GHC.Tuple") gHC_INTERNAL_CONTROL_MONAD_ZIP :: Module gHC_INTERNAL_CONTROL_MONAD_ZIP = mkGhcInternalModule (fsLit "GHC.Internal.Control.Monad.Zip") gHC_INTERNAL_NUM_INTEGER, gHC_INTERNAL_NUM_NATURAL, gHC_INTERNAL_NUM_BIGNAT :: Module gHC_INTERNAL_NUM_INTEGER = mkBignumModule (fsLit "GHC.Num.Integer") gHC_INTERNAL_NUM_NATURAL = mkBignumModule (fsLit "GHC.Num.Natural") gHC_INTERNAL_NUM_BIGNAT = mkBignumModule (fsLit "GHC.Num.BigNat") gHC_INTERNAL_BASE, gHC_INTERNAL_ENUM, gHC_INTERNAL_GHCI, gHC_INTERNAL_GHCI_HELPERS, gHC_CSTRING, gHC_INTERNAL_DATA_STRING, gHC_INTERNAL_SHOW, gHC_INTERNAL_READ, gHC_INTERNAL_NUM, gHC_INTERNAL_MAYBE, gHC_INTERNAL_LIST, gHC_INTERNAL_TUPLE, gHC_INTERNAL_DATA_EITHER, gHC_INTERNAL_DATA_FOLDABLE, gHC_INTERNAL_DATA_TRAVERSABLE, gHC_INTERNAL_EXCEPTION_CONTEXT, gHC_INTERNAL_CONC, gHC_INTERNAL_IO, gHC_INTERNAL_IO_Exception, gHC_INTERNAL_ST, gHC_INTERNAL_IX, gHC_INTERNAL_STABLE, gHC_INTERNAL_PTR, gHC_INTERNAL_ERR, gHC_INTERNAL_REAL, gHC_INTERNAL_FLOAT, gHC_INTERNAL_TOP_HANDLER, gHC_INTERNAL_SYSTEM_IO, gHC_INTERNAL_DYNAMIC, gHC_INTERNAL_TYPEABLE, gHC_INTERNAL_TYPEABLE_INTERNAL, gHC_INTERNAL_GENERICS, gHC_INTERNAL_READ_PREC, 
gHC_INTERNAL_LEX, gHC_INTERNAL_INT, gHC_INTERNAL_WORD, gHC_INTERNAL_MONAD, gHC_INTERNAL_MONAD_FIX, gHC_INTERNAL_MONAD_FAIL, gHC_INTERNAL_ARROW, gHC_INTERNAL_DESUGAR, gHC_INTERNAL_RANDOM, gHC_INTERNAL_EXTS, gHC_INTERNAL_IS_LIST, gHC_INTERNAL_CONTROL_EXCEPTION_BASE, gHC_INTERNAL_TYPEERROR, gHC_INTERNAL_TYPELITS, gHC_INTERNAL_TYPELITS_INTERNAL, gHC_INTERNAL_TYPENATS, gHC_INTERNAL_TYPENATS_INTERNAL, gHC_INTERNAL_DATA_COERCE, gHC_INTERNAL_DEBUG_TRACE, gHC_INTERNAL_UNSAFE_COERCE, gHC_INTERNAL_FOREIGN_C_CONSTPTR :: Module gHC_INTERNAL_BASE = mkGhcInternalModule (fsLit "GHC.Internal.Base") gHC_INTERNAL_ENUM = mkGhcInternalModule (fsLit "GHC.Internal.Enum") gHC_INTERNAL_GHCI = mkGhcInternalModule (fsLit "GHC.Internal.GHCi") gHC_INTERNAL_GHCI_HELPERS = mkGhcInternalModule (fsLit "GHC.Internal.GHCi.Helpers") gHC_INTERNAL_SHOW = mkGhcInternalModule (fsLit "GHC.Internal.Show") gHC_INTERNAL_READ = mkGhcInternalModule (fsLit "GHC.Internal.Read") gHC_INTERNAL_NUM = mkGhcInternalModule (fsLit "GHC.Internal.Num") gHC_INTERNAL_MAYBE = mkGhcInternalModule (fsLit "GHC.Internal.Maybe") gHC_INTERNAL_LIST = mkGhcInternalModule (fsLit "GHC.Internal.List") gHC_INTERNAL_DATA_EITHER = mkGhcInternalModule (fsLit "GHC.Internal.Data.Either") gHC_INTERNAL_DATA_STRING = mkGhcInternalModule (fsLit "GHC.Internal.Data.String") gHC_INTERNAL_DATA_FOLDABLE = mkGhcInternalModule (fsLit "GHC.Internal.Data.Foldable") gHC_INTERNAL_DATA_TRAVERSABLE = mkGhcInternalModule (fsLit "GHC.Internal.Data.Traversable") gHC_INTERNAL_CONC = mkGhcInternalModule (fsLit "GHC.Internal.GHC.Conc") gHC_INTERNAL_IO = mkGhcInternalModule (fsLit "GHC.Internal.IO") gHC_INTERNAL_IO_Exception = mkGhcInternalModule (fsLit "GHC.Internal.IO.Exception") gHC_INTERNAL_ST = mkGhcInternalModule (fsLit "GHC.Internal.ST") gHC_INTERNAL_IX = mkGhcInternalModule (fsLit "GHC.Internal.Ix") gHC_INTERNAL_STABLE = mkGhcInternalModule (fsLit "GHC.Internal.Stable") gHC_INTERNAL_PTR = mkGhcInternalModule (fsLit "GHC.Internal.Ptr") gHC_INTERNAL_ERR = 
mkGhcInternalModule (fsLit "GHC.Internal.Err") gHC_INTERNAL_REAL = mkGhcInternalModule (fsLit "GHC.Internal.Real") gHC_INTERNAL_FLOAT = mkGhcInternalModule (fsLit "GHC.Internal.Float") gHC_INTERNAL_TOP_HANDLER = mkGhcInternalModule (fsLit "GHC.Internal.TopHandler") gHC_INTERNAL_SYSTEM_IO = mkGhcInternalModule (fsLit "GHC.Internal.System.IO") gHC_INTERNAL_DYNAMIC = mkGhcInternalModule (fsLit "GHC.Internal.Data.Dynamic") gHC_INTERNAL_TYPEABLE = mkGhcInternalModule (fsLit "GHC.Internal.Data.Typeable") gHC_INTERNAL_TYPEABLE_INTERNAL = mkGhcInternalModule (fsLit "GHC.Internal.Data.Typeable.Internal") gHC_INTERNAL_DATA_DATA = mkGhcInternalModule (fsLit "GHC.Internal.Data.Data") gHC_INTERNAL_READ_PREC = mkGhcInternalModule (fsLit "GHC.Internal.Text.ParserCombinators.ReadPrec") gHC_INTERNAL_LEX = mkGhcInternalModule (fsLit "GHC.Internal.Text.Read.Lex") gHC_INTERNAL_INT = mkGhcInternalModule (fsLit "GHC.Internal.Int") gHC_INTERNAL_WORD = mkGhcInternalModule (fsLit "GHC.Internal.Word") gHC_INTERNAL_MONAD = mkGhcInternalModule (fsLit "GHC.Internal.Control.Monad") gHC_INTERNAL_MONAD_FIX = mkGhcInternalModule (fsLit "GHC.Internal.Control.Monad.Fix") gHC_INTERNAL_MONAD_FAIL = mkGhcInternalModule (fsLit "GHC.Internal.Control.Monad.Fail") gHC_INTERNAL_ARROW = mkGhcInternalModule (fsLit "GHC.Internal.Control.Arrow") gHC_INTERNAL_DESUGAR = mkGhcInternalModule (fsLit "GHC.Internal.Desugar") gHC_INTERNAL_RANDOM = mkGhcInternalModule (fsLit "GHC.Internal.System.Random") gHC_INTERNAL_EXTS = mkGhcInternalModule (fsLit "GHC.Internal.Exts") gHC_INTERNAL_IS_LIST = mkGhcInternalModule (fsLit "GHC.Internal.IsList") gHC_INTERNAL_CONTROL_EXCEPTION_BASE = mkGhcInternalModule (fsLit "GHC.Internal.Control.Exception.Base") gHC_INTERNAL_EXCEPTION_CONTEXT = mkGhcInternalModule (fsLit "GHC.Internal.Exception.Context") gHC_INTERNAL_GENERICS = mkGhcInternalModule (fsLit "GHC.Internal.Generics") gHC_INTERNAL_TYPEERROR = mkGhcInternalModule (fsLit "GHC.Internal.TypeError") gHC_INTERNAL_TYPELITS = 
mkGhcInternalModule (fsLit "GHC.Internal.TypeLits") gHC_INTERNAL_TYPELITS_INTERNAL = mkGhcInternalModule (fsLit "GHC.Internal.TypeLits.Internal") gHC_INTERNAL_TYPENATS = mkGhcInternalModule (fsLit "GHC.Internal.TypeNats") gHC_INTERNAL_TYPENATS_INTERNAL = mkGhcInternalModule (fsLit "GHC.Internal.TypeNats.Internal") gHC_INTERNAL_DATA_COERCE = mkGhcInternalModule (fsLit "GHC.Internal.Data.Coerce") gHC_INTERNAL_DEBUG_TRACE = mkGhcInternalModule (fsLit "GHC.Internal.Debug.Trace") gHC_INTERNAL_UNSAFE_COERCE = mkGhcInternalModule (fsLit "GHC.Internal.Unsafe.Coerce") gHC_INTERNAL_FOREIGN_C_CONSTPTR = mkGhcInternalModule (fsLit "GHC.Internal.Foreign.C.ConstPtr") gHC_INTERNAL_SRCLOC :: Module gHC_INTERNAL_SRCLOC = mkGhcInternalModule (fsLit "GHC.Internal.SrcLoc") gHC_INTERNAL_STACK, gHC_INTERNAL_STACK_TYPES :: Module gHC_INTERNAL_STACK = mkGhcInternalModule (fsLit "GHC.Internal.Stack") gHC_INTERNAL_STACK_TYPES = mkGhcInternalModule (fsLit "GHC.Internal.Stack.Types") gHC_INTERNAL_STATICPTR :: Module gHC_INTERNAL_STATICPTR = mkGhcInternalModule (fsLit "GHC.Internal.StaticPtr") gHC_INTERNAL_STATICPTR_INTERNAL :: Module gHC_INTERNAL_STATICPTR_INTERNAL = mkGhcInternalModule (fsLit "GHC.Internal.StaticPtr.Internal") gHC_INTERNAL_FINGERPRINT_TYPE :: Module gHC_INTERNAL_FINGERPRINT_TYPE = mkGhcInternalModule (fsLit "GHC.Internal.Fingerprint.Type") gHC_INTERNAL_OVER_LABELS :: Module gHC_INTERNAL_OVER_LABELS = mkGhcInternalModule (fsLit "GHC.Internal.OverloadedLabels") gHC_INTERNAL_RECORDS :: Module gHC_INTERNAL_RECORDS = mkGhcInternalModule (fsLit "GHC.Internal.Records") rOOT_MAIN :: Module rOOT_MAIN = mkMainModule (fsLit ":Main") -- Root module for initialisation mkInteractiveModule :: String -> Module -- (mkInteractiveMoudule "9") makes module 'interactive:Ghci9' mkInteractiveModule n = mkModule interactiveUnit (mkModuleName ("Ghci" ++ n)) pRELUDE_NAME, mAIN_NAME :: ModuleName pRELUDE_NAME = mkModuleNameFS (fsLit "Prelude") mAIN_NAME = mkModuleNameFS (fsLit "Main") mkPrimModule :: 
FastString -> Module mkPrimModule m = mkModule primUnit (mkModuleNameFS m) mkBignumModule :: FastString -> Module mkBignumModule m = mkModule bignumUnit (mkModuleNameFS m) mkGhcInternalModule :: FastString -> Module mkGhcInternalModule m = mkGhcInternalModule_ (mkModuleNameFS m) mkGhcInternalModule_ :: ModuleName -> Module mkGhcInternalModule_ m = mkModule ghcInternalUnit m mkThisGhcModule :: FastString -> Module mkThisGhcModule m = mkThisGhcModule_ (mkModuleNameFS m) mkThisGhcModule_ :: ModuleName -> Module mkThisGhcModule_ m = mkModule thisGhcUnit m mkMainModule :: FastString -> Module mkMainModule m = mkModule mainUnit (mkModuleNameFS m) mkMainModule_ :: ModuleName -> Module mkMainModule_ m = mkModule mainUnit m {- ************************************************************************ * * RdrNames * * ************************************************************************ -} main_RDR_Unqual :: RdrName main_RDR_Unqual = mkUnqual varName (fsLit "main") -- We definitely don't want an Orig RdrName, because -- main might, in principle, be imported into module Main eq_RDR, ge_RDR, le_RDR, lt_RDR, gt_RDR, compare_RDR, ltTag_RDR, eqTag_RDR, gtTag_RDR :: RdrName eq_RDR = nameRdrName eqName ge_RDR = nameRdrName geName le_RDR = varQual_RDR gHC_CLASSES (fsLit "<=") lt_RDR = varQual_RDR gHC_CLASSES (fsLit "<") gt_RDR = varQual_RDR gHC_CLASSES (fsLit ">") compare_RDR = varQual_RDR gHC_CLASSES (fsLit "compare") ltTag_RDR = nameRdrName ordLTDataConName eqTag_RDR = nameRdrName ordEQDataConName gtTag_RDR = nameRdrName ordGTDataConName map_RDR, append_RDR :: RdrName map_RDR = nameRdrName mapName append_RDR = nameRdrName appendName foldr_RDR, build_RDR, returnM_RDR, bindM_RDR, failM_RDR :: RdrName foldr_RDR = nameRdrName foldrName build_RDR = nameRdrName buildName returnM_RDR = nameRdrName returnMName bindM_RDR = nameRdrName bindMName failM_RDR = nameRdrName failMName left_RDR, right_RDR :: RdrName left_RDR = nameRdrName leftDataConName right_RDR = nameRdrName rightDataConName 
fromEnum_RDR, toEnum_RDR, toEnumError_RDR, succError_RDR, predError_RDR, enumIntToWord_RDR :: RdrName fromEnum_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "fromEnum") toEnum_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "toEnum") toEnumError_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "toEnumError") succError_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "succError") predError_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "predError") enumIntToWord_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "enumIntToWord") enumFrom_RDR, enumFromTo_RDR, enumFromThen_RDR, enumFromThenTo_RDR :: RdrName enumFrom_RDR = nameRdrName enumFromName enumFromTo_RDR = nameRdrName enumFromToName enumFromThen_RDR = nameRdrName enumFromThenName enumFromThenTo_RDR = nameRdrName enumFromThenToName times_RDR, plus_RDR :: RdrName times_RDR = varQual_RDR gHC_INTERNAL_NUM (fsLit "*") plus_RDR = varQual_RDR gHC_INTERNAL_NUM (fsLit "+") compose_RDR :: RdrName compose_RDR = varQual_RDR gHC_INTERNAL_BASE (fsLit ".") not_RDR, dataToTag_RDR, succ_RDR, pred_RDR, minBound_RDR, maxBound_RDR, and_RDR, range_RDR, inRange_RDR, index_RDR, unsafeIndex_RDR, unsafeRangeSize_RDR :: RdrName and_RDR = varQual_RDR gHC_CLASSES (fsLit "&&") not_RDR = varQual_RDR gHC_CLASSES (fsLit "not") dataToTag_RDR = varQual_RDR gHC_MAGIC (fsLit "dataToTag#") succ_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "succ") pred_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "pred") minBound_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "minBound") maxBound_RDR = varQual_RDR gHC_INTERNAL_ENUM (fsLit "maxBound") range_RDR = varQual_RDR gHC_INTERNAL_IX (fsLit "range") inRange_RDR = varQual_RDR gHC_INTERNAL_IX (fsLit "inRange") index_RDR = varQual_RDR gHC_INTERNAL_IX (fsLit "index") unsafeIndex_RDR = varQual_RDR gHC_INTERNAL_IX (fsLit "unsafeIndex") unsafeRangeSize_RDR = varQual_RDR gHC_INTERNAL_IX (fsLit "unsafeRangeSize") readList_RDR, readListDefault_RDR, readListPrec_RDR, readListPrecDefault_RDR, readPrec_RDR, parens_RDR, choose_RDR, lexP_RDR, expectP_RDR :: 
RdrName readList_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readList") readListDefault_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readListDefault") readListPrec_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readListPrec") readListPrecDefault_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readListPrecDefault") readPrec_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readPrec") parens_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "parens") choose_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "choose") lexP_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "lexP") expectP_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "expectP") readField_RDR, readFieldHash_RDR, readSymField_RDR :: RdrName readField_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readField") readFieldHash_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readFieldHash") readSymField_RDR = varQual_RDR gHC_INTERNAL_READ (fsLit "readSymField") punc_RDR, ident_RDR, symbol_RDR :: RdrName punc_RDR = dataQual_RDR gHC_INTERNAL_LEX (fsLit "Punc") ident_RDR = dataQual_RDR gHC_INTERNAL_LEX (fsLit "Ident") symbol_RDR = dataQual_RDR gHC_INTERNAL_LEX (fsLit "Symbol") step_RDR, alt_RDR, reset_RDR, prec_RDR, pfail_RDR :: RdrName step_RDR = varQual_RDR gHC_INTERNAL_READ_PREC (fsLit "step") alt_RDR = varQual_RDR gHC_INTERNAL_READ_PREC (fsLit "+++") reset_RDR = varQual_RDR gHC_INTERNAL_READ_PREC (fsLit "reset") prec_RDR = varQual_RDR gHC_INTERNAL_READ_PREC (fsLit "prec") pfail_RDR = varQual_RDR gHC_INTERNAL_READ_PREC (fsLit "pfail") showsPrec_RDR, shows_RDR, showString_RDR, showSpace_RDR, showCommaSpace_RDR, showParen_RDR :: RdrName showsPrec_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "showsPrec") shows_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "shows") showString_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "showString") showSpace_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "showSpace") showCommaSpace_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "showCommaSpace") showParen_RDR = varQual_RDR gHC_INTERNAL_SHOW (fsLit "showParen") error_RDR :: RdrName 
error_RDR = varQual_RDR gHC_INTERNAL_ERR (fsLit "error") -- Generics (constructors and functions) u1DataCon_RDR, par1DataCon_RDR, rec1DataCon_RDR, k1DataCon_RDR, m1DataCon_RDR, l1DataCon_RDR, r1DataCon_RDR, prodDataCon_RDR, comp1DataCon_RDR, unPar1_RDR, unRec1_RDR, unK1_RDR, unComp1_RDR, from_RDR, from1_RDR, to_RDR, to1_RDR, datatypeName_RDR, moduleName_RDR, packageName_RDR, isNewtypeName_RDR, conName_RDR, conFixity_RDR, conIsRecord_RDR, selName_RDR, prefixDataCon_RDR, infixDataCon_RDR, leftAssocDataCon_RDR, rightAssocDataCon_RDR, notAssocDataCon_RDR, uAddrDataCon_RDR, uCharDataCon_RDR, uDoubleDataCon_RDR, uFloatDataCon_RDR, uIntDataCon_RDR, uWordDataCon_RDR, uAddrHash_RDR, uCharHash_RDR, uDoubleHash_RDR, uFloatHash_RDR, uIntHash_RDR, uWordHash_RDR :: RdrName u1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "U1") par1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "Par1") rec1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "Rec1") k1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "K1") m1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "M1") l1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "L1") r1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "R1") prodDataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit ":*:") comp1DataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "Comp1") unPar1_RDR = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "Par1") (fsLit "unPar1") unRec1_RDR = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "Rec1") (fsLit "unRec1") unK1_RDR = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "K1") (fsLit "unK1") unComp1_RDR = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "Comp1") (fsLit "unComp1") from_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "from") from1_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "from1") to_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "to") to1_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "to1") datatypeName_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "datatypeName") 
-- Remaining GHC.Generics metadata methods (Datatype/Constructor/Selector
-- class members) and data constructors, as original RdrNames.
moduleName_RDR    = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "moduleName")
packageName_RDR   = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "packageName")
isNewtypeName_RDR = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "isNewtype")
selName_RDR       = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "selName")
conName_RDR       = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "conName")
conFixity_RDR     = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "conFixity")
conIsRecord_RDR   = varQual_RDR gHC_INTERNAL_GENERICS (fsLit "conIsRecord")

prefixDataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "Prefix")
infixDataCon_RDR  = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "Infix")
-- The associativity RdrNames are derived from the known-key Names defined
-- further down, rather than built afresh.
leftAssocDataCon_RDR  = nameRdrName leftAssociativeDataConName
rightAssocDataCon_RDR = nameRdrName rightAssociativeDataConName
notAssocDataCon_RDR   = nameRdrName notAssociativeDataConName

-- Unboxed-field representation constructors and their selectors
uAddrDataCon_RDR   = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UAddr")
uCharDataCon_RDR   = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UChar")
uDoubleDataCon_RDR = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UDouble")
uFloatDataCon_RDR  = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UFloat")
uIntDataCon_RDR    = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UInt")
uWordDataCon_RDR   = dataQual_RDR gHC_INTERNAL_GENERICS (fsLit "UWord")
uAddrHash_RDR   = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UAddr")   (fsLit "uAddr#")
uCharHash_RDR   = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UChar")   (fsLit "uChar#")
uDoubleHash_RDR = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UDouble") (fsLit "uDouble#")
uFloatHash_RDR  = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UFloat")  (fsLit "uFloat#")
uIntHash_RDR    = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UInt")    (fsLit "uInt#")
uWordHash_RDR   = fieldQual_RDR gHC_INTERNAL_GENERICS (fsLit "UWord")   (fsLit "uWord#")

-- RdrNames for Functor/Applicative/Foldable/Traversable/Monoid members
fmap_RDR, replace_RDR, pure_RDR, ap_RDR, liftA2_RDR, foldable_foldr_RDR,
  foldMap_RDR, null_RDR, all_RDR, traverse_RDR, mempty_RDR,
  mappend_RDR :: RdrName
fmap_RDR           = nameRdrName fmapName
replace_RDR        = varQual_RDR gHC_INTERNAL_BASE (fsLit "<$")
pure_RDR           = nameRdrName pureAName
ap_RDR             = nameRdrName apAName
liftA2_RDR         = varQual_RDR gHC_INTERNAL_BASE (fsLit "liftA2")
foldable_foldr_RDR = varQual_RDR gHC_INTERNAL_DATA_FOLDABLE (fsLit "foldr")
foldMap_RDR        = varQual_RDR gHC_INTERNAL_DATA_FOLDABLE (fsLit "foldMap")
null_RDR           = varQual_RDR gHC_INTERNAL_DATA_FOLDABLE (fsLit "null")
all_RDR            = varQual_RDR gHC_INTERNAL_DATA_FOLDABLE (fsLit "all")
traverse_RDR       = varQual_RDR gHC_INTERNAL_DATA_TRAVERSABLE (fsLit "traverse")
mempty_RDR         = nameRdrName memptyName
mappend_RDR        = nameRdrName mappendName

----------------------
-- Helpers: build an Original (module-qualified) RdrName in the variable,
-- type-constructor, class, or data-constructor name space respectively.
varQual_RDR, tcQual_RDR, clsQual_RDR, dataQual_RDR
  :: Module -> FastString -> RdrName
varQual_RDR  mod str = mkOrig mod (mkOccNameFS varName str)
tcQual_RDR   mod str = mkOrig mod (mkOccNameFS tcName str)
clsQual_RDR  mod str = mkOrig mod (mkOccNameFS clsName str)
dataQual_RDR mod str = mkOrig mod (mkOccNameFS dataName str)

-- As varQual_RDR, but for a record field: the field's OccName is placed in
-- the field name space of its constructor `con`.
fieldQual_RDR :: Module -> FastString -> FastString -> RdrName
fieldQual_RDR mod con str = mkOrig mod (mkOccNameFS (fieldName con) str)

{-
************************************************************************
*                                                                      *
\subsection{Known-key names}
*                                                                      *
************************************************************************

Many of these Names are not really "built in", but some parts of the
compiler (notably the deriving mechanism) need to mention their names,
and it's convenient to write them all down in one place.
-}

wildCardName :: Name
wildCardName = mkSystemVarName wildCardKey (fsLit "wild")

runMainIOName, runRWName :: Name
runMainIOName = varQual gHC_INTERNAL_TOP_HANDLER (fsLit "runMainIO") runMainKey
runRWName     = varQual gHC_MAGIC (fsLit "runRW#") runRWKey

orderingTyConName, ordLTDataConName, ordEQDataConName, ordGTDataConName :: Name
orderingTyConName = tcQual gHC_TYPES (fsLit "Ordering") orderingTyConKey
ordLTDataConName  = dcQual gHC_TYPES (fsLit "LT") ordLTDataConKey
ordEQDataConName  = dcQual gHC_TYPES (fsLit "EQ") ordEQDataConKey
ordGTDataConName  = dcQual gHC_TYPES (fsLit "GT") ordGTDataConKey

specTyConName :: Name
specTyConName = tcQual gHC_TYPES (fsLit "SPEC") specTyConKey

eitherTyConName, leftDataConName, rightDataConName :: Name
eitherTyConName  = tcQual gHC_INTERNAL_DATA_EITHER (fsLit "Either") eitherTyConKey
leftDataConName  = dcQual gHC_INTERNAL_DATA_EITHER (fsLit "Left") leftDataConKey
rightDataConName = dcQual gHC_INTERNAL_DATA_EITHER (fsLit "Right") rightDataConKey

voidTyConName :: Name
voidTyConName = tcQual gHC_INTERNAL_BASE (fsLit "Void") voidTyConKey

-- Generics (types)
v1TyConName, u1TyConName, par1TyConName, rec1TyConName,
  k1TyConName, m1TyConName, sumTyConName, prodTyConName,
  compTyConName, rTyConName, dTyConName,
  cTyConName, sTyConName, rec0TyConName,
  d1TyConName, c1TyConName, s1TyConName,
  repTyConName, rep1TyConName, uRecTyConName,
  uAddrTyConName, uCharTyConName, uDoubleTyConName,
  uFloatTyConName, uIntTyConName, uWordTyConName,
  prefixIDataConName, infixIDataConName, leftAssociativeDataConName,
  rightAssociativeDataConName, notAssociativeDataConName,
  sourceUnpackDataConName, sourceNoUnpackDataConName,
  noSourceUnpackednessDataConName, sourceLazyDataConName,
  sourceStrictDataConName, noSourceStrictnessDataConName,
  decidedLazyDataConName, decidedStrictDataConName,
  decidedUnpackDataConName,
  metaDataDataConName, metaConsDataConName, metaSelDataConName :: Name

v1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "V1") v1TyConKey
u1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "U1") u1TyConKey
par1TyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "Par1") par1TyConKey
rec1TyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "Rec1") rec1TyConKey
k1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "K1") k1TyConKey
m1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "M1") m1TyConKey
sumTyConName     = tcQual gHC_INTERNAL_GENERICS (fsLit ":+:") sumTyConKey
prodTyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit ":*:") prodTyConKey
compTyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit ":.:") compTyConKey
rTyConName       = tcQual gHC_INTERNAL_GENERICS (fsLit "R") rTyConKey
dTyConName       = tcQual gHC_INTERNAL_GENERICS (fsLit "D") dTyConKey
cTyConName       = tcQual gHC_INTERNAL_GENERICS (fsLit "C") cTyConKey
sTyConName       = tcQual gHC_INTERNAL_GENERICS (fsLit "S") sTyConKey
rec0TyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "Rec0") rec0TyConKey
d1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "D1") d1TyConKey
c1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "C1") c1TyConKey
s1TyConName      = tcQual gHC_INTERNAL_GENERICS (fsLit "S1") s1TyConKey
repTyConName     = tcQual gHC_INTERNAL_GENERICS (fsLit "Rep") repTyConKey
rep1TyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "Rep1") rep1TyConKey
uRecTyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "URec") uRecTyConKey
uAddrTyConName   = tcQual gHC_INTERNAL_GENERICS (fsLit "UAddr") uAddrTyConKey
uCharTyConName   = tcQual gHC_INTERNAL_GENERICS (fsLit "UChar") uCharTyConKey
uDoubleTyConName = tcQual gHC_INTERNAL_GENERICS (fsLit "UDouble") uDoubleTyConKey
uFloatTyConName  = tcQual gHC_INTERNAL_GENERICS (fsLit "UFloat") uFloatTyConKey
uIntTyConName    = tcQual gHC_INTERNAL_GENERICS (fsLit "UInt") uIntTyConKey
uWordTyConName   = tcQual gHC_INTERNAL_GENERICS (fsLit "UWord") uWordTyConKey

prefixIDataConName = dcQual gHC_INTERNAL_GENERICS (fsLit "PrefixI") prefixIDataConKey
infixIDataConName  = dcQual gHC_INTERNAL_GENERICS (fsLit "InfixI") infixIDataConKey
leftAssociativeDataConName  = dcQual gHC_INTERNAL_GENERICS (fsLit "LeftAssociative") leftAssociativeDataConKey
rightAssociativeDataConName = dcQual gHC_INTERNAL_GENERICS (fsLit "RightAssociative") rightAssociativeDataConKey
notAssociativeDataConName   = dcQual gHC_INTERNAL_GENERICS (fsLit "NotAssociative") notAssociativeDataConKey

sourceUnpackDataConName         = dcQual gHC_INTERNAL_GENERICS (fsLit "SourceUnpack") sourceUnpackDataConKey
sourceNoUnpackDataConName       = dcQual gHC_INTERNAL_GENERICS (fsLit "SourceNoUnpack") sourceNoUnpackDataConKey
noSourceUnpackednessDataConName = dcQual gHC_INTERNAL_GENERICS (fsLit "NoSourceUnpackedness") noSourceUnpackednessDataConKey
sourceLazyDataConName           = dcQual gHC_INTERNAL_GENERICS (fsLit "SourceLazy") sourceLazyDataConKey
sourceStrictDataConName         = dcQual gHC_INTERNAL_GENERICS (fsLit "SourceStrict") sourceStrictDataConKey
noSourceStrictnessDataConName   = dcQual gHC_INTERNAL_GENERICS (fsLit "NoSourceStrictness") noSourceStrictnessDataConKey
decidedLazyDataConName          = dcQual gHC_INTERNAL_GENERICS (fsLit "DecidedLazy") decidedLazyDataConKey
decidedStrictDataConName        = dcQual gHC_INTERNAL_GENERICS (fsLit "DecidedStrict") decidedStrictDataConKey
decidedUnpackDataConName        = dcQual gHC_INTERNAL_GENERICS (fsLit "DecidedUnpack") decidedUnpackDataConKey
metaDataDataConName             = dcQual gHC_INTERNAL_GENERICS (fsLit "MetaData") metaDataDataConKey
metaConsDataConName             = dcQual gHC_INTERNAL_GENERICS (fsLit "MetaCons") metaConsDataConKey
metaSelDataConName              = dcQual gHC_INTERNAL_GENERICS (fsLit "MetaSel") metaSelDataConKey

-- Primitive Int
divIntName, modIntName :: Name
divIntName = varQual gHC_CLASSES (fsLit "divInt#") divIntIdKey
modIntName = varQual gHC_CLASSES (fsLit "modInt#") modIntIdKey

-- Base strings
unpackCStringName, unpackCStringFoldrName, unpackCStringUtf8Name,
  unpackCStringFoldrUtf8Name, unpackCStringAppendName,
  unpackCStringAppendUtf8Name, eqStringName, cstringLengthName :: Name
cstringLengthName = varQual gHC_CSTRING (fsLit "cstringLength#") cstringLengthIdKey
eqStringName      = varQual gHC_INTERNAL_BASE (fsLit "eqString") eqStringIdKey

unpackCStringName           = varQual gHC_CSTRING (fsLit "unpackCString#") unpackCStringIdKey
unpackCStringAppendName     = varQual gHC_CSTRING (fsLit "unpackAppendCString#") unpackCStringAppendIdKey
unpackCStringFoldrName      = varQual gHC_CSTRING (fsLit "unpackFoldrCString#") unpackCStringFoldrIdKey
unpackCStringUtf8Name       = varQual gHC_CSTRING (fsLit "unpackCStringUtf8#") unpackCStringUtf8IdKey
unpackCStringAppendUtf8Name = varQual gHC_CSTRING (fsLit "unpackAppendCStringUtf8#") unpackCStringAppendUtf8IdKey
unpackCStringFoldrUtf8Name  = varQual gHC_CSTRING (fsLit "unpackFoldrCStringUtf8#") unpackCStringFoldrUtf8IdKey

-- The 'inline' function
inlineIdName :: Name
inlineIdName = varQual gHC_MAGIC (fsLit "inline") inlineIdKey

-- Base classes (Eq, Ord, Functor)
fmapName, eqClassName, eqName, ordClassName, geName, functorClassName :: Name
eqClassName      = clsQual gHC_CLASSES (fsLit "Eq") eqClassKey
eqName           = varQual gHC_CLASSES (fsLit "==") eqClassOpKey
ordClassName     = clsQual gHC_CLASSES (fsLit "Ord") ordClassKey
geName           = varQual gHC_CLASSES (fsLit ">=") geClassOpKey
functorClassName = clsQual gHC_INTERNAL_BASE (fsLit "Functor") functorClassKey
fmapName         = varQual gHC_INTERNAL_BASE (fsLit "fmap") fmapClassOpKey

-- Class Monad
monadClassName, thenMName, bindMName, returnMName :: Name
monadClassName = clsQual gHC_INTERNAL_BASE (fsLit "Monad") monadClassKey
thenMName      = varQual gHC_INTERNAL_BASE (fsLit ">>") thenMClassOpKey
bindMName      = varQual gHC_INTERNAL_BASE (fsLit ">>=") bindMClassOpKey
returnMName    = varQual gHC_INTERNAL_BASE (fsLit "return") returnMClassOpKey

-- Class MonadFail
monadFailClassName, failMName :: Name
monadFailClassName = clsQual gHC_INTERNAL_MONAD_FAIL (fsLit "MonadFail") monadFailClassKey
failMName          = varQual gHC_INTERNAL_MONAD_FAIL (fsLit "fail") failMClassOpKey

-- Class Applicative
applicativeClassName, pureAName, apAName, thenAName :: Name
applicativeClassName = clsQual gHC_INTERNAL_BASE (fsLit "Applicative") applicativeClassKey
apAName              = varQual gHC_INTERNAL_BASE (fsLit "<*>") apAClassOpKey
pureAName            = varQual gHC_INTERNAL_BASE (fsLit "pure") pureAClassOpKey
thenAName            = varQual gHC_INTERNAL_BASE (fsLit "*>") thenAClassOpKey

-- Classes (Foldable, Traversable)
foldableClassName, traversableClassName :: Name
foldableClassName    = clsQual gHC_INTERNAL_DATA_FOLDABLE (fsLit "Foldable") foldableClassKey
traversableClassName = clsQual gHC_INTERNAL_DATA_TRAVERSABLE (fsLit "Traversable") traversableClassKey

-- Classes (Semigroup, Monoid)
semigroupClassName, sappendName :: Name
semigroupClassName = clsQual gHC_INTERNAL_BASE (fsLit "Semigroup") semigroupClassKey
sappendName        = varQual gHC_INTERNAL_BASE (fsLit "<>") sappendClassOpKey
monoidClassName, memptyName, mappendName, mconcatName :: Name
monoidClassName = clsQual gHC_INTERNAL_BASE (fsLit "Monoid") monoidClassKey
memptyName      = varQual gHC_INTERNAL_BASE (fsLit "mempty") memptyClassOpKey
mappendName     = varQual gHC_INTERNAL_BASE (fsLit "mappend") mappendClassOpKey
mconcatName     = varQual gHC_INTERNAL_BASE (fsLit "mconcat") mconcatClassOpKey

-- AMP additions
joinMName, alternativeClassName :: Name
joinMName            = varQual gHC_INTERNAL_BASE (fsLit "join") joinMIdKey
alternativeClassName = clsQual gHC_INTERNAL_MONAD (fsLit "Alternative") alternativeClassKey

--
-- Uniques for the AMP names above (defined here, next to their users)
joinMIdKey, apAClassOpKey, pureAClassOpKey, thenAClassOpKey,
  alternativeClassKey :: Unique
joinMIdKey          = mkPreludeMiscIdUnique 750
apAClassOpKey       = mkPreludeMiscIdUnique 751 -- <*>
pureAClassOpKey     = mkPreludeMiscIdUnique 752
thenAClassOpKey     = mkPreludeMiscIdUnique 753
alternativeClassKey = mkPreludeMiscIdUnique 754

-- Functions for GHC extensions
considerAccessibleName :: Name
considerAccessibleName = varQual gHC_INTERNAL_EXTS (fsLit "considerAccessible") considerAccessibleIdKey

-- Random GHC.Internal.Base functions
fromStringName, otherwiseIdName, foldrName, buildName, augmentName,
  mapName, appendName, assertName, dollarName :: Name
dollarName      = varQual gHC_INTERNAL_BASE (fsLit "$") dollarIdKey
otherwiseIdName = varQual gHC_INTERNAL_BASE (fsLit "otherwise") otherwiseIdKey
foldrName       = varQual gHC_INTERNAL_BASE (fsLit "foldr") foldrIdKey
buildName       = varQual gHC_INTERNAL_BASE (fsLit "build") buildIdKey
augmentName     = varQual gHC_INTERNAL_BASE (fsLit "augment") augmentIdKey
mapName         = varQual gHC_INTERNAL_BASE (fsLit "map") mapIdKey
appendName      = varQual gHC_INTERNAL_BASE (fsLit "++") appendIdKey
assertName      = varQual gHC_INTERNAL_BASE (fsLit "assert") assertIdKey
fromStringName  = varQual gHC_INTERNAL_DATA_STRING (fsLit "fromString") fromStringClassOpKey

-- Module GHC.Internal.Num
numClassName, fromIntegerName, minusName, negateName :: Name
numClassName    = clsQual gHC_INTERNAL_NUM (fsLit "Num") numClassKey
fromIntegerName = varQual gHC_INTERNAL_NUM (fsLit "fromInteger") fromIntegerClassOpKey
minusName       = varQual gHC_INTERNAL_NUM (fsLit "-") minusClassOpKey
negateName      = varQual gHC_INTERNAL_NUM (fsLit "negate") negateClassOpKey

---------------------------------
-- ghc-bignum
---------------------------------
integerFromNaturalName
  , integerToNaturalClampName
  , integerToNaturalThrowName
  , integerToNaturalName
  , integerToWordName
  , integerToIntName
  , integerToWord64Name
  , integerToInt64Name
  , integerFromWordName
  , integerFromWord64Name
  , integerFromInt64Name
  , integerAddName
  , integerMulName
  , integerSubName
  , integerNegateName
  , integerAbsName
  , integerPopCountName
  , integerQuotName
  , integerRemName
  , integerDivName
  , integerModName
  , integerDivModName
  , integerQuotRemName
  , integerEncodeFloatName
  , integerEncodeDoubleName
  , integerGcdName
  , integerLcmName
  , integerAndName
  , integerOrName
  , integerXorName
  , integerComplementName
  , integerBitName
  , integerTestBitName
  , integerShiftLName
  , integerShiftRName
  , naturalToWordName
  , naturalPopCountName
  , naturalShiftRName
  , naturalShiftLName
  , naturalAddName
  , naturalSubName
  , naturalSubThrowName
  , naturalSubUnsafeName
  , naturalMulName
  , naturalQuotRemName
  , naturalQuotName
  , naturalRemName
  , naturalAndName
  , naturalAndNotName
  , naturalOrName
  , naturalXorName
  , naturalTestBitName
  , naturalBitName
  , naturalGcdName
  , naturalLcmName
  , naturalLog2Name
  , naturalLogBaseWordName
  , naturalLogBaseName
  , naturalPowModName
  , naturalSizeInBaseName
  , bignatEqName
  , bignatCompareName
  , bignatCompareWordName
  :: Name

-- Helpers for the bignum names: qualify by the BigNat, Natural and Integer
-- defining modules respectively.
bnbVarQual, bnnVarQual, bniVarQual :: String -> Unique -> Name
bnbVarQual str key = varQual gHC_INTERNAL_NUM_BIGNAT  (fsLit str) key
bnnVarQual str key = varQual gHC_INTERNAL_NUM_NATURAL (fsLit str) key
bniVarQual str key = varQual gHC_INTERNAL_NUM_INTEGER (fsLit str) key

-- Types and DataCons
bignatEqName          = bnbVarQual "bigNatEq#" bignatEqIdKey
bignatCompareName     = bnbVarQual "bigNatCompare" bignatCompareIdKey
bignatCompareWordName = bnbVarQual "bigNatCompareWord#" bignatCompareWordIdKey

naturalToWordName     = bnnVarQual "naturalToWord#" naturalToWordIdKey
naturalPopCountName   = bnnVarQual "naturalPopCount#" naturalPopCountIdKey
naturalShiftRName     = bnnVarQual "naturalShiftR#" naturalShiftRIdKey
naturalShiftLName     = bnnVarQual "naturalShiftL#" naturalShiftLIdKey
naturalAddName        = bnnVarQual "naturalAdd" naturalAddIdKey
naturalSubName        = bnnVarQual "naturalSub" naturalSubIdKey
naturalSubThrowName   = bnnVarQual "naturalSubThrow" naturalSubThrowIdKey
naturalSubUnsafeName  = bnnVarQual "naturalSubUnsafe" naturalSubUnsafeIdKey
naturalMulName        = bnnVarQual "naturalMul" naturalMulIdKey
naturalQuotRemName    = bnnVarQual "naturalQuotRem#" naturalQuotRemIdKey
naturalQuotName       = bnnVarQual "naturalQuot" naturalQuotIdKey
naturalRemName        = bnnVarQual "naturalRem" naturalRemIdKey
naturalAndName        = bnnVarQual "naturalAnd" naturalAndIdKey
naturalAndNotName     = bnnVarQual "naturalAndNot" naturalAndNotIdKey
naturalOrName         = bnnVarQual "naturalOr" naturalOrIdKey
naturalXorName        = bnnVarQual "naturalXor" naturalXorIdKey
naturalTestBitName    = bnnVarQual "naturalTestBit#" naturalTestBitIdKey
naturalBitName        = bnnVarQual "naturalBit#" naturalBitIdKey
naturalGcdName        = bnnVarQual "naturalGcd" naturalGcdIdKey
naturalLcmName        = bnnVarQual "naturalLcm" naturalLcmIdKey
naturalLog2Name       = bnnVarQual "naturalLog2#" naturalLog2IdKey
naturalLogBaseWordName = bnnVarQual "naturalLogBaseWord#" naturalLogBaseWordIdKey
naturalLogBaseName    = bnnVarQual "naturalLogBase#" naturalLogBaseIdKey
naturalPowModName     = bnnVarQual "naturalPowMod" naturalPowModIdKey
naturalSizeInBaseName = bnnVarQual "naturalSizeInBase#" naturalSizeInBaseIdKey

integerFromNaturalName    = bniVarQual "integerFromNatural" integerFromNaturalIdKey
integerToNaturalClampName = bniVarQual "integerToNaturalClamp" integerToNaturalClampIdKey
integerToNaturalThrowName = bniVarQual "integerToNaturalThrow" integerToNaturalThrowIdKey
integerToNaturalName      = bniVarQual "integerToNatural" integerToNaturalIdKey
integerToWordName         = bniVarQual "integerToWord#" integerToWordIdKey
integerToIntName          = bniVarQual "integerToInt#" integerToIntIdKey
integerToWord64Name       = bniVarQual "integerToWord64#" integerToWord64IdKey
integerToInt64Name        = bniVarQual "integerToInt64#" integerToInt64IdKey
integerFromWordName       = bniVarQual "integerFromWord#" integerFromWordIdKey
integerFromWord64Name     = bniVarQual "integerFromWord64#" integerFromWord64IdKey
integerFromInt64Name      = bniVarQual "integerFromInt64#" integerFromInt64IdKey
integerAddName            = bniVarQual "integerAdd" integerAddIdKey
integerMulName            = bniVarQual "integerMul" integerMulIdKey
integerSubName            = bniVarQual "integerSub" integerSubIdKey
integerNegateName         = bniVarQual "integerNegate" integerNegateIdKey
integerAbsName            = bniVarQual "integerAbs" integerAbsIdKey
integerPopCountName       = bniVarQual "integerPopCount#" integerPopCountIdKey
integerQuotName           = bniVarQual "integerQuot" integerQuotIdKey
integerRemName            = bniVarQual "integerRem" integerRemIdKey
integerDivName            = bniVarQual "integerDiv" integerDivIdKey
integerModName            = bniVarQual "integerMod" integerModIdKey
integerDivModName         = bniVarQual "integerDivMod#" integerDivModIdKey
integerQuotRemName        = bniVarQual "integerQuotRem#" integerQuotRemIdKey
integerEncodeFloatName    = bniVarQual "integerEncodeFloat#" integerEncodeFloatIdKey
integerEncodeDoubleName   = bniVarQual "integerEncodeDouble#" integerEncodeDoubleIdKey
integerGcdName            = bniVarQual "integerGcd" integerGcdIdKey
integerLcmName            = bniVarQual "integerLcm" integerLcmIdKey
integerAndName            = bniVarQual "integerAnd" integerAndIdKey
integerOrName             = bniVarQual "integerOr" integerOrIdKey
integerXorName            = bniVarQual "integerXor" integerXorIdKey
integerComplementName     = bniVarQual "integerComplement" integerComplementIdKey
integerBitName            = bniVarQual "integerBit#" integerBitIdKey
integerTestBitName        = bniVarQual "integerTestBit#" integerTestBitIdKey
integerShiftLName         = bniVarQual "integerShiftL#" integerShiftLIdKey
integerShiftRName         = bniVarQual "integerShiftR#" integerShiftRIdKey

---------------------------------
-- End of ghc-bignum
---------------------------------

-- GHC.Internal.Real types and classes
rationalTyConName, ratioTyConName, ratioDataConName, realClassName,
  integralClassName, realFracClassName, fractionalClassName,
  fromRationalName, toIntegerName, toRationalName, fromIntegralName,
  realToFracName, mkRationalBase2Name, mkRationalBase10Name :: Name
rationalTyConName    = tcQual gHC_INTERNAL_REAL (fsLit "Rational") rationalTyConKey
ratioTyConName       = tcQual gHC_INTERNAL_REAL (fsLit "Ratio") ratioTyConKey
ratioDataConName     = dcQual gHC_INTERNAL_REAL (fsLit ":%") ratioDataConKey
realClassName        = clsQual gHC_INTERNAL_REAL (fsLit "Real") realClassKey
integralClassName    = clsQual gHC_INTERNAL_REAL (fsLit "Integral") integralClassKey
realFracClassName    = clsQual gHC_INTERNAL_REAL (fsLit "RealFrac") realFracClassKey
fractionalClassName  = clsQual gHC_INTERNAL_REAL (fsLit "Fractional") fractionalClassKey
fromRationalName     = varQual gHC_INTERNAL_REAL (fsLit "fromRational") fromRationalClassOpKey
toIntegerName        = varQual gHC_INTERNAL_REAL (fsLit "toInteger") toIntegerClassOpKey
toRationalName       = varQual gHC_INTERNAL_REAL (fsLit "toRational") toRationalClassOpKey
fromIntegralName     = varQual gHC_INTERNAL_REAL (fsLit "fromIntegral") fromIntegralIdKey
realToFracName       = varQual gHC_INTERNAL_REAL (fsLit "realToFrac") realToFracIdKey
mkRationalBase2Name  = varQual gHC_INTERNAL_REAL (fsLit "mkRationalBase2") mkRationalBase2IdKey
mkRationalBase10Name = varQual gHC_INTERNAL_REAL (fsLit "mkRationalBase10") mkRationalBase10IdKey

-- GHC.Internal.Float classes
floatingClassName, realFloatClassName :: Name
floatingClassName  = clsQual gHC_INTERNAL_FLOAT (fsLit "Floating") floatingClassKey
realFloatClassName = clsQual gHC_INTERNAL_FLOAT (fsLit "RealFloat") realFloatClassKey

-- other GHC.Internal.Float functions
integerToFloatName, integerToDoubleName,
  naturalToFloatName, naturalToDoubleName,
  rationalToFloatName, rationalToDoubleName :: Name
integerToFloatName   = varQual gHC_INTERNAL_FLOAT (fsLit "integerToFloat#") integerToFloatIdKey
integerToDoubleName  = varQual gHC_INTERNAL_FLOAT (fsLit "integerToDouble#") integerToDoubleIdKey
naturalToFloatName   = varQual gHC_INTERNAL_FLOAT (fsLit "naturalToFloat#") naturalToFloatIdKey
naturalToDoubleName  = varQual gHC_INTERNAL_FLOAT (fsLit "naturalToDouble#") naturalToDoubleIdKey
rationalToFloatName  = varQual gHC_INTERNAL_FLOAT (fsLit "rationalToFloat") rationalToFloatIdKey
rationalToDoubleName = varQual gHC_INTERNAL_FLOAT (fsLit "rationalToDouble") rationalToDoubleIdKey

-- Class Ix
ixClassName :: Name
ixClassName = clsQual gHC_INTERNAL_IX (fsLit "Ix") ixClassKey

-- Typeable representation types
trModuleTyConName
  , trModuleDataConName
  , trNameTyConName
  , trNameSDataConName
  , trNameDDataConName
  , trTyConTyConName
  , trTyConDataConName
  :: Name
trModuleTyConName   = tcQual gHC_TYPES (fsLit "Module") trModuleTyConKey
trModuleDataConName = dcQual gHC_TYPES (fsLit "Module") trModuleDataConKey
trNameTyConName     = tcQual gHC_TYPES (fsLit "TrName") trNameTyConKey
trNameSDataConName  = dcQual gHC_TYPES (fsLit "TrNameS") trNameSDataConKey
trNameDDataConName  = dcQual gHC_TYPES (fsLit "TrNameD") trNameDDataConKey
trTyConTyConName    = tcQual gHC_TYPES (fsLit "TyCon") trTyConTyConKey
trTyConDataConName  = dcQual gHC_TYPES (fsLit "TyCon") trTyConDataConKey

kindRepTyConName
  , kindRepTyConAppDataConName
  , kindRepVarDataConName
  , kindRepAppDataConName
  , kindRepFunDataConName
  , kindRepTYPEDataConName
  , kindRepTypeLitSDataConName
  , kindRepTypeLitDDataConName
  :: Name
kindRepTyConName           = tcQual gHC_TYPES (fsLit "KindRep") kindRepTyConKey
kindRepTyConAppDataConName = dcQual gHC_TYPES (fsLit "KindRepTyConApp") kindRepTyConAppDataConKey
kindRepVarDataConName      = dcQual gHC_TYPES (fsLit "KindRepVar") kindRepVarDataConKey
kindRepAppDataConName      = dcQual gHC_TYPES (fsLit "KindRepApp") kindRepAppDataConKey
kindRepFunDataConName      = dcQual gHC_TYPES (fsLit "KindRepFun") kindRepFunDataConKey
kindRepTYPEDataConName     = dcQual gHC_TYPES (fsLit "KindRepTYPE") kindRepTYPEDataConKey
kindRepTypeLitSDataConName = dcQual gHC_TYPES (fsLit "KindRepTypeLitS") kindRepTypeLitSDataConKey
kindRepTypeLitDDataConName = dcQual gHC_TYPES (fsLit "KindRepTypeLitD") kindRepTypeLitDDataConKey

typeLitSortTyConName
  , typeLitSymbolDataConName
  , typeLitNatDataConName
  , typeLitCharDataConName
  :: Name
typeLitSortTyConName     = tcQual gHC_TYPES (fsLit "TypeLitSort") typeLitSortTyConKey
typeLitSymbolDataConName = dcQual gHC_TYPES (fsLit "TypeLitSymbol") typeLitSymbolDataConKey
typeLitNatDataConName    = dcQual gHC_TYPES (fsLit "TypeLitNat") typeLitNatDataConKey
typeLitCharDataConName   = dcQual gHC_TYPES (fsLit "TypeLitChar") typeLitCharDataConKey

-- Class Typeable, and functions for constructing `Typeable` dictionaries
typeableClassName
  , typeRepTyConName
  , someTypeRepTyConName
  , someTypeRepDataConName
  , mkTrTypeName
  , mkTrConName
  , mkTrAppName
  , mkTrFunName
  , typeRepIdName
  , typeNatTypeRepName
  , typeSymbolTypeRepName
  , typeCharTypeRepName
  , trGhcPrimModuleName
  :: Name
typeableClassName      = clsQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "Typeable") typeableClassKey
typeRepTyConName       = tcQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "TypeRep") typeRepTyConKey
someTypeRepTyConName   = tcQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "SomeTypeRep") someTypeRepTyConKey
someTypeRepDataConName = dcQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "SomeTypeRep") someTypeRepDataConKey
typeRepIdName          = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "typeRep#") typeRepIdKey
mkTrTypeName           = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "mkTrType") mkTrTypeKey
mkTrConName            = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "mkTrCon") mkTrConKey
mkTrAppName            = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "mkTrApp") mkTrAppKey
mkTrFunName            = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "mkTrFun") mkTrFunKey
typeNatTypeRepName     = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "typeNatTypeRep") typeNatTypeRepKey
typeSymbolTypeRepName  = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "typeSymbolTypeRep") typeSymbolTypeRepKey
typeCharTypeRepName    = varQual gHC_INTERNAL_TYPEABLE_INTERNAL (fsLit "typeCharTypeRep") typeCharTypeRepKey

-- this is the Typeable 'Module' for GHC.Prim (which has no code, so we place in GHC.Types)
-- See Note [Grand plan for Typeable] in GHC.Tc.Instance.Typeable.
trGhcPrimModuleName = varQual gHC_TYPES (fsLit "tr$ModuleGHCPrim") trGhcPrimModuleKey

-- Typeable KindReps for some common cases
starKindRepName, starArrStarKindRepName,
  starArrStarArrStarKindRepName, constraintKindRepName :: Name
starKindRepName               = varQual gHC_TYPES (fsLit "krep$*") starKindRepKey
starArrStarKindRepName        = varQual gHC_TYPES (fsLit "krep$*Arr*") starArrStarKindRepKey
starArrStarArrStarKindRepName = varQual gHC_TYPES (fsLit "krep$*->*->*") starArrStarArrStarKindRepKey
constraintKindRepName         = varQual gHC_TYPES (fsLit "krep$Constraint") constraintKindRepKey

-- WithDict
withDictClassName :: Name
withDictClassName = clsQual gHC_MAGIC_DICT (fsLit "WithDict") withDictClassKey

nonEmptyTyConName :: Name
nonEmptyTyConName = tcQual gHC_INTERNAL_BASE (fsLit "NonEmpty") nonEmptyTyConKey

-- DataToTag
dataToTagClassName :: Name
dataToTagClassName = clsQual gHC_MAGIC (fsLit "DataToTag") dataToTagClassKey

-- seq#
seqHashName :: Name
seqHashName = varQual gHC_INTERNAL_IO (fsLit "seq#") seqHashKey

-- Custom type errors
errorMessageTypeErrorFamName
  , typeErrorTextDataConName
  , typeErrorAppendDataConName
  , typeErrorVAppendDataConName
  , typeErrorShowTypeDataConName
  :: Name
errorMessageTypeErrorFamName  = tcQual gHC_INTERNAL_TYPEERROR (fsLit "TypeError") errorMessageTypeErrorFamKey
typeErrorTextDataConName      = dcQual gHC_INTERNAL_TYPEERROR (fsLit "Text") typeErrorTextDataConKey
typeErrorAppendDataConName    = dcQual gHC_INTERNAL_TYPEERROR (fsLit ":<>:") typeErrorAppendDataConKey
typeErrorVAppendDataConName   = dcQual gHC_INTERNAL_TYPEERROR (fsLit ":$$:") typeErrorVAppendDataConKey
typeErrorShowTypeDataConName  = dcQual gHC_INTERNAL_TYPEERROR (fsLit "ShowType") typeErrorShowTypeDataConKey

-- "Unsatisfiable" constraint
unsatisfiableClassName, unsatisfiableIdName :: Name
unsatisfiableClassName = clsQual gHC_INTERNAL_TYPEERROR (fsLit "Unsatisfiable") unsatisfiableClassNameKey
unsatisfiableIdName    = varQual gHC_INTERNAL_TYPEERROR (fsLit "unsatisfiable") unsatisfiableIdNameKey

-- Unsafe coercion proofs
unsafeEqualityProofName, unsafeEqualityTyConName, unsafeCoercePrimName,
  unsafeReflDataConName :: Name
unsafeEqualityProofName = varQual gHC_INTERNAL_UNSAFE_COERCE (fsLit "unsafeEqualityProof") unsafeEqualityProofIdKey
unsafeEqualityTyConName = tcQual gHC_INTERNAL_UNSAFE_COERCE (fsLit "UnsafeEquality") unsafeEqualityTyConKey
unsafeReflDataConName   = dcQual gHC_INTERNAL_UNSAFE_COERCE (fsLit "UnsafeRefl") unsafeReflDataConKey
unsafeCoercePrimName    = varQual gHC_INTERNAL_UNSAFE_COERCE (fsLit "unsafeCoerce#") unsafeCoercePrimIdKey

-- Dynamic
toDynName :: Name
toDynName = varQual gHC_INTERNAL_DYNAMIC (fsLit "toDyn") toDynIdKey

-- Class Data
dataClassName :: Name
dataClassName = clsQual gHC_INTERNAL_DATA_DATA (fsLit "Data") dataClassKey

-- Error module
assertErrorName :: Name
assertErrorName = varQual gHC_INTERNAL_IO_Exception (fsLit "assertError") assertErrorIdKey

-- GHC.Internal.Debug.Trace
traceName :: Name
traceName = varQual gHC_INTERNAL_DEBUG_TRACE (fsLit "trace") traceKey

-- Enum module (Enum, Bounded)
enumClassName, enumFromName, enumFromToName, enumFromThenName,
  enumFromThenToName, boundedClassName :: Name
enumClassName      = clsQual gHC_INTERNAL_ENUM (fsLit "Enum") enumClassKey
enumFromName       = varQual gHC_INTERNAL_ENUM (fsLit "enumFrom") enumFromClassOpKey
enumFromToName     = varQual gHC_INTERNAL_ENUM (fsLit "enumFromTo") enumFromToClassOpKey
enumFromThenName   = varQual gHC_INTERNAL_ENUM (fsLit "enumFromThen") enumFromThenClassOpKey
enumFromThenToName = varQual gHC_INTERNAL_ENUM (fsLit "enumFromThenTo") enumFromThenToClassOpKey
boundedClassName   = clsQual gHC_INTERNAL_ENUM (fsLit "Bounded") boundedClassKey

-- List functions
concatName, filterName, zipName :: Name
concatName = varQual gHC_INTERNAL_LIST (fsLit "concat") concatIdKey
filterName = varQual gHC_INTERNAL_LIST (fsLit "filter") filterIdKey
zipName    = varQual gHC_INTERNAL_LIST (fsLit "zip") zipIdKey

-- Overloaded lists
isListClassName, fromListName, fromListNName, toListName :: Name
isListClassName = clsQual gHC_INTERNAL_IS_LIST (fsLit "IsList") isListClassKey
fromListName    = varQual gHC_INTERNAL_IS_LIST (fsLit "fromList") fromListClassOpKey
fromListNName   = varQual gHC_INTERNAL_IS_LIST (fsLit "fromListN") fromListNClassOpKey
toListName      = varQual gHC_INTERNAL_IS_LIST (fsLit "toList") toListClassOpKey

-- HasField class ops
getFieldName, setFieldName :: Name
getFieldName = varQual gHC_INTERNAL_RECORDS (fsLit "getField") getFieldClassOpKey
setFieldName = varQual gHC_INTERNAL_RECORDS (fsLit "setField") setFieldClassOpKey

-- Class Show
showClassName :: Name
showClassName = clsQual gHC_INTERNAL_SHOW (fsLit "Show") showClassKey

-- Class Read
readClassName :: Name
readClassName = clsQual gHC_INTERNAL_READ (fsLit "Read") readClassKey

-- Classes Generic and Generic1, Datatype, Constructor and Selector
genClassName, gen1ClassName, datatypeClassName, constructorClassName,
  selectorClassName :: Name
genClassName         = clsQual gHC_INTERNAL_GENERICS (fsLit "Generic") genClassKey
gen1ClassName        = clsQual gHC_INTERNAL_GENERICS (fsLit "Generic1") gen1ClassKey
datatypeClassName    = clsQual gHC_INTERNAL_GENERICS (fsLit "Datatype") datatypeClassKey
constructorClassName = clsQual gHC_INTERNAL_GENERICS (fsLit "Constructor") constructorClassKey
selectorClassName    = clsQual gHC_INTERNAL_GENERICS (fsLit "Selector") selectorClassKey

genericClassNames :: [Name]
genericClassNames = [genClassName, gen1ClassName]

-- GHCi things
ghciIoClassName, ghciStepIoMName :: Name
ghciIoClassName = clsQual gHC_INTERNAL_GHCI (fsLit "GHCiSandboxIO") ghciIoClassKey
ghciStepIoMName = varQual gHC_INTERNAL_GHCI (fsLit "ghciStepIO") ghciStepIoMClassOpKey

-- IO things
ioTyConName, ioDataConName,
  thenIOName, bindIOName, returnIOName, failIOName :: Name
ioTyConName   = tcQual gHC_TYPES (fsLit "IO") ioTyConKey
ioDataConName = dcQual gHC_TYPES (fsLit "IO") ioDataConKey
thenIOName    = varQual gHC_INTERNAL_BASE (fsLit "thenIO") thenIOIdKey
bindIOName    = varQual gHC_INTERNAL_BASE (fsLit "bindIO") bindIOIdKey
returnIOName  = varQual gHC_INTERNAL_BASE (fsLit "returnIO") returnIOIdKey
failIOName    = varQual gHC_INTERNAL_IO (fsLit "failIO") failIOIdKey

-- IO things
printName :: Name
printName = varQual gHC_INTERNAL_SYSTEM_IO (fsLit "print") printIdKey

-- Int, Word, and Addr things
int8TyConName, int16TyConName, int32TyConName, int64TyConName :: Name
int8TyConName  = tcQual gHC_INTERNAL_INT (fsLit "Int8") int8TyConKey
int16TyConName = tcQual gHC_INTERNAL_INT (fsLit "Int16") int16TyConKey
int32TyConName = tcQual gHC_INTERNAL_INT (fsLit "Int32") int32TyConKey
int64TyConName = tcQual gHC_INTERNAL_INT (fsLit "Int64") int64TyConKey

-- Word module
word8TyConName, word16TyConName, word32TyConName, word64TyConName :: Name
word8TyConName  = tcQual gHC_INTERNAL_WORD (fsLit "Word8") word8TyConKey
word16TyConName = tcQual gHC_INTERNAL_WORD (fsLit "Word16") word16TyConKey
word32TyConName = tcQual gHC_INTERNAL_WORD (fsLit "Word32") word32TyConKey
word64TyConName = tcQual gHC_INTERNAL_WORD (fsLit "Word64") word64TyConKey

-- PrelPtr module
ptrTyConName, funPtrTyConName :: Name
ptrTyConName    = tcQual gHC_INTERNAL_PTR (fsLit "Ptr") ptrTyConKey
funPtrTyConName = tcQual gHC_INTERNAL_PTR (fsLit "FunPtr") funPtrTyConKey

-- Foreign objects and weak pointers
stablePtrTyConName, newStablePtrName :: Name
stablePtrTyConName = tcQual gHC_INTERNAL_STABLE (fsLit "StablePtr") stablePtrTyConKey
newStablePtrName   = varQual gHC_INTERNAL_STABLE (fsLit "newStablePtr") newStablePtrIdKey

-- Recursive-do notation
monadFixClassName, mfixName :: Name
monadFixClassName = clsQual gHC_INTERNAL_MONAD_FIX (fsLit "MonadFix") monadFixClassKey
mfixName          = varQual gHC_INTERNAL_MONAD_FIX (fsLit "mfix") mfixIdKey

-- Arrow notation
arrAName, composeAName, firstAName, appAName, choiceAName, loopAName :: Name
arrAName     = varQual gHC_INTERNAL_ARROW (fsLit "arr") arrAIdKey
composeAName = varQual gHC_INTERNAL_DESUGAR (fsLit ">>>") composeAIdKey
firstAName   = varQual gHC_INTERNAL_ARROW (fsLit "first") firstAIdKey
appAName     = varQual gHC_INTERNAL_ARROW (fsLit "app") appAIdKey
choiceAName  = varQual gHC_INTERNAL_ARROW (fsLit "|||") choiceAIdKey
loopAName    = varQual gHC_INTERNAL_ARROW (fsLit "loop") loopAIdKey

-- Monad comprehensions
guardMName, liftMName, mzipName :: Name
guardMName = varQual gHC_INTERNAL_MONAD (fsLit "guard") guardMIdKey
liftMName  = varQual gHC_INTERNAL_MONAD (fsLit "liftM") liftMIdKey
mzipName   = varQual gHC_INTERNAL_CONTROL_MONAD_ZIP (fsLit "mzip") mzipIdKey

-- Annotation type checking
toAnnotationWrapperName :: Name
toAnnotationWrapperName = varQual gHC_INTERNAL_DESUGAR (fsLit "toAnnotationWrapper") toAnnotationWrapperIdKey

-- Other classes, needed for type defaulting
monadPlusClassName, isStringClassName :: Name
monadPlusClassName = clsQual gHC_INTERNAL_MONAD (fsLit "MonadPlus") monadPlusClassKey
isStringClassName  = clsQual gHC_INTERNAL_DATA_STRING (fsLit "IsString") isStringClassKey

-- Type-level naturals
knownNatClassName :: Name
knownNatClassName = clsQual gHC_INTERNAL_TYPENATS (fsLit "KnownNat") knownNatClassNameKey
knownSymbolClassName :: Name
knownSymbolClassName = clsQual gHC_INTERNAL_TYPELITS (fsLit "KnownSymbol") knownSymbolClassNameKey
knownCharClassName :: Name
knownCharClassName = clsQual gHC_INTERNAL_TYPELITS (fsLit "KnownChar") knownCharClassNameKey

-- Overloaded labels
fromLabelClassOpName :: Name
fromLabelClassOpName = varQual gHC_INTERNAL_OVER_LABELS (fsLit "fromLabel") fromLabelClassOpKey

-- Implicit Parameters
ipClassName :: Name
ipClassName = clsQual gHC_CLASSES (fsLit "IP") ipClassKey

-- Overloaded record fields
hasFieldClassName :: Name
hasFieldClassName = clsQual gHC_INTERNAL_RECORDS (fsLit "HasField") hasFieldClassNameKey

-- ExceptionContext
exceptionContextTyConName, emptyExceptionContextName :: Name
exceptionContextTyConName = tcQual gHC_INTERNAL_EXCEPTION_CONTEXT (fsLit "ExceptionContext") exceptionContextTyConKey
emptyExceptionContextName = varQual gHC_INTERNAL_EXCEPTION_CONTEXT (fsLit "emptyExceptionContext") emptyExceptionContextKey

-- Source Locations
callStackTyConName, emptyCallStackName, pushCallStackName,
  srcLocDataConName :: Name
callStackTyConName = tcQual gHC_INTERNAL_STACK_TYPES (fsLit "CallStack") callStackTyConKey
emptyCallStackName = varQual gHC_INTERNAL_STACK_TYPES (fsLit "emptyCallStack") emptyCallStackKey
pushCallStackName  = varQual gHC_INTERNAL_STACK_TYPES (fsLit "pushCallStack") pushCallStackKey
srcLocDataConName  = dcQual gHC_INTERNAL_STACK_TYPES (fsLit "SrcLoc") srcLocDataConKey

-- plugins
pLUGINS :: Module
pLUGINS = mkThisGhcModule (fsLit "GHC.Driver.Plugins")
pluginTyConName :: Name
pluginTyConName = tcQual pLUGINS (fsLit "Plugin") pluginTyConKey
frontendPluginTyConName :: Name
frontendPluginTyConName = tcQual pLUGINS (fsLit "FrontendPlugin") frontendPluginTyConKey

-- Static pointers
makeStaticName :: Name
makeStaticName = varQual gHC_INTERNAL_STATICPTR_INTERNAL (fsLit "makeStatic") makeStaticKey

staticPtrInfoTyConName :: Name
staticPtrInfoTyConName = tcQual gHC_INTERNAL_STATICPTR (fsLit "StaticPtrInfo") staticPtrInfoTyConKey

staticPtrInfoDataConName :: Name
staticPtrInfoDataConName = dcQual gHC_INTERNAL_STATICPTR (fsLit "StaticPtrInfo") staticPtrInfoDataConKey

staticPtrTyConName :: Name
staticPtrTyConName = tcQual gHC_INTERNAL_STATICPTR (fsLit "StaticPtr") staticPtrTyConKey

staticPtrDataConName :: Name
staticPtrDataConName = dcQual gHC_INTERNAL_STATICPTR (fsLit "StaticPtr") staticPtrDataConKey

fromStaticPtrName :: Name
fromStaticPtrName = varQual gHC_INTERNAL_STATICPTR (fsLit "fromStaticPtr") fromStaticPtrClassOpKey

fingerprintDataConName :: Name
fingerprintDataConName = dcQual gHC_INTERNAL_FINGERPRINT_TYPE (fsLit "Fingerprint") fingerprintDataConKey

constPtrConName :: Name
constPtrConName = tcQual gHC_INTERNAL_FOREIGN_C_CONSTPTR (fsLit "ConstPtr") constPtrTyConKey

jsvalTyConName :: Name
jsvalTyConName = tcQual (mkGhcInternalModule (fsLit "GHC.Internal.Wasm.Prim.Types")) (fsLit "JSVal") jsvalTyConKey

{-
************************************************************************
*                                                                      *
\subsection{Local helpers}
*                                                                      *
************************************************************************

All these are original names; hence mkOrig
-}

-- Build a known-key 'Name' in the variable, type-constructor, class or
-- data-constructor namespace respectively.  All four are thin wrappers
-- around 'mk_known_key_name' below, differing only in the 'NameSpace'.
{-# INLINE varQual #-}
{-# INLINE tcQual #-}
{-# INLINE clsQual #-}
{-# INLINE dcQual #-}
varQual, tcQual, clsQual, dcQual :: Module -> FastString -> Unique -> Name
varQual modu str unique = mk_known_key_name varName  modu str unique
tcQual  modu str unique = mk_known_key_name tcName   modu str unique
clsQual modu str unique = mk_known_key_name clsName  modu str unique
dcQual  modu str unique = mk_known_key_name dataName modu str unique

-- Make an external 'Name' with the given key in the given namespace and
-- module; known-key names carry no source location ('noSrcSpan').
mk_known_key_name :: NameSpace -> Module -> FastString -> Unique -> Name
{-# INLINE mk_known_key_name #-}
mk_known_key_name space modu str unique
  = mkExternalName unique modu (mkOccNameFS space str) noSrcSpan

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-Classes]{@Uniques@ for wired-in @Classes@}
*                                                                      *
************************************************************************

--MetaHaskell extension hand allocate keys here
-}

-- NOTE(review): these keys are hand-allocated; gaps in the numbering
-- (e.g. 4, 19, 22, 24-27, 46) carry no value here and appear deliberate —
-- keep every number distinct when adding new keys.
boundedClassKey, enumClassKey, eqClassKey, floatingClassKey,
  fractionalClassKey, integralClassKey, monadClassKey, dataClassKey,
  functorClassKey, numClassKey, ordClassKey, readClassKey, realClassKey,
  realFloatClassKey, realFracClassKey, showClassKey, ixClassKey :: Unique
boundedClassKey         = mkPreludeClassUnique 1
enumClassKey            = mkPreludeClassUnique 2
eqClassKey              = mkPreludeClassUnique 3
floatingClassKey        = mkPreludeClassUnique 5
fractionalClassKey      = mkPreludeClassUnique 6
integralClassKey        = mkPreludeClassUnique 7
monadClassKey           = mkPreludeClassUnique 8
dataClassKey            = mkPreludeClassUnique 9
functorClassKey         = mkPreludeClassUnique 10
numClassKey             = mkPreludeClassUnique 11
ordClassKey             = mkPreludeClassUnique 12
readClassKey            = mkPreludeClassUnique 13
realClassKey            = mkPreludeClassUnique 14
realFloatClassKey       = mkPreludeClassUnique 15
realFracClassKey        = mkPreludeClassUnique 16
showClassKey            = mkPreludeClassUnique 17
ixClassKey              = mkPreludeClassUnique 18

typeableClassKey :: Unique
typeableClassKey        = mkPreludeClassUnique 20

withDictClassKey :: Unique
withDictClassKey        = mkPreludeClassUnique 21

dataToTagClassKey :: Unique
dataToTagClassKey       = mkPreludeClassUnique 23

monadFixClassKey :: Unique
monadFixClassKey        = mkPreludeClassUnique 28

monadFailClassKey :: Unique
monadFailClassKey       = mkPreludeClassUnique 29

monadPlusClassKey, randomClassKey, randomGenClassKey :: Unique
monadPlusClassKey       = mkPreludeClassUnique 30
randomClassKey          = mkPreludeClassUnique 31
randomGenClassKey       = mkPreludeClassUnique 32

isStringClassKey :: Unique
isStringClassKey        = mkPreludeClassUnique 33

applicativeClassKey, foldableClassKey, traversableClassKey :: Unique
applicativeClassKey     = mkPreludeClassUnique 34
foldableClassKey        = mkPreludeClassUnique 35
traversableClassKey     = mkPreludeClassUnique 36

genClassKey, gen1ClassKey, datatypeClassKey, constructorClassKey,
  selectorClassKey :: Unique
genClassKey             = mkPreludeClassUnique 37
gen1ClassKey            = mkPreludeClassUnique 38
datatypeClassKey        = mkPreludeClassUnique 39
constructorClassKey     = mkPreludeClassUnique 40
selectorClassKey        = mkPreludeClassUnique 41

-- KnownNat: see Note [KnownNat & KnownSymbol and EvLit] in GHC.Tc.Instance.Class
knownNatClassNameKey :: Unique
knownNatClassNameKey    = mkPreludeClassUnique 42

-- KnownSymbol: see Note [KnownNat & KnownSymbol and EvLit] in GHC.Tc.Instance.Class
knownSymbolClassNameKey :: Unique
knownSymbolClassNameKey = mkPreludeClassUnique 43

knownCharClassNameKey :: Unique
knownCharClassNameKey   = mkPreludeClassUnique 44

ghciIoClassKey :: Unique
ghciIoClassKey          = mkPreludeClassUnique 45

semigroupClassKey, monoidClassKey :: Unique
semigroupClassKey       = mkPreludeClassUnique 47
monoidClassKey          = mkPreludeClassUnique 48

-- Implicit Parameters
ipClassKey :: Unique
ipClassKey              = mkPreludeClassUnique 49

-- Overloaded record fields
hasFieldClassNameKey :: Unique
hasFieldClassNameKey    = mkPreludeClassUnique 50

---------------- Template Haskell -------------------
-- GHC.Builtin.Names.TH: USES ClassUniques 200-299
-- ClassUniques 200-299
-----------------------------------------------------

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-TyCons]{@Uniques@ for wired-in @TyCons@}
*                                                                      *
************************************************************************
-}

-- Hand-allocated TyCon keys.  Declaration order in the signature does not
-- match numeric order below (e.g. stringTyConKey is declared late but is 6).
addrPrimTyConKey, arrayPrimTyConKey, boolTyConKey, byteArrayPrimTyConKey,
  charPrimTyConKey, charTyConKey, doublePrimTyConKey, doubleTyConKey,
  floatPrimTyConKey, floatTyConKey, fUNTyConKey, intPrimTyConKey,
  intTyConKey, int8TyConKey, int16TyConKey, int8PrimTyConKey,
  int16PrimTyConKey, int32PrimTyConKey, int32TyConKey, int64PrimTyConKey,
  int64TyConKey, integerTyConKey, naturalTyConKey, listTyConKey,
  foreignObjPrimTyConKey, maybeTyConKey, weakPrimTyConKey,
  mutableArrayPrimTyConKey, mutableByteArrayPrimTyConKey, orderingTyConKey,
  mVarPrimTyConKey, ratioTyConKey, rationalTyConKey, realWorldTyConKey,
  stablePtrPrimTyConKey, stablePtrTyConKey, eqTyConKey, heqTyConKey,
  ioPortPrimTyConKey, smallArrayPrimTyConKey, smallMutableArrayPrimTyConKey,
  stringTyConKey, ccArrowTyConKey, ctArrowTyConKey, tcArrowTyConKey :: Unique
addrPrimTyConKey              = mkPreludeTyConUnique 1
arrayPrimTyConKey             = mkPreludeTyConUnique 3
boolTyConKey                  = mkPreludeTyConUnique 4
byteArrayPrimTyConKey         = mkPreludeTyConUnique 5
stringTyConKey                = mkPreludeTyConUnique 6
charPrimTyConKey              = mkPreludeTyConUnique 7
charTyConKey                  = mkPreludeTyConUnique 8
doublePrimTyConKey            = mkPreludeTyConUnique 9
doubleTyConKey                = mkPreludeTyConUnique 10
floatPrimTyConKey             = mkPreludeTyConUnique 11
floatTyConKey                 = mkPreludeTyConUnique 12
fUNTyConKey                   = mkPreludeTyConUnique 13
intPrimTyConKey               = mkPreludeTyConUnique 14
intTyConKey                   = mkPreludeTyConUnique 15
int8PrimTyConKey              = mkPreludeTyConUnique 16
int8TyConKey                  = mkPreludeTyConUnique 17
int16PrimTyConKey             = mkPreludeTyConUnique 18
int16TyConKey                 = mkPreludeTyConUnique 19
int32PrimTyConKey             = mkPreludeTyConUnique 20
int32TyConKey                 = mkPreludeTyConUnique 21
int64PrimTyConKey             = mkPreludeTyConUnique 22
int64TyConKey                 = mkPreludeTyConUnique 23
integerTyConKey               = mkPreludeTyConUnique 24
naturalTyConKey               = mkPreludeTyConUnique 25
listTyConKey                  = mkPreludeTyConUnique 26
foreignObjPrimTyConKey        = mkPreludeTyConUnique 27
maybeTyConKey                 = mkPreludeTyConUnique 28
weakPrimTyConKey              = mkPreludeTyConUnique 29
mutableArrayPrimTyConKey      = mkPreludeTyConUnique 30
mutableByteArrayPrimTyConKey  = mkPreludeTyConUnique 31
orderingTyConKey              = mkPreludeTyConUnique 32
mVarPrimTyConKey              = mkPreludeTyConUnique 33
ioPortPrimTyConKey            = mkPreludeTyConUnique 34
ratioTyConKey                 = mkPreludeTyConUnique 35
rationalTyConKey              = mkPreludeTyConUnique 36
realWorldTyConKey             = mkPreludeTyConUnique 37
stablePtrPrimTyConKey         = mkPreludeTyConUnique 38
stablePtrTyConKey             = mkPreludeTyConUnique 39
eqTyConKey                    = mkPreludeTyConUnique 40
heqTyConKey                   = mkPreludeTyConUnique 41
ctArrowTyConKey               = mkPreludeTyConUnique 42
ccArrowTyConKey               = mkPreludeTyConUnique 43
tcArrowTyConKey               = mkPreludeTyConUnique 44

statePrimTyConKey, stableNamePrimTyConKey, stableNameTyConKey,
  mutVarPrimTyConKey, ioTyConKey, wordPrimTyConKey, wordTyConKey,
  word8PrimTyConKey, word8TyConKey, word16PrimTyConKey, word16TyConKey,
  word32PrimTyConKey, word32TyConKey, word64PrimTyConKey, word64TyConKey,
  kindConKey, boxityConKey, typeConKey, threadIdPrimTyConKey,
  bcoPrimTyConKey, ptrTyConKey, funPtrTyConKey, tVarPrimTyConKey,
  eqPrimTyConKey, eqReprPrimTyConKey, eqPhantPrimTyConKey,
  compactPrimTyConKey, stackSnapshotPrimTyConKey, promptTagPrimTyConKey,
  constPtrTyConKey, jsvalTyConKey :: Unique
statePrimTyConKey             = mkPreludeTyConUnique 50
stableNamePrimTyConKey        = mkPreludeTyConUnique 51
stableNameTyConKey            = mkPreludeTyConUnique 52
eqPrimTyConKey                = mkPreludeTyConUnique 53
eqReprPrimTyConKey            = mkPreludeTyConUnique 54
eqPhantPrimTyConKey           = mkPreludeTyConUnique 55
mutVarPrimTyConKey            = mkPreludeTyConUnique 56
ioTyConKey                    = mkPreludeTyConUnique 57
wordPrimTyConKey              = mkPreludeTyConUnique 59
wordTyConKey                  = mkPreludeTyConUnique 60
word8PrimTyConKey             = mkPreludeTyConUnique 61
word8TyConKey                 = mkPreludeTyConUnique 62
word16PrimTyConKey            = mkPreludeTyConUnique 63
word16TyConKey                = mkPreludeTyConUnique 64
word32PrimTyConKey            = mkPreludeTyConUnique 65
word32TyConKey                = mkPreludeTyConUnique 66
word64PrimTyConKey            = mkPreludeTyConUnique 67
word64TyConKey                = mkPreludeTyConUnique 68
kindConKey                    = mkPreludeTyConUnique 72
boxityConKey                  = mkPreludeTyConUnique 73
typeConKey                    = mkPreludeTyConUnique 74
threadIdPrimTyConKey          = mkPreludeTyConUnique 75
bcoPrimTyConKey               = mkPreludeTyConUnique 76
ptrTyConKey                   = mkPreludeTyConUnique 77
funPtrTyConKey                = mkPreludeTyConUnique 78
tVarPrimTyConKey              = mkPreludeTyConUnique 79
compactPrimTyConKey           = mkPreludeTyConUnique 80
stackSnapshotPrimTyConKey     = mkPreludeTyConUnique 81
promptTagPrimTyConKey         = mkPreludeTyConUnique 82

eitherTyConKey :: Unique
eitherTyConKey                = mkPreludeTyConUnique 84

voidTyConKey :: Unique
voidTyConKey                  = mkPreludeTyConUnique 85

nonEmptyTyConKey :: Unique
nonEmptyTyConKey              = mkPreludeTyConUnique 86

dictTyConKey :: Unique
dictTyConKey                  = mkPreludeTyConUnique 87

-- Kind constructors
liftedTypeKindTyConKey, unliftedTypeKindTyConKey, tYPETyConKey,
  cONSTRAINTTyConKey, liftedRepTyConKey, unliftedRepTyConKey,
  constraintKindTyConKey, levityTyConKey, runtimeRepTyConKey,
  vecCountTyConKey, vecElemTyConKey, zeroBitRepTyConKey,
  zeroBitTypeTyConKey :: Unique
liftedTypeKindTyConKey        = mkPreludeTyConUnique 88
unliftedTypeKindTyConKey      = mkPreludeTyConUnique 89
tYPETyConKey                  = mkPreludeTyConUnique 91
cONSTRAINTTyConKey            = mkPreludeTyConUnique 92
constraintKindTyConKey        = mkPreludeTyConUnique 93
levityTyConKey                = mkPreludeTyConUnique 94
runtimeRepTyConKey            = mkPreludeTyConUnique 95
vecCountTyConKey              = mkPreludeTyConUnique 96
vecElemTyConKey               = mkPreludeTyConUnique 97
liftedRepTyConKey             = mkPreludeTyConUnique 98
unliftedRepTyConKey           = mkPreludeTyConUnique 99
zeroBitRepTyConKey            = mkPreludeTyConUnique 100
zeroBitTypeTyConKey           = mkPreludeTyConUnique 101

pluginTyConKey, frontendPluginTyConKey :: Unique
pluginTyConKey                = mkPreludeTyConUnique 102
frontendPluginTyConKey        = mkPreludeTyConUnique 103

trTyConTyConKey, trModuleTyConKey, trNameTyConKey, kindRepTyConKey,
  typeLitSortTyConKey :: Unique
trTyConTyConKey               = mkPreludeTyConUnique 104
trModuleTyConKey              = mkPreludeTyConUnique 105
trNameTyConKey                = mkPreludeTyConUnique 106
kindRepTyConKey               = mkPreludeTyConUnique 107
typeLitSortTyConKey           = mkPreludeTyConUnique 108

-- Generics (Unique keys)
v1TyConKey, u1TyConKey, par1TyConKey, rec1TyConKey, k1TyConKey, m1TyConKey,
  sumTyConKey, prodTyConKey, compTyConKey, rTyConKey, dTyConKey, cTyConKey,
  sTyConKey, rec0TyConKey, d1TyConKey, c1TyConKey, s1TyConKey, repTyConKey,
  rep1TyConKey, uRecTyConKey, uAddrTyConKey, uCharTyConKey,
  uDoubleTyConKey, uFloatTyConKey, uIntTyConKey, uWordTyConKey :: Unique
v1TyConKey                    = mkPreludeTyConUnique 135
u1TyConKey                    = mkPreludeTyConUnique 136
par1TyConKey                  = mkPreludeTyConUnique 137
rec1TyConKey                  = mkPreludeTyConUnique 138
k1TyConKey                    = mkPreludeTyConUnique 139
m1TyConKey                    = mkPreludeTyConUnique 140
sumTyConKey                   = mkPreludeTyConUnique 141
prodTyConKey                  = mkPreludeTyConUnique 142
compTyConKey                  = mkPreludeTyConUnique 143
rTyConKey                     = mkPreludeTyConUnique 144
dTyConKey                     = mkPreludeTyConUnique 146
cTyConKey                     = mkPreludeTyConUnique 147
sTyConKey                     = mkPreludeTyConUnique 148
rec0TyConKey                  = mkPreludeTyConUnique 149
d1TyConKey                    = mkPreludeTyConUnique 151
c1TyConKey                    = mkPreludeTyConUnique 152
s1TyConKey                    = mkPreludeTyConUnique 153
repTyConKey                   = mkPreludeTyConUnique 155
rep1TyConKey                  = mkPreludeTyConUnique 156
uRecTyConKey                  = mkPreludeTyConUnique 157
uAddrTyConKey                 = mkPreludeTyConUnique 158
uCharTyConKey                 = mkPreludeTyConUnique 159
uDoubleTyConKey               = mkPreludeTyConUnique 160
uFloatTyConKey                = mkPreludeTyConUnique 161
uIntTyConKey                  = mkPreludeTyConUnique 162
uWordTyConKey                 = mkPreludeTyConUnique 163

-- "Unsatisfiable" constraint
unsatisfiableClassNameKey :: Unique
unsatisfiableClassNameKey     = mkPreludeTyConUnique 170

anyTyConKey :: Unique
anyTyConKey                   = mkPreludeTyConUnique 171

zonkAnyTyConKey :: Unique
zonkAnyTyConKey               = mkPreludeTyConUnique 172

-- Custom user type-errors
errorMessageTypeErrorFamKey :: Unique
errorMessageTypeErrorFamKey   = mkPreludeTyConUnique 181

coercibleTyConKey :: Unique
coercibleTyConKey             = mkPreludeTyConUnique 183

proxyPrimTyConKey :: Unique
proxyPrimTyConKey             = mkPreludeTyConUnique 184

specTyConKey :: Unique
specTyConKey                  = mkPreludeTyConUnique 185

-- (type signatures for these two are in the big TyCon-key signature above)
smallArrayPrimTyConKey        = mkPreludeTyConUnique 187
smallMutableArrayPrimTyConKey = mkPreludeTyConUnique 188

staticPtrTyConKey :: Unique
staticPtrTyConKey             = mkPreludeTyConUnique 189

staticPtrInfoTyConKey :: Unique
staticPtrInfoTyConKey         = mkPreludeTyConUnique 190

callStackTyConKey :: Unique
callStackTyConKey             = mkPreludeTyConUnique 191

-- Typeables
typeRepTyConKey, someTypeRepTyConKey, someTypeRepDataConKey :: Unique
typeRepTyConKey               = mkPreludeTyConUnique 192
someTypeRepTyConKey           = mkPreludeTyConUnique 193
someTypeRepDataConKey         = mkPreludeTyConUnique 194

typeSymbolAppendFamNameKey :: Unique
typeSymbolAppendFamNameKey    = mkPreludeTyConUnique 195

-- Unsafe equality
unsafeEqualityTyConKey :: Unique
unsafeEqualityTyConKey        = mkPreludeTyConUnique 196

-- Linear types
multiplicityTyConKey :: Unique
multiplicityTyConKey          = mkPreludeTyConUnique 197

unrestrictedFunTyConKey :: Unique
unrestrictedFunTyConKey       = mkPreludeTyConUnique 198

multMulTyConKey :: Unique
multMulTyConKey               = mkPreludeTyConUnique 199

---------------- Template Haskell -------------------
-- GHC.Builtin.Names.TH: USES TyConUniques 200-299
-----------------------------------------------------

----------------------- SIMD ------------------------
-- USES TyConUniques 300-399
-----------------------------------------------------

#include "primop-vector-uniques.hs-incl"

------------- Type-level Symbol, Nat, Char ----------
-- USES TyConUniques 400-499
-----------------------------------------------------

typeSymbolKindConNameKey, typeCharKindConNameKey, typeNatAddTyFamNameKey,
  typeNatMulTyFamNameKey, typeNatExpTyFamNameKey, typeNatSubTyFamNameKey,
  typeSymbolCmpTyFamNameKey,
typeNatCmpTyFamNameKey, typeCharCmpTyFamNameKey, typeLeqCharTyFamNameKey,
  typeNatDivTyFamNameKey, typeNatModTyFamNameKey, typeNatLogTyFamNameKey,
  typeConsSymbolTyFamNameKey, typeUnconsSymbolTyFamNameKey,
  typeCharToNatTyFamNameKey, typeNatToCharTyFamNameKey :: Unique
typeSymbolKindConNameKey     = mkPreludeTyConUnique 400
typeCharKindConNameKey       = mkPreludeTyConUnique 401
typeNatAddTyFamNameKey       = mkPreludeTyConUnique 402
typeNatMulTyFamNameKey       = mkPreludeTyConUnique 403
typeNatExpTyFamNameKey       = mkPreludeTyConUnique 404
typeNatSubTyFamNameKey       = mkPreludeTyConUnique 405
typeSymbolCmpTyFamNameKey    = mkPreludeTyConUnique 406
typeNatCmpTyFamNameKey       = mkPreludeTyConUnique 407
typeCharCmpTyFamNameKey      = mkPreludeTyConUnique 408
typeLeqCharTyFamNameKey      = mkPreludeTyConUnique 409
typeNatDivTyFamNameKey       = mkPreludeTyConUnique 410
typeNatModTyFamNameKey       = mkPreludeTyConUnique 411
typeNatLogTyFamNameKey       = mkPreludeTyConUnique 412
typeConsSymbolTyFamNameKey   = mkPreludeTyConUnique 413
typeUnconsSymbolTyFamNameKey = mkPreludeTyConUnique 414
typeCharToNatTyFamNameKey    = mkPreludeTyConUnique 415
typeNatToCharTyFamNameKey    = mkPreludeTyConUnique 416

-- (type signatures for these two are with the statePrimTyConKey group above)
constPtrTyConKey = mkPreludeTyConUnique 417
jsvalTyConKey    = mkPreludeTyConUnique 418

exceptionContextTyConKey :: Unique
exceptionContextTyConKey = mkPreludeTyConUnique 420

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-DataCons]{@Uniques@ for wired-in @DataCons@}
*                                                                      *
************************************************************************
-}

charDataConKey, consDataConKey, doubleDataConKey, falseDataConKey,
  floatDataConKey, intDataConKey, nilDataConKey, ratioDataConKey,
  stableNameDataConKey, trueDataConKey, wordDataConKey, word8DataConKey,
  ioDataConKey, heqDataConKey, eqDataConKey, nothingDataConKey,
  justDataConKey :: Unique
charDataConKey       = mkPreludeDataConUnique 1
consDataConKey       = mkPreludeDataConUnique 2
doubleDataConKey     = mkPreludeDataConUnique 3
falseDataConKey      = mkPreludeDataConUnique 4
floatDataConKey      = mkPreludeDataConUnique 5
intDataConKey        = mkPreludeDataConUnique 6
nothingDataConKey    = mkPreludeDataConUnique 7
justDataConKey       = mkPreludeDataConUnique 8
eqDataConKey         = mkPreludeDataConUnique 9
nilDataConKey        = mkPreludeDataConUnique 10
ratioDataConKey      = mkPreludeDataConUnique 11
word8DataConKey      = mkPreludeDataConUnique 12
stableNameDataConKey = mkPreludeDataConUnique 13
trueDataConKey       = mkPreludeDataConUnique 14
wordDataConKey       = mkPreludeDataConUnique 15
ioDataConKey         = mkPreludeDataConUnique 16
heqDataConKey        = mkPreludeDataConUnique 18

-- Generic data constructors
crossDataConKey, inlDataConKey, inrDataConKey, genUnitDataConKey :: Unique
crossDataConKey   = mkPreludeDataConUnique 20
inlDataConKey     = mkPreludeDataConUnique 21
inrDataConKey     = mkPreludeDataConUnique 22
genUnitDataConKey = mkPreludeDataConUnique 23

leftDataConKey, rightDataConKey :: Unique
leftDataConKey  = mkPreludeDataConUnique 25
rightDataConKey = mkPreludeDataConUnique 26

ordLTDataConKey, ordEQDataConKey, ordGTDataConKey :: Unique
ordLTDataConKey = mkPreludeDataConUnique 27
ordEQDataConKey = mkPreludeDataConUnique 28
ordGTDataConKey = mkPreludeDataConUnique 29

mkDictDataConKey :: Unique
mkDictDataConKey = mkPreludeDataConUnique 30

coercibleDataConKey :: Unique
coercibleDataConKey = mkPreludeDataConUnique 32

staticPtrDataConKey :: Unique
staticPtrDataConKey = mkPreludeDataConUnique 33

staticPtrInfoDataConKey :: Unique
staticPtrInfoDataConKey = mkPreludeDataConUnique 34

fingerprintDataConKey :: Unique
fingerprintDataConKey = mkPreludeDataConUnique 35

srcLocDataConKey :: Unique
srcLocDataConKey = mkPreludeDataConUnique 37

trTyConDataConKey, trModuleDataConKey, trNameSDataConKey,
  trNameDDataConKey, trGhcPrimModuleKey :: Unique
trTyConDataConKey  = mkPreludeDataConUnique 41
trModuleDataConKey = mkPreludeDataConUnique 43
trNameSDataConKey  = mkPreludeDataConUnique 45
trNameDDataConKey  = mkPreludeDataConUnique 46
trGhcPrimModuleKey = mkPreludeDataConUnique 47

typeErrorTextDataConKey, typeErrorAppendDataConKey,
  typeErrorVAppendDataConKey, typeErrorShowTypeDataConKey :: Unique
typeErrorTextDataConKey     = mkPreludeDataConUnique 50
typeErrorAppendDataConKey   = mkPreludeDataConUnique 51
typeErrorVAppendDataConKey  = mkPreludeDataConUnique 52
typeErrorShowTypeDataConKey = mkPreludeDataConUnique 53

prefixIDataConKey, infixIDataConKey, leftAssociativeDataConKey,
  rightAssociativeDataConKey, notAssociativeDataConKey,
  sourceUnpackDataConKey, sourceNoUnpackDataConKey,
  noSourceUnpackednessDataConKey, sourceLazyDataConKey,
  sourceStrictDataConKey, noSourceStrictnessDataConKey,
  decidedLazyDataConKey, decidedStrictDataConKey, decidedUnpackDataConKey,
  metaDataDataConKey, metaConsDataConKey, metaSelDataConKey :: Unique
prefixIDataConKey              = mkPreludeDataConUnique 54
infixIDataConKey               = mkPreludeDataConUnique 55
leftAssociativeDataConKey      = mkPreludeDataConUnique 56
rightAssociativeDataConKey     = mkPreludeDataConUnique 57
notAssociativeDataConKey       = mkPreludeDataConUnique 58
sourceUnpackDataConKey         = mkPreludeDataConUnique 59
sourceNoUnpackDataConKey       = mkPreludeDataConUnique 60
noSourceUnpackednessDataConKey = mkPreludeDataConUnique 61
sourceLazyDataConKey           = mkPreludeDataConUnique 62
sourceStrictDataConKey         = mkPreludeDataConUnique 63
noSourceStrictnessDataConKey   = mkPreludeDataConUnique 64
decidedLazyDataConKey          = mkPreludeDataConUnique 65
decidedStrictDataConKey        = mkPreludeDataConUnique 66
decidedUnpackDataConKey        = mkPreludeDataConUnique 67
metaDataDataConKey             = mkPreludeDataConUnique 68
metaConsDataConKey             = mkPreludeDataConUnique 69
metaSelDataConKey              = mkPreludeDataConUnique 70

vecRepDataConKey, sumRepDataConKey, tupleRepDataConKey,
  boxedRepDataConKey :: Unique
vecRepDataConKey   = mkPreludeDataConUnique 71
tupleRepDataConKey = mkPreludeDataConUnique 72
sumRepDataConKey   = mkPreludeDataConUnique 73
boxedRepDataConKey = mkPreludeDataConUnique 74

boxedRepDataConTyConKey, tupleRepDataConTyConKey :: Unique
-- A promoted data constructor (i.e. a TyCon) has
-- the same key as the data constructor itself
boxedRepDataConTyConKey = boxedRepDataConKey
tupleRepDataConTyConKey = tupleRepDataConKey

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- Includes all nullary-data-constructor reps. Does not
-- include BoxedRep, VecRep, SumRep, TupleRep.
runtimeRepSimpleDataConKeys :: [Unique]
runtimeRepSimpleDataConKeys = map mkPreludeDataConUnique [75..87]

liftedDataConKey, unliftedDataConKey :: Unique
liftedDataConKey   = mkPreludeDataConUnique 88
unliftedDataConKey = mkPreludeDataConUnique 89

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- VecCount
vecCountDataConKeys :: [Unique]
vecCountDataConKeys = map mkPreludeDataConUnique [90..95]

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- VecElem
vecElemDataConKeys :: [Unique]
vecElemDataConKeys = map mkPreludeDataConUnique [96..105]

-- Typeable things
kindRepTyConAppDataConKey, kindRepVarDataConKey, kindRepAppDataConKey,
  kindRepFunDataConKey, kindRepTYPEDataConKey, kindRepTypeLitSDataConKey,
  kindRepTypeLitDDataConKey :: Unique
kindRepTyConAppDataConKey = mkPreludeDataConUnique 106
kindRepVarDataConKey      = mkPreludeDataConUnique 107
kindRepAppDataConKey      = mkPreludeDataConUnique 108
kindRepFunDataConKey      = mkPreludeDataConUnique 109
kindRepTYPEDataConKey     = mkPreludeDataConUnique 110
kindRepTypeLitSDataConKey = mkPreludeDataConUnique 111
kindRepTypeLitDDataConKey = mkPreludeDataConUnique 112

typeLitSymbolDataConKey, typeLitNatDataConKey,
  typeLitCharDataConKey :: Unique
typeLitSymbolDataConKey = mkPreludeDataConUnique 113
typeLitNatDataConKey    = mkPreludeDataConUnique 114
typeLitCharDataConKey   = mkPreludeDataConUnique 115

-- Unsafe equality
unsafeReflDataConKey :: Unique
unsafeReflDataConKey = mkPreludeDataConUnique 116

-- Multiplicity
oneDataConKey, manyDataConKey :: Unique
oneDataConKey  = mkPreludeDataConUnique 117
manyDataConKey = mkPreludeDataConUnique 118

-- ghc-bignum
integerISDataConKey, integerINDataConKey, integerIPDataConKey,
  naturalNSDataConKey, naturalNBDataConKey :: Unique
integerISDataConKey = mkPreludeDataConUnique 120
integerINDataConKey = mkPreludeDataConUnique 121
integerIPDataConKey = mkPreludeDataConUnique 122
naturalNSDataConKey = mkPreludeDataConUnique 123
naturalNBDataConKey = mkPreludeDataConUnique 124

---------------- Template Haskell -------------------
-- GHC.Builtin.Names.TH: USES DataUniques 200-250
-----------------------------------------------------

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-Ids]{@Uniques@ for wired-in @Ids@ (except @DataCons@)}
*                                                                      *
************************************************************************
-}

wildCardKey, absentErrorIdKey, absentConstraintErrorIdKey, augmentIdKey,
  appendIdKey, buildIdKey, foldrIdKey, recSelErrorIdKey, seqIdKey,
  eqStringIdKey, noMethodBindingErrorIdKey, nonExhaustiveGuardsErrorIdKey,
  impossibleErrorIdKey, impossibleConstraintErrorIdKey, patErrorIdKey,
  voidPrimIdKey, realWorldPrimIdKey, recConErrorIdKey,
  unpackCStringUtf8IdKey, unpackCStringAppendUtf8IdKey,
  unpackCStringFoldrUtf8IdKey, unpackCStringIdKey,
  unpackCStringAppendIdKey, unpackCStringFoldrIdKey, typeErrorIdKey,
  divIntIdKey, modIntIdKey, absentSumFieldErrorIdKey,
  cstringLengthIdKey :: Unique
wildCardKey                    = mkPreludeMiscIdUnique 0 -- See Note [WildCard binders]
absentErrorIdKey               = mkPreludeMiscIdUnique 1
absentConstraintErrorIdKey     = mkPreludeMiscIdUnique 2
augmentIdKey                   = mkPreludeMiscIdUnique 3
appendIdKey                    = mkPreludeMiscIdUnique 4
buildIdKey                     = mkPreludeMiscIdUnique 5
foldrIdKey                     = mkPreludeMiscIdUnique 6
recSelErrorIdKey               = mkPreludeMiscIdUnique 7
seqIdKey                       = mkPreludeMiscIdUnique 8
absentSumFieldErrorIdKey       = mkPreludeMiscIdUnique 9
eqStringIdKey                  = mkPreludeMiscIdUnique 10
noMethodBindingErrorIdKey      = mkPreludeMiscIdUnique 11
nonExhaustiveGuardsErrorIdKey  = mkPreludeMiscIdUnique 12
impossibleErrorIdKey           = mkPreludeMiscIdUnique 13
impossibleConstraintErrorIdKey = mkPreludeMiscIdUnique 14
patErrorIdKey
= mkPreludeMiscIdUnique 15
realWorldPrimIdKey           = mkPreludeMiscIdUnique 16
recConErrorIdKey             = mkPreludeMiscIdUnique 17
unpackCStringUtf8IdKey       = mkPreludeMiscIdUnique 18
unpackCStringAppendUtf8IdKey = mkPreludeMiscIdUnique 19
unpackCStringFoldrUtf8IdKey  = mkPreludeMiscIdUnique 20
unpackCStringIdKey           = mkPreludeMiscIdUnique 21
unpackCStringAppendIdKey     = mkPreludeMiscIdUnique 22
unpackCStringFoldrIdKey      = mkPreludeMiscIdUnique 23
voidPrimIdKey                = mkPreludeMiscIdUnique 24
typeErrorIdKey               = mkPreludeMiscIdUnique 25
divIntIdKey                  = mkPreludeMiscIdUnique 26
modIntIdKey                  = mkPreludeMiscIdUnique 27
cstringLengthIdKey           = mkPreludeMiscIdUnique 28

-- NOTE(review): Id keys are hand-allocated; the numbering has gaps
-- (e.g. 29-30, 41-42, 47-100) — keep all values distinct when extending.
concatIdKey, filterIdKey, zipIdKey, bindIOIdKey, returnIOIdKey,
  newStablePtrIdKey, printIdKey, failIOIdKey, nullAddrIdKey, voidArgIdKey,
  otherwiseIdKey, assertIdKey :: Unique
concatIdKey       = mkPreludeMiscIdUnique 31
filterIdKey       = mkPreludeMiscIdUnique 32
zipIdKey          = mkPreludeMiscIdUnique 33
bindIOIdKey       = mkPreludeMiscIdUnique 34
returnIOIdKey     = mkPreludeMiscIdUnique 35
newStablePtrIdKey = mkPreludeMiscIdUnique 36
printIdKey        = mkPreludeMiscIdUnique 37
failIOIdKey       = mkPreludeMiscIdUnique 38
nullAddrIdKey     = mkPreludeMiscIdUnique 39
voidArgIdKey      = mkPreludeMiscIdUnique 40
otherwiseIdKey    = mkPreludeMiscIdUnique 43
assertIdKey       = mkPreludeMiscIdUnique 44

leftSectionKey, rightSectionKey :: Unique
leftSectionKey  = mkPreludeMiscIdUnique 45
rightSectionKey = mkPreludeMiscIdUnique 46

rootMainKey, runMainKey :: Unique
rootMainKey = mkPreludeMiscIdUnique 101
runMainKey  = mkPreludeMiscIdUnique 102

-- (seqHashKey is bound further below, at 134)
thenIOIdKey, lazyIdKey, assertErrorIdKey, oneShotKey, runRWKey,
  seqHashKey :: Unique
thenIOIdKey      = mkPreludeMiscIdUnique 103
lazyIdKey        = mkPreludeMiscIdUnique 104
assertErrorIdKey = mkPreludeMiscIdUnique 105
oneShotKey       = mkPreludeMiscIdUnique 106
runRWKey         = mkPreludeMiscIdUnique 107

traceKey :: Unique
traceKey = mkPreludeMiscIdUnique 108

nospecIdKey :: Unique
nospecIdKey = mkPreludeMiscIdUnique 109

inlineIdKey, noinlineIdKey, noinlineConstraintIdKey :: Unique
inlineIdKey = mkPreludeMiscIdUnique 120
-- see below

mapIdKey, dollarIdKey, coercionTokenIdKey, considerAccessibleIdKey :: Unique
mapIdKey                = mkPreludeMiscIdUnique 121
dollarIdKey             = mkPreludeMiscIdUnique 123
coercionTokenIdKey      = mkPreludeMiscIdUnique 124
considerAccessibleIdKey = mkPreludeMiscIdUnique 125

noinlineIdKey           = mkPreludeMiscIdUnique 126
noinlineConstraintIdKey = mkPreludeMiscIdUnique 127

integerToFloatIdKey, integerToDoubleIdKey, naturalToFloatIdKey,
  naturalToDoubleIdKey :: Unique
integerToFloatIdKey  = mkPreludeMiscIdUnique 128
integerToDoubleIdKey = mkPreludeMiscIdUnique 129
naturalToFloatIdKey  = mkPreludeMiscIdUnique 130
naturalToDoubleIdKey = mkPreludeMiscIdUnique 131

rationalToFloatIdKey, rationalToDoubleIdKey :: Unique
rationalToFloatIdKey  = mkPreludeMiscIdUnique 132
rationalToDoubleIdKey = mkPreludeMiscIdUnique 133

seqHashKey = mkPreludeMiscIdUnique 134

coerceKey :: Unique
coerceKey = mkPreludeMiscIdUnique 157

{-
Certain class operations from Prelude classes. They get their own
uniques so we can look them up easily when we want to conjure them up
during type checking.
-}

-- Just a placeholder for unbound variables produced by the renamer:
unboundKey :: Unique
unboundKey = mkPreludeMiscIdUnique 158

fromIntegerClassOpKey, minusClassOpKey, fromRationalClassOpKey,
  enumFromClassOpKey, enumFromThenClassOpKey, enumFromToClassOpKey,
  enumFromThenToClassOpKey, eqClassOpKey, geClassOpKey, negateClassOpKey,
  bindMClassOpKey, thenMClassOpKey, returnMClassOpKey,
  fmapClassOpKey :: Unique
fromIntegerClassOpKey    = mkPreludeMiscIdUnique 160
minusClassOpKey          = mkPreludeMiscIdUnique 161
fromRationalClassOpKey   = mkPreludeMiscIdUnique 162
enumFromClassOpKey       = mkPreludeMiscIdUnique 163
enumFromThenClassOpKey   = mkPreludeMiscIdUnique 164
enumFromToClassOpKey     = mkPreludeMiscIdUnique 165
enumFromThenToClassOpKey = mkPreludeMiscIdUnique 166
eqClassOpKey             = mkPreludeMiscIdUnique 167
geClassOpKey             = mkPreludeMiscIdUnique 168
negateClassOpKey         = mkPreludeMiscIdUnique 169
bindMClassOpKey          = mkPreludeMiscIdUnique 171 -- (>>=)
thenMClassOpKey          = mkPreludeMiscIdUnique 172 -- (>>)
fmapClassOpKey           = mkPreludeMiscIdUnique 173
returnMClassOpKey        = mkPreludeMiscIdUnique 174

-- Recursive do notation
mfixIdKey :: Unique
mfixIdKey = mkPreludeMiscIdUnique 175

-- MonadFail operations
failMClassOpKey :: Unique
failMClassOpKey = mkPreludeMiscIdUnique 176

-- fromLabel
fromLabelClassOpKey :: Unique
fromLabelClassOpKey = mkPreludeMiscIdUnique 177

-- Arrow notation
arrAIdKey, composeAIdKey, firstAIdKey, appAIdKey, choiceAIdKey,
  loopAIdKey :: Unique
arrAIdKey     = mkPreludeMiscIdUnique 180
composeAIdKey = mkPreludeMiscIdUnique 181 -- >>>
firstAIdKey   = mkPreludeMiscIdUnique 182
appAIdKey     = mkPreludeMiscIdUnique 183
choiceAIdKey  = mkPreludeMiscIdUnique 184 -- |||
loopAIdKey    = mkPreludeMiscIdUnique 185

fromStringClassOpKey :: Unique
fromStringClassOpKey = mkPreludeMiscIdUnique 186

-- Annotation type checking
toAnnotationWrapperIdKey :: Unique
toAnnotationWrapperIdKey = mkPreludeMiscIdUnique 187

-- Conversion functions
fromIntegralIdKey, realToFracIdKey, toIntegerClassOpKey,
  toRationalClassOpKey :: Unique
fromIntegralIdKey    = mkPreludeMiscIdUnique 190
realToFracIdKey      = mkPreludeMiscIdUnique 191
toIntegerClassOpKey  = mkPreludeMiscIdUnique 192
toRationalClassOpKey = mkPreludeMiscIdUnique 193

-- Monad comprehensions
guardMIdKey, liftMIdKey, mzipIdKey :: Unique
guardMIdKey = mkPreludeMiscIdUnique 194
liftMIdKey  = mkPreludeMiscIdUnique 195
mzipIdKey   = mkPreludeMiscIdUnique 196

-- GHCi
ghciStepIoMClassOpKey :: Unique
ghciStepIoMClassOpKey = mkPreludeMiscIdUnique 197

-- Overloaded lists
isListClassKey, fromListClassOpKey, fromListNClassOpKey,
  toListClassOpKey :: Unique
isListClassKey      = mkPreludeMiscIdUnique 198
fromListClassOpKey  = mkPreludeMiscIdUnique 199
fromListNClassOpKey = mkPreludeMiscIdUnique 500
toListClassOpKey    = mkPreludeMiscIdUnique 501

proxyHashKey :: Unique
proxyHashKey = mkPreludeMiscIdUnique 502

---------------- Template Haskell -------------------
-- GHC.Builtin.Names.TH: USES IdUniques 200-499
-----------------------------------------------------

-- Used to make `Typeable` dictionaries
mkTyConKey, mkTrTypeKey, mkTrConKey, mkTrAppKey, mkTrFunKey,
  typeNatTypeRepKey, typeSymbolTypeRepKey, typeCharTypeRepKey,
  typeRepIdKey :: Unique
mkTyConKey           = mkPreludeMiscIdUnique 503
mkTrTypeKey          = mkPreludeMiscIdUnique 504
mkTrConKey           = mkPreludeMiscIdUnique 505
mkTrAppKey           = mkPreludeMiscIdUnique 506
typeNatTypeRepKey    = mkPreludeMiscIdUnique 507
typeSymbolTypeRepKey = mkPreludeMiscIdUnique 508
typeCharTypeRepKey   = mkPreludeMiscIdUnique 509
typeRepIdKey         = mkPreludeMiscIdUnique 510
mkTrFunKey           = mkPreludeMiscIdUnique 511

-- KindReps for common cases
starKindRepKey, starArrStarKindRepKey, starArrStarArrStarKindRepKey,
  constraintKindRepKey :: Unique
starKindRepKey               = mkPreludeMiscIdUnique 520
starArrStarKindRepKey        = mkPreludeMiscIdUnique 521
starArrStarArrStarKindRepKey = mkPreludeMiscIdUnique 522
constraintKindRepKey         = mkPreludeMiscIdUnique 523

-- Dynamic
toDynIdKey :: Unique
toDynIdKey = mkPreludeMiscIdUnique 530

heqSCSelIdKey, eqSCSelIdKey, coercibleSCSelIdKey
:: Unique eqSCSelIdKey = mkPreludeMiscIdUnique 551 heqSCSelIdKey = mkPreludeMiscIdUnique 552 coercibleSCSelIdKey = mkPreludeMiscIdUnique 553 sappendClassOpKey :: Unique sappendClassOpKey = mkPreludeMiscIdUnique 554 memptyClassOpKey, mappendClassOpKey, mconcatClassOpKey :: Unique memptyClassOpKey = mkPreludeMiscIdUnique 555 mappendClassOpKey = mkPreludeMiscIdUnique 556 mconcatClassOpKey = mkPreludeMiscIdUnique 557 emptyCallStackKey, pushCallStackKey :: Unique emptyCallStackKey = mkPreludeMiscIdUnique 558 pushCallStackKey = mkPreludeMiscIdUnique 559 fromStaticPtrClassOpKey :: Unique fromStaticPtrClassOpKey = mkPreludeMiscIdUnique 560 makeStaticKey :: Unique makeStaticKey = mkPreludeMiscIdUnique 561 emptyExceptionContextKey :: Unique emptyExceptionContextKey = mkPreludeMiscIdUnique 562 -- Unsafe coercion proofs unsafeEqualityProofIdKey, unsafeCoercePrimIdKey :: Unique unsafeEqualityProofIdKey = mkPreludeMiscIdUnique 570 unsafeCoercePrimIdKey = mkPreludeMiscIdUnique 571 -- HasField class ops getFieldClassOpKey, setFieldClassOpKey :: Unique getFieldClassOpKey = mkPreludeMiscIdUnique 572 setFieldClassOpKey = mkPreludeMiscIdUnique 573 -- "Unsatisfiable" constraints unsatisfiableIdNameKey :: Unique unsatisfiableIdNameKey = mkPreludeMiscIdUnique 580 ------------------------------------------------------ -- ghc-bignum uses 600-699 uniques ------------------------------------------------------ integerFromNaturalIdKey , integerToNaturalClampIdKey , integerToNaturalThrowIdKey , integerToNaturalIdKey , integerToWordIdKey , integerToIntIdKey , integerToWord64IdKey , integerToInt64IdKey , integerAddIdKey , integerMulIdKey , integerSubIdKey , integerNegateIdKey , integerAbsIdKey , integerPopCountIdKey , integerQuotIdKey , integerRemIdKey , integerDivIdKey , integerModIdKey , integerDivModIdKey , integerQuotRemIdKey , integerEncodeFloatIdKey , integerEncodeDoubleIdKey , integerGcdIdKey , integerLcmIdKey , integerAndIdKey , integerOrIdKey , integerXorIdKey , integerComplementIdKey , 
integerBitIdKey , integerTestBitIdKey , integerShiftLIdKey , integerShiftRIdKey , integerFromWordIdKey , integerFromWord64IdKey , integerFromInt64IdKey , naturalToWordIdKey , naturalPopCountIdKey , naturalShiftRIdKey , naturalShiftLIdKey , naturalAddIdKey , naturalSubIdKey , naturalSubThrowIdKey , naturalSubUnsafeIdKey , naturalMulIdKey , naturalQuotRemIdKey , naturalQuotIdKey , naturalRemIdKey , naturalAndIdKey , naturalAndNotIdKey , naturalOrIdKey , naturalXorIdKey , naturalTestBitIdKey , naturalBitIdKey , naturalGcdIdKey , naturalLcmIdKey , naturalLog2IdKey , naturalLogBaseWordIdKey , naturalLogBaseIdKey , naturalPowModIdKey , naturalSizeInBaseIdKey , bignatEqIdKey , bignatCompareIdKey , bignatCompareWordIdKey :: Unique integerFromNaturalIdKey = mkPreludeMiscIdUnique 600 integerToNaturalClampIdKey = mkPreludeMiscIdUnique 601 integerToNaturalThrowIdKey = mkPreludeMiscIdUnique 602 integerToNaturalIdKey = mkPreludeMiscIdUnique 603 integerToWordIdKey = mkPreludeMiscIdUnique 604 integerToIntIdKey = mkPreludeMiscIdUnique 605 integerToWord64IdKey = mkPreludeMiscIdUnique 606 integerToInt64IdKey = mkPreludeMiscIdUnique 607 integerAddIdKey = mkPreludeMiscIdUnique 608 integerMulIdKey = mkPreludeMiscIdUnique 609 integerSubIdKey = mkPreludeMiscIdUnique 610 integerNegateIdKey = mkPreludeMiscIdUnique 611 integerAbsIdKey = mkPreludeMiscIdUnique 618 integerPopCountIdKey = mkPreludeMiscIdUnique 621 integerQuotIdKey = mkPreludeMiscIdUnique 622 integerRemIdKey = mkPreludeMiscIdUnique 623 integerDivIdKey = mkPreludeMiscIdUnique 624 integerModIdKey = mkPreludeMiscIdUnique 625 integerDivModIdKey = mkPreludeMiscIdUnique 626 integerQuotRemIdKey = mkPreludeMiscIdUnique 627 integerEncodeFloatIdKey = mkPreludeMiscIdUnique 630 integerEncodeDoubleIdKey = mkPreludeMiscIdUnique 631 integerGcdIdKey = mkPreludeMiscIdUnique 632 integerLcmIdKey = mkPreludeMiscIdUnique 633 integerAndIdKey = mkPreludeMiscIdUnique 634 integerOrIdKey = mkPreludeMiscIdUnique 635 integerXorIdKey = mkPreludeMiscIdUnique 
636 integerComplementIdKey = mkPreludeMiscIdUnique 637 integerBitIdKey = mkPreludeMiscIdUnique 638 integerTestBitIdKey = mkPreludeMiscIdUnique 639 integerShiftLIdKey = mkPreludeMiscIdUnique 640 integerShiftRIdKey = mkPreludeMiscIdUnique 641 integerFromWordIdKey = mkPreludeMiscIdUnique 642 integerFromWord64IdKey = mkPreludeMiscIdUnique 643 integerFromInt64IdKey = mkPreludeMiscIdUnique 644 naturalToWordIdKey = mkPreludeMiscIdUnique 650 naturalPopCountIdKey = mkPreludeMiscIdUnique 659 naturalShiftRIdKey = mkPreludeMiscIdUnique 660 naturalShiftLIdKey = mkPreludeMiscIdUnique 661 naturalAddIdKey = mkPreludeMiscIdUnique 662 naturalSubIdKey = mkPreludeMiscIdUnique 663 naturalSubThrowIdKey = mkPreludeMiscIdUnique 664 naturalSubUnsafeIdKey = mkPreludeMiscIdUnique 665 naturalMulIdKey = mkPreludeMiscIdUnique 666 naturalQuotRemIdKey = mkPreludeMiscIdUnique 669 naturalQuotIdKey = mkPreludeMiscIdUnique 670 naturalRemIdKey = mkPreludeMiscIdUnique 671 naturalAndIdKey = mkPreludeMiscIdUnique 672 naturalAndNotIdKey = mkPreludeMiscIdUnique 673 naturalOrIdKey = mkPreludeMiscIdUnique 674 naturalXorIdKey = mkPreludeMiscIdUnique 675 naturalTestBitIdKey = mkPreludeMiscIdUnique 676 naturalBitIdKey = mkPreludeMiscIdUnique 677 naturalGcdIdKey = mkPreludeMiscIdUnique 678 naturalLcmIdKey = mkPreludeMiscIdUnique 679 naturalLog2IdKey = mkPreludeMiscIdUnique 680 naturalLogBaseWordIdKey = mkPreludeMiscIdUnique 681 naturalLogBaseIdKey = mkPreludeMiscIdUnique 682 naturalPowModIdKey = mkPreludeMiscIdUnique 683 naturalSizeInBaseIdKey = mkPreludeMiscIdUnique 684 bignatEqIdKey = mkPreludeMiscIdUnique 691 bignatCompareIdKey = mkPreludeMiscIdUnique 692 bignatCompareWordIdKey = mkPreludeMiscIdUnique 693 ------------------------------------------------------ -- ghci optimization for big rationals 700-749 uniques ------------------------------------------------------ -- Creating rationals at runtime. 
-- | Uniques for the helpers that construct Rational literals at runtime
-- (part of the "big rationals" range 700-749 announced just above).
mkRationalBase2IdKey, mkRationalBase10IdKey :: Unique
mkRationalBase2IdKey  = mkPreludeMiscIdUnique 700
mkRationalBase10IdKey = mkPreludeMiscIdUnique 701

{-
************************************************************************
*                                                                      *
\subsection[Class-std-groups]{Standard groups of Prelude classes}
*                                                                      *
************************************************************************

NOTE: @Eq@ and @Text@ do need to appear in @standardClasses@
even though every numeric class has these two as a superclass,
because the list of ambiguous dictionaries hasn't been simplified.
-}

-- | Keys of the numeric classes (Num, Real, Integral) together with all
-- the fractional classes.
numericClassKeys :: [Unique]
numericClassKeys =
        [ numClassKey
        , realClassKey
        , integralClassKey
        ]
        ++ fractionalClassKeys

-- | Keys of the fractional classes: Fractional, Floating, RealFrac, RealFloat.
fractionalClassKeys :: [Unique]
fractionalClassKeys =
        [ fractionalClassKey
        , floatingClassKey
        , realFracClassKey
        , realFloatClassKey
        ]

-- The "standard classes" are used in defaulting (Haskell 98 report 4.3.4),
-- and are: "classes defined in the Prelude or a standard library"
standardClassKeys :: [Unique]
standardClassKeys = derivableClassKeys ++ numericClassKeys
                  ++ [ randomClassKey, randomGenClassKey
                     , functorClassKey, monadClassKey, monadPlusClassKey
                     , monadFailClassKey, semigroupClassKey, monoidClassKey
                     , isStringClassKey, applicativeClassKey, foldableClassKey
                     , traversableClassKey, alternativeClassKey
                     ]

{- @derivableClassKeys@ is also used in checking \tr{deriving} constructs
   (@GHC.Tc.Deriv@). -}
derivableClassKeys :: [Unique]
derivableClassKeys
  = [ eqClassKey, ordClassKey, enumClassKey, ixClassKey
    , boundedClassKey, showClassKey, readClassKey ]

-- These are the "interactive classes" that are consulted when doing
-- defaulting. Does not include Num or IsString, which have special
-- handling.
interactiveClassNames :: [Name] interactiveClassNames = [ showClassName, eqClassName, ordClassName, foldableClassName , traversableClassName ] interactiveClassKeys :: [Unique] interactiveClassKeys = map getUnique interactiveClassNames ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/PrimOps.hs0000644000000000000000000010546207346545000021251 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PrimOp]{Primitive operations (machine-level)} -} {-# LANGUAGE CPP #-} {-# LANGUAGE LambdaCase #-} module GHC.Builtin.PrimOps ( PrimOp(..), PrimOpVecCat(..), allThePrimOps, primOpType, primOpSig, primOpResultType, primOpTag, maxPrimOpTag, primOpOcc, primOpWrapperId, pprPrimOp, tagToEnumKey, primOpOutOfLine, primOpCodeSize, primOpOkForSpeculation, primOpOkToDiscard, primOpIsWorkFree, primOpIsCheap, primOpFixity, primOpDocs, primOpDeprecations, primOpIsDiv, primOpIsReallyInline, PrimOpEffect(..), primOpEffect, getPrimOpResultInfo, isComparisonPrimOp, PrimOpResultInfo(..), PrimCall(..) ) where import GHC.Prelude import GHC.Builtin.Types.Prim import GHC.Builtin.Types import GHC.Builtin.Uniques (mkPrimOpIdUnique, mkPrimOpWrapperUnique ) import GHC.Builtin.Names ( gHC_PRIMOPWRAPPERS ) import GHC.Core.TyCon ( isPrimTyCon, isUnboxedTupleTyCon, PrimRep(..) ) import GHC.Core.Type import GHC.Cmm.Type import GHC.Types.Demand import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Name import GHC.Types.RepType ( tyConPrimRep ) import GHC.Types.Basic import GHC.Types.Fixity ( Fixity(..), FixityDirection(..) 
    )
import GHC.Types.SrcLoc ( wiredInSrcSpan )
import GHC.Types.ForeignCall ( CLabelString )
import GHC.Types.Unique ( Unique )
import GHC.Unit.Types ( Unit )

import GHC.Utils.Outputable
import GHC.Utils.Panic

import GHC.Data.FastString

{-
************************************************************************
*                                                                      *
\subsection[PrimOp-datatype]{Datatype for @PrimOp@ (an enumeration)}
*                                                                      *
************************************************************************

These are in \tr{state-interface.verb} order.
-}

-- supplies:
-- data PrimOp = ...
#include "primop-data-decl.hs-incl"

-- supplies
-- primOpTag :: PrimOp -> Int
#include "primop-tag.hs-incl"

-- Catch-all equation, after all the generated ones.
primOpTag _ = error "primOpTag: unknown primop"

-- Eq and Ord are defined via primOpTag, so the generated tag table is
-- the single source of truth for primop identity and ordering.
instance Eq PrimOp where
    op1 == op2 = primOpTag op1 == primOpTag op2

instance Ord PrimOp where
    op1 <  op2 = primOpTag op1 <  primOpTag op2
    op1 <= op2 = primOpTag op1 <= primOpTag op2
    op1 >= op2 = primOpTag op1 >= primOpTag op2
    op1 >  op2 = primOpTag op1 >  primOpTag op2
    op1 `compare` op2 | op1 < op2  = LT
                      | op1 == op2 = EQ
                      | otherwise  = GT

instance Outputable PrimOp where
    ppr op = pprPrimOp op

-- Element-kind categories for vector primops.
-- NOTE(review): no consumer is visible in this file; presumably used by
-- the vector-primop machinery elsewhere -- confirm.
data PrimOpVecCat = IntVec | WordVec | FloatVec

-- An @Enum@-derived list would be better; meanwhile... (ToDo)
allThePrimOps :: [PrimOp]
allThePrimOps =
#include "primop-list.hs-incl"

-- | Unique for the Id of tagToEnum#, derived from the primop's tag.
tagToEnumKey :: Unique
tagToEnumKey = mkPrimOpIdUnique (primOpTag TagToEnumOp)

{-
************************************************************************
*                                                                      *
\subsection[PrimOp-info]{The essential info about each @PrimOp@}
*                                                                      *
************************************************************************
-}

-- | Everything needed to reconstruct a primop's name and type:
-- comparison primops carry just their argument type (result is Int#),
-- general primops carry binders, argument types and result type.
data PrimOpInfo
  = Compare     OccName         -- string :: T -> T -> Int#
                Type
  | GenPrimOp   OccName         -- string :: \/a1..an . T1 -> .. -> Tk -> T
                [TyVarBinder]
                [Type]
                Type

-- | Build the info for a comparison primop from its name and argument type.
mkCompare :: FastString -> Type -> PrimOpInfo
mkCompare str ty = Compare (mkVarOccFS str) ty

-- | Build the info for a general primop.
mkGenPrimOp :: FastString -> [TyVarBinder] -> [Type] -> Type -> PrimOpInfo
mkGenPrimOp str tvs tys ty = GenPrimOp (mkVarOccFS str) tvs tys ty

{-
************************************************************************
*                                                                      *
\subsubsection{Strictness}
*                                                                      *
************************************************************************

Not all primops are strict!
-}

primOpStrictness :: PrimOp -> Arity -> DmdSig
        -- See Demand.DmdSig for discussion of what the results mean.
        -- The arity should be the arity of the primop; that's why
        -- this function isn't exported.
#include "primop-strictness.hs-incl"

{-
************************************************************************
*                                                                      *
\subsubsection{Fixity}
*                                                                      *
************************************************************************
-}

primOpFixity :: PrimOp -> Maybe Fixity
#include "primop-fixity.hs-incl"

{-
************************************************************************
*                                                                      *
\subsubsection{Docs}
*                                                                      *
************************************************************************

See Note [GHC.Prim Docs] in GHC.Builtin.Utils
-}

primOpDocs :: [(FastString, String)]
#include "primop-docs.hs-incl"

primOpDeprecations :: [(OccName, FastString)]
#include "primop-deprecations.hs-incl"

{-
************************************************************************
*                                                                      *
\subsubsection[PrimOp-comparison]{PrimOpInfo basic comparison ops}
*                                                                      *
************************************************************************

@primOpInfo@ gives all essential information (from which everything
else, notably a type, can be constructed) for each @PrimOp@.
-}

primOpInfo :: PrimOp -> PrimOpInfo
#include "primop-primop-info.hs-incl"
-- Catch-all equation, mirroring primOpTag above.
primOpInfo _ = error "primOpInfo: unknown primop"

{-
Here are a load of comments from the old primOp info:

  A @Word#@ is an unsigned @Int#@.

  @decodeFloat#@ is given w/ Integer-stuff (it's similar).
@decodeDouble#@ is given w/ Integer-stuff (it's similar). Decoding of floating-point numbers is sorta Integer-related. Encoding is done with plain ccalls now (see PrelNumExtra.hs). A @Weak@ Pointer is created by the @mkWeak#@ primitive: mkWeak# :: k -> v -> f -> State# RealWorld -> (# State# RealWorld, Weak# v #) In practice, you'll use the higher-level data Weak v = Weak# v mkWeak :: k -> v -> IO () -> IO (Weak v) The following operation dereferences a weak pointer. The weak pointer may have been finalized, so the operation returns a result code which must be inspected before looking at the dereferenced value. deRefWeak# :: Weak# v -> State# RealWorld -> (# State# RealWorld, v, Int# #) Only look at v if the Int# returned is /= 0 !! The higher-level op is deRefWeak :: Weak v -> IO (Maybe v) Weak pointers can be finalized early by using the finalize# operation: finalizeWeak# :: Weak# v -> State# RealWorld -> (# State# RealWorld, Int#, IO () #) The Int# returned is either 0 if the weak pointer has already been finalized, or it has no finalizer (the third component is then invalid). 1 if the weak pointer is still alive, with the finalizer returned as the third component. A {\em stable name/pointer} is an index into a table of stable name entries. Since the garbage collector is told about stable pointers, it is safe to pass a stable pointer to external systems such as C routines. \begin{verbatim} makeStablePtr# :: a -> State# RealWorld -> (# State# RealWorld, StablePtr# a #) freeStablePtr :: StablePtr# a -> State# RealWorld -> State# RealWorld deRefStablePtr# :: StablePtr# a -> State# RealWorld -> (# State# RealWorld, a #) eqStablePtr# :: StablePtr# a -> StablePtr# a -> Int# \end{verbatim} It may seem a bit surprising that @makeStablePtr#@ is a @IO@ operation since it doesn't (directly) involve IO operations. 
The reason is that if some optimisation pass decided to duplicate calls to @makeStablePtr#@ and we only pass one of the stable pointers over, a massive space leak can result. Putting it into the IO monad prevents this. (Another reason for putting them in a monad is to ensure correct sequencing wrt the side-effecting @freeStablePtr@ operation.) An important property of stable pointers is that if you call makeStablePtr# twice on the same object you get the same stable pointer back. Note that we can implement @freeStablePtr#@ using @_ccall_@ (and, besides, it's not likely to be used from Haskell) so it's not a primop. Question: Why @RealWorld@ - won't any instance of @_ST@ do the job? [ADR] Stable Names ~~~~~~~~~~~~ A stable name is like a stable pointer, but with three important differences: (a) You can't deRef one to get back to the original object. (b) You can convert one to an Int. (c) You don't need to 'freeStableName' The existence of a stable name doesn't guarantee to keep the object it points to alive (unlike a stable pointer), hence (a). Invariants: (a) makeStableName always returns the same value for a given object (same as stable pointers). (b) if two stable names are equal, it implies that the objects from which they were created were the same. (c) stableNameToInt always returns the same Int for a given stable name. These primops are pretty weird. tagToEnum# :: Int -> a (result type must be an enumerated type) The constraints aren't currently checked by the front end, but the code generator will fall over if they aren't satisfied. ************************************************************************ * * Which PrimOps are out-of-line * * ************************************************************************ Some PrimOps need to be called out-of-line because they either need to perform a heap check or they block. 
-}

-- | True of primops that must be called out-of-line (they either need to
-- perform a heap check or they block; see the section comment just above).
primOpOutOfLine :: PrimOp -> Bool
#include "primop-out-of-line.hs-incl"

{-
************************************************************************
*                                                                      *
            Failure and side effects
*                                                                      *
************************************************************************

Note [Exceptions: asynchronous, synchronous, and unchecked]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are three very different sorts of things in GHC-Haskell that are
sometimes called exceptions:

* Haskell exceptions: These are ordinary exceptions that users can raise
  with the likes of 'throw' and handle with the likes of 'catch'.
  They come in two very different flavors:

  * Asynchronous exceptions:
    * These can arise at nearly any time, and may have nothing to do with
      the code being executed.
    * The compiler itself mostly doesn't need to care about them.
    * Examples: a signal from another process, running out of heap or stack
    * Even pure code can receive asynchronous exceptions; in this case,
      executing the same code again may lead to different results, because
      the exception may not happen next time.
    * See rts/RaiseAsync.c for the gory details of how they work.

  * Synchronous exceptions:
    * These are produced by the code being executed, most commonly via a
      call to the `raise#` or `raiseIO#` primops.
    * At run-time, if a piece of pure code raises a synchronous exception,
      it will always raise the same synchronous exception if it is run
      again (and not interrupted by an asynchronous exception).
    * In particular, if an updatable thunk does some work and then raises
      a synchronous exception, it is safe to overwrite it with a thunk
      that /immediately/ raises the same exception.
    * Although we are careful not to discard synchronous exceptions, we
      are very liberal about re-ordering them with respect to most other
      operations.  See the paper "A semantics for imprecise exceptions"
      as well as Note [Precise exceptions and strictness analysis] in
      GHC.Types.Demand.
* Unchecked exceptions: * These are nasty failures like seg-faults or primitive Int# division by zero. They differ from Haskell exceptions in that they are un-recoverable and typically bring execution to an immediate halt. * We generally treat unchecked exceptions as undefined behavior, on the assumption that the programmer never intends to crash the program in this way. Thus we have no qualms about replacing a division-by-zero with a recoverable Haskell exception or discarding an indexArray# operation whose result is unused. Note [Classifying primop effects] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each primop has an associated 'PrimOpEffect', based on what that primop can or cannot do at runtime. This classification is * Recorded in the 'effect' field in primops.txt.pp, and * Exposed to the compiler via the 'primOpEffect' function in this module. See Note [Transformations affected by primop effects] for how we make use of this categorisation. The meanings of the four constructors of 'PrimOpEffect' are as follows, in decreasing order of permissiveness: * ReadWriteEffect A primop is marked ReadWriteEffect if it can - read or write to the world (I/O), or - read or write to a mutable data structure (e.g. readMutVar#). Every such primop uses State# tokens for sequencing, with a type like: Inputs -> State# s -> (# State# s, Outputs #) The state token threading expresses ordering, but duplicating even a read-only effect would defeat this. (See "duplication" under Note [Transformations affected by primop effects] for details.) Note that operations like `indexArray#` that read *immutable* data structures do not need such special sequencing-related care, and are therefore not marked ReadWriteEffect. * ThrowsException A primop is marked ThrowsException if - it is not marked ReadWriteEffect, and - it may diverge or throw a synchronous Haskell exception even when used in a "correct" and well-specified way. See also Note [Exceptions: asynchronous, synchronous, and unchecked]. 
Examples include raise#, raiseIO#, dataToTagLarge#, and seq#. Note that whether an exception is considered precise or imprecise does not matter for the purposes of the PrimOpEffect flag. * CanFail A primop is marked CanFail if - it is not marked ReadWriteEffect or ThrowsException, and - it can trigger a (potentially-unchecked) exception when used incorrectly. See Note [Exceptions: asynchronous, synchronous, and unchecked]. Examples include quotWord# and indexIntArray#, which can fail with division-by-zero and a segfault respectively. A correct use of a CanFail primop is usually surrounded by a test that screens out the bad cases such as a zero divisor or an out-of-bounds array index. We must take care never to move a CanFail primop outside the scope of such a test. * NoEffect A primop is marked NoEffect if it does not belong to any of the other three categories. We can very aggressively shuffle these operations around without fear of changing a program's meaning. Perhaps surprisingly, this aggressive shuffling imposes another restriction: The tricky NoEffect primop uncheckedShiftLWord32# has an undefined result when the provided shift amount is not between 0 and 31. Thus, a call like `uncheckedShiftLWord32# x 95#` is obviously invalid. But since uncheckedShiftLWord32# is marked NoEffect, we may float such an invalid call out of a dead branch and speculatively evaluate it. In particular, we cannot safely rewrite such an invalid call to a runtime error; we must emit code that produces a valid Word32#. (If we're lucky, Core Lint may complain that the result of such a rewrite violates the let-can-float invariant (#16742), but the rewrite is always wrong!) See also Note [Guarding against silly shifts] in GHC.Core.Opt.ConstantFold. 
Marking uncheckedShiftLWord32# as CanFail instead of NoEffect would give us
the freedom to rewrite such invalid calls to runtime errors, but would get
in the way of optimization: When speculatively executing a bit-shift
prevents the allocation of a thunk, that's a big win.

Note [Transformations affected by primop effects]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The PrimOpEffect properties have the following effect on program
transformations.  The summary table is followed by details.
See also Note [Classifying primop effects] for exactly what each column means.

                        NoEffect   CanFail   ThrowsException   ReadWriteEffect
Discard                 YES        YES       NO                NO
Defer (float in)        YES        YES       SAFE              SAFE
Speculate (float out)   YES        NO        NO                NO
Duplicate               YES        YES       YES               NO

(SAFE means we could perform the transformation but do not.)

* Discarding:   case (a `op` b) of _ -> rhs  ===>  rhs
  You should not discard a ReadWriteEffect primop; e.g.
     case (writeIntArray# a i v s) of (# _, _ #) -> True
  One could argue in favor of discarding this, since the returned State#
  token is not used.  But in practice unsafePerformIO can easily produce
  similar code, and programmers sometimes write this kind of stuff by
  hand (#9390).  So we (conservatively) never discard a ReadWriteEffect
  primop.

  Digression: We could try to track read-only effects separately from
  write effects to allow the former to be discarded.  But in fact we want
  a more general rewrite for read-only operations:
    case readOp# state# of (# newState#, _unused_result #) -> body
      ==>
    case state# of newState# -> body
  Such a rewrite is not yet implemented, but would have to be done in a
  different place anyway.

  Discarding a ThrowsException primop would also discard any exception it
  might have thrown.  For `raise#` or `raiseIO#` this would defeat the
  whole point of the primop, while for `dataToTagLarge#` or `seq#` this
  would make programs unexpectedly lazier.

  However, it's fine to discard a CanFail primop.
For example case (indexIntArray# a i) of _ -> True We can discard indexIntArray# here; this came up in #5658. Notice that CanFail primops like indexIntArray# can only trigger an exception when used incorrectly, i.e. a call that might not succeed is undefined behavior anyway. * Deferring (float-in): See Note [Floating primops] in GHC.Core.Opt.FloatIn. In the absence of data dependencies (including state token threading), we reserve the right to re-order the following things arbitrarily: * Side effects * Imprecise exceptions * Divergent computations (infinite loops) This lets us safely float almost any primop *inwards*, but not inside a (multi-shot) lambda. (See "Duplication" below.) However, the main reason to float-in a primop application would be to discard it (by floating it into some but not all branches of a case), so we actually only float-in NoEffect and CanFail operations. See also Note [Floating primops] in GHC.Core.Opt.FloatIn. (This automatically side-steps the question of precise exceptions, which mustn't be re-ordered arbitrarily but need at least ThrowsException.) * Speculation (strict float-out): You must not float a CanFail primop *outwards* lest it escape the dynamic scope of a run-time validity test. Example: case d ># 0# of True -> case x /# d of r -> r +# 1 False -> 0 Here we must not float the case outwards to give case x/# d of r -> case d ># 0# of True -> r +# 1 False -> 0 Otherwise, if this block is reached when d is zero, it will crash. Exactly the same reasoning applies to ThrowsException primops. Nor can you float out a ReadWriteEffect primop. For example: if blah then case writeMutVar# v True s0 of (# s1 #) -> s1 else s0 Notice that s0 is mentioned in both branches of the 'if', but only one of these two will actually be consumed. But if we float out to case writeMutVar# v True s0 of (# s1 #) -> if blah then s1 else s0 the writeMutVar will be performed in both branches, which is utterly wrong. 
  What about a read-only operation that cannot fail, like readMutVar#?
  In principle we could safely float these out.  But there are not very
  many such operations and it's not clear if there are real-world programs
  that would benefit from this.

* Duplication:
  You cannot duplicate a ReadWriteEffect primop.  You might wonder how
  this can occur given the state token threading, but just look at
  Control.Monad.ST.Lazy.Imp.strictToLazy!  We get something like this
        p = case readMutVar# s v of
              (# s', r #) -> (State# s', r)
        s' = case p of (s', r) -> s'
        r  = case p of (s', r) -> r
  (All these bindings are boxed.)  If we inline p at its two call sites,
  we get a catastrophe: because the read is performed once when s' is
  demanded, and once when 'r' is demanded, which may be much later.
  Utterly wrong.  #3207 is a real example of this happening.

  Floating p into a multi-shot lambda would be wrong for the same reason.
  However, it's fine to duplicate a CanFail or ThrowsException primop.

Note [Implementation: how PrimOpEffect affects transformations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
How do we ensure that floating/duplication/discarding are done right
in the simplifier?  Several predicates on primops test this flag:
  primOpOkToDiscard      <=> effect < ThrowsException
  primOpOkForSpeculation <=> effect == NoEffect && not (out_of_line)
  primOpIsCheap          <=> cheap -- ...defaults to primOpOkForSpeculation
  [[But note that the raise# family and seq# are also considered
    cheap in GHC.Core.Utils.exprIsCheap by way of being work-free]]

* The discarding mentioned above happens in
  GHC.Core.Opt.Simplify.Iteration, specifically in rebuildCase, where it
  is guarded by exprOkToDiscard, which in turn checks primOpOkToDiscard.

* The "no-float-out" thing is achieved by ensuring that we never let-bind
  a saturated primop application unless it has NoEffect.  The RHS of a
  let-binding (which can float in and out freely) satisfies
  exprOkForSpeculation; this is the let-can-float invariant.
  And exprOkForSpeculation is false of a saturated primop application
  unless it has NoEffect.

* So primops that aren't NoEffect will appear only as the scrutinees of
  cases, and that's why the FloatIn pass is capable of floating case
  bindings inwards.

* Duplication via inlining and float-in of (lifted) let-binders is
  controlled via primOpIsWorkFree and primOpIsCheap, by making
  ReadWriteEffect things (among others) not-cheap!  (The test
  PrimOpEffect_Sanity will complain if any ReadWriteEffect primop is
  considered either work-free or cheap.)  Additionally, a case binding
  is only floated inwards if its scrutinee is ok-to-discard.
-}

-- | The effect classification of a primop, generated from the 'effect'
-- field of primops.txt.pp.  See Note [Classifying primop effects].
primOpEffect :: PrimOp -> PrimOpEffect
#include "primop-effects.hs-incl"

-- Constructors are listed from most to least permissive; the derived Ord
-- instance is relied upon by primOpOkToDiscard below, which compares
-- against ThrowsException with (<).
data PrimOpEffect
  -- See Note [Classifying primop effects]
  = NoEffect
  | CanFail
  | ThrowsException
  | ReadWriteEffect
  deriving (Eq, Ord)

-- | May this primop be speculated (evaluated early / floated out)?
-- Only in-line primops with no effect at all qualify.
primOpOkForSpeculation :: PrimOp -> Bool
  -- See Note [Classifying primop effects]
  -- See comments with GHC.Core.Utils.exprOkForSpeculation
  -- primOpOkForSpeculation => primOpOkToDiscard
primOpOkForSpeculation op
  = primOpEffect op == NoEffect && not (primOpOutOfLine op)
    -- I think the "out of line" test is because out of line things can
    -- be expensive (eg sine, cosine), and so we may not want to speculate them

-- | May an unused application of this primop be dropped entirely?
-- True for NoEffect and CanFail (i.e. effect < ThrowsException).
primOpOkToDiscard :: PrimOp -> Bool
primOpOkToDiscard op
  = primOpEffect op < ThrowsException

primOpIsWorkFree :: PrimOp -> Bool
#include "primop-is-work-free.hs-incl"

primOpIsCheap :: PrimOp -> Bool
-- See Note [Classifying primop effects]
#include "primop-is-cheap.hs-incl"
-- In March 2001, we changed this to
--      primOpIsCheap op = False
-- thereby making *no* primops seem cheap.  But this killed eta
-- expansion on case (x ==# y) of True -> \s -> ...
-- which is bad.  In particular a loop like
--      doLoop n = loop 0
--        where
--          loop i | i == n        = return ()
--                 | otherwise     = bar i >> loop (i+1)
-- allocated a closure every time round because it doesn't eta expand.
--
-- The problem that originally gave rise to the change was
--      let x = a +# b *# c in x +# x
-- where we don't want to inline x. But primopIsCheap doesn't control
-- that (it's primOpIsWorkFree that does) so the problem doesn't occur
-- even if primOpIsCheap sometimes says 'True'.

-- | True of dyadic operators that can fail only if the second arg is zero!
-- Covers the quot/rem/quotRem families at every integral width, plus
-- floating-point division.
--
-- This function probably belongs in an automagically generated file.. but it's
-- such a special case I thought I'd leave it here for now.
primOpIsDiv :: PrimOp -> Bool
primOpIsDiv op = case op of

  -- Signed quotient
  IntQuotOp       -> True
  Int8QuotOp      -> True
  Int16QuotOp     -> True
  Int32QuotOp     -> True
  Int64QuotOp     -> True

  -- Signed remainder
  IntRemOp        -> True
  Int8RemOp       -> True
  Int16RemOp      -> True
  Int32RemOp      -> True
  Int64RemOp      -> True

  -- Signed combined quotient/remainder
  IntQuotRemOp    -> True
  Int8QuotRemOp   -> True
  Int16QuotRemOp  -> True
  Int32QuotRemOp  -> True
  -- Int64QuotRemOp doesn't exist (yet)

  -- Unsigned quotient
  WordQuotOp      -> True
  Word8QuotOp     -> True
  Word16QuotOp    -> True
  Word32QuotOp    -> True
  Word64QuotOp    -> True

  -- Unsigned remainder
  WordRemOp       -> True
  Word8RemOp      -> True
  Word16RemOp     -> True
  Word32RemOp     -> True
  Word64RemOp     -> True

  -- Unsigned combined quotient/remainder
  WordQuotRemOp   -> True
  Word8QuotRemOp  -> True
  Word16QuotRemOp -> True
  Word32QuotRemOp -> True
  -- Word64QuotRemOp doesn't exist (yet)

  WordQuotRem2Op  -> True

  -- Floating-point division
  FloatDivOp      -> True
  DoubleDivOp     -> True

  _               -> False

{-
************************************************************************
*                                                                      *
                 PrimOp code size
*                                                                      *
************************************************************************

primOpCodeSize
~~~~~~~~~~~~~~
Gives an indication of the code size of a primop, for the purposes of
calculating unfolding sizes; see GHC.Core.Unfold.sizeExpr.
-}

primOpCodeSize :: PrimOp -> Int
#include "primop-code-size.hs-incl"

primOpCodeSizeDefault :: Int
primOpCodeSizeDefault = 1
  -- GHC.Core.Unfold.primOpSize already takes into account primOpOutOfLine
  -- and adds some further costs for the args in that case.
primOpCodeSizeForeignCall :: Int
primOpCodeSizeForeignCall = 4

{-
************************************************************************
*                                                                      *
                 PrimOp types
*                                                                      *
************************************************************************
-}

-- | The full polymorphic type of a primop, built from 'primOpInfo'.
primOpType :: PrimOp -> Type  -- you may want to use primOpSig instead
primOpType op
  = case primOpInfo op of
      Compare _occ ty -> compare_fun_ty ty

      GenPrimOp _occ tyvars arg_tys res_ty
        -> mkForAllTys tyvars (mkVisFunTysMany arg_tys res_ty)

-- | Just the result type of a primop; comparisons always return 'Int#'.
primOpResultType :: PrimOp -> Type
primOpResultType op
  = case primOpInfo op of
      Compare _occ _ty -> intPrimTy
      GenPrimOp _occ _tyvars _arg_tys res_ty -> res_ty

-- | The source-level name ('OccName') of a primop.
primOpOcc :: PrimOp -> OccName
primOpOcc op = case primOpInfo op of
                 Compare   occ _     -> occ
                 GenPrimOp occ _ _ _ -> occ

{- Note [Primop wrappers]
~~~~~~~~~~~~~~~~~~~~~~~~~
To support (limited) use of primops in GHCi genprimopcode generates the
GHC.PrimopWrappers module. This module contains a "primop wrapper"
binding for each primop. These are standard Haskell functions mirroring the
types of the primops they wrap. For instance, in the case of plusInt# we would
have:

    module GHC.PrimopWrappers where
    import GHC.Prim as P

    plusInt# :: Int# -> Int# -> Int#
    plusInt# a b = P.plusInt# a b

The Id for the wrapper of a primop can be found using
'GHC.Builtin.PrimOps.primOpWrapperId'. However, GHCi does not use this
mechanism to link primops; it rather does a rather hacky symbol lookup (see
GHC.ByteCode.Linker.primopToCLabel). TODO: Perhaps this should be changed?

Note that these wrappers aren't *quite* as expressive as their unwrapped
brethren, in that they may exhibit less representation polymorphism.
For instance, consider the case of mkWeakNoFinalizer#, which has type:

    mkWeakNoFinalizer# :: forall (r :: RuntimeRep) (k :: TYPE r) (v :: Type).
                          k -> v
                       -> State# RealWorld -> (# State# RealWorld, Weak# v #)

Naively we could generate a wrapper of the form,

    mkWeakNoFinalizer# k v s = GHC.Prim.mkWeakNoFinalizer# k v s

However, this would require that 'k' bind the representation-polymorphic key,
which is disallowed by our representation polymorphism validity checks
(see Note [Representation polymorphism invariants] in GHC.Core).
Consequently, we give the wrapper the simpler, less polymorphic type

    mkWeakNoFinalizer# :: forall (k :: Type) (v :: Type).
                          k -> v
                       -> State# RealWorld -> (# State# RealWorld, Weak# v #)

This simplification tends to be good enough for GHCi uses given that there are
few representation-polymorphic primops, and we do little simplification on
interpreted code anyways.

TODO: This behavior is actually wrong; a program becomes ill-typed upon
replacing a real primop occurrence with one of its wrapper due to the fact
that the former has an additional type binder. Hmmm....

Note [Eta expanding primops]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
STG requires that primop applications be saturated. This makes code generation
significantly simpler since otherwise we would need to define a calling
convention for curried applications that can accommodate representation
polymorphism.

To ensure saturation, CorePrep eta expands all primop applications as
described in Note [Eta expansion of hasNoBinding things in CorePrep] in
GHC.Core.Prep.

Historical Note:

For a short period around GHC 8.8 we rewrote unsaturated primop applications
to rather use the primop's wrapper (see Note [Primop wrappers] in
GHC.Builtin.PrimOps) instead of eta expansion. This was because at the time
CoreTidy would try to predict the CAFfyness of bindings that would be produced
by CorePrep for inclusion in interface files. Eta expanding during CorePrep
proved to be very difficult to predict, leading to nasty inconsistencies in
CAFfyness determinations (see #16846).

Thankfully, we now no longer try to predict CAFfyness but rather compute it on
GHC STG (see Note [SRTs] in GHC.Cmm.Info.Build) and inject it into the
interface file after code generation (see TODO: Refer to whatever falls out of
#18096). This is much simpler and avoids the potential for inconsistency,
allowing us to return to the somewhat simpler eta expansion approach for
unsaturated primops. See #18079.
-}

-- | Returns the 'Id' of the wrapper associated with the given 'PrimOp'.
-- See Note [Primop wrappers].
primOpWrapperId :: PrimOp -> Id
primOpWrapperId op = mkVanillaGlobalWithInfo name ty info
  where
    info = setCafInfo vanillaIdInfo NoCafRefs
    name = mkExternalName uniq gHC_PRIMOPWRAPPERS (primOpOcc op) wiredInSrcSpan
    uniq = mkPrimOpWrapperUnique (primOpTag op)
    ty   = primOpType op

-- | True exactly of the comparison primops (the 'Compare' family).
isComparisonPrimOp :: PrimOp -> Bool
isComparisonPrimOp op = case primOpInfo op of
                          Compare {}   -> True
                          GenPrimOp {} -> False

-- primOpSig is like primOpType but gives the result split apart:
-- (type variables, argument types, result type)
-- It also gives arity, strictness info
primOpSig :: PrimOp -> ([TyVarBinder], [Type], Type, Arity, DmdSig)
primOpSig op
  = (tyvars, arg_tys, res_ty, arity, primOpStrictness op arity)
  where
    arity = length arg_tys
    (tyvars, arg_tys, res_ty)
      = case (primOpInfo op) of
          Compare   _occ ty -> ([], [ty,ty], intPrimTy)
          GenPrimOp _occ tyvars arg_tys res_ty -> (tyvars, arg_tys, res_ty )

-- | How a primop delivers its result to the code generator.
data PrimOpResultInfo
  = ReturnsVoid           -- ^ No result value at all
  | ReturnsPrim PrimRep   -- ^ A single primitive value of this representation
  | ReturnsTuple          -- ^ An unboxed tuple (or sum) of values

-- Some PrimOps need not return a manifest primitive or algebraic value
-- (i.e. they might return a polymorphic value). These PrimOps *must*
-- be out of line, or the code generator won't work.
-- | Classify how the given primop returns its result; see
-- 'PrimOpResultInfo'. Panics on result shapes the code generator
-- cannot handle (a prim tycon with more than one 'PrimRep').
getPrimOpResultInfo :: PrimOp -> PrimOpResultInfo
getPrimOpResultInfo op
  = case (primOpInfo op) of
      Compare _ _ -> ReturnsPrim IntRep
      GenPrimOp _ _ _ ty
        | isPrimTyCon tc -> case tyConPrimRep tc of
                              []    -> ReturnsVoid
                              [rep] -> ReturnsPrim rep
                              _     -> pprPanic "getPrimOpResultInfo" (ppr op)
        | isUnboxedTupleTyCon tc -> ReturnsTuple
        | otherwise              -> pprPanic "getPrimOpResultInfo" (ppr op)
        where
          tc = tyConAppTyCon ty
          -- All primops return a tycon-app result
          -- The tycon can be an unboxed tuple or sum, though,
          -- which gives rise to a ReturnAlg

{- We do not currently make use of whether primops are commutable.

   We used to try to move constants to the right hand side for strength
   reduction.
-}

{- commutableOp :: PrimOp -> Bool
#include "primop-commutable.hs-incl"
-}

-- Utils:

-- | The type of a comparison primop over @ty@: @ty -> ty -> Int#@.
compare_fun_ty :: Type -> Type
compare_fun_ty ty = mkVisFunTysMany [ty, ty] intPrimTy

-- Output stuff:

-- | Pretty-print a primop by its occurrence name.
pprPrimOp :: IsLine doc => PrimOp -> doc
pprPrimOp other_op = pprOccName (primOpOcc other_op)
{-# SPECIALIZE pprPrimOp :: PrimOp -> SDoc #-}
{-# SPECIALIZE pprPrimOp :: PrimOp -> HLine #-}
-- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable

{-
************************************************************************
*                                                                      *
\subsubsection[PrimCall]{User-imported primitive calls}
*                                                                      *
************************************************************************
-}

-- | A user-imported primitive call: its label, plus the unit it comes from.
data PrimCall = PrimCall CLabelString Unit

instance Outputable PrimCall where
  ppr (PrimCall lbl pkgId)
    = text "__primcall" <+> ppr pkgId <+> ppr lbl

-- | Indicate if a primop is really inline: that is, it isn't out-of-line
-- and it isn't one of the DataToTag ops, which are two primops that evaluate
-- their argument and hence induce thread/stack/heap changes.
primOpIsReallyInline :: PrimOp -> Bool primOpIsReallyInline = \case DataToTagSmallOp -> False DataToTagLargeOp -> False p -> not (primOpOutOfLine p) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/PrimOps.hs-boot0000644000000000000000000000021707346545000022202 0ustar0000000000000000module GHC.Builtin.PrimOps where -- See W1 of Note [Tracking dependencies on primitives] in GHC.Internal.Base import GHC.Base () data PrimOp ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/PrimOps/0000755000000000000000000000000007346545000020705 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/PrimOps/Ids.hs0000644000000000000000000001431207346545000021761 0ustar0000000000000000 {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiWayIf #-} -- | PrimOp's Ids module GHC.Builtin.PrimOps.Ids ( primOpId , allThePrimOpIds ) where import GHC.Prelude -- primop rules are attached to primop ids import {-# SOURCE #-} GHC.Core.Opt.ConstantFold (primOpRules) import GHC.Core.TyCo.Rep ( scaledThing ) import GHC.Core.Type import GHC.Core.FVs (mkRuleInfo) import GHC.Builtin.PrimOps import GHC.Builtin.Uniques import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Types.Basic import GHC.Types.Cpr import GHC.Types.Demand import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.TyThing import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Var import GHC.Types.Var.Set import GHC.Tc.Types.Origin import GHC.Tc.Utils.TcType ( ConcreteTvOrigin(..), ConcreteTyVars, TcType ) import GHC.Data.SmallArray import Data.Maybe ( mapMaybe, listToMaybe, catMaybes, maybeToList ) -- | Build a PrimOp Id mkPrimOpId :: PrimOp -> Id mkPrimOpId prim_op = id where (tyvars,arg_tys,res_ty, arity, strict_sig) = primOpSig prim_op ty = mkForAllTys tyvars (mkVisFunTysMany arg_tys res_ty) name = mkWiredInName gHC_PRIM (primOpOcc prim_op) (mkPrimOpIdUnique (primOpTag prim_op)) (AnId id) UserSyntax id = mkGlobalId (PrimOpId prim_op conc_tvs) name ty info conc_tvs = 
computePrimOpConcTyVarsFromType name tyvars arg_tys res_ty -- PrimOps don't ever construct a product, but we want to preserve bottoms cpr | isDeadEndDiv (snd (splitDmdSig strict_sig)) = botCpr | otherwise = topCpr info = noCafIdInfo `setRuleInfo` mkRuleInfo (maybeToList $ primOpRules name prim_op) `setArityInfo` arity `setDmdSigInfo` strict_sig `setCprSigInfo` mkCprSig arity cpr `setInlinePragInfo` neverInlinePragma -- We give PrimOps a NOINLINE pragma so that we don't -- get silly warnings from Desugar.dsRule (the inline_shadows_rule -- test) about a RULE conflicting with a possible inlining -- cf #7287 -- | Analyse the type of a primop to determine which of its outermost forall'd -- type variables must be instantiated to concrete types when the primop is -- instantiated. -- -- These are the Levity and RuntimeRep kinded type-variables which appear in -- negative position in the type of the primop. computePrimOpConcTyVarsFromType :: Name -> [TyVarBinder] -> [Type] -> Type -> ConcreteTyVars computePrimOpConcTyVarsFromType nm tyvars arg_tys _res_ty = mkNameEnv concs where concs = [ (tyVarName kind_tv, ConcreteFRR frr_orig) | Bndr tv _af <- tyvars , kind_tv <- tyCoVarsOfTypeWellScoped $ tyVarKind tv , neg_pos <- maybeToList $ frr_tyvar_maybe kind_tv , let frr_orig = FixedRuntimeRepOrigin { frr_type = mkTyVarTy tv , frr_context = FRRRepPolyId nm RepPolyPrimOp neg_pos } ] -- As per Note [Levity and representation polymorphic primops] -- in GHC.Builtin.Primops.txt.pp, we compute the ConcreteTyVars associated -- to a primop by inspecting the type variable names. frr_tyvar_maybe tv | tv `elem` [ runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar , levity1TyVar, levity2TyVar ] = listToMaybe $ mapMaybe (\ (i,arg) -> Argument i <$> positiveKindPos_maybe tv arg) (zip [1..] arg_tys) | otherwise = Nothing -- Compute whether the type variable occurs in the kind of a type variable -- in positive position in one of the argument types of the primop. 
-- | Does this type variable appear in a kind in a negative position in the -- type? -- -- Returns the first such position if so. -- -- NB: assumes the type is of a simple form, e.g. no foralls, no function -- arrows nested in a TyCon other than a function arrow. -- Just used to compute the set of ConcreteTyVars for a PrimOp by inspecting -- its type, see 'computePrimOpConcTyVarsFromType'. negativeKindPos_maybe :: TcTyVar -> TcType -> Maybe (Position Neg) negativeKindPos_maybe tv ty | (args, res) <- splitFunTys ty = listToMaybe $ catMaybes $ ( (if null args then Nothing else Result <$> negativeKindPos_maybe tv res) : map recur (zip [1..] args) ) where recur (pos, scaled_ty) = Argument pos <$> positiveKindPos_maybe tv (scaledThing scaled_ty) -- (assumes we don't have any function types nested inside other types) -- | Does this type variable appear in a kind in a positive position in the -- type? -- -- Returns the first such position if so. -- -- NB: assumes the type is of a simple form, e.g. no foralls, no function -- arrows nested in a TyCon other than a function arrow. -- Just used to compute the set of ConcreteTyVars for a PrimOp by inspecting -- its type, see 'computePrimOpConcTyVarsFromType'. positiveKindPos_maybe :: TcTyVar -> TcType -> Maybe (Position Pos) positiveKindPos_maybe tv ty | (args, res) <- splitFunTys ty = listToMaybe $ catMaybes $ ( (if null args then finish res else Result <$> positiveKindPos_maybe tv res) : map recur (zip [1..] 
args) ) where recur (pos, scaled_ty) = Argument pos <$> negativeKindPos_maybe tv (scaledThing scaled_ty) -- (assumes we don't have any function types nested inside other types) finish ty | tv `elemVarSet` tyCoVarsOfType (typeKind ty) = Just Top | otherwise = Nothing ------------------------------------------------------------- -- Cache of PrimOp's Ids ------------------------------------------------------------- -- | A cache of the PrimOp Ids, indexed by PrimOp tag (0 indexed) primOpIds :: SmallArray Id {-# NOINLINE primOpIds #-} primOpIds = listToArray (maxPrimOpTag+1) primOpTag mkPrimOpId allThePrimOps -- | Get primop id. -- -- Retrieve it from `primOpIds` cache. primOpId :: PrimOp -> Id {-# INLINE primOpId #-} primOpId op = indexSmallArray primOpIds (primOpTag op) -- | All the primop ids, as a list allThePrimOpIds :: [Id] {-# INLINE allThePrimOpIds #-} allThePrimOpIds = map (indexSmallArray primOpIds) [0..maxPrimOpTag] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Types.hs0000644000000000000000000031403207346545000020757 0ustar0000000000000000{- (c) The GRASP Project, Glasgow University, 1994-1998 Wired-in knowledge about {\em non-primitive} types -} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ParallelListComp #-} {-# LANGUAGE MultiWayIf #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | This module is about types that can be defined in Haskell, but which -- must be wired into the compiler nonetheless. C.f module "GHC.Builtin.Types.Prim" module GHC.Builtin.Types ( -- * Helper functions defined here mkWiredInTyConName, -- This is used in GHC.Builtin.Types.Literals to define the -- built-in functions for evaluation. 
mkWiredInIdName, -- used in GHC.Types.Id.Make -- * All wired in things wiredInTyCons, isBuiltInOcc_maybe, isTupleTyOcc_maybe, isSumTyOcc_maybe, isPunOcc_maybe, -- * Bool boolTy, boolTyCon, boolTyCon_RDR, boolTyConName, trueDataCon, trueDataConId, true_RDR, falseDataCon, falseDataConId, false_RDR, promotedFalseDataCon, promotedTrueDataCon, -- * Ordering orderingTyCon, ordLTDataCon, ordLTDataConId, ordEQDataCon, ordEQDataConId, ordGTDataCon, ordGTDataConId, promotedLTDataCon, promotedEQDataCon, promotedGTDataCon, -- * Boxing primitive types boxingDataCon, BoxingInfo(..), -- * Char charTyCon, charDataCon, charTyCon_RDR, charTy, stringTy, charTyConName, stringTyCon_RDR, -- * Double doubleTyCon, doubleDataCon, doubleTy, doubleTyConName, -- * Float floatTyCon, floatDataCon, floatTy, floatTyConName, -- * Int intTyCon, intDataCon, intTyCon_RDR, intDataCon_RDR, intTyConName, intTy, -- * Word wordTyCon, wordDataCon, wordTyConName, wordTy, -- * Word8 word8TyCon, word8DataCon, word8Ty, -- * List listTyCon, listTyCon_RDR, listTyConName, listTyConKey, nilDataCon, nilDataConName, nilDataConKey, consDataCon_RDR, consDataCon, consDataConName, promotedNilDataCon, promotedConsDataCon, mkListTy, mkPromotedListTy, extractPromotedList, -- * Maybe maybeTyCon, maybeTyConName, nothingDataCon, nothingDataConName, promotedNothingDataCon, justDataCon, justDataConName, promotedJustDataCon, mkPromotedMaybeTy, mkMaybeTy, isPromotedMaybeTy, -- * Tuples mkTupleTy, mkTupleTy1, mkBoxedTupleTy, mkTupleStr, tupleTyCon, tupleDataCon, tupleTyConName, tupleDataConName, promotedTupleDataCon, unitTyCon, unitDataCon, unitDataConId, unitTy, unitTyConKey, soloTyCon, pairTyCon, mkPromotedPairTy, isPromotedPairType, unboxedUnitTy, unboxedUnitTyCon, unboxedUnitDataCon, unboxedTupleKind, unboxedSumKind, filterCTuple, mkConstraintTupleTy, -- ** Constraint tuples cTupleTyCon, cTupleTyConName, cTupleTyConNames, isCTupleTyConName, cTupleTyConNameArity_maybe, cTupleDataCon, cTupleDataConName, cTupleDataConNames, 
cTupleSelId, cTupleSelIdName, -- * Any anyTyCon, anyTy, anyTypeOfKind, zonkAnyTyCon, -- * Recovery TyCon makeRecoveryTyCon, -- * Sums mkSumTy, sumTyCon, sumDataCon, -- * Kinds typeSymbolKindCon, typeSymbolKind, isLiftedTypeKindTyConName, typeToTypeKind, liftedRepTyCon, unliftedRepTyCon, tYPETyCon, tYPETyConName, tYPEKind, cONSTRAINTTyCon, cONSTRAINTTyConName, cONSTRAINTKind, constraintKind, liftedTypeKind, unliftedTypeKind, zeroBitTypeKind, constraintKindTyCon, liftedTypeKindTyCon, unliftedTypeKindTyCon, constraintKindTyConName, liftedTypeKindTyConName, unliftedTypeKindTyConName, liftedRepTyConName, unliftedRepTyConName, -- * Equality predicates heqTyCon, heqTyConName, heqClass, heqDataCon, eqTyCon, eqTyConName, eqClass, eqDataCon, eqTyCon_RDR, coercibleTyCon, coercibleTyConName, coercibleDataCon, coercibleClass, -- * RuntimeRep and friends runtimeRepTyCon, vecCountTyCon, vecElemTyCon, boxedRepDataConTyCon, runtimeRepTy, liftedRepTy, unliftedRepTy, zeroBitRepTy, vecRepDataConTyCon, tupleRepDataConTyCon, sumRepDataConTyCon, -- * Levity levityTyCon, levityTy, liftedDataConTyCon, unliftedDataConTyCon, liftedDataConTy, unliftedDataConTy, intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy, vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy, int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy, -- * Multiplicity and friends multiplicityTyConName, oneDataConName, manyDataConName, multiplicityTy, multiplicityTyCon, oneDataCon, manyDataCon, oneDataConTy, manyDataConTy, oneDataConTyCon, manyDataConTyCon, multMulTyCon, unrestrictedFunTyCon, unrestrictedFunTyConName, -- 
* Bignum integerTy, integerTyCon, integerTyConName, integerISDataCon, integerISDataConName, integerIPDataCon, integerIPDataConName, integerINDataCon, integerINDataConName, naturalTy, naturalTyCon, naturalTyConName, naturalNSDataCon, naturalNSDataConName, naturalNBDataCon, naturalNBDataConName, pretendNameIsInScope, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Id.Make ( mkDataConWorkId, mkDictSelId ) -- friends: import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Builtin.Uniques -- others: import GHC.Core( Expr(Type), mkConApp ) import GHC.Core.Coercion.Axiom import GHC.Core.Type import GHC.Types.Id import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.Class ( Class, mkClass ) import GHC.Core.Map.Type ( TypeMap, emptyTypeMap, extendTypeMap, lookupTypeMap ) import qualified GHC.Core.TyCo.Rep as TyCoRep ( Type(TyConApp) ) import GHC.Types.TyThing import GHC.Types.SourceText import GHC.Types.Var ( VarBndr (Bndr), tyVarName ) import GHC.Types.RepType import GHC.Types.Name.Reader import GHC.Types.Name as Name import GHC.Types.Name.Env ( lookupNameEnv_NF, mkNameEnv ) import GHC.Types.Basic import GHC.Types.ForeignCall import GHC.Types.Unique.Set import {-# SOURCE #-} GHC.Tc.Types.Origin ( FixedRuntimeRepOrigin(..), mkFRRUnboxedTuple, mkFRRUnboxedSum ) import {-# SOURCE #-} GHC.Tc.Utils.TcType ( ConcreteTvOrigin(..), ConcreteTyVars, noConcreteTyVars ) import GHC.Settings.Constants ( mAX_TUPLE_SIZE, mAX_CTUPLE_SIZE, mAX_SUM_SIZE ) import GHC.Unit.Module ( Module ) import Data.Array import GHC.Data.FastString import GHC.Data.BooleanFormula ( mkAnd ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import qualified Data.ByteString.Char8 as BS import Data.Foldable import Data.List ( elemIndex, intersperse ) import Numeric ( showInt ) import Data.Char (ord, isDigit) import Control.Applicative ((<|>)) alpha_tyvar :: [TyVar] alpha_tyvar = [alphaTyVar] alpha_ty :: [Type] alpha_ty = [alphaTy] {- 
Note [Wired-in Types and Type Constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module include a lot of wired-in types and type constructors. Here, these are presented in a tabular format to make it easier to find the wired-in type identifier corresponding to a known Haskell type. Data constructors are nested under their corresponding types with two spaces of indentation. Identifier Type Haskell name Notes ---------------------------------------------------------------------------- liftedTypeKindTyCon TyCon GHC.Types.Type Synonym for: TYPE LiftedRep unliftedTypeKindTyCon TyCon GHC.Types.Type Synonym for: TYPE UnliftedRep liftedRepTyCon TyCon GHC.Types.LiftedRep Synonym for: 'BoxedRep 'Lifted unliftedRepTyCon TyCon GHC.Types.LiftedRep Synonym for: 'BoxedRep 'Unlifted levityTyCon TyCon GHC.Types.Levity Data type liftedDataConTyCon TyCon GHC.Types.Lifted Data constructor unliftedDataConTyCon TyCon GHC.Types.Unlifted Data constructor vecCountTyCon TyCon GHC.Types.VecCount Data type vec2DataConTy Type GHC.Types.Vec2 Data constructor vec4DataConTy Type GHC.Types.Vec4 Data constructor vec8DataConTy Type GHC.Types.Vec8 Data constructor vec16DataConTy Type GHC.Types.Vec16 Data constructor vec32DataConTy Type GHC.Types.Vec32 Data constructor vec64DataConTy Type GHC.Types.Vec64 Data constructor runtimeRepTyCon TyCon GHC.Types.RuntimeRep Data type boxedRepDataConTyCon TyCon GHC.Types.BoxedRep Data constructor intRepDataConTy Type GHC.Types.IntRep Data constructor doubleRepDataConTy Type GHC.Types.DoubleRep Data constructor floatRepDataConTy Type GHC.Types.FloatRep Data constructor boolTyCon TyCon GHC.Types.Bool Data type trueDataCon DataCon GHC.Types.True Data constructor falseDataCon DataCon GHC.Types.False Data constructor promotedTrueDataCon TyCon GHC.Types.True Data constructor promotedFalseDataCon TyCon GHC.Types.False Data constructor ************************************************************************ * * \subsection{Wired in type constructors} * * 
************************************************************************ If you change which things are wired in, make sure you change their names in GHC.Builtin.Names, so they use wTcQual, wDataQual, etc -} -- This list is used only to define GHC.Builtin.Utils.wiredInThings. That in turn -- is used to initialise the name environment carried around by the renamer. -- This means that if we look up the name of a TyCon (or its implicit binders) -- that occurs in this list that name will be assigned the wired-in key we -- define here. -- -- Because of their infinite nature, this list excludes -- * tuples, including boxed, unboxed and constraint tuples --- (mkTupleTyCon, unitTyCon, pairTyCon) -- * unboxed sums (sumTyCon) -- See Note [Infinite families of known-key names] in GHC.Builtin.Names -- -- See also Note [Known-key names] wiredInTyCons :: [TyCon] wiredInTyCons = map (dataConTyCon . snd) boxingDataCons ++ [ -- Units are not treated like other tuples, because they -- are defined in GHC.Base, and there's only a few of them. We -- put them in wiredInTyCons so that they will pre-populate -- the name cache, so the parser in isBuiltInOcc_maybe doesn't -- need to look out for them. unitTyCon , unboxedUnitTyCon -- Solo (i.e., the boxed 1-tuple) is also not treated -- like other tuples (i.e. we /do/ include it here), -- since it does not use special syntax like other tuples -- See Note [One-tuples] (Wrinkle: Make boxed one-tuple names -- have known keys) in GHC.Builtin.Types. 
, soloTyCon , anyTyCon , zonkAnyTyCon , boolTyCon , charTyCon , stringTyCon , doubleTyCon , floatTyCon , intTyCon , wordTyCon , listTyCon , orderingTyCon , maybeTyCon , heqTyCon , eqTyCon , coercibleTyCon , typeSymbolKindCon , runtimeRepTyCon , levityTyCon , vecCountTyCon , vecElemTyCon , constraintKindTyCon , liftedTypeKindTyCon , unliftedTypeKindTyCon , multiplicityTyCon , naturalTyCon , integerTyCon , liftedRepTyCon , unliftedRepTyCon , zeroBitRepTyCon , zeroBitTypeTyCon ] mkWiredInTyConName :: BuiltInSyntax -> Module -> FastString -> Unique -> TyCon -> Name mkWiredInTyConName built_in modu fs unique tycon = mkWiredInName modu (mkTcOccFS fs) unique (ATyCon tycon) -- Relevant TyCon built_in mkWiredInDataConName :: BuiltInSyntax -> Module -> FastString -> Unique -> DataCon -> Name mkWiredInDataConName built_in modu fs unique datacon = mkWiredInName modu (mkDataOccFS fs) unique (AConLike (RealDataCon datacon)) -- Relevant DataCon built_in mkWiredInIdName :: Module -> FastString -> Unique -> Id -> Name mkWiredInIdName mod fs uniq id = mkWiredInName mod (mkOccNameFS Name.varName fs) uniq (AnId id) UserSyntax -- See Note [Kind-changing of (~) and Coercible] -- in libraries/ghc-prim/GHC/Types.hs eqTyConName, eqDataConName, eqSCSelIdName :: Name eqTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "~") eqTyConKey eqTyCon eqDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Eq#") eqDataConKey eqDataCon eqSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "eq_sel") eqSCSelIdKey eqSCSelId eqTyCon_RDR :: RdrName eqTyCon_RDR = nameRdrName eqTyConName -- See Note [Kind-changing of (~) and Coercible] -- in libraries/ghc-prim/GHC/Types.hs heqTyConName, heqDataConName, heqSCSelIdName :: Name heqTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "~~") heqTyConKey heqTyCon heqDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "HEq#") heqDataConKey heqDataCon heqSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "heq_sel") heqSCSelIdKey heqSCSelId 
-- See Note [Kind-changing of (~) and Coercible] in libraries/ghc-prim/GHC/Types.hs coercibleTyConName, coercibleDataConName, coercibleSCSelIdName :: Name coercibleTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Coercible") coercibleTyConKey coercibleTyCon coercibleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "MkCoercible") coercibleDataConKey coercibleDataCon coercibleSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "coercible_sel") coercibleSCSelIdKey coercibleSCSelId charTyConName, charDataConName, intTyConName, intDataConName, stringTyConName :: Name charTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Char") charTyConKey charTyCon charDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "C#") charDataConKey charDataCon stringTyConName = mkWiredInTyConName UserSyntax gHC_INTERNAL_BASE (fsLit "String") stringTyConKey stringTyCon intTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Int") intTyConKey intTyCon intDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "I#") intDataConKey intDataCon boolTyConName, falseDataConName, trueDataConName :: Name boolTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Bool") boolTyConKey boolTyCon falseDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "False") falseDataConKey falseDataCon trueDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "True") trueDataConKey trueDataCon listTyConName, nilDataConName, consDataConName :: Name listTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "List") listTyConKey listTyCon nilDataConName = mkWiredInDataConName BuiltInSyntax gHC_TYPES (fsLit "[]") nilDataConKey nilDataCon consDataConName = mkWiredInDataConName BuiltInSyntax gHC_TYPES (fsLit ":") consDataConKey consDataCon maybeTyConName, nothingDataConName, justDataConName :: Name maybeTyConName = mkWiredInTyConName UserSyntax gHC_INTERNAL_MAYBE (fsLit "Maybe") maybeTyConKey maybeTyCon nothingDataConName = mkWiredInDataConName 
UserSyntax gHC_INTERNAL_MAYBE (fsLit "Nothing") nothingDataConKey nothingDataCon justDataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_MAYBE (fsLit "Just") justDataConKey justDataCon wordTyConName, wordDataConName, word8DataConName :: Name wordTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Word") wordTyConKey wordTyCon wordDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "W#") wordDataConKey wordDataCon word8DataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_WORD (fsLit "W8#") word8DataConKey word8DataCon floatTyConName, floatDataConName, doubleTyConName, doubleDataConName :: Name floatTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Float") floatTyConKey floatTyCon floatDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "F#") floatDataConKey floatDataCon doubleTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Double") doubleTyConKey doubleTyCon doubleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "D#") doubleDataConKey doubleDataCon -- Any {- Note [Any types] ~~~~~~~~~~~~~~~~ The type constructors `Any` and `ZonkAny` are closed type families declared thus: type family Any :: forall k. k where { } type family ZonkAny :: forall k. Nat -> k where { } They are used when we want a type of a particular kind, but we don't really care what that type is. The leading example is this: `ZonkAny` is used to instantiate un-constrained type variables after type checking. For example, consider the term (length [] :: Int), where length :: forall a. [a] -> Int [] :: forall a. [a] We must type-apply `length` and `[]`, but to what type? It doesn't matter! The typechecker will end up with length @alpha ([] @alpha) where `alpha` is an un-constrained unification variable. The "zonking" process zaps that unconstrained `alpha` to an arbitrary type (ZonkAny @Type 3), where the `3` is arbitrary (see wrinkle (Any5) below). This is done in `GHC.Tc.Zonk.Type.commitFlexi`. 
So we end up with length @(ZonkAny @Type 3) ([] @(ZonkAny @Type 3)) `Any` and `ZonkAny` differ only in the presence of the `Nat` argument; see wrinkle (Any4). Wrinkles: (Any1) `Any` and `ZonkAny` are kind polymorphic since in some program we may need to use `ZonkAny` to fill in a type variable of some kind other than * (see #959 for examples). (Any2) They are /closed/ type families, with no instances. For example, suppose that with alpha :: '(k1, k2) we add a given coercion g :: alpha ~ (Fst alpha, Snd alpha) and we zonked alpha = ZonkAny @(k1,k2) n. Then, if `ZonkAny` was a /data/ type, we'd get inconsistency because we'd have a Given equality with `ZonkAny` on one side and '(,) on the other. See also #9097 and #9636. See #25244 for a suggestion that we instead use an /open/ type family for which you cannot provide instances. Probably the difference is not very important. (Any3) They do not claim to be /data/ types, and that's important for the code generator, because the code gen may /enter/ a data value but never enters a function value. (Any4) `ZonkAny` takes a `Nat` argument so that we can readily make up /distinct/ types (#24817). Consider data SBool a where { STrue :: SBool True; SFalse :: SBool False } foo :: forall a b. (SBool a, SBool b) bar :: Bool bar = case foo @alpha @beta of (STrue, SFalse) -> True -- This branch is not inaccessible! _ -> False Now, what are `alpha` and `beta`? If we zonk both of them to the same type `Any @Type`, the pattern-match checker will (wrongly) report that the first branch is inaccessible. So we zonk them to two /different/ types: alpha := ZonkAny @Type 4 and beta := ZonkAny @Type k 5 (The actual numbers are arbitrary; they just need to differ.) The unique-name generation comes from field `tcg_zany_n` of `TcGblEnv`; and `GHC.Tc.Zonk.Type.commitFlexi` calls `GHC.Tc.Utils.Monad.newZonkAnyType` to make up a fresh type. If this example seems unconvincing (e.g. 
in this case foo must be bottom) see #24817 for larger but more compelling examples. (Any5) `Any` and `ZonkAny` are wired-in so we can easily refer to it where we don't have a name environment (e.g. see Rules.matchRule for one example) (Any6) `Any` is defined in library module ghc-prim:GHC.Types, and exported so that it is available to users. For this reason it's treated like any other wired-in type: - has a fixed unique, anyTyConKey, - lives in the global name cache Currently `ZonkAny` is not available to users; but it could easily be. (Any7) Properties of `Any`: * When `Any` is instantiated at a lifted type it is inhabited by at least one value, namely bottom. * You can safely coerce any /lifted/ type to `Any` and back with `unsafeCoerce`. * You can safely coerce any /unlifted/ type to `Any` and back with `unsafeCoerceUnlifted`. * You can coerce /any/ type to `Any` and back with `unsafeCoerce#`, but it's only safe when the kinds of both the type and `Any` match. * For lifted/unlifted types `unsafeCoerce[Unlifted]` should be preferred over `unsafeCoerce#` as they prevent accidentally coercing between types with kinds that don't match. See examples in ghc-prim:GHC.Types The Any tycon used to be quite magic, but we have since been able to implement it merely with an empty kind polymorphic type family. See #10886 for a bit of history. 
-}

anyTyConName :: Name
anyTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Any") anyTyConKey anyTyCon

anyTyCon :: TyCon   -- See Note [Any types]
-- Built as a /closed type family with no equations/, i.e.
--    type family Any :: forall k. k where { }
-- so it can never reduce, and never unifies with a data type (wrinkle (Any2)).
anyTyCon = mkFamilyTyCon anyTyConName binders res_kind Nothing
                         (ClosedSynFamilyTyCon Nothing)
                         Nothing
                         NotInjective
  where
    -- One kind binder (k :: Type); the result kind is k itself,
    -- so (Any @k) :: k for any kind k
    binders@[kv] = mkTemplateKindTyConBinders [liftedTypeKind]
    res_kind     = mkTyVarTy (binderVar kv)

anyTy :: Type
-- ^ @Any@ applied to no arguments
anyTy = mkTyConTy anyTyCon

anyTypeOfKind :: Kind -> Type
-- ^ @Any@ instantiated at the given kind: (Any @kind) :: kind
anyTypeOfKind kind = mkTyConApp anyTyCon [kind]

zonkAnyTyConName :: Name
zonkAnyTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "ZonkAny")
                                      zonkAnyTyConKey zonkAnyTyCon

zonkAnyTyCon :: TyCon  -- ZonkAnyTyCon :: forall k. Nat -> k
                       -- See Note [Any types]
-- Like 'anyTyCon' but with an extra Nat argument, so the zonker can make up
-- /distinct/ arbitrary types; see wrinkle (Any4) of Note [Any types]
zonkAnyTyCon = mkFamilyTyCon zonkAnyTyConName
                  [ mkNamedTyConBinder Specified kv
                  , mkAnonTyConBinder nat_kv ]
                  (mkTyVarTy kv)
                  Nothing
                  (ClosedSynFamilyTyCon Nothing)
                  Nothing
                  NotInjective
  where
    [kv, nat_kv] = mkTemplateKindVars [liftedTypeKind, naturalTy]

-- | Make a fake, recovery 'TyCon' from an existing one.
-- Used when recovering from errors in type declarations
makeRecoveryTyCon :: TyCon -> TyCon
makeRecoveryTyCon tc
  = mkTcTyCon (tyConName tc)
              bndrs res_kind
              noTcTyConScopedTyVars
              True             -- Fully generalised
              flavour          -- Keep old flavour
  where
    flavour = tyConFlavour tc
    [kv] = mkTemplateKindVars [liftedTypeKind]
    (bndrs, res_kind)
      = case flavour of
          PromotedDataConFlavour -> ([mkNamedTyConBinder Inferred kv], mkTyVarTy kv)
          _                      -> (tyConBinders tc, tyConResKind tc)
       -- For data types we have already validated their kind, so it
       -- makes sense to keep it. For promoted data constructors we haven't,
       -- so we recover with kind (forall k. k). Otherwise consider
       --     data T a where { MkT :: Show a => T a }
       -- If T is for some reason invalid, we don't want to fall over
       -- at (promoted) use-sites of MkT.
-- Kinds

typeSymbolKindConName :: Name
typeSymbolKindConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Symbol") typeSymbolKindConNameKey typeSymbolKindCon

-- RdrNames for a handful of wired-in things, handy for the renamer and
-- for code generated by deriving machinery
boolTyCon_RDR, false_RDR, true_RDR, intTyCon_RDR, charTyCon_RDR, stringTyCon_RDR,
    intDataCon_RDR, listTyCon_RDR, consDataCon_RDR :: RdrName
boolTyCon_RDR   = nameRdrName boolTyConName
false_RDR       = nameRdrName falseDataConName
true_RDR        = nameRdrName trueDataConName
intTyCon_RDR    = nameRdrName intTyConName
charTyCon_RDR   = nameRdrName charTyConName
stringTyCon_RDR = nameRdrName stringTyConName
intDataCon_RDR  = nameRdrName intDataConName
listTyCon_RDR   = nameRdrName listTyConName
consDataCon_RDR = nameRdrName consDataConName

{-
************************************************************************
*                                                                      *
\subsection{mkWiredInTyCon}
*                                                                      *
************************************************************************
-}

-- This function assumes that the types it creates have all parameters at
-- Representational role, and that there is no kind polymorphism.
pcTyCon :: Name -> Maybe CType -> [TyVar] -> [DataCon] -> TyCon
pcTyCon name cType tyvars cons
  = mkAlgTyCon name
               (mkAnonTyConBinders tyvars)
               liftedTypeKind
               (map (const Representational) tyvars)
               cType
               []   -- No stupid theta
               (mkDataTyConRhs cons)
               (VanillaAlgTyCon (mkPrelTyConRepName name))
               False   -- Not in GADT syntax

-- | Make a vanilla wired-in data constructor: no existentials, no theta,
-- no concrete (fixed-RuntimeRep) tyvars; all argument types linear
pcDataCon :: Name -> [TyVar] -> [Type] -> TyCon -> DataCon
pcDataCon n univs tys = pcRepPolyDataCon n univs noConcreteTyVars tys

-- | Like 'pcDataCon', but also takes the representation-polymorphic
-- tyvars that must be instantiated to concrete RuntimeReps
pcRepPolyDataCon :: Name -> [TyVar] -> ConcreteTyVars -> [Type] -> TyCon -> DataCon
pcRepPolyDataCon n univs conc_tvs tys
  = pcDataConWithFixity False n univs
                        []    -- no ex_tvs
                        conc_tvs
                        univs -- the univs are precisely the user-written tyvars
                        []    -- No theta
                        (map linear tys)

pcDataConConstraint :: Name -> [TyVar] -> ThetaType -> TyCon -> DataCon
-- Used for data constructors whose arguments are all constraints.
-- Notably constraint tuples, Eq# etc.
pcDataConConstraint n univs theta = pcDataConWithFixity False n univs [] -- No ex_tvs noConcreteTyVars univs -- The univs are precisely the user-written tyvars theta -- All constraint arguments [] -- No value arguments -- Used for RuntimeRep and friends; things with PromDataConInfo pcSpecialDataCon :: Name -> [Type] -> TyCon -> PromDataConInfo -> DataCon pcSpecialDataCon dc_name arg_tys tycon rri = pcDataConWithFixity' False dc_name (dataConWorkerUnique (nameUnique dc_name)) rri [] [] noConcreteTyVars [] [] (map linear arg_tys) tycon pcDataConWithFixity :: Bool -- ^ declared infix? -> Name -- ^ datacon name -> [TyVar] -- ^ univ tyvars -> [TyCoVar] -- ^ ex tycovars -> ConcreteTyVars -- ^ concrete tyvars -> [TyCoVar] -- ^ user-written tycovars -> ThetaType -> [Scaled Type] -- ^ args -> TyCon -> DataCon pcDataConWithFixity infx n = pcDataConWithFixity' infx n (dataConWorkerUnique (nameUnique n)) NoPromInfo -- The Name's unique is the first of two free uniques; -- the first is used for the datacon itself, -- the second is used for the "worker name" -- -- To support this the mkPreludeDataConUnique function "allocates" -- one DataCon unique per pair of Ints. pcDataConWithFixity' :: Bool -> Name -> Unique -> PromDataConInfo -> [TyVar] -> [TyCoVar] -> ConcreteTyVars -> [TyCoVar] -> ThetaType -> [Scaled Type] -> TyCon -> DataCon -- The Name should be in the DataName name space; it's the name -- of the DataCon itself. -- -- IMPORTANT NOTE: -- if you try to wire-in a /GADT/ data constructor you will -- find it hard (we did). You will need wrapper and worker -- Names, a DataConBoxer, DataConRep, EqSpec, etc. -- Try hard not to wire-in GADT data types. You will live -- to regret doing so (we do). pcDataConWithFixity' declared_infix dc_name wrk_key rri tyvars ex_tyvars conc_tyvars user_tyvars theta arg_tys tycon = data_con where tag_map = mkTyConTagMap tycon -- This constructs the constructor Name to ConTag map once per -- constructor, which is quadratic. 
It's OK here, because it's -- only called for wired in data types that don't have a lot of -- constructors. It's also likely that GHC will lift tag_map, since -- we call pcDataConWithFixity' with static TyCons in the same module. -- See Note [Constructor tag allocation] and #14657 data_con = mkDataCon dc_name declared_infix prom_info (map (const no_bang) arg_tys) [] -- No labelled fields tyvars ex_tyvars conc_tyvars (mkTyVarBinders SpecifiedSpec user_tyvars) [] -- No equality spec theta arg_tys (mkTyConApp tycon (mkTyVarTys tyvars)) rri tycon (lookupNameEnv_NF tag_map dc_name) [] -- No stupid theta (mkDataConWorkId wrk_name data_con) NoDataConRep -- Wired-in types are too simple to need wrappers no_bang = mkHsSrcBang NoSourceText NoSrcUnpack NoSrcStrict wrk_name = mkDataConWorkerName data_con wrk_key prom_info = mkPrelTyConRepName dc_name mkDataConWorkerName :: DataCon -> Unique -> Name mkDataConWorkerName data_con wrk_key = mkWiredInName modu wrk_occ wrk_key (AnId (dataConWorkId data_con)) UserSyntax where modu = assert (isExternalName dc_name) $ nameModule dc_name dc_name = dataConName data_con dc_occ = nameOccName dc_name wrk_occ = mkDataConWorkerOcc dc_occ {- ************************************************************************ * * Symbol * * ************************************************************************ -} typeSymbolKindCon :: TyCon -- data Symbol typeSymbolKindCon = pcTyCon typeSymbolKindConName Nothing [] [] typeSymbolKind :: Kind typeSymbolKind = mkTyConTy typeSymbolKindCon {- ************************************************************************ * * Stuff for dealing with tuples * * ************************************************************************ Note [How tuples work] ~~~~~~~~~~~~~~~~~~~~~~ * There are three families of tuple TyCons and corresponding DataCons, expressed by the type BasicTypes.TupleSort: data TupleSort = BoxedTuple | UnboxedTuple | ConstraintTuple * All three families are AlgTyCons, whose AlgTyConRhs is TupleTyCon * 
BoxedTuples - A wired-in type - Data type declarations in GHC.Tuple - The data constructors really have an info table * UnboxedTuples - A wired-in type - Data type declarations in GHC.Types but no actual declaration and no info table * ConstraintTuples - A wired-in type. - Declared as classes in GHC.Classes, e.g. class (c1,c2) => CTuple2 c1 c2 - Given constraints: the superclasses automatically become available - Wanted constraints: there is a built-in instance instance (c1,c2) => CTuple2 c1 c2 See GHC.Tc.Instance.Class.matchCTuple - Currently just go up to 64; beyond that you have to use manual nesting - Unlike BoxedTuples and UnboxedTuples, which only wire in type constructors and data constructors, ConstraintTuples also wire in superclass selector functions. For instance, $p1CTuple2 and $p2CTuple2 are the selectors for the binary constraint tuple. - The parenthesis syntax for grouping constraints in contexts is not treated as a constraint tuple. The parser starts with a tuple type, then a postprocessing action extracts the individual constraints as a list and stores them in the context field of types like HsQualTy. * In quite a lot of places things are restricted just to BoxedTuple/UnboxedTuple, and then we used BasicTypes.Boxity to distinguish E.g. tupleTyCon has a Boxity argument * When looking up an OccName in the original-name cache (GHC.Iface.Env.lookupOrigNameCache), we spot the tuple OccName to make sure we get the right wired-in name. This guy can't tell the difference between BoxedTuple and ConstraintTuple (same OccName!), so tuples are not serialised into interface files using OccNames at all. * Serialization to interface files works via the usual mechanism for known-key things: instead of serializing the OccName we just serialize the key. During deserialization we lookup the Name associated with the unique with the logic in GHC.Builtin.Uniques. See Note [Symbol table representation of names] for details. 
See also Note [Known-key names] in GHC.Builtin.Names. Note [One-tuples] ~~~~~~~~~~~~~~~~~ GHC supports both boxed and unboxed one-tuples: - Unboxed one-tuples are sometimes useful when returning a single value after CPR analysis - A boxed one-tuple is used by GHC.HsToCore.Utils.mkSelectorBinds, when there is just one binder Basically it keeps everything uniform. However the /naming/ of the type/data constructors for one-tuples is a bit odd: 3-tuples: Tuple3 (,,)# 2-tuples: Tuple2 (,)# 1-tuples: ?? 0-tuples: Unit ()# Zero-tuples have used up the logical name. So we use 'Solo' and 'Solo#' for one-tuples. So in ghc-prim:GHC.Tuple we see the declarations: data Unit = () data Solo a = MkSolo a data Tuple2 a b = (a,b) There is no way to write a boxed one-tuple in Haskell using tuple syntax. They can, however, be written using other methods: 1. They can be written directly by importing them from GHC.Tuple. 2. They can be generated by way of Template Haskell or in `deriving` code. There is nothing special about one-tuples in Core; in particular, they have no custom pretty-printing, just using `Solo`. Note that there is *not* a unary constraint tuple, unlike for other forms of tuples. See [Ignore unary constraint tuples] in GHC.Tc.Gen.HsType for more details. See also Note [Flattening one-tuples] in GHC.Core.Make and Note [Don't flatten tuples from HsSyn] in GHC.Core.Make. ----- -- Wrinkle: Make boxed one-tuple names have known keys ----- We make boxed one-tuple names have known keys so that `data Solo a = MkSolo a`, defined in GHC.Tuple, will be used when one-tuples are spliced in through Template Haskell. This program (from #18097) crucially relies on this: case $( tupE [ [| "ok" |] ] ) of MkSolo x -> putStrLn x Unless Solo has a known key, the type of `$( tupE [ [| "ok" |] ] )` (an ExplicitTuple of length 1) will not match the type of Solo (an ordinary data constructor used in a pattern). Making Solo known-key allows GHC to make this connection. 
Unlike Solo, every other tuple is /not/ known-key (see Note [Infinite families of known-key names] in GHC.Builtin.Names). The main reason for this exception is that other tuples are written with special syntax, and as a result, they are renamed using a special `isBuiltInOcc_maybe` function (see Note [Built-in syntax and the OrigNameCache] in GHC.Types.Name.Cache). In contrast, Solo is just an ordinary data type with no special syntax, so it doesn't really make sense to handle it in `isBuiltInOcc_maybe`. Making Solo known-key is the next-best way to teach the internals of the compiler about it. -} -- | Built-in syntax isn't "in scope" so these OccNames map to wired-in Names -- with BuiltInSyntax. However, this should only be necessary while resolving -- names produced by Template Haskell splices since we take care to encode -- built-in syntax names specially in interface files. See -- Note [Symbol table representation of names] in GHC.Iface.Binary. -- -- Moreover, there is no need to include names of things that the user can't -- write (e.g. type representation bindings like $tc(,,,)). isBuiltInOcc_maybe :: OccName -> Maybe Name isBuiltInOcc_maybe occ = case name of "[]" -> Just $ choose_ns listTyConName nilDataConName ":" -> Just consDataConName -- function tycon "FUN" -> Just fUNTyConName "->" -> Just unrestrictedFunTyConName -- tuple data/tycon -- We deliberately exclude Solo (the boxed 1-tuple). 
-- See Note [One-tuples] (Wrinkle: Make boxed one-tuple names have known keys) "()" -> Just $ tup_name Boxed 0 _ | Just rest <- "(" `BS.stripPrefix` name , (commas, rest') <- BS.span (==',') rest , ")" <- rest' -> Just $ tup_name Boxed (1+BS.length commas) -- unboxed tuple data/tycon "(##)" -> Just $ tup_name Unboxed 0 "(# #)" -> Just $ tup_name Unboxed 1 _ | Just rest <- "(#" `BS.stripPrefix` name , (commas, rest') <- BS.span (==',') rest , "#)" <- rest' -> Just $ tup_name Unboxed (1+BS.length commas) -- unboxed sum tycon _ | Just rest <- "(#" `BS.stripPrefix` name , (nb_pipes, rest') <- span_pipes rest , "#)" <- rest' -> Just $ tyConName $ sumTyCon (1+nb_pipes) -- unboxed sum datacon _ | Just rest <- "(#" `BS.stripPrefix` name , (nb_pipes1, rest') <- span_pipes rest , Just rest'' <- "_" `BS.stripPrefix` rest' , (nb_pipes2, rest''') <- span_pipes rest'' , "#)" <- rest''' -> let arity = nb_pipes1 + nb_pipes2 + 1 alt = nb_pipes1 + 1 in Just $ dataConName $ sumDataCon alt arity _ -> Nothing where name = bytesFS $ occNameFS occ span_pipes :: BS.ByteString -> (Int, BS.ByteString) span_pipes = go 0 where go nb_pipes bs = case BS.uncons bs of Just ('|',rest) -> go (nb_pipes + 1) rest Just (' ',rest) -> go nb_pipes rest _ -> (nb_pipes, bs) choose_ns :: Name -> Name -> Name choose_ns tc dc | isTcClsNameSpace ns = tc | isDataConNameSpace ns = dc | otherwise = pprPanic "tup_name" (ppr occ <+> parens (pprNameSpace ns)) where ns = occNameSpace occ tup_name boxity arity = choose_ns (getName (tupleTyCon boxity arity)) (getName (tupleDataCon boxity arity)) isTupleTyOcc_maybe :: Module -> OccName -> Maybe Name isTupleTyOcc_maybe mod occ | mod == gHC_INTERNAL_TUPLE || mod == gHC_TYPES = match_occ where match_occ | occ == occName unitTyConName = Just unitTyConName | occ == occName soloTyConName = Just soloTyConName | occ == occName unboxedUnitTyConName = Just unboxedUnitTyConName | occ == occName unboxedSoloTyConName = Just unboxedSoloTyConName | otherwise = isTupleNTyOcc_maybe occ 
isTupleTyOcc_maybe _ _ = Nothing isCTupleOcc_maybe :: Module -> OccName -> Maybe Name isCTupleOcc_maybe mod occ | mod == gHC_CLASSES = match_occ where match_occ | occ == occName (cTupleTyConName 0) = Just (cTupleTyConName 0) | occ == occName (cTupleTyConName 1) = Just (cTupleTyConName 1) | 'C':'T':'u':'p':'l':'e' : rest <- occNameString occ , Just (BoxedTuple, num) <- arity_and_boxity rest , num >= 2 && num <= 64 = Just $ cTupleTyConName num | otherwise = Nothing isCTupleOcc_maybe _ _ = Nothing -- | This is only for Tuple, not for Unit or Solo isTupleNTyOcc_maybe :: OccName -> Maybe Name isTupleNTyOcc_maybe occ = case occNameString occ of 'T':'u':'p':'l':'e':str | Just (sort, n) <- arity_and_boxity str, n > 1 -> Just (tupleTyConName sort n) _ -> Nothing isSumTyOcc_maybe :: Module -> OccName -> Maybe Name isSumTyOcc_maybe mod occ | mod == gHC_TYPES = isSumNTyOcc_maybe occ isSumTyOcc_maybe _ _ = Nothing isSumNTyOcc_maybe :: OccName -> Maybe Name isSumNTyOcc_maybe occ = case occNameString occ of 'S':'u':'m':str | Just (UnboxedTuple, n) <- arity_and_boxity str, n > 1 -> Just (tyConName (sumTyCon n)) _ -> Nothing -- | See Note [Small Ints parsing] -- -- Analyze a string as the suffix of an OccName of a tuple or sum tycon to -- determine its arity and boxity (based on the presence of a @#@). arity_and_boxity :: String -> Maybe (TupleSort, Int) arity_and_boxity s = case s of c1 : t1 | isDigit c1 -> case t1 of [] -> Just (BoxedTuple, digit_to_int c1) ['#'] -> Just (UnboxedTuple, digit_to_int c1) c2 : t2 | isDigit c2 -> let ar = digit_to_int c1 * 10 + digit_to_int c2 in case t2 of [] -> Just (BoxedTuple, ar) ['#'] -> Just (UnboxedTuple, ar) _ -> Nothing _ -> Nothing _ -> Nothing where digit_to_int :: Char -> Int digit_to_int c = ord c - ord '0' {- Note [Small Ints parsing] ~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, tuples in Haskell have a maximum arity of 64. To parse strings of length 1 and 2 more efficiently, we can utilize an ad-hoc solution that matches their characters. 
This results in a speedup of up to 40 times compared to using `readMaybe @Int` on my machine. -} -- When resolving names produced by Template Haskell (see thOrigRdrName -- in GHC.ThToHs), we want ghc-prim:GHC.Types.List to yield an Exact name, not -- an Orig name. -- -- This matters for pretty-printing under ListTuplePuns. If we don't do it, -- then -ddump-splices will print ''[] as ''GHC.Types.List. -- -- Test case: th/T13776 -- isPunOcc_maybe :: Module -> OccName -> Maybe Name isPunOcc_maybe mod occ | mod == gHC_TYPES, occ == occName listTyConName = Just listTyConName | mod == gHC_TYPES, occ == occName unboxedSoloDataConName = Just unboxedSoloDataConName | otherwise = isTupleTyOcc_maybe mod occ <|> isCTupleOcc_maybe mod occ <|> isSumTyOcc_maybe mod occ mkTupleOcc :: NameSpace -> Boxity -> Arity -> OccName -- No need to cache these, the caching is done in mk_tuple mkTupleOcc ns Boxed ar = mkOccName ns (mkBoxedTupleStr ns ar) mkTupleOcc ns Unboxed ar = mkOccName ns (mkUnboxedTupleStr ns ar) mkCTupleOcc :: NameSpace -> Arity -> OccName mkCTupleOcc ns ar = mkOccName ns (mkConstraintTupleStr ar) mkTupleStr :: Boxity -> NameSpace -> Arity -> String mkTupleStr Boxed = mkBoxedTupleStr mkTupleStr Unboxed = mkUnboxedTupleStr mkBoxedTupleStr :: NameSpace -> Arity -> String mkBoxedTupleStr ns 0 | isDataConNameSpace ns = "()" | otherwise = "Unit" mkBoxedTupleStr ns 1 | isDataConNameSpace ns = "MkSolo" -- See Note [One-tuples] | otherwise = "Solo" mkBoxedTupleStr ns ar | isDataConNameSpace ns = '(' : commas ar ++ ")" | otherwise = "Tuple" ++ showInt ar "" mkUnboxedTupleStr :: NameSpace -> Arity -> String mkUnboxedTupleStr ns 0 | isDataConNameSpace ns = "(##)" | otherwise = "Unit#" mkUnboxedTupleStr ns 1 | isDataConNameSpace ns = "MkSolo#" -- See Note [One-tuples] | otherwise = "Solo#" mkUnboxedTupleStr ns ar | isDataConNameSpace ns = "(#" ++ commas ar ++ "#)" | otherwise = "Tuple" ++ show ar ++ "#" mkConstraintTupleStr :: Arity -> String mkConstraintTupleStr 0 = "CUnit" 
mkConstraintTupleStr 1 = "CSolo" mkConstraintTupleStr ar = "CTuple" ++ show ar commas :: Arity -> String commas ar = replicate (ar-1) ',' cTupleTyCon :: Arity -> TyCon cTupleTyCon i | i > mAX_CTUPLE_SIZE = fstOf3 (mk_ctuple i) -- Build one specially | otherwise = fstOf3 (cTupleArr ! i) cTupleTyConName :: Arity -> Name cTupleTyConName a = tyConName (cTupleTyCon a) cTupleTyConNames :: [Name] cTupleTyConNames = map cTupleTyConName (0 : [2..mAX_CTUPLE_SIZE]) cTupleTyConKeys :: UniqueSet cTupleTyConKeys = fromListUniqueSet $ map getUnique cTupleTyConNames isCTupleTyConName :: Name -> Bool isCTupleTyConName n = assertPpr (isExternalName n) (ppr n) $ getUnique n `memberUniqueSet` cTupleTyConKeys -- | If the given name is that of a constraint tuple, return its arity. cTupleTyConNameArity_maybe :: Name -> Maybe Arity cTupleTyConNameArity_maybe n | not (isCTupleTyConName n) = Nothing | otherwise = fmap adjustArity (n `elemIndex` cTupleTyConNames) where -- Since `cTupleTyConNames` jumps straight from the `0` to the `2` -- case, we have to adjust accordingly our calculated arity. adjustArity a = if a > 0 then a + 1 else a cTupleDataCon :: Arity -> DataCon cTupleDataCon i | i > mAX_CTUPLE_SIZE = sndOf3 (mk_ctuple i) -- Build one specially | otherwise = sndOf3 (cTupleArr ! i) cTupleDataConName :: Arity -> Name cTupleDataConName i = dataConName (cTupleDataCon i) cTupleDataConNames :: [Name] cTupleDataConNames = map cTupleDataConName (0 : [2..mAX_CTUPLE_SIZE]) cTupleSelId :: ConTag -- Superclass position -> Arity -- Arity -> Id cTupleSelId sc_pos arity | sc_pos > arity = panic ("cTupleSelId: index out of bounds: superclass position: " ++ show sc_pos ++ " > arity " ++ show arity) | sc_pos <= 0 = panic ("cTupleSelId: Superclass positions start from 1. " ++ "(superclass position: " ++ show sc_pos ++ ", arity: " ++ show arity ++ ")") | arity < 1 = panic ("cTupleSelId: Arity starts from 1. 
" ++ "(superclass position: " ++ show sc_pos ++ ", arity: " ++ show arity ++ ")") | arity > mAX_CTUPLE_SIZE = thdOf3 (mk_ctuple arity) ! (sc_pos - 1) -- Build one specially | otherwise = thdOf3 (cTupleArr ! arity) ! (sc_pos - 1) cTupleSelIdName :: ConTag -- Superclass position -> Arity -- Arity -> Name cTupleSelIdName sc_pos arity = idName (cTupleSelId sc_pos arity) tupleTyCon :: Boxity -> Arity -> TyCon tupleTyCon sort i | i > mAX_TUPLE_SIZE = fst (mk_tuple sort i) -- Build one specially tupleTyCon Boxed i = fst (boxedTupleArr ! i) tupleTyCon Unboxed i = fst (unboxedTupleArr ! i) tupleTyConName :: TupleSort -> Arity -> Name tupleTyConName ConstraintTuple a = cTupleTyConName a tupleTyConName BoxedTuple a = tyConName (tupleTyCon Boxed a) tupleTyConName UnboxedTuple a = tyConName (tupleTyCon Unboxed a) promotedTupleDataCon :: Boxity -> Arity -> TyCon promotedTupleDataCon boxity i = promoteDataCon (tupleDataCon boxity i) tupleDataCon :: Boxity -> Arity -> DataCon tupleDataCon sort i | i > mAX_TUPLE_SIZE = snd (mk_tuple sort i) -- Build one specially tupleDataCon Boxed i = snd (boxedTupleArr ! i) tupleDataCon Unboxed i = snd (unboxedTupleArr ! i) tupleDataConName :: Boxity -> Arity -> Name tupleDataConName sort i = dataConName (tupleDataCon sort i) mkPromotedPairTy :: Kind -> Kind -> Type -> Type -> Type mkPromotedPairTy k1 k2 t1 t2 = mkTyConApp (promotedTupleDataCon Boxed 2) [k1,k2,t1,t2] isPromotedPairType :: Type -> Maybe (Type, Type) isPromotedPairType t | Just (tc, [_,_,x,y]) <- splitTyConApp_maybe t , tc == promotedTupleDataCon Boxed 2 = Just (x, y) | otherwise = Nothing boxedTupleArr, unboxedTupleArr :: Array Int (TyCon,DataCon) boxedTupleArr = listArray (0,mAX_TUPLE_SIZE) [mk_tuple Boxed i | i <- [0..mAX_TUPLE_SIZE]] unboxedTupleArr = listArray (0,mAX_TUPLE_SIZE) [mk_tuple Unboxed i | i <- [0..mAX_TUPLE_SIZE]] -- | Cached type constructors, data constructors, and superclass selectors for -- constraint tuples. 
The outer array is indexed by the arity of the constraint -- tuple and the inner array is indexed by the superclass position. cTupleArr :: Array Int (TyCon, DataCon, Array Int Id) cTupleArr = listArray (0,mAX_CTUPLE_SIZE) [mk_ctuple i | i <- [0..mAX_CTUPLE_SIZE]] -- Although GHC does not make use of unary constraint tuples -- (see Note [Ignore unary constraint tuples] in GHC.Tc.Gen.HsType), -- this array creates one anyway. This is primarily motivated by the fact -- that (1) the indices of an Array must be contiguous, and (2) we would like -- the index of a constraint tuple in this Array to correspond to its Arity. -- We could envision skipping over the unary constraint tuple and having index -- 1 correspond to a 2-constraint tuple (and so on), but that's more -- complicated than it's worth. -- | Given the TupleRep/SumRep tycon and list of RuntimeReps of the unboxed -- tuple/sum arguments, produces the return kind of an unboxed tuple/sum type -- constructor. @unboxedTupleSumKind [IntRep, LiftedRep] --> TYPE (TupleRep/SumRep -- [IntRep, LiftedRep])@ unboxedTupleSumKind :: TyCon -> [Type] -> Kind unboxedTupleSumKind tc rr_tys = mkTYPEapp (mkTyConApp tc [mkPromotedListTy runtimeRepTy rr_tys]) -- | Specialization of 'unboxedTupleSumKind' for tuples unboxedTupleKind :: [Type] -> Kind unboxedTupleKind = unboxedTupleSumKind tupleRepDataConTyCon mk_tuple :: Boxity -> Int -> (TyCon,DataCon) mk_tuple Boxed arity = (tycon, tuple_con) where tycon = mkTupleTyCon tc_name tc_binders tc_res_kind tuple_con BoxedTuple flavour tc_binders = mkTemplateAnonTyConBinders (replicate arity liftedTypeKind) tc_res_kind = liftedTypeKind flavour = VanillaAlgTyCon (mkPrelTyConRepName tc_name) dc_tvs = binderVars tc_binders dc_arg_tys = mkTyVarTys dc_tvs tuple_con = pcDataCon dc_name dc_tvs dc_arg_tys tycon boxity = Boxed modu = gHC_INTERNAL_TUPLE tc_name = mkWiredInName modu (mkTupleOcc tcName boxity arity) tc_uniq (ATyCon tycon) UserSyntax dc_name = mkWiredInName modu (mkTupleOcc dataName 
boxity arity) dc_uniq (AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkTupleTyConUnique boxity arity dc_uniq = mkTupleDataConUnique boxity arity mk_tuple Unboxed arity = (tycon, tuple_con) where tycon = mkTupleTyCon tc_name tc_binders tc_res_kind tuple_con UnboxedTuple flavour -- See Note [Unboxed tuple RuntimeRep vars] in GHC.Core.TyCon -- Kind: forall (k1:RuntimeRep) (k2:RuntimeRep). TYPE k1 -> TYPE k2 -> TYPE (TupleRep [k1, k2]) tc_binders = mkTemplateTyConBinders (replicate arity runtimeRepTy) (\ks -> map mkTYPEapp ks) tc_res_kind = unboxedTupleKind rr_tys flavour = VanillaAlgTyCon (mkPrelTyConRepName tc_name) dc_tvs = binderVars tc_binders (rr_tvs, dc_arg_tvs) = splitAt arity dc_tvs rr_tys = mkTyVarTys rr_tvs dc_arg_tys = mkTyVarTys dc_arg_tvs tuple_con = pcRepPolyDataCon dc_name dc_tvs conc_tvs dc_arg_tys tycon conc_tvs = mkNameEnv [ (tyVarName rr_tv, ConcreteFRR $ FixedRuntimeRepOrigin ty $ mkFRRUnboxedTuple pos) | rr_tv <- rr_tvs | ty <- dc_arg_tys | pos <- [1..arity] ] boxity = Unboxed modu = gHC_TYPES tc_name = mkWiredInName modu (mkTupleOcc tcName boxity arity) tc_uniq (ATyCon tycon) UserSyntax dc_name = mkWiredInName modu (mkTupleOcc dataName boxity arity) dc_uniq (AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkTupleTyConUnique boxity arity dc_uniq = mkTupleDataConUnique boxity arity mk_ctuple :: Arity -> (TyCon, DataCon, Array ConTagZ Id) mk_ctuple arity = (tycon, tuple_con, sc_sel_ids_arr) where tycon = mkClassTyCon tc_name binders roles rhs klass (mkPrelTyConRepName tc_name) klass = mk_ctuple_class tycon sc_theta sc_sel_ids tuple_con = pcDataConConstraint dc_name tvs sc_theta tycon binders = mkTemplateAnonTyConBinders (replicate arity constraintKind) roles = replicate arity Nominal rhs = TupleTyCon{data_con = tuple_con, tup_sort = ConstraintTuple} modu = gHC_CLASSES tc_name = mkWiredInName modu (mkCTupleOcc tcName arity) tc_uniq (ATyCon tycon) UserSyntax dc_name = mkWiredInName modu (mkCTupleOcc dataName arity) dc_uniq 
(AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkCTupleTyConUnique arity dc_uniq = mkCTupleDataConUnique arity tvs = binderVars binders sc_theta = map mkTyVarTy tvs sc_sel_ids = [mk_sc_sel_id sc_pos | sc_pos <- [0..arity-1]] sc_sel_ids_arr = listArray (0,arity-1) sc_sel_ids mk_sc_sel_id sc_pos = let sc_sel_id_uniq = mkCTupleSelIdUnique sc_pos arity sc_sel_id_occ = mkCTupleOcc tcName arity sc_sel_id_name = mkWiredInIdName gHC_CLASSES (occNameFS (mkSuperDictSelOcc sc_pos sc_sel_id_occ)) sc_sel_id_uniq sc_sel_id sc_sel_id = mkDictSelId sc_sel_id_name klass in sc_sel_id unitTyCon :: TyCon unitTyCon = tupleTyCon Boxed 0 unitTyConName :: Name unitTyConName = tyConName unitTyCon unitTyConKey :: Unique unitTyConKey = getUnique unitTyCon unitDataCon :: DataCon unitDataCon = head (tyConDataCons unitTyCon) unitDataConId :: Id unitDataConId = dataConWorkId unitDataCon soloTyCon :: TyCon soloTyCon = tupleTyCon Boxed 1 soloTyConName :: Name soloTyConName = tyConName soloTyCon pairTyCon :: TyCon pairTyCon = tupleTyCon Boxed 2 unboxedUnitTy :: Type unboxedUnitTy = mkTyConTy unboxedUnitTyCon unboxedUnitTyCon :: TyCon unboxedUnitTyCon = tupleTyCon Unboxed 0 unboxedUnitTyConName :: Name unboxedUnitTyConName = tyConName unboxedUnitTyCon unboxedUnitDataCon :: DataCon unboxedUnitDataCon = tupleDataCon Unboxed 0 unboxedSoloTyCon :: TyCon unboxedSoloTyCon = tupleTyCon Unboxed 1 unboxedSoloTyConName :: Name unboxedSoloTyConName = tyConName unboxedSoloTyCon unboxedSoloDataConName :: Name unboxedSoloDataConName = tupleDataConName Unboxed 1 {- ********************************************************************* * * Unboxed sums * * ********************************************************************* -} -- | OccName for n-ary unboxed sum type constructor. 
mkSumTyConOcc :: Arity -> OccName
mkSumTyConOcc n = mkOccName tcName str
  where
    -- No need to cache these, the caching is done in mk_sum
    str = "Sum" ++ show n ++ "#"

-- | OccName for i-th alternative of n-ary unboxed sum data constructor.
mkSumDataConOcc :: ConTag -> Arity -> OccName
mkSumDataConOcc alt n = mkOccName dataName str
  where
    -- No need to cache these, the caching is done in mk_sum
    -- 'bars' renders the '|' separators for the other alternatives;
    -- '_' marks the payload position of this alternative
    str = '(' : '#' : ' ' : bars alt ++ '_' : bars (n - alt - 1) ++ " #)"
    bars i = intersperse ' ' $ replicate i '|'

-- | Type constructor for n-ary unboxed sum.
sumTyCon :: Arity -> TyCon
sumTyCon arity
  | arity > mAX_SUM_SIZE
  = fst (mk_sum arity)  -- Build one specially

  | arity < 2
  = panic ("sumTyCon: Arity starts from 2. (arity: " ++ show arity ++ ")")

  | otherwise
  = fst (unboxedSumArr ! arity)   -- Common case: cached

-- | Data constructor for i-th alternative of a n-ary unboxed sum.
sumDataCon :: ConTag -- Alternative
           -> Arity  -- Arity
           -> DataCon
sumDataCon alt arity
  | alt > arity
  = panic ("sumDataCon: index out of bounds: alt: "
           ++ show alt ++ " > arity " ++ show arity)

  | alt <= 0
  = panic ("sumDataCon: Alts start from 1. (alt: "
           ++ show alt ++ ", arity: " ++ show arity ++ ")")

  | arity < 2
  = panic ("sumDataCon: Arity starts from 2. (alt: "
           ++ show alt ++ ", arity: " ++ show arity ++ ")")

  | arity > mAX_SUM_SIZE
  = snd (mk_sum arity) ! (alt - 1)  -- Build one specially

  | otherwise
  = snd (unboxedSumArr ! arity) ! (alt - 1)   -- inner array is 0-indexed

-- | Cached type and data constructors for sums. The outer array is
-- indexed by the arity of the sum and the inner array is indexed by
-- the alternative.
unboxedSumArr :: Array Int (TyCon, Array Int DataCon)
unboxedSumArr = listArray (2,mAX_SUM_SIZE) [mk_sum i | i <- [2..mAX_SUM_SIZE]]

-- | Specialization of 'unboxedTupleSumKind' for sums
unboxedSumKind :: [Type] -> Kind
unboxedSumKind = unboxedTupleSumKind sumRepDataConTyCon

-- | Create type constructor and data constructors for n-ary unboxed sum.
mk_sum :: Arity -> (TyCon, Array ConTagZ DataCon)
mk_sum arity = (tycon, sum_cons)
  where
    tycon = mkSumTyCon tc_name tc_binders tc_res_kind
                       (elems sum_cons) UnboxedSumTyCon

    -- One RuntimeRep binder per alternative, then one TYPE-kinded binder
    -- per alternative (built from those reps)
    tc_binders = mkTemplateTyConBinders (replicate arity runtimeRepTy)
                                        (\ks -> map mkTYPEapp ks)

    tyvars = binderVars tc_binders

    tc_res_kind = unboxedSumKind rr_tys

    -- First 'arity' tyvars are the RuntimeRep vars; the remainder are
    -- the argument-type vars, one per alternative
    (rr_tvs, dc_arg_tvs) = splitAt arity tyvars
    rr_tys     = mkTyVarTys rr_tvs
    dc_arg_tys = mkTyVarTys dc_arg_tvs

    -- Each RuntimeRep tyvar must be instantiated to a concrete rep
    -- (fixed runtime representation); the three generators are zipped
    -- via ParallelListComp
    conc_tvs = mkNameEnv
      [ (tyVarName rr_tv, ConcreteFRR $ FixedRuntimeRepOrigin ty $ mkFRRUnboxedSum (Just pos))
      | rr_tv <- rr_tvs
      | ty    <- dc_arg_tys
      | pos   <- [1..arity] ]

    tc_name = mkWiredInName gHC_TYPES (mkSumTyConOcc arity) tc_uniq
                            (ATyCon tycon) UserSyntax

    -- One DataCon per alternative, indexed by zero-based ConTag
    sum_cons = listArray (0,arity-1) [sum_con i | i <- [0..arity-1]]
    sum_con i = let dc = pcRepPolyDataCon
                           dc_name
                           tyvars -- univ tyvars
                           conc_tvs
                           [dc_arg_tys !! i] -- arg types
                           tycon

                    dc_name = mkWiredInName gHC_TYPES
                                            (mkSumDataConOcc i arity)
                                            (dc_uniq i)
                                            (AConLike (RealDataCon dc))
                                            BuiltInSyntax
                in dc

    tc_uniq   = mkSumTyConUnique   arity
    dc_uniq i = mkSumDataConUnique i arity

{-
************************************************************************
*                                                                      *
           Equality types and classes
*                                                                      *
********************************************************************* -}

-- See Note [The equality types story] in GHC.Builtin.Types.Prim
-- ((~~) :: forall k1 k2 (a :: k1) (b :: k2). a -> b -> Constraint)
--
-- It's tempting to put functional dependencies on (~~), but it's not
-- necessary because the functional-dependency coverage check looks
-- through superclasses, and (~#) is handled in that check.
eqTyCon,   heqTyCon,   coercibleTyCon   :: TyCon
eqClass,   heqClass,   coercibleClass   :: Class
eqDataCon, heqDataCon, coercibleDataCon :: DataCon
eqSCSelId, heqSCSelId, coercibleSCSelId :: Id

-- (~) : homogeneous lifted equality, superclass (~#)
(eqTyCon, eqClass, eqDataCon, eqSCSelId)
  = (tycon, klass, datacon, sc_sel_id)
  where
    tycon = mkClassTyCon eqTyConName binders roles
                         rhs klass
                         (mkPrelTyConRepName eqTyConName)
    klass = mk_class tycon sc_pred sc_sel_id
    datacon = pcDataConConstraint eqDataConName tvs [sc_pred] tycon

    -- Kind: forall k. k -> k -> Constraint
    binders     = mkTemplateTyConBinders [liftedTypeKind] (\[k] -> [k,k])
    roles       = [Nominal, Nominal, Nominal]
    rhs         = mkDataTyConRhs [datacon]
    tvs@[k,a,b] = binderVars binders
    sc_pred     = mkTyConApp eqPrimTyCon (mkTyVarTys [k,k,a,b])
    sc_sel_id   = mkDictSelId eqSCSelIdName klass

-- (~~) : heterogeneous lifted equality, superclass (~#)
(heqTyCon, heqClass, heqDataCon, heqSCSelId)
  = (tycon, klass, datacon, sc_sel_id)
  where
    tycon = mkClassTyCon heqTyConName binders roles
                         rhs klass
                         (mkPrelTyConRepName heqTyConName)
    klass = mk_class tycon sc_pred sc_sel_id
    datacon = pcDataConConstraint heqDataConName tvs [sc_pred] tycon

    -- Kind: forall k1 k2. k1 -> k2 -> Constraint
    binders   = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id
    roles     = [Nominal, Nominal, Nominal, Nominal]
    rhs       = mkDataTyConRhs [datacon]
    tvs       = binderVars binders
    sc_pred   = mkTyConApp eqPrimTyCon (mkTyVarTys tvs)
    sc_sel_id = mkDictSelId heqSCSelIdName klass

-- Coercible : representational equality, superclass (~R#)
(coercibleTyCon, coercibleClass, coercibleDataCon, coercibleSCSelId)
  = (tycon, klass, datacon, sc_sel_id)
  where
    tycon = mkClassTyCon coercibleTyConName binders roles
                         rhs klass
                         (mkPrelTyConRepName coercibleTyConName)
    klass = mk_class tycon sc_pred sc_sel_id
    datacon = pcDataConConstraint coercibleDataConName tvs [sc_pred] tycon

    -- Kind: forall k. k -> k -> Constraint
    binders     = mkTemplateTyConBinders [liftedTypeKind] (\[k] -> [k,k])
    roles       = [Nominal, Representational, Representational]
    rhs         = mkDataTyConRhs [datacon]
    tvs@[k,a,b] = binderVars binders
    sc_pred     = mkTyConApp eqReprPrimTyCon (mkTyVarTys [k, k, a, b])
    sc_sel_id   = mkDictSelId coercibleSCSelIdName klass

-- | Make a class with a single superclass and its selector, and
-- no methods or associated types (used for the equality classes above).
mk_class :: TyCon -> PredType -> Id -> Class
mk_class tycon sc_pred sc_sel_id
  = mkClass (tyConName tycon) (tyConTyVars tycon) [] [sc_pred] [sc_sel_id]
            [] [] (mkAnd []) tycon

-- | Make a constraint-tuple class: superclasses are the components,
-- one selector per component, no methods.
mk_ctuple_class :: TyCon -> ThetaType -> [Id] -> Class
mk_ctuple_class tycon sc_theta sc_sel_ids
  = mkClass (tyConName tycon) (tyConTyVars tycon) [] sc_theta sc_sel_ids
            [] [] (mkAnd []) tycon

{- *********************************************************************
*                                                                      *
                Multiplicity Polymorphism
*                                                                      *
********************************************************************* -}

{- Multiplicity polymorphism is implemented very similarly to representation
polymorphism. We write in the multiplicity kind and the One and Many types
which can appear in user programs. These are defined properly in GHC.Types.

data Multiplicity = One | Many
-}

multiplicityTyConName :: Name
multiplicityTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Multiplicity")
                          multiplicityTyConKey multiplicityTyCon

oneDataConName, manyDataConName :: Name
oneDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "One") oneDataConKey oneDataCon
manyDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Many") manyDataConKey manyDataCon

multiplicityTy :: Type
multiplicityTy = mkTyConTy multiplicityTyCon

multiplicityTyCon :: TyCon
multiplicityTyCon = pcTyCon multiplicityTyConName Nothing []
                            [oneDataCon, manyDataCon]

oneDataCon, manyDataCon :: DataCon
oneDataCon  = pcDataCon oneDataConName  [] [] multiplicityTyCon
manyDataCon = pcDataCon manyDataConName [] [] multiplicityTyCon

oneDataConTy, manyDataConTy :: Type
oneDataConTy  = mkTyConTy oneDataConTyCon
manyDataConTy = mkTyConTy manyDataConTyCon

oneDataConTyCon, manyDataConTyCon :: TyCon
oneDataConTyCon  = promoteDataCon oneDataCon
manyDataConTyCon = promoteDataCon manyDataCon

multMulTyConName :: Name
multMulTyConName =
    mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "MultMul") multMulTyConKey multMulTyCon

-- | Built-in type family for multiplying multiplicities.
multMulTyCon :: TyCon
multMulTyCon = mkFamilyTyCon multMulTyConName binders multiplicityTy Nothing
                             (BuiltInSynFamTyCon trivialBuiltInFamily)
                             Nothing
                             NotInjective
  where
    binders = mkTemplateAnonTyConBinders [multiplicityTy, multiplicityTy]

------------------------
-- type (->) :: forall (rep1 :: RuntimeRep) (rep2 :: RuntimeRep).
--                 TYPE rep1 -> TYPE rep2 -> Type
-- type (->) = FUN 'Many
unrestrictedFunTyCon :: TyCon
unrestrictedFunTyCon = buildSynTyCon unrestrictedFunTyConName [] arrowKind []
                                     (TyCoRep.TyConApp fUNTyCon [manyDataConTy])
  where
    arrowKind = mkTyConKind binders liftedTypeKind
    -- See also funTyCon
    binders = [ Bndr runtimeRep1TyVar (NamedTCB Inferred)
              , Bndr runtimeRep2TyVar (NamedTCB Inferred)
              ]
              ++ mkTemplateAnonTyConBinders [ mkTYPEapp runtimeRep1Ty
                                            , mkTYPEapp runtimeRep2Ty
                                            ]

unrestrictedFunTyConName :: Name
unrestrictedFunTyConName
  = mkWiredInTyConName BuiltInSyntax gHC_TYPES (fsLit "->")
                       unrestrictedFunTyConKey unrestrictedFunTyCon

{- *********************************************************************
*                                                                      *
     Type synonyms (all declared in ghc-prim:GHC.Types)

  type CONSTRAINT :: RuntimeRep -> Type            -- primitive; cONSTRAINTKind
  type Constraint = CONSTRAINT LiftedRep :: Type   -- constraintKind

  type TYPE :: RuntimeRep -> Type                  -- primitive; tYPEKind
  type Type = TYPE LiftedRep :: Type               -- liftedTypeKind
  type UnliftedType = TYPE UnliftedRep :: Type     -- unliftedTypeKind

  type LiftedRep = BoxedRep Lifted :: RuntimeRep     -- liftedRepTy
  type UnliftedRep = BoxedRep Unlifted :: RuntimeRep -- unliftedRepTy
*                                                                      *
********************************************************************* -}

-- For these synonyms, see
-- Note [TYPE and CONSTRAINT] in GHC.Builtin.Types.Prim, and
-- Note [Using synonyms to compress types] in GHC.Core.Type

{- Note [Naked FunTy]
~~~~~~~~~~~~~~~~~~~~~
GHC.Core.TyCo.Rep.mkFunTy has assertions about the consistency of the
argument flag and arg/res types. But when constructing the kinds of
tYPETyCon and cONSTRAINTTyCon we don't want to make these checks because
     TYPE :: RuntimeRep -> Type
i.e. TYPE :: RuntimeRep -> TYPE LiftedRep
so the check will loop infinitely. Hence the use of a naked FunTy
constructor in tTYPETyCon and cONSTRAINTTyCon.
-}

----------------------
-- type Constraint = CONSTRAINT LiftedRep
constraintKindTyCon :: TyCon
constraintKindTyCon = buildSynTyCon constraintKindTyConName [] liftedTypeKind [] rhs
  where
    rhs = TyCoRep.TyConApp cONSTRAINTTyCon [liftedRepTy]

constraintKindTyConName :: Name
constraintKindTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Constraint")
                       constraintKindTyConKey constraintKindTyCon

constraintKind :: Kind
constraintKind = mkTyConTy constraintKindTyCon

----------------------
-- type Type = TYPE LiftedRep
liftedTypeKindTyCon :: TyCon
liftedTypeKindTyCon = buildSynTyCon liftedTypeKindTyConName [] liftedTypeKind [] rhs
  where
    rhs = TyCoRep.TyConApp tYPETyCon [liftedRepTy]

liftedTypeKindTyConName :: Name
liftedTypeKindTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Type")
                       liftedTypeKindTyConKey liftedTypeKindTyCon

liftedTypeKind, typeToTypeKind :: Type
liftedTypeKind = mkTyConTy liftedTypeKindTyCon
typeToTypeKind = liftedTypeKind `mkVisFunTyMany` liftedTypeKind

----------------------
-- type UnliftedType = TYPE ('BoxedRep 'Unlifted)
unliftedTypeKindTyCon :: TyCon
unliftedTypeKindTyCon = buildSynTyCon unliftedTypeKindTyConName [] liftedTypeKind [] rhs
  where
    rhs = TyCoRep.TyConApp tYPETyCon [unliftedRepTy]

unliftedTypeKindTyConName :: Name
unliftedTypeKindTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "UnliftedType")
                       unliftedTypeKindTyConKey unliftedTypeKindTyCon

unliftedTypeKind :: Type
unliftedTypeKind = mkTyConTy unliftedTypeKindTyCon

{- *********************************************************************
*                                                                      *
         data Levity = Lifted | Unlifted
*                                                                      *
********************************************************************* -}

levityTyConName, liftedDataConName, unliftedDataConName :: Name
levityTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Levity") levityTyConKey levityTyCon
liftedDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Lifted") liftedDataConKey liftedDataCon
unliftedDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Unlifted") unliftedDataConKey unliftedDataCon

levityTyCon :: TyCon
levityTyCon = pcTyCon levityTyConName Nothing [] [liftedDataCon,unliftedDataCon]

levityTy :: Type
levityTy = mkTyConTy levityTyCon

liftedDataCon, unliftedDataCon :: DataCon
liftedDataCon = pcSpecialDataCon liftedDataConName
    [] levityTyCon (Levity Lifted)
unliftedDataCon = pcSpecialDataCon unliftedDataConName
    [] levityTyCon (Levity Unlifted)

liftedDataConTyCon :: TyCon
liftedDataConTyCon = promoteDataCon liftedDataCon

unliftedDataConTyCon :: TyCon
unliftedDataConTyCon = promoteDataCon unliftedDataCon

liftedDataConTy :: Type
liftedDataConTy = mkTyConTy liftedDataConTyCon

unliftedDataConTy :: Type
unliftedDataConTy = mkTyConTy unliftedDataConTyCon

{- *********************************************************************
*                                                                      *
     See Note [Wiring in RuntimeRep]
     data RuntimeRep = VecRep VecCount VecElem
                     | TupleRep [RuntimeRep]
                     | SumRep [RuntimeRep]
                     | BoxedRep Levity
                     | IntRep
                     | Int8Rep
                     | ...etc...
*                                                                      *
********************************************************************* -}

{- Note [Wiring in RuntimeRep]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The RuntimeRep type (and friends) in GHC.Types has a bunch of constructors,
making it a pain to wire in. To ease the pain somewhat, we use lists of
the different bits, like Uniques, Names, DataCons. These lists must be
kept in sync with each other. The rule is this: use the order as declared
in GHC.Types. All places where such lists exist should contain a reference
to this Note, so a search for this Note's name should find all the lists.

See also Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType.
-}

runtimeRepTyCon :: TyCon
runtimeRepTyCon = pcTyCon runtimeRepTyConName Nothing []
    -- Here we list all the data constructors
    -- of the RuntimeRep data type
    (vecRepDataCon : tupleRepDataCon : sumRepDataCon : boxedRepDataCon
      : runtimeRepSimpleDataCons)

runtimeRepTy :: Type
runtimeRepTy = mkTyConTy runtimeRepTyCon

runtimeRepTyConName, vecRepDataConName, tupleRepDataConName, sumRepDataConName, boxedRepDataConName :: Name
runtimeRepTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "RuntimeRep")
                       runtimeRepTyConKey runtimeRepTyCon
vecRepDataConName   = mk_runtime_rep_dc_name (fsLit "VecRep")   vecRepDataConKey   vecRepDataCon
tupleRepDataConName = mk_runtime_rep_dc_name (fsLit "TupleRep") tupleRepDataConKey tupleRepDataCon
sumRepDataConName   = mk_runtime_rep_dc_name (fsLit "SumRep")   sumRepDataConKey   sumRepDataCon
boxedRepDataConName = mk_runtime_rep_dc_name (fsLit "BoxedRep") boxedRepDataConKey boxedRepDataCon

-- | Shared builder for the wired-in names of RuntimeRep-related data
-- constructors (all live in GHC.Types).
mk_runtime_rep_dc_name :: FastString -> Unique -> DataCon -> Name
mk_runtime_rep_dc_name fs u dc
  = mkWiredInDataConName UserSyntax gHC_TYPES fs u dc

boxedRepDataCon :: DataCon
boxedRepDataCon = pcSpecialDataCon boxedRepDataConName
  [ levityTy ] runtimeRepTyCon (RuntimeRep prim_rep_fun)
  where
    -- See Note [Getting from RuntimeRep to PrimRep] in RepType
    prim_rep_fun [lev]
      = case tyConAppTyCon_maybe lev of
          Just tc -> case tyConPromDataConInfo tc of
                       Levity l -> [BoxedRep (Just l)]
                       _        -> [BoxedRep Nothing]
          Nothing -> [BoxedRep Nothing]
    prim_rep_fun args
      = pprPanic "boxedRepDataCon" (ppr args)

boxedRepDataConTyCon :: TyCon
boxedRepDataConTyCon = promoteDataCon boxedRepDataCon

tupleRepDataCon :: DataCon
tupleRepDataCon = pcSpecialDataCon tupleRepDataConName [ mkListTy runtimeRepTy ]
                                   runtimeRepTyCon (RuntimeRep prim_rep_fun)
  where
    -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType
    prim_rep_fun [rr_ty_list]
      = concatMap (runtimeRepPrimRep doc) rr_tys
      where
        rr_tys = extractPromotedList rr_ty_list
        doc    = text "tupleRepDataCon" <+> ppr rr_tys
    prim_rep_fun args
      = pprPanic "tupleRepDataCon" (ppr args)

tupleRepDataConTyCon :: TyCon
tupleRepDataConTyCon = promoteDataCon tupleRepDataCon

sumRepDataCon :: DataCon
sumRepDataCon = pcSpecialDataCon sumRepDataConName [ mkListTy runtimeRepTy ]
                                 runtimeRepTyCon (RuntimeRep prim_rep_fun)
  where
    -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType
    prim_rep_fun [rr_ty_list]
      = map slotPrimRep (toList (ubxSumRepType prim_repss))
      where
        rr_tys     = extractPromotedList rr_ty_list
        doc        = text "sumRepDataCon" <+> ppr rr_tys
        prim_repss = map (runtimeRepPrimRep doc) rr_tys
    prim_rep_fun args
      = pprPanic "sumRepDataCon" (ppr args)

sumRepDataConTyCon :: TyCon
sumRepDataConTyCon = promoteDataCon sumRepDataCon

-- See Note [Wiring in RuntimeRep]
-- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType
runtimeRepSimpleDataCons :: [DataCon]
runtimeRepSimpleDataCons
  = zipWith mk_runtime_rep_dc runtimeRepSimpleDataConKeys
    [ (fsLit "IntRep",    IntRep)
    , (fsLit "Int8Rep",   Int8Rep)
    , (fsLit "Int16Rep",  Int16Rep)
    , (fsLit "Int32Rep",  Int32Rep)
    , (fsLit "Int64Rep",  Int64Rep)
    , (fsLit "WordRep",   WordRep)
    , (fsLit "Word8Rep",  Word8Rep)
    , (fsLit "Word16Rep", Word16Rep)
    , (fsLit "Word32Rep", Word32Rep)
    , (fsLit "Word64Rep", Word64Rep)
    , (fsLit "AddrRep",   AddrRep)
    , (fsLit "FloatRep",  FloatRep)
    , (fsLit "DoubleRep", DoubleRep)
    ]
  where
    mk_runtime_rep_dc :: Unique -> (FastString, PrimRep) -> DataCon
    mk_runtime_rep_dc uniq (fs, primrep)
      = data_con
      where
        data_con = pcSpecialDataCon dc_name [] runtimeRepTyCon (RuntimeRep (\_ -> [primrep]))
        dc_name  = mk_runtime_rep_dc_name fs uniq data_con

-- See Note [Wiring in RuntimeRep]
intRepDataConTy,
  int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy,
  wordRepDataConTy,
  word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy,
  addrRepDataConTy,
  floatRepDataConTy, doubleRepDataConTy :: RuntimeRepType
[intRepDataConTy,
  int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy,
  wordRepDataConTy,
  word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy,
  addrRepDataConTy,
  floatRepDataConTy, doubleRepDataConTy
  ]
  = map (mkTyConTy . promoteDataCon) runtimeRepSimpleDataCons

----------------------
-- | @type ZeroBitRep = 'Tuple '[]
zeroBitRepTyCon :: TyCon
zeroBitRepTyCon = buildSynTyCon zeroBitRepTyConName [] runtimeRepTy [] rhs
  where
    rhs = TyCoRep.TyConApp tupleRepDataConTyCon [mkPromotedListTy runtimeRepTy []]

zeroBitRepTyConName :: Name
zeroBitRepTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "ZeroBitRep")
                       zeroBitRepTyConKey zeroBitRepTyCon

zeroBitRepTy :: RuntimeRepType
zeroBitRepTy = mkTyConTy zeroBitRepTyCon

----------------------
-- @type ZeroBitType = TYPE ZeroBitRep
zeroBitTypeTyCon :: TyCon
zeroBitTypeTyCon = buildSynTyCon zeroBitTypeTyConName [] liftedTypeKind [] rhs
  where
    rhs = TyCoRep.TyConApp tYPETyCon [zeroBitRepTy]

zeroBitTypeTyConName :: Name
zeroBitTypeTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "ZeroBitType")
                       zeroBitTypeTyConKey zeroBitTypeTyCon

zeroBitTypeKind :: Type
zeroBitTypeKind = mkTyConTy zeroBitTypeTyCon

----------------------
-- | @type LiftedRep = 'BoxedRep 'Lifted@
liftedRepTyCon :: TyCon
liftedRepTyCon = buildSynTyCon liftedRepTyConName [] runtimeRepTy [] rhs
  where
    rhs = TyCoRep.TyConApp boxedRepDataConTyCon [liftedDataConTy]

liftedRepTyConName :: Name
liftedRepTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "LiftedRep")
                       liftedRepTyConKey liftedRepTyCon

liftedRepTy :: RuntimeRepType
liftedRepTy = mkTyConTy liftedRepTyCon

----------------------
-- | @type UnliftedRep = 'BoxedRep 'Unlifted@
unliftedRepTyCon :: TyCon
unliftedRepTyCon = buildSynTyCon unliftedRepTyConName [] runtimeRepTy [] rhs
  where
    rhs = TyCoRep.TyConApp boxedRepDataConTyCon [unliftedDataConTy]

unliftedRepTyConName :: Name
unliftedRepTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "UnliftedRep")
                       unliftedRepTyConKey unliftedRepTyCon

unliftedRepTy :: RuntimeRepType
unliftedRepTy = mkTyConTy unliftedRepTyCon

{- *********************************************************************
*                                                                      *
     VecCount, VecElem
*                                                                      *
********************************************************************* -}

vecCountTyConName :: Name
vecCountTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "VecCount")
                       vecCountTyConKey vecCountTyCon

vecElemTyConName :: Name
vecElemTyConName
  = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "VecElem")
                       vecElemTyConKey vecElemTyCon

vecRepDataCon :: DataCon
vecRepDataCon = pcSpecialDataCon vecRepDataConName [ mkTyConTy vecCountTyCon
                                                   , mkTyConTy vecElemTyCon ]
                                 runtimeRepTyCon
                                 (RuntimeRep prim_rep_fun)
  where
    -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType
    prim_rep_fun [count, elem]
      | VecCount n <- tyConPromDataConInfo (tyConAppTyCon count)
      , VecElem  e <- tyConPromDataConInfo (tyConAppTyCon elem)
      = [VecRep n e]
    prim_rep_fun args
      = pprPanic "vecRepDataCon" (ppr args)

vecRepDataConTyCon :: TyCon
vecRepDataConTyCon = promoteDataCon vecRepDataCon

vecCountTyCon :: TyCon
vecCountTyCon = pcTyCon vecCountTyConName Nothing [] vecCountDataCons

-- See Note [Wiring in RuntimeRep]
vecCountDataCons :: [DataCon]
vecCountDataCons = zipWith mk_vec_count_dc [1..6] vecCountDataConKeys
  where
    -- Vector lengths are the powers of two 2,4,...,64
    mk_vec_count_dc logN key
      = con
      where
        n    = 2^(logN :: Int)
        name = mk_runtime_rep_dc_name (fsLit ("Vec" ++ show n)) key con
        con  = pcSpecialDataCon name [] vecCountTyCon (VecCount n)

-- See Note [Wiring in RuntimeRep]
vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy,
  vec64DataConTy :: Type
[vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy,
  vec64DataConTy]
  = map (mkTyConTy . promoteDataCon) vecCountDataCons

vecElemTyCon :: TyCon
vecElemTyCon = pcTyCon vecElemTyConName Nothing [] vecElemDataCons

-- See Note [Wiring in RuntimeRep]
vecElemDataCons :: [DataCon]
vecElemDataCons = zipWith3 mk_vec_elem_dc
  [ fsLit "Int8ElemRep",  fsLit "Int16ElemRep",  fsLit "Int32ElemRep",  fsLit "Int64ElemRep"
  , fsLit "Word8ElemRep", fsLit "Word16ElemRep", fsLit "Word32ElemRep", fsLit "Word64ElemRep"
  , fsLit "FloatElemRep", fsLit "DoubleElemRep" ]
  [ Int8ElemRep,  Int16ElemRep,  Int32ElemRep,  Int64ElemRep
  , Word8ElemRep, Word16ElemRep, Word32ElemRep, Word64ElemRep
  , FloatElemRep, DoubleElemRep ]
  vecElemDataConKeys
  where
    mk_vec_elem_dc nameFs elemRep key
      = con
      where
        name = mk_runtime_rep_dc_name nameFs key con
        con  = pcSpecialDataCon name [] vecElemTyCon (VecElem elemRep)

-- See Note [Wiring in RuntimeRep]
int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy,
  int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy,
  word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy,
  doubleElemRepDataConTy :: Type
[int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy,
  int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy,
  word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy,
  doubleElemRepDataConTy]
  = map (mkTyConTy . promoteDataCon) vecElemDataCons

{- *********************************************************************
*                                                                      *
     The boxed primitive types: Char, Int, etc
*                                                                      *
********************************************************************* -}

charTy :: Type
charTy = mkTyConTy charTyCon

charTyCon :: TyCon
charTyCon = pcTyCon charTyConName
                    (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsChar")))
                    [] [charDataCon]

charDataCon :: DataCon
charDataCon = pcDataCon charDataConName [] [charPrimTy] charTyCon

stringTy :: Type
stringTy = mkTyConTy stringTyCon

stringTyCon :: TyCon
-- We have this wired-in so that Haskell literal strings
-- get type String (in hsLitType), which in turn influences
-- inferred types and error messages
stringTyCon = buildSynTyCon stringTyConName [] liftedTypeKind []
                            (mkListTy charTy)

intTy :: Type
intTy = mkTyConTy intTyCon

intTyCon :: TyCon
intTyCon = pcTyCon intTyConName
                   (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsInt")))
                   [] [intDataCon]

intDataCon :: DataCon
intDataCon = pcDataCon intDataConName [] [intPrimTy] intTyCon

wordTy :: Type
wordTy = mkTyConTy wordTyCon

wordTyCon :: TyCon
wordTyCon = pcTyCon wordTyConName
                    (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsWord")))
                    [] [wordDataCon]

wordDataCon :: DataCon
wordDataCon = pcDataCon wordDataConName [] [wordPrimTy] wordTyCon

word8Ty :: Type
word8Ty = mkTyConTy word8TyCon

word8TyCon :: TyCon
word8TyCon = pcTyCon word8TyConName
                     (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsWord8")))
                     [] [word8DataCon]

word8DataCon :: DataCon
word8DataCon = pcDataCon word8DataConName [] [word8PrimTy] word8TyCon

floatTy :: Type
floatTy = mkTyConTy floatTyCon

floatTyCon :: TyCon
floatTyCon = pcTyCon floatTyConName
                     (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsFloat")))
                     [] [floatDataCon]

floatDataCon :: DataCon
floatDataCon = pcDataCon floatDataConName [] [floatPrimTy] floatTyCon

doubleTy :: Type
doubleTy = mkTyConTy doubleTyCon

doubleTyCon :: TyCon
doubleTyCon = pcTyCon doubleTyConName
                      (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsDouble")))
                      [] [doubleDataCon]

doubleDataCon :: DataCon
doubleDataCon = pcDataCon doubleDataConName [] [doublePrimTy] doubleTyCon

{- *********************************************************************
*                                                                      *
         Boxing data constructors
*                                                                      *
********************************************************************* -}

{- Note [Boxing constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In ghc-prim:GHC.Types we have a family of data types, one for each RuntimeRep
that "box" unlifted values into a (boxed, lifted) value of kind Type. For example

  type Int8Box :: TYPE Int8Rep -> Type
  data Int8Box (a :: TYPE Int8Rep) = MkInt8Box a
  -- MkInt8Box :: forall (a :: TYPE Int8Rep). a -> Int8Box a

Then we can package an `Int8#` into an `Int8Box` with `MkInt8Box`.

We can also package up a (lifted) Constraint as a value of kind Type.

There are a fixed number of RuntimeReps, so we only need a fixed number of
boxing types. (For TupleRep we need to box recursively; not yet done,
see #22336.)

This is used:

* In desugaring, when we need to package up a bunch of values into a tuple,
  for example when desugaring arrows. See Note [Big tuples] in GHC.Core.Make.

* In let-floating when we want to float an unlifted sub-expression.
  See Note [Floating MFEs of unlifted type] in GHC.Core.Opt.SetLevels

In this module we make wired-in data type declarations for all of these
boxing functions. The goal is to define boxingDataCon_maybe.

Wrinkles

(W1) The runtime system has special treatment (e.g. commoning up during GC)
     for Int and Char values. See Note [CHARLIKE and INTLIKE closures] and
     Note [Precomputed static closures] in the RTS.
     So we treat Int# and Char# specially, in specialBoxingDataCon_maybe
-}

data BoxingInfo b
  = BI_NoBoxNeeded    -- The type has kind Type, so there is nothing to do

  | BI_NoBoxAvailable -- The type does not have kind Type, but sadly we
                      -- don't have a boxing data constructor either

  | BI_Box            -- The type does not have kind Type, and we do have a
                      -- boxing data constructor; here it is
      { bi_data_con   :: DataCon
      , bi_inst_con   :: Expr b
      , bi_boxed_type :: Type }
    -- e.g. BI_Box { bi_data_con = I#, bi_inst_con = I#, bi_boxed_type = Int }
    --      recall: data Int = I# Int#
    --
    --      BI_Box { bi_data_con = MkInt8Box, bi_inst_con = MkInt8Box @ty
    --             , bi_boxed_type = Int8Box ty }
    --      recall: data Int8Box (a :: TYPE Int8Rep) = MkIntBox a

boxingDataCon :: Type -> BoxingInfo b
-- ^ Given a type 'ty', if 'ty' is not of kind Type, return a data constructor that
-- will box it, and the type of the boxed thing, which /does/ now have kind Type.
-- See Note [Boxing constructors]
boxingDataCon ty
  | tcIsLiftedTypeKind kind
  = BI_NoBoxNeeded  -- Fast path for Type

  | Just box_con <- specialBoxingDataCon_maybe ty
  = BI_Box { bi_data_con = box_con, bi_inst_con = mkConApp box_con []
           , bi_boxed_type = tyConNullaryTy (dataConTyCon box_con) }

  | Just box_con <- lookupTypeMap boxingDataConMap kind
  = BI_Box { bi_data_con = box_con, bi_inst_con = mkConApp box_con [Type ty]
           , bi_boxed_type = mkTyConApp (dataConTyCon box_con) [ty] }

  | otherwise
  = BI_NoBoxAvailable
  where
    kind = typeKind ty

specialBoxingDataCon_maybe :: Type -> Maybe DataCon
-- ^ See Note [Boxing constructors] wrinkle (W1)
specialBoxingDataCon_maybe ty
  = case splitTyConApp_maybe ty of
      Just (tc, _) | tc `hasKey` intPrimTyConKey  -> Just intDataCon
                   | tc `hasKey` charPrimTyConKey -> Just charDataCon
      _ -> Nothing

boxingDataConMap :: TypeMap DataCon
-- See Note [Boxing constructors]
boxingDataConMap = foldl add emptyTypeMap boxingDataCons
  where
    add bdcm (kind, boxing_con) = extendTypeMap bdcm kind boxing_con

boxingDataCons :: [(Kind, DataCon)]
-- The Kind is the kind of types for which the DataCon is the right boxing
boxingDataCons = zipWith mkBoxingDataCon
  (map mkBoxingTyConUnique [1..])
  [ (mkTYPEapp wordRepDataConTy,   fsLit "WordBox",   fsLit "MkWordBox")
  , (mkTYPEapp intRepDataConTy,    fsLit "IntBox",    fsLit "MkIntBox")
  , (mkTYPEapp floatRepDataConTy,  fsLit "FloatBox",  fsLit "MkFloatBox")
  , (mkTYPEapp doubleRepDataConTy, fsLit "DoubleBox", fsLit "MkDoubleBox")
  , (mkTYPEapp int8RepDataConTy,   fsLit "Int8Box",   fsLit "MkInt8Box")
  , (mkTYPEapp int16RepDataConTy,  fsLit "Int16Box",  fsLit "MkInt16Box")
  , (mkTYPEapp int32RepDataConTy,  fsLit "Int32Box",  fsLit "MkInt32Box")
  , (mkTYPEapp int64RepDataConTy,  fsLit "Int64Box",  fsLit "MkInt64Box")
  , (mkTYPEapp word8RepDataConTy,  fsLit "Word8Box",  fsLit "MkWord8Box")
  , (mkTYPEapp word16RepDataConTy, fsLit "Word16Box", fsLit "MkWord16Box")
  , (mkTYPEapp word32RepDataConTy, fsLit "Word32Box", fsLit "MkWord32Box")
  , (mkTYPEapp word64RepDataConTy, fsLit "Word64Box", fsLit "MkWord64Box")
  , (unliftedTypeKind,             fsLit "LiftBox",   fsLit "MkLiftBox")
  , (constraintKind,               fsLit "DictBox",   fsLit "MkDictBox")
  ]

mkBoxingDataCon :: Unique -> (Kind, FastString, FastString) -> (Kind, DataCon)
mkBoxingDataCon uniq_tc (kind, fs_tc, fs_dc)
  = (kind, dc)
  where
    uniq_dc = boxingDataConUnique uniq_tc

    (tv:_) = mkTemplateTyVars (repeat kind)

    tc = pcTyCon tc_name Nothing [tv] [dc]
    tc_name = mkWiredInTyConName UserSyntax gHC_TYPES fs_tc uniq_tc tc

    dc | isConstraintKind kind
       = pcDataConConstraint dc_name [tv] [mkTyVarTy tv] tc
       | otherwise
       = pcDataCon dc_name [tv] [mkTyVarTy tv] tc
    dc_name = mkWiredInDataConName UserSyntax gHC_TYPES fs_dc uniq_dc dc

{-
************************************************************************
*                                                                      *
            The Bool type
*                                                                      *
************************************************************************

An ordinary enumeration type, but deeply wired in.
There are no magical operations on @Bool@ (just the regular Prelude code).

{\em BEGIN IDLE SPECULATION BY SIMON}

This is not the only way to encode @Bool@. A more obvious coding makes
@Bool@ just a boxed up version of @Bool#@, like this:
\begin{verbatim}
type Bool# = Int#
data Bool = MkBool Bool#
\end{verbatim}

Unfortunately, this doesn't correspond to what the Report says @Bool@
looks like! Furthermore, we get slightly less efficient code (I think)
with this coding. @gtInt@ would look like this:
\begin{verbatim}
gtInt :: Int -> Int -> Bool
gtInt x y = case x of I# x# ->
            case y of I# y# ->
            case (gtIntPrim x# y#) of
                b# -> MkBool b#
\end{verbatim}

Notice that the result of the @gtIntPrim@ comparison has to be turned
into an integer (here called @b#@), and returned in a @MkBool@ box.

The @if@ expression would compile to this:
\begin{verbatim}
case (gtInt x y) of
  MkBool b# -> case b# of { 1# -> e1; 0# -> e2 }
\end{verbatim}

I think this code is a little less efficient than the previous code,
but I'm not certain. At all events, corresponding with the Report is
important. The interesting thing is that the language is expressive
enough to describe more than one alternative; and that a type doesn't
necessarily need to be a straightforwardly boxed version of its
primitive counterpart.

{\em END IDLE SPECULATION BY SIMON}
-}

boolTy :: Type
boolTy = mkTyConTy boolTyCon

boolTyCon :: TyCon
boolTyCon = pcTyCon boolTyConName
                    (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsBool")))
                    [] [falseDataCon, trueDataCon]

falseDataCon, trueDataCon :: DataCon
falseDataCon = pcDataCon falseDataConName [] [] boolTyCon
trueDataCon  = pcDataCon trueDataConName  [] [] boolTyCon

falseDataConId, trueDataConId :: Id
falseDataConId = dataConWorkId falseDataCon
trueDataConId  = dataConWorkId trueDataCon

orderingTyCon :: TyCon
orderingTyCon = pcTyCon orderingTyConName Nothing []
                        [ordLTDataCon, ordEQDataCon, ordGTDataCon]

ordLTDataCon, ordEQDataCon, ordGTDataCon :: DataCon
ordLTDataCon = pcDataCon ordLTDataConName [] [] orderingTyCon
ordEQDataCon = pcDataCon ordEQDataConName [] [] orderingTyCon
ordGTDataCon = pcDataCon ordGTDataConName [] [] orderingTyCon

ordLTDataConId, ordEQDataConId, ordGTDataConId :: Id
ordLTDataConId = dataConWorkId ordLTDataCon
ordEQDataConId = dataConWorkId ordEQDataCon
ordGTDataConId = dataConWorkId ordGTDataCon

{-
************************************************************************
*                                                                      *
            The List type
   Special syntax, deeply wired in,
   but otherwise an ordinary algebraic data type
*                                                                      *
************************************************************************

       data [] a = [] | a : (List a)
-}

mkListTy :: Type -> Type
mkListTy ty = mkTyConApp listTyCon [ty]

listTyCon :: TyCon
listTyCon = pcTyCon listTyConName Nothing [alphaTyVar]
                    [nilDataCon, consDataCon]
-- See also Note [Empty lists] in GHC.Hs.Expr.

nilDataCon :: DataCon
nilDataCon  = pcDataCon nilDataConName alpha_tyvar [] listTyCon

consDataCon :: DataCon
consDataCon = pcDataConWithFixity True {- Declared infix -}
                consDataConName
                alpha_tyvar [] noConcreteTyVars alpha_tyvar []
                (map linear [alphaTy, mkTyConApp listTyCon alpha_ty]) listTyCon
-- Interesting: polymorphic recursion would help here.
-- We can't use (mkListTy alphaTy) in the defn of consDataCon, else mkListTy
-- gets the over-specific type (Type -> Type)

-- Wired-in type Maybe

maybeTyCon :: TyCon
maybeTyCon = pcTyCon maybeTyConName Nothing alpha_tyvar
                     [nothingDataCon, justDataCon]

nothingDataCon :: DataCon
nothingDataCon = pcDataCon nothingDataConName alpha_tyvar [] maybeTyCon

justDataCon :: DataCon
justDataCon = pcDataCon justDataConName alpha_tyvar [alphaTy] maybeTyCon

-- | Build a /promoted/ Maybe: @'Just x@ or @'Nothing@ at kind k.
mkPromotedMaybeTy :: Kind -> Maybe Type -> Type
mkPromotedMaybeTy k (Just x) = mkTyConApp promotedJustDataCon [k,x]
mkPromotedMaybeTy k Nothing  = mkTyConApp promotedNothingDataCon [k]

mkMaybeTy :: Type -> Kind
mkMaybeTy t = mkTyConApp maybeTyCon [t]

-- | Inverse of 'mkPromotedMaybeTy': recognise @'Just x@ and @'Nothing@.
isPromotedMaybeTy :: Type -> Maybe (Maybe Type)
isPromotedMaybeTy t
  | Just (tc,[_,x]) <- splitTyConApp_maybe t, tc == promotedJustDataCon    = return $ Just x
  | Just (tc,[_])   <- splitTyConApp_maybe t, tc == promotedNothingDataCon = return $ Nothing
  | otherwise = Nothing

{- **
************************************************************************
*                                                                      *
            The tuple types
*                                                                      *
************************************************************************

The tuple types are definitely magic, because they form an infinite
family.

\begin{itemize}
\item
They have a special family of type constructors, of type @TyCon@
These contain the tycon arity, but don't require a Unique.

\item
They have a special family of constructors, of type
@Id@. Again these contain their arity but don't need a Unique.

\item
There should be a magic way of generating the info tables and
entry code for all tuples.

But at the moment we just compile a Haskell source
file\srcloc{lib/prelude/...} containing declarations like:
\begin{verbatim}
data Tuple0             = Tup0
data Tuple2  a b        = Tup2  a b
data Tuple3  a b c      = Tup3  a b c
data Tuple4  a b c d    = Tup4  a b c d
...
\end{verbatim}
The print-names associated with the magic @Id@s for tuple constructors
``just happen'' to be the same as those generated by these
declarations.

\item
The instance environment should have a magic way to know
that each tuple type is an instances of classes @Eq@, @Ix@, @Ord@ and
so on. \ToDo{Not implemented yet.}

\item
There should also be a way to generate the appropriate code for each
of these instances, but (like the info tables and entry code) it
is done by enumeration\srcloc{lib/prelude/InTup?.hs}.
\end{itemize}
-}

-- | Make a tuple type. The list of types should /not/ include any
-- RuntimeRep specifications. Boxed 1-tuples are flattened.
-- See Note [One-tuples]
mkTupleTy :: Boxity -> [Type] -> Type
-- Special case for *boxed* 1-tuples, which are represented by the type itself
mkTupleTy Boxed  [ty] = ty
mkTupleTy boxity tys  = mkTupleTy1 boxity tys

-- | Make a tuple type. The list of types should /not/ include any
-- RuntimeRep specifications. Boxed 1-tuples are *not* flattened.
-- See Note [One-tuples] and Note [Don't flatten tuples from HsSyn]
-- in "GHC.Core.Make"
mkTupleTy1 :: Boxity -> [Type] -> Type
mkTupleTy1 Boxed   tys = mkTyConApp (tupleTyCon Boxed (length tys)) tys
mkTupleTy1 Unboxed tys = mkTyConApp (tupleTyCon Unboxed (length tys))
                                    (map getRuntimeRep tys ++ tys)

-- | Build the type of a small tuple that holds the specified type of thing
-- Flattens 1-tuples. See Note [One-tuples].
mkBoxedTupleTy :: [Type] -> Type
mkBoxedTupleTy tys = mkTupleTy Boxed tys

unitTy :: Type
unitTy = mkTupleTy Boxed []

-- Make a constraint tuple, flattening a 1-tuple as usual
-- If we get a constraint tuple that is bigger than the pre-built
-- ones (in ghc-prim:GHC.Tuple), then just make one up anyway; it won't
-- have an info table in the RTS, so we can't use it at runtime. 
But -- this is used only in filling in extra-constraint wildcards, so it -- never is used at runtime anyway -- See GHC.Tc.Gen.HsType Note [Extra-constraint holes in partial type signatures] mkConstraintTupleTy :: [Type] -> Type mkConstraintTupleTy [ty] = ty mkConstraintTupleTy tys = mkTyConApp (cTupleTyCon (length tys)) tys {- ********************************************************************* * * The sum types * * ************************************************************************ -} mkSumTy :: [Type] -> Type mkSumTy tys = mkTyConApp (sumTyCon (length tys)) (map getRuntimeRep tys ++ tys) -- Promoted Booleans promotedFalseDataCon, promotedTrueDataCon :: TyCon promotedTrueDataCon = promoteDataCon trueDataCon promotedFalseDataCon = promoteDataCon falseDataCon -- Promoted Maybe promotedNothingDataCon, promotedJustDataCon :: TyCon promotedNothingDataCon = promoteDataCon nothingDataCon promotedJustDataCon = promoteDataCon justDataCon -- Promoted Ordering promotedLTDataCon , promotedEQDataCon , promotedGTDataCon :: TyCon promotedLTDataCon = promoteDataCon ordLTDataCon promotedEQDataCon = promoteDataCon ordEQDataCon promotedGTDataCon = promoteDataCon ordGTDataCon -- Promoted List promotedConsDataCon, promotedNilDataCon :: TyCon promotedConsDataCon = promoteDataCon consDataCon promotedNilDataCon = promoteDataCon nilDataCon -- | Make a *promoted* list. mkPromotedListTy :: Kind -- ^ of the elements of the list -> [Type] -- ^ elements -> Type mkPromotedListTy k tys = foldr cons nil tys where cons :: Type -- element -> Type -- list -> Type cons elt list = mkTyConApp promotedConsDataCon [k, elt, list] nil :: Type nil = mkTyConApp promotedNilDataCon [k] -- | Extract the elements of a promoted list. 
Panics if the type is not a -- promoted list extractPromotedList :: Type -- ^ The promoted list -> [Type] extractPromotedList tys = go tys where go list_ty | Just (tc, [_k, t, ts]) <- splitTyConApp_maybe list_ty = assert (tc `hasKey` consDataConKey) $ t : go ts | Just (tc, [_k]) <- splitTyConApp_maybe list_ty = assert (tc `hasKey` nilDataConKey) [] | otherwise = pprPanic "extractPromotedList" (ppr tys) --------------------------------------- -- ghc-bignum --------------------------------------- integerTyConName , integerISDataConName , integerIPDataConName , integerINDataConName :: Name integerTyConName = mkWiredInTyConName UserSyntax gHC_INTERNAL_NUM_INTEGER (fsLit "Integer") integerTyConKey integerTyCon integerISDataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_NUM_INTEGER (fsLit "IS") integerISDataConKey integerISDataCon integerIPDataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_NUM_INTEGER (fsLit "IP") integerIPDataConKey integerIPDataCon integerINDataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_NUM_INTEGER (fsLit "IN") integerINDataConKey integerINDataCon integerTy :: Type integerTy = mkTyConTy integerTyCon integerTyCon :: TyCon integerTyCon = pcTyCon integerTyConName Nothing [] [integerISDataCon, integerIPDataCon, integerINDataCon] integerISDataCon :: DataCon integerISDataCon = pcDataCon integerISDataConName [] [intPrimTy] integerTyCon integerIPDataCon :: DataCon integerIPDataCon = pcDataCon integerIPDataConName [] [byteArrayPrimTy] integerTyCon integerINDataCon :: DataCon integerINDataCon = pcDataCon integerINDataConName [] [byteArrayPrimTy] integerTyCon naturalTyConName , naturalNSDataConName , naturalNBDataConName :: Name naturalTyConName = mkWiredInTyConName UserSyntax gHC_INTERNAL_NUM_NATURAL (fsLit "Natural") naturalTyConKey naturalTyCon naturalNSDataConName = mkWiredInDataConName UserSyntax gHC_INTERNAL_NUM_NATURAL (fsLit "NS") naturalNSDataConKey naturalNSDataCon naturalNBDataConName = mkWiredInDataConName UserSyntax 
gHC_INTERNAL_NUM_NATURAL (fsLit "NB") naturalNBDataConKey naturalNBDataCon naturalTy :: Type naturalTy = mkTyConTy naturalTyCon naturalTyCon :: TyCon naturalTyCon = pcTyCon naturalTyConName Nothing [] [naturalNSDataCon, naturalNBDataCon] naturalNSDataCon :: DataCon naturalNSDataCon = pcDataCon naturalNSDataConName [] [wordPrimTy] naturalTyCon naturalNBDataCon :: DataCon naturalNBDataCon = pcDataCon naturalNBDataConName [] [byteArrayPrimTy] naturalTyCon -- | Replaces constraint tuple names with corresponding boxed ones. filterCTuple :: RdrName -> RdrName filterCTuple (Exact n) | Just arity <- cTupleTyConNameArity_maybe n = Exact $ tupleTyConName BoxedTuple arity filterCTuple rdr = rdr {- ************************************************************************ * * Semi-builtin names * * ************************************************************************ Note [pretendNameIsInScope] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general, we filter out instances that mention types whose names are not in scope. However, in the situations listed below, we make an exception for some commonly used names, such as Data.Kind.Type, which may not actually be in scope but should be treated as though they were in scope. This includes built-in names, as well as a few extra names such as 'Type', 'TYPE', 'BoxedRep', etc. Situations in which we apply this special logic: - GHCi's :info command, see GHC.Runtime.Eval.getInfo. This fixes #1581. - When reporting instance overlap errors. Not doing so could mean that we would omit instances for typeclasses like type Cls :: k -> Constraint class Cls a because BoxedRep/Lifted were not in scope. See GHC.Tc.Errors.potentialInstancesErrMsg. This fixes one of the issues reported in #20465. -} -- | Should this name be considered in-scope, even though it technically isn't? -- -- This ensures that we don't filter out information because, e.g., -- Data.Kind.Type isn't imported. -- -- See Note [pretendNameIsInScope]. 
pretendNameIsInScope :: Name -> Bool pretendNameIsInScope n = isBuiltInSyntax n || isTupleTyConName n || isSumTyConName n || isCTupleTyConName n || any (n `hasKey`) [ liftedTypeKindTyConKey, unliftedTypeKindTyConKey , liftedDataConKey, unliftedDataConKey , tYPETyConKey , cONSTRAINTTyConKey , runtimeRepTyConKey, boxedRepDataConKey , eqTyConKey , listTyConKey , oneDataConKey , manyDataConKey , fUNTyConKey, unrestrictedFunTyConKey ] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Types.hs-boot0000644000000000000000000000450307346545000021717 0ustar0000000000000000module GHC.Builtin.Types where import {-# SOURCE #-} GHC.Core.TyCon ( TyCon ) import {-# SOURCE #-} GHC.Core.TyCo.Rep (Type, Kind, RuntimeRepType) import {-# SOURCE #-} GHC.Core.DataCon ( DataCon ) import GHC.Types.Basic (Arity, TupleSort, Boxity, ConTag) import {-# SOURCE #-} GHC.Types.Name (Name) listTyCon :: TyCon typeSymbolKind :: Type charTy :: Type mkBoxedTupleTy :: [Type] -> Type coercibleTyCon, heqTyCon :: TyCon unitTy :: Type unitTyCon :: TyCon liftedTypeKindTyConName :: Name constraintKindTyConName :: Name liftedTypeKind, unliftedTypeKind, zeroBitTypeKind :: Kind liftedTypeKindTyCon, unliftedTypeKindTyCon :: TyCon liftedRepTyCon, unliftedRepTyCon :: TyCon constraintKind :: Kind runtimeRepTyCon, levityTyCon, vecCountTyCon, vecElemTyCon :: TyCon runtimeRepTy, levityTy :: Type boxedRepDataConTyCon, liftedDataConTyCon :: TyCon vecRepDataConTyCon, tupleRepDataConTyCon :: TyCon liftedRepTy, unliftedRepTy, zeroBitRepTy :: RuntimeRepType liftedDataConTy, unliftedDataConTy :: Type intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy :: RuntimeRepType vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy :: Type int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, 
int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy :: Type anyTypeOfKind :: Kind -> Type unboxedTupleKind :: [Type] -> Type multiplicityTyCon :: TyCon multiplicityTy :: Type oneDataConTy :: Type oneDataConTyCon :: TyCon manyDataConTy :: Type manyDataConTyCon :: TyCon unrestrictedFunTyCon :: TyCon multMulTyCon :: TyCon tupleTyConName :: TupleSort -> Arity -> Name tupleDataConName :: Boxity -> Arity -> Name integerTy, naturalTy :: Type promotedTupleDataCon :: Boxity -> Arity -> TyCon tupleDataCon :: Boxity -> Arity -> DataCon tupleTyCon :: Boxity -> Arity -> TyCon cTupleDataCon :: Arity -> DataCon cTupleDataConName :: Arity -> Name cTupleTyConName :: Arity -> Name cTupleSelIdName :: ConTag -> Arity -> Name sumDataCon :: ConTag -> Arity -> DataCon sumTyCon :: Arity -> TyCon ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Types/0000755000000000000000000000000007346545000020420 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Types/Literals.hs0000644000000000000000000012462607346545000022546 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- See calls to mkTemplateTyVars module GHC.Builtin.Types.Literals ( tryInteractInertFam, tryInteractTopFam, tryMatchFam , typeNatTyCons , typeNatCoAxiomRules , BuiltInSynFamily(..) -- If you define a new built-in type family, make sure to export its TyCon -- from here as well. 
-- See Note [Adding built-in type families] , typeNatAddTyCon , typeNatMulTyCon , typeNatExpTyCon , typeNatSubTyCon , typeNatDivTyCon , typeNatModTyCon , typeNatLogTyCon , typeNatCmpTyCon , typeSymbolCmpTyCon , typeSymbolAppendTyCon , typeCharCmpTyCon , typeConsSymbolTyCon , typeUnconsSymbolTyCon , typeCharToNatTyCon , typeNatToCharTyCon ) where import GHC.Prelude import GHC.Core.Type import GHC.Core.Unify ( tcMatchTys ) import GHC.Data.Pair import GHC.Core.TyCon ( TyCon, FamTyConFlav(..), mkFamilyTyCon, tyConArity , Injectivity(..), isBuiltInSynFamTyCon_maybe ) import GHC.Core.Coercion.Axiom import GHC.Core.TyCo.Compare ( tcEqType ) import GHC.Types.Name ( Name, BuiltInSyntax(..) ) import GHC.Types.Unique.FM import GHC.Builtin.Types import GHC.Builtin.Types.Prim ( mkTemplateAnonTyConBinders, mkTemplateTyVars ) import GHC.Builtin.Names ( gHC_INTERNAL_TYPELITS , gHC_INTERNAL_TYPELITS_INTERNAL , gHC_INTERNAL_TYPENATS , gHC_INTERNAL_TYPENATS_INTERNAL , typeNatAddTyFamNameKey , typeNatMulTyFamNameKey , typeNatExpTyFamNameKey , typeNatSubTyFamNameKey , typeNatDivTyFamNameKey , typeNatModTyFamNameKey , typeNatLogTyFamNameKey , typeNatCmpTyFamNameKey , typeSymbolCmpTyFamNameKey , typeSymbolAppendFamNameKey , typeCharCmpTyFamNameKey , typeConsSymbolTyFamNameKey , typeUnconsSymbolTyFamNameKey , typeCharToNatTyFamNameKey , typeNatToCharTyFamNameKey ) import GHC.Data.FastString import GHC.Utils.Panic import GHC.Utils.Outputable import Control.Monad ( guard ) import Data.List ( isPrefixOf, isSuffixOf ) import Data.Maybe ( listToMaybe ) import qualified Data.Char as Char {- Note [Type-level literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~ There are currently three forms of type-level literals: natural numbers, symbols, and characters. Type-level literals are supported by CoAxiomRules (conditional axioms), which power the built-in type families (see Note [Adding built-in type families]). Currently, all built-in type families are for the express purpose of supporting type-level literals. 
See also the Wiki page: https://gitlab.haskell.org/ghc/ghc/wikis/type-nats Note [Adding built-in type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a few steps to adding a built-in type family: * Adding a unique for the type family TyCon These go in GHC.Builtin.Names. It will likely be of the form @myTyFamNameKey = mkPreludeTyConUnique xyz@, where @xyz@ is a number that has not been chosen before in GHC.Builtin.Names. There are several examples already in GHC.Builtin.Names—see, for instance, typeNatAddTyFamNameKey. * Adding the type family TyCon itself This goes in GHC.Builtin.Types.Literals. There are plenty of examples of how to define these -- see, for instance, typeNatAddTyCon. Once your TyCon has been defined, be sure to: - Export it from GHC.Builtin.Types.Literals. (Not doing so caused #14632.) - Include it in the typeNatTyCons list, defined in GHC.Builtin.Types.Literals. * Define the type family somewhere Finally, you will need to define the type family somewhere, likely in @base@. Currently, all of the built-in type families are defined in GHC.TypeLits or GHC.TypeNats, so those are likely candidates. Since the behavior of your built-in type family is specified in GHC.Builtin.Types.Literals, you should give an open type family definition with no instances, like so: type family MyTypeFam (m :: Nat) (n :: Nat) :: Nat Changing the argument and result kinds as appropriate. * Update the relevant test cases The GHC test suite will likely need to be updated after you add your built-in type family. For instance: - The T9181 test prints the :browse contents of GHC.TypeLits, so if you added a test there, the expected output of T9181 will need to change. - The TcTypeNatSimple and TcTypeSymbolSimple tests have compile-time unit tests, as well as TcTypeNatSimpleRun and TcTypeSymbolSimpleRun, which have runtime unit tests. Consider adding further unit tests to those if your built-in type family deals with Nats or Symbols, respectively. 
Note [Inlining axiom constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have a number of constructor functions with types like mkUnaryConstFoldAxiom :: TyCon -> String -> (Type -> Maybe a) -> (a -> Maybe Type) -> BuiltInFamRewrite For very type-family-heavy code, these higher order argument are inefficient; e.g. the fourth argument might always return (Just ty) in the above. Inlining them is a bit brutal, but not bad, makes a few-percent difference in, say perf test T13386. These functions aren't exported, so the effect is very local. -} ------------------------------------------------------------------------------- -- Key utility functions ------------------------------------------------------------------------------- tryInteractTopFam :: BuiltInSynFamily -> TyCon -> [Type] -> Type -> [(CoAxiomRule, TypeEqn)] -- The returned CoAxiomRule is always unary tryInteractTopFam fam fam_tc tys r = [(bifinj_axr bif, eqn_out) | bif <- sfInteract fam , Just eqn_out <- [bifinj_proves bif eqn_in] ] where eqn_in :: TypeEqn eqn_in = Pair (mkTyConApp fam_tc tys) r tryInteractInertFam :: BuiltInSynFamily -> TyCon -> [Type] -> [Type] -- F tys1 ~ F tys2 -> [(CoAxiomRule, TypeEqn)] tryInteractInertFam builtin_fam fam_tc tys1 tys2 = [(bifinj_axr bif, eqn_out) | bif <- sfInteract builtin_fam , Just eqn_out <- [bifinj_proves bif eqn_in] ] where eqn_in = Pair (mkTyConApp fam_tc tys1) (mkTyConApp fam_tc tys2) tryMatchFam :: BuiltInSynFamily -> [Type] -> Maybe (CoAxiomRule, [Type], Type) -- Does this reduce on the given arguments? 
-- If it does, returns (CoAxiomRule, types to instantiate the rule at, rhs type) -- That is: mkAxiomCo (BuiltInFamRew ax) (map mkNomReflCo ts) -- :: F tys ~r rhs, tryMatchFam builtin_fam arg_tys = listToMaybe $ -- Pick first rule to match [ (bifrw_axr rw_ax, inst_tys, res_ty) | rw_ax <- sfMatchFam builtin_fam , Just (inst_tys,res_ty) <- [bifrw_match rw_ax arg_tys] ] ------------------------------------------------------------------------------- -- Constructing BuiltInFamInjectivity, BuiltInFamRewrite ------------------------------------------------------------------------------- mkUnaryConstFoldAxiom :: TyCon -> String -> (Type -> Maybe a) -> (a -> Maybe Type) -> BuiltInFamRewrite -- For the definitional axioms, like (3+4 --> 7) {-# INLINE mkUnaryConstFoldAxiom #-} -- See Note [Inlining axiom constructors] mkUnaryConstFoldAxiom fam_tc str isReqTy f = bif where bif = BIF_Rewrite { bifrw_name = fsLit str , bifrw_axr = BuiltInFamRew bif , bifrw_fam_tc = fam_tc , bifrw_arity = 1 , bifrw_match = \ts -> do { [t1] <- return ts ; t1' <- isReqTy t1 ; res <- f t1' ; return ([t1], res) } , bifrw_proves = \cs -> do { [Pair s1 s2] <- return cs ; s2' <- isReqTy s2 ; z <- f s2' ; return (mkTyConApp fam_tc [s1] === z) } } mkBinConstFoldAxiom :: TyCon -> String -> (Type -> Maybe a) -> (Type -> Maybe b) -> (a -> b -> Maybe Type) -> BuiltInFamRewrite -- For the definitional axioms, like (3+4 --> 7) {-# INLINE mkBinConstFoldAxiom #-} -- See Note [Inlining axiom constructors] mkBinConstFoldAxiom fam_tc str isReqTy1 isReqTy2 f = bif where bif = BIF_Rewrite { bifrw_name = fsLit str , bifrw_axr = BuiltInFamRew bif , bifrw_fam_tc = fam_tc , bifrw_arity = 2 , bifrw_match = \ts -> do { [t1,t2] <- return ts ; t1' <- isReqTy1 t1 ; t2' <- isReqTy2 t2 ; res <- f t1' t2' ; return ([t1,t2], res) } , bifrw_proves = \cs -> do { [Pair s1 s2, Pair t1 t2] <- return cs ; s2' <- isReqTy1 s2 ; t2' <- isReqTy2 t2 ; z <- f s2' t2' ; return (mkTyConApp fam_tc [s1,t1] === z) } } mkRewriteAxiom :: TyCon -> 
String -> [TyVar] -> [Type] -- LHS of axiom -> Type -- RHS of axiom -> BuiltInFamRewrite -- Not higher order, no benefit in inlining -- See Note [Inlining axiom constructors] mkRewriteAxiom fam_tc str tpl_tvs lhs_tys rhs_ty = assertPpr (tyConArity fam_tc == length lhs_tys) (text str <+> ppr lhs_tys) $ bif where bif = BIF_Rewrite { bifrw_name = fsLit str , bifrw_axr = BuiltInFamRew bif , bifrw_fam_tc = fam_tc , bifrw_arity = bif_arity , bifrw_match = match_fn , bifrw_proves = inst_fn } bif_arity = length tpl_tvs match_fn :: [Type] -> Maybe ([Type],Type) match_fn arg_tys = assertPpr (tyConArity fam_tc == length arg_tys) (text str <+> ppr arg_tys) $ case tcMatchTys lhs_tys arg_tys of Nothing -> Nothing Just subst -> Just (substTyVars subst tpl_tvs, substTy subst rhs_ty) inst_fn :: [TypeEqn] -> Maybe TypeEqn inst_fn inst_eqns = assertPpr (length inst_eqns == bif_arity) (text str $$ ppr inst_eqns) $ Just (mkTyConApp fam_tc (substTys (zipTCvSubst tpl_tvs tys1) lhs_tys) === substTy (zipTCvSubst tpl_tvs tys2) rhs_ty) where (tys1, tys2) = unzipPairs inst_eqns mkTopUnaryFamDeduction :: String -> TyCon -> (Type -> Type -> Maybe TypeEqn) -> BuiltInFamInjectivity -- Deduction from (F s ~ r) where `F` is a unary type function {-# INLINE mkTopUnaryFamDeduction #-} -- See Note [Inlining axiom constructors] mkTopUnaryFamDeduction str fam_tc f = bif where bif = BIF_Interact { bifinj_name = fsLit str , bifinj_axr = BuiltInFamInj bif , bifinj_proves = \(Pair lhs rhs) -> do { (tc, [a]) <- splitTyConApp_maybe lhs ; massertPpr (tc == fam_tc) (ppr tc $$ ppr fam_tc) ; f a rhs } } mkTopBinFamDeduction :: String -> TyCon -> (Type -> Type -> Type -> Maybe TypeEqn) -> BuiltInFamInjectivity -- Deduction from (F s t ~ r) where `F` is a binary type function {-# INLINE mkTopBinFamDeduction #-} -- See Note [Inlining axiom constructors] mkTopBinFamDeduction str fam_tc f = bif where bif = BIF_Interact { bifinj_name = fsLit str , bifinj_axr = BuiltInFamInj bif , bifinj_proves = \(Pair lhs rhs) -> do { 
(tc, [a,b]) <- splitTyConApp_maybe lhs ; massertPpr (tc == fam_tc) (ppr tc $$ ppr fam_tc) ; f a b rhs } } mkUnaryBIF :: String -> TyCon -> BuiltInFamInjectivity -- Not higher order, no benefit in inlining -- See Note [Inlining axiom constructors] mkUnaryBIF str fam_tc = bif where bif = BIF_Interact { bifinj_name = fsLit str , bifinj_axr = BuiltInFamInj bif , bifinj_proves = proves } proves (Pair lhs rhs) = do { (tc2, [x2]) <- splitTyConApp_maybe rhs ; guard (tc2 == fam_tc) ; (tc1, [x1]) <- splitTyConApp_maybe lhs ; massertPpr (tc1 == fam_tc) (ppr tc1 $$ ppr fam_tc) ; return (Pair x1 x2) } mkBinBIF :: String -> TyCon -> WhichArg -> WhichArg -> (Type -> Bool) -- The guard on the equal args, if any -> BuiltInFamInjectivity {-# INLINE mkBinBIF #-} -- See Note [Inlining axiom constructors] mkBinBIF str fam_tc eq1 eq2 check_me = bif where bif = BIF_Interact { bifinj_name = fsLit str , bifinj_axr = BuiltInFamInj bif , bifinj_proves = proves } proves (Pair lhs rhs) = do { (tc2, [x2,y2]) <- splitTyConApp_maybe rhs ; guard (tc2 == fam_tc) ; (tc1, [x1,y1]) <- splitTyConApp_maybe lhs ; massertPpr (tc1 == fam_tc) (ppr tc1 $$ ppr fam_tc) ; case (eq1, eq2) of (ArgX,ArgX) -> do_it x1 x2 y1 y2 (ArgX,ArgY) -> do_it x1 y2 x2 y1 (ArgY,ArgX) -> do_it y1 x2 y2 x1 (ArgY,ArgY) -> do_it y1 y2 x1 x2 } do_it a1 a2 b1 b2 = do { same a1 a2; guard (check_me a1); return (Pair b1 b2) } noGuard :: Type -> Bool noGuard _ = True numGuard :: (Integer -> Bool) -> Type -> Bool numGuard pred ty = case isNumLitTy ty of Just n -> pred n Nothing -> False data WhichArg = ArgX | ArgY ------------------------------------------------------------------------------- -- Built-in type constructors for functions on type-level nats ------------------------------------------------------------------------------- -- The list of built-in type family TyCons that GHC uses. -- If you define a built-in type family, make sure to add it to this list. 
-- See Note [Adding built-in type families] typeNatTyCons :: [TyCon] typeNatTyCons = [ typeNatAddTyCon , typeNatMulTyCon , typeNatExpTyCon , typeNatSubTyCon , typeNatDivTyCon , typeNatModTyCon , typeNatLogTyCon , typeNatCmpTyCon , typeSymbolCmpTyCon , typeSymbolAppendTyCon , typeCharCmpTyCon , typeConsSymbolTyCon , typeUnconsSymbolTyCon , typeCharToNatTyCon , typeNatToCharTyCon ] -- The list of built-in type family axioms that GHC uses. -- If you define new axioms, make sure to include them in this list. -- See Note [Adding built-in type families] typeNatCoAxiomRules :: UniqFM FastString CoAxiomRule typeNatCoAxiomRules = listToUFM $ [ pr | tc <- typeNatTyCons , Just ops <- [isBuiltInSynFamTyCon_maybe tc] , pr <- [ (bifinj_name bif, bifinj_axr bif) | bif <- sfInteract ops ] ++ [ (bifrw_name bif, bifrw_axr bif) | bif <- sfMatchFam ops ] ] ------------------------------------------------------------------------------- -- Addition (+) ------------------------------------------------------------------------------- typeNatAddTyCon :: TyCon typeNatAddTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axAddRewrites , sfInteract = axAddInjectivity } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "+") typeNatAddTyFamNameKey typeNatAddTyCon sn,tn :: TyVar -- Of kind Natural (sn: tn: _) = mkTemplateTyVars (repeat typeSymbolKind) axAddRewrites :: [BuiltInFamRewrite] axAddRewrites = [ mkRewriteAxiom tc "Add0L" [tn] [num 0, var tn] (var tn) -- 0 + t --> t , mkRewriteAxiom tc "Add0R" [sn] [var sn, num 0] (var sn) -- s + 0 --> s , mkBinConstFoldAxiom tc "AddDef" isNumLitTy isNumLitTy $ -- 3 + 4 --> 7 \x y -> Just $ num (x + y) ] where tc = typeNatAddTyCon axAddInjectivity :: [BuiltInFamInjectivity] axAddInjectivity = [ -- (s + t ~ 0) => (s ~ 0) mkTopBinFamDeduction "AddT-0L" tc $ \ a _b r -> do { _ <- known r (== 0); return (Pair a (num 0)) } , -- (s + t ~ 0) => (t ~ 0) mkTopBinFamDeduction "AddT-0R" tc $ \ _a b r -> do { _ <- known r (== 0); 
return (Pair b (num 0)) } , -- (5 + t ~ 8) => (t ~ 3) mkTopBinFamDeduction "AddT-KKL" tc $ \ a b r -> do { na <- isNumLitTy a; nr <- known r (>= na); return (Pair b (num (nr-na))) } , -- (s + 5 ~ 8) => (s ~ 3) mkTopBinFamDeduction "AddT-KKR" tc $ \ a b r -> do { nb <- isNumLitTy b; nr <- known r (>= nb); return (Pair a (num (nr-nb))) } , mkBinBIF "AddI-xx" tc ArgX ArgX noGuard -- x1+y1~x2+y2 {x1=x2}=> (y1 ~ y2) , mkBinBIF "AddI-xy" tc ArgX ArgY noGuard -- x1+y1~x2+y2 {x1=y2}=> (x2 ~ y1) , mkBinBIF "AddI-yx" tc ArgY ArgX noGuard -- x1+y1~x2+y2 {y1=x2}=> (x1 ~ y2) , mkBinBIF "AddI-yy" tc ArgY ArgY noGuard -- x1+y1~x2+y2 {y1=y2}=> (x1 ~ x2) ] where tc = typeNatAddTyCon ------------------------------------------------------------------------------- -- Subtraction (-) ------------------------------------------------------------------------------- typeNatSubTyCon :: TyCon typeNatSubTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axSubRewrites , sfInteract = axSubInjectivity } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "-") typeNatSubTyFamNameKey typeNatSubTyCon axSubRewrites :: [BuiltInFamRewrite] axSubRewrites = [ mkRewriteAxiom tc "Sub0R" [sn] [var sn, num 0] (var sn) -- s - 0 --> s , mkBinConstFoldAxiom tc "SubDef" isNumLitTy isNumLitTy $ -- 4 - 3 --> 1 if x>=y \x y -> fmap num (minus x y) ] where tc = typeNatSubTyCon axSubInjectivity :: [BuiltInFamInjectivity] axSubInjectivity = [ -- (a - b ~ 5) => (5 + b ~ a) mkTopBinFamDeduction "SubT" tc $ \ a b r -> do { _ <- isNumLitTy r; return (Pair (r .+. b) a) } , mkBinBIF "SubI-xx" tc ArgX ArgX noGuard -- (x-y1 ~ x-y2) => (y1 ~ y2) , mkBinBIF "SubI-yy" tc ArgY ArgY noGuard -- (x1-y ~ x2-y) => (x1 ~ x2) ] where tc = typeNatSubTyCon {- Note [Weakened interaction rule for subtraction] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A simpler interaction here might be: `s - t ~ r` --> `t + r ~ s` This would enable us to reuse all the code for addition. 
Unfortunately, this works a little too well at the moment. Consider the following example: 0 - 5 ~ r --> 5 + r ~ 0 --> (5 = 0, r = 0) This (correctly) spots that the constraint cannot be solved. However, this may be a problem if the constraint did not need to be solved in the first place! Consider the following example: f :: Proxy (If (5 <=? 0) (0 - 5) (5 - 0)) -> Proxy 5 f = id Currently, GHC is strict while evaluating functions, so this does not work, because even though the `If` should evaluate to `5 - 0`, we also evaluate the "then" branch which generates the constraint `0 - 5 ~ r`, which fails. So, for the time being, we only add an improvement when the RHS is a constant, which happens to work OK for the moment, although clearly we need to do something more general. -} ------------------------------------------------------------------------------- -- Multiplication (*) ------------------------------------------------------------------------------- typeNatMulTyCon :: TyCon typeNatMulTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axMulRewrites , sfInteract = axMulInjectivity } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "*") typeNatMulTyFamNameKey typeNatMulTyCon axMulRewrites :: [BuiltInFamRewrite] axMulRewrites = [ mkRewriteAxiom tc "Mul0L" [tn] [num 0, var tn] (num 0) -- 0 * t --> 0 , mkRewriteAxiom tc "Mul0R" [sn] [var sn, num 0] (num 0) -- s * 0 --> 0 , mkRewriteAxiom tc "Mul1L" [tn] [num 1, var tn] (var tn) -- 1 * t --> t , mkRewriteAxiom tc "Mul1R" [sn] [var sn, num 1] (var sn) -- s * 1 --> s , mkBinConstFoldAxiom tc "MulDef" isNumLitTy isNumLitTy $ -- 3 + 4 --> 12 \x y -> Just $ num (x * y) ] where tc = typeNatMulTyCon axMulInjectivity :: [BuiltInFamInjectivity] axMulInjectivity = [ -- (s * t ~ 1) => (s ~ 1) mkTopBinFamDeduction "MulT1" tc $ \ s _t r -> do { _ <- known r (== 1); return (Pair s r) } , -- (s * t ~ 1) => (t ~ 1) mkTopBinFamDeduction "MulT2" tc $ \ _s t r -> do { _ <- known r (== 1); return (Pair 
t r) } , -- (3 * t ~ 15) => (t ~ 5) mkTopBinFamDeduction "MulT3" tc $ \ s t r -> do { ns <- isNumLitTy s; nr <- isNumLitTy r; y <- divide nr ns; return (Pair t (num y)) } , -- (s * 3 ~ 15) => (s ~ 5) mkTopBinFamDeduction "MulT4" tc $ \ s t r -> do { nt <- isNumLitTy t; nr <- isNumLitTy r; y <- divide nr nt; return (Pair s (num y)) } , mkBinBIF "MulI-xx" tc ArgX ArgX (numGuard (/= 0)) -- (x*y1 ~ x*y2) {x/=0}=> (y1 ~ y2) , mkBinBIF "MulI-yy" tc ArgY ArgY (numGuard (/= 0)) -- (x1*y ~ x2*y) {y/=0}=> (x1 ~ x2) ] where tc = typeNatMulTyCon ------------------------------------------------------------------------------- -- Division: Div and Mod ------------------------------------------------------------------------------- typeNatDivTyCon :: TyCon typeNatDivTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axDivRewrites , sfInteract = [] } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "Div") typeNatDivTyFamNameKey typeNatDivTyCon typeNatModTyCon :: TyCon typeNatModTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axModRewrites , sfInteract = [] } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "Mod") typeNatModTyFamNameKey typeNatModTyCon axDivRewrites :: [BuiltInFamRewrite] axDivRewrites = [ mkRewriteAxiom tc "Div1" [sn] [var sn, num 1] (var sn) -- s `div` 1 --> s , mkBinConstFoldAxiom tc "DivDef" isNumLitTy isNumLitTy $ -- 8 `div` 4 --> 2 \x y -> do { guard (y /= 0); return (num (div x y)) } ] where tc = typeNatDivTyCon axModRewrites :: [BuiltInFamRewrite] axModRewrites = [ mkRewriteAxiom tc "Mod1" [sn] [var sn, num 1] (num 0) -- s `mod` 1 --> 0 , mkBinConstFoldAxiom tc "ModDef" isNumLitTy isNumLitTy $ -- 8 `mod` 3 --> 2 \x y -> do { guard (y /= 0); return (num (mod x y)) } ] where tc = typeNatModTyCon ------------------------------------------------------------------------------- -- Exponentiation: Exp ------------------------------------------------------------------------------- 
typeNatExpTyCon :: TyCon -- Exponentiation typeNatExpTyCon = mkTypeNatFunTyCon2 name BuiltInSynFamily { sfMatchFam = axExpRewrites , sfInteract = axExpInjectivity } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "^") typeNatExpTyFamNameKey typeNatExpTyCon axExpRewrites :: [BuiltInFamRewrite] axExpRewrites = [ mkRewriteAxiom tc "Exp0R" [sn] [var sn, num 0] (num 1) -- s ^ 0 --> 1 , mkRewriteAxiom tc "Exp1L" [tn] [num 1, var tn] (num 1) -- 1 ^ t --> 1 , mkRewriteAxiom tc "Exp1R" [sn] [var sn, num 1] (var sn) -- s ^ 1 --> s , mkBinConstFoldAxiom tc "ExpDef" isNumLitTy isNumLitTy $ -- 2 ^ 3 --> 8 \x y -> Just (num (x ^ y)) ] where tc = typeNatExpTyCon axExpInjectivity :: [BuiltInFamInjectivity] axExpInjectivity = [ -- (s ^ t ~ 0) => (s ~ 0) mkTopBinFamDeduction "ExpT1" tc $ \ s _t r -> do { 0 <- isNumLitTy r; return (Pair s r) } , -- (2 ^ t ~ 8) => (t ~ 3) mkTopBinFamDeduction "ExpT2" tc $ \ s t r -> do { ns <- isNumLitTy s; nr <- isNumLitTy r; y <- logExact nr ns; return (Pair t (num y)) } , -- (s ^ 2 ~ 9) => (s ~ 3) mkTopBinFamDeduction "ExpT3" tc $ \ s t r -> do { nt <- isNumLitTy t; nr <- isNumLitTy r; y <- rootExact nr nt; return (Pair s (num y)) } , mkBinBIF "ExpI-xx" tc ArgX ArgX (numGuard (> 1)) -- (x^y1 ~ x^y2) {x>1}=> (y1 ~ y2) , mkBinBIF "ExpI-yy" tc ArgY ArgY (numGuard (/= 0)) -- (x1*y ~ x2*y) {y/=0}=> (x1 ~ x2) ] where tc = typeNatExpTyCon ------------------------------------------------------------------------------- -- Logarithm: Log2 ------------------------------------------------------------------------------- typeNatLogTyCon :: TyCon typeNatLogTyCon = mkTypeNatFunTyCon1 name BuiltInSynFamily { sfMatchFam = axLogRewrites , sfInteract = [] } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS (fsLit "Log2") typeNatLogTyFamNameKey typeNatLogTyCon axLogRewrites :: [BuiltInFamRewrite] axLogRewrites = [ mkUnaryConstFoldAxiom tc "LogDef" isNumLitTy $ -- log 8 --> 3 \x -> do { (a,_) <- genLog x 2; return (num a) } ] where 
tc = typeNatLogTyCon ------------------------------------------------------------------------------- -- Comparision of Nats: CmpNat ------------------------------------------------------------------------------- typeNatCmpTyCon :: TyCon typeNatCmpTyCon = mkFamilyTyCon name (mkTemplateAnonTyConBinders [ naturalTy, naturalTy ]) orderingKind Nothing (BuiltInSynFamTyCon ops) Nothing NotInjective where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPENATS_INTERNAL (fsLit "CmpNat") typeNatCmpTyFamNameKey typeNatCmpTyCon ops = BuiltInSynFamily { sfMatchFam = axCmpNatRewrites , sfInteract = axCmpNatInjectivity } axCmpNatRewrites :: [BuiltInFamRewrite] axCmpNatRewrites = [ mkRewriteAxiom tc "CmpNatRefl" [sn] [var sn, var sn] (ordering EQ) -- s `cmp` s --> EQ , mkBinConstFoldAxiom tc "CmpNatDef" isNumLitTy isNumLitTy $ -- 2 `cmp` 3 --> LT \x y -> Just (ordering (compare x y)) ] where tc = typeNatCmpTyCon axCmpNatInjectivity :: [BuiltInFamInjectivity] axCmpNatInjectivity = [ -- s `cmp` t ~ EQ ==> s ~ t mkTopBinFamDeduction "CmpNatT3" typeNatCmpTyCon $ \ s t r -> do { EQ <- isOrderingLitTy r; return (Pair s t) } ] ------------------------------------------------------------------------------- -- Comparsion of Symbols: CmpSymbol ------------------------------------------------------------------------------- typeSymbolCmpTyCon :: TyCon typeSymbolCmpTyCon = mkFamilyTyCon name (mkTemplateAnonTyConBinders [typeSymbolKind, typeSymbolKind]) orderingKind Nothing (BuiltInSynFamTyCon ops) Nothing NotInjective where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS_INTERNAL (fsLit "CmpSymbol") typeSymbolCmpTyFamNameKey typeSymbolCmpTyCon ops = BuiltInSynFamily { sfMatchFam = axSymbolCmpRewrites , sfInteract = axSymbolCmpInjectivity } ss,ts :: TyVar -- Of kind Symbol (ss: ts: _) = mkTemplateTyVars (repeat typeSymbolKind) axSymbolCmpRewrites :: [BuiltInFamRewrite] axSymbolCmpRewrites = [ mkRewriteAxiom tc "CmpSymbolRefl" [ss] [var ss, var ss] (ordering EQ) -- s `cmp` s --> EQ 
, mkBinConstFoldAxiom tc "CmpSymbolDef" isStrLitTy isStrLitTy $ -- "a" `cmp` "b" --> LT \x y -> Just (ordering (lexicalCompareFS x y)) ] where tc = typeSymbolCmpTyCon axSymbolCmpInjectivity :: [BuiltInFamInjectivity] axSymbolCmpInjectivity = [ mkTopBinFamDeduction "CmpSymbolT" typeSymbolCmpTyCon $ \ s t r -> do { EQ <- isOrderingLitTy r; return (Pair s t) } ] ------------------------------------------------------------------------------- -- AppendSymbol ------------------------------------------------------------------------------- typeSymbolAppendTyCon :: TyCon typeSymbolAppendTyCon = mkTypeSymbolFunTyCon2 name BuiltInSynFamily { sfMatchFam = axAppendRewrites , sfInteract = axAppendInjectivity } where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS (fsLit "AppendSymbol") typeSymbolAppendFamNameKey typeSymbolAppendTyCon axAppendRewrites :: [BuiltInFamRewrite] axAppendRewrites = [ mkRewriteAxiom tc "Concat0R" [ts] [nullStrLitTy, var ts] (var ts) -- "" ++ t --> t , mkRewriteAxiom tc "Concat0L" [ss] [var ss, nullStrLitTy] (var ss) -- s ++ "" --> s , mkBinConstFoldAxiom tc "AppendSymbolDef" isStrLitTy isStrLitTy $ -- "a" ++ "b" --> "ab" \x y -> Just (mkStrLitTy (appendFS x y)) ] where tc = typeSymbolAppendTyCon axAppendInjectivity :: [BuiltInFamInjectivity] axAppendInjectivity = [ -- (AppendSymbol a b ~ "") => (a ~ "") mkTopBinFamDeduction "AppendSymbolT1" tc $ \ a _b r -> do { rs <- isStrLitTy r; guard (nullFS rs); return (Pair a nullStrLitTy) } , -- (AppendSymbol a b ~ "") => (b ~ "") mkTopBinFamDeduction "AppendSymbolT2" tc $ \ _a b r -> do { rs <- isStrLitTy r; guard (nullFS rs); return (Pair b nullStrLitTy) } , -- (AppendSymbol "foo" b ~ "foobar") => (b ~ "bar") mkTopBinFamDeduction "AppendSymbolT3" tc $ \ a b r -> do { as <- isStrLitTyS a; rs <- isStrLitTyS r; guard (as `isPrefixOf` rs) ; return (Pair b (mkStrLitTyS (drop (length as) rs))) } , -- (AppendSymbol f "bar" ~ "foobar") => (f ~ "foo") mkTopBinFamDeduction "AppendSymbolT3" tc $ \ a b r -> do { 
        bs <- isStrLitTyS b; rs <- isStrLitTyS r; guard (bs `isSuffixOf` rs)
      ; return (Pair a (mkStrLitTyS (take (length rs - length bs) rs))) }
    , mkBinBIF "AppI-xx" tc ArgX ArgX noGuard  -- (x++y1 ~ x++y2) => (y1 ~ y2)
    , mkBinBIF "AppI-yy" tc ArgY ArgY noGuard  -- (x1++y ~ x2++y) => (x1 ~ x2)
    ]
  where
    tc = typeSymbolAppendTyCon

-------------------------------------------------------------------------------
--                   ConsSymbol
-------------------------------------------------------------------------------

-- | The built-in @ConsSymbol@ type family: @Char -> Symbol -> Symbol@.
-- Declared injective in both arguments.
typeConsSymbolTyCon :: TyCon
typeConsSymbolTyCon =
  mkFamilyTyCon name
    (mkTemplateAnonTyConBinders [ charTy, typeSymbolKind ])
    typeSymbolKind
    Nothing
    (BuiltInSynFamTyCon ops)
    Nothing
    (Injective [True, True])
  where
    name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS (fsLit "ConsSymbol")
             typeConsSymbolTyFamNameKey typeConsSymbolTyCon
    ops = BuiltInSynFamily { sfMatchFam = axConsRewrites
                           , sfInteract = axConsInjectivity }

-- | Rewrite rules for reducing saturated applications of @ConsSymbol@.
axConsRewrites :: [BuiltInFamRewrite]
axConsRewrites
  = [ mkBinConstFoldAxiom tc "ConsSymbolDef" isCharLitTy isStrLitTy $  -- 'a' : "bc" --> "abc"
      \x y -> Just $ mkStrLitTy (consFS x y) ]
  where
    tc = typeConsSymbolTyCon

-- | Deductions for @ConsSymbol@.
axConsInjectivity :: [BuiltInFamInjectivity]
axConsInjectivity
  = [ -- ConsSymbol a b ~ "blah" => (a ~ 'b')
      mkTopBinFamDeduction "ConsSymbolT1" tc $ \ a _b r ->
      do { rs <- isStrLitTy r; (x,_) <- unconsFS rs; return (Pair a (mkCharLitTy x)) }
    , -- ConsSymbol a b ~ "blah" => (b ~ "lah")
      mkTopBinFamDeduction "ConsSymbolT2" tc $ \ _a b r ->
      do { rs <- isStrLitTy r; (_,xs) <- unconsFS rs; return (Pair b (mkStrLitTy xs)) }
    , mkBinBIF "ConsI-xx" tc ArgX ArgX noGuard  -- (x:y1 ~ x:y2) => (y1 ~ y2)
    , mkBinBIF "ConsI-yy" tc ArgY ArgY noGuard  -- (x1:y ~ x2:y) => (x1 ~ x2)
    ]
  where
    tc = typeConsSymbolTyCon

-------------------------------------------------------------------------------
--                   UnconsSymbol
-------------------------------------------------------------------------------

-- | The built-in @UnconsSymbol@ type family:
-- @Symbol -> Maybe (Char, Symbol)@.  Declared injective.
typeUnconsSymbolTyCon :: TyCon
typeUnconsSymbolTyCon = mkFamilyTyCon name
(mkTemplateAnonTyConBinders [ typeSymbolKind ]) (mkMaybeTy charSymbolPairKind) Nothing (BuiltInSynFamTyCon ops) Nothing (Injective [True]) where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS (fsLit "UnconsSymbol") typeUnconsSymbolTyFamNameKey typeUnconsSymbolTyCon ops = BuiltInSynFamily { sfMatchFam = axUnconsRewrites , sfInteract = axUnconsInjectivity } computeUncons :: FastString -> Type computeUncons str = mkPromotedMaybeTy charSymbolPairKind (fmap reify (unconsFS str)) where reify :: (Char, FastString) -> Type reify (c, s) = charSymbolPair (mkCharLitTy c) (mkStrLitTy s) axUnconsRewrites :: [BuiltInFamRewrite] axUnconsRewrites = [ mkUnaryConstFoldAxiom tc "ConsSymbolDef" isStrLitTy $ -- 'a' : "bc" --> "abc" \x -> Just $ computeUncons x ] where tc = typeUnconsSymbolTyCon axUnconsInjectivity :: [BuiltInFamInjectivity] axUnconsInjectivity = [ -- (UnconsSymbol b ~ Nothing) => (b ~ "") mkTopUnaryFamDeduction "UnconsSymbolT1" tc $ \b r -> do { Nothing <- isPromotedMaybeTy r; return (Pair b nullStrLitTy) } , -- (UnconsSymbol b ~ Just ('f',"oobar")) => (b ~ "foobar") mkTopUnaryFamDeduction "UnconsSymbolT2" tc $ \b r -> do { Just pr <- isPromotedMaybeTy r ; (c,s) <- isPromotedPairType pr ; chr <- isCharLitTy c ; str <- isStrLitTy s ; return (Pair b (mkStrLitTy (consFS chr str))) } , mkUnaryBIF "UnconsI1" tc -- (UnconsSymbol x1 ~ z, UnconsSymbol x2 ~ z) => (x1 ~ x2) ] where tc = typeUnconsSymbolTyCon ------------------------------------------------------------------------------- -- CharToNat ------------------------------------------------------------------------------- typeCharToNatTyCon :: TyCon typeCharToNatTyCon = mkFamilyTyCon name (mkTemplateAnonTyConBinders [ charTy ]) naturalTy Nothing (BuiltInSynFamTyCon ops) Nothing (Injective [True]) where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS (fsLit "CharToNat") typeCharToNatTyFamNameKey typeCharToNatTyCon ops = BuiltInSynFamily { sfMatchFam = axCharToNatRewrites , sfInteract = 
axCharToNatInjectivity } axCharToNatRewrites :: [BuiltInFamRewrite] axCharToNatRewrites = [ mkUnaryConstFoldAxiom tc "CharToNatDef" isCharLitTy $ -- CharToNat 'a' --> 97 \x -> Just $ num (charToInteger x) ] where tc = typeCharToNatTyCon axCharToNatInjectivity :: [BuiltInFamInjectivity] axCharToNatInjectivity = [ -- (CharToNat c ~ 122) => (c ~ 'z') mkTopUnaryFamDeduction "CharToNatT1" typeCharToNatTyCon $ \c r -> do { nr <- isNumLitTy r; chr <- integerToChar nr; return (Pair c (mkCharLitTy chr)) } ] ------------------------------------------------------------------------------- -- NatToChar ------------------------------------------------------------------------------- typeNatToCharTyCon :: TyCon typeNatToCharTyCon = mkFamilyTyCon name (mkTemplateAnonTyConBinders [ naturalTy ]) charTy Nothing (BuiltInSynFamTyCon ops) Nothing (Injective [True]) where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS (fsLit "NatToChar") typeNatToCharTyFamNameKey typeNatToCharTyCon ops = BuiltInSynFamily { sfMatchFam = axNatToCharRewrites , sfInteract = axNatToCharInjectivity } axNatToCharRewrites :: [BuiltInFamRewrite] axNatToCharRewrites = [ mkUnaryConstFoldAxiom tc "NatToCharDef" isNumLitTy $ -- NatToChar 97 --> 'a' \n -> fmap mkCharLitTy (integerToChar n) ] where tc = typeNatToCharTyCon axNatToCharInjectivity :: [BuiltInFamInjectivity] axNatToCharInjectivity = [ -- (NatToChar n ~ 'z') => (n ~ 122) mkTopUnaryFamDeduction "CharToNatT1" typeNatToCharTyCon $ \n r -> do { c <- isCharLitTy r; return (Pair n (mkNumLitTy (charToInteger c))) } ] ----------------------------------------------------------------------------- -- CmpChar ----------------------------------------------------------------------------- typeCharCmpTyCon :: TyCon typeCharCmpTyCon = mkFamilyTyCon name (mkTemplateAnonTyConBinders [ charTy, charTy ]) orderingKind Nothing (BuiltInSynFamTyCon ops) Nothing NotInjective where name = mkWiredInTyConName UserSyntax gHC_INTERNAL_TYPELITS_INTERNAL (fsLit "CmpChar") 
             typeCharCmpTyFamNameKey typeCharCmpTyCon
    ops = BuiltInSynFamily { sfMatchFam = axCharCmpRewrites
                           , sfInteract = axCharCmpInjectivity }

-- Shared template variable of kind Char, used by the axioms below.
sc :: TyVar  -- Of kind Char
(sc: _) = mkTemplateTyVars (repeat charTy)

-- | Rewrite rules for reducing saturated applications of @CmpChar@.
axCharCmpRewrites :: [BuiltInFamRewrite]
axCharCmpRewrites
  = [ mkRewriteAxiom tc "CmpCharRefl" [sc] [var sc, var sc] (ordering EQ)  -- s `cmp` s --> EQ
    , mkBinConstFoldAxiom tc "CmpCharDef" isCharLitTy isCharLitTy $        -- 'a' `cmp` 'b' --> LT
      \chr1 chr2 -> Just $ ordering $ compare chr1 chr2 ]
  where
    tc = typeCharCmpTyCon

-- | Deductions for @CmpChar@: an EQ result forces the arguments equal.
axCharCmpInjectivity :: [BuiltInFamInjectivity]
axCharCmpInjectivity
  = [ -- (CmpChar s t ~ EQ) => s ~ t
      mkTopBinFamDeduction "CmpCharT" typeCharCmpTyCon $ \ s t r ->
      do { EQ <- isOrderingLitTy r; return (Pair s t) } ]

{-------------------------------------------------------------------------------
Various utilities for making axioms and types
-------------------------------------------------------------------------------}

-- | Pair two types into an equality deduction.
(===) :: Type -> Type -> Pair Type
x === y = Pair x y

-- | A type-level natural-number literal.
num :: Integer -> Type
num = mkNumLitTy

var :: TyVar -> Type
var = mkTyVarTy

-- | An application of the built-in type-level addition family.
(.+.) :: Type -> Type -> Type
s .+. t = mkTyConApp typeNatAddTyCon [s,t]

{- Currently-unused builders for the other built-in families:

(.-.) :: Type -> Type -> Type
s .-. t = mkTyConApp typeNatSubTyCon [s,t]

(.*.) :: Type -> Type -> Type
s .*. t = mkTyConApp typeNatMulTyCon [s,t]

tDiv :: Type -> Type -> Type
tDiv s t = mkTyConApp typeNatDivTyCon [s,t]

tMod :: Type -> Type -> Type
tMod s t = mkTyConApp typeNatModTyCon [s,t]

(.^.) :: Type -> Type -> Type
s .^.
t = mkTyConApp typeNatExpTyCon [s,t]

cmpNat :: Type -> Type -> Type
cmpNat s t = mkTyConApp typeNatCmpTyCon [s,t]

cmpSymbol :: Type -> Type -> Type
cmpSymbol s t = mkTyConApp typeSymbolCmpTyCon [s,t]

appendSymbol :: Type -> Type -> Type
appendSymbol s t = mkTyConApp typeSymbolAppendTyCon [s, t]
-}

nullStrLitTy :: Type  -- The type ""
nullStrLitTy = mkStrLitTy nilFS

-- | View a type as a literal symbol, unpacked to a 'String'.
isStrLitTyS :: Type -> Maybe String
isStrLitTyS ty = do { fs <- isStrLitTy ty; return (unpackFS fs) }

-- | Make a literal-symbol type from a 'String'.
mkStrLitTyS :: String -> Type
mkStrLitTyS s = mkStrLitTy (mkFastString s)

-- | Promoted pair of kind @(Char, Symbol)@.
charSymbolPair :: Type -> Type -> Type
charSymbolPair = mkPromotedPairTy charTy typeSymbolKind

charSymbolPairKind :: Kind
charSymbolPairKind = mkTyConApp pairTyCon [charTy, typeSymbolKind]

orderingKind :: Kind
orderingKind = mkTyConApp orderingTyCon []

-- | Promote an 'Ordering' value to a type.
ordering :: Ordering -> Type
ordering o =
  case o of
    LT -> mkTyConApp promotedLTDataCon []
    EQ -> mkTyConApp promotedEQDataCon []
    GT -> mkTyConApp promotedGTDataCon []

-- | View a type as a promoted 'Ordering' literal.
isOrderingLitTy :: Type -> Maybe Ordering
isOrderingLitTy tc =
  do (tc1,[]) <- splitTyConApp_maybe tc
     case () of
       _ | tc1 == promotedLTDataCon -> return LT
         | tc1 == promotedEQDataCon -> return EQ
         | tc1 == promotedGTDataCon -> return GT
         | otherwise                -> Nothing

-- Make a unary built-in constructor of kind: Nat -> Nat
mkTypeNatFunTyCon1 :: Name -> BuiltInSynFamily -> TyCon
mkTypeNatFunTyCon1 op tcb =
  mkFamilyTyCon op
    (mkTemplateAnonTyConBinders [ naturalTy ])
    naturalTy
    Nothing
    (BuiltInSynFamTyCon tcb)
    Nothing
    NotInjective

-- Make a binary built-in constructor of kind: Nat -> Nat -> Nat
mkTypeNatFunTyCon2 :: Name -> BuiltInSynFamily -> TyCon
mkTypeNatFunTyCon2 op tcb =
  mkFamilyTyCon op
    (mkTemplateAnonTyConBinders [ naturalTy, naturalTy ])
    naturalTy
    Nothing
    (BuiltInSynFamTyCon tcb)
    Nothing
    NotInjective

-- Make a binary built-in constructor of kind: Symbol -> Symbol -> Symbol
mkTypeSymbolFunTyCon2 :: Name -> BuiltInSynFamily -> TyCon
mkTypeSymbolFunTyCon2 op tcb =
  mkFamilyTyCon op
    (mkTemplateAnonTyConBinders [ typeSymbolKind,
                                  typeSymbolKind ])
    typeSymbolKind
    Nothing
    (BuiltInSynFamTyCon tcb)
    Nothing
    NotInjective

-- | Succeed (with unit) iff the two types are equal.
same :: Type -> Type -> Maybe ()
same ty1 ty2 = guard (ty1 `tcEqType` ty2)

-- | View a type as a numeric literal satisfying the predicate.
known :: Type -> (Integer -> Bool) -> Maybe Integer
known x p = do { nx <- isNumLitTy x; guard (p nx); return nx }

charToInteger :: Char -> Integer
charToInteger c = fromIntegral (Char.ord c)

-- | Inverse of 'charToInteger'; fails outside the 'Char' code-point range.
integerToChar :: Integer -> Maybe Char
integerToChar n | inBounds = Just (Char.chr (fromInteger n))
  where inBounds = n >= charToInteger minBound && n <= charToInteger maxBound
integerToChar _ = Nothing

{- -----------------------------------------------------------------------------
These inverse functions are used for simplifying propositions using
concrete natural numbers.
----------------------------------------------------------------------------- -}

-- | Subtract two natural numbers.
minus :: Integer -> Integer -> Maybe Integer
minus x y = if x >= y then Just (x - y) else Nothing

-- | Compute the exact logarithm of a natural number.
-- The logarithm base is the second argument.
logExact :: Integer -> Integer -> Maybe Integer
logExact x y = do (z,True) <- genLog x y
                  return z

-- | Divide two natural numbers.
divide :: Integer -> Integer -> Maybe Integer
divide _ 0 = Nothing
divide x y = case divMod x y of
               (a,0) -> Just a
               _     -> Nothing

-- | Compute the exact root of a natural number.
-- The second argument specifies which root we are computing.
rootExact :: Integer -> Integer -> Maybe Integer
rootExact x y = do (z,True) <- genRoot x y
                   return z

{- | Compute the n-th root of a natural number, rounded down to
the closest natural number.  The boolean indicates if the result
is exact (i.e., True means no rounding was done, False means rounded down).
The second argument specifies which root we are computing.
-}
genRoot :: Integer -> Integer -> Maybe (Integer, Bool)
genRoot _  0    = Nothing
genRoot x0 1    = Just (x0, True)
genRoot x0 root = Just (search 0 (x0+1))  -- binary search over [0, x0]
  where
    search from to =
      let x = from + div (to - from) 2
          a = x ^ root
      in case compare a x0 of
           EQ             -> (x, True)
           LT | x /= from -> search x to
              | otherwise -> (from, False)
           GT | x /= to   -> search from x
              | otherwise -> (from, False)

{- | Compute the logarithm of a number in the given base, rounded down to the
closest integer.  The boolean indicates if the result is exact
(i.e., True means no rounding happened, False means we rounded down).
The logarithm base is the second argument. -}
genLog :: Integer -> Integer -> Maybe (Integer, Bool)
genLog x 0    = if x == 1 then Just (0, True) else Nothing
genLog _ 1    = Nothing
genLog 0 _    = Nothing
genLog x base = Just (exactLoop 0 x)
  where
    -- Divide out the base while the remainder stays zero; once it does not,
    -- fall through to underLoop, which merely counts further divisions.
    exactLoop s i
      | i == 1    = (s,True)
      | i < base  = (s,False)
      | otherwise =
          let s1 = s + 1
          in s1 `seq` case divMod i base of
                        (j,r) | r == 0    -> exactLoop s1 j
                              | otherwise -> (underLoop s1 j, False)

    underLoop s i
      | i < base  = s
      | otherwise = let s1 = s + 1 in s1 `seq` underLoop s1 (div i base)
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Types/Prim.hs0000644000000000000000000016566507346545000021676 0ustar0000000000000000{-
(c) The AQUA Project, Glasgow University, 1994-1998

Wired-in knowledge about primitive types
-}

{-# LANGUAGE CPP #-}

{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-}

-- | This module defines TyCons that can't be expressed in Haskell.
--   They are all, therefore, wired-in TyCons.
C.f module "GHC.Builtin.Types" module GHC.Builtin.Types.Prim( mkTemplateKindVar, mkTemplateKindVars, mkTemplateTyVars, mkTemplateTyVarsFrom, mkTemplateKiTyVars, mkTemplateKiTyVar, mkTemplateTyConBinders, mkTemplateKindTyConBinders, mkTemplateAnonTyConBinders, alphaTyVars, alphaTyVar, betaTyVar, gammaTyVar, deltaTyVar, alphaTyVarSpec, betaTyVarSpec, gammaTyVarSpec, deltaTyVarSpec, alphaTys, alphaTy, betaTy, gammaTy, deltaTy, alphaTyVarsUnliftedRep, alphaTyVarUnliftedRep, alphaTysUnliftedRep, alphaTyUnliftedRep, runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar, runtimeRep1TyVarInf, runtimeRep2TyVarInf, runtimeRep1Ty, runtimeRep2Ty, runtimeRep3Ty, levity1TyVar, levity2TyVar, levity1TyVarInf, levity2TyVarInf, levity1Ty, levity2Ty, alphaConstraintTyVar, alphaConstraintTy, openAlphaTyVar, openBetaTyVar, openGammaTyVar, openAlphaTyVarSpec, openBetaTyVarSpec, openGammaTyVarSpec, openAlphaTy, openBetaTy, openGammaTy, levPolyAlphaTyVar, levPolyBetaTyVar, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec, levPolyAlphaTy, levPolyBetaTy, multiplicityTyVar1, multiplicityTyVar2, -- Kind constructors... 
tYPETyCon, tYPETyConName, tYPEKind, cONSTRAINTTyCon, cONSTRAINTTyConName, cONSTRAINTKind, -- Arrows funTyFlagTyCon, isArrowTyCon, fUNTyCon, fUNTyConName, ctArrowTyCon, ctArrowTyConName, ccArrowTyCon, ccArrowTyConName, tcArrowTyCon, tcArrowTyConName, unexposedPrimTyCons, exposedPrimTyCons, primTyCons, charPrimTyCon, charPrimTy, charPrimTyConName, intPrimTyCon, intPrimTy, intPrimTyConName, wordPrimTyCon, wordPrimTy, wordPrimTyConName, addrPrimTyCon, addrPrimTy, addrPrimTyConName, floatPrimTyCon, floatPrimTy, floatPrimTyConName, doublePrimTyCon, doublePrimTy, doublePrimTyConName, statePrimTyCon, mkStatePrimTy, realWorldTyCon, realWorldTy, realWorldStatePrimTy, realWorldMutableByteArrayPrimTy, proxyPrimTyCon, mkProxyPrimTy, arrayPrimTyCon, mkArrayPrimTy, byteArrayPrimTyCon, byteArrayPrimTy, smallArrayPrimTyCon, mkSmallArrayPrimTy, mutableArrayPrimTyCon, mkMutableArrayPrimTy, mutableByteArrayPrimTyCon, mkMutableByteArrayPrimTy, smallMutableArrayPrimTyCon, mkSmallMutableArrayPrimTy, mutVarPrimTyCon, mkMutVarPrimTy, mVarPrimTyCon, mkMVarPrimTy, ioPortPrimTyCon, mkIOPortPrimTy, tVarPrimTyCon, mkTVarPrimTy, stablePtrPrimTyCon, mkStablePtrPrimTy, stableNamePrimTyCon, mkStableNamePrimTy, compactPrimTyCon, compactPrimTy, bcoPrimTyCon, bcoPrimTy, weakPrimTyCon, mkWeakPrimTy, threadIdPrimTyCon, threadIdPrimTy, stackSnapshotPrimTyCon, stackSnapshotPrimTy, promptTagPrimTyCon, mkPromptTagPrimTy, int8PrimTyCon, int8PrimTy, int8PrimTyConName, word8PrimTyCon, word8PrimTy, word8PrimTyConName, int16PrimTyCon, int16PrimTy, int16PrimTyConName, word16PrimTyCon, word16PrimTy, word16PrimTyConName, int32PrimTyCon, int32PrimTy, int32PrimTyConName, word32PrimTyCon, word32PrimTy, word32PrimTyConName, int64PrimTyCon, int64PrimTy, int64PrimTyConName, word64PrimTyCon, word64PrimTy, word64PrimTyConName, eqPrimTyCon, -- ty1 ~# ty2 eqReprPrimTyCon, -- ty1 ~R# ty2 (at role Representational) eqPhantPrimTyCon, -- ty1 ~P# ty2 (at role Phantom) equalityTyCon, -- * SIMD #include 
"primop-vector-tys-exports.hs-incl" ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types ( runtimeRepTy, levityTy, unboxedTupleKind, liftedTypeKind, unliftedTypeKind , boxedRepDataConTyCon, vecRepDataConTyCon , liftedRepTy, unliftedRepTy, zeroBitRepTy , intRepDataConTy , int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy , wordRepDataConTy , word16RepDataConTy, word8RepDataConTy, word32RepDataConTy, word64RepDataConTy , addrRepDataConTy , floatRepDataConTy, doubleRepDataConTy , vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy , vec64DataConTy , int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy , int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy , word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy , doubleElemRepDataConTy , multiplicityTy , constraintKind ) import {-# SOURCE #-} GHC.Types.TyThing( mkATyCon ) import {-# SOURCE #-} GHC.Core.Type ( mkTyConApp, getLevity ) import GHC.Core.TyCon import GHC.Core.TyCo.Rep -- Doesn't need special access, but this is easier to avoid -- import loops which show up if you import Type instead import GHC.Types.Var ( TyVarBinder, TyVar,binderVar, binderVars , mkTyVar, mkTyVarBinder, mkTyVarBinders ) import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Builtin.Names import GHC.Utils.Misc ( changeLast ) import GHC.Utils.Panic ( assertPpr ) import GHC.Utils.Outputable import GHC.Data.FastString import Data.Char {- ********************************************************************* * * Building blocks * * ********************************************************************* -} mk_TYPE_app :: Type -> Type mk_TYPE_app rep = mkTyConApp tYPETyCon [rep] mk_CONSTRAINT_app :: Type -> Type mk_CONSTRAINT_app rep = mkTyConApp cONSTRAINTTyCon [rep] mkPrimTc :: FastString -> Unique -> TyCon -> Name mkPrimTc = mkGenPrimTc UserSyntax mkBuiltInPrimTc :: FastString -> Unique -> 
                     TyCon -> Name
mkBuiltInPrimTc = mkGenPrimTc BuiltInSyntax

-- Shared worker for 'mkPrimTc' and 'mkBuiltInPrimTc': a wired-in Name in
-- module GHC.Prim, pointing back at the given TyCon.
mkGenPrimTc :: BuiltInSyntax -> FastString -> Unique -> TyCon -> Name
mkGenPrimTc built_in_syntax occ key tycon
  = mkWiredInName gHC_PRIM (mkTcOccFS occ)
                  key
                  (mkATyCon tycon)
                  built_in_syntax

-- | Create a primitive 'TyCon' with the given 'Name',
-- arguments of kind 'Type' with the given 'Role's,
-- and the given result kind representation.
--
-- Only use this in "GHC.Builtin.Types.Prim".
pcPrimTyCon :: Name -> [Role] -> RuntimeRepType -> TyCon
pcPrimTyCon name roles res_rep
  = mkPrimTyCon name binders result_kind roles
  where
    bndr_kis    = liftedTypeKind <$ roles   -- one Type-kinded binder per role
    binders     = mkTemplateAnonTyConBinders bndr_kis
    result_kind = mk_TYPE_app res_rep

-- | Create a primitive nullary 'TyCon' with the given 'Name'
-- and result kind representation.
--
-- Only use this in "GHC.Builtin.Types.Prim".
pcPrimTyCon0 :: Name -> RuntimeRepType -> TyCon
pcPrimTyCon0 name res_rep
  = pcPrimTyCon name [] res_rep

-- | Create a primitive 'TyCon' like 'pcPrimTyCon', except the last
-- argument is levity-polymorphic, where the levity argument is
-- implicit and comes before other arguments
--
-- Only use this in "GHC.Builtin.Types.Prim".
pcPrimTyCon_LevPolyLastArg :: Name
                           -> [Role]  -- ^ roles of the arguments (must be non-empty),
                                      -- not including the implicit argument of kind 'Levity',
                                      -- which always has 'Nominal' role
                           -> RuntimeRepType  -- ^ representation of the fully-applied type
                           -> TyCon
pcPrimTyCon_LevPolyLastArg name roles res_rep
  = mkPrimTyCon name binders result_kind (Nominal : roles)
  where
    result_kind = mk_TYPE_app res_rep
    lev_bndr = mkNamedTyConBinder Inferred levity1TyVar
    binders  = lev_bndr : mkTemplateAnonTyConBinders anon_bndr_kis
    lev_tv   = mkTyVarTy (binderVar lev_bndr)

    -- [ Type, ..., Type, TYPE (BoxedRep l) ]
    anon_bndr_kis = changeLast (liftedTypeKind <$ roles) $
                    mk_TYPE_app $
                    mkTyConApp boxedRepDataConTyCon [lev_tv]

{- *********************************************************************
*                                                                      *
                Primitive type constructors
*                                                                      *
********************************************************************* -}

{- Note [Unexposed TyCons]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A few primitive TyCons are "unexposed", meaning:

* We don't want users to be able to write them (see #15209); i.e.
  they aren't in scope, ever.  In particular they do not appear in the
  exports of GHC.Prim: see GHC.Builtin.Utils.ghcPrimExports

* We don't want users to see them in GHCi's @:browse@ output (see #12023).
-}

primTyCons :: [TyCon]
primTyCons = unexposedPrimTyCons ++ exposedPrimTyCons

-- | Primitive 'TyCon's that are defined in GHC.Prim but not "exposed".
-- See Note [Unexposed TyCons]
unexposedPrimTyCons :: [TyCon]
unexposedPrimTyCons
  = [ eqPrimTyCon      -- (~#)
    , eqReprPrimTyCon  -- (~R#)
    , eqPhantPrimTyCon -- (~P#)

      -- These arrows are un-exposed for now
    , ctArrowTyCon  -- (=>)
    , ccArrowTyCon  -- (==>)
    , tcArrowTyCon  -- (-=>)
    ]

-- | Primitive 'TyCon's that are defined in, and exported from, GHC.Prim.
exposedPrimTyCons :: [TyCon]
exposedPrimTyCons
  = [ addrPrimTyCon
    , arrayPrimTyCon
    , byteArrayPrimTyCon
    , smallArrayPrimTyCon
    , charPrimTyCon
    , doublePrimTyCon
    , floatPrimTyCon
    , intPrimTyCon
    , int8PrimTyCon
    , int16PrimTyCon
    , int32PrimTyCon
    , int64PrimTyCon
    , bcoPrimTyCon
    , weakPrimTyCon
    , mutableArrayPrimTyCon
    , mutableByteArrayPrimTyCon
    , smallMutableArrayPrimTyCon
    , mVarPrimTyCon
    , ioPortPrimTyCon
    , tVarPrimTyCon
    , mutVarPrimTyCon
    , realWorldTyCon
    , stablePtrPrimTyCon
    , stableNamePrimTyCon
    , compactPrimTyCon
    , statePrimTyCon
    , proxyPrimTyCon
    , threadIdPrimTyCon
    , wordPrimTyCon
    , word8PrimTyCon
    , word16PrimTyCon
    , word32PrimTyCon
    , word64PrimTyCon
    , stackSnapshotPrimTyCon
    , promptTagPrimTyCon
    , fUNTyCon
    , tYPETyCon
    , cONSTRAINTTyCon
#include "primop-vector-tycons.hs-incl"
    ]

-- Wired-in 'Name's for the primitive TyCons above.  Most use 'mkPrimTc'
-- (UserSyntax); the representational/phantom equality names use
-- 'mkBuiltInPrimTc' (BuiltInSyntax).
charPrimTyConName, intPrimTyConName, int8PrimTyConName, int16PrimTyConName,
  int32PrimTyConName, int64PrimTyConName, wordPrimTyConName,
  word32PrimTyConName, word8PrimTyConName, word16PrimTyConName,
  word64PrimTyConName, addrPrimTyConName, floatPrimTyConName,
  doublePrimTyConName, statePrimTyConName, proxyPrimTyConName,
  realWorldTyConName, arrayPrimTyConName, smallArrayPrimTyConName,
  byteArrayPrimTyConName, mutableArrayPrimTyConName,
  mutableByteArrayPrimTyConName, smallMutableArrayPrimTyConName,
  mutVarPrimTyConName, mVarPrimTyConName, ioPortPrimTyConName,
  tVarPrimTyConName, stablePtrPrimTyConName, stableNamePrimTyConName,
  compactPrimTyConName, bcoPrimTyConName, weakPrimTyConName,
  threadIdPrimTyConName, eqPrimTyConName, eqReprPrimTyConName,
  eqPhantPrimTyConName, stackSnapshotPrimTyConName,
  promptTagPrimTyConName :: Name

charPrimTyConName             = mkPrimTc (fsLit "Char#") charPrimTyConKey charPrimTyCon
intPrimTyConName              = mkPrimTc (fsLit "Int#") intPrimTyConKey  intPrimTyCon
int8PrimTyConName             = mkPrimTc (fsLit "Int8#") int8PrimTyConKey int8PrimTyCon
int16PrimTyConName            = mkPrimTc (fsLit "Int16#") int16PrimTyConKey int16PrimTyCon
int32PrimTyConName            = mkPrimTc (fsLit "Int32#") int32PrimTyConKey int32PrimTyCon
int64PrimTyConName            = mkPrimTc (fsLit "Int64#") int64PrimTyConKey int64PrimTyCon
wordPrimTyConName             = mkPrimTc (fsLit "Word#") wordPrimTyConKey wordPrimTyCon
word8PrimTyConName            = mkPrimTc (fsLit "Word8#") word8PrimTyConKey word8PrimTyCon
word16PrimTyConName           = mkPrimTc (fsLit "Word16#") word16PrimTyConKey word16PrimTyCon
word32PrimTyConName           = mkPrimTc (fsLit "Word32#") word32PrimTyConKey word32PrimTyCon
word64PrimTyConName           = mkPrimTc (fsLit "Word64#") word64PrimTyConKey word64PrimTyCon
addrPrimTyConName             = mkPrimTc (fsLit "Addr#") addrPrimTyConKey addrPrimTyCon
floatPrimTyConName            = mkPrimTc (fsLit "Float#") floatPrimTyConKey floatPrimTyCon
doublePrimTyConName           = mkPrimTc (fsLit "Double#") doublePrimTyConKey doublePrimTyCon
statePrimTyConName            = mkPrimTc (fsLit "State#") statePrimTyConKey statePrimTyCon
proxyPrimTyConName            = mkPrimTc (fsLit "Proxy#") proxyPrimTyConKey proxyPrimTyCon
eqPrimTyConName               = mkPrimTc (fsLit "~#") eqPrimTyConKey eqPrimTyCon
eqReprPrimTyConName           = mkBuiltInPrimTc (fsLit "~R#") eqReprPrimTyConKey eqReprPrimTyCon
eqPhantPrimTyConName          = mkBuiltInPrimTc (fsLit "~P#") eqPhantPrimTyConKey eqPhantPrimTyCon
realWorldTyConName            = mkPrimTc (fsLit "RealWorld") realWorldTyConKey realWorldTyCon
arrayPrimTyConName            = mkPrimTc (fsLit "Array#") arrayPrimTyConKey arrayPrimTyCon
byteArrayPrimTyConName        = mkPrimTc (fsLit "ByteArray#") byteArrayPrimTyConKey byteArrayPrimTyCon
smallArrayPrimTyConName       = mkPrimTc (fsLit "SmallArray#") smallArrayPrimTyConKey smallArrayPrimTyCon
mutableArrayPrimTyConName     = mkPrimTc (fsLit "MutableArray#") mutableArrayPrimTyConKey mutableArrayPrimTyCon
mutableByteArrayPrimTyConName = mkPrimTc (fsLit "MutableByteArray#") mutableByteArrayPrimTyConKey mutableByteArrayPrimTyCon
smallMutableArrayPrimTyConName= mkPrimTc (fsLit "SmallMutableArray#") smallMutableArrayPrimTyConKey smallMutableArrayPrimTyCon
mutVarPrimTyConName           = mkPrimTc (fsLit "MutVar#") mutVarPrimTyConKey mutVarPrimTyCon
ioPortPrimTyConName           = mkPrimTc (fsLit "IOPort#") ioPortPrimTyConKey ioPortPrimTyCon
mVarPrimTyConName             = mkPrimTc (fsLit "MVar#") mVarPrimTyConKey mVarPrimTyCon
tVarPrimTyConName             = mkPrimTc (fsLit "TVar#") tVarPrimTyConKey tVarPrimTyCon
stablePtrPrimTyConName        = mkPrimTc (fsLit "StablePtr#") stablePtrPrimTyConKey stablePtrPrimTyCon
stableNamePrimTyConName       = mkPrimTc (fsLit "StableName#") stableNamePrimTyConKey stableNamePrimTyCon
compactPrimTyConName          = mkPrimTc (fsLit "Compact#") compactPrimTyConKey compactPrimTyCon
stackSnapshotPrimTyConName    = mkPrimTc (fsLit "StackSnapshot#") stackSnapshotPrimTyConKey stackSnapshotPrimTyCon
bcoPrimTyConName              = mkPrimTc (fsLit "BCO") bcoPrimTyConKey bcoPrimTyCon
weakPrimTyConName             = mkPrimTc (fsLit "Weak#") weakPrimTyConKey weakPrimTyCon
threadIdPrimTyConName         = mkPrimTc (fsLit "ThreadId#") threadIdPrimTyConKey threadIdPrimTyCon
promptTagPrimTyConName        = mkPrimTc (fsLit "PromptTag#") promptTagPrimTyConKey promptTagPrimTyCon

{- *********************************************************************
*                                                                      *
                Type variables
*                                                                      *
********************************************************************* -}

{-
alphaTyVars is a list of type variables for use in templates:
        ["a", "b", ..., "z", "t1", "t2", ... ]
-}

-- | A single template kind variable, named "k".
mkTemplateKindVar :: Kind -> TyVar
mkTemplateKindVar = mkTyVar (mk_tv_name 0 "k")

mkTemplateKindVars :: [Kind] -> [TyVar]
-- k0  with unique (mkAlphaTyVarUnique 0)
-- k1  with unique (mkAlphaTyVarUnique 1)
-- ... etc
mkTemplateKindVars [kind] = [mkTemplateKindVar kind]
  -- Special case for one kind: just "k"
mkTemplateKindVars kinds
  = [ mkTyVar (mk_tv_name u ('k' : show u)) kind
    | (kind, u) <- kinds `zip` [0..] ]

-- Internal template-variable name with the given alpha-tyvar unique.
mk_tv_name :: Int -> String -> Name
mk_tv_name u s = mkInternalName (mkAlphaTyVarUnique u)
                                (mkTyVarOccFS (mkFastString s))
                                noSrcSpan

mkTemplateTyVarsFrom :: Int -> [Kind] -> [TyVar]
-- a  with unique (mkAlphaTyVarUnique n)
-- b  with unique (mkAlphaTyVarUnique n+1)
-- ...
-- etc
-- Typically called as
--    mkTemplateTyVarsFrom (length kv_bndrs) kinds
-- where kv_bndrs are the kind-level binders of a TyCon
mkTemplateTyVarsFrom n kinds
  = [ mkTyVar name kind
    | (kind, index) <- zip kinds [0..]
    , let ch_ord = index + ord 'a'
          name_str | ch_ord <= ord 'z' = [chr ch_ord]
                   | otherwise         = 't':show index
          name = mk_tv_name (index + n) name_str
    ]

mkTemplateTyVars :: [Kind] -> [TyVar]
mkTemplateTyVars = mkTemplateTyVarsFrom 1

mkTemplateTyConBinders
    :: [Kind]                -- [k1, .., kn]   Kinds of kind-forall'd vars
    -> ([Kind] -> [Kind])    -- Arg is [kv1:k1, ..., kvn:kn]
                             --     same length as first arg
                             -- Result is anon arg kinds
    -> [TyConBinder]
mkTemplateTyConBinders kind_var_kinds mk_anon_arg_kinds
  = kv_bndrs ++ tv_bndrs
  where
    kv_bndrs   = mkTemplateKindTyConBinders kind_var_kinds
    anon_kinds = mk_anon_arg_kinds (mkTyVarTys (binderVars kv_bndrs))
    tv_bndrs   = mkTemplateAnonTyConBindersFrom (length kv_bndrs) anon_kinds

mkTemplateKiTyVars
    :: [Kind]                -- [k1, .., kn]   Kinds of kind-forall'd vars
    -> ([Kind] -> [Kind])    -- Arg is [kv1:k1, ..., kvn:kn]
                             --     same length as first arg
                             -- Result is anon arg kinds [ak1, .., akm]
    -> [TyVar]               -- [kv1:k1, ..., kvn:kn, av1:ak1, ..., avm:akm]
-- Example: if you want the tyvars for
--     forall (r::RuntimeRep) (a::TYPE r) (b::Type). blah
-- call mkTemplateKiTyVars [RuntimeRep] (\[r] -> [TYPE r, Type])
mkTemplateKiTyVars kind_var_kinds mk_arg_kinds
  = kv_bndrs ++ tv_bndrs
  where
    kv_bndrs   = mkTemplateKindVars kind_var_kinds
    anon_kinds = mk_arg_kinds (mkTyVarTys kv_bndrs)
    tv_bndrs   = mkTemplateTyVarsFrom (length kv_bndrs) anon_kinds

mkTemplateKiTyVar
    :: Kind                  -- [k1, .., kn]   Kind of kind-forall'd var
    -> (Kind -> [Kind])      -- Arg is kv1:k1
                             -- Result is anon arg kinds [ak1, .., akm]
    -> [TyVar]               -- [kv1:k1, ..., kvn:kn, av1:ak1, ..., avm:akm]
-- Example: if you want the tyvars for
--     forall (r::RuntimeRep) (a::TYPE r) (b::Type). blah
-- call mkTemplateKiTyVar RuntimeRep (\r -> [TYPE r, Type])
mkTemplateKiTyVar kind mk_arg_kinds
  = kv_bndr : tv_bndrs
  where
    kv_bndr    = mkTemplateKindVar kind
    anon_kinds = mk_arg_kinds (mkTyVarTy kv_bndr)
    tv_bndrs   = mkTemplateTyVarsFrom 1 anon_kinds

mkTemplateKindTyConBinders :: [Kind] -> [TyConBinder]
-- Makes named, Specified binders
mkTemplateKindTyConBinders kinds
  = [mkNamedTyConBinder Specified tv | tv <- mkTemplateKindVars kinds]

mkTemplateAnonTyConBinders :: [Kind] -> [TyConBinder]
mkTemplateAnonTyConBinders kinds = mkAnonTyConBinders (mkTemplateTyVars kinds)

mkTemplateAnonTyConBindersFrom :: Int -> [Kind] -> [TyConBinder]
mkTemplateAnonTyConBindersFrom n kinds = mkAnonTyConBinders (mkTemplateTyVarsFrom n kinds)

-- Template type variables of kind Type: "a", "b", "c", ...
alphaTyVars :: [TyVar]
alphaTyVars = mkTemplateTyVars $ repeat liftedTypeKind

alphaTyVar, betaTyVar, gammaTyVar, deltaTyVar :: TyVar
(alphaTyVar:betaTyVar:gammaTyVar:deltaTyVar:_) = alphaTyVars

alphaTyVarSpec, betaTyVarSpec, gammaTyVarSpec, deltaTyVarSpec :: TyVarBinder
(alphaTyVarSpec:betaTyVarSpec:gammaTyVarSpec:deltaTyVarSpec:_) = mkTyVarBinders Specified alphaTyVars

alphaConstraintTyVars :: [TyVar]
alphaConstraintTyVars = mkTemplateTyVars $ repeat constraintKind

alphaConstraintTyVar :: TyVar
(alphaConstraintTyVar:_) = alphaConstraintTyVars

alphaConstraintTy :: Type
alphaConstraintTy = mkTyVarTy alphaConstraintTyVar

alphaTys :: [Type]
alphaTys = mkTyVarTys alphaTyVars

alphaTy, betaTy, gammaTy, deltaTy :: Type
(alphaTy:betaTy:gammaTy:deltaTy:_) = alphaTys

alphaTyVarsUnliftedRep :: [TyVar]
alphaTyVarsUnliftedRep = mkTemplateTyVars $ repeat unliftedTypeKind

alphaTyVarUnliftedRep :: TyVar
(alphaTyVarUnliftedRep:_) = alphaTyVarsUnliftedRep

alphaTysUnliftedRep :: [Type]
alphaTysUnliftedRep = mkTyVarTys alphaTyVarsUnliftedRep

alphaTyUnliftedRep :: Type
(alphaTyUnliftedRep:_) = alphaTysUnliftedRep

runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar :: TyVar
(runtimeRep1TyVar : runtimeRep2TyVar : runtimeRep3TyVar : _)
  = drop 16 (mkTemplateTyVars (repeat runtimeRepTy))  -- selects 'q','r'

runtimeRep1TyVarInf, runtimeRep2TyVarInf :: TyVarBinder
runtimeRep1TyVarInf = mkTyVarBinder Inferred runtimeRep1TyVar
runtimeRep2TyVarInf = mkTyVarBinder Inferred runtimeRep2TyVar

runtimeRep1Ty, runtimeRep2Ty, runtimeRep3Ty :: RuntimeRepType
runtimeRep1Ty = mkTyVarTy runtimeRep1TyVar
runtimeRep2Ty = mkTyVarTy runtimeRep2TyVar
runtimeRep3Ty = mkTyVarTy runtimeRep3TyVar

openAlphaTyVar, openBetaTyVar, openGammaTyVar :: TyVar
-- alpha :: TYPE r1
-- beta  :: TYPE r2
-- gamma :: TYPE r3
[openAlphaTyVar,openBetaTyVar,openGammaTyVar]
  = mkTemplateTyVars [ mk_TYPE_app runtimeRep1Ty
                     , mk_TYPE_app runtimeRep2Ty
                     , mk_TYPE_app runtimeRep3Ty]

openAlphaTyVarSpec, openBetaTyVarSpec, openGammaTyVarSpec :: TyVarBinder
openAlphaTyVarSpec = mkTyVarBinder Specified openAlphaTyVar
openBetaTyVarSpec  = mkTyVarBinder Specified openBetaTyVar
openGammaTyVarSpec = mkTyVarBinder Specified openGammaTyVar

openAlphaTy, openBetaTy, openGammaTy :: Type
openAlphaTy = mkTyVarTy openAlphaTyVar
openBetaTy  = mkTyVarTy openBetaTyVar
openGammaTy = mkTyVarTy openGammaTyVar

levity1TyVar, levity2TyVar :: TyVar
(levity2TyVar : levity1TyVar : _)   -- NB: levity2TyVar before levity1TyVar
  = drop 10 (mkTemplateTyVars (repeat levityTy))  -- selects 'k', 'l'
-- The ordering of levity2TyVar before levity1TyVar is chosen so that
-- the more common levity1TyVar uses the levity variable 'l'.
-- | Inferred-visibility binders for the template levity variables.
-- Used when a wired-in type quantifies over levity invisibly, i.e. the
-- levity cannot be supplied with visible type application.
levity1TyVarInf, levity2TyVarInf :: TyVarBinder
levity1TyVarInf = mkTyVarBinder Inferred levity1TyVar
levity2TyVarInf = mkTyVarBinder Inferred levity2TyVar

-- | The template levity variables, as types.
levity1Ty, levity2Ty :: Type
levity1Ty = mkTyVarTy levity1TyVar
levity2Ty = mkTyVarTy levity2TyVar

-- | Levity-polymorphic template type variables:
--
-- > levPolyAlphaTyVar :: TYPE ('BoxedRep l)
-- > levPolyBetaTyVar  :: TYPE ('BoxedRep k)
--
-- (where l = levity1Ty and k = levity2Ty; see the comment on the
-- levity1TyVar/levity2TyVar pattern binding above for why the levity
-- variables are selected in that order).
levPolyAlphaTyVar, levPolyBetaTyVar :: TyVar
[levPolyAlphaTyVar, levPolyBetaTyVar]
  = mkTemplateTyVars
      [ mk_TYPE_app (mkTyConApp boxedRepDataConTyCon [levity1Ty])
      , mk_TYPE_app (mkTyConApp boxedRepDataConTyCon [levity2Ty])]
-- alpha :: TYPE ('BoxedRep l)
-- beta :: TYPE ('BoxedRep k)

-- | Specified-visibility binders for the levity-polymorphic template
-- variables, for wired-in types where they should be targetable with
-- visible type application.
levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec :: TyVarBinder
levPolyAlphaTyVarSpec = mkTyVarBinder Specified levPolyAlphaTyVar
levPolyBetaTyVarSpec = mkTyVarBinder Specified levPolyBetaTyVar

-- | The levity-polymorphic template variables, as types.
levPolyAlphaTy, levPolyBetaTy :: Type
levPolyAlphaTy = mkTyVarTy levPolyAlphaTyVar
levPolyBetaTy = mkTyVarTy levPolyBetaTyVar

-- | Template multiplicity variables.  The 'drop 13' selects particular
-- entries from the infinite template-variable supply so the variables
-- print as 'n' and 'm'; do not change the offset without checking the
-- naming scheme in mkTemplateTyVars.
multiplicityTyVar1, multiplicityTyVar2 :: TyVar
(multiplicityTyVar1 : multiplicityTyVar2 : _)
  = drop 13 (mkTemplateTyVars (repeat multiplicityTy))  -- selects 'n', 'm'

{-
************************************************************************
*                                                                      *
                FunTyCon
*                                                                      *
************************************************************************
-}

{- Note [Function type constructors and FunTy]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We have four distinct function type constructors, and a type synonym

  FUN   :: forall (m :: Multiplicity) ->
           forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
           TYPE rep1 -> TYPE rep2 -> Type

  (=>)  :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
           CONSTRAINT rep1 -> TYPE rep2 -> Type

  (==>) :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
           CONSTRAINT rep1 -> CONSTRAINT rep2 -> Constraint

  (-=>) :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
           TYPE rep1 -> CONSTRAINT rep2 -> Constraint

  type (->) = FUN Many

For efficiency, all four are always represented by FunTy
    FunTy { ft_af :: FunTyFlag, ft_mult :: Mult
          , ft_arg :: Type, ft_res :: Type }
rather than by using a TyConApp.
* The four TyCons FUN, (=>), (==>), (-=>) are all wired in. But (->) is just a regular synonym, with no special treatment; in particular it is not wired-in. * The ft_af :: FunTyFlag distinguishes the four cases. See Note [FunTyFlag] in GHC.Types.Var. * The ft_af field is redundant: it can always be gleaned from the kinds of ft_arg and ft_res. See Note [FunTyFlag] in GHC.Types.Var. * The ft_mult :: Mult field gives the first argument for FUN For the other three cases ft_mult is redundant; it is always Many. Note that of the four type constructors, only `FUN` takes a Multiplicity. * Functions in GHC.Core.Type help to build and decompose `FunTy`. * funTyConAppTy_maybe * funTyFlagTyCon * tyConAppFun_maybe * splitFunTy_maybe Use them! -} funTyFlagTyCon :: FunTyFlag -> TyCon -- `anonArgTyCon af` gets the TyCon that corresponds to the `FunTyFlag` -- But be careful: fUNTyCon has a different kind to the others! -- See Note [Function type constructors and FunTy] funTyFlagTyCon FTF_T_T = fUNTyCon funTyFlagTyCon FTF_T_C = tcArrowTyCon funTyFlagTyCon FTF_C_T = ctArrowTyCon funTyFlagTyCon FTF_C_C = ccArrowTyCon isArrowTyCon :: TyCon -> Bool -- We don't bother to look for plain (->), because this function -- should only be used after unwrapping synonyms isArrowTyCon tc = assertPpr (not (isTypeSynonymTyCon tc)) (ppr tc) getUnique tc `elem` [fUNTyConKey, ctArrowTyConKey, ccArrowTyConKey, tcArrowTyConKey] fUNTyConName, ctArrowTyConName, ccArrowTyConName, tcArrowTyConName :: Name fUNTyConName = mkPrimTc (fsLit "FUN") fUNTyConKey fUNTyCon ctArrowTyConName = mkBuiltInPrimTc (fsLit "=>") ctArrowTyConKey ctArrowTyCon ccArrowTyConName = mkBuiltInPrimTc (fsLit "==>") ccArrowTyConKey ccArrowTyCon tcArrowTyConName = mkBuiltInPrimTc (fsLit "-=>") tcArrowTyConKey tcArrowTyCon -- | The @FUN@ type constructor. -- -- @ -- FUN :: forall (m :: Multiplicity) -> -- forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}. 
--          TYPE rep1 -> TYPE rep2 -> Type
-- @
--
-- The runtime representations quantification is left inferred. This
-- means they cannot be specified with @-XTypeApplications@.
--
-- This is a deliberate choice to allow future extensions to the
-- function arrow.
fUNTyCon :: TyCon
fUNTyCon = mkPrimTyCon fUNTyConName tc_bndrs liftedTypeKind tc_roles
  where
    -- See also unrestrictedFunTyCon
    -- Binders: a Required Multiplicity, two Inferred RuntimeReps, then
    -- two anonymous binders for the argument and result kinds.
    tc_bndrs = [ mkNamedTyConBinder Required multiplicityTyVar1
               , mkNamedTyConBinder Inferred runtimeRep1TyVar
               , mkNamedTyConBinder Inferred runtimeRep2TyVar ]
               ++ mkTemplateAnonTyConBinders [ mk_TYPE_app runtimeRep1Ty
                                             , mk_TYPE_app runtimeRep2Ty ]
    -- Multiplicity and the RuntimeReps are Nominal; the actual argument
    -- and result types are Representational.  One role per binder above.
    tc_roles = [Nominal, Nominal, Nominal, Representational, Representational]

-- (=>) :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
--         CONSTRAINT rep1 -> TYPE rep2 -> Type
ctArrowTyCon :: TyCon
ctArrowTyCon = mkPrimTyCon ctArrowTyConName tc_bndrs liftedTypeKind tc_roles
  where
    -- See also unrestrictedFunTyCon
    -- Like fUNTyCon but with no multiplicity binder and a CONSTRAINT
    -- argument kind.
    tc_bndrs = [ mkNamedTyConBinder Inferred runtimeRep1TyVar
               , mkNamedTyConBinder Inferred runtimeRep2TyVar ]
               ++ mkTemplateAnonTyConBinders [ mk_CONSTRAINT_app runtimeRep1Ty
                                             , mk_TYPE_app runtimeRep2Ty ]
    tc_roles = [Nominal, Nominal, Representational, Representational]

-- (==>) :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
--          CONSTRAINT rep1 -> CONSTRAINT rep2 -> Constraint
ccArrowTyCon :: TyCon
ccArrowTyCon = mkPrimTyCon ccArrowTyConName tc_bndrs constraintKind tc_roles
  where
    -- See also unrestrictedFunTyCon
    -- Constraint-to-constraint arrow: CONSTRAINT on both sides, and the
    -- result kind is constraintKind rather than liftedTypeKind.
    tc_bndrs = [ mkNamedTyConBinder Inferred runtimeRep1TyVar
               , mkNamedTyConBinder Inferred runtimeRep2TyVar ]
               ++ mkTemplateAnonTyConBinders [ mk_CONSTRAINT_app runtimeRep1Ty
                                             , mk_CONSTRAINT_app runtimeRep2Ty ]
    tc_roles = [Nominal, Nominal, Representational, Representational]

-- (-=>) :: forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}.
-- TYPE rep1 -> CONSTRAINT rep2 -> Constraint tcArrowTyCon :: TyCon tcArrowTyCon = mkPrimTyCon tcArrowTyConName tc_bndrs constraintKind tc_roles where -- See also unrestrictedFunTyCon tc_bndrs = [ mkNamedTyConBinder Inferred runtimeRep1TyVar , mkNamedTyConBinder Inferred runtimeRep2TyVar ] ++ mkTemplateAnonTyConBinders [ mk_TYPE_app runtimeRep1Ty , mk_CONSTRAINT_app runtimeRep2Ty ] tc_roles = [Nominal, Nominal, Representational, Representational] {- ************************************************************************ * * Type and Constraint * * ************************************************************************ Note [TYPE and CONSTRAINT] aka Note [Type vs Constraint] ~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC distinguishes Type from Constraint throughout the compiler. See GHC Proposal #518, and tickets #21623 and #11715. All types that classify values have a kind of the form (TYPE rr) or (CONSTRAINT rr) where the `RuntimeRep` parameter, rr, tells us how the value is represented at runtime. TYPE and CONSTRAINT are primitive type constructors. See Note [RuntimeRep polymorphism] about the `rr` parameter. There are a bunch of type synonyms and data types defined in the library ghc-prim:GHC.Types. All of them are also wired in to GHC, in GHC.Builtin.Types type Constraint = CONSTRAINT LiftedRep :: Type type Type = TYPE LiftedRep :: Type type UnliftedType = TYPE UnliftedRep :: Type type LiftedRep = BoxedRep Lifted :: RuntimeRep type UnliftedRep = BoxedRep Unlifted :: RuntimeRep data RuntimeRep -- Defined in ghc-prim:GHC.Types = BoxedRep Levity | IntRep | FloatRep .. etc .. 
data Levity = Lifted | Unlifted We abbreviate '*' specially (with -XStarIsType), as if we had this: type * = Type So for example: Int :: TYPE (BoxedRep Lifted) Array# Int :: TYPE (BoxedRep Unlifted) Int# :: TYPE IntRep Float# :: TYPE FloatRep Maybe :: TYPE (BoxedRep Lifted) -> TYPE (BoxedRep Lifted) (# , #) :: TYPE r1 -> TYPE r2 -> TYPE (TupleRep [r1, r2]) Eq Int :: CONSTRAINT (BoxedRep Lifted) IP "foo" Int :: CONSTRAINT (BoxedRep Lifted) a ~ b :: CONSTRAINT (BoxedRep Lifted) a ~# b :: CONSTRAINT (TupleRep []) Constraints are mostly lifted, but unlifted ones are useful too. Specifically (a ~# b) :: CONSTRAINT (TupleRep []) Wrinkles (W1) Type and Constraint are considered distinct throughout GHC. But they are not /apart/: see Note [Type and Constraint are not apart] (W2) We need two absent-error Ids, aBSENT_ERROR_ID for types of kind Type, and aBSENT_CONSTRAINT_ERROR_ID for types of kind Constraint. See Note [Type vs Constraint for error ids] in GHC.Core.Make. Ditto noInlineId vs noInlineConstraintId in GHC.Types.Id.Make; see Note [inlineId magic]. (W3) We need a TypeOrConstraint flag in LitRubbish. (W4) In the CPR transformation, we can't unbox constructors with constraint arguments because unboxed tuples (# …, … #) currently only supports fields of type TYPE rr. See (CPR2) in Note [Which types are unboxed?] in GHC.Core.Opt.WorkWrap.Utils. Note [Type and Constraint are not apart] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Type and Constraint are not equal (eqType) but they are not /apart/ either. Reason (c.f. #7451): * We want to allow newtype classes, where class C a where { op :: a -> a } * The axiom for such a class will look like axiom axC a :: (C a :: Constraint) ~# (a->a :: Type) * This axiom connects a type of kind Type with one of kind Constraint That is dangerous: kindCo (axC Int) :: Type ~N Constraint And /that/ is bad because we could have type family F a where F Type = Int F Constraint = Bool So now we can prove Int ~N Bool, and all is lost. 
We prevent this by saying that Type and Constraint are not Apart, which makes the above type family instances illegal. So we ensure that Type and Constraint are not apart; or, more precisely, that TYPE and CONSTRAINT are not apart. This non-apart-ness check is implemented in GHC.Core.Unify.unify_ty: look for `maybeApart MARTypeVsConstraint`. Note that, as before, nothing prevents writing instances like: instance C (Proxy @Type a) where ... In particular, TYPE and CONSTRAINT (and the synonyms Type, Constraint etc) are all allowed in instance heads. It's just that TYPE is not apart from CONSTRAINT, which means that the above instance would irretrievably overlap with: instance C (Proxy @Constraint a) where ... Wrinkles (W1) In GHC.Core.RoughMap.roughMatchTyConName we are careful to map TYPE and CONSTRAINT to the same rough-map key. Reason: If we insert (F @Constraint tys) into a FamInstEnv, and look up (F @Type tys'), we /must/ ensure that the (C @Constraint tys) appears among the unifiables when we do the lookupRM' in GHC.Core.FamInstEnv.lookup_fam_inst_env'. So for the RoughMap we simply pretend that they are the same type constructor. If we don't, we'll treat them as fully apart, which is unsound. (W2) We must extend this treatment to the different arrow types (see Note [Function type constructors and FunTy]): if we have FunCo (axC Int) :: (C Int => Int) ~ ((Int -> Int) -> Int), then we could extract an equality between (=>) and (->). We thus must ensure that (=>) and (->) (among the other arrow combinations) are not Apart. See the FunTy/FunTy case in GHC.Core.Unify.unify_ty. (W3) Are (TYPE IntRep) and (CONSTRAINT WordRep) apart? In truth yes, they are. But it's easier to say that they are not apart, by reporting "maybeApart" (which is always safe), rather than recurse into the arguments (whose kinds may be utterly different) to look for apartness inside them. Again this is in GHC.Core.Unify.unify_ty. 
(W4) We give a different Typeable instance for Type than for Constraint. For type classes instances (unlike type family instances) it is not /unsound/ for Type and Constraint to treated as fully distinct; and for Typeable is desirable to give them different TypeReps. Certainly, - both Type and Constraint must /have/ a TypeRep, and - they had better not be the same (else eqTypeRep would give us a proof Type ~N Constraint, which we do not want So in GHC.Tc.Instance.Class.matchTypeable, Type and Constraint are treated as separate TyCons; i.e. given no special treatment. Note [RuntimeRep polymorphism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally speaking, you can't be polymorphic in `RuntimeRep`. E.g f :: forall (rr::RuntimeRep) (a::TYPE rr). a -> [a] f = /\(rr::RuntimeRep) (a::rr) \(a::rr). ... This is no good: we could not generate code for 'f', because the calling convention for 'f' varies depending on whether the argument is a a Int, Int#, or Float#. (You could imagine generating specialised code, one for each instantiation of 'rr', but we don't do that.) Certain functions CAN be runtime-rep-polymorphic, because the code generator never has to manipulate a value of type 'a :: TYPE rr'. * error :: forall (rr::RuntimeRep) (a::TYPE rr). String -> a Code generator never has to manipulate the return value. * unsafeCoerce#, defined in Desugar.mkUnsafeCoercePair: Always inlined to be a no-op unsafeCoerce# :: forall (r1 :: RuntimeRep) (r2 :: RuntimeRep) (a :: TYPE r1) (b :: TYPE r2). a -> b * Unboxed tuples, and unboxed sums, defined in GHC.Builtin.Types Always inlined, and hence specialised to the call site (#,#) :: forall (r1 :: RuntimeRep) (r2 :: RuntimeRep) (a :: TYPE r1) (b :: TYPE r2). 
                     a -> b -> TYPE ('TupleRep '[r1, r2])
-}

----------------------
-- TYPE :: RuntimeRep -> Type
-- One anonymous, Nominal argument; the result kind of TYPE itself is Type.
tYPETyCon :: TyCon
tYPETyCon = mkPrimTyCon tYPETyConName
                        (mkTemplateAnonTyConBinders [runtimeRepTy])
                        liftedTypeKind
                        [Nominal]

tYPETyConName :: Name
tYPETyConName = mkPrimTc (fsLit "TYPE") tYPETyConKey tYPETyCon

tYPEKind :: Type
tYPEKind = mkTyConTy tYPETyCon

----------------------
-- CONSTRAINT :: RuntimeRep -> Type
-- Exactly parallel to TYPE above; see Note [TYPE and CONSTRAINT].
cONSTRAINTTyCon :: TyCon
cONSTRAINTTyCon = mkPrimTyCon cONSTRAINTTyConName
                              (mkTemplateAnonTyConBinders [runtimeRepTy])
                              liftedTypeKind
                              [Nominal]

cONSTRAINTTyConName :: Name
cONSTRAINTTyConName = mkPrimTc (fsLit "CONSTRAINT") cONSTRAINTTyConKey cONSTRAINTTyCon

cONSTRAINTKind :: Type
cONSTRAINTKind = mkTyConTy cONSTRAINTTyCon

{- *********************************************************************
*                                                                      *
           Basic primitive types (Char#, Int#, etc.)
*                                                                      *
********************************************************************* -}

-- Each primitive type below is a nullary TyCon made with pcPrimTyCon0,
-- paired with the RuntimeRep data constructor giving its representation.

charPrimTy :: Type
charPrimTy = mkTyConTy charPrimTyCon
charPrimTyCon :: TyCon
charPrimTyCon = pcPrimTyCon0 charPrimTyConName wordRepDataConTy
  -- NB: Char# is word-represented; there is no separate CharRep

intPrimTy :: Type
intPrimTy = mkTyConTy intPrimTyCon
intPrimTyCon :: TyCon
intPrimTyCon = pcPrimTyCon0 intPrimTyConName intRepDataConTy

int8PrimTy :: Type
int8PrimTy = mkTyConTy int8PrimTyCon
int8PrimTyCon :: TyCon
int8PrimTyCon = pcPrimTyCon0 int8PrimTyConName int8RepDataConTy

int16PrimTy :: Type
int16PrimTy = mkTyConTy int16PrimTyCon
int16PrimTyCon :: TyCon
int16PrimTyCon = pcPrimTyCon0 int16PrimTyConName int16RepDataConTy

int32PrimTy :: Type
int32PrimTy = mkTyConTy int32PrimTyCon
int32PrimTyCon :: TyCon
int32PrimTyCon = pcPrimTyCon0 int32PrimTyConName int32RepDataConTy

int64PrimTy :: Type
int64PrimTy = mkTyConTy int64PrimTyCon
int64PrimTyCon :: TyCon
int64PrimTyCon = pcPrimTyCon0 int64PrimTyConName int64RepDataConTy

wordPrimTy :: Type
wordPrimTy = mkTyConTy wordPrimTyCon
wordPrimTyCon :: TyCon
wordPrimTyCon = pcPrimTyCon0 wordPrimTyConName wordRepDataConTy

word8PrimTy :: Type
word8PrimTy = mkTyConTy word8PrimTyCon
word8PrimTyCon :: TyCon
word8PrimTyCon = pcPrimTyCon0 word8PrimTyConName word8RepDataConTy

word16PrimTy :: Type
word16PrimTy = mkTyConTy word16PrimTyCon
word16PrimTyCon :: TyCon
word16PrimTyCon = pcPrimTyCon0 word16PrimTyConName word16RepDataConTy

word32PrimTy :: Type
word32PrimTy = mkTyConTy word32PrimTyCon
word32PrimTyCon :: TyCon
word32PrimTyCon = pcPrimTyCon0 word32PrimTyConName word32RepDataConTy

word64PrimTy :: Type
word64PrimTy = mkTyConTy word64PrimTyCon
word64PrimTyCon :: TyCon
word64PrimTyCon = pcPrimTyCon0 word64PrimTyConName word64RepDataConTy

addrPrimTy :: Type
addrPrimTy = mkTyConTy addrPrimTyCon
addrPrimTyCon :: TyCon
addrPrimTyCon = pcPrimTyCon0 addrPrimTyConName addrRepDataConTy

floatPrimTy :: Type
floatPrimTy = mkTyConTy floatPrimTyCon
floatPrimTyCon :: TyCon
floatPrimTyCon = pcPrimTyCon0 floatPrimTyConName floatRepDataConTy

doublePrimTy :: Type
doublePrimTy = mkTyConTy doublePrimTyCon
doublePrimTyCon :: TyCon
doublePrimTyCon = pcPrimTyCon0 doublePrimTyConName doubleRepDataConTy

{-
************************************************************************
*                                                                      *
        The @State#@ type (and @_RealWorld@ types)
*                                                                      *
************************************************************************

Note [The equality types story]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GHC sports a veritable menagerie of equality types:

            Type or  Lifted?  Hetero?  Role      Built in          Defining module
            class?   L/U
 ---------------------------------------------------------------------------------------
 ~#         T        U        hetero   nominal   eqPrimTyCon       GHC.Prim
 ~~         C        L        hetero   nominal   heqTyCon          GHC.Types
 ~          C        L        homo     nominal   eqTyCon           GHC.Types
 :~:        T        L        homo     nominal   (not built-in)    Data.Type.Equality
 :~~:       T        L        hetero   nominal   (not built-in)    Data.Type.Equality

 ~R#        T        U        hetero   repr      eqReprPrimTy      GHC.Prim
 Coercible  C        L        homo     repr      coercibleTyCon    GHC.Types
 Coercion   T        L        homo     repr      (not built-in)    Data.Type.Coercion

 ~P#        T        U        hetero   phantom   eqPhantPrimTyCon  GHC.Prim

Recall that "hetero" means the equality can relate types of different kinds.
Knowing that (t1 ~# t2) or (t1 ~R# t2) or even that (t1 ~P# t2) also means that (k1 ~# k2), where (t1 :: k1) and (t2 :: k2). To produce less confusion for end users, when not dumping and without -fprint-equality-relations, each of these groups is printed as the bottommost listed equality. That is, (~#) and (~~) are both rendered as (~) in error messages, and (~R#) is rendered as Coercible. Let's take these one at a time: -------------------------- (~#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- This is The Type Of Equality in GHC. It classifies nominal coercions. This type is used in the solver for recording equality constraints. It responds "yes" to Type.isEqPrimPred and classifies as an EqPred in Type.classifyPredType. All wanted constraints of this type are built with coercion holes. (See Note [Coercion holes] in GHC.Core.TyCo.Rep.) But see also Note [Deferred errors for coercion holes] in GHC.Tc.Errors to see how equality constraints are deferred. Within GHC, ~# is called eqPrimTyCon, and it is defined in GHC.Builtin.Types.Prim. -------------------------- (~~) :: forall k1 k2. k1 -> k2 -> Constraint -------------------------- This is (almost) an ordinary class, defined as if by class a ~# b => a ~~ b instance a ~# b => a ~~ b Here's what's unusual about it: * We can't actually declare it that way because we don't have syntax for ~#. And ~# isn't a constraint, so even if we could write it, it wouldn't kind check. * Users cannot write instances of it. * It is "naturally coherent". This means that the solver won't hesitate to solve a goal of type (a ~~ b) even if there is, say (Int ~~ c) in the context. (Normally, it waits to learn more, just in case the given influences what happens next.) See Note [Solving equality classes] in GHC.Tc.Solver.Dict * It always terminates. That is, in the UndecidableInstances checks, we don't worry if a (~~) constraint is too big, as we know that solving equality terminates. 
On the other hand, this behaves just like any class w.r.t. eager superclass unpacking in the solver. So a lifted equality given quickly becomes an unlifted equality given. This is good, because the solver knows all about unlifted equalities. There is some special-casing in GHC.Tc.Solver.Dict.matchClassInst to pretend that there is an instance of this class, as we can't write the instance in Haskell. Within GHC, ~~ is called heqTyCon, and it is defined in GHC.Builtin.Types. -------------------------- (~) :: forall k. k -> k -> Constraint -------------------------- This is /exactly/ like (~~), except with a homogeneous kind. It is an almost-ordinary class defined as if by class a ~# b => (a :: k) ~ (b :: k) instance a ~# b => a ~ b * All the bullets for (~~) apply * In addition (~) is magical syntax, as ~ is a reserved symbol. It cannot be exported or imported. * The data constructor of the class is "Eq#", not ":C~" Within GHC, ~ is called eqTyCon, and it is defined in GHC.Builtin.Types. Historical note: prior to July 18 (~) was defined as a more-ordinary class with (~~) as a superclass. But that made it special in different ways; and the extra superclass selections to get from (~) to (~#) via (~~) were tiresome. Now it's defined uniformly with (~~) and Coercible; much nicer.) -------------------------- (:~:) :: forall k. k -> k -> * (:~~:) :: forall k1 k2. k1 -> k2 -> * -------------------------- These are perfectly ordinary GADTs, wrapping (~) and (~~) resp. They are not defined within GHC at all. -------------------------- (~R#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- The is the representational analogue of ~#. This is the type of representational equalities that the solver works on. All wanted constraints of this type are built with coercion holes. Within GHC, ~R# is called eqReprPrimTyCon, and it is defined in GHC.Builtin.Types.Prim. -------------------------- Coercible :: forall k. 
k -> k -> Constraint -------------------------- This is quite like (~~) in the way it's defined and treated within GHC, but it's homogeneous. Homogeneity helps with type inference (as GHC can solve one kind from the other) and, in my (Richard's) estimation, will be more intuitive for users. An alternative design included HCoercible (like (~~)) and Coercible (like (~)). One annoyance was that we want `coerce :: Coercible a b => a -> b`, and we need the type of coerce to be fully wired-in. So the HCoercible/Coercible split required that both types be fully wired-in. Instead of doing this, I just got rid of HCoercible, as I'm not sure who would use it, anyway. Within GHC, Coercible is called coercibleTyCon, and it is defined in GHC.Builtin.Types. -------------------------- Coercion :: forall k. k -> k -> * -------------------------- This is a perfectly ordinary GADT, wrapping Coercible. It is not defined within GHC at all. -------------------------- (~P#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- This is the phantom analogue of ~# and it is barely used at all. (The solver has no idea about this one.) Here is the motivation: data Phant a = MkPhant type role Phant phantom Phant _P :: Phant Int ~P# Phant Bool We just need to have something to put on that last line. You probably don't need to worry about it. Note [The State# TyCon] ~~~~~~~~~~~~~~~~~~~~~~~ State# is the primitive, unlifted type of states. It has one type parameter, thus State# RealWorld or State# s where s is a type variable. The only purpose of the type parameter is to keep different state threads separate. It is represented by nothing at all. The type parameter to State# is intended to keep separate threads separate. Even though this parameter is not used in the definition of State#, it is given role Nominal to enforce its intended use. 
-}

-- | @State# s@ for the given state-thread type @s@.
mkStatePrimTy :: Type -> Type
mkStatePrimTy ty = TyConApp statePrimTyCon [ty]

statePrimTyCon :: TyCon   -- See Note [The State# TyCon]
-- One Nominal parameter (the state thread); zero-bit representation,
-- i.e. values of this type occupy no space at runtime.
statePrimTyCon = pcPrimTyCon statePrimTyConName [Nominal] zeroBitRepTy

{-
RealWorld is deeply magical.  It is *primitive*, but it is not
*unlifted* (hence ptrArg).  We never manipulate values of type
RealWorld; it's only used in the type system, to parameterise State#.
-}

-- | The @RealWorld@ TyCon: no binders, no roles, kind Type.
realWorldTyCon :: TyCon
realWorldTyCon = mkPrimTyCon realWorldTyConName [] liftedTypeKind []

realWorldTy :: Type
realWorldTy = mkTyConTy realWorldTyCon

realWorldStatePrimTy :: Type
realWorldStatePrimTy = mkStatePrimTy realWorldTy  -- State# RealWorld

realWorldMutableByteArrayPrimTy :: Type
realWorldMutableByteArrayPrimTy
  = mkMutableByteArrayPrimTy realWorldTy  -- MutableByteArray# RealWorld

-- | @Proxy# \@k ty@; the kind argument is passed explicitly.
mkProxyPrimTy :: Type -> Type -> Type
mkProxyPrimTy k ty = TyConApp proxyPrimTyCon [k, ty]

proxyPrimTyCon :: TyCon
proxyPrimTyCon = mkPrimTyCon proxyPrimTyConName binders res_kind [Nominal,Phantom]
  where
    -- Kind: forall k. k -> TYPE (TupleRep '[])
    -- The type argument is Phantom: a Proxy# carries no runtime content.
    binders = mkTemplateTyConBinders [liftedTypeKind] id
    res_kind = unboxedTupleKind []

{- *********************************************************************
*                                                                      *
            Primitive equality constraints
            See Note [The equality types story]
*                                                                      *
********************************************************************* -}

eqPrimTyCon :: TyCon  -- The representation type for equality predicates
                      -- See Note [The equality types story]
eqPrimTyCon = mkPrimTyCon eqPrimTyConName binders res_kind roles
  where
    -- Kind :: forall k1 k2. k1 -> k2 -> CONSTRAINT ZeroBitRep
    -- Fully Nominal: (~#) relates the kinds as well as the types.
    binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id
    res_kind = TyConApp cONSTRAINTTyCon [zeroBitRepTy]
    roles = [Nominal, Nominal, Nominal, Nominal]

-- like eqPrimTyCon, but the type for *Representational* coercions
-- this should only ever appear as the type of a covar.
Its role is -- interpreted in coercionRole eqReprPrimTyCon :: TyCon -- See Note [The equality types story] eqReprPrimTyCon = mkPrimTyCon eqReprPrimTyConName binders res_kind roles where -- Kind :: forall k1 k2. k1 -> k2 -> CONSTRAINT ZeroBitRep binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id res_kind = TyConApp cONSTRAINTTyCon [zeroBitRepTy] roles = [Nominal, Nominal, Representational, Representational] -- like eqPrimTyCon, but the type for *Phantom* coercions. -- This is only used to make higher-order equalities. Nothing -- should ever actually have this type! eqPhantPrimTyCon :: TyCon eqPhantPrimTyCon = mkPrimTyCon eqPhantPrimTyConName binders res_kind roles where -- Kind :: forall k1 k2. k1 -> k2 -> CONSTRAINT ZeroBitRep binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id res_kind = TyConApp cONSTRAINTTyCon [zeroBitRepTy] roles = [Nominal, Nominal, Phantom, Phantom] -- | Given a Role, what TyCon is the type of equality predicates at that role? equalityTyCon :: Role -> TyCon equalityTyCon Nominal = eqPrimTyCon equalityTyCon Representational = eqReprPrimTyCon equalityTyCon Phantom = eqPhantPrimTyCon {- ********************************************************************* * * The primitive array types * * ********************************************************************* -} arrayPrimTyCon, mutableArrayPrimTyCon, mutableByteArrayPrimTyCon, byteArrayPrimTyCon, smallArrayPrimTyCon, smallMutableArrayPrimTyCon :: TyCon arrayPrimTyCon = pcPrimTyCon_LevPolyLastArg arrayPrimTyConName [Representational] unliftedRepTy mutableArrayPrimTyCon = pcPrimTyCon_LevPolyLastArg mutableArrayPrimTyConName [Nominal, Representational] unliftedRepTy mutableByteArrayPrimTyCon = pcPrimTyCon mutableByteArrayPrimTyConName [Nominal] unliftedRepTy byteArrayPrimTyCon = pcPrimTyCon0 byteArrayPrimTyConName unliftedRepTy smallArrayPrimTyCon = pcPrimTyCon_LevPolyLastArg smallArrayPrimTyConName [Representational] unliftedRepTy smallMutableArrayPrimTyCon = 
pcPrimTyCon_LevPolyLastArg smallMutableArrayPrimTyConName [Nominal, Representational] unliftedRepTy mkArrayPrimTy :: Type -> Type mkArrayPrimTy elt = TyConApp arrayPrimTyCon [getLevity elt, elt] byteArrayPrimTy :: Type byteArrayPrimTy = mkTyConTy byteArrayPrimTyCon mkSmallArrayPrimTy :: Type -> Type mkSmallArrayPrimTy elt = TyConApp smallArrayPrimTyCon [getLevity elt, elt] mkMutableArrayPrimTy :: Type -> Type -> Type mkMutableArrayPrimTy s elt = TyConApp mutableArrayPrimTyCon [getLevity elt, s, elt] mkMutableByteArrayPrimTy :: Type -> Type mkMutableByteArrayPrimTy s = TyConApp mutableByteArrayPrimTyCon [s] mkSmallMutableArrayPrimTy :: Type -> Type -> Type mkSmallMutableArrayPrimTy s elt = TyConApp smallMutableArrayPrimTyCon [getLevity elt, s, elt] {- ********************************************************************* * * The mutable variable type * * ********************************************************************* -} mutVarPrimTyCon :: TyCon mutVarPrimTyCon = pcPrimTyCon_LevPolyLastArg mutVarPrimTyConName [Nominal, Representational] unliftedRepTy mkMutVarPrimTy :: Type -> Type -> Type mkMutVarPrimTy s elt = TyConApp mutVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * \subsection[TysPrim-io-port-var]{The synchronizing I/O Port type} * * ************************************************************************ -} ioPortPrimTyCon :: TyCon ioPortPrimTyCon = pcPrimTyCon_LevPolyLastArg ioPortPrimTyConName [Nominal, Representational] unliftedRepTy mkIOPortPrimTy :: Type -> Type -> Type mkIOPortPrimTy s elt = TyConApp ioPortPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The synchronizing variable type \subsection[TysPrim-synch-var]{The synchronizing variable type} * * ************************************************************************ -} mVarPrimTyCon :: TyCon mVarPrimTyCon = pcPrimTyCon_LevPolyLastArg mVarPrimTyConName [Nominal, 
Representational] unliftedRepTy mkMVarPrimTy :: Type -> Type -> Type mkMVarPrimTy s elt = TyConApp mVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The transactional variable type * * ************************************************************************ -} tVarPrimTyCon :: TyCon tVarPrimTyCon = pcPrimTyCon_LevPolyLastArg tVarPrimTyConName [Nominal, Representational] unliftedRepTy mkTVarPrimTy :: Type -> Type -> Type mkTVarPrimTy s elt = TyConApp tVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The stable-pointer type * * ************************************************************************ -} stablePtrPrimTyCon :: TyCon stablePtrPrimTyCon = pcPrimTyCon_LevPolyLastArg stablePtrPrimTyConName [Representational] addrRepDataConTy mkStablePtrPrimTy :: Type -> Type mkStablePtrPrimTy ty = TyConApp stablePtrPrimTyCon [getLevity ty, ty] {- ************************************************************************ * * The stable-name type * * ************************************************************************ -} stableNamePrimTyCon :: TyCon stableNamePrimTyCon = pcPrimTyCon_LevPolyLastArg stableNamePrimTyConName [Phantom] unliftedRepTy mkStableNamePrimTy :: Type -> Type mkStableNamePrimTy ty = TyConApp stableNamePrimTyCon [getLevity ty, ty] {- ************************************************************************ * * The Compact NFData (CNF) type * * ************************************************************************ -} compactPrimTyCon :: TyCon compactPrimTyCon = pcPrimTyCon0 compactPrimTyConName unliftedRepTy compactPrimTy :: Type compactPrimTy = mkTyConTy compactPrimTyCon {- ************************************************************************ * * The @StackSnapshot#@ type * * ************************************************************************ -} stackSnapshotPrimTyCon :: TyCon stackSnapshotPrimTyCon = 
pcPrimTyCon0 stackSnapshotPrimTyConName unliftedRepTy stackSnapshotPrimTy :: Type stackSnapshotPrimTy = mkTyConTy stackSnapshotPrimTyCon {- ************************************************************************ * * The ``bytecode object'' type * * ************************************************************************ -} -- Unlike most other primitive types, BCO is lifted. This is because in -- general a BCO may be a thunk for the reasons given in Note [Updatable CAF -- BCOs] in GHCi.CreateBCO. bcoPrimTy :: Type bcoPrimTy = mkTyConTy bcoPrimTyCon bcoPrimTyCon :: TyCon bcoPrimTyCon = pcPrimTyCon0 bcoPrimTyConName liftedRepTy {- ************************************************************************ * * The ``weak pointer'' type * * ************************************************************************ -} weakPrimTyCon :: TyCon weakPrimTyCon = pcPrimTyCon_LevPolyLastArg weakPrimTyConName [Representational] unliftedRepTy mkWeakPrimTy :: Type -> Type mkWeakPrimTy v = TyConApp weakPrimTyCon [getLevity v, v] {- ************************************************************************ * * The ``thread id'' type * * ************************************************************************ A thread id is represented by a pointer to the TSO itself, to ensure that they are always unique and we can always find the TSO for a given thread id. However, this has the unfortunate consequence that a ThreadId# for a given thread is treated as a root by the garbage collector and can keep TSOs around for too long. Hence the programmer API for thread manipulation uses a weak pointer to the thread id internally. 
-} threadIdPrimTy :: Type threadIdPrimTy = mkTyConTy threadIdPrimTyCon threadIdPrimTyCon :: TyCon threadIdPrimTyCon = pcPrimTyCon0 threadIdPrimTyConName unliftedRepTy {- ************************************************************************ * * The ``prompt tag'' type * * ************************************************************************ -} promptTagPrimTyCon :: TyCon promptTagPrimTyCon = pcPrimTyCon promptTagPrimTyConName [Representational] unliftedRepTy mkPromptTagPrimTy :: Type -> Type mkPromptTagPrimTy v = TyConApp promptTagPrimTyCon [v] {- ************************************************************************ * * \subsection{SIMD vector types} * * ************************************************************************ -} #include "primop-vector-tys.hs-incl" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Uniques.hs0000644000000000000000000004066307346545000021312 0ustar0000000000000000 -- | This is where we define a mapping from Uniques to their associated -- known-key Names for things associated with tuples and sums. We use this -- mapping while deserializing known-key Names in interface file symbol tables, -- which are encoded as their Unique. See Note [Symbol table representation of -- names] for details. 
-- module GHC.Builtin.Uniques ( -- * Looking up known-key names knownUniqueName -- * Getting the 'Unique's of 'Name's -- ** Anonymous sums , mkSumTyConUnique, mkSumDataConUnique , isSumTyConUnique -- ** Tuples -- *** Vanilla , mkTupleTyConUnique , mkTupleDataConUnique , isTupleTyConUnique , isTupleDataConLikeUnique -- *** Constraint , mkCTupleTyConUnique , mkCTupleDataConUnique , mkCTupleSelIdUnique -- ** Making built-in uniques , mkAlphaTyVarUnique , mkPrimOpIdUnique, mkPrimOpWrapperUnique , mkPreludeMiscIdUnique, mkPreludeDataConUnique , mkPreludeTyConUnique, mkPreludeClassUnique , mkRegSingleUnique, mkRegPairUnique, mkRegClassUnique, mkRegSubUnique , mkCostCentreUnique , varNSUnique, dataNSUnique, tvNSUnique, tcNSUnique , mkFldNSUnique, isFldNSUnique , mkBuiltinUnique , mkPseudoUniqueE -- ** Deriving uniques -- *** From TyCon name uniques , tyConRepNameUnique -- *** From DataCon name uniques , dataConWorkerUnique, dataConTyRepNameUnique , initExitJoinUnique -- Boxing data types , mkBoxingTyConUnique, boxingDataConUnique ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Core.DataCon import {-# SOURCE #-} GHC.Types.Id import {-# SOURCE #-} GHC.Types.Name import GHC.Types.Basic import GHC.Types.Unique import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Maybe import GHC.Utils.Word64 (word64ToInt) -- | Get the 'Name' associated with a known-key 'Unique'. 
knownUniqueName :: Unique -> Maybe Name knownUniqueName u = case tag of 'z' -> Just $ getUnboxedSumName n '4' -> Just $ getTupleTyConName Boxed n '5' -> Just $ getTupleTyConName Unboxed n '7' -> Just $ getTupleDataConName Boxed n '8' -> Just $ getTupleDataConName Unboxed n 'j' -> Just $ getCTupleSelIdName n 'k' -> Just $ getCTupleTyConName n 'm' -> Just $ getCTupleDataConName n _ -> Nothing where (tag, n') = unpkUnique u -- Known unique names are guaranteed to fit in Int, so we don't need the whole Word64. n = assert (isValidKnownKeyUnique u) (word64ToInt n') {- Note [Unique layout for unboxed sums] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sum arities start from 2. The encoding is a bit funny: we break up the integral part into bitfields for the arity, an alternative index (which is taken to be 0xfc in the case of the TyCon), and, in the case of a datacon, a tag (used to identify the sum's TypeRep binding). This layout is chosen to remain compatible with the usual unique allocation for wired-in data constructors described in GHC.Types.Unique TyCon for sum of arity k: 00000000 kkkkkkkk 11111100 TypeRep of TyCon for sum of arity k: 00000000 kkkkkkkk 11111101 DataCon for sum of arity k and alternative n (zero-based): 00000000 kkkkkkkk nnnnnn00 TypeRep for sum DataCon of arity k and alternative n (zero-based): 00000000 kkkkkkkk nnnnnn10 -} mkSumTyConUnique :: Arity -> Unique mkSumTyConUnique arity = assertPpr (arity <= 0x3f) (ppr arity) $ -- 0x3f since we only have 6 bits to encode the -- alternative mkUniqueInt 'z' (arity `shiftL` 8 .|. 0xfc) isSumTyConUnique :: Unique -> Maybe Arity isSumTyConUnique u = case (tag, n .&. 
0xfc) of ('z', 0xfc) -> Just (word64ToInt n `shiftR` 8) _ -> Nothing where (tag, n) = unpkUnique u mkSumDataConUnique :: ConTagZ -> Arity -> Unique mkSumDataConUnique alt arity | alt >= arity = panic ("mkSumDataConUnique: " ++ show alt ++ " >= " ++ show arity) | otherwise = mkUniqueInt 'z' (arity `shiftL` 8 + alt `shiftL` 2) {- skip the tycon -} getUnboxedSumName :: Int -> Name getUnboxedSumName n | n .&. 0xfc == 0xfc = case tag of 0x0 -> tyConName $ sumTyCon arity 0x1 -> getRep $ sumTyCon arity _ -> pprPanic "getUnboxedSumName: invalid tag" (ppr tag) | tag == 0x0 = dataConName $ sumDataCon (alt + 1) arity | tag == 0x1 = getName $ dataConWrapId $ sumDataCon (alt + 1) arity | tag == 0x2 = getRep $ promoteDataCon $ sumDataCon (alt + 1) arity | otherwise = pprPanic "getUnboxedSumName" (ppr n) where arity = n `shiftR` 8 alt = (n .&. 0xfc) `shiftR` 2 tag = 0x3 .&. n getRep tycon = fromMaybe (pprPanic "getUnboxedSumName(getRep)" (ppr tycon)) $ tyConRepName_maybe tycon -- Note [Uniques for tuple type and data constructors] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Wired-in type constructor keys occupy *two* slots: -- * u: the TyCon itself -- * u+1: the TyConRepName of the TyCon -- -- Wired-in tuple data constructor keys occupy *three* slots: -- * u: the DataCon itself -- * u+1: its worker Id -- * u+2: the TyConRepName of the promoted TyCon {- Note [Unique layout for constraint tuple selectors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constraint tuples, like boxed and unboxed tuples, have their type and data constructor Uniques wired in (see Note [Uniques for tuple type and data constructors]). Constraint tuples are somewhat more involved, however. 
For a boxed or unboxed n-tuple, we need: * A Unique for the type constructor, and * A Unique for the data constructor With a constraint n-tuple, however, we need: * A Unique for the type constructor, * A Unique for the data constructor, and * A Unique for each of the n superclass selectors To pick a concrete example (n = 2), the binary constraint tuple has a type constructor and data constructor (%,%) along with superclass selectors $p1(%,%) and $p2(%,%). Just as we wire in the Uniques for constraint tuple type constructors and data constructors, we wish to wire in the Uniques for the superclass selectors as well. Not only does this make everything consistent, it also avoids a compile-time performance penalty whenever GHC.Classes is loaded from an interface file. This is because GHC.Classes defines constraint tuples as class definitions, and if these classes weren't wired in, then loading GHC.Classes would also load every single constraint tuple type constructor, data constructor, and superclass selector. See #18635. We encode the Uniques for constraint tuple superclass selectors as follows. The integral part of the Unique is broken up into bitfields for the arity and the position of the superclass. Given a selector for a constraint tuple with arity n (zero-based) and position k (where 1 <= k <= n), its Unique will look like: 00000000 nnnnnnnn kkkkkkkk We can use bit-twiddling tricks to access the arity and position with cTupleSelIdArityBits and cTupleSelIdPosBitmask, respectively. This pattern bears a certain resemblance to the way that the Uniques for unboxed sums are encoded. This is because for a unboxed sum of arity n, there are n corresponding data constructors, each with an alternative position k. Similarly, for a constraint tuple of arity n, there are n corresponding superclass selectors. 
Reading Note [Unique layout for unboxed sums] will instill an appreciation for how the encoding for constraint tuple superclass selector Uniques takes inspiration from the encoding for unboxed sum Uniques. -} mkCTupleTyConUnique :: Arity -> Unique mkCTupleTyConUnique a = mkUniqueInt 'k' (2*a) mkCTupleDataConUnique :: Arity -> Unique mkCTupleDataConUnique a = mkUniqueInt 'm' (3*a) mkCTupleSelIdUnique :: ConTagZ -> Arity -> Unique mkCTupleSelIdUnique sc_pos arity | sc_pos >= arity = panic ("mkCTupleSelIdUnique: " ++ show sc_pos ++ " >= " ++ show arity) | otherwise = mkUniqueInt 'j' (arity `shiftL` cTupleSelIdArityBits + sc_pos) getCTupleTyConName :: Int -> Name getCTupleTyConName n = case n `divMod` 2 of (arity, 0) -> cTupleTyConName arity (arity, 1) -> mkPrelTyConRepName $ cTupleTyConName arity _ -> panic "getCTupleTyConName: impossible" getCTupleDataConName :: Int -> Name getCTupleDataConName n = case n `divMod` 3 of (arity, 0) -> cTupleDataConName arity (arity, 1) -> getName $ dataConWrapId $ cTupleDataCon arity (arity, 2) -> mkPrelTyConRepName $ cTupleDataConName arity _ -> panic "getCTupleDataConName: impossible" getCTupleSelIdName :: Int -> Name getCTupleSelIdName n = cTupleSelIdName (sc_pos + 1) arity where arity = n `shiftR` cTupleSelIdArityBits sc_pos = n .&. cTupleSelIdPosBitmask -- Given the arity of a constraint tuple, this is the number of bits by which -- one must shift it to the left in order to encode the arity in the Unique -- of a superclass selector for that constraint tuple. Alternatively, given the -- Unique for a constraint tuple superclass selector, this is the number of -- bits by which one must shift it to the right to retrieve the arity of the -- constraint tuple. See Note [Unique layout for constraint tuple selectors]. 
cTupleSelIdArityBits :: Int cTupleSelIdArityBits = 8 -- Given the Unique for a constraint tuple superclass selector, one can -- retrieve the position of the selector by ANDing this mask, which will -- clear all but the eight least significant bits. -- See Note [Unique layout for constraint tuple selectors]. cTupleSelIdPosBitmask :: Int cTupleSelIdPosBitmask = 0xff -------------------------------------------------- -- Normal tuples mkTupleDataConUnique :: Boxity -> Arity -> Unique mkTupleDataConUnique Boxed a = mkUniqueInt '7' (3*a) -- may be used in C labels mkTupleDataConUnique Unboxed a = mkUniqueInt '8' (3*a) mkTupleTyConUnique :: Boxity -> Arity -> Unique mkTupleTyConUnique Boxed a = mkUniqueInt '4' (2*a) mkTupleTyConUnique Unboxed a = mkUniqueInt '5' (2*a) -- | This function is an inverse of `mkTupleTyConUnique` isTupleTyConUnique :: Unique -> Maybe (Boxity, Arity) isTupleTyConUnique u = case (tag, i) of ('4', 0) -> Just (Boxed, arity) ('5', 0) -> Just (Unboxed, arity) _ -> Nothing where (tag, n) = unpkUnique u (arity', i) = quotRem n 2 arity = word64ToInt arity' -- | This function is an inverse of `mkTupleTyDataUnique` that also matches the worker and promoted tycon. 
isTupleDataConLikeUnique :: Unique -> Maybe (Boxity, Arity) isTupleDataConLikeUnique u = case tag of '7' -> Just (Boxed, arity) '8' -> Just (Unboxed, arity) _ -> Nothing where (tag, n) = unpkUnique u (arity', _) = quotRem n 3 arity = word64ToInt arity' getTupleTyConName :: Boxity -> Int -> Name getTupleTyConName boxity n = case n `divMod` 2 of (arity, 0) -> tyConName $ tupleTyCon boxity arity (arity, 1) -> fromMaybe (panic "getTupleTyConName") $ tyConRepName_maybe $ tupleTyCon boxity arity _ -> panic "getTupleTyConName: impossible" getTupleDataConName :: Boxity -> Int -> Name getTupleDataConName boxity n = case n `divMod` 3 of (arity, 0) -> dataConName $ tupleDataCon boxity arity (arity, 1) -> idName $ dataConWorkId $ tupleDataCon boxity arity (arity, 2) -> fromMaybe (panic "getTupleDataCon") $ tyConRepName_maybe $ promotedTupleDataCon boxity arity _ -> panic "getTupleDataConName: impossible" {- Note [Uniques for wired-in prelude things and known tags] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Allocation of unique supply characters: v,u: for renumbering value-, and usage- vars. B: builtin C-E: pseudo uniques (used in native-code generator) I: GHCi evaluation X: uniques from mkLocalUnique _: unifiable tyvars (above) 0-9: prelude things below (no numbers left any more..) :: (prelude) parallel array data constructors other a-z: lower case chars for unique supplies. Used so far: a TypeChecking? 
b Boxing tycons & datacons c StgToCmm/Renamer d desugarer f AbsC flattener i TypeChecking interface files j constraint tuple superclass selectors k constraint tuple tycons m constraint tuple datacons n Native/LLVM codegen r Hsc name cache s simplifier u Cmm pipeline y GHCi bytecode generator z anonymous sums Note [Related uniques for wired-in things] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * All wired in tycons actually use *two* uniques: * u: the TyCon itself * u+1: the TyConRepName of the TyCon (for use with TypeRep) The "+1" is implemented in tyConRepNameUnique. If this ever changes, make sure to also change the treatment for boxing tycons. * All wired in datacons use *three* uniques: * u: the DataCon itself * u+1: its worker Id * u+2: the TyConRepName of the promoted TyCon No wired-in datacons have wrappers. The "+1" is implemented in dataConWorkerUnique and the "+2" is in dataConTyRepNameUnique. If this ever changes, make sure to also change the treatment for boxing tycons. * Because boxing tycons (see Note [Boxing constructors] in GHC.Builtin.Types) come with both a tycon and a datacon, each one takes up five slots, combining the two cases above. Getting from the tycon to the datacon (by adding 2) is implemented in boxingDataConUnique. -} mkAlphaTyVarUnique :: Int -> Unique mkPreludeClassUnique :: Int -> Unique mkPrimOpIdUnique :: Int -> Unique -- See Note [Primop wrappers] in GHC.Builtin.PrimOps. 
mkPrimOpWrapperUnique :: Int -> Unique mkPreludeMiscIdUnique :: Int -> Unique mkAlphaTyVarUnique i = mkUniqueInt '1' i mkPreludeClassUnique i = mkUniqueInt '2' i -------------------------------------------------- mkPrimOpIdUnique op = mkUniqueInt '9' (2*op) mkPrimOpWrapperUnique op = mkUniqueInt '9' (2*op+1) mkPreludeMiscIdUnique i = mkUniqueInt '0' i mkPseudoUniqueE, mkBuiltinUnique :: Int -> Unique mkBuiltinUnique i = mkUniqueInt 'B' i mkPseudoUniqueE i = mkUniqueInt 'E' i -- used in NCG spiller to create spill VirtualRegs mkRegSingleUnique, mkRegPairUnique, mkRegSubUnique, mkRegClassUnique :: Int -> Unique mkRegSingleUnique = mkUniqueInt 'R' mkRegSubUnique = mkUniqueInt 'S' mkRegPairUnique = mkUniqueInt 'P' mkRegClassUnique = mkUniqueInt 'L' mkCostCentreUnique :: Int -> Unique mkCostCentreUnique = mkUniqueInt 'C' varNSUnique, dataNSUnique, tvNSUnique, tcNSUnique :: Unique varNSUnique = mkUnique 'i' 0 dataNSUnique = mkUnique 'd' 0 tvNSUnique = mkUnique 'v' 0 tcNSUnique = mkUnique 'c' 0 mkFldNSUnique :: FastString -> Unique mkFldNSUnique fs = mkUniqueInt 'f' (uniqueOfFS fs) isFldNSUnique :: Unique -> Bool isFldNSUnique uniq = case unpkUnique uniq of (tag, _) -> tag == 'f' initExitJoinUnique :: Unique initExitJoinUnique = mkUnique 's' 0 -------------------------------------------------- -- Wired-in type constructor keys occupy *two* slots: -- See Note [Related uniques for wired-in things] mkPreludeTyConUnique :: Int -> Unique mkPreludeTyConUnique i = mkUniqueInt '3' (2*i) tyConRepNameUnique :: Unique -> Unique tyConRepNameUnique u = incrUnique u -------------------------------------------------- -- Wired-in data constructor keys occupy *three* slots: -- See Note [Related uniques for wired-in things] mkPreludeDataConUnique :: Int -> Unique mkPreludeDataConUnique i = mkUniqueInt '6' (3*i) -- Must be alphabetic dataConTyRepNameUnique, dataConWorkerUnique :: Unique -> Unique dataConWorkerUnique u = incrUnique u dataConTyRepNameUnique u = stepUnique u 2 
-------------------------------------------------- -- The data constructors of RuntimeRep occupy *five* slots: -- See Note [Related uniques for wired-in things] -- -- Example: WordRep -- -- * u: the TyCon of the boxing data type WordBox -- * u+1: the TyConRepName of the boxing data type -- * u+2: the DataCon for MkWordBox -- * u+3: the worker id for MkWordBox -- * u+4: the TyConRepName of the promoted TyCon 'MkWordBox -- -- Note carefully that -- * u,u+1 are in sync with the conventions for -- wired-in type constructors, above -- * u+2,u+3,u+4 are in sync with the conventions for -- wired-in data constructors, above -- A little delicate! mkBoxingTyConUnique :: Int -> Unique mkBoxingTyConUnique i = mkUniqueInt 'b' (5*i) boxingDataConUnique :: Unique -> Unique boxingDataConUnique u = stepUnique u 2 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Builtin/Uniques.hs-boot0000644000000000000000000000200507346545000022237 0ustar0000000000000000module GHC.Builtin.Uniques where import GHC.Prelude import GHC.Types.Unique import {-# SOURCE #-} GHC.Types.Name import GHC.Types.Basic -- Needed by GHC.Builtin.Types knownUniqueName :: Unique -> Maybe Name mkSumTyConUnique :: Arity -> Unique mkSumDataConUnique :: ConTagZ -> Arity -> Unique mkCTupleTyConUnique :: Arity -> Unique mkCTupleDataConUnique :: Arity -> Unique mkTupleTyConUnique :: Boxity -> Arity -> Unique mkTupleDataConUnique :: Boxity -> Arity -> Unique mkAlphaTyVarUnique :: Int -> Unique mkPreludeClassUnique :: Int -> Unique mkPrimOpIdUnique :: Int -> Unique mkPrimOpWrapperUnique :: Int -> Unique mkPreludeMiscIdUnique :: Int -> Unique mkPseudoUniqueE, mkBuiltinUnique :: Int -> Unique mkRegSingleUnique, mkRegPairUnique, mkRegSubUnique, mkRegClassUnique :: Int -> Unique initExitJoinUnique :: Unique mkPreludeTyConUnique :: Int -> Unique tyConRepNameUnique :: Unique -> Unique mkPreludeDataConUnique :: Int -> Unique dataConTyRepNameUnique, dataConWorkerUnique :: Unique -> Unique 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/ByteCode/0000755000000000000000000000000007346545000017404 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/ByteCode/Types.hs0000644000000000000000000002421107346545000021044 0ustar0000000000000000{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnliftedNewtypes #-} -- -- (c) The University of Glasgow 2002-2006 -- -- | Bytecode assembler types module GHC.ByteCode.Types ( CompiledByteCode(..), seqCompiledByteCode , BCOByteArray(..), mkBCOByteArray , FFIInfo(..) , RegBitmap(..) , NativeCallType(..), NativeCallInfo(..), voidTupleReturnInfo, voidPrimCallInfo , ByteOff(..), WordOff(..), HalfWord(..) , UnlinkedBCO(..), BCOPtr(..), BCONPtr(..) , ItblEnv, ItblPtr(..) , AddrEnv, AddrPtr(..) , CgBreakInfo(..) , ModBreaks (..), BreakIndex, emptyModBreaks , CCostCentre , FlatBag, sizeFlatBag, fromSizedSeq, elemsFlatBag ) where import GHC.Prelude import GHC.Data.FastString import GHC.Data.FlatBag import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable import GHC.Builtin.PrimOps import GHC.Types.SptEntry import GHC.Types.SrcLoc import GHCi.BreakArray import GHCi.RemoteTypes import GHCi.FFI import Control.DeepSeq import GHCi.ResolvedBCO ( BCOByteArray(..), mkBCOByteArray ) import Foreign import Data.Array import Data.ByteString (ByteString) import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap import qualified GHC.Exts.Heap as Heap import GHC.Stack.CCS import GHC.Cmm.Expr ( GlobalRegSet, emptyRegSet, regSetToList ) import GHC.Iface.Syntax import Language.Haskell.Syntax.Module.Name (ModuleName) -- ----------------------------------------------------------------------------- -- Compiled Byte Code data CompiledByteCode = CompiledByteCode { bc_bcos :: FlatBag UnlinkedBCO -- ^ Bunch of interpretable bindings , bc_itbls :: ItblEnv -- ^ Mapping from DataCons to their info tables , bc_ffis :: 
[FFIInfo] -- ^ ffi blocks we allocated , bc_strs :: AddrEnv -- ^ top-level strings (heap allocated) , bc_breaks :: Maybe ModBreaks -- ^ breakpoint info (Nothing if breakpoints are disabled) , bc_spt_entries :: ![SptEntry] -- ^ Static pointer table entries which should be loaded along with the -- BCOs. See Note [Grand plan for static forms] in -- "GHC.Iface.Tidy.StaticPtrTable". } -- ToDo: we're not tracking strings that we malloc'd newtype FFIInfo = FFIInfo (RemotePtr C_ffi_cif) deriving (Show, NFData) instance Outputable CompiledByteCode where ppr CompiledByteCode{..} = ppr $ elemsFlatBag bc_bcos -- Not a real NFData instance, because ModBreaks contains some things -- we can't rnf seqCompiledByteCode :: CompiledByteCode -> () seqCompiledByteCode CompiledByteCode{..} = rnf bc_bcos `seq` seqEltsNameEnv rnf bc_itbls `seq` rnf bc_ffis `seq` seqEltsNameEnv rnf bc_strs `seq` rnf (fmap seqModBreaks bc_breaks) newtype ByteOff = ByteOff Int deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Outputable) newtype WordOff = WordOff Int deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Outputable) -- A type for values that are half the size of a word on the target -- platform where the interpreter runs (which may be a different -- wordsize than the compiler). newtype HalfWord = HalfWord Word deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Outputable) newtype RegBitmap = RegBitmap { unRegBitmap :: Word32 } deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Bits, FiniteBits, Outputable) {- Note [GHCi TupleInfo] ~~~~~~~~~~~~~~~~~~~~~~~~ This contains the data we need for passing unboxed tuples between bytecode and native code In general we closely follow the native calling convention that GHC uses for unboxed tuples, but we don't use any registers in bytecode. All tuple elements are expanded to use a full register or a full word on the stack. 
The position of tuple elements that are returned on the stack in the native calling convention is unchanged when returning the same tuple in bytecode. The order of the remaining elements is determined by the register in which they would have been returned, rather than by their position in the tuple in the Haskell source code. This makes jumping between bytecode and native code easier: A map of live registers is enough to convert the tuple. See GHC.StgToByteCode.layoutTuple for more details. -} data NativeCallType = NativePrimCall | NativeTupleReturn deriving (Eq) data NativeCallInfo = NativeCallInfo { nativeCallType :: !NativeCallType , nativeCallSize :: !WordOff -- total size of arguments in words , nativeCallRegs :: !GlobalRegSet , nativeCallStackSpillSize :: !WordOff {- words spilled on the stack by GHCs native calling convention -} } instance Outputable NativeCallInfo where ppr NativeCallInfo{..} = text " ppr nativeCallSize <+> text "stack" <+> ppr nativeCallStackSpillSize <+> text "regs" <+> ppr (map (text @SDoc . 
show) $ regSetToList nativeCallRegs) <> char '>' voidTupleReturnInfo :: NativeCallInfo voidTupleReturnInfo = NativeCallInfo NativeTupleReturn 0 emptyRegSet 0 voidPrimCallInfo :: NativeCallInfo voidPrimCallInfo = NativeCallInfo NativePrimCall 0 emptyRegSet 0 type ItblEnv = NameEnv (Name, ItblPtr) type AddrEnv = NameEnv (Name, AddrPtr) -- We need the Name in the range so we know which -- elements to filter out when unloading a module newtype ItblPtr = ItblPtr (RemotePtr Heap.StgInfoTable) deriving (Show, NFData) newtype AddrPtr = AddrPtr (RemotePtr ()) deriving (NFData) data UnlinkedBCO = UnlinkedBCO { unlinkedBCOName :: !Name, unlinkedBCOArity :: {-# UNPACK #-} !Int, unlinkedBCOInstrs :: !(BCOByteArray Word16), -- insns unlinkedBCOBitmap :: !(BCOByteArray Word), -- bitmap unlinkedBCOLits :: !(FlatBag BCONPtr), -- non-ptrs unlinkedBCOPtrs :: !(FlatBag BCOPtr) -- ptrs } instance NFData UnlinkedBCO where rnf UnlinkedBCO{..} = rnf unlinkedBCOLits `seq` rnf unlinkedBCOPtrs data BCOPtr = BCOPtrName !Name | BCOPtrPrimOp !PrimOp | BCOPtrBCO !UnlinkedBCO | BCOPtrBreakArray (ForeignRef BreakArray) -- ^ a pointer to a breakpoint's module's BreakArray in GHCi's memory instance NFData BCOPtr where rnf (BCOPtrBCO bco) = rnf bco rnf x = x `seq` () data BCONPtr = BCONPtrWord {-# UNPACK #-} !Word | BCONPtrLbl !FastString | BCONPtrItbl !Name -- | A reference to a top-level string literal; see -- Note [Generating code for top-level string literal bindings] in GHC.StgToByteCode. | BCONPtrAddr !Name -- | Only used internally in the assembler in an intermediate representation; -- should never appear in a fully-assembled UnlinkedBCO. -- Also see Note [Allocating string literals] in GHC.ByteCode.Asm. | BCONPtrStr !ByteString instance NFData BCONPtr where rnf x = x `seq` () -- | Information about a breakpoint that we know at code-generation time -- In order to be used, this needs to be hydrated relative to the current HscEnv by -- 'hydrateCgBreakInfo'. 
Everything here can be fully forced and that's critical for -- preventing space leaks (see #22530) data CgBreakInfo = CgBreakInfo { cgb_tyvars :: ![IfaceTvBndr] -- ^ Type variables in scope at the breakpoint , cgb_vars :: ![Maybe (IfaceIdBndr, Word)] , cgb_resty :: !IfaceType } -- See Note [Syncing breakpoint info] in GHC.Runtime.Eval seqCgBreakInfo :: CgBreakInfo -> () seqCgBreakInfo CgBreakInfo{..} = rnf cgb_tyvars `seq` rnf cgb_vars `seq` rnf cgb_resty instance Outputable UnlinkedBCO where ppr (UnlinkedBCO nm _arity _insns _bitmap lits ptrs) = sep [text "BCO", ppr nm, text "with", ppr (sizeFlatBag lits), text "lits", ppr (sizeFlatBag ptrs), text "ptrs" ] instance Outputable CgBreakInfo where ppr info = text "CgBreakInfo" <+> parens (ppr (cgb_vars info) <+> ppr (cgb_resty info)) -- ----------------------------------------------------------------------------- -- Breakpoints -- | Breakpoint index type BreakIndex = Int -- | C CostCentre type data CCostCentre -- | All the information about the breakpoints for a module data ModBreaks = ModBreaks { modBreaks_flags :: ForeignRef BreakArray -- ^ The array of flags, one per breakpoint, -- indicating which breakpoints are enabled. , modBreaks_locs :: !(Array BreakIndex SrcSpan) -- ^ An array giving the source span of each breakpoint. , modBreaks_vars :: !(Array BreakIndex [OccName]) -- ^ An array giving the names of the free variables at each breakpoint. , modBreaks_decls :: !(Array BreakIndex [String]) -- ^ An array giving the names of the declarations enclosing each breakpoint. 
-- See Note [Field modBreaks_decls] , modBreaks_ccs :: !(Array BreakIndex (RemotePtr CostCentre)) -- ^ Array pointing to cost centre for each breakpoint , modBreaks_breakInfo :: IntMap CgBreakInfo -- ^ info about each breakpoint from the bytecode generator , modBreaks_module :: RemotePtr ModuleName } seqModBreaks :: ModBreaks -> () seqModBreaks ModBreaks{..} = rnf modBreaks_flags `seq` rnf modBreaks_locs `seq` rnf modBreaks_vars `seq` rnf modBreaks_decls `seq` rnf modBreaks_ccs `seq` rnf (fmap seqCgBreakInfo modBreaks_breakInfo) `seq` rnf modBreaks_module -- | Construct an empty ModBreaks emptyModBreaks :: ModBreaks emptyModBreaks = ModBreaks { modBreaks_flags = error "ModBreaks.modBreaks_array not initialised" -- ToDo: can we avoid this? , modBreaks_locs = array (0,-1) [] , modBreaks_vars = array (0,-1) [] , modBreaks_decls = array (0,-1) [] , modBreaks_ccs = array (0,-1) [] , modBreaks_breakInfo = IntMap.empty , modBreaks_module = toRemotePtr nullPtr } {- Note [Field modBreaks_decls] ~~~~~~~~~~~~~~~~~~~~~~ A value of eg ["foo", "bar", "baz"] in a `modBreaks_decls` field means: The breakpoint is in the function called "baz" that is declared in a `let` or `where` clause of a declaration called "bar", which itself is declared in a `let` or `where` clause of the top-level function called "foo". -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/0000755000000000000000000000000007346545000015706 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm.hs0000644000000000000000000005104107346545000016757 0ustar0000000000000000-- Cmm representations using Hoopl's Graph CmmNode e x. 
{-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE EmptyCase #-} module GHC.Cmm ( -- * Cmm top-level datatypes DCmmGroup, CmmProgram, CmmGroup, CmmGroupSRTs, RawCmmGroup, GenCmmGroup, CmmDecl, DCmmDecl, CmmDeclSRTs, GenCmmDecl(..), CmmDataDecl, cmmDataDeclCmmDecl, DCmmGraph, CmmGraph, GenCmmGraph, GenGenCmmGraph(..), toBlockMap, revPostorder, toBlockList, CmmBlock, RawCmmDecl, Section(..), SectionType(..), GenCmmStatics(..), type CmmStatics, type RawCmmStatics, CmmStatic(..), SectionProtection(..), sectionProtection, DWrap(..), unDeterm, removeDeterm, removeDetermDecl, removeDetermGraph, -- ** Blocks containing lists GenBasicBlock(..), blockId, ListGraph(..), pprBBlock, -- * Info Tables GenCmmTopInfo(..) , DCmmTopInfo , CmmTopInfo , CmmStackInfo(..), CmmInfoTable(..), topInfoTable, topInfoTableD, ClosureTypeInfo(..), ProfilingInfo(..), ConstrDescription, -- * Statements, expressions and types module GHC.Cmm.Node, module GHC.Cmm.Expr, -- * Pretty-printing pprCmmGroup, pprSection, pprStatic ) where import GHC.Prelude import GHC.Platform import GHC.Types.Id import GHC.Types.CostCentre import GHC.Cmm.CLabel import GHC.Cmm.BlockId import GHC.Cmm.Node import GHC.Runtime.Heap.Layout import GHC.Cmm.Expr import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Dataflow.Label import GHC.Utils.Outputable import Data.Void (Void) import Data.List (intersperse) import Data.ByteString (ByteString) import qualified Data.ByteString as BS ----------------------------------------------------------------------------- -- Cmm, GenCmm ----------------------------------------------------------------------------- -- A CmmProgram is a list of CmmGroups -- A CmmGroup is a list of top-level declarations -- When object-splitting is 
on, each group is compiled into a separate -- .o file. So typically we put closely related stuff in a CmmGroup. -- Section-splitting follows suit and makes one .text subsection for each -- CmmGroup. type CmmProgram = [CmmGroup] type GenCmmGroup d h g = [GenCmmDecl d h g] -- | Cmm group after STG generation type DCmmGroup = GenCmmGroup CmmStatics DCmmTopInfo DCmmGraph -- | Cmm group before SRT generation type CmmGroup = GenCmmGroup CmmStatics CmmTopInfo CmmGraph -- | Cmm group with SRTs type CmmGroupSRTs = GenCmmGroup RawCmmStatics CmmTopInfo CmmGraph -- | "Raw" cmm group (TODO (osa): not sure what that means) type RawCmmGroup = GenCmmGroup RawCmmStatics (LabelMap RawCmmStatics) CmmGraph ----------------------------------------------------------------------------- -- CmmDecl, GenCmmDecl ----------------------------------------------------------------------------- -- GenCmmDecl is abstracted over -- d, the type of static data elements in CmmData -- h, the static info preceding the code of a CmmProc -- g, the control-flow graph of a CmmProc -- -- We expect there to be two main instances of this type: -- (a) C--, i.e. populated with various C-- constructs -- (b) Native code, populated with data/instructions -- | A top-level chunk, abstracted over the type of the contents of -- the basic blocks (Cmm or instructions are the likely instantiations). data GenCmmDecl d h g = CmmProc -- A procedure h -- Extra header such as the info table CLabel -- Entry label [GlobalRegUse] -- Registers live on entry. Note that the set of live -- registers will be correct in generated C-- code, but -- not in hand-written C-- code. However, -- splitAtProcPoints calculates correct liveness -- information for CmmProcs. 
g -- Control-flow graph for the procedure's code | CmmData -- Static data Section d deriving (Functor) instance (OutputableP Platform d, OutputableP Platform info, OutputableP Platform i) => OutputableP Platform (GenCmmDecl d info i) where pdoc = pprTop type DCmmDecl = GenCmmDecl CmmStatics DCmmTopInfo DCmmGraph type CmmDecl = GenCmmDecl CmmStatics CmmTopInfo CmmGraph type CmmDeclSRTs = GenCmmDecl RawCmmStatics CmmTopInfo CmmGraph type CmmDataDecl = GenCmmDataDecl CmmStatics type GenCmmDataDecl d = GenCmmDecl d Void Void -- When `CmmProc` case can be statically excluded cmmDataDeclCmmDecl :: GenCmmDataDecl d -> GenCmmDecl d h g cmmDataDeclCmmDecl = \ case CmmProc void _ _ _ -> case void of CmmData section d -> CmmData section d {-# INLINE cmmDataDeclCmmDecl #-} type RawCmmDecl = GenCmmDecl RawCmmStatics (LabelMap RawCmmStatics) CmmGraph ----------------------------------------------------------------------------- -- Graphs ----------------------------------------------------------------------------- type CmmGraph = GenCmmGraph CmmNode type DCmmGraph = GenGenCmmGraph DWrap CmmNode type GenCmmGraph n = GenGenCmmGraph LabelMap n data GenGenCmmGraph s n = CmmGraph { g_entry :: BlockId, g_graph :: Graph' s Block n C C } type CmmBlock = Block CmmNode C C instance OutputableP Platform CmmGraph where pdoc = pprCmmGraph toBlockMap :: CmmGraph -> LabelMap CmmBlock toBlockMap (CmmGraph {g_graph=GMany NothingO body NothingO}) = body pprCmmGraph :: Platform -> CmmGraph -> SDoc pprCmmGraph platform g = text "{" <> text "offset" $$ nest 2 (vcat $ map (pdoc platform) blocks) $$ text "}" where blocks = revPostorder g -- revPostorder has the side-effect of discarding unreachable code, -- so pretty-printed Cmm will omit any unreachable blocks. This can -- sometimes be confusing. 
revPostorder :: CmmGraph -> [CmmBlock] revPostorder g = {-# SCC "revPostorder" #-} revPostorderFrom (toBlockMap g) (g_entry g) toBlockList :: CmmGraph -> [CmmBlock] toBlockList g = mapElems $ toBlockMap g ----------------------------------------------------------------------------- -- Info Tables ----------------------------------------------------------------------------- -- | CmmTopInfo is attached to each CmmDecl (see defn of CmmGroup), and contains -- the extra info (beyond the executable code) that belongs to that CmmDecl. data GenCmmTopInfo f = TopInfo { info_tbls :: f CmmInfoTable , stack_info :: CmmStackInfo } newtype DWrap a = DWrap [(BlockId, a)] unDeterm :: DWrap a -> [(BlockId, a)] unDeterm (DWrap f) = f type DCmmTopInfo = GenCmmTopInfo DWrap type CmmTopInfo = GenCmmTopInfo LabelMap instance OutputableP Platform CmmTopInfo where pdoc = pprTopInfo pprTopInfo :: Platform -> CmmTopInfo -> SDoc pprTopInfo platform (TopInfo {info_tbls=info_tbl, stack_info=stack_info}) = vcat [text "info_tbls: " <> pdoc platform info_tbl, text "stack_info: " <> ppr stack_info] topInfoTableD :: GenCmmDecl a DCmmTopInfo (GenGenCmmGraph s n) -> Maybe CmmInfoTable topInfoTableD (CmmProc infos _ _ g) = case (info_tbls infos) of DWrap xs -> lookup (g_entry g) xs topInfoTableD _ = Nothing topInfoTable :: GenCmmDecl a CmmTopInfo (GenGenCmmGraph s n) -> Maybe CmmInfoTable topInfoTable (CmmProc infos _ _ g) = mapLookup (g_entry g) (info_tbls infos) topInfoTable _ = Nothing data CmmStackInfo = StackInfo { arg_space :: ByteOff, -- number of bytes of arguments on the stack on entry to the -- the proc. This is filled in by GHC.StgToCmm.codeGen, and -- used by the stack allocator later. do_layout :: Bool -- Do automatic stack layout for this proc. This is -- True for all code generated by the code generator, -- but is occasionally False for hand-written Cmm where -- we want to do the stack manipulation manually. 
} instance Outputable CmmStackInfo where ppr = pprStackInfo pprStackInfo :: CmmStackInfo -> SDoc pprStackInfo (StackInfo {arg_space=arg_space}) = text "arg_space: " <> ppr arg_space -- | Info table as a haskell data type data CmmInfoTable = CmmInfoTable { cit_lbl :: CLabel, -- Info table label cit_rep :: SMRep, cit_prof :: ProfilingInfo, cit_srt :: Maybe CLabel, -- empty, or a closure address cit_clo :: Maybe (Id, CostCentreStack) -- Just (id,ccs) <=> build a static closure later -- Nothing <=> don't build a static closure -- -- Static closures for FUNs and THUNKs are *not* generated by -- the code generator, because we might want to add SRT -- entries to them later (for FUNs at least; THUNKs are -- treated the same for consistency). See Note [SRTs] in -- GHC.Cmm.Info.Build, in particular the [FUN] optimisation. -- -- This is strictly speaking not a part of the info table that -- will be finally generated, but it's the only convenient -- place to convey this information from the code generator to -- where we build the static closures in -- GHC.Cmm.Info.Build.doSRTs. 
} deriving (Eq, Ord) instance OutputableP Platform CmmInfoTable where pdoc = pprInfoTable data ProfilingInfo = NoProfilingInfo | ProfilingInfo ByteString ByteString -- closure_type, closure_desc deriving (Eq, Ord) ----------------------------------------------------------------------------- -- Static Data ----------------------------------------------------------------------------- data SectionType = Text | Data | ReadOnlyData | RelocatableReadOnlyData | UninitialisedData -- See Note [Initializers and finalizers in Cmm] in GHC.Cmm.InitFini | InitArray -- .init_array on ELF, .ctor on Windows | FiniArray -- .fini_array on ELF, .dtor on Windows | CString | OtherSection String deriving (Show) data SectionProtection = ReadWriteSection | ReadOnlySection | WriteProtectedSection -- See Note [Relocatable Read-Only Data] deriving (Eq) -- | Should a data in this section be considered constant at runtime sectionProtection :: Section -> SectionProtection sectionProtection (Section t _) = case t of Text -> ReadOnlySection ReadOnlyData -> ReadOnlySection RelocatableReadOnlyData -> WriteProtectedSection InitArray -> ReadOnlySection FiniArray -> ReadOnlySection CString -> ReadOnlySection Data -> ReadWriteSection UninitialisedData -> ReadWriteSection (OtherSection _) -> ReadWriteSection {- Note [Relocatable Read-Only Data] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Relocatable data are only read-only after relocation at the start of the program. They should be writable from the source code until then. Failure to do so would end up in segfaults at execution when using linkers that do not enforce writability of those sections, such as the gold linker. -} data Section = Section SectionType CLabel data CmmStatic = CmmStaticLit CmmLit -- ^ a literal value, size given by cmmLitRep of the literal. | CmmUninitialised Int -- ^ uninitialised data, N bytes long | CmmString ByteString -- ^ string of 8-bit values only, not zero terminated. 
| CmmFileEmbed FilePath Int -- ^ an embedded binary file and its byte length instance OutputableP Platform CmmStatic where pdoc = pprStatic instance Outputable CmmStatic where ppr (CmmStaticLit lit) = text "CmmStaticLit" <+> ppr lit ppr (CmmUninitialised n) = text "CmmUninitialised" <+> ppr n ppr (CmmString _) = text "CmmString" ppr (CmmFileEmbed fp _) = text "CmmFileEmbed" <+> text fp -- | Static data before or after SRT generation data GenCmmStatics (rawOnly :: Bool) where CmmStatics :: CLabel -- Label of statics -> CmmInfoTable -> CostCentreStack -> [CmmLit] -- Payload -> [CmmLit] -- Non-pointers that go to the end of the closure -- This is used by stg_unpack_cstring closures. -- See Note [unpack_cstring closures] in StgStdThunks.cmm. -> GenCmmStatics 'False -- | Static data, after SRTs are generated CmmStaticsRaw :: CLabel -- Label of statics -> [CmmStatic] -- The static data itself -> GenCmmStatics a instance OutputableP Platform (GenCmmStatics a) where pdoc = pprStatics type CmmStatics = GenCmmStatics 'False type RawCmmStatics = GenCmmStatics 'True {- ----------------------------------------------------------------------------- -- Deterministic Cmm / Info Tables ----------------------------------------------------------------------------- Note [DCmmGroup vs CmmGroup or: Deterministic Info Tables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consulting Note [Object determinism] one will learn that in order to produce deterministic objects just after cmm is produced we perform a renaming pass which provides fresh uniques for all unique-able things in the input Cmm. After this point, we use a deterministic unique supply (an incrementing counter) so any resulting labels which make their way into object code have a deterministic name. A key assumption to this process is that the input is deterministic modulo the uniques and the order that bindings appear in the definitions is the same. 
CmmGroup uses LabelMap in two places: * In CmmProc for info tables * In CmmGraph for the blocks of the graph LabelMap is not a deterministic structure, so traversing a LabelMap can process elements in different order (depending on the given uniques). Therefore before we do the renaming we need to use a deterministic structure, one which we can traverse in a guaranteed order. A list does the job perfectly. Once the renaming happens it is converted back into a LabelMap, which is now deterministic due to the uniques being generated and assigned in a deterministic manner. We prefer using the renamed LabelMap rather than the list in the rest of the code generation because it is much more efficient than lists for the needs of the code generator. -} -- Converting out of deterministic Cmm removeDeterm :: DCmmGroup -> CmmGroup removeDeterm = map removeDetermDecl removeDetermDecl :: DCmmDecl -> CmmDecl removeDetermDecl (CmmProc h e r g) = CmmProc (removeDetermTop h) e r (removeDetermGraph g) removeDetermDecl (CmmData a b) = CmmData a b removeDetermTop :: DCmmTopInfo -> CmmTopInfo removeDetermTop (TopInfo a b) = TopInfo (mapFromList $ unDeterm a) b removeDetermGraph :: DCmmGraph -> CmmGraph removeDetermGraph (CmmGraph x y) = let y' = case y of GMany a (DWrap b) c -> GMany a (mapFromList b) c in CmmGraph x y' -- ----------------------------------------------------------------------------- -- Basic blocks consisting of lists -- These are used by the LLVM and NCG backends, when populating Cmm -- with lists of instructions. 
data GenBasicBlock i = BasicBlock BlockId [i] deriving (Functor) -- | The branch block id is that of the first block in -- the branch, which is that branch's entry point blockId :: GenBasicBlock i -> BlockId blockId (BasicBlock blk_id _ ) = blk_id newtype ListGraph i = ListGraph [GenBasicBlock i] deriving (Functor) instance Outputable instr => Outputable (ListGraph instr) where ppr (ListGraph blocks) = vcat (map ppr blocks) instance OutputableP env instr => OutputableP env (ListGraph instr) where pdoc env g = ppr (fmap (pdoc env) g) instance Outputable instr => Outputable (GenBasicBlock instr) where ppr = pprBBlock instance OutputableP env instr => OutputableP env (GenBasicBlock instr) where pdoc env block = ppr (fmap (pdoc env) block) pprBBlock :: Outputable stmt => GenBasicBlock stmt -> SDoc pprBBlock (BasicBlock ident stmts) = hang (ppr ident <> colon) 4 (vcat (map ppr stmts)) -- -------------------------------------------------------------------------- -- Pretty-printing Cmm -- -------------------------------------------------------------------------- -- -- This is where we walk over Cmm emitting an external representation, -- suitable for parsing, in a syntax strongly reminiscent of C--. This -- is the "External Core" for the Cmm layer. -- -- As such, this should be a well-defined syntax: we want it to look nice. -- Thus, we try wherever possible to use syntax defined in [1], -- "The C-- Reference Manual", http://www.cs.tufts.edu/~nr/c--/index.html. We -- differ slightly, in some cases. For one, we use I8 .. I64 for types, rather -- than C--'s bits8 .. bits64. -- -- We try to ensure that all information available in the abstract -- syntax is reproduced, or reproducible, in the concrete syntax. -- Data that is not in printed out can be reconstructed according to -- conventions used in the pretty printer. There are at least two such -- cases: -- 1) if a value has wordRep type, the type is not appended in the -- output. 
-- 2) MachOps that operate over wordRep type are printed in a -- C-style, rather than as their internal MachRep name. -- -- These conventions produce much more readable Cmm output. pprCmmGroup :: (OutputableP Platform d, OutputableP Platform info, OutputableP Platform g) => Platform -> GenCmmGroup d info g -> SDoc pprCmmGroup platform tops = vcat $ intersperse blankLine $ map (pprTop platform) tops -- -------------------------------------------------------------------------- -- Top level `procedure' blocks. -- pprTop :: (OutputableP Platform d, OutputableP Platform info, OutputableP Platform i) => Platform -> GenCmmDecl d info i -> SDoc pprTop platform (CmmProc info lbl live graph) = vcat [ pdoc platform lbl <> lparen <> rparen <+> lbrace <+> text "// " <+> ppr live , nest 8 $ lbrace <+> pdoc platform info $$ rbrace , nest 4 $ pdoc platform graph , rbrace ] -- -------------------------------------------------------------------------- -- We follow [1], 4.5 -- -- section "data" { ... } -- pprTop platform (CmmData section ds) = (hang (pprSection platform section <+> lbrace) 4 (pdoc platform ds)) $$ rbrace -- -------------------------------------------------------------------------- -- Pretty-printing info tables -- -------------------------------------------------------------------------- pprInfoTable :: Platform -> CmmInfoTable -> SDoc pprInfoTable platform (CmmInfoTable { cit_lbl = lbl, cit_rep = rep , cit_prof = prof_info , cit_srt = srt }) = vcat [ text "label: " <> pdoc platform lbl , text "rep: " <> ppr rep , case prof_info of NoProfilingInfo -> empty ProfilingInfo ct cd -> vcat [ text "type: " <> text (show (BS.unpack ct)) , text "desc: " <> text (show (BS.unpack cd)) ] , text "srt: " <> pdoc platform srt ] -- -------------------------------------------------------------------------- -- Static data. 
-- Strings are printed as C strings, and we print them as I8[], -- following C-- -- pprStatics :: Platform -> GenCmmStatics a -> SDoc pprStatics platform (CmmStatics lbl itbl ccs payload extras) = pdoc platform lbl <> colon <+> pdoc platform itbl <+> ppr ccs <+> pdoc platform payload <+> ppr extras pprStatics platform (CmmStaticsRaw lbl ds) = vcat ((pdoc platform lbl <> colon) : map (pprStatic platform) ds) pprStatic :: Platform -> CmmStatic -> SDoc pprStatic platform s = case s of CmmStaticLit lit -> nest 4 $ text "const" <+> pdoc platform lit <> semi CmmUninitialised i -> nest 4 $ text "I8" <> brackets (int i) CmmString s' -> nest 4 $ text "I8[]" <+> text (show s') CmmFileEmbed path _ -> nest 4 $ text "incbin " <+> text (show path) -- -------------------------------------------------------------------------- -- data sections -- pprSection :: Platform -> Section -> SDoc pprSection platform (Section t suffix) = section <+> doubleQuotes (pprSectionType t <+> char '.' <+> pdoc platform suffix) where section = text "section" pprSectionType :: SectionType -> SDoc pprSectionType s = doubleQuotes $ case s of Text -> text "text" Data -> text "data" ReadOnlyData -> text "readonly" RelocatableReadOnlyData -> text "relreadonly" UninitialisedData -> text "uninitialised" InitArray -> text "initarray" FiniArray -> text "finiarray" CString -> text "cstring" OtherSection s' -> text s' ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/0000755000000000000000000000000007346545000016422 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/BlockId.hs0000644000000000000000000000327407346545000020273 0ustar0000000000000000{-# LANGUAGE TypeSynonymInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} {- BlockId module should probably go away completely, being superseded by Label -} module GHC.Cmm.BlockId ( BlockId, mkBlockId -- ToDo: BlockId should be abstract, but it isn't yet , newBlockId , blockLbl, infoTblLbl ) where import GHC.Prelude import GHC.Cmm.CLabel import 
GHC.Data.FastString import GHC.Types.Id.Info import GHC.Types.Name import GHC.Types.Unique import qualified GHC.Types.Unique.DSM as DSM import GHC.Cmm.Dataflow.Label (Label, mkHooplLabel) ---------------------------------------------------------------- --- Block Ids, their environments, and their sets {- Note [Unique BlockId] ~~~~~~~~~~~~~~~~~~~~~~~~ Although a 'BlockId' is a local label, for reasons of implementation, 'BlockId's must be unique within an entire compilation unit. The reason is that each local label is mapped to an assembly-language label, and in most assembly languages allow, a label is visible throughout the entire compilation unit in which it appears. -} type BlockId = Label mkBlockId :: Unique -> BlockId mkBlockId unique = mkHooplLabel $ getKey unique -- If the monad unique instance uses a deterministic unique supply, this will -- give you a deterministic unique. Otherwise, it will not. Note that from Cmm -- onwards (after deterministic renaming in 'codeGen'), there should only exist -- deterministic block labels. newBlockId :: DSM.MonadGetUnique m => m BlockId newBlockId = mkBlockId <$> DSM.getUniqueM blockLbl :: BlockId -> CLabel blockLbl label = mkLocalBlockLabel (getUnique label) infoTblLbl :: BlockId -> CLabel infoTblLbl label = mkBlockInfoTableLabel (mkFCallName (getUnique label) (fsLit "block")) NoCafRefs ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/BlockId.hs-boot0000644000000000000000000000026007346545000021224 0ustar0000000000000000module GHC.Cmm.BlockId (BlockId, mkBlockId) where import GHC.Cmm.Dataflow.Label (Label) import GHC.Types.Unique (Unique) type BlockId = Label mkBlockId :: Unique -> BlockId ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/CLabel.hs0000644000000000000000000023564507346545000020117 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} ----------------------------------------------------------------------------- -- -- Object-file symbols (called CLabel for historical reasons). 
-- -- (c) The University of Glasgow 2004-2006 -- ----------------------------------------------------------------------------- module GHC.Cmm.CLabel ( CLabel, -- abstract type NeedExternDecl (..), ForeignLabelSource(..), DynamicLinkerLabelInfo(..), ConInfoTableLocation(..), getConInfoTableLocation, -- * Constructors mkClosureLabel, mkSRTLabel, mkInfoTableLabel, mkEntryLabel, mkRednCountsLabel, mkTagHitLabel, mkConInfoTableLabel, mkApEntryLabel, mkApInfoTableLabel, mkClosureTableLabel, mkBytesLabel, mkLocalBlockLabel, mkBlockInfoTableLabel, mkBitmapLabel, mkStringLitLabel, mkInitializerStubLabel, mkInitializerArrayLabel, mkFinalizerStubLabel, mkFinalizerArrayLabel, mkAsmTempLabel, mkAsmTempDerivedLabel, mkAsmTempEndLabel, mkAsmTempProcEndLabel, mkAsmTempDieLabel, mkDirty_MUT_VAR_Label, mkMUT_VAR_CLEAN_infoLabel, mkNonmovingWriteBarrierEnabledLabel, mkOrigThunkInfoLabel, mkUpdInfoLabel, mkBHUpdInfoLabel, mkIndStaticInfoLabel, mkMainCapabilityLabel, mkMAP_FROZEN_CLEAN_infoLabel, mkMAP_FROZEN_DIRTY_infoLabel, mkMAP_DIRTY_infoLabel, mkSMAP_FROZEN_CLEAN_infoLabel, mkSMAP_FROZEN_DIRTY_infoLabel, mkSMAP_DIRTY_infoLabel, mkBadAlignmentLabel, mkOutOfBoundsAccessLabel, mkMemcpyRangeOverlapLabel, mkArrWords_infoLabel, mkSRTInfoLabel, mkTopTickyCtrLabel, mkCAFBlackHoleInfoTableLabel, mkRtsPrimOpLabel, mkRtsSlowFastTickyCtrLabel, mkRtsUnpackCStringLabel, mkRtsUnpackCStringUtf8Label, mkSelectorInfoLabel, mkSelectorEntryLabel, mkCmmInfoLabel, mkCmmEntryLabel, mkCmmRetInfoLabel, mkCmmRetLabel, mkCmmCodeLabel, mkCmmDataLabel, mkRtsCmmDataLabel, mkCmmClosureLabel, mkRtsApFastLabel, mkPrimCallLabel, mkForeignLabel, mkCCLabel, mkCCSLabel, mkIPELabel, InfoProvEnt(..), mkDynamicLinkerLabel, mkPicBaseLabel, mkDeadStripPreventer, mkHpcTicksLabel, -- * Predicates hasCAF, needsCDecl, maybeLocalBlockLabel, externallyVisibleCLabel, isMathFun, isCFunctionLabel, isGcPtrLabel, labelDynamic, isLocalCLabel, mayRedirectTo, isInfoTableLabel, isCmmInfoTableLabel, isConInfoTableLabel, isIdLabel, 
isTickyLabel, hasHaskellName, hasIdLabelInfo, isBytesLabel, isForeignLabel, isSomeRODataLabel, isStaticClosureLabel, -- * Conversions toClosureLbl, toSlowEntryLbl, toEntryLbl, toInfoLbl, toProcDelimiterLbl, -- * Pretty-printing LabelStyle (..), pprDebugCLabel, pprCLabel, pprAsmLabel, ppInternalProcLabel, -- * Others dynamicLinkerLabelInfo, CStubLabel (..), cStubLabel, fromCStubLabel, mapInternalNonDetUniques ) where import GHC.Prelude import GHC.Types.Id.Info import GHC.Types.Basic import {-# SOURCE #-} GHC.Cmm.BlockId (BlockId, mkBlockId) import GHC.Unit.Types import GHC.Types.Name import GHC.Types.Unique import GHC.Builtin.PrimOps import GHC.Types.CostCentre import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Platform import GHC.Types.Unique.Set import GHC.Core.Ppr ( {- instances -} ) import GHC.Types.SrcLoc import qualified Data.Semigroup as S -- ----------------------------------------------------------------------------- -- The CLabel type {- | 'CLabel' is an abstract type that supports the following operations: - Pretty printing - In a C file, does it need to be declared before use? (i.e. is it guaranteed to be already in scope in the places we need to refer to it?) - If it needs to be declared, what type (code or data) should it be declared to have? - Is it visible outside this object file or not? - Is it "dynamic" (see details below) - Eq and Ord, so that we can make sets of CLabels (currently only used in outputting C as far as I can tell, to avoid generating more than one declaration for any given label). - Converting an info table label into an entry label. CLabel usage is a bit messy in GHC as they are used in a number of different contexts: - By the C-- AST to identify labels - By the unregisterised C code generator (\"PprC\") for naming functions (hence the name 'CLabel') - By the native and LLVM code generators to identify labels For extra fun, each of these uses a slightly different subset of constructors (e.g. 
'AsmTempLabel' and 'AsmTempDerivedLabel' are used only in the NCG and LLVM backends). In general, we use 'IdLabel' to represent Haskell things early in the pipeline. However, later optimization passes will often represent blocks they create with 'LocalBlockLabel' where there is no obvious 'Name' to hang off the label. -} data CLabel = -- | A label related to the definition of a particular Id or Con in a .hs file. IdLabel Name CafInfo IdLabelInfo -- ^ encodes the suffix of the label -- | A label from a .cmm file that is not associated with a .hs level Id. | CmmLabel UnitId -- ^ what package the label belongs to. NeedExternDecl -- ^ does the label need an "extern .." declaration FastString -- ^ identifier giving the prefix of the label CmmLabelInfo -- ^ encodes the suffix of the label -- | A label with a baked-in \/ algorithmically generated name that definitely -- comes from the RTS. The code for it must compile into libHSrts.a \/ libHSrts.so -- If it doesn't have an algorithmically generated name then use a CmmLabel -- instead and give it an appropriate UnitId argument. | RtsLabel RtsLabelInfo -- | A label associated with a block. These aren't visible outside of the -- compilation unit in which they are defined. These are generally used to -- name blocks produced by Cmm-to-Cmm passes and the native code generator, -- where we don't have a 'Name' to associate the label to and therefore can't -- use 'IdLabel'. | LocalBlockLabel {-# UNPACK #-} !Unique -- | A 'C' (or otherwise foreign) label. -- | ForeignLabel FastString -- ^ name of the imported label. ForeignLabelSource -- ^ what package the foreign label is in. FunctionOrData -- | Local temporary label used for native (or LLVM) code generation; must not -- appear outside of these contexts. Use primarily for debug information | AsmTempLabel {-# UNPACK #-} !Unique -- | A label \"derived\" from another 'CLabel' by the addition of a suffix. -- Must not occur outside of the NCG or LLVM code generators. 
| AsmTempDerivedLabel CLabel FastString -- ^ suffix | StringLitLabel {-# UNPACK #-} !Unique | CC_Label CostCentre | CCS_Label CostCentreStack | IPE_Label InfoProvEnt -- | A per-module metadata label. | ModuleLabel !Module ModuleLabelKind -- | These labels are generated and used inside the NCG only. -- They are special variants of a label used for dynamic linking -- see module "GHC.CmmToAsm.PIC" for details. | DynamicLinkerLabel DynamicLinkerLabelInfo CLabel -- | This label is generated and used inside the NCG only. -- It is used as a base for PIC calculations on some platforms. -- It takes the form of a local numeric assembler label '1'; and -- is pretty-printed as 1b, referring to the previous definition -- of 1: in the assembler source file. | PicBaseLabel -- | A label before an info table to prevent excessive dead-stripping on darwin | DeadStripPreventer CLabel -- | Per-module table of tick locations | HpcTicksLabel Module -- | Static reference table | SRTLabel {-# UNPACK #-} !Unique -- | A bitmap (function or case return) | LargeBitmapLabel {-# UNPACK #-} !Unique deriving Eq instance Show CLabel where show = showPprUnsafe . 
pprDebugCLabel genericPlatform data ModuleLabelKind = MLK_Initializer LexicalFastString | MLK_InitializerArray | MLK_Finalizer LexicalFastString | MLK_FinalizerArray | MLK_IPEBuffer deriving (Eq, Ord) pprModuleLabelKind :: IsLine doc => ModuleLabelKind -> doc pprModuleLabelKind MLK_InitializerArray = text "init_arr" pprModuleLabelKind (MLK_Initializer (LexicalFastString s)) = text "init__" <> ftext s pprModuleLabelKind MLK_FinalizerArray = text "fini_arr" pprModuleLabelKind (MLK_Finalizer (LexicalFastString s)) = text "fini__" <> ftext s pprModuleLabelKind MLK_IPEBuffer = text "ipe_buf" {-# SPECIALIZE pprModuleLabelKind :: ModuleLabelKind -> SDoc #-} {-# SPECIALIZE pprModuleLabelKind :: ModuleLabelKind -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable isIdLabel :: CLabel -> Bool isIdLabel IdLabel{} = True isIdLabel _ = False -- Used in SRT analysis. See Note [Ticky labels in SRT analysis] in -- GHC.Cmm.Info.Build. isTickyLabel :: CLabel -> Bool isTickyLabel (IdLabel _ _ IdTickyInfo{}) = True isTickyLabel _ = False -- | Indicate if "GHC.CmmToC" has to generate an extern declaration for the -- label (e.g. "extern StgWordArray(foo)"). The type is fixed to StgWordArray. -- -- Symbols from the RTS don't need "extern" declarations because they are -- exposed via "rts/include/Stg.h" with the appropriate type. See 'needsCDecl'. -- -- The fixed StgWordArray type led to "conflicting types" issues with user -- provided Cmm files (not in the RTS) that declare data of another type (#15467 -- and test for #17920). Hence the Cmm parser considers that labels in data -- sections don't need the "extern" declaration (just add one explicitly if you -- need it). -- -- See https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/compiler/backends/ppr-c#prototypes -- for why extern declaration are needed at all. newtype NeedExternDecl = NeedExternDecl Bool deriving (Ord,Eq) -- This is laborious, but necessary. 
We can't derive Ord because -- Unique has a special Ord instance that cares for object determinism. -- Note nonDetCmpUnique and stableNameCmp in the implementation: -- * If -fobject-determinism, the internal uniques will be renamed, thus the -- comparison will actually be deterministic -- * Stable name compare guarantees deterministic ordering of Names despite -- the non-deterministic uniques underlying external names (which aren't -- renamed on -fobject-determinism). -- See Note [Unique Determinism and code generation] and Note [Object determinism] instance Ord CLabel where compare (IdLabel a1 b1 c1) (IdLabel a2 b2 c2) | isExternalName a1, isExternalName a2 = stableNameCmp a1 a2 S.<> compare b1 b2 S.<> compare c1 c2 | isExternalName a1 = GT | isExternalName a2 = LT compare (IdLabel a1 b1 c1) (IdLabel a2 b2 c2) = -- Comparing names here should deterministic because all unique should have -- been renamed deterministically, and external names compared above. compare a1 a2 S.<> compare b1 b2 S.<> compare c1 c2 compare (CmmLabel a1 b1 c1 d1) (CmmLabel a2 b2 c2 d2) = compare a1 a2 S.<> compare b1 b2 S.<> -- This is not non-deterministic because the uniques have been deterministically renamed. 
-- See Note [Object determinism] uniqCompareFS c1 c2 S.<> compare d1 d2 compare (RtsLabel a1) (RtsLabel a2) = compare a1 a2 compare (LocalBlockLabel u1) (LocalBlockLabel u2) = nonDetCmpUnique u1 u2 compare (ForeignLabel a1 b1 c1) (ForeignLabel a2 b2 c2) = uniqCompareFS a1 a2 S.<> compare b1 b2 S.<> compare c1 c2 compare (AsmTempLabel u1) (AsmTempLabel u2) = nonDetCmpUnique u1 u2 compare (AsmTempDerivedLabel a1 b1) (AsmTempDerivedLabel a2 b2) = compare a1 a2 S.<> lexicalCompareFS b1 b2 compare (StringLitLabel u1) (StringLitLabel u2) = nonDetCmpUnique u1 u2 compare (CC_Label a1) (CC_Label a2) = compare a1 a2 compare (CCS_Label a1) (CCS_Label a2) = compare a1 a2 compare (IPE_Label a1) (IPE_Label a2) = compare a1 a2 compare (ModuleLabel m1 k1) (ModuleLabel m2 k2) = compare m1 m2 S.<> compare k1 k2 compare (DynamicLinkerLabel a1 b1) (DynamicLinkerLabel a2 b2) = compare a1 a2 S.<> compare b1 b2 compare PicBaseLabel PicBaseLabel = EQ compare (DeadStripPreventer a1) (DeadStripPreventer a2) = compare a1 a2 compare (HpcTicksLabel a1) (HpcTicksLabel a2) = compare a1 a2 compare (SRTLabel u1) (SRTLabel u2) = nonDetCmpUnique u1 u2 compare (LargeBitmapLabel u1) (LargeBitmapLabel u2) = nonDetCmpUnique u1 u2 compare IdLabel{} _ = LT compare _ IdLabel{} = GT compare CmmLabel{} _ = LT compare _ CmmLabel{} = GT compare RtsLabel{} _ = LT compare _ RtsLabel{} = GT compare LocalBlockLabel{} _ = LT compare _ LocalBlockLabel{} = GT compare ForeignLabel{} _ = LT compare _ ForeignLabel{} = GT compare AsmTempLabel{} _ = LT compare _ AsmTempLabel{} = GT compare AsmTempDerivedLabel{} _ = LT compare _ AsmTempDerivedLabel{} = GT compare StringLitLabel{} _ = LT compare _ StringLitLabel{} = GT compare CC_Label{} _ = LT compare _ CC_Label{} = GT compare CCS_Label{} _ = LT compare _ CCS_Label{} = GT compare DynamicLinkerLabel{} _ = LT compare _ DynamicLinkerLabel{} = GT compare PicBaseLabel{} _ = LT compare _ PicBaseLabel{} = GT compare DeadStripPreventer{} _ = LT compare _ DeadStripPreventer{} = GT 
  -- Tail of the Ord CLabel instance (header is above this chunk):
  -- an arbitrary but fixed total order over the remaining constructors.
  compare HpcTicksLabel{} _ = LT
  compare _ HpcTicksLabel{} = GT
  compare SRTLabel{} _ = LT
  compare _ SRTLabel{} = GT
  compare (IPE_Label {}) _ = LT
  compare _ (IPE_Label{}) = GT
  compare (ModuleLabel {}) _ = LT
  compare _ (ModuleLabel{}) = GT

-- | Record where a foreign label is stored.
data ForeignLabelSource

   -- | Label is in a named package
   = ForeignLabelInPackage UnitId

   -- | Label is in some external, system package that doesn't also
   --   contain compiled Haskell code, and is not associated with any .hi files.
   --   We don't have to worry about Haskell code being inlined from
   --   external packages. It is safe to treat the RTS package as "external".
   | ForeignLabelInExternalPackage

   -- | Label is in the package currently being compiled.
   --   This is only used for creating hacky tmp labels during code generation.
   --   Don't use it in any code that might be inlined across a package boundary
   --   (ie, core code) else the information will be wrong relative to the
   --   destination module.
   | ForeignLabelInThisPackage

   deriving (Eq, Ord)

-- | For debugging problems with the CLabel representation.
--   We can't make a Show instance for CLabel because lots of its components don't have instances.
--   The regular Outputable instance only shows the label name, and not its other info.
pprDebugCLabel :: Platform -> CLabel -> SDoc
pprDebugCLabel platform lbl = pprAsmLabel platform lbl <> parens extra
   where
      extra = case lbl of
         IdLabel _ _ info
            -> text "IdLabel" <> whenPprDebug (text ":" <> ppr info)

         CmmLabel pkg _ext _name _info
            -> text "CmmLabel" <+> ppr pkg

         RtsLabel{}
            -> text "RtsLabel"

         ForeignLabel _name src funOrData
            -> text "ForeignLabel" <+> ppr src <+> ppr funOrData

         _ -> text "other CLabel"

-- | Dynamic ticky info for the id.
data TickyIdInfo
  = TickyRednCounts           -- ^ Used for dynamic allocations
  | TickyInferedTag !Unique   -- ^ Used to track dynamic hits of tag inference.
  deriving (Eq,Show)

instance Outputable TickyIdInfo where
  ppr TickyRednCounts = text "ct_rdn"
  ppr (TickyInferedTag unique) = text "ct_tag[" <> ppr unique <> char ']'

-- | Don't depend on this if you need determinism.
-- No determinism in the ncg backend, so we use the unique for Ord.
-- Even if it pains me slightly.
instance Ord TickyIdInfo where
  compare TickyRednCounts TickyRednCounts = EQ
  compare TickyRednCounts _ = LT
  compare _ TickyRednCounts = GT
  compare (TickyInferedTag unique1) (TickyInferedTag unique2) =
    nonDetCmpUnique unique1 unique2

-- | The kind of label attached to a Haskell 'Id'.
-- NB: the derived Ord instance depends on the constructor order here.
data IdLabelInfo
  = Closure             -- ^ Label for closure
  | InfoTable           -- ^ Info tables for closures; always read-only
  | Entry               -- ^ Entry point
  | Slow                -- ^ Slow entry point

  | LocalInfoTable      -- ^ Like InfoTable but not externally visible
  | LocalEntry          -- ^ Like Entry but not externally visible

  | IdTickyInfo !TickyIdInfo
                        -- ^ Label of place to keep Ticky-ticky hit info for this Id

  | ConEntry ConInfoTableLocation
  -- ^ Constructor entry point, when `-fdistinct-info-tables` is enabled then
  -- each usage of a constructor will be given a unique number and a fresh info
  -- table will be created in the module where the constructor is used. The
  -- argument is used to keep track of which info table a usage of a constructor
  -- should use. When the argument is 'Nothing' then it uses the info table which
  -- is defined in the module where the datatype is declared, this is the usual case.
  -- When it is (Just (m, k)) it will use the kth info table defined in module m. The
  -- point of this inefficiency is so that you can work out where allocations of data
  -- constructors are coming from when you are debugging.

  | ConInfoTable ConInfoTableLocation -- ^ Corresponding info table

  | ClosureTable        -- ^ Table of closures for Enum tycons

  | Bytes               -- ^ Content of a string literal. See
                        -- Note [Bytes label].
  | BlockInfoTable      -- ^ Like LocalInfoTable but for a proc-point block
                        -- instead of a closure entry-point.
                        -- See Note [Proc-point local block entry-points].

  deriving (Eq, Ord)

-- | Which module is the info table from, and which number was it.
data ConInfoTableLocation = UsageSite Module Int
                          | DefinitionSite
                          deriving (Eq, Ord)

instance Outputable ConInfoTableLocation where
  ppr (UsageSite m n) = text "Loc(" <> ppr n <> text "):" <+> ppr m
  ppr DefinitionSite = empty

-- | Project out the 'ConInfoTableLocation' from a constructor info-table
-- label, if there is one.
getConInfoTableLocation :: IdLabelInfo -> Maybe ConInfoTableLocation
getConInfoTableLocation (ConInfoTable ci) = Just ci
getConInfoTableLocation _ = Nothing

instance Outputable IdLabelInfo where
  ppr Closure    = text "Closure"
  ppr InfoTable  = text "InfoTable"
  ppr Entry      = text "Entry"
  ppr Slow       = text "Slow"

  ppr LocalInfoTable  = text "LocalInfoTable"
  ppr LocalEntry      = text "LocalEntry"

  ppr (ConEntry mn) = text "ConEntry" <+> ppr mn
  ppr (ConInfoTable mn) = text "ConInfoTable" <+> ppr mn
  ppr ClosureTable = text "ClosureTable"
  ppr Bytes        = text "Bytes"
  ppr BlockInfoTable  = text "BlockInfoTable"
  ppr (IdTickyInfo info) = text "IdTickyInfo" <+> ppr info

-- | Label of an RTS-provided object (selector/AP thunks, primops, ...).
data RtsLabelInfo
  = RtsSelectorInfoTable Bool{-updatable-} Int{-offset-}  -- ^ Selector thunks
  | RtsSelectorEntry     Bool{-updatable-} Int{-offset-}

  | RtsApInfoTable       Bool{-updatable-} Int{-arity-}    -- ^ AP thunks
  | RtsApEntry           Bool{-updatable-} Int{-arity-}

  | RtsUnpackCStringInfoTable
  | RtsUnpackCStringUtf8InfoTable

  | RtsPrimOp PrimOp
  | RtsApFast NonDetFastString    -- ^ _fast versions of generic apply
  | RtsSlowFastTickyCtr String

  deriving (Eq,Ord)

-- | What type of Cmm label we're dealing with.
-- Determines the suffix appended to the name when a CLabel.CmmLabel
-- is pretty printed.
data CmmLabelInfo = CmmInfo -- ^ misc rts info tables, suffix _info | CmmEntry -- ^ misc rts entry points, suffix _entry | CmmRetInfo -- ^ misc rts ret info tables, suffix _info | CmmRet -- ^ misc rts return points, suffix _ret | CmmData -- ^ misc rts data bits, eg CHARLIKE_closure | CmmCode -- ^ misc rts code | CmmClosure -- ^ closures eg CHARLIKE_closure | CmmPrimCall -- ^ a prim call to some hand written Cmm code deriving (Eq, Ord) data DynamicLinkerLabelInfo = CodeStub -- MachO: Lfoo$stub, ELF: foo@plt | SymbolPtr -- MachO: Lfoo$non_lazy_ptr, Windows: __imp_foo | GotSymbolPtr -- ELF: foo@got | GotSymbolOffset -- ELF: foo@gotoff deriving (Eq, Ord) -- ----------------------------------------------------------------------------- -- Constructing CLabels -- ----------------------------------------------------------------------------- -- Constructing IdLabels -- These are always local: mkSRTLabel :: Unique -> CLabel mkSRTLabel u = SRTLabel u -- See Note [ticky for LNE] mkRednCountsLabel :: Name -> CLabel mkRednCountsLabel name = IdLabel name NoCafRefs (IdTickyInfo TickyRednCounts) mkTagHitLabel :: Name -> Unique -> CLabel mkTagHitLabel name !uniq = IdLabel name NoCafRefs (IdTickyInfo (TickyInferedTag uniq)) mkClosureLabel :: Name -> CafInfo -> CLabel mkInfoTableLabel :: Name -> CafInfo -> CLabel mkEntryLabel :: Name -> CafInfo -> CLabel mkClosureTableLabel :: Name -> CafInfo -> CLabel mkConInfoTableLabel :: Name -> ConInfoTableLocation -> CLabel mkBytesLabel :: Name -> CLabel mkClosureLabel name c = IdLabel name c Closure -- | Decides between external and local labels based on the names externality. mkInfoTableLabel name c | isExternalName name = IdLabel name c InfoTable | otherwise = IdLabel name c LocalInfoTable mkEntryLabel name c = IdLabel name c Entry mkClosureTableLabel name c = IdLabel name c ClosureTable -- Special case for the normal 'DefinitionSite' case so that the 'ConInfoTable' application can be floated to a CAF. 
-- | Constructor info-table label; constructors never have CAF refs.
-- The 'DefinitionSite' case is written out separately so that the
-- @ConInfoTable DefinitionSite@ application can be floated to a CAF.
mkConInfoTableLabel name DefinitionSite = IdLabel name NoCafRefs (ConInfoTable DefinitionSite)
mkConInfoTableLabel name k = IdLabel name NoCafRefs (ConInfoTable k)

mkBytesLabel name = IdLabel name NoCafRefs Bytes

mkBlockInfoTableLabel :: Name -> CafInfo -> CLabel
mkBlockInfoTableLabel name c = IdLabel name c BlockInfoTable
                               -- See Note [Proc-point local block entry-points].

-- Constructing Cmm Labels

-- | Labels for well-known RTS symbols; none of them need an extern
-- declaration from "GHC.CmmToC" (they come in via rts/include/Stg.h),
-- except the ones built with 'mkForeignLabel'.
mkDirty_MUT_VAR_Label,
    mkNonmovingWriteBarrierEnabledLabel,
    mkOrigThunkInfoLabel, mkUpdInfoLabel,
    mkBHUpdInfoLabel, mkIndStaticInfoLabel, mkMainCapabilityLabel,
    mkMAP_FROZEN_CLEAN_infoLabel, mkMAP_FROZEN_DIRTY_infoLabel,
    mkMAP_DIRTY_infoLabel,
    mkArrWords_infoLabel,
    mkTopTickyCtrLabel,
    mkCAFBlackHoleInfoTableLabel,
    mkSMAP_FROZEN_CLEAN_infoLabel, mkSMAP_FROZEN_DIRTY_infoLabel,
    mkSMAP_DIRTY_infoLabel, mkBadAlignmentLabel,
    mkOutOfBoundsAccessLabel, mkMemcpyRangeOverlapLabel,
    mkMUT_VAR_CLEAN_infoLabel :: CLabel
mkDirty_MUT_VAR_Label           = mkForeignLabel (fsLit "dirty_MUT_VAR") ForeignLabelInExternalPackage IsFunction
mkNonmovingWriteBarrierEnabledLabel
                                = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "nonmoving_write_barrier_enabled") CmmData
mkOrigThunkInfoLabel            = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_orig_thunk_info_frame") CmmInfo
mkUpdInfoLabel                  = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_upd_frame") CmmInfo
mkBHUpdInfoLabel                = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_bh_upd_frame") CmmInfo
mkIndStaticInfoLabel            = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_IND_STATIC") CmmInfo
mkMainCapabilityLabel           = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "MainCapability") CmmData
mkMAP_FROZEN_CLEAN_infoLabel    = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_FROZEN_CLEAN") CmmInfo
mkMAP_FROZEN_DIRTY_infoLabel    = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_FROZEN_DIRTY") CmmInfo
mkMAP_DIRTY_infoLabel           = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_DIRTY") CmmInfo
mkTopTickyCtrLabel              = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "top_ct") CmmData
mkCAFBlackHoleInfoTableLabel    = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_CAF_BLACKHOLE") CmmInfo
mkArrWords_infoLabel            = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_ARR_WORDS") CmmInfo
mkSMAP_FROZEN_CLEAN_infoLabel   = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN") CmmInfo
mkSMAP_FROZEN_DIRTY_infoLabel   = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY") CmmInfo
mkSMAP_DIRTY_infoLabel          = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_DIRTY") CmmInfo
mkBadAlignmentLabel             = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_badAlignment") CmmEntry
mkOutOfBoundsAccessLabel        = mkForeignLabel (fsLit "rtsOutOfBoundsAccess") ForeignLabelInExternalPackage IsFunction
mkMemcpyRangeOverlapLabel       = mkForeignLabel (fsLit "rtsMemcpyRangeOverlap") ForeignLabelInExternalPackage IsFunction
mkMUT_VAR_CLEAN_infoLabel       = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_VAR_CLEAN") CmmInfo

-- | Label of the pre-canned SRT info table of the given size.
-- Only sizes 1..16 exist in the RTS; anything else is a panic.
mkSRTInfoLabel :: Int -> CLabel
mkSRTInfoLabel n = CmmLabel rtsUnitId (NeedExternDecl False) lbl CmmInfo
  where
    lbl =
      case n of
        1  -> fsLit "stg_SRT_1"
        2  -> fsLit "stg_SRT_2"
        3  -> fsLit "stg_SRT_3"
        4  -> fsLit "stg_SRT_4"
        5  -> fsLit "stg_SRT_5"
        6  -> fsLit "stg_SRT_6"
        7  -> fsLit "stg_SRT_7"
        8  -> fsLit "stg_SRT_8"
        9  -> fsLit "stg_SRT_9"
        10 -> fsLit "stg_SRT_10"
        11 -> fsLit "stg_SRT_11"
        12 -> fsLit "stg_SRT_12"
        13 -> fsLit "stg_SRT_13"
        14 -> fsLit "stg_SRT_14"
        15 -> fsLit "stg_SRT_15"
        16 -> fsLit "stg_SRT_16"
        -- Include the offending size in the panic message (the bare
        -- @panic "mkSRTInfoLabel"@ gave no clue what went wrong).
        _  -> pprPanic "mkSRTInfoLabel" (ppr n)

-----
mkCmmInfoLabel,    mkCmmEntryLabel, mkCmmRetInfoLabel, mkCmmRetLabel,
  mkCmmCodeLabel, mkCmmClosureLabel
        :: UnitId -> FastString -> CLabel

mkCmmDataLabel    :: UnitId -> NeedExternDecl -> FastString -> CLabel
mkRtsCmmDataLabel :: FastString -> CLabel

mkCmmInfoLabel     pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmInfo
mkCmmEntryLabel    pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmEntry
mkCmmRetInfoLabel  pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmRetInfo
mkCmmRetLabel      pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmRet
mkCmmCodeLabel     pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmCode
mkCmmClosureLabel  pkg str     = CmmLabel pkg (NeedExternDecl True) str CmmClosure
mkCmmDataLabel     pkg ext str = CmmLabel pkg ext  str CmmData
mkRtsCmmDataLabel  str         = CmmLabel rtsUnitId (NeedExternDecl False) str CmmData
                                 -- RTS symbols don't need "GHC.CmmToC" to
                                 -- generate \"extern\" declaration (they are
                                 -- exposed via rts/include/Stg.h)

mkLocalBlockLabel :: Unique -> CLabel
mkLocalBlockLabel u = LocalBlockLabel u

-- Constructing RtsLabels
mkRtsPrimOpLabel :: PrimOp -> CLabel
mkRtsPrimOpLabel primop = RtsLabel (RtsPrimOp primop)

-- | Pre-canned selector-thunk info table; the offset must fit the
-- platform's MAX_SPEC_SELECTEE_SIZE (asserted).
mkSelectorInfoLabel :: Platform -> Bool -> Int -> CLabel
mkSelectorInfoLabel platform upd offset =
   assert (offset >= 0 && offset <= pc_MAX_SPEC_SELECTEE_SIZE (platformConstants platform)) $
   RtsLabel (RtsSelectorInfoTable upd offset)

mkSelectorEntryLabel :: Platform -> Bool -> Int -> CLabel
mkSelectorEntryLabel platform upd offset =
   assert (offset >= 0 && offset <= pc_MAX_SPEC_SELECTEE_SIZE (platformConstants platform)) $
   RtsLabel (RtsSelectorEntry upd offset)

-- | Pre-canned AP-thunk info table; the arity must fit the platform's
-- MAX_SPEC_AP_SIZE (asserted).
mkApInfoTableLabel :: Platform -> Bool -> Int -> CLabel
mkApInfoTableLabel platform upd arity =
   assert (arity > 0 && arity <= pc_MAX_SPEC_AP_SIZE (platformConstants platform)) $
   RtsLabel (RtsApInfoTable upd arity)

mkApEntryLabel :: Platform -> Bool -> Int -> CLabel
mkApEntryLabel platform upd arity =
   assert (arity > 0 && arity <= pc_MAX_SPEC_AP_SIZE (platformConstants platform)) $
   RtsLabel (RtsApEntry upd arity)

-- A call to some primitive hand written Cmm code
mkPrimCallLabel :: PrimCall -> CLabel
mkPrimCallLabel (PrimCall str pkg)
        = CmmLabel (toUnitId pkg) (NeedExternDecl True) str CmmPrimCall

-- Constructing ForeignLabels

-- | Make a foreign label
mkForeignLabel
        :: FastString           -- name
        -> ForeignLabelSource   -- what package it's in
        -> FunctionOrData
        -> CLabel
mkForeignLabel = ForeignLabel

-- | Whether label is a top-level string literal
isBytesLabel :: CLabel -> Bool
isBytesLabel (IdLabel _ _ Bytes) = True
isBytesLabel _lbl = False

-- | Whether label is a non-haskell label (defined in C code)
isForeignLabel :: CLabel -> Bool
isForeignLabel (ForeignLabel _ _ _) = True
isForeignLabel _lbl = False

-- | Whether label is a static closure label (can come from haskell or cmm)
isStaticClosureLabel :: CLabel -> Bool
-- Closure defined in haskell (.hs)
isStaticClosureLabel (IdLabel _ _ Closure) = True
-- Closure defined in cmm
isStaticClosureLabel (CmmLabel _ _ _ CmmClosure) = True
isStaticClosureLabel _lbl = False

-- | Whether label is a .rodata label
isSomeRODataLabel :: CLabel -> Bool
-- info table defined in haskell (.hs)
isSomeRODataLabel (IdLabel _ _ ClosureTable) = True
isSomeRODataLabel (IdLabel _ _ ConInfoTable {}) = True
isSomeRODataLabel (IdLabel _ _ InfoTable) = True
isSomeRODataLabel (IdLabel _ _ LocalInfoTable) = True
isSomeRODataLabel (IdLabel _ _ BlockInfoTable) = True
-- info table defined in cmm (.cmm)
isSomeRODataLabel (CmmLabel _ _ _ CmmInfo) = True
isSomeRODataLabel (CmmLabel _ _ _ CmmRetInfo) = True
isSomeRODataLabel _lbl = False

-- | Whether the label points to some kind of info table
isInfoTableLabel :: CLabel -> Bool
isInfoTableLabel (IdLabel _ _ InfoTable)      = True
isInfoTableLabel (IdLabel _ _ LocalInfoTable) = True
isInfoTableLabel (IdLabel _ _ ConInfoTable {})   = True
isInfoTableLabel (IdLabel _ _ BlockInfoTable) = True
isInfoTableLabel (CmmLabel _ _ _ CmmInfo)     = True
isInfoTableLabel _                            = False

-- | Whether label points to an info table defined in Cmm
isCmmInfoTableLabel :: CLabel -> Bool
isCmmInfoTableLabel (CmmLabel _ _ _ CmmInfo) = True
isCmmInfoTableLabel _ = False

-- | Whether the label points to a constructor info table
isConInfoTableLabel :: CLabel -> Bool
isConInfoTableLabel (IdLabel _ _ ConInfoTable {})    = True
isConInfoTableLabel _                             = False

-- Constructing Large*Labels
mkBitmapLabel :: Unique -> CLabel mkBitmapLabel uniq = LargeBitmapLabel uniq -- | Info Table Provenance Entry -- See Note [Mapping Info Tables to Source Positions] data InfoProvEnt = InfoProvEnt { infoTablePtr :: !CLabel -- Address of the info table , infoProvEntClosureType :: !Int -- The closure type of the info table (from ClosureMacros.h) , infoTableType :: !String -- The rendered Haskell type of the closure the table represents , infoProvModule :: !Module -- Origin module , infoTableProv :: !(Maybe (RealSrcSpan, LexicalFastString)) } -- Position and information about the info table deriving (Eq, Ord) instance OutputableP Platform InfoProvEnt where pdoc platform (InfoProvEnt clabel _ _ _ _) = pdoc platform clabel -- Constructing Cost Center Labels mkCCLabel :: CostCentre -> CLabel mkCCSLabel :: CostCentreStack -> CLabel mkIPELabel :: Module -> CLabel mkCCLabel cc = CC_Label cc mkCCSLabel ccs = CCS_Label ccs mkIPELabel mod = ModuleLabel mod MLK_IPEBuffer mkRtsApFastLabel :: FastString -> CLabel mkRtsApFastLabel str = RtsLabel (RtsApFast (NonDetFastString str)) mkRtsSlowFastTickyCtrLabel :: String -> CLabel mkRtsSlowFastTickyCtrLabel pat = RtsLabel (RtsSlowFastTickyCtr pat) -- | A standard string unpacking thunk. See Note [unpack_cstring closures] in -- StgStdThunks.cmm. 
mkRtsUnpackCStringLabel, mkRtsUnpackCStringUtf8Label :: CLabel mkRtsUnpackCStringLabel = RtsLabel RtsUnpackCStringInfoTable mkRtsUnpackCStringUtf8Label = RtsLabel RtsUnpackCStringUtf8InfoTable -- Constructing Code Coverage Labels mkHpcTicksLabel :: Module -> CLabel mkHpcTicksLabel = HpcTicksLabel -- Constructing labels used for dynamic linking mkDynamicLinkerLabel :: DynamicLinkerLabelInfo -> CLabel -> CLabel mkDynamicLinkerLabel = DynamicLinkerLabel dynamicLinkerLabelInfo :: CLabel -> Maybe (DynamicLinkerLabelInfo, CLabel) dynamicLinkerLabelInfo (DynamicLinkerLabel info lbl) = Just (info, lbl) dynamicLinkerLabelInfo _ = Nothing mkPicBaseLabel :: CLabel mkPicBaseLabel = PicBaseLabel -- Constructing miscellaneous other labels mkDeadStripPreventer :: CLabel -> CLabel mkDeadStripPreventer lbl = DeadStripPreventer lbl mkStringLitLabel :: Unique -> CLabel mkStringLitLabel = StringLitLabel mkInitializerStubLabel :: Module -> FastString -> CLabel mkInitializerStubLabel mod s = ModuleLabel mod (MLK_Initializer (LexicalFastString s)) mkInitializerArrayLabel :: Module -> CLabel mkInitializerArrayLabel mod = ModuleLabel mod MLK_InitializerArray mkFinalizerStubLabel :: Module -> FastString -> CLabel mkFinalizerStubLabel mod s = ModuleLabel mod (MLK_Finalizer (LexicalFastString s)) mkFinalizerArrayLabel :: Module -> CLabel mkFinalizerArrayLabel mod = ModuleLabel mod MLK_FinalizerArray mkAsmTempLabel :: Uniquable a => a -> CLabel mkAsmTempLabel a = AsmTempLabel (getUnique a) mkAsmTempDerivedLabel :: CLabel -> FastString -> CLabel mkAsmTempDerivedLabel = AsmTempDerivedLabel mkAsmTempEndLabel :: CLabel -> CLabel mkAsmTempEndLabel l = mkAsmTempDerivedLabel l (fsLit "_end") -- | A label indicating the end of a procedure. mkAsmTempProcEndLabel :: CLabel -> CLabel mkAsmTempProcEndLabel l = mkAsmTempDerivedLabel l (fsLit "_proc_end") -- | Construct a label for a DWARF Debug Information Entity (DIE) -- describing another symbol. 
mkAsmTempDieLabel :: CLabel -> CLabel
mkAsmTempDieLabel l = mkAsmTempDerivedLabel l (fsLit "_die")

-- -----------------------------------------------------------------------------
-- Convert between different kinds of label

-- | Convert an Id/Cmm label into the corresponding static-closure label.
-- Panics (with debug output) on any other label kind.
toClosureLbl :: Platform -> CLabel -> CLabel
toClosureLbl platform lbl = case lbl of
   IdLabel n c _        -> IdLabel n c Closure
   CmmLabel m ext str _ -> CmmLabel m ext str CmmClosure
   _                    -> pprPanic "toClosureLbl" (pprDebugCLabel platform lbl)

-- | Convert an Id label into its slow-entry label.
-- Proc-point block info tables have no slow entry, hence the panic.
toSlowEntryLbl :: Platform -> CLabel -> CLabel
toSlowEntryLbl platform lbl = case lbl of
   IdLabel n _ BlockInfoTable -> pprPanic "toSlowEntryLbl" (ppr n)
   IdLabel n c _              -> IdLabel n c Slow
   _                          -> pprPanic "toSlowEntryLbl" (pprDebugCLabel platform lbl)

-- | Convert an info-table label into the corresponding entry label.
-- NB: the equations are order-sensitive; the specific IdLabel cases must
-- precede the catch-all @IdLabel n c _@ case.
toEntryLbl :: Platform -> CLabel -> CLabel
toEntryLbl platform lbl = case lbl of
   IdLabel n c LocalInfoTable    -> IdLabel n c LocalEntry
   IdLabel n c (ConInfoTable k)  -> IdLabel n c (ConEntry k)
   IdLabel n _ BlockInfoTable    -> mkLocalBlockLabel (nameUnique n)
                   -- See Note [Proc-point local block entry-points].
   IdLabel n c _                 -> IdLabel n c Entry
   CmmLabel m ext str CmmInfo    -> CmmLabel m ext str CmmEntry
   CmmLabel m ext str CmmRetInfo -> CmmLabel m ext str CmmRet
   _                             -> pprPanic "toEntryLbl" (pprDebugCLabel platform lbl)

-- | Generate a CmmProc delimiter label from the actual entry label.
--
-- This delimiter label might be the entry label itself, except when the entry
-- label is a LocalBlockLabel. If we reused the entry label to delimit the proc,
-- we would generate redundant labels (see #22792)
toProcDelimiterLbl :: CLabel -> CLabel
toProcDelimiterLbl lbl = case lbl of
  LocalBlockLabel {} -> mkAsmTempDerivedLabel lbl (fsLit "_entry")
  _                  -> lbl

-- | Convert an entry label into the corresponding info-table label.
-- Inverse of 'toEntryLbl'; equations are order-sensitive as above.
toInfoLbl :: Platform -> CLabel -> CLabel
toInfoLbl platform lbl = case lbl of
   IdLabel n c LocalEntry      -> IdLabel n c LocalInfoTable
   IdLabel n c (ConEntry k)    -> IdLabel n c (ConInfoTable k)
   IdLabel n c _               -> IdLabel n c InfoTable
   CmmLabel m ext str CmmEntry -> CmmLabel m ext str CmmInfo
   CmmLabel m ext str CmmRet   -> CmmLabel m ext str CmmRetInfo
   _                           -> pprPanic "CLabel.toInfoLbl" (pprDebugCLabel platform lbl)

-- | The Haskell 'Name' behind an Id label, if any.
hasHaskellName :: CLabel -> Maybe Name
hasHaskellName (IdLabel n _ _) = Just n
hasHaskellName _               = Nothing

-- | The 'IdLabelInfo' of an Id label, if any.
hasIdLabelInfo :: CLabel -> Maybe IdLabelInfo
hasIdLabelInfo (IdLabel _ _ l) = Just l
hasIdLabelInfo _ = Nothing

-- -----------------------------------------------------------------------------
-- Does a CLabel's referent itself refer to a CAF?
-- NB: equations are order-sensitive; the ticky case must come before the
-- general MayHaveCafRefs case.
hasCAF :: CLabel -> Bool
hasCAF (IdLabel _ _ (IdTickyInfo TickyRednCounts)) = False -- See Note [ticky for LNE]
hasCAF (IdLabel _ MayHaveCafRefs _)                = True
hasCAF (RtsLabel RtsUnpackCStringInfoTable)        = True
hasCAF (RtsLabel RtsUnpackCStringUtf8InfoTable)    = True
  -- The info table stg_MK_STRING_info is for thunks
hasCAF _                                           = False

-- Note [ticky for LNE]
-- ~~~~~~~~~~~~~~~~~~~~~
-- Until 14 Feb 2013, every ticky counter was associated with a
-- closure. Thus, ticky labels used IdLabel. It is odd that
-- GHC.Cmm.Info.Build.cafTransfers would consider such a ticky label
-- reason to add the name to the CAFEnv (and thus eventually the SRT),
-- but it was harmless because the ticky was only used if the closure
-- was also.
--
-- Since we now have ticky counters for LNEs, it is no longer the case
-- that every ticky counter has an actual closure. So I changed the
-- generation of ticky counters' CLabels to not result in their
-- associated id ending up in the SRT.
--
-- NB IdLabel is still appropriate for ticky ids (as opposed to
-- CmmLabel) because the LNE's counter is still related to an .hs Id,
-- that Id just isn't for a proper closure.

-- -----------------------------------------------------------------------------
-- Does a CLabel need declaring before use or not?
--
-- See wiki:commentary/compiler/backends/ppr-c#prototypes

needsCDecl :: CLabel -> Bool
  -- False <=> it's pre-declared; don't bother
  -- don't bother declaring Bitmap labels, we always make sure
  -- they are defined before use.
needsCDecl (SRTLabel _)                 = True
needsCDecl (LargeBitmapLabel _)         = False
needsCDecl (IdLabel _ _ _)              = True
needsCDecl (LocalBlockLabel _)          = True

needsCDecl (StringLitLabel _)           = False
needsCDecl (AsmTempLabel _)             = False
needsCDecl (AsmTempDerivedLabel _ _)    = False
needsCDecl (RtsLabel _)                 = False

needsCDecl (CmmLabel pkgId (NeedExternDecl external) _ _)
        -- local labels mustn't have it
        | not external                  = False

        -- Prototypes for labels defined in the runtime system are imported
        --      into HC files via rts/include/Stg.h.
        | pkgId == rtsUnitId            = False

        -- For other labels we inline one into the HC file directly.
        | otherwise                     = True

needsCDecl l@(ForeignLabel{})           = not (isMathFun l)
needsCDecl (CC_Label _)                 = True
needsCDecl (CCS_Label _)                = True
needsCDecl (IPE_Label {})               = True
needsCDecl (ModuleLabel _ kind)         = modLabelNeedsCDecl kind
needsCDecl (HpcTicksLabel _)            = True
needsCDecl (DynamicLinkerLabel {})      = panic "needsCDecl DynamicLinkerLabel"
needsCDecl PicBaseLabel                 = panic "needsCDecl PicBaseLabel"
needsCDecl (DeadStripPreventer {})      = panic "needsCDecl DeadStripPreventer"

modLabelNeedsCDecl :: ModuleLabelKind -> Bool
-- Code for finalizers and initializers are emitted in stub objects
modLabelNeedsCDecl (MLK_Initializer _)  = True
modLabelNeedsCDecl (MLK_Finalizer _)    = True
modLabelNeedsCDecl MLK_IPEBuffer        = True
-- The finalizer and initializer arrays are emitted in the code of the module
modLabelNeedsCDecl MLK_InitializerArray = False
modLabelNeedsCDecl MLK_FinalizerArray   = False

-- | If a label is a local block label then return just its 'BlockId', otherwise
-- 'Nothing'.
maybeLocalBlockLabel :: CLabel -> Maybe BlockId
maybeLocalBlockLabel (LocalBlockLabel uq)  = Just $ mkBlockId uq
maybeLocalBlockLabel _                     = Nothing

-- | Check whether a label corresponds to a C function that has
--      a prototype in a system header somewhere, or is built-in
--      to the C compiler. For these labels we avoid generating our
--      own C prototypes.
isMathFun :: CLabel -> Bool
isMathFun (ForeignLabel fs _ _) = fs `elementOfUniqSet` math_funs
isMathFun _ = False

-- | Set of C math functions known to be declared by system headers;
-- referencing labels for these must not emit our own prototypes.
math_funs :: UniqSet FastString
math_funs = mkUniqSet [
        -- _ISOC99_SOURCE
        (fsLit "acos"),         (fsLit "acosf"),        (fsLit "acosh"),
        (fsLit "acoshf"),       (fsLit "acoshl"),       (fsLit "acosl"),
        (fsLit "asin"),         (fsLit "asinf"),        (fsLit "asinl"),
        (fsLit "asinh"),        (fsLit "asinhf"),       (fsLit "asinhl"),
        (fsLit "atan"),         (fsLit "atanf"),        (fsLit "atanl"),
        (fsLit "atan2"),        (fsLit "atan2f"),       (fsLit "atan2l"),
        (fsLit "atanh"),        (fsLit "atanhf"),       (fsLit "atanhl"),
        (fsLit "cbrt"),         (fsLit "cbrtf"),        (fsLit "cbrtl"),
        (fsLit "ceil"),         (fsLit "ceilf"),        (fsLit "ceill"),
        (fsLit "copysign"),     (fsLit "copysignf"),    (fsLit "copysignl"),
        (fsLit "cos"),          (fsLit "cosf"),         (fsLit "cosl"),
        (fsLit "cosh"),         (fsLit "coshf"),        (fsLit "coshl"),
        (fsLit "erf"),          (fsLit "erff"),         (fsLit "erfl"),
        (fsLit "erfc"),         (fsLit "erfcf"),        (fsLit "erfcl"),
        (fsLit "exp"),          (fsLit "expf"),         (fsLit "expl"),
        (fsLit "exp2"),         (fsLit "exp2f"),        (fsLit "exp2l"),
        (fsLit "expm1"),        (fsLit "expm1f"),       (fsLit "expm1l"),
        (fsLit "fabs"),         (fsLit "fabsf"),        (fsLit "fabsl"),
        (fsLit "fdim"),         (fsLit "fdimf"),        (fsLit "fdiml"),
        (fsLit "floor"),        (fsLit "floorf"),       (fsLit "floorl"),
        (fsLit "fma"),          (fsLit "fmaf"),         (fsLit "fmal"),
        (fsLit "fmax"),         (fsLit "fmaxf"),        (fsLit "fmaxl"),
        (fsLit "fmin"),         (fsLit "fminf"),        (fsLit "fminl"),
        (fsLit "fmod"),         (fsLit "fmodf"),        (fsLit "fmodl"),
        (fsLit "frexp"),        (fsLit "frexpf"),       (fsLit "frexpl"),
        (fsLit "hypot"),        (fsLit "hypotf"),       (fsLit "hypotl"),
        (fsLit "ilogb"),        (fsLit "ilogbf"),       (fsLit "ilogbl"),
        (fsLit "ldexp"),        (fsLit "ldexpf"),       (fsLit "ldexpl"),
        (fsLit "lgamma"),       (fsLit "lgammaf"),      (fsLit "lgammal"),
        (fsLit "llrint"),       (fsLit "llrintf"),      (fsLit "llrintl"),
        (fsLit "llround"),      (fsLit "llroundf"),     (fsLit "llroundl"),
        (fsLit "log"),          (fsLit "logf"),         (fsLit "logl"),
        (fsLit "log10l"),       (fsLit "log10"),        (fsLit "log10f"),
        (fsLit "log1pl"),       (fsLit "log1p"),        (fsLit "log1pf"),
        (fsLit "log2"),         (fsLit "log2f"),        (fsLit "log2l"),
        (fsLit "logb"),         (fsLit "logbf"),        (fsLit "logbl"),
        (fsLit "lrint"),        (fsLit "lrintf"),       (fsLit "lrintl"),
        (fsLit "lround"),       (fsLit "lroundf"),      (fsLit "lroundl"),
        (fsLit "modf"),         (fsLit "modff"),        (fsLit "modfl"),
        (fsLit "nan"),          (fsLit "nanf"),         (fsLit "nanl"),
        (fsLit "nearbyint"),    (fsLit "nearbyintf"),   (fsLit "nearbyintl"),
        (fsLit "nextafter"),    (fsLit "nextafterf"),   (fsLit "nextafterl"),
        (fsLit "nexttoward"),   (fsLit "nexttowardf"),  (fsLit "nexttowardl"),
        (fsLit "pow"),          (fsLit "powf"),         (fsLit "powl"),
        (fsLit "remainder"),    (fsLit "remainderf"),   (fsLit "remainderl"),
        (fsLit "remquo"),       (fsLit "remquof"),      (fsLit "remquol"),
        (fsLit "rint"),         (fsLit "rintf"),        (fsLit "rintl"),
        (fsLit "round"),        (fsLit "roundf"),       (fsLit "roundl"),
        (fsLit "scalbln"),      (fsLit "scalblnf"),     (fsLit "scalblnl"),
        (fsLit "scalbn"),       (fsLit "scalbnf"),      (fsLit "scalbnl"),
        (fsLit "sin"),          (fsLit "sinf"),         (fsLit "sinl"),
        (fsLit "sinh"),         (fsLit "sinhf"),        (fsLit "sinhl"),
        (fsLit "sqrt"),         (fsLit "sqrtf"),        (fsLit "sqrtl"),
        (fsLit "tan"),          (fsLit "tanf"),         (fsLit "tanl"),
        (fsLit "tanh"),         (fsLit "tanhf"),        (fsLit "tanhl"),
        (fsLit "tgamma"),       (fsLit "tgammaf"),      (fsLit "tgammal"),
        (fsLit "trunc"),        (fsLit "truncf"),       (fsLit "truncl"),
        -- ISO C 99 also defines these function-like macros in math.h:
        -- fpclassify, isfinite, isinf, isnormal, signbit, isgreater,
        -- isgreaterequal, isless, islessequal, islessgreater, isunordered

        -- additional symbols from _BSD_SOURCE
        (fsLit "drem"),         (fsLit "dremf"),        (fsLit "dreml"),
        (fsLit "finite"),       (fsLit "finitef"),      (fsLit "finitel"),
        (fsLit "gamma"),        (fsLit "gammaf"),       (fsLit "gammal"),
        (fsLit "isinf"),        (fsLit "isinff"),       (fsLit "isinfl"),
        (fsLit "isnan"),        (fsLit "isnanf"),       (fsLit "isnanl"),
        (fsLit "j0"),           (fsLit "j0f"),          (fsLit "j0l"),
        (fsLit "j1"),           (fsLit "j1f"),          (fsLit "j1l"),
        (fsLit "jn"),           (fsLit "jnf"),          (fsLit "jnl"),
        (fsLit "lgamma_r"),     (fsLit "lgammaf_r"),    (fsLit "lgammal_r"),
        (fsLit "scalb"),        (fsLit "scalbf"),       (fsLit "scalbl"),
        (fsLit "significand"),  (fsLit "significandf"), (fsLit "significandl"),
        (fsLit "y0"),           (fsLit "y0f"),          (fsLit "y0l"),
        (fsLit "y1"),           (fsLit "y1f"),          (fsLit "y1l"),
        (fsLit "yn"),           (fsLit "ynf"),          (fsLit "ynl"),

        -- These functions are described in IEEE Std 754-2008 -
        -- Standard for Floating-Point Arithmetic and ISO/IEC TS 18661
        (fsLit "nextup"),       (fsLit "nextupf"),      (fsLit "nextupl"),
        (fsLit "nextdown"),     (fsLit "nextdownf"),    (fsLit "nextdownl")
    ]

-- -----------------------------------------------------------------------------
-- | Is a CLabel visible outside this object file or not?
--      From the point of view of the code generator, a name is
--      externally visible if it has to be declared as exported
--      in the .o file's symbol table; that is, made non-static.
externallyVisibleCLabel :: CLabel -> Bool -- not C "static"
externallyVisibleCLabel (StringLitLabel _)      = False
externallyVisibleCLabel (AsmTempLabel _)        = False
externallyVisibleCLabel (AsmTempDerivedLabel _ _)= False
externallyVisibleCLabel (RtsLabel _)            = True
externallyVisibleCLabel (LocalBlockLabel _)     = False
externallyVisibleCLabel (CmmLabel _ _ _ _)      = True
externallyVisibleCLabel (ForeignLabel{})        = True
externallyVisibleCLabel (IdLabel name _ info)   = isExternalName name && externallyVisibleIdLabel info
externallyVisibleCLabel (CC_Label _)            = True
externallyVisibleCLabel (CCS_Label _)           = True
externallyVisibleCLabel (IPE_Label {})          = True
externallyVisibleCLabel (ModuleLabel {})        = True
externallyVisibleCLabel (DynamicLinkerLabel _ _)  = False
externallyVisibleCLabel (HpcTicksLabel _)       = True
externallyVisibleCLabel (LargeBitmapLabel _)    = False
externallyVisibleCLabel (SRTLabel _)            = False
externallyVisibleCLabel (PicBaseLabel {}) = panic "externallyVisibleCLabel PicBaseLabel"
externallyVisibleCLabel (DeadStripPreventer {}) = panic "externallyVisibleCLabel DeadStripPreventer"

externallyVisibleIdLabel :: IdLabelInfo -> Bool
externallyVisibleIdLabel LocalInfoTable  = False
externallyVisibleIdLabel LocalEntry      = False
externallyVisibleIdLabel BlockInfoTable  = False
externallyVisibleIdLabel _               = True
-- -----------------------------------------------------------------------------
-- Finding the "type" of a CLabel

-- For generating correct types in label declarations:

data CLabelType
  = CodeLabel   -- Address of some executable instructions
  | DataLabel   -- Address of data, not a GC ptr
  | GcPtrLabel  -- Address of a (presumably static) GC object

isCFunctionLabel :: CLabel -> Bool
isCFunctionLabel lbl = case labelType lbl of
                        CodeLabel -> True
                        _other    -> False

isGcPtrLabel :: CLabel -> Bool
isGcPtrLabel lbl = case labelType lbl of
                        GcPtrLabel -> True
                        _other     -> False

-- | Work out the general type of data at the address of this label
--    whether it be code, data, or static GC object.
labelType :: CLabel -> CLabelType
labelType (IdLabel _ _ info)                    = idInfoLabelType info
labelType (CmmLabel _ _ _ CmmData)              = DataLabel
labelType (CmmLabel _ _ _ CmmClosure)           = GcPtrLabel
labelType (CmmLabel _ _ _ CmmCode)              = CodeLabel
labelType (CmmLabel _ _ _ CmmInfo)              = DataLabel
labelType (CmmLabel _ _ _ CmmEntry)             = CodeLabel
labelType (CmmLabel _ _ _ CmmPrimCall)          = CodeLabel
labelType (CmmLabel _ _ _ CmmRetInfo)           = DataLabel
labelType (CmmLabel _ _ _ CmmRet)               = CodeLabel
labelType (RtsLabel (RtsSelectorInfoTable _ _)) = DataLabel
labelType (RtsLabel (RtsSelectorEntry _ _))     = CodeLabel
labelType (RtsLabel (RtsApInfoTable _ _))       = DataLabel
labelType (RtsLabel (RtsApEntry _ _))           = CodeLabel
labelType (RtsLabel (RtsApFast _))              = CodeLabel
labelType (RtsLabel RtsUnpackCStringInfoTable)  = DataLabel
labelType (RtsLabel RtsUnpackCStringUtf8InfoTable) = DataLabel
labelType (RtsLabel (RtsPrimOp _))              = CodeLabel
labelType (RtsLabel (RtsSlowFastTickyCtr _))    = DataLabel
labelType (LocalBlockLabel _)                   = CodeLabel
labelType (SRTLabel _)                          = DataLabel
labelType (ForeignLabel _ _ IsFunction)         = CodeLabel
labelType (ForeignLabel _ _ IsData)             = DataLabel
labelType (AsmTempLabel _)                      = panic "labelType(AsmTempLabel)"
labelType (AsmTempDerivedLabel _ _)             = panic "labelType(AsmTempDerivedLabel)"
labelType (StringLitLabel _)                    = DataLabel
labelType (CC_Label _)                          = DataLabel
labelType (CCS_Label _)                         = DataLabel
labelType (IPE_Label {})                        = DataLabel
labelType (ModuleLabel _ kind)                  = moduleLabelKindType kind
labelType (DynamicLinkerLabel _ _)              = DataLabel -- Is this right?
labelType PicBaseLabel                          = DataLabel
labelType (DeadStripPreventer _)                = DataLabel
labelType (HpcTicksLabel _)                     = DataLabel
labelType (LargeBitmapLabel _)                  = DataLabel

moduleLabelKindType :: ModuleLabelKind -> CLabelType
moduleLabelKindType kind =
  case kind of
    MLK_Initializer _    -> CodeLabel
    MLK_InitializerArray -> DataLabel
    MLK_Finalizer _      -> CodeLabel
    MLK_FinalizerArray   -> DataLabel
    MLK_IPEBuffer        -> DataLabel

-- | Label type for an Id label; the wildcard covers the entry-point
-- kinds (Entry, Slow, LocalEntry, ConEntry), so keep it last.
idInfoLabelType :: IdLabelInfo -> CLabelType
idInfoLabelType info =
  case info of
    InfoTable     -> DataLabel
    LocalInfoTable -> DataLabel
    BlockInfoTable -> DataLabel
    Closure       -> GcPtrLabel
    ConInfoTable {} -> DataLabel
    ClosureTable  -> DataLabel
    IdTickyInfo{} -> DataLabel
    Bytes         -> DataLabel
    _             -> CodeLabel

-- -----------------------------------------------------------------------------

-- | Is a 'CLabel' defined in the current module being compiled?
--
-- Sometimes we can optimise references within a compilation unit in ways that
-- we couldn't for inter-module references. This provides a conservative
-- estimate of whether a 'CLabel' lives in the current module.
isLocalCLabel :: Module -> CLabel -> Bool
isLocalCLabel this_mod lbl =
  case lbl of
    IdLabel name _ _
      | isInternalName name -> True
      | otherwise           -> nameModule name == this_mod
    LocalBlockLabel _       -> True
    _                       -> False

-- -----------------------------------------------------------------------------

-- | Does a 'CLabel' need dynamic linkage?
--
-- When referring to data in code, we need to know whether
-- that data resides in a DLL or not. [Win32 only.]
-- @labelDynamic@ returns @True@ if the label is located
-- in a DLL, be it a data reference or not.
labelDynamic :: Module -> Platform -> Bool -> CLabel -> Bool labelDynamic this_mod platform external_dynamic_refs lbl = case lbl of -- is the RTS in a DLL or not? RtsLabel _ -> external_dynamic_refs && (this_unit /= rtsUnitId) IdLabel n _ _ -> external_dynamic_refs && isDynLinkName platform this_mod n -- When compiling in the "dyn" way, each package is to be linked into -- its own shared library. CmmLabel lbl_unit _ _ _ | os == OSMinGW32 -> external_dynamic_refs && (this_unit /= lbl_unit) | otherwise -> external_dynamic_refs LocalBlockLabel _ -> False ForeignLabel _ source _ -> if os == OSMinGW32 then case source of -- Foreign label is in some un-named foreign package (or DLL). ForeignLabelInExternalPackage -> True -- Foreign label is linked into the same package as the -- source file currently being compiled. ForeignLabelInThisPackage -> False -- Foreign label is in some named package. -- When compiling in the "dyn" way, each package is to be -- linked into its own DLL. ForeignLabelInPackage pkgId -> external_dynamic_refs && (this_unit /= pkgId) else -- On Mac OS X and on ELF platforms, false positives are OK, -- so we claim that all foreign imports come from dynamic -- libraries True CC_Label cc -> external_dynamic_refs && not (ccFromThisModule cc this_mod) -- CCS_Label always contains a CostCentre defined in the current module CCS_Label _ -> False IPE_Label {} -> True HpcTicksLabel m -> external_dynamic_refs && this_mod /= m -- Note that DynamicLinkerLabels do NOT require dynamic linking themselves. _ -> False where os = platformOS platform this_unit = toUnitId (moduleUnit this_mod) ----------------------------------------------------------------------------- -- Printing out CLabels. {- Convention: _ where is _ for external names and for internal names. 
is one of the following:

         info                   Info table
         srt                    Static reference table
         entry                  Entry code (function, closure)
         slow                   Slow entry code (if any)
         ret                    Direct return address
         vtbl                   Vector table
         <n>_alt                Case alternative (tag n)
         dflt                   Default case alternative
         btm                    Large bitmap vector
         closure                Static closure
         con_entry              Dynamic Constructor entry code
         con_info               Dynamic Constructor info table
         static_entry           Static Constructor entry code
         static_info            Static Constructor info table
         sel_info               Selector info table
         sel_entry              Selector entry code
         cc                     Cost centre
         ccs                    Cost centre stack

   Many of these distinctions are only for documentation reasons.  For
   example, _ret is only distinguished from _entry to make it easy to
   tell whether a code fragment is a return point or a closure/function
   entry.

Note [Closure and info labels]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function 'foo', we have:

   foo_info    : Points to the info table describing foo's closure
                 (and entry code for foo with tables next to code)
   foo_closure : Static (no-free-var) closure only:
                 points to the statically-allocated closure

For a data constructor (such as Just or Nothing), we have:

    Just_con_info:       Info table for the data constructor itself;
                         the first word of a heap-allocated Just
    Just_info:           Info table for the *worker function*, an
                         ordinary Haskell function of arity 1 that
                         allocates a (Just x) box:
                            Just = \x -> Just x
    Just_entry:          The entry code for the worker function
    Just_closure:        The closure for this worker

    Nothing_closure:     a statically allocated closure for Nothing
    Nothing_static_info: info table for Nothing_closure

All of these must be exported symbols, EXCEPT Just_info.  We don't need to
export this because in other modules we either have

* A reference to 'Just'; use Just_closure
* A saturated call 'Just x'; allocate using Just_con_info

Not exporting these Just_info labels reduces the number of symbols somewhat.
Note [Bytes label] ~~~~~~~~~~~~~~~~~~ For a top-level string literal 'foo', we have just one symbol 'foo_bytes', which points to a static data block containing the content of the literal. Note [Proc-point local block entry-points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A label for a proc-point local block entry-point has no "_entry" suffix. With `infoTblLbl` we derive an info table label from a proc-point block ID. If we convert such an info table label into an entry label we must produce the label without an "_entry" suffix. So an info table label records the fact that it was derived from a block ID in `IdLabelInfo` as `BlockInfoTable`. The info table label and the local block label are both local labels and are not externally visible. Note [Bangs in CLabel] ~~~~~~~~~~~~~~~~~~~~~~ There are some carefully placed strictness annotations in this module, which were discovered in !5226 to significantly reduce compile-time allocation. Take care if you want to remove them! -} -- | Style of label pretty-printing. -- -- When we produce C sources or headers, we have to take into account -- that C compilers transform C labels when they convert them into -- symbols. For example, they can add prefixes (e.g., "_" on Darwin). -- So we provide two ways to pretty-print CLabels: C style or Asm -- style. 
-- data LabelStyle = CStyle -- ^ C label style (used by C and LLVM backends) | AsmStyle -- ^ Asm label style (used by NCG backend) pprAsmLabel :: IsLine doc => Platform -> CLabel -> doc pprAsmLabel platform lbl = pprCLabelStyle platform AsmStyle lbl {-# SPECIALIZE pprAsmLabel :: Platform -> CLabel -> SDoc #-} {-# SPECIALIZE pprAsmLabel :: Platform -> CLabel -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable pprCLabel :: IsLine doc => Platform -> CLabel -> doc pprCLabel platform lbl = pprCLabelStyle platform CStyle lbl {-# SPECIALIZE pprCLabel :: Platform -> CLabel -> SDoc #-} {-# SPECIALIZE pprCLabel :: Platform -> CLabel -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable instance OutputableP Platform CLabel where {-# INLINE pdoc #-} -- see Note [Bangs in CLabel] pdoc !platform lbl = getPprStyle $ \pp_sty -> case pp_sty of PprDump{} -> pprCLabel platform lbl _ -> let lbl_doc = (pprCLabel platform lbl) in pprTraceUserWarning (text "Labels in code should be printed with pprCLabel or pprAsmLabel" <> lbl_doc) lbl_doc pprCLabelStyle :: forall doc. IsLine doc => Platform -> LabelStyle -> CLabel -> doc pprCLabelStyle !platform !sty lbl = -- see Note [Bangs in CLabel] let !use_leading_underscores = platformLeadingUnderscore platform -- some platform (e.g. Darwin) require a leading "_" for exported asm -- symbols maybe_underscore :: doc -> doc maybe_underscore doc = case sty of AsmStyle | use_leading_underscores -> pp_cSEP <> doc _ -> doc tempLabelPrefixOrUnderscore :: doc tempLabelPrefixOrUnderscore = case sty of AsmStyle -> asmTempLabelPrefix platform CStyle -> char '_' in case lbl of LocalBlockLabel u -> case sty of AsmStyle -> tempLabelPrefixOrUnderscore <> pprUniqueAlways u CStyle -> tempLabelPrefixOrUnderscore <> text "blk_" <> pprUniqueAlways u AsmTempLabel u -> tempLabelPrefixOrUnderscore <> pprUniqueAlways u AsmTempDerivedLabel l suf -- we print a derived label, so we just print the parent label -- recursively. 
However we don't want to print the temp prefix (e.g. -- ".L") twice, so we must explicitely handle these cases. -> let skipTempPrefix = \case AsmTempLabel u -> pprUniqueAlways u AsmTempDerivedLabel l suf -> skipTempPrefix l <> ftext suf LocalBlockLabel u -> pprUniqueAlways u lbl -> pprAsmLabel platform lbl in asmTempLabelPrefix platform <> skipTempPrefix l <> ftext suf DynamicLinkerLabel info lbl -> pprDynamicLinkerAsmLabel platform info (pprAsmLabel platform lbl) PicBaseLabel -> text "1b" DeadStripPreventer lbl -> {- `lbl` can be temp one but we need to ensure that dsp label will stay in the final binary so we prepend non-temp prefix ("dsp_") and optional `_` (underscore) because this is how you mark non-temp symbols on some platforms (Darwin) -} maybe_underscore $ text "dsp_" <> pprCLabelStyle platform sty lbl <> text "_dsp" StringLitLabel u -> maybe_underscore $ pprUniqueAlways u <> text "_str" ForeignLabel fs _ _ -> maybe_underscore $ ftext fs IdLabel name _cafs flavor -> case sty of AsmStyle -> maybe_underscore $ internalNamePrefix <> pprName name <> ppIdFlavor flavor where isRandomGenerated = not (isExternalName name) internalNamePrefix = if isRandomGenerated then asmTempLabelPrefix platform else empty CStyle -> pprName name <> ppIdFlavor flavor SRTLabel u -> maybe_underscore $ tempLabelPrefixOrUnderscore <> pprUniqueAlways u <> pp_cSEP <> text "srt" RtsLabel (RtsApFast (NonDetFastString str)) -> maybe_underscore $ ftext str <> text "_fast" RtsLabel (RtsSelectorInfoTable upd_reqd offset) -> maybe_underscore $ hcat [ text "stg_sel_", int offset , if upd_reqd then text "_upd_info" else text "_noupd_info" ] RtsLabel (RtsSelectorEntry upd_reqd offset) -> maybe_underscore $ hcat [ text "stg_sel_", int offset , if upd_reqd then text "_upd_entry" else text "_noupd_entry" ] RtsLabel (RtsApInfoTable upd_reqd arity) -> maybe_underscore $ hcat [ text "stg_ap_", int arity , if upd_reqd then text "_upd_info" else text "_noupd_info" ] RtsLabel (RtsApEntry upd_reqd arity) 
-> maybe_underscore $ hcat [ text "stg_ap_", int arity , if upd_reqd then text "_upd_entry" else text "_noupd_entry" ] RtsLabel (RtsPrimOp primop) -> maybe_underscore $ text "stg_" <> pprPrimOp primop RtsLabel (RtsSlowFastTickyCtr pat) -> maybe_underscore $ text "SLOW_CALL_fast_" <> text pat <> text "_ctr" RtsLabel RtsUnpackCStringInfoTable -> maybe_underscore $ text "stg_unpack_cstring_info" RtsLabel RtsUnpackCStringUtf8InfoTable -> maybe_underscore $ text "stg_unpack_cstring_utf8_info" LargeBitmapLabel u -> maybe_underscore $ tempLabelPrefixOrUnderscore <> char 'b' <> pprUniqueAlways u <> pp_cSEP <> text "btm" -- Some bitmaps for tuple constructors have a numeric tag (e.g. '7') -- until that gets resolved we'll just force them to start -- with a letter so the label will be legal assembly code. HpcTicksLabel mod -> maybe_underscore $ text "_hpc_tickboxes_" <> pprModule mod <> text "_hpc" CC_Label cc -> maybe_underscore $ pprCostCentre cc CCS_Label ccs -> maybe_underscore $ pprCostCentreStack ccs IPE_Label (InfoProvEnt l _ _ m _) -> maybe_underscore $ (pprCLabel platform l <> text "_" <> pprModule m <> text "_ipe") ModuleLabel mod kind -> maybe_underscore $ pprModule mod <> text "_" <> pprModuleLabelKind kind CmmLabel _ _ fs CmmCode -> maybe_underscore $ ftext fs CmmLabel _ _ fs CmmData -> maybe_underscore $ ftext fs CmmLabel _ _ fs CmmPrimCall -> maybe_underscore $ ftext fs CmmLabel _ _ fs CmmInfo -> maybe_underscore $ ftext fs <> text "_info" CmmLabel _ _ fs CmmEntry -> maybe_underscore $ ftext fs <> text "_entry" CmmLabel _ _ fs CmmRetInfo -> maybe_underscore $ ftext fs <> text "_info" CmmLabel _ _ fs CmmRet -> maybe_underscore $ ftext fs <> text "_ret" CmmLabel _ _ fs CmmClosure -> maybe_underscore $ ftext fs <> text "_closure" {-# SPECIALIZE pprCLabelStyle :: Platform -> LabelStyle -> CLabel -> SDoc #-} {-# SPECIALIZE pprCLabelStyle :: Platform -> LabelStyle -> CLabel -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- Note [Internal proc 
labels] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Some tools (e.g. the `perf` utility on Linux) rely on the symbol table -- for resolution of function names. To help these tools we provide the -- (enabled by default) -fexpose-all-symbols flag which causes GHC to produce -- symbols even for symbols with are internal to a module (although such -- symbols will have only local linkage). -- -- Note that these labels are *not* referred to by code. They are strictly for -- diagnostics purposes. -- -- To avoid confusion, it is desirable to add a module-qualifier to the -- symbol name. However, the Name type's Internal constructor doesn't carry -- knowledge of the current Module. Consequently, we have to pass this around -- explicitly. -- | Generate a label for a procedure internal to a module (if -- 'Opt_ExposeAllSymbols' is enabled). -- See Note [Internal proc labels]. ppInternalProcLabel :: IsLine doc => Module -- ^ the current module -> CLabel -> Maybe doc -- ^ the internal proc label ppInternalProcLabel this_mod (IdLabel nm _ flavour) | isInternalName nm = Just $ text "_" <> pprModule this_mod <> char '_' <> ztext (zEncodeFS (occNameFS (occName nm))) <> char '_' <> pprUniqueAlways (getUnique nm) <> ppIdFlavor flavour ppInternalProcLabel _ _ = Nothing {-# SPECIALIZE ppInternalProcLabel :: Module -> CLabel -> Maybe SDoc #-} {-# SPECIALIZE ppInternalProcLabel :: Module -> CLabel -> Maybe HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable ppIdFlavor :: IsLine doc => IdLabelInfo -> doc ppIdFlavor x = pp_cSEP <> case x of Closure -> text "closure" InfoTable -> text "info" LocalInfoTable -> text "info" Entry -> text "entry" LocalEntry -> text "entry" Slow -> text "slow" IdTickyInfo TickyRednCounts -> text "ct" IdTickyInfo (TickyInferedTag unique) -> text "ct_inf_tag" <> char '_' <> pprUniqueAlways unique ConEntry loc -> case loc of DefinitionSite -> text "con_entry" UsageSite m n -> pprModule m <> pp_cSEP <> int n <> pp_cSEP <> text "con_entry" ConInfoTable k -> case 
k of DefinitionSite -> text "con_info" UsageSite m n -> pprModule m <> pp_cSEP <> int n <> pp_cSEP <> text "con_info" ClosureTable -> text "closure_tbl" Bytes -> text "bytes" BlockInfoTable -> text "info" pp_cSEP :: IsLine doc => doc pp_cSEP = char '_' instance Outputable ForeignLabelSource where ppr fs = case fs of ForeignLabelInPackage pkgId -> parens $ text "package: " <> ppr pkgId ForeignLabelInThisPackage -> parens $ text "this package" ForeignLabelInExternalPackage -> parens $ text "external package" -- ----------------------------------------------------------------------------- -- Machine-dependent knowledge about labels. asmTempLabelPrefix :: IsLine doc => Platform -> doc -- for formatting labels asmTempLabelPrefix !platform = case platformOS platform of OSDarwin -> text "L" OSAIX -> text "__L" -- follow IBM XL C's convention _ -> text ".L" pprDynamicLinkerAsmLabel :: IsLine doc => Platform -> DynamicLinkerLabelInfo -> doc -> doc pprDynamicLinkerAsmLabel !platform dllInfo ppLbl = case platformOS platform of OSDarwin | platformArch platform == ArchX86_64 -> case dllInfo of CodeStub -> char 'L' <> ppLbl <> text "$stub" SymbolPtr -> char 'L' <> ppLbl <> text "$non_lazy_ptr" GotSymbolPtr -> ppLbl <> text "@GOTPCREL" GotSymbolOffset -> ppLbl | platformArch platform == ArchAArch64 -> ppLbl | otherwise -> panic "pprDynamicLinkerAsmLabel" OSAIX -> case dllInfo of SymbolPtr -> text "LC.." 
<> ppLbl -- GCC's naming convention _ -> panic "pprDynamicLinkerAsmLabel" _ | osElfTarget (platformOS platform) -> elfLabel OSMinGW32 -> case dllInfo of SymbolPtr -> text "__imp_" <> ppLbl _ -> panic "pprDynamicLinkerAsmLabel" _ -> panic "pprDynamicLinkerAsmLabel" where elfLabel | platformArch platform == ArchPPC = case dllInfo of CodeStub -> -- See Note [.LCTOC1 in PPC PIC code] ppLbl <> text "+32768@plt" SymbolPtr -> text ".LC_" <> ppLbl _ -> panic "pprDynamicLinkerAsmLabel" | platformArch platform == ArchAArch64 = ppLbl | platformArch platform == ArchRISCV64 = ppLbl | platformArch platform == ArchX86_64 = case dllInfo of CodeStub -> ppLbl <> text "@plt" GotSymbolPtr -> ppLbl <> text "@gotpcrel" GotSymbolOffset -> ppLbl SymbolPtr -> text ".LC_" <> ppLbl | platformArch platform == ArchPPC_64 ELF_V1 || platformArch platform == ArchPPC_64 ELF_V2 = case dllInfo of GotSymbolPtr -> text ".LC_" <> ppLbl <> text "@toc" GotSymbolOffset -> ppLbl SymbolPtr -> text ".LC_" <> ppLbl _ -> panic "pprDynamicLinkerAsmLabel" | otherwise = case dllInfo of CodeStub -> ppLbl <> text "@plt" SymbolPtr -> text ".LC_" <> ppLbl GotSymbolPtr -> ppLbl <> text "@got" GotSymbolOffset -> ppLbl <> text "@gotoff" -- Figure out whether `symbol` may serve as an alias -- to `target` within one compilation unit. -- -- This is true if any of these holds: -- * `target` is a module-internal haskell name. -- * `target` is an exported name, but comes from the same -- module as `symbol` -- -- These are sufficient conditions for establishing e.g. a -- GNU assembly alias ('.equiv' directive). Sadly, there is -- no such thing as an alias to an imported symbol (conf. -- http://blog.omega-prime.co.uk/2011/07/06/the-sad-state-of-symbol-aliases/) -- See Note [emit-time elimination of static indirections]. -- -- Precondition is that both labels represent the -- same semantic value. 
mayRedirectTo :: CLabel  -- ^ @symbol@: the label that would become the alias
              -> CLabel  -- ^ @target@: the label being aliased to
              -> Bool
mayRedirectTo symbol target
  -- Exported indirectee: we may alias only when both labels come
  -- from the very same module (aliases to imported symbols are
  -- impossible, see the blog post cited above).
  | Just nam <- haskellName
  , staticClosureLabel
  , isExternalName nam
  , Just mod <- nameModule_maybe nam
  , Just anam <- hasHaskellName symbol
  , Just amod <- nameModule_maybe anam
  = amod == mod

  -- Module-internal indirectee: by construction it lives in the same
  -- compilation unit, so aliasing is always permissible.
  | Just nam <- haskellName
  , staticClosureLabel
  , isInternalName nam
  = True

  | otherwise = False
  where
    -- Both predicates are about 'target', the label being redirected to.
    staticClosureLabel = isStaticClosureLabel target
    haskellName = hasHaskellName target

{- Note [emit-time elimination of static indirections]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As described in #15155, certain static values are representationally
equivalent, e.g. 'cast'ed values (when created by 'newtype' wrappers).

  newtype A = A Int
  {-# NOINLINE a #-}
  a = A 42

  a1_rYB :: Int
  [GblId, Caf=NoCafRefs, Unf=OtherCon []]
  a1_rYB = GHC.Types.I# 42#

  a [InlPrag=NOINLINE] :: A
  [GblId, Unf=OtherCon []]
  a = a1_rYB `cast` (Sym (T15155.N:A[0]) :: Int ~R# A)

Formerly we created static indirections for these (IND_STATIC), which
consist of a statically allocated forwarding closure that contains
the (possibly tagged) indirectee. (See CMM/assembly below.)
This approach is suboptimal for two reasons:
  (a) they occupy extra space,
  (b) they need to be entered in order to obtain the indirectee,
      thus they cannot be tagged.

Fortunately there is a common case where static indirections can be
eliminated while emitting assembly (native or LLVM), viz. when the
indirectee is in the same module (object file) as the symbol that
points to it. In this case an assembly-level identification can
be created ('.equiv' directive), and as such the same object will
be assigned two names in the symbol table. Any of the identified
symbols can be referenced by a tagged pointer.

Currently the 'mayRedirectTo' predicate will
give a clue whether a label can be equated with another, already
emitted, label (which can in turn be an alias).
The general mechanics is that we identify data (IND_STATIC closures) that are amenable to aliasing while pretty-printing of assembly output, and emit the '.equiv' directive instead of static data in such a case. Here is a sketch how the output is massaged: Consider newtype A = A Int {-# NOINLINE a #-} a = A 42 -- I# 42# is the indirectee -- 'a' is exported results in STG a1_rXq :: GHC.Types.Int [GblId, Caf=NoCafRefs, Unf=OtherCon []] = CCS_DONT_CARE GHC.Types.I#! [42#]; T15155.a [InlPrag=NOINLINE] :: T15155.A [GblId, Unf=OtherCon []] = CAF_ccs \ u [] a1_rXq; and CMM [section ""data" . a1_rXq_closure" { a1_rXq_closure: const GHC.Types.I#_con_info; const 42; }] [section ""data" . T15155.a_closure" { T15155.a_closure: const stg_IND_STATIC_info; const a1_rXq_closure+1; const 0; const 0; }] The emitted assembly is ==== INDIRECTEE a1_rXq_closure: -- module local haskell value .quad GHC.Types.I#_con_info -- an Int .quad 42 ==== BEFORE .globl T15155.a_closure -- exported newtype wrapped value T15155.a_closure: .quad stg_IND_STATIC_info -- the closure info .quad a1_rXq_closure+1 -- indirectee ('+1' being the tag) .quad 0 .quad 0 ==== AFTER .globl T15155.a_closure -- exported newtype wrapped value .equiv a1_rXq_closure,T15155.a_closure -- both are shared The transformation is performed because T15155.a_closure `mayRedirectTo` a1_rXq_closure+1 returns True. -} -- | This type encodes the subset of 'CLabel' that occurs in C stubs of foreign -- declarations for the purpose of serializing to interface files. 
-- -- See Note [Foreign stubs and TH bytecode linking] data CStubLabel = CStubLabel { csl_is_initializer :: Bool, csl_module :: Module, csl_name :: FastString } instance Outputable CStubLabel where ppr CStubLabel {csl_is_initializer, csl_module, csl_name} = text ini <+> ppr csl_module <> colon <> text (unpackFS csl_name) where ini = if csl_is_initializer then "initializer" else "finalizer" -- | Project the constructor 'ModuleLabel' out of 'CLabel' if it is an -- initializer or finalizer. cStubLabel :: CLabel -> Maybe CStubLabel cStubLabel = \case ModuleLabel csl_module label_kind -> do (csl_is_initializer, csl_name) <- case label_kind of MLK_Initializer (LexicalFastString s) -> Just (True, s) MLK_Finalizer (LexicalFastString s) -> Just (False, s) _ -> Nothing Just (CStubLabel {csl_is_initializer, csl_module, csl_name}) _ -> Nothing -- | Inject a 'CStubLabel' into a 'CLabel' as a 'ModuleLabel'. fromCStubLabel :: CStubLabel -> CLabel fromCStubLabel (CStubLabel {csl_is_initializer, csl_module, csl_name}) = ModuleLabel csl_module (label_kind (LexicalFastString csl_name)) where label_kind = if csl_is_initializer then MLK_Initializer else MLK_Finalizer -- | A utility for renaming uniques in CLabels to produce deterministic object. -- Note that not all Uniques are mapped over. Only those that can be safely alpha -- renamed, e.g. uniques of local symbols, but not of external ones. -- See Note [Renaming uniques deterministically]. mapInternalNonDetUniques :: Applicative m => (Unique -> m Unique) -> CLabel -> m CLabel -- todo: Can we do less work here, e.g., do we really need to rename AsmTempLabel, LocalBlockLabel? mapInternalNonDetUniques f x = case x of IdLabel name cafInfo idLabelInfo | not (isExternalName name) -> IdLabel . 
setNameUnique name <$> f (nameUnique name) <*> pure cafInfo <*> pure idLabelInfo | otherwise -> pure x cl@CmmLabel{} -> pure cl RtsLabel rtsLblInfo -> pure $ RtsLabel rtsLblInfo LocalBlockLabel unique -> LocalBlockLabel <$> f unique fl@ForeignLabel{} -> pure fl AsmTempLabel unique -> AsmTempLabel <$> f unique AsmTempDerivedLabel clbl fs -> AsmTempDerivedLabel <$> mapInternalNonDetUniques f clbl <*> pure fs StringLitLabel unique -> StringLitLabel <$> f unique CC_Label cc -> pure $ CC_Label cc CCS_Label ccs -> pure $ CCS_Label ccs IPE_Label ipe@InfoProvEnt{infoTablePtr} -> (\cl' -> IPE_Label ipe{infoTablePtr = cl'}) <$> mapInternalNonDetUniques f infoTablePtr ml@ModuleLabel{} -> pure ml DynamicLinkerLabel dlli clbl -> DynamicLinkerLabel dlli <$> mapInternalNonDetUniques f clbl PicBaseLabel -> pure PicBaseLabel DeadStripPreventer clbl -> DeadStripPreventer <$> mapInternalNonDetUniques f clbl HpcTicksLabel mod -> pure $ HpcTicksLabel mod SRTLabel unique -> SRTLabel <$> f unique LargeBitmapLabel unique -> LargeBitmapLabel <$> f unique -- This is called *a lot* if renaming Cmm uniques, and won't specialise without this pragma: {-# INLINABLE mapInternalNonDetUniques #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/CLabel.hs-boot0000644000000000000000000000022007346545000021033 0ustar0000000000000000module GHC.Cmm.CLabel where import GHC.Utils.Outputable import GHC.Platform data CLabel pprCLabel :: IsLine doc => Platform -> CLabel -> doc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Dataflow/0000755000000000000000000000000007346545000020163 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Dataflow/Block.hs0000644000000000000000000002606007346545000021555 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Block ( 
Extensibility (..) , O , C , MaybeO(..) , IndexedCO , Block(..) , blockAppend , blockConcat , blockCons , blockFromList , blockJoin , blockJoinHead , blockJoinTail , blockSnoc , blockSplit , blockSplitHead , blockSplitTail , blockToList , emptyBlock , firstNode , foldBlockNodesB , foldBlockNodesB3 , foldBlockNodesF , isEmptyBlock , lastNode , mapBlock , mapBlock' , mapBlock3' , replaceFirstNode , replaceLastNode ) where import GHC.Prelude -- ----------------------------------------------------------------------------- -- Shapes: Open and Closed -- | Used at the type level to indicate "open" vs "closed" structure. data Extensibility -- | An "open" structure with a unique, unnamed control-flow edge flowing in -- or out. \"Fallthrough\" and concatenation are permitted at an open point. = Open -- | A "closed" structure which supports control transfer only through the use -- of named labels---no "fallthrough" is permitted. The number of control-flow -- edges is unconstrained. | Closed type O = 'Open type C = 'Closed -- | Either type indexed by closed/open using type families type family IndexedCO (ex :: Extensibility) (a :: k) (b :: k) :: k type instance IndexedCO C a _b = a type instance IndexedCO O _a b = b -- | Maybe type indexed by open/closed data MaybeO ex t where JustO :: t -> MaybeO O t NothingO :: MaybeO C t deriving instance Functor (MaybeO ex) -- ----------------------------------------------------------------------------- -- The Block type -- | A sequence of nodes. May be any of four shapes (O/O, O/C, C/O, C/C). -- Open at the entry means single entry, mutatis mutandis for exit. -- A closed/closed block is a /basic/ block and can't be extended further. -- Clients should avoid manipulating blocks and should stick to either nodes -- or graphs. 
data Block n e x where BlockCO :: n C O -> Block n O O -> Block n C O BlockCC :: n C O -> Block n O O -> n O C -> Block n C C BlockOC :: Block n O O -> n O C -> Block n O C BNil :: Block n O O BMiddle :: n O O -> Block n O O BCat :: Block n O O -> Block n O O -> Block n O O BSnoc :: Block n O O -> n O O -> Block n O O BCons :: n O O -> Block n O O -> Block n O O -- ----------------------------------------------------------------------------- -- Simple operations on Blocks -- Predicates isEmptyBlock :: Block n e x -> Bool isEmptyBlock BNil = True isEmptyBlock (BCat l r) = isEmptyBlock l && isEmptyBlock r isEmptyBlock _ = False -- Building emptyBlock :: Block n O O emptyBlock = BNil blockCons :: n O O -> Block n O x -> Block n O x blockCons n b = case b of BlockOC b l -> (BlockOC $! (n `blockCons` b)) l BNil{} -> BMiddle n BMiddle{} -> n `BCons` b BCat{} -> n `BCons` b BSnoc{} -> n `BCons` b BCons{} -> n `BCons` b blockSnoc :: Block n e O -> n O O -> Block n e O blockSnoc b n = case b of BlockCO f b -> BlockCO f $! 
(b `blockSnoc` n) BNil{} -> BMiddle n BMiddle{} -> b `BSnoc` n BCat{} -> b `BSnoc` n BSnoc{} -> b `BSnoc` n BCons{} -> b `BSnoc` n blockJoinHead :: n C O -> Block n O x -> Block n C x blockJoinHead f (BlockOC b l) = BlockCC f b l blockJoinHead f b = BlockCO f BNil `cat` b blockJoinTail :: Block n e O -> n O C -> Block n e C blockJoinTail (BlockCO f b) t = BlockCC f b t blockJoinTail b t = b `cat` BlockOC BNil t blockJoin :: n C O -> Block n O O -> n O C -> Block n C C blockJoin f b t = BlockCC f b t blockAppend :: Block n e O -> Block n O x -> Block n e x blockAppend = cat blockConcat :: [Block n O O] -> Block n O O blockConcat = foldr blockAppend emptyBlock -- Taking apart firstNode :: Block n C x -> n C O firstNode (BlockCO n _) = n firstNode (BlockCC n _ _) = n lastNode :: Block n x C -> n O C lastNode (BlockOC _ n) = n lastNode (BlockCC _ _ n) = n blockSplitHead :: Block n C x -> (n C O, Block n O x) blockSplitHead (BlockCO n b) = (n, b) blockSplitHead (BlockCC n b t) = (n, BlockOC b t) blockSplitTail :: Block n e C -> (Block n e O, n O C) blockSplitTail (BlockOC b n) = (b, n) blockSplitTail (BlockCC f b t) = (BlockCO f b, t) -- | Split a closed block into its entry node, open middle block, and -- exit node. blockSplit :: Block n C C -> (n C O, Block n O O, n O C) blockSplit (BlockCC f b t) = (f, b, t) blockToList :: Block n O O -> [n O O] blockToList b = go b [] where go :: Block n O O -> [n O O] -> [n O O] go BNil r = r go (BMiddle n) r = n : r go (BCat b1 b2) r = go b1 $! 
go b2 r go (BSnoc b1 n) r = go b1 (n:r) go (BCons n b1) r = n : go b1 r blockFromList :: [n O O] -> Block n O O blockFromList = foldr BCons BNil -- Modifying replaceFirstNode :: Block n C x -> n C O -> Block n C x replaceFirstNode (BlockCO _ b) f = BlockCO f b replaceFirstNode (BlockCC _ b n) f = BlockCC f b n replaceLastNode :: Block n x C -> n O C -> Block n x C replaceLastNode (BlockOC b _) n = BlockOC b n replaceLastNode (BlockCC l b _) n = BlockCC l b n -- ----------------------------------------------------------------------------- -- General concatenation cat :: Block n e O -> Block n O x -> Block n e x cat x y = case x of BNil -> y BlockCO l b1 -> case y of BlockOC b2 n -> (BlockCC l $! (b1 `cat` b2)) n BNil -> x BMiddle _ -> BlockCO l $! (b1 `cat` y) BCat{} -> BlockCO l $! (b1 `cat` y) BSnoc{} -> BlockCO l $! (b1 `cat` y) BCons{} -> BlockCO l $! (b1 `cat` y) BMiddle n -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle{} -> BCons n y BCat{} -> BCons n y BSnoc{} -> BCons n y BCons{} -> BCons n y BCat{} -> case y of BlockOC b3 n2 -> (BlockOC $! (x `cat` b3)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y BSnoc{} -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y BCons{} -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y -- ----------------------------------------------------------------------------- -- Mapping -- | map a function over the nodes of a 'Block' mapBlock :: (forall e x. 
n e x -> n' e x) -> Block n e x -> Block n' e x mapBlock f (BlockCO n b ) = BlockCO (f n) (mapBlock f b) mapBlock f (BlockOC b n) = BlockOC (mapBlock f b) (f n) mapBlock f (BlockCC n b m) = BlockCC (f n) (mapBlock f b) (f m) mapBlock _ BNil = BNil mapBlock f (BMiddle n) = BMiddle (f n) mapBlock f (BCat b1 b2) = BCat (mapBlock f b1) (mapBlock f b2) mapBlock f (BSnoc b n) = BSnoc (mapBlock f b) (f n) mapBlock f (BCons n b) = BCons (f n) (mapBlock f b) -- | A strict 'mapBlock' mapBlock' :: (forall e x. n e x -> n' e x) -> (Block n e x -> Block n' e x) mapBlock' f = mapBlock3' (f, f, f) -- | map over a block, with different functions to apply to first nodes, -- middle nodes and last nodes respectively. The map is strict. -- mapBlock3' :: forall n n' e x . ( n C O -> n' C O , n O O -> n' O O, n O C -> n' O C) -> Block n e x -> Block n' e x mapBlock3' (f, m, l) b = go b where go :: forall e x . Block n e x -> Block n' e x go (BlockOC b y) = (BlockOC $! go b) $! l y go (BlockCO x b) = (BlockCO $! f x) $! (go b) go (BlockCC x b y) = ((BlockCC $! f x) $! go b) $! (l y) go BNil = BNil go (BMiddle n) = BMiddle $! m n go (BCat x y) = (BCat $! go x) $! (go y) go (BSnoc x n) = (BSnoc $! go x) $! (m n) go (BCons n x) = (BCons $! m n) $! (go x) -- ----------------------------------------------------------------------------- -- Folding -- | Fold a function over every node in a block, forward or backward. -- The fold function must be polymorphic in the shape of the nodes. foldBlockNodesF3 :: forall n a b c . ( n C O -> a -> b , n O O -> b -> b , n O C -> b -> c) -> (forall e x . Block n e x -> IndexedCO e a b -> IndexedCO x c b) foldBlockNodesF :: forall n a . (forall e x . n e x -> a -> a) -> (forall e x . Block n e x -> IndexedCO e a a -> IndexedCO x a a) foldBlockNodesB3 :: forall n a b c . ( n C O -> b -> c , n O O -> b -> b , n O C -> a -> b) -> (forall e x . Block n e x -> IndexedCO x a b -> IndexedCO e c b) foldBlockNodesB :: forall n a . (forall e x . 
n e x -> a -> a) -> (forall e x . Block n e x -> IndexedCO x a a -> IndexedCO e a a) foldBlockNodesF3 (ff, fm, fl) = block where block :: forall e x . Block n e x -> IndexedCO e a b -> IndexedCO x c b block (BlockCO f b ) = ff f `cat` block b block (BlockCC f b l) = ff f `cat` block b `cat` fl l block (BlockOC b l) = block b `cat` fl l block BNil = id block (BMiddle node) = fm node block (b1 `BCat` b2) = block b1 `cat` block b2 block (b1 `BSnoc` n) = block b1 `cat` fm n block (n `BCons` b2) = fm n `cat` block b2 cat :: forall a b c. (a -> b) -> (b -> c) -> a -> c cat f f' = f' . f foldBlockNodesF f = foldBlockNodesF3 (f, f, f) foldBlockNodesB3 (ff, fm, fl) = block where block :: forall e x . Block n e x -> IndexedCO x a b -> IndexedCO e c b block (BlockCO f b ) = ff f `cat` block b block (BlockCC f b l) = ff f `cat` block b `cat` fl l block (BlockOC b l) = block b `cat` fl l block BNil = id block (BMiddle node) = fm node block (b1 `BCat` b2) = block b1 `cat` block b2 block (b1 `BSnoc` n) = block b1 `cat` fm n block (n `BCons` b2) = fm n `cat` block b2 cat :: forall a b c. (b -> c) -> (a -> b) -> a -> c cat f f' = f . f' foldBlockNodesB f = foldBlockNodesB3 (f, f, f) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Dataflow/Graph.hs0000644000000000000000000001575507346545000021575 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Graph ( Body , Graph , Graph'(..) , NonLocal(..) 
, addBlock , bodyList , bodyToBlockList , emptyBody , labelsDefined , mapGraph , mapGraphBlocks , revPostorderFrom ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Cmm.Dataflow.Label import GHC.Cmm.Dataflow.Block import Data.Kind -- | A (possibly empty) collection of closed/closed blocks type Body s n = Body' s Block n -- | @Body@ abstracted over @block@ type Body' s block (n :: Extensibility -> Extensibility -> Type) = s (block n C C) ------------------------------- -- | Gives access to the anchor points for -- nonlocal edges as well as the edges themselves class NonLocal thing where entryLabel :: thing C x -> Label -- ^ The label of a first node or block successors :: thing e C -> [Label] -- ^ Gives control-flow successors instance NonLocal n => NonLocal (Block n) where entryLabel (BlockCO f _) = entryLabel f entryLabel (BlockCC f _ _) = entryLabel f successors (BlockOC _ n) = successors n successors (BlockCC _ _ n) = successors n emptyBody :: Body' LabelMap block n emptyBody = mapEmpty bodyList :: Body' LabelMap block n -> [(Label,block n C C)] bodyList body = mapToList body bodyToBlockList :: Body LabelMap n -> [Block n C C] bodyToBlockList body = mapElems body addBlock :: (NonLocal block, HasDebugCallStack) => block C C -> LabelMap (block C C) -> LabelMap (block C C) addBlock block body = mapAlter add lbl body where lbl = entryLabel block add Nothing = Just block add _ = error $ "duplicate label " ++ show lbl ++ " in graph" -- --------------------------------------------------------------------------- -- Graph -- | A control-flow graph, which may take any of four shapes (O/O, -- O/C, C/O, C/C). A graph open at the entry has a single, -- distinguished, anonymous entry point; if a graph is closed at the -- entry, its entry point(s) are supplied by a context. type Graph = Graph' LabelMap Block -- | @Graph'@ is abstracted over the block type, so that we can build -- graphs of annotated blocks for example (Compiler.Hoopl.Dataflow -- needs this). 
data Graph' s block (n :: Extensibility -> Extensibility -> Type) e x where GNil :: Graph' s block n O O GUnit :: block n O O -> Graph' s block n O O GMany :: MaybeO e (block n O C) -> Body' s block n -> MaybeO x (block n C O) -> Graph' s block n e x -- ----------------------------------------------------------------------------- -- Mapping over graphs -- | Maps over all nodes in a graph. mapGraph :: (forall e x. n e x -> n' e x) -> Graph n e x -> Graph n' e x mapGraph f = mapGraphBlocks mapMap (mapBlock f) -- | Function 'mapGraphBlocks' enables a change of representation of blocks, -- nodes, or both. It lifts a polymorphic block transform into a polymorphic -- graph transform. When the block representation stabilizes, a similar -- function should be provided for blocks. mapGraphBlocks :: forall s block n block' n' e x . (forall a b . (a -> b) -> s a -> s b) -> (forall e x . block n e x -> block' n' e x) -> (Graph' s block n e x -> Graph' s block' n' e x) mapGraphBlocks f g = map where map :: Graph' s block n e x -> Graph' s block' n' e x map GNil = GNil map (GUnit b) = GUnit (g b) map (GMany e b x) = GMany (fmap g e) (f g b) (fmap g x) -- ----------------------------------------------------------------------------- -- Extracting Labels from graphs labelsDefined :: forall block n e x . NonLocal (block n) => Graph' LabelMap block n e x -> LabelSet labelsDefined GNil = setEmpty labelsDefined (GUnit{}) = setEmpty labelsDefined (GMany _ body x) = mapFoldlWithKey addEntry (exitLabel x) body where addEntry :: forall a. LabelSet -> Label -> a -> LabelSet addEntry labels label _ = setInsert label labels exitLabel :: MaybeO x (block n C O) -> LabelSet exitLabel NothingO = setEmpty exitLabel (JustO b) = setSingleton (entryLabel b) ---------------------------------------------------------------- -- | Returns a list of blocks reachable from the provided Labels in the reverse -- postorder. -- -- This is the most important traversal over this data structure. 
It drops -- unreachable code and puts blocks in an order that is good for solving forward -- dataflow problems quickly. The reverse order is good for solving backward -- dataflow problems quickly. The forward order is also reasonably good for -- emitting instructions, except that it will not usually exploit Forrest -- Baskett's trick of eliminating the unconditional branch from a loop. For -- that you would need a more serious analysis, probably based on dominators, to -- identify loop headers. -- -- For forward analyses we want reverse postorder visitation, consider: -- @ -- A -> [B,C] -- B -> D -- C -> D -- @ -- Postorder: [D, C, B, A] (or [D, B, C, A]) -- Reverse postorder: [A, B, C, D] (or [A, C, B, D]) -- This matters for, e.g., forward analysis, because we want to analyze *both* -- B and C before we analyze D. revPostorderFrom :: forall block. (NonLocal block) => LabelMap (block C C) -> Label -> [block C C] revPostorderFrom graph start = go start_worklist setEmpty [] where start_worklist = lookup_for_descend start Nil -- To compute the postorder we need to "visit" a block (mark as done) *after* -- visiting all its successors. So we need to know whether we already -- processed all successors of each block (and @NonLocal@ allows arbitrary -- many successors). So we use an explicit stack with an extra bit -- of information: -- - @ConsTodo@ means to explore the block if it wasn't visited before -- - @ConsMark@ means that all successors were already done and we can add -- the block to the result. -- -- NOTE: We add blocks to the result list in postorder, but we *prepend* -- them (i.e., we use @(:)@), which means that the final list is in reverse -- postorder. 
go :: DfsStack (block C C) -> LabelSet -> [block C C] -> [block C C] go Nil !_ !result = result go (ConsMark block rest) !wip_or_done !result = go rest wip_or_done (block : result) go (ConsTodo block rest) !wip_or_done !result | entryLabel block `setMember` wip_or_done = go rest wip_or_done result | otherwise = let new_worklist = foldr lookup_for_descend (ConsMark block rest) (successors block) in go new_worklist (setInsert (entryLabel block) wip_or_done) result lookup_for_descend :: Label -> DfsStack (block C C) -> DfsStack (block C C) lookup_for_descend label wl | Just b <- mapLookup label graph = ConsTodo b wl | otherwise = error $ "Label that doesn't have a block?! " ++ show label data DfsStack a = ConsTodo a (DfsStack a) | ConsMark a (DfsStack a) | Nil ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Dataflow/Label.hs0000644000000000000000000002104107346545000021534 0ustar0000000000000000{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -Wno-unused-top-binds #-} module GHC.Cmm.Dataflow.Label ( Label , LabelMap , LabelSet , FactBase , lookupFact , mkHooplLabel -- * Set , setEmpty , setNull , setSize , setMember , setSingleton , setInsert , setDelete , setUnion , setUnions , setDifference , setIntersection , setIsSubsetOf , setFilter , setFoldl , setFoldr , setFromList , setElems -- * Map , mapNull , mapSize , mapMember , mapLookup , mapFindWithDefault , mapEmpty , mapSingleton , mapInsert , mapInsertWith , mapDelete , mapAlter , mapAdjust , mapUnion , mapUnions , mapUnionWithKey , mapDifference , mapIntersection , mapIsSubmapOf , mapMap , mapMapWithKey , mapFoldl , mapFoldr , mapFoldlWithKey , mapFoldMapWithKey , mapFilter , mapFilterWithKey , mapElems , mapKeys , mapToList , mapFromList , mapFromListWith ) where import GHC.Prelude import GHC.Utils.Outputable import 
GHC.Types.Unique (Uniquable(..), mkUniqueGrimily) -- The code generator will eventually be using all the labels stored in a -- LabelSet and LabelMap. For these reasons we use the strict variants of these -- data structures. We inline selectively to enable the RULES in Word64Map/Set -- to fire. import GHC.Data.Word64Set (Word64Set) import qualified GHC.Data.Word64Set as S import GHC.Data.Word64Map.Strict (Word64Map) import qualified GHC.Data.Word64Map.Strict as M import GHC.Data.TrieMap import Data.Word (Word64) import Data.List (foldl1') ----------------------------------------------------------------------------- -- Label ----------------------------------------------------------------------------- newtype Label = Label { lblToUnique :: Word64 } deriving newtype (Eq, Ord) mkHooplLabel :: Word64 -> Label mkHooplLabel = Label instance Show Label where show (Label n) = "L" ++ show n instance Uniquable Label where getUnique label = mkUniqueGrimily (lblToUnique label) instance Outputable Label where ppr label = ppr (getUnique label) instance OutputableP env Label where pdoc _ l = ppr l ----------------------------------------------------------------------------- -- LabelSet newtype LabelSet = LS Word64Set deriving newtype (Eq, Ord, Show, Monoid, Semigroup) setNull :: LabelSet -> Bool setNull (LS s) = S.null s setSize :: LabelSet -> Int setSize (LS s) = S.size s setMember :: Label -> LabelSet -> Bool setMember (Label k) (LS s) = S.member k s setEmpty :: LabelSet setEmpty = LS S.empty setSingleton :: Label -> LabelSet setSingleton (Label k) = LS (S.singleton k) setInsert :: Label -> LabelSet -> LabelSet setInsert (Label k) (LS s) = LS (S.insert k s) setDelete :: Label -> LabelSet -> LabelSet setDelete (Label k) (LS s) = LS (S.delete k s) setUnion :: LabelSet -> LabelSet -> LabelSet setUnion (LS x) (LS y) = LS (S.union x y) {-# INLINE setUnions #-} setUnions :: [LabelSet] -> LabelSet setUnions [] = setEmpty setUnions sets = foldl1' setUnion sets setDifference :: LabelSet 
-> LabelSet -> LabelSet setDifference (LS x) (LS y) = LS (S.difference x y) setIntersection :: LabelSet -> LabelSet -> LabelSet setIntersection (LS x) (LS y) = LS (S.intersection x y) setIsSubsetOf :: LabelSet -> LabelSet -> Bool setIsSubsetOf (LS x) (LS y) = S.isSubsetOf x y setFilter :: (Label -> Bool) -> LabelSet -> LabelSet setFilter f (LS s) = LS (S.filter (f . mkHooplLabel) s) {-# INLINE setFoldl #-} setFoldl :: (t -> Label -> t) -> t -> LabelSet -> t setFoldl k z (LS s) = S.foldl (\a v -> k a (mkHooplLabel v)) z s {-# INLINE setFoldr #-} setFoldr :: (Label -> t -> t) -> t -> LabelSet -> t setFoldr k z (LS s) = S.foldr (\v a -> k (mkHooplLabel v) a) z s {-# INLINE setElems #-} setElems :: LabelSet -> [Label] setElems (LS s) = map mkHooplLabel (S.elems s) {-# INLINE setFromList #-} setFromList :: [Label] -> LabelSet setFromList ks = LS (S.fromList (map lblToUnique ks)) ----------------------------------------------------------------------------- -- LabelMap newtype LabelMap v = LM (Word64Map v) deriving newtype (Eq, Ord, Show, Functor, Foldable) deriving stock Traversable mapNull :: LabelMap a -> Bool mapNull (LM m) = M.null m {-# INLINE mapSize #-} mapSize :: LabelMap a -> Int mapSize (LM m) = M.size m mapMember :: Label -> LabelMap a -> Bool mapMember (Label k) (LM m) = M.member k m mapLookup :: Label -> LabelMap a -> Maybe a mapLookup (Label k) (LM m) = M.lookup k m mapFindWithDefault :: a -> Label -> LabelMap a -> a mapFindWithDefault def (Label k) (LM m) = M.findWithDefault def k m mapEmpty :: LabelMap v mapEmpty = LM M.empty mapSingleton :: Label -> v -> LabelMap v mapSingleton (Label k) v = LM (M.singleton k v) mapInsert :: Label -> v -> LabelMap v -> LabelMap v mapInsert (Label k) v (LM m) = LM (M.insert k v m) mapInsertWith :: (v -> v -> v) -> Label -> v -> LabelMap v -> LabelMap v mapInsertWith f (Label k) v (LM m) = LM (M.insertWith f k v m) mapDelete :: Label -> LabelMap v -> LabelMap v mapDelete (Label k) (LM m) = LM (M.delete k m) mapAlter :: 
(Maybe v -> Maybe v) -> Label -> LabelMap v -> LabelMap v mapAlter f (Label k) (LM m) = LM (M.alter f k m) mapAdjust :: (v -> v) -> Label -> LabelMap v -> LabelMap v mapAdjust f (Label k) (LM m) = LM (M.adjust f k m) mapUnion :: LabelMap v -> LabelMap v -> LabelMap v mapUnion (LM x) (LM y) = LM (M.union x y) {-# INLINE mapUnions #-} mapUnions :: [LabelMap a] -> LabelMap a mapUnions [] = mapEmpty mapUnions maps = foldl1' mapUnion maps mapUnionWithKey :: (Label -> v -> v -> v) -> LabelMap v -> LabelMap v -> LabelMap v mapUnionWithKey f (LM x) (LM y) = LM (M.unionWithKey (f . mkHooplLabel) x y) mapDifference :: LabelMap v -> LabelMap b -> LabelMap v mapDifference (LM x) (LM y) = LM (M.difference x y) mapIntersection :: LabelMap v -> LabelMap b -> LabelMap v mapIntersection (LM x) (LM y) = LM (M.intersection x y) mapIsSubmapOf :: Eq a => LabelMap a -> LabelMap a -> Bool mapIsSubmapOf (LM x) (LM y) = M.isSubmapOf x y mapMap :: (a -> v) -> LabelMap a -> LabelMap v mapMap f (LM m) = LM (M.map f m) mapMapWithKey :: (Label -> a -> v) -> LabelMap a -> LabelMap v mapMapWithKey f (LM m) = LM (M.mapWithKey (f . mkHooplLabel) m) {-# INLINE mapFoldl #-} mapFoldl :: (a -> b -> a) -> a -> LabelMap b -> a mapFoldl k z (LM m) = M.foldl k z m {-# INLINE mapFoldr #-} mapFoldr :: (a -> b -> b) -> b -> LabelMap a -> b mapFoldr k z (LM m) = M.foldr k z m {-# INLINE mapFoldlWithKey #-} mapFoldlWithKey :: (t -> Label -> b -> t) -> t -> LabelMap b -> t mapFoldlWithKey k z (LM m) = M.foldlWithKey (\a v -> k a (mkHooplLabel v)) z m mapFoldMapWithKey :: Monoid m => (Label -> t -> m) -> LabelMap t -> m mapFoldMapWithKey f (LM m) = M.foldMapWithKey (\k v -> f (mkHooplLabel k) v) m {-# INLINEABLE mapFilter #-} mapFilter :: (v -> Bool) -> LabelMap v -> LabelMap v mapFilter f (LM m) = LM (M.filter f m) {-# INLINEABLE mapFilterWithKey #-} mapFilterWithKey :: (Label -> v -> Bool) -> LabelMap v -> LabelMap v mapFilterWithKey f (LM m) = LM (M.filterWithKey (f . 
mkHooplLabel) m) {-# INLINE mapElems #-} mapElems :: LabelMap a -> [a] mapElems (LM m) = M.elems m {-# INLINE mapKeys #-} mapKeys :: LabelMap a -> [Label] mapKeys (LM m) = map (mkHooplLabel . fst) (M.toList m) {-# INLINE mapToList #-} mapToList :: LabelMap b -> [(Label, b)] mapToList (LM m) = [(mkHooplLabel k, v) | (k, v) <- M.toList m] {-# INLINE mapFromList #-} mapFromList :: [(Label, v)] -> LabelMap v mapFromList assocs = LM (M.fromList [(lblToUnique k, v) | (k, v) <- assocs]) mapFromListWith :: (v -> v -> v) -> [(Label, v)] -> LabelMap v mapFromListWith f assocs = LM (M.fromListWith f [(lblToUnique k, v) | (k, v) <- assocs]) ----------------------------------------------------------------------------- -- Instances instance Outputable LabelSet where ppr = ppr . setElems instance Outputable a => Outputable (LabelMap a) where ppr = ppr . mapToList instance OutputableP env a => OutputableP env (LabelMap a) where pdoc env = pdoc env . mapToList instance TrieMap LabelMap where type Key LabelMap = Label emptyTM = mapEmpty lookupTM k m = mapLookup k m alterTM k f m = mapAlter f k m foldTM k m z = mapFoldr k z m filterTM f m = mapFilter f m ----------------------------------------------------------------------------- -- FactBase type FactBase f = LabelMap f lookupFact :: Label -> FactBase f -> Maybe f lookupFact = mapLookup ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Expr.hs0000644000000000000000000005157607346545000017712 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE UndecidableInstances #-} module GHC.Cmm.Expr ( CmmExpr(..), cmmExprType, cmmExprWidth, cmmExprAlignment, maybeInvertCmmExpr , CmmReg(..), cmmRegType, cmmRegWidth , CmmLit(..), cmmLitType , AlignmentSpec(..) -- TODO: Remove: , LocalReg(..), localRegType , GlobalReg(..), isArgReg, globalRegSpillType , GlobalRegUse(..) 
, spReg, hpReg, spLimReg, hpLimReg, nodeReg , currentTSOReg, currentNurseryReg, hpAllocReg, cccsReg , node, baseReg , DefinerOfRegs, UserOfRegs , foldRegsDefd, foldRegsUsed , foldLocalRegsDefd, foldLocalRegsUsed , RegSet, LocalRegSet, GlobalRegSet , emptyRegSet, elemRegSet, extendRegSet, deleteFromRegSet, mkRegSet , plusRegSet, minusRegSet, timesRegSet, sizeRegSet, nullRegSet , regSetToList , isTrivialCmmExpr , hasNoGlobalRegs , isLit , isComparisonExpr , Area(..) , module GHC.Cmm.MachOp , module GHC.Cmm.Type ) where import GHC.Prelude import GHC.Platform import GHC.Cmm.BlockId import GHC.Cmm.CLabel import GHC.Cmm.MachOp import GHC.Cmm.Type import GHC.Cmm.Reg import GHC.Utils.Panic (panic) import GHC.Utils.Outputable import Data.Maybe import Data.Set (Set) import qualified Data.Set as Set import Numeric ( fromRat ) import GHC.Types.Basic (Alignment, mkAlignment, alignmentOf) ----------------------------------------------------------------------------- -- CmmExpr -- An expression. Expressions have no side effects. ----------------------------------------------------------------------------- data CmmExpr = CmmLit !CmmLit -- Literal | CmmLoad !CmmExpr !CmmType !AlignmentSpec -- Read memory location | CmmReg !CmmReg -- Contents of register | CmmMachOp MachOp [CmmExpr] -- Machine operation (+, -, *, etc.) 
| CmmStackSlot Area {-# UNPACK #-} !Int -- Addressing expression of a stack slot -- See Note [CmmStackSlot aliasing] | CmmRegOff !CmmReg !Int -- CmmRegOff reg i -- ** is shorthand only, meaning ** -- CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)] -- where rep = typeWidth (cmmRegType reg) deriving Show instance Eq CmmExpr where -- Equality ignores the types CmmLit l1 == CmmLit l2 = l1==l2 CmmLoad e1 _ _ == CmmLoad e2 _ _ = e1==e2 CmmReg r1 == CmmReg r2 = r1==r2 CmmRegOff r1 i1 == CmmRegOff r2 i2 = r1==r2 && i1==i2 CmmMachOp op1 es1 == CmmMachOp op2 es2 = op1==op2 && es1==es2 CmmStackSlot a1 i1 == CmmStackSlot a2 i2 = a1==a2 && i1==i2 _e1 == _e2 = False instance OutputableP Platform CmmExpr where pdoc = pprExpr data AlignmentSpec = NaturallyAligned | Unaligned deriving (Eq, Ord, Show) -- | A stack area is either the stack slot where a variable is spilled -- or the stack space where function arguments and results are passed. data Area = Old -- See Note [Old Area] | Young {-# UNPACK #-} !BlockId -- Invariant: must be a continuation BlockId -- See Note [Continuation BlockIds] in GHC.Cmm.Node. deriving (Eq, Ord, Show) instance Outputable Area where ppr e = pprArea e pprArea :: Area -> SDoc pprArea Old = text "old" pprArea (Young id) = hcat [ text "young<", ppr id, text ">" ] {- Note [Old Area] ~~~~~~~~~~~~~~~~~~ There is a single call area 'Old', allocated at the extreme old end of the stack frame (ie just younger than the return address) which holds: * incoming (overflow) parameters, * outgoing (overflow) parameter to tail calls, * outgoing (overflow) result values * the update frame (if any) Its size is the max of all these requirements. On entry, the stack pointer will point to the youngest incoming parameter, which is not necessarily at the young end of the Old area. End of note -} {- Note [CmmStackSlot aliasing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When do two CmmStackSlots alias? 
- T[old+N] aliases with U[young(L)+M] for all T, U, L, N and M - T[old+N] aliases with U[old+M] only if the areas actually overlap Or more informally, different Areas may overlap with each other. An alternative semantics, that we previously had, was that different Areas do not overlap. The problem that lead to redefining the semantics of stack areas is described below. e.g. if we had x = Sp[old + 8] y = Sp[old + 16] Sp[young(L) + 8] = L Sp[young(L) + 16] = y Sp[young(L) + 24] = x call f() returns to L if areas semantically do not overlap, then we might optimise this to Sp[young(L) + 8] = L Sp[young(L) + 16] = Sp[old + 8] Sp[young(L) + 24] = Sp[old + 16] call f() returns to L and now young(L) cannot be allocated at the same place as old, and we are doomed to use more stack. - old+8 conflicts with young(L)+8 - old+16 conflicts with young(L)+16 and young(L)+8 so young(L)+8 == old+24 and we get Sp[-8] = L Sp[-16] = Sp[8] Sp[-24] = Sp[0] Sp -= 24 call f() returns to L However, if areas are defined to be "possibly overlapping" in the semantics, then we cannot commute any loads/stores of old with young(L), and we will be able to re-use both old+8 and old+16 for young(L). x = Sp[8] y = Sp[0] Sp[8] = L Sp[0] = y Sp[-8] = x Sp = Sp - 8 call f() returns to L Now, the assignments of y go away, x = Sp[8] Sp[8] = L Sp[-8] = x Sp = Sp - 8 call f() returns to L -} data CmmLit = CmmInt !Integer !Width -- Interpretation: the 2's complement representation of the value -- is truncated to the specified size. This is easier than trying -- to keep the value within range, because we don't know whether -- it will be used as a signed or unsigned value (the CmmType doesn't -- distinguish between signed & unsigned). 
| CmmFloat Rational !Width | CmmVec [CmmLit] -- Vector literal | CmmLabel CLabel -- Address of label | CmmLabelOff CLabel !Int -- Address of label + byte offset -- Due to limitations in the C backend, the following -- MUST ONLY be used inside the info table indicated by label2 -- (label2 must be the info label), and label1 must be an -- SRT, a slow entrypoint or a large bitmap (see the Mangler) -- Don't use it at all unless tablesNextToCode. -- It is also used inside the NCG during when generating -- position-independent code. | CmmLabelDiffOff CLabel CLabel !Int !Width -- label1 - label2 + offset -- In an expression, the width just has the effect of MO_SS_Conv -- from wordWidth to the desired width. -- -- In a static literal, the supported Widths depend on the -- architecture: wordWidth is supported on all -- architectures. Additionally W32 is supported on x86_64 when -- using the small memory model. | CmmBlock {-# UNPACK #-} !BlockId -- Code label -- Invariant: must be a continuation BlockId -- See Note [Continuation BlockIds] in GHC.Cmm.Node. | CmmHighStackMark -- A late-bound constant that stands for the max -- #bytes of stack space used during a procedure. 
-- During the stack-layout pass, CmmHighStackMark -- is replaced by a CmmInt for the actual number -- of bytes used deriving (Eq, Show) instance OutputableP Platform CmmLit where pdoc = pprLit instance Outputable CmmLit where ppr (CmmInt n w) = text "CmmInt" <+> ppr n <+> ppr w ppr (CmmFloat n w) = text "CmmFloat" <+> text (show n) <+> ppr w ppr (CmmVec xs) = text "CmmVec" <+> ppr xs ppr (CmmLabel _) = text "CmmLabel" ppr (CmmLabelOff _ _) = text "CmmLabelOff" ppr (CmmLabelDiffOff _ _ _ _) = text "CmmLabelDiffOff" ppr (CmmBlock blk) = text "CmmBlock" <+> ppr blk ppr CmmHighStackMark = text "CmmHighStackMark" cmmExprType :: Platform -> CmmExpr -> CmmType cmmExprType platform = \case (CmmLit lit) -> cmmLitType platform lit (CmmLoad _ rep _) -> rep (CmmReg reg) -> cmmRegType reg (CmmMachOp op args) -> machOpResultType platform op (map (cmmExprType platform) args) (CmmRegOff reg _) -> cmmRegType reg (CmmStackSlot _ _) -> bWord platform -- an address -- Careful though: what is stored at the stack slot may be bigger than -- an address cmmLitType :: Platform -> CmmLit -> CmmType cmmLitType platform = \case (CmmInt _ width) -> cmmBits width (CmmFloat _ width) -> cmmFloat width (CmmVec []) -> panic "cmmLitType: CmmVec []" (CmmVec (l:ls)) -> let ty = cmmLitType platform l in if all (`cmmEqType` ty) (map (cmmLitType platform) ls) then cmmVec (1+length ls) ty else panic "cmmLitType: CmmVec" (CmmLabel lbl) -> cmmLabelType platform lbl (CmmLabelOff lbl _) -> cmmLabelType platform lbl (CmmLabelDiffOff _ _ _ width) -> cmmBits width (CmmBlock _) -> bWord platform (CmmHighStackMark) -> bWord platform cmmLabelType :: Platform -> CLabel -> CmmType cmmLabelType platform lbl | isGcPtrLabel lbl = gcWord platform | otherwise = bWord platform cmmExprWidth :: Platform -> CmmExpr -> Width cmmExprWidth platform e = typeWidth (cmmExprType platform e) -- | Returns an alignment in bytes of a CmmExpr when it's a statically -- known integer constant, otherwise returns an alignment of 1 byte. 
-- The caller is responsible for using with a sensible CmmExpr -- argument. cmmExprAlignment :: CmmExpr -> Alignment cmmExprAlignment (CmmLit (CmmInt intOff _)) = alignmentOf (fromInteger intOff) cmmExprAlignment _ = mkAlignment 1 -------- --- Negation for conditional branches maybeInvertCmmExpr :: CmmExpr -> Maybe CmmExpr maybeInvertCmmExpr (CmmMachOp op args) = do op' <- maybeInvertComparison op return (CmmMachOp op' args) maybeInvertCmmExpr _ = Nothing --------------------------------------------------- -- CmmExpr predicates --------------------------------------------------- isTrivialCmmExpr :: CmmExpr -> Bool isTrivialCmmExpr (CmmLoad _ _ _) = False isTrivialCmmExpr (CmmMachOp _ _) = False isTrivialCmmExpr (CmmLit _) = True isTrivialCmmExpr (CmmReg _) = True isTrivialCmmExpr (CmmRegOff _ _) = True isTrivialCmmExpr (CmmStackSlot _ _) = panic "isTrivialCmmExpr CmmStackSlot" hasNoGlobalRegs :: CmmExpr -> Bool hasNoGlobalRegs (CmmLoad e _ _) = hasNoGlobalRegs e hasNoGlobalRegs (CmmMachOp _ es) = all hasNoGlobalRegs es hasNoGlobalRegs (CmmLit _) = True hasNoGlobalRegs (CmmReg (CmmLocal _)) = True hasNoGlobalRegs (CmmRegOff (CmmLocal _) _) = True hasNoGlobalRegs _ = False isLit :: CmmExpr -> Bool isLit (CmmLit _) = True isLit _ = False isComparisonExpr :: CmmExpr -> Bool isComparisonExpr (CmmMachOp op _) = isComparisonMachOp op isComparisonExpr _ = False ----------------------------------------------------------------------------- -- Register-use information for expressions and other types ----------------------------------------------------------------------------- -- | Sets of registers -- These are used for dataflow facts, and a common operation is taking -- the union of two RegSets and then asking whether the union is the -- same as one of the inputs. UniqSet isn't good here, because -- sizeUniqSet is O(n) whereas Set.size is O(1), so we use ordinary -- Sets. 
type RegSet r = Set r type LocalRegSet = RegSet LocalReg type GlobalRegSet = RegSet GlobalReg emptyRegSet :: RegSet r nullRegSet :: RegSet r -> Bool elemRegSet :: Ord r => r -> RegSet r -> Bool extendRegSet :: Ord r => RegSet r -> r -> RegSet r deleteFromRegSet :: Ord r => RegSet r -> r -> RegSet r mkRegSet :: Ord r => [r] -> RegSet r minusRegSet, plusRegSet, timesRegSet :: Ord r => RegSet r -> RegSet r -> RegSet r sizeRegSet :: RegSet r -> Int regSetToList :: RegSet r -> [r] emptyRegSet = Set.empty nullRegSet = Set.null elemRegSet = Set.member extendRegSet = flip Set.insert deleteFromRegSet = flip Set.delete mkRegSet = Set.fromList minusRegSet = Set.difference plusRegSet = Set.union timesRegSet = Set.intersection sizeRegSet = Set.size regSetToList = Set.toList class Ord r => UserOfRegs r a where foldRegsUsed :: Platform -> (b -> r -> b) -> b -> a -> b foldLocalRegsUsed :: UserOfRegs LocalReg a => Platform -> (b -> LocalReg -> b) -> b -> a -> b foldLocalRegsUsed = foldRegsUsed class Ord r => DefinerOfRegs r a where foldRegsDefd :: Platform -> (b -> r -> b) -> b -> a -> b foldLocalRegsDefd :: DefinerOfRegs LocalReg a => Platform -> (b -> LocalReg -> b) -> b -> a -> b foldLocalRegsDefd = foldRegsDefd instance UserOfRegs LocalReg CmmReg where foldRegsUsed _ f z (CmmLocal reg) = f z reg foldRegsUsed _ _ z (CmmGlobal _) = z instance DefinerOfRegs LocalReg CmmReg where foldRegsDefd _ f z (CmmLocal reg) = f z reg foldRegsDefd _ _ z (CmmGlobal _) = z instance UserOfRegs GlobalReg CmmReg where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed _ _ z (CmmLocal _) = z foldRegsUsed _ f z (CmmGlobal (GlobalRegUse reg _)) = f z reg instance UserOfRegs GlobalRegUse CmmReg where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed _ _ z (CmmLocal _) = z foldRegsUsed _ f z (CmmGlobal reg) = f z reg instance DefinerOfRegs GlobalReg CmmReg where foldRegsDefd _ _ z (CmmLocal _) = z foldRegsDefd _ f z (CmmGlobal (GlobalRegUse reg _)) = f z reg instance DefinerOfRegs GlobalRegUse CmmReg where 
foldRegsDefd _ _ z (CmmLocal _) = z foldRegsDefd _ f z (CmmGlobal reg) = f z reg instance Ord r => UserOfRegs r r where foldRegsUsed _ f z r = f z r instance Ord r => DefinerOfRegs r r where foldRegsDefd _ f z r = f z r instance (Ord r, UserOfRegs r CmmReg) => UserOfRegs r CmmExpr where -- The (Ord r) in the context is necessary here -- See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z e = expr z e where expr z (CmmLit _) = z expr z (CmmLoad addr _ _) = foldRegsUsed platform f z addr expr z (CmmReg r) = foldRegsUsed platform f z r expr z (CmmMachOp _ exprs) = foldRegsUsed platform f z exprs expr z (CmmRegOff r _) = foldRegsUsed platform f z r expr z (CmmStackSlot _ _) = z instance UserOfRegs r a => UserOfRegs r [a] where foldRegsUsed platform f set as = foldl' (foldRegsUsed platform f) set as {-# INLINABLE foldRegsUsed #-} instance DefinerOfRegs r a => DefinerOfRegs r [a] where foldRegsDefd platform f set as = foldl' (foldRegsDefd platform f) set as {-# INLINABLE foldRegsDefd #-} -- -------------------------------------------------------------------------- -- Pretty-printing expressions -- -------------------------------------------------------------------------- pprExpr :: Platform -> CmmExpr -> SDoc pprExpr platform e = case e of CmmRegOff reg i -> pprExpr platform (CmmMachOp (MO_Add rep) [CmmReg reg, CmmLit (CmmInt (fromIntegral i) rep)]) where rep = typeWidth (cmmRegType reg) CmmLit lit -> pprLit platform lit _other -> pprExpr1 platform e -- Here's the precedence table from GHC.Cmm.Parser: -- %nonassoc '>=' '>' '<=' '<' '!=' '==' -- %left '|' -- %left '^' -- %left '&' -- %left '>>' '<<' -- %left '-' '+' -- %left '/' '*' '%' -- %right '~' -- We just cope with the common operators for now, the rest will get -- a default conservative behaviour. 
-- %nonassoc '>=' '>' '<=' '<' '!=' '==' pprExpr1, pprExpr7, pprExpr8 :: Platform -> CmmExpr -> SDoc pprExpr1 platform (CmmMachOp op [x,y]) | Just doc <- infixMachOp1 op = pprExpr7 platform x <+> doc <+> pprExpr7 platform y pprExpr1 platform e = pprExpr7 platform e infixMachOp1, infixMachOp7, infixMachOp8 :: MachOp -> Maybe SDoc infixMachOp1 (MO_Eq _) = Just (text "==") infixMachOp1 (MO_Ne _) = Just (text "!=") infixMachOp1 (MO_Shl _) = Just (text "<<") infixMachOp1 (MO_U_Shr _) = Just (text ">>") infixMachOp1 (MO_U_Ge _) = Just (text ">=") infixMachOp1 (MO_U_Le _) = Just (text "<=") infixMachOp1 (MO_U_Gt _) = Just (char '>') infixMachOp1 (MO_U_Lt _) = Just (char '<') infixMachOp1 _ = Nothing -- %left '-' '+' pprExpr7 platform (CmmMachOp (MO_Add rep1) [x, CmmLit (CmmInt i rep2)]) | i < 0 = pprExpr7 platform (CmmMachOp (MO_Sub rep1) [x, CmmLit (CmmInt (negate i) rep2)]) pprExpr7 platform (CmmMachOp op [x,y]) | Just doc <- infixMachOp7 op = pprExpr7 platform x <+> doc <+> pprExpr8 platform y pprExpr7 platform e = pprExpr8 platform e infixMachOp7 (MO_Add _) = Just (char '+') infixMachOp7 (MO_Sub _) = Just (char '-') infixMachOp7 _ = Nothing -- %left '/' '*' '%' pprExpr8 platform (CmmMachOp op [x,y]) | Just doc <- infixMachOp8 op = pprExpr8 platform x <+> doc <+> pprExpr9 platform y pprExpr8 platform e = pprExpr9 platform e infixMachOp8 (MO_U_Quot _) = Just (char '/') infixMachOp8 (MO_Mul _) = Just (char '*') infixMachOp8 (MO_U_Rem _) = Just (char '%') infixMachOp8 _ = Nothing pprExpr9 :: Platform -> CmmExpr -> SDoc pprExpr9 platform e = case e of CmmLit lit -> pprLit1 platform lit CmmLoad expr rep align -> let align_mark = case align of NaturallyAligned -> empty Unaligned -> text "^" in ppr rep <> align_mark <> brackets (pdoc platform expr) CmmReg reg -> ppr reg CmmRegOff reg off -> parens (ppr reg <+> char '+' <+> int off) CmmStackSlot a off -> parens (ppr a <+> char '+' <+> int off) CmmMachOp mop args -> genMachOp platform mop args genMachOp :: Platform -> MachOp -> 
[CmmExpr] -> SDoc genMachOp platform (MO_RelaxedRead w) [x] = ppr (cmmBits w) <> text "!" <> brackets (pdoc platform x) genMachOp platform mop args | Just doc <- infixMachOp mop = case args of -- dyadic [x,y] -> pprExpr9 platform x <+> doc <+> pprExpr9 platform y -- unary [x] -> doc <> pprExpr9 platform x _ -> pprTrace "GHC.Cmm.Expr.genMachOp: machop with strange number of args" (pprMachOp mop <+> parens (hcat $ punctuate comma (map (pprExpr platform) args))) empty | isJust (infixMachOp1 mop) || isJust (infixMachOp7 mop) || isJust (infixMachOp8 mop) = parens (pprExpr platform (CmmMachOp mop args)) | otherwise = char '%' <> ppr_op <> parens (commafy (map (pprExpr platform) args)) where ppr_op = text (map (\c -> if c == ' ' then '_' else c) (show mop)) -- replace spaces in (show mop) with underscores, -- -- Unsigned ops on the word size of the machine get nice symbols. -- All else get dumped in their ugly format. -- infixMachOp :: MachOp -> Maybe SDoc infixMachOp mop = case mop of MO_And _ -> Just $ char '&' MO_Or _ -> Just $ char '|' MO_Xor _ -> Just $ char '^' MO_Not _ -> Just $ char '~' MO_S_Neg _ -> Just $ char '-' -- there is no unsigned neg :) _ -> Nothing -- -------------------------------------------------------------------------- -- Pretty-printing literals -- -- To minimise line noise we adopt the convention that if the literal -- has the natural machine word size, we do not append the type -- -------------------------------------------------------------------------- pprLit :: Platform -> CmmLit -> SDoc pprLit platform lit = case lit of CmmInt i rep -> hcat [ (if i < 0 then parens else id)(integer i) , ppUnless (rep == wordWidth platform) $ space <> dcolon <+> ppr rep ] CmmFloat f rep -> hsep [ double (fromRat f), dcolon, ppr rep ] CmmVec lits -> char '<' <> commafy (map (pprLit platform) lits) <> char '>' CmmLabel clbl -> pdoc platform clbl CmmLabelOff clbl i -> pdoc platform clbl <> ppr_offset i CmmLabelDiffOff clbl1 clbl2 i _ -> pdoc platform clbl1 <> 
char '-' <> pdoc platform clbl2 <> ppr_offset i CmmBlock id -> ppr id CmmHighStackMark -> text "" pprLit1 :: Platform -> CmmLit -> SDoc pprLit1 platform lit@(CmmLabelOff {}) = parens (pprLit platform lit) pprLit1 platform lit = pprLit platform lit ppr_offset :: Int -> SDoc ppr_offset i | i==0 = empty | i>=0 = char '+' <> int i | otherwise = char '-' <> int (-i) commafy :: [SDoc] -> SDoc commafy xs = fsep $ punctuate comma xs ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/MachOp.hs0000644000000000000000000006615707346545000020144 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Cmm.MachOp ( MachOp(..) , pprMachOp, isCommutableMachOp, isAssociativeMachOp , isComparisonMachOp, maybeIntComparison, machOpResultType , machOpArgReps, maybeInvertComparison, isFloatComparison -- MachOp builders , mo_wordAdd, mo_wordSub, mo_wordEq, mo_wordNe,mo_wordMul, mo_wordSQuot , mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem , mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe , mo_wordULe, mo_wordUGt, mo_wordULt , mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot , mo_wordShl, mo_wordSShr, mo_wordUShr , mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32 , mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord , mo_u_32ToWord, mo_s_32ToWord , mo_32To8, mo_32To16, mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64 -- CallishMachOp , CallishMachOp(..), callishMachOpHints , pprCallishMachOp , machOpMemcpyishAlign -- Atomic read-modify-write , MemoryOrdering(..) , AtomicMachOp(..) -- Fused multiply-add , FMASign(..), pprFMASign ) where import GHC.Prelude import GHC.Platform import GHC.Cmm.Type import GHC.Utils.Outputable ----------------------------------------------------------------------------- -- MachOp ----------------------------------------------------------------------------- {- | Machine-level primops; ones which we can reasonably delegate to the native code generators to handle. 
Most operations are parameterised by the 'Width' that they operate on. Some operations have separate signed and unsigned versions, and float and integer versions. Note that there are variety of places in the native code generator where we assume that the code produced for a MachOp does not introduce new blocks. -} -- Note [MO_S_MulMayOflo significant width] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- There are two interpretations in the code about what a multiplication -- overflow exactly means: -- -- 1. The result does not fit into the specified width (of type Width.) -- 2. The result does not fit into a register. -- -- (2) has some flaws: A following MO_Mul has a width, too. So MO_S_MulMayOflo -- may signal no overflow, while MO_Mul truncates the result. There are -- architectures with several register widths and it might be hard to decide -- what's an overflow and what not. Both attributes can easily lead to subtle -- bugs. -- -- (1) has the benefit that its interpretation is completely independent of the -- architecture. So, the mid-term plan is to migrate to this -- interpretation/semantics. data MachOp -- Integer operations (insensitive to signed/unsigned) = MO_Add Width | MO_Sub Width | MO_Eq Width | MO_Ne Width | MO_Mul Width -- low word of multiply -- Signed multiply/divide | MO_S_MulMayOflo Width -- nonzero if signed multiply overflows. 
See -- Note [MO_S_MulMayOflo significant width] | MO_S_Quot Width -- signed / (same semantics as IntQuotOp) | MO_S_Rem Width -- signed % (same semantics as IntRemOp) | MO_S_Neg Width -- unary - -- Unsigned multiply/divide | MO_U_Quot Width -- unsigned / (same semantics as WordQuotOp) | MO_U_Rem Width -- unsigned % (same semantics as WordRemOp) -- Signed comparisons | MO_S_Ge Width | MO_S_Le Width | MO_S_Gt Width | MO_S_Lt Width -- Unsigned comparisons | MO_U_Ge Width | MO_U_Le Width | MO_U_Gt Width | MO_U_Lt Width -- Floating point arithmetic | MO_F_Add Width | MO_F_Sub Width | MO_F_Neg Width -- unary - | MO_F_Mul Width | MO_F_Quot Width -- Floating-point fused multiply-add operations -- | Fused multiply-add, see 'FMASign'. | MO_FMA FMASign Length Width -- Floating point comparison | MO_F_Eq Width | MO_F_Ne Width | MO_F_Ge Width | MO_F_Le Width | MO_F_Gt Width | MO_F_Lt Width | MO_F_Min Width | MO_F_Max Width -- Bitwise operations. Not all of these may be supported -- at all sizes, and only integral Widths are valid. | MO_And Width | MO_Or Width | MO_Xor Width | MO_Not Width -- Shifts. The shift amount must be in [0,widthInBits). | MO_Shl Width | MO_U_Shr Width -- unsigned shift right | MO_S_Shr Width -- signed shift right -- Conversions. Some of these will be NOPs. -- Floating-point conversions use the signed variant. | MO_SF_Round Width Width -- Signed int -> Float | MO_FS_Truncate Width Width -- Float -> Signed int | MO_SS_Conv Width Width -- Signed int -> Signed int | MO_UU_Conv Width Width -- unsigned int -> unsigned int | MO_XX_Conv Width Width -- int -> int; puts no requirements on the -- contents of upper bits when extending; -- narrowing is simply truncation; the only -- expectation is that we can recover the -- original value by applying the opposite -- MO_XX_Conv, e.g., -- MO_XX_CONV W64 W8 (MO_XX_CONV W8 W64 x) -- is equivalent to just x. 
| MO_FF_Conv Width Width -- Float -> Float | MO_WF_Bitcast Width -- Word32/Word64 -> Float/Double | MO_FW_Bitcast Width -- Float/Double -> Word32/Word64 -- Vector element insertion and extraction operations | MO_V_Broadcast Length Width -- Broadcast a scalar into a vector | MO_V_Insert Length Width -- Insert scalar into vector | MO_V_Extract Length Width -- Extract scalar from vector -- Integer vector operations | MO_V_Add Length Width | MO_V_Sub Length Width | MO_V_Mul Length Width -- Signed vector multiply/divide | MO_VS_Quot Length Width | MO_VS_Rem Length Width | MO_VS_Neg Length Width -- Unsigned vector multiply/divide | MO_VU_Quot Length Width | MO_VU_Rem Length Width -- Vector shuffles | MO_V_Shuffle Length Width [Int] | MO_VF_Shuffle Length Width [Int] -- Floating point vector element insertion and extraction operations | MO_VF_Broadcast Length Width -- Broadcast a scalar into a vector | MO_VF_Insert Length Width -- Insert scalar into vector | MO_VF_Extract Length Width -- Extract scalar from vector -- Floating point vector operations | MO_VF_Add Length Width | MO_VF_Sub Length Width | MO_VF_Neg Length Width -- unary negation | MO_VF_Mul Length Width | MO_VF_Quot Length Width -- Min/max operations | MO_VS_Min Length Width | MO_VS_Max Length Width | MO_VU_Min Length Width | MO_VU_Max Length Width | MO_VF_Min Length Width | MO_VF_Max Length Width -- | An atomic read with no memory ordering. Address msut -- be naturally aligned. | MO_RelaxedRead Width -- Alignment check (for -falignment-sanitisation) | MO_AlignmentCheck Int Width deriving (Eq, Show) pprMachOp :: MachOp -> SDoc pprMachOp mo = text (show mo) -- | Where are the signs in a fused multiply-add instruction? -- -- @x*y + z@ vs @x*y - z@ vs @-x*y+z@ vs @-x*y-z@. -- -- Warning: the signs aren't consistent across architectures (X86, PowerPC, AArch64). -- The user-facing implementation uses the X86 convention, while the relevant -- backends use their corresponding conventions. 
data FMASign -- | Fused multiply-add @x*y + z@. = FMAdd -- | Fused multiply-subtract. On X86: @x*y - z@. | FMSub -- | Fused multiply-add. On X86: @-x*y + z@. | FNMAdd -- | Fused multiply-subtract. On X86: @-x*y - z@. | FNMSub deriving (Eq, Show) pprFMASign :: IsLine doc => FMASign -> doc pprFMASign = \case FMAdd -> text "fmadd" FMSub -> text "fmsub" FNMAdd -> text "fnmadd" FNMSub -> text "fnmsub" -- ----------------------------------------------------------------------------- -- Some common MachReps -- A 'wordRep' is a machine word on the target architecture -- Specifically, it is the size of an Int#, Word#, Addr# -- and the unit of allocation on the stack and the heap -- Any pointer is also guaranteed to be a wordRep. mo_wordAdd, mo_wordSub, mo_wordEq, mo_wordNe,mo_wordMul, mo_wordSQuot , mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem , mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe , mo_wordULe, mo_wordUGt, mo_wordULt , mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot, mo_wordShl, mo_wordSShr, mo_wordUShr , mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord, mo_u_32ToWord, mo_s_32ToWord , mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64 :: Platform -> MachOp mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32 , mo_32To8, mo_32To16 :: MachOp mo_wordAdd platform = MO_Add (wordWidth platform) mo_wordSub platform = MO_Sub (wordWidth platform) mo_wordEq platform = MO_Eq (wordWidth platform) mo_wordNe platform = MO_Ne (wordWidth platform) mo_wordMul platform = MO_Mul (wordWidth platform) mo_wordSQuot platform = MO_S_Quot (wordWidth platform) mo_wordSRem platform = MO_S_Rem (wordWidth platform) mo_wordSNeg platform = MO_S_Neg (wordWidth platform) mo_wordUQuot platform = MO_U_Quot (wordWidth platform) mo_wordURem platform = MO_U_Rem (wordWidth platform) mo_wordSGe platform = MO_S_Ge (wordWidth platform) mo_wordSLe platform = MO_S_Le (wordWidth platform) mo_wordSGt platform = MO_S_Gt (wordWidth platform) mo_wordSLt platform = MO_S_Lt (wordWidth platform) 
mo_wordUGe platform = MO_U_Ge (wordWidth platform) mo_wordULe platform = MO_U_Le (wordWidth platform) mo_wordUGt platform = MO_U_Gt (wordWidth platform) mo_wordULt platform = MO_U_Lt (wordWidth platform) mo_wordAnd platform = MO_And (wordWidth platform) mo_wordOr platform = MO_Or (wordWidth platform) mo_wordXor platform = MO_Xor (wordWidth platform) mo_wordNot platform = MO_Not (wordWidth platform) mo_wordShl platform = MO_Shl (wordWidth platform) mo_wordSShr platform = MO_S_Shr (wordWidth platform) mo_wordUShr platform = MO_U_Shr (wordWidth platform) mo_u_8To32 = MO_UU_Conv W8 W32 mo_s_8To32 = MO_SS_Conv W8 W32 mo_u_16To32 = MO_UU_Conv W16 W32 mo_s_16To32 = MO_SS_Conv W16 W32 mo_u_8ToWord platform = MO_UU_Conv W8 (wordWidth platform) mo_s_8ToWord platform = MO_SS_Conv W8 (wordWidth platform) mo_u_16ToWord platform = MO_UU_Conv W16 (wordWidth platform) mo_s_16ToWord platform = MO_SS_Conv W16 (wordWidth platform) mo_s_32ToWord platform = MO_SS_Conv W32 (wordWidth platform) mo_u_32ToWord platform = MO_UU_Conv W32 (wordWidth platform) mo_WordTo8 platform = MO_UU_Conv (wordWidth platform) W8 mo_WordTo16 platform = MO_UU_Conv (wordWidth platform) W16 mo_WordTo32 platform = MO_UU_Conv (wordWidth platform) W32 mo_WordTo64 platform = MO_UU_Conv (wordWidth platform) W64 mo_32To8 = MO_UU_Conv W32 W8 mo_32To16 = MO_UU_Conv W32 W16 -- ---------------------------------------------------------------------------- -- isCommutableMachOp {- | Returns 'True' if the MachOp has commutable arguments. This is used in the platform-independent Cmm optimisations. If in doubt, return 'False'. This generates worse code on the native routes, but is otherwise harmless. 
-} isCommutableMachOp :: MachOp -> Bool isCommutableMachOp mop = case mop of MO_Add _ -> True MO_Eq _ -> True MO_Ne _ -> True MO_Mul _ -> True MO_S_MulMayOflo _ -> True MO_And _ -> True MO_Or _ -> True MO_Xor _ -> True MO_F_Add _ -> True MO_F_Mul _ -> True MO_F_Min {} -> True MO_F_Max {} -> True _other -> False -- ---------------------------------------------------------------------------- -- isAssociativeMachOp {- | Returns 'True' if the MachOp is associative (i.e. @(x+y)+z == x+(y+z)@) This is used in the platform-independent Cmm optimisations. If in doubt, return 'False'. This generates worse code on the native routes, but is otherwise harmless. -} isAssociativeMachOp :: MachOp -> Bool isAssociativeMachOp mop = case mop of MO_Add {} -> True -- NB: does not include MO_Mul {} -> True -- floatint point! MO_And {} -> True MO_Or {} -> True MO_Xor {} -> True _other -> False -- ---------------------------------------------------------------------------- -- isComparisonMachOp {- | Returns 'True' if the MachOp is a comparison. If in doubt, return False. This generates worse code on the native routes, but is otherwise harmless. -} isComparisonMachOp :: MachOp -> Bool isComparisonMachOp mop = case mop of MO_Eq _ -> True MO_Ne _ -> True MO_S_Ge _ -> True MO_S_Le _ -> True MO_S_Gt _ -> True MO_S_Lt _ -> True MO_U_Ge _ -> True MO_U_Le _ -> True MO_U_Gt _ -> True MO_U_Lt _ -> True MO_F_Eq {} -> True MO_F_Ne {} -> True MO_F_Ge {} -> True MO_F_Le {} -> True MO_F_Gt {} -> True MO_F_Lt {} -> True _other -> False {- | Returns @Just w@ if the operation is an integer comparison with width @w@, or @Nothing@ otherwise. 
-} maybeIntComparison :: MachOp -> Maybe Width maybeIntComparison mop = case mop of MO_Eq w -> Just w MO_Ne w -> Just w MO_S_Ge w -> Just w MO_S_Le w -> Just w MO_S_Gt w -> Just w MO_S_Lt w -> Just w MO_U_Ge w -> Just w MO_U_Le w -> Just w MO_U_Gt w -> Just w MO_U_Lt w -> Just w _ -> Nothing isFloatComparison :: MachOp -> Bool isFloatComparison mop = case mop of MO_F_Eq {} -> True MO_F_Ne {} -> True MO_F_Ge {} -> True MO_F_Le {} -> True MO_F_Gt {} -> True MO_F_Lt {} -> True _other -> False -- Note [Inverting conditions] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Sometimes it's useful to be able to invert the sense of a -- condition. Not all conditional tests are invertible: in -- particular, floating point conditionals cannot be inverted, because -- there exist floating-point values which return False for both senses -- of a condition (eg. !(NaN > NaN) && !(NaN /<= NaN)). maybeInvertComparison :: MachOp -> Maybe MachOp maybeInvertComparison op = case op of -- None of these Just cases include floating point MO_Eq w -> Just (MO_Ne w) MO_Ne w -> Just (MO_Eq w) MO_U_Lt w -> Just (MO_U_Ge w) MO_U_Gt w -> Just (MO_U_Le w) MO_U_Le w -> Just (MO_U_Gt w) MO_U_Ge w -> Just (MO_U_Lt w) MO_S_Lt w -> Just (MO_S_Ge w) MO_S_Gt w -> Just (MO_S_Le w) MO_S_Le w -> Just (MO_S_Gt w) MO_S_Ge w -> Just (MO_S_Lt w) _other -> Nothing -- ---------------------------------------------------------------------------- -- machOpResultType {- | Returns the MachRep of the result of a MachOp. 
-} machOpResultType :: Platform -> MachOp -> [CmmType] -> CmmType machOpResultType platform mop tys = case mop of MO_Add {} -> ty1 -- Preserve GC-ptr-hood MO_Sub {} -> ty1 -- of first arg MO_Mul w -> cmmBits w MO_S_MulMayOflo w -> cmmBits w MO_S_Quot w -> cmmBits w MO_S_Rem w -> cmmBits w MO_S_Neg w -> cmmBits w MO_U_Quot w -> cmmBits w MO_U_Rem w -> cmmBits w MO_Eq {} -> comparisonResultRep platform MO_Ne {} -> comparisonResultRep platform MO_S_Ge {} -> comparisonResultRep platform MO_S_Le {} -> comparisonResultRep platform MO_S_Gt {} -> comparisonResultRep platform MO_S_Lt {} -> comparisonResultRep platform MO_U_Ge {} -> comparisonResultRep platform MO_U_Le {} -> comparisonResultRep platform MO_U_Gt {} -> comparisonResultRep platform MO_U_Lt {} -> comparisonResultRep platform MO_F_Add w -> cmmFloat w MO_F_Sub w -> cmmFloat w MO_F_Mul w -> cmmFloat w MO_F_Quot w -> cmmFloat w MO_F_Neg w -> cmmFloat w MO_F_Min w -> cmmFloat w MO_F_Max w -> cmmFloat w MO_FMA _ l w -> if l == 1 then cmmFloat w else cmmVec l (cmmFloat w) MO_F_Eq {} -> comparisonResultRep platform MO_F_Ne {} -> comparisonResultRep platform MO_F_Ge {} -> comparisonResultRep platform MO_F_Le {} -> comparisonResultRep platform MO_F_Gt {} -> comparisonResultRep platform MO_F_Lt {} -> comparisonResultRep platform MO_And {} -> ty1 -- Used for pointer masking MO_Or {} -> ty1 MO_Xor {} -> ty1 MO_Not w -> cmmBits w MO_Shl w -> cmmBits w MO_U_Shr w -> cmmBits w MO_S_Shr w -> cmmBits w MO_SS_Conv _ to -> cmmBits to MO_UU_Conv _ to -> cmmBits to MO_XX_Conv _ to -> cmmBits to MO_FS_Truncate _ to -> cmmBits to MO_SF_Round _ to -> cmmFloat to MO_FF_Conv _ to -> cmmFloat to MO_WF_Bitcast w -> cmmFloat w MO_FW_Bitcast w -> cmmBits w MO_V_Broadcast l w -> cmmVec l (cmmBits w) MO_V_Insert l w -> cmmVec l (cmmBits w) MO_V_Extract _ w -> cmmBits w MO_V_Add l w -> cmmVec l (cmmBits w) MO_V_Sub l w -> cmmVec l (cmmBits w) MO_V_Mul l w -> cmmVec l (cmmBits w) MO_VS_Quot l w -> cmmVec l (cmmBits w) MO_VS_Rem l w -> cmmVec l 
(cmmBits w) MO_VS_Neg l w -> cmmVec l (cmmBits w) MO_VS_Min l w -> cmmVec l (cmmBits w) MO_VS_Max l w -> cmmVec l (cmmBits w) MO_VU_Quot l w -> cmmVec l (cmmBits w) MO_VU_Rem l w -> cmmVec l (cmmBits w) MO_VU_Min l w -> cmmVec l (cmmBits w) MO_VU_Max l w -> cmmVec l (cmmBits w) MO_V_Shuffle l w _ -> cmmVec l (cmmBits w) MO_VF_Shuffle l w _ -> cmmVec l (cmmFloat w) MO_VF_Broadcast l w -> cmmVec l (cmmFloat w) MO_VF_Insert l w -> cmmVec l (cmmFloat w) MO_VF_Extract _ w -> cmmFloat w MO_VF_Add l w -> cmmVec l (cmmFloat w) MO_VF_Sub l w -> cmmVec l (cmmFloat w) MO_VF_Mul l w -> cmmVec l (cmmFloat w) MO_VF_Quot l w -> cmmVec l (cmmFloat w) MO_VF_Neg l w -> cmmVec l (cmmFloat w) MO_VF_Min l w -> cmmVec l (cmmFloat w) MO_VF_Max l w -> cmmVec l (cmmFloat w) MO_RelaxedRead w -> cmmBits w MO_AlignmentCheck _ _ -> ty1 where (ty1:_) = tys comparisonResultRep :: Platform -> CmmType comparisonResultRep = bWord -- is it? -- ----------------------------------------------------------------------------- -- machOpArgReps -- | This function is used for debugging only: we can check whether an -- application of a MachOp is "type-correct" by checking that the MachReps of -- its arguments are the same as the MachOp expects. This is used when -- linting a CmmExpr. 
machOpArgReps :: Platform -> MachOp -> [Width] machOpArgReps platform op = case op of MO_Add w -> [w,w] MO_Sub w -> [w,w] MO_Eq w -> [w,w] MO_Ne w -> [w,w] MO_Mul w -> [w,w] MO_S_MulMayOflo w -> [w,w] MO_S_Quot w -> [w,w] MO_S_Rem w -> [w,w] MO_S_Neg w -> [w] MO_U_Quot w -> [w,w] MO_U_Rem w -> [w,w] MO_S_Ge w -> [w,w] MO_S_Le w -> [w,w] MO_S_Gt w -> [w,w] MO_S_Lt w -> [w,w] MO_U_Ge w -> [w,w] MO_U_Le w -> [w,w] MO_U_Gt w -> [w,w] MO_U_Lt w -> [w,w] MO_F_Add w -> [w,w] MO_F_Sub w -> [w,w] MO_F_Mul w -> [w,w] MO_F_Quot w -> [w,w] MO_F_Neg w -> [w] MO_F_Min w -> [w,w] MO_F_Max w -> [w,w] MO_FMA _ l w -> [vecwidth l w, vecwidth l w, vecwidth l w] MO_F_Eq w -> [w,w] MO_F_Ne w -> [w,w] MO_F_Ge w -> [w,w] MO_F_Le w -> [w,w] MO_F_Gt w -> [w,w] MO_F_Lt w -> [w,w] MO_And w -> [w,w] MO_Or w -> [w,w] MO_Xor w -> [w,w] MO_Not w -> [w] MO_Shl w -> [w, wordWidth platform] MO_U_Shr w -> [w, wordWidth platform] MO_S_Shr w -> [w, wordWidth platform] MO_SS_Conv from _ -> [from] MO_UU_Conv from _ -> [from] MO_XX_Conv from _ -> [from] MO_SF_Round from _ -> [from] MO_FS_Truncate from _ -> [from] MO_FF_Conv from _ -> [from] MO_WF_Bitcast w -> [w] MO_FW_Bitcast w -> [w] MO_V_Shuffle l w _ -> [vecwidth l w, vecwidth l w] MO_VF_Shuffle l w _ -> [vecwidth l w, vecwidth l w] MO_V_Broadcast _ w -> [w] MO_V_Insert l w -> [vecwidth l w, w, W32] MO_V_Extract l w -> [vecwidth l w, W32] MO_VF_Broadcast _ w -> [w] MO_VF_Insert l w -> [vecwidth l w, w, W32] MO_VF_Extract l w -> [vecwidth l w, W32] -- SIMD vector indices are always 32 bit MO_V_Add l w -> [vecwidth l w, vecwidth l w] MO_V_Sub l w -> [vecwidth l w, vecwidth l w] MO_V_Mul l w -> [vecwidth l w, vecwidth l w] MO_VS_Quot l w -> [vecwidth l w, vecwidth l w] MO_VS_Rem l w -> [vecwidth l w, vecwidth l w] MO_VS_Neg l w -> [vecwidth l w] MO_VS_Min l w -> [vecwidth l w, vecwidth l w] MO_VS_Max l w -> [vecwidth l w, vecwidth l w] MO_VU_Quot l w -> [vecwidth l w, vecwidth l w] MO_VU_Rem l w -> [vecwidth l w, vecwidth l w] MO_VU_Min l w -> [vecwidth 
l w, vecwidth l w] MO_VU_Max l w -> [vecwidth l w, vecwidth l w] -- NOTE: The below is owing to the fact that floats use the SSE registers MO_VF_Add l w -> [vecwidth l w, vecwidth l w] MO_VF_Sub l w -> [vecwidth l w, vecwidth l w] MO_VF_Mul l w -> [vecwidth l w, vecwidth l w] MO_VF_Quot l w -> [vecwidth l w, vecwidth l w] MO_VF_Neg l w -> [vecwidth l w] MO_VF_Min l w -> [vecwidth l w, vecwidth l w] MO_VF_Max l w -> [vecwidth l w, vecwidth l w] MO_RelaxedRead _ -> [wordWidth platform] MO_AlignmentCheck _ w -> [w] where vecwidth l w = widthFromBytes (l * widthInBytes w) ----------------------------------------------------------------------------- -- CallishMachOp ----------------------------------------------------------------------------- -- CallishMachOps tend to be implemented by foreign calls in some backends, -- so we separate them out. In Cmm, these can only occur in a -- statement position, in contrast to an ordinary MachOp which can occur -- anywhere in an expression. data CallishMachOp = MO_F64_Pwr | MO_F64_Sin | MO_F64_Cos | MO_F64_Tan | MO_F64_Sinh | MO_F64_Cosh | MO_F64_Tanh | MO_F64_Asin | MO_F64_Acos | MO_F64_Atan | MO_F64_Asinh | MO_F64_Acosh | MO_F64_Atanh | MO_F64_Log | MO_F64_Log1P | MO_F64_Exp | MO_F64_ExpM1 | MO_F64_Fabs | MO_F64_Sqrt | MO_F32_Pwr | MO_F32_Sin | MO_F32_Cos | MO_F32_Tan | MO_F32_Sinh | MO_F32_Cosh | MO_F32_Tanh | MO_F32_Asin | MO_F32_Acos | MO_F32_Atan | MO_F32_Asinh | MO_F32_Acosh | MO_F32_Atanh | MO_F32_Log | MO_F32_Log1P | MO_F32_Exp | MO_F32_ExpM1 | MO_F32_Fabs | MO_F32_Sqrt -- 64-bit int/word ops for when they exceed the native word size -- (i.e. 
on 32-bit architectures) | MO_I64_ToI | MO_I64_FromI | MO_W64_ToW | MO_W64_FromW | MO_x64_Neg | MO_x64_Add | MO_x64_Sub | MO_x64_Mul | MO_I64_Quot | MO_I64_Rem | MO_W64_Quot | MO_W64_Rem | MO_x64_And | MO_x64_Or | MO_x64_Xor | MO_x64_Not | MO_x64_Shl | MO_I64_Shr | MO_W64_Shr | MO_x64_Eq | MO_x64_Ne | MO_I64_Ge | MO_I64_Gt | MO_I64_Le | MO_I64_Lt | MO_W64_Ge | MO_W64_Gt | MO_W64_Le | MO_W64_Lt | MO_UF_Conv Width | MO_S_Mul2 Width | MO_S_QuotRem Width | MO_U_QuotRem Width | MO_U_QuotRem2 Width | MO_Add2 Width | MO_AddWordC Width | MO_SubWordC Width | MO_AddIntC Width | MO_SubIntC Width | MO_U_Mul2 Width | MO_Touch -- Keep variables live (when using interior pointers) -- Prefetch | MO_Prefetch_Data Int -- Prefetch hint. May change program performance but not -- program behavior. -- the Int can be 0-3. Needs to be known at compile time -- to interact with code generation correctly. -- TODO: add support for prefetch WRITES, -- currently only exposes prefetch reads, which -- would the majority of use cases in ghc anyways -- These three MachOps are parameterised by the known alignment -- of the destination and source (for memcpy/memmove) pointers. -- This information may be used for optimisation in backends. | MO_Memcpy Int | MO_Memset Int | MO_Memmove Int | MO_Memcmp Int | MO_PopCnt Width | MO_Pdep Width | MO_Pext Width | MO_Clz Width | MO_Ctz Width | MO_BSwap Width | MO_BRev Width | MO_AcquireFence | MO_ReleaseFence | MO_SeqCstFence -- | Atomic read-modify-write. Arguments are @[dest, n]@. | MO_AtomicRMW Width AtomicMachOp -- | Atomic read. Arguments are @[addr]@. | MO_AtomicRead Width MemoryOrdering -- | Atomic write. Arguments are @[addr, value]@. | MO_AtomicWrite Width MemoryOrdering -- | Atomic compare-and-swap. Arguments are @[dest, expected, new]@. -- Sequentially consistent. -- Possible future refactoring: should this be an'MO_AtomicRMW' variant? | MO_Cmpxchg Width -- | Atomic swap. 
Arguments are @[dest, new]@ | MO_Xchg Width -- These rts provided functions are special: suspendThread releases the -- capability, hence we mustn't sink any use of data stored in the capability -- after this instruction. | MO_SuspendThread | MO_ResumeThread deriving (Eq, Show) -- | C11 memory ordering semantics. data MemoryOrdering = MemOrderRelaxed -- ^ relaxed ordering | MemOrderAcquire -- ^ acquire ordering | MemOrderRelease -- ^ release ordering | MemOrderSeqCst -- ^ sequentially consistent deriving (Eq, Ord, Show) -- | The operation to perform atomically. data AtomicMachOp = AMO_Add | AMO_Sub | AMO_And | AMO_Nand | AMO_Or | AMO_Xor deriving (Eq, Show) pprCallishMachOp :: CallishMachOp -> SDoc pprCallishMachOp mo = text (show mo) -- | Return (results_hints,args_hints) callishMachOpHints :: CallishMachOp -> ([ForeignHint], [ForeignHint]) callishMachOpHints op = case op of MO_Memcpy _ -> ([], [AddrHint,AddrHint,NoHint]) MO_Memset _ -> ([], [AddrHint,NoHint,NoHint]) MO_Memmove _ -> ([], [AddrHint,AddrHint,NoHint]) MO_Memcmp _ -> ([], [AddrHint, AddrHint, NoHint]) MO_SuspendThread -> ([AddrHint], [AddrHint,NoHint]) MO_ResumeThread -> ([AddrHint], [AddrHint]) _ -> ([],[]) -- empty lists indicate NoHint -- | The alignment of a 'memcpy'-ish operation. machOpMemcpyishAlign :: CallishMachOp -> Maybe Int machOpMemcpyishAlign op = case op of MO_Memcpy align -> Just align MO_Memset align -> Just align MO_Memmove align -> Just align MO_Memcmp align -> Just align _ -> Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Node.hs0000644000000000000000000011741007346545000017647 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE LambdaCase #-} -- CmmNode type for representation using Hoopl graphs. 
module GHC.Cmm.Node ( CmmNode(..), CmmFormal, CmmActual, CmmTickish, UpdFrameOffset, Convention(..), ForeignConvention(..), ForeignTarget(..), foreignTargetHints, CmmReturnInfo(..), mapExp, mapExpDeep, wrapRecExp, foldExp, foldExpDeep, wrapRecExpf, mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors, mapCollectSuccessors, -- * Tick scopes CmmTickScope(..), isTickSubScope, combineTickScopes, ) where import GHC.Prelude hiding (succ) import GHC.Platform.Regs import GHC.Cmm.CLabel import GHC.Cmm.Expr import GHC.Cmm.Switch import GHC.Data.FastString import GHC.Data.Pair import GHC.Types.ForeignCall import GHC.Utils.Outputable import GHC.Runtime.Heap.Layout import GHC.Types.Tickish (CmmTickish) import qualified GHC.Types.Unique as U import GHC.Types.Basic (FunctionOrData(..)) import GHC.Platform import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Dataflow.Label import Data.Foldable (toList) import Data.Functor.Classes (liftCompare) import Data.Maybe import Data.List (tails,sortBy) import GHC.Types.Unique (nonDetCmpUnique) import GHC.Utils.Constants (debugIsOn) ------------------------ -- CmmNode #define ULabel {-# UNPACK #-} !Label data CmmNode e x where CmmEntry :: ULabel -> CmmTickScope -> CmmNode C O CmmComment :: FastString -> CmmNode O O -- Tick annotation, covering Cmm code in our tick scope. We only -- expect non-code @Tickish@ at this point (e.g. @SourceNote@). -- See Note [CmmTick scoping details] CmmTick :: !CmmTickish -> CmmNode O O -- Unwind pseudo-instruction, encoding stack unwinding -- instructions for a debugger. This describes how to reconstruct -- the "old" value of a register if we want to navigate the stack -- up one frame. Having unwind information for @Sp@ will allow the -- debugger to "walk" the stack. -- -- See Note [What is this unwinding business?] 
in "GHC.Cmm.DebugBlock" CmmUnwind :: [(GlobalReg, Maybe CmmExpr)] -> CmmNode O O CmmAssign :: !CmmReg -> !CmmExpr -> CmmNode O O -- Assign to register CmmStore :: !CmmExpr -> !CmmExpr -> !AlignmentSpec -> CmmNode O O -- Assign to memory location. Size is -- given by cmmExprType of the rhs. CmmUnsafeForeignCall :: -- An unsafe foreign call; -- see Note [Foreign calls] -- Like a "fat machine instruction"; can occur -- in the middle of a block ForeignTarget -> -- call target [CmmFormal] -> -- zero or more results [CmmActual] -> -- zero or more arguments CmmNode O O -- Semantics: clobbers any GlobalRegs for which callerSaves r == True -- See Note [Unsafe foreign calls clobber caller-save registers] -- -- Invariant: the arguments and the ForeignTarget must not -- mention any registers for which GHC.Platform.callerSaves -- is True. See Note [Register parameter passing]. CmmBranch :: ULabel -> CmmNode O C -- Goto another block in the same procedure CmmCondBranch :: { -- conditional branch cml_pred :: CmmExpr, cml_true, cml_false :: ULabel, cml_likely :: Maybe Bool -- likely result of the conditional, -- if known } -> CmmNode O C CmmSwitch :: CmmExpr -- Scrutinee, of some integral type -> SwitchTargets -- Cases. See Note [SwitchTargets] -> CmmNode O C CmmCall :: { -- A native call or tail call cml_target :: CmmExpr, -- never a CmmPrim to a CallishMachOp! cml_cont :: Maybe Label, -- Label of continuation (Nothing for return or tail call) -- -- Note [Continuation BlockIds] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- These BlockIds are called -- Continuation BlockIds, and are the only BlockIds that can -- occur in CmmExprs, namely as (CmmLit (CmmBlock b)) or -- (CmmStackSlot (Young b) _). cml_args_regs :: [GlobalRegUse], -- The argument GlobalRegs (Rx, Fx, Dx, Lx) that are passed -- to the call. This is essential information for the -- native code generator's register allocator; without -- knowing which GlobalRegs are live it has to assume that -- they are all live. 
This list should only include -- GlobalRegs that are mapped to real machine registers on -- the target platform. cml_args :: ByteOff, -- Byte offset, from the *old* end of the Area associated with -- the Label (if cml_cont = Nothing, then Old area), of -- youngest outgoing arg. Set the stack pointer to this before -- transferring control. -- (NB: an update frame might also have been stored in the Old -- area, but it'll be in an older part than the args.) cml_ret_args :: ByteOff, -- For calls *only*, the byte offset for youngest returned value -- This is really needed at the *return* point rather than here -- at the call, but in practice it's convenient to record it here. cml_ret_off :: ByteOff -- For calls *only*, the byte offset of the base of the frame that -- must be described by the info table for the return point. -- The older words are an update frames, which have their own -- info-table and layout information -- From a liveness point of view, the stack words older than -- cml_ret_off are treated as live, even if the sequel of -- the call goes into a loop. 
} -> CmmNode O C CmmForeignCall :: { -- A safe foreign call; see Note [Foreign calls] -- Always the last node of a block tgt :: ForeignTarget, -- call target and convention res :: [CmmFormal], -- zero or more results args :: [CmmActual], -- zero or more arguments; see Note [Register parameter passing] succ :: ULabel, -- Label of continuation ret_args :: ByteOff, -- same as cml_ret_args ret_off :: ByteOff, -- same as cml_ret_off intrbl:: Bool -- whether or not the call is interruptible } -> CmmNode O C instance OutputableP Platform (CmmNode e x) where pdoc = pprNode pprNode :: Platform -> CmmNode e x -> SDoc pprNode platform node = pp_node <+> pp_debug where pp_node :: SDoc pp_node = case node of -- label: CmmEntry id tscope -> (sdocOption sdocSuppressUniques $ \case True -> text "_lbl_" False -> ppr id ) <> colon <+> ppUnlessOption sdocSuppressTicks (text "//" <+> ppr tscope) -- // text CmmComment s -> text "//" <+> ftext s -- //tick bla<...> CmmTick t -> ppUnlessOption sdocSuppressTicks (text "//tick" <+> ppr t) -- unwind reg = expr; CmmUnwind regs -> text "unwind " <> commafy (map (\(r,e) -> ppr r <+> char '=' <+> pdoc platform e) regs) <> semi -- reg = expr; CmmAssign reg expr -> ppr reg <+> equals <+> pdoc platform expr <> semi -- rep[lv] = expr; CmmStore lv expr align -> rep <> align_mark <> brackets (pdoc platform lv) <+> equals <+> pdoc platform expr <> semi where align_mark = case align of Unaligned -> text "^" NaturallyAligned -> empty rep = ppr ( cmmExprType platform expr ) -- call "ccall" foo(x, y)[r1, r2]; -- ToDo ppr volatile CmmUnsafeForeignCall target results args -> hsep [ ppUnless (null results) $ parens (commafy $ map ppr results) <+> equals, text "call", pdoc platform target <> parens (commafy $ map (pdoc platform) args) <> semi] -- goto label; CmmBranch ident -> text "goto" <+> ppr ident <> semi -- if (expr) goto t; else goto f; CmmCondBranch expr t f l -> hsep [ text "if" , parens (pdoc platform expr) , case l of Nothing -> empty Just b -> 
parens (text "likely:" <+> ppr b) , text "goto" , ppr t <> semi , text "else goto" , ppr f <> semi ] CmmSwitch expr ids -> hang (hsep [ text "switch" , range , if isTrivialCmmExpr expr then pdoc platform expr else parens (pdoc platform expr) , text "{" ]) 4 (vcat (map ppCase cases) $$ def) $$ rbrace where (cases, mbdef) = switchTargetsFallThrough ids ppCase (is,l) = hsep [ text "case" , commafy $ toList $ fmap integer is , text ": goto" , ppr l <> semi ] def | Just l <- mbdef = hsep [ text "default:" , braces (text "goto" <+> ppr l <> semi) ] | otherwise = empty range = brackets $ hsep [integer lo, text "..", integer hi] where (lo,hi) = switchTargetsRange ids CmmCall tgt k regs out res updfr_off -> hcat [ text "call", space , pprFun tgt, parens (interpp'SP regs), space , returns <+> text "args: " <> ppr out <> comma <+> text "res: " <> ppr res <> comma <+> text "upd: " <> ppr updfr_off , semi ] where pprFun f@(CmmLit _) = pdoc platform f pprFun f = parens (pdoc platform f) returns | Just r <- k = text "returns to" <+> ppr r <> comma | otherwise = empty CmmForeignCall {tgt=t, res=rs, args=as, succ=s, ret_args=a, ret_off=u, intrbl=i} -> hcat $ if i then [text "interruptible", space] else [] ++ [ text "foreign call", space , pdoc platform t, text "(...)", space , text "returns to" <+> ppr s <+> text "args:" <+> parens (pdoc platform as) <+> text "ress:" <+> parens (ppr rs) , text "ret_args:" <+> ppr a , text "ret_off:" <+> ppr u , semi ] pp_debug :: SDoc pp_debug = if not debugIsOn then empty else case node of CmmEntry {} -> empty -- Looks terrible with text " // CmmEntry" CmmComment {} -> empty -- Looks also terrible with text " // CmmComment" CmmTick {} -> empty CmmUnwind {} -> text " // CmmUnwind" CmmAssign {} -> text " // CmmAssign" CmmStore {} -> text " // CmmStore" CmmUnsafeForeignCall {} -> text " // CmmUnsafeForeignCall" CmmBranch {} -> text " // CmmBranch" CmmCondBranch {} -> text " // CmmCondBranch" CmmSwitch {} -> text " // CmmSwitch" CmmCall {} -> text " 
// CmmCall" CmmForeignCall {} -> text " // CmmForeignCall" commafy :: [SDoc] -> SDoc commafy xs = hsep $ punctuate comma xs instance OutputableP Platform (Block CmmNode C C) where pdoc = pprBlock instance OutputableP Platform (Block CmmNode C O) where pdoc = pprBlock instance OutputableP Platform (Block CmmNode O C) where pdoc = pprBlock instance OutputableP Platform (Block CmmNode O O) where pdoc = pprBlock instance OutputableP Platform (Graph CmmNode e x) where pdoc = pprGraph pprBlock :: IndexedCO x SDoc SDoc ~ SDoc => Platform -> Block CmmNode e x -> IndexedCO e SDoc SDoc pprBlock platform block = foldBlockNodesB3 ( ($$) . pdoc platform , ($$) . (nest 4) . pdoc platform , ($$) . (nest 4) . pdoc platform ) block empty pprGraph :: Platform -> Graph CmmNode e x -> SDoc pprGraph platform = \case GNil -> empty GUnit block -> pdoc platform block GMany entry body exit -> text "{" $$ nest 2 (pprMaybeO entry $$ (vcat $ map (pdoc platform) $ bodyToBlockList body) $$ pprMaybeO exit) $$ text "}" where pprMaybeO :: OutputableP Platform (Block CmmNode e x) => MaybeO ex (Block CmmNode e x) -> SDoc pprMaybeO NothingO = empty pprMaybeO (JustO block) = pdoc platform block {- Note [Foreign calls] ~~~~~~~~~~~~~~~~~~~~~~~ A CmmUnsafeForeignCall is used for *unsafe* foreign calls; a CmmForeignCall call is used for *safe* foreign calls. Unsafe ones are mostly easy: think of them as a "fat machine instruction". In particular, they do *not* kill all live registers, just the registers they return to (there was a bit of code in GHC that conservatively assumed otherwise.) However, see [Register parameter passing]. Safe ones are trickier. 
A safe foreign call r = f(x) ultimately expands to push "return address" -- Never used to return to; -- just points an info table save registers into TSO call suspendThread r = f(x) -- Make the call call resumeThread restore registers pop "return address" We cannot "lower" a safe foreign call to this sequence of Cmms, because after we've saved Sp all the Cmm optimiser's assumptions are broken. Note that a safe foreign call needs an info table. So Safe Foreign Calls must remain as last nodes until the stack is made manifest in GHC.Cmm.LayoutStack, where they are lowered into the above sequence. -} {- Note [Unsafe foreign calls clobber caller-save registers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A foreign call is defined to clobber any GlobalRegs that are mapped to caller-saves machine registers (according to the prevailing C ABI). GHC.StgToCmm.Utils.callerSaves tells you which GlobalRegs are caller-saves. This is a design choice that makes it easier to generate code later. We could instead choose to say that foreign calls do *not* clobber caller-saves regs, but then we would have to figure out which regs were live across the call later and insert some saves/restores. Furthermore when we generate code we never have any GlobalRegs live across a call, because they are always copied-in to LocalRegs and copied-out again before making a call/jump. So all we have to do is avoid any code motion that would make a caller-saves GlobalReg live across a foreign call during subsequent optimisations. -} {- Note [Register parameter passing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ On certain architectures, some registers are utilized for parameter passing in the C calling convention. For example, in x86-64 Linux convention, rdi, rsi, rdx and rcx (as well as r8 and r9) may be used for argument passing. These are registers R3-R6, which our generated code may also be using; as a result, it's necessary to save these values before doing a foreign call. 
This is done during initial code generation in callerSaveVolatileRegs in GHC.StgToCmm.Utils. However, one result of doing this is that the contents of these registers may mysteriously change if referenced inside the arguments. This is dangerous, so you'll need to disable inlining much in the same way is done in GHC.Cmm.Sink currently. We should fix this! -} --------------------------------------------- -- Eq instance of CmmNode deriving instance Eq (CmmNode e x) ---------------------------------------------- -- Hoopl instances of CmmNode instance NonLocal CmmNode where entryLabel (CmmEntry l _) = l successors (CmmBranch l) = [l] successors (CmmCondBranch {cml_true=t, cml_false=f}) = [f, t] -- meets layout constraint successors (CmmSwitch _ ids) = switchTargetsToList ids successors (CmmCall {cml_cont=l}) = maybeToList l successors (CmmForeignCall {succ=l}) = [l] -------------------------------------------------- -- Various helper types type CmmActual = CmmExpr type CmmFormal = LocalReg type UpdFrameOffset = ByteOff -- | A convention maps a list of values (function arguments or return -- values) to registers or stack locations. data Convention = NativeDirectCall -- ^ top-level Haskell functions use @NativeDirectCall@, which -- maps arguments to registers starting with R2, according to -- how many registers are available on the platform. This -- convention ignores R1, because for a top-level function call -- the function closure is implicit, and doesn't need to be passed. | NativeNodeCall -- ^ non-top-level Haskell functions, which pass the address of -- the function closure in R1 (regardless of whether R1 is a -- real register or not), and the rest of the arguments in -- registers or on the stack. | NativeReturn -- ^ a native return. The convention for returns depends on -- how many values are returned: for just one value returned, -- the appropriate register is used (R1, F1, etc.). regardless -- of whether it is a real register or not. 
For multiple -- values returned, they are mapped to registers or the stack. | Slow -- ^ Slow entry points: all args pushed on the stack | GC -- ^ Entry to the garbage collector: uses the node reg! -- (TODO: I don't think we need this --SDM) deriving( Eq ) data ForeignConvention = ForeignConvention CCallConv -- Which foreign-call convention [ForeignHint] -- Extra info about the args [ForeignHint] -- Extra info about the result CmmReturnInfo deriving Eq instance Outputable ForeignConvention where ppr = pprForeignConvention pprForeignConvention :: ForeignConvention -> SDoc pprForeignConvention (ForeignConvention c args res ret) = doubleQuotes (ppr c) <+> text "arg hints: " <+> ppr args <+> text " result hints: " <+> ppr res <+> ppr ret data CmmReturnInfo = CmmMayReturn | CmmNeverReturns deriving ( Eq ) instance Outputable CmmReturnInfo where ppr = pprReturnInfo pprReturnInfo :: CmmReturnInfo -> SDoc pprReturnInfo CmmMayReturn = empty pprReturnInfo CmmNeverReturns = text "never returns" data ForeignTarget -- The target of a foreign call = ForeignTarget -- A foreign procedure CmmExpr -- Its address ForeignConvention -- Its calling convention | PrimTarget -- A possibly-side-effecting machine operation CallishMachOp -- Which one deriving Eq instance OutputableP Platform ForeignTarget where pdoc = pprForeignTarget pprForeignTarget :: Platform -> ForeignTarget -> SDoc pprForeignTarget platform (ForeignTarget fn c) = ppr c <+> ppr_target fn where ppr_target :: CmmExpr -> SDoc ppr_target t@(CmmLit _) = pdoc platform t ppr_target fn' = parens (pdoc platform fn') pprForeignTarget platform (PrimTarget op) -- HACK: We're just using a ForeignLabel to get this printed, the label -- might not really be foreign. 
= pdoc platform (mkForeignLabel (mkFastString (show op)) ForeignLabelInThisPackage IsFunction) instance Outputable Convention where ppr = pprConvention pprConvention :: Convention -> SDoc pprConvention (NativeNodeCall {}) = text "" pprConvention (NativeDirectCall {}) = text "" pprConvention (NativeReturn {}) = text "" pprConvention Slow = text "" pprConvention GC = text "" foreignTargetHints :: ForeignTarget -> ([ForeignHint], [ForeignHint]) foreignTargetHints target = ( res_hints ++ repeat NoHint , arg_hints ++ repeat NoHint ) where (res_hints, arg_hints) = case target of PrimTarget op -> callishMachOpHints op ForeignTarget _ (ForeignConvention _ arg_hints res_hints _) -> (res_hints, arg_hints) -------------------------------------------------- -- Instances of register and slot users / definers instance UserOfRegs LocalReg (CmmNode e x) where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z n = case n of CmmAssign _ expr -> fold f z expr CmmStore addr rval _ -> fold f (fold f z addr) rval CmmUnsafeForeignCall t _ args -> fold f (fold f z t) args CmmCondBranch expr _ _ _ -> fold f z expr CmmSwitch expr _ -> fold f z expr CmmCall {cml_target=tgt} -> fold f z tgt CmmForeignCall {tgt=tgt, args=args} -> fold f (fold f z tgt) args _ -> z where fold :: forall a b. UserOfRegs LocalReg a => (b -> LocalReg -> b) -> b -> a -> b fold f z n = foldRegsUsed platform f z n instance UserOfRegs GlobalRegUse (CmmNode e x) where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z n = case n of CmmAssign _ expr -> fold f z expr CmmStore addr rval _ -> fold f (fold f z addr) rval CmmUnsafeForeignCall t _ args -> fold f (fold f z t) args CmmCondBranch expr _ _ _ -> fold f z expr CmmSwitch expr _ -> fold f z expr CmmCall {cml_target=tgt, cml_args_regs=args} -> fold f (fold f z args) tgt CmmForeignCall {tgt=tgt, args=args} -> fold f (fold f z tgt) args _ -> z where fold :: forall a b. 
UserOfRegs GlobalRegUse a => (b -> GlobalRegUse -> b) -> b -> a -> b fold f z n = foldRegsUsed platform f z n instance (Ord r, UserOfRegs r CmmReg) => UserOfRegs r ForeignTarget where -- The (Ord r) in the context is necessary here -- See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance {-# INLINEABLE foldRegsUsed #-} foldRegsUsed _ _ !z (PrimTarget _) = z foldRegsUsed platform f !z (ForeignTarget e _) = foldRegsUsed platform f z e instance DefinerOfRegs LocalReg (CmmNode e x) where {-# INLINEABLE foldRegsDefd #-} foldRegsDefd platform f !z n = case n of CmmAssign lhs _ -> fold f z lhs CmmUnsafeForeignCall _ fs _ -> fold f z fs CmmForeignCall {res=res} -> fold f z res _ -> z where fold :: forall a b. DefinerOfRegs LocalReg a => (b -> LocalReg -> b) -> b -> a -> b fold f z n = foldRegsDefd platform f z n instance DefinerOfRegs GlobalRegUse (CmmNode e x) where {-# INLINEABLE foldRegsDefd #-} foldRegsDefd platform f !z n = case n of CmmAssign lhs _ -> fold f z lhs CmmUnsafeForeignCall tgt _ _ -> fold f z (foreignTargetRegs tgt) CmmCall {} -> fold f z activeRegs CmmForeignCall {} -> fold f z activeRegs -- See Note [Safe foreign calls clobber STG registers] _ -> z where fold :: forall a b. DefinerOfRegs GlobalRegUse a => (b -> GlobalRegUse -> b) -> b -> a -> b fold f z n = foldRegsDefd platform f z n activeRegs :: [GlobalRegUse] activeRegs = map (\ r -> GlobalRegUse r (globalRegSpillType platform r)) $ activeStgRegs platform activeCallerSavesRegs = filter (callerSaves platform . globalRegUse_reg) activeRegs foreignTargetRegs (ForeignTarget _ (ForeignConvention _ _ _ CmmNeverReturns)) = [] foreignTargetRegs _ = activeCallerSavesRegs -- Note [Safe foreign calls clobber STG registers] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- During stack layout phase every safe foreign call is expanded into a block -- that contains unsafe foreign call (instead of safe foreign call) and ends -- with a normal call (See Note [Foreign calls]). 
This means that we must -- treat safe foreign call as if it was a normal call (because eventually it -- will be). This is important if we try to run sinking pass before stack -- layout phase. Consider this example of what might go wrong (this is cmm -- code from stablename001 test). Here is code after common block elimination -- (before stack layout): -- -- c1q6: -- _s1pf::P64 = R1; -- _c1q8::I64 = performMajorGC; -- I64[(young + 8)] = c1q9; -- foreign call "ccall" arg hints: [] result hints: [] (_c1q8::I64)(...) -- returns to c1q9 args: ([]) ress: ([])ret_args: 8ret_off: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- If we run sinking pass now (still before stack layout) we will get this: -- -- c1q6: -- I64[(young + 8)] = c1q9; -- foreign call "ccall" arg hints: [] result hints: [] performMajorGC(...) -- returns to c1q9 args: ([]) ress: ([])ret_args: 8ret_off: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- _s1pf::P64 = R1; <------ _s1pf sunk past safe foreign call -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- Notice that _s1pf was sunk past a foreign call. 
When we run stack layout -- safe call to performMajorGC will be turned into: -- -- c1q6: -- _s1pc::P64 = P64[Sp + 8]; -- I64[Sp - 8] = c1q9; -- Sp = Sp - 8; -- I64[I64[CurrentTSO + 24] + 16] = Sp; -- P64[CurrentNursery + 8] = Hp + 8; -- (_u1qI::I64) = call "ccall" arg hints: [PtrHint,] -- result hints: [PtrHint] suspendThread(BaseReg, 0); -- call "ccall" arg hints: [] result hints: [] performMajorGC(); -- (_u1qJ::I64) = call "ccall" arg hints: [PtrHint] -- result hints: [PtrHint] resumeThread(_u1qI::I64); -- BaseReg = _u1qJ::I64; -- _u1qK::P64 = CurrentTSO; -- _u1qL::P64 = I64[_u1qK::P64 + 24]; -- Sp = I64[_u1qL::P64 + 16]; -- SpLim = _u1qL::P64 + 192; -- HpAlloc = 0; -- Hp = I64[CurrentNursery + 8] - 8; -- HpLim = I64[CurrentNursery] + (%MO_SS_Conv_W32_W64(I32[CurrentNursery + 48]) * 4096 - 1); -- call (I64[Sp])() returns to c1q9, args: 8, res: 8, upd: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- _s1pf::P64 = R1; <------ INCORRECT! -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- Notice that c1q6 now ends with a call. Sinking _s1pf::P64 = R1 past that -- call is clearly incorrect. This is what would happen if we assumed that -- safe foreign call has the same semantics as unsafe foreign call. To prevent -- this we need to treat safe foreign call as if was normal call. ----------------------------------- -- mapping Expr in GHC.Cmm.Node mapForeignTarget :: (CmmExpr -> CmmExpr) -> ForeignTarget -> ForeignTarget mapForeignTarget exp (ForeignTarget e c) = ForeignTarget (exp e) c mapForeignTarget _ m@(PrimTarget _) = m wrapRecExp :: (CmmExpr -> CmmExpr) -> CmmExpr -> CmmExpr -- Take a transformer on expressions and apply it recursively. 
-- (wrapRecExp f e) first recursively applies itself to sub-expressions of e -- then uses f to rewrite the resulting expression wrapRecExp f (CmmMachOp op es) = f (CmmMachOp op $ map (wrapRecExp f) es) wrapRecExp f (CmmLoad addr ty align) = f (CmmLoad (wrapRecExp f addr) ty align) wrapRecExp f e = f e mapExp :: (CmmExpr -> CmmExpr) -> CmmNode e x -> CmmNode e x mapExp _ f@(CmmEntry{}) = f mapExp _ m@(CmmComment _) = m mapExp _ m@(CmmTick _) = m mapExp f (CmmUnwind regs) = CmmUnwind (map (fmap (fmap f)) regs) mapExp f (CmmAssign r e) = CmmAssign r (f e) mapExp f (CmmStore addr e align) = CmmStore (f addr) (f e) align mapExp f (CmmUnsafeForeignCall tgt fs as) = CmmUnsafeForeignCall (mapForeignTarget f tgt) fs (map f as) mapExp _ l@(CmmBranch _) = l mapExp f (CmmCondBranch e ti fi l) = CmmCondBranch (f e) ti fi l mapExp f (CmmSwitch e ids) = CmmSwitch (f e) ids mapExp f n@CmmCall {cml_target=tgt} = n{cml_target = f tgt} mapExp f (CmmForeignCall tgt fs as succ ret_args updfr intrbl) = CmmForeignCall (mapForeignTarget f tgt) fs (map f as) succ ret_args updfr intrbl mapExpDeep :: (CmmExpr -> CmmExpr) -> CmmNode e x -> CmmNode e x mapExpDeep f = mapExp $ wrapRecExp f ------------------------------------------------------------------------ -- mapping Expr in GHC.Cmm.Node, but not performing allocation if no changes mapForeignTargetM :: (CmmExpr -> Maybe CmmExpr) -> ForeignTarget -> Maybe ForeignTarget mapForeignTargetM f (ForeignTarget e c) = (\x -> ForeignTarget x c) `fmap` f e mapForeignTargetM _ (PrimTarget _) = Nothing wrapRecExpM :: (CmmExpr -> Maybe CmmExpr) -> (CmmExpr -> Maybe CmmExpr) -- (wrapRecExpM f e) first recursively applies itself to sub-expressions of e -- then gives f a chance to rewrite the resulting expression wrapRecExpM f n@(CmmMachOp op es) = maybe (f n) (f . 
CmmMachOp op) (mapListM (wrapRecExpM f) es) wrapRecExpM f n@(CmmLoad addr ty align) = maybe (f n) (\addr' -> f $ CmmLoad addr' ty align) (wrapRecExpM f addr) wrapRecExpM f e = f e mapExpM :: (CmmExpr -> Maybe CmmExpr) -> CmmNode e x -> Maybe (CmmNode e x) mapExpM _ (CmmEntry{}) = Nothing mapExpM _ (CmmComment _) = Nothing mapExpM _ (CmmTick _) = Nothing mapExpM f (CmmUnwind regs) = CmmUnwind `fmap` mapM (\(r,e) -> mapM f e >>= \e' -> pure (r,e')) regs mapExpM f (CmmAssign r e) = CmmAssign r `fmap` f e mapExpM f (CmmStore addr e align) = (\ (Pair addr' e') -> CmmStore addr' e' align) `fmap` traverse f (Pair addr e) mapExpM _ (CmmBranch _) = Nothing mapExpM f (CmmCondBranch e ti fi l) = (\x -> CmmCondBranch x ti fi l) `fmap` f e mapExpM f (CmmSwitch e tbl) = (\x -> CmmSwitch x tbl) `fmap` f e mapExpM f (CmmCall tgt mb_id r o i s) = (\x -> CmmCall x mb_id r o i s) `fmap` f tgt mapExpM f (CmmUnsafeForeignCall tgt fs as) = case mapForeignTargetM f tgt of Just tgt' -> Just (CmmUnsafeForeignCall tgt' fs (mapListJ f as)) Nothing -> (\xs -> CmmUnsafeForeignCall tgt fs xs) `fmap` mapListM f as mapExpM f (CmmForeignCall tgt fs as succ ret_args updfr intrbl) = case mapForeignTargetM f tgt of Just tgt' -> Just (CmmForeignCall tgt' fs (mapListJ f as) succ ret_args updfr intrbl) Nothing -> (\xs -> CmmForeignCall tgt fs xs succ ret_args updfr intrbl) `fmap` mapListM f as -- share as much as possible mapListM :: (a -> Maybe a) -> [a] -> Maybe [a] mapListM f xs = let (b, r) = mapListT f xs in if b then Just r else Nothing mapListJ :: (a -> Maybe a) -> [a] -> [a] mapListJ f xs = snd (mapListT f xs) mapListT :: (a -> Maybe a) -> [a] -> (Bool, [a]) mapListT f xs = foldr g (False, []) (zip3 (tails xs) xs (map f xs)) where g (_, y, Nothing) (True, ys) = (True, y:ys) g (_, _, Just y) (True, ys) = (True, y:ys) g (ys', _, Nothing) (False, _) = (False, ys') g (_, _, Just y) (False, ys) = (True, y:ys) mapExpDeepM :: (CmmExpr -> Maybe CmmExpr) -> CmmNode e x -> Maybe (CmmNode e x) mapExpDeepM 
f = mapExpM $ wrapRecExpM f ----------------------------------- -- folding Expr in GHC.Cmm.Node foldExpForeignTarget :: (CmmExpr -> z -> z) -> ForeignTarget -> z -> z foldExpForeignTarget exp (ForeignTarget e _) z = exp e z foldExpForeignTarget _ (PrimTarget _) z = z -- Take a folder on expressions and apply it recursively. -- Specifically (wrapRecExpf f e z) deals with CmmMachOp and CmmLoad -- itself, delegating all the other CmmExpr forms to 'f'. wrapRecExpf :: (CmmExpr -> z -> z) -> CmmExpr -> z -> z wrapRecExpf f e@(CmmMachOp _ es) z = foldr (wrapRecExpf f) (f e z) es wrapRecExpf f e@(CmmLoad addr _ _) z = wrapRecExpf f addr (f e z) wrapRecExpf f e z = f e z foldExp :: (CmmExpr -> z -> z) -> CmmNode e x -> z -> z foldExp _ (CmmEntry {}) z = z foldExp _ (CmmComment {}) z = z foldExp _ (CmmTick {}) z = z foldExp f (CmmUnwind xs) z = foldr (maybe id f) z (map snd xs) foldExp f (CmmAssign _ e) z = f e z foldExp f (CmmStore addr e _) z = f addr $ f e z foldExp f (CmmUnsafeForeignCall t _ as) z = foldr f (foldExpForeignTarget f t z) as foldExp _ (CmmBranch _) z = z foldExp f (CmmCondBranch e _ _ _) z = f e z foldExp f (CmmSwitch e _) z = f e z foldExp f (CmmCall {cml_target=tgt}) z = f tgt z foldExp f (CmmForeignCall {tgt=tgt, args=args}) z = foldr f (foldExpForeignTarget f tgt z) args foldExpDeep :: (CmmExpr -> z -> z) -> CmmNode e x -> z -> z foldExpDeep f = foldExp (wrapRecExpf f) -- ----------------------------------------------------------------------------- mapSuccessors :: (Label -> Label) -> CmmNode O C -> CmmNode O C mapSuccessors f (CmmBranch bid) = CmmBranch (f bid) mapSuccessors f (CmmCondBranch p y n l) = CmmCondBranch p (f y) (f n) l mapSuccessors f (CmmSwitch e ids) = CmmSwitch e (mapSwitchTargets f ids) mapSuccessors _ n = n mapCollectSuccessors :: forall a. 
(Label -> (Label,a)) -> CmmNode O C -> (CmmNode O C, [a]) mapCollectSuccessors f (CmmBranch bid) = let (bid', acc) = f bid in (CmmBranch bid', [acc]) mapCollectSuccessors f (CmmCondBranch p y n l) = let (bidt, acct) = f y (bidf, accf) = f n in (CmmCondBranch p bidt bidf l, [accf, acct]) mapCollectSuccessors f (CmmSwitch e ids) = let lbls = switchTargetsToList ids :: [Label] lblMap = mapFromList $ zip lbls (map f lbls) :: LabelMap (Label, a) in ( CmmSwitch e (mapSwitchTargets (\l -> fst $ mapFindWithDefault (error "impossible") l lblMap) ids) , map snd (mapElems lblMap) ) mapCollectSuccessors _ n = (n, []) -- ----------------------------------------------------------------------------- -- | Tick scope identifier, allowing us to reason about what -- annotations in a Cmm block should scope over. We especially take -- care to allow optimisations to reorganise blocks without losing -- tick association in the process. data CmmTickScope = GlobalScope -- ^ The global scope is the "root" of the scope graph. Every -- scope is a sub-scope of the global scope. It doesn't make sense -- to add ticks to this scope. On the other hand, this means that -- setting this scope on a block means no ticks apply to it. | SubScope !U.Unique CmmTickScope -- ^ Constructs a new sub-scope to an existing scope. This allows -- us to translate Core-style scoping rules (see @tickishScoped@) -- into the Cmm world. Suppose the following code: -- -- tick<1> case ... of -- A -> tick<2> ... -- B -> tick<3> ... -- -- We want the top-level tick annotation to apply to blocks -- generated for the A and B alternatives. We can achieve that by -- generating tick<1> into a block with scope a, while the code -- for alternatives A and B gets generated into sub-scopes a/b and -- a/c respectively. | CombinedScope CmmTickScope CmmTickScope -- ^ A combined scope scopes over everything that the two given -- scopes cover. It is therefore a sub-scope of either scope. This -- is required for optimisations. 
Consider common block elimination: -- -- A -> tick<2> case ... of -- C -> [common] -- B -> tick<3> case ... of -- D -> [common] -- -- We will generate code for the C and D alternatives, and figure -- out afterwards that it's actually common code. Scoping rules -- dictate that the resulting common block needs to be covered by -- both tick<2> and tick<3>, therefore we need to construct a -- scope that is a child to *both* scope. Now we can do that - if -- we assign the scopes a/c and b/d to the common-ed up blocks, -- the new block could have a combined tick scope a/c+b/d, which -- both tick<2> and tick<3> apply to. -- Note [CmmTick scoping details] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The scope of a @CmmTick@ is given by the @CmmEntry@ node of the -- same block. Note that as a result of this, optimisations making -- tick scopes more specific can *reduce* the amount of code a tick -- scopes over. Fixing this would require a separate @CmmTickScope@ -- field for @CmmTick@. Right now we do not do this simply because I -- couldn't find an example where it actually mattered -- multiple -- blocks within the same scope generally jump to each other, which -- prevents common block elimination from happening in the first -- place. But this is no strong reason, so if Cmm optimisations become -- more involved in future this might have to be revisited. -- | Output all scope paths. scopeToPaths :: CmmTickScope -> [[U.Unique]] scopeToPaths GlobalScope = [[]] scopeToPaths (SubScope u s) = map (u:) (scopeToPaths s) scopeToPaths (CombinedScope s1 s2) = scopeToPaths s1 ++ scopeToPaths s2 -- | Returns the head uniques of the scopes. This is based on the -- assumption that the @Unique@ of @SubScope@ identifies the -- underlying super-scope. Used for efficient equality and comparison, -- see below. 
scopeUniques :: CmmTickScope -> [U.Unique] scopeUniques GlobalScope = [] scopeUniques (SubScope u _) = [u] scopeUniques (CombinedScope s1 s2) = scopeUniques s1 ++ scopeUniques s2 -- Equality and order is based on the head uniques defined above. We -- take care to short-cut the (extremely) common cases. instance Eq CmmTickScope where GlobalScope == GlobalScope = True GlobalScope == _ = False _ == GlobalScope = False (SubScope u _) == (SubScope u' _) = u == u' (SubScope _ _) == _ = False _ == (SubScope _ _) = False scope == scope' = sortBy nonDetCmpUnique (scopeUniques scope) == sortBy nonDetCmpUnique (scopeUniques scope') -- This is still deterministic because -- the order is the same for equal lists -- This is non-deterministic but we do not currently support deterministic -- code-generation. See Note [Unique Determinism and code generation] -- See Note [No Ord for Unique] instance Ord CmmTickScope where compare GlobalScope GlobalScope = EQ compare GlobalScope _ = LT compare _ GlobalScope = GT compare (SubScope u _) (SubScope u' _) = nonDetCmpUnique u u' compare scope scope' = liftCompare nonDetCmpUnique (sortBy nonDetCmpUnique $ scopeUniques scope) (sortBy nonDetCmpUnique $ scopeUniques scope') instance Outputable CmmTickScope where ppr GlobalScope = text "global" ppr (SubScope us GlobalScope) = ppr us ppr (SubScope us s) = ppr s <> char '/' <> ppr us ppr combined = parens $ hcat $ punctuate (char '+') $ map (hcat . punctuate (char '/') . map ppr . reverse) $ scopeToPaths combined -- | Checks whether two tick scopes are sub-scopes of each other. True -- if the two scopes are equal. isTickSubScope :: CmmTickScope -> CmmTickScope -> Bool isTickSubScope = cmp where cmp _ GlobalScope = True cmp GlobalScope _ = False cmp (CombinedScope s1 s2) s' = cmp s1 s' && cmp s2 s' cmp s (CombinedScope s1' s2') = cmp s s1' || cmp s s2' cmp (SubScope u s) s'@(SubScope u' _) = u == u' || cmp s s' -- | Combine two tick scopes. The new scope should be sub-scope of -- both parameters. 
We simplify automatically if one tick scope is a -- sub-scope of the other already. combineTickScopes :: CmmTickScope -> CmmTickScope -> CmmTickScope combineTickScopes s1 s2 | s1 `isTickSubScope` s2 = s1 | s2 `isTickSubScope` s1 = s2 | otherwise = CombinedScope s1 s2 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Reg.hs0000644000000000000000000003257407346545000017506 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} module GHC.Cmm.Reg ( -- * Cmm Registers CmmReg(..) , cmmRegType , cmmRegWidth -- * Local registers , LocalReg(..) , localRegType -- * Global registers , GlobalReg(..), isArgReg, globalRegSpillType, pprGlobalReg , spReg, hpReg, spLimReg, hpLimReg, nodeReg , currentTSOReg, currentNurseryReg, hpAllocReg, cccsReg , node, baseReg , GlobalRegUse(..), pprGlobalRegUse , GlobalArgRegs(..) ) where import GHC.Prelude import GHC.Platform import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Cmm.Type ----------------------------------------------------------------------------- -- Cmm registers ----------------------------------------------------------------------------- {- Note [GlobalReg vs GlobalRegUse] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We distinguish GlobalReg, which describes registers in the STG abstract machine, with GlobalRegUse, which describes an usage of such a register to store values of a particular CmmType. For example, we might want to load/store an 8-bit integer in a register that can store 32-bit integers. The width of the type must fit in the register, i.e. for a usage @GlobalRegUse reg ty@ we must have that > typeWidth ty <= typeWidth (globalRegSpillType reg) The restrictions about what categories of types can be stored in a given register are less easily stated. Some examples are: - Vanilla registers can contain both pointers (gcWord) and non-pointers (bWord), as well as sub-word sized values (e.g. b16). 
- On x86_64, SIMD registers can be used to hold vectors of both floating and integral values (e.g. XmmReg may store 2 Double values or 4 Int32 values). -} -- | A use of a global register at a particular type. -- -- While a 'GlobalReg' identifies a global register in the STG machine, -- a 'GlobalRegUse' also contains information about the type we are storing -- in the register. -- -- See Note [GlobalReg vs GlobalRegUse] for more information. data GlobalRegUse = GlobalRegUse { globalRegUse_reg :: !GlobalReg -- ^ The underlying 'GlobalReg' , globalRegUse_type :: !CmmType -- ^ The 'CmmType' at which we are using the 'GlobalReg'. -- -- Its width must be less than the width of the 'GlobalReg': -- -- > typeWidth ty <= typeWidth (globalRegSpillType platform reg) } deriving Show instance Outputable GlobalRegUse where ppr (GlobalRegUse reg _) = ppr reg pprGlobalRegUse :: IsLine doc => GlobalRegUse -> doc pprGlobalRegUse (GlobalRegUse reg _) = pprGlobalReg reg -- TODO: these instances should be removed in favour -- of more surgical uses of equality. instance Eq GlobalRegUse where GlobalRegUse r1 _ == GlobalRegUse r2 _ = r1 == r2 instance Ord GlobalRegUse where GlobalRegUse r1 _ `compare` GlobalRegUse r2 _ = compare r1 r2 data CmmReg = CmmLocal {-# UNPACK #-} !LocalReg | CmmGlobal GlobalRegUse deriving ( Eq, Ord, Show ) instance Outputable CmmReg where ppr e = pprReg e pprReg :: CmmReg -> SDoc pprReg r = case r of CmmLocal local -> pprLocalReg local CmmGlobal (GlobalRegUse global _ty) -> pprGlobalReg global cmmRegType :: CmmReg -> CmmType cmmRegType (CmmLocal reg) = localRegType reg cmmRegType (CmmGlobal reg) = globalRegUse_type reg cmmRegWidth :: CmmReg -> Width cmmRegWidth = typeWidth . cmmRegType ----------------------------------------------------------------------------- -- Local registers ----------------------------------------------------------------------------- data LocalReg = LocalReg {-# UNPACK #-} !Unique !CmmType -- ^ Parameters: -- 1. Identifier -- 2. 
Type deriving Show instance Eq LocalReg where (LocalReg u1 _) == (LocalReg u2 _) = u1 == u2 instance Outputable LocalReg where ppr e = pprLocalReg e -- This is non-deterministic but we do not currently support deterministic -- code-generation. See Note [Unique Determinism and code generation] -- See Note [No Ord for Unique] instance Ord LocalReg where compare (LocalReg u1 _) (LocalReg u2 _) = nonDetCmpUnique u1 u2 instance Uniquable LocalReg where getUnique (LocalReg uniq _) = uniq localRegType :: LocalReg -> CmmType localRegType (LocalReg _ rep) = rep -- -- We only print the type of the local reg if it isn't wordRep -- pprLocalReg :: LocalReg -> SDoc pprLocalReg (LocalReg uniq rep) = -- = ppr rep <> char '_' <> ppr uniq -- Temp Jan08 char '_' <> pprUnique uniq <> (if isWord32 rep -- && not (isGcPtrType rep) -- Temp Jan08 -- sigh then dcolon <> ptr <> ppr rep else dcolon <> ptr <> ppr rep) where pprUnique unique = sdocOption sdocSuppressUniques $ \case True -> text "_locVar_" False -> ppr unique ptr = empty --if isGcPtrType rep -- then doubleQuotes (text "ptr") -- else empty ----------------------------------------------------------------------------- -- Global STG registers ----------------------------------------------------------------------------- {- Note [Overlapping global registers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The backend might not faithfully implement the abstraction of the STG machine with independent registers for different values of type GlobalReg. Specifically, certain pairs of registers (r1, r2) may overlap in the sense that a store to r1 invalidates the value in r2, and vice versa. Currently this occurs only on the x86_64 architecture where FloatReg n and DoubleReg n are assigned the same microarchitectural register, in order to allow functions to receive more Float# or Double# arguments in registers (as opposed to on the stack). 
There are no specific rules about which registers might overlap with which other registers, but presumably it's safe to assume that nothing will overlap with special registers like Sp or BaseReg. Use GHC.Cmm.Utils.regsOverlap to determine whether two GlobalRegs overlap on a particular platform. The instance Eq GlobalReg is syntactic equality of STG registers and does not take overlap into account. However it is still used in UserOfRegs/DefinerOfRegs and there are likely still bugs there, beware! -} -- | An abstract global register for the STG machine. -- -- See also 'GlobalRegUse', which denotes a usage of a register at a particular -- type (e.g. using a 32-bit wide register to store an 8-bit wide value), as per -- Note [GlobalReg vs GlobalRegUse]. data GlobalReg -- Argument and return registers = VanillaReg -- pointers, unboxed ints and chars {-# UNPACK #-} !Int -- its number | FloatReg -- single-precision floating-point registers {-# UNPACK #-} !Int -- its number | DoubleReg -- double-precision floating-point registers {-# UNPACK #-} !Int -- its number | LongReg -- long int registers (64-bit, really) {-# UNPACK #-} !Int -- its number -- I think we should redesign 'GlobalReg', for example instead of -- FloatReg/DoubleReg/XmmReg/YmmReg/ZmmReg we could have a single VecReg -- which also stores the type we are storing in it. -- -- We might then be able to get rid of GlobalRegUse, as the type information -- would already be contained in a 'GlobalReg'. | XmmReg -- 128-bit SIMD vector register {-# UNPACK #-} !Int -- its number | YmmReg -- 256-bit SIMD vector register {-# UNPACK #-} !Int -- its number | ZmmReg -- 512-bit SIMD vector register {-# UNPACK #-} !Int -- its number -- STG registers | Sp -- ^ Stack ptr; points to last occupied stack location. | SpLim -- ^ Stack limit | Hp -- ^ Heap ptr; points to last occupied heap location. 
| HpLim -- ^ Heap limit register | CCCS -- ^ Current cost-centre stack | CurrentTSO -- ^ pointer to current thread's TSO | CurrentNursery -- ^ pointer to allocation area | HpAlloc -- ^ allocation count for heap check failure -- We keep the address of some commonly-called -- functions in the register table, to keep code -- size down: | EagerBlackholeInfo -- ^ address of stg_EAGER_BLACKHOLE_info | GCEnter1 -- ^ address of stg_gc_enter_1 | GCFun -- ^ address of stg_gc_fun -- | Base offset for the register table, used for accessing registers -- which do not have real registers assigned to them. This register -- will only appear after we have expanded GlobalReg into memory accesses -- (where necessary) in the native code generator. | BaseReg -- | The register used by the platform for the C stack pointer. This is -- a break in the STG abstraction used exclusively to setup stack unwinding -- information. | MachSp -- | A dummy register used to indicate to the stack unwinder where -- a routine would return to. | UnwindReturnReg -- | Base Register for PIC (position-independent code) calculations. -- -- Only used inside the native code generator. Its exact meaning differs -- from platform to platform (see module PositionIndependentCode). 
| PicBaseReg deriving( Eq, Ord, Show ) -- NOTE: the Ord instance affects the tuple layout in GHCi, see -- Note [GHCi and native call registers] instance Outputable GlobalReg where ppr e = pprGlobalReg e instance OutputableP env GlobalReg where pdoc _ = ppr pprGlobalReg :: IsLine doc => GlobalReg -> doc pprGlobalReg gr = case gr of VanillaReg n -> char 'R' <> int n FloatReg n -> char 'F' <> int n DoubleReg n -> char 'D' <> int n LongReg n -> char 'L' <> int n XmmReg n -> text "XMM" <> int n YmmReg n -> text "YMM" <> int n ZmmReg n -> text "ZMM" <> int n Sp -> text "Sp" SpLim -> text "SpLim" Hp -> text "Hp" HpLim -> text "HpLim" MachSp -> text "MachSp" UnwindReturnReg-> text "UnwindReturnReg" CCCS -> text "CCCS" CurrentTSO -> text "CurrentTSO" CurrentNursery -> text "CurrentNursery" HpAlloc -> text "HpAlloc" EagerBlackholeInfo -> text "stg_EAGER_BLACKHOLE_info" GCEnter1 -> text "stg_gc_enter_1" GCFun -> text "stg_gc_fun" BaseReg -> text "BaseReg" PicBaseReg -> text "PicBaseReg" {-# SPECIALIZE pprGlobalReg :: GlobalReg -> SDoc #-} {-# SPECIALIZE pprGlobalReg :: GlobalReg -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- convenient aliases baseReg, spReg, hpReg, spLimReg, hpLimReg, nodeReg, currentTSOReg, currentNurseryReg, hpAllocReg, cccsReg :: Platform -> CmmReg baseReg p = CmmGlobal (GlobalRegUse BaseReg $ bWord p) spReg p = CmmGlobal (GlobalRegUse Sp $ bWord p) hpReg p = CmmGlobal (GlobalRegUse Hp $ gcWord p) hpLimReg p = CmmGlobal (GlobalRegUse HpLim $ bWord p) spLimReg p = CmmGlobal (GlobalRegUse SpLim $ bWord p) nodeReg p = CmmGlobal (GlobalRegUse (VanillaReg 1) $ gcWord p) currentTSOReg p = CmmGlobal (GlobalRegUse CurrentTSO $ bWord p) currentNurseryReg p = CmmGlobal (GlobalRegUse CurrentNursery $ bWord p) hpAllocReg p = CmmGlobal (GlobalRegUse HpAlloc $ bWord p) cccsReg p = CmmGlobal (GlobalRegUse CCCS $ bWord p) node :: GlobalReg node = VanillaReg 1 globalRegSpillType :: Platform -> GlobalReg -> CmmType globalRegSpillType platform = 
\case VanillaReg _ -> gcWord platform FloatReg _ -> cmmFloat W32 DoubleReg _ -> cmmFloat W64 LongReg _ -> cmmBits W64 -- TODO: improve the internal model of SIMD/vectorized registers -- the right design SHOULD improve handling of float and double code too. -- see remarks in Note [SIMD Design for the future] in GHC.StgToCmm.Prim XmmReg _ -> cmmVec 4 (cmmBits W32) YmmReg _ -> cmmVec 8 (cmmBits W32) ZmmReg _ -> cmmVec 16 (cmmBits W32) Hp -> gcWord platform -- The initialiser for all -- dynamically allocated closures _ -> bWord platform isArgReg :: GlobalReg -> Bool isArgReg (VanillaReg {}) = True isArgReg (FloatReg {}) = True isArgReg (DoubleReg {}) = True isArgReg (LongReg {}) = True isArgReg (XmmReg {}) = True isArgReg (YmmReg {}) = True isArgReg (ZmmReg {}) = True isArgReg _ = False -- -------------------------------------------------------------------------- -- | Global registers used for argument passing. -- -- See Note [realArgRegsCover] in GHC.Cmm.CallConv. data GlobalArgRegs -- | General-purpose (integer) argument-passing registers. = GP_ARG_REGS -- | Scalar (integer & floating-point) argument-passing registers. | SCALAR_ARG_REGS -- | 16 byte vector argument-passing registers, together with -- integer & floating-point argument-passing scalar registers. | V16_ARG_REGS -- | 32 byte vector argument-passing registers, together with -- integer & floating-point argument-passing scalar registers. | V32_ARG_REGS -- | 64 byte vector argument-passing registers, together with -- integer & floating-point argument-passing scalar registers. 
| V64_ARG_REGS deriving ( Show, Eq, Ord ) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Switch.hs0000644000000000000000000004455607346545000020235 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Cmm.Switch ( SwitchTargets, mkSwitchTargets, switchTargetsCases, switchTargetsDefault, switchTargetsRange, switchTargetsSigned, mapSwitchTargets, mapSwitchTargetsA, switchTargetsToTable, switchTargetsFallThrough, switchTargetsToList, eqSwitchTargetWith, SwitchPlan(..), backendHasNativeSwitch, createSwitchPlan, ) where import GHC.Prelude hiding (head) import GHC.Utils.Outputable import GHC.Driver.Backend import GHC.Utils.Panic import GHC.Cmm.Dataflow.Label (Label) import Data.Maybe import Data.List.NonEmpty (NonEmpty (..), groupWith, head) import qualified Data.Map as M -- Note [Cmm Switches, the general plan] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Compiling a high-level switch statement, as it comes out of a STG case -- expression, for example, allows for a surprising amount of design decisions. -- Therefore, we cleanly separated this from the Stg → Cmm transformation, as -- well as from the actual code generation. -- -- The overall plan is: -- * The Stg → Cmm transformation creates a single `SwitchTargets` in -- emitSwitch and emitCmmLitSwitch in GHC.StgToCmm.Utils. -- At this stage, they are unsuitable for code generation. -- * A dedicated Cmm transformation (GHC.Cmm.Switch.Implement) replaces these -- switch statements with code that is suitable for code generation, i.e. -- a nice balanced tree of decisions with dense jump tables in the leafs. -- The actual planning of this tree is performed in pure code in createSwitchPlan -- in this module. See Note [createSwitchPlan]. -- * The actual code generation will not do any further processing and -- implement each CmmSwitch with a jump tables. 
-- -- When compiling to LLVM or C, GHC.Cmm.Switch.Implement leaves the switch -- statements alone, as we can turn a SwitchTargets value into a nice -- switch-statement in LLVM resp. C, and leave the rest to the compiler. -- -- See Note [GHC.Cmm.Switch vs. GHC.Cmm.Switch.Implement] why the two module are -- separated. -- Note [Magic Constants in GHC.Cmm.Switch] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- There are a lot of heuristics here that depend on magic values where it is -- hard to determine the "best" value (for whatever that means). These are the -- magic values: -- | Number of consecutive default values allowed in a jump table. If there are -- more of them, the jump tables are split. -- -- Currently 7, as it costs 7 words of additional code when a jump table is -- split (at least on x64, determined experimentally). maxJumpTableHole :: Integer maxJumpTableHole = 7 -- | Minimum size of a jump table. If the number is smaller, the switch is -- implemented using conditionals. -- Currently 5, because an if-then-else tree of 4 values is nice and compact. minJumpTableSize :: Int minJumpTableSize = 5 -- | Minimum non-zero offset for a jump table. See Note [Jump Table Offset]. minJumpTableOffset :: Integer minJumpTableOffset = 2 ----------------------------------------------------------------------------- -- Switch Targets -- Note [SwitchTargets] -- ~~~~~~~~~~~~~~~~~~~~ -- The branches of a switch are stored in a SwitchTargets, which consists of an -- (optional) default jump target, and a map from values to jump targets. -- -- If the default jump target is absent, the behaviour of the switch outside the -- values of the map is undefined. -- -- We use an Integer for the keys the map so that it can be used in switches on -- unsigned as well as signed integers. -- -- The map may be empty (we prune out-of-range branches here, so it could be us -- emptying it). 
-- -- Before code generation, the table needs to be brought into a form where all -- entries are non-negative, so that it can be compiled into a jump table. -- See switchTargetsToTable. -- | A value of type SwitchTargets contains the alternatives for a 'CmmSwitch' -- value, and knows whether the value is signed, the possible range, an -- optional default value and a map from values to jump labels. data SwitchTargets = SwitchTargets Bool -- Signed values (Integer, Integer) -- Range (Maybe Label) -- Default value (M.Map Integer Label) -- The branches deriving (Show, Eq) -- | The smart constructor mkSwitchTargets normalises the map a bit: -- * No entries outside the range -- * No entries equal to the default -- * No default if all elements have explicit values mkSwitchTargets :: Bool -> (Integer, Integer) -> Maybe Label -> M.Map Integer Label -> SwitchTargets mkSwitchTargets signed range@(lo,hi) mbdef ids = SwitchTargets signed range mbdef' ids' where ids' = dropDefault $ restrict ids mbdef' | defaultNeeded = mbdef | otherwise = Nothing -- Drop entries outside the range, if there is a range restrict = restrictMap (lo,hi) -- Drop entries that equal the default, if there is a default dropDefault | Just l <- mbdef = M.filter (/= l) | otherwise = id -- Check if the default is still needed defaultNeeded = fromIntegral (M.size ids') /= hi-lo+1 -- | Changes all labels mentioned in the SwitchTargets value mapSwitchTargets :: (Label -> Label) -> SwitchTargets -> SwitchTargets mapSwitchTargets f (SwitchTargets signed range mbdef branches) = SwitchTargets signed range (fmap f mbdef) (fmap f branches) -- | Changes all labels mentioned in the SwitchTargets value mapSwitchTargetsA :: Applicative m => (Label -> m Label) -> SwitchTargets -> m SwitchTargets mapSwitchTargetsA f (SwitchTargets signed range mbdef branches) = SwitchTargets signed range <$> traverse f mbdef <*> traverse f branches -- | Returns the list of non-default branches of the SwitchTargets value switchTargetsCases 
:: SwitchTargets -> [(Integer, Label)] switchTargetsCases (SwitchTargets _ _ _ branches) = M.toList branches -- | Return the default label of the SwitchTargets value switchTargetsDefault :: SwitchTargets -> Maybe Label switchTargetsDefault (SwitchTargets _ _ mbdef _) = mbdef -- | Return the range of the SwitchTargets value switchTargetsRange :: SwitchTargets -> (Integer, Integer) switchTargetsRange (SwitchTargets _ range _ _) = range -- | Return whether this is used for a signed value switchTargetsSigned :: SwitchTargets -> Bool switchTargetsSigned (SwitchTargets signed _ _ _) = signed -- | switchTargetsToTable creates a dense jump table, usable for code generation. -- -- Also returns an offset to add to the value; the list is 0-based on the -- result of that addition. -- -- The conversion from Integer to Int is a bit of a wart, as the actual -- scrutinee might be an unsigned word, but it just works, due to wrap-around -- arithmetic (as verified by the CmmSwitchTest test case). switchTargetsToTable :: SwitchTargets -> (Int, [Maybe Label]) switchTargetsToTable (SwitchTargets _ (lo,hi) mbdef branches) = (fromIntegral (-start), [ labelFor i | i <- [start..hi] ]) where labelFor i = case M.lookup i branches of Just l -> Just l Nothing -> mbdef start | lo >= 0 && lo < minJumpTableOffset = 0 -- See Note [Jump Table Offset] | otherwise = lo -- Note [Jump Table Offset] -- ~~~~~~~~~~~~~~~~~~~~~~~~ -- Usually, the code for a jump table starting at x will first subtract x from -- the value, to avoid a large amount of empty entries. But if x is very small, -- the extra entries are no worse than the subtraction in terms of code size, and -- not having to do the subtraction is quicker. -- -- I.e. instead of -- _u20N: -- leaq -1(%r14),%rax -- jmp *_n20R(,%rax,8) -- _n20R: -- .quad _c20p -- .quad _c20q -- do -- _u20N: -- jmp *_n20Q(,%r14,8) -- -- _n20Q: -- .quad 0 -- .quad _c20p -- .quad _c20q -- .quad _c20r -- | The list of all labels occurring in the SwitchTargets value. 
switchTargetsToList :: SwitchTargets -> [Label] switchTargetsToList (SwitchTargets _ _ mbdef branches) = maybeToList mbdef ++ M.elems branches -- | Groups cases with equal targets, suitable for pretty-printing to a -- c-like switch statement with fall-through semantics. switchTargetsFallThrough :: SwitchTargets -> ([(NonEmpty Integer, Label)], Maybe Label) switchTargetsFallThrough (SwitchTargets _ _ mbdef branches) = (groups, mbdef) where groups = fmap (\xs -> (fmap fst xs, snd (head xs))) $ groupWith snd $ M.toList branches -- | Custom equality helper, needed for "GHC.Cmm.CommonBlockElim" eqSwitchTargetWith :: (Label -> Label -> Bool) -> SwitchTargets -> SwitchTargets -> Bool eqSwitchTargetWith eq (SwitchTargets signed1 range1 mbdef1 ids1) (SwitchTargets signed2 range2 mbdef2 ids2) = signed1 == signed2 && range1 == range2 && goMB mbdef1 mbdef2 && goList (M.toList ids1) (M.toList ids2) where goMB Nothing Nothing = True goMB (Just l1) (Just l2) = l1 `eq` l2 goMB _ _ = False goList [] [] = True goList ((i1,l1):ls1) ((i2,l2):ls2) = i1 == i2 && l1 `eq` l2 && goList ls1 ls2 goList _ _ = False ----------------------------------------------------------------------------- -- Code generation for Switches -- | A SwitchPlan abstractly describes how a Switch statement ought to be -- implemented. See Note [createSwitchPlan] data SwitchPlan = Unconditionally Label | IfEqual Integer Label SwitchPlan | IfLT Bool Integer SwitchPlan SwitchPlan | JumpTable SwitchTargets deriving Show -- -- Note [createSwitchPlan] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- A SwitchPlan describes how a Switch statement is to be broken down into -- smaller pieces suitable for code generation. -- -- createSwitchPlan creates such a switch plan, in these steps: -- 1. It splits the switch statement at segments of non-default values that -- are too large. See splitAtHoles and Note [Magic Constants in GHC.Cmm.Switch] -- 2. Too small jump tables should be avoided, so we break up smaller pieces -- in breakTooSmall. -- 3. 
We fill in the segments between those pieces with a jump to the default -- label (if there is one), returning a SeparatedList in mkFlatSwitchPlan -- 4. We find and replace two less-than branches by a single equal-to-test in -- findSingleValues -- 5. The thus collected pieces are assembled to a balanced binary tree. {- Note [Two alts + default] ~~~~~~~~~~~~~~~~~~~~~~~~~ Discussion and a bit more info at #14644 When dealing with a switch of the form: switch(e) { case 1: goto l1; case 3000: goto l2; default: goto ldef; } If we treat it as a sparse jump table we would generate: if (e > 3000) //Check if value is outside of the jump table. goto ldef; else { if (e < 3000) { //Compare to upper value if(e != 1) //Compare to remaining value goto ldef; else goto l2; } else goto l1; } Instead we special case this to : if (e==1) goto l1; else if (e==3000) goto l2; else goto l3; This means we have: * Less comparisons for: 1,<3000 * Unchanged for 3000 * One more for >3000 This improves code in a few ways: * One comparison less means smaller code which helps with cache. * It exchanges a taken jump for two jumps no taken in the >range case. Jumps not taken are cheaper (See Agner guides) making this about as fast. * For all other cases the first range check is removed making it faster. The end result is that the change is not measurably slower for the case >3000 and faster for the other cases. This makes running this kind of match in an inner loop cheaper by 10-20% depending on the data. In nofib this improves wheel-sieve1 by 4-9% depending on problem size. We could also add a second conditional jump after the comparison to keep the range check like this: cmp 3000, rArgument jg je While this is fairly cheap it made no big difference for the >3000 case and slowed down all other cases making it not worthwhile. -} -- | This function creates a SwitchPlan from a SwitchTargets value, breaking it -- down into smaller pieces suitable for code generation. 
createSwitchPlan :: SwitchTargets -> SwitchPlan -- Lets do the common case of a singleton map quickly and efficiently (#10677) createSwitchPlan (SwitchTargets _signed _range (Just defLabel) m) | [(x, l)] <- M.toList m = IfEqual x l (Unconditionally defLabel) -- And another common case, matching "booleans" createSwitchPlan (SwitchTargets _signed (lo,hi) Nothing m) | [(x1, l1), (_x2,l2)] <- M.toAscList m --Checking If |range| = 2 is enough if we have two unique literals , hi - lo == 1 = IfEqual x1 l1 (Unconditionally l2) -- See Note [Two alts + default] createSwitchPlan (SwitchTargets _signed _range (Just defLabel) m) | [(x1, l1), (x2,l2)] <- M.toAscList m = IfEqual x1 l1 (IfEqual x2 l2 (Unconditionally defLabel)) createSwitchPlan (SwitchTargets signed range mbdef m) = -- pprTrace "createSwitchPlan" (text (show ids) $$ text (show (range,m)) $$ text (show pieces) $$ text (show flatPlan) $$ text (show plan)) $ plan where pieces = concatMap breakTooSmall $ splitAtHoles maxJumpTableHole m flatPlan = findSingleValues $ mkFlatSwitchPlan signed mbdef range pieces plan = buildTree signed $ flatPlan --- --- Step 1: Splitting at large holes --- splitAtHoles :: Integer -> M.Map Integer a -> [M.Map Integer a] splitAtHoles _ m | M.null m = [] splitAtHoles holeSize m = map (\range -> restrictMap range m) nonHoles where holes = filter (\(l,h) -> h - l > holeSize) $ zip (M.keys m) (tail (M.keys m)) nonHoles = reassocTuples lo holes hi (lo,_) = M.findMin m (hi,_) = M.findMax m --- --- Step 2: Avoid small jump tables --- -- We do not want jump tables below a certain size. This breaks them up -- (into singleton maps, for now). breakTooSmall :: M.Map Integer a -> [M.Map Integer a] breakTooSmall m | M.size m > minJumpTableSize = [m] | otherwise = [M.singleton k v | (k,v) <- M.toList m] --- --- Step 3: Fill in the blanks --- -- | A FlatSwitchPlan is a list of SwitchPlans, with an integer in between every -- two entries, dividing the range. 
-- So if we have (abusing list syntax) [plan1,n,plan2], then we use plan1 if -- the expression is < n, and plan2 otherwise. type FlatSwitchPlan = SeparatedList Integer SwitchPlan mkFlatSwitchPlan :: Bool -> Maybe Label -> (Integer, Integer) -> [M.Map Integer Label] -> FlatSwitchPlan -- If we have no default (i.e. undefined where there is no entry), we can -- branch at the minimum of each map mkFlatSwitchPlan _ Nothing _ [] = pprPanic "mkFlatSwitchPlan with nothing left to do" empty mkFlatSwitchPlan signed Nothing _ (m:ms) = (mkLeafPlan signed Nothing m , [ (fst (M.findMin m'), mkLeafPlan signed Nothing m') | m' <- ms ]) -- If we have a default, we have to interleave segments that jump -- to the default between the maps mkFlatSwitchPlan signed (Just l) r ms = let ((_,p1):ps) = go r ms in (p1, ps) where go (lo,hi) [] | lo > hi = [] | otherwise = [(lo, Unconditionally l)] go (lo,hi) (m:ms) | lo < min = (lo, Unconditionally l) : go (min,hi) (m:ms) | lo == min = (lo, mkLeafPlan signed (Just l) m) : go (max+1,hi) ms | otherwise = pprPanic "mkFlatSwitchPlan" (integer lo <+> integer min) where min = fst (M.findMin m) max = fst (M.findMax m) mkLeafPlan :: Bool -> Maybe Label -> M.Map Integer Label -> SwitchPlan mkLeafPlan signed mbdef m | [(_,l)] <- M.toList m -- singleton map = Unconditionally l | otherwise = JumpTable $ mkSwitchTargets signed (min,max) mbdef m where min = fst (M.findMin m) max = fst (M.findMax m) --- --- Step 4: Reduce the number of branches using == --- -- A sequence of three unconditional jumps, with the outer two pointing to the -- same value and the bounds off by exactly one can be improved findSingleValues :: FlatSwitchPlan -> FlatSwitchPlan findSingleValues (Unconditionally l, (i, Unconditionally l2) : (i', Unconditionally l3) : xs) | l == l3 && i + 1 == i' = findSingleValues (IfEqual i l2 (Unconditionally l), xs) findSingleValues (p, (i,p'):xs) = (p,i) `consSL` findSingleValues (p', xs) findSingleValues (p, []) = (p, []) --- --- Step 5: Actually 
build the tree --- -- Build a balanced tree from a separated list buildTree :: Bool -> FlatSwitchPlan -> SwitchPlan buildTree _ (p,[]) = p buildTree signed sl = IfLT signed m (buildTree signed sl1) (buildTree signed sl2) where (sl1, m, sl2) = divideSL sl -- -- Utility data type: Non-empty lists with extra markers in between each -- element: -- type SeparatedList b a = (a, [(b,a)]) consSL :: (a, b) -> SeparatedList b a -> SeparatedList b a consSL (a, b) (a', xs) = (a, (b,a'):xs) divideSL :: SeparatedList b a -> (SeparatedList b a, b, SeparatedList b a) divideSL (_,[]) = error "divideSL: Singleton SeparatedList" divideSL (p,xs) = ((p, xs1), m, (p', xs2)) where (xs1, (m,p'):xs2) = splitAt (length xs `div` 2) xs -- -- Other Utilities -- restrictMap :: (Integer,Integer) -> M.Map Integer b -> M.Map Integer b restrictMap (lo,hi) m = mid where (_, mid_hi) = M.split (lo-1) m (mid, _) = M.split (hi+1) mid_hi -- for example: reassocTuples a [(b,c),(d,e)] f == [(a,b),(c,d),(e,f)] reassocTuples :: a -> [(a,a)] -> a -> [(a,a)] reassocTuples initial [] last = [(initial,last)] reassocTuples initial ((a,b):tuples) last = (initial,a) : reassocTuples b tuples last -- Note [GHC.Cmm.Switch vs. GHC.Cmm.Switch.Implement] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- I (Joachim) separated the two somewhat closely related modules -- -- - GHC.Cmm.Switch, which provides the CmmSwitchTargets type and contains the strategy -- for implementing a Cmm switch (createSwitchPlan), and -- - GHC.Cmm.Switch.Implement, which contains the actual Cmm graph modification, -- -- for these reasons: -- -- * GHC.Cmm.Switch is very low in the dependency tree, i.e. does not depend on any -- GHC specific modules at all (with the exception of Output and -- GHC.Cmm.Dataflow (Literal)). -- * GHC.Cmm.Switch.Implement is the Cmm transformation and hence very high in -- the dependency tree. -- * GHC.Cmm.Switch provides the CmmSwitchTargets data type, which is abstract, but -- used in GHC.Cmm.Node. 
-- * Because GHC.Cmm.Switch is low in the dependency tree, the separation allows -- for more parallelism when building GHC. -- * The interaction between the modules is very explicit and easy to -- understand, due to the small and simple interface. ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Type.hs0000644000000000000000000003735607346545000017715 0ustar0000000000000000module GHC.Cmm.Type ( CmmType -- Abstract , b8, b16, b32, b64, b128, b256, b512, f32, f64, bWord, bHalfWord, gcWord , cInt , cmmBits, cmmFloat , typeWidth, setCmmTypeWidth , cmmEqType, cmmCompatType , isFloatType, isGcPtrType, isBitsType , isWordAny, isWord32, isWord64 , isFloat64, isFloat32 , Width(..) , widthInBits, widthInBytes, widthInLog, widthFromBytes , wordWidth, halfWordWidth, cIntWidth , halfWordMask , narrowU, narrowS , rEP_CostCentreStack_mem_alloc , rEP_CostCentreStack_scc_count , rEP_StgEntCounter_allocs , rEP_StgEntCounter_allocd , ForeignHint(..) , Length , vec, vec2, vec4, vec8, vec16 , vec2f64, vec2b64, vec4f32, vec4b32, vec8b16, vec16b8 , cmmVec , vecLength, vecElemType , isVecType , DoAlignSanitisation ) where import GHC.Prelude import GHC.Platform import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Word import Data.Int ----------------------------------------------------------------------------- -- CmmType ----------------------------------------------------------------------------- -- NOTE: CmmType is an abstract type, not exported from this -- module so you can easily change its representation -- -- However Width is exported in a concrete way, -- and is used extensively in pattern-matching data CmmType -- The important one! 
= CmmType CmmCat !Width deriving Show data CmmCat -- "Category" (not exported) = GcPtrCat -- GC pointer | BitsCat -- Non-pointer | FloatCat -- Float | VecCat Length CmmCat -- Vector deriving( Eq, Show ) -- See Note [Signed vs unsigned] at the end instance Outputable CmmType where ppr (CmmType cat wid) = ppr cat <> ppr (widthInBits wid) instance Outputable CmmCat where ppr FloatCat = text "F" ppr GcPtrCat = text "P" ppr BitsCat = text "I" ppr (VecCat n cat) = ppr cat <> text "x" <> ppr n <> text "V" -- Why is CmmType stratified? For native code generation, -- most of the time you just want to know what sort of register -- to put the thing in, and for this you need to know how -- many bits thing has, and whether it goes in a floating-point -- register. By contrast, the distinction between GcPtr and -- GcNonPtr is of interest to only a few parts of the code generator. -------- Equality on CmmType -------------- -- CmmType is *not* an instance of Eq; sometimes we care about the -- Gc/NonGc distinction, and sometimes we don't -- So we use an explicit function to force you to think about it cmmEqType :: CmmType -> CmmType -> Bool -- Exact equality cmmEqType (CmmType c1 w1) (CmmType c2 w2) = c1==c2 && w1==w2 -- | A weaker notion of equality of 'CmmType's than 'cmmEqType', -- used (only) in Cmm Lint. -- -- Why "weaker"? Because: -- -- - we don't distinguish GcPtr vs NonGcPtr, because the the RTS files -- are not yet well-typed wrt pointers, -- - for vectors, we only compare the widths, because in practice things like -- X86 xmm registers support different types of data (e.g. 4xf32, 2xf64, 2xu64 etc). 
cmmCompatType :: CmmType -> CmmType -> Bool cmmCompatType (CmmType c1 w1) (CmmType c2 w2) = c1 `weak_eq` c2 && w1 == w2 where weak_eq :: CmmCat -> CmmCat -> Bool FloatCat `weak_eq` FloatCat = True FloatCat `weak_eq` _other = False _other `weak_eq` FloatCat = False (VecCat {}) `weak_eq` (VecCat {}) = True -- only compare overall width (VecCat {}) `weak_eq` _other = False _other `weak_eq` (VecCat {}) = False _word1 `weak_eq` _word2 = True -- Ignores GcPtr --- Simple operations on CmmType ----- typeWidth :: CmmType -> Width typeWidth (CmmType _ w) = w setCmmTypeWidth :: Width -> CmmType -> CmmType setCmmTypeWidth w (CmmType c _) = CmmType c w cmmBits, cmmFloat :: Width -> CmmType cmmBits = CmmType BitsCat cmmFloat = CmmType FloatCat -------- Common CmmTypes ------------ -- Floats and words of specific widths b8, b16, b32, b64, b128, b256, b512, f32, f64 :: CmmType b8 = cmmBits W8 b16 = cmmBits W16 b32 = cmmBits W32 b64 = cmmBits W64 b128 = cmmBits W128 b256 = cmmBits W256 b512 = cmmBits W512 f32 = cmmFloat W32 f64 = cmmFloat W64 -- CmmTypes of native word widths bWord :: Platform -> CmmType bWord platform = cmmBits (wordWidth platform) bHalfWord :: Platform -> CmmType bHalfWord platform = cmmBits (halfWordWidth platform) gcWord :: Platform -> CmmType gcWord platform = CmmType GcPtrCat (wordWidth platform) cInt :: Platform -> CmmType cInt platform = cmmBits (cIntWidth platform) ------------ Predicates ---------------- isFloatType, isGcPtrType, isBitsType :: CmmType -> Bool isFloatType (CmmType FloatCat _) = True isFloatType _other = False isGcPtrType (CmmType GcPtrCat _) = True isGcPtrType _other = False isBitsType (CmmType BitsCat _) = True isBitsType _ = False isWordAny, isWord32, isWord64, isFloat32, isFloat64 :: CmmType -> Bool -- isWord64 is true of 64-bit non-floats (both gc-ptrs and otherwise) -- isFloat32 and 64 are obvious isWordAny (CmmType BitsCat _) = True isWordAny (CmmType GcPtrCat _) = True isWordAny _other = False isWord64 (CmmType BitsCat W64) = True 
isWord64 (CmmType GcPtrCat W64) = True isWord64 _other = False isWord32 (CmmType BitsCat W32) = True isWord32 (CmmType GcPtrCat W32) = True isWord32 _other = False isFloat32 (CmmType FloatCat W32) = True isFloat32 _other = False isFloat64 (CmmType FloatCat W64) = True isFloat64 _other = False ----------------------------------------------------------------------------- -- Width ----------------------------------------------------------------------------- data Width = W8 | W16 | W32 | W64 | W128 | W256 | W512 deriving (Eq, Ord, Show) instance Outputable Width where ppr rep = text (show rep) -------- Common Widths ------------ -- | The width of the current platform's word size. wordWidth :: Platform -> Width wordWidth platform = case platformWordSize platform of PW4 -> W32 PW8 -> W64 -- | The width of the current platform's half-word size. halfWordWidth :: Platform -> Width halfWordWidth platform = case platformWordSize platform of PW4 -> W16 PW8 -> W32 -- | A bit-mask for the lower half-word of current platform. halfWordMask :: Platform -> Integer halfWordMask platform = case platformWordSize platform of PW4 -> 0xFFFF PW8 -> 0xFFFFFFFF -- cIntRep is the Width for a C-language 'int' cIntWidth :: Platform -> Width cIntWidth platform = case pc_CINT_SIZE (platformConstants platform) of 4 -> W32 8 -> W64 s -> panic ("cIntWidth: Unknown cINT_SIZE: " ++ show s) -- | A width in bits. widthInBits :: Width -> Int widthInBits W8 = 8 widthInBits W16 = 16 widthInBits W32 = 32 widthInBits W64 = 64 widthInBits W128 = 128 widthInBits W256 = 256 widthInBits W512 = 512 -- | A width in bytes. -- -- > widthFromBytes (widthInBytes w) === w widthInBytes :: Width -> Int widthInBytes W8 = 1 widthInBytes W16 = 2 widthInBytes W32 = 4 widthInBytes W64 = 8 widthInBytes W128 = 16 widthInBytes W256 = 32 widthInBytes W512 = 64 -- | *Partial* A width from the number of bytes. 
widthFromBytes :: Int -> Width widthFromBytes 1 = W8 widthFromBytes 2 = W16 widthFromBytes 4 = W32 widthFromBytes 8 = W64 widthFromBytes 16 = W128 widthFromBytes 32 = W256 widthFromBytes 64 = W512 widthFromBytes n = pprPanic "no width for given number of bytes" (ppr n) -- | log_2 of the width in bytes, useful for generating shifts. widthInLog :: Width -> Int widthInLog W8 = 0 widthInLog W16 = 1 widthInLog W32 = 2 widthInLog W64 = 3 widthInLog W128 = 4 widthInLog W256 = 5 widthInLog W512 = 6 -- widening / narrowing -- | Narrow a signed or unsigned value to the given width. The result will -- reside in @[0, +2^width)@. -- -- >>> narrowU W8 256 == 256 -- >>> narrowU W8 255 == 255 -- >>> narrowU W8 128 == 128 -- >>> narrowU W8 127 == 127 -- >>> narrowU W8 0 == 0 -- >>> narrowU W8 (-127) == 129 -- >>> narrowU W8 (-128) == 128 -- >>> narrowU W8 (-129) == 127 -- >>> narrowU W8 (-255) == 1 -- >>> narrowU W8 (-256) == 0 -- narrowU :: Width -> Integer -> Integer narrowU W8 x = fromIntegral (fromIntegral x :: Word8) narrowU W16 x = fromIntegral (fromIntegral x :: Word16) narrowU W32 x = fromIntegral (fromIntegral x :: Word32) narrowU W64 x = fromIntegral (fromIntegral x :: Word64) narrowU _ _ = panic "narrowTo" -- | Narrow a signed value to the given width. The result will reside -- in @[-2^(width-1), +2^(width-1))@. 
-- -- >>> narrowS W8 256 == 0 -- >>> narrowS W8 255 == -1 -- >>> narrowS W8 128 == -128 -- >>> narrowS W8 127 == 127 -- >>> narrowS W8 0 == 0 -- >>> narrowS W8 (-127) == -127 -- >>> narrowS W8 (-128) == -128 -- >>> narrowS W8 (-129) == 127 -- >>> narrowS W8 (-255) == 1 -- >>> narrowS W8 (-256) == 0 -- narrowS :: Width -> Integer -> Integer narrowS W8 x = fromIntegral (fromIntegral x :: Int8) narrowS W16 x = fromIntegral (fromIntegral x :: Int16) narrowS W32 x = fromIntegral (fromIntegral x :: Int32) narrowS W64 x = fromIntegral (fromIntegral x :: Int64) narrowS _ _ = panic "narrowTo" ----------------------------------------------------------------------------- -- SIMD ----------------------------------------------------------------------------- type Length = Int vec :: Length -> CmmType -> CmmType vec l (CmmType cat w) = CmmType (VecCat l cat) vecw where vecw :: Width vecw = widthFromBytes (l*widthInBytes w) vec2, vec4, vec8, vec16 :: CmmType -> CmmType vec2 = vec 2 vec4 = vec 4 vec8 = vec 8 vec16 = vec 16 vec2f64, vec2b64, vec4f32, vec4b32, vec8b16, vec16b8 :: CmmType vec2f64 = vec 2 f64 vec2b64 = vec 2 b64 vec4f32 = vec 4 f32 vec4b32 = vec 4 b32 vec8b16 = vec 8 b16 vec16b8 = vec 16 b8 cmmVec :: Int -> CmmType -> CmmType cmmVec n (CmmType cat w) = CmmType (VecCat n cat) (widthFromBytes (n*widthInBytes w)) vecLength :: CmmType -> Length vecLength (CmmType (VecCat l _) _) = l vecLength _ = panic "vecLength: not a vector" vecElemType :: CmmType -> CmmType vecElemType (CmmType (VecCat l cat) w) = CmmType cat scalw where scalw :: Width scalw = widthFromBytes (widthInBytes w `div` l) vecElemType _ = panic "vecElemType: not a vector" isVecType :: CmmType -> Bool isVecType (CmmType (VecCat {}) _) = True isVecType _ = False ------------------------------------------------------------------------- -- Hints -- Hints are extra type information we attach to the arguments and -- results of a foreign call, where more type information is sometimes -- needed by the ABI to make the 
correct kind of call. -- -- See Note [Signed vs unsigned] for one case where this is used. data ForeignHint = NoHint | AddrHint | SignedHint deriving( Eq ) -- Used to give extra per-argument or per-result -- information needed by foreign calling conventions instance Outputable ForeignHint where ppr NoHint = empty ppr SignedHint = quotes(text "signed") -- ppr AddrHint = quotes(text "address") -- Temp Jan08 ppr AddrHint = (text "PtrHint") ------------------------------------------------------------------------- -- These don't really belong here, but I don't know where is best to -- put them. rEP_CostCentreStack_mem_alloc :: Platform -> CmmType rEP_CostCentreStack_mem_alloc platform = cmmBits (widthFromBytes (pc_REP_CostCentreStack_mem_alloc pc)) where pc = platformConstants platform rEP_CostCentreStack_scc_count :: Platform -> CmmType rEP_CostCentreStack_scc_count platform = cmmBits (widthFromBytes (pc_REP_CostCentreStack_scc_count pc)) where pc = platformConstants platform rEP_StgEntCounter_allocs :: Platform -> CmmType rEP_StgEntCounter_allocs platform = cmmBits (widthFromBytes (pc_REP_StgEntCounter_allocs pc)) where pc = platformConstants platform rEP_StgEntCounter_allocd :: Platform -> CmmType rEP_StgEntCounter_allocd platform = cmmBits (widthFromBytes (pc_REP_StgEntCounter_allocd pc)) where pc = platformConstants platform ------------------------------------------------------------------------- {- Note [Signed vs unsigned] ~~~~~~~~~~~~~~~~~~~~~~~~~ Should a CmmType include a signed vs. unsigned distinction? This is very much like a "hint" in C-- terminology: it isn't necessary in order to generate correct code, but it might be useful in that the compiler can generate better code if it has access to higher-level hints about data. This is important at call boundaries, because the definition of a function is not visible at all of its call sites, so the compiler cannot infer the hints. Here in Cmm, we're taking a slightly different approach. We include the int vs. 
float hint in the CmmType, because (a) the majority of platforms have a strong distinction between float and int registers, and (b) we don't want to do any heavyweight hint-inference in the native code backend in order to get good code. We're treating the hint more like a type: our Cmm is always completely consistent with respect to hints. All coercions between float and int are explicit. What about the signed vs. unsigned hint? This information might be useful if we want to keep sub-word-sized values in word-size registers, which we must do if we only have word-sized registers. On such a system, there are two straightforward conventions for representing sub-word-sized values: (a) Leave the upper bits undefined. Comparison operations must sign- or zero-extend both operands before comparing them, depending on whether the comparison is signed or unsigned. (b) Always keep the values sign- or zero-extended as appropriate. Arithmetic operations must narrow the result to the appropriate size. A clever compiler might not use either (a) or (b) exclusively, instead it would attempt to minimize the coercions by analysis: the same kind of analysis that propagates hints around. In Cmm we don't want to have to do this, so we plump for having richer types and keeping the type information consistent. If signed/unsigned hints are missing from CmmType, then the only choice we have is (a), because we don't know whether the result of an operation should be sign- or zero-extended. Many architectures have extending load operations, which work well with (b). To make use of them with (a), you need to know whether the value is going to be sign- or zero-extended by an enclosing comparison (for example), which involves knowing above the context. This is doable but more complex. Further complicating the issue is foreign calls: a foreign calling convention can specify that signed 8-bit quantities are passed as sign-extended 32 bit quantities, for example (this is the case on the PowerPC). 
So we *do* need sign information on foreign call arguments. Pros for adding signed vs. unsigned to CmmType: - It would let us use convention (b) above, and get easier code generation for extending loads. - Less information required on foreign calls. - MachOp type would be simpler Cons: - More complexity - What is the CmmType for a VanillaReg? Currently it is always wordRep, but now we have to decide whether it is signed or unsigned. The same VanillaReg can thus have different CmmType in different parts of the program. - Extra coercions cluttering up expressions. Currently for GHC, the foreign call point is moot, because we do our own promotion of sub-word-sized values to word-sized values. The Int8 type is represented by an Int# which is kept sign-extended at all times (this is slightly naughty, because we're making assumptions about the C calling convention rather early on in the compiler). However, given this, the cons outweigh the pros. -} -- | is @-falignment-sanitisation@ enabled? type DoAlignSanitisation = Bool ghc-lib-parser-9.12.2.20250421/compiler/GHC/Cmm/Utils.hs0000644000000000000000000005502407346545000020064 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} ----------------------------------------------------------------------------- -- -- Cmm utilities. 
-- -- (c) The University of Glasgow 2004-2006 -- ----------------------------------------------------------------------------- module GHC.Cmm.Utils( -- CmmType primRepCmmType, slotCmmType, typeCmmType, typeForeignHint, primRepForeignHint, -- CmmLit zeroCLit, mkIntCLit, mkWordCLit, packHalfWordsCLit, mkByteStringCLit, mkFileEmbedLit, mkDataLits, mkRODataLits, mkStgWordCLit, -- CmmExpr mkIntExpr, zeroExpr, mkLblExpr, cmmRegOff, cmmOffset, cmmLabelOff, cmmOffsetLit, cmmOffsetExpr, cmmRegOffB, cmmOffsetB, cmmLabelOffB, cmmOffsetLitB, cmmOffsetExprB, cmmRegOffW, cmmOffsetW, cmmLabelOffW, cmmOffsetLitW, cmmOffsetExprW, cmmIndex, cmmIndexExpr, cmmLoadIndex, cmmLoadIndexW, cmmLoadBWord, cmmLoadGCWord, cmmNegate, cmmULtWord, cmmUGeWord, cmmUGtWord, cmmUShrWord, cmmSLtWord, cmmNeWord, cmmEqWord, cmmOrWord, cmmAndWord, cmmSubWord, cmmAddWord, cmmMulWord, cmmQuotWord, cmmToWord, cmmMkAssign, baseExpr, spExpr, hpExpr, spLimExpr, hpLimExpr, currentTSOExpr, currentNurseryExpr, cccsExpr, -- Tagging cmmTagMask, cmmPointerMask, cmmUntag, cmmIsTagged, cmmIsNotTagged, cmmConstrTag1, mAX_PTR_TAG, tAG_MASK, -- Overlap and usage regsOverlap, globalRegsOverlap, regUsedIn, globalRegUsedIn, -- Liveness and bitmaps mkLiveness, -- * Operations that probably don't belong here modifyGraph, ofBlockMap, toBlockMap, ofBlockList, toBlockList, toBlockListEntryFirst, toBlockListEntryFirstFalseFallthrough, foldlGraphBlocks, mapGraphNodes, mapGraphNodes1, -- * Ticks blockTicks ) where import GHC.Prelude import GHC.Core.TyCon ( PrimRep(..), PrimElemRep(..) 
) import GHC.Types.RepType ( NvUnaryType, SlotTy (..), typePrimRepU ) import GHC.Platform import GHC.Runtime.Heap.Layout import GHC.Cmm import GHC.Cmm.BlockId import GHC.Cmm.CLabel import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Unique import GHC.Platform.Regs import Data.ByteString (ByteString) import qualified Data.ByteString as BS import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Dataflow.Label import GHC.Cmm.Dataflow.Block --------------------------------------------------- -- -- CmmTypes -- --------------------------------------------------- primRepCmmType :: Platform -> PrimRep -> CmmType primRepCmmType platform = \case BoxedRep _ -> gcWord platform IntRep -> bWord platform WordRep -> bWord platform Int8Rep -> b8 Word8Rep -> b8 Int16Rep -> b16 Word16Rep -> b16 Int32Rep -> b32 Word32Rep -> b32 Int64Rep -> b64 Word64Rep -> b64 AddrRep -> bWord platform FloatRep -> f32 DoubleRep -> f64 VecRep len rep -> vec len (primElemRepCmmType rep) slotCmmType :: Platform -> SlotTy -> CmmType slotCmmType platform = \case PtrUnliftedSlot -> gcWord platform PtrLiftedSlot -> gcWord platform WordSlot -> bWord platform Word64Slot -> b64 FloatSlot -> f32 DoubleSlot -> f64 VecSlot l e -> vec l (primElemRepCmmType e) primElemRepCmmType :: PrimElemRep -> CmmType primElemRepCmmType Int8ElemRep = b8 primElemRepCmmType Int16ElemRep = b16 primElemRepCmmType Int32ElemRep = b32 primElemRepCmmType Int64ElemRep = b64 primElemRepCmmType Word8ElemRep = b8 primElemRepCmmType Word16ElemRep = b16 primElemRepCmmType Word32ElemRep = b32 primElemRepCmmType Word64ElemRep = b64 primElemRepCmmType FloatElemRep = f32 primElemRepCmmType DoubleElemRep = f64 typeCmmType :: Platform -> NvUnaryType -> CmmType typeCmmType platform ty = primRepCmmType platform (typePrimRepU ty) primRepForeignHint :: PrimRep -> ForeignHint primRepForeignHint (BoxedRep _) = AddrHint primRepForeignHint IntRep = SignedHint primRepForeignHint Int8Rep = SignedHint primRepForeignHint Int16Rep = SignedHint 
primRepForeignHint Int32Rep = SignedHint primRepForeignHint Int64Rep = SignedHint primRepForeignHint WordRep = NoHint primRepForeignHint Word8Rep = NoHint primRepForeignHint Word16Rep = NoHint primRepForeignHint Word32Rep = NoHint primRepForeignHint Word64Rep = NoHint primRepForeignHint AddrRep = AddrHint -- NB! AddrHint, but NonPtrArg primRepForeignHint FloatRep = NoHint primRepForeignHint DoubleRep = NoHint primRepForeignHint (VecRep {}) = NoHint typeForeignHint :: NvUnaryType -> ForeignHint typeForeignHint = primRepForeignHint . typePrimRepU --------------------------------------------------- -- -- CmmLit -- --------------------------------------------------- -- XXX: should really be Integer, since Int doesn't necessarily cover -- the full range of target Ints. mkIntCLit :: Platform -> Int -> CmmLit mkIntCLit platform i = CmmInt (toInteger i) (wordWidth platform) mkIntExpr :: Platform -> Int -> CmmExpr mkIntExpr platform i = CmmLit $! mkIntCLit platform i zeroCLit :: Platform -> CmmLit zeroCLit platform = CmmInt 0 (wordWidth platform) zeroExpr :: Platform -> CmmExpr zeroExpr platform = CmmLit (zeroCLit platform) mkWordCLit :: Platform -> Integer -> CmmLit mkWordCLit platform wd = CmmInt wd (wordWidth platform) -- | We make a top-level decl for the string, and return a label pointing to it mkByteStringCLit :: CLabel -> ByteString -> (CmmLit, GenCmmDecl (GenCmmStatics raw) info stmt) mkByteStringCLit lbl bytes = (CmmLabel lbl, CmmData (Section sec lbl) $ CmmStaticsRaw lbl [CmmString bytes]) where -- This can not happen for String literals (as there \NUL is replaced by -- C0 80). However, it can happen with Addr# literals. 
sec = if 0 `BS.elem` bytes then ReadOnlyData else CString -- | We make a top-level decl for the embedded binary file, and return a label pointing to it mkFileEmbedLit :: CLabel -> FilePath -> Int -> (CmmLit, GenCmmDecl (GenCmmStatics raw) info stmt) mkFileEmbedLit lbl path len = (CmmLabel lbl, CmmData (Section ReadOnlyData lbl) (CmmStaticsRaw lbl [CmmFileEmbed path len])) -- | Build a data-segment data block mkDataLits :: Section -> CLabel -> [CmmLit] -> GenCmmDecl (GenCmmStatics raw) info stmt mkDataLits section lbl lits = CmmData section (CmmStaticsRaw lbl $ map CmmStaticLit lits) mkRODataLits :: CLabel -> [CmmLit] -> GenCmmDecl (GenCmmStatics raw) info stmt -- Build a read-only data block mkRODataLits lbl lits = mkDataLits section lbl lits where section | any needsRelocation lits = Section RelocatableReadOnlyData lbl | otherwise = Section ReadOnlyData lbl needsRelocation (CmmLabel _) = True needsRelocation (CmmLabelOff _ _) = True needsRelocation _ = False mkStgWordCLit :: Platform -> StgWord -> CmmLit mkStgWordCLit platform wd = CmmInt (fromStgWord wd) (wordWidth platform) packHalfWordsCLit :: Platform -> StgHalfWord -> StgHalfWord -> CmmLit -- Make a single word literal in which the lower_half_word is -- at the lower address, and the upper_half_word is at the -- higher address -- ToDo: consider using half-word lits instead -- but be careful: that's vulnerable when reversed packHalfWordsCLit platform lower_half_word upper_half_word = case platformByteOrder platform of BigEndian -> mkWordCLit platform ((l `shiftL` halfWordSizeInBits platform) .|. u) LittleEndian -> mkWordCLit platform (l .|. 
(u `shiftL` halfWordSizeInBits platform)) where l = fromStgHalfWord lower_half_word u = fromStgHalfWord upper_half_word --------------------------------------------------- -- -- CmmExpr -- --------------------------------------------------- mkLblExpr :: CLabel -> CmmExpr mkLblExpr lbl = CmmLit (CmmLabel lbl) cmmOffsetExpr :: Platform -> CmmExpr -> CmmExpr -> CmmExpr -- assumes base and offset have the same CmmType cmmOffsetExpr platform e (CmmLit (CmmInt n _)) = cmmOffset platform e (fromInteger n) cmmOffsetExpr platform e byte_off = CmmMachOp (MO_Add (cmmExprWidth platform e)) [e, byte_off] cmmOffset :: Platform -> CmmExpr -> Int -> CmmExpr cmmOffset _platform e 0 = e cmmOffset platform e byte_off = case e of CmmReg reg -> cmmRegOff reg byte_off CmmRegOff reg m -> cmmRegOff reg (m+byte_off) CmmLit lit -> CmmLit (cmmOffsetLit lit byte_off) CmmStackSlot area off -> CmmStackSlot area (off - byte_off) -- note stack area offsets increase towards lower addresses CmmMachOp (MO_Add rep) [expr, CmmLit (CmmInt byte_off1 _rep)] -> let !lit_off = (byte_off1 + toInteger byte_off) in CmmMachOp (MO_Add rep) [expr, CmmLit (CmmInt lit_off rep)] _ -> let !width = cmmExprWidth platform e in CmmMachOp (MO_Add width) [e, CmmLit (CmmInt (toInteger byte_off) width)] -- Smart constructor for CmmRegOff. Same caveats as cmmOffset above. 
cmmRegOff :: CmmReg -> Int -> CmmExpr cmmRegOff reg 0 = CmmReg reg cmmRegOff reg byte_off = CmmRegOff reg byte_off cmmOffsetLit :: CmmLit -> Int -> CmmLit cmmOffsetLit (CmmLabel l) byte_off = cmmLabelOff l byte_off cmmOffsetLit (CmmLabelOff l m) byte_off = cmmLabelOff l (m+byte_off) cmmOffsetLit (CmmLabelDiffOff l1 l2 m w) byte_off = CmmLabelDiffOff l1 l2 (m+byte_off) w cmmOffsetLit (CmmInt m rep) byte_off = CmmInt (m + fromIntegral byte_off) rep cmmOffsetLit _ byte_off = pprPanic "cmmOffsetLit" (ppr byte_off) cmmLabelOff :: CLabel -> Int -> CmmLit -- Smart constructor for CmmLabelOff cmmLabelOff lbl 0 = CmmLabel lbl cmmLabelOff lbl byte_off = CmmLabelOff lbl byte_off -- | Useful for creating an index into an array, with a statically known offset. -- The type is the element type; used for making the multiplier cmmIndex :: Platform -> Width -- Width w -> CmmExpr -- Address of vector of items of width w -> Int -- Which element of the vector (0 based) -> CmmExpr -- Address of i'th element cmmIndex platform width base idx = cmmOffset platform base (idx * widthInBytes width) -- | Useful for creating an index into an array, with an unknown offset. cmmIndexExpr :: Platform -> Width -- Width w -> CmmExpr -- Address of vector of items of width w -> CmmExpr -- Which element of the vector (0 based) -> CmmExpr -- Address of i'th element cmmIndexExpr platform width base (CmmLit (CmmInt n _)) = cmmIndex platform width base (fromInteger n) cmmIndexExpr platform width base idx = cmmOffsetExpr platform base byte_off where idx_w = cmmExprWidth platform idx byte_off = CmmMachOp (MO_Shl idx_w) [idx, mkIntExpr platform (widthInLog width)] cmmLoadIndex :: Platform -> CmmType -> CmmExpr -> Int -> CmmExpr cmmLoadIndex platform ty expr ix = CmmLoad (cmmIndex platform (typeWidth ty) expr ix) ty NaturallyAligned -- TODO: Audit uses -- | Load a naturally-aligned non-pointer word. 
cmmLoadBWord :: Platform -> CmmExpr -> CmmExpr cmmLoadBWord platform ptr = CmmLoad ptr (bWord platform) NaturallyAligned -- | Load a naturally-aligned GC pointer. cmmLoadGCWord :: Platform -> CmmExpr -> CmmExpr cmmLoadGCWord platform ptr = CmmLoad ptr (gcWord platform) NaturallyAligned -- The "B" variants take byte offsets cmmRegOffB :: CmmReg -> ByteOff -> CmmExpr cmmRegOffB = cmmRegOff cmmOffsetB :: Platform -> CmmExpr -> ByteOff -> CmmExpr cmmOffsetB = cmmOffset cmmOffsetExprB :: Platform -> CmmExpr -> CmmExpr -> CmmExpr cmmOffsetExprB = cmmOffsetExpr cmmLabelOffB :: CLabel -> ByteOff -> CmmLit cmmLabelOffB = cmmLabelOff cmmOffsetLitB :: CmmLit -> ByteOff -> CmmLit cmmOffsetLitB = cmmOffsetLit ----------------------- -- The "W" variants take word offsets cmmOffsetExprW :: Platform -> CmmExpr -> CmmExpr -> CmmExpr -- The second arg is a *word* offset; need to change it to bytes cmmOffsetExprW platform e (CmmLit (CmmInt n _)) = cmmOffsetW platform e (fromInteger n) cmmOffsetExprW platform e wd_off = cmmIndexExpr platform (wordWidth platform) e wd_off cmmOffsetW :: Platform -> CmmExpr -> WordOff -> CmmExpr cmmOffsetW platform e n = cmmOffsetB platform e (wordsToBytes platform n) cmmRegOffW :: Platform -> CmmReg -> WordOff -> CmmExpr cmmRegOffW platform reg wd_off = cmmRegOffB reg (wordsToBytes platform wd_off) cmmOffsetLitW :: Platform -> CmmLit -> WordOff -> CmmLit cmmOffsetLitW platform lit wd_off = cmmOffsetLitB lit (wordsToBytes platform wd_off) cmmLabelOffW :: Platform -> CLabel -> WordOff -> CmmLit cmmLabelOffW platform lbl wd_off = cmmLabelOffB lbl (wordsToBytes platform wd_off) cmmLoadIndexW :: Platform -> CmmExpr -> Int -> CmmType -> CmmExpr cmmLoadIndexW platform base off ty = CmmLoad (cmmOffsetW platform base off) ty NaturallyAligned -- TODO: Audit ses ----------------------- cmmULtWord, cmmUGeWord, cmmUGtWord, cmmUShrWord, cmmSLtWord, cmmNeWord, cmmEqWord, cmmOrWord, cmmAndWord, cmmSubWord, cmmAddWord, cmmMulWord, cmmQuotWord :: Platform -> CmmExpr -> 
CmmExpr -> CmmExpr cmmOrWord platform e1 e2 = CmmMachOp (mo_wordOr platform) [e1, e2] cmmAndWord platform e1 e2 = CmmMachOp (mo_wordAnd platform) [e1, e2] cmmNeWord platform e1 e2 = CmmMachOp (mo_wordNe platform) [e1, e2] cmmEqWord platform e1 e2 = CmmMachOp (mo_wordEq platform) [e1, e2] cmmULtWord platform e1 e2 = CmmMachOp (mo_wordULt platform) [e1, e2] cmmUGeWord platform e1 e2 = CmmMachOp (mo_wordUGe platform) [e1, e2] cmmUGtWord platform e1 e2 = CmmMachOp (mo_wordUGt platform) [e1, e2] cmmSLtWord platform e1 e2 = CmmMachOp (mo_wordSLt platform) [e1, e2] cmmUShrWord platform e1 e2 = CmmMachOp (mo_wordUShr platform) [e1, e2] cmmAddWord platform e1 e2 = CmmMachOp (mo_wordAdd platform) [e1, e2] cmmSubWord platform e1 e2 = CmmMachOp (mo_wordSub platform) [e1, e2] cmmMulWord platform e1 e2 = CmmMachOp (mo_wordMul platform) [e1, e2] cmmQuotWord platform e1 e2 = CmmMachOp (mo_wordUQuot platform) [e1, e2] cmmNegate :: Platform -> CmmExpr -> CmmExpr cmmNegate platform = \case (CmmLit (CmmInt n rep)) -> CmmLit (CmmInt (-n) rep) e -> CmmMachOp (MO_S_Neg (cmmExprWidth platform e)) [e] cmmToWord :: Platform -> CmmExpr -> CmmExpr cmmToWord platform e | w == word = e | otherwise = CmmMachOp (MO_UU_Conv w word) [e] where w = cmmExprWidth platform e word = wordWidth platform cmmMkAssign :: Platform -> CmmExpr -> Unique -> (CmmNode O O, CmmExpr) cmmMkAssign platform expr uq = let !ty = cmmExprType platform expr reg = (CmmLocal (LocalReg uq ty)) in (CmmAssign reg expr, CmmReg reg) --------------------------------------------------- -- -- Tagging -- --------------------------------------------------- tAG_MASK :: Platform -> Int tAG_MASK platform = (1 `shiftL` pc_TAG_BITS (platformConstants platform)) - 1 mAX_PTR_TAG :: Platform -> Int mAX_PTR_TAG = tAG_MASK -- Tag bits mask cmmTagMask, cmmPointerMask :: Platform -> CmmExpr cmmTagMask platform = mkIntExpr platform (tAG_MASK platform) cmmPointerMask platform = mkIntExpr platform (complement (tAG_MASK platform)) -- Used to untag a 
possibly tagged pointer -- A static label need not be untagged cmmUntag, cmmIsTagged, cmmIsNotTagged, cmmConstrTag1 :: Platform -> CmmExpr -> CmmExpr cmmUntag _ e@(CmmLit (CmmLabel _)) = e -- Default case cmmUntag platform e = cmmAndWord platform e (cmmPointerMask platform) -- Test if a closure pointer is untagged/tagged. cmmIsTagged platform e = cmmNeWord platform (cmmAndWord platform e (cmmTagMask platform)) (zeroExpr platform) cmmIsNotTagged platform e = cmmEqWord platform (cmmAndWord platform e (cmmTagMask platform)) (zeroExpr platform) -- Get constructor tag, but one based. cmmConstrTag1 platform e = cmmAndWord platform e (cmmTagMask platform) ----------------------------------------------------------------------------- -- Overlap and usage -- | Returns True if the two STG registers overlap on the specified -- platform, in the sense that writing to one will clobber the -- other. This includes the case that the two registers are the same -- STG register. See Note [Overlapping global registers] for details. regsOverlap :: Platform -> CmmReg -> CmmReg -> Bool regsOverlap platform (CmmGlobal (GlobalRegUse g1 _)) (CmmGlobal (GlobalRegUse g2 _)) = globalRegsOverlap platform g1 g2 regsOverlap _ reg reg' = reg == reg' globalRegsOverlap :: Platform -> GlobalReg -> GlobalReg -> Bool globalRegsOverlap platform g1 g2 | Just real <- globalRegMaybe platform g1 , Just real' <- globalRegMaybe platform g2 , real == real' = True | otherwise = g1 == g2 -- | Returns True if the STG register is used by the expression, in -- the sense that a store to the register might affect the value of -- the expression. -- -- We must check for overlapping registers and not just equal -- registers here, otherwise CmmSink may incorrectly reorder -- assignments that conflict due to overlap. See #10521 and Note -- [Overlapping global registers]. 
regUsedIn :: Platform -> CmmReg -> CmmExpr -> Bool regUsedIn platform = regUsedIn_ where _ `regUsedIn_` CmmLit _ = False reg `regUsedIn_` CmmLoad e _ _ = reg `regUsedIn_` e reg `regUsedIn_` CmmReg reg' = regsOverlap platform reg reg' reg `regUsedIn_` CmmRegOff reg' _ = regsOverlap platform reg reg' reg `regUsedIn_` CmmMachOp _ es = any (reg `regUsedIn_`) es _ `regUsedIn_` CmmStackSlot _ _ = False globalRegUsedIn :: Platform -> GlobalReg -> CmmExpr -> Bool globalRegUsedIn platform = globalRegUsedIn_ where _ `globalRegUsedIn_` CmmLit _ = False reg `globalRegUsedIn_` CmmLoad e _ _ = reg `globalRegUsedIn_` e reg `globalRegUsedIn_` CmmReg reg' | CmmGlobal (GlobalRegUse reg' _) <- reg' = globalRegsOverlap platform reg reg' | otherwise = False reg `globalRegUsedIn_` CmmRegOff reg' _ | CmmGlobal (GlobalRegUse reg' _) <- reg' = globalRegsOverlap platform reg reg' | otherwise = False reg `globalRegUsedIn_` CmmMachOp _ es = any (reg `globalRegUsedIn_`) es _ `globalRegUsedIn_` CmmStackSlot _ _ = False -------------------------------------------- -- -- mkLiveness -- --------------------------------------------- mkLiveness :: Platform -> [LocalReg] -> Liveness mkLiveness _ [] = [] mkLiveness platform (reg:regs) = bits ++ mkLiveness platform regs where word_size = platformWordSizeInBytes platform sizeW = (widthInBytes (typeWidth (localRegType reg)) + word_size - 1) `quot` word_size -- number of words, rounded up bits = replicate sizeW is_non_ptr -- True <=> Non Ptr is_non_ptr = not $ isGcPtrType (localRegType reg) -- ============================================== - -- ============================================== - -- ============================================== - --------------------------------------------------- -- -- Manipulating CmmGraphs -- --------------------------------------------------- modifyGraph :: (Graph n C C -> Graph n' C C) -> GenCmmGraph n -> GenCmmGraph n' modifyGraph f g = CmmGraph {g_entry=g_entry g, g_graph=f (g_graph g)} ofBlockMap :: BlockId -> 
LabelMap CmmBlock -> CmmGraph ofBlockMap entry bodyMap = CmmGraph {g_entry=entry, g_graph=GMany NothingO bodyMap NothingO} -- | like 'toBlockList', but the entry block always comes first toBlockListEntryFirst :: CmmGraph -> [CmmBlock] toBlockListEntryFirst g | mapNull m = [] | otherwise = entry_block : others where m = toBlockMap g entry_id = g_entry g Just entry_block = mapLookup entry_id m others = filter ((/= entry_id) . entryLabel) (mapElems m) -- | Like 'toBlockListEntryFirst', but we strive to ensure that we order blocks -- so that the false case of a conditional jumps to the next block in the output -- list of blocks. This matches the way OldCmm blocks were output since in -- OldCmm the false case was a fallthrough, whereas in Cmm conditional branches -- have both true and false successors. Block ordering can make a big difference -- in performance in the LLVM backend. Note that we rely crucially on the order -- of successors returned for CmmCondBranch by the NonLocal instance for CmmNode -- defined in "GHC.Cmm.Node". 
-GBM toBlockListEntryFirstFalseFallthrough :: CmmGraph -> [CmmBlock] toBlockListEntryFirstFalseFallthrough g | mapNull m = [] | otherwise = dfs setEmpty [entry_block] where m = toBlockMap g entry_id = g_entry g Just entry_block = mapLookup entry_id m dfs :: LabelSet -> [CmmBlock] -> [CmmBlock] dfs _ [] = [] dfs visited (block:bs) | id `setMember` visited = dfs visited bs | otherwise = block : dfs (setInsert id visited) bs' where id = entryLabel block bs' = foldr add_id bs (successors block) add_id id bs = case mapLookup id m of Just b -> b : bs Nothing -> bs ofBlockList :: BlockId -> [CmmBlock] -> CmmGraph ofBlockList entry blocks = CmmGraph { g_entry = entry , g_graph = GMany NothingO body NothingO } where body = foldr addBlock emptyBody blocks mapGraphNodes :: ( CmmNode C O -> CmmNode C O , CmmNode O O -> CmmNode O O , CmmNode O C -> CmmNode O C) -> CmmGraph -> CmmGraph mapGraphNodes funs@(mf,_,_) g = ofBlockMap (entryLabel $ mf $ CmmEntry (g_entry g) GlobalScope) $ mapMap (mapBlock3' funs) $ toBlockMap g mapGraphNodes1 :: (forall e x. 
CmmNode e x -> CmmNode e x) -> CmmGraph -> CmmGraph mapGraphNodes1 f = modifyGraph (mapGraph f) foldlGraphBlocks :: (a -> CmmBlock -> a) -> a -> CmmGraph -> a foldlGraphBlocks k z g = mapFoldl k z $ toBlockMap g ------------------------------------------------- -- Tick utilities -- | Extract all tick annotations from the given block blockTicks :: Block CmmNode C C -> [CmmTickish] blockTicks b = reverse $ foldBlockNodesF goStmt b [] where goStmt :: CmmNode e x -> [CmmTickish] -> [CmmTickish] goStmt (CmmTick t) ts = t:ts goStmt _other ts = ts -- ----------------------------------------------------------------------------- -- Access to common global registers baseExpr, spExpr, hpExpr, currentTSOExpr, currentNurseryExpr, spLimExpr, hpLimExpr, cccsExpr :: Platform -> CmmExpr baseExpr p = CmmReg $ baseReg p spExpr p = CmmReg $ spReg p spLimExpr p = CmmReg $ spLimReg p hpExpr p = CmmReg $ hpReg p hpLimExpr p = CmmReg $ hpLimReg p currentTSOExpr p = CmmReg $ currentTSOReg p currentNurseryExpr p = CmmReg $ currentNurseryReg p cccsExpr p = CmmReg $ cccsReg p ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToAsm/CFG/0000755000000000000000000000000007346545000017765 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToAsm/CFG/Weight.hs0000644000000000000000000000451507346545000021555 0ustar0000000000000000module GHC.CmmToAsm.CFG.Weight ( Weights (..) 
, defaultWeights , parseWeights ) where import GHC.Prelude import GHC.Utils.Panic -- | Edge weights to use when generating a CFG from CMM data Weights = Weights { uncondWeight :: Int , condBranchWeight :: Int , switchWeight :: Int , callWeight :: Int , likelyCondWeight :: Int , unlikelyCondWeight :: Int , infoTablePenalty :: Int , backEdgeBonus :: Int } -- | Default edge weights defaultWeights :: Weights defaultWeights = Weights { uncondWeight = 1000 , condBranchWeight = 800 , switchWeight = 1 , callWeight = -10 , likelyCondWeight = 900 , unlikelyCondWeight = 300 , infoTablePenalty = 300 , backEdgeBonus = 400 } parseWeights :: String -> Weights -> Weights parseWeights s oldWeights = foldl' (\cfg (n,v) -> update n v cfg) oldWeights assignments where assignments = map assignment $ settings s update "uncondWeight" n w = w {uncondWeight = n} update "condBranchWeight" n w = w {condBranchWeight = n} update "switchWeight" n w = w {switchWeight = n} update "callWeight" n w = w {callWeight = n} update "likelyCondWeight" n w = w {likelyCondWeight = n} update "unlikelyCondWeight" n w = w {unlikelyCondWeight = n} update "infoTablePenalty" n w = w {infoTablePenalty = n} update "backEdgeBonus" n w = w {backEdgeBonus = n} update other _ _ = panic $ other ++ " is not a CFG weight parameter. " ++ exampleString settings s | (s1,rest) <- break (== ',') s , null rest = [s1] | (s1,rest) <- break (== ',') s = s1 : settings (drop 1 rest) assignment as | (name, _:val) <- break (== '=') as = (name,read val) | otherwise = panic $ "Invalid CFG weight parameters." 
++ exampleString exampleString = "Example parameters: uncondWeight=1000," ++ "condBranchWeight=800,switchWeight=0,callWeight=300" ++ ",likelyCondWeight=900,unlikelyCondWeight=300" ++ ",infoTablePenalty=300,backEdgeBonus=400" ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/0000755000000000000000000000000007346545000017560 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/Config.hs0000644000000000000000000000555407346545000021332 0ustar0000000000000000-- | Llvm code generator configuration module GHC.CmmToLlvm.Config ( LlvmCgConfig(..) , LlvmConfig(..) , LlvmTarget(..) , initLlvmConfig ) where import GHC.Prelude import GHC.Platform import GHC.Utils.Outputable import GHC.Settings.Utils import GHC.Utils.Panic import GHC.CmmToLlvm.Version.Type (LlvmVersion) import System.FilePath data LlvmCgConfig = LlvmCgConfig { llvmCgPlatform :: !Platform -- ^ Target platform , llvmCgContext :: !SDocContext -- ^ Context for LLVM code generation , llvmCgFillUndefWithGarbage :: !Bool -- ^ Fill undefined literals with garbage values , llvmCgSplitSection :: !Bool -- ^ Split sections , llvmCgAvxEnabled :: !Bool , llvmCgBmiVersion :: Maybe BmiVersion -- ^ (x86) BMI instructions , llvmCgLlvmVersion :: Maybe LlvmVersion -- ^ version of Llvm we're using , llvmCgDoWarn :: !Bool -- ^ True ==> warn unsupported Llvm version , llvmCgLlvmTarget :: !String -- ^ target triple passed to LLVM , llvmCgLlvmConfig :: !LlvmConfig -- ^ Supported LLVM configurations. -- see Note [LLVM configuration] } data LlvmTarget = LlvmTarget { lDataLayout :: String , lCPU :: String , lAttributes :: [String] } -- Note [LLVM configuration] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- The `llvm-targets` and `llvm-passes` files are shipped with GHC and contain -- information needed by the LLVM backend to invoke `llc` and `opt`. -- Specifically: -- -- * llvm-targets maps autoconf host triples to the corresponding LLVM -- `data-layout` declarations. 
This information is extracted from clang using -- the script in utils/llvm-targets/gen-data-layout.sh and should be updated -- whenever we target a new version of LLVM. -- -- * llvm-passes maps GHC optimization levels to sets of LLVM optimization -- flags that GHC should pass to `opt`. -- -- This information is contained in files rather the GHC source to allow users -- to add new targets to GHC without having to recompile the compiler. -- initLlvmConfig :: FilePath -> IO LlvmConfig initLlvmConfig top_dir = do targets <- readAndParse "llvm-targets" passes <- readAndParse "llvm-passes" return $ LlvmConfig { llvmTargets = fmap mkLlvmTarget <$> targets , llvmPasses = passes } where readAndParse :: Read a => String -> IO a readAndParse name = do let f = top_dir name llvmConfigStr <- readFile f case maybeReadFuzzy llvmConfigStr of Just s -> return s Nothing -> pgmError ("Can't parse LLVM config file: " ++ show f) mkLlvmTarget :: (String, String, String) -> LlvmTarget mkLlvmTarget (dl, cpu, attrs) = LlvmTarget dl cpu (words attrs) data LlvmConfig = LlvmConfig { llvmTargets :: [(String, LlvmTarget)] , llvmPasses :: [(Int, String)] } ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/Version.hs0000644000000000000000000000212707346545000021543 0ustar0000000000000000module GHC.CmmToLlvm.Version ( LlvmVersion(..) , supportedLlvmVersionLowerBound , supportedLlvmVersionUpperBound , parseLlvmVersion , llvmVersionSupported , llvmVersionStr , llvmVersionList ) where import GHC.Prelude import GHC.CmmToLlvm.Version.Type import GHC.CmmToLlvm.Version.Bounds import Data.Char (isDigit) import Data.List (intercalate) import qualified Data.List.NonEmpty as NE parseLlvmVersion :: String -> Maybe LlvmVersion parseLlvmVersion = fmap LlvmVersion . NE.nonEmpty . go [] . dropWhile (not . isDigit) where go vs s | null ver_str = reverse vs | '.' 
: rest' <- rest = go (read ver_str : vs) rest' | otherwise = reverse (read ver_str : vs) where (ver_str, rest) = span isDigit s llvmVersionSupported :: LlvmVersion -> Bool llvmVersionSupported v = v >= supportedLlvmVersionLowerBound && v < supportedLlvmVersionUpperBound llvmVersionStr :: LlvmVersion -> String llvmVersionStr = intercalate "." . map show . llvmVersionList llvmVersionList :: LlvmVersion -> [Int] llvmVersionList = NE.toList . llvmVersionNE ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/Version/0000755000000000000000000000000007346545000021205 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/Version/Bounds.hs0000644000000000000000000000113407346545000022772 0ustar0000000000000000module GHC.CmmToLlvm.Version.Bounds ( supportedLlvmVersionLowerBound , supportedLlvmVersionUpperBound ) where import GHC.Prelude () import GHC.CmmToLlvm.Version.Type import qualified Data.List.NonEmpty as NE -- | The (inclusive) lower bound on the LLVM Version that is currently supported. supportedLlvmVersionLowerBound :: LlvmVersion supportedLlvmVersionLowerBound = LlvmVersion (13 NE.:| []) -- | The (not-inclusive) upper bound bound on the LLVM Version that is currently supported. supportedLlvmVersionUpperBound :: LlvmVersion supportedLlvmVersionUpperBound = LlvmVersion (20 NE.:| []) ghc-lib-parser-9.12.2.20250421/compiler/GHC/CmmToLlvm/Version/Type.hs0000644000000000000000000000033407346545000022462 0ustar0000000000000000module GHC.CmmToLlvm.Version.Type ( LlvmVersion(..) 
) where import GHC.Prelude import qualified Data.List.NonEmpty as NE newtype LlvmVersion = LlvmVersion { llvmVersionNE :: NE.NonEmpty Int } deriving (Eq, Ord) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core.hs0000644000000000000000000026744307346545000017152 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE NoPolyKinds #-} -- | GHC.Core holds all the main data types for use by for the Glasgow Haskell Compiler midsection module GHC.Core ( -- * Main data types Expr(..), Alt(..), Bind(..), AltCon(..), Arg, CoreProgram, CoreExpr, CoreAlt, CoreBind, CoreArg, CoreBndr, TaggedExpr, TaggedAlt, TaggedBind, TaggedArg, TaggedBndr(..), deTagExpr, -- * In/Out type synonyms InId, InBind, InExpr, InAlt, InArg, InType, InKind, InBndr, InVar, InCoercion, InTyVar, InCoVar, OutId, OutBind, OutExpr, OutAlt, OutArg, OutType, OutKind, OutBndr, OutVar, OutCoercion, OutTyVar, OutCoVar, MOutCoercion, -- ** 'Expr' construction mkLet, mkLets, mkLetNonRec, mkLetRec, mkLams, mkApps, mkTyApps, mkCoApps, mkVarApps, mkTyArg, mkIntLit, mkIntLitWrap, mkWordLit, mkWordLitWrap, mkWord8Lit, mkWord32LitWord32, mkWord64LitWord64, mkInt64LitInt64, mkCharLit, mkStringLit, mkFloatLit, mkFloatLitFloat, mkDoubleLit, mkDoubleLitDouble, mkConApp, mkConApp2, mkTyBind, mkCoBind, varToCoreExpr, varsToCoreExprs, mkBinds, isId, cmpAltCon, cmpAlt, ltAlt, -- ** Simple 'Expr' access functions and predicates bindersOf, bindersOfBinds, rhssOfBind, rhssOfAlts, foldBindersOfBindStrict, foldBindersOfBindsStrict, collectBinders, collectTyBinders, collectTyAndValBinders, collectNBinders, collectNValBinders_maybe, collectArgs, stripNArgs, collectArgsTicks, flattenBinds, collectFunSimple, exprToType, wrapLamBody, isValArg, isTypeArg, isCoArg, isTyCoArg, valArgCount, valBndrCount, isRuntimeArg, isRuntimeVar, -- * Unfolding data types Unfolding(..), UnfoldingCache(..), UnfoldingGuidance(..), UnfoldingSource(..), -- ** Constructing 'Unfolding's 
noUnfolding, bootUnfolding, evaldUnfolding, mkOtherCon, unSaturatedOk, needSaturated, boringCxtOk, boringCxtNotOk, -- ** Predicates and deconstruction on 'Unfolding' unfoldingTemplate, expandUnfolding_maybe, maybeUnfoldingTemplate, otherCons, isValueUnfolding, isEvaldUnfolding, isCheapUnfolding, isExpandableUnfolding, isConLikeUnfolding, isCompulsoryUnfolding, isStableUnfolding, isStableUserUnfolding, isStableSystemUnfolding, isInlineUnfolding, isBootUnfolding, isBetterUnfoldingThan, hasCoreUnfolding, hasSomeUnfolding, canUnfold, neverUnfoldGuidance, isStableSource, -- * Annotated expression data types AnnExpr, AnnExpr'(..), AnnBind(..), AnnAlt(..), -- ** Operations on annotated expressions collectAnnArgs, collectAnnArgsTicks, -- ** Operations on annotations deAnnotate, deAnnotate', deAnnAlt, deAnnBind, collectAnnBndrs, collectNAnnBndrs, -- * Orphanhood IsOrphan(..), isOrphan, notOrphan, chooseOrphanAnchor, -- * Core rule data types CoreRule(..), RuleName, RuleFun, IdUnfoldingFun, InScopeEnv(..), RuleOpts, -- ** Operations on 'CoreRule's ruleArity, ruleName, ruleIdName, ruleActivation, setRuleIdName, ruleModule, isBuiltinRule, isLocalRule, isAutoRule, ) where import GHC.Prelude import GHC.Platform import GHC.Types.Var.Env( InScopeSet ) import GHC.Types.Var import GHC.Core.Type import GHC.Core.Coercion import GHC.Core.Rules.Config ( RuleOpts ) import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Literal import GHC.Types.Tickish import GHC.Core.DataCon import GHC.Unit.Module import GHC.Types.Basic import GHC.Types.Unique.Set import GHC.Utils.Binary import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Data hiding (TyCon) import Data.Int import Data.Word infixl 4 `mkApps`, `mkTyApps`, `mkVarApps`, `App`, `mkCoApps` -- Left associative, so that we can say (f `mkTyApps` xs `mkVarApps` ys) {- ************************************************************************ * * \subsection{The main data types} * * 
************************************************************************ These data types are the heart of the compiler -} -- | This is the data type that represents GHCs core intermediate language. Currently -- GHC uses System FC for this purpose, -- which is closely related to the simpler and better known System F . -- -- We get from Haskell source to this Core language in a number of stages: -- -- 1. The source code is parsed into an abstract syntax tree, which is represented -- by the data type 'GHC.Hs.Expr.HsExpr' with the names being 'GHC.Types.Name.Reader.RdrNames' -- -- 2. This syntax tree is /renamed/, which attaches a 'GHC.Types.Unique.Unique' to every 'GHC.Types.Name.Reader.RdrName' -- (yielding a 'GHC.Types.Name.Name') to disambiguate identifiers which are lexically identical. -- For example, this program: -- -- @ -- f x = let f x = x + 1 -- in f (x - 2) -- @ -- -- Would be renamed by having 'Unique's attached so it looked something like this: -- -- @ -- f_1 x_2 = let f_3 x_4 = x_4 + 1 -- in f_3 (x_2 - 2) -- @ -- But see Note [Shadowing in Core] below. -- -- 3. The resulting syntax tree undergoes type checking (which also deals with instantiating -- type class arguments) to yield a 'GHC.Hs.Expr.HsExpr' type that has 'GHC.Types.Id.Id' as it's names. -- -- 4. Finally the syntax tree is /desugared/ from the expressive 'GHC.Hs.Expr.HsExpr' type into -- this 'Expr' type, which has far fewer constructors and hence is easier to perform -- optimization, analysis and code generation on. -- -- The type parameter @b@ is for the type of binders in the expression tree. -- -- The language consists of the following elements: -- -- * Variables -- See Note [Variable occurrences in Core] -- -- * Primitive literals -- -- * Applications: note that the argument may be a 'Type'. -- See Note [Representation polymorphism invariants] -- -- * Lambda abstraction -- See Note [Representation polymorphism invariants] -- -- * Recursive and non recursive @let@s. 
Operationally -- this corresponds to allocating a thunk for the things -- bound and then executing the sub-expression. -- -- See Note [Core letrec invariant] -- See Note [Core let-can-float invariant] -- See Note [Representation polymorphism invariants] -- See Note [Core type and coercion invariant] -- -- * Case expression. Operationally this corresponds to evaluating -- the scrutinee (expression examined) to weak head normal form -- and then examining at most one level of resulting constructor (i.e. you -- cannot do nested pattern matching directly with this). -- -- The binder gets bound to the value of the scrutinee, -- and the 'Type' must be that of all the case alternatives -- -- IMPORTANT: see Note [Case expression invariants] -- -- * Cast an expression to a particular type. -- This is used to implement @newtype@s (a @newtype@ constructor or -- destructor just becomes a 'Cast' in Core) and GADTs. -- -- * Ticks. These are used to represent all the source annotation we -- support: profiling SCCs, HPC ticks, and GHCi breakpoints. -- -- * A type: this should only show up at the top level of an Arg -- -- * A coercion {- Note [Why does Case have a 'Type' field?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The obvious alternative is exprType (Case scrut bndr alts) | (_,_,rhs1):_ <- alts = exprType rhs1 But caching the type in the Case constructor exprType (Case scrut bndr ty alts) = ty is better for at least three reasons: * It works when there are no alternatives (see case invariant 1 above) * It might be faster in deeply-nested situations. * It might not be quite the same as (exprType rhs) for one of the RHSs in alts. Consider a phantom type synonym type S a = Int and we want to form the case expression case x of { K (a::*) -> (e :: S a) } Then exprType of the RHS is (S a), but we cannot make that be the 'ty' in the Case constructor because 'a' is simply not in scope there. Instead we must expand the synonym to Int before putting it in the Case constructor. 
See GHC.Core.Utils.mkSingleAltCase. So we'd have to do synonym expansion in exprType which would be inefficient. * The type stored in the case is checked with lintInTy. This checks (among other things) that it does not mention any variables that are not in scope. If we did not have the type there, it would be a bit harder for Core Lint to reject case blah of Ex x -> x where data Ex = forall a. Ex a. -} -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Expr b = Var Id | Lit Literal | App (Expr b) (Arg b) | Lam b (Expr b) | Let (Bind b) (Expr b) | Case (Expr b) b Type [Alt b] -- See Note [Case expression invariants] -- and Note [Why does Case have a 'Type' field?] | Cast (Expr b) CoercionR -- The Coercion has Representational role | Tick CoreTickish (Expr b) | Type Type | Coercion Coercion deriving Data -- | Type synonym for expressions that occur in function argument positions. -- Only 'Arg' should contain a 'Type' at top level, general 'Expr' should not type Arg b = Expr b -- | A case split alternative. Consists of the constructor leading to the alternative, -- the variables bound from the constructor, and the expression to be executed given that binding. -- The default alternative is @(DEFAULT, [], rhs)@ -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Alt b = Alt AltCon [b] (Expr b) deriving (Data) -- | A case alternative constructor (i.e. pattern match) -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data AltCon = DataAlt DataCon -- ^ A plain data constructor: @case e of { Foo x -> ... }@. -- Invariant: the 'DataCon' is always from a @data@ type, and never from a @newtype@ | LitAlt Literal -- ^ A literal: @case e of { 1 -> ... }@ -- Invariant: always an *unlifted* literal -- See Note [Literal alternatives] | DEFAULT -- ^ Trivial alternative: @case e of { _ -> ... 
}@ deriving (Eq, Data) -- This instance is a bit shady. It can only be used to compare AltCons for -- a single type constructor. Fortunately, it seems quite unlikely that we'll -- ever need to compare AltCons for different type constructors. -- The instance adheres to the order described in Note [Case expression invariants] instance Ord AltCon where compare (DataAlt con1) (DataAlt con2) = assert (dataConTyCon con1 == dataConTyCon con2) $ compare (dataConTag con1) (dataConTag con2) compare (DataAlt _) _ = GT compare _ (DataAlt _) = LT compare (LitAlt l1) (LitAlt l2) = compare l1 l2 compare (LitAlt _) DEFAULT = GT compare DEFAULT DEFAULT = EQ compare DEFAULT _ = LT -- | Binding, used for top level bindings in a module and local bindings in a @let@. -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Bind b = NonRec b (Expr b) | Rec [(b, (Expr b))] deriving Data -- | Helper function. You can use the result of 'mkBinds' with 'mkLets' for -- instance. -- -- * @'mkBinds' 'Recursive' binds@ makes a single mutually-recursive -- bindings with all the rhs/lhs pairs in @binds@ -- * @'mkBinds' 'NonRecursive' binds@ makes one non-recursive binding -- for each rhs/lhs pairs in @binds@ mkBinds :: RecFlag -> [(b, (Expr b))] -> [Bind b] mkBinds Recursive binds = [Rec binds] mkBinds NonRecursive binds = map (uncurry NonRec) binds {- Note [Literal alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Literal alternatives (LitAlt lit) are always for *un-lifted* literals. We have one literal, a literal Integer, that is lifted, and we don't allow in a LitAlt, because LitAlt cases don't do any evaluation. Also (see #5603) if you say case 3 of IS x -> ... IP _ -> ... IN _ -> ... (where IS, IP, IN are the constructors for Integer) we don't want the simplifier calling findAlt with argument (LitAlt 3). No no. Integer literals are an opaque encoding of an algebraic data type, not of an unlifted literal, like all the others. 
Also, we do not permit case analysis with literal patterns on floating-point types. See #9238 and Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold for the rationale for this restriction. -------------------------- GHC.Core INVARIANTS --------------------------- Note [Variable occurrences in Core] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Variable /occurrences/ are never CoVars, though /bindings/ can be. All CoVars appear in Coercions. For example \(c :: Age~#Int) (d::Int). d |> (sym c) Here 'c' is a CoVar, which is lambda-bound, but it /occurs/ in a Coercion, (sym c). Note [Shadowing in Core] ~~~~~~~~~~~~~~~~~~~~~~~~ You might wonder if there is an invariant that a Core expression has no "shadowing". For example, is this illegal? \x. \x. blah -- x is shadowed Answer; no! Core does /not/ have a no-shadowing invariant. Neither the simplifier nor any other pass GUARANTEES that shadowing is avoided. Thus, all passes SHOULD work fine even in the presence of arbitrary shadowing in their inputs. So the Unique in a Var is not really unique at all. Still, it's very useful to give a constant-time equality/ordering for Vars, and to give a key that can be used to make sets of Vars (VarSet), or mappings from Vars to other things (VarEnv). Moreover, if you do want to eliminate shadowing, you can give a new Unique to an Id without changing its printable name, which makes debugging easier. It would in many ways be easier to have a no-shadowing invariant. And the Simplifier does its best to clone variables that are shadowed. But it is extremely difficult to GUARANTEE it: * We use `GHC.Types.Id.mkTemplateLocal` to make up local binders, with uniques that are locally-unique (enough for the purpose) but not globally unique. It is convenient not to have to plumb a unique supply to these functions. * It is very difficult for the Simplifier to gurantee a no-shadowing result. See Note [Shadowing in the Simplifier] in GHC.Core.Opt.Simplify.Iteration. 
* See Note [Shadowing in CSE] in GHC.Core.Opt.CSE * See Note [Shadowing in SpecConstr] in GHC.Core.Opt.SpecContr Note [Core letrec invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Core letrec invariant: The right hand sides of all /top-level/ or /recursive/ bindings must be of lifted type See "Type#type_classification" in GHC.Core.Type for the meaning of "lifted" vs. "unlifted". For the non-top-level, non-recursive case see Note [Core let-can-float invariant]. At top level, however, there are two exceptions to this rule: (TL1) A top-level binding is allowed to bind primitive string literal, (which is unlifted). See Note [Core top-level string literals]. (TL2) In Core, we generate a top-level binding for every non-newtype data constructor worker or wrapper e.g. data T = MkT Int we generate MkT :: Int -> T MkT = \x. MkT x (This binding looks recursive, but isn't; it defines a top-level, curried function whose body just allocates and returns the data constructor.) But if (a) the data constructor is nullary and (b) the data type is unlifted, this binding is unlifted. e.g. data S :: UnliftedType where { S1 :: S, S2 :: S -> S } we generate S1 :: S -- A top-level unlifted binding S1 = S1 We allow this top-level unlifted binding to exist. Note [Core let-can-float invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The let-can-float invariant: The right hand side of a /non-top-level/, /non-recursive/ binding may be of unlifted type, but only if the expression is ok-for-speculation or the 'Let' is for a join point. (For top-level or recursive lets see Note [Core letrec invariant].) This means that the let can be floated around without difficulty. For example, this is OK: y::Int# = x +# 1# But this is not, as it may affect termination if the expression is floated out: y::Int# = fac 4# In this situation you should use @case@ rather than a @let@. 
The function 'GHC.Core.Utils.needsCaseBinding' can help you determine which to generate, or alternatively use 'GHC.Core.Make.mkCoreLet' rather than this constructor directly, which will generate a @case@ if necessary The let-can-float invariant is initially enforced by mkCoreLet in GHC.Core.Make. Historical Note [The let/app invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before 2022 GHC used the "let/app invariant", which applied the let-can-float rules to the argument of an application, as well as to the RHS of a let. This made some kind of sense, because 'let' can always be encoded as application: let x=rhs in b = (\x.b) rhs But the let/app invariant got in the way of RULES; see #19313. For example up :: Int# -> Int# {-# RULES "up/down" forall x. up (down x) = x #-} The LHS of this rule doesn't satisfy the let/app invariant. Indeed RULES is a big reason that GHC doesn't use ANF, where the argument of an application is always a variable or a constant. To allow RULES to work nicely we need to allow lots of things in the arguments of a call. TL;DR: we relaxed the let/app invariant to become the let-can-float invariant. Note [Core top-level string literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As an exception to the usual rule that top-level binders must be lifted, we allow binding primitive string literals (of type Addr#) at the top level. This allows us to share string literals earlier in the pipeline and crucially allows other optimizations in the Core2Core pipeline to fire. Consider, f n = let a::Addr# = "foo"# in \x -> blah In order to be able to inline `f`, we would like to float `a` to the top. Another option would be to inline `a`, but that would lead to duplicating string literals, which we want to avoid. See #8472. The solution is simply to allow top-level unlifted binders. We can't allow arbitrary unlifted expression at the top-level though, unlifted binders cannot be thunks, so we just allow string literals. 
We allow the top-level primitive string literals to be wrapped in Ticks in the same way they can be wrapped when nested in an expression. CoreToSTG currently discards Ticks around top-level primitive string literals. See #14779. Also see Note [Compilation plan for top-level string literals]. Note [Compilation plan for top-level string literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is a summary on how top-level string literals are handled by various parts of the compilation pipeline. * In the source language, there is no way to bind a primitive string literal at the top level. * In Core, we have a special rule that permits top-level Addr# bindings. See Note [Core top-level string literals]. Core-to-core passes may introduce new top-level string literals. See GHC.Core.Utils.exprIsTopLevelBindable, and exprIsTickedString * In STG, top-level string literals are explicitly represented in the syntax tree. * A top-level string literal may end up exported from a module. In this case, in the object file, the content of the exported literal is given a label with the _bytes suffix. Note [NON-BOTTOM-DICTS invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is a global invariant (not checkable by Lint) that every non-newtype dictionary-typed expression is non-bottom. These conditions are captured by GHC.Core.Type.isTerminatingType. How are we so sure about this? Dictionaries are built by GHC in only two ways: * A dictionary function (DFun), arising from an instance declaration. DFuns do no computation: they always return a data constructor immediately. See DFunUnfolding in GHC.Core. So the result of a call to a DFun is always non-bottom. Exception: newtype dictionaries. Plus: see the Very Nasty Wrinkle in Note [Speculative evaluation] in GHC.CoreToStg.Prep * A superclass selection from some other dictionary. This is harder to guarantee: see Note [Recursive superclasses] and Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance. 
A bad Core-to-Core pass could invalidate this reasoning, but that's too bad. It's still an invariant of Core programs generated by GHC from Haskell, and Core-to-Core passes maintain it. Why is it useful to know that dictionaries are non-bottom? 1. It justifies the use of `-XDictsStrict`; see `GHC.Core.Types.Demand.strictifyDictDmd` 2. It means that (eq_sel d) is ok-for-speculation and thus case (eq_sel d) of _ -> blah can be discarded by the Simplifier. See these Notes: Note [exprOkForSpeculation and type classes] in GHC.Core.Utils Note[Speculative evaluation] in GHC.CoreToStg.Prep Note [Case expression invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Case expressions are one of the more complicated elements of the Core language, and come with a number of invariants. All of them should be checked by Core Lint. 1. The list of alternatives may be empty; See Note [Empty case alternatives] 2. The 'DEFAULT' case alternative must be first in the list, if it occurs at all. Checked in GHC.Core.Lint.checkCaseAlts. 3. The remaining cases are in order of (strictly) increasing tag (for 'DataAlts') or lit (for 'LitAlts'). This makes finding the relevant constructor easy, and makes comparison easier too. Checked in GHC.Core.Lint.checkCaseAlts. 4. The list of alternatives must be exhaustive. An /exhaustive/ case does not necessarily mention all constructors: @ data Foo = Red | Green | Blue ... case x of Red -> True other -> f (case x of Green -> ... Blue -> ... ) ... @ The inner case does not need a @Red@ alternative, because @x@ can't be @Red@ at that program point. This is not checked by Core Lint -- it's very hard to do so. E.g. suppose that inner case was floated out, thus: let a = case x of Green -> ... Blue -> ... ) case x of Red -> True other -> f a Now it's really hard to see that the Green/Blue case is exhaustive. But it is. If you have a case-expression that really /isn't/ exhaustive, we may generate seg-faults. Consider the Green/Blue case above. 
Since there are only two branches we may generate code that tests for Green, and if not Green simply /assumes/ Blue (since, if the case is exhaustive, that's all that remains). Of course, if it's not Blue and we start fetching fields that should be in a Blue constructor, we may die horribly. See also Note [Core Lint guarantee] in GHC.Core.Lint. 5. Floating-point values must not be scrutinised against literals. See #9238 and Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold for rationale. Checked in lintCaseExpr; see the call to isFloatingPrimTy. 6. The 'ty' field of (Case scrut bndr ty alts) is the type of the /entire/ case expression. Checked in lintAltExpr. See also Note [Why does Case have a 'Type' field?]. 7. The type of the scrutinee must be the same as the type of the case binder, obviously. Checked in lintCaseExpr. 8. The multiplicity of the binders in constructor patterns must be the multiplicity of the corresponding field /scaled by the multiplicity of the case binder/. Checked in lintCoreAlt. Note [Core type and coercion invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow a /non-recursive/, /non-top-level/ let to bind type and coercion variables. These can be very convenient for postponing type substitutions until the next run of the simplifier. * A type variable binding must have a RHS of (Type ty) * A coercion variable binding must have a RHS of (Coercion co) It is possible to have terms that return a coercion, but we use case-binding for those; e.g. case (eq_sel d) of (co :: a ~# b) -> blah where eq_sel :: (a~b) -> (a~#b) Or even case (df @Int) of (co :: a ~# b) -> blah Which is very exotic, and I think never encountered; but see Note [Equality superclasses in quantified constraints] in GHC.Tc.Solver.Dict Note [Representation polymorphism invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC allows us to abstract over calling conventions using **representation polymorphism**. 
For example, we have: ($) :: forall (r :: RuntimeRep) (a :: Type) (b :: TYPE r). (a -> b) -> a -> b In this example, the type `b` is representation-polymorphic: it has kind `TYPE r`, where the type variable `r :: RuntimeRep` abstracts over the runtime representation of values of type `b`. To ensure that programs containing representation-polymorphism remain compilable, we enforce the following representation-polymorphism invariants: The paper "Levity Polymorphism" [PLDI'17] states the first two invariants: I1. The type of a bound variable must have a fixed runtime representation (except for join points: See Note [Invariants on join points]) I2. The type of a function argument must have a fixed runtime representation. Example of I1: \(r::RuntimeRep). \(a::TYPE r). \(x::a). e This contravenes I1 because x's type has kind (TYPE r), which has 'r' free. We thus wouldn't know how to compile this lambda abstraction. Example of I2: f (undefined :: (a :: TYPE r)) This contravenes I2: we are applying the function `f` to a value with an unknown runtime representation. Note that these two invariants require us to check other types than just the types of bound variables and types of function arguments, due to transformations that GHC performs. For example, the definition myCoerce :: forall {r} (a :: TYPE r) (b :: TYPE r). Coercible a b => a -> b myCoerce = coerce is invalid, because `coerce` has no binding (see GHC.Types.Id.Make.coerceId). So, before code-generation, GHC saturates the RHS of 'myCoerce' by performing an eta-expansion (see GHC.CoreToStg.Prep.maybeSaturate): myCoerce = \ (x :: TYPE r) -> coerce x However, this transformation would be invalid, because now the binding of x in the lambda abstraction would violate I1. See Note [Representation-polymorphism checking built-ins] in GHC.Tc.Utils.Concrete and Note [Linting representation-polymorphic builtins] in GHC.Core.Lint for more details. 
Note that we currently require something slightly stronger than a fixed runtime representation: we check whether bound variables and function arguments have a /fixed RuntimeRep/ in the sense of Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. See Note [Representation polymorphism checking] in GHC.Tc.Utils.Concrete for an overview of how we enforce these invariants in the typechecker. Note [Empty case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The alternatives of a case expression should be exhaustive. But this exhaustive list can be empty! * A case expression can have empty alternatives if (and only if) the scrutinee is bound to raise an exception or diverge. When do we know this? See Note [Bottoming expressions] in GHC.Core.Utils. * The possibility of empty alternatives is one reason we need a type on the case expression: if the alternatives are empty we can't get the type from the alternatives! * In the case of empty types (see Note [Bottoming expressions]), say data T we do NOT want to replace case (x::T) of Bool {} --> error Bool "Inaccessible case" because x might raise an exception, and *that*'s what we want to see! (#6067 is an example.) To preserve semantics we'd have to say x `seq` error Bool "Inaccessible case" but the 'seq' is just such a case, so we are back to square 1. * We can use the empty-alternative construct to coerce error values from one type to another. For example f :: Int -> Int f n = error "urk" g :: Int -> (# Char, Bool #) g x = case f x of { 0 -> ..., n -> ... } Then if we inline f in g's RHS we get case (error Int "urk") of (# Char, Bool #) { ... } and we can discard the alternatives since the scrutinee is bottom to give case (error Int "urk") of (# Char, Bool #) {} This is nicer than using an unsafe coerce between Int ~ (# Char,Bool #), if for no other reason that we don't need to instantiate the (~) at an unboxed type. 
* We treat a case expression with empty alternatives as trivial iff its scrutinee is (see GHC.Core.Utils.exprIsTrivial). This is actually important; see Note [Empty case is trivial] in GHC.Core.Utils * We lower empty cases in GHC.CoreToStg.coreToStgExpr to an eval on the scrutinee. Historical Note: We used to lower EmptyCase in CorePrep by way of an unsafeCoercion on the scrutinee, but that yielded panics in CodeGen when we were beginning to eta expand in arguments, plus required to mess with heterogenously-kinded coercions. It's simpler to stick to it just a bit longer. Note [Join points] ~~~~~~~~~~~~~~~~~~ In Core, a *join point* is a specially tagged function whose only occurrences are saturated tail calls. A tail call can appear in these places: 1. In the branches (not the scrutinee) of a case 2. Underneath a let (value or join point) 3. Inside another join point We write a join-point declaration as join j @a @b x y = e1 in e2, like a let binding but with "join" instead (or "join rec" for "let rec"). Note that we put the parameters before the = rather than using lambdas; this is because it's relevant how many parameters the join point takes *as a join point.* This number is called the *join arity,* distinct from arity because it counts types as well as values. Note that a join point may return a lambda! So join j x = x + 1 is different from join j = \x -> x + 1 The former has join arity 1, while the latter has join arity 0. The identifier for a join point is called a join id or a *label.* An invocation is called a *jump.* We write a jump using the jump keyword: jump j 3 The words *label* and *jump* are evocative of assembly code (or Cmm) for a reason: join points are indeed compiled as labeled blocks, and jumps become actual jumps (plus argument passing and stack adjustment). There is no closure allocated and only a fraction of the function-call overhead. 
Hence we would like as many functions as possible to become join points (see OccurAnal) and the type rules for join points ensure we preserve the properties that make them efficient. In the actual AST, a join point is indicated by the IdDetails of the binder: a local value binding gets 'VanillaId' but a join point gets a 'JoinId' with its join arity. For more details, see the paper: Luke Maurer, Paul Downen, Zena Ariola, and Simon Peyton Jones. "Compiling without continuations." Submitted to PLDI'17. https://www.microsoft.com/en-us/research/publication/compiling-without-continuations/ Note [Invariants on join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Join points must follow these invariants: 1. All occurrences must be tail calls. Each of these tail calls must pass the same number of arguments, counting both types and values; we call this the "join arity" (to distinguish from regular arity, which only counts values). See Note [Join points are less general than the paper] 2. For join arity n, the right-hand side must begin with at least n lambdas. No ticks, no casts, just lambdas! C.f. GHC.Core.Utils.joinRhsArity. 2a. Moreover, this same constraint applies to any unfolding of the binder. Reason: if we want to push a continuation into the RHS we must push it into the unfolding as well. 2b. The Arity (in the IdInfo) of a join point varies independently of the join-arity. For example, we could have j x = case x of { T -> \y.y; F -> \y.3 } Its join-arity is 1, but its idArity is 2; and we do not eta-expand join points: see Note [Do not eta-expand join points] in GHC.Core.Opt.Simplify.Utils. Allowing the idArity to be bigger than the join-arity is important in arityType; see GHC.Core.Opt.Arity Note [Arity for recursive join bindings] Historical note: see #17294. 3. If the binding is recursive, then all other bindings in the recursive group must also be join points. 4. 
The binding's type must not be polymorphic in its return type (as defined in Note [The polymorphism rule of join points]). However, join points have simpler invariants in other ways 5. A join point can have an unboxed type without the RHS being ok-for-speculation (i.e. drop the let-can-float invariant) e.g. let j :: Int# = factorial x in ... 6. The RHS of join point is not required to have a fixed runtime representation, e.g. let j :: r :: TYPE l = fail (##) in ... This happened in an intermediate program #13394 Examples: join j1 x = 1 + x in jump j (jump j x) -- Fails 1: non-tail call join j1' x = 1 + x in if even a then jump j1 a else jump j1 a b -- Fails 1: inconsistent calls join j2 x = flip (+) x in j2 1 2 -- Fails 2: not enough lambdas join j2' x = \y -> x + y in j3 1 -- Passes: extra lams ok join j @a (x :: a) = x -- Fails 4: polymorphic in ret type Invariant 1 applies to left-hand sides of rewrite rules, so a rule for a join point must have an exact call as its LHS. Strictly speaking, invariant 3 is redundant, since a call from inside a lazy binding isn't a tail call. Since a let-bound value can't invoke a free join point, then, they can't be mutually recursive. (A Core binding group *can* include spurious extra bindings if the occurrence analyser hasn't run, so invariant 3 does still need to be checked.) For the rigorous definition of "tail call", see Section 3 of the paper (Note [Join points]). Invariant 4 is subtle; see Note [The polymorphism rule of join points]. Invariant 6 is to enable code like this: f = \(r :: RuntimeRep) (a :: TYPE r) (x :: T). join j :: a j = error @r @a "bloop" in case x of A -> j B -> j C -> error @r @a "blurp" Core Lint will check these invariants, anticipating that any binder whose OccInfo is marked AlwaysTailCalled will become a join point as soon as the simplifier (or simpleOptPgm) runs. 
Note [Join points are less general than the paper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the paper "Compiling without continuations", this expression is perfectly valid: join { j = \_ -> e } in (case blah of ) ( True -> j void# ) arg ( False -> blah ) assuming 'j' has arity 1. Here the call to 'j' does not look like a tail call, but actually everything is fine. See Section 3, "Managing \Delta" in the paper. In GHC, however, we adopt a slightly more restrictive subset, in which join point calls must be tail calls. I think we /could/ loosen it up, but in fact the simplifier ensures that we always get tail calls, and it makes the back end a bit easier I think. Generally, just less to think about; nothing deeper than that. Note [The type of a join point] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A join point has the same type it would have as a function. That is, if it takes an Int and a Bool and its body produces a String, its type is `Int -> Bool -> String`. Natural as this may seem, it can be awkward. A join point shouldn't be thought to "return" in the same sense a function does---a jump is one-way. This is crucial for understanding how case-of-case interacts with join points: case (join j :: Int -> Bool -> String j x y = ... in jump j z w) of "" -> True _ -> False The simplifier will pull the case into the join point (see Note [Join points and case-of-case] in GHC.Core.Opt.Simplify): join j :: Int -> Bool -> Bool -- changed! j x y = case ... of "" -> True _ -> False in jump j z w The body of the join point now returns a Bool, so the label `j` has to have its type updated accordingly, which is done by GHC.Core.Opt.Simplify.Env.adjustJoinPointType. Inconvenient though this may be, it has the advantage that 'GHC.Core.Utils.exprType' can still return a type for any expression, including a jump. Relationship to the paper This plan differs from the paper (see Note [Invariants on join points]). In the paper, we instead give j the type `Int -> Bool -> forall a. a`. 
Then each jump carries the "return type" as a parameter, exactly the way other non-returning functions like `error` work: case (join j :: Int -> Bool -> forall a. a j x y = ... in jump j z w @String) of "" -> True _ -> False Now we can move the case inward and we only have to change the jump: join j :: Int -> Bool -> forall a. a j x y = case ... of "" -> True _ -> False in jump j z w @Bool (Core Lint would still check that the body of the join point has the right type; that type would simply not be reflected in the join id.) Note [The polymorphism rule of join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Invariant 4 of Note [Invariants on join points] forbids a join point to be polymorphic in its return type. That is, if its type is forall a1 ... ak. t1 -> ... -> tn -> r where its join arity is k+n, none of the type parameters ai may occur free in r. In some way, this falls out of the fact that given join j @a1 ... @ak x1 ... xn = e1 in e2 then all calls to `j` are in tail-call positions of `e`, and expressions in tail-call positions in `e` have the same type as `e`. Therefore the type of `e1` -- the return type of the join point -- must be the same as the type of e2. Since the type variables aren't bound in `e2`, its type can't include them, and thus neither can the type of `e1`. This unfortunately prevents the `go` in the following code from being a join-point: iter :: forall a. Int -> (a -> a) -> a -> a iter @a n f x = go @a n f x where go :: forall a. Int -> (a -> a) -> a -> a go @a 0 _ x = x go @a n f x = go @a (n-1) f (f x) In this case, a static argument transformation would fix that (see ticket #14620): iter :: forall a. Int -> (a -> a) -> a -> a iter @a n f x = go' @a n f x where go' :: Int -> (a -> a) -> a -> a go' 0 _ x = x go' n f x = go' (n-1) f (f x) In general, loopification could be employed to do that (see #14068.) Can we simply drop the requirement, and allow `go` to be a join-point? We could, and it would work. 
But we could no longer apply the case-of-join-point transformation universally. This transformation would do: case (join go @a n f x = case n of 0 -> x n -> go @a (n-1) f (f x) in go @Bool n neg True) of True -> e1; False -> e2 ===> join go @a n f x = case n of 0 -> case x of True -> e1; False -> e2 n -> go @a (n-1) f (f x) in go @Bool n neg True but that is ill-typed, as `x` is type `a`, not `Bool`. This also justifies why we do not consider the `e` in `e |> co` to be in tail position: A cast changes the type, but the type must be the same. But operationally, casts are vacuous, so this is a bit unfortunate! See #14610 for ideas how to fix this. ************************************************************************ * * In/Out type synonyms * * ********************************************************************* -} {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied -} -- Pre-cloning or substitution type InBndr = CoreBndr type InType = Type type InKind = Kind type InBind = CoreBind type InExpr = CoreExpr type InAlt = CoreAlt type InArg = CoreArg type InCoercion = Coercion -- Post-cloning or substitution type OutBndr = CoreBndr type OutType = Type type OutKind = Kind type OutCoercion = Coercion type OutBind = CoreBind type OutExpr = CoreExpr type OutAlt = CoreAlt type OutArg = CoreArg type MOutCoercion = MCoercion {- ************************************************************************ * * Orphans * * ************************************************************************ -} -- | Is this instance an orphan? If it is not an orphan, contains an 'OccName' -- witnessing the instance's non-orphanhood. -- See Note [Orphans] data IsOrphan = IsOrphan | NotOrphan !OccName -- The OccName 'n' witnesses the instance's non-orphanhood -- In that case, the instance is fingerprinted as part -- of the definition of 'n' deriving Data -- | Returns true if 'IsOrphan' is orphan. 
isOrphan :: IsOrphan -> Bool isOrphan IsOrphan = True isOrphan _ = False -- | Returns true if 'IsOrphan' is not an orphan. notOrphan :: IsOrphan -> Bool notOrphan NotOrphan{} = True notOrphan _ = False chooseOrphanAnchor :: NameSet -> IsOrphan -- Something (rule, instance) is related to all the Names in this -- list. Choose one of them to be an "anchor" for the orphan. We make -- the choice deterministic to avoid gratuitous changes in the ABI -- hash (#4012). Specifically, use lexicographic comparison of -- OccName rather than comparing Uniques -- -- NB: 'minimum' uses Ord, and (Ord OccName) works lexicographically -- chooseOrphanAnchor local_names | isEmptyNameSet local_names = IsOrphan | otherwise = NotOrphan (minimum occs) where occs = map nameOccName $ nonDetEltsUniqSet local_names -- It's OK to use nonDetEltsUniqSet here, see comments above instance Binary IsOrphan where put_ bh IsOrphan = putByte bh 0 put_ bh (NotOrphan n) = do putByte bh 1 put_ bh n get bh = do h <- getByte bh case h of 0 -> return IsOrphan _ -> do n <- get bh return $ NotOrphan n {- Note [Orphans] ~~~~~~~~~~~~~~ Class instances, rules, and family instances are divided into orphans and non-orphans. Roughly speaking, an instance/rule is an orphan if its left hand side mentions nothing defined in this module. Orphan-hood has two major consequences * A module that contains orphans is called an "orphan module". If the module being compiled depends (transitively) on an orphan module M, then M.hi is read in regardless of whether M is otherwise needed. This is to ensure that we don't miss any instance decls in M. But it's painful, because it means we need to keep track of all the orphan modules below us. * The "visible orphan modules" are all the orphan modules in the transitive closure of the imports of this module. * During instance lookup, we filter orphan instances depending on whether or not the instance is in a visible orphan module. * A non-orphan is not finger-printed separately. 
Instead, for fingerprinting purposes it is treated as part of the entity it mentions on the LHS. For example data T = T1 | T2 instance Eq T where .... The instance (Eq T) is incorporated as part of T's fingerprint. In contrast, orphans are all fingerprinted together in the mi_orph_hash field of the ModIface. See GHC.Iface.Recomp.addFingerprints. Orphan-hood is computed * For class instances: when we make a ClsInst in GHC.Core.InstEnv.mkLocalClsInst (because it is needed during instance lookup) See Note [When exactly is an instance decl an orphan?] in GHC.Core.InstEnv * For rules when we generate a CoreRule (GHC.Core.Rules.mkRule) * For family instances: when we generate an IfaceFamInst (GHC.Iface.Make.instanceToIfaceInst) Orphan-hood is persisted into interface files, in ClsInst, FamInst, and CoreRules. -} {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ The CoreRule type and its friends are dealt with mainly in GHC.Core.Rules, but GHC.Core.FVs, GHC.Core.Subst, GHC.Core.Ppr, GHC.Core.Tidy also inspect the representation. -} -- | A 'CoreRule' is: -- -- * \"Local\" if the function it is a rule for is defined in the -- same module as the rule itself. 
-- -- * \"Orphan\" if nothing on the LHS is defined in the same module -- as the rule itself data CoreRule = Rule { ru_name :: RuleName, -- ^ Name of the rule, for communication with the user ru_act :: Activation, -- ^ When the rule is active -- Rough-matching stuff -- see comments with InstEnv.ClsInst( is_cls, is_rough ) ru_fn :: !Name, -- ^ Name of the 'GHC.Types.Id.Id' at the head of this rule ru_rough :: [Maybe Name], -- ^ Name at the head of each argument to the left hand side -- Proper-matching stuff -- see comments with InstEnv.ClsInst( is_tvs, is_tys ) ru_bndrs :: [CoreBndr], -- ^ Variables quantified over ru_args :: [CoreExpr], -- ^ Left hand side arguments -- And the right-hand side ru_rhs :: CoreExpr, -- ^ Right hand side of the rule -- Occurrence info is guaranteed correct -- See Note [OccInfo in unfoldings and rules] -- Locality ru_auto :: Bool, -- ^ @True@ <=> this rule is auto-generated -- (notably by Specialise or SpecConstr) -- @False@ <=> generated at the user's behest -- See Note [Trimming auto-rules] in "GHC.Iface.Tidy" -- for the sole purpose of this field. ru_origin :: !Module, -- ^ 'Module' the rule was defined in, used -- to test if we should see an orphan rule. ru_orphan :: !IsOrphan, -- ^ Whether or not the rule is an orphan. ru_local :: Bool -- ^ @True@ iff the fn at the head of the rule is -- defined in the same module as the rule -- and is not an implicit 'Id' (like a record selector, -- class operation, or data constructor). This -- is different from 'ru_orphan', where a rule -- can avoid being an orphan if *any* Name in -- LHS of the rule was defined in the same -- module as the rule. } -- | Built-in rules are used for constant folding -- and suchlike. They have no free variables. -- A built-in rule is always visible (there is no such thing as -- an orphan built-in rule.) 
| BuiltinRule { ru_name :: RuleName, -- ^ As above ru_fn :: Name, -- ^ As above ru_nargs :: Int, -- ^ Number of arguments that 'ru_try' consumes, -- if it fires, including type arguments ru_try :: RuleFun -- ^ This function does the rewrite. If it is given too many -- arguments, it simply discards them; the returned 'CoreExpr' -- is just the rewrite of 'ru_fn' applied to the first 'ru_nargs' args } -- See Note [Extra args in the target] in GHC.Core.Rules type RuleFun = RuleOpts -> InScopeEnv -> Id -> [CoreExpr] -> Maybe CoreExpr -- | The 'InScopeSet' in the 'InScopeEnv' is a /superset/ of variables that are -- currently in scope. See Note [The InScopeSet invariant]. data InScopeEnv = ISE InScopeSet IdUnfoldingFun type IdUnfoldingFun = Id -> Unfolding -- A function that embodies how to unfold an Id if you need -- to do that in the Rule. The reason we need to pass this info in -- is that whether an Id is unfoldable depends on the simplifier phase isBuiltinRule :: CoreRule -> Bool isBuiltinRule (BuiltinRule {}) = True isBuiltinRule _ = False isAutoRule :: CoreRule -> Bool isAutoRule (BuiltinRule {}) = False isAutoRule (Rule { ru_auto = is_auto }) = is_auto -- | The number of arguments the 'ru_fn' must be applied -- to before the rule can match on it ruleArity :: CoreRule -> Int ruleArity (BuiltinRule {ru_nargs = n}) = n ruleArity (Rule {ru_args = args}) = length args ruleName :: CoreRule -> RuleName ruleName = ru_name ruleModule :: CoreRule -> Maybe Module ruleModule Rule { ru_origin } = Just ru_origin ruleModule BuiltinRule {} = Nothing ruleActivation :: CoreRule -> Activation ruleActivation (BuiltinRule { }) = AlwaysActive ruleActivation (Rule { ru_act = act }) = act -- | The 'Name' of the 'GHC.Types.Id.Id' at the head of the rule left hand side ruleIdName :: CoreRule -> Name ruleIdName = ru_fn isLocalRule :: CoreRule -> Bool isLocalRule = ru_local -- | Set the 'Name' of the 'GHC.Types.Id.Id' at the head of the rule left hand side setRuleIdName :: Name -> CoreRule -> 
CoreRule setRuleIdName nm ru = ru { ru_fn = nm } {- ************************************************************************ * * Unfoldings * * ************************************************************************ The @Unfolding@ type is declared here to avoid numerous loops Note [Never put `OtherCon` unfoldings on lambda binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Based on #21496 we never attach unfoldings of any kind to lambda binders. It's just too easy for the call site to change and invalidate the unfolding. E.g. the caller of the lambda drops a seq (e.g. because the lambda is strict in its binder) which in turn makes the OtherCon[] unfolding a lie. So unfoldings on lambda binders can never really be trusted when on lambda binders if there is the chance of the call site to change. So it's easiest to just never attach any to lambda binders to begin with, as well as stripping them off if we e.g. float out an expression while abstracting over some arguments. -} -- | Records the /unfolding/ of an identifier, which is approximately the form the -- identifier would have if we substituted its definition in for the identifier. -- This type should be treated as abstract everywhere except in "GHC.Core.Unfold" data Unfolding = NoUnfolding -- ^ We have no information about the unfolding. | BootUnfolding -- ^ We have no information about the unfolding, because -- this 'Id' came from an @hi-boot@ file. -- See Note [Inlining and hs-boot files] in "GHC.CoreToIface" -- for what this is used for. | OtherCon [AltCon] -- ^ It ain't one of these constructors. -- @OtherCon xs@ also indicates that something has been evaluated -- and hence there's no point in re-evaluating it. -- @OtherCon []@ is used even for non-data-type values -- to indicate evaluated-ness. Notably: -- -- > data C = C !(Int -> Int) -- > case x of { C f -> ... } -- -- Here, @f@ gets an @OtherCon []@ unfolding. 
| DFunUnfolding { -- The Unfolding of a DFunId -- See Note [DFun unfoldings] -- df = /\a1..am. \d1..dn. MkD t1 .. tk -- (op1 a1..am d1..dn) -- (op2 a1..am d1..dn) df_bndrs :: [Var], -- The bound variables [a1..m],[d1..dn] df_con :: DataCon, -- The dictionary data constructor (never a newtype datacon) df_args :: [CoreExpr] -- Args of the data con: types, superclasses and methods, } -- in positional order | CoreUnfolding { -- An unfolding for an Id with no pragma, -- or perhaps a NOINLINE pragma -- (For NOINLINE, the phase, if any, is in the -- InlinePragInfo for this Id.) uf_tmpl :: CoreExpr, -- The unfolding itself (aka "template") -- Always occ-analysed; -- See Note [OccInfo in unfoldings and rules] uf_src :: UnfoldingSource, -- Where the unfolding came from uf_is_top :: Bool, -- True <=> top level binding uf_cache :: UnfoldingCache, -- Cache of flags computable from the expr -- See Note [Tying the 'CoreUnfolding' knot] uf_guidance :: UnfoldingGuidance -- Tells about the *size* of the template. } -- ^ An unfolding with redundant cached information. Parameters: -- -- uf_tmpl: Template used to perform unfolding; -- NB: Occurrence info is guaranteed correct: -- see Note [OccInfo in unfoldings and rules] -- -- uf_is_top: Is this a top level binding? -- -- uf_is_value: 'exprIsHNF' template (cached); it is ok to discard a 'seq' on -- this variable -- -- uf_is_work_free: Does this waste only a little work if we expand it inside an inlining? -- Basically this is a cached version of 'exprIsWorkFree' -- -- uf_guidance: Tells us about the /size/ of the unfolding template -- | Properties of a 'CoreUnfolding' that could be computed on-demand from its template. 
-- See Note [UnfoldingCache] data UnfoldingCache = UnfoldingCache { uf_is_value :: !Bool, -- exprIsHNF template (cached); it is ok to discard -- a `seq` on this variable uf_is_conlike :: !Bool, -- True <=> applicn of constructor or CONLIKE function -- Cached version of exprIsConLike uf_is_work_free :: !Bool, -- True <=> doesn't waste (much) work to expand -- inside an inlining -- Cached version of exprIsCheap uf_expandable :: !Bool -- True <=> can expand in RULE matching -- Cached version of exprIsExpandable } deriving (Eq) -- | 'UnfoldingGuidance' says when unfolding should take place data UnfoldingGuidance = UnfWhen { -- Inline without thinking about the *size* of the uf_tmpl -- Used (a) for small *and* cheap unfoldings -- (b) for INLINE functions -- See Note [INLINE for small functions] in GHC.Core.Unfold ug_arity :: Arity, -- Number of value arguments expected ug_unsat_ok :: Bool, -- True <=> ok to inline even if unsaturated ug_boring_ok :: Bool -- True <=> ok to inline even if the context is boring -- So True,True means "always" } | UnfIfGoodArgs { -- Arose from a normal Id; the info here is the -- result of a simple analysis of the RHS ug_args :: [Int], -- Discount if the argument is evaluated. -- (i.e., a simplification will definitely -- be possible). One elt of the list per *value* arg. ug_size :: Int, -- The "size" of the unfolding. ug_res :: Int -- Scrutinee discount: the discount to subtract if the thing is in } -- a context (case (thing args) of ...), -- (where there are the right number of arguments.) | UnfNever -- The RHS is big, so don't inline it deriving (Eq) {- Note [UnfoldingCache] ~~~~~~~~~~~~~~~~~~~~~~~~ The UnfoldingCache field of an Unfolding holds four (strict) booleans, all derived from the uf_tmpl field of the unfolding. 
* We serialise the UnfoldingCache to and from interface files, for reasons described in Note [Tying the 'CoreUnfolding' knot] in GHC.IfaceToCore * Because it is a strict data type, we must be careful not to pattern-match on it until we actually want its values. E.g. GHC.Core.Unfold.callSiteInline/tryUnfolding are careful not to force it unnecessarily. Just saves a bit of work. * When `seq`ing Core to eliminate space leaks, it suffices to `seq` on the cache, but not its fields, because it is strict in all fields. Note [Historical note: unfoldings for wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to have a nice clever scheme in interface files for wrappers. A wrapper's unfolding can be reconstructed from its worker's id and its strictness. This decreased .hi file size (sometimes significantly, for modules like GHC.Classes with many high-arity w/w splits) and had a slight corresponding effect on compile times. However, when we added the second demand analysis, this scheme led to some Core lint errors. The second analysis could change the strictness signatures, which sometimes resulted in a wrapper's regenerated unfolding applying the wrapper to too many arguments. Instead of repairing the clever .hi scheme, we abandoned it in favor of simplicity. The .hi sizes are usually insignificant (excluding the +1M for base libraries), and compile time barely increases (~+1% for nofib). The nicer upshot is that the UnfoldingSource no longer mentions an Id, so, eg, substitutions need not traverse them. Note [DFun unfoldings] ~~~~~~~~~~~~~~~~~~~~~~ The Arity in a DFunUnfolding is the total number of args (type and value) that the DFun needs to produce a dictionary. That's not necessarily related to the ordinary arity of the dfun Id, esp if the class has one method, so the dictionary is represented by a newtype. Example class C a where { op :: a -> Int } instance C a => C [a] where op xs = op (head xs) The instance translates to $dfCList :: forall a. 
C a => C [a] -- Arity 2! $dfCList = /\a.\d. $copList {a} d |> co $copList :: forall a. C a => [a] -> Int -- Arity 2! $copList = /\a.\d.\xs. op {a} d (head xs) Now we might encounter (op (dfCList {ty} d) a1 a2) and we want the (op (dfList {ty} d)) rule to fire, because $dfCList has all its arguments, even though its (value) arity is 2. That's why we record the number of expected arguments in the DFunUnfolding. Note that although it's an Arity, it's most convenient for it to give the *total* number of arguments, both type and value. See the use site in exprIsConApp_maybe. -} -- Constants for the UnfWhen constructor needSaturated, unSaturatedOk :: Bool needSaturated = False unSaturatedOk = True boringCxtNotOk, boringCxtOk :: Bool boringCxtOk = True boringCxtNotOk = False ------------------------------------------------ noUnfolding :: Unfolding -- ^ There is no known 'Unfolding' evaldUnfolding :: Unfolding -- ^ This unfolding marks the associated thing as being evaluated noUnfolding = NoUnfolding evaldUnfolding = OtherCon [] -- | There is no known 'Unfolding', because this came from an -- hi-boot file. 
bootUnfolding :: Unfolding bootUnfolding = BootUnfolding mkOtherCon :: [AltCon] -> Unfolding mkOtherCon = OtherCon -- | Retrieves the template of an unfolding: panics if none is known unfoldingTemplate :: Unfolding -> CoreExpr unfoldingTemplate = uf_tmpl -- | Retrieves the template of an unfolding if possible -- maybeUnfoldingTemplate is used mainly when specialising, and we do -- want to specialise DFuns, so it's important to return a template -- for DFunUnfoldings maybeUnfoldingTemplate :: Unfolding -> Maybe CoreExpr maybeUnfoldingTemplate (CoreUnfolding { uf_tmpl = expr }) = Just expr maybeUnfoldingTemplate (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }) = Just (mkLams bndrs (mkApps (Var (dataConWorkId con)) args)) maybeUnfoldingTemplate _ = Nothing -- | The constructors that the unfolding could never be: -- returns @[]@ if no information is available otherCons :: Unfolding -> [AltCon] otherCons (OtherCon cons) = cons otherCons _ = [] -- | Determines if it is certainly the case that the unfolding will -- yield a value (something in HNF): returns @False@ if unsure isValueUnfolding :: Unfolding -> Bool -- Returns False for OtherCon isValueUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_value cache isValueUnfolding (DFunUnfolding {}) = True isValueUnfolding _ = False -- | Determines if it possibly the case that the unfolding will -- yield a value. 
Unlike 'isValueUnfolding' it returns @True@ -- for 'OtherCon' isEvaldUnfolding :: Unfolding -> Bool -- Returns True for OtherCon isEvaldUnfolding (OtherCon _) = True isEvaldUnfolding (DFunUnfolding {}) = True isEvaldUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_value cache isEvaldUnfolding _ = False -- | @True@ if the unfolding is a constructor application, the application -- of a CONLIKE function or 'OtherCon' isConLikeUnfolding :: Unfolding -> Bool isConLikeUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_conlike cache isConLikeUnfolding _ = False -- | Is the thing we will unfold into certainly cheap? isCheapUnfolding :: Unfolding -> Bool isCheapUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_work_free cache isCheapUnfolding _ = False isExpandableUnfolding :: Unfolding -> Bool isExpandableUnfolding (CoreUnfolding { uf_cache = cache }) = uf_expandable cache isExpandableUnfolding _ = False expandUnfolding_maybe :: Unfolding -> Maybe CoreExpr -- Expand an expandable unfolding; this is used in rule matching -- See Note [Expanding variables] in GHC.Core.Rules -- The key point here is that CONLIKE things can be expanded expandUnfolding_maybe (CoreUnfolding { uf_cache = cache, uf_tmpl = rhs }) | uf_expandable cache = Just rhs expandUnfolding_maybe _ = Nothing isCompulsoryUnfolding :: Unfolding -> Bool isCompulsoryUnfolding (CoreUnfolding { uf_src = src }) = isCompulsorySource src isCompulsoryUnfolding _ = False isStableUnfolding :: Unfolding -> Bool -- True of unfoldings that should not be overwritten -- by a CoreUnfolding for the RHS of a let-binding isStableUnfolding (CoreUnfolding { uf_src = src }) = isStableSource src isStableUnfolding (DFunUnfolding {}) = True isStableUnfolding _ = False isStableUserUnfolding :: Unfolding -> Bool -- True of unfoldings that arise from an INLINE or INLINEABLE pragma isStableUserUnfolding (CoreUnfolding { uf_src = src }) = isStableUserSource src isStableUserUnfolding _ = False isStableSystemUnfolding :: Unfolding 
-> Bool -- True of unfoldings that arise from an INLINE or INLINEABLE pragma isStableSystemUnfolding (CoreUnfolding { uf_src = src }) = isStableSystemSource src isStableSystemUnfolding _ = False isInlineUnfolding :: Unfolding -> Bool -- ^ True of a /stable/ unfolding that is -- (a) always inlined; that is, with an `UnfWhen` guidance, or -- (b) a DFunUnfolding which never needs to be inlined isInlineUnfolding (CoreUnfolding { uf_src = src, uf_guidance = guidance }) | isStableSource src , UnfWhen {} <- guidance = True isInlineUnfolding (DFunUnfolding {}) = True -- Default case isInlineUnfolding _ = False -- | Only returns False if there is no unfolding information available at all hasSomeUnfolding :: Unfolding -> Bool hasSomeUnfolding NoUnfolding = False hasSomeUnfolding BootUnfolding = False hasSomeUnfolding _ = True isBootUnfolding :: Unfolding -> Bool isBootUnfolding BootUnfolding = True isBootUnfolding _ = False neverUnfoldGuidance :: UnfoldingGuidance -> Bool neverUnfoldGuidance UnfNever = True neverUnfoldGuidance _ = False hasCoreUnfolding :: Unfolding -> Bool -- An unfolding "has Core" if it contains a Core expression, which -- may mention free variables. See Note [Fragile unfoldings] hasCoreUnfolding (CoreUnfolding {}) = True hasCoreUnfolding (DFunUnfolding {}) = True hasCoreUnfolding _ = False -- NoUnfolding, BootUnfolding, OtherCon have no Core canUnfold :: Unfolding -> Bool canUnfold (CoreUnfolding { uf_guidance = g }) = not (neverUnfoldGuidance g) canUnfold _ = False isBetterUnfoldingThan :: Unfolding -> Unfolding -> Bool -- See Note [Better unfolding] isBetterUnfoldingThan NoUnfolding _ = False isBetterUnfoldingThan BootUnfolding _ = False isBetterUnfoldingThan (CoreUnfolding {uf_cache = uc1}) unf2 = case unf2 of CoreUnfolding {uf_cache = uc2} -> uf_is_value uc1 && not (uf_is_value uc2) OtherCon _ -> uf_is_value uc1 _ -> True -- Default case: CoreUnfolding better than NoUnfolding etc -- Better than DFunUnfolding? I don't care. 
isBetterUnfoldingThan (DFunUnfolding {}) unf2 | DFunUnfolding {} <- unf2 = False | otherwise = True isBetterUnfoldingThan (OtherCon cs1) unf2 = case unf2 of CoreUnfolding {uf_cache = uc} -- If unf1 is OtherCon and unf2 is -> not (uf_is_value uc) -- just a thunk, unf1 is better OtherCon cs2 -> not (null cs1) && null cs2 -- A bit crude DFunUnfolding {} -> False NoUnfolding -> True BootUnfolding -> True {- Note [Fragile unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An unfolding is "fragile" if it mentions free variables (and hence would need substitution) or might be affected by optimisation. The non-fragile ones are NoUnfolding, BootUnfolding OtherCon {} If we know this binder (say a lambda binder) will be bound to an evaluated thing, we want to retain that info in simpleOptExpr; see #13077. We consider even a StableUnfolding as fragile, because it needs substitution. Note [Better unfolding] ~~~~~~~~~~~~~~~~~~~~~~~ (unf1 `isBetterUnfoldingThan` unf2) is used when we have let x = in -- unf2 let $j y = ...x... in case x of K a -> ...$j v.... At the /call site/ of $j, `x` has a better unfolding than it does at the /defnition site/ of $j; so we are keener to inline $j. See Note [Inlining join points] in GHC.Core.Opt.Simplify.Inline for discussion. The notion of "better" is encapsulated here. Note [Stable unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~ When you say {-# INLINE f #-} f x = you intend that calls (f e) are replaced by [e/x] So we should capture (\x.) in the Unfolding of 'f', and never meddle with it. Meanwhile, we can optimise to our heart's content, leaving the original unfolding intact in Unfolding of 'f'. For example all xs = foldr (&&) True xs any p = all . map p {-# INLINE any #-} We optimise any's RHS fully, but leave the stable unfolding for `any` saying "all . map p", which deforests well at the call site. So INLINE pragma gives rise to a stable unfolding, which captures the original RHS. 
Moreover, it's only used when 'f' is applied to the specified number of arguments; that is, the number of argument on the LHS of the '=' sign in the original source definition. For example, (.) is now defined in the libraries like this {-# INLINE (.) #-} (.) f g = \x -> f (g x) so that it'll inline when applied to two arguments. If 'x' appeared on the left, thus (.) f g x = f (g x) it'd only inline when applied to three arguments. This slightly-experimental change was requested by Roman, but it seems to make sense. Note [OccInfo in unfoldings and rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In unfoldings and rules, we guarantee that the template is occ-analysed, so that the occurrence info on the binders is correct. That way, when the Simplifier inlines an unfolding, it doesn't need to occ-analysis it first. (The Simplifier is designed to simplify occ-analysed expressions.) Given this decision it's vital that we do *always* do it. * If we don't, we may get more simplifier iterations than necessary, because once-occ info isn't there * More seriously, we may get an infinite loop if there's a Rec without a loop breaker marked. * Or we may get code that mentions variables not in scope: #22761 e.g. Suppose we have a stable unfolding : \y. let z = p+1 in 3 Then the pre-simplifier occ-anal will occ-anal the unfolding (redundantly perhaps, but we need its free vars); this will not report the use of `p`; so p's binding will be discarded, and yet `p` is still mentioned. Better to occ-anal the unfolding at birth, which will drop the z-binding as dead code. (Remember, it's the occurrence analyser that drops dead code.) * Another example is #8892: \x -> letrec { f = ...g...; g* = f } in body where g* is (for some strange reason) the loop breaker. If we don't occ-anal it when reading it in, we won't mark g as a loop breaker, and we may inline g entirely in body, dropping its binding, and leaving the occurrence in f out of scope. 
This happened in #8892, where the unfolding in question was a DFun unfolding. ************************************************************************ * * AltCon * * ************************************************************************ -} -- The Ord is needed for the FiniteMap used in the lookForConstructor -- in GHC.Core.Opt.Simplify.Env. If you declared that lookForConstructor -- *ignores* constructor-applications with LitArg args, then you could get rid -- of this Ord. instance Outputable AltCon where ppr (DataAlt dc) = ppr dc ppr (LitAlt lit) = ppr lit ppr DEFAULT = text "__DEFAULT" cmpAlt :: Alt a -> Alt a -> Ordering cmpAlt (Alt con1 _ _) (Alt con2 _ _) = con1 `cmpAltCon` con2 ltAlt :: Alt a -> Alt a -> Bool ltAlt a1 a2 = (a1 `cmpAlt` a2) == LT cmpAltCon :: AltCon -> AltCon -> Ordering -- ^ Compares 'AltCon's within a single list of alternatives -- DEFAULT comes out smallest, so that sorting by AltCon puts -- alternatives in the order required: see Note [Case expression invariants] cmpAltCon DEFAULT DEFAULT = EQ cmpAltCon DEFAULT _ = LT cmpAltCon (DataAlt d1) (DataAlt d2) = dataConTag d1 `compare` dataConTag d2 cmpAltCon (DataAlt _) DEFAULT = GT cmpAltCon (LitAlt l1) (LitAlt l2) = l1 `compare` l2 cmpAltCon (LitAlt _) DEFAULT = GT cmpAltCon con1 con2 = pprPanic "cmpAltCon" (ppr con1 $$ ppr con2) {- ************************************************************************ * * \subsection{Useful synonyms} * * ************************************************************************ Note [CoreProgram] ~~~~~~~~~~~~~~~~~~ The top level bindings of a program, a CoreProgram, are represented as a list of CoreBind * Later bindings in the list can refer to earlier ones, but not vice versa. So this is OK NonRec { x = 4 } Rec { p = ...q...x... ; q = ...p...x } Rec { f = ...p..x..f.. } NonRec { g = ..f..q...x.. } But it would NOT be ok for 'f' to refer to 'g'. 
* The occurrence analyser does strongly-connected component analysis on each Rec binding, and splits it into a sequence of smaller bindings where possible. So the program typically starts life as a single giant Rec, which is then dependency-analysed into smaller chunks. -} -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint type CoreProgram = [CoreBind] -- See Note [CoreProgram] -- | The common case for the type of binders and variables when -- we are manipulating the Core language within GHC type CoreBndr = Var -- | Expressions where binders are 'CoreBndr's type CoreExpr = Expr CoreBndr -- | Argument expressions where binders are 'CoreBndr's type CoreArg = Arg CoreBndr -- | Binding groups where binders are 'CoreBndr's type CoreBind = Bind CoreBndr -- | Case alternatives where binders are 'CoreBndr's type CoreAlt = Alt CoreBndr {- ************************************************************************ * * \subsection{Tagging} * * ************************************************************************ -} -- | Binders are /tagged/ with a t data TaggedBndr t = TB CoreBndr t -- TB for "tagged binder" type TaggedBind t = Bind (TaggedBndr t) type TaggedExpr t = Expr (TaggedBndr t) type TaggedArg t = Arg (TaggedBndr t) type TaggedAlt t = Alt (TaggedBndr t) instance Outputable b => Outputable (TaggedBndr b) where ppr (TB b l) = char '<' <> ppr b <> comma <> ppr l <> char '>' deTagExpr :: TaggedExpr t -> CoreExpr deTagExpr (Var v) = Var v deTagExpr (Lit l) = Lit l deTagExpr (Type ty) = Type ty deTagExpr (Coercion co) = Coercion co deTagExpr (App e1 e2) = App (deTagExpr e1) (deTagExpr e2) deTagExpr (Lam (TB b _) e) = Lam b (deTagExpr e) deTagExpr (Let bind body) = Let (deTagBind bind) (deTagExpr body) deTagExpr (Case e (TB b _) ty alts) = Case (deTagExpr e) b ty (map deTagAlt alts) deTagExpr (Tick t e) = Tick t (deTagExpr e) deTagExpr (Cast e co) = Cast (deTagExpr e) co deTagBind :: TaggedBind t -> CoreBind 
deTagBind (NonRec (TB b _) rhs) = NonRec b (deTagExpr rhs) deTagBind (Rec prs) = Rec [(b, deTagExpr rhs) | (TB b _, rhs) <- prs] deTagAlt :: TaggedAlt t -> CoreAlt deTagAlt (Alt con bndrs rhs) = Alt con [b | TB b _ <- bndrs] (deTagExpr rhs) {- ************************************************************************ * * \subsection{Core-constructing functions with checking} * * ************************************************************************ -} -- | Apply a list of argument expressions to a function expression in a nested fashion. Prefer to -- use 'GHC.Core.Make.mkCoreApps' if possible mkApps :: Expr b -> [Arg b] -> Expr b -- | Apply a list of type argument expressions to a function expression in a nested fashion mkTyApps :: Expr b -> [Type] -> Expr b -- | Apply a list of coercion argument expressions to a function expression in a nested fashion mkCoApps :: Expr b -> [Coercion] -> Expr b -- | Apply a list of type or value variables to a function expression in a nested fashion mkVarApps :: Expr b -> [Var] -> Expr b -- | Apply a list of argument expressions to a data constructor in a nested fashion. Prefer to -- use 'GHC.Core.Make.mkCoreConApps' if possible mkConApp :: DataCon -> [Arg b] -> Expr b mkApps f args = foldl' App f args mkCoApps f args = foldl' (\ e a -> App e (Coercion a)) f args mkVarApps f vars = foldl' (\ e a -> App e (varToCoreExpr a)) f vars mkConApp con args = mkApps (Var (dataConWorkId con)) args mkTyApps f args = foldl' (\ e a -> App e (mkTyArg a)) f args mkConApp2 :: DataCon -> [Type] -> [Var] -> Expr b mkConApp2 con tys arg_ids = Var (dataConWorkId con) `mkApps` map Type tys `mkApps` map varToCoreExpr arg_ids mkTyArg :: Type -> Expr b mkTyArg ty | Just co <- isCoercionTy_maybe ty = Coercion co | otherwise = Type ty -- | Create a machine integer literal expression of type @Int#@ from an @Integer@. 
-- If you want an expression of type @Int@ use 'GHC.Core.Make.mkIntExpr' mkIntLit :: Platform -> Integer -> Expr b mkIntLit platform n = Lit (mkLitInt platform n) -- | Create a machine integer literal expression of type @Int#@ from an -- @Integer@, wrapping if necessary. -- If you want an expression of type @Int@ use 'GHC.Core.Make.mkIntExpr' mkIntLitWrap :: Platform -> Integer -> Expr b mkIntLitWrap platform n = Lit (mkLitIntWrap platform n) -- | Create a machine word literal expression of type @Word#@ from an @Integer@. -- If you want an expression of type @Word@ use 'GHC.Core.Make.mkWordExpr' mkWordLit :: Platform -> Integer -> Expr b mkWordLit platform w = Lit (mkLitWord platform w) -- | Create a machine word literal expression of type @Word#@ from an -- @Integer@, wrapping if necessary. -- If you want an expression of type @Word@ use 'GHC.Core.Make.mkWordExpr' mkWordLitWrap :: Platform -> Integer -> Expr b mkWordLitWrap platform w = Lit (mkLitWordWrap platform w) mkWord8Lit :: Integer -> Expr b mkWord8Lit w = Lit (mkLitWord8 w) mkWord32LitWord32 :: Word32 -> Expr b mkWord32LitWord32 w = Lit (mkLitWord32 (toInteger w)) mkWord64LitWord64 :: Word64 -> Expr b mkWord64LitWord64 w = Lit (mkLitWord64 (toInteger w)) mkInt64LitInt64 :: Int64 -> Expr b mkInt64LitInt64 w = Lit (mkLitInt64 (toInteger w)) -- | Create a machine character literal expression of type @Char#@. -- If you want an expression of type @Char@ use 'GHC.Core.Make.mkCharExpr' mkCharLit :: Char -> Expr b -- | Create a machine string literal expression of type @Addr#@. -- If you want an expression of type @String@ use 'GHC.Core.Make.mkStringExpr' mkStringLit :: String -> Expr b mkCharLit c = Lit (mkLitChar c) mkStringLit s = Lit (mkLitString s) -- | Create a machine single precision literal expression of type @Float#@ from a @Rational@. 
-- If you want an expression of type @Float@ use 'GHC.Core.Make.mkFloatExpr' mkFloatLit :: Rational -> Expr b -- | Create a machine single precision literal expression of type @Float#@ from a @Float@. -- If you want an expression of type @Float@ use 'GHC.Core.Make.mkFloatExpr' mkFloatLitFloat :: Float -> Expr b mkFloatLit f = Lit (mkLitFloat f) mkFloatLitFloat f = Lit (mkLitFloat (toRational f)) -- | Create a machine double precision literal expression of type @Double#@ from a @Rational@. -- If you want an expression of type @Double@ use 'GHC.Core.Make.mkDoubleExpr' mkDoubleLit :: Rational -> Expr b -- | Create a machine double precision literal expression of type @Double#@ from a @Double@. -- If you want an expression of type @Double@ use 'GHC.Core.Make.mkDoubleExpr' mkDoubleLitDouble :: Double -> Expr b mkDoubleLit d = Lit (mkLitDouble d) mkDoubleLitDouble d = Lit (mkLitDouble (toRational d)) -- | Bind all supplied binding groups over an expression in a nested let expression. Assumes -- that the rhs satisfies the let-can-float invariant. Prefer to use -- 'GHC.Core.Make.mkCoreLets' if possible, which does guarantee the invariant mkLets :: [Bind b] -> Expr b -> Expr b -- | Bind all supplied binders over an expression in a nested lambda expression. Prefer to -- use 'GHC.Core.Make.mkCoreLams' if possible mkLams :: [b] -> Expr b -> Expr b mkLams binders body = foldr Lam body binders mkLets binds body = foldr mkLet body binds mkLet :: Bind b -> Expr b -> Expr b -- The desugarer sometimes generates an empty Rec group -- which Lint rejects, so we kill it off right away mkLet (Rec []) body = body mkLet bind body = Let bind body -- | @mkLetNonRec bndr rhs body@ wraps @body@ in a @let@ binding @bndr@. mkLetNonRec :: b -> Expr b -> Expr b -> Expr b mkLetNonRec b rhs body = Let (NonRec b rhs) body -- | @mkLetRec binds body@ wraps @body@ in a @let rec@ with the given set of -- @binds@ if binds is non-empty. 
mkLetRec :: [(b, Expr b)] -> Expr b -> Expr b mkLetRec [] body = body mkLetRec bs body = Let (Rec bs) body -- | Create a binding group where a type variable is bound to a type. -- Per Note [Core type and coercion invariant], -- this can only be used to bind something in a non-recursive @let@ expression mkTyBind :: TyVar -> Type -> CoreBind mkTyBind tv ty = NonRec tv (Type ty) -- | Create a binding group where a type variable is bound to a type. -- Per Note [Core type and coercion invariant], -- this can only be used to bind something in a non-recursive @let@ expression mkCoBind :: CoVar -> Coercion -> CoreBind mkCoBind cv co = NonRec cv (Coercion co) -- | Convert a binder into either a 'Var' or 'Type' 'Expr' appropriately varToCoreExpr :: CoreBndr -> Expr b varToCoreExpr v | isTyVar v = Type (mkTyVarTy v) | isCoVar v = Coercion (mkCoVarCo v) | otherwise = assert (isId v) $ Var v varsToCoreExprs :: [CoreBndr] -> [Expr b] varsToCoreExprs vs = map varToCoreExpr vs {- ************************************************************************ * * Getting a result type * * ************************************************************************ These are defined here to avoid a module loop between GHC.Core.Utils and GHC.Core.FVs -} -- | If the expression is a 'Type', converts. Otherwise, -- panics. NB: This does /not/ convert 'Coercion' to 'CoercionTy'. 
exprToType :: CoreExpr -> Type exprToType (Type ty) = ty exprToType _bad = pprPanic "exprToType" empty {- ************************************************************************ * * \subsection{Simple access functions} * * ************************************************************************ -} -- | Extract every variable by this group bindersOf :: Bind b -> [b] -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint bindersOf (NonRec binder _) = [binder] bindersOf (Rec pairs) = [binder | (binder, _) <- pairs] -- | 'bindersOf' applied to a list of binding groups bindersOfBinds :: [Bind b] -> [b] bindersOfBinds binds = foldr ((++) . bindersOf) [] binds -- We inline this to avoid unknown function calls. {-# INLINE foldBindersOfBindStrict #-} foldBindersOfBindStrict :: (a -> b -> a) -> a -> Bind b -> a foldBindersOfBindStrict f = \z bind -> case bind of NonRec b _rhs -> f z b Rec pairs -> foldl' f z $ map fst pairs {-# INLINE foldBindersOfBindsStrict #-} foldBindersOfBindsStrict :: (a -> b -> a) -> a -> [Bind b] -> a foldBindersOfBindsStrict f = \z binds -> foldl' fold_bind z binds where fold_bind = (foldBindersOfBindStrict f) rhssOfBind :: Bind b -> [Expr b] rhssOfBind (NonRec _ rhs) = [rhs] rhssOfBind (Rec pairs) = [rhs | (_,rhs) <- pairs] rhssOfAlts :: [Alt b] -> [Expr b] rhssOfAlts alts = [e | Alt _ _ e <- alts] -- | Collapse all the bindings in the supplied groups into a single -- list of lhs\/rhs pairs suitable for binding in a 'Rec' binding group flattenBinds :: [Bind b] -> [(b, Expr b)] flattenBinds (NonRec b r : binds) = (b,r) : flattenBinds binds flattenBinds (Rec prs1 : binds) = prs1 ++ flattenBinds binds flattenBinds [] = [] -- | We often want to strip off leading lambdas before getting down to -- business. 
Variants are 'collectTyBinders', 'collectValBinders', -- and 'collectTyAndValBinders' collectBinders :: Expr b -> ([b], Expr b) collectTyBinders :: CoreExpr -> ([TyVar], CoreExpr) collectValBinders :: CoreExpr -> ([Id], CoreExpr) collectTyAndValBinders :: CoreExpr -> ([TyVar], [Id], CoreExpr) -- | Strip off exactly N leading lambdas (type or value). -- Good for use with join points. -- Panic if there aren't enough collectNBinders :: JoinArity -> Expr b -> ([b], Expr b) collectBinders expr = go [] expr where go bs (Lam b e) = go (b:bs) e go bs e = (reverse bs, e) collectTyBinders expr = go [] expr where go tvs (Lam b e) | isTyVar b = go (b:tvs) e go tvs e = (reverse tvs, e) collectValBinders expr = go [] expr where go ids (Lam b e) | isId b = go (b:ids) e go ids body = (reverse ids, body) collectTyAndValBinders expr = (tvs, ids, body) where (tvs, body1) = collectTyBinders expr (ids, body) = collectValBinders body1 collectNBinders orig_n orig_expr = go orig_n [] orig_expr where go 0 bs expr = (reverse bs, expr) go n bs (Lam b e) = go (n-1) (b:bs) e go _ _ _ = pprPanic "collectNBinders" $ int orig_n -- | Strip off exactly N leading value lambdas -- returning all the binders found up to that point -- Return Nothing if there aren't enough collectNValBinders_maybe :: Arity -> CoreExpr -> Maybe ([Var], CoreExpr) collectNValBinders_maybe orig_n orig_expr = go orig_n [] orig_expr where go 0 bs expr = Just (reverse bs, expr) go n bs (Lam b e) | isId b = go (n-1) (b:bs) e | otherwise = go n (b:bs) e go _ _ _ = Nothing -- | Takes a nested application expression and returns the function -- being applied and the arguments to which it is applied collectArgs :: Expr b -> (Expr b, [Arg b]) collectArgs expr = go expr [] where go (App f a) as = go f (a:as) go e as = (e, as) -- | Takes a nested application expression and returns the function -- being applied. Looking through casts and ticks to find it. 
collectFunSimple :: Expr b -> Expr b collectFunSimple expr = go expr where go expr' = case expr' of App f _a -> go f Tick _t e -> go e Cast e _co -> go e e -> e -- | fmap on the body of a lambda. -- wrapLamBody f (\x -> body) == (\x -> f body) wrapLamBody :: (CoreExpr -> CoreExpr) -> CoreExpr -> CoreExpr wrapLamBody f expr = go expr where go (Lam v body) = Lam v $ go body go expr = f expr -- | Attempt to remove the last N arguments of a function call. -- Strip off any ticks or coercions encountered along the way and any -- at the end. stripNArgs :: Word -> Expr a -> Maybe (Expr a) stripNArgs !n (Tick _ e) = stripNArgs n e stripNArgs n (Cast f _) = stripNArgs n f stripNArgs 0 e = Just e stripNArgs n (App f _) = stripNArgs (n - 1) f stripNArgs _ _ = Nothing -- | Like @collectArgs@, but also looks through floatable -- ticks if it means that we can find more arguments. collectArgsTicks :: (CoreTickish -> Bool) -> Expr b -> (Expr b, [Arg b], [CoreTickish]) collectArgsTicks skipTick expr = go expr [] [] where go (App f a) as ts = go f (a:as) ts go (Tick t e) as ts | skipTick t = go e as (t:ts) go e as ts = (e, as, reverse ts) {- ************************************************************************ * * \subsection{Predicates} * * ************************************************************************ At one time we optionally carried type arguments through to runtime. @isRuntimeVar v@ returns if (Lam v _) really becomes a lambda at runtime, i.e. if type applications are actual lambdas because types are kept around at runtime. Similarly isRuntimeArg. -} -- | Will this variable exist at runtime? isRuntimeVar :: Var -> Bool isRuntimeVar = isId -- | Will this argument expression exist at runtime? isRuntimeArg :: CoreExpr -> Bool isRuntimeArg = isValArg -- | Returns @True@ for value arguments, false for type args -- NB: coercions are value arguments (zero width, to be sure, -- like State#, but still value args). 
isValArg :: Expr b -> Bool isValArg e = not (isTypeArg e) -- | Returns @True@ iff the expression is a 'Type' or 'Coercion' -- expression at its top level isTyCoArg :: Expr b -> Bool isTyCoArg (Type {}) = True isTyCoArg (Coercion {}) = True isTyCoArg _ = False -- | Returns @True@ iff the expression is a 'Coercion' -- expression at its top level isCoArg :: Expr b -> Bool isCoArg (Coercion {}) = True isCoArg _ = False -- | Returns @True@ iff the expression is a 'Type' expression at its -- top level. Note this does NOT include 'Coercion's. isTypeArg :: Expr b -> Bool isTypeArg (Type {}) = True isTypeArg _ = False -- | The number of binders that bind values rather than types valBndrCount :: [CoreBndr] -> Int valBndrCount = count isId -- | The number of argument expressions that are values rather than types at their top level valArgCount :: [Arg b] -> Int valArgCount = count isValArg {- ************************************************************************ * * \subsection{Annotated core} * * ************************************************************************ -} -- | Annotated core: allows annotation at every node in the tree type AnnExpr bndr annot = (annot, AnnExpr' bndr annot) -- | A clone of the 'Expr' type but allowing annotation at every tree node data AnnExpr' bndr annot = AnnVar Id | AnnLit Literal | AnnLam bndr (AnnExpr bndr annot) | AnnApp (AnnExpr bndr annot) (AnnExpr bndr annot) | AnnCase (AnnExpr bndr annot) bndr Type [AnnAlt bndr annot] | AnnLet (AnnBind bndr annot) (AnnExpr bndr annot) | AnnCast (AnnExpr bndr annot) (annot, Coercion) -- Put an annotation on the (root of) the coercion | AnnTick CoreTickish (AnnExpr bndr annot) | AnnType Type | AnnCoercion Coercion -- | A clone of the 'Alt' type but allowing annotation at every tree node data AnnAlt bndr annot = AnnAlt AltCon [bndr] (AnnExpr bndr annot) -- | A clone of the 'Bind' type but allowing annotation at every tree node data AnnBind bndr annot = AnnNonRec bndr (AnnExpr bndr annot) | AnnRec 
[(bndr, AnnExpr bndr annot)] -- | Takes a nested application expression and returns the function -- being applied and the arguments to which it is applied collectAnnArgs :: AnnExpr b a -> (AnnExpr b a, [AnnExpr b a]) collectAnnArgs expr = go expr [] where go (_, AnnApp f a) as = go f (a:as) go e as = (e, as) collectAnnArgsTicks :: (CoreTickish -> Bool) -> AnnExpr b a -> (AnnExpr b a, [AnnExpr b a], [CoreTickish]) collectAnnArgsTicks tickishOk expr = go expr [] [] where go (_, AnnApp f a) as ts = go f (a:as) ts go (_, AnnTick t e) as ts | tickishOk t = go e as (t:ts) go e as ts = (e, as, reverse ts) deAnnotate :: AnnExpr bndr annot -> Expr bndr deAnnotate (_, e) = deAnnotate' e deAnnotate' :: AnnExpr' bndr annot -> Expr bndr deAnnotate' (AnnType t) = Type t deAnnotate' (AnnCoercion co) = Coercion co deAnnotate' (AnnVar v) = Var v deAnnotate' (AnnLit lit) = Lit lit deAnnotate' (AnnLam binder body) = Lam binder (deAnnotate body) deAnnotate' (AnnApp fun arg) = App (deAnnotate fun) (deAnnotate arg) deAnnotate' (AnnCast e (_,co)) = Cast (deAnnotate e) co deAnnotate' (AnnTick tick body) = Tick tick (deAnnotate body) deAnnotate' (AnnLet bind body) = Let (deAnnBind bind) (deAnnotate body) deAnnotate' (AnnCase scrut v t alts) = Case (deAnnotate scrut) v t (map deAnnAlt alts) deAnnAlt :: AnnAlt bndr annot -> Alt bndr deAnnAlt (AnnAlt con args rhs) = Alt con args (deAnnotate rhs) deAnnBind :: AnnBind b annot -> Bind b deAnnBind (AnnNonRec var rhs) = NonRec var (deAnnotate rhs) deAnnBind (AnnRec pairs) = Rec [(v,deAnnotate rhs) | (v,rhs) <- pairs] -- | As 'collectBinders' but for 'AnnExpr' rather than 'Expr' collectAnnBndrs :: AnnExpr bndr annot -> ([bndr], AnnExpr bndr annot) collectAnnBndrs e = collect [] e where collect bs (_, AnnLam b body) = collect (b:bs) body collect bs body = (reverse bs, body) -- | As 'collectNBinders' but for 'AnnExpr' rather than 'Expr' collectNAnnBndrs :: Int -> AnnExpr bndr annot -> ([bndr], AnnExpr bndr annot) collectNAnnBndrs orig_n e = collect 
orig_n [] e where collect 0 bs body = (reverse bs, body) collect n bs (_, AnnLam b body) = collect (n-1) (b:bs) body collect _ _ _ = pprPanic "collectNBinders" $ int orig_n ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core.hs-boot0000644000000000000000000000023007346545000020066 0ustar0000000000000000{-# LANGUAGE NoPolyKinds #-} module GHC.Core where import {-# SOURCE #-} GHC.Types.Var data Expr a type CoreBndr = Var type CoreExpr = Expr CoreBndr ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/0000755000000000000000000000000007346545000016576 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Class.hs0000644000000000000000000003446107346545000020207 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -- -- The @Class@ datatype module GHC.Core.Class ( Class, ClassOpItem, ClassATItem(..), TyFamEqnValidityInfo(..), ClassMinimalDef, DefMethInfo, pprDefMethInfo, FunDep, pprFundeps, pprFunDep, mkClass, mkAbstractClass, classTyVars, classArity, classKey, className, classATs, classATItems, classTyCon, classMethods, classOpItems, classBigSig, classExtraBigSig, classTvsFds, classSCTheta, classHasSCs, classAllSelIds, classSCSelId, classSCSelIds, classMinimalDef, classHasFds, isAbstractClass, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCon ( TyCon ) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, PredType ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType ) import GHC.Types.Var import GHC.Types.Name import GHC.Types.Basic import GHC.Types.Unique import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Types.SrcLoc import GHC.Types.Var.Set import GHC.Utils.Outputable import GHC.Data.BooleanFormula (BooleanFormula, mkTrue) import qualified Data.Data as Data {- ************************************************************************ * * \subsection[Class-basic]{@Class@: basic definition} * * ************************************************************************ A @Class@ 
corresponds to a Greek kappa in the static semantics: -} data Class = Class { classTyCon :: TyCon, -- The data type constructor for -- dictionaries of this class -- See Note [ATyCon for classes] in GHC.Core.TyCo.Rep className :: Name, -- Just the cached name of the TyCon classKey :: Unique, -- Cached unique of TyCon classTyVars :: [TyVar], -- The class kind and type variables; -- identical to those of the TyCon -- If you want visibility info, look at the classTyCon -- This field is redundant because it's duplicated in the -- classTyCon, but classTyVars is used quite often, so maybe -- it's a bit faster to cache it here classFunDeps :: [FunDep TyVar], -- The functional dependencies classBody :: ClassBody -- Superclasses, ATs, methods } -- | e.g. -- -- > class C a b c | a b -> c, a c -> b where... -- -- Here fun-deps are [([a,b],[c]), ([a,c],[b])] type FunDep a = ([a],[a]) type ClassOpItem = (Id, DefMethInfo) -- Selector function; contains unfolding -- Default-method info type DefMethInfo = Maybe (Name, DefMethSpec Type) -- Nothing No default method -- Just ($dm, VanillaDM) A polymorphic default method, name $dm -- Just ($gm, GenericDM ty) A generic default method, name $gm, type ty -- The generic dm type is *not* quantified -- over the class variables; ie has the -- class variables free data ClassATItem = ATI TyCon -- See Note [Associated type tyvar names] (Maybe (Type, TyFamEqnValidityInfo)) -- ^ Default associated type (if any) from this template. -- -- As per Note [Associated type defaults], the Type has been renamed -- to use the class tyvars, while the 'TyFamEqnValidityInfo' uses -- the original user-written type variables. -- | Information about a type family equation, used for validity checking -- of closed type family equations and associated type family default equations. -- -- This type exists to delay validity-checking after typechecking type declaration -- groups, to avoid cyclic evaluation inside the typechecking knot. 
-- -- See @Note [Type-checking default assoc decls]@ in "GHC.Tc.TyCl". data TyFamEqnValidityInfo -- | Used for equations which don't need any validity checking, -- for example equations imported from another module. = NoVI -- | Information necessary for validity checking of a type family equation. | VI { vi_loc :: SrcSpan , vi_qtvs :: [TcTyVar] -- ^ LHS quantified type variables , vi_non_user_tvs :: TyVarSet -- ^ non-user-written type variables (for error message reporting) -- -- Example: with -XPolyKinds, typechecking @type instance forall a. F = ()@ -- introduces the kind variable @k@ for the kind of @a@. See #23734. , vi_pats :: [Type] -- ^ LHS patterns , vi_rhs :: Type -- ^ RHS of the equation -- -- NB: for associated type family default declarations, this is the RHS -- *before* applying the substitution from -- Note [Type-checking default assoc decls] in GHC.Tc.TyCl. } type ClassMinimalDef = BooleanFormula Name -- Required methods data ClassBody = AbstractClass | ConcreteClass { -- Superclasses: eg: (F a ~ b, F b ~ G a, Eq a, Show b) -- We need value-level selectors for both the dictionary -- superclasses and the equality superclasses cls_sc_theta :: [PredType], -- Immediate superclasses, cls_sc_sel_ids :: [Id], -- Selector functions to extract the -- superclasses from a -- dictionary of this class -- Associated types cls_ats :: [ClassATItem], -- Associated type families -- Class operations (methods, not superclasses) cls_ops :: [ClassOpItem], -- Ordered by tag -- Minimal complete definition cls_min_def :: ClassMinimalDef } -- TODO: maybe super classes should be allowed in abstract class definitions classMinimalDef :: Class -> ClassMinimalDef classMinimalDef Class{ classBody = ConcreteClass{ cls_min_def = d } } = d classMinimalDef _ = mkTrue -- TODO: make sure this is the right direction {- Note [Associated type defaults] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following is an example of associated type defaults: class C a where data D a r type F x a b :: * type F 
p q r = (p,q)->r -- Default Note that * The TyCons for the associated types *share type variables* with the class, so that we can tell which argument positions should be instantiated in an instance decl. (The first for 'D', the second for 'F'.) * We can have default definitions only for *type* families, not data families * In the default decl, the "patterns" should all be type variables, but (in the source language) they don't need to be the same as in the 'type' decl signature or the class. It's more like a free-standing 'type instance' declaration. * HOWEVER, in the internal ClassATItem we rename the RHS to match the tyConTyVars of the family TyCon. So in the example above we'd get a ClassATItem of ATI F (Just ((x,a) -> b, validity_info) That is, the type stored in the first component of the pair has been renamed to use the class type variables. On the other hand, the TyFamEqnValidityInfo, used for validity checking of the type family equation (considered as a free-standing equation) uses the original types, e.g. involving the type variables 'p', 'q', 'r'. The @mkClass@ function fills in the indirect superclasses. The SrcSpan is for the entire original declaration. 
-} mkClass :: Name -> [TyVar] -> [FunDep TyVar] -> [PredType] -> [Id] -> [ClassATItem] -> [ClassOpItem] -> ClassMinimalDef -> TyCon -> Class mkClass cls_name tyvars fds super_classes superdict_sels at_stuff op_stuff mindef tycon = Class { classKey = nameUnique cls_name, className = cls_name, -- NB: tyConName tycon = cls_name, -- But it takes a module loop to assert it here classTyVars = tyvars, classFunDeps = fds, classBody = ConcreteClass { cls_sc_theta = super_classes, cls_sc_sel_ids = superdict_sels, cls_ats = at_stuff, cls_ops = op_stuff, cls_min_def = mindef }, classTyCon = tycon } mkAbstractClass :: Name -> [TyVar] -> [FunDep TyVar] -> TyCon -> Class mkAbstractClass cls_name tyvars fds tycon = Class { classKey = nameUnique cls_name, className = cls_name, -- NB: tyConName tycon = cls_name, -- But it takes a module loop to assert it here classTyVars = tyvars, classFunDeps = fds, classBody = AbstractClass, classTyCon = tycon } {- Note [Associated type tyvar names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The TyCon of an associated type should use the same variable names as its parent class. Thus class C a b where type F b x a :: * We make F use the same Name for 'a' as C does, and similarly 'b'. The reason for this is when checking instances it's easier to match them up, to ensure they match. Eg instance C Int [d] where type F [d] x Int = .... we should make sure that the first and third args match the instance header. Having the same variables for class and tycon is also used in checkValidRoles (in GHC.Tc.TyCl) when checking a class's roles. ************************************************************************ * * \subsection[Class-selectors]{@Class@: simple selectors} * * ************************************************************************ The rest of these functions are just simple selectors. 
-} classArity :: Class -> Arity classArity clas = length (classTyVars clas) -- Could memoise this classAllSelIds :: Class -> [Id] -- Both superclass-dictionary and method selectors classAllSelIds c@(Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels }}) = sc_sels ++ classMethods c classAllSelIds c = assert (null (classMethods c) ) [] classSCSelIds :: Class -> [Id] -- Both superclass-dictionary and method selectors classSCSelIds (Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels }}) = sc_sels classSCSelIds c = assert (null (classMethods c) ) [] classSCSelId :: Class -> Int -> Id -- Get the n'th superclass selector Id -- where n is 0-indexed, and counts -- *all* superclasses including equalities classSCSelId (Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels } }) n = assert (n >= 0 && lengthExceeds sc_sels n ) sc_sels !! n classSCSelId c n = pprPanic "classSCSelId" (ppr c <+> ppr n) classMethods :: Class -> [Id] classMethods (Class { classBody = ConcreteClass { cls_ops = op_stuff } }) = [op_sel | (op_sel, _) <- op_stuff] classMethods _ = [] classOpItems :: Class -> [ClassOpItem] classOpItems (Class { classBody = ConcreteClass { cls_ops = op_stuff }}) = op_stuff classOpItems _ = [] classATs :: Class -> [TyCon] classATs (Class { classBody = ConcreteClass { cls_ats = at_stuff } }) = [tc | ATI tc _ <- at_stuff] classATs _ = [] classATItems :: Class -> [ClassATItem] classATItems (Class { classBody = ConcreteClass { cls_ats = at_stuff }}) = at_stuff classATItems _ = [] classSCTheta :: Class -> [PredType] classSCTheta (Class { classBody = ConcreteClass { cls_sc_theta = theta_stuff }}) = theta_stuff classSCTheta _ = [] classHasSCs :: Class -> Bool classHasSCs cls = not (null (classSCTheta cls)) classTvsFds :: Class -> ([TyVar], [FunDep TyVar]) classTvsFds c = (classTyVars c, classFunDeps c) classHasFds :: Class -> Bool classHasFds (Class { classFunDeps = fds }) = not (null fds) classBigSig :: Class -> ([TyVar], [PredType], [Id], [ClassOpItem]) 
classBigSig (Class {classTyVars = tyvars, classBody = AbstractClass}) = (tyvars, [], [], []) classBigSig (Class {classTyVars = tyvars, classBody = ConcreteClass { cls_sc_theta = sc_theta, cls_sc_sel_ids = sc_sels, cls_ops = op_stuff }}) = (tyvars, sc_theta, sc_sels, op_stuff) classExtraBigSig :: Class -> ([TyVar], [FunDep TyVar], [PredType], [Id], [ClassATItem], [ClassOpItem]) classExtraBigSig (Class {classTyVars = tyvars, classFunDeps = fundeps, classBody = AbstractClass}) = (tyvars, fundeps, [], [], [], []) classExtraBigSig (Class {classTyVars = tyvars, classFunDeps = fundeps, classBody = ConcreteClass { cls_sc_theta = sc_theta, cls_sc_sel_ids = sc_sels, cls_ats = ats, cls_ops = op_stuff }}) = (tyvars, fundeps, sc_theta, sc_sels, ats, op_stuff) isAbstractClass :: Class -> Bool isAbstractClass Class{ classBody = AbstractClass } = True isAbstractClass _ = False {- ************************************************************************ * * \subsection[Class-instances]{Instance declarations for @Class@} * * ************************************************************************ We compare @Classes@ by their keys (which include @Uniques@). -} instance Eq Class where c1 == c2 = classKey c1 == classKey c2 c1 /= c2 = classKey c1 /= classKey c2 instance Uniquable Class where getUnique c = classKey c instance NamedThing Class where getName clas = className clas instance Outputable Class where ppr c = ppr (getName c) pprDefMethInfo :: DefMethInfo -> SDoc pprDefMethInfo Nothing = empty -- No default method pprDefMethInfo (Just (n, VanillaDM)) = text "Default method" <+> ppr n pprDefMethInfo (Just (n, GenericDM ty)) = text "Generic default method" <+> ppr n <+> dcolon <+> pprType ty pprFundeps :: Outputable a => [FunDep a] -> SDoc pprFundeps [] = empty pprFundeps fds = hsep (vbar : punctuate comma (map pprFunDep fds)) pprFunDep :: Outputable a => FunDep a -> SDoc pprFunDep (us, vs) = hsep [interppSP us, arrow, interppSP vs] instance Data.Data Class where -- don't traverse? 
toConstr _ = abstractConstr "Class" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Class" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Coercion.hs0000644000000000000000000033715607346545000020712 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {- (c) The University of Glasgow 2006 -} -- | Module for (a) type kinds and (b) type coercions, -- as used in System FC. See 'GHC.Core.Expr' for -- more on System FC and how coercions fit into it. -- module GHC.Core.Coercion ( -- * Main data type Coercion, CoercionN, CoercionR, CoercionP, MCoercion(..), MCoercionN, MCoercionR, CoSel(..), FunSel(..), UnivCoProvenance, CoercionHole(..), coHoleCoVar, setCoHoleCoVar, LeftOrRight(..), Var, CoVar, TyCoVar, Role(..), ltRole, -- ** Functions over coercions coVarRType, coVarLType, coVarTypes, coVarKind, coVarTypesRole, coVarRole, coercionType, mkCoercionType, coercionKind, coercionLKind, coercionRKind,coercionKinds, coercionRole, coercionKindRole, -- ** Constructing coercions mkGReflCo, mkGReflMCo, mkReflCo, mkRepReflCo, mkNomReflCo, mkCoVarCo, mkCoVarCos, mkAxInstCo, mkUnbranchedAxInstCo, mkAxInstRHS, mkUnbranchedAxInstRHS, mkAxInstLHS, mkUnbranchedAxInstLHS, mkPiCo, mkPiCos, mkCoCast, mkSymCo, mkTransCo, mkSelCo, mkSelCoResRole, getNthFun, selectFromType, mkLRCo, mkInstCo, mkAppCo, mkAppCos, mkTyConAppCo, mkFunCo, mkFunCo2, mkFunCoNoFTF, mkFunResCo, mkNakedFunCo, mkNakedForAllCo, mkForAllCo, mkHomoForAllCos, mkPhantomCo, mkHoleCo, mkUnivCo, mkSubCo, mkProofIrrelCo, downgradeRole, mkAxiomCo, mkGReflRightCo, mkGReflLeftCo, mkCoherenceLeftCo, mkCoherenceRightCo, mkKindCo, castCoercionKind, castCoercionKind1, castCoercionKind2, mkPrimEqPred, mkReprPrimEqPred, mkPrimEqPredRole, mkNomPrimEqPred, -- ** Decomposition instNewTyCon_maybe, NormaliseStepper, NormaliseStepResult(..), composeSteppers, unwrapNewTypeStepper, topNormaliseNewType_maybe, topNormaliseTypeX, 
decomposeCo, decomposeFunCo, decomposePiCos, getCoVar_maybe, splitAppCo_maybe, splitFunCo_maybe, splitForAllCo_maybe, splitForAllCo_ty_maybe, splitForAllCo_co_maybe, tyConRole, tyConRolesX, tyConRolesRepresentational, setNominalRole_maybe, tyConRoleListX, tyConRoleListRepresentational, funRole, pickLR, isGReflCo, isReflCo, isReflCo_maybe, isGReflCo_maybe, isReflexiveCo, isReflexiveCo_maybe, isReflCoVar_maybe, isGReflMCo, mkGReflLeftMCo, mkGReflRightMCo, mkCoherenceRightMCo, coToMCo, mkTransMCo, mkTransMCoL, mkTransMCoR, mkCastTyMCo, mkSymMCo, mkFunResMCo, mkPiMCos, isReflMCo, checkReflexiveMCo, -- ** Coercion variables mkCoVar, isCoVar, coVarName, setCoVarName, setCoVarUnique, -- ** Free variables tyCoVarsOfCo, tyCoVarsOfCos, coVarsOfCo, tyCoFVsOfCo, tyCoFVsOfCos, tyCoVarsOfCoDSet, coercionSize, anyFreeVarsOfCo, -- ** Substitution CvSubstEnv, emptyCvSubstEnv, lookupCoVar, substCo, substCos, substCoVar, substCoVars, substCoWith, substCoVarBndr, extendTvSubstAndInScope, getCvSubstEnv, -- ** Lifting liftCoSubst, liftCoSubstTyVar, liftCoSubstWith, liftCoSubstWithEx, emptyLiftingContext, extendLiftingContext, extendLiftingContextAndInScope, liftCoSubstVarBndrUsing, isMappedByLC, extendLiftingContextCvSubst, mkSubstLiftingContext, liftingContextSubst, zapLiftingContext, substForAllCoBndrUsingLC, lcLookupCoVar, lcInScopeSet, LiftCoEnv, LiftingContext(..), liftEnvSubstLeft, liftEnvSubstRight, substRightCo, substLeftCo, swapLiftCoEnv, lcSubstLeft, lcSubstRight, -- ** Comparison eqCoercion, eqCoercionX, -- ** Forcing evaluation of coercions seqCo, -- * Pretty-printing pprCo, pprParendCo, pprCoAxiom, pprCoAxBranch, pprCoAxBranchLHS, pprCoAxBranchUser, tidyCoAxBndrsForUser, etaExpandCoAxBranch, -- * Tidying tidyCo, tidyCos, -- * Other promoteCoercion, buildCoercion, multToCo, mkRuntimeRepCo, hasCoercionHoleTy, hasCoercionHoleCo, hasThisCoercionHoleTy, setCoHoleType ) where import {-# SOURCE #-} GHC.CoreToIface (toIfaceTyCon, tidyToIfaceTcArgs) import GHC.Prelude import 
GHC.Iface.Type import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr import GHC.Core.TyCo.Subst import GHC.Core.TyCo.Tidy import GHC.Core.TyCo.Compare import GHC.Core.Type import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Core.Coercion.Axiom import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name hiding ( varName ) import GHC.Types.Basic import GHC.Types.Unique import GHC.Data.FastString import GHC.Data.Pair import GHC.Types.SrcLoc import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Data.List.SetOps import GHC.Data.Maybe import GHC.Types.Unique.FM import GHC.Data.List.Infinite (Infinite (..)) import qualified GHC.Data.List.Infinite as Inf import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Monad (foldM, zipWithM) import Data.Function ( on ) import Data.Char( isDigit ) import qualified Data.Monoid as Monoid import Control.DeepSeq {- %************************************************************************ %* * -- The coercion arguments always *precisely* saturate -- arity of (that branch of) the CoAxiom. If there are -- any left over, we use AppCo. See -- See [Coercion axioms applied to coercions] in GHC.Core.TyCo.Rep \subsection{Coercion variables} %* * %************************************************************************ -} coVarName :: CoVar -> Name coVarName = varName setCoVarUnique :: CoVar -> Unique -> CoVar setCoVarUnique = setVarUnique setCoVarName :: CoVar -> Name -> CoVar setCoVarName = setVarName {- %************************************************************************ %* * Pretty-printing CoAxioms %* * %************************************************************************ Defined here to avoid module loops. CoAxiom is loaded very early on. 
-} etaExpandCoAxBranch :: CoAxBranch -> ([TyVar], [Type], Type) -- Return the (tvs,lhs,rhs) after eta-expanding, -- to the way in which the axiom was originally written -- See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom etaExpandCoAxBranch (CoAxBranch { cab_tvs = tvs , cab_eta_tvs = eta_tvs , cab_lhs = lhs , cab_rhs = rhs }) -- ToDo: what about eta_cvs? = (tvs ++ eta_tvs, lhs ++ eta_tys, mkAppTys rhs eta_tys) where eta_tys = mkTyVarTys eta_tvs pprCoAxiom :: CoAxiom br -> SDoc -- Used in debug-printing only pprCoAxiom ax@(CoAxiom { co_ax_tc = tc, co_ax_branches = branches }) = hang (text "axiom" <+> ppr ax) 2 (braces $ vcat (map (pprCoAxBranchUser tc) (fromBranches branches))) pprCoAxBranchUser :: TyCon -> CoAxBranch -> SDoc -- Used when printing injectivity errors (FamInst.reportInjectivityErrors) -- and inaccessible branches (GHC.Tc.Validity.inaccessibleCoAxBranch) -- This happens in error messages: don't print the RHS of a data -- family axiom, which is meaningless to a user pprCoAxBranchUser tc br | isDataFamilyTyCon tc = pprCoAxBranchLHS tc br | otherwise = pprCoAxBranch tc br pprCoAxBranchLHS :: TyCon -> CoAxBranch -> SDoc -- Print the family-instance equation when reporting -- a conflict between equations (FamInst.conflictInstErr) -- For type families the RHS is important; for data families not so. 
-- Indeed for data families the RHS is a mysterious internal -- type constructor, so we suppress it (#14179) -- See FamInstEnv Note [Family instance overlap conflicts] pprCoAxBranchLHS = ppr_co_ax_branch pp_rhs where pp_rhs _ _ = empty pprCoAxBranch :: TyCon -> CoAxBranch -> SDoc pprCoAxBranch = ppr_co_ax_branch ppr_rhs where ppr_rhs env rhs = equals <+> pprPrecTypeX env topPrec rhs ppr_co_ax_branch :: (TidyEnv -> Type -> SDoc) -> TyCon -> CoAxBranch -> SDoc ppr_co_ax_branch ppr_rhs fam_tc branch = foldr1 (flip hangNotEmpty 2) [ pprUserForAll (mkForAllTyBinders Inferred bndrs') -- See Note [Printing foralls in type family instances] in GHC.Iface.Type , pp_lhs <+> ppr_rhs tidy_env ee_rhs , vcat [ text "-- Defined" <+> pp_loc , ppUnless (null incomps) $ whenPprDebug $ text "-- Incomps:" <+> vcat (map (pprCoAxBranch fam_tc) incomps) ] ] where incomps = coAxBranchIncomps branch loc = coAxBranchSpan branch pp_loc | isGoodSrcSpan loc = text "at" <+> ppr (srcSpanStart loc) | otherwise = text "in" <+> ppr loc -- Eta-expand LHS and RHS types, because sometimes data family -- instances are eta-reduced. -- See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. (ee_tvs, ee_lhs, ee_rhs) = etaExpandCoAxBranch branch pp_lhs = pprIfaceTypeApp topPrec (toIfaceTyCon fam_tc) (tidyToIfaceTcArgs tidy_env fam_tc ee_lhs) (tidy_env, bndrs') = tidyCoAxBndrsForUser emptyTidyEnv ee_tvs tidyCoAxBndrsForUser :: TidyEnv -> [Var] -> (TidyEnv, [Var]) -- Tidy wildcards "_1", "_2" to "_", and do not return them -- in the list of binders to be printed -- This is so that in error messages we see -- forall a. F _ [a] _ = ... -- rather than -- forall a _1 _2. F _1 [a] _2 = ... 
-- -- This is a rather disgusting function -- See Note [Wildcard names] in GHC.Tc.Gen.HsType tidyCoAxBndrsForUser init_env tcvs = (tidy_env, reverse tidy_bndrs) where (tidy_env, tidy_bndrs) = foldl tidy_one (init_env, []) tcvs tidy_one (env@(occ_env, subst), rev_bndrs') bndr | is_wildcard bndr = (env_wild, rev_bndrs') | otherwise = (env', bndr' : rev_bndrs') where (env', bndr') = tidyVarBndr env bndr env_wild = (occ_env, extendVarEnv subst bndr wild_bndr) wild_bndr = setVarName bndr $ tidyNameOcc (varName bndr) (mkTyVarOccFS (fsLit "_")) -- Tidy the binder to "_" is_wildcard :: Var -> Bool is_wildcard tv = case occNameString (getOccName tv) of ('_' : rest) -> all isDigit rest _ -> False {- ********************************************************************* * * MCoercion * * ********************************************************************* -} coToMCo :: Coercion -> MCoercion -- Convert a coercion to a MCoercion, -- It's not clear whether or not isReflexiveCo would be better here -- See #19815 for a bit of data and discussion on this point coToMCo co | isReflCo co = MRefl | otherwise = MCo co checkReflexiveMCo :: MCoercion -> MCoercion checkReflexiveMCo MRefl = MRefl checkReflexiveMCo (MCo co) | isReflexiveCo co = MRefl | otherwise = MCo co -- | Tests if this MCoercion is obviously generalized reflexive -- Guaranteed to work very quickly. 
isGReflMCo :: MCoercion -> Bool isGReflMCo MRefl = True isGReflMCo (MCo co) | isGReflCo co = True isGReflMCo _ = False -- | Make a generalized reflexive coercion mkGReflCo :: Role -> Type -> MCoercionN -> Coercion mkGReflCo r ty mco | isGReflMCo mco = if r == Nominal then Refl ty else GRefl r ty MRefl | otherwise = -- I'd like to have this assert, but sadly it's not true during type -- inference because the types are not fully zonked -- assertPpr (case mco of -- MCo co -> typeKind ty `eqType` coercionLKind co -- MRefl -> True) -- (vcat [ text "ty" <+> ppr ty <+> dcolon <+> ppr (typeKind ty) -- , case mco of -- MCo co -> text "co" <+> ppr co -- <+> dcolon <+> ppr (coercionKind co) -- MRefl -> text "MRefl" -- , callStackDoc ]) $ GRefl r ty mco mkGReflMCo :: HasDebugCallStack => Role -> Type -> CoercionN -> Coercion mkGReflMCo r ty co = mkGReflCo r ty (MCo co) -- | Compose two MCoercions via transitivity mkTransMCo :: MCoercion -> MCoercion -> MCoercion mkTransMCo MRefl co2 = co2 mkTransMCo co1 MRefl = co1 mkTransMCo (MCo co1) (MCo co2) = MCo (mkTransCo co1 co2) mkTransMCoL :: MCoercion -> Coercion -> MCoercion mkTransMCoL MRefl co2 = coToMCo co2 mkTransMCoL (MCo co1) co2 = MCo (mkTransCo co1 co2) mkTransMCoR :: Coercion -> MCoercion -> MCoercion mkTransMCoR co1 MRefl = coToMCo co1 mkTransMCoR co1 (MCo co2) = MCo (mkTransCo co1 co2) -- | Get the reverse of an 'MCoercion' mkSymMCo :: MCoercion -> MCoercion mkSymMCo MRefl = MRefl mkSymMCo (MCo co) = MCo (mkSymCo co) -- | Cast a type by an 'MCoercion' mkCastTyMCo :: Type -> MCoercion -> Type mkCastTyMCo ty MRefl = ty mkCastTyMCo ty (MCo co) = ty `mkCastTy` co mkPiMCos :: [Var] -> MCoercion -> MCoercion mkPiMCos _ MRefl = MRefl mkPiMCos vs (MCo co) = MCo (mkPiCos Representational vs co) mkFunResMCo :: Id -> MCoercionR -> MCoercionR mkFunResMCo _ MRefl = MRefl mkFunResMCo arg_id (MCo co) = MCo (mkFunResCo Representational arg_id co) mkGReflLeftMCo :: Role -> Type -> MCoercionN -> Coercion mkGReflLeftMCo r ty MRefl = 
mkReflCo r ty mkGReflLeftMCo r ty (MCo co) = mkGReflLeftCo r ty co mkGReflRightMCo :: Role -> Type -> MCoercionN -> Coercion mkGReflRightMCo r ty MRefl = mkReflCo r ty mkGReflRightMCo r ty (MCo co) = mkGReflRightCo r ty co -- | Like 'mkCoherenceRightCo', but with an 'MCoercion' mkCoherenceRightMCo :: Role -> Type -> MCoercionN -> Coercion -> Coercion mkCoherenceRightMCo _ _ MRefl co2 = co2 mkCoherenceRightMCo r ty (MCo co) co2 = mkCoherenceRightCo r ty co co2 isReflMCo :: MCoercion -> Bool isReflMCo MRefl = True isReflMCo _ = False {- %************************************************************************ %* * Destructing coercions %* * %************************************************************************ -} -- | This breaks a 'Coercion' with type @T A B C ~ T D E F@ into -- a list of 'Coercion's of kinds @A ~ D@, @B ~ E@ and @E ~ F@. Hence: -- -- > decomposeCo 3 c [r1, r2, r3] = [nth r1 0 c, nth r2 1 c, nth r3 2 c] decomposeCo :: Arity -> Coercion -> Infinite Role -- the roles of the output coercions -> [Coercion] decomposeCo arity co rs = [mkSelCo (SelTyCon n r) co | (n,r) <- [0..(arity-1)] `zip` Inf.toList rs ] -- Remember, SelTyCon is zero-indexed decomposeFunCo :: HasDebugCallStack => Coercion -- Input coercion -> (CoercionN, Coercion, Coercion) -- Expects co :: (s1 %m1-> t1) ~ (s2 %m2-> t2) -- Returns (cow :: m1 ~N m2, co1 :: s1~s2, co2 :: t1~t2) -- actually cow will be a Phantom coercion if the input is a Phantom coercion decomposeFunCo (FunCo { fco_mult = w, fco_arg = co1, fco_res = co2 }) = (w, co1, co2) -- Short-circuits the calls to mkSelCo decomposeFunCo co = assertPpr all_ok (ppr co) $ ( mkSelCo (SelFun SelMult) co , mkSelCo (SelFun SelArg) co , mkSelCo (SelFun SelRes) co ) where Pair s1t1 s2t2 = coercionKind co all_ok = isFunTy s1t1 && isFunTy s2t2 {- Note [Pushing a coercion into a pi-type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have this: (f |> co) t1 .. 
tn Then we want to push the coercion into the arguments, so as to make progress. For example of why you might want to do so, see Note [Respecting definitional equality] in GHC.Core.TyCo.Rep. This is done by decomposePiCos. Specifically, if decomposePiCos co [t1,..,tn] = ([co1,...,cok], cor) then (f |> co) t1 .. tn = (f (t1 |> co1) ... (tk |> cok)) |> cor) t(k+1) ... tn Notes: * k can be smaller than n! That is decomposePiCos can return *fewer* coercions than there are arguments (ie k < n), if the kind provided doesn't have enough binders. * If there is a type error, we might see (f |> co) t1 where co :: (forall a. ty) ~ (ty1 -> ty2) Here 'co' is insoluble, but we don't want to crash in decoposePiCos. So decomposePiCos carefully tests both sides of the coercion to check they are both foralls or both arrows. Not doing this caused #15343. -} decomposePiCos :: HasDebugCallStack => CoercionN -> Pair Type -- Coercion and its kind -> [Type] -> ([CoercionN], CoercionN) -- See Note [Pushing a coercion into a pi-type] decomposePiCos orig_co (Pair orig_k1 orig_k2) orig_args = go [] (orig_subst,orig_k1) orig_co (orig_subst,orig_k2) orig_args where orig_subst = mkEmptySubst $ mkInScopeSet $ tyCoVarsOfTypes orig_args `unionVarSet` tyCoVarsOfCo orig_co go :: [CoercionN] -- accumulator for argument coercions, reversed -> (Subst,Kind) -- Lhs kind of coercion -> CoercionN -- coercion originally applied to the function -> (Subst,Kind) -- Rhs kind of coercion -> [Type] -- Arguments to that function -> ([CoercionN], Coercion) -- Invariant: co :: subst1(k1) ~ subst2(k2) go acc_arg_cos (subst1,k1) co (subst2,k2) (ty:tys) | Just (a, t1) <- splitForAllTyCoVar_maybe k1 , Just (b, t2) <- splitForAllTyCoVar_maybe k2 -- know co :: (forall a:s1.t1) ~ (forall b:s2.t2) -- function :: forall a:s1.t1 (the function is not passed to decomposePiCos) -- a :: s1 -- b :: s2 -- ty :: s2 -- need arg_co :: s2 ~ s1 -- res_co :: t1[ty |> arg_co / a] ~ t2[ty / b] = let arg_co = mkSelCo SelForAll (mkSymCo co) 
res_co = mkInstCo co (mkGReflLeftCo Nominal ty arg_co) subst1' = extendTCvSubst subst1 a (ty `CastTy` arg_co) subst2' = extendTCvSubst subst2 b ty in go (arg_co : acc_arg_cos) (subst1', t1) res_co (subst2', t2) tys | Just (af1, _w1, _s1, t1) <- splitFunTy_maybe k1 , Just (af2, _w1, _s2, t2) <- splitFunTy_maybe k2 , af1 == af2 -- Same sort of arrow -- know co :: (s1 -> t1) ~ (s2 -> t2) -- function :: s1 -> t1 -- ty :: s2 -- need arg_co :: s2 ~ s1 -- res_co :: t1 ~ t2 = let (_, sym_arg_co, res_co) = decomposeFunCo co -- It should be fine to ignore the multiplicity bit -- of the coercion for a Nominal coercion. arg_co = mkSymCo sym_arg_co in go (arg_co : acc_arg_cos) (subst1,t1) res_co (subst2,t2) tys | not (isEmptyTCvSubst subst1) || not (isEmptyTCvSubst subst2) = go acc_arg_cos (zapSubst subst1, substTy subst1 k1) co (zapSubst subst2, substTy subst1 k2) (ty:tys) -- tys might not be empty, if the left-hand type of the original coercion -- didn't have enough binders go acc_arg_cos _ki1 co _ki2 _tys = (reverse acc_arg_cos, co) -- | Extract a covar, if possible. This check is dirty. Be ashamed -- of yourself. (It's dirty because it cares about the structure of -- a coercion, which is morally reprehensible.) getCoVar_maybe :: Coercion -> Maybe CoVar getCoVar_maybe (CoVarCo cv) = Just cv getCoVar_maybe _ = Nothing multToCo :: Mult -> Coercion multToCo r = mkNomReflCo r -- first result has role equal to input; third result is Nominal splitAppCo_maybe :: Coercion -> Maybe (Coercion, Coercion) -- ^ Attempt to take a coercion application apart. splitAppCo_maybe (AppCo co arg) = Just (co, arg) splitAppCo_maybe (TyConAppCo r tc args) | args `lengthExceeds` tyConArity tc , Just (args', arg') <- snocView args = Just ( mkTyConAppCo r tc args', arg' ) | not (tyConMustBeSaturated tc) -- Never create unsaturated type family apps! 
, Just (args', arg') <- snocView args , Just arg'' <- setNominalRole_maybe (tyConRole r tc (length args')) arg' = Just ( mkTyConAppCo r tc args', arg'' ) -- Use mkTyConAppCo to preserve the invariant -- that identity coercions are always represented by Refl splitAppCo_maybe co | Just (ty, r) <- isReflCo_maybe co , Just (ty1, ty2) <- splitAppTy_maybe ty = Just (mkReflCo r ty1, mkNomReflCo ty2) splitAppCo_maybe _ = Nothing -- Only used in specialise/Rules splitFunCo_maybe :: Coercion -> Maybe (Coercion, Coercion) splitFunCo_maybe (FunCo { fco_arg = arg, fco_res = res }) = Just (arg, res) splitFunCo_maybe _ = Nothing splitForAllCo_maybe :: Coercion -> Maybe (TyCoVar, ForAllTyFlag, ForAllTyFlag, Coercion, Coercion) splitForAllCo_maybe (ForAllCo { fco_tcv = tv, fco_visL = vL, fco_visR = vR , fco_kind = k_co, fco_body = co }) = Just (tv, vL, vR, k_co, co) splitForAllCo_maybe co | Just (ty, r) <- isReflCo_maybe co , Just (Bndr tcv vis, body_ty) <- splitForAllForAllTyBinder_maybe ty = Just (tcv, vis, vis, mkNomReflCo (varType tcv), mkReflCo r body_ty) splitForAllCo_maybe _ = Nothing -- | Like 'splitForAllCo_maybe', but only returns Just for tyvar binder splitForAllCo_ty_maybe :: Coercion -> Maybe (TyVar, ForAllTyFlag, ForAllTyFlag, Coercion, Coercion) splitForAllCo_ty_maybe co | Just stuff@(tv, _, _, _, _) <- splitForAllCo_maybe co , isTyVar tv = Just stuff splitForAllCo_ty_maybe _ = Nothing -- | Like 'splitForAllCo_maybe', but only returns Just for covar binder splitForAllCo_co_maybe :: Coercion -> Maybe (CoVar, ForAllTyFlag, ForAllTyFlag, Coercion, Coercion) splitForAllCo_co_maybe co | Just stuff@(cv, _, _, _, _) <- splitForAllCo_maybe co , isCoVar cv = Just stuff splitForAllCo_co_maybe _ = Nothing ------------------------------------------------------- -- and some coercion kind stuff coVarLType, coVarRType :: HasDebugCallStack => CoVar -> Type coVarLType cv | (ty1, _, _) <- coVarTypesRole cv = ty1 coVarRType cv | (_, ty2, _) <- coVarTypesRole cv = ty2 coVarTypes :: 
HasDebugCallStack => CoVar -> Pair Type coVarTypes cv | (ty1, ty2, _) <- coVarTypesRole cv = Pair ty1 ty2 coVarTypesRole :: HasDebugCallStack => CoVar -> (Type,Type,Role) coVarTypesRole cv | Just (tc, [_,_,ty1,ty2]) <- splitTyConApp_maybe (varType cv) = (ty1, ty2, eqTyConRole tc) | otherwise = pprPanic "coVarTypesRole, non coercion variable" (ppr cv $$ ppr (varType cv)) coVarKind :: CoVar -> Type coVarKind cv = assert (isCoVar cv ) varType cv coVarRole :: CoVar -> Role coVarRole cv = eqTyConRole (case tyConAppTyCon_maybe (varType cv) of Just tc0 -> tc0 Nothing -> pprPanic "coVarRole: not tyconapp" (ppr cv)) eqTyConRole :: TyCon -> Role -- Given (~#) or (~R#) return the Nominal or Representational respectively eqTyConRole tc | tc `hasKey` eqPrimTyConKey = Nominal | tc `hasKey` eqReprPrimTyConKey = Representational | otherwise = pprPanic "eqTyConRole: unknown tycon" (ppr tc) -- | Given a coercion `co :: (t1 :: TYPE r1) ~ (t2 :: TYPE r2)` -- produce a coercion `rep_co :: r1 ~ r2` -- But actually it is possible that -- co :: (t1 :: CONSTRAINT r1) ~ (t2 :: CONSTRAINT r2) -- or co :: (t1 :: TYPE r1) ~ (t2 :: CONSTRAINT r2) -- or co :: (t1 :: CONSTRAINT r1) ~ (t2 :: TYPE r2) -- See Note [mkRuntimeRepCo] mkRuntimeRepCo :: HasDebugCallStack => Coercion -> Coercion mkRuntimeRepCo co = assert (isTYPEorCONSTRAINT k1 && isTYPEorCONSTRAINT k2) $ mkSelCo (SelTyCon 0 Nominal) kind_co where kind_co = mkKindCo co -- kind_co :: TYPE r1 ~ TYPE r2 Pair k1 k2 = coercionKind kind_co {- Note [mkRuntimeRepCo] ~~~~~~~~~~~~~~~~~~~~~~~~ Given class C a where { op :: Maybe a } we will get an axiom axC a :: (C a :: CONSTRAINT r1) ~ (Maybe a :: TYPE r2) (See Note [Type and Constraint are not apart] in GHC.Builtin.Types.Prim.) 
Then we may call mkRuntimeRepCo on (axC ty), and that will return mkSelCo (SelTyCon 0 Nominal) (Kind (axC ty)) :: r1 ~ r2 So mkSelCo needs to be happy with decomposing a coercion of kind CONSTRAINT r1 ~ TYPE r2 Hence the use of `tyConIsTYPEorCONSTRAINT` in the assertion `good_call` in `mkSelCo`. See #23018 for a concrete example. (In this context it's important that TYPE and CONSTRAINT have the same arity and kind, not merely that they are not-apart; otherwise SelCo would not make sense.) -} isReflCoVar_maybe :: Var -> Maybe Coercion -- If cv :: t~t then isReflCoVar_maybe cv = Just (Refl t) -- Works on all kinds of Vars, not just CoVars isReflCoVar_maybe cv | isCoVar cv , Pair ty1 ty2 <- coVarTypes cv , ty1 `eqType` ty2 = Just (mkReflCo (coVarRole cv) ty1) | otherwise = Nothing -- | Tests if this coercion is obviously a generalized reflexive coercion. -- Guaranteed to work very quickly. isGReflCo :: Coercion -> Bool isGReflCo (GRefl{}) = True isGReflCo (Refl{}) = True -- Refl ty == GRefl N ty MRefl isGReflCo _ = False -- | Tests if this coercion is obviously reflexive. Guaranteed to work -- very quickly. Sometimes a coercion can be reflexive, but not obviously -- so. c.f. 'isReflexiveCo' isReflCo :: Coercion -> Bool isReflCo (Refl{}) = True isReflCo (GRefl _ _ mco) | isGReflMCo mco = True isReflCo _ = False -- | Returns the type coerced if this coercion is a generalized reflexive -- coercion. Guaranteed to work very quickly. isGReflCo_maybe :: Coercion -> Maybe (Type, Role) isGReflCo_maybe (GRefl r ty _) = Just (ty, r) isGReflCo_maybe (Refl ty) = Just (ty, Nominal) isGReflCo_maybe _ = Nothing -- | Returns the type coerced if this coercion is reflexive. Guaranteed -- to work very quickly. Sometimes a coercion can be reflexive, but not -- obviously so. c.f. 
'isReflexiveCo_maybe' isReflCo_maybe :: Coercion -> Maybe (Type, Role) isReflCo_maybe (Refl ty) = Just (ty, Nominal) isReflCo_maybe (GRefl r ty mco) | isGReflMCo mco = Just (ty, r) isReflCo_maybe _ = Nothing -- | Slowly checks if the coercion is reflexive. Don't call this in a loop, -- as it walks over the entire coercion. isReflexiveCo :: Coercion -> Bool isReflexiveCo = isJust . isReflexiveCo_maybe -- | Extracts the coerced type from a reflexive coercion. This potentially -- walks over the entire coercion, so avoid doing this in a loop. isReflexiveCo_maybe :: Coercion -> Maybe (Type, Role) isReflexiveCo_maybe (Refl ty) = Just (ty, Nominal) isReflexiveCo_maybe (GRefl r ty mco) | isGReflMCo mco = Just (ty, r) isReflexiveCo_maybe co | ty1 `eqType` ty2 = Just (ty1, r) | otherwise = Nothing where (Pair ty1 ty2, r) = coercionKindRole co {- %************************************************************************ %* * Building coercions %* * %************************************************************************ These "smart constructors" maintain the invariants listed in the definition of Coercion, and they perform very basic optimizations. Note [Role twiddling functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a plethora of functions for twiddling roles: mkSubCo: Requires a nominal input coercion and always produces a representational output. This is used when you (the programmer) are sure you know exactly that role you have and what you want. downgradeRole_maybe: This function takes both the input role and the output role as parameters. (The *output* role comes first!) It can only *downgrade* a role -- that is, change it from N to R or P, or from R to P. This one-way behavior is why there is the "_maybe". If an upgrade is requested, this function produces Nothing. This is used when you need to change the role of a coercion, but you're not sure (as you're writing the code) of which roles are involved. 
This function could have been written using coercionRole to ascertain the role of the input. But, that function is recursive, and the caller of downgradeRole_maybe often knows the input role. So, this is more efficient. downgradeRole: This is just like downgradeRole_maybe, but it panics if the conversion isn't a downgrade. setNominalRole_maybe: This is the only function that can *upgrade* a coercion. The result (if it exists) is always Nominal. The input can be at any role. It works on a "best effort" basis, as it should never be strictly necessary to upgrade a coercion during compilation. It is currently only used within GHC in splitAppCo_maybe. In order to be a proper inverse of mkAppCo, the second coercion that splitAppCo_maybe returns must be nominal. But, it's conceivable that splitAppCo_maybe is operating over a TyConAppCo that uses a representational coercion. Hence the need for setNominalRole_maybe. splitAppCo_maybe, in turn, is used only within coercion optimization -- thus, it is not absolutely critical that setNominalRole_maybe be complete. Note that setNominalRole_maybe will never upgrade a phantom UnivCo. Phantom UnivCos are perfectly type-safe, whereas representational and nominal ones are not. (Nominal ones are no worse than representational ones, so this function *will* change a UnivCo Representational to a UnivCo Nominal.) Conal Elliott also came across a need for this function while working with the GHC API, as he was decomposing Core casts. The Core casts use representational coercions, as they must, but his use case required nominal coercions (he was building a GADT). So, that's why this function is exported from this module. One might ask: shouldn't downgradeRole_maybe just use setNominalRole_maybe as appropriate? I (Richard E.) have decided not to do this, because upgrading a role is bizarre and a caller should have to ask for this behavior explicitly. 
-} -- | Make a reflexive coercion mkReflCo :: Role -> Type -> Coercion mkReflCo Nominal ty = Refl ty mkReflCo r ty = GRefl r ty MRefl -- | Make a representational reflexive coercion mkRepReflCo :: Type -> Coercion mkRepReflCo ty = GRefl Representational ty MRefl -- | Make a nominal reflexive coercion mkNomReflCo :: Type -> Coercion mkNomReflCo = Refl -- | Apply a type constructor to a list of coercions. It is the -- caller's responsibility to get the roles correct on argument coercions. mkTyConAppCo :: HasDebugCallStack => Role -> TyCon -> [Coercion] -> Coercion mkTyConAppCo r tc cos | Just co <- tyConAppFunCo_maybe r tc cos = co -- Expand type synonyms | ExpandsSyn tv_co_prs rhs_ty leftover_cos <- expandSynTyCon_maybe tc cos = mkAppCos (liftCoSubst r (mkLiftingContext tv_co_prs) rhs_ty) leftover_cos | Just tys_roles <- traverse isReflCo_maybe cos = mkReflCo r (mkTyConApp tc (map fst tys_roles)) -- See Note [Refl invariant] | otherwise = TyConAppCo r tc cos mkFunCoNoFTF :: HasDebugCallStack => Role -> CoercionN -> Coercion -> Coercion -> Coercion -- This version of mkFunCo takes no FunTyFlags; it works them out mkFunCoNoFTF r w arg_co res_co = mkFunCo2 r afl afr w arg_co res_co where afl = chooseFunTyFlag argl_ty resl_ty afr = chooseFunTyFlag argr_ty resr_ty Pair argl_ty argr_ty = coercionKind arg_co Pair resl_ty resr_ty = coercionKind res_co -- | Build a function 'Coercion' from two other 'Coercion's. That is, -- given @co1 :: a ~ b@ and @co2 :: x ~ y@ produce @co :: (a -> x) ~ (b -> y)@ -- or @(a => x) ~ (b => y)@, depending on the kind of @a@/@b@. 
-- This (most common) version takes a single FunTyFlag, which is used -- for both fco_afl and ftf_afr of the FunCo mkFunCo :: Role -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion mkFunCo r af w arg_co res_co = mkFunCo2 r af af w arg_co res_co mkNakedFunCo :: Role -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion -- This version of mkFunCo does not check FunCo invariants (checkFunCo) -- It's a historical vestige; See Note [No assertion check on mkFunCo] mkNakedFunCo = mkFunCo mkFunCo2 :: Role -> FunTyFlag -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion -- This is the smart constructor for FunCo; it checks invariants mkFunCo2 r afl afr w arg_co res_co -- See Note [No assertion check on mkFunCo] | Just (ty1, _) <- isReflCo_maybe arg_co , Just (ty2, _) <- isReflCo_maybe res_co , Just (w, _) <- isReflCo_maybe w = mkReflCo r (mkFunTy afl w ty1 ty2) -- See Note [Refl invariant] | otherwise = FunCo { fco_role = r, fco_afl = afl, fco_afr = afr , fco_mult = w, fco_arg = arg_co, fco_res = res_co } {- Note [No assertion check on mkFunCo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to have a checkFunCo assertion on mkFunCo, but during typechecking we can (legitimately) have not-full-zonked types or coercion variables, so the assertion spuriously fails (test T11480b is a case in point). Lint checks all these things anyway. We used to get around the problem by calling mkNakedFunCo from within the typechecker, which dodged the assertion check. But then mkAppCo calls mkTyConAppCo, which calls tyConAppFunCo_maybe, which calls mkFunCo. Duplicating this stack of calls with "naked" versions of each seems too much. 
-- Commented out: see Note [No assertion check on mkFunCo] checkFunCo :: Role -> FunTyFlag -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Maybe SDoc -- Checks well-formed-ness for FunCo -- Used only in assertions and Lint {-# NOINLINE checkFunCo #-} checkFunCo _r afl afr _w arg_co res_co | not (ok argl_ty && ok argr_ty && ok resl_ty && ok resr_ty) = Just (hang (text "Bad arg or res types") 2 pp_inputs) | afl == computed_afl , afr == computed_afr = Nothing | otherwise = Just (vcat [ text "afl (provided,computed):" <+> ppr afl <+> ppr computed_afl , text "afr (provided,computed):" <+> ppr afr <+> ppr computed_afr , pp_inputs ]) where computed_afl = chooseFunTyFlag argl_ty resl_ty computed_afr = chooseFunTyFlag argr_ty resr_ty Pair argl_ty argr_ty = coercionKind arg_co Pair resl_ty resr_ty = coercionKind res_co pp_inputs = vcat [ pp_ty "argl" argl_ty, pp_ty "argr" argr_ty , pp_ty "resl" resl_ty, pp_ty "resr" resr_ty , text "arg_co:" <+> ppr arg_co , text "res_co:" <+> ppr res_co ] ok ty = isTYPEorCONSTRAINT (typeKind ty) pp_ty str ty = text str <> colon <+> hang (ppr ty) 2 (dcolon <+> ppr (typeKind ty)) -} -- | Apply a 'Coercion' to another 'Coercion'. -- The second coercion must be Nominal, unless the first is Phantom. -- If the first is Phantom, then the second can be either Phantom or Nominal. 
mkAppCo :: Coercion -- ^ :: t1 ~r t2 -> Coercion -- ^ :: s1 ~N s2, where s1 :: k1, s2 :: k2 -> Coercion -- ^ :: t1 s1 ~r t2 s2 mkAppCo co arg | Just (ty1, r) <- isReflCo_maybe co , Just (ty2, _) <- isReflCo_maybe arg = mkReflCo r (mkAppTy ty1 ty2) | Just (ty1, r) <- isReflCo_maybe co , Just (tc, tys) <- splitTyConApp_maybe ty1 -- Expand type synonyms; a TyConAppCo can't have a type synonym (#9102) = mkTyConAppCo r tc (zip_roles (tyConRolesX r tc) tys) where zip_roles (Inf r1 _) [] = [downgradeRole r1 Nominal arg] zip_roles (Inf r1 rs) (ty1:tys) = mkReflCo r1 ty1 : zip_roles rs tys mkAppCo (TyConAppCo r tc args) arg = case r of Nominal -> mkTyConAppCo Nominal tc (args ++ [arg]) Representational -> mkTyConAppCo Representational tc (args ++ [arg']) where new_role = tyConRolesRepresentational tc Inf.!! length args arg' = downgradeRole new_role Nominal arg Phantom -> mkTyConAppCo Phantom tc (args ++ [toPhantomCo arg]) mkAppCo co arg = AppCo co arg -- Note, mkAppCo is careful to maintain invariants regarding -- where Refl constructors appear; see the comments in the definition -- of Coercion and the Note [Refl invariant] in GHC.Core.TyCo.Rep. -- | Applies multiple 'Coercion's to another 'Coercion', from left to right. -- See also 'mkAppCo'. mkAppCos :: Coercion -> [Coercion] -> Coercion mkAppCos co1 cos = foldl' mkAppCo co1 cos -- | Make a Coercion from a tycovar, a kind coercion, and a body coercion. 
-- | Make a forall 'Coercion' over a type or coercion variable, collapsing to
-- 'Refl' when the body and kind coercions are reflexive and the two
-- visibilities agree.
mkForAllCo :: HasDebugCallStack => TyCoVar -> ForAllTyFlag -> ForAllTyFlag
           -> CoercionN -> Coercion -> Coercion
mkForAllCo v visL visR kind_co co
  | Just (ty, r) <- isReflCo_maybe co
  , isReflCo kind_co
  , visL `eqForAllVis` visR
  = mkReflCo r (mkTyCoForAllTy v visL ty)
  | otherwise
  = mkForAllCo_NoRefl v visL visR kind_co co

-- | Make a Coercion quantified over a type/coercion variable;
-- the variable has the same kind and visibility in both sides of the coercion
mkHomoForAllCos :: [ForAllTyBinder] -> Coercion -> Coercion
mkHomoForAllCos vs orig_co
  | Just (ty, r) <- isReflCo_maybe orig_co
  = mkReflCo r (mkTyCoForAllTys vs ty)
  | otherwise
  = foldr go orig_co vs
  where
    -- Each binder gets a reflexive kind coercion, so no Refl check is needed
    go (Bndr var vis) co
      = mkForAllCo_NoRefl var vis vis (mkNomReflCo (varType var)) co

-- | Like 'mkForAllCo', but there is no need to check that the inner coercion isn't Refl;
-- the caller has done that. (For example, it is guaranteed in 'mkHomoForAllCos'.)
-- The kind of the tycovar should be the left-hand kind of the kind coercion.
mkForAllCo_NoRefl :: TyCoVar -> ForAllTyFlag -> ForAllTyFlag
                  -> CoercionN -> Coercion -> Coercion
mkForAllCo_NoRefl tcv visL visR kind_co co
  = assertGoodForAllCo tcv visL visR kind_co co $
    assertPpr (not (isReflCo co && isReflCo kind_co && visL == visR)) (ppr co) $
    ForAllCo { fco_tcv = tcv, fco_visL = visL, fco_visR = visR
             , fco_kind = kind_co, fco_body = co }

assertGoodForAllCo :: HasDebugCallStack
                   => TyCoVar -> ForAllTyFlag -> ForAllTyFlag
                   -> CoercionN -> Coercion -> a -> a
-- Check ForAllCo invariants; see Note [ForAllCo] in GHC.Core.TyCo.Rep
-- (identity function in non-assertion builds; the extra checks below apply
-- only when the bound variable is a coercion variable)
assertGoodForAllCo tcv visL visR kind_co co
  | isTyVar tcv
  = assertPpr (tcv_type `eqType` kind_co_lkind) doc
  | otherwise
  = assertPpr (tcv_type `eqType` kind_co_lkind) doc
      -- The kind of the tycovar should be the left-hand kind of the kind coercion.
    . assertPpr (almostDevoidCoVarOfCo tcv co) doc
      -- See (FC6) in Note [ForAllCo] in GHC.Core.TyCo.Rep
    . assertPpr (visL == coreTyLamForAllTyFlag && visR == coreTyLamForAllTyFlag) doc
      -- See (FC7) in Note [ForAllCo] in GHC.Core.TyCo.Rep
  where
    tcv_type      = varType tcv
    kind_co_lkind = coercionLKind kind_co
    doc = vcat [ text "Var:" <+> ppr tcv <+> dcolon <+> ppr tcv_type
               , text "Vis:" <+> ppr visL <+> ppr visR
               , text "kind_co:" <+> ppr kind_co
               , text "kind_co_lkind" <+> ppr kind_co_lkind
               , text "body_co" <+> ppr co ]

mkNakedForAllCo :: TyVar   -- Never a CoVar
                -> ForAllTyFlag -> ForAllTyFlag
                -> CoercionN -> Coercion -> Coercion
-- This version lacks the assertion checks.
-- Used during type checking when the arguments may (legitimately) not be zonked
-- and so the assertions might (bogusly) fail
-- NB: since the coercions are un-zonked, we can't really deal with
-- (FC6) and (FC7) in Note [ForAllCo] in GHC.Core.TyCo.Rep.
-- Fortunately we don't have to: this function is needed only for /type/ variables.
mkNakedForAllCo tv visL visR kind_co co
  | assertPpr (isTyVar tv) (ppr tv) True
  , Just (ty, r) <- isReflCo_maybe co
  , isReflCo kind_co
  , visL `eqForAllVis` visR
  = mkReflCo r (mkForAllTy (Bndr tv visL) ty)
  | otherwise
  = ForAllCo { fco_tcv = tv, fco_visL = visL, fco_visR = visR
             , fco_kind = kind_co, fco_body = co }

mkCoVarCo :: CoVar -> Coercion
-- cv :: s ~# t
-- See Note [mkCoVarCo]
mkCoVarCo cv = CoVarCo cv

mkCoVarCos :: [CoVar] -> [Coercion]
mkCoVarCos = map mkCoVarCo

{- Note [mkCoVarCo]
~~~~~~~~~~~~~~~~~~~
In the past, mkCoVarCo optimised (c :: t~t) to (Refl t). That is
valid (although see Note [Unbound RULE binders] in GHC.Core.Rules),
but it's a relatively expensive test and perhaps better done in
optCoercion. Not a big deal either way.
-}

mkAxInstCo :: Role -> CoAxiomRule  -- Always BranchedAxiom or UnbranchedAxiom
           -> [Type] -> [Coercion] -> Coercion
-- mkAxInstCo can legitimately be called over-saturated;
-- i.e. with more type arguments than the coercion requires
-- Only called with BranchedAxiom or UnbranchedAxiom
mkAxInstCo role axr tys cos
  | arity == n_tys = downgradeRole role ax_role $
                     AxiomCo axr (rtys `chkAppend` cos)
  | otherwise      = assert (arity < n_tys) $
                     -- Over-saturated: apply leftover args with AppCos
                     downgradeRole role ax_role $
                     mkAppCos (AxiomCo axr (ax_args `chkAppend` cos)) leftover_args
  where
    (ax_role, branch) = case coAxiomRuleBranch_maybe axr of
                          Just (_tc, ax_role, branch) -> (ax_role, branch)
                          Nothing -> pprPanic "mkAxInstCo" (ppr axr)
    n_tys     = length tys
    arity     = length (coAxBranchTyVars branch)
    arg_roles = coAxBranchRoles branch
    rtys      = zipWith mkReflCo (arg_roles ++ repeat Nominal) tys
    (ax_args, leftover_args) = splitAt arity rtys

-- worker function
mkAxiomCo :: CoAxiomRule -> [Coercion] -> Coercion
mkAxiomCo = AxiomCo

-- to be used only with unbranched axioms
mkUnbranchedAxInstCo :: Role -> CoAxiom Unbranched
                     -> [Type] -> [Coercion] -> Coercion
mkUnbranchedAxInstCo role ax tys cos
  = mkAxInstCo role (UnbranchedAxiom ax) tys cos

mkAxInstRHS :: CoAxiom br -> BranchIndex -> [Type] -> [Coercion] -> Type
-- Instantiate the axiom with specified types,
-- returning the instantiated RHS
-- A companion to mkAxInstCo:
--    mkAxInstRhs ax index tys = snd (coercionKind (mkAxInstCo ax index tys))
mkAxInstRHS ax index tys cos
  = assert (tvs `equalLength` tys1) $
    mkAppTys rhs' tys2
  where
    branch       = coAxiomNthBranch ax index
    tvs          = coAxBranchTyVars branch
    cvs          = coAxBranchCoVars branch
    (tys1, tys2) = splitAtList tvs tys
    rhs'         = substTyWith tvs tys1 $
                   substTyWithCoVars cvs cos $
                   coAxBranchRHS branch

mkUnbranchedAxInstRHS :: CoAxiom Unbranched -> [Type] -> [Coercion] -> Type
mkUnbranchedAxInstRHS ax = mkAxInstRHS ax 0

-- | Return the left-hand type of the axiom, when the axiom is instantiated
-- at the types given.
mkAxInstLHS :: CoAxiom br -> BranchIndex -> [Type] -> [Coercion] -> Type
mkAxInstLHS ax index tys cos
  = assert (tvs `equalLength` tys1) $
    mkTyConApp fam_tc (lhs_tys `chkAppend` tys2)
  where
    branch       = coAxiomNthBranch ax index
    tvs          = coAxBranchTyVars branch
    cvs          = coAxBranchCoVars branch
    (tys1, tys2) = splitAtList tvs tys
    lhs_tys      = substTysWith tvs tys1 $
                   substTysWithCoVars cvs cos $
                   coAxBranchLHS branch
    fam_tc       = coAxiomTyCon ax

-- | Instantiate the left-hand side of an unbranched axiom
mkUnbranchedAxInstLHS :: CoAxiom Unbranched -> [Type] -> [Coercion] -> Type
mkUnbranchedAxInstLHS ax = mkAxInstLHS ax 0

-- | Make a coercion from a coercion hole
mkHoleCo :: CoercionHole -> Coercion
mkHoleCo h = HoleCo h

-- | Make a universal coercion between two arbitrary types.
mkUnivCo :: UnivCoProvenance
         -> [Coercion]  -- ^ Coercions on which this depends
         -> Role        -- ^ role of the built coercion, "r"
         -> Type        -- ^ t1 :: k1
         -> Type        -- ^ t2 :: k2
         -> Coercion    -- ^ :: t1 ~r t2
mkUnivCo prov deps role ty1 ty2
  | ty1 `eqType` ty2 = mkReflCo role ty1  -- Short-circuit to Refl if syntactically equal
  | otherwise        = UnivCo { uco_prov = prov, uco_role = role
                              , uco_lty = ty1, uco_rty = ty2
                              , uco_deps = deps }

-- | Create a symmetric version of the given 'Coercion' that asserts
--   equality between the same types but in the other "direction", so
--   a kind of @t1 ~ t2@ becomes the kind @t2 ~ t1@.
mkSymCo :: Coercion -> Coercion
-- Do a few simple optimizations, mainly to expose the underlying
-- constructors to other 'mk' functions. E.g.
--    mkInstCo (mkSymCo (ForAllCo ...)) ty
-- We want to push the SymCo inside the ForallCo, so that we can instantiate
-- This can make a big difference. E.g without coercion optimisation, GHC.Read
-- totally explodes; but when we push Sym inside ForAll, it's fine.
mkSymCo co | isReflCo co      = co
mkSymCo    (SymCo co)         = co       -- Sym is an involution
mkSymCo    (SubCo (SymCo co)) = SubCo co
mkSymCo co@(ForAllCo { fco_kind = kco, fco_body = body_co })
  | isReflCo kco = co { fco_body = mkSymCo body_co }
mkSymCo co = SymCo co

-- | mkTransCo creates a new 'Coercion' by composing the two
--   given 'Coercion's transitively: (co1 ; co2)
mkTransCo :: HasDebugCallStack => Coercion -> Coercion -> Coercion
mkTransCo co1 co2
  | isReflCo co1 = co2
  | isReflCo co2 = co1
  | GRefl r t1 (MCo kco1) <- co1
  , GRefl _ _  (MCo kco2) <- co2
  = GRefl r t1 (MCo $ mkTransCo kco1 kco2)  -- Compose the kind coercions instead
  | otherwise
  = TransCo co1 co2

--------------------
{- Note [mkSelCo precondition]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To satisfy the Purely Kinded Type Invariant (PKTI), we require
that in any call (mkSelCo cs co)
  * selectFromType cs (coercionLKind co) works
  * selectFromType cs (coercionRKind co) works
  * and hence coercionKind (SelCo cs co) works (PKTI)
-}

mkSelCo :: HasDebugCallStack => CoSel -> Coercion -> Coercion
-- See Note [mkSelCo precondition]
mkSelCo n co = mkSelCo_maybe n co `orElse` SelCo n co

mkSelCo_maybe :: HasDebugCallStack => CoSel -> Coercion -> Maybe Coercion
-- Note [mkSelCo precondition]
-- Returns Just a simplified selection coercion, or Nothing if no
-- simplification applies (the caller then builds a SelCo directly)
mkSelCo_maybe cs co
  = assertPpr (good_call cs) bad_call_msg $
    go cs co
  where
    go SelForAll (ForAllCo { fco_kind = kind_co })
      = Just kind_co
      -- If co :: (forall a1:k1. t1) ~ (forall a2:k2. t2)
      -- then (nth SelForAll co :: k1 ~N k2)
      -- If co :: (forall a1:t1 ~ t2. t1) ~ (forall a2:t3 ~ t4. t2)
      -- then (nth SelForAll co :: (t1 ~ t2) ~N (t3 ~ t4))

    go (SelFun fs) (FunCo _ _ _ w arg res)
      = Just (getNthFun fs w arg res)

    go (SelTyCon i r) (TyConAppCo r0 tc arg_cos)
      = assertPpr (r == tyConRole r0 tc i)
                  (vcat [ ppr tc, ppr arg_cos, ppr r0, ppr i, ppr r ]) $
        Just (arg_cos `getNth` i)

    go cs (SymCo co)  -- Recurse, hoping to get to a TyConAppCo or FunCo
      = do { co' <- go cs co; return (mkSymCo co') }

    go cs co
      | Just (ty, co_role) <- isReflCo_maybe co
      = Just (mkReflCo (mkSelCoResRole cs co_role) (selectFromType cs ty))
        -- mkSelCoreResRole: The role of the result may not be
        -- be equal to co_role, the role of co, per Note [SelCo].
        -- This was revealed by #23938.

      | Pair ty1 ty2 <- coercionKind co
      , let sty1 = selectFromType cs ty1
            sty2 = selectFromType cs ty2
            co_role = coercionRole co
      , sty1 `eqType` sty2
      = Just (mkReflCo (mkSelCoResRole cs co_role) sty1)
        -- Checking for fully reflexive-ness (by seeing if sty1=sty2)
        -- is worthwhile, because a non-Refl coercion `co` may well have a
        -- reflexive (SelCo cs co).
        -- E.g. co :: Either a b ~ Either a c
        -- Then (SubCo (SelTyCon 0) co) is reflexive

      | otherwise
      = Nothing

    ----------- Assertion checking --------------
    -- NB: using coercionKind requires Note [mkSelCo precondition]
    Pair ty1 ty2 = coercionKind co
    bad_call_msg = vcat [ text "Coercion =" <+> ppr co
                        , text "LHS ty =" <+> ppr ty1
                        , text "RHS ty =" <+> ppr ty2
                        , text "cs =" <+> ppr cs
                        , text "coercion role =" <+> ppr (coercionRole co) ]

    -- good_call checks the typing rules given in Note [SelCo]
    good_call SelForAll
      | Just (_tv1, _) <- splitForAllTyCoVar_maybe ty1
      , Just (_tv2, _) <- splitForAllTyCoVar_maybe ty2
      = True

    good_call (SelFun {})
      = isFunTy ty1 && isFunTy ty2

    good_call (SelTyCon n r)
      | Just (tc1, tys1) <- splitTyConApp_maybe ty1
      , Just (tc2, tys2) <- splitTyConApp_maybe ty2
      , let { len1 = length tys1
            ; len2 = length tys2 }
      = (tc1 == tc2 || (tyConIsTYPEorCONSTRAINT tc1 && tyConIsTYPEorCONSTRAINT tc2))
           -- tyConIsTYPEorCONSTRAINT: see Note [mkRuntimeRepCo]
        && len1 == len2
        && n < len1
        && r == tyConRole (coercionRole co) tc1 n

    good_call _ = False

mkSelCoResRole :: CoSel -> Role -> Role
-- What is the role of (SelCo cs co), if co has role 'r'?
-- It is not just 'r'!
-- c.f. the SelCo case of coercionRole
mkSelCoResRole SelForAll       _ = Nominal
mkSelCoResRole (SelTyCon _ r') _ = r'
mkSelCoResRole (SelFun fs)     r = funRole r fs

-- | Extract the nth field of a FunCo
getNthFun :: FunSel
          -> a    -- ^ multiplicity
          -> a    -- ^ argument
          -> a    -- ^ result
          -> a    -- ^ One of the above three
getNthFun SelMult mult _   _   = mult
getNthFun SelArg  _    arg _   = arg
getNthFun SelRes  _    _   res = res

selectFromType :: HasDebugCallStack => CoSel -> Type -> Type
-- Apply a CoSel selector directly to a type; panics on a mismatch
selectFromType (SelFun fs) ty
  | Just (_af, mult, arg, res) <- splitFunTy_maybe ty
  = getNthFun fs mult arg res

selectFromType (SelTyCon n _) ty
  | Just args <- tyConAppArgs_maybe ty
  = assertPpr (args `lengthExceeds` n) (ppr n $$ ppr ty) $
    args `getNth` n

selectFromType SelForAll ty  -- Works for both tyvar and covar
  | Just (tv,_) <- splitForAllTyCoVar_maybe ty
  = tyVarKind tv

selectFromType cs ty
  = pprPanic "selectFromType" (ppr cs $$ ppr ty)

--------------------
mkLRCo :: LeftOrRight -> Coercion -> Coercion
mkLRCo lr co
  | Just (ty, eq) <- isReflCo_maybe co
  = mkReflCo eq (pickLR lr (splitAppTy ty))
  | otherwise
  = LRCo lr co

-- | Instantiates a 'Coercion'.
-- Works for both tyvar and covar
mkInstCo :: Coercion -> CoercionN -> Coercion
mkInstCo co_fun co_arg
  | Just (tcv, _, _, kind_co, body_co) <- splitForAllCo_maybe co_fun
  , Just (arg, _) <- isReflCo_maybe co_arg
  = assertPpr (isReflexiveCo kind_co) (ppr co_fun $$ ppr co_arg) $
      -- If the arg is Refl, then kind_co must be reflexive too
    substCoUnchecked (zipTCvSubst [tcv] [arg]) body_co
mkInstCo co arg = InstCo co arg

-- | Given @ty :: k1@, @co :: k1 ~ k2@,
-- produces @co' :: ty ~r (ty |> co)@
mkGReflRightCo :: Role -> Type -> CoercionN -> Coercion
mkGReflRightCo r ty co
  | isGReflCo co = mkReflCo r ty
    -- the kinds of @k1@ and @k2@ are the same, thus @isGReflCo@
    -- instead of @isReflCo@
  | otherwise = mkGReflMCo r ty co

-- | Given @r@, @ty :: k1@, and @co :: k1 ~N k2@,
-- produces @co' :: (ty |> co) ~r ty@
mkGReflLeftCo :: Role -> Type -> CoercionN -> Coercion
mkGReflLeftCo r ty co
  | isGReflCo co = mkReflCo r ty
    -- the kinds of @k1@ and @k2@ are the same, thus @isGReflCo@
    -- instead of @isReflCo@
  | otherwise    = mkSymCo $ mkGReflMCo r ty co

-- | Given @ty :: k1@, @co :: k1 ~ k2@, @co2:: ty ~r ty'@,
-- produces @co' :: (ty |> co) ~r ty'
-- It is not only a utility function, but it saves allocation when co
-- is a GRefl coercion.
mkCoherenceLeftCo :: Role -> Type -> CoercionN -> Coercion -> Coercion
mkCoherenceLeftCo r ty co co2
  | isGReflCo co = co2
  | otherwise    = (mkSymCo $ mkGReflMCo r ty co) `mkTransCo` co2

-- | Given @ty :: k1@, @co :: k1 ~ k2@, @co2:: ty' ~r ty@,
-- produces @co' :: ty' ~r (ty |> co)
-- It is not only a utility function, but it saves allocation when co
-- is a GRefl coercion.
mkCoherenceRightCo :: HasDebugCallStack => Role -> Type -> CoercionN -> Coercion -> Coercion
mkCoherenceRightCo r ty co co2
  | isGReflCo co = co2
  | otherwise    = co2 `mkTransCo` mkGReflMCo r ty co

-- | Given @co :: (a :: k) ~ (b :: k')@ produce @co' :: k ~ k'@.
mkKindCo :: Coercion -> Coercion
mkKindCo co | Just (ty, _) <- isReflCo_maybe co = Refl (typeKind ty)
mkKindCo (GRefl _ _ (MCo co)) = co
mkKindCo co
  | Pair ty1 ty2 <- coercionKind co
       -- Generally, calling coercionKind during coercion creation is a bad idea,
       -- as it can lead to exponential behavior. But, we don't have nested mkKindCos,
       -- so it's OK here.
  , let tk1 = typeKind ty1
        tk2 = typeKind ty2
  , tk1 `eqType` tk2
  = Refl tk1
  | otherwise
  = KindCo co

mkSubCo :: HasDebugCallStack => Coercion -> Coercion
-- Input coercion is Nominal, result is Representational
-- see also Note [Role twiddling functions]
mkSubCo (Refl ty) = GRefl Representational ty MRefl
mkSubCo (GRefl Nominal ty co) = GRefl Representational ty co
mkSubCo (TyConAppCo Nominal tc cos)
  = TyConAppCo Representational tc (applyRoles tc cos)
mkSubCo co@(FunCo { fco_role = Nominal, fco_arg = arg, fco_res = res })
  = co { fco_role = Representational
       , fco_arg  = downgradeRole Representational Nominal arg
       , fco_res  = downgradeRole Representational Nominal res }
mkSubCo co
  = assertPpr (coercionRole co == Nominal) (ppr co <+> ppr (coercionRole co)) $
    SubCo co

-- | Changes a role, but only a downgrade. See Note [Role twiddling functions]
downgradeRole_maybe :: Role  -- ^ desired role
                    -> Role  -- ^ current role
                    -> Coercion -> Maybe Coercion
-- In (downgradeRole_maybe dr cr co) it's a precondition that
-- cr = coercionRole co
downgradeRole_maybe Nominal          Nominal          co = Just co
downgradeRole_maybe Nominal          _                _  = Nothing
downgradeRole_maybe Representational Nominal          co = Just (mkSubCo co)
downgradeRole_maybe Representational Representational co = Just co
downgradeRole_maybe Representational Phantom          _  = Nothing
downgradeRole_maybe Phantom          Phantom          co = Just co
downgradeRole_maybe Phantom          _                co = Just (toPhantomCo co)

-- | Like 'downgradeRole_maybe', but panics if the change isn't a downgrade.
-- See Note [Role twiddling functions]
downgradeRole :: Role  -- desired role
              -> Role  -- current role
              -> Coercion -> Coercion
downgradeRole r1 r2 co
  = case downgradeRole_maybe r1 r2 co of
      Just co' -> co'
      Nothing  -> pprPanic "downgradeRole" (ppr co)

-- | Make a "coercion between coercions".
mkProofIrrelCo :: Role       -- ^ role of the created coercion, "r"
               -> CoercionN  -- ^ :: phi1 ~N phi2
               -> Coercion   -- ^ g1 :: phi1
               -> Coercion   -- ^ g2 :: phi2
               -> Coercion   -- ^ :: g1 ~r g2
-- if the two coercion prove the same fact, I just don't care what
-- the individual coercions are.
mkProofIrrelCo r co g _ | isGReflCo co = mkReflCo r (mkCoercionTy g)
  -- kco is a kind coercion, thus @isGReflCo@ rather than @isReflCo@
mkProofIrrelCo r kco g1 g2
  = mkUnivCo ProofIrrelProv [kco] r (mkCoercionTy g1) (mkCoercionTy g2)

{-
%************************************************************************
%*                                                                      *
                   Roles
%*                                                                      *
%************************************************************************
-}

-- | Converts a coercion to be nominal, if possible.
-- See Note [Role twiddling functions]
setNominalRole_maybe :: Role  -- of input coercion
                     -> Coercion -> Maybe CoercionN
setNominalRole_maybe r co
  | r == Nominal = Just co
  | otherwise    = setNominalRole_maybe_helper co
  where
    setNominalRole_maybe_helper (SubCo co)
      = Just co
    setNominalRole_maybe_helper co@(Refl _)
      = Just co
    setNominalRole_maybe_helper (GRefl _ ty co)
      = Just $ GRefl Nominal ty co
    setNominalRole_maybe_helper (TyConAppCo Representational tc cos)
      = do { cos' <- zipWithM setNominalRole_maybe
                              (tyConRoleListX Representational tc) cos
           ; return $ TyConAppCo Nominal tc cos' }
    setNominalRole_maybe_helper co@(FunCo { fco_role = Representational
                                          , fco_arg = co1, fco_res = co2 })
      = do { co1' <- setNominalRole_maybe Representational co1
           ; co2' <- setNominalRole_maybe Representational co2
           ; return $ co { fco_role = Nominal, fco_arg = co1', fco_res = co2' }
           }
    setNominalRole_maybe_helper (SymCo co)
      = SymCo <$> setNominalRole_maybe_helper co
    setNominalRole_maybe_helper (TransCo co1 co2)
      = TransCo <$> setNominalRole_maybe_helper co1
                <*> setNominalRole_maybe_helper co2
    setNominalRole_maybe_helper (AppCo co1 co2)
      = AppCo <$> setNominalRole_maybe_helper co1 <*> pure co2
    setNominalRole_maybe_helper co@(ForAllCo { fco_visL = visL
                                             , fco_visR = visR
                                             , fco_body = body_co })
      | visL `eqForAllVis` visR  -- See (FC3) in Note [ForAllCo] in GHC.Core.TyCo.Rep
      = do { body_co' <- setNominalRole_maybe_helper body_co
           ; return (co { fco_body = body_co' }) }
    setNominalRole_maybe_helper (SelCo cs co) =
      -- NB, this case recurses via setNominalRole_maybe, not
      -- setNominalRole_maybe_helper!
      case cs of
        SelTyCon n _r -> -- Remember to update the role in SelTyCon to nominal;
                         -- not doing this caused #23362.
                         -- See the typing rule in Note [SelCo] in GHC.Core.TyCo.Rep.
                         SelCo (SelTyCon n Nominal)
                           <$> setNominalRole_maybe (coercionRole co) co

        SelFun fs -> SelCo (SelFun fs)
                       <$> setNominalRole_maybe (coercionRole co) co

        SelForAll ->
          pprPanic "setNominalRole_maybe: the coercion should already be nominal"
                   (ppr co)
    setNominalRole_maybe_helper (InstCo co arg)
      = InstCo <$> setNominalRole_maybe_helper co <*> pure arg
    setNominalRole_maybe_helper co@(UnivCo { uco_prov = prov })
      | case prov of PhantomProv {}    -> False  -- should always be phantom
                     ProofIrrelProv {} -> True   -- it's always safe
                     PluginProv {}     -> False  -- who knows? This choice is conservative.
      = Just $ co { uco_role = Nominal }
    setNominalRole_maybe_helper _ = Nothing

-- | Make a phantom coercion between two types. The coercion passed
-- in must be a nominal coercion between the kinds of the
-- types.
mkPhantomCo :: Coercion -> Type -> Type -> Coercion
mkPhantomCo h t1 t2
  = mkUnivCo PhantomProv [h] Phantom t1 t2

-- takes any coercion and turns it into a Phantom coercion
toPhantomCo :: Coercion -> Coercion
toPhantomCo co
  = mkPhantomCo (mkKindCo co) ty1 ty2
  where Pair ty1 ty2 = coercionKind co

-- Convert args to a TyConAppCo Nominal to the same TyConAppCo Representational
applyRoles :: TyCon -> [Coercion] -> [Coercion]
applyRoles = zipWith (`downgradeRole` Nominal) . tyConRoleListRepresentational

-- The Role parameter is the Role of the TyConAppCo
-- defined here because this is intimately concerned with the implementation
-- of TyConAppCo
-- Always returns an infinite list (with a infinite tail of Nominal)
tyConRolesX :: Role -> TyCon -> Infinite Role
tyConRolesX Representational tc = tyConRolesRepresentational tc
tyConRolesX role             _  = Inf.repeat role

tyConRoleListX :: Role -> TyCon -> [Role]
tyConRoleListX role = Inf.toList . tyConRolesX role

-- Returns the roles of the parameters of a tycon, with an infinite tail
-- of Nominal
tyConRolesRepresentational :: TyCon -> Infinite Role
tyConRolesRepresentational tc = tyConRoles tc Inf.++ Inf.repeat Nominal

-- Returns the roles of the parameters of a tycon, with an infinite tail
-- of Nominal
tyConRoleListRepresentational :: TyCon -> [Role]
tyConRoleListRepresentational = Inf.toList . tyConRolesRepresentational

tyConRole :: Role -> TyCon -> Int -> Role
-- Role of the nth parameter of a TyConAppCo whose overall role is given
tyConRole Nominal          _  _ = Nominal
tyConRole Phantom          _  _ = Phantom
tyConRole Representational tc n = tyConRolesRepresentational tc Inf.!! n

funRole :: Role -> FunSel -> Role
funRole Nominal          _  = Nominal
funRole Phantom          _  = Phantom
funRole Representational fs = funRoleRepresentational fs

funRoleRepresentational :: FunSel -> Role
funRoleRepresentational SelMult = Nominal
funRoleRepresentational SelArg  = Representational
funRoleRepresentational SelRes  = Representational

ltRole :: Role -> Role -> Bool
-- Is one role "less" than another?
--     Nominal < Representational < Phantom
ltRole Phantom          _       = False
ltRole Representational Phantom = True
ltRole Representational _       = False
ltRole Nominal          Nominal = False
ltRole Nominal          _       = True

-------------------------------

-- | like mkKindCo, but aggressively & recursively optimizes to avoid using
-- a KindCo constructor. The output role is nominal.
promoteCoercion :: HasDebugCallStack => Coercion -> CoercionN
-- First cases handles anything that should yield refl.
promoteCoercion co = case co of
    Refl _             -> mkNomReflCo ki1
    GRefl _ _ MRefl    -> mkNomReflCo ki1
    GRefl _ _ (MCo co) -> co

    _ | ki1 `eqType` ki2
      -> mkNomReflCo (typeKind ty1)
      -- No later branch should return refl
      -- The assert (False )s throughout
      -- are these cases explicitly, but they should never fire.

    TyConAppCo _ tc args
      | Just co' <- instCoercions (mkNomReflCo (tyConKind tc)) args
      -> co'
      | otherwise
      -> mkKindCo co

    AppCo co1 arg
      | Just co' <- instCoercion (coercionKind (mkKindCo co1))
                                 (promoteCoercion co1) arg
      -> co'
      | otherwise
      -> mkKindCo co

    ForAllCo { fco_tcv = tv, fco_body = g }
      | isTyVar tv
      -> promoteCoercion g

    ForAllCo {}
      -> assert False $
         -- (ForAllCo {} :: (forall cv.t1) ~ (forall cv.t2)
         -- The tyvar case is handled above, so the bound var is a
         -- a coercion variable. So both sides have kind Type
         -- (Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep).
         -- So the result is Refl, and that should have been caught by
         -- the first equation above. Hence `assert False`
         mkNomReflCo liftedTypeKind

    FunCo {} -> mkKindCo co
      -- We can get Type~Constraint or Constraint~Type
      -- from FunCo {} :: (a -> (b::Type)) ~ (a -=> (b'::Constraint))

    CoVarCo {} -> mkKindCo co
    HoleCo {}  -> mkKindCo co
    AxiomCo {} -> mkKindCo co

    UnivCo {} -> mkKindCo co
      -- We could instead return the (single) `uco_deps` coercion in
      -- the `ProofIrrelProv` and `PhantomProv` cases, but it doesn't
      -- quite seem worth doing.

    SymCo g
      -> mkSymCo (promoteCoercion g)

    TransCo co1 co2
      -> mkTransCo (promoteCoercion co1) (promoteCoercion co2)

    SelCo n co1
      | Just co' <- mkSelCo_maybe n co1
      -> promoteCoercion co'
      | otherwise
      -> mkKindCo co

    LRCo lr co1
      | Just (lco, rco) <- splitAppCo_maybe co1
      -> case lr of
           CLeft  -> promoteCoercion lco
           CRight -> promoteCoercion rco
      | otherwise
      -> mkKindCo co

    InstCo g _
      | isForAllTy_ty ty1
      -> assert (isForAllTy_ty ty2) $
         promoteCoercion g
      | otherwise
      -> assert False $
         mkNomReflCo liftedTypeKind
         -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep

    KindCo _
      -> assert False $
         -- See the first equation above
         mkNomReflCo liftedTypeKind

    SubCo g
      -> promoteCoercion g

  where
    Pair ty1 ty2 = coercionKind co
    ki1 = typeKind ty1
    ki2 = typeKind ty2

-- | say @g = promoteCoercion h@. Then, @instCoercion g w@ yields @Just g'@,
-- where @g' = promoteCoercion (h w)@.
-- fails if this is not possible, if @g@ coerces between a forall and an ->
-- or if second parameter has a representational role and can't be used
-- with an InstCo.
instCoercion :: Pair Type  -- g :: lty ~ rty
             -> CoercionN  -- ^ must be nominal
             -> Coercion
             -> Maybe CoercionN
instCoercion (Pair lty rty) g w
  | (isForAllTy_ty lty && isForAllTy_ty rty)
    || (isForAllTy_co lty && isForAllTy_co rty)
  , Just w' <- setNominalRole_maybe (coercionRole w) w
    -- g :: (forall t1. t2) ~ (forall t1. t3)
    -- w :: s1 ~ s2
    -- returns mkInstCo g w' :: t2 [t1 |-> s1 ] ~ t3 [t1 |-> s2]
  = Just $ mkInstCo g w'
  | isFunTy lty && isFunTy rty
    -- g :: (t1 -> t2) ~ (t3 -> t4)
    -- returns t2 ~ t4
  = Just $ mkSelCo (SelFun SelRes) g  -- extract result type
  | otherwise  -- one forall, one funty...
  = Nothing

-- | Repeated use of 'instCoercion'
instCoercions :: CoercionN -> [Coercion] -> Maybe CoercionN
instCoercions g ws
  = let arg_ty_pairs = map coercionKind ws in
    snd <$> foldM go (coercionKind g, g) (zip arg_ty_pairs ws)
  where
    -- Thread the (Pair Type) kind of the accumulated coercion through the fold
    go :: (Pair Type, Coercion) -> (Pair Type, Coercion)
       -> Maybe (Pair Type, Coercion)
    go (g_tys, g) (w_tys, w)
      = do { g' <- instCoercion g_tys g w
           ; return (piResultTy <$> g_tys <*> w_tys, g') }

-- | Creates a new coercion with both of its types casted by different casts
-- @castCoercionKind2 g r t1 t2 h1 h2@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h1) ~r (t2 |> h2)@.
-- @h1@ and @h2@ must be nominal.
castCoercionKind2 :: Coercion -> Role -> Type -> Type
                  -> CoercionN -> CoercionN -> Coercion
castCoercionKind2 g r t1 t2 h1 h2
  = mkCoherenceRightCo r t2 h2 (mkCoherenceLeftCo r t1 h1 g)

-- | @castCoercionKind1 g r t1 t2 h@ = @coercionKind g r t1 t2 h h@
-- That is, it's a specialised form of castCoercionKind, where the two
-- kind coercions are identical
-- @castCoercionKind1 g r t1 t2 h@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h) ~r (t2 |> h)@.
-- @h@ must be nominal.
-- See Note [castCoercionKind1]
castCoercionKind1 :: Coercion -> Role -> Type -> Type
                  -> CoercionN -> Coercion
castCoercionKind1 g r t1 t2 h
  = case g of
      Refl {} -> assert (r == Nominal) $  -- Refl is always Nominal
                 mkNomReflCo (mkCastTy t2 h)
      GRefl _ _ mco -> case mco of
        MRefl -> mkReflCo r (mkCastTy t2 h)
        MCo kind_co -> mkGReflMCo r (mkCastTy t1 h)
                                  (mkSymCo h `mkTransCo` kind_co `mkTransCo` h)
      _ -> castCoercionKind2 g r t1 t2 h h

-- | Creates a new coercion with both of its types casted by different casts
-- @castCoercionKind g h1 h2@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h1) ~r (t2 |> h2)@.
-- @h1@ and @h2@ must be nominal.
-- It calls @coercionKindRole@, so it's quite inefficient (which 'I' stands for)
-- Use @castCoercionKind2@ instead if @t1@, @t2@, and @r@ are known beforehand.
castCoercionKind :: Coercion -> CoercionN -> CoercionN -> Coercion
castCoercionKind g h1 h2
  = castCoercionKind2 g r t1 t2 h1 h2
  where (Pair t1 t2, r) = coercionKindRole g

mkPiCos :: Role -> [Var] -> Coercion -> Coercion
mkPiCos r vs co = foldr (mkPiCo r) co vs

-- | Make a forall 'Coercion', where both types related by the coercion
-- are quantified over the same variable.
mkPiCo :: Role -> Var -> Coercion -> Coercion
mkPiCo r v co
  | isTyVar v = mkHomoForAllCos [Bndr v coreTyLamForAllTyFlag] co
  | isCoVar v = assert (not (v `elemVarSet` tyCoVarsOfCo co)) $
                -- We didn't call mkForAllCo here because if v does not appear
                -- in co, the argument coercion will be nominal. But here we
                -- want it to be r. It is only called in 'mkPiCos', which is
                -- only used in GHC.Core.Opt.Simplify.Utils, where we are sure for
                -- now (Aug 2018) v won't occur in co.
                mkFunResCo r v co
  | otherwise = mkFunResCo r v co

mkFunResCo :: Role -> Id -> Coercion -> Coercion
-- Given res_co :: res1 ~ res2,
--   mkFunResCo r m arg res_co :: (arg -> res1) ~r (arg -> res2)
-- Reflexive in the multiplicity argument
mkFunResCo role id res_co
  = mkFunCoNoFTF role mult arg_co res_co
  where
    arg_co = mkReflCo role (varType id)
    mult   = multToCo (varMult id)

-- mkCoCast (c :: s1 ~?r t1) (g :: (s1 ~?r t1) ~#R (s2 ~?r t2)) :: s2 ~?r t2
-- The first coercion might be lifted or unlifted; thus the ~? above
-- Lifted and unlifted equalities take different numbers of arguments,
-- so we have to make sure to supply the right parameter to decomposeCo.
-- Also, note that the role of the first coercion is the same as the role of
-- the equalities related by the second coercion. The second coercion is
-- itself always representational.
mkCoCast :: Coercion -> CoercionR -> Coercion
mkCoCast c g
  | (g2:g1:_) <- reverse co_list
  = mkSymCo g1 `mkTransCo` c `mkTransCo` g2
  | otherwise
  = pprPanic "mkCoCast" (ppr g $$ ppr (coercionKind g))
  where
    -- g  :: (s1 ~# t1) ~# (s2 ~# t2)
    -- g1 :: s1 ~# s2
    -- g2 :: t1 ~# t2
    (tc, _) = splitTyConApp (coercionLKind g)
    co_list = decomposeCo (tyConArity tc) g (tyConRolesRepresentational tc)

{- Note [castCoercionKind1]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
castCoercionKind1 deals with the very important special case of
castCoercionKind2 where the two kind coercions are identical. In that
case we can exploit the situation where the main coercion is reflexive,
via the special cases for Refl and GRefl.

This is important when rewriting (ty |> co). We rewrite ty, yielding
   fco :: ty ~ ty'
and now we want a coercion xco between
   xco :: (ty |> co) ~ (ty' |> co)
That's exactly what castCoercionKind1 does. And it's very very common for
fco to be Refl. In that case we do NOT want to get some terrible
composition of mkLeftCoherenceCo and mkRightCoherenceCo, which is what
castCoercionKind2 has to do in its full generality. See #18413.
-}

{-
%************************************************************************
%*                                                                      *
                   Newtypes
%*                                                                      *
%************************************************************************
-}

-- | If `instNewTyCon_maybe T ts = Just (rep_ty, co)`
--   then `co :: T ts ~R# rep_ty`
--
-- Checks for a newtype, and for being saturated
instNewTyCon_maybe :: TyCon -> [Type] -> Maybe (Type, Coercion)
instNewTyCon_maybe tc tys
  | Just (tvs, ty, co_tc) <- unwrapNewTyConEtad_maybe tc  -- Check for newtype
  , tvs `leLength` tys                                    -- Check saturated enough
  = Just (applyTysX tvs ty tys, mkUnbranchedAxInstCo Representational co_tc tys [])
  | otherwise
  = Nothing

{-
************************************************************************
*                                                                      *
         Type normalisation
*                                                                      *
************************************************************************
-}

-- | A function to check if we can reduce a type by one step. Used
-- with 'topNormaliseTypeX'.
type NormaliseStepper ev = RecTcChecker
                         -> TyCon     -- tc
                         -> [Type]    -- tys
                         -> NormaliseStepResult ev

-- | The result of stepping in a normalisation function.
-- See 'topNormaliseTypeX'.
data NormaliseStepResult ev
  = NS_Done   -- ^ Nothing more to do
  | NS_Abort  -- ^ Utter failure. The outer function should fail too.
  | NS_Step RecTcChecker Type ev    -- ^ We stepped, yielding new bits;
                                    -- ^ ev is evidence;
                                    -- Usually a co :: old type ~ new type
  deriving (Functor)

instance Outputable ev => Outputable (NormaliseStepResult ev) where
  ppr NS_Done           = text "NS_Done"
  ppr NS_Abort          = text "NS_Abort"
  ppr (NS_Step _ ty ev) = sep [text "NS_Step", ppr ty, ppr ev]

-- | Try one stepper and then try the next, if the first doesn't make
-- progress.
-- So if it returns NS_Done, it means that both steppers are satisfied
composeSteppers :: NormaliseStepper ev -> NormaliseStepper ev
                -> NormaliseStepper ev
composeSteppers step1 step2 rec_nts tc tys
  = case step1 rec_nts tc tys of
      success@(NS_Step {}) -> success
      NS_Done              -> step2 rec_nts tc tys
      NS_Abort             -> NS_Abort  -- Failure in step1 short-circuits step2

-- | A 'NormaliseStepper' that unwraps newtypes, careful not to fall into
-- a loop. If it would fall into a loop, it produces 'NS_Abort'.
unwrapNewTypeStepper :: NormaliseStepper Coercion
unwrapNewTypeStepper rec_nts tc tys
  | Just (ty', co) <- instNewTyCon_maybe tc tys
  = -- pprTrace "unNS" (ppr tc <+> ppr (getUnique tc) <+> ppr tys $$ ppr ty' $$ ppr rec_nts) $
    case checkRecTc rec_nts tc of
      Just rec_nts' -> NS_Step rec_nts' ty' co
      Nothing       -> NS_Abort
  | otherwise
  = NS_Done

-- | A general function for normalising the top-level of a type. It continues
-- to use the provided 'NormaliseStepper' until that function fails, and then
-- this function returns. The roles of the coercions produced by the
-- 'NormaliseStepper' must all be the same, which is the role returned from
-- the call to 'topNormaliseTypeX'.
--
-- Typically ev is Coercion.
--
-- If topNormaliseTypeX step plus ty = Just (ev, ty')
-- then ty ~ev1~ t1 ~ev2~ t2 ... ~evn~ ty'
-- and ev = ev1 `plus` ev2 `plus` ... `plus` evn
-- If it returns Nothing then no newtype unwrapping could happen
topNormaliseTypeX :: NormaliseStepper ev -> (ev -> ev -> ev)
                  -> Type -> Maybe (ev, Type)
topNormaliseTypeX stepper plus ty
  | Just (tc, tys) <- splitTyConApp_maybe ty
    -- SPJ: The default threshold for initRecTc is 100 which is extremely dangerous
    -- for certain type synonyms, we should think about reducing it (see #20990)
  , NS_Step rec_nts ty' ev <- stepper initRecTc tc tys
  = go rec_nts ev ty'
  | otherwise
  = Nothing
  where
    -- Keep stepping, accumulating evidence with `plus`, until NS_Done/NS_Abort
    go rec_nts ev ty
      | Just (tc, tys) <- splitTyConApp_maybe ty
      = case stepper rec_nts tc tys of
          NS_Step rec_nts' ty' ev' -> go rec_nts' (ev `plus` ev') ty'
          NS_Done  -> Just (ev, ty)
          NS_Abort -> Nothing
      | otherwise
      = Just (ev, ty)

topNormaliseNewType_maybe :: Type -> Maybe (Coercion, Type)
-- ^ Sometimes we want to look through a @newtype@ and get its associated coercion.
-- This function strips off @newtype@ layers enough to reveal something that isn't
-- a @newtype@. Specifically, here's the invariant:
--
-- > topNormaliseNewType_maybe rec_nts ty = Just (co, ty')
--
-- then (a) @co : ty ~R ty'@.
--      (b) ty' is not a newtype.
--
-- The function returns @Nothing@ for non-@newtypes@,
-- or unsaturated applications
--
-- This function does *not* look through type families, because it has no access to
-- the type family environment. If you do have that at hand, consider to use
-- topNormaliseType_maybe, which should be a drop-in replacement for
-- topNormaliseNewType_maybe
-- If topNormliseNewType_maybe ty = Just (co, ty'), then co : ty ~R ty'
topNormaliseNewType_maybe ty
  = topNormaliseTypeX unwrapNewTypeStepper mkTransCo ty

{-
%************************************************************************
%*                                                                      *
                   Comparison of coercions
%*                                                                      *
%************************************************************************
-}

-- | Syntactic equality of coercions
eqCoercion :: Coercion -> Coercion -> Bool
eqCoercion = eqType `on` coercionType

-- | Compare two 'Coercion's, with respect to an RnEnv2
eqCoercionX :: RnEnv2 -> Coercion -> Coercion -> Bool
eqCoercionX env = eqTypeX env `on` coercionType

{-
%************************************************************************
%*                                                                      *
                   "Lifting" substitution
           [(TyCoVar,Coercion)] -> Type -> Coercion
%*                                                                      *
%************************************************************************

Note [Lifting coercions over types: liftCoSubst]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The KPUSH rule deals with this situation
   data T a = K (a -> Maybe a)
   g :: T t1 ~ T t2
   x :: t1 -> Maybe t1

   case (K @t1 x) |> g of
     K (y:t2 -> Maybe t2) -> rhs

We want to push the coercion inside the constructor application.
So we do this

   g' :: t1~t2  =  SelCo (SelTyCon 0) g

   case K @t2 (x |> g' -> Maybe g') of
     K (y:t2 -> Maybe t2) -> rhs

The crucial operation is that we
  * take the type of K's argument: a -> Maybe a
  * and substitute g' for a
thus giving *coercion*. This is what liftCoSubst does.

In the presence of kind coercions, this is a bit of a hairy operation. So,
we refer you to the paper introducing kind coercions, available at
www.cis.upenn.edu/~sweirich/papers/fckinds-extended.pdf

Note [extendLiftingContextEx]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider we have datatype
  K :: /\k. /\a::k.
P -> T k -- P be some type g :: T k1 ~ T k2 case (K @k1 @t1 x) |> g of K y -> rhs We want to push the coercion inside the constructor application. We first get the coercion mapped by the universal type variable k: lc = k |-> SelCo (SelTyCon 0) g :: k1~k2 Here, the important point is that the kind of a is coerced, and P might be dependent on the existential type variable a. Thus we first get the coercion of a's kind g2 = liftCoSubst lc k :: k1 ~ k2 Then we store a new mapping into the lifting context lc2 = a |-> (t1 ~ t1 |> g2), lc So later when we can correctly deal with the argument type P liftCoSubst lc2 P :: P [k|->k1][a|->t1] ~ P[k|->k2][a |-> (t1|>g2)] This is exactly what extendLiftingContextEx does. * For each (tyvar:k, ty) pair, we product the mapping tyvar |-> (ty ~ ty |> (liftCoSubst lc k)) * For each (covar:s1~s2, ty) pair, we produce the mapping covar |-> (co ~ co') co' = Sym (liftCoSubst lc s1) ;; covar ;; liftCoSubst lc s2 :: s1'~s2' This follows the lifting context extension definition in the "FC with Explicit Kind Equality" paper. -} -- ---------------------------------------------------- -- See Note [Lifting coercions over types: liftCoSubst] -- ---------------------------------------------------- data LiftingContext = LC Subst LiftCoEnv -- in optCoercion, we need to lift when optimizing InstCo. -- See Note [Optimising InstCo] in GHC.Core.Coercion.Opt -- We thus propagate the substitution from GHC.Core.Coercion.Opt here. instance Outputable LiftingContext where ppr (LC _ env) = hang (text "LiftingContext:") 2 (ppr env) type LiftCoEnv = VarEnv Coercion -- Maps *type variables* to *coercions*. -- That's the whole point of this function! -- Also maps coercion variables to ProofIrrelCos. 
-- like liftCoSubstWith, but allows for existentially-bound types as well liftCoSubstWithEx :: Role -- desired role for output coercion -> [TyVar] -- universally quantified tyvars -> [Coercion] -- coercions to substitute for those -> [TyCoVar] -- existentially quantified tycovars -> [Type] -- types and coercions to be bound to ex vars -> (Type -> Coercion, [Type]) -- (lifting function, converted ex args) liftCoSubstWithEx role univs omegas exs rhos = let theta = mkLiftingContext (zipEqual "liftCoSubstWithExU" univs omegas) psi = extendLiftingContextEx theta (zipEqual "liftCoSubstWithExX" exs rhos) in (ty_co_subst psi role, substTys (lcSubstRight psi) (mkTyCoVarTys exs)) liftCoSubstWith :: Role -> [TyCoVar] -> [Coercion] -> Type -> Coercion liftCoSubstWith r tvs cos ty = liftCoSubst r (mkLiftingContext $ zipEqual "liftCoSubstWith" tvs cos) ty -- | @liftCoSubst role lc ty@ produces a coercion (at role @role@) -- that coerces between @lc_left(ty)@ and @lc_right(ty)@, where -- @lc_left@ is a substitution mapping type variables to the left-hand -- types of the mapped coercions in @lc@, and similar for @lc_right@. liftCoSubst :: HasDebugCallStack => Role -> LiftingContext -> Type -> Coercion {-# INLINE liftCoSubst #-} -- Inlining this function is worth 2% of allocation in T9872d, liftCoSubst r lc@(LC subst env) ty | isEmptyVarEnv env = mkReflCo r (substTy subst ty) | otherwise = ty_co_subst lc r ty emptyLiftingContext :: InScopeSet -> LiftingContext emptyLiftingContext in_scope = LC (mkEmptySubst in_scope) emptyVarEnv mkLiftingContext :: [(TyCoVar,Coercion)] -> LiftingContext mkLiftingContext pairs = LC (mkEmptySubst $ mkInScopeSet $ tyCoVarsOfCos (map snd pairs)) (mkVarEnv pairs) mkSubstLiftingContext :: Subst -> LiftingContext mkSubstLiftingContext subst = LC subst emptyVarEnv liftingContextSubst :: LiftingContext -> Subst liftingContextSubst (LC subst _) = subst -- | Extend a lifting context with a new mapping. 
extendLiftingContext :: LiftingContext -- ^ original LC -> TyCoVar -- ^ new variable to map... -> Coercion -- ^ ...to this lifted version -> LiftingContext -- mappings to reflexive coercions are just substitutions extendLiftingContext (LC subst env) tv arg | Just (ty, _) <- isReflCo_maybe arg = LC (extendTCvSubst subst tv ty) env | otherwise = LC subst (extendVarEnv env tv arg) -- | Extend the substitution component of a lifting context with -- a new binding for a coercion variable. Used during coercion optimisation. extendLiftingContextCvSubst :: LiftingContext -> CoVar -> Coercion -> LiftingContext extendLiftingContextCvSubst (LC subst env) cv co = LC (extendCvSubst subst cv co) env -- | Extend a lifting context with a new mapping, and extend the in-scope set extendLiftingContextAndInScope :: LiftingContext -- ^ Original LC -> TyCoVar -- ^ new variable to map... -> Coercion -- ^ to this coercion -> LiftingContext extendLiftingContextAndInScope (LC subst env) tv co = extendLiftingContext (LC (extendSubstInScopeSet subst (tyCoVarsOfCo co)) env) tv co -- | Extend a lifting context with existential-variable bindings. -- See Note [extendLiftingContextEx] extendLiftingContextEx :: LiftingContext -- ^ original lifting context -> [(TyCoVar,Type)] -- ^ ex. var / value pairs -> LiftingContext -- Note that this is more involved than extendLiftingContext. That function -- takes a coercion to extend with, so it's assumed that the caller has taken -- into account any of the kind-changing stuff worried about here. extendLiftingContextEx lc [] = lc extendLiftingContextEx lc@(LC subst env) ((v,ty):rest) -- This function adds bindings for *Nominal* coercions. Why? Because it -- works with existentially bound variables, which are considered to have -- nominal roles. 
| isTyVar v = let lc' = LC (subst `extendSubstInScopeSet` tyCoVarsOfType ty) (extendVarEnv env v $ mkGReflRightCo Nominal ty (ty_co_subst lc Nominal (tyVarKind v))) in extendLiftingContextEx lc' rest | CoercionTy co <- ty = -- co :: s1 ~r s2 -- lift_s1 :: s1 ~r s1' -- lift_s2 :: s2 ~r s2' -- kco :: (s1 ~r s2) ~N (s1' ~r s2') assert (isCoVar v) $ let (s1, s2, r) = coVarTypesRole v lift_s1 = ty_co_subst lc r s1 lift_s2 = ty_co_subst lc r s2 kco = mkTyConAppCo Nominal (equalityTyCon r) [ mkKindCo lift_s1, mkKindCo lift_s2 , lift_s1 , lift_s2 ] lc' = LC (subst `extendSubstInScopeSet` tyCoVarsOfCo co) (extendVarEnv env v (mkProofIrrelCo Nominal kco co $ (mkSymCo lift_s1) `mkTransCo` co `mkTransCo` lift_s2)) in extendLiftingContextEx lc' rest | otherwise = pprPanic "extendLiftingContextEx" (ppr v <+> text "|->" <+> ppr ty) -- | Erase the environments in a lifting context zapLiftingContext :: LiftingContext -> LiftingContext zapLiftingContext (LC subst _) = LC (zapSubst subst) emptyVarEnv -- | Like 'substForAllCoBndr', but works on a lifting context substForAllCoBndrUsingLC :: SwapFlag -> (Coercion -> Coercion) -> LiftingContext -> TyCoVar -> Coercion -> (LiftingContext, TyCoVar, Coercion) substForAllCoBndrUsingLC sym sco (LC subst lc_env) tv co = (LC subst' lc_env, tv', co') where (subst', tv', co') = substForAllCoBndrUsing sym sco subst tv co -- | The \"lifting\" operation which substitutes coercions for type -- variables in a type to produce a coercion. -- -- For the inverse operation, see 'liftCoMatch' ty_co_subst :: LiftingContext -> Role -> Type -> Coercion ty_co_subst !lc role ty -- !lc: making this function strict in lc allows callers to -- pass its two components separately, rather than boxing them. -- Unfortunately, Boxity Analysis concludes that we need lc boxed -- because it's used that way in liftCoSubstTyVarBndrUsing. 
= go role ty where go :: Role -> Type -> Coercion go r ty | Just ty' <- coreView ty = go r ty' go Phantom ty = lift_phantom ty go r (TyVarTy tv) = expectJust "ty_co_subst bad roles" $ liftCoSubstTyVar lc r tv go r (AppTy ty1 ty2) = mkAppCo (go r ty1) (go Nominal ty2) go r (TyConApp tc tys) = mkTyConAppCo r tc (zipWith go (tyConRoleListX r tc) tys) go r (FunTy af w t1 t2) = mkFunCo r af (go Nominal w) (go r t1) (go r t2) go r t@(ForAllTy (Bndr v vis) ty) = let (lc', v', h) = liftCoSubstVarBndr lc v body_co = ty_co_subst lc' r ty in if isTyVar v' || almostDevoidCoVarOfCo v' body_co -- Lifting a ForAllTy over a coercion variable could fail as ForAllCo -- imposes an extra restriction on where a covar can appear. See -- (FC6) of Note [ForAllCo] in GHC.Tc.TyCo.Rep -- We specifically check for this and panic because we know that -- there's a hole in the type system here (see (FC6), and we'd rather -- panic than fall into it. then mkForAllCo v' vis vis h body_co else pprPanic "ty_co_subst: covar is not almost devoid" (ppr t) go r ty@(LitTy {}) = assert (r == Nominal) $ mkNomReflCo ty go r (CastTy ty co) = castCoercionKind (go r ty) (substLeftCo lc co) (substRightCo lc co) go r (CoercionTy co) = mkProofIrrelCo r kco (substLeftCo lc co) (substRightCo lc co) where kco = go Nominal (coercionType co) lift_phantom ty = mkPhantomCo (go Nominal (typeKind ty)) (substTy (lcSubstLeft lc) ty) (substTy (lcSubstRight lc) ty) {- Note [liftCoSubstTyVar] ~~~~~~~~~~~~~~~~~~~~~~~~~ This function can fail if a coercion in the environment is of too low a role. liftCoSubstTyVar is called from two places: in liftCoSubst (naturally), and also in matchAxiom in GHC.Core.Coercion.Opt. From liftCoSubst, the so-called lifting lemma guarantees that the roles work out. If we fail in this case, we really should panic -- something is deeply wrong. But, in matchAxiom, failing is fine. matchAxiom is trying to find a set of coercions that match, but it may fail, and this is healthy behavior. 
-} -- See Note [liftCoSubstTyVar] liftCoSubstTyVar :: LiftingContext -> Role -> TyVar -> Maybe Coercion liftCoSubstTyVar (LC subst env) r v | Just co_arg <- lookupVarEnv env v = downgradeRole_maybe r (coercionRole co_arg) co_arg | otherwise = Just $ mkReflCo r (substTyVar subst v) {- Note [liftCoSubstVarBndr] ~~~~~~~~~~~~~~~~~~~~~~~~~ callback: 'liftCoSubstVarBndrUsing' needs to be general enough to work in two situations: - in this module, which manipulates 'Coercion's, and - in GHC.Core.FamInstEnv, where we work with 'Reduction's, which contain a coercion as well as a type. To achieve this, we require that the return type of the 'callback' function contain a coercion within it. This is witnessed by the first argument to 'liftCoSubstVarBndrUsing': a getter, which allows us to retrieve the coercion inside the return type. Thus: - in this module, we simply pass 'id' as the getter, - in GHC.Core.FamInstEnv, we pass 'reductionCoercion' as the getter. liftCoSubstTyVarBndrUsing: Given forall tv:k. t We want to get forall (tv:k1) (kind_co :: k1 ~ k2) body_co We lift the kind k to get the kind_co kind_co = ty_co_subst k :: k1 ~ k2 Now in the LiftingContext, we add the new mapping tv |-> (tv :: k1) ~ ((tv |> kind_co) :: k2) liftCoSubstCoVarBndrUsing: Given forall cv:(s1 ~ s2). 
t We want to get forall (cv:s1'~s2') (kind_co :: (s1'~s2') ~ (t1 ~ t2)) body_co We lift s1 and s2 respectively to get eta1 :: s1' ~ t1 eta2 :: s2' ~ t2 And kind_co = TyConAppCo Nominal (~#) eta1 eta2 Now in the liftingContext, we add the new mapping cv |-> (cv :: s1' ~ s2') ~ ((sym eta1;cv;eta2) :: t1 ~ t2) -} -- See Note [liftCoSubstVarBndr] liftCoSubstVarBndr :: LiftingContext -> TyCoVar -> (LiftingContext, TyCoVar, Coercion) liftCoSubstVarBndr lc tv = liftCoSubstVarBndrUsing id callback lc tv where callback lc' ty' = ty_co_subst lc' Nominal ty' -- the callback must produce a nominal coercion liftCoSubstVarBndrUsing :: (r -> CoercionN) -- ^ coercion getter -> (LiftingContext -> Type -> r) -- ^ callback -> LiftingContext -> TyCoVar -> (LiftingContext, TyCoVar, r) liftCoSubstVarBndrUsing view_co fun lc old_var | isTyVar old_var = liftCoSubstTyVarBndrUsing view_co fun lc old_var | otherwise = liftCoSubstCoVarBndrUsing view_co fun lc old_var -- Works for tyvar binder liftCoSubstTyVarBndrUsing :: (r -> CoercionN) -- ^ coercion getter -> (LiftingContext -> Type -> r) -- ^ callback -> LiftingContext -> TyVar -> (LiftingContext, TyVar, r) liftCoSubstTyVarBndrUsing view_co fun lc@(LC subst cenv) old_var = assert (isTyVar old_var) $ ( LC (subst `extendSubstInScope` new_var) new_cenv , new_var, stuff ) where old_kind = tyVarKind old_var stuff = fun lc old_kind eta = view_co stuff k1 = coercionLKind eta new_var = uniqAway (getSubstInScope subst) (setVarType old_var k1) lifted = mkGReflRightCo Nominal (TyVarTy new_var) eta -- :: new_var ~ new_var |> eta new_cenv = extendVarEnv cenv old_var lifted -- Works for covar binder liftCoSubstCoVarBndrUsing :: (r -> CoercionN) -- ^ coercion getter -> (LiftingContext -> Type -> r) -- ^ callback -> LiftingContext -> CoVar -> (LiftingContext, CoVar, r) liftCoSubstCoVarBndrUsing view_co fun lc@(LC subst cenv) old_var = assert (isCoVar old_var) $ ( LC (subst `extendSubstInScope` new_var) new_cenv , new_var, stuff ) where old_kind = 
coVarKind old_var stuff = fun lc old_kind eta = view_co stuff k1 = coercionLKind eta new_var = uniqAway (getSubstInScope subst) (setVarType old_var k1) -- old_var :: s1 ~r s2 -- eta :: (s1' ~r s2') ~N (t1 ~r t2) -- eta1 :: s1' ~r t1 -- eta2 :: s2' ~r t2 -- co1 :: s1' ~r s2' -- co2 :: t1 ~r t2 -- lifted :: co1 ~N co2 role = coVarRole old_var eta' = downgradeRole role Nominal eta eta1 = mkSelCo (SelTyCon 2 role) eta' eta2 = mkSelCo (SelTyCon 3 role) eta' co1 = mkCoVarCo new_var co2 = mkSymCo eta1 `mkTransCo` co1 `mkTransCo` eta2 lifted = mkProofIrrelCo Nominal eta co1 co2 new_cenv = extendVarEnv cenv old_var lifted -- | Is a var in the domain of a lifting context? isMappedByLC :: TyCoVar -> LiftingContext -> Bool isMappedByLC tv (LC _ env) = tv `elemVarEnv` env -- If [a |-> g] is in the substitution and g :: t1 ~ t2, substitute a for t1 -- If [a |-> (g1, g2)] is in the substitution, substitute a for g1 substLeftCo :: LiftingContext -> Coercion -> Coercion substLeftCo lc co = substCo (lcSubstLeft lc) co -- Ditto, but for t2 and g2 substRightCo :: LiftingContext -> Coercion -> Coercion substRightCo lc co = substCo (lcSubstRight lc) co -- | Apply "sym" to all coercions in a 'LiftCoEnv' swapLiftCoEnv :: LiftCoEnv -> LiftCoEnv swapLiftCoEnv = mapVarEnv mkSymCo lcSubstLeft :: LiftingContext -> Subst lcSubstLeft (LC subst lc_env) = liftEnvSubstLeft subst lc_env lcSubstRight :: LiftingContext -> Subst lcSubstRight (LC subst lc_env) = liftEnvSubstRight subst lc_env liftEnvSubstLeft :: Subst -> LiftCoEnv -> Subst liftEnvSubstLeft = liftEnvSubst pFst liftEnvSubstRight :: Subst -> LiftCoEnv -> Subst liftEnvSubstRight = liftEnvSubst pSnd liftEnvSubst :: (forall a. 
Pair a -> a) -> Subst -> LiftCoEnv -> Subst liftEnvSubst selector subst lc_env = composeTCvSubst (Subst in_scope emptyIdSubstEnv tenv cenv) subst where pairs = nonDetUFMToList lc_env -- It's OK to use nonDetUFMToList here because we -- immediately forget the ordering by creating -- a VarEnv (tpairs, cpairs) = partitionWith ty_or_co pairs -- Make sure the in-scope set is wide enough to cover the range of the -- substitution (#22235). in_scope = mkInScopeSet $ tyCoVarsOfTypes (map snd tpairs) `unionVarSet` tyCoVarsOfCos (map snd cpairs) tenv = mkVarEnv_Directly tpairs cenv = mkVarEnv_Directly cpairs ty_or_co :: (Unique, Coercion) -> Either (Unique, Type) (Unique, Coercion) ty_or_co (u, co) | Just equality_co <- isCoercionTy_maybe equality_ty = Right (u, equality_co) | otherwise = Left (u, equality_ty) where equality_ty = selector (coercionKind co) -- | Lookup a 'CoVar' in the substitution in a 'LiftingContext' lcLookupCoVar :: LiftingContext -> CoVar -> Maybe Coercion lcLookupCoVar (LC subst _) cv = lookupCoVar subst cv -- | Get the 'InScopeSet' from a 'LiftingContext' lcInScopeSet :: LiftingContext -> InScopeSet lcInScopeSet (LC subst _) = getSubstInScope subst {- %************************************************************************ %* * Sequencing on coercions %* * %************************************************************************ -} seqMCo :: MCoercion -> () seqMCo MRefl = () seqMCo (MCo co) = seqCo co seqCo :: Coercion -> () seqCo (Refl ty) = seqType ty seqCo (GRefl r ty mco) = r `seq` seqType ty `seq` seqMCo mco seqCo (TyConAppCo r tc cos) = r `seq` tc `seq` seqCos cos seqCo (AppCo co1 co2) = seqCo co1 `seq` seqCo co2 seqCo (CoVarCo cv) = cv `seq` () seqCo (HoleCo h) = coHoleCoVar h `seq` () seqCo (SymCo co) = seqCo co seqCo (TransCo co1 co2) = seqCo co1 `seq` seqCo co2 seqCo (SelCo n co) = n `seq` seqCo co seqCo (LRCo lr co) = lr `seq` seqCo co seqCo (InstCo co arg) = seqCo co `seq` seqCo arg seqCo (KindCo co) = seqCo co seqCo (SubCo co) = seqCo co 
seqCo (AxiomCo _ cs) = seqCos cs seqCo (ForAllCo tv visL visR k co) = seqType (varType tv) `seq` rnf visL `seq` rnf visR `seq` seqCo k `seq` seqCo co seqCo (FunCo r af1 af2 w co1 co2) = r `seq` af1 `seq` af2 `seq` seqCo w `seq` seqCo co1 `seq` seqCo co2 seqCo (UnivCo { uco_prov = p, uco_role = r , uco_lty = t1, uco_rty = t2, uco_deps = deps }) = p `seq` r `seq` seqType t1 `seq` seqType t2 `seq` seqCos deps seqCos :: [Coercion] -> () seqCos [] = () seqCos (co:cos) = seqCo co `seq` seqCos cos {- %************************************************************************ %* * The kind of a type, and of a coercion %* * %************************************************************************ -} {- Note [coercionKind performance] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ coercionKind, coercionLKind, and coercionRKind are very "hot" functions; in some coercion-heavy programs they can have a material effect on compile time/allocation. Hence * Rather than making one function which returns a pair (lots of allocation and de-allocation) we have two functions, coercionLKind and coercionRKind, which return the left and right kind respectively. * Both are defined by a single worker function `coercion_lr_kind`, which takes a flag of type `LeftOrRight`. This worker function is marked INLINE, and inlined at its precisely-two call-sites in coercionLKind and coercionRKind. Take care when making changes here... it's easy to accidentally add allocation! -} -- | Apply 'coercionKind' to multiple 'Coercion's coercionKinds :: [Coercion] -> Pair [Type] coercionKinds tys = sequenceA $ map coercionKind tys -- | Get a coercion's kind and role. coercionKindRole :: Coercion -> (Pair Type, Role) coercionKindRole co = (coercionKind co, coercionRole co) coercionType :: Coercion -> Type coercionType co = case coercionKindRole co of (Pair ty1 ty2, r) -> mkCoercionType r ty1 ty2 ------------------ -- | If it is the case that -- -- > c :: (t1 ~ t2) -- -- i.e. 
the kind of @c@ relates @t1@ and @t2@, then @coercionKind c = Pair t1 t2@. coercionKind :: HasDebugCallStack => Coercion -> Pair Type -- See Note [coercionKind performance] coercionKind co = Pair (coercionLKind co) (coercionRKind co) coercionLKind, coercionRKind :: HasDebugCallStack => Coercion -> Type -- See Note [coercionKind performance] coercionLKind co = coercion_lr_kind CLeft co coercionRKind co = coercion_lr_kind CRight co coercion_lr_kind :: HasDebugCallStack => LeftOrRight -> Coercion -> Type {-# INLINE coercion_lr_kind #-} -- See Note [coercionKind performance] coercion_lr_kind which orig_co = go orig_co where go (Refl ty) = ty go (GRefl _ ty MRefl) = ty go (GRefl _ ty (MCo co1)) = pickLR which (ty, mkCastTy ty co1) go (TyConAppCo _ tc cos) = mkTyConApp tc (map go cos) go (AppCo co1 co2) = mkAppTy (go co1) (go co2) go (CoVarCo cv) = go_covar cv go (HoleCo h) = go_covar (coHoleCoVar h) go (SymCo co) = pickLR which (coercionRKind co, coercionLKind co) go (TransCo co1 co2) = pickLR which (go co1, go co2) go (LRCo lr co) = pickLR lr (splitAppTy (go co)) go (InstCo aco arg) = go_app aco [go arg] go (KindCo co) = typeKind (go co) go (SubCo co) = go co go (SelCo d co) = selectFromType d (go co) go (AxiomCo ax cos) = go_ax ax cos go (UnivCo { uco_lty = lty, uco_rty = rty}) = pickLR which (lty, rty) go (FunCo { fco_afl = afl, fco_afr = afr, fco_mult = mult , fco_arg = arg, fco_res = res}) = -- See Note [FunCo] FunTy { ft_af = pickLR which (afl, afr), ft_mult = go mult , ft_arg = go arg, ft_res = go res } go co@(ForAllCo { fco_tcv = tv1, fco_visL = visL, fco_visR = visR , fco_kind = k_co, fco_body = co1 }) = case which of CLeft -> mkTyCoForAllTy tv1 visL (go co1) CRight | isGReflCo k_co -- kind_co always has kind `Type`, thus `isGReflCo` -> mkTyCoForAllTy tv1 visR (go co1) | otherwise -> go_forall_right empty_subst co where empty_subst = mkEmptySubst (mkInScopeSet $ tyCoVarsOfCo co) ------------- go_covar cv = pickLR which (coVarLType cv, coVarRType cv) 
------------- go_app :: Coercion -> [Type] -> Type -- Collect up all the arguments and apply all at once -- See Note [Nested InstCos] go_app (InstCo co arg) args = go_app co (go arg:args) go_app co args = piResultTys (go co) args ------------- go_ax axr@(BuiltInFamRew bif) cos = check_bif_res axr (bifrw_proves bif (map coercionKind cos)) go_ax axr@(BuiltInFamInj bif) [co] = check_bif_res axr (bifinj_proves bif (coercionKind co)) go_ax axr@(BuiltInFamInj {}) _ = crash axr go_ax (UnbranchedAxiom ax) cos = go_branch ax (coAxiomSingleBranch ax) cos go_ax (BranchedAxiom ax i) cos = go_branch ax (coAxiomNthBranch ax i) cos ------------- check_bif_res _ (Just (Pair lhs rhs)) = pickLR which (lhs,rhs) check_bif_res axr Nothing = crash axr crash :: CoAxiomRule -> Type crash axr = pprPanic "coercionKind" (ppr axr) ------------- go_branch :: CoAxiom br -> CoAxBranch -> [Coercion] -> Type go_branch ax (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_lhs = lhs_tys, cab_rhs = rhs_ty }) cos = assert (cos `equalLength` tcvs) $ -- Invariant of AxiomRuleCo: cos should -- exactly saturate the axiom branch let (tys1, cotys1) = splitAtList tvs tys cos1 = map stripCoercionTy cotys1 in -- You might think to use -- substTy (zipTCvSubst tcvs ltys) (pickLR ...) -- but #25066 makes it much less efficient than the silly calls below substTyWith tvs tys1 $ substTyWithCoVars cvs cos1 $ pickLR which (mkTyConApp tc lhs_tys, rhs_ty) where tc = coAxiomTyCon ax tcvs | null cvs = tvs -- Very common case (currently always!) 
| otherwise = tvs ++ cvs tys = map go cos ------------- go_forall_right subst (ForAllCo { fco_tcv = tv1, fco_visR = visR , fco_kind = k_co, fco_body = co }) -- See Note [Nested ForAllCos] | isTyVar tv1 = mkForAllTy (Bndr tv2 visR) (go_forall_right subst' co) where k2 = coercionRKind k_co tv2 = setTyVarKind tv1 (substTy subst k2) subst' | isGReflCo k_co = extendSubstInScope subst tv1 -- kind_co always has kind @Type@, thus @isGReflCo@ | otherwise = extendTvSubst (extendSubstInScope subst tv2) tv1 $ TyVarTy tv2 `mkCastTy` mkSymCo k_co go_forall_right subst (ForAllCo { fco_tcv = cv1, fco_visR = visR , fco_kind = k_co, fco_body = co }) | isCoVar cv1 = mkTyCoForAllTy cv2 visR (go_forall_right subst' co) where k2 = coercionRKind k_co r = coVarRole cv1 k_co' = downgradeRole r Nominal k_co eta1 = mkSelCo (SelTyCon 2 r) k_co' eta2 = mkSelCo (SelTyCon 3 r) k_co' -- k_co :: (t1 ~r t2) ~N (s1 ~r s2) -- k1 = t1 ~r t2 -- k2 = s1 ~r s2 -- cv1 :: t1 ~r t2 -- cv2 :: s1 ~r s2 -- eta1 :: t1 ~r s1 -- eta2 :: t2 ~r s2 -- n_subst = (eta1 ; cv2 ; sym eta2) :: t1 ~r t2 cv2 = setVarType cv1 (substTy subst k2) n_subst = eta1 `mkTransCo` (mkCoVarCo cv2) `mkTransCo` (mkSymCo eta2) subst' | isReflCo k_co = extendSubstInScope subst cv1 | otherwise = extendCvSubst (extendSubstInScope subst cv2) cv1 n_subst go_forall_right subst other_co -- when other_co is not a ForAllCo = substTy subst (go other_co) {- Note [Nested ForAllCos] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we need `coercionKind (ForAllCo a1 (ForAllCo a2 ... (ForAllCo an co)...) )`. We do not want to perform `n` single-type-variable substitutions over the kind of `co`; rather we want to do one substitution which substitutes for all of `a1`, `a2` ... simultaneously. If we do one at a time we get the performance hole reported in #11735. Solution: gather up the type variables for nested `ForAllCos`, and substitute for them all at once. Remarkably, for #11735 this single change reduces /total/ compile time by a factor of more than ten. 
Note [Nested InstCos] ~~~~~~~~~~~~~~~~~~~~~ In #5631 we found that 70% of the entire compilation time was being spent in coercionKind! The reason was that we had (g @ ty1 @ ty2 .. @ ty100) -- The "@s" are InstCos where g :: forall a1 a2 .. a100. phi If we deal with the InstCos one at a time, we'll do this: 1. Find the kind of (g @ ty1 .. @ ty99) : forall a100. phi' 2. Substitute phi'[ ty100/a100 ], a single tyvar->type subst But this is a *quadratic* algorithm, and the blew up #5631. So it's very important to do the substitution simultaneously; cf Type.piResultTys (which in fact we call here). -} -- | Retrieve the role from a coercion. coercionRole :: Coercion -> Role coercionRole = go where go (Refl _) = Nominal go (GRefl r _ _) = r go (TyConAppCo r _ _) = r go (AppCo co1 _) = go co1 go (ForAllCo { fco_body = co }) = go co go (FunCo { fco_role = r }) = r go (CoVarCo cv) = coVarRole cv go (HoleCo h) = coVarRole (coHoleCoVar h) go (UnivCo { uco_role = r }) = r go (SymCo co) = go co go (TransCo co1 _co2) = go co1 go (SelCo cs co) = mkSelCoResRole cs (coercionRole co) go (LRCo {}) = Nominal go (InstCo co _) = go co go (KindCo {}) = Nominal go (SubCo _) = Representational go (AxiomCo ax _) = coAxiomRuleRole ax -- | Makes a coercion type from two types: the types whose equality -- is proven by the relevant 'Coercion' mkCoercionType :: Role -> Type -> Type -> Type mkCoercionType Nominal = mkPrimEqPred mkCoercionType Representational = mkReprPrimEqPred mkCoercionType Phantom = \ty1 ty2 -> let ki1 = typeKind ty1 ki2 = typeKind ty2 in TyConApp eqPhantPrimTyCon [ki1, ki2, ty1, ty2] -- | Creates a primitive nominal type equality predicate. -- t1 ~# t2 -- Invariant: the types are not Coercions mkPrimEqPred :: Type -> Type -> Type mkPrimEqPred ty1 ty2 = mkTyConApp eqPrimTyCon [k1, k2, ty1, ty2] where k1 = typeKind ty1 k2 = typeKind ty2 -- | Creates a primitive representational type equality predicate. 
-- t1 ~R# t2 -- Invariant: the types are not Coercions mkReprPrimEqPred :: Type -> Type -> Type mkReprPrimEqPred ty1 ty2 = mkTyConApp eqReprPrimTyCon [k1, k2, ty1, ty2] where k1 = typeKind ty1 k2 = typeKind ty2 -- | Makes a lifted equality predicate at the given role mkPrimEqPredRole :: Role -> Type -> Type -> PredType mkPrimEqPredRole Nominal = mkPrimEqPred mkPrimEqPredRole Representational = mkReprPrimEqPred mkPrimEqPredRole Phantom = panic "mkPrimEqPredRole phantom" -- | Creates a primitive nominal type equality predicate with an explicit -- (but homogeneous) kind: (~#) k k ty1 ty2 mkNomPrimEqPred :: Kind -> Type -> Type -> Type mkNomPrimEqPred k ty1 ty2 = mkTyConApp eqPrimTyCon [k, k, ty1, ty2] -- | Assuming that two types are the same, ignoring coercions, find -- a nominal coercion between the types. This is useful when optimizing -- transitivity over coercion applications, where splitting two -- AppCos might yield different kinds. See Note [EtaAppCo] in -- "GHC.Core.Coercion.Opt". 
buildCoercion :: HasDebugCallStack => Type -> Type -> CoercionN buildCoercion orig_ty1 orig_ty2 = go orig_ty1 orig_ty2 where go ty1 ty2 | Just ty1' <- coreView ty1 = go ty1' ty2 | Just ty2' <- coreView ty2 = go ty1 ty2' go (CastTy ty1 co) ty2 = let co' = go ty1 ty2 r = coercionRole co' in mkCoherenceLeftCo r ty1 co co' go ty1 (CastTy ty2 co) = let co' = go ty1 ty2 r = coercionRole co' in mkCoherenceRightCo r ty2 co co' go ty1@(TyVarTy tv1) _tyvarty = assert (case _tyvarty of { TyVarTy tv2 -> tv1 == tv2 ; _ -> False }) $ mkNomReflCo ty1 go (FunTy { ft_af = af1, ft_mult = w1, ft_arg = arg1, ft_res = res1 }) (FunTy { ft_af = af2, ft_mult = w2, ft_arg = arg2, ft_res = res2 }) = assert (af1 == af2) $ mkFunCo Nominal af1 (go w1 w2) (go arg1 arg2) (go res1 res2) go (TyConApp tc1 args1) (TyConApp tc2 args2) = assertPpr (tc1 == tc2) (vcat [ ppr tc1 <+> ppr tc2 , text "orig_ty1:" <+> ppr orig_ty1 , text "orig_ty2:" <+> ppr orig_ty2 ]) $ mkTyConAppCo Nominal tc1 (zipWith go args1 args2) go (AppTy ty1a ty1b) ty2 | Just (ty2a, ty2b) <- splitAppTyNoView_maybe ty2 = mkAppCo (go ty1a ty2a) (go ty1b ty2b) go ty1 (AppTy ty2a ty2b) | Just (ty1a, ty1b) <- splitAppTyNoView_maybe ty1 = mkAppCo (go ty1a ty2a) (go ty1b ty2b) go (ForAllTy (Bndr tv1 flag1) ty1) (ForAllTy (Bndr tv2 flag2) ty2) | isTyVar tv1 = assert (isTyVar tv2) $ mkForAllCo tv1 flag1 flag2 kind_co (go ty1 ty2') where kind_co = go (tyVarKind tv1) (tyVarKind tv2) in_scope = mkInScopeSet $ tyCoVarsOfType ty2 `unionVarSet` tyCoVarsOfCo kind_co ty2' = substTyWithInScope in_scope [tv2] [mkTyVarTy tv1 `mkCastTy` kind_co] ty2 go (ForAllTy (Bndr cv1 flag1) ty1) (ForAllTy (Bndr cv2 flag2) ty2) = assert (isCoVar cv1 && isCoVar cv2) $ mkForAllCo cv1 flag1 flag2 kind_co (go ty1 ty2') where s1 = varType cv1 s2 = varType cv2 kind_co = go s1 s2 -- s1 = t1 ~r t2 -- s2 = t3 ~r t4 -- kind_co :: (t1 ~r t2) ~N (t3 ~r t4) -- eta1 :: t1 ~r t3 -- eta2 :: t2 ~r t4 r = coVarRole cv1 kind_co' = downgradeRole r Nominal kind_co eta1 = mkSelCo 
(SelTyCon 2 r) kind_co' eta2 = mkSelCo (SelTyCon 3 r) kind_co' subst = mkEmptySubst $ mkInScopeSet $ tyCoVarsOfType ty2 `unionVarSet` tyCoVarsOfCo kind_co ty2' = substTy (extendCvSubst subst cv2 $ mkSymCo eta1 `mkTransCo` mkCoVarCo cv1 `mkTransCo` eta2) ty2 go ty1@(LitTy lit1) _lit2 = assert (case _lit2 of { LitTy lit2 -> lit1 == lit2 ; _ -> False }) $ mkNomReflCo ty1 go (CoercionTy co1) (CoercionTy co2) = mkProofIrrelCo Nominal kind_co co1 co2 where kind_co = go (coercionType co1) (coercionType co2) go ty1 ty2 = pprPanic "buildKindCoercion" (vcat [ ppr orig_ty1, ppr orig_ty2 , ppr ty1, ppr ty2 ]) {- %************************************************************************ %* * Coercion holes %* * %************************************************************************ -} has_co_hole_ty :: Type -> Monoid.Any has_co_hole_co :: Coercion -> Monoid.Any (has_co_hole_ty, _, has_co_hole_co, _) = foldTyCo folder () where folder = TyCoFolder { tcf_view = noView , tcf_tyvar = const2 (Monoid.Any False) , tcf_covar = const2 (Monoid.Any False) , tcf_hole = \_ hole -> Monoid.Any (isHeteroKindCoHole hole) , tcf_tycobinder = const2 } -- | Is there a hetero-kind coercion hole in this type? -- (That is, a coercion hole with ch_hetero_kind=True.) -- See wrinkle (EIK2) of Note [Equalities with incompatible kinds] in GHC.Tc.Solver.Equality hasCoercionHoleTy :: Type -> Bool hasCoercionHoleTy = Monoid.getAny . has_co_hole_ty -- | Is there a hetero-kind coercion hole in this coercion? hasCoercionHoleCo :: Coercion -> Bool hasCoercionHoleCo = Monoid.getAny . 
has_co_hole_co hasThisCoercionHoleTy :: Type -> CoercionHole -> Bool hasThisCoercionHoleTy ty hole = Monoid.getAny (f ty) where (f, _, _, _) = foldTyCo folder () folder = TyCoFolder { tcf_view = noView , tcf_tyvar = const2 (Monoid.Any False) , tcf_covar = const2 (Monoid.Any False) , tcf_hole = \ _ h -> Monoid.Any (getUnique h == getUnique hole) , tcf_tycobinder = const2 } -- | Set the type of a 'CoercionHole' setCoHoleType :: CoercionHole -> Type -> CoercionHole setCoHoleType h t = setCoHoleCoVar h (setVarType (coHoleCoVar h) t) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Coercion.hs-boot0000644000000000000000000000440007346545000021632 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} module GHC.Core.Coercion where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Rep import {-# SOURCE #-} GHC.Core.TyCon import GHC.Types.Basic ( LeftOrRight ) import GHC.Core.Coercion.Axiom import GHC.Types.Var import GHC.Data.Pair import GHC.Utils.Misc mkReflCo :: Role -> Type -> Coercion mkTyConAppCo :: HasDebugCallStack => Role -> TyCon -> [Coercion] -> Coercion mkAppCo :: Coercion -> Coercion -> Coercion mkForAllCo :: HasDebugCallStack => TyCoVar -> ForAllTyFlag -> ForAllTyFlag -> Coercion -> Coercion -> Coercion mkFunCo :: Role -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion mkNakedFunCo :: Role -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion mkFunCo2 :: Role -> FunTyFlag -> FunTyFlag -> CoercionN -> Coercion -> Coercion -> Coercion mkCoVarCo :: CoVar -> Coercion mkPhantomCo :: Coercion -> Type -> Type -> Coercion mkUnivCo :: UnivCoProvenance -> [Coercion] -> Role -> Type -> Type -> Coercion mkSymCo :: Coercion -> Coercion mkTransCo :: HasDebugCallStack => Coercion -> Coercion -> Coercion mkSelCo :: HasDebugCallStack => CoSel -> Coercion -> Coercion mkLRCo :: LeftOrRight -> Coercion -> Coercion mkInstCo :: Coercion -> Coercion -> Coercion mkGReflCo :: Role -> Type -> MCoercionN -> Coercion mkNomReflCo :: Type -> Coercion mkKindCo :: 
Coercion -> Coercion mkSubCo :: HasDebugCallStack => Coercion -> Coercion mkProofIrrelCo :: Role -> Coercion -> Coercion -> Coercion -> Coercion mkAxiomCo :: CoAxiomRule -> [Coercion] -> Coercion funRole :: Role -> FunSel -> Role isGReflCo :: Coercion -> Bool isReflCo :: Coercion -> Bool isReflexiveCo :: Coercion -> Bool decomposePiCos :: HasDebugCallStack => Coercion -> Pair Type -> [Type] -> ([Coercion], Coercion) coVarTypesRole :: HasDebugCallStack => CoVar -> (Type, Type, Role) coVarRole :: CoVar -> Role mkCoercionType :: Role -> Type -> Type -> Type seqCo :: Coercion -> () coercionKind :: HasDebugCallStack => Coercion -> Pair Type coercionLKind :: HasDebugCallStack => Coercion -> Type coercionRKind :: HasDebugCallStack => Coercion -> Type coercionType :: Coercion -> Type topNormaliseNewType_maybe :: Type -> Maybe (Coercion, Type) -- used to look through newtypes to the right of -- function arrows, in 'GHC.Core.Type.getRuntimeArgTys' ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Coercion/0000755000000000000000000000000007346545000020337 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Coercion/Axiom.hs0000644000000000000000000007040507346545000021756 0ustar0000000000000000{-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE ScopedTypeVariables #-} -- (c) The University of Glasgow 2012 -- | Module for coercion axioms, used to represent type family instances -- and newtypes module GHC.Core.Coercion.Axiom ( BranchFlag, Branched, Unbranched, BranchIndex, Branches(..), manyBranches, unbranched, fromBranches, numBranches, mapAccumBranches, CoAxiom(..), CoAxBranch(..), toBranchedAxiom, toUnbranchedAxiom, coAxiomName, coAxiomArity, coAxiomBranches, coAxiomTyCon, isImplicitCoAxiom, coAxiomNumPats, coAxiomNthBranch, coAxiomSingleBranch_maybe, coAxiomRole, coAxiomSingleBranch, 
coAxBranchTyVars, coAxBranchCoVars, coAxBranchRoles, coAxBranchLHS, coAxBranchRHS, coAxBranchSpan, coAxBranchIncomps, placeHolderIncomps, Role(..), fsFromRole, CoAxiomRule(..), BuiltInFamRewrite(..), BuiltInFamInjectivity(..), TypeEqn, coAxiomRuleArgRoles, coAxiomRuleRole, coAxiomRuleBranch_maybe, isNewtypeAxiomRule_maybe, BuiltInSynFamily(..), trivialBuiltInFamily ) where import GHC.Prelude import Language.Haskell.Syntax.Basic (Role(..)) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType, pprTyVar ) import {-# SOURCE #-} GHC.Core.TyCon ( TyCon, isNewTyCon ) import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Types.Name import GHC.Types.Unique import GHC.Types.Var import GHC.Utils.Misc import GHC.Utils.Binary import GHC.Utils.Panic import GHC.Data.Pair import GHC.Types.Basic import Data.Typeable ( Typeable ) import GHC.Types.SrcLoc import qualified Data.Data as Data import Data.Array import Data.List ( mapAccumL ) {- Note [Coercion axiom branches] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to allow closed type families, an axiom needs to contain an ordered list of alternatives, called branches. The kind of the coercion built from an axiom is determined by which index is used when building the coercion from the axiom. For example, consider the axiom derived from the following declaration: type family F a where F [Int] = Bool F [a] = Double F (a b) = Char This will give rise to this axiom: axF :: { F [Int] ~ Bool ; forall (a :: *). F [a] ~ Double ; forall (k :: *) (a :: k -> *) (b :: k). F (a b) ~ Char } The axiom is used with the AxiomCo constructor of Coercion. If we wish to have a coercion showing that F (Maybe Int) ~ Char, it will look like axF[2] <*> :: F (Maybe Int) ~ Char -- or, written using concrete-ish syntax -- AxiomRuleCo axF 2 [Refl *, Refl Maybe, Refl Int] Note that the index is 0-based. 
For type-checking, it is also necessary to check that no previous pattern can unify with the supplied arguments. After all, it is possible that some of the type arguments are lambda-bound type variables whose instantiation may cause an earlier match among the branches. We wish to prohibit this behavior, so the type checker rules out the choice of a branch where a previous branch can unify. See also [Apartness] in GHC.Core.FamInstEnv. For example, the following is malformed, where 'a' is a lambda-bound type variable: axF[2] <*> :: F (a Bool) ~ Char Why? Because a might be instantiated with [], meaning that branch 1 should apply, not branch 2. This is a vital consistency check; without it, we could derive Int ~ Bool, and that is a Bad Thing. Note [Branched axioms] ~~~~~~~~~~~~~~~~~~~~~~ Although a CoAxiom has the capacity to store many branches, in certain cases, we want only one. These cases are in data/newtype family instances, newtype coercions, and type family instances. Furthermore, these unbranched axioms are used in a variety of places throughout GHC, and it would difficult to generalize all of that code to deal with branched axioms, especially when the code can be sure of the fact that an axiom is indeed a singleton. At the same time, it seems dangerous to assume singlehood in various places through GHC. The solution to this is to label a CoAxiom with a phantom type variable declaring whether it is known to be a singleton or not. The branches are stored using a special datatype, declared below, that ensures that the type variable is accurate. ************************************************************************ * * Branches * * ************************************************************************ -} {- Note [BranchIndex] ~~~~~~~~~~~~~~~~~~~~ A CoAxiom has 1 or more branches. Each branch has contains a list of the free type variables in that branch, the LHS type patterns, and the RHS type for that branch. 
When we apply an axiom to a list of coercions, we must choose which branch of the axiom we wish to use, as the different branches may have different numbers of free type variables. (The number of type patterns is always the same among branches, but that doesn't quite concern us here.) -} type BranchIndex = Int -- Counting from zero -- The index of the branch in the list of branches -- See Note [BranchIndex] -- promoted data type data BranchFlag = Branched | Unbranched type Branched = 'Branched type Unbranched = 'Unbranched -- By using type synonyms for the promoted constructors, we avoid needing -- DataKinds and the promotion quote in client modules. This also means that -- we don't need to export the term-level constructors, which should never be used. newtype Branches (br :: BranchFlag) = MkBranches { unMkBranches :: Array BranchIndex CoAxBranch } type role Branches nominal manyBranches :: [CoAxBranch] -> Branches Branched manyBranches brs = assert (snd bnds >= fst bnds ) MkBranches (listArray bnds brs) where bnds = (0, length brs - 1) unbranched :: CoAxBranch -> Branches Unbranched unbranched br = MkBranches (listArray (0, 0) [br]) toBranched :: Branches br -> Branches Branched toBranched = MkBranches . unMkBranches toUnbranched :: Branches br -> Branches Unbranched toUnbranched (MkBranches arr) = assert (bounds arr == (0,0) ) MkBranches arr fromBranches :: Branches br -> [CoAxBranch] fromBranches = elems . unMkBranches branchesNth :: Branches br -> BranchIndex -> CoAxBranch branchesNth (MkBranches arr) n = arr ! 
n numBranches :: Branches br -> Int numBranches (MkBranches arr) = snd (bounds arr) + 1 -- | The @[CoAxBranch]@ passed into the mapping function is a list of -- all previous branches, reversed mapAccumBranches :: ([CoAxBranch] -> CoAxBranch -> CoAxBranch) -> Branches br -> Branches br mapAccumBranches f (MkBranches arr) = MkBranches (listArray (bounds arr) (snd $ mapAccumL go [] (elems arr))) where go :: [CoAxBranch] -> CoAxBranch -> ([CoAxBranch], CoAxBranch) go prev_branches cur_branch = ( cur_branch : prev_branches , f prev_branches cur_branch ) {- ************************************************************************ * * Coercion axioms * * ************************************************************************ Note [Storing compatibility] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During axiom application, we need to be aware of which branches are compatible with which others. The full explanation is in Note [Compatibility] in GHc.Core.FamInstEnv. (The code is placed there to avoid a dependency from GHC.Core.Coercion.Axiom on the unification algorithm.) Although we could theoretically compute compatibility on the fly, this is silly, so we store it in a CoAxiom. Specifically, each branch refers to all other branches with which it is incompatible. This list might well be empty, and it will always be for the first branch of any axiom. CoAxBranches that do not (yet) belong to a CoAxiom should have a panic thunk stored in cab_incomps. The incompatibilities are properly a property of the axiom as a whole, and they are computed only when the final axiom is built. During serialization, the list is converted into a list of the indices of the branches. Note [CoAxioms are homogeneous] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All axioms must be *homogeneous*, meaning that the kind of the LHS must match the kind of the RHS. 
In practice, this means: Given a CoAxiom { co_ax_tc = ax_tc }, for every branch CoAxBranch { cab_lhs = lhs, cab_rhs = rhs }: typeKind (mkTyConApp ax_tc lhs) `eqType` typeKind rhs This is checked in FamInstEnv.mkCoAxBranch. -} -- | A 'CoAxiom' is a \"coercion constructor\", i.e. a named equality axiom. -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data CoAxiom br = CoAxiom -- Type equality axiom. { co_ax_unique :: Unique -- Unique identifier , co_ax_name :: Name -- Name for pretty-printing , co_ax_role :: Role -- Role of the axiom's equality , co_ax_tc :: TyCon -- The head of the LHS patterns -- e.g. the newtype or family tycon , co_ax_branches :: Branches br -- The branches that form this axiom , co_ax_implicit :: Bool -- True <=> the axiom is "implicit" -- See Note [Implicit axioms] -- INVARIANT: co_ax_implicit == True implies length co_ax_branches == 1. } -- | A branch of a coercion axiom, which provides the evidence for -- unwrapping a newtype or a type-family reduction step using a single equation. data CoAxBranch = CoAxBranch { cab_loc :: SrcSpan -- ^ Location of the defining equation -- See Note [CoAxiom locations] , cab_tvs :: [TyVar] -- ^ Bound type variables; not necessarily fresh -- See Note [CoAxBranch type variables] , cab_eta_tvs :: [TyVar] -- ^ Eta-reduced tyvars -- cab_tvs and cab_lhs may be eta-reduced; see -- Note [Eta reduction for data families] , cab_cvs :: [CoVar] -- ^ Bound coercion variables -- Always empty, for now. 
-- See Note [Constraints in patterns] -- in GHC.Tc.TyCl , cab_roles :: [Role] -- ^ See Note [CoAxBranch roles] , cab_lhs :: [Type] -- ^ Type patterns to match against , cab_rhs :: Type -- ^ Right-hand side of the equality -- See Note [CoAxioms are homogeneous] , cab_incomps :: [CoAxBranch] -- ^ The previous incompatible branches -- See Note [Storing compatibility] } deriving Data.Data toBranchedAxiom :: CoAxiom br -> CoAxiom Branched toBranchedAxiom ax@(CoAxiom { co_ax_branches = branches }) = ax { co_ax_branches = toBranched branches } toUnbranchedAxiom :: CoAxiom br -> CoAxiom Unbranched toUnbranchedAxiom ax@(CoAxiom { co_ax_branches = branches }) = ax { co_ax_branches = toUnbranched branches } coAxiomNumPats :: CoAxiom br -> Int coAxiomNumPats = length . coAxBranchLHS . (flip coAxiomNthBranch 0) coAxiomArity :: CoAxiom br -> BranchIndex -> Arity coAxiomArity ax index = length tvs + length cvs where CoAxBranch { cab_tvs = tvs, cab_cvs = cvs } = coAxiomNthBranch ax index coAxiomName :: CoAxiom br -> Name coAxiomName = co_ax_name coAxiomRole :: CoAxiom br -> Role coAxiomRole = co_ax_role coAxiomBranches :: CoAxiom br -> Branches br coAxiomBranches = co_ax_branches coAxiomNthBranch :: CoAxiom br -> BranchIndex -> CoAxBranch coAxiomNthBranch (CoAxiom { co_ax_branches = bs }) index = branchesNth bs index coAxiomSingleBranch :: CoAxiom Unbranched -> CoAxBranch coAxiomSingleBranch (CoAxiom { co_ax_branches = MkBranches arr }) = arr ! 0 coAxiomSingleBranch_maybe :: CoAxiom br -> Maybe CoAxBranch coAxiomSingleBranch_maybe (CoAxiom { co_ax_branches = MkBranches arr }) | snd (bounds arr) == 0 = Just $ arr ! 
0 | otherwise = Nothing coAxiomTyCon :: CoAxiom br -> TyCon coAxiomTyCon = co_ax_tc coAxBranchTyVars :: CoAxBranch -> [TyVar] coAxBranchTyVars = cab_tvs coAxBranchCoVars :: CoAxBranch -> [CoVar] coAxBranchCoVars = cab_cvs coAxBranchLHS :: CoAxBranch -> [Type] coAxBranchLHS = cab_lhs coAxBranchRHS :: CoAxBranch -> Type coAxBranchRHS = cab_rhs coAxBranchRoles :: CoAxBranch -> [Role] coAxBranchRoles = cab_roles coAxBranchSpan :: CoAxBranch -> SrcSpan coAxBranchSpan = cab_loc isImplicitCoAxiom :: CoAxiom br -> Bool isImplicitCoAxiom = co_ax_implicit coAxBranchIncomps :: CoAxBranch -> [CoAxBranch] coAxBranchIncomps = cab_incomps -- See Note [Compatibility] in GHC.Core.FamInstEnv placeHolderIncomps :: [CoAxBranch] placeHolderIncomps = panic "placeHolderIncomps" {- Note [CoAxBranch type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the case of a CoAxBranch of an associated type-family instance, we use the *same* type variables in cab_tvs (where possible) as the enclosing class or instance. Consider instance C Int [z] where type F Int [z] = ... -- Second param must be [z] In the CoAxBranch in the instance decl (F Int [z]) we use the same 'z', so that it's easy to check that that type is the same as that in the instance header. However, I believe that the cab_tvs of any CoAxBranch are distinct from the cab_tvs of other CoAxBranches in the same CoAxiom. This is important when checking for compatiblity and apartness; e.g. see GHC.Core.FamInstEnv.compatibleBranches. (The story seems a bit wobbly here, but it seems to work.) Note [CoAxBranch roles] ~~~~~~~~~~~~~~~~~~~~~~~ Consider this code: newtype Age = MkAge Int newtype Wrap a = MkWrap a convert :: Wrap Age -> Int convert (MkWrap (MkAge i)) = i We want this to compile to: NTCo:Wrap :: forall a. Wrap a ~R a NTCo:Age :: Age ~R Int convert = \x -> x |> (NTCo:Wrap[0] NTCo:Age[0]) But, note that NTCo:Age is at role R. Thus, we need to be able to pass coercions at role R into axioms. 
However, we don't *always* want to be able to do this, as it would be disastrous with type families. The solution is to annotate the arguments to the axiom with roles, much like we annotate tycon tyvars. Where do these roles get set? Newtype axioms inherit their roles from the newtype tycon; family axioms are all at role N. Note [CoAxiom locations] ~~~~~~~~~~~~~~~~~~~~~~~~ The source location of a CoAxiom is stored in two places in the datatype tree. * The first is in the location info buried in the Name of the CoAxiom. This span includes all of the branches of a branched CoAxiom. * The second is in the cab_loc fields of the CoAxBranches. In the case of a single branch, we can extract the source location of the branch from the name of the CoAxiom. In other cases, we need an explicit SrcSpan to correctly store the location of the equation giving rise to the FamInstBranch. Note [Implicit axioms] ~~~~~~~~~~~~~~~~~~~~~~ See also Note [Implicit TyThings] in GHC.Types.TyThing * A CoAxiom arising from data/type family instances is not "implicit". That is, it has its own IfaceAxiom declaration in an interface file * The CoAxiom arising from a newtype declaration *is* "implicit". That is, it does not have its own IfaceAxiom declaration in an interface file; instead the CoAxiom is generated by type-checking the newtype declaration Note [Eta reduction for data families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this data family T a b :: * newtype instance T Int a = MkT (IO a) deriving( Monad ) We'd like this to work. From the 'newtype instance' you might think we'd get: newtype TInt a = MkT (IO a) axiom ax1 a :: T Int a ~ TInt a -- The newtype-instance part axiom ax2 a :: TInt a ~ IO a -- The newtype part But now what can we do? We have this problem Given: d :: Monad IO Wanted: d' :: Monad (T Int) = d |> ???? What coercion can we use for the ??? 
Solution: eta-reduce both axioms, thus: axiom ax1 :: T Int ~ TInt axiom ax2 :: TInt ~ IO Now d' = d |> Monad (sym (ax2 ; ax1)) ----- Bottom line ------ For a CoAxBranch for a data family instance with representation TyCon rep_tc: - cab_tvs (of its CoAxiom) may be shorter than tyConTyVars of rep_tc. - cab_lhs may be shorter than tyConArity of the family tycon i.e. LHS is unsaturated - cab_rhs will be (rep_tc cab_tvs) i.e. RHS is un-saturated - This eta reduction happens for data instances as well as newtype instances. Here we want to eta-reduce the data family axiom. - This eta-reduction is done in GHC.Tc.TyCl.Instance.tcDataFamInstDecl. But for a /type/ family - cab_lhs has the exact arity of the family tycon There are certain situations (e.g., pretty-printing) where it is necessary to deal with eta-expanded data family instances. For these situations, the cab_eta_tvs field records the stuff that has been eta-reduced away. So if we have axiom forall a b. F [a->b] = D b a and cab_eta_tvs is [p,q], then the original user-written definition looked like axiom forall a b p q. F [a->b] p q = D b a p q (See #9692, #14179, and #15845 for examples of what can go wrong if we don't eta-expand when showing things to the user.) See also: * Note [Newtype eta] in GHC.Core.TyCon. This is notionally separate and deals with the axiom connecting a newtype with its representation type; but it too is eta-reduced. * Note [Implementing eta reduction for data families] in "GHC.Tc.TyCl.Instance". This describes the implementation details of this eta reduction happen. * Note [RoughMap and rm_empty] for how this complicates the RoughMap implementation slightly. 
-} {- ********************************************************************* * * Instances, especially pretty-printing * * ********************************************************************* -} instance Eq (CoAxiom br) where a == b = getUnique a == getUnique b a /= b = getUnique a /= getUnique b instance Uniquable (CoAxiom br) where getUnique = co_ax_unique instance NamedThing (CoAxiom br) where getName = co_ax_name instance Typeable br => Data.Data (CoAxiom br) where -- don't traverse? toConstr _ = abstractConstr "CoAxiom" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "CoAxiom" instance Outputable (CoAxiom br) where -- You may want GHC.Core.Coercion.pprCoAxiom instead ppr = ppr . getName instance Outputable CoAxBranch where -- This instance doesn't know the name of the type family -- If possible, use GHC.Core.Coercion.pprCoAxBranch instead ppr (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_lhs = lhs_tys, cab_rhs = rhs, cab_incomps = incomps }) = text "CoAxBranch" <+> braces payload where payload = hang (text "forall" <+> pprWithCommas pprTyVar (tvs ++ cvs) <> dot) 2 (vcat [ text "" <+> sep (map pprType lhs_tys) , nest 2 (text "=" <+> ppr rhs) , ppUnless (null incomps) $ text "incomps:" <+> vcat (map ppr incomps) ]) {- ************************************************************************ * * Roles * * ************************************************************************ Roles are defined here to avoid circular dependencies. -} -- These names are slurped into the parser code. Changing these strings -- will change the **surface syntax** that GHC accepts! If you want to -- change only the pretty-printing, do some replumbing. See -- mkRoleAnnotDecl in GHC.Parser.PostProcess fsFromRole :: Role -> FastString fsFromRole Nominal = fsLit "nominal" fsFromRole Representational = fsLit "representational" fsFromRole Phantom = fsLit "phantom" instance Outputable Role where ppr = ftext . 
fsFromRole instance Binary Role where put_ bh Nominal = putByte bh 1 put_ bh Representational = putByte bh 2 put_ bh Phantom = putByte bh 3 get bh = do tag <- getByte bh case tag of 1 -> return Nominal 2 -> return Representational 3 -> return Phantom _ -> panic ("get Role " ++ show tag) {- ************************************************************************ * * CoAxiomRule Rules for building Evidence * * ************************************************************************ Note [CoAxiomRule] ~~~~~~~~~~~~~~~~~~ A CoAxiomRule is a built-in axiom, one that we assume to be true: CoAxiomRules come in four flavours: * BuiltInFamRew: provides evidence for, say (ax1) 3+4 ----> 7 (ax2) s+0 ----> s The evidence looks like AxiomCo ax1 [3,4] :: 3+4 ~ 7 AxiomCo ax2 [s] :: s+0 ~ s The arguments in the AxiomCo are the /instantiating types/, or more generally coercions (see Note [Coercion axioms applied to coercions] in GHC.Core.TyCo.Rep). * BuiltInFamInj: provides evidence for the injectivity of type families For example (ax3) g1: a+b ~ 0 ---> a~0 (ax4) g2: a+b ~ 0 ---> b~0 (ax5) g3: a+b1 ~ a~b2 ---> b1~b2 The argument to the AxiomCo is the full coercion (always just one). So then: AxiomCo ax3 [g1] :: a ~ 0 AxiomCo ax4 [g2] :: b ~ 0 AxiomCo ax5 [g3] :: b1 ~ b2 * BranchedAxiom: used for closed type families type family F a where F Int = Bool F Bool = Char F a = a -> Int We get one (CoAxiom Branched) for the entire family; when used in an AxiomCo we pair it with the BranchIndex to say which branch to pick. * UnbranchedAxiom: used for several purposes; - Newtypes - Data family instances - Open type family instances Note [Avoiding allocating lots of CoAxiomRules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CoAxiomRule is a sum type of four alternatives, which is very nice. But there is a danger of allocating lots of (BuiltInFamRew bif) objects, every time we (say) need a type-family rewrite. 
To avoid this allocation, we cache the appropraite CoAxiomRule inside each BuiltInFamRewrite, BuiltInFamInjectivity making a little circular data structure. See the `bifrw_axr` field of BuiltInFamRewrite, and similarly the others. It's simple to do this, and saves a percent or two of allocation in programs that do a lot of type-family work. -} -- | CoAxiomRule describes a built-in axiom, one that we assume to be true -- See Note [CoAxiomRule] data CoAxiomRule = BuiltInFamRew BuiltInFamRewrite -- Built-in type-family rewrites -- e.g. 3+5 ~ 7 | BuiltInFamInj BuiltInFamInjectivity -- Built-in type-family deductions -- e.g. a+b~0 ==> a~0 -- Always unary | BranchedAxiom (CoAxiom Branched) BranchIndex -- Closed type family | UnbranchedAxiom (CoAxiom Unbranched) -- Open type family instance, -- data family instances -- and newtypes instance Eq CoAxiomRule where (BuiltInFamRew bif1) == (BuiltInFamRew bif2) = bifrw_name bif1 == bifrw_name bif2 (BuiltInFamInj bif1) == (BuiltInFamInj bif2) = bifinj_name bif1 == bifinj_name bif2 (UnbranchedAxiom ax1) == (UnbranchedAxiom ax2) = getUnique ax1 == getUnique ax2 (BranchedAxiom ax1 i1) == (BranchedAxiom ax2 i2) = getUnique ax1 == getUnique ax2 && i1 == i2 _ == _ = False coAxiomRuleRole :: CoAxiomRule -> Role coAxiomRuleRole (BuiltInFamRew {}) = Nominal coAxiomRuleRole (BuiltInFamInj {}) = Nominal coAxiomRuleRole (UnbranchedAxiom ax) = coAxiomRole ax coAxiomRuleRole (BranchedAxiom ax _) = coAxiomRole ax coAxiomRuleArgRoles :: CoAxiomRule -> [Role] coAxiomRuleArgRoles (BuiltInFamRew bif) = replicate (bifrw_arity bif) Nominal coAxiomRuleArgRoles (BuiltInFamInj {}) = [Nominal] coAxiomRuleArgRoles (UnbranchedAxiom ax) = coAxBranchRoles (coAxiomSingleBranch ax) coAxiomRuleArgRoles (BranchedAxiom ax i) = coAxBranchRoles (coAxiomNthBranch ax i) coAxiomRuleBranch_maybe :: CoAxiomRule -> Maybe (TyCon, Role, CoAxBranch) coAxiomRuleBranch_maybe (UnbranchedAxiom ax) = Just (co_ax_tc ax, co_ax_role ax, coAxiomSingleBranch ax) 
coAxiomRuleBranch_maybe (BranchedAxiom ax i) = Just (co_ax_tc ax, co_ax_role ax, coAxiomNthBranch ax i) coAxiomRuleBranch_maybe _ = Nothing isNewtypeAxiomRule_maybe :: CoAxiomRule -> Maybe (TyCon, CoAxBranch) isNewtypeAxiomRule_maybe (UnbranchedAxiom ax) | let tc = coAxiomTyCon ax, isNewTyCon tc = Just (tc, coAxiomSingleBranch ax) isNewtypeAxiomRule_maybe _ = Nothing instance Data.Data CoAxiomRule where -- don't traverse? toConstr _ = abstractConstr "CoAxiomRule" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "CoAxiomRule" instance Outputable CoAxiomRule where ppr (BuiltInFamRew bif) = ppr (bifrw_name bif) ppr (BuiltInFamInj bif) = ppr (bifinj_name bif) ppr (UnbranchedAxiom ax) = ppr (coAxiomName ax) ppr (BranchedAxiom ax i) = ppr (coAxiomName ax) <> brackets (int i) {- ********************************************************************* * * Built-in families * * ********************************************************************* -} -- | A more explicit representation for `t1 ~ t2`. type TypeEqn = Pair Type -- Type checking of built-in families data BuiltInSynFamily = BuiltInSynFamily { sfMatchFam :: [BuiltInFamRewrite] , sfInteract :: [BuiltInFamInjectivity] -- If given these type arguments and RHS, returns the equalities that -- are guaranteed to hold. That is, if -- (ar, Pair s1 s2) is an element of (sfInteract tys ty) -- then AxiomRule ar [co :: F tys ~ ty] :: s1~s2 } data BuiltInFamInjectivity -- Argument and result role are always Nominal = BIF_Interact { bifinj_name :: FastString , bifinj_axr :: CoAxiomRule -- Cached copy of (BuiltInFamINj this-bif) -- See Note [Avoiding allocating lots of CoAxiomRules] , bifinj_proves :: TypeEqn -> Maybe TypeEqn -- ^ Always unary: just one TypeEqn argument -- Returns @Nothing@ when it doesn't like the supplied argument. -- When this happens in a coercion that means that the coercion is -- ill-formed, and Core Lint checks for that. 
} data BuiltInFamRewrite -- Argument roles and result role are always Nominal = BIF_Rewrite { bifrw_name :: FastString , bifrw_axr :: CoAxiomRule -- Cached copy of (BuiltInFamRew this-bif) -- See Note [Avoiding allocating lots of CoAxiomRules] , bifrw_fam_tc :: TyCon -- Needed for tyConsOfType , bifrw_arity :: Arity -- Number of type arguments needed -- to instantiate this axiom , bifrw_match :: [Type] -> Maybe ([Type], Type) -- coaxrMatch: does this reduce on the given arguments? -- If it does, returns (types to instantiate the rule at, rhs type) -- That is: mkAxiomCo ax (zipWith mkReflCo coAxiomRuleArgRoles ts) -- :: F tys ~N rhs, , bifrw_proves :: [TypeEqn] -> Maybe TypeEqn } -- length(inst_tys) = bifrw_arity -- INVARIANT: bifrw_match and bifrw_proves are related as follows: -- If Just (inst_tys, res_ty) = bifrw_match ax arg_tys -- then * length arg_tys = tyConArity fam_tc -- * length inst_tys = bifrw_arity -- * bifrw_proves (map (return @Pair) inst_tys) = Just (return @Pair res_ty) -- Provides default implementations that do nothing. trivialBuiltInFamily :: BuiltInSynFamily trivialBuiltInFamily = BuiltInSynFamily { sfMatchFam = [], sfInteract = [] } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Coercion/Opt.hs0000644000000000000000000015044707346545000021450 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE CPP #-} module GHC.Core.Coercion.Opt ( optCoercion , OptCoercionOpts (..) 
) where import GHC.Prelude import GHC.Tc.Utils.TcType ( exactTyCoVarsOfType ) import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst import GHC.Core.TyCo.Compare( eqType, eqForAllVis ) import GHC.Core.Coercion import GHC.Core.Type as Type hiding( substTyVarBndr, substTy ) import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Core.Unify import GHC.Types.Basic( SwapFlag(..), flipSwap, isSwapped, pickSwap, notSwapped ) import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Data.Pair import GHC.Utils.Outputable import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Utils.Panic import Control.Monad ( zipWithM ) {- %************************************************************************ %* * Optimising coercions %* * %************************************************************************ This module does coercion optimisation. See the paper Evidence normalization in Systtem FV (RTA'13) https://simon.peytonjones.org/evidence-normalization/ The paper is also in the GHC repo, in docs/opt-coercion. Note [Optimising coercion optimisation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Looking up a coercion's role or kind is linear in the size of the coercion. Thus, doing this repeatedly during the recursive descent of coercion optimisation is disastrous. We must be careful to avoid doing this if at all possible. Because it is generally easy to know a coercion's components' roles from the role of the outer coercion, we pass down the known role of the input in the algorithm below. We also keep functions opt_co2 and opt_co3 separate from opt_co4, so that the former two do Phantom checks that opt_co4 can avoid. This is a big win because Phantom coercions rarely appear within non-phantom coercions -- only in some TyConAppCos and some AxiomInstCos. We handle these cases specially by calling opt_co2. Note [Optimising InstCo] ~~~~~~~~~~~~~~~~~~~~~~~~ Optimising InstCo is pretty subtle: #15725, #25387. (1) tv is a type variable. 
We want to optimise InstCo (ForAllCo tv kco g) g2 --> S(g) where S is some substitution. Let's look at the typing rules. kco : k1 ~ k2 tv:k1 |- g : t1 ~ t2 ----------------------------- ForAllCo tv kco g : (all tv:k1.t1) ~ (all tv:k2.t2[tv |-> tv |> sym kco]) g1 : (all tv:k1.t1') ~ (all tv:k2.t2') g2 : (s1:k1) ~ (s2:k2) -------------------- InstCo g1 g2 : t1'[tv |-> s1] ~ t2'[tv |-> s2] Putting these two together kco : k1 ~ k2 tv:k1 |- g : t1 ~ t2 g2 : (s1:k1) ~ (s2:k2) -------------------- InstCo (ForAllCo tv kco g) g2 : t1[tv |-> s1] ~ t2[tv |-> s2 |> sym kco] We thus want S(g) to have kind S(g) :: (t1[tv |-> s1]) ~ (t2[tv |-> s2 |> sym kco]) All we need do is to substitute the coercion tv_co for tv: S = [tv :-> tv_co] where tv_co : s1 ~ (s2 |> sym kco) This looks bizarre, because we're substituting a /type variable/ with a /coercion/. However, this operation already exists: it's called *lifting*, and defined in GHC.Core.Coercion. We just need to enhance the lifting operation to be able to deal with an ambient substitution, which is why a LiftingContext stores a TCvSubst. In general if S = [tv :-> tv_co] tv_co : r1 ~ r2 g : t1 ~ t2 then S(g) : t1[tv :-> r1] ~ t2[tv :-> r2] The substitution S is embodied in the LiftingContext argument of `opt_co4`; See Note [The LiftingContext in optCoercion] (2) cv is a coercion variable Now consider we have (InstCo (ForAllCo cv h g) g2), we want to optimise. h : (t1 ~r t2) ~N (t3 ~r t4) cv : t1 ~r t2 |- g : t1' ~r2 t2' n1 = nth r 2 (downgradeRole r N h) :: t1 ~r t3 n2 = nth r 3 (downgradeRole r N h) :: t2 ~r t4 ------------------------------------------------ ForAllCo cv h g : (all cv:t1 ~r t2. t1') ~r2 (all cv:t3 ~r t4. t2'[cv |-> n1 ; cv ; sym n2]) g1 : (all cv:t1 ~r t2. t1') ~ (all cv: t3 ~r t4. 
t2') g2 : h1 ~N h2 h1 : t1 ~r t2 h2 : t3 ~r t4 ------------------------------------------------ InstCo g1 g2 : t1'[cv |-> h1] ~ t2'[cv |-> h2] We thus want some coercion proving this: t1'[cv |-> h1] ~ t2'[cv |-> n1 ; h2; sym n2] So we substitute the coercion variable c for the coercion (h1 ~N (n1; h2; sym n2)) in g. Note [The LiftingContext in optCoercion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To suppport Note [Optimising InstCo] the coercion optimiser carries a GHC.Core.Coercion.LiftingContext, which comprises * An ordinary Subst * The `lc_env`: a mapping from /type variables/ to /coercions/ We don't actually have a separate function liftCoSubstCo :: LiftingContext -> Coercion -> Coercion The substitution of a type variable by a coercion is done by the calls to `liftCoSubst` (on a type) in the Refl and GRefl cases of `opt_co4`. We use the following invariants: (LC1) The coercions in the range of `lc_env` have already had all substitutions applied; they are "OutCoercions". If you re-optimise these coercions, you must zap the LiftingContext first. (LC2) However they have /not/ had the "ambient sym" (the second argument of `opt_co4`) applied. The ambient sym applies to the entire coercion not to the little bits being substituted. 
-} -- | Coercion optimisation options newtype OptCoercionOpts = OptCoercionOpts { optCoercionEnabled :: Bool -- ^ Enable coercion optimisation (reduce its size) } optCoercion :: OptCoercionOpts -> Subst -> Coercion -> NormalCo -- ^ optCoercion applies a substitution to a coercion, -- *and* optimises it to reduce its size optCoercion opts env co | optCoercionEnabled opts = optCoercion' env co {- = pprTrace "optCoercion {" (text "Co:" <> ppr (coercionSize co)) $ let result = optCoercion' env co in pprTrace "optCoercion }" (vcat [ text "Co:" <+> ppr (coercionSize co) , text "Optco:" <+> ppWhen (isReflCo result) (text "(refl)") <+> ppr (coercionSize result) ]) $ result -} | otherwise = substCo env co optCoercion' :: Subst -> Coercion -> NormalCo optCoercion' env co | debugIsOn = let out_co = opt_co1 lc NotSwapped co (Pair in_ty1 in_ty2, in_role) = coercionKindRole co (Pair out_ty1 out_ty2, out_role) = coercionKindRole out_co details = vcat [ text "in_co:" <+> ppr co , text "in_ty1:" <+> ppr in_ty1 , text "in_ty2:" <+> ppr in_ty2 , text "out_co:" <+> ppr out_co , text "out_ty1:" <+> ppr out_ty1 , text "out_ty2:" <+> ppr out_ty2 , text "in_role:" <+> ppr in_role , text "out_role:" <+> ppr out_role ] in warnPprTrace (not (isReflCo out_co) && isReflexiveCo out_co) "optCoercion: reflexive but not refl" details $ -- assertPpr (substTyUnchecked env in_ty1 `eqType` out_ty1 && -- substTyUnchecked env in_ty2 `eqType` out_ty2 && -- in_role == out_role) -- (hang (text "optCoercion changed types!") 2 details) $ out_co | otherwise = opt_co1 lc NotSwapped co where lc = mkSubstLiftingContext env -- ppr_one cv = ppr cv <+> dcolon <+> ppr (coVarKind cv) type NormalCo = Coercion -- Invariants: -- * The substitution has been fully applied -- * For trans coercions (co1 `trans` co2) -- co1 is not a trans, and neither co1 nor co2 is identity type NormalNonIdCo = NormalCo -- Extra invariant: not the identity -- | Do we force the result to be representational? 
type ReprFlag = Bool

-- | Optimize a coercion, making no assumptions. All coercions in
-- the lifting context are already optimized (and sym'd if nec'y)
opt_co1 :: LiftingContext
        -> SwapFlag     -- IsSwapped => apply Sym to the result
        -> Coercion -> NormalCo
opt_co1 env sym co = opt_co2 env sym (coercionRole co) co

-- See Note [Optimising coercion optimisation]
-- | Optimize a coercion, knowing the coercion's role. No other assumptions.
opt_co2 :: LiftingContext
        -> SwapFlag   -- ^ IsSwapped => apply Sym to the result
        -> Role       -- ^ The role of the input coercion
        -> Coercion -> NormalCo
opt_co2 env sym Phantom co = opt_phantom env sym co
opt_co2 env sym r       co = opt_co4 env sym False r co

-- See Note [Optimising coercion optimisation]
-- | Optimize a coercion, knowing the coercion's non-Phantom role,
-- and with an optional downgrade
opt_co3 :: LiftingContext -> SwapFlag -> Maybe Role -> Role -> Coercion -> NormalCo
opt_co3 env sym (Just Phantom)          _ co = opt_phantom env sym co
opt_co3 env sym (Just Representational) r co = opt_co4 env sym True  r co
  -- if mrole is Just Nominal, that can't be a downgrade, so we can ignore
opt_co3 env sym _                       r co = opt_co4 env sym False r co

-- See Note [Optimising coercion optimisation]
-- | Optimize a non-phantom coercion.
opt_co4, opt_co4' :: LiftingContext -> SwapFlag -> ReprFlag
                  -> Role -> Coercion -> NormalCo
-- Precondition:  In every call (opt_co4 lc sym rep role co)
--                we should have role = coercionRole co
-- Precondition:  role is not Phantom
-- Postcondition: The resulting coercion is equivalent to
--                    wrapsub (wrapsym (mksub co)
--                where wrapsym is SymCo if sym=True
--                      wrapsub is SubCo if rep=True

-- opt_co4 is there just to support tracing, when debugging
-- Usually it just goes straight to opt_co4'
opt_co4 = opt_co4'
{-
opt_co4 env sym rep r co
  = pprTrace "opt_co4 {"
      ( vcat [ text "Sym:"  <+> ppr sym
             , text "Rep:"  <+> ppr rep
             , text "Role:" <+> ppr r
             , text "Co:"   <+> ppr co ]) $
    assert (r == coercionRole co ) $
    let result = opt_co4' env sym rep r co in
    pprTrace "opt_co4 }" (ppr co $$ text "---" $$ ppr result) $
    assertPpr (res_role == coercionRole result)
              (vcat [ text "Role:" <+> ppr r
                    , text "Result: " <+> ppr result
                    , text "Result type:" <+> ppr (coercionType result) ]) $
    result
  where
    res_role | rep       = Representational
             | otherwise = r
-}

opt_co4' env sym rep r (Refl ty)
  = assertPpr (r == Nominal)
              (text "Expected role:" <+> ppr r $$
               text "Found role:" <+> ppr Nominal $$
               text "Type:" <+> ppr ty) $
    wrapSym sym $ liftCoSubst (chooseRole rep r) env ty
    -- wrapSym: see (LC2) of Note [The LiftingContext in optCoercion]

opt_co4' env sym rep r (GRefl _r ty MRefl)
  = assertPpr (r == _r)
              (text "Expected role:" <+> ppr r $$
               text "Found role:" <+> ppr _r $$
               text "Type:" <+> ppr ty) $
    wrapSym sym $ liftCoSubst (chooseRole rep r) env ty
    -- wrapSym: see (LC2) of Note [The LiftingContext in optCoercion]

opt_co4' env sym rep r (GRefl _r ty (MCo kco))
  = assertPpr (r == _r)
              (text "Expected role:" <+> ppr r $$
               text "Found role:" <+> ppr _r $$
               text "Type:" <+> ppr ty) $
    if isGReflCo kco || isGReflCo kco'
    then wrapSym sym ty_co
    else wrapSym sym $ mk_coherence_right_co r' (coercionRKind ty_co) kco' ty_co
    -- ty :: k1
    -- kco :: k1 ~ k2
    -- Desired result coercion: ty ~ ty |> co
  where
    r'    = chooseRole rep r
    ty_co = liftCoSubst r' env ty
    kco'  = opt_co4 env NotSwapped False Nominal kco

opt_co4' env sym rep r (SymCo co)
  = opt_co4 env (flipSwap sym) rep r co
    -- surprisingly, we don't have to do anything to the env here. This is
    -- because any "lifting" substitutions in the env are tied to ForAllCos,
    -- which treat their left and right sides differently. We don't want to
    -- exchange them.

opt_co4' env sym rep r g@(TyConAppCo _r tc cos)
  = assert (r == _r) $
    case (rep, r) of
      (True, Nominal) ->
        mkTyConAppCo Representational tc
                     (zipWith3 (opt_co3 env sym)
                               (map Just (tyConRoleListRepresentational tc))
                               (repeat Nominal)
                               cos)
      (False, Nominal) ->
        mkTyConAppCo Nominal tc (map (opt_co4 env sym False Nominal) cos)
      (_, Representational) ->
        -- must use opt_co2 here, because some roles may be P
        -- See Note [Optimising coercion optimisation]
        mkTyConAppCo r tc (zipWith (opt_co2 env sym)
                                   (tyConRoleListRepresentational tc)
                                     -- the current roles
                                   cos)
      (_, Phantom) -> pprPanic "opt_co4 sees a phantom!" (ppr g)

opt_co4' env sym rep r (AppCo co1 co2)
  = mkAppCo (opt_co4 env sym rep r co1)
            (opt_co4 env sym False Nominal co2)

opt_co4' env sym rep r (ForAllCo { fco_tcv = tv, fco_visL = visL, fco_visR = visR
                                 , fco_kind = k_co, fco_body = co })
  = case optForAllCoBndr env sym tv k_co of
      (env', tv', k_co') -> mkForAllCo tv' visL' visR' k_co' $
                            opt_co4 env' sym rep r co
     -- Use the "mk" functions to check for nested Refls
  where
    !(visL', visR') = swapSym sym (visL, visR)

opt_co4' env sym rep r (FunCo _r afl afr cow co1 co2)
  = assert (r == _r) $
    mkFunCo2 r' afl' afr' cow' co1' co2'
  where
    co1' = opt_co4 env sym rep r co1
    co2' = opt_co4 env sym rep r co2
    cow' = opt_co1 env sym cow
    !r' | rep       = Representational
        | otherwise = r
    !(afl', afr') = swapSym sym (afl, afr)

opt_co4' env sym rep r (CoVarCo cv)
  | Just co <- lcLookupCoVar env cv
      -- see Note [Forall over coercion] for why
      -- this is the right thing here
  = -- pprTrace "CoVarCo" (ppr cv $$ ppr co) $
    opt_co4 (zapLiftingContext env) sym rep r co

  | ty1 `eqType` ty2   -- See Note [Optimise CoVarCo to Refl]
  = mkReflCo (chooseRole rep r) ty1

  | otherwise
  = assert (isCoVar cv1) $
    wrapRole rep r $ wrapSym sym $
    CoVarCo cv1

  where
    Pair ty1 ty2 = coVarTypes cv1

    cv1 = case lookupInScope (lcInScopeSet env) cv of
             Just cv1 -> cv1
             Nothing  -> warnPprTrace True
                            "opt_co: not in scope"
                            (ppr cv $$ ppr env) cv
          -- cv1 might have a substituted kind!

opt_co4' _ _ _ _ (HoleCo h)
  = pprPanic "opt_univ fell into a hole" (ppr h)

opt_co4' env sym rep r (AxiomCo con cos)
    -- Do *not* push sym inside top-level axioms
    -- e.g. if g is a top-level axiom
    --   g a : f a ~ a
    -- then (sym (g ty)) /= g (sym ty) !!
  = assert (r == coAxiomRuleRole con )
    wrapRole rep (coAxiomRuleRole con) $
    wrapSym sym $
      -- some sub-cos might be P: use opt_co2
      -- See Note [Optimising coercion optimisation]
    AxiomCo con (zipWith (opt_co2 env NotSwapped)
                         (coAxiomRuleArgRoles con)
                         cos)
      -- Note that the_co does *not* have sym pushed into it

opt_co4' env sym rep r (UnivCo { uco_prov = prov, uco_lty = t1
                               , uco_rty = t2, uco_deps = deps })
  = opt_univ env sym prov deps (chooseRole rep r) t1 t2

opt_co4' env sym rep r (TransCo co1 co2)
  -- sym (g `o` h) = sym h `o` sym g
  | isSwapped sym = opt_trans in_scope co2' co1'
  | otherwise     = opt_trans in_scope co1' co2'
  where
    co1' = opt_co4 env sym rep r co1
    co2' = opt_co4 env sym rep r co2
    in_scope = lcInScopeSet env

opt_co4' env sym rep r (SelCo cs co)
  -- Historical note 1: we used to check `co` for Refl, TyConAppCo etc
  -- before optimising `co`; but actually the SelCo will have been built
  -- with mkSelCo, so these tests always fail.
  -- Historical note 2: if rep=True and r=Nominal, we used to recursively
  -- call opt_co4 to re-optimise the result. But (a) that is inefficient
  -- and (b) wrapRole uses mkSubCo which does much the same job
  = wrapRole rep r $ mkSelCo cs $ opt_co1 env sym co

opt_co4' env sym rep r (LRCo lr co)
  | Just pr_co <- splitAppCo_maybe co
  = assert (r == Nominal )
    opt_co4 env sym rep Nominal (pick_lr lr pr_co)
  | Just pr_co <- splitAppCo_maybe co'
  = assert (r == Nominal) $
    if rep
    then opt_co4 (zapLiftingContext env) NotSwapped True Nominal (pick_lr lr pr_co)
    else pick_lr lr pr_co
  | otherwise
  = wrapRole rep Nominal $ LRCo lr co'
  where
    co' = opt_co4 env sym False Nominal co

    pick_lr CLeft  (l, _) = l
    pick_lr CRight (_, r) = r

{- Note [Forall over coercion]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example:
   type (:~:) :: forall k. k -> k -> Type
   Refl :: forall k (a :: k) (b :: k). forall (cv :: (~#) k k a b). (:~:) k a b

   k1,k2,k3,k4 :: Type
   eta :: (k1 ~# k2) ~# (k3 ~# k4)
       == ((~#) Type Type k1 k2) ~# ((~#) Type Type k3 k4)
   co1_3 :: k1 ~# k3
   co2_4 :: k2 ~# k4
   nth 2 eta :: k1 ~# k3
   nth 3 eta :: k2 ~# k4
   co11_31 :: ~# (sym co1_3)
   co22_24 :: ~# co2_4

   (forall (cv :: eta). Refl co1_3 co2_4 (co11_31 ;; cv ;; co22_24))
     :: (forall (cv :: k1 ~# k2). Refl Type k1 k2 ( ;; cv ;; )
     ~# (forall (cv :: k3 ~# k4). Refl Type k3 k4
           (sym co1_3 ;; nth 2 eta ;; cv ;; sym (nth 3 eta) ;; co2_4))

   co1_2 :: k1 ~# k2
   co3_4 :: k3 ~# k4
   co5 :: co1_2 ~# co3_4

   InstCo (forall (cv :: eta). Refl co1_3 co2_4 (co11_31 ;; cv ;; co22_24)) co5
     :: (Refl Type k1 k2 ( ;; cv ;; ))[cv |-> co1_2]
     ~# (Refl Type k3 k4
          (sym co1_3 ;; nth 2 eta ;; cv ;; sym (nth 3 eta) ;; co2_4))[cv |-> co3_4]
     == (Refl Type k1 k2 ( ;; co1_2 ;; ))
     ~# (Refl Type k3 k4
          (sym co1_3 ;; nth 2 eta ;; co3_4 ;; sym (nth 3 eta) ;; co2_4))
    ==> Refl co1_3 co2_4 (co11_31 ;; co1_2 ;; co22_24)

Conclusion: Because of the way this all works, we want to put in the *left-hand*
coercion in co5's type. (In the code, co5 is called `arg`.)  So we extend the
environment binding cv to arg's left-hand type.
-}

-- See Note [Optimising InstCo]
opt_co4' env sym rep r (InstCo fun_co arg_co)
  -- forall over type...
  | Just (tv, _visL, _visR, k_co, body_co) <- splitForAllCo_ty_maybe fun_co
      -- tv      :: k1
      -- k_co    :: k1 ~ k2
      -- body_co :: t1 ~ t2
      -- arg_co  :: (s1:k1) ~ (s2:k2)
  , let arg_co' = opt_co4 env NotSwapped False Nominal arg_co
          -- Do /not/ push Sym into the arg_co, hence sym=False
          -- see (LC2) of Note [The LiftingContext in optCoercion]
        k_co'  = opt_co4 env NotSwapped False Nominal k_co
        s2'    = coercionRKind arg_co'
        tv_co  = mk_coherence_right_co Nominal s2' (mkSymCo k_co') arg_co'
          -- mkSymCo kind_co :: k2 ~ k1
          -- tv_co :: (s1 :: k1) ~ (((s2 :: k2) |> (sym kind_co)) :: k1)
  = opt_co4 (extendLiftingContext env tv tv_co) sym rep r body_co

  -- See Note [Forall over coercion]
  | Just (cv, _visL, _visR, _kind_co, body_co) <- splitForAllCo_co_maybe fun_co
  , CoercionTy h1 <- coercionLKind arg_co
  , let h1' = opt_co4 env NotSwapped False Nominal h1
  = opt_co4 (extendLiftingContextCvSubst env cv h1') sym rep r body_co

  -- OK so those cases didn't work. See if it is a forall /after/ optimization
  -- If so, do an inefficient one-variable substitution, then re-optimize

  -- forall over type...
  | Just (tv', _visL, _visR, k_co', body_co') <- splitForAllCo_ty_maybe fun_co'
  , let s2'   = coercionRKind arg_co'
        tv_co = mk_coherence_right_co Nominal s2' (mkSymCo k_co') arg_co'
        env'  = extendLiftingContext (zapLiftingContext env) tv' tv_co
  = opt_co4 env' NotSwapped False r' body_co'

  -- See Note [Forall over coercion]
  | Just (cv', _visL, _visR, _kind_co', body_co') <- splitForAllCo_co_maybe fun_co'
  , CoercionTy h1' <- coercionLKind arg_co'
  , let env' = extendLiftingContextCvSubst (zapLiftingContext env) cv' h1'
  = opt_co4 env' NotSwapped False r' body_co'

  -- Those cases didn't work either, so rebuild the InstCo
  -- Push Sym into /both/ function /and/ argument
  | otherwise
  = InstCo fun_co' arg_co'

  where
    -- fun_co' arg_co' are both optimised, /and/ we have pushed `sym` into both
    -- So no more sym'ing on the results of fun_co' arg_co'
    fun_co' = opt_co4 env sym rep r fun_co
    arg_co' = opt_co4 env sym False Nominal arg_co
    r'      = chooseRole rep r

opt_co4' env sym _rep r (KindCo co)
  = assert (r == Nominal) $
    let kco' = promoteCoercion co in
    case kco' of
      KindCo co' -> promoteCoercion (opt_co1 env sym co')
      _          -> opt_co4 env sym False Nominal kco'
  -- This might be able to be optimized more to do the promotion
  -- and substitution/optimization at the same time

opt_co4' env sym _ r (SubCo co)
  = assert (r == Representational) $
    opt_co4 env sym True Nominal co

{- Note [Optimise CoVarCo to Refl]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have (c :: t~t) we can optimise it to Refl. That increases the
chances of floating the Refl upwards; e.g. Maybe c --> Refl (Maybe t)

We do so here in optCoercion, not in mkCoVarCo; see Note [mkCoVarCo]
in GHC.Core.Coercion.
-}

-------------
-- | Optimize a phantom coercion. The input coercion may not necessarily
-- be a phantom, but the output sure will be.
opt_phantom :: LiftingContext -> SwapFlag -> Coercion -> NormalCo
opt_phantom env sym (UnivCo { uco_prov = prov, uco_lty = t1
                            , uco_rty = t2, uco_deps = deps })
  = opt_univ env sym prov deps Phantom t1 t2
opt_phantom env sym co
  = opt_univ env sym PhantomProv [mkKindCo co] Phantom ty1 ty2
  where
    Pair ty1 ty2 = coercionKind co

{- Note [Differing kinds]
~~~~~~~~~~~~~~~~~~~~~~
The two types may not have the same kind (although that would be very
unusual).  But even if they have the same kind, and the same type
constructor, the number of arguments in a `CoTyConApp` can differ. Consider

  Any :: forall k. k

  Any @Type Int :: Type
  Any @(Type->Type) Maybe Int :: Type

Hence the need to compare argument lengths; see #13658

Note [opt_univ needs injectivity]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If opt_univ sees a coercion between `T a1 a2` and `T b1 b2` it will optimize
it by producing a TyConAppCo for T, and pushing the UnivCo into the
arguments.  But this works only if T is injective. Otherwise we can have
something like

  type family F x where
    F Int  = Int
    F Bool = Int

where `UnivCo :: F Int ~ F Bool` is reasonable (it is effectively just an
alternative representation for a couple of uses of AxiomInstCos) but we do
not want to produce `F (UnivCo :: Int ~ Bool)` where the inner coercion is
clearly inconsistent.  Hence the opt_univ case for TyConApps checks
isInjectiveTyCon.  See #19509.
-}

-- | Optimise a UnivCo: apply the left/right substitutions of the lifting
-- context to the two types, optimise the dependency coercions, and swap
-- the sides if sym is set.
opt_univ :: LiftingContext -> SwapFlag -> UnivCoProvenance -> [Coercion]
         -> Role -> Type -> Type -> Coercion
opt_univ env sym prov deps role ty1 ty2
  = let ty1'  = substTyUnchecked (lcSubstLeft  env) ty1
        ty2'  = substTyUnchecked (lcSubstRight env) ty2
        deps' = map (opt_co1 env sym) deps
        (ty1'', ty2'') = swapSym sym (ty1', ty2')
    in mkUnivCo prov deps' role ty1'' ty2''

{-
opt_univ env PhantomProv cvs _r ty1 ty2
  = mkUnivCo PhantomProv cvs Phantom ty1' ty2'
  where
    ty1' = substTy (lcSubstLeft  env) ty1
    ty2' = substTy (lcSubstRight env) ty2

opt_univ1 env prov cvs' role oty1 oty2
  | Just (tc1, tys1) <- splitTyConApp_maybe oty1
  , Just (tc2, tys2) <- splitTyConApp_maybe oty2
  , tc1 == tc2
  , isInjectiveTyCon tc1 role  -- see Note [opt_univ needs injectivity]
  , equalLength tys1 tys2 -- see Note [Differing kinds]
      -- NB: prov must not be the two interesting ones (ProofIrrel & Phantom);
      -- Phantom is already taken care of, and ProofIrrel doesn't relate tyconapps
  = let roles    = tyConRoleListX role tc1
        arg_cos  = zipWith3 (mkUnivCo prov cvs') roles tys1 tys2
        arg_cos' = zipWith (opt_co4 env False False) roles arg_cos
    in
    mkTyConAppCo role tc1 arg_cos'

  -- can't optimize the AppTy case because we can't build the kind coercions.

  | Just (Bndr tv1 vis1, ty1) <- splitForAllForAllTyBinder_maybe oty1
  , isTyVar tv1
  , Just (Bndr tv2 vis2, ty2) <- splitForAllForAllTyBinder_maybe oty2
  , isTyVar tv2
      -- NB: prov isn't interesting here either
  = let k1   = tyVarKind tv1
        k2   = tyVarKind tv2
        eta  = mkUnivCo prov cvs' Nominal k1 k2
          -- eta gets opt'ed soon, but not yet.
        ty2' = substTyWith [tv2] [TyVarTy tv1 `mkCastTy` eta] ty2

        (env', tv1', eta') = optForAllCoBndr env False tv1 eta
    in
    mkForAllCo tv1' vis1 vis2 eta' (opt_univ1 env' prov cvs' role ty1 ty2')

  | Just (Bndr cv1 vis1, ty1) <- splitForAllForAllTyBinder_maybe oty1
  , isCoVar cv1
  , Just (Bndr cv2 vis2, ty2) <- splitForAllForAllTyBinder_maybe oty2
  , isCoVar cv2
      -- NB: prov isn't interesting here either
  = let k1    = varType cv1
        k2    = varType cv2
        r'    = coVarRole cv1
        eta   = mkUnivCo prov cvs' Nominal k1 k2
        eta_d = downgradeRole r' Nominal eta
          -- eta gets opt'ed soon, but not yet.
        n_co  = (mkSymCo $ mkSelCo (SelTyCon 2 r') eta_d)
                  `mkTransCo` (mkCoVarCo cv1)
                  `mkTransCo` (mkSelCo (SelTyCon 3 r') eta_d)
        ty2'  = substTyWithCoVars [cv2] [n_co] ty2

        (env', cv1', eta') = optForAllCoBndr env False cv1 eta
    in
    mkForAllCo cv1' vis1 vis2 eta' (opt_univ1 env' prov cvs' role ty1 ty2')

  | otherwise
  = let ty1 = substTyUnchecked (lcSubstLeft  env) oty1
        ty2 = substTyUnchecked (lcSubstRight env) oty2
    in mkUnivCo prov cvs' role ty1 ty2
-}

-------------
opt_transList :: HasDebugCallStack => InScopeSet -> [NormalCo] -> [NormalCo] -> [NormalCo]
opt_transList is = zipWithEqual "opt_transList" (opt_trans is)
  -- The input lists must have identical length.
opt_trans :: HasDebugCallStack => InScopeSet -> NormalCo -> NormalCo -> NormalCo
-- opt_trans just allows us to add some debug tracing
-- Usually it just goes to opt_trans'
opt_trans is co1 co2
  = -- (if coercionRKind co1 `eqType` coercionLKind co2
    --  then (\x -> x) else
    --  pprTrace "opt_trans" (vcat [ text "co1" <+> ppr co1
    --                             , text "co2" <+> ppr co2
    --                             , text "co1 kind" <+> ppr (coercionKind co1)
    --                             , text "co2 kind" <+> ppr (coercionKind co2)
    --                             , callStackDoc ])) $
    opt_trans' is co1 co2

{-
opt_trans is co1 co2
  = assertPpr (r1==r2) (vcat [ ppr r1 <+> ppr co1, ppr r2 <+> ppr co2]) $
    assertPpr (rres == r1) (vcat [ ppr r1 <+> ppr co1, ppr r2 <+> ppr co2
                                 , text "res" <+> ppr rres <+> ppr res ]) $
    res
  where
    res  = opt_trans' is co1 co2
    rres = coercionRole res
    r1   = coercionRole co1
    r2   = coercionRole co1
-}

opt_trans' :: HasDebugCallStack => InScopeSet -> NormalCo -> NormalCo -> NormalCo
opt_trans' is co1 co2
  | isReflCo co1 = co2
    -- optimize when co1 is a Refl Co
  | otherwise    = opt_trans1 is co1 co2

opt_trans1 :: HasDebugCallStack => InScopeSet -> NormalNonIdCo -> NormalCo -> NormalCo
-- First arg is not the identity
opt_trans1 is co1 co2
  | isReflCo co2 = co1
    -- optimize when co2 is a Refl Co
  | otherwise    = opt_trans2 is co1 co2

opt_trans2 :: HasDebugCallStack => InScopeSet -> NormalNonIdCo -> NormalNonIdCo -> NormalCo
-- Neither arg is the identity
opt_trans2 is (TransCo co1a co1b) co2
    -- Don't know whether the sub-coercions are the identity
  = opt_trans is co1a (opt_trans is co1b co2)

opt_trans2 is co1 co2
  | Just co <- opt_trans_rule is co1 co2
  = co

opt_trans2 is co1 (TransCo co2a co2b)
  | Just co1_2a <- opt_trans_rule is co1 co2a
  = if isReflCo co1_2a
    then co2b
    else opt_trans1 is co1_2a co2b

opt_trans2 _ co1 co2
  = mk_trans_co co1 co2

------
-- Optimize coercions with a top-level use of transitivity.
opt_trans_rule :: HasDebugCallStack
               => InScopeSet -> NormalNonIdCo -> NormalNonIdCo -> Maybe NormalCo

opt_trans_rule _ in_co1 in_co2
  | assertPpr (coercionRKind in_co1 `eqType` coercionLKind in_co2)
              (vcat [ text "in_co1" <+> ppr in_co1
                    , text "in_co2" <+> ppr in_co2
                    , text "in_co1 kind" <+> ppr (coercionKind in_co1)
                    , text "in_co2 kind" <+> ppr (coercionKind in_co2)
                    , callStackDoc ]) $
    False
  = panic "opt_trans_rule"
  -- This entire equation is purely assertion checking

opt_trans_rule is in_co1@(GRefl r1 t1 (MCo co1)) in_co2@(GRefl r2 _t2 (MCo co2))
  = assert (r1 == r2) $
    fireTransRule "GRefl" in_co1 in_co2 $
    mk_grefl_right_co r1 t1 (opt_trans is co1 co2)

-- Push transitivity through matching destructors
opt_trans_rule is in_co1@(SelCo d1 co1) in_co2@(SelCo d2 co2)
  | d1 == d2
  , coercionRole co1 == coercionRole co2
  , co1 `compatible_co` co2
  = fireTransRule "PushNth" in_co1 in_co2 $
    mkSelCo d1 (opt_trans is co1 co2)

opt_trans_rule is in_co1@(LRCo d1 co1) in_co2@(LRCo d2 co2)
  | d1 == d2
  , co1 `compatible_co` co2
  = fireTransRule "PushLR" in_co1 in_co2 $
    mkLRCo d1 (opt_trans is co1 co2)

-- Push transitivity inside instantiation
opt_trans_rule is in_co1@(InstCo co1 ty1) in_co2@(InstCo co2 ty2)
  | ty1 `eqCoercion` ty2
  , co1 `compatible_co` co2
  = fireTransRule "TrPushInst" in_co1 in_co2 $
    mkInstCo (opt_trans is co1 co2) ty1

opt_trans_rule _
   in_co1@(UnivCo { uco_prov = p1, uco_role = r1, uco_lty = tyl1, uco_deps = deps1 })
   in_co2@(UnivCo { uco_prov = p2, uco_role = r2, uco_rty = tyr2, uco_deps = deps2 })
  | p1 == p2   -- If the provenances are different, opt'ing will be very confusing
  = assert (r1 == r2) $
    fireTransRule "UnivCo" in_co1 in_co2 $
    mkUnivCo p1 (deps1 ++ deps2) r1 tyl1 tyr2

-- Push transitivity down through matching top-level constructors.
opt_trans_rule is in_co1@(TyConAppCo r1 tc1 cos1) in_co2@(TyConAppCo r2 tc2 cos2)
  | tc1 == tc2
  = assert (r1 == r2) $
    fireTransRule "PushTyConApp" in_co1 in_co2 $
    mkTyConAppCo r1 tc1 (opt_transList is cos1 cos2)

opt_trans_rule is in_co1@(FunCo r1 afl1 afr1 w1 co1a co1b)
                  in_co2@(FunCo r2 afl2 afr2 w2 co2a co2b)
  = assert (r1 == r2) $     -- Just like the TyConAppCo/TyConAppCo case
    assert (afr1 == afl2) $
    fireTransRule "PushFun" in_co1 in_co2 $
    mkFunCo2 r1 afl1 afr2 (opt_trans is w1 w2)
                          (opt_trans is co1a co2a)
                          (opt_trans is co1b co2b)

opt_trans_rule is in_co1@(AppCo co1a co1b) in_co2@(AppCo co2a co2b)
  -- Must call opt_trans_rule_app; see Note [EtaAppCo]
  = opt_trans_rule_app is in_co1 in_co2 co1a [co1b] co2a [co2b]

-- Eta rules
opt_trans_rule is co1@(TyConAppCo r tc cos1) co2
  | Just cos2 <- etaTyConAppCo_maybe tc co2
  = fireTransRule "EtaCompL" co1 co2 $
    mkTyConAppCo r tc (opt_transList is cos1 cos2)

opt_trans_rule is co1 co2@(TyConAppCo r tc cos2)
  | Just cos1 <- etaTyConAppCo_maybe tc co1
  = fireTransRule "EtaCompR" co1 co2 $
    mkTyConAppCo r tc (opt_transList is cos1 cos2)

opt_trans_rule is co1@(AppCo co1a co1b) co2
  | Just (co2a,co2b) <- etaAppCo_maybe co2
  = opt_trans_rule_app is co1 co2 co1a [co1b] co2a [co2b]

opt_trans_rule is co1 co2@(AppCo co2a co2b)
  | Just (co1a,co1b) <- etaAppCo_maybe co1
  = opt_trans_rule_app is co1 co2 co1a [co1b] co2a [co2b]

-- Push transitivity inside forall
-- forall over types.
opt_trans_rule is co1 co2
  | Just (tv1, visL1, _visR1, eta1, r1) <- splitForAllCo_ty_maybe co1
  , Just (tv2, _visL2, visR2, eta2, r2) <- etaForAllCo_ty_maybe co2
  = push_trans tv1 eta1 r1 tv2 eta2 r2 visL1 visR2

  | Just (tv2, _visL2, visR2, eta2, r2) <- splitForAllCo_ty_maybe co2
  , Just (tv1, visL1, _visR1, eta1, r1) <- etaForAllCo_ty_maybe co1
  = push_trans tv1 eta1 r1 tv2 eta2 r2 visL1 visR2

  where
    push_trans tv1 eta1 r1 tv2 eta2 r2 visL visR
      -- Given:
      --   co1 = /\ tv1 : eta1 . r1
      --   co2 = /\ tv2 : eta2 . r2
      -- Wanted:
      --   /\tv1 : (eta1;eta2) .  (r1; r2[tv2 |-> tv1 |> eta1])
      = fireTransRule "EtaAllTy_ty" co1 co2 $
        mkForAllCo tv1 visL visR (opt_trans is eta1 eta2) (opt_trans is' r1 r2')
      where
        is' = is `extendInScopeSet` tv1
        r2' = substCoWithUnchecked [tv2] [mkCastTy (TyVarTy tv1) eta1] r2

-- Push transitivity inside forall
-- forall over coercions.
opt_trans_rule is co1 co2
  | Just (cv1, visL1, _visR1, eta1, r1) <- splitForAllCo_co_maybe co1
  , Just (cv2, _visL2, visR2, eta2, r2) <- etaForAllCo_co_maybe co2
  = push_trans cv1 eta1 r1 cv2 eta2 r2 visL1 visR2

  | Just (cv2, _visL2, visR2, eta2, r2) <- splitForAllCo_co_maybe co2
  , Just (cv1, visL1, _visR1, eta1, r1) <- etaForAllCo_co_maybe co1
  = push_trans cv1 eta1 r1 cv2 eta2 r2 visL1 visR2

  where
    push_trans cv1 eta1 r1 cv2 eta2 r2 visL visR
      -- Given:
      --   co1 = /\ (cv1 : eta1) . r1
      --   co2 = /\ (cv2 : eta2) . r2
      -- Wanted:
      --   n1 = nth 2 eta1
      --   n2 = nth 3 eta1
      --   nco = /\ cv1 : (eta1;eta2). (r1; r2[cv2 |-> (sym n1);cv1;n2])
      = fireTransRule "EtaAllTy_co" co1 co2 $
        mkForAllCo cv1 visL visR (opt_trans is eta1 eta2) (opt_trans is' r1 r2')
      where
        is'   = is `extendInScopeSet` cv1
        role  = coVarRole cv1
        eta1' = downgradeRole role Nominal eta1
        n1    = mkSelCo (SelTyCon 2 role) eta1'
        n2    = mkSelCo (SelTyCon 3 role) eta1'
        r2'   = substCo (zipCvSubst [cv2] [(mkSymCo n1) `mk_trans_co`
                                           (mkCoVarCo cv1) `mk_trans_co` n2])
                        r2

-- Push transitivity inside axioms
opt_trans_rule is co1 co2
  -- TrPushAxSym/TrPushSymAx
  -- Put this first!  Otherwise (#23619) we get
  --     newtype N a = MkN a
  --     axN :: forall a. N a ~ a
  -- Now consider (axN ty ; sym (axN ty))
  -- If we put TrPushSymAxR first, we'll get
  --     (axN ty ; sym (axN ty)) :: N ty ~ N ty  -- Obviously Refl
  --     --> axN (sym (axN ty)) :: N ty ~ N ty   -- Very stupid
  | Just (sym1, axr1, cos1) <- isAxiomCo_maybe co1
  , Just (sym2, axr2, cos2) <- isAxiomCo_maybe co2
  , axr1 == axr2
  , sym1 == flipSwap sym2
  , Just (tc, role, branch) <- coAxiomRuleBranch_maybe axr1
  , let qtvs = coAxBranchTyVars branch ++ coAxBranchCoVars branch
        lhs  = mkTyConApp tc (coAxBranchLHS branch)
        rhs  = coAxBranchRHS branch
        pivot_tvs = exactTyCoVarsOfType (pickSwap sym2 lhs rhs)
  , all (`elemVarSet` pivot_tvs) qtvs
  = fireTransRule "TrPushAxSym" co1 co2 $
    if isSwapped sym2
       -- TrPushAxSym
    then liftCoSubstWith role qtvs (opt_transList is cos1 (map mkSymCo cos2)) lhs
       -- TrPushSymAx
    else liftCoSubstWith role qtvs (opt_transList is (map mkSymCo cos1) cos2) rhs

  -- See Note [Push transitivity inside axioms] and
  -- Note [Push transitivity inside newtype axioms only]
  -- TrPushSymAxR
  | Just (sym, axr, cos1) <- isAxiomCo_maybe co1
  , isSwapped sym
  , Just cos2 <- matchNewtypeBranch sym axr co2
  , let newAxInst = AxiomCo axr (opt_transList is (map mkSymCo cos2) cos1)
  = fireTransRule "TrPushSymAxR" co1 co2 $ SymCo newAxInst

  -- TrPushAxR
  | Just (sym, axr, cos1) <- isAxiomCo_maybe co1
  , notSwapped sym
  , Just cos2 <- matchNewtypeBranch sym axr co2
  , let newAxInst = AxiomCo axr (opt_transList is cos1 cos2)
  = fireTransRule "TrPushAxR" co1 co2 newAxInst

  -- TrPushSymAxL
  | Just (sym, axr, cos2) <- isAxiomCo_maybe co2
  , isSwapped sym
  , Just cos1 <- matchNewtypeBranch (flipSwap sym) axr co1
  , let newAxInst = AxiomCo axr (opt_transList is cos2 (map mkSymCo cos1))
  = fireTransRule "TrPushSymAxL" co1 co2 $ SymCo newAxInst

  -- TrPushAxL
  | Just (sym, axr, cos2) <- isAxiomCo_maybe co2
  , notSwapped sym
  , Just cos1 <- matchNewtypeBranch (flipSwap sym) axr co1
  , let newAxInst = AxiomCo axr (opt_transList is cos1 cos2)
  = fireTransRule "TrPushAxL" co1 co2 newAxInst

opt_trans_rule _ co1 co2        -- Identity rule
  | let ty1 = coercionLKind co1
        r   = coercionRole  co1
        ty2 = coercionRKind co2
  , ty1 `eqType` ty2
  = fireTransRule "RedTypeDirRefl" co1 co2 $
    mkReflCo r ty2

opt_trans_rule _ _ _ = Nothing

-- See Note [EtaAppCo]
opt_trans_rule_app :: InScopeSet
                   -> Coercion   -- original left-hand coercion (printing only)
                   -> Coercion   -- original right-hand coercion (printing only)
                   -> Coercion   -- left-hand coercion "function"
                   -> [Coercion] -- left-hand coercion "args"
                   -> Coercion   -- right-hand coercion "function"
                   -> [Coercion] -- right-hand coercion "args"
                   -> Maybe Coercion
opt_trans_rule_app is orig_co1 orig_co2 co1a co1bs co2a co2bs
  | AppCo co1aa co1ab <- co1a
  , Just (co2aa, co2ab) <- etaAppCo_maybe co2a
  = opt_trans_rule_app is orig_co1 orig_co2 co1aa (co1ab:co1bs) co2aa (co2ab:co2bs)

  | AppCo co2aa co2ab <- co2a
  , Just (co1aa, co1ab) <- etaAppCo_maybe co1a
  = opt_trans_rule_app is orig_co1 orig_co2 co1aa (co1ab:co1bs) co2aa (co2ab:co2bs)

  | otherwise
  = assert (co1bs `equalLength` co2bs) $
    fireTransRule ("EtaApps:" ++ show (length co1bs)) orig_co1 orig_co2 $
    let rt1a = coercionRKind co1a

        lt2a = coercionLKind co2a
        rt2a = coercionRole  co2a

        rt1bs = map coercionRKind co1bs
        lt2bs = map coercionLKind co2bs
        rt2bs = map coercionRole co2bs

        kcoa  = mkKindCo $ buildCoercion lt2a rt1a
        kcobs = map mkKindCo $ zipWith buildCoercion lt2bs rt1bs

        co2a'   = mkCoherenceLeftCo rt2a lt2a kcoa co2a
        co2bs'  = zipWith3 mkGReflLeftCo rt2bs lt2bs kcobs
        co2bs'' = zipWith mk_trans_co co2bs' co2bs
    in
    mkAppCos (opt_trans is co1a co2a')
             (zipWith (opt_trans is) co1bs co2bs'')

fireTransRule :: String -> Coercion -> Coercion -> Coercion -> Maybe Coercion
fireTransRule _rule _co1 _co2 res
  = Just res

{-
Note [Push transitivity inside axioms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
opt_trans_rule tries to push transitivity inside axioms to deal with cases like
the following:

    newtype N a = MkN a

    axN :: N a ~R# a

    covar :: a ~R# b
    co1 = axN :: N a ~R# a
    co2 = axN :: N b ~R# b

    co :: a ~R# b
    co = sym co1 ; N covar ; co2

When
we are optimising co, we want to notice that the two axiom instantiations cancel out. This is implemented by rules such as TrPushSymAxR, which transforms sym (axN ) ; N covar into sym (axN covar) so that TrPushSymAx can subsequently transform sym (axN covar) ; axN into covar which is much more compact. In some perf test cases this kind of pattern can be generated repeatedly during simplification, so it is very important we squash it to stop coercions growing exponentially. For more details see the paper: Evidence normalisation in System FC Dimitrios Vytiniotis and Simon Peyton Jones RTA'13, 2013 https://www.microsoft.com/en-us/research/publication/evidence-normalization-system-fc-2/ Note [Push transitivity inside newtype axioms only] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The optimization described in Note [Push transitivity inside axioms] is possible for both newtype and type family axioms. However, for type family axioms it is relatively common to have transitive sequences of axioms instantiations, for example: data Nat = Zero | Suc Nat type family Index (n :: Nat) (xs :: [Type]) :: Type where Index Zero (x : xs) = x Index (Suc n) (x : xs) = Index n xs axIndex :: { forall x::Type. forall xs::[Type]. Index Zero (x : xs) ~ x ; forall n::Nat. forall x::Type. forall xs::[Type]. Index (Suc n) (x : xs) ~ Index n xs } co :: Index (Suc (Suc Zero)) [a, b, c] ~ c co = axIndex[1] <[b, c]> ; axIndex[1] <[c]> ; axIndex[0] <[]> Not only are there no cancellation opportunities here, but calling matchAxiom repeatedly down the transitive chain is very expensive. Hence we do not attempt to push transitivity inside type family axioms. See #8095, !9210 and related tickets. This is implemented by opt_trans_rule checking that the axiom is for a newtype constructor (i.e. not a type family). 
Adding these guards substantially improved performance (reduced bytes allocated by more than 10%) for the tests CoOpt_Singletons, LargeRecord, T12227, T12545, T13386, T15703, T5030, T8095. A side benefit is that we do not risk accidentally creating an ill-typed coercion; see Note [Why call checkAxInstCo during optimisation]. There may exist programs that previously relied on pushing transitivity inside type family axioms to avoid creating huge coercions, which will regress in compile time performance as a result of this change. We do not currently know of any examples, but if any come to light we may need to reconsider this behaviour. Note [Why call checkAxInstCo during optimisation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NB: The following is no longer relevant, because we no longer push transitivity into type family axioms (Note [Push transitivity inside newtype axioms only]). It is retained for reference in case we change this behaviour in the future. It is possible that otherwise-good-looking optimisations meet with disaster in the presence of axioms with multiple equations. Consider type family Equal (a :: *) (b :: *) :: Bool where Equal a a = True Equal a b = False type family Id (a :: *) :: * where Id a = a axEq :: { [a::*]. Equal a a ~ True ; [a::*, b::*]. Equal a b ~ False } axId :: [a::*]. Id a ~ a co1 = Equal (axId[0] Int) (axId[0] Bool) :: Equal (Id Int) (Id Bool) ~ Equal Int Bool co2 = axEq[1] :: Equal Int Bool ~ False We wish to optimise (co1 ; co2). We end up in rule TrPushAxL, noting that co2 is an axiom and that matchAxiom succeeds when looking at co1. But, what happens when we push the coercions inside? We get co3 = axEq[1] (axId[0] Int) (axId[0] Bool) :: Equal (Id Int) (Id Bool) ~ False which is bogus! This is because the type system isn't smart enough to know that (Id Int) and (Id Bool) are Surely Apart, as they're headed by type families. 
At the time of writing, I (Richard Eisenberg) couldn't think of a way of detecting this any more efficient than just building the optimised coercion and checking. Note [EtaAppCo] ~~~~~~~~~~~~~~~ Suppose we're trying to optimize (co1a co1b ; co2a co2b). Ideally, we'd like to rewrite this to (co1a ; co2a) (co1b ; co2b). The problem is that the resultant coercions might not be well kinded. Here is an example (things labeled with x don't matter in this example): k1 :: Type k2 :: Type a :: k1 -> Type b :: k1 h :: k1 ~ k2 co1a :: x1 ~ (a |> (h -> ) co1b :: x2 ~ (b |> h) co2a :: a ~ x3 co2b :: b ~ x4 First, convince yourself of the following: co1a co1b :: x1 x2 ~ (a |> (h -> )) (b |> h) co2a co2b :: a b ~ x3 x4 (a |> (h -> )) (b |> h) `eqType` a b That last fact is due to Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep, where we ignore coercions in types as long as two types' kinds are the same. In our case, we meet this last condition, because (a |> (h -> )) (b |> h) :: Type and a b :: Type So the input coercion (co1a co1b ; co2a co2b) is well-formed. But the suggested output coercions (co1a ; co2a) and (co1b ; co2b) are not -- the kinds don't match up. The solution here is to twiddle the kinds in the output coercions. First, we need to find coercions ak :: kind(a |> (h -> )) ~ kind(a) bk :: kind(b |> h) ~ kind(b) This can be done with mkKindCo and buildCoercion. The latter assumes two types are identical modulo casts and builds a coercion between them. Then, we build (co1a ; co2a |> sym ak) and (co1b ; co2b |> sym bk) as the output coercions. These are well-kinded. Also, note that all of this is done after accumulated any nested AppCo parameters. This step is to avoid quadratic behavior in calling coercionKind. The problem described here was first found in dependent/should_compile/dynamic-paper. 
-} ----------- swapSym :: SwapFlag -> (a,a) -> (a,a) swapSym IsSwapped (x,y) = (y,x) swapSym NotSwapped (x,y) = (x,y) wrapSym :: SwapFlag -> Coercion -> Coercion wrapSym IsSwapped co = mkSymCo co wrapSym NotSwapped co = co -- | Conditionally set a role to be representational wrapRole :: ReprFlag -> Role -- ^ current role -> Coercion -> Coercion wrapRole False _ = id wrapRole True current = downgradeRole Representational current -- | If we require a representational role, return that. Otherwise, -- return the "default" role provided. chooseRole :: ReprFlag -> Role -- ^ "default" role -> Role chooseRole True _ = Representational chooseRole _ r = r ----------- isAxiomCo_maybe :: Coercion -> Maybe (SwapFlag, CoAxiomRule, [Coercion]) -- We don't expect to see nested SymCo; and that lets us write a simple, -- non-recursive function. (If we see a nested SymCo we'll just fail, -- which is ok.) isAxiomCo_maybe (SymCo (AxiomCo ax cos)) = Just (IsSwapped, ax, cos) isAxiomCo_maybe (AxiomCo ax cos) = Just (NotSwapped, ax, cos) isAxiomCo_maybe _ = Nothing matchNewtypeBranch :: SwapFlag -- IsSwapped = match LHS, NotSwapped = match RHS -> CoAxiomRule -> Coercion -> Maybe [Coercion] matchNewtypeBranch sym axr co | Just (tc,branch) <- isNewtypeAxiomRule_maybe axr , CoAxBranch { cab_tvs = qtvs , cab_cvs = [] -- can't infer these, so fail if there are any , cab_roles = roles , cab_lhs = lhs , cab_rhs = rhs } <- branch , Just subst <- liftCoMatch (mkVarSet qtvs) (pickSwap sym rhs (mkTyConApp tc lhs)) co , all (`isMappedByLC` subst) qtvs = zipWithM (liftCoSubstTyVar subst) roles qtvs | otherwise = Nothing ------------- compatible_co :: Coercion -> Coercion -> Bool -- Check whether (co1 . co2) will be well-kinded compatible_co co1 co2 = x1 `eqType` x2 where x1 = coercionRKind co1 x2 = coercionLKind co2 ------------- {- etaForAllCo ~~~~~~~~~~~~~~~~~ (1) etaForAllCo_ty_maybe Suppose we have g : all a1:k1.t1 ~ all a2:k2.t2 but g is *not* a ForAllCo. We want to eta-expand it. 
So, we do this: g' = all a1:(ForAllKindCo g).(InstCo g (a1 ~ a1 |> ForAllKindCo g)) Call the kind coercion h1 and the body coercion h2. We can see that h2 : t1 ~ t2[a2 |-> (a1 |> h1)] According to the typing rule for ForAllCo, we get that g' : all a1:k1.t1 ~ all a1:k2.(t2[a2 |-> (a1 |> h1)][a1 |-> a1 |> sym h1]) or g' : all a1:k1.t1 ~ all a1:k2.(t2[a2 |-> a1]) as desired. (2) etaForAllCo_co_maybe Suppose we have g : all c1:(s1~s2). t1 ~ all c2:(s3~s4). t2 Similarly, we do this g' = all c1:h1. h2 : all c1:(s1~s2). t1 ~ all c1:(s3~s4). t2[c2 |-> (sym eta1;c1;eta2)] [c1 |-> eta1;c1;sym eta2] Here, h1 = mkSelCo Nominal 0 g :: (s1~s2)~(s3~s4) eta1 = mkSelCo (SelTyCon 2 r) h1 :: (s1 ~ s3) eta2 = mkSelCo (SelTyCon 3 r) h1 :: (s2 ~ s4) h2 = mkInstCo g (cv1 ~ (sym eta1;c1;eta2)) -} etaForAllCo_ty_maybe :: Coercion -> Maybe (TyVar, ForAllTyFlag, ForAllTyFlag, Coercion, Coercion) -- Try to make the coercion be of form (forall tv:kind_co. co) etaForAllCo_ty_maybe co | Just (tv, visL, visR, kind_co, r) <- splitForAllCo_ty_maybe co = Just (tv, visL, visR, kind_co, r) | (Pair ty1 ty2, role) <- coercionKindRole co , Just (Bndr tv1 vis1, _) <- splitForAllForAllTyBinder_maybe ty1 , isTyVar tv1 , Just (Bndr tv2 vis2, _) <- splitForAllForAllTyBinder_maybe ty2 , isTyVar tv2 -- can't eta-expand at nominal role unless visibilities match , (role /= Nominal) || (vis1 `eqForAllVis` vis2) , let kind_co = mkSelCo SelForAll co = Just ( tv1, vis1, vis2, kind_co , mkInstCo co (mk_grefl_right_co Nominal (TyVarTy tv1) kind_co)) | otherwise = Nothing etaForAllCo_co_maybe :: Coercion -> Maybe (CoVar, ForAllTyFlag, ForAllTyFlag, Coercion, Coercion) -- Try to make the coercion be of form (forall cv:kind_co. 
co) etaForAllCo_co_maybe co | Just (cv, visL, visR, kind_co, r) <- splitForAllCo_co_maybe co = Just (cv, visL, visR, kind_co, r) | (Pair ty1 ty2, role) <- coercionKindRole co , Just (Bndr cv1 vis1, _) <- splitForAllForAllTyBinder_maybe ty1 , isCoVar cv1 , Just (Bndr cv2 vis2, _) <- splitForAllForAllTyBinder_maybe ty2 , isCoVar cv2 -- can't eta-expand at nominal role unless visibilities match , (role /= Nominal) = let kind_co = mkSelCo SelForAll co r = coVarRole cv1 l_co = mkCoVarCo cv1 kind_co' = downgradeRole r Nominal kind_co r_co = mkSymCo (mkSelCo (SelTyCon 2 r) kind_co') `mk_trans_co` l_co `mk_trans_co` mkSelCo (SelTyCon 3 r) kind_co' in Just ( cv1, vis1, vis2, kind_co , mkInstCo co (mkProofIrrelCo Nominal kind_co l_co r_co)) | otherwise = Nothing etaAppCo_maybe :: Coercion -> Maybe (Coercion,Coercion) -- If possible, split a coercion -- g :: t1a t1b ~ t2a t2b -- into a pair of coercions (left g, right g) etaAppCo_maybe co | Just (co1,co2) <- splitAppCo_maybe co = Just (co1,co2) | (Pair ty1 ty2, Nominal) <- coercionKindRole co , Just (_,t1) <- splitAppTy_maybe ty1 , Just (_,t2) <- splitAppTy_maybe ty2 , let isco1 = isCoercionTy t1 , let isco2 = isCoercionTy t2 , isco1 == isco2 = Just (LRCo CLeft co, LRCo CRight co) | otherwise = Nothing etaTyConAppCo_maybe :: TyCon -> Coercion -> Maybe [Coercion] -- If possible, split a coercion -- g :: T s1 .. sn ~ T t1 .. tn -- into [ SelCo (SelTyCon 0) g :: s1~t1 -- , ... -- , SelCo (SelTyCon (n-1)) g :: sn~tn ] etaTyConAppCo_maybe tc (TyConAppCo _ tc2 cos2) = assert (tc == tc2) $ Just cos2 etaTyConAppCo_maybe tc co | not (tyConMustBeSaturated tc) , (Pair ty1 ty2, r) <- coercionKindRole co , Just (tc1, tys1) <- splitTyConApp_maybe ty1 , Just (tc2, tys2) <- splitTyConApp_maybe ty2 , tc1 == tc2 , isInjectiveTyCon tc r -- See Note [SelCo and newtypes] in GHC.Core.TyCo.Rep , let n = length tys1 , tys2 `lengthIs` n -- This can fail in an erroneous program -- E.g. 
T a ~# T a b -- #14607 = assert (tc == tc1) $ Just (decomposeCo n co (tyConRolesX r tc1)) -- NB: n might be <> tyConArity tc -- e.g. data family T a :: * -> * -- g :: T a b ~ T c d | otherwise = Nothing {- Note [Eta for AppCo] ~~~~~~~~~~~~~~~~~~~~ Suppose we have g :: s1 t1 ~ s2 t2 Then we can't necessarily make left g :: s1 ~ s2 right g :: t1 ~ t2 because it's possible that s1 :: * -> * t1 :: * s2 :: (*->*) -> * t2 :: * -> * and in that case (left g) does not have the same kind on either side. It's enough to check that kind t1 = kind t2 because if g is well-kinded then kind (s1 t2) = kind (s2 t2) and these two imply kind s1 = kind s2 -} optForAllCoBndr :: LiftingContext -> SwapFlag -> TyCoVar -> Coercion -> (LiftingContext, TyCoVar, Coercion) optForAllCoBndr env sym = substForAllCoBndrUsingLC sym (opt_co4 env sym False Nominal) env {- ********************************************************************** %* * Assertion-checking versions of functions in Coercion.hs %* * %********************************************************************* -} -- We can't check the assertions in the "main" functions of these -- functions, because the assertions don't hold during zonking. -- But they are fantastically helpful in finding bugs in the coercion -- optimiser itself, so I have copied them here with assertions. 
mk_trans_co :: HasDebugCallStack => Coercion -> Coercion -> Coercion -- Do assertion checking in mk_trans_co mk_trans_co co1 co2 = assertPpr (coercionRKind co1 `eqType` coercionLKind co2) (vcat [ text "co1" <+> ppr co1 , text "co2" <+> ppr co2 , text "co1 kind" <+> ppr (coercionKind co1) , text "co2 kind" <+> ppr (coercionKind co2) , callStackDoc ]) $ mkTransCo co1 co2 mk_coherence_right_co :: HasDebugCallStack => Role -> Type -> CoercionN -> Coercion -> Coercion mk_coherence_right_co r ty co co2 = assertGRefl ty co $ mkCoherenceRightCo r ty co co2 assertGRefl :: HasDebugCallStack => Type -> Coercion -> r -> r assertGRefl ty co res = assertPpr (typeKind ty `eqType` coercionLKind co) (vcat [ pp_ty "ty" ty , pp_co "co" co , callStackDoc ]) $ res mk_grefl_right_co :: Role -> Type -> CoercionN -> Coercion mk_grefl_right_co r ty co = assertGRefl ty co $ mkGReflRightCo r ty co pp_co :: String -> Coercion -> SDoc pp_co s co = text s <+> hang (ppr co) 2 (dcolon <+> ppr (coercionKind co)) pp_ty :: String -> Type -> SDoc pp_ty s ty = text s <+> hang (ppr ty) 2 (dcolon <+> ppr (typeKind ty)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/ConLike.hs0000644000000000000000000002162607346545000020465 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[ConLike]{@ConLike@: Constructor-like things} -} module GHC.Core.ConLike ( ConLike(..) 
, conLikeConLikeName , isVanillaConLike , conLikeArity , conLikeFieldLabels , conLikeConInfo , conLikeInstOrigArgTys , conLikeUserTyVarBinders , conLikeExTyCoVars , conLikeName , conLikeStupidTheta , conLikeImplBangs , conLikeFullSig , conLikeResTy , conLikeFieldType , conLikesWithFields , conLikeIsInfix , conLikeHasBuilder ) where import GHC.Prelude import GHC.Core.DataCon import GHC.Core.Multiplicity import GHC.Core.PatSyn import GHC.Core.TyCo.Rep (Type, ThetaType) import GHC.Core.TyCon (tyConDataCons) import GHC.Core.Type(mkTyConApp) import GHC.Types.Unique import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Basic import GHC.Types.GREInfo import GHC.Types.Var import GHC.Utils.Misc import GHC.Utils.Outputable import Data.Maybe( isJust ) import qualified Data.Data as Data import qualified Data.List as List {- ************************************************************************ * * \subsection{Constructor-like things} * * ************************************************************************ -} -- | A constructor-like thing data ConLike = RealDataCon DataCon | PatSynCon PatSyn -- | Is this a \'vanilla\' constructor-like thing -- (no existentials, no provided constraints)? isVanillaConLike :: ConLike -> Bool isVanillaConLike (RealDataCon con) = isVanillaDataCon con isVanillaConLike (PatSynCon ps ) = isVanillaPatSyn ps conLikeConLikeName :: ConLike -> ConLikeName conLikeConLikeName (RealDataCon dc) = DataConName (dataConName dc) conLikeConLikeName (PatSynCon ps) = PatSynName (patSynName ps) {- ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq ConLike where (==) = eqConLike eqConLike :: ConLike -> ConLike -> Bool eqConLike x y = getUnique x == getUnique y -- There used to be an Ord ConLike instance here that used Unique for ordering. -- It was intentionally removed to prevent determinism problems. 
-- See Note [Unique Determinism] in GHC.Types.Unique. instance Uniquable ConLike where getUnique (RealDataCon dc) = getUnique dc getUnique (PatSynCon ps) = getUnique ps instance NamedThing ConLike where getName (RealDataCon dc) = getName dc getName (PatSynCon ps) = getName ps instance Outputable ConLike where ppr (RealDataCon dc) = ppr dc ppr (PatSynCon ps) = ppr ps instance OutputableBndr ConLike where pprInfixOcc (RealDataCon dc) = pprInfixOcc dc pprInfixOcc (PatSynCon ps) = pprInfixOcc ps pprPrefixOcc (RealDataCon dc) = pprPrefixOcc dc pprPrefixOcc (PatSynCon ps) = pprPrefixOcc ps instance Data.Data ConLike where -- don't traverse? toConstr _ = abstractConstr "ConLike" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "ConLike" -- | Number of arguments conLikeArity :: ConLike -> Arity conLikeArity (RealDataCon data_con) = dataConSourceArity data_con conLikeArity (PatSynCon pat_syn) = patSynArity pat_syn -- | Names of fields used for selectors conLikeFieldLabels :: ConLike -> [FieldLabel] conLikeFieldLabels (RealDataCon data_con) = dataConFieldLabels data_con conLikeFieldLabels (PatSynCon pat_syn) = patSynFieldLabels pat_syn -- | The 'ConInfo' (arity and field labels) associated to a 'ConLike'. conLikeConInfo :: ConLike -> ConInfo conLikeConInfo con = mkConInfo (conLikeConLikeInfo con) (conLikeArity con) (conLikeFieldLabels con) -- | Compute a 'ConLikeInfo' from a 'ConLike'. 
conLikeConLikeInfo :: ConLike -> ConLikeInfo conLikeConLikeInfo (RealDataCon con) = ConIsData { conLikeDataCons = getName <$> tyConDataCons (dataConTyCon con) } conLikeConLikeInfo (PatSynCon {}) = ConIsPatSyn -- | Returns just the instantiated /value/ argument types of a 'ConLike', -- (excluding dictionary args) conLikeInstOrigArgTys :: ConLike -> [Type] -> [Scaled Type] conLikeInstOrigArgTys (RealDataCon data_con) tys = dataConInstOrigArgTys data_con tys conLikeInstOrigArgTys (PatSynCon pat_syn) tys = map unrestricted $ patSynInstArgTys pat_syn tys -- | 'TyVarBinder's for the type variables of the 'ConLike'. For pattern -- synonyms, this will always consist of the universally quantified variables -- followed by the existentially quantified type variables. For data -- constructors, the situation is slightly more complicated—see -- @Note [DataCon user type variable binders]@ in "GHC.Core.DataCon". conLikeUserTyVarBinders :: ConLike -> [InvisTVBinder] conLikeUserTyVarBinders (RealDataCon data_con) = dataConUserTyVarBinders data_con conLikeUserTyVarBinders (PatSynCon pat_syn) = patSynUnivTyVarBinders pat_syn ++ patSynExTyVarBinders pat_syn -- The order here is because of the order in `GHC.Tc.TyCl.PatSyn`. -- | Existentially quantified type/coercion variables conLikeExTyCoVars :: ConLike -> [TyCoVar] conLikeExTyCoVars (RealDataCon dcon1) = dataConExTyCoVars dcon1 conLikeExTyCoVars (PatSynCon psyn1) = patSynExTyVars psyn1 conLikeName :: ConLike -> Name conLikeName (RealDataCon data_con) = dataConName data_con conLikeName (PatSynCon pat_syn) = patSynName pat_syn -- | The \"stupid theta\" of the 'ConLike', such as @data Eq a@ in: -- -- > data Eq a => T a = ... -- It is empty for `PatSynCon` as they do not allow such contexts. -- See @Note [The stupid context]@ in "GHC.Core.DataCon". 
conLikeStupidTheta :: ConLike -> ThetaType conLikeStupidTheta (RealDataCon data_con) = dataConStupidTheta data_con conLikeStupidTheta (PatSynCon {}) = [] -- | 'conLikeHasBuilder' returns True except for -- uni-directional pattern synonyms, which have no builder conLikeHasBuilder :: ConLike -> Bool conLikeHasBuilder (RealDataCon {}) = True conLikeHasBuilder (PatSynCon pat_syn) = isJust (patSynBuilder pat_syn) -- | Returns the strictness information for each constructor conLikeImplBangs :: ConLike -> [HsImplBang] conLikeImplBangs (RealDataCon data_con) = dataConImplBangs data_con conLikeImplBangs (PatSynCon pat_syn) = replicate (patSynArity pat_syn) HsLazy -- | Returns the type of the whole pattern conLikeResTy :: ConLike -> [Type] -> Type conLikeResTy (RealDataCon con) tys = mkTyConApp (dataConTyCon con) tys conLikeResTy (PatSynCon ps) tys = patSynInstResTy ps tys -- | The \"full signature\" of the 'ConLike' returns, in order: -- -- 1) The universally quantified type variables -- -- 2) The existentially quantified type/coercion variables -- -- 3) The equality specification -- -- 4) The provided theta (the constraints provided by a match) -- -- 5) The required theta (the constraints required for a match) -- -- 6) The original argument types (i.e. before -- any change of the representation of the type) -- -- 7) The original result type conLikeFullSig :: ConLike -> ([TyVar], [TyCoVar] -- Why tyvars for universal but tycovars for existential? 
-- See Note [Existential coercion variables] in GHC.Core.DataCon , [EqSpec] , ThetaType -- Provided theta , ThetaType -- Required theta , [Scaled Type] -- Arguments , Type ) -- Result conLikeFullSig (RealDataCon con) = let (univ_tvs, ex_tvs, eq_spec, theta, arg_tys, res_ty) = dataConFullSig con -- Required theta is empty as normal data cons require no additional -- constraints for a match in (univ_tvs, ex_tvs, eq_spec, theta, [], arg_tys, res_ty) conLikeFullSig (PatSynCon pat_syn) = let (univ_tvs, req, ex_tvs, prov, arg_tys, res_ty) = patSynSig pat_syn -- eqSpec is empty in (univ_tvs, ex_tvs, [], prov, req, arg_tys, res_ty) -- | Extract the type for any given labelled field of the 'ConLike' conLikeFieldType :: ConLike -> FieldLabelString -> Type conLikeFieldType (PatSynCon ps) label = patSynFieldType ps label conLikeFieldType (RealDataCon dc) label = dataConFieldType dc label -- | The ConLikes that have *all* the given fields conLikesWithFields :: [ConLike] -> [FieldLabelString] -> ( [ConLike] -- ConLikes containing the fields , [ConLike] ) -- ConLikes not containing the fields conLikesWithFields con_likes lbls = List.partition has_flds con_likes where has_flds dc = all (has_fld dc) lbls has_fld dc lbl = any (\ fl -> flLabel fl == lbl) (conLikeFieldLabels dc) conLikeIsInfix :: ConLike -> Bool conLikeIsInfix (RealDataCon dc) = dataConIsInfix dc conLikeIsInfix (PatSynCon ps) = patSynIsInfix ps ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/DataCon.hs0000644000000000000000000023601607346545000020453 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[DataCon]{@DataCon@: Data Constructors} -} {-# LANGUAGE DeriveDataTypeable #-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable, Binary module GHC.Core.DataCon ( -- * Main data types DataCon, DataConRep(..), SrcStrictness(..), SrcUnpackedness(..), HsSrcBang(..), HsBang(..), HsImplBang(..), StrictnessMark(..), ConTag, DataConEnv, -- ** Equality specs 
EqSpec, mkEqSpec, eqSpecTyVar, eqSpecType, eqSpecPair, eqSpecPreds, -- ** Field labels FieldLabel(..), flLabel, FieldLabelString, -- ** Type construction mkHsSrcBang, mkDataCon, fIRST_TAG, -- ** Type deconstruction dataConRepType, dataConInstSig, dataConFullSig, dataConName, dataConIdentity, dataConTag, dataConTagZ, dataConTyCon, dataConOrigTyCon, dataConWrapperType, dataConNonlinearType, dataConDisplayType, dataConUnivTyVars, dataConExTyCoVars, dataConUnivAndExTyCoVars, dataConConcreteTyVars, dataConUserTyVars, dataConUserTyVarBinders, dataConTheta, dataConStupidTheta, dataConOtherTheta, dataConInstArgTys, dataConOrigArgTys, dataConOrigResTy, dataConInstOrigArgTys, dataConRepArgTys, dataConResRepTyArgs, dataConInstUnivs, dataConFieldLabels, dataConFieldType, dataConFieldType_maybe, dataConSrcBangs, dataConSourceArity, dataConRepArity, dataConIsInfix, dataConWorkId, dataConWrapId, dataConWrapId_maybe, dataConImplicitTyThings, dataConRepStrictness, dataConImplBangs, dataConBoxer, splitDataProductType_maybe, -- ** Predicates on DataCons isNullarySrcDataCon, isNullaryRepDataCon, isTupleDataCon, isBoxedTupleDataCon, isUnboxedTupleDataCon, isUnboxedSumDataCon, isCovertGadtDataCon, isVanillaDataCon, isNewDataCon, isTypeDataCon, classDataCon, dataConCannotMatch, dataConUserTyVarsNeedWrapper, checkDataConTyVars, isBanged, isMarkedStrict, cbvFromStrictMark, eqHsBang, isSrcStrict, isSrcUnpacked, specialPromotedDc, -- ** Promotion related functions promoteDataCon ) where import GHC.Prelude import Language.Haskell.Syntax.Basic import Language.Haskell.Syntax.Module.Name import {-# SOURCE #-} GHC.Types.Id.Make ( DataConBoxer ) import GHC.Core.Type as Type import GHC.Core.Coercion import GHC.Core.Unify import GHC.Core.TyCon import GHC.Core.TyCo.Subst import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.Multiplicity import {-# SOURCE #-} GHC.Types.TyThing import GHC.Types.FieldLabel import GHC.Types.SourceText import GHC.Core.Class import GHC.Types.Name import GHC.Builtin.Names 
import GHC.Core.Predicate import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Basic import GHC.Data.FastString import GHC.Unit.Types import GHC.Utils.Binary import GHC.Types.Unique.FM ( UniqFM ) import GHC.Types.Unique.Set import GHC.Builtin.Uniques( mkAlphaTyVarUnique ) import GHC.Data.Graph.UnVar -- UnVarSet and operations import {-# SOURCE #-} GHC.Tc.Utils.TcType ( ConcreteTyVars ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Data.ByteString (ByteString) import qualified Data.ByteString.Builder as BSB import qualified Data.ByteString.Lazy as LBS import qualified Data.Data as Data import Data.Char import Data.List( find ) {- Note [Data constructor representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following Haskell data type declaration data T = T !Int ![Int] Using the strictness annotations, GHC will represent this as data T = T Int# [Int] That is, the Int has been unboxed. Furthermore, the Haskell source construction T e1 e2 is translated to case e1 of { I# x -> case e2 of { r -> T x r }} That is, the first argument is unboxed, and the second is evaluated. Finally, pattern matching is translated too: case e of { T a b -> ... } becomes case e of { T a' b -> let a = I# a' in ... } To keep ourselves sane, we name the different versions of the data constructor differently, as follows in Note [Data Constructor Naming]. The `dcRepType` field of a `DataCon` contains the type of the representation of the constructor /worker/, also called the Core representation. The Core representation may differ from the type of the constructor /wrapper/ (built by `mkDataConRep`). Besides unpacking (as seen in the example above), dictionaries and coercions become explict arguments in the Core representation of a constructor. Note that this representation is still *different* from runtime representation. (Which is what STG uses after unarise). See Note [Constructor applications in STG] in GHC.Stg.Syntax. 
Note [Data Constructor Naming] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each data constructor C has two, and possibly up to four, Names associated with it: OccName Name space Name of Notes --------------------------------------------------------------------------- The "data con itself" C DataName DataCon In dom( GlobalRdrEnv ) The "worker data con" C VarName Id The worker The "wrapper data con" $WC VarName Id The wrapper The "newtype coercion" :CoT TcClsName TyCon EVERY data constructor (incl for newtypes) has the former two (the data con itself, and its worker. But only some data constructors have a wrapper (see Note [The need for a wrapper]). Each of these three has a distinct Unique. The "data con itself" name appears in the output of the renamer, and names the Haskell-source data constructor. The type checker translates it into either the wrapper Id (if it exists) or worker Id (otherwise). The data con has one or two Ids associated with it: The "worker Id", is the actual data constructor. * Every data constructor (newtype or data type) has a worker * The worker is very like a primop, in that it has no binding. * For a *data* type, the worker *is* the data constructor; it has no unfolding * For a *newtype*, the worker has a compulsory unfolding which does a cast, e.g. newtype T = MkT Int The worker for MkT has unfolding \\(x:Int). x `cast` sym CoT Here CoT is the type constructor, witnessing the FC axiom axiom CoT : T = Int The "wrapper Id", \$WC, goes as follows * Its type is exactly what it looks like in the source program. * It is an ordinary function, and it gets a top-level binding like any other function. * The wrapper Id isn't generated for a data type if there is nothing for the wrapper to do. 
That is, if its defn would be \$wC = C Note [Data constructor workers and wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Algebraic data types - Always have a worker, with no unfolding - May or may not have a wrapper; see Note [The need for a wrapper] * Newtypes - Always have a worker, which has a compulsory unfolding (just a cast) - May or may not have a wrapper; see Note [The need for a wrapper] * INVARIANT: the dictionary constructor for a class never has a wrapper. * See Note [Data Constructor Naming] for how the worker and wrapper are named * The workers don't take the dcStupidTheta dicts as arguments, while the wrappers currently do * The wrapper (if it exists) takes dcOrigArgTys as its arguments. The worker takes dataConRepArgTys as its arguments If the wrapper is absent, dataConRepArgTys is the same as dcOrigArgTys * The 'NoDataConRep' case of DataConRep is important. Not only is it efficient, but it also ensures that the wrapper is replaced by the worker (because it *is* the worker) even when there are no args. E.g. in f (:) x the (:) *is* the worker. This is really important in rule matching, (We could match on the wrappers, but that makes it less likely that rules will match when we bring bits of unfoldings together.) Note [The need for a wrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why might the wrapper have anything to do? The full story is in wrapper_reqd in GHC.Types.Id.Make.mkDataConRep. * Unboxing strict fields (with -funbox-strict-fields) data T = MkT !(Int,Int) \$wMkT :: (Int,Int) -> T \$wMkT (x,y) = MkT x y Notice that the worker has two fields where the wrapper has just one. That is, the worker has type MkT :: Int -> Int -> T * Equality constraints for GADTs data T a where { MkT :: a -> T [a] } The worker gets a type with explicit equality constraints, thus: MkT :: forall a b. 
(a=[b]) => b -> T a The wrapper has the programmer-specified type: \$wMkT :: a -> T [a] \$wMkT a x = MkT [a] a [a] x The third argument is a coercion [a] :: [a]~[a] * Data family instances may do a cast on the result * Type variables may be permuted; see MkId Note [Data con wrappers and GADT syntax] * Datatype contexts require dropping some dictionary arguments. See Note [Instantiating stupid theta]. Note [The stupid context] ~~~~~~~~~~~~~~~~~~~~~~~~~ Data types can have a context: data (Eq a, Ord b) => T a b = T1 a b | T2 a And that makes the constructors have a context too. A constructor's context isn't necessarily the same as the data type's context, however. Per the Haskell98 Report, the part of the datatype context that is used in a data constructor is the largest subset of the datatype context that constrains only the type variables free in the data constructor's field types. For example, here are the types of T1 and T2: T1 :: (Eq a, Ord b) => a -> b -> T a b T2 :: (Eq a) => a -> T a b Notice that T2's context is "thinned". Since its field is of type `a`, only the part of the datatype context that mentions `a`—that is, `Eq a`—is included in T2's context. On the other hand, T1's fields mention both `a` and `b`, so T1's context includes all of the datatype context. Furthermore, this context pops up when pattern matching (though GHC hasn't implemented this, but it is in H98, and I've fixed GHC so that it now does): f (T2 x) = x gets inferred type f :: Eq a => T a b -> a I say the context is "stupid" because the dictionaries passed are immediately discarded -- they do nothing and have no benefit. (See Note [Instantiating stupid theta].) It's a flaw in the language. GHC has made some efforts to correct this flaw. In GHC, datatype contexts are not available by default. Instead, one must explicitly opt in to them by using the DatatypeContexts extension. To discourage their use, GHC has deprecated DatatypeContexts. 
Some other notes about stupid contexts: * Stupid contexts can interact badly with `deriving`. For instance, it's unclear how to make this derived Functor instance typecheck: data Eq a => T a = MkT a deriving Functor This is because the derived instance would need to look something like `instance Functor T where ...`, but there is nowhere to mention the requisite `Eq a` constraint. For this reason, GHC will throw an error if a user attempts to derive an instance for Functor (or a Functor-like class) where the last type variable is used in a datatype context. For Generic(1), the requirements are even harsher, as stupid contexts are not allowed at all in derived Generic(1) instances. (We could consider relaxing this requirement somewhat, although no one has asked for this yet.) Stupid contexts are permitted when deriving instances of non-Functor-like classes, or when deriving instances of Functor-like classes where the last type variable isn't mentioned in the stupid context. For example, the following is permitted: data Show a => T a = MkT deriving Eq Note that because of the "thinning" behavior mentioned above, the generated Eq instance should not mention `Show a`, as the type of MkT doesn't require it. That is, the following should be generated (#20501): instance Eq (T a) where (MkT == MkT) = True * It's not obvious how stupid contexts should interact with GADTs. For this reason, GHC disallows combining datatype contexts with GADT syntax. As a result, dcStupidTheta is always empty for data types defined using GADT syntax. 
Note [Instantiating stupid theta] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a data type with a "stupid theta" (see Note [The stupid context]): data Ord a => T a = MkT (Maybe a) We want to generate an Ord constraint for every use of MkT; but we also want to allow visible type application, such as MkT @Int To achieve this, the wrapper for a data (or newtype) constructor with a datatype context contains a lambda which drops the dictionary argments corresponding to the datatype context: /\a \(_d:Ord a). MkT @a Notice that the wrapper discards the dictionary argument d. We don't need it; it was only there to generate a Wanted constraint. (That is why it is stupid.) This all happens in GHC.Types.Id.Make.mkDataConRep. ************************************************************************ * * \subsection{Data constructors} * * ************************************************************************ -} -- | A data constructor data DataCon = MkData { dcName :: Name, -- This is the name of the *source data con* -- (see "Note [Data Constructor Naming]" above) dcUnique :: Unique, -- Cached from Name dcTag :: ConTag, -- ^ Tag, used for ordering 'DataCon's -- Running example: -- -- *** As declared by the user -- data T a b c where -- MkT :: forall c y x b. (x~y,Ord x) => x -> y -> T (x,y) b c -- *** As represented internally -- data T a b c where -- MkT :: forall a b c. forall x y. (a~(x,y),x~y,Ord x) -- => x -> y -> T a b c -- -- The next six fields express the type of the constructor, in pieces -- e.g. -- -- dcUnivTyVars = [a,b,c] -- dcExTyCoVars = [x,y] -- dcUserTyVarBinders = [c,y,x,b] -- dcEqSpec = [a~(x,y)] -- dcOtherTheta = [x~y, Ord x] -- dcOrigArgTys = [x,y] -- dcRepTyCon = T -- In general, the dcUnivTyVars are NOT NECESSARILY THE SAME AS THE -- TYVARS FOR THE PARENT TyCon. (This is a change (Oct05): previously, -- vanilla datacons guaranteed to have the same type variables as their -- parent TyCon, but that seems ugly.) 
They can be different in the case -- where a GADT constructor uses different names for the universal -- tyvars than does the tycon. For example: -- -- data H a where -- MkH :: b -> H b -- -- Here, the tyConTyVars of H will be [a], but the dcUnivTyVars of MkH -- will be [b]. dcVanilla :: Bool, -- True <=> This is a vanilla Haskell 98 data constructor -- Its type is of form -- forall a1..an . t1 -> ... tm -> T a1..an -- No existentials, no coercions, nothing. -- That is: dcExTyCoVars = dcEqSpec = dcOtherTheta = [] -- NB 1: newtypes always have a vanilla data con -- NB 2: a vanilla constructor can still be declared in GADT-style -- syntax, provided its type looks like the above. -- The declaration format is held in the TyCon (algTcGadtSyntax) -- dcUnivTyVars: Universally-quantified type vars [a,b,c] -- INVARIANT: length matches arity of the dcRepTyCon -- INVARIANT: result type of data con worker is exactly (T a b c) -- COROLLARY: The dcUnivTyVars are always in one-to-one correspondence with -- the tyConTyVars of the parent TyCon dcUnivTyVars :: [TyVar], -- Existentially-quantified type and coercion vars [x,y] -- For an example involving coercion variables, -- Why TyCoVars? See Note [Existential coercion variables] dcExTyCoVars :: [TyCoVar], -- INVARIANT: the UnivTyVars and ExTyCoVars all have distinct OccNames -- Reason: less confusing, and easier to generate Iface syntax -- The type variables of this data constructor that must be -- instantiated to concrete types. For example: the RuntimeRep -- variables of unboxed tuples and unboxed sums. -- -- See Note [Representation-polymorphism checking built-ins] -- in GHC.Tc.Utils.Concrete. 
dcConcreteTyVars :: ConcreteTyVars, -- The type/coercion vars in the order the user wrote them [c,y,x,b] -- INVARIANT(dataConTyVars): the set of tyvars in dcUserTyVarBinders is -- exactly the set of tyvars (*not* covars) of dcExTyCoVars unioned -- with the set of dcUnivTyVars whose tyvars do not appear in dcEqSpec -- So dcUserTyVarBinders is a subset of (dcUnivTyVars ++ dcExTyCoVars) -- See Note [DataCon user type variable binders] dcUserTyVarBinders :: [InvisTVBinder], dcEqSpec :: [EqSpec], -- Equalities derived from the result type, -- _as written by the programmer_. -- Only non-dependent GADT equalities (dependent -- GADT equalities are in the covars of -- dcExTyCoVars). -- This field allows us to move conveniently between the two ways -- of representing a GADT constructor's type: -- MkT :: forall a b. (a ~ [b]) => b -> T a -- MkT :: forall b. b -> T [b] -- Each equality is of the form (a ~ ty), where 'a' is one of -- the universally quantified type variables. Moreover, the -- only place in the DataCon where this 'a' will occur is in -- dcUnivTyVars. See [The dcEqSpec domain invariant]. -- The next two fields give the type context of the data constructor -- (aside from the GADT constraints, -- which are given by the dcExpSpec) -- In GADT form, this is *exactly* what the programmer writes, even if -- the context constrains only universally quantified variables -- MkT :: forall a b. (a ~ b, Ord b) => a -> T a b dcOtherTheta :: ThetaType, -- The other constraints in the data con's type -- other than those in the dcEqSpec dcStupidTheta :: ThetaType, -- The context of the data type declaration -- data Eq a => T a = ... -- or, rather, a "thinned" version thereof -- "Thinned", because the Report says -- to eliminate any constraints that don't mention -- tyvars free in the arg types for this constructor. -- See Note [The stupid context]. 
-- -- INVARIANT: the free tyvars of dcStupidTheta are a subset of dcUnivTyVars -- Reason: dcStupidTheta is gotten by thinning the stupid theta from the tycon -- -- "Stupid", because the dictionaries aren't used for anything. -- Indeed, [as of March 02] they are no longer in the type of -- the wrapper Id, because that makes it harder to use the wrap-id -- to rebuild values after record selection or in generics. dcOrigArgTys :: [Scaled Type], -- Original argument types -- (before unboxing and flattening of strict fields) dcOrigResTy :: Type, -- Original result type, as seen by the user -- NB: for a data instance, the original user result type may -- differ from the DataCon's representation TyCon. Example -- data instance T [a] where MkT :: a -> T [a] -- The dcOrigResTy is T [a], but the dcRepTyCon might be R:TList -- Now the strictness annotations and field labels of the constructor dcSrcBangs :: [HsSrcBang], -- See Note [Bangs on data constructor arguments] -- -- The [HsSrcBang] as written by the programmer. -- -- Matches 1-1 with dcOrigArgTys -- Hence length = dataConSourceArity dataCon dcFields :: [FieldLabel], -- Field labels for this constructor, in the -- same order as the dcOrigArgTys; -- length = 0 (if not a record) or dataConSourceArity. -- The curried worker function that corresponds to the constructor: -- It doesn't have an unfolding; the code generator saturates these Ids -- and allocates a real constructor when it finds one. dcWorkId :: Id, -- Constructor representation dcRep :: DataConRep, -- Cached; see Note [DataCon arities] -- INVARIANT: dcRepArity == length dataConRepArgTys + count isCoVar (dcExTyCoVars) -- INVARIANT: dcSourceArity == length dcOrigArgTys dcRepArity :: Arity, dcSourceArity :: Arity, -- Result type of constructor is T t1..tn dcRepTyCon :: TyCon, -- Result tycon, T dcRepType :: Type, -- Type of the constructor -- forall a x y. 
(a~(x,y), x~y, Ord x) => -- x -> y -> T a -- (this is *not* of the constructor wrapper Id: -- see Note [Data constructor representation]) -- Notice that the existential type parameters come *second*. -- Reason: in a case expression we may find: -- case (e :: T t) of -- MkT x y co1 co2 (d:Ord x) (v:r) (w:F s) -> ... -- It's convenient to apply the rep-type of MkT to 't', to get -- forall x y. (t~(x,y), x~y, Ord x) => x -> y -> T t -- and use that to check the pattern. Mind you, this is really only -- used in GHC.Core.Lint. dcInfix :: Bool, -- True <=> declared infix -- Used for Template Haskell and 'deriving' only -- The actual fixity is stored elsewhere dcPromoted :: TyCon -- The promoted TyCon -- See Note [Promoted data constructors] in GHC.Core.TyCon } {- Note [TyVarBinders in DataCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For the TyVarBinders in a DataCon and PatSyn, each argument flag is either Inferred or Specified, never Required. Lifting this restriction is tracked at #18389 (DataCon) and #23704 (PatSyn). Why do we need the TyVarBinders, rather than just the TyVars? So that we can construct the right type for the DataCon with its foralls attributed the correct visibility. That in turn governs whether you can use visible type application at a call of the data constructor. See also [DataCon user type variable binders] for an extended discussion on the order in which TyVarBinders appear in a DataCon. Note [Existential coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For now (Aug 2018) we can't write coercion quantifications in source Haskell, but we can in Core. Consider having: data T :: forall k. k -> k -> Constraint where MkT :: forall k (a::k) (b::k). forall k' (c::k') (co::k'~k). 
(b ~# (c|>co)) => T k a b dcUnivTyVars = [k,a,b] dcExTyCoVars = [k',c,co] dcUserTyVarBinders = [k,a,k',c] dcEqSpec = [b ~# (c|>co)] dcOtherTheta = [] dcOrigArgTys = [] dcRepTyCon = T Function call 'dataConKindEqSpec' returns [k'~k] Note [DataCon arities] ~~~~~~~~~~~~~~~~~~~~~~ A `DataCon`'s source and core representation may differ, meaning the source arity (`dcSourceArity`) and the core representation arity (`dcRepArity`) may differ too. Note that the source arity isn't exactly the number of arguments the data con /wrapper/ has, since `dcSourceArity` doesn't count constraints -- which may appear in the wrapper through `DatatypeContexts`, or if the constructor stores a dictionary. In this sense, the source arity counts the number of non-constraint arguments that appear at the source level. On the other hand, the Core representation arity is the number of arguments of the data constructor in its Core representation, which is also the number of arguments of the data con /worker/. The arity might differ since `dcRepArity` takes into account arguments such as quantified dictionaries and coercion arguments, lifted and unlifted (despite the unlifted coercion arguments having a zero-width runtime representation). For example: MkT :: Ord a => a -> T a dcSourceArity = 1 dcRepArity = 2 MkU :: (b ~ '[]) => U b dcSourceArity = 0 dcRepArity = 1 The arity might also differ due to unpacking, for example, consider the following datatype and its wrapper and worker's type: data V = MkV !() !Int $WMkV :: () -> Int -> V MkV :: Int# -> V As you see, because of unpacking we have both dropped the unit argument and unboxed the Int. In this case, the source arity (which is the arity of the wrapper) is 2, while the Core representation arity (the arity of the worker) is 1. 
Note [DataCon user type variable binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A DataCon has two different sets of type variables: * dcUserTyVarBinders, for the type variables binders in the order in which they originally arose in the user-written type signature. - They are the forall'd binders of the data con /wrapper/, which the user calls. - Their order *does* matter for TypeApplications, so they are full TyVarBinders, complete with visibilities. * dcUnivTyVars and dcExTyCoVars, for the "true underlying" (i.e. of the data con worker) universal type variable and existential type/coercion variables, respectively. - They (i.e. univ ++ ex) are the forall'd variables of the data con /worker/ - Their order is irrelevant for the purposes of TypeApplications, and as a consequence, they do not come equipped with visibilities (that is, they are TyVars/TyCoVars instead of ForAllTyBinders). Often (dcUnivTyVars ++ dcExTyCoVars) = dcUserTyVarBinders; but they may differ for two reasons, coming next: --- Reason (R1): Order of quantification in GADT syntax --- In System FC, data constructor type signatures always quantify over all of their universal type variables, followed by their existential type variables. Normally, this isn't a problem, as most datatypes naturally quantify their type variables in this order anyway. For example: data T a b = forall c. MkT b c Here, we have `MkT :: forall {k} (a :: k) (b :: *) (c :: *). b -> c -> T a b`, where k, a, and b are universal and c is existential. (The inferred variable k isn't available for TypeApplications, hence why it's in braces.) This is a perfectly reasonable order to use, as the syntax of H98-style datatypes (+ ExistentialQuantification) suggests it. Things become more complicated when GADT syntax enters the picture. Consider this example: data X a where MkX :: forall b a. 
b -> Proxy a -> X a If we adopt the earlier approach of quantifying all the universal variables followed by all the existential ones, GHC would come up with this type signature for MkX: MkX :: forall {k} (a :: k) (b :: *). b -> Proxy a -> X a But this is not what we want at all! After all, if a user were to use TypeApplications on MkX, they would expect to instantiate `b` before `a`, as that's the order in which they were written in the `forall`. (See #11721.) Instead, we'd like GHC to come up with this type signature: MkX :: forall {k} (b :: *) (a :: k). b -> Proxy a -> X a In fact, even if we left off the explicit forall: data X a where MkX :: b -> Proxy a -> X a Then a user should still expect `b` to be quantified before `a`, since according to the rules of TypeApplications, in the absence of `forall` GHC performs a stable topological sort on the type variables in the user-written type signature, which would place `b` before `a`. --- Reason (R2): GADT constructors quantify over different variables --- GADT constructors may quantify over different variables than the worker would. Consider data T a b where MkT :: forall c d. c -> T [c] d The dcUserTyVarBinders must be [c, d] -- that's what the user quantified over. But c is actually existential, as it is not equal to either of the two universal variables. Here is what we'll get: dcUserTyVarBinders = [c, d] dcUnivTyVars = [a, d] dcExTyCoVars = [c] Note that dcUnivTyVars contains `a` from the type header (the `data T a b`) and `d` from the signature for MkT. This is done because d is used in place of b in the result of MkT, and so we use the name d for the universal, as that might improve error messages. On the other hand, we need to use a fresh name for the first universal (recalling that the result of a worker must be the type constructor applied to a sequence of plain variables), so we use `a`, from the header. This choice of universals is made in GHC.Tc.TyCl.mkGADTVars. 
Because c is not a universal, it is an existential. Here, we see that (even ignoring order) dcUserTyVarBinders is not dcUnivTyVars ⋃ dcExTyCoVars, because the latter has `a` while the former does not. To understand this better, let's look at this type for the "true underlying" worker data con: MkT :: forall a d. forall c. (a ~# [c]) => c -> T a d We see here that the `a` universal is connected with the `c` existential via an equality constraint. It will always be the case (see the code in mkGADTVars) that the universals not mentioned in dcUserTyVarBinders will be used in a GADT equality -- that is, used on the left-hand side of an element of dcEqSpec: dcEqSpec = [a ~# [c]] Putting this all together, all variables used on the left-hand side of an equation in the dcEqSpec will be in dcUnivTyVars but *not* in dcUserTyVarBinders. --- End of Reasons --- INVARIANT(dataConTyVars): the set of tyvars in dcUserTyVarBinders consists of: * The set of tyvars in dcUnivTyVars whose type variables do not appear in dcEqSpec, unioned with: * The set of tyvars (*not* covars) in dcExTyCoVars No covars here because they're not user-written When comparing for equality, we ignore differences concerning type variables whose kinds have kind Constraint. The word "set" is used above because the order in which the tyvars appear in dcUserTyVarBinders can be completely different from the order in dcUnivTyVars or dcExTyCoVars. That is, the tyvars in dcUserTyVarBinders are a permutation of (tyvars of dcExTyCoVars + a subset of dcUnivTyVars). But aside from the ordering, they in fact share the same type variables (with the same Uniques). We sometimes refer to this as "the dcUserTyVarBinders invariant". It is checked in checkDataConTyVars. dcUserTyVarBinders, as the name suggests, is the one that users will see most of the time. 
It's used when computing the type signature of a data constructor wrapper (see dataConWrapperType), and as a result, it's what matters from a TypeApplications perspective. Note [The dcEqSpec domain invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this example of a GADT constructor: data Y a where MkY :: Bool -> Y Bool The user-written type of MkY is `Bool -> Y Bool`, but what is the underlying Core type for MkY? There are two conceivable possibilities: 1. MkY :: forall a. (a ~# Bool) => Bool -> Y a 2. MkY :: forall a. (a ~# Bool) => a -> Y a In practice, GHC picks (1) as the Core type for MkY. This is because we maintain an invariant that the type variables in the domain of dcEqSpec will only ever appear in the dcUnivTyVars. As a consequence, the type variables in the domain of dcEqSpec will /never/ appear in the dcExTyCoVars, dcOtherTheta, dcOrigArgTys, or dcOrigResTy; these can only ever mention variables from dcUserTyVarBinders, which excludes things in the domain of dcEqSpec. (See Note [DataCon user type variable binders].) This explains why GHC would not pick (2) as the Core type, since the argument type `a` mentions a type variable in the dcEqSpec. There are certain parts of the codebase where it is convenient to apply the substitution arising from the dcEqSpec to the dcUnivTyVars in order to obtain the user-written return type of a GADT constructor. A consequence of the dcEqSpec domain invariant is that you /never/ need to apply the substitution to any other part of the constructor type, as they don't require it. 
-} -- | Data Constructor Representation -- See Note [Data constructor workers and wrappers] data DataConRep = -- NoDataConRep means that the data con has no wrapper NoDataConRep -- DCR means that the data con has a wrapper | DCR { dcr_wrap_id :: Id -- Takes src args, unboxes/flattens, -- and constructs the representation , dcr_boxer :: DataConBoxer , dcr_arg_tys :: [Scaled Type] -- Final, representation argument types, -- after unboxing and flattening, -- and *including* all evidence args , dcr_stricts :: [StrictnessMark] -- 1-1 with dcr_arg_tys -- See also Note [Data-con worker strictness] , dcr_bangs :: [HsImplBang] -- The actual decisions made (including failures) -- about the original arguments; 1-1 with orig_arg_tys -- See Note [Bangs on data constructor arguments] } type DataConEnv a = UniqFM DataCon a -- Keyed by DataCon ------------------------- -- | Haskell Source Bang -- -- Bangs on data constructor arguments as written by the user, including the -- source code for exact-printing. -- -- In the AST, the SourceText is deconstructed and hidden inside -- 'Language.Haskell.Syntax.Extension.XBangTy' extension point. data HsSrcBang = HsSrcBang SourceText HsBang -- See Note [Pragma source text] in "GHC.Types.SourceText" -- | Make a 'HsSrcBang' from all parts mkHsSrcBang :: SourceText -> SrcUnpackedness -> SrcStrictness -> HsSrcBang mkHsSrcBang stext u s = HsSrcBang stext (HsBang u s) -- | Haskell Implementation Bang -- -- Bangs of data constructor arguments as generated by the compiler -- after consulting HsSrcBang, flags, etc. data HsImplBang = HsLazy -- ^ Lazy field, or one with an unlifted type | HsStrict Bool -- ^ Strict but not unpacked field -- True <=> we could have unpacked, but opted not to -- because of -O0. 
-- See Note [Detecting useless UNPACK pragmas] | HsUnpack (Maybe Coercion) -- ^ Strict and unpacked field -- co :: arg-ty ~ product-ty HsBang deriving Data.Data ------------------------- -- StrictnessMark is used to indicate strictness -- of the DataCon *worker* fields data StrictnessMark = MarkedStrict | NotMarkedStrict deriving Eq -- | An 'EqSpec' is a tyvar/type pair representing an equality made in -- rejigging a GADT constructor data EqSpec = EqSpec TyVar Type -- | Make a non-dependent 'EqSpec' mkEqSpec :: TyVar -> Type -> EqSpec mkEqSpec tv ty = EqSpec tv ty eqSpecTyVar :: EqSpec -> TyVar eqSpecTyVar (EqSpec tv _) = tv eqSpecType :: EqSpec -> Type eqSpecType (EqSpec _ ty) = ty eqSpecPair :: EqSpec -> (TyVar, Type) eqSpecPair (EqSpec tv ty) = (tv, ty) eqSpecPreds :: [EqSpec] -> ThetaType eqSpecPreds spec = [ mkPrimEqPred (mkTyVarTy tv) ty | EqSpec tv ty <- spec ] instance Outputable EqSpec where ppr (EqSpec tv ty) = ppr (tv, ty) {- Note [Data-con worker strictness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice that we do *not* say the worker Id is strict even if the data constructor is declared strict e.g. data T = MkT ![Int] Bool Even though most often the evals are done by the *wrapper* $WMkT, there are situations in which tag inference will re-insert evals around the worker. So for all intents and purposes the *worker* MkT is strict, too! Unfortunately, if we exposed accurate strictness of DataCon workers, we'd see the following transformation: f xs = case xs of xs' { __DEFAULT -> ... case MkT xs b of x { __DEFAULT -> [x] } } -- DmdAnal: Strict in xs ==> { drop-seq, binder swap on xs' } f xs = case MkT xs b of x { __DEFAULT -> [x] } -- DmdAnal: Still strict in xs ==> { case-to-let } f xs = let x = MkT xs' b in [x] -- DmdAnal: No longer strict in xs! I.e., we are ironically losing strictness in `xs` by dropping the eval on `xs` and then doing case-to-let. The issue is that `exprIsHNF` currently says that every DataCon worker app is a value. 
The implicit assumption is that surrounding evals will have evaluated strict fields like `xs` before! But now that we had just dropped the eval on `xs`, that assumption is no longer valid. Long story short: By keeping the demand signature lazy, the Simplifier will not drop the eval on `xs` and using `exprIsHNF` to decide case-to-let and others remains sound. Similarly, during demand analysis in dmdTransformDataConSig, we bump up the field demand with `C_01`, *not* `C_11`, because the latter exposes too much strictness that will drop the eval on `xs` above. This issue is discussed at length in "Failed idea: no wrappers for strict data constructors" in #21497 and #22475. Note [Bangs on data constructor arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT !Int {-# UNPACK #-} !Int Bool When compiling the module, GHC will decide how to represent MkT, depending on the optimisation level, and settings of flags like -funbox-small-strict-fields. Terminology: * HsSrcBang: What the user wrote Constructors: HsSrcBang * HsImplBang: What GHC decided Constructors: HsLazy, HsStrict, HsUnpack * If T was defined in this module, MkT's dcSrcBangs field records the [HsSrcBang] of what the user wrote; in the example [ HsSrcBang _ NoSrcUnpack SrcStrict , HsSrcBang _ SrcUnpack SrcStrict , HsSrcBang _ NoSrcUnpack NoSrcStrictness] * However, if T was defined in an imported module, the importing module must follow the decisions made in the original module, regardless of the flag settings in the importing module. 
Also see Note [Bangs on imported data constructors] in GHC.Types.Id.Make * The dcr_bangs field of the dcRep field records the [HsImplBang] If T was defined in this module, Without -O the dcr_bangs might be [HsStrict _, HsStrict _, HsLazy] With -O it might be [HsStrict _, HsUnpack _, HsLazy] With -funbox-small-strict-fields it might be [HsUnpack, HsUnpack _, HsLazy] With -XStrictData it might be [HsStrict _, HsUnpack _, HsStrict _] Note [Detecting useless UNPACK pragmas] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want to issue a warning when there's an UNPACK pragma in the source code, but we decided not to unpack. However, when compiling with -O0, we never unpack, and that'd generate spurious warnings. Therefore, we remember in HsStrict a boolean flag, whether we _could_ have unpacked. This flag is set in GHC.Types.Id.Make.dataConSrcToImplBang. Then, in GHC.Tc.TyCl.checkValidDataCon (sub-function check_bang), if the user wrote an `{-# UNPACK #-}` pragma (i.e. HsSrcBang contains SrcUnpack) we consult HsImplBang: HsUnpack _ => field unpacked, no warning Example: data T = MkT {-# UNPACK #-} !Int [with -O] HsStrict True => field not unpacked because -O0, no warning Example: data T = MkT {-# UNPACK #-} !Int [with -O0] HsStrict False => field not unpacked, warning Example: data T = MkT {-# UNPACK #-} !(Int -> Int) HsLazy => field not unpacked, warning This can happen in two scenarios: 1) UNPACK without a bang Example: data T = MkT {-# UNPACK #-} Int This will produce a warning about missing ! before UNPACK. 2) UNPACK of an unlifted datatype Because of bug #20204, we currently do not unpack type T, and therefore issue a warning: type IntU :: UnliftedType data IntU = IntU Int# data T = Test {-# UNPACK #-} IntU The boolean flag is used only for this warning. See #11270 for motivation. 
************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq DataCon where a == b = getUnique a == getUnique b a /= b = getUnique a /= getUnique b instance Uniquable DataCon where getUnique = dcUnique instance NamedThing DataCon where getName = dcName instance Outputable DataCon where ppr con = ppr (dataConName con) instance OutputableBndr DataCon where pprInfixOcc con = pprInfixName (dataConName con) pprPrefixOcc con = pprPrefixName (dataConName con) instance Data.Data DataCon where -- don't traverse? toConstr _ = abstractConstr "DataCon" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "DataCon" instance Outputable HsBang where ppr (HsBang prag mark) = ppr prag <+> ppr mark instance Outputable HsImplBang where ppr HsLazy = text "Lazy" ppr (HsUnpack Nothing) = text "Unpacked" ppr (HsUnpack (Just co)) = text "Unpacked" <> parens (ppr co) ppr (HsStrict b) = text "StrictNotUnpacked" <> parens (ppr b) instance Outputable SrcStrictness where ppr SrcLazy = char '~' ppr SrcStrict = char '!' ppr NoSrcStrict = empty instance Outputable SrcUnpackedness where ppr SrcUnpack = text "{-# UNPACK #-}" ppr SrcNoUnpack = text "{-# NOUNPACK #-}" ppr NoSrcUnpack = empty instance Outputable StrictnessMark where ppr MarkedStrict = text "!" 
ppr NotMarkedStrict = empty instance Binary StrictnessMark where put_ bh NotMarkedStrict = putByte bh 0 put_ bh MarkedStrict = putByte bh 1 get bh = do h <- getByte bh case h of 0 -> return NotMarkedStrict 1 -> return MarkedStrict _ -> panic "Invalid binary format" instance Binary SrcStrictness where put_ bh SrcLazy = putByte bh 0 put_ bh SrcStrict = putByte bh 1 put_ bh NoSrcStrict = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return SrcLazy 1 -> return SrcStrict _ -> return NoSrcStrict instance Binary SrcUnpackedness where put_ bh SrcNoUnpack = putByte bh 0 put_ bh SrcUnpack = putByte bh 1 put_ bh NoSrcUnpack = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return SrcNoUnpack 1 -> return SrcUnpack _ -> return NoSrcUnpack -- | Compare strictness annotations eqHsBang :: HsImplBang -> HsImplBang -> Bool eqHsBang HsLazy HsLazy = True eqHsBang (HsStrict _) (HsStrict _) = True eqHsBang (HsUnpack Nothing) (HsUnpack Nothing) = True eqHsBang (HsUnpack (Just c1)) (HsUnpack (Just c2)) = eqType (coercionType c1) (coercionType c2) eqHsBang _ _ = False isBanged :: HsImplBang -> Bool isBanged (HsUnpack {}) = True isBanged (HsStrict {}) = True isBanged HsLazy = False isSrcStrict :: SrcStrictness -> Bool isSrcStrict SrcStrict = True isSrcStrict _ = False isSrcUnpacked :: SrcUnpackedness -> Bool isSrcUnpacked SrcUnpack = True isSrcUnpacked _ = False isMarkedStrict :: StrictnessMark -> Bool isMarkedStrict NotMarkedStrict = False isMarkedStrict _ = True -- All others are strict cbvFromStrictMark :: StrictnessMark -> CbvMark cbvFromStrictMark NotMarkedStrict = NotMarkedCbv cbvFromStrictMark MarkedStrict = MarkedCbv {- ********************************************************************* * * \subsection{Construction} * * ********************************************************************* -} -- | Build a new data constructor mkDataCon :: Name -> Bool -- ^ Is the constructor declared infix? 
-> TyConRepName -- ^ TyConRepName for the promoted TyCon -> [HsSrcBang] -- ^ Strictness/unpack annotations, from user -> [FieldLabel] -- ^ Field labels for the constructor, -- if it is a record, otherwise empty -> [TyVar] -- ^ Universals. -> [TyCoVar] -- ^ Existentials. -> ConcreteTyVars -- ^ TyVars which must be instantiated with -- concrete types -> [InvisTVBinder] -- ^ User-written 'TyVarBinder's. -- These must be Inferred/Specified. -- See @Note [TyVarBinders in DataCons]@ -> [EqSpec] -- ^ GADT equalities -> KnotTied ThetaType -- ^ Theta-type occurring before the arguments proper -> [KnotTied (Scaled Type)] -- ^ Original argument types -> KnotTied Type -- ^ Original result type -> PromDataConInfo -- ^ See comments on 'GHC.Core.TyCon.PromDataConInfo' -> KnotTied TyCon -- ^ Representation type constructor -> ConTag -- ^ Constructor tag -> ThetaType -- ^ The "stupid theta", context of the data -- declaration e.g. @data Eq a => T a ...@ -> Id -- ^ Worker Id -> DataConRep -- ^ Representation -> DataCon -- Can get the tag from the TyCon mkDataCon name declared_infix prom_info arg_stricts -- Must match orig_arg_tys 1-1 fields univ_tvs ex_tvs conc_tvs user_tvbs eq_spec theta orig_arg_tys orig_res_ty rep_info rep_tycon tag stupid_theta work_id rep -- Warning: mkDataCon is not a good place to check certain invariants. -- If the programmer writes the wrong result type in the decl, thus: -- data T a where { MkT :: S } -- then it's possible that the univ_tvs may hit an assertion failure -- if you pull on univ_tvs. This case is checked by checkValidDataCon, -- so the error is detected properly... it's just that assertions here -- are a little dodgy. 
= con where is_vanilla = null ex_tvs && null eq_spec && null theta con = MkData {dcName = name, dcUnique = nameUnique name, dcVanilla = is_vanilla, dcInfix = declared_infix, dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs, dcConcreteTyVars = conc_tvs, dcUserTyVarBinders = user_tvbs, dcEqSpec = eq_spec, dcOtherTheta = theta, dcStupidTheta = stupid_theta, dcOrigArgTys = orig_arg_tys, dcOrigResTy = orig_res_ty, dcRepTyCon = rep_tycon, dcSrcBangs = arg_stricts, dcFields = fields, dcTag = tag, dcRepType = rep_ty, dcWorkId = work_id, dcRep = rep, dcSourceArity = length orig_arg_tys, dcRepArity = length rep_arg_tys + count isCoVar ex_tvs, dcPromoted = promoted } -- The 'arg_stricts' passed to mkDataCon are simply those for the -- source-language arguments. We add extra ones for the -- dictionary arguments right here. rep_arg_tys = dataConRepArgTys con rep_ty = case rep of -- If the DataCon has no wrapper, then the worker's type *is* the -- user-facing type, so we can simply use dataConWrapperType. NoDataConRep -> dataConWrapperType con -- If the DataCon has a wrapper, then the worker's type is never seen -- by the user. The visibilities we pick do not matter here. DCR{} -> mkInfForAllTys univ_tvs $ mkTyCoInvForAllTys ex_tvs $ mkScaledFunctionTys rep_arg_tys $ mkTyConApp rep_tycon (mkTyVarTys univ_tvs) -- res_arg_tys is a mixture of TypeLike and ConstraintLike, -- so we don't know which FunTyFlag to use -- Hence using mkScaledFunctionTys. -- See Note [Promoted data constructors] in GHC.Core.TyCon prom_tv_bndrs = [ mkNamedTyConBinder (Invisible spec) tv | Bndr tv spec <- user_tvbs ] fresh_names = freshNames (map getName user_tvbs) -- fresh_names: make sure that the "anonymous" tyvars don't -- clash in name or unique with the universal/existential ones. -- Tiresome! 
And unnecessary because these tyvars are never looked at prom_arg_bndrs = [ mkAnonTyConBinder (mkTyVar n t) {- Visible -} | (n,t) <- dropList theta fresh_names `zip` map scaledThing orig_arg_tys ] prom_bndrs = prom_tv_bndrs ++ prom_arg_bndrs prom_res_kind = orig_res_ty promoted = mkPromotedDataCon con name prom_info prom_bndrs prom_res_kind roles rep_info roles = map (\tv -> if isTyVar tv then Nominal else Phantom) (univ_tvs ++ ex_tvs) ++ map (const Representational) (theta ++ map scaledThing orig_arg_tys) freshNames :: [Name] -> [Name] -- Make an infinite list of Names whose Uniques and OccNames -- differ from those in the 'avoid' list freshNames avoids = [ mkSystemName uniq occ | n <- [0..] , let uniq = mkAlphaTyVarUnique n occ = mkTyVarOccFS (mkFastString ('x' : show n)) , not (uniq `memberUniqueSet` avoid_uniqs) , not (occ `elemOccSet` avoid_occs) ] where avoid_uniqs :: UniqueSet avoid_uniqs = fromListUniqueSet (map getUnique avoids) avoid_occs :: OccSet avoid_occs = mkOccSet (map getOccName avoids) -- | The 'Name' of the 'DataCon', giving it a unique, rooted identification dataConName :: DataCon -> Name dataConName = dcName -- | The tag used for ordering 'DataCon's dataConTag :: DataCon -> ConTag dataConTag = dcTag dataConTagZ :: DataCon -> ConTagZ dataConTagZ con = dataConTag con - fIRST_TAG -- | The type constructor that we are building via this data constructor dataConTyCon :: DataCon -> TyCon dataConTyCon = dcRepTyCon -- | The original type constructor used in the definition of this data -- constructor. In case of a data family instance, that will be the family -- type constructor. dataConOrigTyCon :: DataCon -> TyCon dataConOrigTyCon dc | Just (tc, _) <- tyConFamInst_maybe (dcRepTyCon dc) = tc | otherwise = dcRepTyCon dc -- | The representation type of the data constructor, i.e. the sort -- type that will represent values of this type at runtime dataConRepType :: DataCon -> Type dataConRepType = dcRepType -- | Should the 'DataCon' be presented infix? 
dataConIsInfix :: DataCon -> Bool dataConIsInfix = dcInfix -- | The universally-quantified type variables of the constructor dataConUnivTyVars :: DataCon -> [TyVar] dataConUnivTyVars (MkData { dcUnivTyVars = tvbs }) = tvbs -- | The existentially-quantified type/coercion variables of the constructor -- including dependent (kind-) GADT equalities dataConExTyCoVars :: DataCon -> [TyCoVar] dataConExTyCoVars (MkData { dcExTyCoVars = tvbs }) = tvbs -- | Both the universal and existential type/coercion variables of the constructor dataConUnivAndExTyCoVars :: DataCon -> [TyCoVar] dataConUnivAndExTyCoVars (MkData { dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs }) = univ_tvs ++ ex_tvs -- | Which type variables of this data constructor that must be -- instantiated to concrete types? -- For example: the RuntimeRep variables of unboxed tuples and unboxed sums. -- -- See Note [Representation-polymorphism checking built-ins] -- in GHC.Tc.Utils.Concrete dataConConcreteTyVars :: DataCon -> ConcreteTyVars dataConConcreteTyVars (MkData { dcConcreteTyVars = concs }) = concs -- See Note [DataCon user type variable binders] -- | The type variables of the constructor, in the order the user wrote them dataConUserTyVars :: DataCon -> [TyVar] dataConUserTyVars (MkData { dcUserTyVarBinders = tvbs }) = binderVars tvbs -- See Note [DataCon user type variable binders] -- | 'InvisTVBinder's for the type variables of the constructor, in the order the -- user wrote them dataConUserTyVarBinders :: DataCon -> [InvisTVBinder] dataConUserTyVarBinders = dcUserTyVarBinders -- | Dependent (kind-level) equalities in a constructor. -- There are extracted from the existential variables. -- See Note [Existential coercion variables] dataConKindEqSpec :: DataCon -> [EqSpec] dataConKindEqSpec (MkData {dcExTyCoVars = ex_tcvs}) -- It is used in 'dataConEqSpec' (maybe also 'dataConFullSig' in the future), -- which are frequently used functions. 
-- For now (Aug 2018) this function always return empty set as we don't really -- have coercion variables. -- In the future when we do, we might want to cache this information in DataCon -- so it won't be computed every time when aforementioned functions are called. = [ EqSpec tv ty | cv <- ex_tcvs , isCoVar cv , let (ty1, ty, _) = coVarTypesRole cv tv = getTyVar ty1 ] -- | The *full* constraints on the constructor type, including dependent GADT -- equalities. dataConTheta :: DataCon -> ThetaType dataConTheta con@(MkData { dcEqSpec = eq_spec, dcOtherTheta = theta }) = eqSpecPreds (dataConKindEqSpec con ++ eq_spec) ++ theta -- | Get the Id of the 'DataCon' worker: a function that is the "actual" -- constructor and has no top level binding in the program. The type may -- be different from the obvious one written in the source program. Panics -- if there is no such 'Id' for this 'DataCon' dataConWorkId :: DataCon -> Id dataConWorkId dc = dcWorkId dc -- | Get the Id of the 'DataCon' wrapper: a function that wraps the "actual" -- constructor so it has the type visible in the source program: c.f. -- 'dataConWorkId'. -- Returns Nothing if there is no wrapper, which occurs for an algebraic data -- constructor and also for a newtype (whose constructor is inlined -- compulsorily) dataConWrapId_maybe :: DataCon -> Maybe Id dataConWrapId_maybe dc = case dcRep dc of NoDataConRep -> Nothing DCR { dcr_wrap_id = wrap_id } -> Just wrap_id -- | Returns an Id which looks like the Haskell-source constructor by using -- the wrapper if it exists (see 'dataConWrapId_maybe') and failing over to -- the worker (see 'dataConWorkId') dataConWrapId :: DataCon -> Id dataConWrapId dc = case dcRep dc of NoDataConRep-> dcWorkId dc -- worker=wrapper DCR { dcr_wrap_id = wrap_id } -> wrap_id -- | Find all the 'Id's implicitly brought into scope by the data constructor. 
Currently, -- the union of the 'dataConWorkId' and the 'dataConWrapId' dataConImplicitTyThings :: DataCon -> [TyThing] dataConImplicitTyThings (MkData { dcWorkId = work, dcRep = rep }) = [mkAnId work] ++ wrap_ids where wrap_ids = case rep of NoDataConRep -> [] DCR { dcr_wrap_id = wrap } -> [mkAnId wrap] -- | The labels for the fields of this particular 'DataCon' dataConFieldLabels :: DataCon -> [FieldLabel] dataConFieldLabels = dcFields -- | Extract the type for any given labelled field of the 'DataCon' dataConFieldType :: DataCon -> FieldLabelString -> Type dataConFieldType con label = case dataConFieldType_maybe con label of Just (_, ty) -> ty Nothing -> pprPanic "dataConFieldType" (ppr con <+> ppr label) -- | Extract the label and type for any given labelled field of the -- 'DataCon', or return 'Nothing' if the field does not belong to it dataConFieldType_maybe :: DataCon -> FieldLabelString -> Maybe (FieldLabel, Type) dataConFieldType_maybe con label = find ((== label) . flLabel . fst) (dcFields con `zip` (scaledThing <$> dcOrigArgTys con)) -- | Strictness/unpack annotations, from user; or, for imported -- DataCons, from the interface file -- The list is in one-to-one correspondence with the arity of the 'DataCon' dataConSrcBangs :: DataCon -> [HsSrcBang] dataConSrcBangs = dcSrcBangs -- | Source-level arity of the data constructor dataConSourceArity :: DataCon -> Arity dataConSourceArity (MkData { dcSourceArity = arity }) = arity -- | Gives the number of value arguments (including zero-width coercions) -- stored by the given `DataCon`'s worker in its Core representation. 
This may -- differ from the number of arguments that appear in the source code; see also -- Note [DataCon arities] dataConRepArity :: DataCon -> Arity dataConRepArity (MkData { dcRepArity = arity }) = arity -- | Return whether there are any argument types for this 'DataCon's original source type -- See Note [DataCon arities] isNullarySrcDataCon :: DataCon -> Bool isNullarySrcDataCon dc = dataConSourceArity dc == 0 -- | Return whether this `DataCon`'s worker, in its Core representation, takes -- any value arguments. -- -- In particular, remember that we include coercion arguments in the arity of -- the Core representation of the `DataCon` -- both lifted and unlifted -- coercions, despite the latter having zero-width runtime representation. -- -- See also Note [DataCon arities]. isNullaryRepDataCon :: DataCon -> Bool isNullaryRepDataCon dc = dataConRepArity dc == 0 dataConRepStrictness :: DataCon -> [StrictnessMark] -- ^ Give the demands on the arguments of a -- Core constructor application (Con dc args) dataConRepStrictness dc = case dcRep dc of NoDataConRep -> [NotMarkedStrict | _ <- dataConRepArgTys dc] DCR { dcr_stricts = strs } -> strs dataConImplBangs :: DataCon -> [HsImplBang] -- The implementation decisions about the strictness/unpack of each -- source program argument to the data constructor dataConImplBangs dc = case dcRep dc of NoDataConRep -> replicate (dcSourceArity dc) HsLazy DCR { dcr_bangs = bangs } -> bangs dataConBoxer :: DataCon -> Maybe DataConBoxer dataConBoxer (MkData { dcRep = DCR { dcr_boxer = boxer } }) = Just boxer dataConBoxer _ = Nothing dataConInstSig :: DataCon -> [Type] -- Instantiate the *universal* tyvars with these types -> ([TyCoVar], ThetaType, [Type]) -- Return instantiated existentials -- theta and arg tys -- ^ Instantiate the universal tyvars of a data con, -- returning -- ( instantiated existentials -- , instantiated constraints including dependent GADT equalities -- which are *also* listed in the instantiated existentials -- , 
instantiated args) dataConInstSig con@(MkData { dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs , dcOrigArgTys = arg_tys }) univ_tys = ( ex_tvs' , substTheta subst (dataConTheta con) , substTys subst (map scaledThing arg_tys)) where univ_subst = zipTvSubst univ_tvs univ_tys (subst, ex_tvs') = Type.substVarBndrs univ_subst ex_tvs -- | The \"full signature\" of the 'DataCon' returns, in order: -- -- 1) The result of 'dataConUnivTyVars' -- -- 2) The result of 'dataConExTyCoVars' -- -- 3) The non-dependent GADT equalities. -- Dependent GADT equalities are implied by coercion variables in -- return value (2). -- -- 4) The other constraints of the data constructor type, excluding GADT -- equalities -- -- 5) The original argument types to the 'DataCon' (i.e. before -- any change of the representation of the type) with linearity -- annotations -- -- 6) The original result type of the 'DataCon' dataConFullSig :: DataCon -> ([TyVar], [TyCoVar], [EqSpec], ThetaType, [Scaled Type], Type) dataConFullSig (MkData {dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs, dcEqSpec = eq_spec, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty}) = (univ_tvs, ex_tvs, eq_spec, theta, arg_tys, res_ty) dataConOrigResTy :: DataCon -> Type dataConOrigResTy dc = dcOrigResTy dc -- | The \"stupid theta\" of the 'DataCon', such as @data Eq a@ in: -- -- > data Eq a => T a = ... -- -- See @Note [The stupid context]@. dataConStupidTheta :: DataCon -> ThetaType dataConStupidTheta dc = dcStupidTheta dc {- Note [Displaying linear fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A constructor with a linear field can be written either as MkT :: a %1 -> T a (with -XLinearTypes) or MkT :: a -> T a (with -XNoLinearTypes) There are three different methods to retrieve a type of a datacon. They differ in how linear fields are handled. 1. dataConWrapperType: The type of the wrapper in Core. For example, dataConWrapperType for Maybe is a %1 -> Just a. 2. 
dataConNonlinearType: The type of the constructor, with linear arrows replaced by unrestricted ones. Used when we don't want to introduce linear types to user (in holes and in types in hie used by haddock). 3. dataConDisplayType (takes a boolean indicating if -XLinearTypes is enabled): The type we'd like to show in error messages, :info and -ddump-types. Ideally, it should reflect the type written by the user; the function returns a type with arrows that would be required to write this constructor under the current setting of -XLinearTypes. In principle, this type can be different from the user's source code when the value of -XLinearTypes has changed, but we don't expect this to cause much trouble. Due to internal plumbing in checkValidDataCon, we can't just return a Doc. The multiplicity of arrows returned by dataConDisplayType and dataConDisplayType is used only for pretty-printing. -} dataConWrapperType :: DataCon -> Type -- ^ The user-declared type of the data constructor -- in the nice-to-read form: -- -- > T :: forall a b. a -> b -> T [a] -- -- rather than: -- -- > T :: forall a c. forall b. (c~[a]) => a -> b -> T c -- -- The type variables are quantified in the order that the user wrote them. -- See @Note [DataCon user type variable binders]@. -- -- NB: If the constructor is part of a data instance, the result type -- mentions the family tycon, not the internal one. 
dataConWrapperType (MkData { dcUserTyVarBinders = user_tvbs, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty, dcStupidTheta = stupid_theta }) = mkInvisForAllTys user_tvbs $ mkInvisFunTys (stupid_theta ++ theta) $ mkScaledFunTys arg_tys $ res_ty dataConNonlinearType :: DataCon -> Type -- Just like dataConWrapperType, but with the -- linearity on the arguments all zapped to Many dataConNonlinearType (MkData { dcUserTyVarBinders = user_tvbs, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty, dcStupidTheta = stupid_theta }) = mkInvisForAllTys user_tvbs $ mkInvisFunTys (stupid_theta ++ theta) $ mkScaledFunTys arg_tys' $ res_ty where arg_tys' = map (\(Scaled w t) -> Scaled (case w of OneTy -> ManyTy; _ -> w) t) arg_tys dataConDisplayType :: Bool -> DataCon -> Type dataConDisplayType show_linear_types dc = if show_linear_types then dataConWrapperType dc else dataConNonlinearType dc -- | Finds the instantiated types of the arguments required to construct a -- 'DataCon' representation -- NB: these INCLUDE any dictionary args -- but EXCLUDE the data-declaration context, which is discarded -- It's all post-flattening etc; this is a representation type dataConInstArgTys :: DataCon -- ^ A datacon with no existentials or equality constraints -- However, it can have a dcTheta (notably it can be a -- class dictionary, with superclasses) -> [Type] -- ^ Instantiated at these types -> [Scaled Type] dataConInstArgTys dc@(MkData {dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs}) inst_tys = assertPpr (univ_tvs `equalLength` inst_tys) (text "dataConInstArgTys" <+> ppr dc $$ ppr univ_tvs $$ ppr inst_tys) $ assertPpr (null ex_tvs) (ppr dc) $ map (mapScaledType (substTyWith univ_tvs inst_tys)) (dataConRepArgTys dc) -- | Returns just the instantiated /value/ argument types of a 'DataCon', -- (excluding dictionary args) dataConInstOrigArgTys :: DataCon -- Works for any DataCon -> [Type] -- Includes existential tyvar args, but NOT -- equality constraints or 
dicts -> [Scaled Type] -- For vanilla datacons, it's all quite straightforward -- But for the call in GHC.HsToCore.Match.Constructor, we really do want just -- the value args dataConInstOrigArgTys dc@(MkData {dcOrigArgTys = arg_tys, dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs}) inst_tys = assertPpr (tyvars `equalLength` inst_tys) (text "dataConInstOrigArgTys" <+> ppr dc $$ ppr tyvars $$ ppr inst_tys) $ substScaledTys subst arg_tys where tyvars = univ_tvs ++ ex_tvs subst = zipTCvSubst tyvars inst_tys -- | Given a data constructor @dc@ with /n/ universally quantified type -- variables @a_{1}@, @a_{2}@, ..., @a_{n}@, and given a list of argument -- types @dc_args@ of length /m/ where /m/ <= /n/, then: -- -- @ -- dataConInstUnivs dc dc_args -- @ -- -- Will return: -- -- @ -- [dc_arg_{1}, dc_arg_{2}, ..., dc_arg_{m}, a_{m+1}, ..., a_{n}] -- @ -- -- That is, return the list of universal type variables with -- @a_{1}@, @a_{2}@, ..., @a_{m}@ instantiated with -- @dc_arg_{1}@, @dc_arg_{2}@, ..., @dc_arg_{m}@. It is possible for @m@ to -- be less than @n@, in which case the remaining @n - m@ elements will simply -- be universal type variables (with their kinds possibly instantiated). -- -- Examples: -- -- * Given the data constructor @D :: forall a b. Foo a b@ and -- @dc_args@ @[Int, Bool]@, then @dataConInstUnivs D dc_args@ will return -- @[Int, Bool]@. -- -- * Given the data constructor @D :: forall a b. Foo a b@ and -- @dc_args@ @[Int]@, then @@dataConInstUnivs D dc_args@ will return -- @[Int, b]@. -- -- * Given the data constructor @E :: forall k (a :: k). Bar k a@ and -- @dc_args@ @[Type]@, then @@dataConInstUnivs D dc_args@ will return -- @[Type, (a :: Type)]@. -- -- This is primarily used in @GHC.Tc.Deriv.*@ in service of instantiating data -- constructors' field types. -- See @Note [Instantiating field types in stock deriving]@ for a notable -- example of this. 
dataConInstUnivs :: DataCon -> [Type] -> [Type] dataConInstUnivs dc dc_args = chkAppend dc_args $ map mkTyVarTy dc_args_suffix where (dc_univs_prefix, dc_univs_suffix) = -- Assert that m <= n assertPpr (dc_args `leLength` dataConUnivTyVars dc) (text "dataConInstUnivs" <+> ppr dc_args <+> ppr (dataConUnivTyVars dc)) $ splitAtList dc_args $ dataConUnivTyVars dc (_, dc_args_suffix) = substTyVarBndrs prefix_subst dc_univs_suffix prefix_subst = mkTvSubst prefix_in_scope prefix_env prefix_in_scope = mkInScopeSet $ tyCoVarsOfTypes dc_args prefix_env = zipTyEnv dc_univs_prefix dc_args -- | Returns the argument types of the wrapper, excluding all dictionary arguments -- and without substituting for any type variables dataConOrigArgTys :: DataCon -> [Scaled Type] dataConOrigArgTys dc = dcOrigArgTys dc -- | Returns constraints in the wrapper type, other than those in the dataConEqSpec dataConOtherTheta :: DataCon -> ThetaType dataConOtherTheta dc = dcOtherTheta dc -- | Returns the arg types of the worker, including *all* non-dependent -- evidence, after any flattening has been done and without substituting for -- any type variables dataConRepArgTys :: DataCon -> [Scaled Type] dataConRepArgTys (MkData { dcRep = rep , dcEqSpec = eq_spec , dcOtherTheta = theta , dcOrigArgTys = orig_arg_tys , dcRepTyCon = tc }) = case rep of DCR { dcr_arg_tys = arg_tys } -> arg_tys NoDataConRep | isTypeDataTyCon tc -> assert (null theta) $ orig_arg_tys -- `type data` declarations can be GADTs (and hence have an eq_spec) -- but no wrapper. They cannot have a theta. -- See Note [Type data declarations] in GHC.Rename.Module -- You might wonder why we ever call dataConRepArgTys for `type data`; -- I think it's because of the call in mkDataCon, which in turn feeds -- into dcRepArity, which in turn is used in mkDataConWorkId. -- c.f. 
#23022 | otherwise -> assert (null eq_spec) $ map unrestricted theta ++ orig_arg_tys -- | The string @package:module.name@ identifying a constructor, which is attached -- to its info table and used by the GHCi debugger and the heap profiler dataConIdentity :: DataCon -> ByteString -- We want this string to be UTF-8, so we get the bytes directly from the FastStrings. dataConIdentity dc = LBS.toStrict $ BSB.toLazyByteString $ mconcat [ BSB.shortByteString $ fastStringToShortByteString $ unitFS $ moduleUnit mod , BSB.int8 $ fromIntegral (ord ':') , BSB.shortByteString $ fastStringToShortByteString $ moduleNameFS $ moduleName mod , BSB.int8 $ fromIntegral (ord '.') , BSB.shortByteString $ fastStringToShortByteString $ occNameFS $ nameOccName name ] where name = dataConName dc mod = assert (isExternalName name) $ nameModule name isTupleDataCon :: DataCon -> Bool isTupleDataCon (MkData {dcRepTyCon = tc}) = isTupleTyCon tc isBoxedTupleDataCon :: DataCon -> Bool isBoxedTupleDataCon (MkData {dcRepTyCon = tc}) = isBoxedTupleTyCon tc isUnboxedTupleDataCon :: DataCon -> Bool isUnboxedTupleDataCon (MkData {dcRepTyCon = tc}) = isUnboxedTupleTyCon tc isUnboxedSumDataCon :: DataCon -> Bool isUnboxedSumDataCon (MkData {dcRepTyCon = tc}) = isUnboxedSumTyCon tc -- | Vanilla 'DataCon's are those that are nice boring Haskell 98 constructors isVanillaDataCon :: DataCon -> Bool isVanillaDataCon dc = dcVanilla dc -- | Is this the 'DataCon' of a newtype? isNewDataCon :: DataCon -> Bool isNewDataCon dc = isNewTyCon (dataConTyCon dc) -- | Is this data constructor in a "type data" declaration? -- See Note [Type data declarations] in GHC.Rename.Module. 
isTypeDataCon :: DataCon -> Bool isTypeDataCon dc = isTypeDataTyCon (dataConTyCon dc) isCovertGadtDataCon :: DataCon -> Bool -- See Note [isCovertGadtDataCon] isCovertGadtDataCon (MkData { dcUnivTyVars = univ_tvs , dcEqSpec = eq_spec , dcRepTyCon = rep_tc }) = not (null eq_spec) -- There are some constraints && not (any is_visible_spec eq_spec) -- But none of them are visible where visible_univ_tvs :: [TyVar] -- Visible arguments in result type visible_univ_tvs = [ univ_tv | (univ_tv, tcb) <- univ_tvs `zip` tyConBinders rep_tc , isVisibleTyConBinder tcb ] is_visible_spec :: EqSpec -> Bool is_visible_spec (EqSpec univ_tv ty) = univ_tv `elem` visible_univ_tvs && not (isTyVarTy ty) -- See Note [isCovertGadtDataCon] for -- an example where 'ty' is a tyvar {- Note [isCovertGadtDataCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (isCovertGadtDataCon K) returns True if K is a GADT data constructor, but does not /look/ like it. Consider (#21447) type T :: TYPE r -> Type data T a where { MkT :: b -> T b } Here MkT doesn't look GADT-like, but it is. If we make the kind applications explicit we'd see: data T a where { MkT :: b -> T @LiftedRep b } The test for covert-ness is bit tricky, because we want to see if - dcEqSpec is non-empty - dcEqSpec does not constrain any of the /required/ (i.e. visible) arguments of the TyCon to a non-tyvar In the example above, the DataCon for MkT will have dcUnivTyVars: [(r::RuntimeRep), (a :: TYPE r)] dcExTyVars: [(b :: Type)] dcEqSpec: [(r, LiftedRep), (a, b)] Here * `r :: RuntimeRep` is constrained by dcEqSpec to LiftedRep * `a :: TYPE r` is constrained by dcEqSpec to `b :: Type` But the constraint on `a` is not visible to the user, so this counts as a covert GADT data con. The declaration MkT :: forall (b :: Type). b -> T b looks entirely non-GADT-ish. Wrinkles: * The visibility or otherwise is a property of the /TyCon/ binders * The dcUnivTyVars may or may not be the same as the TyCon binders * So we have to zip them together. 
* For a data family the TyCon in question is the /representation/ TyCon hence dcRepTyCon -} -- | Should this DataCon be allowed in a type even without -XDataKinds? -- Currently, only Lifted & Unlifted specialPromotedDc :: DataCon -> Bool specialPromotedDc = isKindTyCon . dataConTyCon classDataCon :: Class -> DataCon classDataCon clas = case tyConDataCons (classTyCon clas) of (dict_constr:no_more) -> assert (null no_more) dict_constr [] -> panic "classDataCon" dataConCannotMatch :: [Type] -> DataCon -> Bool -- Returns True iff the data con *definitely cannot* match a -- scrutinee of type (T tys) -- where T is the dcRepTyCon for the data con dataConCannotMatch tys con -- See (U6) in Note [Implementing unsafeCoerce] -- in base:Unsafe.Coerce | dataConName con == unsafeReflDataConName = False | null inst_theta = False -- Common | all isTyVarTy tys = False -- Also common | otherwise = typesCantMatch (concatMap predEqs inst_theta) where (_, inst_theta, _) = dataConInstSig con tys -- TODO: could gather equalities from superclasses too predEqs pred = case classifyPredType pred of EqPred NomEq ty1 ty2 -> [(ty1, ty2)] ClassPred eq args | eq `hasKey` eqTyConKey , [_, ty1, ty2] <- args -> [(ty1, ty2)] | eq `hasKey` heqTyConKey , [_, _, ty1, ty2] <- args -> [(ty1, ty2)] _ -> [] -- | Were the type variables of the data con written in a different order -- than the regular order (universal tyvars followed by existential tyvars)? -- -- This is not a cheap test, so we minimize its use in GHC as much as possible. -- Currently, its only call site in the GHC codebase is in 'mkDataConRep' in -- "MkId", and so 'dataConUserTyVarsNeedWrapper' is only called at most once -- during a data constructor's lifetime. dataConResRepTyArgs :: DataCon -> [Type] -- Returns the arguments of a GADT version of the /representation/ TyCon -- Thus data instance T [(x,y)] z where -- MkT :: forall p q. 
Int -> T [(Int,p)] (Maybe q) -- The "GADT version of the representation type" is -- data R:T x y z where -- MkT :: forall p q. Int -> R:T Int p (Maybe q) -- so dataConResRepTyArgs for MkT returns [Int, p, Maybe q] -- This is almost the same as (subst eq_spec univ_tvs); but not quite, -- because eq_spec omits constraint-kinded equalities dataConResRepTyArgs dc@(MkData { dcRepTyCon = rep_tc, dcOrigResTy = orig_res_ty }) | Just (fam_tc, fam_args) <- tyConFamInst_maybe rep_tc = -- fvs(fam_args) = tyConTyVars rep_tc -- These tyvars are the domain of subst -- Fvs(range(subst)) = tvars of the datacon case tcMatchTy (mkTyConApp fam_tc fam_args) orig_res_ty of Just subst -> map (substTyVar subst) (tyConTyVars rep_tc) Nothing -> pprPanic "datacOnResRepTyArgs" $ vcat [ ppr dc, ppr fam_tc <+> ppr fam_args , ppr orig_res_ty ] | otherwise = tyConAppArgs orig_res_ty checkDataConTyVars :: DataCon -> Bool -- Check that the worker and wrapper have the same set of type variables -- See Note [DataCon user type variable binders] -- Also ensures that no user tyvar is in the eq_spec (the eq_spec should -- only relate fresh universals from (R2) of the note) checkDataConTyVars dc@(MkData { dcUnivTyVars = univ_tvs , dcExTyCoVars = ex_tvs , dcEqSpec = eq_spec }) -- use of sets here: (R1) from the Note = mkUnVarSet depleted_worker_vars == mkUnVarSet wrapper_vars && all (not . is_eq_spec_var) wrapper_vars where worker_vars = univ_tvs ++ ex_tvs eq_spec_tvs = mkUnVarSet (map eqSpecTyVar eq_spec) is_eq_spec_var = (`elemUnVarSet` eq_spec_tvs) -- (R2) from the Note depleted_worker_vars = filterOut is_eq_spec_var worker_vars wrapper_vars = dataConUserTyVars dc dataConUserTyVarsNeedWrapper :: DataCon -> Bool -- Check whether the worker and wapper have the same type variables -- in the same order. If not, we need a wrapper to swizzle them. 
-- See Note [DataCon user type variable binders], as well as -- Note [Data con wrappers and GADT syntax] for an explanation of what -- mkDataConRep is doing with this function. dataConUserTyVarsNeedWrapper dc@(MkData { dcUnivTyVars = univ_tvs , dcExTyCoVars = ex_tvs , dcEqSpec = eq_spec }) = assert (null eq_spec || answer) -- all GADTs should say "yes" here answer where answer = (univ_tvs ++ ex_tvs) /= dataConUserTyVars dc -- Worker tyvars Wrapper tyvars {- %************************************************************************ %* * Promoting of data types to the kind level * * ************************************************************************ -} promoteDataCon :: DataCon -> TyCon promoteDataCon (MkData { dcPromoted = tc }) = tc {- ************************************************************************ * * \subsection{Splitting products} * * ************************************************************************ -} -- | Extract the type constructor, type argument, data constructor and it's -- /representation/ argument types from a type if it is a product type. -- -- Precisely, we return @Just@ for any data type that is all of: -- -- * Concrete (i.e. constructors visible) -- * Single-constructor -- * ... which has no existentials -- -- Whether the type is a @data@ type or a @newtype@. splitDataProductType_maybe :: Type -- ^ A product type, perhaps -> Maybe (TyCon, -- The type constructor [Type], -- Type args of the tycon DataCon, -- The data constructor [Scaled Type]) -- Its /representation/ arg types -- Rejecting existentials means we don't have to worry about -- freshening and substituting type variables -- (See "GHC.Type.Id.Make.dataConArgUnpack") splitDataProductType_maybe ty | Just (tycon, ty_args) <- splitTyConApp_maybe ty , Just con <- tyConSingleDataCon_maybe tycon , null (dataConExTyCoVars con) -- no existentials! 
See above = Just (tycon, ty_args, con, dataConInstArgTys con ty_args) | otherwise = Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/DataCon.hs-boot0000644000000000000000000000245307346545000021410 0ustar0000000000000000module GHC.Core.DataCon where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Var( Id, TyVar, TyCoVar, InvisTVBinder ) import {-# SOURCE #-} GHC.Types.Name( Name, NamedThing ) import {-# SOURCE #-} GHC.Core.TyCon( TyCon ) import GHC.Types.FieldLabel ( FieldLabel ) import GHC.Types.Unique ( Uniquable ) import GHC.Utils.Outputable ( Outputable, OutputableBndr ) import GHC.Types.Basic (Arity) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, ThetaType, Scaled ) data DataCon data DataConRep data EqSpec dataConName :: DataCon -> Name dataConWorkId :: DataCon -> Id dataConTyCon :: DataCon -> TyCon dataConExTyCoVars :: DataCon -> [TyCoVar] dataConUserTyVars :: DataCon -> [TyVar] dataConUserTyVarBinders :: DataCon -> [InvisTVBinder] dataConSourceArity :: DataCon -> Arity dataConFieldLabels :: DataCon -> [FieldLabel] dataConInstOrigArgTys :: DataCon -> [Type] -> [Scaled Type] dataConStupidTheta :: DataCon -> ThetaType dataConFullSig :: DataCon -> ([TyVar], [TyCoVar], [EqSpec], ThetaType, [Scaled Type], Type) isUnboxedSumDataCon :: DataCon -> Bool isTypeDataCon :: DataCon -> Bool instance Eq DataCon instance Uniquable DataCon instance NamedThing DataCon instance Outputable DataCon instance OutputableBndr DataCon dataConWrapId :: DataCon -> Id promoteDataCon :: DataCon -> TyCon ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/FVs.hs0000644000000000000000000007327507346545000017646 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Taken quite directly from the Peyton Jones/Lester paper. -} {-# LANGUAGE TypeFamilies #-} -- | A module concerned with finding the free variables of an expression. 
module GHC.Core.FVs ( -- * Free variables of expressions and binding groups exprFreeVars, exprsFreeVars, exprFreeVarsDSet, exprFreeVarsList, exprsFreeVarsList, exprFreeIds, exprsFreeIds, exprFreeIdsDSet, exprsFreeIdsDSet, exprFreeIdsList, exprsFreeIdsList, bindFreeVars, -- * Selective free variables of expressions InterestingVarFun, exprSomeFreeVars, exprsSomeFreeVars, exprSomeFreeVarsList, exprsSomeFreeVarsList, -- * Free variables of Rules, Vars and Ids varTypeTyCoVars, varTypeTyCoFVs, idUnfoldingVars, idFreeVars, dIdFreeVars, bndrRuleAndUnfoldingVarsDSet, bndrRuleAndUnfoldingIds, idFVs, idRuleVars, stableUnfoldingVars, ruleFreeVars, rulesFreeVars, rulesFreeVarsDSet, mkRuleInfo, ruleLhsFreeIds, ruleLhsFreeIdsList, ruleRhsFreeVars, rulesRhsFreeIds, exprFVs, -- * Orphan names orphNamesOfType, orphNamesOfTypes, orphNamesOfAxiomLHS, orphNamesOfExprs, -- * Core syntax tree annotation with free variables FVAnn, -- annotation, abstract CoreExprWithFVs, -- = AnnExpr Id FVAnn CoreExprWithFVs', -- = AnnExpr' Id FVAnn CoreBindWithFVs, -- = AnnBind Id FVAnn CoreAltWithFVs, -- = AnnAlt Id FVAnn freeVars, -- CoreExpr -> CoreExprWithFVs freeVarsBind, -- CoreBind -> DVarSet -> (DVarSet, CoreBindWithFVs) freeVarsOf, -- CoreExprWithFVs -> DIdSet freeVarsOfAnn ) where import GHC.Prelude import GHC.Core import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.Tickish import GHC.Types.Var.Set import GHC.Types.Var import GHC.Core.Type import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Builtin.Types( unrestrictedFunTyConName ) import GHC.Builtin.Types.Prim( fUNTyCon ) import GHC.Data.Maybe( orElse ) import GHC.Utils.FV as FV import GHC.Utils.Misc import GHC.Utils.Panic.Plain {- ************************************************************************ * * \section{Finding the free variables of an expression} * * 
************************************************************************ This function simply finds the free variables of an expression. So far as type variables are concerned, it only finds tyvars that are * free in type arguments, * free in the type of a binder, but not those that are free in the type of variable occurrence. -} -- | Find all locally-defined free Ids or type variables in an expression -- returning a non-deterministic set. exprFreeVars :: CoreExpr -> VarSet exprFreeVars = fvVarSet . exprFVs -- | Find all locally-defined free Ids or type variables in an expression -- returning a composable FV computation. See Note [FV naming conventions] in "GHC.Utils.FV" -- for why export it. exprFVs :: CoreExpr -> FV exprFVs = filterFV isLocalVar . expr_fvs -- | Find all locally-defined free Ids or type variables in an expression -- returning a deterministic set. exprFreeVarsDSet :: CoreExpr -> DVarSet exprFreeVarsDSet = fvDVarSet . exprFVs -- | Find all locally-defined free Ids or type variables in an expression -- returning a deterministically ordered list. exprFreeVarsList :: CoreExpr -> [Var] exprFreeVarsList = fvVarList . exprFVs -- | Find all locally-defined free Ids in an expression exprFreeIds :: CoreExpr -> IdSet -- Find all locally-defined free Ids exprFreeIds = exprSomeFreeVars isLocalId exprsFreeIds :: [CoreExpr] -> IdSet -- Find all locally-defined free Ids exprsFreeIds = exprsSomeFreeVars isLocalId -- | Find all locally-defined free Ids in an expression -- returning a deterministic set. exprFreeIdsDSet :: CoreExpr -> DIdSet -- Find all locally-defined free Ids exprFreeIdsDSet = exprSomeFreeVarsDSet isLocalId -- | Find all locally-defined free Ids in an expression -- returning a deterministically ordered list. exprFreeIdsList :: CoreExpr -> [Id] -- Find all locally-defined free Ids exprFreeIdsList = exprSomeFreeVarsList isLocalId -- | Find all locally-defined free Ids in several expressions -- returning a deterministic set. 
exprsFreeIdsDSet :: [CoreExpr] -> DIdSet -- Find all locally-defined free Ids exprsFreeIdsDSet = exprsSomeFreeVarsDSet isLocalId -- | Find all locally-defined free Ids in several expressions -- returning a deterministically ordered list. exprsFreeIdsList :: [CoreExpr] -> [Id] -- Find all locally-defined free Ids exprsFreeIdsList = exprsSomeFreeVarsList isLocalId -- | Find all locally-defined free Ids or type variables in several expressions -- returning a non-deterministic set. exprsFreeVars :: [CoreExpr] -> VarSet exprsFreeVars = fvVarSet . exprsFVs -- | Find all locally-defined free Ids or type variables in several expressions -- returning a composable FV computation. See Note [FV naming conventions] in "GHC.Utils.FV" -- for why export it. exprsFVs :: [CoreExpr] -> FV exprsFVs exprs = mapUnionFV exprFVs exprs -- | Find all locally-defined free Ids or type variables in several expressions -- returning a deterministically ordered list. exprsFreeVarsList :: [CoreExpr] -> [Var] exprsFreeVarsList = fvVarList . exprsFVs -- | Find all locally defined free Ids in a binding group bindFreeVars :: CoreBind -> VarSet bindFreeVars (NonRec b r) = fvVarSet $ filterFV isLocalVar $ rhs_fvs (b,r) bindFreeVars (Rec prs) = fvVarSet $ filterFV isLocalVar $ addBndrs (map fst prs) (mapUnionFV rhs_fvs prs) -- | Finds free variables in an expression selected by a predicate exprSomeFreeVars :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> VarSet exprSomeFreeVars fv_cand e = fvVarSet $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in an expression selected by a predicate -- returning a deterministically ordered list. exprSomeFreeVarsList :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> [Var] exprSomeFreeVarsList fv_cand e = fvVarList $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in an expression selected by a predicate -- returning a deterministic set. 
exprSomeFreeVarsDSet :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> DVarSet exprSomeFreeVarsDSet fv_cand e = fvDVarSet $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in several expressions selected by a predicate exprsSomeFreeVars :: InterestingVarFun -- Says which 'Var's are interesting -> [CoreExpr] -> VarSet exprsSomeFreeVars fv_cand es = fvVarSet $ filterFV fv_cand $ mapUnionFV expr_fvs es -- | Finds free variables in several expressions selected by a predicate -- returning a deterministically ordered list. exprsSomeFreeVarsList :: InterestingVarFun -- Says which 'Var's are interesting -> [CoreExpr] -> [Var] exprsSomeFreeVarsList fv_cand es = fvVarList $ filterFV fv_cand $ mapUnionFV expr_fvs es -- | Finds free variables in several expressions selected by a predicate -- returning a deterministic set. exprsSomeFreeVarsDSet :: InterestingVarFun -- ^ Says which 'Var's are interesting -> [CoreExpr] -> DVarSet exprsSomeFreeVarsDSet fv_cand e = fvDVarSet $ filterFV fv_cand $ mapUnionFV expr_fvs e -- Comment about obsolete code -- We used to gather the free variables the RULES at a variable occurrence -- with the following cryptic comment: -- "At a variable occurrence, add in any free variables of its rule rhss -- Curiously, we gather the Id's free *type* variables from its binding -- site, but its free *rule-rhs* variables from its usage sites. This -- is a little weird. The reason is that the former is more efficient, -- but the latter is more fine grained, and a makes a difference when -- a variable mentions itself one of its own rule RHSs" -- Not only is this "weird", but it's also pretty bad because it can make -- a function seem more recursive than it is. Suppose -- f = ...g... -- g = ... -- RULE g x = ...f... -- Then f is not mentioned in its own RHS, and needn't be a loop breaker -- (though g may be). But if we collect the rule fvs from g's occurrence, -- it looks as if f mentions itself. 
(This bites in the eftInt/eftIntFB -- code in GHC.Enum.) -- -- Anyway, it seems plain wrong. The RULE is like an extra RHS for the -- function, so its free variables belong at the definition site. -- -- Deleted code looked like -- foldVarSet add_rule_var var_itself_set (idRuleVars var) -- add_rule_var var set | keep_it fv_cand in_scope var = extendVarSet set var -- | otherwise = set -- SLPJ Feb06 addBndr :: CoreBndr -> FV -> FV addBndr bndr fv fv_cand in_scope acc = (varTypeTyCoFVs bndr `unionFV` -- Include type variables in the binder's type -- (not just Ids; coercion variables too!) FV.delFV bndr fv) fv_cand in_scope acc addBndrs :: [CoreBndr] -> FV -> FV addBndrs bndrs fv = foldr addBndr fv bndrs expr_fvs :: CoreExpr -> FV expr_fvs (Type ty) fv_cand in_scope acc = tyCoFVsOfType ty fv_cand in_scope acc expr_fvs (Coercion co) fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc expr_fvs (Var var) fv_cand in_scope acc = FV.unitFV var fv_cand in_scope acc expr_fvs (Lit _) fv_cand in_scope acc = emptyFV fv_cand in_scope acc expr_fvs (Tick t expr) fv_cand in_scope acc = (tickish_fvs t `unionFV` expr_fvs expr) fv_cand in_scope acc expr_fvs (App fun arg) fv_cand in_scope acc = (expr_fvs fun `unionFV` expr_fvs arg) fv_cand in_scope acc expr_fvs (Lam bndr body) fv_cand in_scope acc = addBndr bndr (expr_fvs body) fv_cand in_scope acc expr_fvs (Cast expr co) fv_cand in_scope acc = (expr_fvs expr `unionFV` tyCoFVsOfCo co) fv_cand in_scope acc expr_fvs (Case scrut bndr ty alts) fv_cand in_scope acc = (expr_fvs scrut `unionFV` tyCoFVsOfType ty `unionFV` addBndr bndr (mapUnionFV alt_fvs alts)) fv_cand in_scope acc where alt_fvs (Alt _ bndrs rhs) = addBndrs bndrs (expr_fvs rhs) expr_fvs (Let (NonRec bndr rhs) body) fv_cand in_scope acc = (rhs_fvs (bndr, rhs) `unionFV` addBndr bndr (expr_fvs body)) fv_cand in_scope acc expr_fvs (Let (Rec pairs) body) fv_cand in_scope acc = addBndrs (map fst pairs) (mapUnionFV rhs_fvs pairs `unionFV` expr_fvs body) fv_cand in_scope acc 
--------- rhs_fvs :: (Id, CoreExpr) -> FV rhs_fvs (bndr, rhs) = expr_fvs rhs `unionFV` bndrRuleAndUnfoldingFVs bndr -- Treat any RULES as extra RHSs of the binding --------- exprs_fvs :: [CoreExpr] -> FV exprs_fvs exprs = mapUnionFV expr_fvs exprs tickish_fvs :: CoreTickish -> FV tickish_fvs (Breakpoint _ _ ids _) = FV.mkFVs ids tickish_fvs _ = emptyFV {- ********************************************************************** %* * Orphan names %* * %********************************************************************* -} {- Note [Finding orphan names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions here (orphNamesOfType, orphNamesOfExpr etc) traverse a template: * the head of an class instance decl * the LHS of a type-family instance * the arguments of a RULE to find TyCons or (in the case of a RULE) Ids, that will be matched against when matching the template. If none of these orphNames are locally defined, the instance or RULE is an orphan: see Note [Orphans] in GHC.Core Wrinkles: (ON1) We do not need to look inside coercions, because we never match against them. Indeed, it'd be wrong to do so, because it could make an instance into a non-orphan, when it really is an orphan. (ON2) These orphNames functions are also (rather separately) used by GHCi, to implement :info. When you say ":info Foo", we show all the instances that involve `Foo`; that is, all the instances whose oprhNames include `Foo`. 
To support `:info (->)` we need to ensure that (->) is treated as an orphName of FunTy, which is a bit messy since the "real" TyCon is `FUN` -} orphNamesOfTyCon :: TyCon -> NameSet orphNamesOfTyCon tycon = unitNameSet (getName tycon) `unionNameSet` case tyConClass_maybe tycon of Nothing -> emptyNameSet Just cls -> unitNameSet (getName cls) orphNamesOfType :: Type -> NameSet orphNamesOfType ty | Just ty' <- coreView ty = orphNamesOfType ty' -- Look through type synonyms (#4912) orphNamesOfType (TyVarTy _) = emptyNameSet orphNamesOfType (LitTy {}) = emptyNameSet orphNamesOfType (ForAllTy bndr res) = orphNamesOfType (binderType bndr) `unionNameSet` orphNamesOfType res orphNamesOfType (AppTy fun arg) = orphNamesOfType fun `unionNameSet` orphNamesOfType arg orphNamesOfType (TyConApp tycon tys) = func `unionNameSet` orphNamesOfTyCon tycon `unionNameSet` orphNamesOfTypes tys where func = case tys of arg:_ | tycon == fUNTyCon -> orph_names_of_fun_ty_con arg _ -> emptyNameSet orphNamesOfType (FunTy af w arg res) = func `unionNameSet` unitNameSet fun_tc `unionNameSet` orphNamesOfType w `unionNameSet` orphNamesOfType arg `unionNameSet` orphNamesOfType res where func | isVisibleFunArg af = orph_names_of_fun_ty_con w | otherwise = emptyNameSet fun_tc = tyConName (funTyFlagTyCon af) -- Coercions: see wrinkle (ON1) of Note [Finding orphan names] orphNamesOfType (CastTy ty _co) = orphNamesOfType ty orphNamesOfType (CoercionTy _co) = emptyNameSet orphNamesOfThings :: (a -> NameSet) -> [a] -> NameSet orphNamesOfThings f = foldr (unionNameSet . f) emptyNameSet orphNamesOfTypes :: [Type] -> NameSet orphNamesOfTypes = orphNamesOfThings orphNamesOfType -- | `orphNamesOfAxiomLHS` collects the names of the concrete types and -- type constructors that make up the LHS of a type family instance, -- including the family name itself. 
-- -- For instance, given `type family Foo a b`: -- `type instance Foo (F (G (H a))) b = ...` would yield [Foo,F,G,H] -- -- Used (via orphNamesOfFamInst) in the implementation of ":info" in GHCi. -- and when determining orphan-hood for a FamInst or module orphNamesOfAxiomLHS :: CoAxiom br -> NameSet orphNamesOfAxiomLHS axiom = (orphNamesOfTypes $ concatMap coAxBranchLHS $ fromBranches $ coAxiomBranches axiom) `extendNameSet` getName (coAxiomTyCon axiom) -- Detect (FUN 'Many) as an application of (->), so that :i (->) works as expected -- (see #8535) Issue #16475 describes a more robust solution -- See wrinkle (ON2) of Note [Finding orphan names] orph_names_of_fun_ty_con :: Mult -> NameSet orph_names_of_fun_ty_con ManyTy = unitNameSet unrestrictedFunTyConName orph_names_of_fun_ty_con _ = emptyNameSet -- | Finds the free /external/ names of an expression, notably -- including the names of type constructors (which of course do not show -- up in 'exprFreeVars'). orphNamesOfExpr :: CoreExpr -> NameSet -- There's no need to delete local binders, because they will all -- be /internal/ names. 
orphNamesOfExpr e = go e where go (Var v) | isExternalName n = unitNameSet n | otherwise = emptyNameSet where n = idName v go (Lit _) = emptyNameSet go (Type ty) = orphNamesOfType ty -- Don't need free tyvars go (Coercion _co) = emptyNameSet -- See wrinkle (ON1) of Note [Finding orphan names] go (App e1 e2) = go e1 `unionNameSet` go e2 go (Lam v e) = go e `delFromNameSet` idName v go (Tick _ e) = go e go (Cast e _co) = go e -- See wrinkle (ON1) of Note [Finding orphan names] go (Let (NonRec _ r) e) = go e `unionNameSet` go r go (Let (Rec prs) e) = orphNamesOfExprs (map snd prs) `unionNameSet` go e go (Case e _ ty as) = go e `unionNameSet` orphNamesOfType ty `unionNameSet` unionNameSets (map go_alt as) go_alt (Alt _ _ r) = go r -- | Finds the free /external/ names of several expressions: see 'exprOrphNames' for details orphNamesOfExprs :: [CoreExpr] -> NameSet orphNamesOfExprs es = foldr (unionNameSet . orphNamesOfExpr) emptyNameSet es {- ************************************************************************ * * \section[freevars-everywhere]{Attaching free variables to every sub-expression} * * ************************************************************************ -} data RuleFVsFrom = LhsOnly | RhsOnly | BothSides -- | Those locally-defined variables free in the left and/or right hand sides -- of the rule, depending on the first argument. Returns an 'FV' computation. ruleFVs :: RuleFVsFrom -> CoreRule -> FV ruleFVs !_ (BuiltinRule {}) = emptyFV ruleFVs from (Rule { ru_fn = _do_not_include -- See Note [Rule free var hack] , ru_bndrs = bndrs , ru_rhs = rhs, ru_args = args }) = filterFV isLocalVar $ addBndrs bndrs (exprs_fvs exprs) where exprs = case from of LhsOnly -> args RhsOnly -> [rhs] BothSides -> rhs:args -- | Those locally-defined variables free in the left and/or right hand sides -- from several rules, depending on the first argument. -- Returns an 'FV' computation. 
rulesFVs :: RuleFVsFrom -> [CoreRule] -> FV rulesFVs from = mapUnionFV (ruleFVs from) -- | Those variables free in the right hand side of a rule returned as a -- non-deterministic set ruleRhsFreeVars :: CoreRule -> VarSet ruleRhsFreeVars = fvVarSet . ruleFVs RhsOnly -- | Those locally-defined free 'Id's in the right hand side of several rules -- returned as a non-deterministic set rulesRhsFreeIds :: [CoreRule] -> VarSet rulesRhsFreeIds = fvVarSet . filterFV isLocalId . rulesFVs RhsOnly ruleLhsFreeIds :: CoreRule -> VarSet -- ^ This finds all locally-defined free Ids on the left hand side of a rule -- and returns them as a non-deterministic set ruleLhsFreeIds = fvVarSet . filterFV isLocalId . ruleFVs LhsOnly ruleLhsFreeIdsList :: CoreRule -> [Var] -- ^ This finds all locally-defined free Ids on the left hand side of a rule -- and returns them as a deterministically ordered list ruleLhsFreeIdsList = fvVarList . filterFV isLocalId . ruleFVs LhsOnly -- | Those variables free in the both the left right hand sides of a rule -- returned as a non-deterministic set ruleFreeVars :: CoreRule -> VarSet ruleFreeVars = fvVarSet . ruleFVs BothSides -- | Those variables free in the both the left right hand sides of rules -- returned as a deterministic set rulesFreeVarsDSet :: [CoreRule] -> DVarSet rulesFreeVarsDSet rules = fvDVarSet $ rulesFVs BothSides rules -- | Those variables free in both the left right hand sides of several rules rulesFreeVars :: [CoreRule] -> VarSet rulesFreeVars rules = fvVarSet $ rulesFVs BothSides rules -- | Make a 'RuleInfo' containing a number of 'CoreRule's, suitable -- for putting into an 'IdInfo' mkRuleInfo :: [CoreRule] -> RuleInfo mkRuleInfo rules = RuleInfo rules (rulesFreeVarsDSet rules) {- Note [Rule free var hack] (Not a hack any more) ~~~~~~~~~~~~~~~~~~~~~~~~~ We used not to include the Id in its own rhs free-var set. 
Otherwise the occurrence analyser makes bindings recursive: f x y = x+y RULE: f (f x y) z ==> f x (f y z) However, the occurrence analyser distinguishes "non-rule loop breakers" from "rule-only loop breakers" (see BasicTypes.OccInfo). So it will put this 'f' in a Rec block, but will mark the binding as a non-rule loop breaker, which is perfectly inlinable. -} {- ************************************************************************ * * \section[freevars-everywhere]{Attaching free variables to every sub-expression} * * ************************************************************************ The free variable pass annotates every node in the expression with its NON-GLOBAL free variables and type variables. -} type FVAnn = DVarSet -- See Note [The FVAnn invariant] {- Note [The FVAnn invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Invariant: a FVAnn, say S, is closed: That is: if v is in S, then freevars( v's type/kind ) is also in S -} -- | Every node in a binding group annotated with its -- (non-global) free variables, both Ids and TyVars, and type. type CoreBindWithFVs = AnnBind Id FVAnn -- | Every node in an expression annotated with its -- (non-global) free variables, both Ids and TyVars, and type. -- NB: see Note [The FVAnn invariant] type CoreExprWithFVs = AnnExpr Id FVAnn type CoreExprWithFVs' = AnnExpr' Id FVAnn -- | Every node in an expression annotated with its -- (non-global) free variables, both Ids and TyVars, and type. 
type CoreAltWithFVs = AnnAlt Id FVAnn freeVarsOf :: CoreExprWithFVs -> DIdSet -- ^ Inverse function to 'freeVars' freeVarsOf (fvs, _) = fvs -- | Extract the vars reported in a FVAnn freeVarsOfAnn :: FVAnn -> DIdSet freeVarsOfAnn fvs = fvs aFreeVar :: Var -> DVarSet aFreeVar = unitDVarSet unionFVs :: DVarSet -> DVarSet -> DVarSet unionFVs = unionDVarSet unionFVss :: [DVarSet] -> DVarSet unionFVss = unionDVarSets delBindersFV :: [Var] -> DVarSet -> DVarSet delBindersFV bs fvs = foldr delBinderFV fvs bs delBinderFV :: Var -> DVarSet -> DVarSet -- This way round, so we can do it multiple times using foldr -- (b `delBinderFV` s) -- * removes the binder b from the free variable set s, -- * AND *adds* to s the free variables of b's type -- -- This is really important for some lambdas: -- In (\x::a -> x) the only mention of "a" is in the binder. -- -- Also in -- let x::a = b in ... -- we should really note that "a" is free in this expression. -- It'll be pinned inside the /\a by the binding for b, but -- it seems cleaner to make sure that a is in the free-var set -- when it is mentioned. -- -- This also shows up in recursive bindings. Consider: -- /\a -> letrec x::a = x in E -- Now, there are no explicit free type variables in the RHS of x, -- but nevertheless "a" is free in its definition. So we add in -- the free tyvars of the types of the binders, and include these in the -- free vars of the group, attached to the top level of each RHS. -- -- This actually happened in the defn of errorIO in IOBase.hs: -- errorIO (ST io) = case (errorIO# io) of -- _ -> bottom -- where -- bottom = bottom -- Never evaluated delBinderFV b s = (s `delDVarSet` b) `unionFVs` dVarTypeTyCoVars b -- Include coercion variables too! 
varTypeTyCoVars :: Var -> TyCoVarSet -- Find the type/kind variables free in the type of the id/tyvar varTypeTyCoVars var = fvVarSet $ varTypeTyCoFVs var dVarTypeTyCoVars :: Var -> DTyCoVarSet -- Find the type/kind/coercion variables free in the type of the id/tyvar dVarTypeTyCoVars var = fvDVarSet $ varTypeTyCoFVs var varTypeTyCoFVs :: Var -> FV -- Find the free variables of a binder. -- In the case of ids, don't forget the multiplicity field! varTypeTyCoFVs var = tyCoFVsOfType (varType var) `unionFV` mult_fvs where mult_fvs = case varMultMaybe var of Just mult -> tyCoFVsOfType mult Nothing -> emptyFV idFreeVars :: Id -> VarSet idFreeVars id = assert (isId id) $ fvVarSet $ idFVs id dIdFreeVars :: Id -> DVarSet dIdFreeVars id = fvDVarSet $ idFVs id idFVs :: Id -> FV -- Type variables, rule variables, and inline variables idFVs id = assert (isId id) $ varTypeTyCoFVs id `unionFV` bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingVarsDSet :: Id -> DVarSet bndrRuleAndUnfoldingVarsDSet id = fvDVarSet $ bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingIds :: Id -> IdSet bndrRuleAndUnfoldingIds id = fvVarSet $ filterFV isId $ bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingFVs :: Id -> FV bndrRuleAndUnfoldingFVs id | isId id = idRuleFVs id `unionFV` idUnfoldingFVs id | otherwise = emptyFV idRuleVars ::Id -> VarSet -- Does *not* include CoreUnfolding vars idRuleVars id = fvVarSet $ idRuleFVs id idRuleFVs :: Id -> FV idRuleFVs id = assert (isId id) $ FV.mkFVs (dVarSetElems $ ruleInfoFreeVars (idSpecialisation id)) idUnfoldingVars :: Id -> VarSet -- Produce free vars for an unfolding, but NOT for an ordinary -- (non-inline) unfolding, since it is a dup of the rhs -- and we'll get exponential behaviour if we look at both unf and rhs! 
-- But do look at the *real* unfolding, even for loop breakers, else -- we might get out-of-scope variables idUnfoldingVars id = fvVarSet $ idUnfoldingFVs id idUnfoldingFVs :: Id -> FV idUnfoldingFVs id = stableUnfoldingFVs (realIdUnfolding id) `orElse` emptyFV stableUnfoldingVars :: Unfolding -> Maybe VarSet stableUnfoldingVars unf = fvVarSet `fmap` stableUnfoldingFVs unf stableUnfoldingFVs :: Unfolding -> Maybe FV stableUnfoldingFVs unf = case unf of CoreUnfolding { uf_tmpl = rhs, uf_src = src } | isStableSource src -> Just (filterFV isLocalVar $ expr_fvs rhs) DFunUnfolding { df_bndrs = bndrs, df_args = args } -> Just (filterFV isLocalVar $ FV.delFVs (mkVarSet bndrs) $ exprs_fvs args) -- DFuns are top level, so no fvs from types of bndrs _other -> Nothing {- ************************************************************************ * * \subsection{Free variables (and types)} * * ************************************************************************ -} freeVarsBind :: CoreBind -> DVarSet -- Free vars of scope of binding -> (CoreBindWithFVs, DVarSet) -- Return free vars of binding + scope freeVarsBind (NonRec binder rhs) body_fvs = ( AnnNonRec binder rhs2 , freeVarsOf rhs2 `unionFVs` body_fvs2 `unionFVs` bndrRuleAndUnfoldingVarsDSet binder ) where rhs2 = freeVars rhs body_fvs2 = binder `delBinderFV` body_fvs freeVarsBind (Rec binds) body_fvs = ( AnnRec (binders `zip` rhss2) , delBindersFV binders all_fvs ) where (binders, rhss) = unzip binds rhss2 = map freeVars rhss rhs_body_fvs = foldr (unionFVs . freeVarsOf) body_fvs rhss2 binders_fvs = fvDVarSet $ mapUnionFV bndrRuleAndUnfoldingFVs binders -- See Note [The FVAnn invariant] all_fvs = rhs_body_fvs `unionFVs` binders_fvs -- The "delBinderFV" happens after adding the idSpecVars, -- since the latter may add some of the binders as fvs freeVars :: CoreExpr -> CoreExprWithFVs -- ^ Annotate a 'CoreExpr' with its (non-global) free type -- and value variables at every tree node. 
freeVars = go where go :: CoreExpr -> CoreExprWithFVs go (Var v) | isLocalVar v = (aFreeVar v `unionFVs` ty_fvs `unionFVs` mult_vars, AnnVar v) | otherwise = (emptyDVarSet, AnnVar v) where mult_vars = tyCoVarsOfTypeDSet (varMult v) ty_fvs = dVarTypeTyCoVars v -- See Note [The FVAnn invariant] go (Lit lit) = (emptyDVarSet, AnnLit lit) go (Lam b body) = ( b_fvs `unionFVs` (b `delBinderFV` body_fvs) , AnnLam b body' ) where body'@(body_fvs, _) = go body b_ty = idType b b_fvs = tyCoVarsOfTypeDSet b_ty -- See Note [The FVAnn invariant] go (App fun arg) = ( freeVarsOf fun' `unionFVs` freeVarsOf arg' , AnnApp fun' arg' ) where fun' = go fun arg' = go arg go (Case scrut bndr ty alts) = ( (bndr `delBinderFV` alts_fvs) `unionFVs` freeVarsOf scrut2 `unionFVs` tyCoVarsOfTypeDSet ty -- Don't need to look at (idType bndr) -- because that's redundant with scrut , AnnCase scrut2 bndr ty alts2 ) where scrut2 = go scrut (alts_fvs_s, alts2) = mapAndUnzip fv_alt alts alts_fvs = unionFVss alts_fvs_s fv_alt (Alt con args rhs) = (delBindersFV args (freeVarsOf rhs2), (AnnAlt con args rhs2)) where rhs2 = go rhs go (Let bind body) = (bind_fvs, AnnLet bind2 body2) where (bind2, bind_fvs) = freeVarsBind bind (freeVarsOf body2) body2 = go body go (Cast expr co) = ( freeVarsOf expr2 `unionFVs` cfvs , AnnCast expr2 (cfvs, co) ) where expr2 = go expr cfvs = tyCoVarsOfCoDSet co go (Tick tickish expr) = ( tickishFVs tickish `unionFVs` freeVarsOf expr2 , AnnTick tickish expr2 ) where expr2 = go expr tickishFVs (Breakpoint _ _ ids _) = mkDVarSet ids tickishFVs _ = emptyDVarSet go (Type ty) = (tyCoVarsOfTypeDSet ty, AnnType ty) go (Coercion co) = (tyCoVarsOfCoDSet co, AnnCoercion co) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/FamInstEnv.hs0000644000000000000000000020276607346545000021161 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} -- (c) The University of Glasgow 2006 -- -- FamInstEnv: Type 
checked family instance declarations module GHC.Core.FamInstEnv ( FamInst(..), FamFlavor(..), famInstAxiom, famInstTyCon, famInstRHS, famInstsRepTyCons, famInstRepTyCon_maybe, dataFamInstRepTyCon, pprFamInst, pprFamInsts, orphNamesOfFamInst, mkImportedFamInst, mkLocalFamInst, FamInstEnvs, FamInstEnv, emptyFamInstEnv, emptyFamInstEnvs, unionFamInstEnv, extendFamInstEnv, extendFamInstEnvList, famInstEnvElts, famInstEnvSize, familyInstances, familyNameInstances, -- * CoAxioms mkCoAxBranch, mkBranchedCoAxiom, mkUnbranchedCoAxiom, mkSingleCoAxiom, mkNewTypeCoAxiom, FamInstMatch(..), lookupFamInstEnv, lookupFamInstEnvConflicts, lookupFamInstEnvByTyCon, isDominatedBy, apartnessCheck, compatibleBranches, -- Injectivity InjectivityCheckResult(..), lookupFamInstEnvInjectivityConflicts, injectiveBranches, -- Normalisation topNormaliseType, topNormaliseType_maybe, normaliseType, normaliseTcApp, topReduceTyFamApp_maybe, reduceTyFamApp_maybe ) where import GHC.Prelude import GHC.Core( IsOrphan, chooseOrphanAnchor ) import GHC.Core.Unify import GHC.Core.Type as Type import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Compare( eqType, eqTypes ) import GHC.Core.TyCon import GHC.Core.Coercion import GHC.Core.Coercion.Axiom import GHC.Core.Reduction import GHC.Core.RoughMap import GHC.Core.FVs( orphNamesOfAxiomLHS ) import GHC.Builtin.Types.Literals( tryMatchFam ) import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Name import GHC.Types.Var import GHC.Types.SrcLoc import GHC.Types.Name.Set import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Data.Maybe import GHC.Data.Bag import GHC.Data.List.Infinite (Infinite (..)) import qualified GHC.Data.List.Infinite as Inf import Control.Monad import Data.List( mapAccumL ) import Data.Array( Array, assocs ) {- ************************************************************************ * * Type checked family instance heads * * 
************************************************************************ Note [FamInsts and CoAxioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * CoAxioms and FamInsts are just like DFunIds and ClsInsts * A CoAxiom is a System-FC thing: it can relate any two types * A FamInst is a Haskell source-language thing, corresponding to a type/data family instance declaration. - The FamInst contains a CoAxiom, which is the evidence for the instance - The LHS of the CoAxiom is always of form F ty1 .. tyn where F is a type family -} data FamInst -- See Note [FamInsts and CoAxioms] = FamInst { fi_axiom :: CoAxiom Unbranched -- The new coercion axiom -- introduced by this family -- instance -- INVARIANT: apart from freshening (see below) -- fi_tvs = cab_tvs of the (single) axiom branch -- fi_cvs = cab_cvs ...ditto... -- fi_tys = cab_lhs ...ditto... -- fi_rhs = cab_rhs ...ditto... , fi_flavor :: FamFlavor -- Everything below here is a redundant, -- cached version of the two things above -- except that the TyVars are freshened , fi_fam :: Name -- Family name -- Used for "rough matching"; same idea as for class instances -- See Note [Rough matching in class and family instances] -- in GHC.Core.Unify , fi_tcs :: [RoughMatchTc] -- Top of type args -- INVARIANT: fi_tcs = roughMatchTcs fi_tys -- Used for "proper matching"; ditto , fi_tvs :: [TyVar] -- Template tyvars for full match , fi_cvs :: [CoVar] -- Template covars for full match -- Like ClsInsts, these variables are always fresh -- See Note [Template tyvars are fresh] in GHC.Core.InstEnv , fi_tys :: [Type] -- The LHS type patterns -- May be eta-reduced; see Note [Eta reduction for data families] -- in GHC.Core.Coercion.Axiom , fi_rhs :: Type -- the RHS, with its freshened vars , fi_orphan :: IsOrphan } data FamFlavor = SynFamilyInst -- A synonym family | DataFamilyInst TyCon -- A data family, with its representation TyCon {- Note [Arity of data families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Data family instances might legitimately be over- or 
under-saturated. Under-saturation has two potential causes: U1) Eta reduction. See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. U2) When the user has specified a return kind instead of written out patterns. Example: data family Sing (a :: k) data instance Sing :: Bool -> Type The data family tycon Sing has an arity of 2, the k and the a. But the data instance has only one pattern, Bool (standing in for k). This instance is equivalent to `data instance Sing (a :: Bool)`, but without the last pattern, we have an under-saturated data family instance. On its own, this example is not compelling enough to add support for under-saturation, but U1 makes this feature more compelling. Over-saturation is also possible: O1) If the data family's return kind is a type variable (see also #12369), an instance might legitimately have more arguments than the family. Example: data family Fix :: (Type -> k) -> k data instance Fix f = MkFix1 (f (Fix f)) data instance Fix f x = MkFix2 (f (Fix f x) x) In the first instance here, the k in the data family kind is chosen to be Type. In the second, it's (Type -> Type). However, we require that any over-saturation is eta-reducible. That is, we require that any extra patterns be bare unrepeated type variables; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. Accordingly, the FamInst is never over-saturated. Why can we allow such flexibility for data families but not for type families? Because data families can be decomposed -- that is, they are generative and injective. A Type family is neither and so always must be applied to all its arguments. 
-} -- Obtain the axiom of a family instance famInstAxiom :: FamInst -> CoAxiom Unbranched famInstAxiom = fi_axiom -- Split the left-hand side of the FamInst famInstSplitLHS :: FamInst -> (TyCon, [Type]) famInstSplitLHS (FamInst { fi_axiom = axiom, fi_tys = lhs }) = (coAxiomTyCon axiom, lhs) -- Get the RHS of the FamInst famInstRHS :: FamInst -> Type famInstRHS = fi_rhs -- Get the family TyCon of the FamInst famInstTyCon :: FamInst -> TyCon famInstTyCon = coAxiomTyCon . famInstAxiom -- Return the representation TyCons introduced by data family instances, if any famInstsRepTyCons :: [FamInst] -> [TyCon] famInstsRepTyCons fis = [tc | FamInst { fi_flavor = DataFamilyInst tc } <- fis] -- Extracts the TyCon for this *data* (or newtype) instance famInstRepTyCon_maybe :: FamInst -> Maybe TyCon famInstRepTyCon_maybe fi = case fi_flavor fi of DataFamilyInst tycon -> Just tycon SynFamilyInst -> Nothing dataFamInstRepTyCon :: FamInst -> TyCon dataFamInstRepTyCon fi = case fi_flavor fi of DataFamilyInst tycon -> tycon SynFamilyInst -> pprPanic "dataFamInstRepTyCon" (ppr fi) orphNamesOfFamInst :: FamInst -> NameSet orphNamesOfFamInst (FamInst { fi_axiom = ax }) = orphNamesOfAxiomLHS ax {- ************************************************************************ * * Pretty printing * * ************************************************************************ -} instance NamedThing FamInst where getName = coAxiomName . fi_axiom instance Outputable FamInst where ppr = pprFamInst pprFamInst :: FamInst -> SDoc -- Prints the FamInst as a family instance declaration -- NB: This function, FamInstEnv.pprFamInst, is used only for internal, -- debug printing. 
See GHC.Types.TyThing.Ppr.pprFamInst for printing for the user pprFamInst (FamInst { fi_flavor = flavor, fi_axiom = ax , fi_tvs = tvs, fi_tys = tys, fi_rhs = rhs }) = hang (ppr_tc_sort <+> text "instance" <+> pprCoAxBranchUser (coAxiomTyCon ax) (coAxiomSingleBranch ax)) 2 (whenPprDebug debug_stuff) where ppr_tc_sort = case flavor of SynFamilyInst -> text "type" DataFamilyInst tycon | isDataTyCon tycon -> text "data" | isNewTyCon tycon -> text "newtype" | isAbstractTyCon tycon -> text "data" | otherwise -> text "WEIRD" <+> ppr tycon debug_stuff = vcat [ text "Coercion axiom:" <+> ppr ax , text "Tvs:" <+> ppr tvs , text "LHS:" <+> ppr tys , text "RHS:" <+> ppr rhs ] pprFamInsts :: [FamInst] -> SDoc pprFamInsts finsts = vcat (map pprFamInst finsts) {- ********************************************************************* * * Making FamInsts * * ********************************************************************* -} mkLocalFamInst :: FamFlavor -> CoAxiom Unbranched -> [TyVar] -> [CoVar] -> [Type] -> Type -> FamInst mkLocalFamInst flavor axiom tvs cvs lhs rhs = FamInst { fi_fam = fam_tc_name , fi_flavor = flavor , fi_tcs = roughMatchTcs lhs , fi_tvs = tvs , fi_cvs = cvs , fi_tys = lhs , fi_rhs = rhs , fi_axiom = axiom , fi_orphan = chooseOrphanAnchor orph_names } where mod = assert (isExternalName (coAxiomName axiom)) $ nameModule (coAxiomName axiom) is_local name = nameIsLocalOrFrom mod name orph_names = filterNameSet is_local $ orphNamesOfAxiomLHS axiom `extendNameSet` fam_tc_name fam_tc_name = tyConName (coAxiomTyCon axiom) {- Note [Lazy axiom match] ~~~~~~~~~~~~~~~~~~~~~~~ It is Vitally Important that mkImportedFamInst is *lazy* in its axiom parameter. The axiom is loaded lazily, via a forkM, in GHC.IfaceToCore. Sometime later, mkImportedFamInst is called using that axiom. However, the axiom may itself depend on entities which are not yet loaded as of the time of the mkImportedFamInst. 
Thus, if mkImportedFamInst eagerly looks at the axiom, a dependency loop spontaneously appears and GHC hangs. The solution is simply for mkImportedFamInst never, ever to look inside of the axiom until everything else is good and ready to do so. We can assume that this readiness has been achieved when some other code pulls on the axiom in the FamInst. Thus, we pattern match on the axiom lazily (in the where clause, not in the parameter list) and we assert the consistency of names there also. -} -- Make a family instance representation from the information found in an -- interface file. In particular, we get the rough match info from the iface -- (instead of computing it here). mkImportedFamInst :: Name -- Name of the family -> [RoughMatchTc] -- Rough match info -> CoAxiom Unbranched -- Axiom introduced -> IsOrphan -> FamInst -- Resulting family instance mkImportedFamInst fam mb_tcs axiom orphan = FamInst { fi_fam = fam, fi_tcs = mb_tcs, fi_tvs = tvs, fi_cvs = cvs, fi_tys = tys, fi_rhs = rhs, fi_axiom = axiom, fi_flavor = flavor, fi_orphan = orphan } where -- See Note [Lazy axiom match] ~(CoAxBranch { cab_lhs = tys , cab_tvs = tvs , cab_cvs = cvs , cab_rhs = rhs }) = coAxiomSingleBranch axiom -- Derive the flavor for an imported FamInst rather disgustingly -- Maybe we should store it in the IfaceFamInst? flavor = case splitTyConApp_maybe rhs of Just (tc, _) | Just ax' <- tyConFamilyCoercion_maybe tc , ax' == axiom -> DataFamilyInst tc _ -> SynFamilyInst {- ************************************************************************ * * FamInstEnv * * ************************************************************************ Note [FamInstEnv] ~~~~~~~~~~~~~~~~~ A FamInstEnv is a RoughMap of instance heads. Specifically, the keys are formed by the family name and the instance arguments. 
That is, an instance: type instance Fam (Maybe Int) a would insert into the instance environment an instance with a key of the form [RM_KnownTc Fam, RM_KnownTc Maybe, RM_WildCard] See Note [RoughMap] in GHC.Core.RoughMap. The same FamInstEnv includes both 'data family' and 'type family' instances. Type families are reduced during type inference, but not data families; the user explains when to use a data family instance by using constructors and pattern matching. Nevertheless it is still useful to have data families in the FamInstEnv: - For finding overlaps and conflicts - For finding the representation type...see FamInstEnv.topNormaliseType and its call site in GHC.Core.Opt.Simplify.Iteration - In standalone deriving instance Eq (T [Int]) we need to find the representation type for T [Int] Note [Varying number of patterns for data family axioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For data families, the number of patterns may vary between instances. For example data family T a b data instance T Int a = T1 a | T2 data instance T Bool [a] = T3 a Then we get a data type for each instance, and an axiom: data TInt a = T1 a | T2 data TBoolList a = T3 a axiom ax7 :: T Int ~ TInt -- Eta-reduced axiom ax8 a :: T Bool [a] ~ TBoolList a These two axioms for T, one with one pattern, one with two; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom Note [FamInstEnv determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We turn FamInstEnvs into a list in some places that don't directly affect the ABI. That happens in family consistency checks and when producing output for `:info`. Unfortunately that nondeterminism is nonlocal and it's hard to tell what it affects without following a chain of functions. It's also easy to accidentally make that nondeterminism affect the ABI. Furthermore the envs should be relatively small, so it should be free to use deterministic maps here. 
Testing with nofib and validate detected no difference between UniqFM and UniqDFM. See Note [Deterministic UniqFM]. -} type FamInstEnvs = (FamInstEnv, FamInstEnv) -- External package inst-env, Home-package inst-env data FamInstEnv = FamIE !Int -- The number of instances, used to choose the smaller environment -- when checking type family consistency of home modules. !(RoughMap FamInst) -- See Note [FamInstEnv] -- See Note [FamInstEnv determinism] instance Outputable FamInstEnv where ppr (FamIE _ fs) = text "FamIE" <+> vcat (map ppr $ elemsRM fs) famInstEnvSize :: FamInstEnv -> Int famInstEnvSize (FamIE sz _) = sz -- | Create a 'FamInstEnv' from 'Name' indices. -- INVARIANTS: -- * The fs_tvs are distinct in each FamInst -- of a range value of the map (so we can safely unify them) emptyFamInstEnvs :: (FamInstEnv, FamInstEnv) emptyFamInstEnvs = (emptyFamInstEnv, emptyFamInstEnv) emptyFamInstEnv :: FamInstEnv emptyFamInstEnv = FamIE 0 emptyRM famInstEnvElts :: FamInstEnv -> [FamInst] famInstEnvElts (FamIE _ rm) = elemsRM rm -- See Note [FamInstEnv determinism] -- It's OK to use nonDetStrictFoldUDFM here since we're just computing the -- size. familyInstances :: (FamInstEnv, FamInstEnv) -> TyCon -> [FamInst] familyInstances envs tc = familyNameInstances envs (tyConName tc) familyNameInstances :: (FamInstEnv, FamInstEnv) -> Name -> [FamInst] familyNameInstances (pkg_fie, home_fie) fam = get home_fie ++ get pkg_fie where get :: FamInstEnv -> [FamInst] get (FamIE _ env) = lookupRM [RML_KnownTc fam] env -- | Makes no particular effort to detect conflicts. 
unionFamInstEnv :: FamInstEnv -> FamInstEnv -> FamInstEnv unionFamInstEnv (FamIE sa a) (FamIE sb b) = FamIE (sa + sb) (a `unionRM` b) extendFamInstEnvList :: FamInstEnv -> [FamInst] -> FamInstEnv extendFamInstEnvList inst_env fis = foldl' extendFamInstEnv inst_env fis extendFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv extendFamInstEnv (FamIE s inst_env) ins_item@(FamInst {fi_fam = cls_nm}) = FamIE (s+1) $ insertRM rough_tmpl ins_item inst_env where rough_tmpl = RM_KnownTc cls_nm : fi_tcs ins_item {- ************************************************************************ * * Compatibility * * ************************************************************************ Note [Apartness] ~~~~~~~~~~~~~~~~ In dealing with closed type families, we must be able to check that one type will never reduce to another. This check is called /apartness/. The check is always between a target (which may be an arbitrary type) and a pattern. Here is how we do it: apart(target, pattern) = not (unify(flatten(target), pattern)) where flatten (implemented in flattenTys, below) converts all type-family applications into fresh variables. (See Note [Flattening type-family applications when matching instances] in GHC.Core.Unify.) Note [Compatibility] ~~~~~~~~~~~~~~~~~~~~ Two patterns are /compatible/ if either of the following conditions hold: 1) The patterns are apart. 2) The patterns unify with a substitution S, and their right hand sides equal under that substitution. For open type families, only compatible instances are allowed. For closed type families, the story is slightly more complicated. Consider the following: type family F a where F Int = Bool F a = Int g :: Show a => a -> F a g x = length (show x) Should that type-check? No. We need to allow for the possibility that 'a' might be Int and therefore 'F a' should be Bool. We can simplify 'F a' to Int only when we can be sure that 'a' is not Int. 
To achieve this, after finding a possible match within the equations, we have to go back to all previous equations and check that, under the substitution induced by the match, other branches are surely apart. (See Note [Apartness].) This is similar to what happens with class instance selection, when we need to guarantee that there is only a match and no unifiers. The exact algorithm is different here because the potentially-overlapping group is closed. As another example, consider this: type family G x where G Int = Bool G a = Double type family H y -- no instances Now, we want to simplify (G (H Char)). We can't, because (H Char) might later simplify to be Int. So, (G (H Char)) is stuck, for now. While everything above is quite sound, it isn't as expressive as we'd like. Consider this: type family J a where J Int = Int J a = a Can we simplify (J b) to b? Sure we can. Yes, the first equation matches if b is instantiated with Int, but the RHSs coincide there, so it's all OK. So, the rule is this: when looking up a branch in a closed type family, we find a branch that matches the target, but then we make sure that the target is apart from every previous *incompatible* branch. We don't check the branches that are compatible with the matching branch, because they are either irrelevant (clause 1 of compatible) or benign (clause 2 of compatible). Note [Compatibility of eta-reduced axioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In newtype instances of data families we eta-reduce the axioms, See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. This means that we sometimes need to test compatibility of two axioms that were eta-reduced to different degrees, e.g.: data family D a b c newtype instance D a Int c = DInt (Maybe a) -- D a Int ~ Maybe -- lhs = [a, Int] newtype instance D Bool Int Char = DIntChar Float -- D Bool Int Char ~ Float -- lhs = [Bool, Int, Char] These are obviously incompatible. 
We could detect this by saturating (eta-expanding) the shorter LHS with fresh
tyvars until the lists are of equal length, but instead we can just remove
the tail of the longer list, as those types will simply unify with the freshly
introduced tyvars.

By doing this, in case the LHS are unifiable, the yielded substitution won't
mention the tyvars that appear in the tail we dropped off, and we might try
to test equality RHSes of different kinds, but that's fine since this case
occurs only for data families, where the RHS is a unique tycon and the
equality fails anyway.
-}

-- See Note [Compatibility]
compatibleBranches :: CoAxBranch -> CoAxBranch -> Bool
compatibleBranches (CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
                   (CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 })
  = case tcUnifyTysFG alwaysBindFun commonlhs1 commonlhs2 of
      -- Here we need the cab_tvs of the two branches to be distinct.
      -- See Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom.
      SurelyApart   -> True
      MaybeApart {} -> False
      Unifiable subst -> Type.substTyAddInScope subst rhs1
                         `eqType`
                         Type.substTyAddInScope subst rhs2
  where
    (commonlhs1, commonlhs2) = zipAndUnzip lhs1 lhs2
      -- See Note [Compatibility of eta-reduced axioms]

-- | Result of testing two type family equations for injectivity.
data InjectivityCheckResult
   = InjectivityAccepted
    -- ^ Either RHSs are distinct or unification of RHSs leads to unification
    -- of LHSs
   | InjectivityUnified CoAxBranch CoAxBranch
    -- ^ RHSs unify but LHSs don't unify under that substitution. Relevant for
    -- closed type families where equation after unification might be
    -- overlapped (in which case it is OK if they don't unify). Constructor
    -- stores axioms after unification.

-- | Check whether two type family axioms don't violate injectivity annotation.
injectiveBranches :: [Bool]
                  -> CoAxBranch -> CoAxBranch
                  -> InjectivityCheckResult
injectiveBranches injectivity
                  ax1@(CoAxBranch { cab_tvs = tvs1, cab_lhs = lhs1, cab_rhs = rhs1 })
                  ax2@(CoAxBranch { cab_tvs = tvs2, cab_lhs = lhs2, cab_rhs = rhs2 })
  -- See Note [Verifying injectivity annotation], case 1.
  = let getInjArgs = filterByList injectivity
        in_scope   = mkInScopeSetList (tvs1 ++ tvs2)
    in case tcUnifyTyWithTFs True in_scope rhs1 rhs2 of -- True = two-way pre-unification
       Nothing -> InjectivityAccepted
         -- RHS are different, so equations are injective.
         -- This is case 1A from Note [Verifying injectivity annotation]
       Just subst -- RHS unify under a substitution
         -- If LHSs are equal under the substitution used for RHSs then this pair
         -- of equations does not violate injectivity annotation. If LHSs are not
         -- equal under that substitution then this pair of equations violates
         -- injectivity annotation, but for closed type families it still might
         -- be the case that one LHS after substitution is unreachable.
         | eqTypes lhs1Subst lhs2Subst -- check case 1B1 from Note.
         -> InjectivityAccepted
         | otherwise
         -> InjectivityUnified ( ax1 { cab_lhs = Type.substTys subst lhs1
                                     , cab_rhs = Type.substTy  subst rhs1 })
                               ( ax2 { cab_lhs = Type.substTys subst lhs2
                                     , cab_rhs = Type.substTy  subst rhs2 })
            -- Payload of InjectivityUnified used only for check 1B2, only
            -- for closed type families
         where
           lhs1Subst = Type.substTys subst (getInjArgs lhs1)
           lhs2Subst = Type.substTys subst (getInjArgs lhs2)

-- takes a CoAxiom with unknown branch incompatibilities and computes
-- the compatibilities
-- See Note [Storing compatibility] in GHC.Core.Coercion.Axiom
computeAxiomIncomps :: [CoAxBranch] -> [CoAxBranch]
computeAxiomIncomps branches
  = snd (mapAccumL go [] branches)
  where
    go :: [CoAxBranch] -> CoAxBranch -> ([CoAxBranch], CoAxBranch)
    go prev_brs cur_br
       = (new_br : prev_brs, new_br)
       where
         new_br = cur_br { cab_incomps = mk_incomps prev_brs cur_br }

    -- Keep exactly the earlier branches that are NOT compatible with this one.
    mk_incomps :: [CoAxBranch] -> CoAxBranch -> [CoAxBranch]
    mk_incomps prev_brs cur_br
       = filter (not . compatibleBranches cur_br) prev_brs

{-
************************************************************************
*                                                                      *
           Constructing axioms
    These functions are here because tidyType / tcUnifyTysFG
    are not available in GHC.Core.Coercion.Axiom

    Also computeAxiomIncomps is too sophisticated for CoAxiom
*                                                                      *
************************************************************************

Note [Tidy axioms when we build them]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like types and classes, we build axioms fully quantified over all
their variables, and tidy them when we build them. For example,
we print out axioms and don't want to print stuff like
    F k k a b = ...
Instead we must tidy those kind variables.  See #7524.

We could instead tidy when we print, but that makes it harder to get
things like injectivity errors to come out right. Danger of
     Type family equation violates injectivity annotation.
     Kind variable ‘k’ cannot be inferred from the right-hand side.
In the type family equation: PolyKindVars @[k1] @[k2] ('[] @k1) = '[] @k2 Note [Always number wildcard types in CoAxBranch] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following example (from the DataFamilyInstanceLHS test case): data family Sing (a :: k) data instance Sing (_ :: MyKind) where SingA :: Sing A SingB :: Sing B If we're not careful during tidying, then when this program is compiled with -ddump-types, we'll get the following information: COERCION AXIOMS axiom DataFamilyInstanceLHS.D:R:SingMyKind_0 :: Sing _ = DataFamilyInstanceLHS.R:SingMyKind_ _ It's misleading to have a wildcard type appearing on the RHS like that. To avoid this issue, when building a CoAxiom (which is what eventually gets printed above), we tidy all the variables in an env that already contains '_'. Thus, any variable named '_' will be renamed, giving us the nicer output here: COERCION AXIOMS axiom DataFamilyInstanceLHS.D:R:SingMyKind_0 :: Sing _1 = DataFamilyInstanceLHS.R:SingMyKind_ _1 Which is at least legal syntax. See also Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom; note that we are tidying (changing OccNames only), not freshening, in accordance with that Note. 
-}

-- all axiom roles are Nominal, as this is only used with type families
mkCoAxBranch :: [TyVar] -- original, possibly stale, tyvars
             -> [TyVar] -- Extra eta tyvars
             -> [CoVar] -- possibly stale covars
             -> [Type]  -- LHS patterns
             -> Type    -- RHS
             -> [Role]
             -> SrcSpan
             -> CoAxBranch
mkCoAxBranch tvs eta_tvs cvs lhs rhs roles loc
  = CoAxBranch { cab_tvs     = tvs'
               , cab_eta_tvs = eta_tvs'
               , cab_cvs     = cvs'
               , cab_lhs     = tidyTypes env lhs
               , cab_roles   = roles
               , cab_rhs     = tidyType env rhs
               , cab_loc     = loc
               , cab_incomps = placeHolderIncomps }
  where
    (env1, tvs')     = tidyVarBndrs init_tidy_env tvs
    (env2, eta_tvs') = tidyVarBndrs env1 eta_tvs
    (env,  cvs')     = tidyVarBndrs env2 cvs
    -- See Note [Tidy axioms when we build them]
    -- See also Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom

    init_occ_env  = initTidyOccEnv [mkTyVarOccFS (fsLit "_")]
    init_tidy_env = mkEmptyTidyEnv init_occ_env
    -- See Note [Always number wildcard types in CoAxBranch]

-- all of the following code is here to avoid mutual dependencies with
-- Coercion
mkBranchedCoAxiom :: Name -> TyCon -> [CoAxBranch] -> CoAxiom Branched
mkBranchedCoAxiom ax_name fam_tc branches
  = CoAxiom { co_ax_unique   = nameUnique ax_name
            , co_ax_name     = ax_name
            , co_ax_tc       = fam_tc
            , co_ax_role     = Nominal
            , co_ax_implicit = False
            , co_ax_branches = manyBranches (computeAxiomIncomps branches) }

mkUnbranchedCoAxiom :: Name -> TyCon -> CoAxBranch -> CoAxiom Unbranched
mkUnbranchedCoAxiom ax_name fam_tc branch
  = CoAxiom { co_ax_unique   = nameUnique ax_name
            , co_ax_name     = ax_name
            , co_ax_tc       = fam_tc
            , co_ax_role     = Nominal
            , co_ax_implicit = False
            , co_ax_branches = unbranched (branch { cab_incomps = [] }) }

mkSingleCoAxiom :: Role -> Name
                -> [TyVar] -> [TyVar] -> [CoVar]
                -> TyCon -> [Type] -> Type
                -> CoAxiom Unbranched
-- Make a single-branch CoAxiom, including making the branch itself
-- Used for both type family (Nominal) and data family (Representational)
-- axioms, hence passing in the Role
mkSingleCoAxiom role ax_name tvs eta_tvs cvs fam_tc lhs_tys rhs_ty
  = CoAxiom { co_ax_unique   = nameUnique ax_name
            , co_ax_name     = ax_name
            , co_ax_tc       = fam_tc
            , co_ax_role     = role
            , co_ax_implicit = False
            , co_ax_branches = unbranched (branch { cab_incomps = [] }) }
  where
    branch = mkCoAxBranch tvs eta_tvs cvs lhs_tys rhs_ty
                          (map (const Nominal) tvs)
                          (getSrcSpan ax_name)

-- | Create a coercion constructor (axiom) suitable for the given
--   newtype 'TyCon'. The 'Name' should be that of a new coercion
--   'CoAxiom', the 'TyVar's the arguments expected by the @newtype@ and
--   the type the appropriate right hand side of the @newtype@, with
--   the free variables a subset of those 'TyVar's.
mkNewTypeCoAxiom :: Name -> TyCon -> [TyVar] -> [Role] -> Type -> CoAxiom Unbranched
mkNewTypeCoAxiom name tycon tvs roles rhs_ty
  = CoAxiom { co_ax_unique   = nameUnique name
            , co_ax_name     = name
            , co_ax_implicit = True  -- See Note [Implicit axioms] in GHC.Core.TyCon
            , co_ax_role     = Representational
            , co_ax_tc       = tycon
            , co_ax_branches = unbranched (branch { cab_incomps = [] }) }
  where
    branch = mkCoAxBranch tvs [] [] (mkTyVarTys tvs) rhs_ty
                          roles (getSrcSpan name)

{-
************************************************************************
*                                                                      *
                Looking up a family instance
*                                                                      *
************************************************************************

@lookupFamInstEnv@ looks up in a @FamInstEnv@, using a one-way match.
Multiple matches are only possible in case of type families (not data
families), and then, it doesn't matter which match we choose (as the
instances are guaranteed confluent).

We return the matching family instances and the type instance at which it
matches.  For example, if we lookup 'T [Int]' and have a family instance

  data instance T [a] = ..

desugared to

  data :R42T a = ..
  coe :Co:R42T a :: T [a] ~ :R42T a

we return the matching instance '(FamInst{.., fi_tycon = :R42T}, Int)'.
-}

-- when matching a type family application, we get a FamInst,
-- and the list of types the axiom should be applied to
data FamInstMatch = FamInstMatch { fim_instance :: FamInst
                                 , fim_tys      :: [Type]
                                 , fim_cos      :: [Coercion]
                                 }
  -- See Note [Over-saturated matches]

instance Outputable FamInstMatch where
  ppr (FamInstMatch { fim_instance = inst
                    , fim_tys      = tys
                    , fim_cos      = cos })
    = text "match with" <+> parens (ppr inst) <+> ppr tys <+> ppr cos

-- | All instances of the given family TyCon, external package first.
lookupFamInstEnvByTyCon :: FamInstEnvs -> TyCon -> [FamInst]
lookupFamInstEnvByTyCon (pkg_ie, home_ie) fam_tc
  = get pkg_ie ++ get home_ie
  where
    get (FamIE _ rm) = lookupRM [RML_KnownTc (tyConName fam_tc)] rm

lookupFamInstEnv
    :: FamInstEnvs
    -> TyCon -> [Type]          -- What we are looking for
    -> [FamInstMatch]           -- Successful matches
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnv = lookup_fam_inst_env WantMatches

lookupFamInstEnvConflicts
    :: FamInstEnvs
    -> FamInst          -- Putative new instance
    -> [FamInst]        -- Conflicting matches (don't look at the fim_tys field)
-- E.g. when we are about to add
--    f : type instance F [a] = a->a
-- we do (lookupFamInstConflicts f [b])
-- to find conflicting matches
--
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnvConflicts envs fam_inst
  = lookup_fam_inst_env (WantConflicts fam_inst) envs fam tys
  where
    (fam, tys) = famInstSplitLHS fam_inst

--------------------------------------------------------------------------------
--                 Type family injectivity checking bits                       --
--------------------------------------------------------------------------------

{- Note [Verifying injectivity annotation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Injectivity means that the RHS of a type family uniquely determines the LHS
(see Note [Type inference for type families with injectivity]).  The user
informs us about injectivity using an injectivity annotation and it is GHC's
task to verify that this annotation is correct w.r.t. type family equations.
Whenever we see a new equation of a type family we need to make sure that adding this equation to the already known equations of a type family does not violate the injectivity annotation supplied by the user (see Note [Injectivity annotation]). Of course if the type family has no injectivity annotation then no check is required. But if a type family has injectivity annotation we need to make sure that the following conditions hold: 1. For each pair of *different* equations of a type family, one of the following conditions holds: A: RHSs are different. (Check done in GHC.Core.FamInstEnv.injectiveBranches) B1: OPEN TYPE FAMILIES: If the RHSs can be unified under some substitution then it must be possible to unify the LHSs under the same substitution. Example: type family FunnyId a = r | r -> a type instance FunnyId Int = Int type instance FunnyId a = a RHSs of these two equations unify under [ a |-> Int ] substitution. Under this substitution LHSs are equal therefore these equations don't violate injectivity annotation. (Check done in GHC.Core.FamInstEnv.injectiveBranches) B2: CLOSED TYPE FAMILIES: If the RHSs can be unified under some substitution then either the LHSs unify under the same substitution or the LHS of the latter equation is overlapped by earlier equations. Example 1: type family SwapIntChar a = r | r -> a where SwapIntChar Int = Char SwapIntChar Char = Int SwapIntChar a = a Say we are checking the last two equations. RHSs unify under [ a |-> Int ] substitution but LHSs don't. So we apply the substitution to LHS of last equation and check whether it is overlapped by any of previous equations. Since it is overlapped by the first equation we conclude that pair of last two equations does not violate injectivity annotation. (Check done in GHC.Tc.Validity.checkValidCoAxiom#gather_conflicts) A special case of B is when RHSs unify with an empty substitution ie. they are identical. 
If any of the above two conditions holds we conclude that the pair of equations does not violate injectivity annotation. But if we find a pair of equations where neither of the above holds we report that this pair violates injectivity annotation because for a given RHS we don't have a unique LHS. (Note that (B) actually implies (A).) Note that we only take into account these LHS patterns that were declared as injective. 2. If an RHS of a type family equation is a bare type variable then all LHS variables (including implicit kind variables) also have to be bare. In other words, this has to be a sole equation of that type family and it has to cover all possible patterns. So for example this definition will be rejected: type family W1 a = r | r -> a type instance W1 [a] = a If it were accepted we could call `W1 [W1 Int]`, which would reduce to `W1 Int` and then by injectivity we could conclude that `[W1 Int] ~ Int`, which is bogus. Checked FamInst.bareTvInRHSViolated. 3. If the RHS of a type family equation is a type family application then the type family is rejected as not injective. This is checked by FamInst.isTFHeaded. 4. If a LHS type variable that is declared as injective is not mentioned in an injective position in the RHS then the type family is rejected as not injective. "Injective position" means either an argument to a type constructor or argument to a type family on injective position. There are subtleties here. See Note [Coverage condition for injective type families] in GHC.Tc.Instance.Family. Check (1) must be done for all family instances (transitively) imported. Other checks (2-4) should be done just for locally written equations, as they are checks involving just a single equation, not about interactions. Doing the other checks for imported equations led to #17405, as the behavior of check (4) depends on -XUndecidableInstances (see Note [Coverage condition for injective type families] in FamInst), which may vary between modules. 
See also Note [Injective type families] in GHC.Core.TyCon
-}

-- | Check whether an open type family equation can be added to already existing
-- instance environment without causing conflicts with supplied injectivity
-- annotations.  Returns list of conflicting axioms (type instance
-- declarations).
lookupFamInstEnvInjectivityConflicts
    :: [Bool]         -- injectivity annotation for this type family instance
                      -- INVARIANT: list contains at least one True value
    -> FamInstEnvs    -- all type instances seen so far
    -> FamInst        -- new type instance that we're checking
    -> [CoAxBranch]   -- conflicting instance declarations
lookupFamInstEnvInjectivityConflicts injList fam_inst_envs
                  fam_inst@(FamInst { fi_axiom = new_axiom })
  -- Closed type families cannot accumulate open-family conflicts; bail out.
  | not $ isOpenFamilyTyCon fam
  = []

  | otherwise
  -- See Note [Verifying injectivity annotation]. This function implements
  -- check (1.B1) for open type families described there.
  = map (coAxiomSingleBranch . fi_axiom) $
    filter isInjConflict $
    familyInstances fam_inst_envs fam
    where
      fam        = famInstTyCon fam_inst
      new_branch = coAxiomSingleBranch new_axiom

      -- filtering function used by `lookup_inj_fam_conflicts` to check whether
      -- a pair of equations conflicts with the injectivity annotation.
      isInjConflict (FamInst { fi_axiom = old_axiom })
        | InjectivityAccepted <-
          injectiveBranches injList (coAxiomSingleBranch old_axiom) new_branch
        = False -- no conflict
        | otherwise = True

--------------------------------------------------------------------------------
--                    Type family overlap checking bits                        --
--------------------------------------------------------------------------------

{- Note [Family instance overlap conflicts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- In the case of data family instances, any overlap is fundamentally a
  conflict (as these instances imply injective type mappings).

- In the case of type family instances, overlap is admitted as long as
  the right-hand sides of the overlapping rules coincide under the
  overlap substitution.
  eg
    type instance F a Int = a
    type instance F Int b = b

  These two overlap on (F Int Int) but then both RHSs are
  Int, so all is well. We require that they are syntactically equal;
  anything else would be difficult to test for at this stage.
-}

------------------------------------------------------------
-- Might be a one-way match or a unifier
data FamInstLookupMode a where
  -- The FamInst we are trying to find conflicts against
  WantConflicts :: FamInst -> FamInstLookupMode FamInst
  WantMatches   :: FamInstLookupMode FamInstMatch

lookup_fam_inst_env'          -- The worker, local to this module
    :: forall a . FamInstLookupMode a
    -> FamInstEnv
    -> TyCon -> [Type]        -- What we are looking for
    -> [a]
lookup_fam_inst_env' lookup_mode (FamIE _ ie) fam match_tys
  | isOpenFamilyTyCon fam
  , let xs = rm_fun (lookupRM' rough_tmpl ie)   -- The common case
  -- Avoid doing any of the allocation below if there are no instances to look at.
  , not $ null xs
  = mapMaybe' check_fun xs
  | otherwise = []
  where
    rough_tmpl :: [RoughMatchLookupTc]
    rough_tmpl = RML_KnownTc (tyConName fam)
                 : map typeToRoughMatchLookupTc match_tys

    -- Pick the extraction and checking functions once, based on the mode.
    rm_fun :: (Bag FamInst, [FamInst]) -> [FamInst]
    (rm_fun, check_fun) = case lookup_mode of
                            WantConflicts fam_inst -> (snd, unify_fun fam_inst)
                            WantMatches            -> (bagToList . fst, match_fun)

    -- Function used for finding unifiers
    unify_fun orig_fam_inst
              item@(FamInst { fi_axiom = old_axiom, fi_tys = tpl_tys, fi_tvs = tpl_tvs })
       = assertPpr (tyCoVarsOfTypes tys `disjointVarSet` mkVarSet tpl_tvs)
                   ((ppr fam <+> ppr tys) $$
                    (ppr tpl_tvs <+> ppr tpl_tys)) $
             -- Unification will break badly if the variables overlap
             -- They shouldn't because we allocate separate uniques for them
         if compatibleBranches (coAxiomSingleBranch old_axiom) new_branch
           then Nothing
           else Just item
         -- See Note [Family instance overlap conflicts]
      where
        new_branch = coAxiomSingleBranch (famInstAxiom orig_fam_inst)
        (fam, tys) = famInstSplitLHS orig_fam_inst

    -- Function used for checking matches
    match_fun item@(FamInst { fi_tvs = tpl_tvs, fi_cvs = tpl_cvs
                            , fi_tys = tpl_tys })
      = do subst <- tcMatchTys tpl_tys match_tys1
           return (FamInstMatch { fim_instance = item
                                , fim_tys      = substTyVars subst tpl_tvs
                                                 `chkAppend` match_tys2
                                , fim_cos      = assert (all (isJust . lookupCoVar subst) tpl_cvs) $
                                                 substCoVars subst tpl_cvs
                                })
      where
        (match_tys1, match_tys2) = split_tys tpl_tys

    -- Precondition: the tycon is saturated (or over-saturated)

    -- Deal with over-saturation
    -- See Note [Over-saturated matches]
    split_tys tpl_tys
      | isTypeFamilyTyCon fam
      = pre_rough_split_tys

      | otherwise
      = let (match_tys1, match_tys2) = splitAtList tpl_tys match_tys
        in (match_tys1, match_tys2)

    (pre_match_tys1, pre_match_tys2) = splitAt (tyConArity fam) match_tys
    pre_rough_split_tys
      = (pre_match_tys1, pre_match_tys2)

lookup_fam_inst_env           -- The worker, local to this module
    :: FamInstLookupMode a
    -> FamInstEnvs
    -> TyCon -> [Type]          -- What we are looking for
    -> [a]                      -- Successful matches
-- Precondition: the tycon is saturated (or over-saturated)
lookup_fam_inst_env match_fun (pkg_ie, home_ie) fam tys
  = lookup_fam_inst_env' match_fun home_ie fam tys
    ++ lookup_fam_inst_env' match_fun pkg_ie fam tys

{- Note [Over-saturated matches]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's ok to look up an over-saturated type constructor.  E.g.
type family F a :: * -> * type instance F (a,b) = Either (a->b) The type instance gives rise to a newtype TyCon (at a higher kind which you can't do in Haskell!): newtype FPair a b = FP (Either (a->b)) Then looking up (F (Int,Bool) Char) will return a FamInstMatch (FPair, [Int,Bool,Char]) The "extra" type argument [Char] just stays on the end. We handle data families and type families separately here: * For type families, all instances of a type family must have the same arity, so we can precompute the split between the match_tys and the overflow tys. This is done in pre_rough_split_tys. * For data family instances, though, we need to re-split for each instance, because the breakdown might be different for each instance. Why? Because of eta reduction; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. -} -- checks if one LHS is dominated by a list of other branches -- in other words, if an application would match the first LHS, it is guaranteed -- to match at least one of the others. The RHSs are ignored. -- This algorithm is conservative: -- True -> the LHS is definitely covered by the others -- False -> no information -- It is currently (Oct 2012) used only for generating errors for -- inaccessible branches. If these errors go unreported, no harm done. 
-- This is defined here to avoid a dependency from CoAxiom to Unify

-- | True <=> any application matching the given branch's LHS is guaranteed
-- to match at least one of the listed branches too (RHSs are ignored).
isDominatedBy :: CoAxBranch -> [CoAxBranch] -> Bool
isDominatedBy branch branches
  = any match branches   -- idiomatic 'any'; replaces 'or $ map match branches'
    where
      lhs = coAxBranchLHS branch
      match (CoAxBranch { cab_lhs = tys })
        = isJust $ tcMatchTys tys lhs

{-
************************************************************************
*                                                                      *
                Choosing an axiom application
*                                                                      *
************************************************************************

The lookupFamInstEnv function does a nice job for *open* type families,
but we also need to handle closed ones when normalising a type:
-}

reduceTyFamApp_maybe :: FamInstEnvs
                     -> Role              -- Desired role of result coercion
                     -> TyCon -> [Type]
                     -> Maybe Reduction
-- Attempt to do a *one-step* reduction of a type-family application
--    but *not* newtypes
-- Works on type-synonym families always; data-families only if
--     the role we seek is representational
-- It does *not* normalise the type arguments first, so this may not
--     go as far as you want. If you want normalised type arguments,
--     use topReduceTyFamApp_maybe
--
-- The TyCon can be oversaturated.
-- Works on both open and closed families
--
-- Always returns a *homogeneous* coercion -- type family reductions are
-- always homogeneous
reduceTyFamApp_maybe envs role tc tys
  | Phantom <- role
  = Nothing

  | case role of
      Representational -> isOpenFamilyTyCon     tc
      _                -> isOpenTypeFamilyTyCon tc
       -- If we seek a representational coercion
       -- (e.g. the call in topNormaliseType_maybe) then we can
       -- unwrap data families as well as type-synonym families;
       -- otherwise only type-synonym families
  , FamInstMatch { fim_instance = FamInst { fi_axiom = ax }
                 , fim_tys      = inst_tys
                 , fim_cos      = inst_cos } : _ <- lookupFamInstEnv envs tc tys
      -- NB: Allow multiple matches because of compatible overlap
  = let co = mkUnbranchedAxInstCo role ax inst_tys inst_cos
    in Just $ coercionRedn co

  | Just ax <- isClosedSynFamilyTyConWithAxiom_maybe tc
  , Just (ind, inst_tys, inst_cos) <- chooseBranch ax tys
  = let co = mkAxInstCo role (BranchedAxiom ax ind) inst_tys inst_cos
    in Just $ coercionRedn co

  | Just builtin_fam <- isBuiltInSynFamTyCon_maybe tc
  , Just (rewrite,ts,ty) <- tryMatchFam builtin_fam tys
  = let co = mkAxiomCo rewrite (map mkNomReflCo ts)
    in Just $ mkReduction co ty

  | otherwise
  = Nothing

-- The axiom can be oversaturated. (Closed families only.)
chooseBranch :: CoAxiom Branched -> [Type]
             -> Maybe (BranchIndex, [Type], [Coercion])
    -- found match, with args
chooseBranch axiom tys
  = do { let num_pats = coAxiomNumPats axiom
             (target_tys, extra_tys) = splitAt num_pats tys
             branches = coAxiomBranches axiom
       ; (ind, inst_tys, inst_cos)
           <- findBranch (unMkBranches branches) target_tys
       ; return ( ind, inst_tys `chkAppend` extra_tys, inst_cos ) }

-- The axiom must *not* be oversaturated
findBranch :: Array BranchIndex CoAxBranch
           -> [Type]
           -> Maybe (BranchIndex, [Type], [Coercion])
    -- coercions relate requested types to returned axiom LHS at role N
findBranch branches target_tys
  = foldr go Nothing (assocs branches)
  where
    go :: (BranchIndex, CoAxBranch)
       -> Maybe (BranchIndex, [Type], [Coercion])
       -> Maybe (BranchIndex, [Type], [Coercion])
    go (index, branch) other
      = let (CoAxBranch { cab_tvs = tpl_tvs, cab_cvs = tpl_cvs
                        , cab_lhs = tpl_lhs
                        , cab_incomps = incomps }) = branch
            in_scope = mkInScopeSet (unionVarSets $
                            map (tyCoVarsOfTypes . coAxBranchLHS) incomps)
            -- See Note [Flattening type-family applications when matching instances]
            -- in GHC.Core.Unify
            flattened_target = flattenTys in_scope target_tys
        in case tcMatchTys tpl_lhs target_tys of
             Just subst -- matching worked. now, check for apartness.
               | apartnessCheck flattened_target branch
               -> -- matching worked & we're apart from all incompatible branches.
                  -- success
                  assert (all (isJust . lookupCoVar subst) tpl_cvs) $
                  Just (index, substTyVars subst tpl_tvs, substCoVars subst tpl_cvs)

             -- failure. keep looking
             _ -> other

-- | Do an apartness check, as described in the "Closed Type Families" paper
-- (POPL '14). This should be used when determining if an equation
-- ('CoAxBranch') of a closed type family can be used to reduce a certain target
-- type family application.
apartnessCheck :: [Type]
                  -- ^ /flattened/ target arguments. Make sure they're flattened! See
                  -- Note [Flattening type-family applications when matching instances]
                  -- in GHC.Core.Unify.
               -> CoAxBranch -- ^ the candidate equation we wish to use
                             -- Precondition: this matches the target
               -> Bool       -- ^ True <=> equation can fire
apartnessCheck flattened_target (CoAxBranch { cab_incomps = incomps })
  = all (isSurelyApart
         . tcUnifyTysFG alwaysBindFun flattened_target
         . coAxBranchLHS) incomps
  where
    isSurelyApart SurelyApart = True
    isSurelyApart _           = False

{-
************************************************************************
*                                                                      *
                Looking up a family instance
*                                                                      *
************************************************************************

Note [Normalising types]
~~~~~~~~~~~~~~~~~~~~~~~~
The topNormaliseType function removes all occurrences of type families
and newtypes from the top-level structure of a type. normaliseTcApp does
the type family lookup and is fairly straightforward. normaliseType
is a little more involved.

The complication comes from the fact that a type family might be used in the
kind of a variable bound in a forall.
We wish to remove this type family application, but that means coming up with a fresh variable (with the new kind). Thus, we need a substitution to be built up as we recur through the type. However, an ordinary TCvSubst just won't do: when we hit a type variable whose kind has changed during normalisation, we need both the new type variable *and* the coercion. We could conjure up a new VarEnv with just this property, but a usable substitution environment already exists: LiftingContexts from the liftCoSubst family of functions, defined in GHC.Core.Coercion. A LiftingContext maps a type variable to a coercion and a coercion variable to a pair of coercions. Let's ignore coercion variables for now. Because the coercion a type variable maps to contains the destination type (via coercionKind), we don't need to store that destination type separately. Thus, a LiftingContext has what we need: a map from type variables to (Coercion, Type) pairs. We also benefit because we can piggyback on the liftCoSubstVarBndr function to deal with binders. However, I had to modify that function to work with this application. Thus, we now have liftCoSubstVarBndrUsing, which takes a function used to process the kind of the binder. We don't wish to lift the kind, but instead normalise it. So, we pass in a callback function that processes the kind of the binder. After that brilliant explanation of all this, I'm sure you've forgotten the dangling reference to coercion variables. What do we do with those? Nothing at all. The point of normalising types is to remove type family applications, but there's no sense in removing these from coercions. We would just get back a new coercion witnessing the equality between the same types as the original coercion. Because coercions are irrelevant anyway, there is no point in doing this. So, whenever we encounter a coercion, we just say that it won't change. That's what the CoercionTy case is doing within normalise_type. 
Note [Normalisation and type synonyms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need to be a bit careful about normalising in the presence of type
synonyms (#13035).  Suppose S is a type synonym, and we have
   S t1 t2
If S is family-free (on its RHS) we can just normalise t1 and t2 and
reconstruct (S t1' t2').  Expanding S could not reveal any new redexes
because type families are saturated.

But if S has a type family on its RHS we expand /before/ normalising
the args t1, t2.  If we normalise t1, t2 first, we'll re-normalise them
after expansion, and that can lead to /exponential/ behaviour; see #13035.

Notice, though, that expanding first can in principle duplicate t1,t2,
which might contain redexes.  I'm sure you could conjure up an exponential
case by that route too, but it hasn't happened in practice yet!
-}

-- | Normalise the top-level structure only; returns the input unchanged
-- when no reduction applies.
topNormaliseType :: FamInstEnvs -> Type -> Type
topNormaliseType env ty
  = case topNormaliseType_maybe env ty of
      Just redn -> reductionReducedType redn
      Nothing   -> ty

topNormaliseType_maybe :: FamInstEnvs -> Type -> Maybe Reduction

-- ^ Get rid of *outermost* (or toplevel)
--      * type function redex
--      * data family redex
--      * newtypes
-- returning an appropriate Representational coercion.  Specifically, if
--   topNormaliseType_maybe env ty = Just (co, ty')
-- then
--   (a) co :: ty ~R ty'
--   (b) ty' is not a newtype, and is not a type-family or data-family redex
--
-- However, ty' can be something like (Maybe (F ty)), where
-- (F ty) is a redex.
--
-- Always operates homogeneously: the returned type has the same kind as the
-- original type, and the returned coercion is always homogeneous.
topNormaliseType_maybe env ty = do { ((co, mkind_co), nty) <- topNormaliseTypeX stepper combine ty ; let hredn = mkHetReduction (mkReduction co nty) mkind_co ; return $ homogeniseHetRedn Representational hredn } where stepper = unwrapNewTypeStepper' `composeSteppers` tyFamStepper combine (c1, mc1) (c2, mc2) = (c1 `mkTransCo` c2, mc1 `mkTransMCo` mc2) unwrapNewTypeStepper' :: NormaliseStepper (Coercion, MCoercionN) unwrapNewTypeStepper' rec_nts tc tys = (, MRefl) <$> unwrapNewTypeStepper rec_nts tc tys -- second coercion below is the kind coercion relating the original type's kind -- to the normalised type's kind tyFamStepper :: NormaliseStepper (Coercion, MCoercionN) tyFamStepper rec_nts tc tys -- Try to step a type/data family = case topReduceTyFamApp_maybe env tc tys of Just (HetReduction (Reduction co rhs) res_co) -> NS_Step rec_nts rhs (co, res_co) _ -> NS_Done --------------- -- | Try to simplify a type-family application, by *one* step -- If topReduceTyFamApp_maybe env r F tys = Just (HetReduction (Reduction co rhs) res_co) -- then co :: F tys ~R# rhs -- res_co :: typeKind(F tys) ~ typeKind(rhs) -- Type families and data families; always Representational role topReduceTyFamApp_maybe :: FamInstEnvs -> TyCon -> [Type] -> Maybe HetReduction topReduceTyFamApp_maybe envs fam_tc arg_tys | isFamilyTyCon fam_tc -- type families and data families , Just redn <- reduceTyFamApp_maybe envs role fam_tc ntys = Just $ mkHetReduction (mkTyConAppCo role fam_tc args_cos `mkTransRedn` redn) res_co | otherwise = Nothing where role = Representational ArgsReductions (Reductions args_cos ntys) res_co = initNormM envs role (tyCoVarsOfTypes arg_tys) $ normalise_tc_args fam_tc arg_tys --------------- normaliseType :: FamInstEnvs -> Role -- desired role of coercion -> Type -> Reduction normaliseType env role ty = initNormM env role (tyCoVarsOfType ty) $ normalise_type ty --------------- normaliseTcApp :: FamInstEnvs -> Role -> TyCon -> [Type] -> Reduction -- See comments on 
normaliseType for the arguments of this function normaliseTcApp env role tc tys = initNormM env role (tyCoVarsOfTypes tys) $ normalise_tc_app tc tys ------------------------------------------------------- -- Functions that work in the NormM monad ------------------------------------------------------- -- See Note [Normalising types] about the LiftingContext normalise_tc_app :: TyCon -> [Type] -> NormM Reduction normalise_tc_app tc tys | ExpandsSyn tenv rhs tys' <- expandSynTyCon_maybe tc tys , not (isFamFreeTyCon tc) -- Expand and try again = -- A synonym with type families in the RHS -- Expand and try again -- See Note [Normalisation and type synonyms] normalise_type (mkAppTys (substTy (mkTvSubstPrs tenv) rhs) tys') | isFamilyTyCon tc = -- A type-family application do { env <- getEnv ; role <- getRole ; ArgsReductions redns@(Reductions args_cos ntys) res_co <- normalise_tc_args tc tys ; case reduceTyFamApp_maybe env role tc ntys of Just redn1 -> do { redn2 <- normalise_reduction redn1 ; let redn3 = mkTyConAppCo role tc args_cos `mkTransRedn` redn2 ; return $ assemble_result role redn3 res_co } _ -> -- No unique matching family instance exists; -- we do not do anything return $ assemble_result role (mkTyConAppRedn role tc redns) res_co } | otherwise = -- A synonym with no type families in the RHS; or data type etc -- Just normalise the arguments and rebuild do { ArgsReductions redns res_co <- normalise_tc_args tc tys ; role <- getRole ; return $ assemble_result role (mkTyConAppRedn role tc redns) res_co } where assemble_result :: Role -- r, ambient role in NormM monad -> Reduction -- orig_ty ~r nty, possibly heterogeneous (nty possibly of changed kind) -> MCoercionN -- typeKind(orig_ty) ~N typeKind(nty) -> Reduction -- orig_ty ~r nty_casted -- where nty_casted has same kind as orig_ty assemble_result r redn kind_co = mkCoherenceRightMRedn r redn (mkSymMCo kind_co) normalise_tc_args :: TyCon -> [Type] -> NormM ArgsReductions normalise_tc_args tc tys = do { role <- 
getRole ; normalise_args (tyConKind tc) (tyConRolesX role tc) tys } normalise_type :: Type -> NormM Reduction -- Normalise the input type, by eliminating *all* type-function redexes -- but *not* newtypes (which are visible to the programmer) -- Returns with Refl if nothing happens -- Does nothing to newtypes -- The returned coercion *must* be *homogeneous* -- See Note [Normalising types] -- Try not to disturb type synonyms if possible normalise_type ty = go ty where go :: Type -> NormM Reduction go (TyConApp tc tys) = normalise_tc_app tc tys go ty@(LitTy {}) = do { r <- getRole ; return $ mkReflRedn r ty } go (AppTy ty1 ty2) = go_app_tys ty1 [ty2] go (FunTy { ft_af = vis, ft_mult = w, ft_arg = ty1, ft_res = ty2 }) = do { arg_redn <- go ty1 ; res_redn <- go ty2 ; w_redn <- withRole Nominal $ go w ; r <- getRole ; return $ mkFunRedn r vis w_redn arg_redn res_redn } go (ForAllTy (Bndr tcvar vis) ty) = do { (lc', tv', k_redn) <- normalise_var_bndr tcvar ; redn <- withLC lc' $ normalise_type ty ; return $ mkForAllRedn vis tv' k_redn redn } go (TyVarTy tv) = normalise_tyvar tv go (CastTy ty co) = do { redn <- go ty ; lc <- getLC ; let co' = substRightCo lc co ; return $ mkCastRedn2 Nominal ty co redn co' -- ^^^^^^^^^^^ uses castCoercionKind2 } go (CoercionTy co) = do { lc <- getLC ; r <- getRole ; let kco = liftCoSubst Nominal lc (coercionType co) co' = substRightCo lc co ; return $ mkProofIrrelRedn r kco co co' } go_app_tys :: Type -- function -> [Type] -- args -> NormM Reduction -- cf. GHC.Tc.Solver.Rewrite.rewrite_app_ty_args go_app_tys (AppTy ty1 ty2) tys = go_app_tys ty1 (ty2 : tys) go_app_tys fun_ty arg_tys = do { fun_redn@(Reduction fun_co nfun) <- go fun_ty ; case tcSplitTyConApp_maybe nfun of Just (tc, xis) -> do { redn <- go (mkTyConApp tc (xis ++ arg_tys)) -- rewrite_app_ty_args avoids redundantly processing the xis, -- but that's a much more performance-sensitive function. -- This type normalisation is not called in a loop. 
; return $ mkAppCos fun_co (map mkNomReflCo arg_tys) `mkTransRedn` redn } Nothing -> do { ArgsReductions redns res_co <- normalise_args (typeKind nfun) (Inf.repeat Nominal) arg_tys ; role <- getRole ; return $ mkCoherenceRightMRedn role (mkAppRedns fun_redn redns) (mkSymMCo res_co) } } normalise_args :: Kind -- of the function -> Infinite Role -- roles at which to normalise args -> [Type] -- args -> NormM ArgsReductions -- returns ArgsReductions (Reductions cos xis) res_co, -- where each xi is the normalised version of the corresponding type, -- each co is orig_arg ~ xi, and res_co :: kind(f orig_args) ~ kind(f xis). -- NB: The xis might *not* have the same kinds as the input types, -- but the resulting application *will* be well-kinded -- cf. GHC.Tc.Solver.Rewrite.rewrite_args_slow normalise_args fun_ki roles args = do { normed_args <- zipWithM normalise1 (Inf.toList roles) args ; return $ simplifyArgsWorker ki_binders inner_ki fvs roles normed_args } where (ki_binders, inner_ki) = splitPiTys fun_ki fvs = tyCoVarsOfTypes args normalise1 role ty = withRole role $ normalise_type ty normalise_tyvar :: TyVar -> NormM Reduction normalise_tyvar tv = assert (isTyVar tv) $ do { lc <- getLC ; r <- getRole ; return $ case liftCoSubstTyVar lc r tv of Just co -> coercionRedn co Nothing -> mkReflRedn r (mkTyVarTy tv) } normalise_reduction :: Reduction -> NormM Reduction normalise_reduction (Reduction co ty) = do { redn' <- normalise_type ty ; return $ co `mkTransRedn` redn' } normalise_var_bndr :: TyCoVar -> NormM (LiftingContext, TyCoVar, Reduction) normalise_var_bndr tcvar -- works for both tvar and covar = do { lc1 <- getLC ; env <- getEnv ; let callback lc ki = runNormM (normalise_type ki) env lc Nominal ; return $ liftCoSubstVarBndrUsing reductionCoercion callback lc1 tcvar } -- | a monad for the normalisation functions, reading 'FamInstEnvs', -- a 'LiftingContext', and a 'Role'. 
newtype NormM a = NormM { runNormM :: FamInstEnvs -> LiftingContext -> Role -> a } deriving (Functor) initNormM :: FamInstEnvs -> Role -> TyCoVarSet -- the in-scope variables -> NormM a -> a initNormM env role vars (NormM thing_inside) = thing_inside env lc role where in_scope = mkInScopeSet vars lc = emptyLiftingContext in_scope getRole :: NormM Role getRole = NormM (\ _ _ r -> r) getLC :: NormM LiftingContext getLC = NormM (\ _ lc _ -> lc) getEnv :: NormM FamInstEnvs getEnv = NormM (\ env _ _ -> env) withRole :: Role -> NormM a -> NormM a withRole r thing = NormM $ \ envs lc _old_r -> runNormM thing envs lc r withLC :: LiftingContext -> NormM a -> NormM a withLC lc thing = NormM $ \ envs _old_lc r -> runNormM thing envs lc r instance Monad NormM where ma >>= fmb = NormM $ \env lc r -> let a = runNormM ma env lc r in runNormM (fmb a) env lc r instance Applicative NormM where pure x = NormM $ \ _ _ _ -> x (<*>) = ap ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/InstEnv.hs0000644000000000000000000020715107346545000020526 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[InstEnv]{Utilities for typechecking instance declarations} The bits common to GHC.Tc.TyCl.Instance and GHC.Tc.Deriv. 
-} {-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-} module GHC.Core.InstEnv ( DFunId, InstMatch, ClsInstLookupResult, CanonicalEvidence(..), PotentialUnifiers(..), getCoherentUnifiers, nullUnifiers, OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe, ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprDFunId, pprInstances, instanceWarning, instanceHead, instanceSig, mkLocalClsInst, mkImportedClsInst, instanceDFunId, updateClsInstDFuns, updateClsInstDFun, fuzzyClsInstCmp, orphNamesOfClsInst, InstEnvs(..), VisibleOrphanModules, InstEnv, LookupInstanceErrReason (..), mkInstEnv, emptyInstEnv, unionInstEnv, extendInstEnv, filterInstEnv, deleteFromInstEnv, deleteDFunFromInstEnv, anyInstEnv, identicalClsInstHead, extendInstEnvList, lookupUniqueInstEnv, lookupInstEnv, instEnvElts, instEnvClasses, mapInstEnv, memberInstEnv, instIsVisible, classInstances, instanceBindFun, classNameInstances, instanceCantMatch, roughMatchTcs, isOverlappable, isOverlapping, isIncoherent ) where import GHC.Prelude hiding ( head, init, last, tail ) import GHC.Tc.Utils.TcType -- InstEnv is really part of the type checker, -- and depends on TcType in many ways import GHC.Core ( IsOrphan(..), isOrphan, chooseOrphanAnchor ) import GHC.Core.RoughMap import GHC.Core.Class import GHC.Core.Unify import GHC.Core.FVs( orphNamesOfTypes, orphNamesOfType ) import GHC.Hs.Extension import GHC.Unit.Module.Env import GHC.Unit.Module.Warnings import GHC.Unit.Types import GHC.Types.Var import GHC.Types.Unique.DSet import GHC.Types.Var.Set import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Basic import GHC.Types.Id import GHC.Generics (Generic) import Data.Data ( Data ) import Data.List.NonEmpty ( NonEmpty (..), nonEmpty ) import qualified Data.List.NonEmpty as NE import Data.Maybe ( isJust ) import GHC.Utils.Outputable hiding ((<>)) import GHC.Utils.Panic import Data.Semigroup {- ************************************************************************ * * ClsInst: the data type for 
type-class instances * * ************************************************************************ -} -- | A type-class instance. Note that there is some tricky laziness at work -- here. See Note [ClsInst laziness and the rough-match fields] for more -- details. data ClsInst = ClsInst { -- Used for "rough matching"; see -- Note [ClsInst laziness and the rough-match fields] -- INVARIANT: is_tcs = KnownTc is_cls_nm : roughMatchTcs is_tys is_cls_nm :: Name -- ^ Class name , is_tcs :: [RoughMatchTc] -- ^ Top of type args -- The class itself is always -- the first element of this list -- | @is_dfun_name = idName . is_dfun@. -- -- We use 'is_dfun_name' for the visibility check, -- 'instIsVisible', which needs to know the 'Module' which the -- dictionary is defined in. However, we cannot use the 'Module' -- attached to 'is_dfun' since doing so would mean we would -- potentially pull in an entire interface file unnecessarily. -- This was the cause of #12367. , is_dfun_name :: Name -- Used for "proper matching"; see Note [Proper-match fields] , is_tvs :: [TyVar] -- Fresh template tyvars for full match -- See Note [Template tyvars are fresh] , is_cls :: Class -- The real class , is_tys :: [Type] -- Full arg types (mentioning is_tvs) -- INVARIANT: is_dfun Id has type -- forall is_tvs. (...) => is_cls is_tys -- (modulo alpha conversion) , is_dfun :: DFunId -- See Note [Haddock assumptions] , is_flag :: OverlapFlag -- See detailed comments with -- the decl of BasicTypes.OverlapFlag , is_orphan :: IsOrphan , is_warn :: Maybe (WarningTxt GhcRn) -- Warning emitted when the instance is used -- See Note [Implementation of deprecated instances] -- in GHC.Tc.Solver.Dict } deriving Data -- | A fuzzy comparison function for class instances, intended for sorting -- instances before displaying them to the user. 
fuzzyClsInstCmp :: ClsInst -> ClsInst -> Ordering fuzzyClsInstCmp x y = foldMap cmp (zip (is_tcs x) (is_tcs y)) where cmp (RM_WildCard, RM_WildCard) = EQ cmp (RM_WildCard, RM_KnownTc _) = LT cmp (RM_KnownTc _, RM_WildCard) = GT cmp (RM_KnownTc x, RM_KnownTc y) = stableNameCmp x y isOverlappable, isOverlapping, isIncoherent, isNonCanonical :: ClsInst -> Bool isOverlappable i = hasOverlappableFlag (overlapMode (is_flag i)) isOverlapping i = hasOverlappingFlag (overlapMode (is_flag i)) isIncoherent i = hasIncoherentFlag (overlapMode (is_flag i)) isNonCanonical i = hasNonCanonicalFlag (overlapMode (is_flag i)) {- Note [ClsInst laziness and the rough-match fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we load 'instance A.C B.T' from A.hi, but suppose that the type B.T is otherwise unused in the program. Then it's stupid to load B.hi, the data type declaration for B.T -- and perhaps further instance declarations! We avoid this as follows: * is_cls_nm, is_tcs, is_dfun_name are all Names. We can poke them to our heart's content. * Proper-match fields. is_dfun, and its related fields is_tvs, is_cls, is_tys contain TyVars, Class, Type, Class etc, and so are all lazy thunks. When we poke any of these fields we'll typecheck the DFunId declaration, and hence pull in interfaces that it refers to. See Note [Proper-match fields]. * Rough-match fields. During instance lookup, we use the is_cls_nm :: Name and is_tcs :: [RoughMatchTc] fields to perform a "rough match", *without* poking inside the DFunId. The rough-match fields allow us to say "definitely does not match", based only on Names. See GHC.Core.Unify Note [Rough matching in class and family instances] This laziness is very important; see #12367. Try hard to avoid pulling on the structured fields unless you really need the instance. * Another place to watch is InstEnv.instIsVisible, which needs the module to which the ClsInst belongs. We can get this from is_dfun_name. 
-} {- Note [Template tyvars are fresh] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The is_tvs field of a ClsInst has *completely fresh* tyvars. That is, they are * distinct from any other ClsInst * distinct from any tyvars free in predicates that may be looked up in the class instance environment Reason for freshness: we use unification when checking for overlap etc, and that requires the tyvars to be distinct. The invariant is checked by the ASSERT in lookupInstEnv'. Note [Proper-match fields] ~~~~~~~~~~~~~~~~~~~~~~~~~ The is_tvs, is_cls, is_tys fields are simply cached values, pulled out (lazily) from the dfun id. They are cached here simply so that we don't need to decompose the DFunId each time we want to match it. The hope is that the rough-match fields mean that we often never poke the proper-match fields. However, note that: * is_tvs must be a superset of the free vars of is_tys * is_tvs, is_tys may be alpha-renamed compared to the ones in the dfun Id Note [Haddock assumptions] ~~~~~~~~~~~~~~~~~~~~~~~~~~ For normal user-written instances, Haddock relies on * the SrcSpan of * the Name of * the is_dfun of * an Instance being equal to * the SrcSpan of * the instance head type of * the InstDecl used to construct the Instance. 
-} instanceDFunId :: ClsInst -> DFunId instanceDFunId = is_dfun updateClsInstDFun :: (DFunId -> DFunId) -> ClsInst -> ClsInst updateClsInstDFun tidy_dfun ispec = ispec { is_dfun = tidy_dfun (is_dfun ispec) } updateClsInstDFuns :: (DFunId -> DFunId) -> InstEnv -> InstEnv updateClsInstDFuns tidy_dfun (InstEnv rm) = InstEnv $ fmap (updateClsInstDFun tidy_dfun) rm instance NamedThing ClsInst where getName ispec = getName (is_dfun ispec) instance Outputable ClsInst where ppr = pprInstance pprDFunId :: DFunId -> SDoc -- Prints the analogous information to `pprInstance` -- but with just the DFunId pprDFunId dfun = hang dfun_header 2 (vcat [ text "--" <+> pprDefinedAt (getName dfun) , whenPprDebug (ppr dfun) ]) where dfun_header = ppr_overlap_dfun_hdr empty dfun pprInstance :: ClsInst -> SDoc -- Prints the ClsInst as an instance declaration pprInstance ispec = hang (pprInstanceHdr ispec) 2 (vcat [ text "--" <+> pprDefinedAt (getName ispec) , whenPprDebug (ppr (is_dfun ispec)) ]) -- * pprInstanceHdr is used in VStudio to populate the ClassView tree pprInstanceHdr :: ClsInst -> SDoc -- Prints the ClsInst as an instance declaration pprInstanceHdr (ClsInst { is_flag = flag, is_dfun = dfun }) = ppr_overlap_dfun_hdr (ppr flag) dfun ppr_overlap_dfun_hdr :: SDoc -> DFunId -> SDoc ppr_overlap_dfun_hdr flag_sdoc dfun = text "instance" <+> flag_sdoc <+> pprSigmaType (idType dfun) pprInstances :: [ClsInst] -> SDoc pprInstances ispecs = vcat (map pprInstance ispecs) instanceWarning :: ClsInst -> Maybe (WarningTxt GhcRn) instanceWarning = is_warn instanceHead :: ClsInst -> ([TyVar], Class, [Type]) -- Returns the head, using the fresh tyvars from the ClsInst instanceHead (ClsInst { is_tvs = tvs, is_cls = cls, is_tys = tys }) = (tvs, cls, tys) -- | Collects the names of concrete types and type constructors that make -- up the head of a class instance. 
For instance, given `class Foo a b`: -- -- `instance Foo (Either (Maybe Int) a) Bool` would yield -- [Either, Maybe, Int, Bool] -- -- Used in the implementation of ":info" in GHCi. -- -- The 'tcSplitSigmaTy' is because of -- instance Foo a => Baz T where ... -- The decl is an orphan if Baz and T are both not locally defined, -- even if Foo *is* locally defined orphNamesOfClsInst :: ClsInst -> NameSet orphNamesOfClsInst (ClsInst { is_cls_nm = cls_nm, is_tys = tys }) = orphNamesOfTypes tys `unionNameSet` unitNameSet cls_nm instanceSig :: ClsInst -> ([TyVar], [Type], Class, [Type]) -- Decomposes the DFunId instanceSig ispec = tcSplitDFunTy (idType (is_dfun ispec)) mkLocalClsInst :: DFunId -> OverlapFlag -> [TyVar] -> Class -> [Type] -> Maybe (WarningTxt GhcRn) -> ClsInst -- Used for local instances, where we can safely pull on the DFunId. -- Consider using newClsInst instead; this will also warn if -- the instance is an orphan. mkLocalClsInst dfun oflag tvs cls tys warn = ClsInst { is_flag = oflag, is_dfun = dfun , is_tvs = tvs , is_dfun_name = dfun_name , is_cls = cls, is_cls_nm = cls_name , is_tys = tys, is_tcs = RM_KnownTc cls_name : roughMatchTcs tys , is_orphan = orph, is_warn = warn } where cls_name = className cls dfun_name = idName dfun this_mod = assert (isExternalName dfun_name) $ nameModule dfun_name is_local name = nameIsLocalOrFrom this_mod name -- Compute orphanhood. See Note [Orphans] in GHC.Core.InstEnv (cls_tvs, fds) = classTvsFds cls arg_names = [filterNameSet is_local (orphNamesOfType ty) | ty <- tys] -- See Note [When exactly is an instance decl an orphan?] 
orph | is_local cls_name = NotOrphan (nameOccName cls_name) | all notOrphan mb_ns = NE.head mb_ns | otherwise = IsOrphan notOrphan NotOrphan{} = True notOrphan _ = False mb_ns :: NonEmpty IsOrphan -- One for each fundep; a locally-defined name -- that is not in the "determined" arguments mb_ns = case nonEmpty fds of Nothing -> NE.singleton (choose_one arg_names) Just fds -> fmap do_one fds do_one (_ltvs, rtvs) = choose_one [ns | (tv,ns) <- cls_tvs `zip` arg_names , not (tv `elem` rtvs)] choose_one nss = chooseOrphanAnchor (unionNameSets nss) mkImportedClsInst :: Name -- ^ the name of the class -> [RoughMatchTc] -- ^ the rough match signature of the instance -> Name -- ^ the 'Name' of the dictionary binding -> DFunId -- ^ the 'Id' of the dictionary. -> OverlapFlag -- ^ may this instance overlap? -> IsOrphan -- ^ is this instance an orphan? -> Maybe (WarningTxt GhcRn) -- ^ warning emitted when solved -> ClsInst -- Used for imported instances, where we get the rough-match stuff -- from the interface file -- The bound tyvars of the dfun are guaranteed fresh, because -- the dfun has been typechecked out of the same interface file mkImportedClsInst cls_nm mb_tcs dfun_name dfun oflag orphan warn = ClsInst { is_flag = oflag, is_dfun = dfun , is_tvs = tvs, is_tys = tys , is_dfun_name = dfun_name , is_cls_nm = cls_nm, is_cls = cls , is_tcs = RM_KnownTc cls_nm : mb_tcs , is_orphan = orphan , is_warn = warn } where (tvs, _, cls, tys) = tcSplitDFunTy (idType dfun) {- Note [When exactly is an instance decl an orphan?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (See GHC.Iface.Make.instanceToIfaceInst, which implements this.) See Note [Orphans] in GHC.Core Roughly speaking, an instance is an orphan if its head (after the =>) mentions nothing defined in this module. Functional dependencies complicate the situation though. Consider module M where { class C a b | a -> b } and suppose we are compiling module X: module X where import M data T = ... instance C Int T where ... 
This instance is an orphan, because when compiling a third module Y
we might get a constraint (C Int v), and we'd want to improve v to T.  So
we must make sure X's instances are loaded, even if we do not directly
use anything from X.

More precisely, an instance is an orphan iff

  If there are no fundeps, then at least one of the names in
  the instance head is locally defined.

  If there are fundeps, then for every fundep, at least one of the
  names free in a *non-determined* part of the instance head is
  defined in this module.

(Note that these conditions hold trivially if the class is locally
defined.)

************************************************************************
*                                                                      *
                InstEnv, ClsInstEnv
*                                                                      *
************************************************************************

A @ClsInstEnv@ contains all the instances of that class.  The @Id@ inside
a ClsInstEnv mapping is the dfun for that instance.

If class C maps to a list containing the item ([a,b], [t1,t2,t3], dfun), then

        forall a b, C t1 t2 t3  can be constructed by dfun

or, to put it another way, we have

        instance (...) => C t1 t2 t3,  witnessed by dfun
-}

---------------------------------------------------
{- Note [InstEnv determinism]
~~~~~~~~~~~~~~~~~~~~~~~~~~
We turn InstEnvs into a list in some places that don't directly affect
the ABI. That happens when we create output for `:info`.
Unfortunately that nondeterminism is nonlocal and it's hard to tell
what it affects without following a chain of functions. It's also easy
to accidentally make that nondeterminism affect the ABI. Furthermore
the envs should be relatively small, so it should be free to use
deterministic maps here. Testing with nofib and validate detected no
difference between UniqFM and UniqDFM. See also Note [Deterministic UniqFM]
-}

-- Internally it's safe to index this map
-- by @Class@, the class's @Name@, the class's @TyCon@
-- or its @Unique@.
-- This is since: -- getUnique cls == getUnique (className cls) == getUnique (classTyCon cls) -- -- We still use Class as key type as it's both the common case -- and conveys the meaning better. But the implementation of --InstEnv is a bit more lax internally. newtype InstEnv = InstEnv (RoughMap ClsInst) -- Maps Class to instances for that class -- See Note [InstEnv determinism] instance Outputable InstEnv where ppr (InstEnv rm) = pprInstances $ elemsRM rm -- | 'InstEnvs' represents the combination of the global type class instance -- environment, the local type class instance environment, and the set of -- transitively reachable orphan modules (according to what modules have been -- directly imported) used to test orphan instance visibility. data InstEnvs = InstEnvs { ie_global :: InstEnv, -- External-package instances ie_local :: InstEnv, -- Home-package instances ie_visible :: VisibleOrphanModules -- Set of all orphan modules transitively -- reachable from the module being compiled -- See Note [Instance lookup and orphan instances] } -- | Set of visible orphan modules, according to what modules have been directly -- imported. This is based off of the dep_orphs field, which records -- transitively reachable orphan modules (modules that define orphan instances). type VisibleOrphanModules = ModuleSet -- INVARIANTS: -- * The is_tvs are distinct in each ClsInst -- of a ClsInstEnv (so we can safely unify them) -- Thus, the @ClsInstEnv@ for @Eq@ might contain the following entry: -- [a] ===> dfun_Eq_List :: forall a. Eq a => Eq [a] -- The "a" in the pattern must be one of the forall'd variables in -- the dfun type. 
-- | An instance environment containing no instances at all.
emptyInstEnv :: InstEnv
emptyInstEnv = InstEnv emptyRM

-- | Build an 'InstEnv' from a list of instances, by extending the
-- empty environment one instance at a time.
mkInstEnv :: [ClsInst] -> InstEnv
mkInstEnv = extendInstEnvList emptyInstEnv

-- | All instances in the environment, in a deterministic order.
instEnvElts :: InstEnv -> [ClsInst]
instEnvElts (InstEnv rm) = elemsRM rm -- See Note [InstEnv determinism]

-- | The instances whose head class has the given 'Name'.
instEnvEltsForClass :: InstEnv -> Name -> [ClsInst]
instEnvEltsForClass (InstEnv rm) cls_nm = lookupRM [RML_KnownTc cls_nm] rm

-- | The set of distinct classes that have at least one instance in
-- this environment.
-- N.B. this is not particularly efficient but used only by GHCi.
instEnvClasses :: InstEnv -> UniqDSet Class
instEnvClasses ie = mkUniqDSet $ map is_cls (instEnvElts ie)

-- | Test if an instance is visible, by checking that its origin module
-- is in 'VisibleOrphanModules'.
-- See Note [Instance lookup and orphan instances]
instIsVisible :: VisibleOrphanModules -> ClsInst -> Bool
instIsVisible vis_mods ispec
  -- NB: Instances from the interactive package always are visible. We can't
  -- add interactive modules to the set since we keep creating new ones
  -- as a GHCi session progresses.
  = case nameModule_maybe (is_dfun_name ispec) of
      Nothing -> True
      Just mod
        -- Interactive modules are always visible (see NB above).
        | isInteractiveModule mod     -> True
        -- Orphan instances are visible only if their defining module is
        -- transitively reachable from the module being compiled.
        | IsOrphan <- is_orphan ispec -> mod `elemModuleSet` vis_mods
        -- Non-orphans are always visible: if the instance matched, its
        -- class or type must already be in scope.
        | otherwise                   -> True

-- | All visible instances of the given class, local instances first.
classInstances :: InstEnvs -> Class -> [ClsInst]
classInstances envs cls = classNameInstances envs (className cls)

-- | All visible instances of the class with the given 'Name',
-- home-package instances before external-package ones.
classNameInstances :: InstEnvs -> Name -> [ClsInst]
classNameInstances (InstEnvs { ie_global = pkg_ie, ie_local = home_ie, ie_visible = vis_mods }) cls
  = get home_ie ++ get pkg_ie
  where
    get :: InstEnv -> [ClsInst]
    get ie = filter (instIsVisible vis_mods) (instEnvEltsForClass ie cls)

-- | Checks for an exact match of ClsInst in the instance environment.
-- We use this when we do signature checking in "GHC.Tc.Module" memberInstEnv :: InstEnv -> ClsInst -> Bool memberInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs } ) = any (identicalDFunType ins_item) (fst $ lookupRM' (map roughMatchTcToLookup tcs) rm) where identicalDFunType cls1 cls2 = eqType (varType (is_dfun cls1)) (varType (is_dfun cls2)) -- | Makes no particular effort to detect conflicts. unionInstEnv :: InstEnv -> InstEnv -> InstEnv unionInstEnv (InstEnv a) (InstEnv b) = InstEnv (a `unionRM` b) extendInstEnvList :: InstEnv -> [ClsInst] -> InstEnv extendInstEnvList inst_env ispecs = foldl' extendInstEnv inst_env ispecs extendInstEnv :: InstEnv -> ClsInst -> InstEnv extendInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs }) = InstEnv $ insertRM tcs ins_item rm filterInstEnv :: (ClsInst -> Bool) -> InstEnv -> InstEnv filterInstEnv pred (InstEnv rm) = InstEnv $ filterRM pred rm anyInstEnv :: (ClsInst -> Bool) -> InstEnv -> Bool anyInstEnv pred (InstEnv rm) = foldRM (\x rest -> pred x || rest) False rm mapInstEnv :: (ClsInst -> ClsInst) -> InstEnv -> InstEnv mapInstEnv f (InstEnv rm) = InstEnv (f <$> rm) deleteFromInstEnv :: InstEnv -> ClsInst -> InstEnv deleteFromInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs }) = InstEnv $ filterMatchingRM (not . identicalClsInstHead ins_item) tcs rm deleteDFunFromInstEnv :: InstEnv -> DFunId -> InstEnv -- Delete a specific instance fron an InstEnv deleteDFunFromInstEnv (InstEnv rm) dfun = InstEnv $ filterMatchingRM (not . same_dfun) [RM_KnownTc (className cls)] rm where (_, _, cls, _) = tcSplitDFunTy (idType dfun) same_dfun (ClsInst { is_dfun = dfun' }) = dfun == dfun' identicalClsInstHead :: ClsInst -> ClsInst -> Bool -- ^ True when when the instance heads are the same -- e.g. 
both are Eq [(a,b)] -- Used for overriding in GHCi -- Obviously should be insensitive to alpha-renaming identicalClsInstHead (ClsInst { is_tcs = rough1, is_tys = tys1 }) (ClsInst { is_tcs = rough2, is_tys = tys2 }) = not (instanceCantMatch rough1 rough2) -- Fast check for no match, uses the "rough match" fields; -- also accounts for class name. && isJust (tcMatchTys tys1 tys2) && isJust (tcMatchTys tys2 tys1) {- ************************************************************************ * * Looking up an instance * * ************************************************************************ @lookupInstEnv@ looks up in a @InstEnv@, using a one-way match. Since the env is kept ordered, the first match must be the only one. The thing we are looking up can have an arbitrary "flexi" part. Note [Instance lookup and orphan instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we are compiling a module M, and we have a zillion packages loaded, and we are looking up an instance for C (T W). If we find a match in module 'X' from package 'p', should be "in scope"; that is, is p:X in the transitive closure of modules imported from M? The difficulty is that the "zillion packages" might include ones loaded through earlier invocations of the GHC API, or earlier module loads in GHCi. They might not be in the dependencies of M itself; and if not, the instances in them should not be visible. #2182, #8427. There are two cases: * If the instance is *not an orphan*, then module X defines C, T, or W. And in order for those types to be involved in typechecking M, it must be that X is in the transitive closure of M's imports. So we can use the instance. * If the instance *is an orphan*, the above reasoning does not apply. So we keep track of the set of orphan modules transitively below M; this is the ie_visible field of InstEnvs, of type VisibleOrphanModules. If module p:X is in this set, then we can use the instance, otherwise we can't. 
Note [Rules for instance lookup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These functions implement the carefully-written rules in the user manual section on "overlapping instances". At risk of duplication, here are the rules. If the rules change, change this text and the user manual simultaneously. The link may be this: http://www.haskell.org/ghc/docs/latest/html/users_guide/glasgow_exts.html#instance-overlap The willingness to be overlapped or incoherent is a property of the instance declaration itself, controlled by its `OverlapMode`, as follows * An instance is "incoherent" (OverlapMode = `Incoherent` or `NonCanonical`) if it has an `INCOHERENT` pragma, or if it appears in a module compiled with `-XIncoherentInstances`. In those cases: -fspecialise-incoherents on => Incoherent -fspecialise-incoherents off => NonCanonical NB: it is on by default * An instance is "overlappable" (OverlapMode = `Overlappable` or `Overlaps`) if it has an `OVERLAPPABLE` or `OVERLAPS` pragma, or if it appears in a module compiled with `-XOverlappingInstances`, or if the instance is incoherent. * An instance is "overlapping" (OverlapMode = `Overlapping` or `Overlaps`) if it has an `OVERLAPPING` or `OVERLAPS` pragma, or if it appears in a module compiled with `-XOverlappingInstances`, or if the instance is incoherent. Now suppose that, in some client module, we are searching for an instance of the target constraint (C ty1 .. tyn). The search works like this. (IL0) If there are any local Givens that match (potentially unifying any metavariables, even untouchable ones) the target constraint, the search fails. See Note [Instance and Given overlap] in GHC.Tc.Solver.Dict. (IL1) Find all instances `I` that *match* the target constraint; that is, the target constraint is a substitution instance of `I`. These instance declarations are the /candidates/. (IL2) If there are no candidates, the search fails. 
(IL3) Eliminate any candidate `IX` for which there is another candidate `IY` such that both of the following hold: - `IY` is strictly more specific than `IX`. That is, `IY` is a substitution instance of `IX` but not vice versa. - Either `IX` is *overlappable*, or `IY` is *overlapping*. (This "either/or" design, rather than a "both/and" design, allow a client to deliberately override an instance from a library, without requiring a change to the library.) This is done by `pruneOverlappingMatches` (IL4) If all the remaining candidates are *incoherent*, the search succeeds, returning an arbitrary surviving candidate. If any coherent or non-canonical incoherent unifiers were discarded, return NoUnifiers EvNonCanonical; if only canonical incoherent unifiers were discarded, return NoUnifiers EvCanonical (IL5) If more than one non-*incoherent* candidate remains, the search fails. Otherwise there is exactly one non-*incoherent* candidate; call it the "prime candidate". (IL6) Now find all instances that unify with the target constraint, but do not match it. Such non-candidate instances might match when the target constraint is further instantiated. If any are *coherent* (not incoherent) return them as PotentialUnifiers. If all are *incoherent* (OverlapFlag = Incoherent or NonCanonical) return (NoUnifiers nc), where nc is EvNonCanonical if any of the discarded unifiers are NonCanonical. Notice that these rules are not influenced by flag settings in the client module, where the instances are *used*. These rules make it possible for a library author to design a library that relies on overlapping instances without the client having to know. Note [Overlapping instances] (NB: these notes are quite old) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Overlap is permitted, but only in such a way that one can make a unique choice when looking up. That is, overlap is only permitted if one template matches the other, or vice versa. 
So this is ok: [a] [Int] but this is not (Int,a) (b,Int) If overlap is permitted, the list is kept most specific first, so that the first lookup is the right choice. For now we just use association lists. \subsection{Avoiding a problem with overlapping} Consider this little program: \begin{pseudocode} class C a where c :: a class C a => D a where d :: a instance C Int where c = 17 instance D Int where d = 13 instance C a => C [a] where c = [c] instance ({- C [a], -} D a) => D [a] where d = c instance C [Int] where c = [37] main = print (d :: [Int]) \end{pseudocode} What do you think `main' prints (assuming we have overlapping instances, and all that turned on)? Well, the instance for `D' at type `[a]' is defined to be `c' at the same type, and we've got an instance of `C' at `[Int]', so the answer is `[37]', right? (the generic `C [a]' instance shouldn't apply because the `C [Int]' instance is more specific). Ghc-4.04 gives `[37]', while ghc-4.06 gives `[17]', so 4.06 is wrong. That was easy ;-) Let's just consult hugs for good measure. Wait - if I use old hugs (pre-September99), I get `[17]', and stranger yet, if I use hugs98, it doesn't even compile! What's going on!? What hugs complains about is the `D [a]' instance decl. \begin{pseudocode} ERROR "mj.hs" (line 10): Cannot build superclass instance *** Instance : D [a] *** Context supplied : D a *** Required superclass : C [a] \end{pseudocode} You might wonder what hugs is complaining about. It's saying that you need to add `C [a]' to the context of the `D [a]' instance (as appears in comments). But there's that `C [a]' instance decl one line above that says that I can reduce the need for a `C [a]' instance to the need for a `C a' instance, and in this case, I already have the necessary `C a' instance (since we have `D a' explicitly in the context, and `C' is a superclass of `D'). Unfortunately, the above reasoning indicates a premature commitment to the generic `C [a]' instance. 
I.e., it prematurely rules out the more specific instance `C [Int]'. This is the mistake that ghc-4.06 makes. The fix is to add the context that hugs suggests (uncomment the `C [a]'), effectively deferring the decision about which instance to use. Now, interestingly enough, 4.04 has this same bug, but it's covered up in this case by a little known `optimization' that was disabled in 4.06. Ghc-4.04 silently inserts any missing superclass context into an instance declaration. In this case, it silently inserts the `C [a]', and everything happens to work out. (See `GHC.Types.Id.Make.mkDictFunId' for the code in question. Search for `Mark Jones', although Mark claims no credit for the `optimization' in question, and would rather it stopped being called the `Mark Jones optimization' ;-) So, what's the fix? I think hugs has it right. Here's why. Let's try something else out with ghc-4.04. Let's add the following line: d' :: D a => [a] d' = c Everyone raise their hand who thinks that `d :: [Int]' should give a different answer from `d' :: [Int]'. Well, in ghc-4.04, it does. The `optimization' only applies to instance decls, not to regular bindings, giving inconsistent behavior. Old hugs had this same bug. Here's how we fixed it: like GHC, the list of instances for a given class is ordered, so that more specific instances come before more generic ones. For example, the instance list for C might contain: ..., C Int, ..., C a, ... When we go to look for a `C Int' instance we'll get that one first. But what if we go looking for a `C b' (`b' is unconstrained)? We'll pass the `C Int' instance, and keep going. But if `b' is unconstrained, then we don't know yet if the more specific instance will eventually apply. GHC keeps going, and matches on the generic `C a'. The fix is to, at each step, check to see if there's a reverse match, and if so, abort the search. This prevents hugs from prematurely choosing a generic instance when a more specific one exists. 
--Jeff BUT NOTE [Nov 2001]: we must actually *unify* not reverse-match in this test. Suppose the instance envt had ..., forall a b. C a a b, ..., forall a b c. C a b c, ... (still most specific first) Now suppose we are looking for (C x y Int), where x and y are unconstrained. C x y Int doesn't match the template {a,b} C a a b but neither does C a a b match the template {x,y} C x y Int But still x and y might subsequently be unified so they *do* match. Simple story: unify, don't match. Note [Coherence and specialisation: overview] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC's specialiser relies on the Coherence Assumption: that if d1 :: C tys d2 :: C tys then the dictionary d1 can be used in place of d2 and vice versa; it is as if (C tys) is a singleton type. If d1 and d2 are interchangeable, we say that they constitute /canonical evidence/ for (C tys). We have a special data type, `CanonoicalEvidence`, for recording whether evidence is canonical. Let's use this example class C a where { op :: a -> Int } instance C [a] where {...} -- (I1) instance {-# OVERLAPPING #-} C [Int] where {...} -- (I2) instance C a => C (Maybe a) where {...} -- (I3) instance {-# INCOHERENT #-} C (Maybe Int) where {...} -- (I4) instance C Int where {...} -- (I5) * When solving (C tys) from the top-level instances, we generally insist that there is a unique, most-specific match. (Incoherent instances change the picture a bit: see Note [Rules for instance lookup].) Example: [W] C [Int] -- Pick (I2) [W] C [Char] -- Pick (I1); does not match (I2) Caveat: if different usage sites see different instances (which the programmer can contrive, with some effort), all bets are off; we really can't make any guarantees at all. * But what about [W] C [b]? This might arise from risky :: b -> Int risky x = op [x] We can't pick (I2) because `b` is not Int. 
But if we pick (I1), and later the simplifier inlines a call (risky @Int) we'll get a dictionary of type (C [Int]) built by (I1), which might be utterly different to the dictionary of type (C [Int]) built by (I2). That breaks the Coherence Assumption. So GHC declines to pick either, and rejects `risky`. You have to write a different signature notRisky :: C [b] => b -> Int notRisky x = op [x] so that the dictionary is resolved at the call site. * The INCOHERENT pragma tells GHC to choose an instance anyway: see Note [Rules for instance lookup] step (IL6). Suppose we have veryRisky :: C b => b -> Int veryRisky x = op (Just x) So we have [W] C (Maybe b). Because (I4) is INCOHERENT, GHC is allowed to pick (I3). Of course, this risks breaking the Coherence Assumption, as described above. * What about the incoherence from step (IL4)? For example class D a b where { opD :: a -> b -> String } instance {-# INCOHERENT #-} D Int b where {...} -- (I7) instance {-# INCOHERENT #-} D a Int where {...} -- (I8) g (x::Int) = opD x x -- [W] D Int Int Here both (I7) and (I8) match, GHC picks an arbitrary one. So INCOHERENT may break the Coherence Assumption. But sometimes that is fine, because the programmer promises that it doesn't matter which one is chosen. A good example is in the `optics` library: data IxEq i is js where { IxEq :: IxEq i is is } class AppendIndices xs ys ks | xs ys -> ks where appendIndices :: IxEq i (Curry xs (Curry ys i)) (Curry ks i) instance {-# INCOHERENT #-} xs ~ zs => AppendIndices xs '[] zs where appendIndices = IxEq instance ys ~ zs => AppendIndices '[] ys zs where appendIndices = IxEq Here `xs` and `ys` are type-level lists, and for type inference purposes we want to solve the `AppendIndices` constraint when /either/ of them are the empty list. The dictionaries are the same in both cases (indeed the dictionary type is a singleton!), so we really don't care which is used. See #23287 for discussion. 
In short, sometimes we want to specialise on these incoherently-selected dictionaries, and sometimes we don't. It would be best to have a per-instance pragma, but for now we have a global flag: * If an instance has an `{-# INCOHERENT #-}` pragma, we the `OverlapFlag` of the `ClsInst` to label it as either * `Incoherent`: meaning incoherent but still specialisable, or * `NonCanonical`: meaning incoherent and not specialisable. The module-wide `-fspecialise-incoherents` flag (on by default) determines which choice is made. See GHC.Tc.Utils.Instantiate.getOverlapFlag. The rest of this note describes what happens for `NonCanonical` instances, i.e. with `-fno-specialise-incoherents`. To avoid this incoherence breaking the specialiser, * We label as "non-canonical" the dictionary constructed by a (potentially) incoherent use of an ClsInst whose `OverlapFlag` is `NonCanonical`. * We do not specialise a function if there is a non-canonical dictionary in the /transistive dependencies/ of its dictionary arguments. To see the transitive closure issue, consider deeplyRisky :: C b => b -> Int deeplyRisky x = op (Just (Just x)) From (op (Just (Just x))) we get [W] d1 : C (Maybe (Maybe b)) which we solve (coherently!) via (I3), giving [W] d2 : C (Maybe b) Now we can only solve this incoherently. So we end up with deeplyRisky @b (d1 :: C b) = op @(Maybe (Maybe b)) d1 where d1 :: C (Maybe (Maybe b)) = $dfI3 d2 -- Coherent decision d2 :: C (Maybe b) = $sfI3 d1 -- Incoherent decision So `d2` is incoherent, and hence (transitively) so is `d1`. Here are the moving parts: * GHC.Core.InstEnv.lookupInstEnv tells if any incoherent unifiers were discarded in step (IL6) of the instance lookup. * That info is recorded in the `cir_is_coherent` field of `OneInst`, and thence transferred to the `ep_is_coherent` field of the `EvBind` for the dictionary. * `GHC.HsToCore.Binds.dsHsWrapper` desugars the evidence application (f d) into (nospec f d) if `d` is incoherent. 
It has to do a dependency analysis to determine transitive dependencies, but we need to do that anyway. See Note [Desugaring non-canonical evidence] in GHC.HsToCore.Binds. See also Note [nospecId magic] in GHC.Types.Id.Make. -} type DFunInstType = Maybe Type -- Just ty => Instantiate with this type -- Nothing => Instantiate with any type of this tyvar's kind -- See Note [DFunInstType: instantiating types] type InstMatch = (ClsInst, [DFunInstType]) type ClsInstLookupResult = ( [InstMatch] -- Successful matches , PotentialUnifiers -- These don't match but do unify , [InstMatch] ) -- Unsafe overlapped instances under Safe Haskell -- (see Note [Safe Haskell Overlapping Instances] in -- GHC.Tc.Solver). {- Note [DFunInstType: instantiating types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A successful match is a ClsInst, together with the types at which the dfun_id in the ClsInst should be instantiated The instantiating types are (Either TyVar Type)s because the dfun might have some tyvars that *only* appear in arguments dfun :: forall a b. C a b, Ord b => D [a] When we match this against D [ty], we return the instantiating types [Just ty, Nothing] where the 'Nothing' indicates that 'b' can be freely instantiated. (The caller instantiates it to a flexi type variable, which will presumably later become fixed via functional dependencies.) Note [Infinitary substitution in lookup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider class C a b instance C c c instance C d (Maybe d) [W] C e (Maybe e) You would think we could just use the second instance, because the first doesn't unify. But that's just ever so slightly wrong. The reason we check for unifiers along with matchers is that we don't want the possibility that a type variable instantiation could cause an instance choice to change. Yet if we have type family M = Maybe M and choose (e |-> M), then both instances match. This is absurd, but we cannot rule it out. 
Yet, worrying about this case is awfully inconvenient to users, and so we pretend the problem doesn't exist, by considering a lookup that runs into this occurs-check issue to indicate that an instance surely does not apply (i.e. is like the SurelyApart case). In the brief time that we didn't treat infinitary substitutions specially, two tickets were filed: #19044 and #19052, both trying to do Real Work. Why don't we just exclude any instances that are MaybeApart? Because we might have a [W] C e (F e), where F is a type family. The second instance above does not match, but it should be included as a future possibility. Unification will return MaybeApart MARTypeFamily in this case. What can go wrong with this design choice? We might get incoherence -- but not loss of type safety. In particular, if we have [W] C M M (for the M type family above), then GHC might arbitrarily choose either instance, depending on how M reduces (or doesn't). For type families, we can't just ignore the problem (as we essentially do here), because doing so would give us a hole in the type safety proof (as explored in Section 6 of "Closed Type Families with Overlapping Equations", POPL'14). This possibility of an infinitary substitution manifests as closed type families that look like they should reduce, but don't. Users complain: #9082 and #17311. For open type families, we actually can have unsoundness if we don't take infinitary substitutions into account: #8162. But, luckily, for class instances, we just risk coherence -- not great, but it seems better to give users what they likely want. (Also, note that this problem existed for the entire decade of 201x without anyone noticing, so it's manifestly not ruining anyone's day.) -} -- |Look up an instance in the given instance environment. The given class application must match exactly -- one instance and the match may not contain any flexi type variables. If the lookup is unsuccessful, -- yield 'Left errorMessage'. 
lookupUniqueInstEnv :: InstEnvs -> Class -> [Type] -> Either LookupInstanceErrReason (ClsInst, [Type]) lookupUniqueInstEnv instEnv cls tys = case lookupInstEnv False instEnv cls tys of ([(inst, inst_tys)], _, _) | noFlexiVar -> Right (inst, inst_tys') | otherwise -> Left $ LookupInstErrFlexiVar where inst_tys' = [ty | Just ty <- inst_tys] noFlexiVar = all isJust inst_tys _other -> Left $ LookupInstErrNotFound -- | Why a particular typeclass application couldn't be looked up. data LookupInstanceErrReason = -- | Tyvars aren't an exact match. LookupInstErrNotExact | -- | One of the tyvars is flexible. LookupInstErrFlexiVar | -- | No matching instance was found. LookupInstErrNotFound deriving (Generic) -- | `CanonicalEvidence` says whether a piece of evidence has a singleton type; -- For example, given (d1 :: C Int), will any other (d2 :: C Int) do equally well? -- See Note [Coherence and specialisation: overview] above, and -- Note [Desugaring non-canonical evidence] in GHC.HsToCore.Binds data CanonicalEvidence = EvCanonical | EvNonCanonical andCanEv :: CanonicalEvidence -> CanonicalEvidence -> CanonicalEvidence -- Only canonical if both are andCanEv EvCanonical EvCanonical = EvCanonical andCanEv _ _ = EvNonCanonical -- See Note [Recording coherence information in `PotentialUnifiers`] data PotentialUnifiers = NoUnifiers CanonicalEvidence -- Either there were no unifiers, or all were incoherent -- -- NoUnifiers EvNonCanonical: -- We discarded (via INCOHERENT) some instances that unify, -- and that are marked NonCanonical; so the matching instance -- should be traeated as EvNonCanonical -- NoUnifiers EvCanonical: -- We discarded no NonCanonical incoherent unifying instances, -- so the matching instance can be treated as EvCanonical | OneOrMoreUnifiers (NonEmpty ClsInst) -- There are some /coherent/ unifiers; here they are -- -- This list is lazy as we only look at all the unifiers when -- printing an error message. 
It can be expensive to compute all -- the unifiers because if you are matching something like C a[sk] then -- all instances will unify. {- Note [Recording coherence information in `PotentialUnifiers`] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we find a matching instance, there might be other instances that could potentially unify with the goal. For `INCOHERENT` instances, we don't care (see steps IL4 and IL6 in Note [Rules for instance lookup]). But if we have potentially unifying coherent instance, we report these `OneOrMoreUnifiers` so that `matchInstEnv` can go down the `NotSure` route. If this hurdle is passed, i.e. we have a unique solution up to `INCOHERENT` instances, the specialiser needs to know if that unique solution is canonical or not (see Note [Coherence and specialisation: overview] for why we care at all). So when the set of potential unifiers is empty, we record in `NoUnifiers` if the one solution is `Canonical`. -} instance Outputable CanonicalEvidence where ppr EvCanonical = text "canonical" ppr EvNonCanonical = text "non-canonical" instance Outputable PotentialUnifiers where ppr (NoUnifiers c) = text "NoUnifiers" <+> ppr c ppr xs = ppr (getCoherentUnifiers xs) instance Semigroup PotentialUnifiers where NoUnifiers c1 <> NoUnifiers c2 = NoUnifiers (c1 `andCanEv` c2) NoUnifiers _ <> u = u OneOrMoreUnifiers (unifier :| unifiers) <> u = OneOrMoreUnifiers (unifier :| (unifiers <> getCoherentUnifiers u)) getCoherentUnifiers :: PotentialUnifiers -> [ClsInst] getCoherentUnifiers NoUnifiers{} = [] getCoherentUnifiers (OneOrMoreUnifiers cls) = NE.toList cls nullUnifiers :: PotentialUnifiers -> Bool nullUnifiers NoUnifiers{} = True nullUnifiers _ = False lookupInstEnv' :: InstEnv -- InstEnv to look in -> VisibleOrphanModules -- But filter against this -> Class -> [Type] -- What we are looking for -> ([InstMatch], -- Successful matches PotentialUnifiers) -- These don't match but do unify -- (no incoherent ones in here) -- The 
second component of the result pair happens when we look up -- Foo [a] -- in an InstEnv that has entries for -- Foo [Int] -- Foo [b] -- Then which we choose would depend on the way in which 'a' -- is instantiated. So we report that Foo [b] is a match (mapping b->a) -- but Foo [Int] is a unifier. This gives the caller a better chance of -- giving a suitable error message lookupInstEnv' (InstEnv rm) vis_mods cls tys = (foldr check_match [] rough_matches, check_unifiers rough_unifiers) where (rough_matches, rough_unifiers) = lookupRM' rough_tcs rm rough_tcs = RML_KnownTc (className cls) : roughMatchTcsLookup tys -------------- check_match :: ClsInst -> [InstMatch] -> [InstMatch] check_match item@(ClsInst { is_tvs = tpl_tvs, is_tys = tpl_tys }) acc | not (instIsVisible vis_mods item) = acc -- See Note [Instance lookup and orphan instances] | Just subst <- tcMatchTys tpl_tys tys = ((item, map (lookupTyVar subst) tpl_tvs) : acc) | otherwise = acc check_unifiers :: [ClsInst] -> PotentialUnifiers check_unifiers [] = NoUnifiers EvCanonical check_unifiers (item@ClsInst { is_tvs = tpl_tvs, is_tys = tpl_tys }:items) | not (instIsVisible vis_mods item) = check_unifiers items -- See Note [Instance lookup and orphan instances] -- If it matches, check_match has gotten it, so skip over it here | Just {} <- tcMatchTys tpl_tys tys = check_unifiers items -- Does not match, so next check whether the things unify -- See Note [Overlapping instances] | otherwise = assertPpr (tys_tv_set `disjointVarSet` tpl_tv_set) ((ppr cls <+> ppr tys) $$ (ppr tpl_tvs <+> ppr tpl_tys)) $ -- Unification will break badly if the variables overlap -- They shouldn't because we allocate separate uniques for them -- See Note [Template tyvars are fresh] case tcUnifyTysFG instanceBindFun tpl_tys tys of -- We consider MaybeApart to be a case where the instance might -- apply in the future. This covers an instance like C Int and -- a target like [W] C (F a), where F is a type family. 
SurelyApart -> check_unifiers items -- See Note [Infinitary substitution in lookup] MaybeApart MARInfinite _ -> check_unifiers items _ -> add_unifier item (check_unifiers items) where tpl_tv_set = mkVarSet tpl_tvs tys_tv_set = tyCoVarsOfTypes tys add_unifier :: ClsInst -> PotentialUnifiers -> PotentialUnifiers -- Record that we encountered non-canonical instances: -- Note [Coherence and specialisation: overview] add_unifier item other_unifiers | not (isIncoherent item) = OneOrMoreUnifiers (item :| getCoherentUnifiers other_unifiers) -- So `item` is incoherent; see Note [Incoherent instances] | otherwise = case other_unifiers of OneOrMoreUnifiers{} -> other_unifiers NoUnifiers{} | isNonCanonical item -> NoUnifiers EvNonCanonical | otherwise -> other_unifiers --------------- -- This is the common way to call this function. lookupInstEnv :: Bool -- Check Safe Haskell overlap restrictions -> InstEnvs -- External and home package inst-env -> Class -> [Type] -- What we are looking for -> ClsInstLookupResult -- ^ See Note [Rules for instance lookup] -- ^ See Note [Safe Haskell Overlapping Instances] in "GHC.Tc.Solver" -- ^ See Note [Safe Haskell Overlapping Instances Implementation] in "GHC.Tc.Solver" lookupInstEnv check_overlap_safe (InstEnvs { ie_global = pkg_ie , ie_local = home_ie , ie_visible = vis_mods }) cls tys = (final_matches, final_unifs, unsafe_overlapped) where (home_matches, home_unifs) = lookupInstEnv' home_ie vis_mods cls tys (pkg_matches, pkg_unifs) = lookupInstEnv' pkg_ie vis_mods cls tys all_matches = home_matches <> pkg_matches all_unifs = home_unifs <> pkg_unifs final_matches = pruneOverlappedMatches all_matches -- Even if the unifs is non-empty (an error situation) -- we still prune the matches, so that the error message isn't -- misleading (complaining of multiple matches when some should be -- overlapped away) unsafe_overlapped = case final_matches of [match] -> check_safe match _ -> [] -- If the selected match is incoherent, discard all unifiers 
-- See (IL4) of Note [Rules for instance lookup] final_unifs = case final_matches of (m:ms) | isIncoherent (fst m) -- Incoherent match, so discard all unifiers, but -- keep track of dropping coherent or non-canonical ones -> assertPpr (null ms) (ppr final_matches) $ case all_unifs of OneOrMoreUnifiers{} -> NoUnifiers EvNonCanonical NoUnifiers{} -> all_unifs _ -> all_unifs -- Note [Safe Haskell isSafeOverlap] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We restrict code compiled in 'Safe' mode from overriding code -- compiled in any other mode. The rationale is that code compiled -- in 'Safe' mode is code that is untrusted by the ghc user. So -- we shouldn't let that code change the behaviour of code the -- user didn't compile in 'Safe' mode since that's the code they -- trust. So 'Safe' instances can only overlap instances from the -- same module. A same instance origin policy for safe compiled -- instances. check_safe (inst,_) = case check_overlap_safe && unsafeTopInstance inst of -- make sure it only overlaps instances from the same module True -> go [] all_matches -- most specific is from a trusted location. False -> [] where go bad [] = bad go bad (i@(x,_):unchecked) = if inSameMod x || isOverlappable x then go bad unchecked else go (i:bad) unchecked inSameMod b = let na = getName $ getName inst la = isInternalName na nb = getName $ getName b lb = isInternalName nb in (la && lb) || (nameModule na == nameModule nb) -- We consider the most specific instance unsafe when it both: -- (1) Comes from a module compiled as `Safe` -- (2) Is an orphan instance, OR, an instance for a MPTC unsafeTopInstance inst = isSafeOverlap (is_flag inst) && (isOrphan (is_orphan inst) || classArity (is_cls inst) > 1) --------------- {- Note [Instance overlap and guards] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is to find all instances that /match/ the constraint we are trying to solve. 
Next, using pruneOverlappedMatches, we eliminate from that list of instances any instances that are overlapped. For example: (A) instance C [a] where ... (B) instance {-# OVERLAPPING #-} C [[a]] where ...
(A) Definition of guarding instances (guards). To add a matching instance G as a guard, it must satisfy the following conditions: A1. G is overlapped by a more specific match, M, A2. M is not overlapping, A3. G is overlapping. This means that we eliminate G from the set of matches (it is overridden by M), but we keep it around until we are done with instance resolution because it might still be useful to eliminate other matches. (B) Guards eliminate matches. There are two situations in which guards can eliminate a match: B1. We want to add a new instance, but it is overridden by a guard. We can immediately discard the instance. Example for B1: Suppose we want to solve C [[Int]], with instances: J1: instance C [[Int]] J2: instance {-# OVERLAPS #-} C [[a]] J3: instance C [a] Processing them in order: we add J1 as a match, then J2 as a guard. Now, when we come across J3, we can immediately discard it because it is overridden by the guard J2. B2. We have found a new guard. We must use it to discard matches we have already found. This is necessary because we must obtain the same result whether we process the instance or the guard first. Example for B2: Suppose we want to solve C [[Int]], with instances: K1: instance C [[Int]] K2: instance C [a] K3: instance {-# OVERLAPS #-} C [[a]] We start by considering K1 and K2. Neither has any overlapping flag set, so we end up with two matches, {K1, K2}. Next we look at K3: it is overridden by K1, but as K1 is not overlapping this means K3 should function as a guard. We must then ensure we eliminate K2 from the list of matches, as K3 guards against it. (C) Adding guards. When we already have collected some guards, and have come across a new guard, we can simply add it to the existing list of guards. We don't need to keep the set of guards minimal, as they will simply be thrown away at the end: we are only interested in the matches. Not having a minimal set of guards does not harm us, but it makes the code simpler. 
-} -- | Collect class instance matches, including matches that we know -- are overridden but might still be useful to override other instances -- (which we call "guards"). -- -- See Note [Instance overlap and guards]. data InstMatches = InstMatches { -- | Minimal matches: we have knocked out all strictly more general -- matches that are overlapped by a match in this list. instMatches :: [InstMatch] -- | Guards: matches that we know we won't pick in the end, -- but might still be useful for ruling out other instances, -- as per #20946. See Note [Instance overlap and guards], (A). , instGuards :: [ClsInst] } instance Outputable InstMatches where ppr (InstMatches { instMatches = matches, instGuards = guards }) = text "InstMatches" <+> braces (vcat [ text "instMatches:" <+> ppr matches , text "instGuards:" <+> ppr guards ]) noMatches :: InstMatches noMatches = InstMatches { instMatches = [], instGuards = [] } pruneOverlappedMatches :: [InstMatch] -> [InstMatch] -- ^ Remove from the argument list any InstMatches for which another -- element of the list is more specific, and overlaps it, using the -- rules of Note [Rules for instance lookup], esp (IL3) -- -- Incoherent instances are discarded, unless all are incoherent, -- in which case exactly one is kept. pruneOverlappedMatches all_matches = instMatches $ foldr insert_overlapping noMatches all_matches -- | Computes whether the first class instance overrides the second, -- i.e. the first is more specific and can overlap the second. -- -- More precisely, @instA `overrides` instB@ returns 'True' precisely when: -- -- - @instA@ is more specific than @instB@, -- - @instB@ is not more specific than @instA@, -- - @instA@ is overlapping OR @instB@ is overlappable. 
overrides :: ClsInst -> ClsInst -> Bool new_inst `overrides` old_inst = (new_inst `more_specific_than` old_inst) && (not $ old_inst `more_specific_than` new_inst) && (isOverlapping new_inst || isOverlappable old_inst) -- Overlap permitted if either the more specific instance -- is marked as overlapping, or the more general one is -- marked as overlappable. -- Latest change described in: #9242. -- Previous change: #3877, Dec 10. where -- `instB` can be instantiated to match `instA` -- or the two are equal instA `more_specific_than` instB = isJust (tcMatchTys (is_tys instB) (is_tys instA)) insert_overlapping :: InstMatch -> InstMatches -> InstMatches -- ^ Add a new solution, knocking out strictly less specific ones -- See Note [Rules for instance lookup] and Note [Instance overlap and guards]. -- -- /Property/: the order of insertion doesn't matter, i.e. -- @insert_overlapping inst1 (insert_overlapping inst2 matches)@ -- gives the same result as @insert_overlapping inst2 (insert_overlapping inst1 matches)@. insert_overlapping new_item@(new_inst,_) old@(InstMatches { instMatches = old_items, instGuards = guards }) -- If any of the "guarding" instances override this item, discard it. -- See Note [Instance overlap and guards], (B1). | any (`overrides` new_inst) guards = old | otherwise = insert_overlapping_new_item old_items where insert_overlapping_new_item :: [InstMatch] -> InstMatches insert_overlapping_new_item [] = InstMatches { instMatches = [new_item], instGuards = guards } insert_overlapping_new_item all_old_items@(old_item@(old_inst,_) : old_items) -- New strictly overrides old: throw out the old from the list of matches, -- but potentially keep it around as a guard if it can still be used -- to eliminate other instances. 
| new_inst `overrides` old_inst , InstMatches { instMatches = final_matches , instGuards = prev_guards } <- insert_overlapping_new_item old_items = if isOverlapping new_inst || not (isOverlapping old_inst) -- We're adding "new_inst" as a match. -- If "new_inst" is not overlapping but "old_inst" is, we should -- keep "old_inst" around as a guard. -- See Note [Instance overlap and guards], (A). then InstMatches { instMatches = final_matches , instGuards = prev_guards } else InstMatches { instMatches = final_matches , instGuards = old_inst : prev_guards } -- ^^^^^^^^^^^^^^^^^^^^^^ -- See Note [Instance overlap and guards], (C). -- Old strictly overrides new: throw it out from the list of matches, -- but potentially keep it around as a guard if it can still be used -- to eliminate other instances. | old_inst `overrides` new_inst = if isOverlapping old_inst || not (isOverlapping new_inst) -- We're discarding "new_inst", as it is overridden by "old_inst". -- However, it might still be useful as a guard if "old_inst" is not overlapping -- but "new_inst" is. -- See Note [Instance overlap and guards], (A). then InstMatches { instMatches = all_old_items , instGuards = guards } else InstMatches -- We're adding "new_inst" as a guard, so we must prune out -- any matches it overrides. 
-- See Note [Instance overlap and guards], (B2) { instMatches = filter (\(old_inst,_) -> not (new_inst `overrides` old_inst)) all_old_items -- See Note [Instance overlap and guards], (C) , instGuards = new_inst : guards } -- Discard incoherent instances; see Note [Incoherent instances] | isIncoherent old_inst -- Old is incoherent; discard it = insert_overlapping_new_item old_items | isIncoherent new_inst -- New is incoherent; discard it = InstMatches { instMatches = all_old_items , instGuards = guards } -- Equal or incomparable, and neither is incoherent; keep both | otherwise , InstMatches { instMatches = final_matches , instGuards = final_guards } <- insert_overlapping_new_item old_items = InstMatches { instMatches = old_item : final_matches , instGuards = final_guards } {- Note [Incoherent instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ For some classes, the choice of a particular instance does not matter, any one is good. E.g. consider class D a b where { opD :: a -> b -> String } instance D Int b where ... instance D a Int where ... g (x::Int) = opD x x -- Wanted: D Int Int For such classes this should work (without having to add an "instance D Int Int", and using -XOverlappingInstances, which would then work). This is what -XIncoherentInstances is for: Telling GHC "I don't care which instance you use; if you can use one, use it." Should this logic only work when *all* candidates have the incoherent flag, or even when all but one have it? The right choice is the latter, which can be justified by comparing the behaviour with how -XIncoherentInstances worked when it was only about the unify-check (Note [Overlapping instances]): Example: class C a b c where foo :: (a,b,c) instance C [a] b Int instance {-# INCOHERENT #-} C [Int] b c instance {-# INCOHERENT #-} C a Int c Thanks to the incoherent flags, [Wanted] C [a] b Int works: Only instance one matches, the others just unify, but are marked incoherent. So I can write (foo :: ([a],b,Int)) :: ([Int], Int, Int). 
but if that works then I really want to be able to write foo :: ([Int], Int, Int) as well. Now all three instances from above match. None is more specific than another, so none is ruled out by the normal overlapping rules. One of them is not incoherent, but we still want this to compile. Hence the "all-but-one-logic". The implementation is in insert_overlapping, where we remove matching incoherent instances as long as there are others. If the choice of instance *does* matter, all bets are still not off: users can consult the detailed specification of the instance selection algorithm in the GHC Users' Manual. However, this means we can end up with different instances at the same types at different parts of the program, and this difference has to be preserved. Note [Coherence and specialisation: overview] details how we achieve that. ************************************************************************ * * Binding decisions * * ************************************************************************ -} instanceBindFun :: BindFun instanceBindFun tv _rhs_ty | isOverlappableTyVar tv = Apart | otherwise = BindMe -- Note [Binding when looking up instances] {- Note [Binding when looking up instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When looking up in the instance environment, or family-instance environment, we are careful about multiple matches, as described above in Note [Overlapping instances] The target tys can contain skolem constants. For existentials and instance variables, we can guarantee that those are never going to be instantiated to anything, so we should not involve them in the unification test. These are called "super skolems". Example: class Foo a where { op :: a -> Int } instance Foo a => Foo [a] -- NB overlap instance Foo [Int] -- NB overlap data T = forall a. Foo a => MkT a f :: T -> Int f (MkT x) = op [x,x] The op [x,x] means we need (Foo [a]). This `a` will never be instantiated, and so it is a super skolem. 
(See the use of tcInstSuperSkolTyVarsX in GHC.Tc.Gen.Pat.tcDataConPat.) Super skolems respond True to isOverlappableTyVar, and the use of Apart in instanceBindFun, above, means that these will be treated as fresh constants in the unification algorithm during instance lookup. Without this treatment, GHC would complain, saying that the choice of instance depended on the instantiation of 'a'; but of course it isn't *going* to be instantiated. Note that it is necessary that the unification algorithm returns SurelyApart for these super-skolems for GHC to be able to commit to another instance. We do this only for super skolems. For example we reject g :: forall a => [a] -> Int g x = op x on the grounds that the correct instance depends on the instantiation of 'a' -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Lint.hs0000644000000000000000000046617507346545000020063 0ustar0000000000000000{-# LANGUAGE MultiWayIf #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnboxedTuples #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 A ``lint'' pass to check for Core correctness. See Note [Core Lint guarantee]. -} module GHC.Core.Lint ( LintPassResultConfig (..), LintFlags (..), StaticPtrCheck (..), LintConfig (..), WarnsAndErrs, lintCoreBindings', lintUnfolding, lintPassResult, lintExpr, lintAnnots, lintAxioms, -- ** Debug output EndPassConfig (..), endPassIO, displayLintResults, dumpPassResult ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Tc.Utils.TcType ( ConcreteTvOrigin(..), ConcreteTyVars , isFloatingPrimTy, isTyFamFree ) import GHC.Tc.Types.Origin ( FixedRuntimeRepOrigin(..) 
) import GHC.Unit.Module.ModGuts import GHC.Platform import GHC.Core import GHC.Core.FVs import GHC.Core.Utils import GHC.Core.Stats ( coreBindsStats ) import GHC.Core.DataCon import GHC.Core.Ppr import GHC.Core.Coercion import GHC.Core.Type as Type import GHC.Core.Predicate( isCoVarType ) import GHC.Core.Multiplicity import GHC.Core.UsageEnv import GHC.Core.TyCo.Rep -- checks validity of types/coercions import GHC.Core.TyCo.Compare ( eqType, eqTypes, eqTypeIgnoringMultiplicity, eqForAllVis ) import GHC.Core.TyCo.Subst import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr import GHC.Core.TyCon as TyCon import GHC.Core.Coercion.Axiom import GHC.Core.FamInstEnv( compatibleBranches ) import GHC.Core.Unify import GHC.Core.Opt.Arity ( typeArity, exprIsDeadEnd ) import GHC.Core.Opt.Monad import GHC.Types.Literal import GHC.Types.Var as Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.SrcLoc import GHC.Types.Tickish import GHC.Types.Unique.FM ( isNullUFM, sizeUFM ) import GHC.Types.RepType import GHC.Types.Basic import GHC.Types.Demand ( splitDmdSig, isDeadEndDiv ) import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Builtin.Types ( multiplicityTy ) import GHC.Data.Bag import GHC.Data.List.SetOps import GHC.Utils.Monad import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Utils.Error import qualified GHC.Utils.Error as Err import GHC.Utils.Logger import Control.Monad import Data.Foldable ( for_, toList ) import Data.List.NonEmpty ( NonEmpty(..), groupWith ) import Data.List ( partition ) import Data.Maybe import Data.IntMap.Strict ( IntMap ) import qualified Data.IntMap.Strict as IntMap ( lookup, keys, empty, fromList ) import GHC.Data.Pair import GHC.Base (oneShot) import GHC.Data.Unboxed {- Note [Core Lint guarantee] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Core Lint is the 
type-checker for Core. Using it, we get the following guarantee: If all of: 1. Core Lint passes, 2. there are no unsafe coercions (i.e. unsafeEqualityProof), 3. all plugin-supplied coercions (i.e. PluginProv) are valid, and 4. all case-matches are complete then running the compiled program will not seg-fault, assuming no bugs downstream (e.g. in the code generator). This guarantee is quite powerful, in that it allows us to decouple the safety of the resulting program from the type inference algorithm. However, do note point (4) above. Core Lint does not check for incomplete case-matches; see Note [Case expression invariants] in GHC.Core, invariant (4). As explained there, an incomplete case-match might slip by Core Lint and cause trouble at runtime. Note [GHC Formalism] ~~~~~~~~~~~~~~~~~~~~ This file implements the type-checking algorithm for System FC, the "official" name of the Core language. Type safety of FC is heart of the claim that executables produced by GHC do not have segmentation faults. Thus, it is useful to be able to reason about System FC independently of reading the code. To this purpose, there is a document core-spec.pdf built in docs/core-spec that contains a formalism of the types and functions dealt with here. If you change just about anything in this file or you change other types/functions throughout the Core language (all signposted to this note), you should update that formalism. See docs/core-spec/README for more info about how to do so. Note [check vs lint] ~~~~~~~~~~~~~~~~~~~~ This file implements both a type checking algorithm and also general sanity checking. For example, the "sanity checking" checks for TyConApp on the left of an AppTy, which should never happen. These sanity checks don't really affect any notion of type soundness. Yet, it is convenient to do the sanity checks at the same time as the type checks. So, we use the following naming convention: - Functions that begin with 'lint'... are involved in type checking. 
These functions might also do some sanity checking. - Functions that begin with 'check'... are *not* involved in type checking. They exist only for sanity checking. Issues surrounding variable naming, shadowing, and such are considered *not* to be part of type checking, as the formalism omits these details. Summary of checks ~~~~~~~~~~~~~~~~~ Checks that a set of core bindings is well-formed. The PprStyle and String just control what we print in the event of an error. The Bool value indicates whether we have done any specialisation yet (in which case we do some extra checks). We check for (a) type errors (b) Out-of-scope type variables (c) Out-of-scope local variables (d) Ill-kinded types (e) Incorrect unsafe coercions If we have done specialisation the we check that there are (a) No top-level bindings of primitive (unboxed type) Note [Linting function types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All saturated applications of funTyCon are represented with the FunTy constructor. See Note [Function type constructors and FunTy] in GHC.Builtin.Types.Prim We check this invariant in lintType. Note [Linting type lets] ~~~~~~~~~~~~~~~~~~~~~~~~ In the desugarer, it's very very convenient to be able to say (in effect) let a = Type Bool in let x::a = True in That is, use a type let. See Note [Core type and coercion invariant] in "GHC.Core". One place it is used is in mkWwBodies; see Note [Join points and beta-redexes] in GHC.Core.Opt.WorkWrap.Utils. (Maybe there are other "clients" of this feature; I'm not sure). * Hence when linting we need to remember that a=Int, else we might reject a correct program. So we carry a type substitution (in this example [a -> Bool]) and apply this substitution before comparing types. In effect, in Lint, type equality is always equality-modulo-le-subst. This is in the le_subst field of LintEnv. 
But nota bene: (SI1) The le_subst substitution is applied to types and coercions only (SI2) The result of that substitution is used only to check for type equality, to check well-typed-ness, /but is then discarded/. The result of substitution does not outlive the CoreLint pass. (SI3) The InScopeSet of le_subst includes only TyVar and CoVar binders. * The function lintInTy :: Type -> LintM (Type, Kind) returns a substituted type. * When we encounter a binder (like x::a) we must apply the substitution to the type of the binding variable. lintBinders does this. * Clearly we need to clone tyvar binders as we go. * But take care (#17590)! We must also clone CoVar binders: let a = TYPE (ty |> cv) in \cv -> blah blindly substituting for `a` might capture `cv`. * Alas, when cloning a coercion variable we might choose a unique that happens to clash with an inner Id, thus \cv_66 -> let wild_X7 = blah in blah We decide to clone `cv_66` because it's already in scope. Fine, choose a new unique. Aha, X7 looks good. So we check the lambda body with le_subst of [cv_66 :-> cv_X7] This is all fine, even though we use the same unique as wild_X7. As (SI2) says, we do /not/ return a new lambda (\cv_X7 -> let wild_X7 = blah in ...) We simply use the le_subst substitution in types/coercions only, when checking for equality. * We still need to check that Id occurrences are bound by some enclosing binding. We do /not/ use the InScopeSet for the le_subst for this purpose -- it contains only TyCoVars. Instead we have a separate le_ids for the in-scope Id binders. Sigh. We might want to explore getting rid of type-let! Note [Bad unsafe coercion] ~~~~~~~~~~~~~~~~~~~~~~~~~~ For discussion see https://gitlab.haskell.org/ghc/ghc/wikis/bad-unsafe-coercions Linter introduces additional rules that checks improper coercion between different types, called bad coercions. 
Following coercions are forbidden: (a) coercions between boxed and unboxed values; (b) coercions between unlifted values of the different sizes, here active size is checked, i.e. size of the actual value but not the space allocated for value; (c) coercions between floating and integral boxed values, this check is not yet supported for unboxed tuples, as no semantics were specified for that; (d) coercions from / to vector type (e) If types are unboxed tuples then tuple (# A_1,..,A_n #) can be coerced to (# B_1,..,B_m #) if n=m and for each pair A_i, B_i rules (a-e) holds. Note [Join points] ~~~~~~~~~~~~~~~~~~ We check the rules listed in Note [Invariants on join points] in GHC.Core. The only one that causes any difficulty is the first: All occurrences must be tail calls. To this end, along with the in-scope set, we remember in le_joins the subset of in-scope Ids that are valid join ids. For example: join j x = ... in case e of A -> jump j y -- good B -> case (jump j z) of -- BAD C -> join h = jump j w in ... -- good D -> let x = jump j v in ... -- BAD A join point remains valid in case branches, so when checking the A branch, j is still valid. When we check the scrutinee of the inner case, however, we set le_joins to empty, and catch the error. Similarly, join points can occur free in RHSes of other join points but not the RHSes of value bindings (thunks and functions). Note [Avoiding compiler perf traps when constructing error messages.] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's quite common to put error messages into a where clause when it might be triggered by multiple branches. E.g. 
checkThing x y z = case x of X -> unless (correctX x) $ failWithL errMsg Y -> unless (correctY y) $ failWithL errMsg where errMsg = text "My error involving:" $$ ppr x <+> ppr y However ghc will compile this to: checkThink x y z = let errMsg = text "My error involving:" $$ ppr x <+> ppr y in case x of X -> unless (correctX x) $ failWithL errMsg Y -> unless (correctY y) $ failWithL errMsg Putting the allocation of errMsg into the common non-error path. One way to work around this is to turn errMsg into a function: checkThink x y z = case x of X -> unless (correctX x) $ failWithL (errMsg x y) Y -> unless (correctY y) $ failWithL (errMsg x y) where errMsg x y = text "My error involving:" $$ ppr x <+> ppr y This way `errMsg` is a static function and it being defined in the common path does not result in allocation in the hot path. This can be surprisingly impactful. Changing `lint_app` reduced allocations for one test program I was looking at by ~4%. Note [MCInfo for Lint] ~~~~~~~~~~~~~~~~~~~~~~ When printing a Lint message, use the MCInfo severity so that the message is printed on stderr rather than stdout (#13342). ************************************************************************ * * Beginning and ending passes * * ************************************************************************ -} -- | Configuration for boilerplate operations at the end of a -- compilation pass producing Core. data EndPassConfig = EndPassConfig { ep_dumpCoreSizes :: !Bool -- ^ Whether core bindings should be dumped with the size of what they -- are binding (i.e. the size of the RHS of the binding). , ep_lintPassResult :: !(Maybe LintPassResultConfig) -- ^ Whether we should lint the result of this pass. 
, ep_namePprCtx :: !NamePprCtx , ep_dumpFlag :: !(Maybe DumpFlag) , ep_prettyPass :: !SDoc , ep_passDetails :: !SDoc } endPassIO :: Logger -> EndPassConfig -> CoreProgram -> [CoreRule] -> IO () -- Used by the IO-is CorePrep too endPassIO logger cfg binds rules = do { dumpPassResult logger (ep_dumpCoreSizes cfg) (ep_namePprCtx cfg) mb_flag (renderWithContext defaultSDocContext (ep_prettyPass cfg)) (ep_passDetails cfg) binds rules ; for_ (ep_lintPassResult cfg) $ \lp_cfg -> lintPassResult logger lp_cfg binds } where mb_flag = case ep_dumpFlag cfg of Just flag | logHasDumpFlag logger flag -> Just flag | logHasDumpFlag logger Opt_D_verbose_core2core -> Just flag _ -> Nothing dumpPassResult :: Logger -> Bool -- dump core sizes? -> NamePprCtx -> Maybe DumpFlag -- Just df => show details in a file whose -- name is specified by df -> String -- Header -> SDoc -- Extra info to appear after header -> CoreProgram -> [CoreRule] -> IO () dumpPassResult logger dump_core_sizes name_ppr_ctx mb_flag hdr extra_info binds rules = do { forM_ mb_flag $ \flag -> do logDumpFile logger (mkDumpStyle name_ppr_ctx) flag hdr FormatCore dump_doc -- Report result size -- This has the side effect of forcing the intermediate to be evaluated -- if it's not already forced by a -ddump flag. 
; Err.debugTraceMsg logger 2 size_doc } where size_doc = sep [text "Result size of" <+> text hdr, nest 2 (equals <+> ppr (coreBindsStats binds))] dump_doc = vcat [ nest 2 extra_info , size_doc , blankLine , if dump_core_sizes then pprCoreBindingsWithSize binds else pprCoreBindings binds , ppUnless (null rules) pp_rules ] pp_rules = vcat [ blankLine , text "------ Local rules for imported ids --------" , pprRules rules ] {- ************************************************************************ * * Top-level interfaces * * ************************************************************************ -} data LintPassResultConfig = LintPassResultConfig { lpr_diagOpts :: !DiagOpts , lpr_platform :: !Platform , lpr_makeLintFlags :: !LintFlags , lpr_showLintWarnings :: !Bool , lpr_passPpr :: !SDoc , lpr_localsInScope :: ![Var] } lintPassResult :: Logger -> LintPassResultConfig -> CoreProgram -> IO () lintPassResult logger cfg binds = do { let warns_and_errs = lintCoreBindings' (LintConfig { l_diagOpts = lpr_diagOpts cfg , l_platform = lpr_platform cfg , l_flags = lpr_makeLintFlags cfg , l_vars = lpr_localsInScope cfg }) binds ; Err.showPass logger $ "Core Linted result of " ++ renderWithContext defaultSDocContext (lpr_passPpr cfg) ; displayLintResults logger (lpr_showLintWarnings cfg) (lpr_passPpr cfg) (pprCoreBindings binds) warns_and_errs } displayLintResults :: Logger -> Bool -- ^ If 'True', display linter warnings. -- If 'False', ignore linter warnings. 
-> SDoc -- ^ The source of the linted program -> SDoc -- ^ The linted program, pretty-printed -> WarnsAndErrs -> IO () displayLintResults logger display_warnings pp_what pp_pgm (warns, errs) | not (isEmptyBag errs) = do { logMsg logger Err.MCInfo noSrcSpan -- See Note [MCInfo for Lint] $ withPprStyle defaultDumpStyle (vcat [ lint_banner "errors" pp_what, Err.pprMessageBag errs , text "*** Offending Program ***" , pp_pgm , text "*** End of Offense ***" ]) ; Err.ghcExit logger 1 } | not (isEmptyBag warns) , log_enable_debug (logFlags logger) , display_warnings = logMsg logger Err.MCInfo noSrcSpan -- See Note [MCInfo for Lint] $ withPprStyle defaultDumpStyle (lint_banner "warnings" pp_what $$ Err.pprMessageBag (mapBag ($$ blankLine) warns)) | otherwise = return () lint_banner :: String -> SDoc -> SDoc lint_banner string pass = text "*** Core Lint" <+> text string <+> text ": in result of" <+> pass <+> text "***" -- | Type-check a 'CoreProgram'. See Note [Core Lint guarantee]. lintCoreBindings' :: LintConfig -> CoreProgram -> WarnsAndErrs -- Returns (warnings, errors) -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoreBindings' cfg binds = initL cfg $ addLoc TopLevelBindings $ do { checkL (null dups) (dupVars dups) ; checkL (null ext_dups) (dupExtVars ext_dups) ; lintRecBindings TopLevel all_pairs $ \_ -> return () } where all_pairs = flattenBinds binds -- Put all the top-level binders in scope at the start -- This is because rewrite rules can bring something -- into use 'unexpectedly'; see Note [Glomming] in "GHC.Core.Opt.OccurAnal" binders = map fst all_pairs (_, dups) = removeDups compare binders -- ext_dups checks for names with different uniques -- but the same External name M.n. We don't -- allow this at top level: -- M.n{r3} = ... -- M.n{r29} = ... 
-- because they both get the same linker symbol ext_dups = snd $ removeDupsOn ord_ext $ filter isExternalName $ map Var.varName binders ord_ext n = (nameModule n, nameOccName n) {- ************************************************************************ * * \subsection[lintUnfolding]{lintUnfolding} * * ************************************************************************ Note [Linting Unfoldings from Interfaces] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use this to check all top-level unfoldings that come in from interfaces (it is very painful to catch errors otherwise). We do not need to call lintUnfolding on unfoldings that are nested within top-level unfoldings; they are linted when we lint the top-level unfolding; hence the `TopLevelFlag` on `tcPragExpr` in GHC.IfaceToCore. -} lintUnfolding :: Bool -- ^ True <=> is a compulsory unfolding -> LintConfig -> SrcLoc -> CoreExpr -> Maybe (Bag SDoc) -- Nothing => OK lintUnfolding is_compulsory cfg locn expr | isEmptyBag errs = Nothing | otherwise = Just errs where (_warns, errs) = initL cfg $ if is_compulsory -- See Note [Checking for representation polymorphism] then noFixedRuntimeRepChecks linter else linter linter = addLoc (ImportedUnfolding locn) $ lintCoreExpr expr lintExpr :: LintConfig -> CoreExpr -> Maybe (Bag SDoc) -- Nothing => OK lintExpr cfg expr | isEmptyBag errs = Nothing | otherwise = Just errs where (_warns, errs) = initL cfg linter linter = addLoc TopLevelBindings $ lintCoreExpr expr {- ************************************************************************ * * \subsection[lintCoreBinding]{lintCoreBinding} * * ************************************************************************ Check a core binding, returning the list of variables bound. 
-} -- Returns a UsageEnv because this function is called in lintCoreExpr for -- Let lintRecBindings :: TopLevelFlag -> [(Id, CoreExpr)] -> ([LintedId] -> LintM a) -> LintM (a, [UsageEnv]) lintRecBindings top_lvl pairs thing_inside = lintIdBndrs top_lvl bndrs $ \ bndrs' -> do { ues <- zipWithM lint_pair bndrs' rhss ; a <- thing_inside bndrs' ; return (a, ues) } where (bndrs, rhss) = unzip pairs lint_pair bndr' rhs = addLoc (RhsOf bndr') $ do { (rhs_ty, ue) <- lintRhs bndr' rhs -- Check the rhs ; lintLetBind top_lvl Recursive bndr' rhs rhs_ty ; return ue } lintLetBody :: LintLocInfo -> [LintedId] -> CoreExpr -> LintM (LintedType, UsageEnv) lintLetBody loc bndrs body = do { (body_ty, body_ue) <- addLoc loc (lintCoreExpr body) ; mapM_ (lintJoinBndrType body_ty) bndrs ; return (body_ty, body_ue) } lintLetBind :: TopLevelFlag -> RecFlag -> LintedId -> CoreExpr -> LintedType -> LintM () -- Binder's type, and the RHS, have already been linted -- This function checks other invariants lintLetBind top_lvl rec_flag binder rhs rhs_ty = do { let binder_ty = idType binder ; ensureEqTys binder_ty rhs_ty (mkRhsMsg binder (text "RHS") rhs_ty) -- If the binding is for a CoVar, the RHS should be (Coercion co) -- See Note [Core type and coercion invariant] in GHC.Core ; checkL (not (isCoVar binder) || isCoArg rhs) (mkLetErr binder rhs) -- Check the let-can-float invariant -- See Note [Core let-can-float invariant] in GHC.Core ; checkL ( isJoinId binder || mightBeLiftedType binder_ty || (isNonRec rec_flag && exprOkForSpeculation rhs) || isDataConWorkId binder || isDataConWrapId binder -- until #17521 is fixed || exprIsTickedString rhs) (badBndrTyMsg binder (text "unlifted")) -- Check that if the binder is at the top level and has type Addr#, -- that it is a string literal. -- See Note [Core top-level string literals]. 
; checkL (not (isTopLevel top_lvl && binder_ty `eqType` addrPrimTy) || exprIsTickedString rhs) (mkTopNonLitStrMsg binder) ; flags <- getLintFlags -- Check that a join-point binder has a valid type -- NB: lintIdBinder has checked that it is not top-level bound ; case idJoinPointHood binder of NotJoinPoint -> return () JoinPoint arity -> checkL (isValidJoinPointType arity binder_ty) (mkInvalidJoinPointMsg binder binder_ty) ; when (lf_check_inline_loop_breakers flags && isStableUnfolding (realIdUnfolding binder) && isStrongLoopBreaker (idOccInfo binder) && isInlinePragma (idInlinePragma binder)) (addWarnL (text "INLINE binder is (non-rule) loop breaker:" <+> ppr binder)) -- Only non-rule loop breakers inhibit inlining -- We used to check that the dmdTypeDepth of a demand signature never -- exceeds idArity, but that is an unnecessary complication, see -- Note [idArity varies independently of dmdTypeDepth] in GHC.Core.Opt.DmdAnal -- Check that the binder's arity is within the bounds imposed by the type -- and the strictness signature. 
See Note [Arity invariants for bindings] -- and Note [Trimming arity] ; checkL (typeArity (idType binder) >= idArity binder) (text "idArity" <+> ppr (idArity binder) <+> text "exceeds typeArity" <+> ppr (typeArity (idType binder)) <> colon <+> ppr binder) -- See Note [idArity varies independently of dmdTypeDepth] -- in GHC.Core.Opt.DmdAnal ; case splitDmdSig (idDmdSig binder) of (demands, result_info) | isDeadEndDiv result_info -> if (demands `lengthAtLeast` idArity binder) then return () else pprTrace "Hack alert: lintLetBind #24623" (ppr (idArity binder) $$ ppr (idDmdSig binder)) $ return () -- checkL (demands `lengthAtLeast` idArity binder) -- (text "idArity" <+> ppr (idArity binder) <+> -- text "exceeds arity imposed by the strictness signature" <+> -- ppr (idDmdSig binder) <> colon <+> -- ppr binder) _ -> return () ; addLoc (RuleOf binder) $ mapM_ (lintCoreRule binder binder_ty) (idCoreRules binder) ; addLoc (UnfoldingOf binder) $ lintIdUnfolding binder binder_ty (idUnfolding binder) ; return () } -- We should check the unfolding, if any, but this is tricky because -- the unfolding is a SimplifiableCoreExpr. Give up for now. -- | Checks the RHS of bindings. It only differs from 'lintCoreExpr' -- in that it doesn't reject occurrences of the function 'makeStatic' when they -- appear at the top level and @lf_check_static_ptrs == AllowAtTopLevel@, and -- for join points, it skips the outer lambdas that take arguments to the -- join point. -- -- See Note [Checking StaticPtrs]. lintRhs :: Id -> CoreExpr -> LintM (LintedType, UsageEnv) -- NB: the Id can be Linted or not -- it's only used for -- its OccInfo and join-pointer-hood lintRhs bndr rhs | JoinPoint arity <- idJoinPointHood bndr = lintJoinLams arity (Just bndr) rhs | AlwaysTailCalled arity <- tailCallInfo (idOccInfo bndr) = lintJoinLams arity Nothing rhs -- Allow applications of the data constructor @StaticPtr@ at the top -- but produce errors otherwise. 
lintRhs _bndr rhs = fmap lf_check_static_ptrs getLintFlags >>= go where -- Allow occurrences of 'makeStatic' at the top-level but produce errors -- otherwise. go :: StaticPtrCheck -> LintM (OutType, UsageEnv) go AllowAtTopLevel | (binders0, rhs') <- collectTyBinders rhs , Just (fun, t, info, e) <- collectMakeStaticArgs rhs' = markAllJoinsBad $ foldr -- imitate @lintCoreExpr (Lam ...)@ lintLambda -- imitate @lintCoreExpr (App ...)@ (do fun_ty_ue <- lintCoreExpr fun lintCoreArgs fun_ty_ue [Type t, info, e] ) binders0 go _ = markAllJoinsBad $ lintCoreExpr rhs -- | Lint the RHS of a join point with expected join arity of @n@ (see Note -- [Join points] in "GHC.Core"). lintJoinLams :: JoinArity -> Maybe Id -> CoreExpr -> LintM (LintedType, UsageEnv) lintJoinLams join_arity enforce rhs = go join_arity rhs where go 0 expr = lintCoreExpr expr go n (Lam var body) = lintLambda var $ go (n-1) body go n expr | Just bndr <- enforce -- Join point with too few RHS lambdas = failWithL $ mkBadJoinArityMsg bndr join_arity n rhs | otherwise -- Future join point, not yet eta-expanded = markAllJoinsBad $ lintCoreExpr expr -- Body of lambda is not a tail position lintIdUnfolding :: Id -> Type -> Unfolding -> LintM () lintIdUnfolding bndr bndr_ty uf | isStableUnfolding uf , Just rhs <- maybeUnfoldingTemplate uf = do { ty <- fst <$> (if isCompulsoryUnfolding uf then noFixedRuntimeRepChecks $ lintRhs bndr rhs -- ^^^^^^^^^^^^^^^^^^^^^^^ -- See Note [Checking for representation polymorphism] else lintRhs bndr rhs) ; ensureEqTys bndr_ty ty (mkRhsMsg bndr (text "unfolding") ty) } lintIdUnfolding _ _ _ = return () -- Do not Lint unstable unfoldings, because that leads -- to exponential behaviour; c.f. GHC.Core.FVs.idUnfoldingVars {- Note [Checking for INLINE loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's very suspicious if a strong loop breaker is marked INLINE. However, the desugarer generates instance methods with INLINE pragmas that form a mutually recursive group. 
Only after a round of simplification are they unravelled. So we suppress the test for the desugarer. Here is an example: instance Eq T where t1 == t2 = blah t1 /= t2 = not (t1 == t2) {-# INLINE (/=) #-} This will generate something like -- From the class decl for Eq data Eq a = EqDict (a->a->Bool) (a->a->Bool) eq_sel :: Eq a -> (a->a->Bool) eq_sel (EqDict eq _) = eq -- From the instance Eq T $ceq :: T -> T -> Bool $ceq = blah Rec { $dfEqT :: Eq T {-# DFunId #-} $dfEqT = EqDict $ceq $cnoteq $cnoteq :: T -> T -> Bool {-# INLINE #-} $cnoteq x y = not (eq_sel $dfEqT x y) } Notice that * `$dfEqT` and `$cnotEq` are mutually recursive. * We do not want `$dfEqT` to be the loop breaker: it's a DFunId, and we want to let it "cancel" with "eq_sel" (see Note [ClassOp/DFun selection] in GHC.Tc.TyCl.Instance, which it can't do if it's a loop breaker. So we make `$cnoteq` into the loop breaker. That means it can't inline, despite the INLINE pragma. That's what gives rise to the warning, which is perfectly appropriate for, say Rec { {-# INLINE f #-} f = \x -> ...f.... } We can't inline a recursive function -- it's a loop breaker. But now we can optimise `eq_sel $dfEqT` to `$ceq`, so we get Rec { $dfEqT :: Eq T {-# DFunId #-} $dfEqT = EqDict $ceq $cnoteq $cnoteq :: T -> T -> Bool {-# INLINE #-} $cnoteq x y = not ($ceq x y) } and now the dependencies of the Rec have gone, and we can split it up to give NonRec { $dfEqT :: Eq T {-# DFunId #-} $dfEqT = EqDict $ceq $cnoteq } NonRec { $cnoteq :: T -> T -> Bool {-# INLINE #-} $cnoteq x y = not ($ceq x y) } Now $cnoteq is not a loop breaker any more, so the INLINE pragma can take effect -- the warning turned out to be temporary. To stop excessive warnings, this warning for INLINE loop breakers is switched off when linting the result of the desugarer. See lf_check_inline_loop_breakers in GHC.Core.Lint. 
Note [Checking for representation polymorphism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We ordinarily want to check for bad representation polymorphism. See Note [Representation polymorphism invariants] in GHC.Core. However, we do *not* want to do this in a compulsory unfolding. Compulsory unfoldings arise only internally, for things like newtype wrappers, dictionaries, and (notably) unsafeCoerce#. These might legitimately be representation-polymorphic; indeed representation-polymorphic unfoldings are a primary reason for the very existence of compulsory unfoldings (we can't compile code for the original, representation-polymorphic, binding). It is vitally important that we do representation polymorphism checks *after* performing the unfolding, but not beforehand. This is all safe because we will check any unfolding after it has been unfolded; checking the unfolding beforehand is merely an optimization, and one that actively hurts us here. Note [Linting of runRW#] ~~~~~~~~~~~~~~~~~~~~~~~~ runRW# has some very special behavior (see Note [runRW magic] in GHC.CoreToStg.Prep) which CoreLint must accommodate, by allowing join points in its argument. For example, this is fine: join j x = ... in runRW# (\s. case v of A -> j 3 B -> j 4) Usually those calls to the join point 'j' would not be valid tail calls, because they occur in a function argument. But in the case of runRW# they are fine, because runRW# (\s.e) behaves operationally just like e. (runRW# is ultimately inlined in GHC.CoreToStg.Prep.) In the case that the continuation is /not/ a lambda we simply disable this special behaviour. For example, this is /not/ fine: join j = ... in runRW# @r @ty (jump j) Note [Coercions in terms] ~~~~~~~~~~~~~~~~~~~~~~~~~ The expression (Type ty) can occur only as the argument of an application, or the RHS of a non-recursive Let. But what about (Coercion co)? 
Currently it appears in ghc-prim:GHC.Types.coercible_sel, a WiredInId whose definition is: coercible_sel :: Coercible a b => (a ~R# b) coercible_sel d = case d of MkCoercibleDict (co :: a ~# b) -> Coercion co So this function has a (Coercion co) in the alternative of a case. Richard says (!11908): it shouldn't appear outside of arguments, but we've been loose about this. coercible_sel is some thin ice. Really we should be unpacking Coercible using case, not a selector. I recall looking into this a few years back and coming to the conclusion that the fix was worse than the disease. Don't remember the details, but could probably recover it if we want to revisit. So Lint current accepts (Coercion co) in arbitrary places. There is no harm in that: it really is a value, albeit a zero-bit value. ************************************************************************ * * \subsection[lintCoreExpr]{lintCoreExpr} * * ************************************************************************ -} -- Linted things: substitution applied, and type is linted type LintedType = Type type LintedKind = Kind type LintedCoercion = Coercion type LintedTyCoVar = TyCoVar type LintedId = Id -- | Lint an expression cast through the given coercion, returning the type -- resulting from the cast. 
lintCastExpr :: CoreExpr -> LintedType -> Coercion -> LintM LintedType lintCastExpr expr expr_ty co = do { co' <- lintCoercion co ; let (Pair from_ty to_ty, role) = coercionKindRole co' ; checkValueType to_ty $ text "target of cast" <+> quotes (ppr co') ; lintRole co' Representational role ; ensureEqTys from_ty expr_ty (mkCastErr expr co' from_ty expr_ty) ; return to_ty } lintCoreExpr :: CoreExpr -> LintM (LintedType, UsageEnv) -- The returned type has the substitution from the monad -- already applied to it: -- lintCoreExpr e subst = exprType (subst e) -- -- The returned "type" can be a kind, if the expression is (Type ty) -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoreExpr (Var var) = do var_pair@(var_ty, _) <- lintIdOcc var 0 -- See Note [Linting representation-polymorphic builtins] checkRepPolyBuiltin (Var var) [] var_ty --checkDataToTagPrimOpTyCon (Var var) [] return var_pair lintCoreExpr (Lit lit) = return (literalType lit, zeroUE) lintCoreExpr (Cast expr co) = do (expr_ty, ue) <- markAllJoinsBad (lintCoreExpr expr) -- markAllJoinsBad: see Note [Join points and casts] to_ty <- lintCastExpr expr expr_ty co return (to_ty, ue) lintCoreExpr (Tick tickish expr) = do case tickish of Breakpoint _ _ ids _ -> forM_ ids $ \id -> do checkDeadIdOcc id lookupIdInScope id _ -> return () markAllJoinsBadIf block_joins $ lintCoreExpr expr where block_joins = not (tickish `tickishScopesLike` SoftScope) -- TODO Consider whether this is the correct rule. It is consistent with -- the simplifier's behaviour - cost-centre-scoped ticks become part of -- the continuation, and thus they behave like part of an evaluation -- context, but soft-scoped and non-scoped ticks simply wrap the result -- (see Simplify.simplTick). 
lintCoreExpr (Let (NonRec tv (Type ty)) body) | isTyVar tv = -- See Note [Linting type lets] do { ty' <- lintType ty ; lintTyBndr tv $ \ tv' -> do { addLoc (RhsOf tv) $ lintTyKind tv' ty' -- Now extend the substitution so we -- take advantage of it in the body ; extendTvSubstL tv ty' $ addLoc (BodyOfLet tv) $ lintCoreExpr body } } lintCoreExpr (Let (NonRec bndr rhs) body) | isId bndr = do { -- First Lint the RHS, before bringing the binder into scope (rhs_ty, let_ue) <- lintRhs bndr rhs -- See Note [Multiplicity of let binders] in Var -- Now lint the binder ; lintBinder LetBind bndr $ \bndr' -> do { lintLetBind NotTopLevel NonRecursive bndr' rhs rhs_ty ; addAliasUE bndr let_ue (lintLetBody (BodyOfLet bndr') [bndr'] body) } } | otherwise = failWithL (mkLetErr bndr rhs) -- Not quite accurate lintCoreExpr e@(Let (Rec pairs) body) = do { -- Check that the list of pairs is non-empty checkL (not (null pairs)) (emptyRec e) -- Check that there are no duplicated binders ; let (_, dups) = removeDups compare bndrs ; checkL (null dups) (dupVars dups) -- Check that either all the binders are joins, or none ; checkL (all isJoinId bndrs || all (not . isJoinId) bndrs) $ mkInconsistentRecMsg bndrs -- See Note [Multiplicity of let binders] in Var ; ((body_type, body_ue), ues) <- lintRecBindings NotTopLevel pairs $ \ bndrs' -> lintLetBody (BodyOfLetRec bndrs') bndrs' body ; return (body_type, body_ue `addUE` scaleUE ManyTy (foldr1 addUE ues)) } where bndrs = map fst pairs lintCoreExpr e@(App _ _) | Var fun <- fun , fun `hasKey` runRWKey -- N.B. we may have an over-saturated application of the form: -- runRW (\s -> \x -> ...) 
y , ty_arg1 : ty_arg2 : arg3 : rest <- args = do { fun_pair1 <- lintCoreArg (idType fun, zeroUE) ty_arg1 ; (fun_ty2, ue2) <- lintCoreArg fun_pair1 ty_arg2 -- See Note [Linting of runRW#] ; let lintRunRWCont :: CoreArg -> LintM (LintedType, UsageEnv) lintRunRWCont expr@(Lam _ _) = lintJoinLams 1 (Just fun) expr lintRunRWCont other = markAllJoinsBad $ lintCoreExpr other -- TODO: Look through ticks? ; (arg3_ty, ue3) <- lintRunRWCont arg3 ; app_ty <- lintValApp arg3 fun_ty2 arg3_ty ue2 ue3 ; lintCoreArgs app_ty rest } | otherwise = do { fun_pair <- lintCoreFun fun (length args) ; app_pair@(app_ty, _) <- lintCoreArgs fun_pair args -- See Note [Linting representation-polymorphic builtins] ; checkRepPolyBuiltin fun args app_ty ; --checkDataToTagPrimOpTyCon fun args ; return app_pair} where skipTick t = case collectFunSimple e of (Var v) -> etaExpansionTick v t _ -> tickishFloatable t (fun, args, _source_ticks) = collectArgsTicks skipTick e -- We must look through source ticks to avoid #21152, for example: -- -- reallyUnsafePtrEquality -- = \ @a -> -- (src reallyUnsafePtrEquality#) -- @Lifted @a @Lifted @a -- -- To do this, we use `collectArgsTicks tickishFloatable` to match -- the eta expansion behaviour, as per Note [Eta expansion and source notes] -- in GHC.Core.Opt.Arity. -- Sadly this was not quite enough. So we now also accept things that CorePrep will allow. 
-- See Note [Ticks and mandatory eta expansion] lintCoreExpr (Lam var expr) = markAllJoinsBad $ lintLambda var $ lintCoreExpr expr lintCoreExpr (Case scrut var alt_ty alts) = lintCaseExpr scrut var alt_ty alts -- This case can't happen; linting types in expressions gets routed through -- lintCoreArgs lintCoreExpr (Type ty) = failWithL (text "Type found as expression" <+> ppr ty) lintCoreExpr (Coercion co) -- See Note [Coercions in terms] = do { co' <- addLoc (InCo co) $ lintCoercion co ; return (coercionType co', zeroUE) } ---------------------- lintIdOcc :: Var -> Int -- Number of arguments (type or value) being passed -> LintM (LintedType, UsageEnv) -- returns type of the *variable* lintIdOcc var nargs = addLoc (OccOf var) $ do { checkL (isNonCoVarId var) (text "Non term variable" <+> ppr var) -- See GHC.Core Note [Variable occurrences in Core] -- Check that the type of the occurrence is the same -- as the type of the binding site. The inScopeIds are -- /un-substituted/, so this checks that the occurrence type -- is identical to the binder type. -- This makes things much easier for things like: -- /\a. \(x::Maybe a). /\a. ...(x::Maybe a)... -- The "::Maybe a" on the occurrence is referring to the /outer/ a. -- If we compared /substituted/ types we'd risk comparing -- (Maybe a) from the binding site with bogus (Maybe a1) from -- the occurrence site. Comparing un-substituted types finesses -- this altogether ; (bndr, linted_bndr_ty) <- lookupIdInScope var ; let occ_ty = idType var bndr_ty = idType bndr ; ensureEqTys occ_ty bndr_ty $ mkBndrOccTypeMismatchMsg bndr var bndr_ty occ_ty -- Check for a nested occurrence of the StaticPtr constructor. -- See Note [Checking StaticPtrs]. 
; lf <- getLintFlags ; when (nargs /= 0 && lf_check_static_ptrs lf /= AllowAnywhere) $ checkL (idName var /= makeStaticName) $ text "Found makeStatic nested in an expression" ; checkDeadIdOcc var ; checkJoinOcc var nargs ; case isDataConId_maybe var of Nothing -> return () Just dc -> checkTypeDataConOcc "expression" dc ; usage <- varCallSiteUsage var ; return (linted_bndr_ty, usage) } lintCoreFun :: CoreExpr -> Int -- Number of arguments (type or val) being passed -> LintM (LintedType, UsageEnv) -- Returns type of the *function* lintCoreFun (Var var) nargs = lintIdOcc var nargs lintCoreFun (Lam var body) nargs -- Act like lintCoreExpr of Lam, but *don't* call markAllJoinsBad; -- See Note [Beta redexes] | nargs /= 0 = lintLambda var $ lintCoreFun body (nargs - 1) lintCoreFun expr nargs = markAllJoinsBadIf (nargs /= 0) $ -- See Note [Join points are less general than the paper] lintCoreExpr expr ------------------ lintLambda :: Var -> LintM (Type, UsageEnv) -> LintM (Type, UsageEnv) lintLambda var lintBody = addLoc (LambdaBodyOf var) $ lintBinder LambdaBind var $ \ var' -> do { (body_ty, ue) <- lintBody ; ue' <- checkLinearity ue var' ; return (mkLamType var' body_ty, ue') } ------------------ checkDeadIdOcc :: Id -> LintM () -- Occurrences of an Id should never be dead.... -- except when we are checking a case pattern checkDeadIdOcc id | isDeadOcc (idOccInfo id) = do { in_case <- inCasePat ; checkL in_case (text "Occurrence of a dead Id" <+> ppr id) } | otherwise = return () ------------------ lintJoinBndrType :: LintedType -- Type of the body -> LintedId -- Possibly a join Id -> LintM () -- Checks that the return type of a join Id matches the body -- E.g. 
join j x = rhs in body -- The type of 'rhs' must be the same as the type of 'body' lintJoinBndrType body_ty bndr | JoinPoint arity <- idJoinPointHood bndr , let bndr_ty = idType bndr , (bndrs, res) <- splitPiTys bndr_ty = checkL (length bndrs >= arity && body_ty `eqType` mkPiTys (drop arity bndrs) res) $ hang (text "Join point returns different type than body") 2 (vcat [ text "Join bndr:" <+> ppr bndr <+> dcolon <+> ppr (idType bndr) , text "Join arity:" <+> ppr arity , text "Body type:" <+> ppr body_ty ]) | otherwise = return () checkJoinOcc :: Id -> JoinArity -> LintM () -- Check that if the occurrence is a JoinId, then so is the -- binding site, and it's a valid join Id checkJoinOcc var n_args | JoinPoint join_arity_occ <- idJoinPointHood var = do { mb_join_arity_bndr <- lookupJoinId var ; case mb_join_arity_bndr of { NotJoinPoint -> do { join_set <- getValidJoins ; addErrL (text "join set " <+> ppr join_set $$ invalidJoinOcc var) } ; JoinPoint join_arity_bndr -> do { checkL (join_arity_bndr == join_arity_occ) $ -- Arity differs at binding site and occurrence mkJoinBndrOccMismatchMsg var join_arity_bndr join_arity_occ ; checkL (n_args == join_arity_occ) $ -- Arity doesn't match #args mkBadJumpMsg var join_arity_occ n_args } } } | otherwise = return () checkTypeDataConOcc :: String -> DataCon -> LintM () -- Check that the Id is not a data constructor of a `type data` declaration -- Invariant (I1) of Note [Type data declarations] in GHC.Rename.Module checkTypeDataConOcc what dc = checkL (not (isTypeDataTyCon (dataConTyCon dc))) $ (text "type data constructor found in a" <+> text what <> colon <+> ppr dc) {- -- | Check that a use of a dataToTag# primop satisfies conditions DTT2 -- and DTT3 from Note [DataToTag overview] in GHC.Tc.Instance.Class -- -- Ignores applications not headed by dataToTag# primops. -- Commented out because GHC.PrimopWrappers doesn't respect this condition yet. -- See wrinkle DTW7 in Note [DataToTag overview]. 
checkDataToTagPrimOpTyCon
  :: CoreExpr  -- ^ the function (head of the application) we are checking
  -> [CoreArg] -- ^ The arguments to the application
  -> LintM ()
checkDataToTagPrimOpTyCon (Var fun_id) args
  | Just op <- isPrimOpId_maybe fun_id
  , op == DataToTagSmallOp || op == DataToTagLargeOp
  = case args of
      Type _levity : Type dty : _rest
        | Just (tc, _) <- splitTyConApp_maybe dty
        , isValidDTT2TyCon tc
        -> do platform <- getPlatform
              let numConstrs = tyConFamilySize tc
                  isSmallOp = op == DataToTagSmallOp
              checkL (isSmallFamily platform numConstrs == isSmallOp) $
                text "dataToTag# primop-size/tycon-family-size mismatch"
        | otherwise
        -> failWithL $ text "dataToTagLarge# used at non-ADT type:"
                         <+> ppr dty
      _ -> failWithL $ text "dataToTagLarge# needs two type arguments but has args:"
                         <+> ppr (take 2 args)
checkDataToTagPrimOpTyCon _ _ = pure ()
-}

-- | Check representation-polymorphic invariants in an application of a
-- built-in function or newtype constructor.
--
-- See Note [Linting representation-polymorphic builtins].
checkRepPolyBuiltin
  :: CoreExpr   -- ^ the function (head of the application) we are checking
  -> [CoreArg]  -- ^ the arguments to the application
  -> LintedType -- ^ the instantiated type of the overall application
  -> LintM ()
checkRepPolyBuiltin (Var fun_id) args app_ty
  = do { do_rep_poly_checks <- lf_check_fixed_rep <$> getLintFlags
         -- hasNoBinding: only built-in things (no real code to compile)
         -- may legitimately be representation-polymorphic; everything
         -- else was already rejected earlier.
       ; when (do_rep_poly_checks && hasNoBinding fun_id) $
         if -- (2) representation-polymorphic unlifted newtypes
            | Just dc <- isDataConId_maybe fun_id
            , isNewDataCon dc
            -> if tcHasFixedRuntimeRep $ dataConTyCon dc
               then return ()
               else checkRepPolyNewtypeApp dc args app_ty
            -- (1) representation-polymorphic builtins
            | otherwise
            -> checkRepPolyBuiltinApp fun_id args }
checkRepPolyBuiltin _ _ _ = return ()

-- Lint an (possibly unsaturated) application of an unlifted newtype
-- constructor whose type constructor does not have a fixed runtime
-- representation.
checkRepPolyNewtypeApp :: DataCon -> [CoreArg] -> LintedType -> LintM ()
checkRepPolyNewtypeApp nt args app_ty
  -- If the newtype is saturated, we're OK.
  | any isValArg args
  = return ()
  -- Otherwise, check we can eta-expand.
  | otherwise
  = case getRuntimeArgTys app_ty of
      (Scaled _ first_val_arg_ty, _):_
        | not $ typeHasFixedRuntimeRep first_val_arg_ty
        -> failWithL (err_msg first_val_arg_ty)
      _ -> return ()
  where
    err_msg :: Type -> SDoc
    err_msg bad_arg_ty
      = vcat [ text "Cannot eta expand unlifted newtype constructor"
                 <+> quotes (ppr nt) <> dot
             , text "Its argument type does not have a fixed runtime representation:"
             , nest 2 $ ppr_ty_ki bad_arg_ty ]

    ppr_ty_ki :: Type -> SDoc
    ppr_ty_ki ty = bullet <+> ppr ty <+> dcolon <+> ppr (typeKind ty)

-- Check that every "must be concrete" type variable of a built-in
-- function has been instantiated with a concrete type argument.
checkRepPolyBuiltinApp :: Id -> [CoreArg] -> LintM ()
checkRepPolyBuiltinApp fun_id args = checkL (null not_concs) err_msg
  where
    -- 1-indexed positions of the binders that must be concrete,
    -- together with why they must be concrete.
    conc_binder_positions :: IntMap ConcreteTvOrigin
    conc_binder_positions =
      concreteTyVarPositions fun_id $ idDetailsConcreteTvs $ idDetails fun_id

    max_pos :: Int
    max_pos = case IntMap.keys conc_binder_positions of
                []        -> 0
                positions -> maximum positions

    -- The concreteness violations found in this application, if any.
    not_concs :: [(SDoc, ConcreteTvOrigin)]
    not_concs =
      mapMaybe is_bad
        (zip [1..max_pos] (map Just args ++ repeat Nothing)) -- NB: 1-indexed

    is_bad :: (Int, Maybe CoreArg) -> Maybe (SDoc, ConcreteTvOrigin)
    is_bad (pos, mb_arg)
      | Just conc_reason <- IntMap.lookup pos conc_binder_positions
      , Just bad_ty <-
          case mb_arg of
            Just (Type ki)
              | isConcreteType ki
              -> Nothing
              | otherwise
              -- Here we handle the situation in which a "must be concrete" TyVar
              -- has been instantiated with a type that is not concrete.
              -> Just $ quotes (ppr ki) <+> text "is not concrete."
            -- We expected a type argument in this position, and got something else: panic!
            Just arg ->
              pprPanic "checkRepPolyBuiltinApp: expected a type in this position" $
                vcat [ text "fun_id:" <+> ppr fun_id <+> dcolon <+> ppr (idType fun_id)
                     , text "pos:" <+> ppr pos
                     , text "arg:" <+> ppr arg ]
            Nothing ->
              -- Here we handle the situation in which a "must be concrete" TyVar
              -- has not been instantiated at all.
              case conc_reason of
                ConcreteFRR frr_orig ->
                  let ty = frr_type frr_orig
                  in Just $ ppr ty <+> dcolon <+> ppr (typeKind ty)
      = Just (bad_ty, conc_reason)
      | otherwise
      = Nothing

    err_msg :: SDoc
    err_msg = vcat $ map ((bullet <+>) . ppr_not_conc) not_concs

    ppr_not_conc :: (SDoc, ConcreteTvOrigin) -> SDoc
    ppr_not_conc (bad_ty, conc) =
      vcat [ ppr_conc_orig conc
           , nest 2 bad_ty ]

    ppr_conc_orig :: ConcreteTvOrigin -> SDoc
    ppr_conc_orig (ConcreteFRR frr_orig) =
      case frr_orig of
        FixedRuntimeRepOrigin { frr_context = ctxt } ->
          hsep [ ppr ctxt, text "does not have a fixed runtime representation:" ]

-- | Compute the 1-indexed positions in the outer forall'd quantified type variables
-- of the type in which the concrete type variables occur.
--
-- See Note [Representation-polymorphism checking built-ins] in GHC.Tc.Utils.Concrete.
concreteTyVarPositions :: Id -> ConcreteTyVars -> IntMap ConcreteTvOrigin
concreteTyVarPositions fun_id conc_tvs
  | isNullUFM conc_tvs
  = IntMap.empty
  | otherwise
  = case splitForAllTyCoVars (idType fun_id) of
      ([], _)  -> IntMap.empty
      (tvs, _) ->
        let positions =
              IntMap.fromList
                [ (pos, conc_orig)
                | (tv, pos) <- zip tvs [1..]
                , conc_orig <- maybeToList $ lookupNameEnv conc_tvs (tyVarName tv) ]
            -- Assert that we have as many positions as concrete type variables,
            -- i.e. we are not missing any concreteness information.
        in assertPpr (sizeUFM conc_tvs == length positions)
             (vcat [ text "concreteTyVarPositions: missing concreteness information"
                   , text "fun_id:" <+> ppr fun_id
                   , text "tvs:" <+> ppr tvs
                   , text "Expected # of concrete tvs:" <+> ppr (sizeUFM conc_tvs)
                   , text "  Actual # of concrete tvs:" <+> ppr (length positions) ])
             positions

-- Check that the usage of var is consistent with var itself, and pop the var
-- from the usage environment (this is important because of shadowing).
checkLinearity :: UsageEnv -> Var -> LintM UsageEnv
checkLinearity body_ue lam_var =
  case varMultMaybe lam_var of
    Just mult -> do
      -- popUE removes lam_var from the usage environment, so that an
      -- outer binder shadowed by lam_var is unaffected.
      let (lhs, body_ue') = popUE body_ue lam_var
          err_msg = text "Linearity failure in lambda:" <+> ppr lam_var
                    $$ ppr lhs <+> text "⊈" <+> ppr mult
      ensureSubUsage lhs mult err_msg
      return body_ue'
    Nothing -> return body_ue -- A type variable

{- Note [Join points and casts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You might think that this should be OK:
   join j x = rhs in
   (case e of
      A   -> alt1
      B x -> (jump j x) |> co)

You might think that, since the cast is ultimately erased, the jump to
`j` should still be OK as a join point.  But no!  See #21716.  Suppose

  newtype Age = MkAge Int   -- axAge :: Age ~ Int
  f :: Int -> ...           -- f strict in its first argument

and consider the expression

   f (join j :: Bool -> Age
           j x = (rhs1 :: Age)
      in case v of
           Just x  -> (j x |> axAge :: Int)
           Nothing -> rhs2)

Then, if the Simplifier pushes the strict call into the join points
and alternatives we'll get

   join j' x = f (rhs1 :: Age)
   in case v of
        Just x  -> j' x |> axAge
        Nothing -> f rhs2

Utterly bogus.  `f` expects an `Int` and we are giving it an `Age`.
No no no.  Casts destroy the tail-call property.  Hence markAllJoinsBad
in the (Cast expr co) case of lintCoreExpr.

Note [No alternatives lint check]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Case expressions with no alternatives are odd beasts, and it would seem
like they would be worth looking at in the linter (cf #10180).  We used
to check two things:

* exprIsHNF is false: it would *seem* to be terribly wrong if the
  scrutinee was already in head normal form.

* exprIsDeadEnd is true: we should be able to see why GHC believes the
  scrutinee is diverging for sure.

It was already known that the second test was not entirely reliable.
Unfortunately (#13990), the first test turned out not to be reliable
either.  Getting the checks right turns out to be somewhat complicated.
For example, suppose we have (comment 8) data T a where TInt :: T Int absurdTBool :: T Bool -> a absurdTBool v = case v of data Foo = Foo !(T Bool) absurdFoo :: Foo -> a absurdFoo (Foo x) = absurdTBool x GHC initially accepts the empty case because of the GADT conditions. But then we inline absurdTBool, getting absurdFoo (Foo x) = case x of x is in normal form (because the Foo constructor is strict) but the case is empty. To avoid this problem, GHC would have to recognize that matching on Foo x is already absurd, which is not so easy. More generally, we don't really know all the ways that GHC can lose track of why an expression is bottom, so we shouldn't make too much fuss when that happens. Note [Beta redexes] ~~~~~~~~~~~~~~~~~~~ Consider: join j @x y z = ... in (\@x y z -> jump j @x y z) @t e1 e2 This is clearly ill-typed, since the jump is inside both an application and a lambda, either of which is enough to disqualify it as a tail call (see Note [Invariants on join points] in GHC.Core). However, strictly from a lambda-calculus perspective, the term doesn't go wrong---after the two beta reductions, the jump *is* a tail call and everything is fine. Why would we want to allow this when we have let? One reason is that a compound beta redex (that is, one with more than one argument) has different scoping rules: naively reducing the above example using lets will capture any free occurrence of y in e2. More fundamentally, type lets are tricky; many passes, such as Float Out, tacitly assume that the incoming program's type lets have all been dealt with by the simplifier. Thus we don't want to let-bind any types in, say, GHC.Core.Subst.simpleOptPgm, which in some circumstances can run immediately before Float Out. All that said, currently GHC.Core.Subst.simpleOptPgm is the only thing using this loophole, doing so to avoid re-traversing large functions (beta-reducing a type lambda without introducing a type let requires a substitution). 
TODO: Improve simpleOptPgm so that we can forget all this ever happened. ************************************************************************ * * \subsection[lintCoreArgs]{lintCoreArgs} * * ************************************************************************ The basic version of these functions checks that the argument is a subtype of the required type, as one would expect. -} -- Takes the functions type and arguments as argument. -- Returns the *result* of applying the function to arguments. -- e.g. f :: Int -> Bool -> Int would return `Int` as result type. lintCoreArgs :: (LintedType, UsageEnv) -> [CoreArg] -> LintM (LintedType, UsageEnv) lintCoreArgs (fun_ty, fun_ue) args = foldM lintCoreArg (fun_ty, fun_ue) args lintCoreArg :: (LintedType, UsageEnv) -> CoreArg -> LintM (LintedType, UsageEnv) -- Type argument lintCoreArg (fun_ty, ue) (Type arg_ty) = do { checkL (not (isCoercionTy arg_ty)) (text "Unnecessary coercion-to-type injection:" <+> ppr arg_ty) ; arg_ty' <- lintType arg_ty ; res <- lintTyApp fun_ty arg_ty' ; return (res, ue) } -- Coercion argument lintCoreArg (fun_ty, ue) (Coercion co) = do { co' <- addLoc (InCo co) $ lintCoercion co ; res <- lintCoApp fun_ty co' ; return (res, ue) } -- Other value argument lintCoreArg (fun_ty, fun_ue) arg = do { (arg_ty, arg_ue) <- markAllJoinsBad $ lintCoreExpr arg -- See Note [Representation polymorphism invariants] in GHC.Core ; flags <- getLintFlags ; when (lf_check_fixed_rep flags) $ -- Only check that 'arg_ty' has a fixed RuntimeRep -- if 'lf_check_fixed_rep' is on. 
do { checkL (typeHasFixedRuntimeRep arg_ty) (text "Argument does not have a fixed runtime representation" <+> ppr arg <+> dcolon <+> parens (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))) } ; lintValApp arg fun_ty arg_ty fun_ue arg_ue } ----------------- lintAltBinders :: UsageEnv -> Var -- Case binder -> LintedType -- Scrutinee type -> LintedType -- Constructor type -> [(Mult, OutVar)] -- Binders -> LintM UsageEnv -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintAltBinders rhs_ue _case_bndr scrut_ty con_ty [] = do { ensureEqTys con_ty scrut_ty (mkBadPatMsg con_ty scrut_ty) ; return rhs_ue } lintAltBinders rhs_ue case_bndr scrut_ty con_ty ((var_w, bndr):bndrs) | isTyVar bndr = do { con_ty' <- lintTyApp con_ty (mkTyVarTy bndr) ; lintAltBinders rhs_ue case_bndr scrut_ty con_ty' bndrs } | otherwise = do { (con_ty', _) <- lintValApp (Var bndr) con_ty (idType bndr) zeroUE zeroUE -- We can pass zeroUE to lintValApp because we ignore its usage -- calculation and compute it in the call for checkCaseLinearity below. 
; rhs_ue' <- checkCaseLinearity rhs_ue case_bndr var_w bndr ; lintAltBinders rhs_ue' case_bndr scrut_ty con_ty' bndrs } -- | Implements the case rules for linearity checkCaseLinearity :: UsageEnv -> Var -> Mult -> Var -> LintM UsageEnv checkCaseLinearity ue case_bndr var_w bndr = do ensureSubUsage lhs rhs err_msg lintLinearBinder (ppr bndr) (case_bndr_w `mkMultMul` var_w) (varMult bndr) return $ deleteUE ue bndr where lhs = bndr_usage `addUsage` (var_w `scaleUsage` case_bndr_usage) rhs = case_bndr_w `mkMultMul` var_w err_msg = (text "Linearity failure in variable:" <+> ppr bndr $$ ppr lhs <+> text "⊈" <+> ppr rhs $$ text "Computed by:" <+> text "LHS:" <+> lhs_formula <+> text "RHS:" <+> rhs_formula) lhs_formula = ppr bndr_usage <+> text "+" <+> parens (ppr case_bndr_usage <+> text "*" <+> ppr var_w) rhs_formula = ppr case_bndr_w <+> text "*" <+> ppr var_w case_bndr_w = varMult case_bndr case_bndr_usage = lookupUE ue case_bndr bndr_usage = lookupUE ue bndr ----------------- lintTyApp :: LintedType -> LintedType -> LintM LintedType lintTyApp fun_ty arg_ty | Just (tv,body_ty) <- splitForAllTyVar_maybe fun_ty = do { lintTyKind tv arg_ty ; in_scope <- getInScope -- substTy needs the set of tyvars in scope to avoid generating -- uniques that are already in scope. 
-- See Note [The substitution invariant] in GHC.Core.TyCo.Subst ; return (substTyWithInScope in_scope [tv] [arg_ty] body_ty) } | otherwise = failWithL (mkTyAppMsg fun_ty arg_ty) ----------------- lintCoApp :: LintedType -> LintedCoercion -> LintM LintedType lintCoApp fun_ty co | Just (cv,body_ty) <- splitForAllCoVar_maybe fun_ty , let co_ty = coercionType co cv_ty = idType cv , cv_ty `eqType` co_ty = do { in_scope <- getInScope ; let init_subst = mkEmptySubst in_scope subst = extendCvSubst init_subst cv co ; return (substTy subst body_ty) } | Just (_, _, arg_ty', res_ty') <- splitFunTy_maybe fun_ty , co_ty `eqType` arg_ty' = return (res_ty') | otherwise = failWithL (mkCoAppMsg fun_ty co) where co_ty = coercionType co ----------------- -- | @lintValApp arg fun_ty arg_ty@ lints an application of @fun arg@ -- where @fun :: fun_ty@ and @arg :: arg_ty@, returning the type of the -- application. lintValApp :: CoreExpr -> LintedType -> LintedType -> UsageEnv -> UsageEnv -> LintM (LintedType, UsageEnv) lintValApp arg fun_ty arg_ty fun_ue arg_ue | Just (_, w, arg_ty', res_ty') <- splitFunTy_maybe fun_ty = do { ensureEqTys arg_ty' arg_ty (mkAppMsg arg_ty' arg_ty arg) ; let app_ue = addUE fun_ue (scaleUE w arg_ue) ; return (res_ty', app_ue) } | otherwise = failWithL err2 where err2 = mkNonFunAppMsg fun_ty arg_ty arg lintTyKind :: OutTyVar -> LintedType -> LintM () -- Both args have had substitution applied -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintTyKind tyvar arg_ty = unless (arg_kind `eqType` tyvar_kind) $ addErrL (mkKindErrMsg tyvar arg_ty $$ (text "Linted Arg kind:" <+> ppr arg_kind)) where tyvar_kind = tyVarKind tyvar arg_kind = typeKind arg_ty {- ************************************************************************ * * \subsection[lintCoreAlts]{lintCoreAlts} * * ************************************************************************ -} lintCaseExpr :: CoreExpr -> Id -> Type -> [CoreAlt] -> LintM 
(LintedType, UsageEnv) lintCaseExpr scrut var alt_ty alts = do { let e = Case scrut var alt_ty alts -- Just for error messages -- Check the scrutinee ; (scrut_ty, scrut_ue) <- markAllJoinsBad $ lintCoreExpr scrut -- See Note [Join points are less general than the paper] -- in GHC.Core ; let scrut_mult = varMult var ; alt_ty <- addLoc (CaseTy scrut) $ lintValueType alt_ty ; var_ty <- addLoc (IdTy var) $ lintValueType (idType var) -- We used to try to check whether a case expression with no -- alternatives was legitimate, but this didn't work. -- See Note [No alternatives lint check] for details. -- Check that the scrutinee is not a floating-point type -- if there are any literal alternatives -- See GHC.Core Note [Case expression invariants] item (5) -- See Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold ; let isLitPat (Alt (LitAlt _) _ _) = True isLitPat _ = False ; checkL (not $ isFloatingPrimTy scrut_ty && any isLitPat alts) (text "Lint warning: Scrutinising floating-point expression with literal pattern in case analysis (see #9238)." 
$$ text "scrut" <+> ppr scrut) ; case tyConAppTyCon_maybe (idType var) of Just tycon | debugIsOn , isAlgTyCon tycon , not (isAbstractTyCon tycon) , null (tyConDataCons tycon) , not (exprIsDeadEnd scrut) -> pprTrace "Lint warning: case binder's type has no constructors" (ppr var <+> ppr (idType var)) -- This can legitimately happen for type families $ return () _otherwise -> return () -- Don't use lintIdBndr on var, because unboxed tuple is legitimate ; subst <- getSubst ; ensureEqTys var_ty scrut_ty (mkScrutMsg var var_ty scrut_ty subst) -- See GHC.Core Note [Case expression invariants] item (7) ; lintBinder CaseBind var $ \_ -> do { -- Check the alternatives ; alt_ues <- mapM (lintCoreAlt var scrut_ty scrut_mult alt_ty) alts ; let case_ue = (scaleUE scrut_mult scrut_ue) `addUE` supUEs alt_ues ; checkCaseAlts e scrut_ty alts ; return (alt_ty, case_ue) } } checkCaseAlts :: CoreExpr -> LintedType -> [CoreAlt] -> LintM () -- a) Check that the alts are non-empty -- b1) Check that the DEFAULT comes first, if it exists -- b2) Check that the others are in increasing order -- c) Check that there's a default for infinite types -- NB: Algebraic cases are not necessarily exhaustive, because -- the simplifier correctly eliminates case that can't -- possibly match. checkCaseAlts e ty alts = do { checkL (all non_deflt con_alts) (mkNonDefltMsg e) -- See GHC.Core Note [Case expression invariants] item (2) ; checkL (increasing_tag con_alts) (mkNonIncreasingAltsMsg e) -- See GHC.Core Note [Case expression invariants] item (3) -- For types Int#, Word# with an infinite (well, large!) number of -- possible values, there should usually be a DEFAULT case -- But (see Note [Empty case alternatives] in GHC.Core) it's ok to -- have *no* case alternatives. -- In effect, this is a kind of partial test. I suppose it's possible -- that we might *know* that 'x' was 1 or 2, in which case -- case x of { 1 -> e1; 2 -> e2 } -- would be fine. 
; checkL (isJust maybe_deflt || not is_infinite_ty || null alts) (nonExhaustiveAltsMsg e) } where (con_alts, maybe_deflt) = findDefault alts -- Check that successive alternatives have strictly increasing tags increasing_tag (alt1 : rest@( alt2 : _)) = alt1 `ltAlt` alt2 && increasing_tag rest increasing_tag _ = True non_deflt (Alt DEFAULT _ _) = False non_deflt _ = True is_infinite_ty = case tyConAppTyCon_maybe ty of Nothing -> False Just tycon -> isPrimTyCon tycon lintAltExpr :: CoreExpr -> LintedType -> LintM UsageEnv lintAltExpr expr ann_ty = do { (actual_ty, ue) <- lintCoreExpr expr ; ensureEqTys actual_ty ann_ty (mkCaseAltMsg expr actual_ty ann_ty) ; return ue } -- See GHC.Core Note [Case expression invariants] item (6) lintCoreAlt :: Var -- Case binder -> LintedType -- Type of scrutinee -> Mult -- Multiplicity of scrutinee -> LintedType -- Type of the alternative -> CoreAlt -> LintM UsageEnv -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoreAlt case_bndr _ scrut_mult alt_ty (Alt DEFAULT args rhs) = do { lintL (null args) (mkDefaultArgsMsg args) ; rhs_ue <- lintAltExpr rhs alt_ty ; let (case_bndr_usage, rhs_ue') = popUE rhs_ue case_bndr err_msg = text "Linearity failure in the DEFAULT clause:" <+> ppr case_bndr $$ ppr case_bndr_usage <+> text "⊈" <+> ppr scrut_mult ; ensureSubUsage case_bndr_usage scrut_mult err_msg ; return rhs_ue' } lintCoreAlt case_bndr scrut_ty _ alt_ty (Alt (LitAlt lit) args rhs) | litIsLifted lit = failWithL integerScrutinisedMsg | otherwise = do { lintL (null args) (mkDefaultArgsMsg args) ; ensureEqTys lit_ty scrut_ty (mkBadPatMsg lit_ty scrut_ty) ; rhs_ue <- lintAltExpr rhs alt_ty ; return (deleteUE rhs_ue case_bndr) -- No need for linearity checks } where lit_ty = literalType lit lintCoreAlt case_bndr scrut_ty _scrut_mult alt_ty alt@(Alt (DataAlt con) args rhs) | isNewTyCon (dataConTyCon con) = zeroUE <$ addErrL (mkNewTyDataConAltMsg scrut_ty alt) | Just (tycon, tycon_arg_tys) <- 
splitTyConApp_maybe scrut_ty = addLoc (CaseAlt alt) $ do { checkTypeDataConOcc "pattern" con ; lintL (tycon == dataConTyCon con) (mkBadConMsg tycon con) -- Instantiate the universally quantified -- type variables of the data constructor ; let { con_payload_ty = piResultTys (dataConRepType con) tycon_arg_tys ; binderMult (Named _) = ManyTy ; binderMult (Anon st _) = scaledMult st -- See Note [Validating multiplicities in a case] ; multiplicities = map binderMult $ fst $ splitPiTys con_payload_ty } -- And now bring the new binders into scope ; lintBinders CasePatBind args $ \ args' -> do { rhs_ue <- lintAltExpr rhs alt_ty ; rhs_ue' <- addLoc (CasePat alt) (lintAltBinders rhs_ue case_bndr scrut_ty con_payload_ty (zipEqual "lintCoreAlt" multiplicities args')) ; return $ deleteUE rhs_ue' case_bndr } } | otherwise -- Scrut-ty is wrong shape = zeroUE <$ addErrL (mkBadAltMsg scrut_ty alt) {- Note [Validating multiplicities in a case] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose 'MkT :: a %m -> T m a'. If we are validating 'case (x :: T Many a) of MkT y -> ...', we have to substitute m := Many in the type of MkT - in particular, y can be used Many times and that expression would still be linear in x. We do this by looking at con_payload_ty, which is the type of the datacon applied to the surrounding arguments. Testcase: linear/should_compile/MultConstructor Data constructors containing existential tyvars will then have Named binders, which are always multiplicity Many. 
-- | Lint a list of binders one at a time, left to right, bringing each
-- linted binder into scope for the ones that follow, and finally pass
-- all the linted binders to the continuation.
--
-- When we lint binders, we (one at a time and in order):
--   1. Lint var types or kinds (possibly substituting)
--   2. Add the binder to the in-scope set, and if it's a coercion var,
--      we may extend the substitution to reflect its (possibly) new kind
lintBinders :: BindingSite -> [Var] -> ([Var] -> LintM a) -> LintM a
lintBinders _    []     cont = cont []
lintBinders site (b:bs) cont
  = lintBinder site b $ \b' ->
    lintBinders site bs $ \bs' ->
    cont (b' : bs')
-- | Lint a group of let-bound Ids, one at a time, bringing each linted
-- Id into scope for the rest, then run the continuation on the linted Ids.
lintIdBndrs :: forall a. TopLevelFlag -> [Id] -> ([LintedId] -> LintM a) -> LintM a
lintIdBndrs top_lvl ids thing_inside = walk ids thing_inside
  where
    walk :: [Id] -> ([Id] -> LintM a) -> LintM a
    walk []       k = k []
    walk (b : bs) k = lintIdBndr top_lvl LetBind b $ \b' ->
                      walk bs $ \bs' ->
                      k (b' : bs')
-- | Lint a type (not a kind), applying the ambient substitution, and
-- check that its kind is TYPE r or CONSTRAINT r.  Returns the linted type.
-- See Note [Linting type lets]
lintValueType :: Type -> LintM LintedType
lintValueType ty = addLoc (InType ty) $ do
  { linted_ty <- lintType ty
  ; let ki = typeKind linted_ty
  ; lintL (isTYPEorCONSTRAINT ki) $
      hang (text "Ill-kinded type:" <+> ppr ty)
         2 (text "has kind:" <+> ppr ki)
  ; return linted_ty }
-- arrows can relate *unlifted* kinds, so this has to be separate from -- a dependent forall.
lintType ty@(FunTy af tw t1 t2) = do { t1' <- lintType t1 ; t2' <- lintType t2 ; tw' <- lintType tw ; lintArrow (text "type or kind" <+> quotes (ppr ty)) t1' t2' tw' ; let real_af = chooseFunTyFlag t1 t2 ; unless (real_af == af) $ addErrL $ hang (text "Bad FunTyFlag in FunTy") 2 (vcat [ ppr ty , text "FunTyFlag =" <+> ppr af , text "Computed FunTyFlag =" <+> ppr real_af ]) ; return (FunTy af tw' t1' t2') } lintType ty@(ForAllTy (Bndr tcv vis) body_ty) | not (isTyCoVar tcv) = failWithL (text "Non-Tyvar or Non-Covar bound in type:" <+> ppr ty) | otherwise = lintTyCoBndr tcv $ \tcv' -> do { body_ty' <- lintType body_ty ; lintForAllBody tcv' body_ty' ; when (isCoVar tcv) $ lintL (tcv `elemVarSet` tyCoVarsOfType body_ty) $ text "Covar does not occur in the body:" <+> (ppr tcv $$ ppr body_ty) -- See GHC.Core.TyCo.Rep Note [Unused coercion variable in ForAllTy] ; return (ForAllTy (Bndr tcv' vis) body_ty') } lintType ty@(LitTy l) = do { lintTyLit l; return ty } lintType (CastTy ty co) = do { ty' <- lintType ty ; co' <- lintStarCoercion co ; let tyk = typeKind ty' cok = coercionLKind co' ; ensureEqTys tyk cok (mkCastTyErr ty co tyk cok) ; return (CastTy ty' co') } lintType (CoercionTy co) = do { co' <- lintCoercion co ; return (CoercionTy co') } ----------------- lintForAllBody :: LintedTyCoVar -> LintedType -> LintM () -- Do the checks for the body of a forall-type lintForAllBody tcv body_ty = do { checkValueType body_ty (text "the body of forall:" <+> ppr body_ty) -- For type variables, check for skolem escape -- See Note [Phantom type variables in kinds] in GHC.Core.Type -- The kind of (forall cv. 
-- | Confirm that a (linted) type really has kind TYPE r or Constraint,
-- reporting the supplied context document on failure.
checkValueType :: LintedType -> SDoc -> LintM ()
checkValueType ty doc =
  lintL (isTYPEorCONSTRAINT ki)
        (text "Non-Type-like kind when Type-like expected:" <+> ppr ki $$
         text "when checking" <+> doc)
  where
    ki = typeKind ty
-- | Lint a type-level literal.  String and character literals are always
-- well-formed; numeric literals must be non-negative.
lintTyLit :: TyLit -> LintM ()
lintTyLit (StrTyLit _)  = return ()
lintTyLit (CharTyLit _) = return ()
lintTyLit (NumTyLit n)
  | n < 0     = failWithL (text "Negative type literal:" <+> integer n)
  | otherwise = return ()
This reduced allocations by up to 7% for some -- modules. go_app :: InScopeSet -> LintedKind -> [Type] -> LintM () go_app !in_scope !kfn ta | Just kfn' <- coreView kfn = go_app in_scope kfn' ta go_app _in_scope _kind [] = return () go_app in_scope fun_kind@(FunTy _ _ kfa kfb) (ta:tas) = do { let ka = typeKind ta ; unless (ka `eqType` kfa) $ addErrL (lint_app_fail_msg kfn arg_tys mk_msg msg_type (text "Fun:" <+> (ppr fun_kind $$ ppr ta <+> dcolon <+> ppr ka))) ; go_app in_scope kfb tas } go_app in_scope (ForAllTy (Bndr kv _vis) kfn) (ta:tas) = do { let kv_kind = varType kv ka = typeKind ta ; unless (ka `eqType` kv_kind) $ addErrL (lint_app_fail_msg kfn arg_tys mk_msg msg_type (text "Forall:" <+> (ppr kv $$ ppr kv_kind $$ ppr ta <+> dcolon <+> ppr ka))) ; let kind' = substTy (extendTCvSubst (mkEmptySubst in_scope) kv ta) kfn ; go_app in_scope kind' tas } go_app _ kfn ta = failWithL (lint_app_fail_msg kfn arg_tys mk_msg msg_type (text "Not a fun:" <+> (ppr kfn $$ ppr ta))) -- This is a top level definition to ensure we pass all variables of the error message -- explicitly and don't capture them as free variables. Otherwise this binder might -- become a thunk that get's allocated in the hot code path. -- See Note [Avoiding compiler perf traps when constructing error messages.] 
-- | Build the error message for a kind-application failure in lint_app.
-- Kept as a top-level definition, with every variable of the message
-- passed explicitly, so it cannot be captured as a free variable and
-- become a thunk allocated on the hot code path.
-- See Note [Avoiding compiler perf traps when constructing error messages.]
lint_app_fail_msg :: (Outputable a1, Outputable a2)
                  => a1 -> a2 -> (t -> SDoc) -> t -> SDoc -> SDoc
lint_app_fail_msg fun_kind arg_tys mk_msg msg_thing extra
  = vcat [ hang (text "Kind application error in") 2 (mk_msg msg_thing)
         , nest 2 (text "Function kind =" <+> ppr fun_kind)
         , nest 2 (text "Arg types =" <+> ppr arg_tys)
         , extra ]
f (x |> c1 |> c2) = .... But simplExpr collapses those coercions into one. (Indeed in #10602, it collapsed to the identity and was removed altogether.) We don't have a great story for what to do here, but at least this check will nail it. NB (#11643): it's possible that a variable listed in the binders becomes not-mentioned on both LHS and RHS. Here's a silly example: RULE forall x y. f (g x y) = g (x+1) (y-1) And suppose worker/wrapper decides that 'x' is Absent. Then we'll end up with RULE forall x y. f ($gw y) = $gw (x+1) This seems sufficiently obscure that there isn't enough payoff to try to trim the forall'd binder list. Note [Rules for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A join point cannot be partially applied. However, the left-hand side of a rule for a join point is effectively a *pattern*, not a piece of code, so there's an argument to be made for allowing a situation like this: join $sj :: Int -> Int -> String $sj n m = ... j :: forall a. Eq a => a -> a -> String {-# RULES "SPEC j" jump j @ Int $dEq = jump $sj #-} j @a $dEq x y = ... Applying this rule can't turn a well-typed program into an ill-typed one, so conceivably we could allow it. But we can always eta-expand such an "undersaturated" rule (see 'GHC.Core.Opt.Arity.etaExpandToJoinPointRule'), and in fact the simplifier would have to in order to deal with the RHS. So we take a conservative view and don't allow undersaturated rules for join points. See Note [Join points and unfoldings/rules] in "GHC.Core.Opt.OccurAnal" for further discussion. -} {- ************************************************************************ * * Linting coercions * * ************************************************************************ -} {- Note [Asymptotic efficiency] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When linting coercions (and types actually) we return a linted (substituted) coercion. Then we often have to take the coercionKind of that returned coercion. 
-- | Lint a coercion, additionally confirming that both of its end-point
-- kinds are Type-like and that its role is Nominal.
lintStarCoercion :: InCoercion -> LintM LintedCoercion
lintStarCoercion g = do
  { g' <- lintCoercion g
  ; let Pair lhs rhs = coercionKind g'
  ; checkValueType lhs (text "the kind of the left type in" <+> ppr g)
  ; checkValueType rhs (text "the kind of the right type in" <+> ppr g)
  ; lintRole g Nominal (coercionRole g)
  ; return g' }
application of" <+> quotes (ppr tc)) 2 (ppr co)) -- All saturated TyConAppCos should be FunCos | Just {} <- synTyConDefn_maybe tc = failWithL (text "Synonym in TyConAppCo:" <+> ppr co) | otherwise = do { checkTyCon tc ; cos' <- mapM lintCoercion cos ; let (co_kinds, co_roles) = unzip (map coercionKindRole cos') ; lint_co_app co (tyConKind tc) (map pFst co_kinds) ; lint_co_app co (tyConKind tc) (map pSnd co_kinds) ; zipWithM_ (lintRole co) (tyConRoleListX r tc) co_roles ; return (TyConAppCo r tc cos') } lintCoercion co@(AppCo co1 co2) | TyConAppCo {} <- co1 = failWithL (text "TyConAppCo to the left of AppCo:" <+> ppr co) | Just (TyConApp {}, _) <- isReflCo_maybe co1 = failWithL (text "Refl (TyConApp ...) to the left of AppCo:" <+> ppr co) | otherwise = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; let (Pair lk1 rk1, r1) = coercionKindRole co1' (Pair lk2 rk2, r2) = coercionKindRole co2' ; lint_co_app co (typeKind lk1) [lk2] ; lint_co_app co (typeKind rk1) [rk2] ; if r1 == Phantom then lintL (r2 == Phantom || r2 == Nominal) (text "Second argument in AppCo cannot be R:" $$ ppr co) else lintRole co Nominal r2 ; return (AppCo co1' co2') } ---------- lintCoercion co@(ForAllCo { fco_tcv = tcv, fco_visL = visL, fco_visR = visR , fco_kind = kind_co, fco_body = body_co }) -- See Note [ForAllCo] in GHC.Core.TyCo.Rep, -- including the typing rule for ForAllCo | not (isTyCoVar tcv) = failWithL (text "Non tyco binder in ForAllCo:" <+> ppr co) | otherwise = do { kind_co' <- lintStarCoercion kind_co ; lintTyCoBndr tcv $ \tcv' -> do { body_co' <- lintCoercion body_co ; ensureEqTys (varType tcv') (coercionLKind kind_co') $ text "Kind mis-match in ForallCo" <+> ppr co -- Assuming kind_co :: k1 ~ k2 -- Need to check that -- (forall (tcv:k1). lty) and -- (forall (tcv:k2). rty[(tcv:k2) |> sym kind_co/tcv]) -- are both well formed. 
Easiest way is to call lintForAllBody -- for each; there is actually no need to do the funky substitution ; let (Pair lty rty, body_role) = coercionKindRole body_co' ; lintForAllBody tcv' lty ; lintForAllBody tcv' rty ; when (isCoVar tcv) $ do { lintL (visL == coreTyLamForAllTyFlag && visR == coreTyLamForAllTyFlag) $ text "Invalid visibility flags in CoVar ForAllCo" <+> ppr co -- See (FC7) in Note [ForAllCo] in GHC.Core.TyCo.Rep ; lintL (almostDevoidCoVarOfCo tcv body_co) $ text "Covar can only appear in Refl and GRefl: " <+> ppr co -- See (FC6) in Note [ForAllCo] in GHC.Core.TyCo.Rep } ; when (body_role == Nominal) $ lintL (visL `eqForAllVis` visR) $ text "Nominal ForAllCo has mismatched visibilities: " <+> ppr co ; return (co { fco_tcv = tcv', fco_kind = kind_co', fco_body = body_co' }) } } lintCoercion co@(FunCo { fco_role = r, fco_afl = afl, fco_afr = afr , fco_mult = cow, fco_arg = co1, fco_res = co2 }) = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; cow' <- lintCoercion cow ; let Pair lt1 rt1 = coercionKind co1 Pair lt2 rt2 = coercionKind co2 Pair ltw rtw = coercionKind cow ; lintL (afl == chooseFunTyFlag lt1 lt2) (bad_co_msg "afl") ; lintL (afr == chooseFunTyFlag rt1 rt2) (bad_co_msg "afr") ; lintArrow (bad_co_msg "arrowl") lt1 lt2 ltw ; lintArrow (bad_co_msg "arrowr") rt1 rt2 rtw ; lintRole co1 r (coercionRole co1) ; lintRole co2 r (coercionRole co2) ; ensureEqTys (typeKind ltw) multiplicityTy (bad_co_msg "mult-l") ; ensureEqTys (typeKind rtw) multiplicityTy (bad_co_msg "mult-r") ; let expected_mult_role = case r of Phantom -> Phantom _ -> Nominal ; lintRole cow expected_mult_role (coercionRole cow) ; return (co { fco_mult = cow', fco_arg = co1', fco_res = co2' }) } where bad_co_msg s = hang (text "Bad coercion" <+> parens (text s)) 2 (vcat [ text "afl:" <+> ppr afl , text "afr:" <+> ppr afr , text "arg_co:" <+> ppr co1 , text "res_co:" <+> ppr co2 ]) -- See Note [Bad unsafe coercion] lintCoercion co@(UnivCo { uco_role = r, uco_prov = prov , 
uco_lty = ty1, uco_rty = ty2, uco_deps = deps }) = do { -- Check the role. PhantomProv must have Phantom role, otherwise any role is fine case prov of PhantomProv -> lintRole co Phantom r _ -> return () -- Check the to and from types ; ty1' <- lintType ty1 ; ty2' <- lintType ty2 ; let k1 = typeKind ty1' k2 = typeKind ty2' ; when (r /= Phantom && isTYPEorCONSTRAINT k1 && isTYPEorCONSTRAINT k2) (checkTypes ty1 ty2) -- Check the coercions on which this UnivCo depends ; deps' <- mapM lintCoercion deps ; return (co { uco_lty = ty1', uco_rty = ty2', uco_deps = deps' }) } where report s = hang (text $ "Unsafe coercion: " ++ s) 2 (vcat [ text "From:" <+> ppr ty1 , text " To:" <+> ppr ty2]) isUnBoxed :: PrimRep -> Bool isUnBoxed = not . isGcPtrRep -- see #9122 for discussion of these checks checkTypes t1 t2 = do { checkWarnL fixed_rep_1 (report "left-hand type does not have a fixed runtime representation") ; checkWarnL fixed_rep_2 (report "right-hand type does not have a fixed runtime representation") ; when (fixed_rep_1 && fixed_rep_2) $ do { checkWarnL (reps1 `equalLength` reps2) (report "between values with different # of reps") ; zipWithM_ validateCoercion reps1 reps2 }} where fixed_rep_1 = typeHasFixedRuntimeRep t1 fixed_rep_2 = typeHasFixedRuntimeRep t2 -- don't look at these unless lev_poly1/2 are False -- Otherwise, we get #13458 reps1 = typePrimRep t1 reps2 = typePrimRep t2 validateCoercion :: PrimRep -> PrimRep -> LintM () validateCoercion rep1 rep2 = do { platform <- getPlatform ; checkWarnL (isUnBoxed rep1 == isUnBoxed rep2) (report "between unboxed and boxed value") ; checkWarnL (TyCon.primRepSizeB platform rep1 == TyCon.primRepSizeB platform rep2) (report "between unboxed values of different size") ; let fl = liftM2 (==) (TyCon.primRepIsFloat rep1) (TyCon.primRepIsFloat rep2) ; case fl of Nothing -> addWarnL (report "between vector types") Just False -> addWarnL (report "between float and integral values") _ -> return () } lintCoercion (SymCo co) = do { co' <- 
lintCoercion co ; return (SymCo co') } lintCoercion co@(TransCo co1 co2) = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; let ty1b = coercionRKind co1' ty2a = coercionLKind co2' ; ensureEqTys ty1b ty2a (hang (text "Trans coercion mis-match:" <+> ppr co) 2 (vcat [ppr (coercionKind co1'), ppr (coercionKind co2')])) ; lintRole co (coercionRole co1) (coercionRole co2) ; return (TransCo co1' co2') } lintCoercion the_co@(SelCo cs co) = do { co' <- lintCoercion co ; let (Pair s t, co_role) = coercionKindRole co' ; if -- forall (both TyVar and CoVar) | Just _ <- splitForAllTyCoVar_maybe s , Just _ <- splitForAllTyCoVar_maybe t , SelForAll <- cs , (isForAllTy_ty s && isForAllTy_ty t) || (isForAllTy_co s && isForAllTy_co t) -> return (SelCo cs co') -- function | isFunTy s , isFunTy t , SelFun {} <- cs -> return (SelCo cs co') -- TyCon | Just (tc_s, tys_s) <- splitTyConApp_maybe s , Just (tc_t, tys_t) <- splitTyConApp_maybe t , tc_s == tc_t , SelTyCon n r0 <- cs , isInjectiveTyCon tc_s co_role -- see Note [SelCo and newtypes] in GHC.Core.TyCo.Rep , tys_s `equalLength` tys_t , tys_s `lengthExceeds` n -> do { lintRole the_co (tyConRole co_role tc_s n) r0 ; return (SelCo cs co') } | otherwise -> failWithL (hang (text "Bad SelCo:") 2 (ppr the_co $$ ppr s $$ ppr t)) } lintCoercion the_co@(LRCo lr co) = do { co' <- lintCoercion co ; let Pair s t = coercionKind co' r = coercionRole co' ; lintRole co Nominal r ; case (splitAppTy_maybe s, splitAppTy_maybe t) of (Just _, Just _) -> return (LRCo lr co') _ -> failWithL (hang (text "Bad LRCo:") 2 (ppr the_co $$ ppr s $$ ppr t)) } lintCoercion (InstCo co arg) = do { co' <- lintCoercion co ; arg' <- lintCoercion arg ; let Pair t1 t2 = coercionKind co' Pair s1 s2 = coercionKind arg' ; lintRole arg Nominal (coercionRole arg') ; case (splitForAllTyVar_maybe t1, splitForAllTyVar_maybe t2) of -- forall over tvar { (Just (tv1,_), Just (tv2,_)) | typeKind s1 `eqType` tyVarKind tv1 , typeKind s2 `eqType` tyVarKind tv2 -> return (InstCo 
co' arg') | otherwise -> failWithL (text "Kind mis-match in inst coercion1" <+> ppr co) ; _ -> case (splitForAllCoVar_maybe t1, splitForAllCoVar_maybe t2) of -- forall over covar { (Just (cv1, _), Just (cv2, _)) | typeKind s1 `eqType` varType cv1 , typeKind s2 `eqType` varType cv2 , CoercionTy _ <- s1 , CoercionTy _ <- s2 -> return (InstCo co' arg') | otherwise -> failWithL (text "Kind mis-match in inst coercion2" <+> ppr co) ; _ -> failWithL (text "Bad argument of inst") }}} lintCoercion this_co@(AxiomCo ax cos) = do { cos' <- mapM lintCoercion cos ; let arg_kinds :: [Pair Type] = map coercionKind cos' ; lint_roles 0 (coAxiomRuleArgRoles ax) cos' ; lint_ax ax arg_kinds ; return (AxiomCo ax cos') } where lint_ax :: CoAxiomRule -> [Pair Type] -> LintM () lint_ax (BuiltInFamRew bif) prs = checkL (isJust (bifrw_proves bif prs)) bad_bif lint_ax (BuiltInFamInj bif) prs = checkL (case prs of [pr] -> isJust (bifinj_proves bif pr) _ -> False) bad_bif lint_ax (UnbranchedAxiom ax) prs = lintBranch this_co (coAxiomTyCon ax) (coAxiomSingleBranch ax) prs lint_ax (BranchedAxiom ax ind) prs = do { checkL (0 <= ind && ind < numBranches (coAxiomBranches ax)) (bad_ax this_co (text "index out of range")) ; lintBranch this_co (coAxiomTyCon ax) (coAxiomNthBranch ax ind) prs } bad_bif = bad_ax this_co (text "Proves returns Nothing") err :: forall a. 
The coercion (axEqual[1] <*> <Int> <Int>) :: (Equal * Int Int ~ False) and that all is OK.
-} -- | Check to make sure that an axiom application is internally consistent. -- Returns the conflicting branch, if it exists -- Note [Conflict checking for axiom applications] lintBranch :: Coercion -> TyCon-> CoAxBranch -> [Pair Type] -> LintM () -- defined here to avoid dependencies in GHC.Core.Coercion -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint lintBranch this_co fam_tc branch arg_kinds | CoAxBranch { cab_tvs = ktvs, cab_cvs = cvs } <- branch = do { checkL (arg_kinds `equalLength` (ktvs ++ cvs)) $ (bad_ax this_co (text "lengths")) ; subst <- getSubst ; let empty_subst = zapSubst subst ; _ <- foldlM check_ki (empty_subst, empty_subst) (zip (ktvs ++ cvs) arg_kinds) ; case check_no_conflict flattened_target incomps of Nothing -> return () Just bad_branch -> failWithL $ bad_ax this_co $ text "inconsistent with" <+> pprCoAxBranch fam_tc bad_branch } where check_ki (subst_l, subst_r) (ktv, Pair s' t') = do { let sk' = typeKind s' tk' = typeKind t' ; let ktv_kind_l = substTy subst_l (tyVarKind ktv) ktv_kind_r = substTy subst_r (tyVarKind ktv) ; checkL (sk' `eqType` ktv_kind_l) (bad_ax this_co (text "check_ki1" <+> vcat [ ppr this_co, ppr sk', ppr ktv, ppr ktv_kind_l ] )) ; checkL (tk' `eqType` ktv_kind_r) (bad_ax this_co (text "check_ki2" <+> vcat [ ppr this_co, ppr tk', ppr ktv, ppr ktv_kind_r ] )) ; return (extendTCvSubst subst_l ktv s', extendTCvSubst subst_r ktv t') } tvs = coAxBranchTyVars branch cvs = coAxBranchCoVars branch incomps = coAxBranchIncomps branch (tys, cotys) = splitAtList tvs (map pFst arg_kinds) co_args = map stripCoercionTy cotys subst = zipTvSubst tvs tys `composeTCvSubst` zipCvSubst cvs co_args target = Type.substTys subst (coAxBranchLHS branch) in_scope = mkInScopeSet $ unionVarSets (map (tyCoVarsOfTypes . 
coAxBranchLHS) incomps) flattened_target = flattenTys in_scope target check_no_conflict :: [Type] -> [CoAxBranch] -> Maybe CoAxBranch check_no_conflict _ [] = Nothing check_no_conflict flat (b@CoAxBranch { cab_lhs = lhs_incomp } : rest) -- See Note [Apartness] in GHC.Core.FamInstEnv | SurelyApart <- tcUnifyTysFG alwaysBindFun flat lhs_incomp = check_no_conflict flat rest | otherwise = Just b bad_ax :: Coercion -> SDoc -> SDoc bad_ax this_co what = hang (text "Bad axiom application" <+> parens what) 2 (ppr this_co) {- ************************************************************************ * * Axioms * * ************************************************************************ -} lintAxioms :: Logger -> LintConfig -> SDoc -- ^ The source of the linted axioms -> [CoAxiom Branched] -> IO () lintAxioms logger cfg what axioms = displayLintResults logger True what (vcat $ map pprCoAxiom axioms) $ initL cfg $ do { mapM_ lint_axiom axioms ; let axiom_groups = groupWith coAxiomTyCon axioms ; mapM_ lint_axiom_group axiom_groups } lint_axiom :: CoAxiom Branched -> LintM () lint_axiom ax@(CoAxiom { co_ax_tc = tc, co_ax_branches = branches , co_ax_role = ax_role }) = addLoc (InAxiom ax) $ do { mapM_ (lint_branch tc) branch_list ; extra_checks } where branch_list = fromBranches branches extra_checks | isNewTyCon tc = do { CoAxBranch { cab_tvs = ax_tvs , cab_eta_tvs = eta_tvs , cab_cvs = cvs , cab_roles = roles , cab_lhs = lhs_tys } <- case branch_list of [branch] -> return branch _ -> failWithL (text "multi-branch axiom with newtype") -- The LHS of the axiom is (N lhs_tys) -- We expect it to be (N ax_tvs) ; lintL (mkTyVarTys ax_tvs `eqTypes` lhs_tys) (text "Newtype axiom LHS does not match newtype definition") ; lintL (null cvs) (text "Newtype axiom binds coercion variables") ; lintL (null eta_tvs) -- See Note [Eta reduction for data families] -- which is not about newtype axioms (text "Newtype axiom has eta-tvs") ; lintL (ax_role == Representational) (text "Newtype axiom role 
not representational") ; lintL (roles `equalLength` ax_tvs) (text "Newtype axiom roles list is the wrong length." $$ text "roles:" <+> sep (map ppr roles)) ; lintL (roles == takeList roles (tyConRoles tc)) (vcat [ text "Newtype axiom roles do not match newtype tycon's." , text "axiom roles:" <+> sep (map ppr roles) , text "tycon roles:" <+> sep (map ppr (tyConRoles tc)) ]) } | isFamilyTyCon tc = do { if | isTypeFamilyTyCon tc -> lintL (ax_role == Nominal) (text "type family axiom is not nominal") | isDataFamilyTyCon tc -> lintL (ax_role == Representational) (text "data family axiom is not representational") | otherwise -> addErrL (text "A family TyCon is neither a type family nor a data family:" <+> ppr tc) ; mapM_ (lint_family_branch tc) branch_list } | otherwise = addErrL (text "Axiom tycon is neither a newtype nor a family.") lint_branch :: TyCon -> CoAxBranch -> LintM () lint_branch ax_tc (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_lhs = lhs_args, cab_rhs = rhs }) = lintBinders LambdaBind (tvs ++ cvs) $ \_ -> do { let lhs = mkTyConApp ax_tc lhs_args ; lhs' <- lintType lhs ; rhs' <- lintType rhs ; let lhs_kind = typeKind lhs' rhs_kind = typeKind rhs' ; lintL (not (lhs_kind `typesAreApart` rhs_kind)) $ hang (text "Inhomogeneous axiom") 2 (text "lhs:" <+> ppr lhs <+> dcolon <+> ppr lhs_kind $$ text "rhs:" <+> ppr rhs <+> dcolon <+> ppr rhs_kind) } -- Type and Constraint are not Apart, so this test allows -- the newtype axiom for a single-method class. Indeed the -- whole reason Type and Constraint are not Apart is to allow -- such axioms! 
-- these checks do not apply to newtype axioms

-- | Lint one branch of a type-family or data-family axiom.
-- Checks the family-specific invariants: eta-tvs only on data families,
-- every quantified variable used on the LHS, no type-family applications
-- on the LHS, all-Nominal roles, no bound coercion variables, and that
-- every recorded incompatible branch really is incompatible.
lint_family_branch :: TyCon -> CoAxBranch -> LintM ()
lint_family_branch fam_tc br@(CoAxBranch { cab_tvs     = tvs
                                         , cab_eta_tvs = eta_tvs
                                         , cab_cvs     = cvs
                                         , cab_roles   = roles
                                         , cab_lhs     = lhs
                                         , cab_incomps = incomps })
  = do { -- Eta-reduced tyvars only make sense for data families
         lintL (isDataFamilyTyCon fam_tc || null eta_tvs)
               (text "Type family axiom has eta-tvs")
       ; lintL (all (`elemVarSet` tyCoVarsOfTypes lhs) tvs)
               (text "Quantified variable in family axiom unused in LHS")
       ; lintL (all isTyFamFree lhs)
               (text "Type family application on LHS of family axiom")
       ; lintL (all (== Nominal) roles)
               (text "Non-nominal role in family axiom"
                $$ text "roles:" <+> sep (map ppr roles))
       ; lintL (null cvs)
               (text "Coercion variables bound in family axiom")
         -- Each branch recorded as incompatible must really be incompatible
       ; forM_ incomps $ \ br' ->
           lintL (not (compatibleBranches br br')) $
           hang (text "Incorrect incompatible branches:")
              2 (vcat [text "Branch:" <+> ppr br,
                       text "Bogus incomp:" <+> ppr br'])
       }

-- | Lint a group of axioms that share a TyCon: only an open family may
-- have more than one axiom, and every pair of axioms must be compatible.
lint_axiom_group :: NonEmpty (CoAxiom Branched) -> LintM ()
lint_axiom_group (_ :| []) = return ()
lint_axiom_group (ax :| axs)
  = do { lintL (isOpenFamilyTyCon tc)
               (text "Non-open-family with multiple axioms")
         -- Check every ordered pair (including both orientations)
       ; let all_pairs = [ (ax1, ax2) | ax1 <- all_axs
                                      , ax2 <- all_axs ]
       ; mapM_ (lint_axiom_pair tc) all_pairs }
  where
    all_axs = ax : axs
    tc      = coAxiomTyCon ax

-- | Check that two open-family axioms are compatible.  Each open-family
-- axiom must be single-branched; anything else is reported as an error.
lint_axiom_pair :: TyCon -> (CoAxiom Branched, CoAxiom Branched) -> LintM ()
lint_axiom_pair tc (ax1, ax2)
  | Just br1@(CoAxBranch { cab_tvs = tvs1
                         , cab_lhs = lhs1
                         , cab_rhs = rhs1 }) <- coAxiomSingleBranch_maybe ax1
  , Just br2@(CoAxBranch { cab_tvs = tvs2
                         , cab_lhs = lhs2
                         , cab_rhs = rhs2 }) <- coAxiomSingleBranch_maybe ax2
  = lintL (compatibleBranches br1 br2) $
    vcat [ hsep [ text "Axioms", ppr ax1, text "and", ppr ax2
                , text "are incompatible" ]
         , text "tvs1 =" <+> pprTyVars tvs1
         , text "lhs1 =" <+> ppr (mkTyConApp tc lhs1)
         , text "rhs1 =" <+> ppr rhs1
         , text "tvs2 =" <+> pprTyVars tvs2
         , text "lhs2 =" <+> ppr (mkTyConApp tc lhs2)
         , text "rhs2 =" <+> ppr rhs2 ]

  | otherwise
  = addErrL (text "Open type family axiom has more than one branch: either"
             <+> ppr ax1 <+> text "or" <+> ppr ax2)

{-
************************************************************************
*                                                                      *
              \subsection[lint-monad]{The Lint monad}
*                                                                      *
************************************************************************
-}

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism]
data LintEnv
  = LE { le_flags :: LintFlags       -- Linting the result of this pass
       , le_loc   :: [LintLocInfo]   -- Locations

       , le_subst :: Subst           -- Current TyCo substitution
                                     --    See Note [Linting type lets]
            -- /Only/ substitutes for type variables;
            -- but might clone CoVars
            -- We also use le_subst to keep track of
            -- in-scope TyVars and CoVars (but not Ids)
            -- Range of the Subst is LintedType/LintedCo

       , le_ids :: VarEnv (Id, LintedType)  -- In-scope Ids
            -- Used to check that occurrences have an enclosing binder.
            -- The Id is /pre-substitution/, used to check that
            -- the occurrence has an identical type to the binder
            -- The LintedType is used to return the type of the occurrence,
            -- without having to lint it again.

       , le_joins :: IdSet           -- Join points in scope that are valid
                                     -- A subset of the InScopeSet in le_subst
                                     -- See Note [Join points]

       , le_ue_aliases :: NameEnv UsageEnv
            -- Assigns usage environments to the
            -- alias-like binders, as found in
            -- non-recursive lets.

       , le_platform :: Platform     -- ^ Target platform
       , le_diagOpts :: DiagOpts     -- ^ Diagnostic options
       }

data LintFlags
  = LF { lf_check_global_ids :: Bool -- See Note [Checking for global Ids]
       , lf_check_inline_loop_breakers :: Bool -- See Note [Checking for INLINE loop breakers]
       , lf_check_static_ptrs :: StaticPtrCheck -- ^ See Note [Checking StaticPtrs]
       , lf_report_unsat_syns :: Bool -- ^ See Note [Linting type synonym applications]
       , lf_check_linearity :: Bool -- ^ See Note [Linting linearity]
       , lf_check_fixed_rep :: Bool -- See Note [Checking for representation polymorphism]
       }

-- See Note [Checking StaticPtrs]
data StaticPtrCheck
  = AllowAnywhere   -- ^ Allow 'makeStatic' to occur anywhere.
  | AllowAtTopLevel -- ^ Allow 'makeStatic' calls at the top-level only.
  | RejectEverywhere -- ^ Reject any 'makeStatic' occurrence.
  deriving Eq

-- | The Lint monad: a one-shot function from the environment and the
-- messages accumulated so far to a result plus updated messages.
newtype LintM a
  = LintM' { unLintM :: LintEnv
                     -> WarnsAndErrs  -- Warning and error messages so far
                     -> LResult a }   -- Result and messages (if any)

pattern LintM :: (LintEnv -> WarnsAndErrs -> LResult a) -> LintM a
-- See Note [The one-shot state monad trick] in GHC.Utils.Monad
pattern LintM m <- LintM' m
  where
    LintM m = LintM' (oneShot $ \env -> oneShot $ \we -> m env we)
    -- LintM m = LintM' (oneShot $ oneShot m)
{-# COMPLETE LintM #-}

instance Functor (LintM) where
    fmap f (LintM m) = LintM $ \e w -> mapLResult f (m e w)

type WarnsAndErrs = (Bag SDoc, Bag SDoc)

-- Using an unboxed tuple here reduced allocations for a lint heavy
-- file by ~6%. Using MaybeUB reduced them further by another ~12%.
type LResult a = (# MaybeUB a, WarnsAndErrs #)

pattern LResult :: MaybeUB a -> WarnsAndErrs -> LResult a
pattern LResult m w = (# m, w #)
{-# COMPLETE LResult #-}

mapLResult :: (a1 -> a2) -> LResult a1 -> LResult a2
mapLResult f (LResult r w) = LResult (fmapMaybeUB f r) w

-- Just for testing.
fromBoxedLResult :: (Maybe a, WarnsAndErrs) -> LResult a fromBoxedLResult (Just x, errs) = LResult (JustUB x) errs fromBoxedLResult (Nothing,errs) = LResult NothingUB errs {- Note [Checking for global Ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before CoreTidy, all locally-bound Ids must be LocalIds, even top-level ones. See Note [Exported LocalIds] and #9857. Note [Checking StaticPtrs] ~~~~~~~~~~~~~~~~~~~~~~~~~~ See Note [Grand plan for static forms] in GHC.Iface.Tidy.StaticPtrTable for an overview. Every occurrence of the function 'makeStatic' should be moved to the top level by the FloatOut pass. It's vital that we don't have nested 'makeStatic' occurrences after CorePrep, because we populate the Static Pointer Table from the top-level bindings. See SimplCore Note [Grand plan for static forms]. The linter checks that no occurrence is left behind, nested within an expression. The check is enabled only after the FloatOut, CorePrep, and CoreTidy passes and only if the module uses the StaticPointers language extension. Checking more often doesn't help since the condition doesn't hold until after the first FloatOut pass. Note [Type substitution] ~~~~~~~~~~~~~~~~~~~~~~~~ Why do we need a type substitution? Consider /\(a:*). \(x:a). /\(a:*). id a x This is ill typed, because (renaming variables) it is really /\(a:*). \(x:a). /\(b:*). id b x Hence, when checking an application, we can't naively compare x's type (at its binding site) with its expected type (at a use site). So we rename type binders as we go, maintaining a substitution. The same substitution also supports let-type, current expressed as (/\(a:*). body) ty Here we substitute 'ty' for 'a' in 'body', on the fly. Note [Linting type synonym applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When linting a type-synonym, or type-family, application S ty1 .. 
tyn we behave as follows (#15057, #T15664): * If lf_report_unsat_syns = True, and S has arity < n, complain about an unsaturated type synonym or type family * Switch off lf_report_unsat_syns, and lint ty1 .. tyn. Reason: catch out of scope variables or other ill-kinded gubbins, even if S discards that argument entirely. E.g. (#15012): type FakeOut a = Int type family TF a type instance TF Int = FakeOut a Here 'a' is out of scope; but if we expand FakeOut, we conceal that out-of-scope error. Reason for switching off lf_report_unsat_syns: with LiberalTypeSynonyms, GHC allows unsaturated synonyms provided they are saturated when the type is expanded. Example type T f = f Int type S a = a -> a type Z = T S In Z's RHS, S appears unsaturated, but it is saturated when T is expanded. * If lf_report_unsat_syns is on, expand the synonym application and lint the result. Reason: want to check that synonyms are saturated when the type is expanded. Note [Linting linearity] ~~~~~~~~~~~~~~~~~~~~~~~~ Lint ignores linearity unless `-dlinear-core-lint` is set. For why, see below. But first, "ignore linearity" specifically means two things. When ignoring linearity: * In `ensureEqTypes`, use `eqTypeIgnoringMultiplicity` * In `ensureSubMult`, do nothing But why make `-dcore-lint` ignore linearity? Because optimisation passes are not (yet) guaranteed to maintain linearity. They should do so semantically (GHC is careful not to duplicate computation) but it is much harder to ensure that the statically-checkable constraints of Linear Core are maintained. The current Linear Core is described in the wiki at: https://gitlab.haskell.org/ghc/ghc/-/wikis/linear-types/implementation. Here are some examples of how the optimiser can break linearity checking. 
Other examples are documented in the linear-type implementation wiki page [https://gitlab.haskell.org/ghc/ghc/-/wikis/linear-types/implementation#core-to-core-passes] * EXAMPLE 1: the binder swap transformation Consider data T = MkT {-# UNPACK #-} !Int The wrapper for MkT is $wMkT :: Int %1 -> T $wMkT n = case %1 n of I# n' -> MkT n' This introduces, in particular, a `case %1` (this is not actual Haskell or Core syntax), where the `%1` means that the `case` expression consumes its scrutinee linearly. Now, `case %1` interacts with the binder swap optimisation in a non-trivial way. Take a slightly modified version of the code for $wMkT: case %1 x of z { I# n' -> (x, n') } Binder-swap changes this to case %1 x of z { I# n' -> let x = z in (x, n') } This is rejected by `-dlinear-core-lint` because 1/ n' must be used linearly 2/ `-dlinear-core-lint` recognises a use of `z` as a use of `n'`. So it sees two uses of n' where there should be a single one. * EXAMPLE 2: letrec Some optimisations can create a letrec which uses a variable linearly, e.g. letrec f True = f False f False = x in f True uses 'x' linearly, but this is not seen by the linter, which considers, conservatively, that a letrec always has multiplicity Many (in particular that every captured free variable must have multiplicity Many). This issue is discussed in ticket #18694. * EXAMPLE 3: rewrite rules Ignoring linearity means in particular that `a -> b` and `a %1 -> b` must be treated the same by rewrite rules (see also Note [Rewrite rules ignore multiplicities in FunTy] in GHC.Core.Unify). Consider m :: Bool -> A m' :: (Bool -> Bool) -> A {- RULES "ex" forall f. m (f True) = m' f -} f :: Bool %1 -> A x = m (f True) The rule "ex" must match . So the linter must accept `m' f`. 
* EXAMPLE 4: eta-reduction Eta-expansion can change linear functions into unrestricted functions f :: A %1 -> B g :: A %Many -> B g = \x -> f x Eta-reduction undoes this and produces: g :: A %Many -> B g = f Historical note: In the original linear-types implementation, we had tried to make every optimisation pass produce code that passes `-dlinear-core-lint`. It had proved very difficult. We kept finding corner case after corner case. Furthermore, to attempt to achieve that goal we ended up restricting transformations when `-dlinear-core-lint` couldn't typecheck the result. In the future, we may be able to lint the linearity of the output of Core-to-Core passes (#19165). But this shouldn't be done at the expense of producing efficient code. Therefore we lay the following principle. PRINCIPLE: The type system bends to the optimisation, not the other way around. There is a useful discussion at https://gitlab.haskell.org/ghc/ghc/-/issues/22123 Note [Linting representation-polymorphic builtins] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in Note [Representation-polymorphism checking built-ins], on top of the two main representation-polymorphism invariants described in the Note [Representation polymorphism invariants], we must perform additional representation-polymorphism checks on builtin functions which don't have a binding, for example to ensure that we don't run afoul of the representation-polymorphism invariants when eta-expanding. There are two situations: 1. Builtins which have skolem type variables which must be instantiated to concrete types, such as the RuntimeRep type argument r to the catch# primop. 2. Representation-polymorphic unlifted newtypes, which must always be instantiated at a fixed runtime representation. For 1, consider for example 'coerce': coerce :: forall {r} (a :: TYPE r) (b :: TYPE r). 
Coercible a b => a -> b We store in the IdDetails of the coerce Id that the first binder, r, must always be instantiated to a concrete type. We thus check this in Core Lint: whenever we see an application of the form coerce @{rep1} ... we ensure that 'rep1' is concrete. This is done in the function "checkRepPolyBuiltinApp". Moreover, not instantiating these type variables at all is also an error, as we would again not be able to perform eta-expansion. (This is a bit more theoretical, as in user programs the typechecker will insert these type applications when instantiating, but it can still arise when constructing Core expressions). For 2, whenever we have an unlifted newtype such as type RR :: Type -> RuntimeRep type family RR a type F :: forall (a :: Type) -> TYPE (RR a) type family F a type N :: forall (a :: Type) -> TYPE (RR a) newtype N a = MkN (F a) and an unsaturated occurrence MkN @ty -- NB: no value argument! we check that the (instantiated) argument type has a fixed runtime representation. This is done in the function "checkRepPolyNewtypeApp". 
-} instance Applicative LintM where pure x = LintM $ \ _ errs -> LResult (JustUB x) errs --(Just x, errs) (<*>) = ap instance Monad LintM where m >>= k = LintM (\ env errs -> let res = unLintM m env errs in case res of LResult (JustUB r) errs' -> unLintM (k r) env errs' LResult NothingUB errs' -> LResult NothingUB errs' ) -- LError errs'-> LError errs') -- let (res, errs') = unLintM m env errs in -- Just r -> unLintM (k r) env errs' -- Nothing -> (Nothing, errs')) instance MonadFail LintM where fail err = failWithL (text err) getPlatform :: LintM Platform getPlatform = LintM (\ e errs -> (LResult (JustUB $ le_platform e) errs)) data LintLocInfo = RhsOf Id -- The variable bound | OccOf Id -- Occurrence of id | LambdaBodyOf Id -- The lambda-binder | RuleOf Id -- Rules attached to a binder | UnfoldingOf Id -- Unfolding of a binder | BodyOfLet Id -- The let-bound variable | BodyOfLetRec [Id] -- The binders of the let | CaseAlt CoreAlt -- Case alternative | CasePat CoreAlt -- The *pattern* of the case alternative | CaseTy CoreExpr -- The type field of a case expression -- with this scrutinee | IdTy Id -- The type field of an Id binder | AnExpr CoreExpr -- Some expression | ImportedUnfolding SrcLoc -- Some imported unfolding (ToDo: say which) | TopLevelBindings | InType Type -- Inside a type | InCo Coercion -- Inside a coercion | InAxiom (CoAxiom Branched) -- Inside a CoAxiom data LintConfig = LintConfig { l_diagOpts :: !DiagOpts -- ^ Diagnostics opts , l_platform :: !Platform -- ^ Target platform , l_flags :: !LintFlags -- ^ Linting the result of this pass , l_vars :: ![Var] -- ^ 'Id's that should be treated as being in scope } initL :: LintConfig -> LintM a -- ^ Action to run -> WarnsAndErrs initL cfg m = case unLintM m env (emptyBag, emptyBag) of LResult (JustUB _) errs -> errs LResult NothingUB errs@(_, e) | not (isEmptyBag e) -> errs | otherwise -> pprPanic ("Bug in Lint: a failure occurred " ++ "without reporting an error message") empty where (tcvs, ids) = 
partition isTyCoVar $ l_vars cfg env = LE { le_flags = l_flags cfg , le_subst = mkEmptySubst (mkInScopeSetList tcvs) , le_ids = mkVarEnv [(id, (id,idType id)) | id <- ids] , le_joins = emptyVarSet , le_loc = [] , le_ue_aliases = emptyNameEnv , le_platform = l_platform cfg , le_diagOpts = l_diagOpts cfg } setReportUnsat :: Bool -> LintM a -> LintM a -- Switch off lf_report_unsat_syns setReportUnsat ru thing_inside = LintM $ \ env errs -> let env' = env { le_flags = (le_flags env) { lf_report_unsat_syns = ru } } in unLintM thing_inside env' errs -- See Note [Checking for representation polymorphism] noFixedRuntimeRepChecks :: LintM a -> LintM a noFixedRuntimeRepChecks thing_inside = LintM $ \env errs -> let env' = env { le_flags = (le_flags env) { lf_check_fixed_rep = False } } in unLintM thing_inside env' errs getLintFlags :: LintM LintFlags getLintFlags = LintM $ \ env errs -> fromBoxedLResult (Just (le_flags env), errs) checkL :: Bool -> SDoc -> LintM () checkL True _ = return () checkL False msg = failWithL msg -- like checkL, but relevant to type checking lintL :: Bool -> SDoc -> LintM () lintL = checkL checkWarnL :: Bool -> SDoc -> LintM () checkWarnL True _ = return () checkWarnL False msg = addWarnL msg failWithL :: SDoc -> LintM a failWithL msg = LintM $ \ env (warns,errs) -> fromBoxedLResult (Nothing, (warns, addMsg True env errs msg)) addErrL :: SDoc -> LintM () addErrL msg = LintM $ \ env (warns,errs) -> fromBoxedLResult (Just (), (warns, addMsg True env errs msg)) addWarnL :: SDoc -> LintM () addWarnL msg = LintM $ \ env (warns,errs) -> fromBoxedLResult (Just (), (addMsg False env warns msg, errs)) addMsg :: Bool -> LintEnv -> Bag SDoc -> SDoc -> Bag SDoc addMsg is_error env msgs msg = assertPpr (notNull loc_msgs) msg $ msgs `snocBag` mk_msg msg where loc_msgs :: [(SrcLoc, SDoc)] -- Innermost first loc_msgs = map dumpLoc (le_loc env) cxt_doc = vcat [ vcat $ reverse $ map snd loc_msgs , text "Substitution:" <+> ppr (le_subst env) ] context | is_error = 
cxt_doc | otherwise = whenPprDebug cxt_doc -- Print voluminous info for Lint errors -- but not for warnings msg_span = case [ span | (loc,_) <- loc_msgs , let span = srcLocSpan loc , isGoodSrcSpan span ] of [] -> noSrcSpan (s:_) -> s !diag_opts = le_diagOpts env mk_msg msg = mkLocMessage (mkMCDiagnostic diag_opts WarningWithoutFlag Nothing) msg_span (msg $$ context) addLoc :: LintLocInfo -> LintM a -> LintM a addLoc extra_loc m = LintM $ \ env errs -> unLintM m (env { le_loc = extra_loc : le_loc env }) errs inCasePat :: LintM Bool -- A slight hack; see the unique call site inCasePat = LintM $ \ env errs -> fromBoxedLResult (Just (is_case_pat env), errs) where is_case_pat (LE { le_loc = CasePat {} : _ }) = True is_case_pat _other = False addInScopeId :: Id -> LintedType -> LintM a -> LintM a addInScopeId id linted_ty m = LintM $ \ env@(LE { le_ids = id_set, le_joins = join_set, le_ue_aliases = aliases }) errs -> unLintM m (env { le_ids = extendVarEnv id_set id (id, linted_ty) , le_joins = add_joins join_set , le_ue_aliases = delFromNameEnv aliases (idName id) }) errs -- When shadowing an alias, we need to make sure the Id is no longer -- classified as such. E.g. in -- let x = in case x of x { _DEFAULT -> } -- Occurrences of 'x' in e2 shouldn't count as occurrences of e1. 
where add_joins join_set | isJoinId id = extendVarSet join_set id -- Overwrite with new arity | otherwise = delVarSet join_set id -- Remove any existing binding getInScopeIds :: LintM (VarEnv (Id,LintedType)) getInScopeIds = LintM (\env errs -> fromBoxedLResult (Just (le_ids env), errs)) extendTvSubstL :: TyVar -> Type -> LintM a -> LintM a extendTvSubstL tv ty m = LintM $ \ env errs -> unLintM m (env { le_subst = Type.extendTvSubst (le_subst env) tv ty }) errs updateSubst :: Subst -> LintM a -> LintM a updateSubst subst' m = LintM $ \ env errs -> unLintM m (env { le_subst = subst' }) errs markAllJoinsBad :: LintM a -> LintM a markAllJoinsBad m = LintM $ \ env errs -> unLintM m (env { le_joins = emptyVarSet }) errs markAllJoinsBadIf :: Bool -> LintM a -> LintM a markAllJoinsBadIf True m = markAllJoinsBad m markAllJoinsBadIf False m = m getValidJoins :: LintM IdSet getValidJoins = LintM (\ env errs -> fromBoxedLResult (Just (le_joins env), errs)) getSubst :: LintM Subst getSubst = LintM (\ env errs -> fromBoxedLResult (Just (le_subst env), errs)) getUEAliases :: LintM (NameEnv UsageEnv) getUEAliases = LintM (\ env errs -> fromBoxedLResult (Just (le_ue_aliases env), errs)) getInScope :: LintM InScopeSet getInScope = LintM (\ env errs -> fromBoxedLResult (Just (getSubstInScope $ le_subst env), errs)) lookupIdInScope :: Id -> LintM (Id, LintedType) lookupIdInScope id_occ = do { in_scope_ids <- getInScopeIds ; case lookupVarEnv in_scope_ids id_occ of Just (id_bndr, linted_ty) -> do { checkL (not (bad_global id_bndr)) $ global_in_scope id_bndr ; return (id_bndr, linted_ty) } Nothing -> do { checkL (not is_local) local_out_of_scope ; return (id_occ, idType id_occ) } } -- We don't bother to lint the type -- of global (i.e. 
imported) Ids where is_local = mustHaveLocalBinding id_occ local_out_of_scope = text "Out of scope:" <+> pprBndr LetBind id_occ global_in_scope id_bndr = hang (text "Occurrence is GlobalId, but binding is LocalId") 2 $ vcat [hang (text "occurrence:") 2 $ pprBndr LetBind id_occ ,hang (text "binder :") 2 $ pprBndr LetBind id_bndr ] bad_global id_bnd = isGlobalId id_occ && isLocalId id_bnd && not (isWiredIn id_occ) -- 'bad_global' checks for the case where an /occurrence/ is -- a GlobalId, but there is an enclosing binding fora a LocalId. -- NB: the in-scope variables are mostly LocalIds, checked by lintIdBndr, -- but GHCi adds GlobalIds from the interactive context. These -- are fine; hence the test (isLocalId id == isLocalId v) -- NB: when compiling Control.Exception.Base, things like absentError -- are defined locally, but appear in expressions as (global) -- wired-in Ids after worker/wrapper -- So we simply disable the test in this case lookupJoinId :: Id -> LintM JoinPointHood -- Look up an Id which should be a join point, valid here -- If so, return its arity, if not return Nothing lookupJoinId id = do { join_set <- getValidJoins ; case lookupVarSet join_set id of Just id' -> return (idJoinPointHood id') Nothing -> return NotJoinPoint } addAliasUE :: Id -> UsageEnv -> LintM a -> LintM a addAliasUE id ue thing_inside = LintM $ \ env errs -> let new_ue_aliases = extendNameEnv (le_ue_aliases env) (getName id) ue in unLintM thing_inside (env { le_ue_aliases = new_ue_aliases }) errs varCallSiteUsage :: Id -> LintM UsageEnv varCallSiteUsage id = do m <- getUEAliases return $ case lookupNameEnv m (getName id) of Nothing -> singleUsageUE id Just id_ue -> id_ue ensureEqTys :: LintedType -> LintedType -> SDoc -> LintM () -- check ty2 is subtype of ty1 (ie, has same structure but usage -- annotations need only be consistent, not equal) -- Assumes ty1,ty2 are have already had the substitution applied {-# INLINE ensureEqTys #-} -- See Note [INLINE ensureEqTys] ensureEqTys 
ty1 ty2 msg = do { flags <- getLintFlags ; lintL (eq_type flags ty1 ty2) msg } eq_type :: LintFlags -> Type -> Type -> Bool -- When `-dlinear-core-lint` is off, then consider `a -> b` and `a %1 -> b` to -- be equal. See Note [Linting linearity]. eq_type flags ty1 ty2 | lf_check_linearity flags = eqType ty1 ty2 | otherwise = eqTypeIgnoringMultiplicity ty1 ty2 {- Note [INLINE ensureEqTys] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To make Lint fast, we want to avoid allocating a thunk for in ensureEqTypes ty1 ty2 because the test almost always succeeds, and isn't needed. So we INLINE `ensureEqTys`. This actually make a difference of 1-2% when compiling programs with -dcore-lint. -} ensureSubUsage :: Usage -> Mult -> SDoc -> LintM () ensureSubUsage Bottom _ _ = return () ensureSubUsage Zero described_mult err_msg = ensureSubMult ManyTy described_mult err_msg ensureSubUsage (MUsage m) described_mult err_msg = ensureSubMult m described_mult err_msg ensureSubMult :: Mult -> Mult -> SDoc -> LintM () ensureSubMult actual_mult described_mult err_msg = do flags <- getLintFlags when (lf_check_linearity flags) $ unless (deepSubMult actual_mult described_mult) $ addErrL err_msg where -- Check for submultiplicity using the following rules: -- 1. x*y <= z when x <= z and y <= z. -- This rule follows from the fact that x*y = sup{x,y} for any -- multiplicities x,y. -- 2. x <= y*z when x <= y or x <= z. -- This rule is not complete: when x = y*z, we cannot -- change y*z <= y*z to y*z <= y or y*z <= z. -- However, we eliminate products on the LHS in step 1. -- 3. One <= x and x <= Many for any x, as checked by 'submult'. -- 4. x <= x. -- Otherwise, we fail. 
deepSubMult :: Mult -> Mult -> Bool deepSubMult m n | Just (m1, m2) <- isMultMul m = deepSubMult m1 n && deepSubMult m2 n | Just (n1, n2) <- isMultMul n = deepSubMult m n1 || deepSubMult m n2 | Submult <- m `submult` n = True | otherwise = m `eqType` n lintRole :: Outputable thing => thing -- where the role appeared -> Role -- expected -> Role -- actual -> LintM () lintRole co r1 r2 = lintL (r1 == r2) (text "Role incompatibility: expected" <+> ppr r1 <> comma <+> text "got" <+> ppr r2 $$ text "in" <+> ppr co) {- ************************************************************************ * * \subsection{Error messages} * * ************************************************************************ -} dumpLoc :: LintLocInfo -> (SrcLoc, SDoc) dumpLoc (RhsOf v) = (getSrcLoc v, text "In the RHS of" <+> pp_binders [v]) dumpLoc (OccOf v) = (getSrcLoc v, text "In an occurrence of" <+> pp_binder v) dumpLoc (LambdaBodyOf b) = (getSrcLoc b, text "In the body of lambda with binder" <+> pp_binder b) dumpLoc (RuleOf b) = (getSrcLoc b, text "In a rule attached to" <+> pp_binder b) dumpLoc (UnfoldingOf b) = (getSrcLoc b, text "In the unfolding of" <+> pp_binder b) dumpLoc (BodyOfLet b) = (noSrcLoc, text "In the body of a let with binder" <+> pp_binder b) dumpLoc (BodyOfLetRec []) = (noSrcLoc, text "In body of a letrec with no binders") dumpLoc (BodyOfLetRec bs@(b:_)) = ( getSrcLoc b, text "In the body of a letrec with binders" <+> pp_binders bs) dumpLoc (AnExpr e) = (noSrcLoc, text "In the expression:" <+> ppr e) dumpLoc (CaseAlt (Alt con args _)) = (noSrcLoc, text "In a case alternative:" <+> parens (ppr con <+> pp_binders args)) dumpLoc (CasePat (Alt con args _)) = (noSrcLoc, text "In the pattern of a case alternative:" <+> parens (ppr con <+> pp_binders args)) dumpLoc (CaseTy scrut) = (noSrcLoc, hang (text "In the result-type of a case with scrutinee:") 2 (ppr scrut)) dumpLoc (IdTy b) = (getSrcLoc b, text "In the type of a binder:" <+> ppr b) dumpLoc (ImportedUnfolding locn) = (locn, 
text "In an imported unfolding") dumpLoc TopLevelBindings = (noSrcLoc, Outputable.empty) dumpLoc (InType ty) = (noSrcLoc, text "In the type" <+> quotes (ppr ty)) dumpLoc (InCo co) = (noSrcLoc, text "In the coercion" <+> quotes (ppr co)) dumpLoc (InAxiom ax) = (getSrcLoc ax, hang (text "In the coercion axiom") 2 (pprCoAxiom ax)) pp_binders :: [Var] -> SDoc pp_binders bs = sep (punctuate comma (map pp_binder bs)) pp_binder :: Var -> SDoc pp_binder b | isId b = hsep [ppr b, dcolon, ppr (idType b)] | otherwise = hsep [ppr b, dcolon, ppr (tyVarKind b)] ------------------------------------------------------ -- Messages for case expressions mkDefaultArgsMsg :: [Var] -> SDoc mkDefaultArgsMsg args = hang (text "DEFAULT case with binders") 4 (ppr args) mkCaseAltMsg :: CoreExpr -> Type -> Type -> SDoc mkCaseAltMsg e ty1 ty2 = hang (text "Type of case alternatives not the same as the annotation on case:") 4 (vcat [ text "Actual type:" <+> ppr ty1, text "Annotation on case:" <+> ppr ty2, text "Alt Rhs:" <+> ppr e ]) mkScrutMsg :: Id -> Type -> Type -> Subst -> SDoc mkScrutMsg var var_ty scrut_ty subst = vcat [text "Result binder in case doesn't match scrutinee:" <+> ppr var, text "Result binder type:" <+> ppr var_ty,--(idType var), text "Scrutinee type:" <+> ppr scrut_ty, hsep [text "Current TCv subst", ppr subst]] mkNonDefltMsg, mkNonIncreasingAltsMsg :: CoreExpr -> SDoc mkNonDefltMsg e = hang (text "Case expression with DEFAULT not at the beginning") 4 (ppr e) mkNonIncreasingAltsMsg e = hang (text "Case expression with badly-ordered alternatives") 4 (ppr e) nonExhaustiveAltsMsg :: CoreExpr -> SDoc nonExhaustiveAltsMsg e = hang (text "Case expression with non-exhaustive alternatives") 4 (ppr e) mkBadConMsg :: TyCon -> DataCon -> SDoc mkBadConMsg tycon datacon = vcat [ text "In a case alternative, data constructor isn't in scrutinee type:", text "Scrutinee type constructor:" <+> ppr tycon, text "Data con:" <+> ppr datacon ] mkBadPatMsg :: Type -> Type -> SDoc mkBadPatMsg 
con_result_ty scrut_ty = vcat [ text "In a case alternative, pattern result type doesn't match scrutinee type:", text "Pattern result type:" <+> ppr con_result_ty, text "Scrutinee type:" <+> ppr scrut_ty ] integerScrutinisedMsg :: SDoc integerScrutinisedMsg = text "In a LitAlt, the literal is lifted (probably Integer)" mkBadAltMsg :: Type -> CoreAlt -> SDoc mkBadAltMsg scrut_ty alt = vcat [ text "Data alternative when scrutinee is not a tycon application", text "Scrutinee type:" <+> ppr scrut_ty, text "Alternative:" <+> pprCoreAlt alt ] mkNewTyDataConAltMsg :: Type -> CoreAlt -> SDoc mkNewTyDataConAltMsg scrut_ty alt = vcat [ text "Data alternative for newtype datacon", text "Scrutinee type:" <+> ppr scrut_ty, text "Alternative:" <+> pprCoreAlt alt ] ------------------------------------------------------ -- Other error messages mkAppMsg :: Type -> Type -> CoreExpr -> SDoc mkAppMsg expected_arg_ty actual_arg_ty arg = vcat [text "Argument value doesn't match argument type:", hang (text "Expected arg type:") 4 (ppr expected_arg_ty), hang (text "Actual arg type:") 4 (ppr actual_arg_ty), hang (text "Arg:") 4 (ppr arg)] mkNonFunAppMsg :: Type -> Type -> CoreExpr -> SDoc mkNonFunAppMsg fun_ty arg_ty arg = vcat [text "Non-function type in function position", hang (text "Fun type:") 4 (ppr fun_ty), hang (text "Arg type:") 4 (ppr arg_ty), hang (text "Arg:") 4 (ppr arg)] mkLetErr :: TyVar -> CoreExpr -> SDoc mkLetErr bndr rhs = vcat [text "Bad `let' binding:", hang (text "Variable:") 4 (ppr bndr <+> dcolon <+> ppr (varType bndr)), hang (text "Rhs:") 4 (ppr rhs)] mkTyAppMsg :: Type -> Type -> SDoc mkTyAppMsg ty arg_ty = vcat [text "Illegal type application:", hang (text "Function type:") 4 (ppr ty <+> dcolon <+> ppr (typeKind ty)), hang (text "Type argument:") 4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))] mkCoAppMsg :: Type -> Coercion -> SDoc mkCoAppMsg fun_ty co = vcat [ text "Illegal coercion application:" , hang (text "Function type:") 4 (ppr fun_ty) , hang (text 
"Coercion argument:") 4 (ppr co <+> dcolon <+> ppr (coercionType co))] emptyRec :: CoreExpr -> SDoc emptyRec e = hang (text "Empty Rec binding:") 2 (ppr e) mkRhsMsg :: Id -> SDoc -> Type -> SDoc mkRhsMsg binder what ty = vcat [hsep [text "The type of this binder doesn't match the type of its" <+> what <> colon, ppr binder], hsep [text "Binder's type:", ppr (idType binder)], hsep [text "Rhs type:", ppr ty]] badBndrTyMsg :: Id -> SDoc -> SDoc badBndrTyMsg binder what = vcat [ text "The type of this binder is" <+> what <> colon <+> ppr binder , text "Binder's type:" <+> ppr (idType binder) ] mkNonTopExportedMsg :: Id -> SDoc mkNonTopExportedMsg binder = hsep [text "Non-top-level binder is marked as exported:", ppr binder] mkNonTopExternalNameMsg :: Id -> SDoc mkNonTopExternalNameMsg binder = hsep [text "Non-top-level binder has an external name:", ppr binder] mkTopNonLitStrMsg :: Id -> SDoc mkTopNonLitStrMsg binder = hsep [text "Top-level Addr# binder has a non-literal rhs:", ppr binder] mkKindErrMsg :: TyVar -> Type -> SDoc mkKindErrMsg tyvar arg_ty = vcat [text "Kinds don't match in type application:", hang (text "Type variable:") 4 (ppr tyvar <+> dcolon <+> ppr (tyVarKind tyvar)), hang (text "Arg type:") 4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))] mkCastErr :: CoreExpr -> Coercion -> Type -> Type -> SDoc mkCastErr expr = mk_cast_err "expression" "type" (ppr expr) mkCastTyErr :: Type -> Coercion -> Kind -> Kind -> SDoc mkCastTyErr ty = mk_cast_err "type" "kind" (ppr ty) mk_cast_err :: String -- ^ What sort of casted thing this is -- (\"expression\" or \"type\"). -> String -- ^ What sort of coercion is being used -- (\"type\" or \"kind\"). -> SDoc -- ^ The thing being casted. 
-> Coercion -> Type -> Type -> SDoc mk_cast_err thing_str co_str pp_thing co from_ty thing_ty = vcat [from_msg <+> text "of Cast differs from" <+> co_msg <+> text "of" <+> enclosed_msg, from_msg <> colon <+> ppr from_ty, text (capitalise co_str) <+> text "of" <+> enclosed_msg <> colon <+> ppr thing_ty, text "Actual" <+> enclosed_msg <> colon <+> pp_thing, text "Coercion used in cast:" <+> ppr co ] where co_msg, from_msg, enclosed_msg :: SDoc co_msg = text co_str from_msg = text "From-" <> co_msg enclosed_msg = text "enclosed" <+> text thing_str mkBadTyVarMsg :: Var -> SDoc mkBadTyVarMsg tv = text "Non-tyvar used in TyVarTy:" <+> ppr tv <+> dcolon <+> ppr (varType tv) mkBadJoinBindMsg :: Var -> SDoc mkBadJoinBindMsg var = vcat [ text "Bad join point binding:" <+> ppr var , text "Join points can be bound only by a non-top-level let" ] mkInvalidJoinPointMsg :: Var -> Type -> SDoc mkInvalidJoinPointMsg var ty = hang (text "Join point has invalid type:") 2 (ppr var <+> dcolon <+> ppr ty) mkBadJoinArityMsg :: Var -> Int -> Int -> CoreExpr -> SDoc mkBadJoinArityMsg var ar n rhs = vcat [ text "Join point has too few lambdas", text "Join var:" <+> ppr var, text "Join arity:" <+> ppr ar, text "Number of lambdas:" <+> ppr (ar - n), text "Rhs = " <+> ppr rhs ] invalidJoinOcc :: Var -> SDoc invalidJoinOcc var = vcat [ text "Invalid occurrence of a join variable:" <+> ppr var , text "The binder is either not a join point, or not valid here" ] mkBadJumpMsg :: Var -> Int -> Int -> SDoc mkBadJumpMsg var ar nargs = vcat [ text "Join point invoked with wrong number of arguments", text "Join var:" <+> ppr var, text "Join arity:" <+> ppr ar, text "Number of arguments:" <+> int nargs ] mkInconsistentRecMsg :: [Var] -> SDoc mkInconsistentRecMsg bndrs = vcat [ text "Recursive let binders mix values and join points", text "Binders:" <+> hsep (map ppr_with_details bndrs) ] where ppr_with_details bndr = ppr bndr <> ppr (idDetails bndr) mkJoinBndrOccMismatchMsg :: Var -> JoinArity -> 
JoinArity -> SDoc mkJoinBndrOccMismatchMsg bndr join_arity_bndr join_arity_occ = vcat [ text "Mismatch in join point arity between binder and occurrence" , text "Var:" <+> ppr bndr , text "Arity at binding site:" <+> ppr join_arity_bndr , text "Arity at occurrence: " <+> ppr join_arity_occ ] mkBndrOccTypeMismatchMsg :: Var -> Var -> LintedType -> LintedType -> SDoc mkBndrOccTypeMismatchMsg bndr var bndr_ty var_ty = vcat [ text "Mismatch in type between binder and occurrence" , text "Binder:" <+> ppr bndr <+> dcolon <+> ppr bndr_ty , text "Occurrence:" <+> ppr var <+> dcolon <+> ppr var_ty , text " Before subst:" <+> ppr (idType var) ] mkBadJoinPointRuleMsg :: JoinId -> JoinArity -> CoreRule -> SDoc mkBadJoinPointRuleMsg bndr join_arity rule = vcat [ text "Join point has rule with wrong number of arguments" , text "Var:" <+> ppr bndr , text "Join arity:" <+> ppr join_arity , text "Rule:" <+> ppr rule ] dupVars :: [NonEmpty Var] -> SDoc dupVars vars = hang (text "Duplicate variables brought into scope") 2 (ppr (map toList vars)) dupExtVars :: [NonEmpty Name] -> SDoc dupExtVars vars = hang (text "Duplicate top-level variables with the same qualified name") 2 (ppr (map toList vars)) {- ************************************************************************ * * \subsection{Annotation Linting} * * ************************************************************************ -} -- | This checks whether a pass correctly looks through debug -- annotations (@SourceNote@). This works a bit different from other -- consistency checks: We check this by running the given task twice, -- noting all differences between the results. 
lintAnnots :: SDoc -> (ModGuts -> CoreM ModGuts) -> ModGuts -> CoreM ModGuts lintAnnots pname pass guts = {-# SCC "lintAnnots" #-} do -- Run the pass as we normally would dflags <- getDynFlags logger <- getLogger when (gopt Opt_DoAnnotationLinting dflags) $ liftIO $ Err.showPass logger "Annotation linting - first run" -- If appropriate re-run it without debug annotations to make sure -- that they made no difference. if gopt Opt_DoAnnotationLinting dflags then do nguts <- pass guts liftIO $ Err.showPass logger "Annotation linting - second run" nguts' <- withoutAnnots pass guts -- Finally compare the resulting bindings liftIO $ Err.showPass logger "Annotation linting - comparison" let binds = flattenBinds $ mg_binds nguts binds' = flattenBinds $ mg_binds nguts' (diffs,_) = diffBinds True (mkRnEnv2 emptyInScopeSet) binds binds' when (not (null diffs)) $ GHC.Core.Opt.Monad.putMsg $ vcat [ lint_banner "warning" pname , text "Core changes with annotations:" , withPprStyle defaultDumpStyle $ nest 2 $ vcat diffs ] return nguts else pass guts -- | Run the given pass without annotations. This means that we both -- set the debugLevel setting to 0 in the environment as well as all -- annotations from incoming modules. withoutAnnots :: (ModGuts -> CoreM ModGuts) -> ModGuts -> CoreM ModGuts withoutAnnots pass guts = do -- Remove debug flag from environment. -- TODO: supply tag here as well ? let withoutFlag = mapDynFlagsCoreM $ \(!dflags) -> dflags { debugLevel = 0 } -- Nuke existing ticks in module. -- TODO: Ticks in unfoldings. Maybe change unfolding so it removes -- them in absence of debugLevel > 0. let nukeTicks = stripTicksE (not . tickishIsCode) nukeAnnotsBind :: CoreBind -> CoreBind nukeAnnotsBind bind = case bind of Rec bs -> Rec $ map (\(b,e) -> (b, nukeTicks e)) bs NonRec b e -> NonRec b $ nukeTicks e nukeAnnotsMod mg@ModGuts{mg_binds=binds} = mg{mg_binds = map nukeAnnotsBind binds} -- Perform pass with all changes applied. 
Drop the simple count so it doesn't -- effect the total also dropSimplCount $ withoutFlag $ pass (nukeAnnotsMod guts) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Lint/0000755000000000000000000000000007346545000017504 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Lint/Interactive.hs0000644000000000000000000000341707346545000022322 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 A ``lint'' pass to check for Core correctness. See Note [Core Lint guarantee]. -} module GHC.Core.Lint.Interactive ( interactiveInScope, ) where import GHC.Prelude import GHC.Runtime.Context import GHC.Core.Coercion import GHC.Core.TyCo.FVs import GHC.Core.InstEnv ( instanceDFunId, instEnvElts ) import GHC.Types.Id import GHC.Types.TypeEnv interactiveInScope :: InteractiveContext -> [Var] -- In GHCi we may lint expressions, or bindings arising from 'deriving' -- clauses, that mention variables bound in the interactive context. -- These are Local things (see Note [Interactively-bound Ids in GHCi] in GHC.Runtime.Context). -- So we have to tell Lint about them, lest it reports them as out of scope. -- -- We do this by find local-named things that may appear free in interactive -- context. This function is pretty revolting and quite possibly not quite right. -- When we are not in GHCi, the interactive context (hsc_IC hsc_env) is empty -- so this is a (cheap) no-op. -- -- See #8215 for an example interactiveInScope ictxt = tyvars ++ ids where -- C.f. GHC.Tc.Module.setInteractiveContext, Desugar.deSugarExpr (cls_insts, _fam_insts) = ic_instances ictxt te1 = mkTypeEnvWithImplicits (ic_tythings ictxt) te = extendTypeEnvWithIds te1 (map instanceDFunId $ instEnvElts cls_insts) ids = typeEnvIds te tyvars = tyCoVarsOfTypesList $ map idType ids -- Why the type variables? How can the top level envt have free tyvars? 
-- I think it's because of the GHCi debugger, which can bind variables -- f :: [t] -> [t] -- where t is a RuntimeUnk (see TcType) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Make.hs0000644000000000000000000014746407346545000020027 0ustar0000000000000000{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Handy functions for creating much Core syntax module GHC.Core.Make ( -- * Constructing normal syntax mkCoreLet, mkCoreLets, mkCoreApp, mkCoreApps, mkCoreConApps, mkCoreLams, mkWildCase, mkIfThenElse, mkWildValBinder, mkSingleAltCase, sortQuantVars, castBottomExpr, -- * Constructing boxed literals mkLitRubbish, mkWordExpr, mkIntExpr, mkIntExprInt, mkUncheckedIntExpr, mkIntegerExpr, mkNaturalExpr, mkFloatExpr, mkDoubleExpr, mkCharExpr, mkStringExpr, mkStringExprFS, mkStringExprFSWith, MkStringIds (..), getMkStringIds, -- * Floats FloatBind(..), wrapFloat, wrapFloats, floatBindings, -- * Constructing small tuples mkCoreVarTupTy, mkCoreTup, mkCoreUnboxedTuple, mkCoreUnboxedSum, mkCoreTupBoxity, unitExpr, -- * Constructing big tuples mkChunkified, chunkify, mkBigCoreVarTup, mkBigCoreVarTupSolo, mkBigCoreVarTupTy, mkBigCoreTupTy, mkBigCoreTup, -- * Deconstructing big tuples mkBigTupleSelector, mkBigTupleSelectorSolo, mkBigTupleCase, -- * Constructing list expressions mkNilExpr, mkConsExpr, mkListExpr, mkFoldrExpr, mkBuildExpr, -- * Constructing Maybe expressions mkNothingExpr, mkJustExpr, -- * Error Ids mkRuntimeErrorApp, mkImpossibleExpr, mkAbsentErrorApp, errorIds, rEC_CON_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, pAT_ERROR_ID, rEC_SEL_ERROR_ID, tYPE_ERROR_ID, aBSENT_SUM_FIELD_ERROR_ID ) where import GHC.Prelude import GHC.Platform import GHC.Types.Id import GHC.Types.Var ( setTyVarUnique, visArgConstraintLike ) import GHC.Types.TyThing import GHC.Types.Id.Info import GHC.Types.Cpr import GHC.Types.Basic( TypeOrConstraint(..) 
) import GHC.Types.Demand import GHC.Types.Name hiding ( varName ) import GHC.Types.Literal import GHC.Types.Unique.Supply import GHC.Core import GHC.Core.Utils ( exprType, mkSingleAltCase, bindNonRec ) import GHC.Core.Type import GHC.Core.Predicate ( isCoVarType ) import GHC.Core.TyCo.Compare ( eqType ) import GHC.Core.Coercion ( isCoVar ) import GHC.Core.DataCon ( DataCon, dataConWorkId ) import GHC.Core.Multiplicity import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Settings.Constants( mAX_TUPLE_SIZE ) import GHC.Data.FastString import Data.List ( partition ) import Data.Char ( ord ) infixl 4 `mkCoreApp`, `mkCoreApps` {- ************************************************************************ * * \subsection{Basic GHC.Core construction} * * ************************************************************************ -} -- | Sort the variables, putting type and covars first, in scoped order, -- and then other Ids -- -- It is a deterministic sort, meaning it doesn't look at the values of -- Uniques. For explanation why it's important See Note [Unique Determinism] -- in GHC.Types.Unique. sortQuantVars :: [Var] -> [Var] sortQuantVars vs = sorted_tcvs ++ ids where (tcvs, ids) = partition (isTyVar <||> isCoVar) vs sorted_tcvs = scopedSort tcvs -- | Bind a binding group over an expression, using a @let@ or @case@ as -- appropriate (see "GHC.Core#let_can_float_invariant") mkCoreLet :: CoreBind -> CoreExpr -> CoreExpr mkCoreLet (NonRec bndr rhs) body -- See Note [Core let-can-float invariant] = bindNonRec bndr rhs body mkCoreLet bind body = Let bind body -- | Create a lambda where the given expression has a number of variables -- bound over it. The leftmost binder is that bound by the outermost -- lambda in the result mkCoreLams :: [CoreBndr] -> CoreExpr -> CoreExpr mkCoreLams = mkLams -- | Bind a list of binding groups over an expression. 
The leftmost binding -- group becomes the outermost group in the resulting expression mkCoreLets :: [CoreBind] -> CoreExpr -> CoreExpr mkCoreLets binds body = foldr mkCoreLet body binds -- | Construct an expression which represents the application of a number of -- expressions to that of a data constructor expression. The leftmost expression -- in the list is applied first mkCoreConApps :: DataCon -> [CoreExpr] -> CoreExpr mkCoreConApps con args = mkCoreApps (Var (dataConWorkId con)) args -- | Construct an expression which represents the application of a number of -- expressions to another. The leftmost expression in the list is applied first mkCoreApps :: CoreExpr -- ^ function -> [CoreExpr] -- ^ arguments -> CoreExpr mkCoreApps fun args = fst $ foldl' (mkCoreAppTyped doc_string) (fun, fun_ty) args where doc_string = ppr fun_ty $$ ppr fun $$ ppr args fun_ty = exprType fun -- | Construct an expression which represents the application of one expression -- to the other mkCoreApp :: SDoc -> CoreExpr -- ^ function -> CoreExpr -- ^ argument -> CoreExpr mkCoreApp s fun arg = fst $ mkCoreAppTyped s (fun, exprType fun) arg -- | Construct an expression which represents the application of one expression -- paired with its type to an argument. The result is paired with its type. This -- function is not exported and used in the definition of 'mkCoreApp' and -- 'mkCoreApps'. mkCoreAppTyped :: SDoc -> (CoreExpr, Type) -> CoreExpr -> (CoreExpr, Type) mkCoreAppTyped _ (fun, fun_ty) (Type ty) = (App fun (Type ty), piResultTy fun_ty ty) mkCoreAppTyped _ (fun, fun_ty) (Coercion co) = (App fun (Coercion co), funResultTy fun_ty) mkCoreAppTyped d (fun, fun_ty) arg = assertPpr (isFunTy fun_ty) (ppr fun $$ ppr arg $$ d) (App fun arg, funResultTy fun_ty) {- ********************************************************************* * * Building case expressions * * ********************************************************************* -} -- | Make a /wildcard binder/. 
This is typically used when you need a binder -- that you expect to use only at a *binding* site. Do not use it at -- occurrence sites because it has a single, fixed unique, and it's very -- easy to get into difficulties with shadowing. That's why it is used so little. -- -- See Note [WildCard binders] in "GHC.Core.Opt.Simplify.Env" mkWildValBinder :: Mult -> Type -> Id mkWildValBinder w ty = mkLocalIdOrCoVar wildCardName w ty -- "OrCoVar" since a coercion can be a scrutinee with -fdefer-type-errors -- (e.g. see test T15695). Ticket #17291 covers fixing this problem. -- | Make a case expression whose case binder is unused -- The alts and res_ty should not have any occurrences of WildId mkWildCase :: CoreExpr -- ^ scrutinee -> Scaled Type -> Type -- ^ res_ty -> [CoreAlt] -- ^ alts -> CoreExpr mkWildCase scrut (Scaled w scrut_ty) res_ty alts = Case scrut (mkWildValBinder w scrut_ty) res_ty alts mkIfThenElse :: CoreExpr -- ^ guard -> CoreExpr -- ^ then -> CoreExpr -- ^ else -> CoreExpr mkIfThenElse guard then_expr else_expr -- Not going to be refining, so okay to take the type of the "then" clause = mkWildCase guard (linear boolTy) (exprType then_expr) [ Alt (DataAlt falseDataCon) [] else_expr, -- Increasing order of tag! Alt (DataAlt trueDataCon) [] then_expr ] castBottomExpr :: CoreExpr -> Type -> CoreExpr -- (castBottomExpr e ty), assuming that 'e' diverges, -- return an expression of type 'ty' -- See Note [Empty case alternatives] in GHC.Core castBottomExpr e res_ty | e_ty `eqType` res_ty = e | otherwise = Case e (mkWildValBinder OneTy e_ty) res_ty [] where e_ty = exprType e mkLitRubbish :: Type -> Maybe CoreExpr -- Make a rubbish-literal CoreExpr of the given type. 
-- Fail (returning Nothing) if -- * the RuntimeRep of the Type is not monomorphic; -- * the type is (a ~# b), the type of coercion -- See INVARIANT 1 and 2 of item (2) in Note [Rubbish literals] -- in GHC.Types.Literal mkLitRubbish ty | not (noFreeVarsOfType rep) = Nothing -- Satisfy INVARIANT 1 | isCoVarType ty = Nothing -- Satisfy INVARIANT 2 | otherwise = Just (Lit (LitRubbish torc rep) `mkTyApps` [ty]) where Just (torc, rep) = sORTKind_maybe (typeKind ty) {- ************************************************************************ * * \subsection{Making literals} * * ************************************************************************ -} -- | Create a 'CoreExpr' which will evaluate to the given @Int@ mkIntExpr :: Platform -> Integer -> CoreExpr -- Result = I# i :: Int mkIntExpr platform i = mkCoreConApps intDataCon [mkIntLit platform i] -- | Create a 'CoreExpr' which will evaluate to the given @Int@. Don't check -- that the number is in the range of the target platform @Int@ mkUncheckedIntExpr :: Integer -> CoreExpr -- Result = I# i :: Int mkUncheckedIntExpr i = mkCoreConApps intDataCon [Lit (mkLitIntUnchecked i)] -- | Create a 'CoreExpr' which will evaluate to the given @Int@ mkIntExprInt :: Platform -> Int -> CoreExpr -- Result = I# i :: Int mkIntExprInt platform i = mkCoreConApps intDataCon [mkIntLit platform (fromIntegral i)] -- | Create a 'CoreExpr' which will evaluate to a @Word@ with the given value mkWordExpr :: Platform -> Integer -> CoreExpr mkWordExpr platform w = mkCoreConApps wordDataCon [mkWordLit platform w] -- | Create a 'CoreExpr' which will evaluate to the given @Integer@ mkIntegerExpr :: Platform -> Integer -> CoreExpr -- Result :: Integer mkIntegerExpr platform i | platformInIntRange platform i = mkCoreConApps integerISDataCon [mkIntLit platform i] | i < 0 = mkCoreConApps integerINDataCon [Lit (mkLitBigNat (negate i))] | otherwise = mkCoreConApps integerIPDataCon [Lit (mkLitBigNat i)] -- | Create a 'CoreExpr' which will evaluate to the 
given @Natural@ mkNaturalExpr :: Platform -> Integer -> CoreExpr mkNaturalExpr platform w | platformInWordRange platform w = mkCoreConApps naturalNSDataCon [mkWordLit platform w] | otherwise = mkCoreConApps naturalNBDataCon [Lit (mkLitBigNat w)] -- | Create a 'CoreExpr' which will evaluate to the given @Float@ mkFloatExpr :: Float -> CoreExpr mkFloatExpr f = mkCoreConApps floatDataCon [mkFloatLitFloat f] -- | Create a 'CoreExpr' which will evaluate to the given @Double@ mkDoubleExpr :: Double -> CoreExpr mkDoubleExpr d = mkCoreConApps doubleDataCon [mkDoubleLitDouble d] -- | Create a 'CoreExpr' which will evaluate to the given @Char@ mkCharExpr :: Char -> CoreExpr -- Result = C# c :: Int mkCharExpr c = mkCoreConApps charDataCon [mkCharLit c] -- | Create a 'CoreExpr' which will evaluate to the given @String@ mkStringExpr :: MonadThings m => String -> m CoreExpr -- Result :: String mkStringExpr str = mkStringExprFS (mkFastString str) -- | Create a 'CoreExpr' which will evaluate to a string morally equivalent to the given @FastString@ mkStringExprFS :: MonadThings m => FastString -> m CoreExpr -- Result :: String mkStringExprFS = mkStringExprFSLookup lookupId mkStringExprFSLookup :: Monad m => (Name -> m Id) -> FastString -> m CoreExpr mkStringExprFSLookup lookupM str = do mk <- getMkStringIds lookupM pure (mkStringExprFSWith mk str) getMkStringIds :: Applicative m => (Name -> m Id) -> m MkStringIds getMkStringIds lookupM = MkStringIds <$> lookupM unpackCStringName <*> lookupM unpackCStringUtf8Name data MkStringIds = MkStringIds { unpackCStringId :: !Id , unpackCStringUtf8Id :: !Id } mkStringExprFSWith :: MkStringIds -> FastString -> CoreExpr mkStringExprFSWith ids str | nullFS str = mkNilExpr charTy | all safeChar chars = let !unpack_id = unpackCStringId ids in App (Var unpack_id) lit | otherwise = let !unpack_utf8_id = unpackCStringUtf8Id ids in App (Var unpack_utf8_id) lit where chars = unpackFS str safeChar c = ord c >= 1 && ord c <= 0x7F lit = Lit (LitString 
(bytesFS str)) {- ************************************************************************ * * Creating tuples and their types for Core expressions * * ************************************************************************ -} {- Note [Flattening one-tuples] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This family of functions creates a tuple of variables/expressions/types. mkCoreTup [e1,e2,e3] = (e1,e2,e3) What if there is just one variable/expression/type in the argument? We could do one of two things: * Flatten it out, so that mkCoreTup [e1] = e1 * Build a one-tuple (see Note [One-tuples] in GHC.Builtin.Types) mkCoreTupSolo [e1] = Solo e1 We use a suffix "Solo" to indicate this. Usually we want the former, but occasionally the latter. NB: The logic in tupleDataCon knows about () and Solo and (,), etc. Note [Don't flatten tuples from HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we get an explicit 1-tuple from HsSyn somehow (likely: Template Haskell), we should treat it really as a 1-tuple, without flattening. Note that a 1-tuple and a flattened value have different performance and laziness characteristics, so should just do what we're asked. This arose from discussions in #16881. One-tuples that arise internally depend on the circumstance; often flattening is a good idea. Decisions are made on a case-by-case basis. 'mkCoreBoxedTuple` and `mkBigCoreVarTupSolo` build tuples without flattening. -} -- | Build a small tuple holding the specified expressions -- One-tuples are *not* flattened; see Note [Flattening one-tuples] -- See also Note [Don't flatten tuples from HsSyn] -- Arguments must have kind Type mkCoreBoxedTuple :: HasDebugCallStack => [CoreExpr] -> CoreExpr mkCoreBoxedTuple cs = assertPpr (all (tcIsLiftedTypeKind . typeKind . exprType) cs) (ppr cs) mkCoreConApps (tupleDataCon Boxed (length cs)) (map (Type . exprType) cs ++ cs) -- | Build a small unboxed tuple holding the specified expressions. 
-- Do not include the RuntimeRep specifiers; this function calculates them -- for you. -- Does /not/ flatten one-tuples; see Note [Flattening one-tuples] mkCoreUnboxedTuple :: [CoreExpr] -> CoreExpr mkCoreUnboxedTuple exps = mkCoreConApps (tupleDataCon Unboxed (length tys)) (map (Type . getRuntimeRep) tys ++ map Type tys ++ exps) where tys = map exprType exps -- | Make a core tuple of the given boxity; don't flatten 1-tuples mkCoreTupBoxity :: Boxity -> [CoreExpr] -> CoreExpr mkCoreTupBoxity Boxed exps = mkCoreBoxedTuple exps mkCoreTupBoxity Unboxed exps = mkCoreUnboxedTuple exps -- | Build the type of a small tuple that holds the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] mkCoreVarTupTy :: [Id] -> Type mkCoreVarTupTy ids = mkBoxedTupleTy (map idType ids) -- | Build a small tuple holding the specified expressions -- One-tuples are flattened; see Note [Flattening one-tuples] mkCoreTup :: [CoreExpr] -> CoreExpr mkCoreTup [c] = c mkCoreTup cs = mkCoreBoxedTuple cs -- non-1-tuples are uniform -- | Build an unboxed sum. -- -- Alternative number ("alt") starts from 1. mkCoreUnboxedSum :: Int -> Int -> [Type] -> CoreExpr -> CoreExpr mkCoreUnboxedSum arity alt tys exp = assert (length tys == arity) $ assert (alt <= arity) $ mkCoreConApps (sumDataCon alt arity) (map (Type . getRuntimeRep) tys ++ map Type tys ++ [exp]) {- Note [Big tuples] ~~~~~~~~~~~~~~~~~~~~ "Big" tuples (`mkBigCoreTup` and friends) are more general than "small" ones (`mkCoreTup` and friends) in two ways. 1. GHCs built-in tuples can only go up to 'mAX_TUPLE_SIZE' in arity, but we might conceivably want to build such a massive tuple as part of the output of a desugaring stage (notably that for list comprehensions). `mkBigCoreTup` encodes such big tuples by creating and pattern matching on /nested/ small tuples that are directly expressible by GHC. 
Nesting policy: it's better to have a 2-tuple of 10-tuples (3 objects) than a 10-tuple of 2-tuples (11 objects), so we want the leaves of any construction to be big. 2. When desugaring arrows we gather up a tuple of free variables, which may include dictionaries (of kind Constraint) and unboxed values. These can't live in a tuple. `mkBigCoreTup` encodes such tuples by boxing up the offending arguments: see Note [Boxing constructors] in GHC.Builtin.Types. If you just use the 'mkBigCoreTup', 'mkBigCoreVarTupTy', 'mkBigTupleSelector' and 'mkBigTupleCase' functions to do all your work with tuples you should be fine, and not have to worry about the arity limitation, or kind limitation at all. The "big" tuple operations flatten 1-tuples just like "small" tuples. But see Note [Don't flatten tuples from HsSyn] -} mkBigCoreVarTupSolo :: [Id] -> CoreExpr -- Same as mkBigCoreVarTup, but: -- - one-tuples are not flattened -- see Note [Flattening one-tuples] -- - arguments should have kind Type mkBigCoreVarTupSolo [id] = mkCoreBoxedTuple [Var id] mkBigCoreVarTupSolo ids = mkChunkified mkCoreTup (map Var ids) -- | Build a big tuple holding the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] -- Arguments don't have to have kind Type mkBigCoreVarTup :: [Id] -> CoreExpr mkBigCoreVarTup ids = mkBigCoreTup (map Var ids) -- | Build a "big" tuple holding the specified expressions -- One-tuples are flattened; see Note [Flattening one-tuples] -- Arguments don't have to have kind Type; ones that do not are boxed -- This function crashes (in wrapBox) if given a non-Type -- argument that it doesn't know how to box. 
mkBigCoreTup :: [CoreExpr] -> CoreExpr mkBigCoreTup exprs = mkChunkified mkCoreTup (map wrapBox exprs) -- | Build the type of a big tuple that holds the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreVarTupTy :: HasDebugCallStack => [Id] -> Type mkBigCoreVarTupTy ids = mkBigCoreTupTy (map idType ids) -- | Build the type of a big tuple that holds the specified type of thing -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreTupTy :: HasDebugCallStack => [Type] -> Type mkBigCoreTupTy tys = mkChunkified mkBoxedTupleTy $ map boxTy tys -- | The unit expression unitExpr :: CoreExpr unitExpr = Var unitDataConId -------------------------------------------------------------- wrapBox :: CoreExpr -> CoreExpr -- ^ If (e :: ty) and (ty :: Type), wrapBox is a no-op -- But if (ty :: ki), and ki is not Type, wrapBox returns (K @ty e) -- which has kind Type -- where K is the boxing data constructor for ki -- See Note [Boxing constructors] in GHC.Builtin.Types -- Panics if there /is/ no boxing data con wrapBox e = case boxingDataCon e_ty of BI_NoBoxNeeded -> e BI_Box { bi_inst_con = boxing_expr } -> App boxing_expr e BI_NoBoxAvailable -> pprPanic "wrapBox" (ppr e $$ ppr (exprType e)) -- We should do better than panicing: #22336 where e_ty = exprType e boxTy :: HasDebugCallStack => Type -> Type -- ^ `boxTy ty` is the boxed version of `ty`. That is, -- if `e :: ty`, then `wrapBox e :: boxTy ty`. -- Note that if `ty :: Type`, `boxTy ty` just returns `ty`. 
-- Panics if it is not possible to box `ty`, like `wrapBox` (#22336) -- See Note [Boxing constructors] in GHC.Builtin.Types boxTy ty = case boxingDataCon ty of BI_NoBoxNeeded -> ty BI_Box { bi_boxed_type = box_ty } -> box_ty BI_NoBoxAvailable -> pprPanic "boxTy" (ppr ty) -- We should do better than panicing: #22336 unwrapBox :: UniqSupply -> Id -> CoreExpr -> (UniqSupply, Id, CoreExpr) -- If v's type required boxing (i.e it is unlifted or a constraint) -- then (unwrapBox us v body) returns -- (case box_v of MkDict v -> body) -- together with box_v -- where box_v is a fresh variable -- Otherwise unwrapBox is a no-op -- Panics if no box is available (#22336) unwrapBox us var body = case boxingDataCon var_ty of BI_NoBoxNeeded -> (us, var, body) BI_NoBoxAvailable -> pprPanic "unwrapBox" (ppr var $$ ppr var_ty) -- We should do better than panicing: #22336 BI_Box { bi_data_con = box_con, bi_boxed_type = box_ty } -> (us', var', body') where var' = mkSysLocal (fsLit "uc") uniq ManyTy box_ty body' = Case (Var var') var' (exprType body) [Alt (DataAlt box_con) [var] body] where var_ty = idType var (uniq, us') = takeUniqFromSupply us -- | Lifts a \"small\" constructor into a \"big\" constructor by recursive decomposition mkChunkified :: ([a] -> a) -- ^ \"Small\" constructor function, of maximum input arity 'mAX_TUPLE_SIZE' -> [a] -- ^ Possible \"big\" list of things to construct from -> a -- ^ Constructed thing made possible by recursive decomposition mkChunkified small_tuple as = mk_big_tuple (chunkify as) where -- Each sub-list is short enough to fit in a tuple mk_big_tuple [as] = small_tuple as mk_big_tuple as_s = mk_big_tuple (chunkify (map small_tuple as_s)) chunkify :: [a] -> [[a]] -- ^ Split a list into lists that are small enough to have a corresponding -- tuple arity. 
The sub-lists of the result all have length <= 'mAX_TUPLE_SIZE' -- But there may be more than 'mAX_TUPLE_SIZE' sub-lists chunkify xs | n_xs <= mAX_TUPLE_SIZE = [xs] | otherwise = split xs where n_xs = length xs split [] = [] split xs = let (as, bs) = splitAt mAX_TUPLE_SIZE xs in as : split bs {- ************************************************************************ * * \subsection{Tuple destructors} * * ************************************************************************ -} -- | Builds a selector which scrutinises the given -- expression and extracts the one name from the list given. -- If you want the no-shadowing rule to apply, the caller -- is responsible for making sure that none of these names -- are in scope. -- -- If there is just one 'Id' in the tuple, then the selector is -- just the identity. -- -- If necessary, we pattern match on a \"big\" tuple. -- -- A tuple selector is not linear in its argument. Consequently, the case -- expression built by `mkBigTupleSelector` must consume its scrutinee 'Many' -- times. And all the argument variables must have multiplicity 'Many'. mkBigTupleSelector, mkBigTupleSelectorSolo :: [Id] -- ^ The 'Id's to pattern match the tuple against -> Id -- ^ The 'Id' to select -> Id -- ^ A variable of the same type as the scrutinee -> CoreExpr -- ^ Scrutinee -> CoreExpr -- ^ Selector expression -- mkBigTupleSelector [a,b,c,d] b v e -- = case e of v { -- (p,q) -> case p of p { -- (a,b) -> b }} -- We use 'tpl' vars for the p,q, since shadowing does not matter. 
-- -- In fact, it's more convenient to generate it innermost first, getting -- -- case (case e of v -- (p,q) -> p) of p -- (a,b) -> b mkBigTupleSelector vars the_var scrut_var scrut = mk_tup_sel (chunkify vars) the_var where mk_tup_sel [vars] the_var = mkSmallTupleSelector vars the_var scrut_var scrut mk_tup_sel vars_s the_var = mkSmallTupleSelector group the_var tpl_v $ mk_tup_sel (chunkify tpl_vs) tpl_v where tpl_tys = [mkBoxedTupleTy (map idType gp) | gp <- vars_s] tpl_vs = mkTemplateLocals tpl_tys [(tpl_v, group)] = [(tpl,gp) | (tpl,gp) <- zipEqual "mkBigTupleSelector" tpl_vs vars_s, the_var `elem` gp ] -- ^ 'mkBigTupleSelectorSolo' is like 'mkBigTupleSelector' -- but one-tuples are NOT flattened (see Note [Flattening one-tuples]) mkBigTupleSelectorSolo vars the_var scrut_var scrut | [_] <- vars = mkSmallTupleSelector1 vars the_var scrut_var scrut | otherwise = mkBigTupleSelector vars the_var scrut_var scrut -- | `mkSmallTupleSelector` is like 'mkBigTupleSelector', but for tuples that -- are guaranteed never to be "big". Also does not unwrap boxed types. 
-- -- > mkSmallTupleSelector [x] x v e = [| e |] -- > mkSmallTupleSelector [x,y,z] x v e = [| case e of v { (x,y,z) -> x } |] mkSmallTupleSelector, mkSmallTupleSelector1 :: [Id] -- The tuple args -> Id -- The selected one -> Id -- A variable of the same type as the scrutinee -> CoreExpr -- Scrutinee -> CoreExpr mkSmallTupleSelector [var] should_be_the_same_var _ scrut = assert (var == should_be_the_same_var) $ scrut -- Special case for 1-tuples mkSmallTupleSelector vars the_var scrut_var scrut = mkSmallTupleSelector1 vars the_var scrut_var scrut -- ^ 'mkSmallTupleSelector1' is like 'mkSmallTupleSelector' -- but one-tuples are NOT flattened (see Note [Flattening one-tuples]) mkSmallTupleSelector1 vars the_var scrut_var scrut = assert (notNull vars) $ Case scrut scrut_var (idType the_var) [Alt (DataAlt (tupleDataCon Boxed (length vars))) vars (Var the_var)] -- | A generalization of 'mkBigTupleSelector', allowing the body -- of the case to be an arbitrary expression. -- -- To avoid shadowing, we use uniques to invent new variables. -- -- If necessary we pattern match on a "big" tuple. mkBigTupleCase :: MonadUnique m -- For inventing names of intermediate variables => [Id] -- ^ The tuple identifiers to pattern match on; -- Bring these into scope in the body -> CoreExpr -- ^ Body of the case -> CoreExpr -- ^ Scrutinee -> m CoreExpr -- ToDo: eliminate cases where none of the variables are needed. -- -- mkBigTupleCase uniqs [a,b,c,d] body v e -- = case e of v { (p,q) -> -- case p of p { (a,b) -> -- case q of q { (c,d) -> -- body }}} mkBigTupleCase vars body scrut = do us <- getUniqueSupplyM let (wrapped_us, wrapped_vars, wrapped_body) = foldr unwrap (us,[],body) vars return $ mk_tuple_case wrapped_us (chunkify wrapped_vars) wrapped_body where scrut_ty = exprType scrut unwrap var (us,vars,body) = (us', var':vars, body') where (us', var', body') = unwrapBox us var body mk_tuple_case :: UniqSupply -> [[Id]] -> CoreExpr -> CoreExpr -- mk_tuple_case [[a1..an], [b1..bm], ...] 
body -- case scrut of (p,q, ...) -> -- case p of (a1,..an) -> -- case q of (b1,..bm) -> -- ... -> body -- This is the case where don't need any nesting mk_tuple_case us [vars] body = mkSmallTupleCase vars body scrut_var scrut where scrut_var = case scrut of Var v -> v _ -> snd (new_var us scrut_ty) -- This is the case where we must nest tuples at least once mk_tuple_case us vars_s body = mk_tuple_case us' (chunkify vars') body' where (us', vars', body') = foldr one_tuple_case (us, [], body) vars_s one_tuple_case chunk_vars (us, vs, body) = (us', scrut_var:vs, body') where tup_ty = mkBoxedTupleTy (map idType chunk_vars) (us', scrut_var) = new_var us tup_ty body' = mkSmallTupleCase chunk_vars body scrut_var (Var scrut_var) new_var :: UniqSupply -> Type -> (UniqSupply, Id) new_var us ty = (us', id) where (uniq, us') = takeUniqFromSupply us id = mkSysLocal (fsLit "ds") uniq ManyTy ty -- | As 'mkBigTupleCase', but for a tuple that is small enough to be guaranteed -- not to need nesting. mkSmallTupleCase :: [Id] -- ^ The tuple args -> CoreExpr -- ^ Body of the case -> Id -- ^ A variable of the same type as the scrutinee -> CoreExpr -- ^ Scrutinee -> CoreExpr mkSmallTupleCase [var] body _scrut_var scrut = bindNonRec var scrut body mkSmallTupleCase vars body scrut_var scrut = Case scrut scrut_var (exprType body) [Alt (DataAlt (tupleDataCon Boxed (length vars))) vars body] {- ************************************************************************ * * Floats * * ************************************************************************ -} data FloatBind = FloatLet CoreBind | FloatCase CoreExpr Id AltCon [Var] -- case e of y { C ys -> ... 
} -- See Note [Floating single-alternative cases] in GHC.Core.Opt.SetLevels instance Outputable FloatBind where ppr (FloatLet b) = text "LET" <+> ppr b ppr (FloatCase e b c bs) = hang (text "CASE" <+> ppr e <+> text "of" <+> ppr b) 2 (ppr c <+> ppr bs) wrapFloat :: FloatBind -> CoreExpr -> CoreExpr wrapFloat (FloatLet defns) body = Let defns body wrapFloat (FloatCase e b con bs) body = mkSingleAltCase e b con bs body -- | Applies the floats from right to left. That is @wrapFloats [b1, b2, …, bn] -- u = let b1 in let b2 in … in let bn in u@ wrapFloats :: [FloatBind] -> CoreExpr -> CoreExpr wrapFloats floats expr = foldr wrapFloat expr floats bindBindings :: CoreBind -> [Var] bindBindings (NonRec b _) = [b] bindBindings (Rec bnds) = map fst bnds floatBindings :: FloatBind -> [Var] floatBindings (FloatLet bnd) = bindBindings bnd floatBindings (FloatCase _ b _ bs) = b:bs {- ************************************************************************ * * \subsection{Common list manipulation expressions} * * ************************************************************************ Call the constructor Ids when building explicit lists, so that they interact well with rules. 
-}

-- | Build the empty-list constructor @[]@ at the given element type.
-- Uses the constructor Id directly so the result interacts well with rules.
mkNilExpr :: Type -> CoreExpr
mkNilExpr ty = mkCoreConApps nilDataCon [Type ty]

-- | Build an application of the cons constructor @(:)@ at the given
-- element type.
mkConsExpr :: Type -> CoreExpr -> CoreExpr -> CoreExpr
mkConsExpr ty hd tl = mkCoreConApps consDataCon [Type ty, hd, tl]

-- | Build an explicit list containing the given expressions, where the
-- list has the given element type.
mkListExpr :: Type -> [CoreExpr] -> CoreExpr
mkListExpr ty = foldr (mkConsExpr ty) (mkNilExpr ty)

-- | Make a fully applied 'foldr' expression
mkFoldrExpr :: MonadThings m
            => Type     -- ^ Element type of the list
            -> Type     -- ^ Fold result type
            -> CoreExpr -- ^ "Cons" function expression for the fold
            -> CoreExpr -- ^ "Nil" expression for the fold
            -> CoreExpr -- ^ List expression being folded across
            -> m CoreExpr
mkFoldrExpr elt_ty result_ty cons nil list
  = do { foldr_id <- lookupId foldrName
         -- foldr @elt_ty @result_ty cons nil list
       ; return (mkApps (Var foldr_id)
                        [Type elt_ty, Type result_ty, cons, nil, list]) }

-- | Make a 'build' expression applied to a locally-bound worker function
mkBuildExpr :: (MonadFail m, MonadThings m, MonadUnique m)
            => Type
               -- ^ Type of list elements to be built
            -> ((Id, Type) -> (Id, Type) -> m CoreExpr)
               -- ^ Given the 'Id's of the binders for the build worker
               -- function, produce the body of that worker
            -> m CoreExpr
mkBuildExpr elt_ty mk_build_inside
  = do { n_tyvar <- freshen alphaTyVar
       ; let n_ty = mkTyVarTy n_tyvar
             c_ty = mkVisFunTysMany [elt_ty, n_ty] n_ty
       ; [c, n] <- sequence [ mkSysLocalM (fsLit "c") ManyTy c_ty
                            , mkSysLocalM (fsLit "n") ManyTy n_ty ]
       ; build_inside <- mk_build_inside (c, c_ty) (n, n_ty)
       ; build_id <- lookupId buildName
       ; return $ Var build_id `App` Type elt_ty
                               `App` mkLams [n_tyvar, c, n] build_inside }
  where
    -- Give the template tyvar a fresh unique so it cannot clash with
    -- anything else in scope.
    freshen tyvar_tmpl = do { uniq <- getUniqueM
                            ; return (setTyVarUnique tyvar_tmpl uniq) }

{-
************************************************************************
*                                                                      *
                   Manipulating Maybe data type
*                                                                      *
************************************************************************
-}

-- | Make a @Nothing@ at the given element type.
mkNothingExpr :: Type -> CoreExpr
mkNothingExpr ty = mkConApp nothingDataCon [Type ty]

-- | Make a @Just@ wrapping a value of the given type.
mkJustExpr :: Type -> CoreExpr -> CoreExpr
mkJustExpr ty val = mkConApp justDataCon [Type ty, val]

{-
************************************************************************
*                                                                      *
                         Error expressions
*                                                                      *
************************************************************************
-}

mkRuntimeErrorApp
        :: Id        -- Should be of type
                     --    forall (r::RuntimeRep) (a::TYPE r). Addr# -> a
                     -- or                        (a :: CONSTRAINT r)
                     -- where Addr# points to a UTF8 encoded string
        -> Type      -- The type to instantiate 'a'
        -> String    -- The string to print
        -> CoreExpr
-- Applies the error Id to the runtime-rep of the result type, the result
-- type itself, and the address of the UTF8-encoded message string.
mkRuntimeErrorApp err_id res_ty err_msg
  = mkApps (Var err_id)
           [ Type (getRuntimeRep res_ty)  -- instantiate the RuntimeRep binder
           , Type res_ty
           , Lit (mkLitString err_msg) ]

{-
************************************************************************
*                                                                      *
                          Error Ids
*                                                                      *
************************************************************************

GHC randomly injects these into the code.

@patError@ is just a version of @error@ for pattern-matching
failures.  It knows various ``codes'' which expand to longer
strings---this saves space!

@absentErr@ is a thing we put in for ``absent'' arguments.  They jolly
well shouldn't be yanked on, but if one is, then you will get a friendly
message from @absentErr@ (rather than a totally random crash).
-}

-- All the wired-in runtime-error Ids, gathered in one list.
errorIds :: [Id]
errorIds
  = [ nON_EXHAUSTIVE_GUARDS_ERROR_ID,
      nO_METHOD_BINDING_ERROR_ID,
      pAT_ERROR_ID,
      rEC_CON_ERROR_ID,
      rEC_SEL_ERROR_ID,
      iMPOSSIBLE_ERROR_ID, iMPOSSIBLE_CONSTRAINT_ERROR_ID,
      aBSENT_ERROR_ID, aBSENT_CONSTRAINT_ERROR_ID,
      aBSENT_SUM_FIELD_ERROR_ID,
      tYPE_ERROR_ID   -- Used with Opt_DeferTypeErrors, see #10284
      ]

recSelErrorName, recConErrorName, patErrorName :: Name
nonExhaustiveGuardsErrorName, noMethodBindingErrorName :: Name
typeErrorName :: Name
absentSumFieldErrorName :: Name

-- NOTE(review): each Name refers to its Id and each Id refers back to its
-- Name; this mutual recursion is the usual knot-tying for wired-in things.
recSelErrorName     = err_nm "recSelError"     recSelErrorIdKey     rEC_SEL_ERROR_ID
recConErrorName     = err_nm "recConError"     recConErrorIdKey     rEC_CON_ERROR_ID
patErrorName        = err_nm "patError"        patErrorIdKey        pAT_ERROR_ID
typeErrorName       = err_nm "typeError"       typeErrorIdKey       tYPE_ERROR_ID

noMethodBindingErrorName     = err_nm "noMethodBindingError"
                                  noMethodBindingErrorIdKey nO_METHOD_BINDING_ERROR_ID
nonExhaustiveGuardsErrorName = err_nm "nonExhaustiveGuardsError"
                                  nonExhaustiveGuardsErrorIdKey nON_EXHAUSTIVE_GUARDS_ERROR_ID

-- Shared helper: build the wired-in Name for an error Id that lives in
-- module gHC_INTERNAL_CONTROL_EXCEPTION_BASE.
err_nm :: String -> Unique -> Id -> Name
err_nm str uniq id = mkWiredInIdName gHC_INTERNAL_CONTROL_EXCEPTION_BASE (fsLit str) uniq id

rEC_SEL_ERROR_ID, rEC_CON_ERROR_ID :: Id
pAT_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID :: Id
tYPE_ERROR_ID, aBSENT_SUM_FIELD_ERROR_ID :: Id
-- All of these are TypeLike (result kind TYPE r); the ConstraintLike
-- variants (e.g. iMPOSSIBLE_CONSTRAINT_ERROR_ID) are defined elsewhere.
rEC_SEL_ERROR_ID               = mkRuntimeErrorId TypeLike recSelErrorName
rEC_CON_ERROR_ID               = mkRuntimeErrorId TypeLike recConErrorName
pAT_ERROR_ID                   = mkRuntimeErrorId TypeLike patErrorName
nO_METHOD_BINDING_ERROR_ID     = mkRuntimeErrorId TypeLike noMethodBindingErrorName
nON_EXHAUSTIVE_GUARDS_ERROR_ID = mkRuntimeErrorId TypeLike nonExhaustiveGuardsErrorName
tYPE_ERROR_ID                  = mkRuntimeErrorId TypeLike typeErrorName

-- Note [aBSENT_SUM_FIELD_ERROR_ID]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Unboxed sums are transformed into unboxed tuples in GHC.Stg.Unarise.mkUbxSum
-- and fields that can't be reached are filled with rubbish values.
-- For instance, consider the case of the program: -- -- f :: (# Int | Float# #) -> Int -- f = ... -- -- x = f (# | 2.0## #) -- -- Unarise will represent f's unboxed sum argument as a tuple (# Int#, Int, -- Float# #), where Int# is a tag. Consequently, `x` will be rewritten to: -- -- x = f (# 2#, ???, 2.0## #) -- -- We must come up with some rubbish literal to use in place of `???`. In the -- case of unboxed integer types this is easy: we can simply use 0 for -- Int#/Word# and 0.0 Float#/Double#. -- -- However, coming up with a rubbish pointer value is more delicate as the -- value must satisfy the following requirements: -- -- 1. it needs to be a valid closure pointer for the GC (not a NULL pointer) -- -- 2. it can't take arguments because it's used in unarise and applying an -- argument would require allocating a thunk, which is both difficult to -- do and costly. -- -- 3. it shouldn't be CAFfy since this would make otherwise non-CAFfy -- bindings CAFfy, incurring a cost in GC performance. Given that unboxed -- sums are intended to be used in performance-critical code, this is to -- We work-around this by declaring the absentSumFieldError as non-CAFfy, -- as described in Note [Wired-in exceptions are not CAFfy]. -- -- Getting this wrong causes hard-to-debug runtime issues, see #15038. -- -- 4. it can't be defined in `base` package. Afterall, not all code which -- uses unboxed sums uses depends upon `base`. Specifically, this became -- an issue when we wanted to use unboxed sums in boot libraries used by -- `base`, see #17791. -- -- To fill this role we define `ghc-prim:GHC.Prim.Panic.absentSumFieldError` -- with the type: -- -- absentSumFieldError :: forall a. a -- -- Note that this type is something of a lie since Unarise may use it at an -- unlifted type. However, this lie is benign as absent sum fields are examined -- only by the GC, which does not care about levity.. 
-- -- When entered, this closure calls `stg_panic#`, which immediately halts -- execution and cannot be caught. This is in contrast to most other runtime -- errors, which are thrown as proper Haskell exceptions. This design is -- intentional since entering an absent sum field is an indication that -- something has gone horribly wrong, very likely due to a compiler bug. -- -- Note [Wired-in exceptions are not CAFfy] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- GHC has logic wiring-in a small number of exceptions, which may be thrown in -- generated code. Specifically, these are implemented via closures (defined -- in `GHC.Prim.Exception` in `ghc-prim`) which, when entered, raise the desired -- exception. For instance, in the case of OverflowError we have -- -- raiseOverflow :: forall a. a -- raiseOverflow = runRW# (\s -> -- case raiseOverflow# s of -- (# _, _ #) -> let x = x in x) -- -- where `raiseOverflow#` is defined in the rts/Exception.cmm. -- -- Note that `raiseOverflow` and friends, being top-level thunks, are CAFs. -- Normally, this would be reflected in their IdInfo; however, as these -- functions are widely used and CAFfyness is transitive, we very much want to -- avoid declaring them as CAFfy. This is especially true in especially in -- performance-critical code like that using unboxed sums and -- absentSumFieldError. -- -- Consequently, `mkExceptionId` instead declares the exceptions to be -- non-CAFfy and rather ensure in the RTS (in `initBuiltinGcRoots` in -- rts/RtsStartup.c) that these closures remain reachable by creating a -- StablePtr to each. Note that we are using the StablePtr mechanism not -- because we need a StablePtr# object, but rather because the stable pointer -- table is a source of GC roots. -- -- At some point we could consider removing this optimisation as it is quite -- fragile, but we do want to be careful to avoid adding undue cost. Unboxed -- sums in particular are intended to be used in performance-critical contexts. 
-- -- See #15038, #21141. absentSumFieldErrorName = mkWiredInIdName gHC_PRIM_PANIC (fsLit "absentSumFieldError") absentSumFieldErrorIdKey aBSENT_SUM_FIELD_ERROR_ID aBSENT_SUM_FIELD_ERROR_ID = mkExceptionId absentSumFieldErrorName -- | Exception with type \"forall a. a\" -- -- Any exceptions added via this function needs to be added to -- the RTS's initBuiltinGcRoots() function. mkExceptionId :: Name -> Id mkExceptionId name = mkVanillaGlobalWithInfo name (mkSpecForAllTys [alphaTyVar] (mkTyVarTy alphaTyVar)) -- forall a . a (divergingIdInfo [] `setCafInfo` NoCafRefs) -- See Note [Wired-in exceptions are not CAFfy] -- | An 'IdInfo' for an Id, such as 'aBSENT_ERROR_ID', that -- throws an (imprecise) exception after being supplied one value arg for every -- argument 'Demand' in the list. The demands end up in the demand signature. -- -- 1. Sets the demand signature to unleash the given arg dmds 'botDiv' -- 2. Sets the arity info so that it matches the length of arg demands -- 3. Sets a bottoming CPR sig with the correct arity -- -- It's important that all 3 agree on the arity, which is what this defn ensures. divergingIdInfo :: [Demand] -> IdInfo divergingIdInfo arg_dmds = vanillaIdInfo `setArityInfo` arity `setDmdSigInfo` mkClosedDmdSig arg_dmds botDiv `setCprSigInfo` mkCprSig arity botCpr where arity = length arg_dmds {- Note [Error and friends have an "open-tyvar" forall] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'error' and 'undefined' have types error :: forall (v :: RuntimeRep) (a :: TYPE v). String -> a undefined :: forall (v :: RuntimeRep) (a :: TYPE v). a Notice the runtime-representation polymorphism. This ensures that "error" can be instantiated at unboxed as well as boxed types. This is OK because it never returns, so the return type is irrelevant. 
************************************************************************ * * iMPOSSIBLE_ERROR_ID * * ************************************************************************ -} iMPOSSIBLE_ERROR_ID, iMPOSSIBLE_CONSTRAINT_ERROR_ID :: Id iMPOSSIBLE_ERROR_ID = mkRuntimeErrorId TypeLike impossibleErrorName iMPOSSIBLE_CONSTRAINT_ERROR_ID = mkRuntimeErrorId ConstraintLike impossibleConstraintErrorName impossibleErrorName, impossibleConstraintErrorName :: Name impossibleErrorName = err_nm "impossibleError" impossibleErrorIdKey iMPOSSIBLE_ERROR_ID impossibleConstraintErrorName = err_nm "impossibleConstraintError" impossibleConstraintErrorIdKey iMPOSSIBLE_CONSTRAINT_ERROR_ID mkImpossibleExpr :: Type -> String -> CoreExpr mkImpossibleExpr res_ty str = mkRuntimeErrorApp err_id res_ty str where -- See Note [Type vs Constraint for error ids] err_id = case typeTypeOrConstraint res_ty of TypeLike -> iMPOSSIBLE_ERROR_ID ConstraintLike -> iMPOSSIBLE_CONSTRAINT_ERROR_ID {- Note [Type vs Constraint for error ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need both iMPOSSIBLE_ERROR_ID :: forall (r::RuntimeRep) (a::TYPE r). Addr# -> a iMPOSSIBLE_CONSTRAINT_ERROR_ID :: forall (r::RuntimeRep) (a::CONSTRAINT r). Addr# -> a because we don't have polymorphism over TYPE vs CONSTRAINT. You might wonder if iMPOSSIBLE_CONSTRAINT_ERROR_ID is ever needed in practice, but it is: see #22634. So: * In Control.Exception.Base we have impossibleError :: forall (a::Type). Addr# -> a impossibleConstraintError :: forall (a::Type). Addr# -> a This generates the code for `impossibleError`, but because they are wired in the interface file definitions are never looked at (indeed, they don't even get serialised). * In this module GHC.Core.Make we define /wired-in/ Ids for iMPOSSIBLE_ERROR_ID iMPOSSIBLE_CONSTRAINT_ERROR_ID with the desired above types (i.e. runtime-rep polymorphic, and returning a constraint for the latter. 
Much the same plan works for aBSENT_ERROR_ID and aBSENT_CONSTRAINT_ERROR_ID ************************************************************************ * * aBSENT_ERROR_ID * * ************************************************************************ Note [aBSENT_ERROR_ID] ~~~~~~~~~~~~~~~~~~~~~~ We use aBSENT_ERROR_ID to build absent fillers for lifted types in workers. E.g. f x = (case x of (a,b) -> b) + 1::Int The demand analyser figures out that only the second component of x is used, and does a w/w split thus f x = case x of (a,b) -> $wf b $wf b = let a = absentError "blah" x = (a,b) in After some simplification, the (absentError "blah") thunk normally goes away. See also Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils. Historical Note --------------- We used to have exprIsHNF respond True to absentError and *not* mark it as diverging. Here's the reason for the former. It doesn't apply anymore because we no longer say that `a` is absent (A). Instead it gets (head strict) demand 1A and we won't emit the absent error: #14285 had, roughly data T a = MkT a !a {-# INLINABLE f #-} f x = case x of MkT a b -> g (MkT b a) It turned out that g didn't use the second component, and hence f doesn't use the first. But the stable-unfolding for f looks like \x. case x of MkT a b -> g ($WMkT b a) where $WMkT is the wrapper for MkT that evaluates its arguments. We apply the same w/w split to this unfolding (see Note [Worker/wrapper for INLINABLE functions] in GHC.Core.Opt.WorkWrap) so the template ends up like \b. let a = absentError "blah" x = MkT a b in case x of MkT a b -> g ($WMkT b a) After doing case-of-known-constructor, and expanding $WMkT we get \b -> g (case absentError "blah" of a -> MkT b a) Yikes! That bogusly appears to evaluate the absentError! This is extremely tiresome. Another way to think of this is that, in Core, it is an invariant that a strict data constructor, like MkT, must be applied only to an argument in HNF. 
So (absentError "blah") had better be non-bottom. So the "solution" is to add a special case for absentError to exprIsHNFlike. This allows Simplify.rebuildCase, in the Note [Case to let transformation] branch, to convert the case on absentError into a let. We also make absentError *not* be diverging, unlike the other error-ids, so that we can be sure not to remove the case branches before converting the case to a let. If, by some bug or bizarre happenstance, we ever call absentError, we should throw an exception. This should never happen, of course, but we definitely can't return anything. e.g. if somehow we had case absentError "foo" of Nothing -> ... Just x -> ... then if we return, the case expression will select a field and continue. Seg fault city. Better to throw an exception. (Even though we've said it is in HNF :-) It might seem a bit surprising that seq on absentError is simply erased absentError "foo" `seq` x ==> x but that should be okay; since there's no pattern match we can't really be relying on anything from it. -} -- We need two absentError Ids: -- absentError :: forall (a :: Type). Addr# -> a -- absentConstraintError :: forall (a :: Constraint). Addr# -> a -- We don't have polymorphism over TypeOrConstraint! 
-- mkAbsentErrorApp chooses which one to use, based on the kind -- See Note [Type vs Constraint for error ids] mkAbsentErrorApp :: Type -- The type to instantiate 'a' -> String -- The string to print -> CoreExpr mkAbsentErrorApp res_ty err_msg = mkApps (Var err_id) [ Type res_ty, err_string ] where err_id = case typeTypeOrConstraint res_ty of TypeLike -> aBSENT_ERROR_ID ConstraintLike -> aBSENT_CONSTRAINT_ERROR_ID err_string = Lit (mkLitString err_msg) absentErrorName, absentConstraintErrorName :: Name absentErrorName = mkWiredInIdName gHC_PRIM_PANIC (fsLit "absentError") absentErrorIdKey aBSENT_ERROR_ID absentConstraintErrorName -- See Note [Type vs Constraint for error ids] = mkWiredInIdName gHC_PRIM_PANIC (fsLit "absentConstraintError") absentConstraintErrorIdKey aBSENT_CONSTRAINT_ERROR_ID aBSENT_ERROR_ID, aBSENT_CONSTRAINT_ERROR_ID :: Id aBSENT_ERROR_ID -- See Note [aBSENT_ERROR_ID] = mk_runtime_error_id absentErrorName absent_ty where -- absentError :: forall (a :: Type). Addr# -> a absent_ty = mkSpecForAllTys [alphaTyVar] $ mkVisFunTyMany addrPrimTy (mkTyVarTy alphaTyVar) -- Not runtime-rep polymorphic. aBSENT_ERROR_ID is only used for -- lifted-type things; see Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils aBSENT_CONSTRAINT_ERROR_ID -- See Note [aBSENT_ERROR_ID] = mk_runtime_error_id absentConstraintErrorName absent_ty -- See Note [Type vs Constraint for error ids] where -- absentConstraintError :: forall (a :: Constraint). Addr# -> a absent_ty = mkSpecForAllTys [alphaConstraintTyVar] $ mkFunTy visArgConstraintLike ManyTy addrPrimTy (mkTyVarTy alphaConstraintTyVar) {- ************************************************************************ * * mkRuntimeErrorId * * ************************************************************************ -} mkRuntimeErrorId :: TypeOrConstraint -> Name -> Id -- Error function -- with type: forall (r::RuntimeRep) (a::TYPE r). 
Addr# -> a -- with arity: 1 -- which diverges after being given one argument -- The Addr# is expected to be the address of -- a UTF8-encoded error string mkRuntimeErrorId torc name = mk_runtime_error_id name (mkRuntimeErrorTy torc) mk_runtime_error_id :: Name -> Type -> Id mk_runtime_error_id name ty = mkVanillaGlobalWithInfo name ty (divergingIdInfo [evalDmd]) -- Do *not* mark them as NoCafRefs, because they can indeed have -- CAF refs. For example, pAT_ERROR_ID calls GHC.Err.untangle, -- which has some CAFs -- In due course we may arrange that these error-y things are -- regarded by the GC as permanently live, in which case we -- can give them NoCaf info. As it is, any function that calls -- any pc_bottoming_Id will itself have CafRefs, which bloats -- SRTs. mkRuntimeErrorTy :: TypeOrConstraint -> Type -- forall (rr :: RuntimeRep) (a :: rr). Addr# -> a -- See Note [Error and friends have an "open-tyvar" forall] mkRuntimeErrorTy torc = mkSpecForAllTys [runtimeRep1TyVar, tyvar] $ mkFunctionType ManyTy addrPrimTy (mkTyVarTy tyvar) where (tyvar:_) = mkTemplateTyVars [kind] kind = case torc of TypeLike -> mkTYPEapp runtimeRep1Ty ConstraintLike -> mkCONSTRAINTapp runtimeRep1Ty ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Map/0000755000000000000000000000000007346545000017313 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Map/Expr.hs0000644000000000000000000004276407346545000020602 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# OPTIONS_GHC -Wno-orphans #-} -- Eq (DeBruijn CoreExpr) and Eq (DeBruijn CoreAlt) module GHC.Core.Map.Expr ( -- * Maps over Core expressions CoreMap, emptyCoreMap, extendCoreMap, lookupCoreMap, foldCoreMap, -- * Alpha equality eqDeBruijnExpr, 
eqCoreExpr, -- * 'TrieMap' class reexports TrieMap(..), insertTM, deleteTM, lkDFreeVar, xtDFreeVar, lkDNamed, xtDNamed, (>.>), (|>), (|>>), ) where import GHC.Prelude import GHC.Data.TrieMap import GHC.Core.Map.Type import GHC.Core import GHC.Core.Type import GHC.Types.Tickish import GHC.Types.Var import GHC.Utils.Misc import GHC.Utils.Outputable import qualified Data.Map as Map import GHC.Types.Name.Env import Control.Monad( (>=>) ) import GHC.Types.Literal (Literal) {- This module implements TrieMaps over Core related data structures like CoreExpr or Type. It is built on the Tries from the TrieMap module. The code is very regular and boilerplate-like, but there is some neat handling of *binders*. In effect they are deBruijn numbered on the fly. -} ---------------------- -- Recall that -- Control.Monad.(>=>) :: (a -> Maybe b) -> (b -> Maybe c) -> a -> Maybe c -- The CoreMap makes heavy use of GenMap. However the CoreMap Types are not -- known when defining GenMap so we can only specialize them here. {-# SPECIALIZE lkG :: Key CoreMapX -> CoreMapG a -> Maybe a #-} {-# SPECIALIZE xtG :: Key CoreMapX -> XT a -> CoreMapG a -> CoreMapG a #-} {-# SPECIALIZE mapG :: (a -> b) -> CoreMapG a -> CoreMapG b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> CoreMapG a -> b -> b #-} {- ************************************************************************ * * CoreMap * * ************************************************************************ -} {- Note [Binders] ~~~~~~~~~~~~~~ * In general we check binders as late as possible because types are less likely to differ than expression structure. 
That's why cm_lam :: CoreMapG (TypeMapG a) rather than cm_lam :: TypeMapG (CoreMapG a) * We don't need to look at the type of some binders, notably - the case binder in (Case _ b _ _) - the binders in an alternative because they are totally fixed by the context Note [Empty case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For a key (Case e b ty (alt:alts)) we don't need to look the return type 'ty', because every alternative has that type. * For a key (Case e b ty []) we MUST look at the return type 'ty', because otherwise (Case (error () "urk") _ Int []) would compare equal to (Case (error () "urk") _ Bool []) which is utterly wrong (#6097) We could compare the return type regardless, but the wildly common case is that it's unnecessary, so we have two fields (cm_case and cm_ecase) for the two possibilities. Only cm_ecase looks at the type. See also Note [Empty case alternatives] in GHC.Core. -} -- | @CoreMap a@ is a map from 'CoreExpr' to @a@. If you are a client, this -- is the type you want. newtype CoreMap a = CoreMap (CoreMapG a) -- TODO(22292): derive instance Functor CoreMap where fmap f = \ (CoreMap m) -> CoreMap (fmap f m) {-# INLINE fmap #-} instance TrieMap CoreMap where type Key CoreMap = CoreExpr emptyTM = CoreMap emptyTM lookupTM k (CoreMap m) = lookupTM (deBruijnize k) m alterTM k f (CoreMap m) = CoreMap (alterTM (deBruijnize k) f m) foldTM k (CoreMap m) = foldTM k m filterTM f (CoreMap m) = CoreMap (filterTM f m) -- | @CoreMapG a@ is a map from @DeBruijn CoreExpr@ to @a@. The extended -- key makes it suitable for recursive traversal, since it can track binders, -- but it is strictly internal to this module. If you are including a 'CoreMap' -- inside another 'TrieMap', this is the type you want. type CoreMapG = GenMap CoreMapX type LiteralMap a = Map.Map Literal a -- | @CoreMapX a@ is the base map from @DeBruijn CoreExpr@ to @a@, but without -- the 'GenMap' optimization. 
data CoreMapX a = CM { cm_var :: VarMap a , cm_lit :: LiteralMap a , cm_co :: CoercionMapG a , cm_type :: TypeMapG a , cm_cast :: CoreMapG (CoercionMapG a) , cm_tick :: CoreMapG (TickishMap a) , cm_app :: CoreMapG (CoreMapG a) , cm_lam :: CoreMapG (BndrMap a) -- Note [Binders] , cm_letn :: CoreMapG (CoreMapG (BndrMap a)) , cm_letr :: ListMap CoreMapG (CoreMapG (ListMap BndrMap a)) , cm_case :: CoreMapG (ListMap AltMap a) , cm_ecase :: CoreMapG (TypeMapG a) -- Note [Empty case alternatives] } instance Eq (DeBruijn CoreExpr) where (==) = eqDeBruijnExpr eqDeBruijnExpr :: DeBruijn CoreExpr -> DeBruijn CoreExpr -> Bool eqDeBruijnExpr (D env1 e1) (D env2 e2) = go e1 e2 where go (Var v1) (Var v2) = eqDeBruijnVar (D env1 v1) (D env2 v2) go (Lit lit1) (Lit lit2) = lit1 == lit2 go (Type t1) (Type t2) = eqDeBruijnType (D env1 t1) (D env2 t2) -- See Note [Alpha-equality for Coercion arguments] go (Coercion {}) (Coercion {}) = True go (Cast e1 co1) (Cast e2 co2) = D env1 co1 == D env2 co2 && go e1 e2 go (App f1 a1) (App f2 a2) = go f1 f2 && go a1 a2 go (Tick n1 e1) (Tick n2 e2) = eqDeBruijnTickish (D env1 n1) (D env2 n2) && go e1 e2 go (Lam b1 e1) (Lam b2 e2) = eqDeBruijnType (D env1 (varType b1)) (D env2 (varType b2)) && D env1 (varMultMaybe b1) == D env2 (varMultMaybe b2) && eqDeBruijnExpr (D (extendCME env1 b1) e1) (D (extendCME env2 b2) e2) go (Let (NonRec v1 r1) e1) (Let (NonRec v2 r2) e2) = go r1 r2 -- See Note [Alpha-equality for let-bindings] && eqDeBruijnExpr (D (extendCME env1 v1) e1) (D (extendCME env2 v2) e2) go (Let (Rec ps1) e1) (Let (Rec ps2) e2) = equalLength ps1 ps2 -- See Note [Alpha-equality for let-bindings] && all2 (\b1 b2 -> eqDeBruijnType (D env1 (varType b1)) (D env2 (varType b2))) bs1 bs2 && D env1' rs1 == D env2' rs2 && eqDeBruijnExpr (D env1' e1) (D env2' e2) where (bs1,rs1) = unzip ps1 (bs2,rs2) = unzip ps2 env1' = extendCMEs env1 bs1 env2' = extendCMEs env2 bs2 go (Case e1 b1 t1 a1) (Case e2 b2 t2 a2) | null a1 -- See Note [Empty case alternatives] 
= null a2 && go e1 e2 && D env1 t1 == D env2 t2 | otherwise = go e1 e2 && D (extendCME env1 b1) a1 == D (extendCME env2 b2) a2 go _ _ = False eqDeBruijnTickish :: DeBruijn CoreTickish -> DeBruijn CoreTickish -> Bool eqDeBruijnTickish (D env1 t1) (D env2 t2) = go t1 t2 where go (Breakpoint lext lid lids lmod) (Breakpoint rext rid rids rmod) = lid == rid && D env1 lids == D env2 rids && lext == rext && lmod == rmod go l r = l == r -- Compares for equality, modulo alpha eqCoreExpr :: CoreExpr -> CoreExpr -> Bool eqCoreExpr e1 e2 = eqDeBruijnExpr (deBruijnize e1) (deBruijnize e2) {- Note [Alpha-equality for Coercion arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'Coercion' constructor only appears in argument positions, and so, if the functions are equal, then the arguments must have equal types. Because the comparison for coercions (correctly) checks only their types, checking for alpha-equality of the coercions is redundant. -} {- Note [Alpha-equality for let-bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For /recursive/ let-bindings we need to check that the types of the binders are alpha-equivalent. Otherwise letrec (x : Bool) = x in x and letrec (y : Char) = y in y would be considered alpha-equivalent, which they are obviously not. For /non-recursive/ let-bindings, we do not have to check that the types of the binders are alpha-equivalent. When the RHSs (the expressions) of the non-recursive let-binders are well-formed and well-typed (which we assume they are at this point in the compiler), and the RHSs are alpha-equivalent, then the bindings must have the same type. 
In addition, it is also worth pointing out that letrec { x = e1; y = e2 } in b is NOT considered equal to letrec { y = e2; x = e1 } in b -} emptyE :: CoreMapX a emptyE = CM { cm_var = emptyTM, cm_lit = emptyTM , cm_co = emptyTM, cm_type = emptyTM , cm_cast = emptyTM, cm_app = emptyTM , cm_lam = emptyTM, cm_letn = emptyTM , cm_letr = emptyTM, cm_case = emptyTM , cm_ecase = emptyTM, cm_tick = emptyTM } -- TODO(22292): derive instance Functor CoreMapX where fmap f CM { cm_var = cvar, cm_lit = clit, cm_co = cco, cm_type = ctype, cm_cast = ccast , cm_app = capp, cm_lam = clam, cm_letn = cletn, cm_letr = cletr, cm_case = ccase , cm_ecase = cecase, cm_tick = ctick } = CM { cm_var = fmap f cvar, cm_lit = fmap f clit, cm_co = fmap f cco, cm_type = fmap f ctype , cm_cast = fmap (fmap f) ccast, cm_app = fmap (fmap f) capp, cm_lam = fmap (fmap f) clam , cm_letn = fmap (fmap (fmap f)) cletn, cm_letr = fmap (fmap (fmap f)) cletr , cm_case = fmap (fmap f) ccase, cm_ecase = fmap (fmap f) cecase , cm_tick = fmap (fmap f) ctick } instance TrieMap CoreMapX where type Key CoreMapX = DeBruijn CoreExpr emptyTM = emptyE lookupTM = lkE alterTM = xtE foldTM = fdE filterTM = ftE -------------------------- ftE :: (a->Bool) -> CoreMapX a -> CoreMapX a ftE f (CM { cm_var = cvar, cm_lit = clit , cm_co = cco, cm_type = ctype , cm_cast = ccast , cm_app = capp , cm_lam = clam, cm_letn = cletn , cm_letr = cletr, cm_case = ccase , cm_ecase = cecase, cm_tick = ctick }) = CM { cm_var = filterTM f cvar, cm_lit = filterTM f clit , cm_co = filterTM f cco, cm_type = filterTM f ctype , cm_cast = fmap (filterTM f) ccast, cm_app = fmap (filterTM f) capp , cm_lam = fmap (filterTM f) clam, cm_letn = fmap (fmap (filterTM f)) cletn , cm_letr = fmap (fmap (filterTM f)) cletr, cm_case = fmap (filterTM f) ccase , cm_ecase = fmap (filterTM f) cecase, cm_tick = fmap (filterTM f) ctick } -------------------------- lookupCoreMap :: CoreMap a -> CoreExpr -> Maybe a lookupCoreMap cm e = lookupTM e cm extendCoreMap :: 
CoreMap a -> CoreExpr -> a -> CoreMap a extendCoreMap m e v = alterTM e (\_ -> Just v) m foldCoreMap :: (a -> b -> b) -> b -> CoreMap a -> b foldCoreMap k z m = foldTM k m z emptyCoreMap :: CoreMap a emptyCoreMap = emptyTM instance Outputable a => Outputable (CoreMap a) where ppr m = text "CoreMap elts" <+> ppr (foldTM (:) m []) ------------------------- fdE :: (a -> b -> b) -> CoreMapX a -> b -> b fdE k m = foldTM k (cm_var m) . foldTM k (cm_lit m) . foldTM k (cm_co m) . foldTM k (cm_type m) . foldTM (foldTM k) (cm_cast m) . foldTM (foldTM k) (cm_tick m) . foldTM (foldTM k) (cm_app m) . foldTM (foldTM k) (cm_lam m) . foldTM (foldTM (foldTM k)) (cm_letn m) . foldTM (foldTM (foldTM k)) (cm_letr m) . foldTM (foldTM k) (cm_case m) . foldTM (foldTM k) (cm_ecase m) -- lkE: lookup in trie for expressions lkE :: DeBruijn CoreExpr -> CoreMapX a -> Maybe a lkE (D env expr) cm = go expr cm where go (Var v) = cm_var >.> lkVar env v go (Lit l) = cm_lit >.> lookupTM l go (Type t) = cm_type >.> lkG (D env t) go (Coercion c) = cm_co >.> lkG (D env c) go (Cast e c) = cm_cast >.> lkG (D env e) >=> lkG (D env c) go (Tick tickish e) = cm_tick >.> lkG (D env e) >=> lkTickish tickish go (App e1 e2) = cm_app >.> lkG (D env e2) >=> lkG (D env e1) go (Lam v e) = cm_lam >.> lkG (D (extendCME env v) e) >=> lkBndr env v go (Let (NonRec b r) e) = cm_letn >.> lkG (D env r) >=> lkG (D (extendCME env b) e) >=> lkBndr env b go (Let (Rec prs) e) = let (bndrs,rhss) = unzip prs env1 = extendCMEs env bndrs in cm_letr >.> lkList (lkG . 
D env1) rhss >=> lkG (D env1 e) >=> lkList (lkBndr env1) bndrs go (Case e b ty as) -- See Note [Empty case alternatives] | null as = cm_ecase >.> lkG (D env e) >=> lkG (D env ty) | otherwise = cm_case >.> lkG (D env e) >=> lkList (lkA (extendCME env b)) as xtE :: DeBruijn CoreExpr -> XT a -> CoreMapX a -> CoreMapX a xtE (D env (Var v)) f m = m { cm_var = cm_var m |> xtVar env v f } xtE (D env (Type t)) f m = m { cm_type = cm_type m |> xtG (D env t) f } xtE (D env (Coercion c)) f m = m { cm_co = cm_co m |> xtG (D env c) f } xtE (D _ (Lit l)) f m = m { cm_lit = cm_lit m |> alterTM l f } xtE (D env (Cast e c)) f m = m { cm_cast = cm_cast m |> xtG (D env e) |>> xtG (D env c) f } xtE (D env (Tick t e)) f m = m { cm_tick = cm_tick m |> xtG (D env e) |>> xtTickish t f } xtE (D env (App e1 e2)) f m = m { cm_app = cm_app m |> xtG (D env e2) |>> xtG (D env e1) f } xtE (D env (Lam v e)) f m = m { cm_lam = cm_lam m |> xtG (D (extendCME env v) e) |>> xtBndr env v f } xtE (D env (Let (NonRec b r) e)) f m = m { cm_letn = cm_letn m |> xtG (D (extendCME env b) e) |>> xtG (D env r) |>> xtBndr env b f } xtE (D env (Let (Rec prs) e)) f m = m { cm_letr = let (bndrs,rhss) = unzip prs env1 = extendCMEs env bndrs in cm_letr m |> xtList (xtG . 
D env1) rhss |>> xtG (D env1 e) |>> xtList (xtBndr env1) bndrs f } xtE (D env (Case e b ty as)) f m | null as = m { cm_ecase = cm_ecase m |> xtG (D env e) |>> xtG (D env ty) f } | otherwise = m { cm_case = cm_case m |> xtG (D env e) |>> let env1 = extendCME env b in xtList (xtA env1) as f } -- TODO: this seems a bit dodgy, see 'eqTickish' type TickishMap a = Map.Map CoreTickish a lkTickish :: CoreTickish -> TickishMap a -> Maybe a lkTickish = lookupTM xtTickish :: CoreTickish -> XT a -> TickishMap a -> TickishMap a xtTickish = alterTM ------------------------ data AltMap a -- A single alternative = AM { am_deflt :: CoreMapG a , am_data :: DNameEnv (CoreMapG a) , am_lit :: LiteralMap (CoreMapG a) } -- TODO(22292): derive instance Functor AltMap where fmap f AM { am_deflt = adeflt, am_data = adata, am_lit = alit } = AM { am_deflt = fmap f adeflt, am_data = fmap (fmap f) adata, am_lit = fmap (fmap f) alit } instance TrieMap AltMap where type Key AltMap = CoreAlt emptyTM = AM { am_deflt = emptyTM , am_data = emptyDNameEnv , am_lit = emptyTM } lookupTM = lkA emptyCME alterTM = xtA emptyCME foldTM = fdA filterTM = ftA instance Eq (DeBruijn CoreAlt) where D env1 a1 == D env2 a2 = go a1 a2 where go (Alt DEFAULT _ rhs1) (Alt DEFAULT _ rhs2) = D env1 rhs1 == D env2 rhs2 go (Alt (LitAlt lit1) _ rhs1) (Alt (LitAlt lit2) _ rhs2) = lit1 == lit2 && D env1 rhs1 == D env2 rhs2 go (Alt (DataAlt dc1) bs1 rhs1) (Alt (DataAlt dc2) bs2 rhs2) = dc1 == dc2 && D (extendCMEs env1 bs1) rhs1 == D (extendCMEs env2 bs2) rhs2 go _ _ = False ftA :: (a->Bool) -> AltMap a -> AltMap a ftA f (AM { am_deflt = adeflt, am_data = adata, am_lit = alit }) = AM { am_deflt = filterTM f adeflt , am_data = fmap (filterTM f) adata , am_lit = fmap (filterTM f) alit } lkA :: CmEnv -> CoreAlt -> AltMap a -> Maybe a lkA env (Alt DEFAULT _ rhs) = am_deflt >.> lkG (D env rhs) lkA env (Alt (LitAlt lit) _ rhs) = am_lit >.> lookupTM lit >=> lkG (D env rhs) lkA env (Alt (DataAlt dc) bs rhs) = am_data >.> lkDNamed dc >=> 
lkG (D (extendCMEs env bs) rhs) xtA :: CmEnv -> CoreAlt -> XT a -> AltMap a -> AltMap a xtA env (Alt DEFAULT _ rhs) f m = m { am_deflt = am_deflt m |> xtG (D env rhs) f } xtA env (Alt (LitAlt l) _ rhs) f m = m { am_lit = am_lit m |> alterTM l |>> xtG (D env rhs) f } xtA env (Alt (DataAlt d) bs rhs) f m = m { am_data = am_data m |> xtDNamed d |>> xtG (D (extendCMEs env bs) rhs) f } fdA :: (a -> b -> b) -> AltMap a -> b -> b fdA k m = foldTM k (am_deflt m) . foldTM (foldTM k) (am_data m) . foldTM (foldTM k) (am_lit m) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Map/Type.hs0000644000000000000000000005677607346545000020615 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} module GHC.Core.Map.Type ( -- * Re-export generic interface TrieMap(..), XT, -- * Maps over 'Type's TypeMap, emptyTypeMap, extendTypeMap, lookupTypeMap, foldTypeMap, LooseTypeMap, -- ** With explicit scoping CmEnv, lookupCME, extendTypeMapWithScope, lookupTypeMapWithScope, mkDeBruijnContext, extendCME, extendCMEs, emptyCME, -- * Utilities for use by friends only TypeMapG, CoercionMapG, DeBruijn(..), deBruijnize, eqDeBruijnType, eqDeBruijnVar, BndrMap, xtBndr, lkBndr, VarMap, xtVar, lkVar, lkDFreeVar, xtDFreeVar, xtDNamed, lkDNamed ) where -- This module is separate from GHC.Core.Map.Expr to avoid a module loop -- between GHC.Core.Unify (which depends on this module) and GHC.Core import GHC.Prelude import GHC.Core.Type import GHC.Core.Coercion import GHC.Core.TyCo.Rep import GHC.Core.TyCon( isForgetfulSynTyCon ) import GHC.Core.TyCo.Compare( eqForAllVis ) import GHC.Data.TrieMap import GHC.Data.FastString import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique.FM import GHC.Utils.Outputable import GHC.Utils.Panic import qualified Data.Map as Map import qualified 
Data.IntMap as IntMap import Control.Monad ( (>=>) ) -- NB: Be careful about RULES and type families (#5821). So we should make sure -- to specify @Key TypeMapX@ (and not @DeBruijn Type@, the reduced form) {-# SPECIALIZE lkG :: Key TypeMapX -> TypeMapG a -> Maybe a #-} {-# SPECIALIZE lkG :: Key CoercionMapX -> CoercionMapG a -> Maybe a #-} {-# SPECIALIZE xtG :: Key TypeMapX -> XT a -> TypeMapG a -> TypeMapG a #-} {-# SPECIALIZE xtG :: Key CoercionMapX -> XT a -> CoercionMapG a -> CoercionMapG a #-} {-# SPECIALIZE mapG :: (a -> b) -> TypeMapG a -> TypeMapG b #-} {-# SPECIALIZE mapG :: (a -> b) -> CoercionMapG a -> CoercionMapG b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> TypeMapG a -> b -> b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> CoercionMapG a -> b -> b #-} {- ************************************************************************ * * Coercions * * ************************************************************************ -} -- We should really never care about the contents of a coercion. Instead, -- just look up the coercion's type. 
newtype CoercionMap a = CoercionMap (CoercionMapG a) -- TODO(22292): derive instance Functor CoercionMap where fmap f = \ (CoercionMap m) -> CoercionMap (fmap f m) {-# INLINE fmap #-} instance TrieMap CoercionMap where type Key CoercionMap = Coercion emptyTM = CoercionMap emptyTM lookupTM k (CoercionMap m) = lookupTM (deBruijnize k) m alterTM k f (CoercionMap m) = CoercionMap (alterTM (deBruijnize k) f m) foldTM k (CoercionMap m) = foldTM k m filterTM f (CoercionMap m) = CoercionMap (filterTM f m) type CoercionMapG = GenMap CoercionMapX newtype CoercionMapX a = CoercionMapX (TypeMapX a) -- TODO(22292): derive instance Functor CoercionMapX where fmap f = \ (CoercionMapX core_tm) -> CoercionMapX (fmap f core_tm) {-# INLINE fmap #-} instance TrieMap CoercionMapX where type Key CoercionMapX = DeBruijn Coercion emptyTM = CoercionMapX emptyTM lookupTM = lkC alterTM = xtC foldTM f (CoercionMapX core_tm) = foldTM f core_tm filterTM f (CoercionMapX core_tm) = CoercionMapX (filterTM f core_tm) instance Eq (DeBruijn Coercion) where D env1 co1 == D env2 co2 = D env1 (coercionType co1) == D env2 (coercionType co2) lkC :: DeBruijn Coercion -> CoercionMapX a -> Maybe a lkC (D env co) (CoercionMapX core_tm) = lkT (D env $ coercionType co) core_tm xtC :: DeBruijn Coercion -> XT a -> CoercionMapX a -> CoercionMapX a xtC (D env co) f (CoercionMapX m) = CoercionMapX (xtT (D env $ coercionType co) f m) {- ************************************************************************ * * Types * * ************************************************************************ -} -- | @TypeMapG a@ is a map from @DeBruijn Type@ to @a@. The extended -- key makes it suitable for recursive traversal, since it can track binders, -- but it is strictly internal to this module. If you are including a 'TypeMap' -- inside another 'TrieMap', this is the type you want. Note that this -- lookup does not do a kind-check. Thus, all keys in this map must have -- the same kind. 
Also note that this map respects the distinction between -- @Type@ and @Constraint@, despite the fact that they are equivalent type -- synonyms in Core. type TypeMapG = GenMap TypeMapX -- | @TypeMapX a@ is the base map from @DeBruijn Type@ to @a@, but without the -- 'GenMap' optimization. See Note [Computing equality on types] in GHC.Core.Type. data TypeMapX a = TM { tm_var :: VarMap a , tm_app :: TypeMapG (TypeMapG a) -- Note [Equality on AppTys] in GHC.Core.Type , tm_tycon :: DNameEnv a , tm_forall :: TypeMapG (BndrMap a) -- See Note [Binders] in GHC.Core.Map.Expr , tm_tylit :: TyLitMap a , tm_coerce :: Maybe a } -- Note that there is no tyconapp case; see Note [Equality on AppTys] in GHC.Core.Type -- | Squeeze out any synonyms, and change TyConApps to nested AppTys. Why the -- last one? See Note [Equality on AppTys] in GHC.Core.Type -- -- We also keep (Eq a => a) as a FunTy, distinct from ((->) (Eq a) a). trieMapView :: Type -> Maybe Type trieMapView ty -- First check for TyConApps that need to be expanded to -- AppTy chains. This includes eliminating FunTy entirely. | Just (tc, tys@(_:_)) <- splitTyConApp_maybe ty = Just $ foldl' AppTy (mkTyConTy tc) tys -- Then resolve any remaining nullary synonyms. 
| Just ty' <- coreView ty = Just ty' trieMapView _ = Nothing -- TODO(22292): derive instance Functor TypeMapX where fmap f TM { tm_var = tvar, tm_app = tapp, tm_tycon = ttycon, tm_forall = tforall , tm_tylit = tlit, tm_coerce = tcoerce } = TM { tm_var = fmap f tvar, tm_app = fmap (fmap f) tapp, tm_tycon = fmap f ttycon , tm_forall = fmap (fmap f) tforall , tm_tylit = fmap f tlit, tm_coerce = fmap f tcoerce } instance TrieMap TypeMapX where type Key TypeMapX = DeBruijn Type emptyTM = emptyT lookupTM = lkT alterTM = xtT foldTM = fdT filterTM = filterT instance Eq (DeBruijn Type) where (==) = eqDeBruijnType -- | An equality relation between two 'Type's (known below as @t1 :: k2@ -- and @t2 :: k2@) data TypeEquality = TNEQ -- ^ @t1 /= t2@ | TEQ -- ^ @t1 ~ t2@ and there are not casts in either, -- therefore we can conclude @k1 ~ k2@ | TEQX -- ^ @t1 ~ t2@ yet one of the types contains a cast so -- they may differ in kind eqDeBruijnType :: DeBruijn Type -> DeBruijn Type -> Bool eqDeBruijnType env_t1@(D env1 t1) env_t2@(D env2 t2) = -- See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep -- See Note [Computing equality on types] case go env_t1 env_t2 of TEQX -> toBool (go (D env1 k1) (D env2 k2)) ty_eq -> toBool ty_eq where k1 = typeKind t1 k2 = typeKind t2 toBool :: TypeEquality -> Bool toBool TNEQ = False toBool _ = True liftEquality :: Bool -> TypeEquality liftEquality False = TNEQ liftEquality _ = TEQ hasCast :: TypeEquality -> TypeEquality hasCast TEQ = TEQX hasCast eq = eq andEq :: TypeEquality -> TypeEquality -> TypeEquality andEq TNEQ _ = TNEQ andEq TEQX e = hasCast e andEq TEQ e = e -- See Note [Comparing type synonyms] in GHC.Core.TyCo.Compare go (D env1 (TyConApp tc1 tys1)) (D env2 (TyConApp tc2 tys2)) | tc1 == tc2, not (isForgetfulSynTyCon tc1) = gos env1 env2 tys1 tys2 go env_t@(D env t) env_t'@(D env' t') | Just new_t <- coreView t = go (D env new_t) env_t' | Just new_t' <- coreView t' = go env_t (D env' new_t') | otherwise = case (t, t') of -- 
See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep (CastTy t1 _, _) -> hasCast (go (D env t1) (D env t')) (_, CastTy t1' _) -> hasCast (go (D env t) (D env t1')) (TyVarTy v, TyVarTy v') -> liftEquality $ eqDeBruijnVar (D env v) (D env' v') -- See Note [Equality on AppTys] in GHC.Core.Type (AppTy t1 t2, s) | Just (t1', t2') <- splitAppTyNoView_maybe s -> go (D env t1) (D env' t1') `andEq` go (D env t2) (D env' t2') (s, AppTy t1' t2') | Just (t1, t2) <- splitAppTyNoView_maybe s -> go (D env t1) (D env' t1') `andEq` go (D env t2) (D env' t2') (FunTy v1 w1 t1 t2, FunTy v1' w1' t1' t2') -> liftEquality (v1 == v1') `andEq` -- NB: eqDeBruijnType does the kind check requested by -- Note [Equality on FunTys] in GHC.Core.TyCo.Rep liftEquality (eqDeBruijnType (D env t1) (D env' t1')) `andEq` liftEquality (eqDeBruijnType (D env t2) (D env' t2')) `andEq` -- Comparing multiplicities last because the test is usually true go (D env w1) (D env w1') (TyConApp tc tys, TyConApp tc' tys') -> liftEquality (tc == tc') `andEq` gos env env' tys tys' (LitTy l, LitTy l') -> liftEquality (l == l') (ForAllTy (Bndr tv vis) ty, ForAllTy (Bndr tv' vis') ty') -> -- See Note [ForAllTy and type equality] in -- GHC.Core.TyCo.Compare for why we use `eqForAllVis` here liftEquality (vis `eqForAllVis` vis') `andEq` go (D env (varType tv)) (D env' (varType tv')) `andEq` go (D (extendCME env tv) ty) (D (extendCME env' tv') ty') (CoercionTy {}, CoercionTy {}) -> TEQ _ -> TNEQ -- These bangs make 'gos' strict in the CMEnv, which in turn -- keeps the CMEnv unboxed across the go/gos mutual recursion -- (If you want a test case, T9872c really exercises this code.) 
gos !_ !_ [] [] = TEQ gos e1 e2 (ty1:tys1) (ty2:tys2) = go (D e1 ty1) (D e2 ty2) `andEq` gos e1 e2 tys1 tys2 gos _ _ _ _ = TNEQ instance Eq (DeBruijn Var) where (==) = eqDeBruijnVar eqDeBruijnVar :: DeBruijn Var -> DeBruijn Var -> Bool eqDeBruijnVar (D env1 v1) (D env2 v2) = case (lookupCME env1 v1, lookupCME env2 v2) of (Just b1, Just b2) -> b1 == b2 (Nothing, Nothing) -> v1 == v2 _ -> False instance {-# OVERLAPPING #-} Outputable a => Outputable (TypeMapG a) where ppr m = text "TypeMap elts" <+> ppr (foldTM (:) m []) emptyT :: TypeMapX a emptyT = TM { tm_var = emptyTM , tm_app = emptyTM , tm_tycon = emptyDNameEnv , tm_forall = emptyTM , tm_tylit = emptyTyLitMap , tm_coerce = Nothing } ----------------- lkT :: DeBruijn Type -> TypeMapX a -> Maybe a lkT (D env ty) m = go ty m where go ty | Just ty' <- trieMapView ty = go ty' go (TyVarTy v) = tm_var >.> lkVar env v go (AppTy t1 t2) = tm_app >.> lkG (D env t1) >=> lkG (D env t2) go (TyConApp tc []) = tm_tycon >.> lkDNamed tc go (LitTy l) = tm_tylit >.> lkTyLit l go (ForAllTy (Bndr tv _) ty) = tm_forall >.> lkG (D (extendCME env tv) ty) >=> lkBndr env tv go (CastTy t _) = go t go (CoercionTy {}) = tm_coerce -- trieMapView has eliminated non-nullary TyConApp -- and FunTy into an AppTy chain go ty@(TyConApp _ (_:_)) = pprPanic "lkT TyConApp" (ppr ty) go ty@(FunTy {}) = pprPanic "lkT FunTy" (ppr ty) ----------------- xtT :: DeBruijn Type -> XT a -> TypeMapX a -> TypeMapX a xtT (D env ty) f m | Just ty' <- trieMapView ty = xtT (D env ty') f m xtT (D env (TyVarTy v)) f m = m { tm_var = tm_var m |> xtVar env v f } xtT (D env (AppTy t1 t2)) f m = m { tm_app = tm_app m |> xtG (D env t1) |>> xtG (D env t2) f } xtT (D _ (TyConApp tc [])) f m = m { tm_tycon = tm_tycon m |> xtDNamed tc f } xtT (D _ (LitTy l)) f m = m { tm_tylit = tm_tylit m |> xtTyLit l f } xtT (D env (CastTy t _)) f m = xtT (D env t) f m xtT (D _ (CoercionTy {})) f m = m { tm_coerce = tm_coerce m |> f } xtT (D env (ForAllTy (Bndr tv _) ty)) f m = m { tm_forall = 
tm_forall m |> xtG (D (extendCME env tv) ty) |>> xtBndr env tv f } -- trieMapView has eliminated non-nullary TyConApp -- and FunTy into an AppTy chain xtT (D _ ty@(TyConApp _ (_:_))) _ _ = pprPanic "xtT TyConApp" (ppr ty) xtT (D _ ty@(FunTy {})) _ _ = pprPanic "xtT FunTy" (ppr ty) fdT :: (a -> b -> b) -> TypeMapX a -> b -> b fdT k m = foldTM k (tm_var m) . foldTM (foldTM k) (tm_app m) . foldTM k (tm_tycon m) . foldTM (foldTM k) (tm_forall m) . foldTyLit k (tm_tylit m) . foldMaybe k (tm_coerce m) filterT :: (a -> Bool) -> TypeMapX a -> TypeMapX a filterT f (TM { tm_var = tvar, tm_app = tapp, tm_tycon = ttycon , tm_forall = tforall, tm_tylit = tlit , tm_coerce = tcoerce }) = TM { tm_var = filterTM f tvar , tm_app = fmap (filterTM f) tapp , tm_tycon = filterTM f ttycon , tm_forall = fmap (filterTM f) tforall , tm_tylit = filterTM f tlit , tm_coerce = filterMaybe f tcoerce } ------------------------ data TyLitMap a = TLM { tlm_number :: Map.Map Integer a , tlm_string :: UniqFM FastString a , tlm_char :: Map.Map Char a } -- TODO(22292): derive instance Functor TyLitMap where fmap f TLM { tlm_number = tn, tlm_string = ts, tlm_char = tc } = TLM { tlm_number = Map.map f tn, tlm_string = mapUFM f ts, tlm_char = Map.map f tc } instance TrieMap TyLitMap where type Key TyLitMap = TyLit emptyTM = emptyTyLitMap lookupTM = lkTyLit alterTM = xtTyLit foldTM = foldTyLit filterTM = filterTyLit emptyTyLitMap :: TyLitMap a emptyTyLitMap = TLM { tlm_number = Map.empty, tlm_string = emptyUFM, tlm_char = Map.empty } lkTyLit :: TyLit -> TyLitMap a -> Maybe a lkTyLit l = case l of NumTyLit n -> tlm_number >.> Map.lookup n StrTyLit n -> tlm_string >.> (`lookupUFM` n) CharTyLit n -> tlm_char >.> Map.lookup n xtTyLit :: TyLit -> XT a -> TyLitMap a -> TyLitMap a xtTyLit l f m = case l of NumTyLit n -> m { tlm_number = Map.alter f n (tlm_number m) } StrTyLit n -> m { tlm_string = alterUFM f (tlm_string m) n } CharTyLit n -> m { tlm_char = Map.alter f n (tlm_char m) } foldTyLit :: (a -> b -> b) 
-> TyLitMap a -> b -> b foldTyLit l m = flip (nonDetFoldUFM l) (tlm_string m) . flip (Map.foldr l) (tlm_number m) . flip (Map.foldr l) (tlm_char m) filterTyLit :: (a -> Bool) -> TyLitMap a -> TyLitMap a filterTyLit f (TLM { tlm_number = tn, tlm_string = ts, tlm_char = tc }) = TLM { tlm_number = Map.filter f tn, tlm_string = filterUFM f ts, tlm_char = Map.filter f tc } ------------------------------------------------- -- | @TypeMap a@ is a map from 'Type' to @a@. If you are a client, this -- is the type you want. The keys in this map may have different kinds. newtype TypeMap a = TypeMap (TypeMapG (TypeMapG a)) -- TODO(22292): derive instance Functor TypeMap where fmap f = \ (TypeMap m) -> TypeMap (fmap (fmap f) m) {-# INLINE fmap #-} lkTT :: DeBruijn Type -> TypeMap a -> Maybe a lkTT (D env ty) (TypeMap m) = lkG (D env $ typeKind ty) m >>= lkG (D env ty) xtTT :: DeBruijn Type -> XT a -> TypeMap a -> TypeMap a xtTT (D env ty) f (TypeMap m) = TypeMap (m |> xtG (D env $ typeKind ty) |>> xtG (D env ty) f) -- Below are some client-oriented functions which operate on 'TypeMap'. instance TrieMap TypeMap where type Key TypeMap = Type emptyTM = TypeMap emptyTM lookupTM k m = lkTT (deBruijnize k) m alterTM k f m = xtTT (deBruijnize k) f m foldTM k (TypeMap m) = foldTM (foldTM k) m filterTM f (TypeMap m) = TypeMap (fmap (filterTM f) m) foldTypeMap :: (a -> b -> b) -> b -> TypeMap a -> b foldTypeMap k z m = foldTM k m z emptyTypeMap :: TypeMap a emptyTypeMap = emptyTM lookupTypeMap :: TypeMap a -> Type -> Maybe a lookupTypeMap cm t = lookupTM t cm extendTypeMap :: TypeMap a -> Type -> a -> TypeMap a extendTypeMap m t v = alterTM t (const (Just v)) m lookupTypeMapWithScope :: TypeMap a -> CmEnv -> Type -> Maybe a lookupTypeMapWithScope m cm t = lkTT (D cm t) m -- | Extend a 'TypeMap' with a type in the given context. -- @extendTypeMapWithScope m (mkDeBruijnContext [a,b,c]) t v@ is equivalent to -- @extendTypeMap m (forall a b c. 
t) v@, but allows reuse of the context over -- multiple insertions. extendTypeMapWithScope :: TypeMap a -> CmEnv -> Type -> a -> TypeMap a extendTypeMapWithScope m cm t v = xtTT (D cm t) (const (Just v)) m -- | Construct a deBruijn environment with the given variables in scope. -- e.g. @mkDeBruijnEnv [a,b,c]@ constructs a context @forall a b c.@ mkDeBruijnContext :: [Var] -> CmEnv mkDeBruijnContext = extendCMEs emptyCME -- | A 'LooseTypeMap' doesn't do a kind-check. Thus, when lookup up (t |> g), -- you'll find entries inserted under (t), even if (g) is non-reflexive. newtype LooseTypeMap a = LooseTypeMap (TypeMapG a) -- TODO(22292): derive instance Functor LooseTypeMap where fmap f = \ (LooseTypeMap m) -> LooseTypeMap (fmap f m) {-# INLINE fmap #-} instance TrieMap LooseTypeMap where type Key LooseTypeMap = Type emptyTM = LooseTypeMap emptyTM lookupTM k (LooseTypeMap m) = lookupTM (deBruijnize k) m alterTM k f (LooseTypeMap m) = LooseTypeMap (alterTM (deBruijnize k) f m) foldTM f (LooseTypeMap m) = foldTM f m filterTM f (LooseTypeMap m) = LooseTypeMap (filterTM f m) {- ************************************************************************ * * Variables * * ************************************************************************ -} type BoundVar = Int -- Bound variables are deBruijn numbered type BoundVarMap a = IntMap.IntMap a data CmEnv = CME { cme_next :: !BoundVar , cme_env :: VarEnv BoundVar } emptyCME :: CmEnv emptyCME = CME { cme_next = 0, cme_env = emptyVarEnv } extendCME :: CmEnv -> Var -> CmEnv extendCME (CME { cme_next = bv, cme_env = env }) v = CME { cme_next = bv+1, cme_env = extendVarEnv env v bv } extendCMEs :: CmEnv -> [Var] -> CmEnv extendCMEs env vs = foldl' extendCME env vs lookupCME :: CmEnv -> Var -> Maybe BoundVar lookupCME (CME { cme_env = env }) v = lookupVarEnv env v -- | @DeBruijn a@ represents @a@ modulo alpha-renaming. This is achieved -- by equipping the value with a 'CmEnv', which tracks an on-the-fly deBruijn -- numbering. 
This allows us to define an 'Eq' instance for @DeBruijn a@, even -- if this was not (easily) possible for @a@. Note: we purposely don't -- export the constructor. Make a helper function if you find yourself -- needing it. data DeBruijn a = D CmEnv a -- | Synthesizes a @DeBruijn a@ from an @a@, by assuming that there are no -- bound binders (an empty 'CmEnv'). This is usually what you want if there -- isn't already a 'CmEnv' in scope. deBruijnize :: a -> DeBruijn a deBruijnize = D emptyCME instance Eq (DeBruijn a) => Eq (DeBruijn [a]) where D _ [] == D _ [] = True D env (x:xs) == D env' (x':xs') = D env x == D env' x' && D env xs == D env' xs' _ == _ = False instance Eq (DeBruijn a) => Eq (DeBruijn (Maybe a)) where D _ Nothing == D _ Nothing = True D env (Just x) == D env' (Just x') = D env x == D env' x' _ == _ = False --------- Variable binders ------------- -- | A 'BndrMap' is a 'TypeMapG' which allows us to distinguish between -- binding forms whose binders have different types. For example, -- if we are doing a 'TrieMap' lookup on @\(x :: Int) -> ()@, we should -- not pick up an entry in the 'TrieMap' for @\(x :: Bool) -> ()@: -- we can disambiguate this by matching on the type (or kind, if this -- a binder in a type) of the binder. -- -- We also need to do the same for multiplicity! Which, since multiplicities are -- encoded simply as a 'Type', amounts to have a Trie for a pair of types. Tries -- of pairs are composition. 
data BndrMap a = BndrMap (TypeMapG (MaybeMap TypeMapG a)) -- TODO(22292): derive instance Functor BndrMap where fmap f = \ (BndrMap tm) -> BndrMap (fmap (fmap f) tm) {-# INLINE fmap #-} instance TrieMap BndrMap where type Key BndrMap = Var emptyTM = BndrMap emptyTM lookupTM = lkBndr emptyCME alterTM = xtBndr emptyCME foldTM = fdBndrMap filterTM = ftBndrMap fdBndrMap :: (a -> b -> b) -> BndrMap a -> b -> b fdBndrMap f (BndrMap tm) = foldTM (foldTM f) tm -- We need to use 'BndrMap' for 'Coercion', 'CoreExpr' AND 'Type', since all -- of these data types have binding forms. lkBndr :: CmEnv -> Var -> BndrMap a -> Maybe a lkBndr env v (BndrMap tymap) = do multmap <- lkG (D env (varType v)) tymap lookupTM (D env <$> varMultMaybe v) multmap xtBndr :: forall a . CmEnv -> Var -> XT a -> BndrMap a -> BndrMap a xtBndr env v xt (BndrMap tymap) = BndrMap (tymap |> xtG (D env (varType v)) |>> (alterTM (D env <$> varMultMaybe v) xt)) ftBndrMap :: (a -> Bool) -> BndrMap a -> BndrMap a ftBndrMap f (BndrMap tm) = BndrMap (fmap (filterTM f) tm) --------- Variable occurrence ------------- data VarMap a = VM { vm_bvar :: BoundVarMap a -- Bound variable , vm_fvar :: DVarEnv a } -- Free variable -- TODO(22292): derive instance Functor VarMap where fmap f VM { vm_bvar = bv, vm_fvar = fv } = VM { vm_bvar = fmap f bv, vm_fvar = fmap f fv } instance TrieMap VarMap where type Key VarMap = Var emptyTM = VM { vm_bvar = IntMap.empty, vm_fvar = emptyDVarEnv } lookupTM = lkVar emptyCME alterTM = xtVar emptyCME foldTM = fdVar filterTM = ftVar lkVar :: CmEnv -> Var -> VarMap a -> Maybe a lkVar env v | Just bv <- lookupCME env v = vm_bvar >.> lookupTM bv | otherwise = vm_fvar >.> lkDFreeVar v xtVar :: CmEnv -> Var -> XT a -> VarMap a -> VarMap a xtVar env v f m | Just bv <- lookupCME env v = m { vm_bvar = vm_bvar m |> alterTM bv f } | otherwise = m { vm_fvar = vm_fvar m |> xtDFreeVar v f } fdVar :: (a -> b -> b) -> VarMap a -> b -> b fdVar k m = foldTM k (vm_bvar m) . 
foldTM k (vm_fvar m) lkDFreeVar :: Var -> DVarEnv a -> Maybe a lkDFreeVar var env = lookupDVarEnv env var xtDFreeVar :: Var -> XT a -> DVarEnv a -> DVarEnv a xtDFreeVar v f m = alterDVarEnv f m v ftVar :: (a -> Bool) -> VarMap a -> VarMap a ftVar f (VM { vm_bvar = bv, vm_fvar = fv }) = VM { vm_bvar = filterTM f bv, vm_fvar = filterTM f fv } ------------------------------------------------- lkDNamed :: NamedThing n => n -> DNameEnv a -> Maybe a lkDNamed n env = lookupDNameEnv env (getName n) xtDNamed :: NamedThing n => n -> XT a -> DNameEnv a -> DNameEnv a xtDNamed tc f m = alterDNameEnv f m (getName tc) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Multiplicity.hs0000644000000000000000000003623407346545000021632 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} {-| This module defines the semi-ring of multiplicities, and associated functions. Multiplicities annotate arrow types to indicate the linearity of the arrow (in the sense of linear types). Mult is a type synonym for Type, used only when its kind is Multiplicity. To simplify dealing with multiplicities, functions such as mkMultMul perform simplifications such as Many * x = Many on the fly. -} module GHC.Core.Multiplicity ( Mult , pattern OneTy , pattern ManyTy , isMultMul , mkMultAdd , mkMultMul , mkMultSup , Scaled(..) , scaledMult , scaledThing , unrestricted , linear , tymult , irrelevantMult , mkScaled , scaledSet , scaleScaled , IsSubmult(..) , submult , mapScaledType , pprArrowWithMultiplicity , MultiplicityFlag(..) ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Core.Type import GHC.Core.TyCo.Rep import GHC.Types.Var( isFUNArg ) import {-# SOURCE #-} GHC.Builtin.Types ( multMulTyCon ) import GHC.Builtin.Names (multMulTyConKey) import GHC.Types.Unique (hasKey) {- Note [Linear types] ~~~~~~~~~~~~~~~~~~~ This module is the entry point for linear types. The detailed design is in the _Linear Haskell_ article [https://arxiv.org/abs/1710.09756]. 
Other important resources in the linear types implementation wiki page [https://gitlab.haskell.org/ghc/ghc/wikis/linear-types/implementation], and the proposal [https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0111-linear-types.rst] which describes the concrete design at length. For the busy developer, though, here is a high-level view of linear types is the following: - Function arrows are annotated with a multiplicity (as defined by type `Mult` and its smart constructors in this module) - Multiplicities, in Haskell, are types of kind `GHC.Types.Multiplicity`. as in map :: forall (p :: Multiplicity). (a %p -> b) -> [a] %p -> [b] - The type constructor for function types (FUN) has type FUN :: forall (m :: Multiplicity) -> forall {r1) {r2}. TYPE r1 -> TYPE r2 -> Type The argument order is explained in https://gitlab.haskell.org/ghc/ghc/-/issues/20164 - (->) retains its backward compatible meaning: (->) a b = a -> b = a %'Many -> b To achieve this, `(->)` is defined as a type synonym to `FUN Many` (see below). - A ground multiplicity (that is, without a variable) can be `One` or `Many` (`Many` is generally rendered as ω in the scientific literature). Functions whose type is annotated with `One` are linear functions, functions whose type is annotated with `Many` are regular functions, often called “unrestricted” to contrast them with linear functions. - A linear function is defined as a function such that *if* its result is consumed exactly once, *then* its argument is consumed exactly once. You can think of “consuming exactly once” as evaluating a value in normal form exactly once (though not necessarily in one go). The _Linear Haskell_ article (see supra) has a more precise definition of “consuming exactly once”. - Data constructors are linear by default. See Note [Data constructors are linear by default]. - Multiplicities form a semiring. - Multiplicities can also be variables and we can universally quantify over these variables. 
This is referred to as “multiplicity polymorphism”. Furthermore, multiplicity can be formal semiring expressions combining variables. - Contrary to the paper, the sum of two multiplicities is always `Many`. This will have to change, however, if we want to add a multiplicity for 0. Whether we want to is still debated. - Case expressions have a multiplicity annotation too. A case expression with multiplicity `One`, consumes its scrutinee exactly once (provided the entire case expression is consumed exactly once); whereas a case expression with multiplicity `Many` can consume its scrutinee as many time as it wishes (no matter how much the case expression is consumed). For linear types in the linter see Note [Linting linearity] in GHC.Core.Lint. Note [Usages] ~~~~~~~~~~~~~ In the _Linear Haskell_ paper, you'll find typing rules such as these: Γ ⊢ f : A #π-> B Δ ⊢ u : A --------------------------- Γ + kΔ ⊢ f u : B If you read this as a type-checking algorithm going from the bottom up, this reads as: the algorithm has to find a split of some input context Ξ into an appropriate Γ and a Δ such as Ξ = Γ + kΔ, *and the multiplicities are chosen to make f and u typecheck*. This could be achieved by letting the typechecking of `f` use exactly the variable it needs, then passing the remainder, as `Delta` to the typechecking of u. But what does that mean if `x` is bound with multiplicity `p` (a variable) and `f` consumes `x` once? `Delta` would have to contain `x` with multiplicity `p-1`. It's not really clear how to make that works. In summary: bottom-up multiplicity checking forgoes addition and multiplication in favour of subtraction and division. And variables make the latter hard. The alternative is to read multiplicities from the top down: as an *output* from the typechecking algorithm, rather than an input. We call these output multiplicities Usages, to distinguish them from the multiplicities which come, as input, from the types of functions. 
Usages are checked for compatibility with multiplicity annotations using an ordering relation. In other words, the usage of x in the expression u is the smallest multiplicity which can be ascribed to x for u to typecheck. Usages are usually group in a UsageEnv, as defined in the UsageEnv module. So, in our function application example, the typechecking algorithm would receive usage environments f_ue from the typechecking of f, and u_ue from the typechecking of u. Then the output would be f_ue + (k * u_ue). Addition and scaling of usage environment is the pointwise extension of the semiring operations on multiplicities. Note [Zero as a usage] ~~~~~~~~~~~~~~~~~~~~~~ In the current presentation usages are not exactly multiplicities, because they can contain 0, and multiplicities can't. Why do we need a 0 usage? A function which doesn't use its argument will be required to annotate it with `Many`: \(x % Many) -> 0 However, we cannot replace absence with Many when computing usages compositionally: in (x, True) We expect x to have usage 1. But when computing the usage of x in True we would find that x is absent, hence has multiplicity Many. The final multiplicity would be One+Many = Many. Oops! Hence there is a usage Zero for absent variables. Zero is characterised by being the neutral element to usage addition. We may decide to add Zero as a multiplicity in the future. In which case, this distinction will go away. Note [Joining usages] ~~~~~~~~~~~~~~~~~~~~~ The usage of a variable is defined, in Note [Usages], as the minimum usage which can be ascribed to a variable. So what is the usage of x in case … of { p1 -> u -- usage env: u_ue ; p2 -> v } -- usage env: v_ue It must be the least upper bound, or _join_, of u_ue(x) and v_ue(x). So, contrary to a declarative presentation where the correct usage of x can be conjured out of thin air, we need to be able to compute the join of two multiplicities. Join is extended pointwise on usage environments. 
Note [Bottom as a usage] ~~~~~~~~~~~~~~~~~~~~~~ What is the usage of x in case … of {} Per usual linear logic, as well as the _Linear Haskell_ article, x can have every multiplicity. So we need a minimum usage _bottom_, which is also the neutral element for join. In fact, this is not such as nice solution, because it is not clear how to define sum and multiplication with bottom. We give reasonable definitions, but they are not complete (they don't respect the semiring laws, and it's possible to come up with examples of Core transformation which are not well-typed) A better solution would probably be to annotate case expressions with a usage environment, just like they are annotated with a type. Which, probably not coincidentally, is also primarily for empty cases. A side benefit of this approach is that the linter would not need to join multiplicities, anymore; hence would be closer to the presentation in the article. That's because it could use the annotation as the multiplicity for each branch. Note [Data constructors are linear by default] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All data constructors defined without -XLinearTypes, as well as data constructors defined with the Haskell 98 in all circumstances, have all their fields linear. That is, in data Maybe a = Nothing | Just a We have Just :: a %1 -> Just a Irrespective of whether -XLinearTypes is turned on or not. Furthermore, when -XLinearTypes is turned off, the declaration data Endo a where { MkIntEndo :: (Int -> Int) -> T Int } gives MkIntEndo :: (Int -> Int) %1 -> T Int With -XLinearTypes turned on, instead, this would give data EndoU a where { MkIntEndoU :: (Int -> Int) -> T Int } MkIntEndoU :: (Int -> Int) -> T Int With -XLinearTypes turned on, to get a linear field with GADT syntax we would need to write data EndoL a where { MkIntEndoL :: (Int -> Int) %1 -> T Int } The goal is to maximise reuse of types between linear code and traditional code. 
This is argued at length in the proposal and the article (links in Note [Linear types]). Unrestricted field don't need to be consumed for a value to be consumed exactly once. So consuming a value of type `IntEndoU a` exactly once means forcing it at least once. Why “at least once”? Because if `case u of { MkIntEndoL x -> f (MkIntEndoL x) }` is linear (provided `f` is a linear function). But we might as well have done `case u of { !z -> f z }`. So, we can observe constructors as many times as we want, and we are actually allowed to force the same thing several times because laziness means that we are really forcing the value once, and observing its constructor several times. The type checker and the linter recognise some (but not all) of these multiple forces as indeed linear. Mostly just enough to support variable patterns. In summary: - Fields of data constructors defined with Haskell 98 syntax are always linear (even if `-XLinearTypes` is off). This choice has been made to favour sharing types between linearly typed Haskell and traditional Haskell. To avoid an ecosystem split. - When `-XLinearTypes` is off, GADT-syntax declaration can only use the regular arrow `(->)`. However all the fields are linear. Note [Polymorphisation of linear fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The choice in Note [Data constructors are linear by default] has an impact on backwards compatibility. Consider map Just We have map :: (a -> b) -> f a -> f b Just :: a %1 -> Just a Types don't match, we should get a type error. But this is legal Haskell 98 code! Bad! Bad! Bad! It could be solved with subtyping, but subtyping doesn't combine well with polymorphism. Instead, we generalise the type of Just, when used as term: Just :: forall {p}. a %p-> Just a This is solely a concern for higher-order code like this: when called fully applied linear constructors are more general than constructors with unrestricted fields. 
In particular, linear constructors can always be eta-expanded to their Haskell 98 type. This is explained in the paper (but there, we had a different strategy to resolve this type mismatch in higher-order code. It turned out to be insufficient, which is explained in the wiki page as well as the proposal). We only generalise linear fields this way: fields with multiplicity Many, or other multiplicity expressions are exclusive to -XLinearTypes, hence don't have backward compatibility implications. The implementation is described in Note [Typechecking data constructors] in GHC.Tc.Gen.Head. More details in the proposal. -} {- Note [Adding new multiplicities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To add a new multiplicity, you need to: * Add the new type with Multiplicity kind * Update cases in mkMultAdd, mkMultMul, mkMultSup, submult, tcSubMult * Check supUE function that computes sup of a multiplicity and Zero -} isMultMul :: Mult -> Maybe (Mult, Mult) isMultMul ty | Just (tc, [x, y]) <- splitTyConApp_maybe ty , tc `hasKey` multMulTyConKey = Just (x, y) | otherwise = Nothing {- Note [Overapproximating multiplicities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions mkMultAdd, mkMultMul, mkMultSup perform operations on multiplicities. They can return overapproximations: their result is merely guaranteed to be a submultiplicity of the actual value. They should be used only when an upper bound is acceptable. In most cases, they are used in usage environments (UsageEnv); in usage environments, replacing a usage with a larger one can only cause more programs to fail to typecheck. In future work, instead of approximating we might add type families and allow users to write types involving operations on multiplicities. In this case, we could enforce more invariants in Mult, for example, enforce that it is in the form of a sum of products, and even that the summands and factors are ordered somehow, to have more equalities. 
-}

-- With only two multiplicities One and Many, we can always replace
-- p + q by Many. See Note [Overapproximating multiplicities].
mkMultAdd :: Mult -> Mult -> Mult
mkMultAdd _ _ = ManyTy

-- | Smart constructor for the product of two multiplicities.
-- 'OneTy' is the multiplicative unit and 'ManyTy' is absorbing;
-- any other pair is kept as a syntactic application of 'multMulTyCon'.
mkMultMul :: Mult -> Mult -> Mult
mkMultMul OneTy p = p
mkMultMul p OneTy = p
mkMultMul ManyTy _ = ManyTy
mkMultMul _ ManyTy = ManyTy
mkMultMul p q = mkTyConApp multMulTyCon [p, q]

-- | Multiply the multiplicity stored in a 'Scaled' by @m'@,
-- leaving the payload untouched.
scaleScaled :: Mult -> Scaled a -> Scaled a
scaleScaled m' (Scaled m t) = Scaled (m' `mkMultMul` m) t

-- See Note [Joining usages]
-- | @mkMultSup w1 w2@ returns a multiplicity such that @mkMultSup w1
-- w2 >= w1@ and @mkMultSup w1 w2 >= w2@. See Note [Overapproximating multiplicities].
mkMultSup :: Mult -> Mult -> Mult
mkMultSup = mkMultMul
-- Note: If you are changing this logic, check 'supUE' in UsageEnv as well.

--
-- * Multiplicity ordering
--

-- | Result of the (partial) submultiplicity check; see 'submult'.
data IsSubmult = Submult     -- Definitely a submult
               | Unknown     -- Could be a submult, need to ask the typechecker
     deriving (Show, Eq)

instance Outputable IsSubmult where
  ppr = text . show

-- | @submult w1 w2@ check whether a value of multiplicity @w1@ is allowed where a
-- value of multiplicity @w2@ is expected. This is a partial order.
submult :: Mult -> Mult -> IsSubmult
submult _     ManyTy = Submult
submult OneTy OneTy  = Submult
-- The 1 <= p rule
submult OneTy _      = Submult
submult _     _      = Unknown   -- e.g. two distinct multiplicity variables:
                                 -- only the typechecker can decide

pprArrowWithMultiplicity :: FunTyFlag -> Either Bool SDoc -> SDoc
-- Pretty-print a multiplicity arrow. The multiplicity itself
-- is described by the (Either Bool SDoc)
--     Left False    -- Many
--     Left True     -- One
--     Right doc     -- Something else
-- In the Right case, the doc is in parens if not atomic
pprArrowWithMultiplicity af pp_mult
  | isFUNArg af
  = case pp_mult of
      Left False -> arrow                         -- plain  ->
      Left True  -> lollipop                      -- linear %1 ->
      Right doc  -> text "%" <> doc <+> arrow     -- general %m ->
  | otherwise
  = ppr (funTyFlagTyCon af)

-- | In Core, without `-dlinear-core-lint`, some function must ignore
-- multiplicities. See Note [Linting linearity] in GHC.Core.Lint.
data MultiplicityFlag = RespectMultiplicities | IgnoreMultiplicities ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/0000755000000000000000000000000007346545000017340 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Arity.hs0000644000000000000000000037010407346545000020771 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Arity and eta expansion -} {-# LANGUAGE CPP #-} -- | Arity and eta expansion module GHC.Core.Opt.Arity ( -- Finding arity manifestArity, joinRhsArity, exprArity , findRhsArity, cheapArityType , ArityOpts(..) -- ** Eta expansion , exprEtaExpandArity, etaExpand, etaExpandAT -- ** Eta reduction , tryEtaReduce -- ** ArityType , ArityType, mkBotArityType , arityTypeArity, idArityType -- ** Bottoming things , exprIsDeadEnd, exprBotStrictness_maybe, arityTypeBotSigs_maybe -- ** typeArity and the state hack , typeArity, typeOneShots, typeOneShot , isOneShotBndr , isStateHackType -- * Lambdas , zapLamBndrs -- ** Join points , etaExpandToJoinPoint, etaExpandToJoinPointRule -- ** Coercions and casts , pushCoArg, pushCoArgs, pushCoValArg, pushCoTyArg , pushCoercionIntoLambda, pushCoDataCon, collectBindersPushingCo ) where import GHC.Prelude import GHC.Core import GHC.Core.FVs import GHC.Core.Utils import GHC.Core.DataCon import GHC.Core.TyCon ( tyConArity ) import GHC.Core.TyCon.RecWalk ( initRecTc, checkRecTc ) import GHC.Core.Predicate ( isDictTy, isEvVar, isCallStackPredTy, isCallStackTy ) import GHC.Core.Multiplicity -- We have two sorts of substitution: -- GHC.Core.Subst.Subst, and GHC.Core.TyCo.Subst -- Both have substTy, substCo Hence need for qualification import GHC.Core.Subst as Core import GHC.Core.Type as Type import GHC.Core.Coercion as Type import GHC.Core.TyCo.Compare( eqType ) import GHC.Types.Demand import GHC.Types.Cpr( CprSig, mkCprSig, botCpr ) import GHC.Types.Id import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import 
GHC.Types.Basic import GHC.Types.Tickish import GHC.Builtin.Types.Prim import GHC.Builtin.Uniques import GHC.Data.FastString import GHC.Data.Graph.UnVar import GHC.Data.Pair import GHC.Utils.GlobalVars( unsafeHasNoStateHack ) import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import Data.Maybe( isJust ) {- ************************************************************************ * * manifestArity and exprArity * * ************************************************************************ exprArity is a cheap-and-cheerful version of exprEtaExpandArity. It tells how many things the expression can be applied to before doing any work. It doesn't look inside cases, lets, etc. The idea is that exprEtaExpandArity will do the hard work, leaving something that's easy for exprArity to grapple with. In particular, Simplify uses exprArity to compute the ArityInfo for the Id. Originally I thought that it was enough just to look for top-level lambdas, but it isn't. I've seen this foo = PrelBase.timesInt We want foo to get arity 2 even though the eta-expander will leave it unchanged, in the expectation that it'll be inlined. But occasionally it isn't, because foo is blacklisted (used in a rule). Similarly, see the ok_note check in exprEtaExpandArity. So f = __inline_me (\x -> e) won't be eta-expanded. And in any case it seems more robust to have exprArity be a bit more intelligent. But note that (\x y z -> f x y z) should have arity 3, regardless of f's arity. 
-}

manifestArity :: CoreExpr -> Arity
-- ^ manifestArity sees how many leading value lambdas there are,
--   after looking through casts (ticks that are not code, and casts,
--   are transparent; type lambdas do not count)
manifestArity (Lam v e) | isId v    = 1 + manifestArity e
                        | otherwise = manifestArity e
manifestArity (Tick t e) | not (tickishIsCode t) = manifestArity e
manifestArity (Cast e _) = manifestArity e
manifestArity _          = 0

joinRhsArity :: CoreExpr -> JoinArity
-- Join points are supposed to have manifestly-visible
-- lambdas at the top: no ticks, no casts, nothing
-- Moreover, type lambdas count in JoinArity
-- NB: For non-recursive bindings, the join arity of the binding may actually be
--     less than the number of manifestly-visible lambdas.
--     See Note [Join arity prediction based on joinRhsArity] in GHC.Core.Opt.OccurAnal
joinRhsArity (Lam _ e) = 1 + joinRhsArity e
joinRhsArity _         = 0

---------------
exprBotStrictness_maybe :: CoreExpr -> Maybe (Arity, DmdSig, CprSig)
-- A cheap and cheerful function that identifies bottoming functions
-- and gives them a suitable strictness and CPR signatures.
-- It's used during float-out
exprBotStrictness_maybe e = arityTypeBotSigs_maybe (cheapArityType e)

arityTypeBotSigs_maybe :: ArityType -> Maybe (Arity, DmdSig, CprSig)
-- Arity of a divergent function; Nothing if the ArityType does not
-- end in a dead-end Divergence
arityTypeBotSigs_maybe (AT lams div)
  | isDeadEndDiv div = Just ( arity
                            , mkVanillaDmdSig arity botDiv
                            , mkCprSig arity botCpr)
  | otherwise        = Nothing
  where
    arity = length lams

{- Note [exprArity for applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we come to an application we check that the arg is trivial.
   eg  f (fac x) does not have arity 2,
                 even if f has arity 3!

* We require that it is trivial rather than merely cheap.  Suppose f has arity 2.
  Then    f (Just y)
  has arity 0, because if we gave it arity 1 and then inlined f we'd get
          let v = Just y in \w. <f-body>
  which has arity 0.  And we try to maintain the invariant that we don't
  have arity decreases.

* The `max 0` is important!
(\x y -> f x) has arity 2, even if f is unknown, hence arity 0 ************************************************************************ * * typeArity and the "state hack" * * ********************************************************************* -} typeArity :: Type -> Arity -- ^ (typeArity ty) says how many arrows GHC can expose in 'ty', after -- looking through newtypes. More generally, (typeOneShots ty) returns -- ty's [OneShotInfo], based only on the type itself, using typeOneShot -- on the argument type to access the "state hack". typeArity = length . typeOneShots typeOneShots :: Type -> [OneShotInfo] -- How many value arrows are visible in the type? -- We look through foralls, and newtypes -- See Note [Arity invariants for bindings] typeOneShots ty = go initRecTc ty where go rec_nts ty | Just (tcv, ty') <- splitForAllTyCoVar_maybe ty = if isCoVar tcv then idOneShotInfo tcv : go rec_nts ty' else go rec_nts ty' | Just (_,_,arg,res) <- splitFunTy_maybe ty = typeOneShot arg : go rec_nts res | Just (tc,tys) <- splitTyConApp_maybe ty , Just (ty', _) <- instNewTyCon_maybe tc tys , Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes and products] -- in GHC.Core.TyCon -- , not (isClassTyCon tc) -- Do not eta-expand through newtype classes -- -- See Note [Newtype classes and eta expansion] -- (no longer required) = go rec_nts' ty' -- Important to look through non-recursive newtypes, so that, eg -- (f x) where f has arity 2, f :: Int -> IO () -- Here we want to get arity 1 for the result! -- -- AND through a layer of recursive newtypes -- e.g. 
newtype Stream m a b = Stream (m (Either b (a, Stream m a b))) | otherwise = [] typeOneShot :: Type -> OneShotInfo typeOneShot ty | isStateHackType ty = OneShotLam | otherwise = NoOneShotInfo -- | Like 'idOneShotInfo', but taking the Horrible State Hack in to account -- See Note [The state-transformer hack] in "GHC.Core.Opt.Arity" idStateHackOneShotInfo :: Id -> OneShotInfo idStateHackOneShotInfo id | isStateHackType (idType id) = OneShotLam | otherwise = idOneShotInfo id -- | Returns whether the lambda associated with the 'Id' is -- certainly applied at most once -- This one is the "business end", called externally. -- It works on type variables as well as Ids, returning True -- Its main purpose is to encapsulate the Horrible State Hack -- See Note [The state-transformer hack] in "GHC.Core.Opt.Arity" isOneShotBndr :: Var -> Bool isOneShotBndr var | isTyVar var = True | OneShotLam <- idStateHackOneShotInfo var = True | otherwise = False isStateHackType :: Type -> Bool isStateHackType ty | unsafeHasNoStateHack -- Switch off with -fno-state-hack = False | otherwise = case tyConAppTyCon_maybe ty of Just tycon -> tycon == statePrimTyCon _ -> False -- This is a gross hack. It claims that -- every function over realWorldStatePrimTy is a one-shot -- function. This is pretty true in practice, and makes a big -- difference. For example, consider -- a `thenST` \ r -> ...E... -- The early full laziness pass, if it doesn't know that r is one-shot -- will pull out E (let's say it doesn't mention r) to give -- let lvl = E in a `thenST` \ r -> ...lvl... -- When `thenST` gets inlined, we end up with -- let lvl = E in \s -> case a s of (r, s') -> ...lvl... -- and we don't re-inline E. -- -- It would be better to spot that r was one-shot to start with, but -- I don't want to rely on that. -- -- Another good example is in fill_in in PrelPack.hs. We should be able to -- spot that fill_in has arity 2 (and when Keith is done, we will) but we can't yet. 
{- Note [Arity invariants for bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have the following invariants for let-bindings (1) In any binding f = e, idArity f <= typeArity (idType f) We enforce this with trimArityType, called in findRhsArity; see Note [Arity trimming]. Note that we enforce this only for /bindings/. We do /not/ insist that arityTypeArity (arityType e) <= typeArity (exprType e) because that is quite a bit more expensive to guaranteed; it would mean checking at every Cast in the recursive arityType, for example. (2) If typeArity (exprType e) = n, then manifestArity (etaExpand e n) = n That is, etaExpand can always expand as much as typeArity says (or less, of course). So the case analysis in etaExpand and in typeArity must match. Consequence: because of (1), if we eta-expand to (idArity f), we will end up with n manifest lambdas. (3) In any binding f = e, idArity f <= arityTypeArity (safeArityType (arityType e)) That is, we call safeArityType before attributing e's arityType to f. See Note [SafeArityType]. So we call safeArityType in findRhsArity. Suppose we have f :: Int -> Int -> Int f x y = x+y -- Arity 2 g :: F Int g = case of { True -> f |> co1 ; False -> g |> co2 } where F is a type family. Now, we can't eta-expand g to have arity 2, because etaExpand, which works off the /type/ of the expression (albeit looking through newtypes), doesn't know how to make an eta-expanded binding g = (\a b. case x of ...) |> co because it can't make up `co` or the types of `a` and `b`. So invariant (1) ensures that every binding has an arity that is no greater than the typeArity of the RHS; and invariant (2) ensures that etaExpand and handle what typeArity says. Why is this important? 
Because - In GHC.Iface.Tidy we use exprArity/manifestArity to fix the *final arity* of each top-level Id, and in - In CorePrep we use etaExpand on each rhs, so that the visible lambdas actually match that arity, which in turn means that the StgRhs has a number of lambdas that precisely matches the arity. Note [Arity trimming] ~~~~~~~~~~~~~~~~~~~~~ Invariant (1) of Note [Arity invariants for bindings] is upheld by findRhsArity, which calls trimArityType to trim the ArityType to match the Arity of the binding. Failing to do so, and hence breaking invariant (1) led to #5441. How to trim? If we end in topDiv, it's easy. But we must take great care with dead ends (i.e. botDiv). Suppose the expression was (\x y. error "urk"), we'll get \??.⊥. We absolutely must not trim that to \?.⊥, because that claims that ((\x y. error "urk") |> co) diverges when given one argument, which it absolutely does not. And Bad Things happen if we think something returns bottom when it doesn't (#16066). So, if we need to trim a dead-ending arity type, switch (conservatively) to topDiv. Historical note: long ago, we unconditionally switched to topDiv when we encountered a cast, but that is far too conservative: see #5475 Note [Newtype classes and eta expansion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NB: this nasty special case is no longer required, because for newtype classes we don't use the class-op rule mechanism at all. See Note [Single-method classes] in GHC.Tc.TyCl.Instance. SLPJ May 2013 -------- Old out of date comments, just for interest ----------- We have to be careful when eta-expanding through newtypes. In general it's a good idea, but annoyingly it interacts badly with the class-op rule mechanism. Consider class C a where { op :: a -> a } instance C b => C [b] where op x = ... These translate to co :: forall a. (a->a) ~ C a $copList :: C b -> [b] -> [b] $copList d x = ... 
$dfList :: C b -> C [b] {-# DFunUnfolding = [$copList] #-} $dfList d = $copList d |> co@[b] Now suppose we have: dCInt :: C Int blah :: [Int] -> [Int] blah = op ($dfList dCInt) Now we want the built-in op/$dfList rule will fire to give blah = $copList dCInt But with eta-expansion 'blah' might (and in #3772, which is slightly more complicated, does) turn into blah = op (\eta. ($dfList dCInt |> sym co) eta) and now it is *much* harder for the op/$dfList rule to fire, because exprIsConApp_maybe won't hold of the argument to op. I considered trying to *make* it hold, but it's tricky and I gave up. The test simplCore/should_compile/T3722 is an excellent example. -------- End of old out of date comments, just for interest ----------- -} {- ******************************************************************** * * Zapping lambda binders * * ********************************************************************* -} zapLamBndrs :: FullArgCount -> [Var] -> [Var] -- If (\xyz. t) appears under-applied to only two arguments, -- we must zap the occ-info on x,y, because they appear (in 't') under the \z. 
-- See Note [Occurrence analysis for lambda binders] in GHc.Core.Opt.OccurAnal -- -- NB: both `arg_count` and `bndrs` include both type and value args/bndrs zapLamBndrs arg_count bndrs | no_need_to_zap = bndrs | otherwise = zap_em arg_count bndrs where no_need_to_zap = all isOneShotBndr (drop arg_count bndrs) zap_em :: FullArgCount -> [Var] -> [Var] zap_em 0 bs = bs zap_em _ [] = [] zap_em n (b:bs) | isTyVar b = b : zap_em (n-1) bs | otherwise = zapLamIdInfo b : zap_em (n-1) bs {- ********************************************************************* * * Computing the "arity" of an expression * * ************************************************************************ Note [Definition of arity] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The "arity" of an expression 'e' is n if applying 'e' to *fewer* than n *value* arguments converges rapidly Or, to put it another way there is no work lost in duplicating the partial application (e x1 .. x(n-1)) In the divergent case, no work is lost by duplicating because if the thing is evaluated once, that's the end of the program. Or, to put it another way, in any context C C[ (\x1 .. xn. e x1 .. xn) ] is as efficient as C[ e ] It's all a bit more subtle than it looks: Note [One-shot lambdas] ~~~~~~~~~~~~~~~~~~~~~~~ Consider one-shot lambdas let x = expensive in \y z -> E We want this to have arity 1 if the \y-abstraction is a 1-shot lambda. Note [Dealing with bottom] ~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC does some transformations that are technically unsound wrt bottom, because doing so improves arities... a lot! We describe them in this Note. The flag -fpedantic-bottoms (off by default) restore technically correct behaviour at the cots of efficiency. It's mostly to do with eta-expansion. Consider f = \x -> case x of True -> \s -> e1 False -> \s -> e2 This happens all the time when f :: Bool -> IO () In this case we do eta-expand, in order to get that \s to the top, and give f arity 2. This isn't really right in the presence of seq. 
Consider (f bot) `seq` 1 This should diverge! But if we eta-expand, it won't. We ignore this "problem" (unless -fpedantic-bottoms is on), because being scrupulous would lose an important transformation for many programs. (See #5587 for an example.) Consider also f = \x -> error "foo" Here, arity 1 is fine. But if it looks like this (see #22068) f = \x -> case x of True -> error "foo" False -> \y -> x+y then we want to get arity 2. Technically, this isn't quite right, because (f True) `seq` 1 should diverge, but it'll converge if we eta-expand f. Nevertheless, we do so; it improves some programs significantly, and increasing convergence isn't a bad thing. Hence the ABot/ATop in ArityType. So these two transformations aren't always the Right Thing, and we have several tickets reporting unexpected behaviour resulting from this transformation. So we try to limit it as much as possible: (1) Do NOT move a lambda outside a known-bottom case expression case undefined of { (a,b) -> \y -> e } This showed up in #5557 (2) Do NOT move a lambda outside a case unless (a) The scrutinee is ok-for-speculation, or (b) more liberally: the scrutinee is cheap (e.g. a variable), and -fpedantic-bottoms is not enforced (see #2915 for an example) Of course both (1) and (2) are readily defeated by disguising the bottoms. There also is an interaction with Note [Combining arity type with demand info], outlined in Wrinkle (CAD1). Note [Newtype arity] ~~~~~~~~~~~~~~~~~~~~ Non-recursive newtypes are transparent, and should not get in the way. We do (currently) eta-expand recursive newtypes too. So if we have, say newtype T = MkT ([T] -> Int) Suppose we have e = coerce T f where f has arity 1. Then: etaExpandArity e = 1; that is, etaExpandArity looks through the coerce. 
When we eta-expand e to arity 1: eta_expand 1 e T we want to get: coerce T (\x::[T] -> (coerce ([T]->Int) e) x) HOWEVER, note that if you use coerce bogusly you can ge coerce Int negate And since negate has arity 2, you might try to eta expand. But you can't decompose Int to a function type. Hence the final case in eta_expand. Note [The state-transformer hack] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have f = e where e has arity n. Then, if we know from the context that f has a usage type like t1 -> ... -> tn -1-> t(n+1) -1-> ... -1-> tm -> ... then we can expand the arity to m. This usage type says that any application (x e1 .. en) will be applied to uniquely to (m-n) more args Consider f = \x. let y = in case x of True -> foo False -> \(s:RealWorld) -> e where foo has arity 1. Then we want the state hack to apply to foo too, so we can eta expand the case. Then we expect that if f is applied to one arg, it'll be applied to two (that's the hack -- we don't really know, and sometimes it's false) See also Id.isOneShotBndr. Note [State hack and bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's a terrible idea to use the state hack on a bottoming function. Here's what happens (#2861): f :: String -> IO T f = \p. error "..." Eta-expand, using the state hack: f = \p. (\s. ((error "...") |> g1) s) |> g2 g1 :: IO T ~ (S -> (S,T)) g2 :: (S -> (S,T)) ~ IO T Extrude the g2 f' = \p. \s. ((error "...") |> g1) s f = f' |> (String -> g2) Discard args for bottoming function f' = \p. \s. ((error "...") |> g1 |> g3 g3 :: (S -> (S,T)) ~ (S,T) Extrude g1.g3 f'' = \p. \s. (error "...") f' = f'' |> (String -> S -> g1.g3) And now we can repeat the whole loop. Aargh! The bug is in applying the state hack to a function which then swallows the argument. This arose in another guise in #3959. Here we had catch# (throw exn >> return ()) Note that (throw :: forall a e. Exn e => e -> a) is called with [a = IO ()]. After inlining (>>) we get catch# (\_. 
throw {IO ()} exn) We must *not* eta-expand to catch# (\_ _. throw {...} exn) because 'catch#' expects to get a (# _,_ #) after applying its argument to a State#, not another function! In short, we use the state hack to allow us to push let inside a lambda, but not to introduce a new lambda. Note [ArityType] ~~~~~~~~~~~~~~~~ ArityType can be thought of as an abstraction of an expression. The ArityType AT [ (IsCheap, NoOneShotInfo) , (IsExpensive, OneShotLam) , (IsCheap, OneShotLam) ] Dunno) abstracts an expression like \x. let in \y{os}. \z{os}. blah In general we have (AT lams div). Then * In lams :: [(Cost,OneShotInfo)] * The Cost flag describes the part of the expression down to the first (value) lambda. * The OneShotInfo flag gives the one-shot info on that lambda. * If 'div' is dead-ending ('isDeadEndDiv'), then application to 'length lams' arguments will surely diverge, similar to the situation with 'DmdType'. ArityType is the result of a compositional analysis on expressions, from which we can decide the real arity of the expression (extracted with function exprEtaExpandArity). We use the following notation: at ::= \p1..pn.div div ::= T | x | ⊥ p ::= (c o) c ::= X | C -- Expensive or Cheap o ::= ? | 1 -- NotOneShot or OneShotLam We may omit the \. if n = 0. And ⊥ stands for `AT [] botDiv` Here is an example demonstrating the notation: \(C?)(X1)(C1).T stands for AT [ (IsCheap,NoOneShotInfo) , (IsExpensive,OneShotLam) , (IsCheap,OneShotLam) ] topDiv See the 'Outputable' instance for more information. It's pretty simple. How can we use ArityType? Example: f = \x\y. let v = in \s(one-shot) \t(one-shot). blah 'f' has arity type \(C?)(C?)(X1)(C1).T The one-shot-ness means we can, in effect, push that 'let' inside the \st, and expand to arity 4 Suppose f = \xy. x+y Then f :: \(C?)(C?).T f v :: \(C?).T f :: \(X?).T Here is what the fields mean. 
If an arbitrary expression 'f' has ArityType 'at', then * If @at = AT [o1,..,on] botDiv@ (notation: \o1..on.⊥), then @f x1..xn@ definitely diverges. Partial applications to fewer than n args may *or may not* diverge. Ditto exnDiv. * If `f` has ArityType `at` we can eta-expand `f` to have (aritTypeOneShots at) arguments without losing sharing. This function checks that the either there are no expensive expressions, or the lambdas are one-shots. NB 'f' is an arbitrary expression, eg @f = g e1 e2@. This 'f' can have arity type @AT oss _@, with @length oss > 0@, only if e1 e2 are themselves cheap. * In both cases, @f@, @f x1@, ... @f x1 ... x(n-1)@ are definitely really functions, or bottom, but *not* casts from a data type, in at least one case branch. (If it's a function in one case branch but an unsafe cast from a data type in another, the program is bogus.) So eta expansion is dynamically ok; see Note [State hack and bottoming functions], the part about catch# Wrinkles * Wrinkle [Bottoming functions]: see function 'arityLam'. We treat bottoming functions as one-shot, because there is no point in floating work outside the lambda, and it's fine to float it inside. For example, this is fine (see test stranal/sigs/BottomFromInnerLambda) let x = in \y. error (g x y) ==> \y. let x = in error (g x y) Idea: perhaps we could enforce this invariant with data Arity Type = TopAT [(Cost, OneShotInfo)] | DivAT [Cost] Note [SafeArityType] ~~~~~~~~~~~~~~~~~~~~ The function safeArityType trims an ArityType to return a "safe" ArityType, for which we use a type synonym SafeArityType. It is "safe" in the sense that (arityTypeArity at) really reflects the arity of the expression, whereas a regular ArityType might have more lambdas in its [ATLamInfo] that the (cost-free) arity of the expression. For example \x.\y.let v = expensive in \z. blah has arityType = AT [C?, C?, X?, C?] Top But the expression actually has arity 2, not 4, because of the X. 
So safeArityType will trim it to (AT [C?, C?] Top), whose [ATLamInfo] now reflects the (cost-free) arity of the expression Why do we ever need an "unsafe" ArityType, such as the example above? Because its (cost-free) arity may increased by combineWithCallCards in findRhsArity. See Note [Combining arity type with demand info]. Thus the function `arityType` returns a regular "unsafe" ArityType, that goes deeply into the lambdas (including under IsExpensive). But that is very local; most ArityTypes are indeed "safe". We use the type synonym SafeArityType to indicate where we believe the ArityType is safe. -} -- | The analysis lattice of arity analysis. It is isomorphic to -- -- @ -- data ArityType' -- = AEnd Divergence -- | ALam OneShotInfo ArityType' -- @ -- -- Which is easier to display the Hasse diagram for: -- -- @ -- ALam OneShotLam at -- | -- AEnd topDiv -- | -- ALam NoOneShotInfo at -- | -- AEnd exnDiv -- | -- AEnd botDiv -- @ -- -- where the @at@ fields of @ALam@ are inductively subject to the same order. -- That is, @ALam os at1 < ALam os at2@ iff @at1 < at2@. -- -- Why the strange Top element? -- See Note [Combining case branches: optimistic one-shot-ness] -- -- We rely on this lattice structure for fixed-point iteration in -- 'findRhsArity'. For the semantics of 'ArityType', see Note [ArityType]. data ArityType -- See Note [ArityType] = AT ![ATLamInfo] !Divergence -- ^ `AT oss div` is an abstraction of the expression, which describes -- its lambdas, and how much work appears where. -- See Note [ArityType] for more information -- -- If `div` is dead-ending ('isDeadEndDiv'), then application to -- `length os` arguments will surely diverge, similar to the situation -- with 'DmdType'. 
deriving Eq type ATLamInfo = (Cost,OneShotInfo) -- ^ Info about one lambda in an ArityType -- See Note [ArityType] type SafeArityType = ArityType -- See Note [SafeArityType] data Cost = IsCheap | IsExpensive deriving( Eq ) allCosts :: (a -> Cost) -> [a] -> Cost allCosts f xs = foldr (addCost . f) IsCheap xs addCost :: Cost -> Cost -> Cost addCost IsCheap IsCheap = IsCheap addCost _ _ = IsExpensive -- | This is the BNF of the generated output: -- -- @ -- @ -- -- We format -- @AT [o1,..,on] topDiv@ as @\o1..on.T@ and -- @AT [o1,..,on] botDiv@ as @\o1..on.⊥@, respectively. -- More concretely, @AT [NOI,OS,OS] topDiv@ is formatted as @\?11.T@. -- If the one-shot info is empty, we omit the leading @\.@. instance Outputable ArityType where ppr (AT oss div) | null oss = pp_div div | otherwise = char '\\' <> hcat (map pp_os oss) <> dot <> pp_div div where pp_div Diverges = char '⊥' pp_div ExnOrDiv = char 'x' pp_div Dunno = char 'T' pp_os (IsCheap, OneShotLam) = text "(C1)" pp_os (IsExpensive, OneShotLam) = text "(X1)" pp_os (IsCheap, NoOneShotInfo) = text "(C?)" pp_os (IsExpensive, NoOneShotInfo) = text "(X?)" mkBotArityType :: [OneShotInfo] -> ArityType mkBotArityType oss = AT [(IsCheap,os) | os <- oss] botDiv botArityType :: ArityType botArityType = mkBotArityType [] topArityType :: ArityType topArityType = AT [] topDiv -- | The number of value args for the arity type arityTypeArity :: SafeArityType -> Arity arityTypeArity (AT lams _) = length lams arityTypeOneShots :: SafeArityType -> [OneShotInfo] -- Returns a list only as long as the arity should be arityTypeOneShots (AT lams _) = map snd lams safeArityType :: ArityType -> SafeArityType -- ^ Assuming this ArityType is all we know, find the arity of -- the function, and trim the argument info (and Divergence) -- to match that arity. 
See Note [SafeArityType] safeArityType at@(AT lams _) = case go 0 IsCheap lams of Nothing -> at -- No trimming needed Just ar -> AT (take ar lams) topDiv where go :: Arity -> Cost -> [(Cost,OneShotInfo)] -> Maybe Arity go _ _ [] = Nothing go ar ch1 ((ch2,os):lams) = case (ch1 `addCost` ch2, os) of (IsExpensive, NoOneShotInfo) -> Just ar (ch, _) -> go (ar+1) ch lams infixl 2 `trimArityType` trimArityType :: Arity -> ArityType -> ArityType -- ^ Trim an arity type so that it has at most the given arity. -- Any excess 'OneShotInfo's are truncated to 'topDiv', even if -- they end in 'ABot'. See Note [Arity trimming] trimArityType max_arity at@(AT lams _) | lams `lengthAtMost` max_arity = at | otherwise = AT (take max_arity lams) topDiv data ArityOpts = ArityOpts { ao_ped_bot :: !Bool -- See Note [Dealing with bottom] , ao_dicts_cheap :: !Bool -- See Note [Eta expanding through dictionaries] } -- | The Arity returned is the number of value args the -- expression can be applied to without doing much work exprEtaExpandArity :: HasDebugCallStack => ArityOpts -> CoreExpr -> Maybe SafeArityType -- exprEtaExpandArity is used when eta expanding -- e ==> \xy -> e x y -- Nothing if the expression has arity 0 exprEtaExpandArity opts e | AT [] _ <- arity_type = Nothing | otherwise = Just arity_type where arity_type = safeArityType (arityType (findRhsArityEnv opts False) e) {- ********************************************************************* * * findRhsArity * * ********************************************************************* -} findRhsArity :: ArityOpts -> RecFlag -> Id -> CoreExpr -> (Bool, SafeArityType) -- This implements the fixpoint loop for arity analysis -- See Note [Arity analysis] -- -- The Bool is True if the returned arity is greater than (exprArity rhs) -- so the caller should do eta-expansion -- That Bool is never True for join points, which are never eta-expanded -- -- Returns an SafeArityType that is guaranteed trimmed to typeArity of 'bndr' -- See Note 
[Arity trimming] findRhsArity opts is_rec bndr rhs | isJoinId bndr = (False, join_arity_type) -- False: see Note [Do not eta-expand join points] -- But do return the correct arity and bottom-ness, because -- these are used to set the bndr's IdInfo (#15517) -- Note [Invariants on join points] invariant 2b, in GHC.Core | otherwise = (arity_increased, non_join_arity_type) -- arity_increased: eta-expand if we'll get more lambdas -- to the top of the RHS where old_arity = exprArity rhs init_env :: ArityEnv init_env = findRhsArityEnv opts (isJoinId bndr) -- Non-join-points only non_join_arity_type = case is_rec of Recursive -> go 0 botArityType NonRecursive -> step init_env arity_increased = arityTypeArity non_join_arity_type > old_arity -- Join-points only -- See Note [Arity for non-recursive join bindings] -- and Note [Arity for recursive join bindings] join_arity_type = case is_rec of Recursive -> go 0 botArityType NonRecursive -> trimArityType ty_arity (cheapArityType rhs) ty_arity = typeArity (idType bndr) use_call_cards = useSiteCallCards bndr step :: ArityEnv -> SafeArityType step env = trimArityType ty_arity $ safeArityType $ -- See Note [Arity invariants for bindings], item (3) combineWithCallCards env (arityType env rhs) use_call_cards -- trimArityType: see Note [Trim arity inside the loop] -- combineWithCallCards: take account of the demand on the -- binder. Perhaps it is always called with 2 args -- let f = \x. blah in (f 3 4, f 1 9) -- f's demand-info says how many args it is called with -- The fixpoint iteration (go), done for recursive bindings. We -- always do one step, but usually that produces a result equal -- to old_arity, and then we stop right away, because old_arity -- is assumed to be sound. In other words, arities should never -- decrease. 
Result: the common case is that there is just one -- iteration go :: Int -> SafeArityType -> SafeArityType go !n cur_at@(AT lams div) | not (isDeadEndDiv div) -- the "stop right away" case , length lams <= old_arity = cur_at -- from above | next_at == cur_at = cur_at | otherwise -- Warn if more than 2 iterations. Why 2? See Note [Exciting arity] = warnPprTrace (debugIsOn && n > 2) "Exciting arity" (nest 2 (ppr bndr <+> ppr cur_at <+> ppr next_at $$ ppr rhs)) $ go (n+1) next_at where next_at = step (extendSigEnv init_env bndr cur_at) combineWithCallCards :: ArityEnv -> ArityType -> [Card] -> ArityType -- See Note [Combining arity type with demand info] combineWithCallCards env at@(AT lams div) cards | null lams = at | otherwise = AT (zip_lams lams oss) div where oss = map card_to_oneshot cards card_to_oneshot n | isAtMostOnce n, not (pedanticBottoms env) -- Take care for -fpedantic-bottoms; -- see Note [Combining arity type with demand info], Wrinkle (CAD1) = OneShotLam | n == C_11 -- Safe to eta-expand even in the presence of -fpedantic-bottoms -- see Note [Combining arity type with demand info], Wrinkle (CAD1) = OneShotLam | otherwise = NoOneShotInfo zip_lams :: [ATLamInfo] -> [OneShotInfo] -> [ATLamInfo] zip_lams lams [] = lams zip_lams [] oss | isDeadEndDiv div = [] | otherwise = [ (IsExpensive,OneShotLam) | _ <- takeWhile isOneShotInfo oss] zip_lams ((ch,os1):lams) (os2:oss) = (ch, os1 `bestOneShot` os2) : zip_lams lams oss useSiteCallCards :: Id -> [Card] useSiteCallCards bndr = call_arity_one_shots `zip_cards` dmd_one_shots where call_arity_one_shots :: [Card] call_arity_one_shots | call_arity == 0 = [] | otherwise = C_0N : replicate (call_arity-1) C_01 -- Call Arity analysis says /however often the function is called/, it is -- always applied to this many arguments. -- The first C_0N is because of the "however often it is called" part. 
-- Thus if Call Arity says "always applied to 3 args" then the one-shot info -- we get is [C_0N, C_01, C_01] call_arity = idCallArity bndr dmd_one_shots :: [Card] -- If the demand info is C(x,C(1,C(1,.))) then we know that an -- application to one arg is also an application to three dmd_one_shots = case idDemandInfo bndr of AbsDmd -> [] -- There is no use in eta expanding BotDmd -> [] -- when the binding could be dropped instead _ :* sd -> callCards sd -- Take the *longer* list zip_cards (n1:ns1) (n2:ns2) = (n1 `glbCard` n2) : zip_cards ns1 ns2 zip_cards [] ns2 = ns2 zip_cards ns1 [] = ns1 {- Note [Arity analysis] ~~~~~~~~~~~~~~~~~~~~~~~~ The motivating example for arity analysis is this: f = \x. let g = f (x+1) in \y. ...g... What arity does f have? Really it should have arity 2, but a naive look at the RHS won't see that. You need a fixpoint analysis which says it has arity "infinity" the first time round. This example happens a lot; it first showed up in Andy Gill's thesis, fifteen years ago! It also shows up in the code for 'rnf' on lists in #4138. We do the necessary, quite simple fixed-point iteration in 'findRhsArity', which assumes for a single binding 'ABot' on the first run and iterates until it finds a stable arity type. Two wrinkles * We often have to ask (see the Case or Let case of 'arityType') whether some expression is cheap. In the case of an application, that depends on the arity of the application head! That's why we have our own version of 'exprIsCheap', 'myExprIsCheap', that will integrate the optimistic arity types we have on f and g into the cheapness check. * Consider this (#18793) go = \ds. case ds of [] -> id (x:ys) -> let acc = go ys in case blah of True -> acc False -> \ x1 -> acc (negate x1) We must propagate go's optimistically large arity to @acc@, so that the tail call to @acc@ in the True branch has sufficient arity. This is done by the 'am_sigs' field in 'FindRhsArity', and 'lookupSigEnv' in the Var case of 'arityType'. 
Note [Exciting arity] ~~~~~~~~~~~~~~~~~~~~~ The fixed-point iteration in 'findRhsArity' stabilises very quickly in almost all cases. To get notified of cases where we need an unusual number of iterations, we emit a warning in debug mode, so that we can investigate and make sure that we really can't do better. It's a gross hack, but catches real bugs (#18870). Now, which number is "unusual"? We pick n > 2. Here's a pretty common and expected example that takes two iterations and would ruin the specificity of the warning (from T18937): f :: [Int] -> Int -> Int f [] = id f (x:xs) = let y = sum [0..x] in \z -> f xs (y + z) Fixed-point iteration starts with arity type ⊥ for f. After the first iteration, we get arity type \??.T, e.g. arity 2, because we unconditionally 'floatIn' the let-binding (see its bottom case). After the second iteration, we get arity type \?.T, e.g. arity 1, because now we are no longer allowed to floatIn the non-cheap let-binding. Which is all perfectly benign, but means we do two iterations (well, actually 3 'step's to detect we are stable) and don't want to emit the warning. Note [Trim arity inside the loop] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here's an example (from gadt/nbe.hs) which caused trouble. data Exp g t where Lam :: Ty a -> Exp (g,a) b -> Exp g (a->b) eval :: Exp g t -> g -> t eval (Lam _ e) g = \a -> eval e (g,a) The danger is that we get arity 3 from analysing this; and the next time arity 4, and so on for ever. Solution: use trimArityType on each iteration. Note [Combining arity type with demand info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider let f = \x. let y = in \p \q{os}. blah in ...(f a b)...(f c d)... * From the RHS we get an ArityType like AT [ (IsCheap,?), (IsExpensive,?), (IsCheap,OneShotLam) ] Dunno where "?" means NoOneShotInfo * From the body, the demand analyser (or Call Arity) will tell us that the function is always applied to at least two arguments.
Combining these two pieces of info, we can get the final ArityType AT [ (IsCheap,?), (IsExpensive,OneShotLam), (IsCheap,OneShotLam) ] Dunno result: arity=3, which is better than we could do from either source alone. The "combining" part is done by combineWithCallCards. It uses info from both Call Arity and demand analysis. We may have /more/ call demands from the calls than we have lambdas in the binding. E.g. let f1 = \x. g x x in ...(f1 p q r)... -- Demand on f1 is C(x,C(1,C(1,L))) let f2 = \y. error y in ...(f2 p q r)... -- Demand on f2 is C(x,C(1,C(1,L))) In both these cases we can eta expand f1 and f2 to arity 3. But /only/ for called-once demands. Suppose we had let f1 = \y. g x x in ...let h = f1 p q in ...(h r1)...(h r2)... Now we don't want to eta-expand f1 to have 3 args; only two. Nor, in the case of f2, do we want to push that error call under a lambda. Hence the takeWhile in combineWithCallCards. Wrinkles: (CAD1) #24296 exposed a subtle interaction with -fpedantic-bottoms (See Note [Dealing with bottom]). Consider let f = \x y. error "blah" in f 2 1 `seq` Just (f 3 2 1) -- Demand on f is C(x,C(1,C(M,L))) Usually, it is OK to consider a lambda that is called *at most* once (so call cardinality C_01, abbreviated M) a one-shot lambda and eta-expand over it. But with -fpedantic-bottoms that is no longer true: If we were to eta-expand f to arity 3, we'd discard the error raised when evaluating `f 2 1`. Hence in the presence of -fpedantic-bottoms, we must have C_11 for eta-expansion. Note [Do not eta-expand join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Similarly to CPR (see Note [Don't w/w join points for CPR] in GHC.Core.Opt.WorkWrap), a join point stands well to gain from its outer binding's eta-expansion, and eta-expanding a join point is fraught with issues like how to deal with a cast: let join $j1 :: IO () $j1 = ... $j2 :: Int -> IO () $j2 n = if n > 0 then $j1 else ... => let join $j1 :: IO () $j1 = (\eta -> ...)
`cast` N:IO :: State# RealWorld -> (# State# RealWorld, ()) ~ IO () $j2 :: Int -> IO () $j2 n = (\eta -> if n > 0 then $j1 else ...) `cast` N:IO :: State# RealWorld -> (# State# RealWorld, ()) ~ IO () The cast here can't be pushed inside the lambda (since it's not casting to a function type), so the lambda has to stay, but it can't because it contains a reference to a join point. In fact, $j2 can't be eta-expanded at all. Rather than try and detect this situation (and whatever other situations crop up!), we don't bother; again, any surrounding eta-expansion will improve these join points anyway, since an outer cast can *always* be pushed inside. By the time CorePrep comes around, the code is very likely to look more like this: let join $j1 :: State# RealWorld -> (# State# RealWorld, ()) $j1 = (...) eta $j2 :: Int -> State# RealWorld -> (# State# RealWorld, ()) $j2 = if n > 0 then $j1 else (...) eta Note [Arity for recursive join bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = joinrec j 0 = \ a b c -> (a,x,b) j n = j (n-1) in j 20 Obviously `f` should get arity 4. But it's a bit tricky: 1. Remember, we don't eta-expand join points; see Note [Do not eta-expand join points]. 2. But even though we aren't going to eta-expand it, we still want `j` to get idArity=4, via the findRhsArity fixpoint. Then when we are doing findRhsArity for `f`, we'll call arityType on f's RHS: - At the letrec-binding for `j` we'll whiz up an arity-4 ArityType for `j` (See Note [arityType for non-recursive let-bindings] in GHC.Core.Opt.Arity) - At the occurrence (j 20) that arity-4 ArityType will leave an arity-3 result. 3. All this, even though j's /join-arity/ (stored in the JoinId) is 1. This is the Main Reason that we want the idArity to sometimes be larger than the join-arity c.f. Note [Invariants on join points] item 2b in GHC.Core. 4.
Be very careful of things like this (#21755): g x = let j 0 = \y -> (x,y) j n = expensive n `seq` j (n-1) in j x Here we do /not/ want eta-expand `g`, lest we duplicate all those (expensive n) calls. But it's fine: the findRhsArity fixpoint calculation will compute arity-1 for `j` (not arity 2); and that's just what we want. But we do need that fixpoint. Historical note: an earlier version of GHC did a hack in which we gave join points an ArityType of ABot, but that did not work with this #21755 case. 5. arityType does not usually expect to encounter free join points; see GHC.Core.Opt.Arity Note [No free join points in arityType]. But consider f x = join j1 y = .... in joinrec j2 z = ...j1 y... in j2 v When doing findRhsArity on `j2` we'll encounter the free `j1`. But that is fine, because we aren't going to eta-expand `j2`; we just want to know its arity. So we have a flag am_no_eta, switched on when doing findRhsArity on a join point RHS. If the flag is on, we allow free join points, but not otherwise. Note [Arity for non-recursive join bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Arity for recursive join bindings] deals with recursive join bindings. But what about /non-recursive/ones? If we just call findRhsArity, it will call arityType. And that can be expensive when we have deeply nested join points: join j1 x1 = join j2 x2 = join j3 x3 = blah3 in blah2 in blah1 (e.g. test T18698b). So we call cheapArityType instead. It's good enough for practical purposes. (Side note: maybe we should use cheapArity for the RHS of let bindings in the main arityType function.) 
-} {- ********************************************************************* * * arityType * * ********************************************************************* -} arityLam :: Id -> ArityType -> ArityType arityLam id (AT oss div) = AT ((IsCheap, one_shot) : oss) div where one_shot | isDeadEndDiv div = OneShotLam | otherwise = idStateHackOneShotInfo id -- If the body diverges, treat it as one-shot: no point -- in floating out, and no penalty for floating in -- See Wrinkle [Bottoming functions] in Note [ArityType] floatIn :: Cost -> ArityType -> ArityType -- We have something like (let x = E in b), -- where b has the given arity type. -- NB: be as lazy as possible in the Cost-of-E argument; -- we can often get away without ever looking at it -- See Note [Care with nested expressions] floatIn ch at@(AT lams div) = case lams of [] -> at (IsExpensive,_):_ -> at (_,os):lams -> AT ((ch,os):lams) div addWork :: ArityType -> ArityType -- Add work to the outermost level of the arity type addWork at@(AT lams div) = case lams of [] -> at lam:lams' -> AT (add_work lam : lams') div add_work :: ATLamInfo -> ATLamInfo add_work (_,os) = (IsExpensive,os) arityApp :: ArityType -> Cost -> ArityType -- Processing (fun arg) where at is the ArityType of fun, -- Knock off an argument and behave like 'let' arityApp (AT ((ch1,_):oss) div) ch2 = floatIn (ch1 `addCost` ch2) (AT oss div) arityApp at _ = at -- | Least upper bound in the 'ArityType' lattice. -- See the haddocks on 'ArityType' for the lattice. -- -- Used for branches of a @case@. 
andArityType :: ArityEnv -> ArityType -> ArityType -> ArityType andArityType env (AT (lam1:lams1) div1) (AT (lam2:lams2) div2) | AT lams' div' <- andArityType env (AT lams1 div1) (AT lams2 div2) = AT ((lam1 `and_lam` lam2) : lams') div' where (ch1,os1) `and_lam` (ch2,os2) = ( ch1 `addCost` ch2, os1 `bestOneShot` os2) -- bestOneShot: see Note [Combining case branches: optimistic one-shot-ness] andArityType env (AT [] div1) at2 = andWithTail env div1 at2 andArityType env at1 (AT [] div2) = andWithTail env div2 at1 andWithTail :: ArityEnv -> Divergence -> ArityType -> ArityType andWithTail env div1 at2@(AT lams2 _) | isDeadEndDiv div1 -- case x of { T -> error; F -> \y.e } = at2 -- See Note | pedanticBottoms env -- [Combining case branches: andWithTail] = AT [] topDiv | otherwise -- case x of { T -> plusInt ; F -> \y.e } = AT (map add_work lams2) topDiv -- We know div1 = topDiv -- See Note [Combining case branches: andWithTail] {- Note [Combining case branches: optimistic one-shot-ness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When combining the ArityTypes for two case branches (with andArityType) and both ArityTypes have ATLamInfo, then we just combine their expensive-ness and one-shot info. The tricky point is when we have case x of True -> \x{one-shot). blah1 Fale -> \y. blah2 Since one-shot-ness is about the /consumer/ not the /producer/, we optimistically assume that if either branch is one-shot, we combine the best of the two branches, on the (slightly dodgy) basis that if we know one branch is one-shot, then they all must be. Surprisingly, this means that the one-shot arity type is effectively the top element of the lattice. Hence the call to `bestOneShot` in `andArityType`. Here's an example: go = \x. let z = go e0 go2 = \x. case x of True -> z False -> \s(one-shot). e1 in go2 x We *really* want to respect the one-shot annotation provided by the user and eta-expand go and go2. 
In the first fixpoint iteration of 'go' we'll bind 'go' to botArityType (written \.⊥, see Note [ArityType]). So 'z' will get arityType \.⊥; so we end up combining the True and False branches: \.⊥ `andArityType` \1.T That gives \1.T (see Note [Combining case branches: andWithTail], first bullet). So 'go2' gets an arityType of \(C?)(C1).T, which is what we want. Note [Care with nested expressions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider arityType (Just ) We will take arityType Just = AT [(IsCheap,os)] topDiv and then do arityApp (AT [(IsCheap,os)] topDiv) (exprCost ) The result will be AT [] topDiv. It doesn't matter what is! The same is true of arityType (let x = in ) where the cost of doesn't matter unless has a useful arityType. TL;DR in `floatIn`, do not look at the Cost argument until you have to. I found this when looking at #24471, although I don't think it was really the main culprit. Note [Combining case branches: andWithTail] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When combining the ArityTypes for two case branches (with andArityType) and one side or the other has run out of ATLamInfo; then we get into `andWithTail`. * If one branch is guaranteed bottom (isDeadEndDiv), we just take the other. Consider case x of True -> \x. error "urk" False -> \xy. error "urk2" Remember: \o1..on.⊥ means "if you apply to n args, it'll definitely diverge". So we need \??.⊥ for the whole thing, the /max/ of both arities. * Otherwise, if pedantic-bottoms is on, we just have to return AT [] topDiv. E.g. if we have f x z = case x of True -> \y. blah False -> z then we can't eta-expand, because that would change the behaviour of (f False bottom). * But if pedantic-bottoms is not on, we allow ourselves to push `z` under a lambda (much as we allow ourselves to put the `case x` under a lambda). However we know nothing about the expensiveness or one-shot-ness of `z`, so we'd better assume it looks like (Expensive, NoOneShotInfo) all the way.
Remembering Note [Combining case branches: optimistic one-shot-ness], we just add work to ever ATLamInfo, keeping the one-shot-ness. Note [Eta expanding through CallStacks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Just as it's good to eta-expand through dictionaries, so it is good to do so through CallStacks. #20103 is a case in point, where we got foo :: HasCallStack => Int -> Int foo = \(d::CallStack). let d2 = pushCallStack blah d in \(x:Int). blah We really want to eta-expand this! #20103 is quite convincing! We do this regardless of -fdicts-cheap; it's not really a dictionary. We also want to check both for (IP blah CallStack) and for CallStack itself. We might have either d :: IP blah CallStack -- Or HasCallStack d = (cs-expr :: CallStack) |> (nt-ax :: CallStack ~ IP blah CallStack) or just cs :: CallStack cs = cs-expr Test T20103 is an example of the latter. Note [Eta expanding through dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the experimental -fdicts-cheap flag is on, we eta-expand through dictionary bindings. This improves arities. Thereby, it also means that full laziness is less prone to floating out the application of a function to its dictionary arguments, which can thereby lose opportunities for fusion. Example: foo :: Ord a => a -> ... foo = /\a \(d:Ord a). let d' = ...d... in \(x:a). .... -- So foo has arity 1 f = \x. foo dInt $ bar x The (foo DInt) is floated out, and makes ineffective a RULE foo (bar x) = ... One could go further and make exprIsCheap reply True to any dictionary-typed expression, but that's more work. -} --------------------------- data ArityEnv = AE { am_opts :: !ArityOpts , am_sigs :: !(IdEnv SafeArityType) -- NB `SafeArityType` so we can use this in myIsCheapApp -- See Note [Arity analysis] for details about fixed-point iteration. 
, am_free_joins :: !Bool -- True <=> free join points allowed -- Used /only/ to support assertion checks } instance Outputable ArityEnv where ppr (AE { am_sigs = sigs, am_free_joins = free_joins }) = text "AE" <+> braces (sep [ text "free joins:" <+> ppr free_joins , text "sigs:" <+> ppr sigs ]) -- | The @ArityEnv@ used by 'findRhsArity'. findRhsArityEnv :: ArityOpts -> Bool -> ArityEnv findRhsArityEnv opts free_joins = AE { am_opts = opts , am_free_joins = free_joins , am_sigs = emptyVarEnv } freeJoinsOK :: ArityEnv -> Bool freeJoinsOK (AE { am_free_joins = free_joins }) = free_joins -- First some internal functions in snake_case for deleting in certain VarEnvs -- of the ArityType. Don't call these; call delInScope* instead! modifySigEnv :: (IdEnv ArityType -> IdEnv ArityType) -> ArityEnv -> ArityEnv modifySigEnv f env@(AE { am_sigs = sigs }) = env { am_sigs = f sigs } {-# INLINE modifySigEnv #-} del_sig_env :: Id -> ArityEnv -> ArityEnv -- internal! del_sig_env id = modifySigEnv (\sigs -> delVarEnv sigs id) {-# INLINE del_sig_env #-} del_sig_env_list :: [Id] -> ArityEnv -> ArityEnv -- internal! del_sig_env_list ids = modifySigEnv (\sigs -> delVarEnvList sigs ids) {-# INLINE del_sig_env_list #-} -- end of internal deletion functions extendSigEnv :: ArityEnv -> Id -> SafeArityType -> ArityEnv extendSigEnv env id ar_ty = modifySigEnv (\sigs -> extendVarEnv sigs id ar_ty) $ env delInScope :: ArityEnv -> Id -> ArityEnv delInScope env id = del_sig_env id env delInScopeList :: ArityEnv -> [Id] -> ArityEnv delInScopeList env ids = del_sig_env_list ids env lookupSigEnv :: ArityEnv -> Id -> Maybe SafeArityType lookupSigEnv (AE { am_sigs = sigs }) id = lookupVarEnv sigs id -- | Whether the analysis should be pedantic about bottoms. -- 'exprBotStrictness_maybe' always is. 
pedanticBottoms :: ArityEnv -> Bool pedanticBottoms (AE { am_opts = ArityOpts{ ao_ped_bot = ped_bot }}) = ped_bot exprCost :: ArityEnv -> CoreExpr -> Maybe Type -> Cost exprCost env e mb_ty | myExprIsCheap env e mb_ty = IsCheap | otherwise = IsExpensive -- | A version of 'exprIsCheap' that considers results from arity analysis -- and optionally the expression's type. -- Under 'exprBotStrictness_maybe', no expressions are cheap. myExprIsCheap :: ArityEnv -> CoreExpr -> Maybe Type -> Bool myExprIsCheap (AE { am_opts = opts, am_sigs = sigs }) e mb_ty = cheap_dict || cheap_fun e where cheap_dict = case mb_ty of Nothing -> False Just ty -> (ao_dicts_cheap opts && isDictTy ty) || isCallStackPredTy ty || isCallStackTy ty -- See Note [Eta expanding through dictionaries] -- See Note [Eta expanding through CallStacks] cheap_fun e = exprIsCheapX (myIsCheapApp sigs) e -- | A version of 'isCheapApp' that considers results from arity analysis. -- See Note [Arity analysis] for what's in the signature environment and why -- it's important. myIsCheapApp :: IdEnv SafeArityType -> CheapAppFun myIsCheapApp sigs fn n_val_args = case lookupVarEnv sigs fn of -- Nothing means not a local function, fall back to regular -- 'GHC.Core.Utils.isCheapApp' Nothing -> isCheapApp fn n_val_args -- `Just at` means local function with `at` as current SafeArityType. -- NB the SafeArityType bit: that means we can ignore the cost flags -- in 'lams', and just consider the length -- Roughly approximate what 'isCheapApp' is doing. 
Just (AT lams div) | isDeadEndDiv div -> True -- See Note [isCheapApp: bottoming functions] in GHC.Core.Utils | n_val_args == 0 -> True -- Essentially | n_val_args < length lams -> True -- isWorkFreeApp | otherwise -> False ---------------- arityType :: HasDebugCallStack => ArityEnv -> CoreExpr -> ArityType -- Precondition: all the free join points of the expression -- are bound by the ArityEnv -- See Note [No free join points in arityType] -- -- Returns ArityType, not SafeArityType. The caller must do -- trimArityType if necessary. arityType env (Var v) | Just at <- lookupSigEnv env v -- Local binding = at | otherwise = assertPpr (freeJoinsOK env || not (isJoinId v)) (ppr v) $ -- All join-point should be in the ae_sigs -- See Note [No free join points in arityType] idArityType v arityType env (Cast e _) = arityType env e -- Lambdas; increase arity arityType env (Lam x e) | isId x = arityLam x (arityType env' e) | otherwise = arityType env' e where env' = delInScope env x -- Applications; decrease arity, except for types arityType env (App fun (Type _)) = arityType env fun arityType env (App fun arg ) = arityApp fun_at arg_cost where fun_at = arityType env fun arg_cost = exprCost env arg Nothing -- Case/Let; keep arity if either the expression is cheap -- or it's a 1-shot lambda -- The former is not really right for Haskell -- f x = case x of { (a,b) -> \y. e } -- ===> -- f x y = case x of { (a,b) -> e } -- The difference is observable using 'seq' -- arityType env (Case scrut bndr _ alts) | exprIsDeadEnd scrut || null alts = botArityType -- Do not eta expand. 
See (1) in Note [Dealing with bottom] | not (pedanticBottoms env) -- See (2) in Note [Dealing with bottom] , myExprIsCheap env scrut (Just (idType bndr)) = alts_type | exprOkForSpeculation scrut = alts_type | otherwise -- In the remaining cases we may not push = addWork alts_type -- evaluation of the scrutinee in where env' = delInScope env bndr arity_type_alt (Alt _con bndrs rhs) = arityType (delInScopeList env' bndrs) rhs alts_type = foldr1 (andArityType env) (map arity_type_alt alts) arityType env (Let (NonRec b rhs) e) = -- See Note [arityType for non-recursive let-bindings] floatIn rhs_cost (arityType env' e) where rhs_cost = exprCost env rhs (Just (idType b)) env' = extendSigEnv env b (safeArityType (arityType env rhs)) arityType env (Let (Rec prs) e) = -- See Note [arityType for recursive let-bindings] floatIn (allCosts bind_cost prs) (arityType env' e) where bind_cost (b,e) = exprCost env' e (Just (idType b)) env' = foldl extend_rec env prs extend_rec :: ArityEnv -> (Id,CoreExpr) -> ArityEnv extend_rec env (b,_) = extendSigEnv env b $ idArityType b -- See Note [arityType for recursive let-bindings] arityType env (Tick t e) | not (tickishIsCode t) = arityType env e arityType _ _ = topArityType -------------------- idArityType :: Id -> ArityType idArityType v | strict_sig <- idDmdSig v , (ds, div) <- splitDmdSig strict_sig , isDeadEndDiv div = AT (takeList ds one_shots) div | isEmptyTy id_ty = botArityType | otherwise = AT (take (idArity v) one_shots) topDiv where id_ty = idType v one_shots :: [(Cost,OneShotInfo)] -- One-shot-ness derived from the type one_shots = repeat IsCheap `zip` typeOneShots id_ty -------------------- cheapArityType :: HasDebugCallStack => CoreExpr -> ArityType -- A fast and cheap version of arityType. -- Returns an ArityType with IsCheap everywhere -- c.f. GHC.Core.Utils.exprIsDeadEnd -- -- /Can/ encounter a free join-point Id; e.g. 
via the call -- in exprBotStrictness_maybe, which is called in lots -- of places -- -- Returns ArityType, not SafeArityType. The caller must do -- trimArityType if necessary. cheapArityType e = go e where go (Var v) = idArityType v go (Cast e _) = go e go (Lam x e) | isId x = arityLam x (go e) | otherwise = go e go (App e a) | isTypeArg a = go e | otherwise = arity_app a (go e) go (Tick t e) | not (tickishIsCode t) = go e -- Null alts: see Note [Empty case alternatives] in GHC.Core go (Case _ _ _ alts) | null alts = botArityType -- Give up on let, case. In particular, unlike arityType, -- we make no attempt to look inside let's. go _ = topArityType -- Specialised version of arityApp; all costs in ArityType are IsCheap -- See Note [exprArity for applications] -- NB: (1) coercions count as a value argument -- (2) we use the super-cheap exprIsTrivial rather than the -- more complicated and expensive exprIsCheap arity_app _ at@(AT [] _) = at arity_app arg at@(AT ((cost,_):lams) div) | assertPpr (cost == IsCheap) (ppr at $$ ppr arg) $ isDeadEndDiv div = AT lams div | exprIsTrivial arg = AT lams topDiv | otherwise = topArityType --------------- exprArity :: CoreExpr -> Arity -- ^ An approximate, even faster, version of 'cheapArityType' -- Roughly exprArity e = arityTypeArity (cheapArityType e) -- But it's a bit less clever about bottoms -- -- We do /not/ guarantee that exprArity e <= typeArity e -- You may need to do arity trimming after calling exprArity -- See Note [Arity trimming] -- Reason: if we do arity trimming here we have take exprType -- and that can be expensive if there is a large cast exprArity e = go e where go (Var v) = idArity v go (Lam x e) | isId x = go e + 1 | otherwise = go e go (Tick t e) | not (tickishIsCode t) = go e go (Cast e _) = go e go (App e (Type _)) = go e go (App f a) | exprIsTrivial a = (go f - 1) `max` 0 -- See Note [exprArity for applications] -- NB: coercions count as a value argument go _ = 0 --------------- exprIsDeadEnd :: CoreExpr 
-> Bool -- See Note [Bottoming expressions] -- This function is, in effect, just a specialised (and hence cheap) -- version of cheapArityType: -- exprIsDeadEnd e = case cheapArityType e of -- AT lams div -> null lams && isDeadEndDiv div -- See also exprBotStrictness_maybe, which uses cheapArityType exprIsDeadEnd e = go 0 e where go :: Arity -> CoreExpr -> Bool -- (go n e) = True <=> expr applied to n value args is bottom go _ (Lit {}) = False go _ (Type {}) = False go _ (Coercion {}) = False go n (App e a) | isTypeArg a = go n e | otherwise = go (n+1) e go n (Tick _ e) = go n e go n (Cast e _) = go n e go n (Let _ e) = go n e go n (Lam v e) | isTyVar v = go n e | otherwise = False go _ (Case _ _ _ alts) = null alts -- See Note [Empty case alternatives] in GHC.Core go n (Var v) | isDeadEndAppSig (idDmdSig v) n = True | isEmptyTy (idType v) = True | otherwise = False {- Note [Bottoming expressions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A bottoming expression is guaranteed to diverge, or raise an exception. We can test for it in two different ways, and exprIsDeadEnd checks for both of these situations: * Visibly-bottom computations. For example (error Int "Hello") is visibly bottom. The strictness analyser also finds out if a function diverges or raises an exception, and puts that info in its strictness signature. * Empty types. If a type is empty, its only inhabitant is bottom. For example: data T f :: T -> Bool f = \(x:t). case x of Bool {} Since T has no data constructors, the case alternatives are of course empty. However note that 'x' is not bound to a visibly-bottom value; it's the *type* that tells us it's going to diverge. A GADT may also be empty even though it has constructors: data T a where T1 :: a -> T Bool T2 :: T Int ...(case (x::T Char) of {})... Here (T Char) is uninhabited. A more realistic case is (Int ~ Bool), which is likewise uninhabited. 
Note [No free join points in arityType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we call arityType on this expression (EX1) \x . case x of True -> \y. e False -> $j 3 where $j is a join point. It really makes no sense to talk of the arity of this expression, because it has a free join point. In particular, we can't eta-expand the expression because we'd have do the same thing to the binding of $j, and we can't see that binding. If we had (EX2) \x. join $j y = blah case x of True -> \y. e False -> $j 3 then it would make perfect sense: we can determine $j's ArityType, and propagate it to the usage site as usual. But how can we get (EX1)? It doesn't make much sense, because $j can't be a join point under the \x anyway. So we make it a precondition of arityType that the argument has no free join-point Ids. (This is checked with an assert in the Var case of arityType.) Wrinkles * We /do/ allow free join point when doing findRhsArity for join-point right-hand sides. See Note [Arity for recursive join bindings] point (5) in GHC.Core.Opt.Simplify.Utils. * The invariant (no free join point in arityType) risks being invalidated by one very narrow special case: runRW# join $j y = blah runRW# (\s. case x of True -> \y. e False -> $j x) We have special magic in OccurAnal, and Simplify to allow continuations to move into the body of a runRW# call. So we are careful never to attempt to eta-expand the (\s.blah) in the argument to runRW#, at least not when there is a literal lambda there, so that OccurAnal has seen it and allowed join points bound outside. See Note [No eta-expansion in runRW#] in GHC.Core.Opt.Simplify.Iteration. Note [arityType for non-recursive let-bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For non-recursive let-bindings, we just get the arityType of the RHS, and extend the environment. That works nicely for things like this (#18793): go = \ ds. 
case ds_a2CF of { [] -> id : y ys -> case y of { GHC.Types.I# x -> let acc = go ys in case x ># 42# of { __DEFAULT -> acc 1# -> \x1. acc (negate x2) Here we want to get a good arity for `acc`, based on the ArityType of `go`. All this is particularly important for join points. Consider this (#18328) f x = join j y = case y of True -> \a. blah False -> \b. blah in case x of A -> j True B -> \c. blah C -> j False and suppose the join point is too big to inline. Now, what is the arity of f? If we inlined the join point, we'd definitely say "arity 2" because we are prepared to push case-scrutinisation inside a lambda. It's important that we extend the envt with j's ArityType, so that we can use that information in the A/C branch of the case. Note [arityType for recursive let-bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For /recursive/ bindings it's more difficult to call arityType (as we do in Note [arityType for non-recursive let-bindings]) because we don't have an ArityType to put in the envt for the recursively bound Ids. So we satisfy ourselves with whizzing up an ArityType from the idArity of the function, via idArityType. That is nearly equivalent to deleting the binder from the envt, at which point we'll call idArityType at the occurrences. But doing it here means (a) we only call idArityType once, no matter how many occurrences, and (b) we can check (in the arityType (Var v) case) that we don't mention free join-point Ids. See Note [No free join points in arityType]. But see Note [Arity for recursive join bindings] in GHC.Core.Opt.Simplify.Utils for dark corners. -} {- %************************************************************************ %* * The main eta-expander %* * %************************************************************************ We go for: f = \x1..xn -> N ==> f = \x1..xn y1..ym -> N y1..ym (n >= 0) where (in both cases) * The xi can include type variables * The yi are all value variables * N is a NORMAL FORM (i.e.
no redexes anywhere) wanting a suitable number of extra args. The biggest reason for doing this is for cases like f = \x -> case x of True -> \y -> e1 False -> \y -> e2 Here we want to get the lambdas together. A good example is the nofib program fibheaps, which gets 25% more allocation if you don't do this eta-expansion. We may have to sandwich some coerces between the lambdas to make the types work. exprEtaExpandArity looks through coerces when computing arity; and etaExpand adds the coerces as necessary when actually computing the expansion. Note [No crap in eta-expanded code] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The eta expander is careful not to introduce "crap". In particular, given a CoreExpr satisfying the 'CpeRhs' invariant (in CorePrep), it returns a CoreExpr satisfying the same invariant. See Note [Eta expansion and the CorePrep invariants] in CorePrep. This means the eta-expander has to do a bit of on-the-fly simplification but it's not too hard. The alternative, of relying on a subsequent clean-up phase of the Simplifier to de-crapify the result, means you can't really use it in CorePrep, which is painful. Note [Eta expansion for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The no-crap rule is very tiresome to guarantee when we have join points. Consider eta-expanding let j :: Int -> Int -> Bool j x = e in b The simple way is \(y::Int). (let j x = e in b) y The no-crap way is \(y::Int). let j' :: Int -> Bool j' x = e y in b[j'/j] y where I have written b[j'/j] to stress that j's type has changed. Note that (of course!) we have to push the application inside the RHS of the join as well as into the body. AND if j has an unfolding we have to push it into there too. AND j might be recursive... So for now I'm abandoning the no-crap rule in this case, conscious that this causes the ugly Wrinkle (EA1) of Note [Eta expansion of arguments in CorePrep]. (Moreover, I think that casts can make the no-crap rule fail too.) 
Note [Eta expansion and SCCs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that SCCs are not treated specially by etaExpand. If we have etaExpand 2 (\x -> scc "foo" e) = (\xy -> (scc "foo" e) y) So the costs of evaluating 'e' (not 'e y') are attributed to "foo" Note [Eta expansion and source notes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CorePrep puts floatable ticks outside of value applications, but not type applications. As a result we might be trying to eta-expand an expression like (src<...> v) @a which we want to lead to code like \x -> src<...> v @a x This means that we need to look through type applications and be ready to re-add floats on the top. Note [Eta expansion with ArityType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The etaExpandAT function takes an ArityType (not just an Arity) to guide eta-expansion. Why? Because we want to preserve one-shot info. Consider foo = \x. case x of True -> (\s{os}. blah) |> co False -> wubble We'll get an ArityType for foo of \?1.T. Then we want to eta-expand to foo = (\x. \eta{os}. (case x of ...as before...) eta) |> some_co That 'eta' binder is fresh, and we really want it to have the one-shot flag from the inner \s{os}. By expanding with the ArityType gotten from analysing the RHS, we achieve this neatly. This makes a big difference to the one-shot monad trick; see Note [The one-shot state monad trick] in GHC.Utils.Monad. -} -- | @etaExpand n e@ returns an expression with -- the same meaning as @e@, but with arity @n@. 
-- -- Given: -- -- > e' = etaExpand n e -- -- We should have that: -- -- > ty = exprType e = exprType e' etaExpand :: Arity -> CoreExpr -> CoreExpr etaExpand n orig_expr = eta_expand in_scope (replicate n NoOneShotInfo) orig_expr where in_scope = {-#SCC "eta_expand:in-scopeX" #-} mkInScopeSet (exprFreeVars orig_expr) etaExpandAT :: InScopeSet -> SafeArityType -> CoreExpr -> CoreExpr -- See Note [Eta expansion with ArityType] -- -- We pass in the InScopeSet from the simplifier to avoid recomputing -- it here, which can be jolly expensive if the casts are big -- In #18223 it took 10% of compile time just to do the exprFreeVars! etaExpandAT in_scope at orig_expr = eta_expand in_scope (arityTypeOneShots at) orig_expr -- etaExpand arity e = res -- Then 'res' has at least 'arity' lambdas at the top -- possibly with a cast wrapped around the outside -- See Note [Eta expansion with ArityType] -- -- etaExpand deals with for-alls. For example: -- etaExpand 1 E -- where E :: forall a. a -> a -- would return -- (/\b. \y::a -> E b y) eta_expand :: InScopeSet -> [OneShotInfo] -> CoreExpr -> CoreExpr eta_expand in_scope one_shots (Cast expr co) = mkCast (eta_expand in_scope one_shots expr) co -- This mkCast is important, because eta_expand might return an -- expression with a cast at the outside; and tryCastWorkerWrapper -- asssumes that we don't have nested casts. Makes a difference -- in compile-time for T18223 eta_expand in_scope one_shots orig_expr = go in_scope one_shots [] orig_expr where -- Strip off existing lambdas and casts before handing off to mkEtaWW -- This is mainly to avoid spending time cloning binders and substituting -- when there is actually nothing to do. 
It's slightly awkward to deal -- with casts here, apart from the topmost one, and they are rare, so -- if we find one we just hand off to mkEtaWW anyway -- Note [Eta expansion and SCCs] go _ [] _ _ = orig_expr -- Already has the specified arity; no-op go in_scope oss@(_:oss1) vs (Lam v body) | isTyVar v = go (in_scope `extendInScopeSet` v) oss (v:vs) body | otherwise = go (in_scope `extendInScopeSet` v) oss1 (v:vs) body go in_scope oss rev_vs expr = -- pprTrace "ee" (vcat [ppr in_scope', ppr top_bndrs, ppr eis]) $ retick $ etaInfoAbs top_eis $ etaInfoApp in_scope' sexpr eis where (in_scope', eis@(EI eta_bndrs mco)) = mkEtaWW oss (ppr orig_expr) in_scope (exprType expr) top_bndrs = reverse rev_vs top_eis = EI (top_bndrs ++ eta_bndrs) (mkPiMCos top_bndrs mco) -- Find ticks behind type apps. -- See Note [Eta expansion and source notes] -- I don't really understand this code SLPJ May 21 (expr', args) = collectArgs expr (ticks, expr'') = stripTicksTop tickishFloatable expr' sexpr = mkApps expr'' args retick expr = foldr mkTick expr ticks {- ********************************************************************* * * The EtaInfo mechanism mkEtaWW, etaInfoAbs, etaInfoApp * * ********************************************************************* -} {- Note [The EtaInfo mechanism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have (e :: ty) and we want to eta-expand it to arity N. This what eta_expand does. We do it in two steps: 1. mkEtaWW: from 'ty' and 'N' build a EtaInfo which describes the shape of the expansion necessary to expand to arity N. 2. Build the term \ v1..vn. e v1 .. vn where those abstractions and applications are described by the same EtaInfo. Specifically we build the term etaInfoAbs etas (etaInfoApp in_scope e etas) where etas :: EtaInfo etaInfoAbs builds the lambdas etaInfoApp builds the applications Note that the /same/ EtaInfo drives both etaInfoAbs and etaInfoApp To a first approximation EtaInfo is just [Var]. But casts complicate the question. 
If we have newtype N a = MkN (S -> a) axN :: N a ~ S -> a and e :: N (N Int) then the eta-expansion should look like (\(x::S) (y::S) -> (e |> co) x y) |> sym co where co :: N (N Int) ~ S -> S -> Int co = axN @(N Int) ; (S -> axN @Int) We want to get one cast, at the top, to account for all those nested newtypes. This is expressed by the EtaInfo type: data EtaInfo = EI [Var] MCoercionR Precisely, here is the (EtaInfo Invariant): EI bs co :: EtaInfo describes a particular eta-expansion, thus: Abstraction: (\b1 b2 .. bn. []) |> sym co Application: ([] |> co) b1 b2 .. bn e :: T co :: T ~R (t1 -> t2 -> .. -> tn -> tr) e = (\b1 b2 ... bn. (e |> co) b1 b2 .. bn) |> sym co Note [Check for reflexive casts in eta expansion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It turns out that the casts created by the above mechanism are often Refl. When casts are very deeply nested (as happens in #18223), the repetition of types can make the overall term very large. So there is a big payoff in cancelling out casts aggressively wherever possible. (See also Note [No crap in eta-expanded code].) This matters particularly in etaInfoApp, where we * Do beta-reduction on the fly * Use getArg_maybe to get a cast out of the way, so that we can do beta reduction Together this makes a big difference. Consider when e is case x of True -> (\x -> e1) |> c1 False -> (\p -> e2) |> c2 When we eta-expand this to arity 1, say, etaInfoAbs will wrap a (\eta) around the outside and use etaInfoApp to apply each alternative to 'eta'. We want to beta-reduce all that junk away. #18223 was a dramatic example in which the intermediate term was grotesquely huge, even though the next Simplifier iteration squashed it. Better to kill it at birth. 
The crucial spots in etaInfoApp are: * `checkReflexiveMCo` in the (Cast e co) case of `go` * `checkReflexiveMCo` in `pushCoArg` * Less important: checkReflexiveMCo in the final case of `go` Collectively these make a factor-of-5 difference to the total allocation of T18223, so take care if you change this stuff! Example: newtype N = MkN (Y->Z) f :: X -> N f = \(x::X). ((\(y::Y). blah) |> fco) where fco :: (Y->Z) ~ N mkEtaWW makes an EtaInfo of (EI [(eta1:X), (eta2:Y)] eta_co where eta_co :: (X->N) ~ (X->Y->Z) eta_co = ( -> nco) nco :: N ~ (Y->Z) -- Comes from topNormaliseNewType_maybe Now, when we push that eta_co inward in etaInfoApp: * In the (Cast e co) case, the 'fco' and 'nco' will meet, and should cancel. * When we meet the (\y.e) we want no cast on the y. -} -------------- data EtaInfo = EI [Var] MCoercionR -- See Note [The EtaInfo mechanism] instance Outputable EtaInfo where ppr (EI vs mco) = text "EI" <+> ppr vs <+> parens (ppr mco) etaInfoApp :: InScopeSet -> CoreExpr -> EtaInfo -> CoreExpr -- (etaInfoApp s e (EI bs mco) returns something equivalent to -- ((substExpr s e) |> mco b1 .. bn) -- See Note [The EtaInfo mechanism] -- -- NB: With very deeply nested casts, this function can be expensive -- In T18223, this function alone costs 15% of allocation, all -- spent in the calls to substExprSC and substBindSC etaInfoApp in_scope expr eis = go (mkEmptySubst in_scope) expr eis where go :: Subst -> CoreExpr -> EtaInfo -> CoreExpr -- 'go' pushed down the eta-infos into the branch of a case -- and the body of a let; and does beta-reduction if possible -- go subst fun co [b1,..,bn] returns (subst(fun) |> co) b1 .. 
bn go subst (Tick t e) eis = Tick (substTickish subst t) (go subst e eis) go subst (Cast e co) (EI bs mco) = go subst e (EI bs mco') where mco' = checkReflexiveMCo (Core.substCo subst co `mkTransMCoR` mco) -- See Note [Check for reflexive casts in eta expansion] go subst (Case e b ty alts) eis = Case (Core.substExprSC subst e) b1 ty' alts' where (subst1, b1) = Core.substBndr subst b alts' = map subst_alt alts ty' = etaInfoAppTy (substTyUnchecked subst ty) eis subst_alt (Alt con bs rhs) = Alt con bs' (go subst2 rhs eis) where (subst2,bs') = Core.substBndrs subst1 bs go subst (Let b e) eis | not (isJoinBind b) -- See Note [Eta expansion for join points] = Let b' (go subst' e eis) where (subst', b') = Core.substBindSC subst b -- Beta-reduction if possible, pushing any intervening casts past -- the argument. See Note [The EtaInfo mechanism] go subst (Lam v e) (EI (b:bs) mco) | Just (arg,mco') <- pushMCoArg mco (varToCoreExpr b) = go (Core.extendSubst subst v arg) e (EI bs mco') -- Stop pushing down; just wrap the expression up -- See Note [Check for reflexive casts in eta expansion] go subst e (EI bs mco) = Core.substExprSC subst e `mkCastMCo` checkReflexiveMCo mco `mkVarApps` bs -------------- etaInfoAppTy :: Type -> EtaInfo -> Type -- If e :: ty -- then etaInfoApp e eis :: etaInfoApp ty eis etaInfoAppTy ty (EI bs mco) = applyTypeToArgs (text "etaInfoAppTy") ty1 (map varToCoreExpr bs) where ty1 = case mco of MRefl -> ty MCo co -> coercionRKind co -------------- etaInfoAbs :: EtaInfo -> CoreExpr -> CoreExpr -- See Note [The EtaInfo mechanism] etaInfoAbs (EI bs mco) expr = (mkLams bs expr) `mkCastMCo` mkSymMCo mco -------------- -- | @mkEtaWW n _ fvs ty@ will compute the 'EtaInfo' necessary for eta-expanding -- an expression @e :: ty@ to take @n@ value arguments, where @fvs@ are the -- free variables of @e@. 
-- -- Note that this function is entirely unconcerned about cost centres and other -- semantically-irrelevant source annotations, so call sites must take care to -- preserve that info. See Note [Eta expansion and SCCs]. mkEtaWW :: [OneShotInfo] -- ^ How many value arguments to eta-expand -> SDoc -- ^ The pretty-printed original expression, for warnings. -> InScopeSet -- ^ A super-set of the free vars of the expression to eta-expand. -> Type -> (InScopeSet, EtaInfo) -- ^ The variables in 'EtaInfo' are fresh wrt. to the incoming 'InScopeSet'. -- The outgoing 'InScopeSet' extends the incoming 'InScopeSet' with the -- fresh variables in 'EtaInfo'. mkEtaWW orig_oss ppr_orig_expr in_scope orig_ty = go 0 orig_oss empty_subst orig_ty where empty_subst = mkEmptySubst in_scope go :: Int -- For fresh names -> [OneShotInfo] -- Number of value args to expand to -> Subst -> Type -- We are really looking at subst(ty) -> (InScopeSet, EtaInfo) -- (go [o1,..,on] subst ty) = (in_scope, EI [b1,..,bn] co) -- co :: subst(ty) ~ b1_ty -> ... -> bn_ty -> tr go _ [] subst _ ----------- Done! No more expansion needed = (getSubstInScope subst, EI [] MRefl) go n oss@(one_shot:oss1) subst ty ----------- Forall types (forall a. 
ty) | Just (Bndr tcv vis, ty') <- splitForAllForAllTyBinder_maybe ty , (subst', tcv') <- Type.substVarBndr subst tcv , let oss' | isTyVar tcv = oss | otherwise = oss1 -- A forall can bind a CoVar, in which case -- we consume one of the [OneShotInfo] , (in_scope, EI bs mco) <- go n oss' subst' ty' = (in_scope, EI (tcv' : bs) (mkEtaForAllMCo (Bndr tcv' vis) ty' mco)) ----------- Function types (t1 -> t2) | Just (_af, mult, arg_ty, res_ty) <- splitFunTy_maybe ty , typeHasFixedRuntimeRep arg_ty -- See Note [Representation polymorphism invariants] in GHC.Core -- See also test case typecheck/should_run/EtaExpandLevPoly , (subst', eta_id) <- freshEtaId n subst (Scaled mult arg_ty) -- Avoid free vars of the original expression , let eta_id' = eta_id `setIdOneShotInfo` one_shot , (in_scope, EI bs mco) <- go (n+1) oss1 subst' res_ty = (in_scope, EI (eta_id' : bs) (mkFunResMCo eta_id' mco)) ----------- Newtypes -- Given this: -- newtype T = MkT ([T] -> Int) -- Consider eta-expanding this -- eta_expand 1 e T -- We want to get -- coerce T (\x::[T] -> (coerce ([T]->Int) e) x) | Just (co, ty') <- topNormaliseNewType_maybe ty , -- co :: ty ~ ty' let co' = Type.substCo subst co -- Remember to apply the substitution to co (#16979) -- (or we could have applied to ty, but then -- we'd have had to zap it for the recursive call) , (in_scope, EI bs mco) <- go n oss subst ty' -- mco :: subst(ty') ~ b1_ty -> ... -> bn_ty -> tr = (in_scope, EI bs (mkTransMCoR co' mco)) | otherwise -- We have an expression of arity > 0, -- but its type isn't a function, or a binder -- does not have a fixed runtime representation = warnPprTrace True "mkEtaWW" ((ppr orig_oss <+> ppr orig_ty) $$ ppr_orig_expr) (getSubstInScope subst, EI [] MRefl) -- This *can* legitimately happen: -- e.g. coerce Int (\x. x) Essentially the programmer is -- playing fast and loose with types (Happy does this a lot). -- So we simply decline to eta-expand. 
Otherwise we'd end up -- with an explicit lambda having a non-function type mkEtaForAllMCo :: ForAllTyBinder -> Type -> MCoercion -> MCoercion mkEtaForAllMCo (Bndr tcv vis) ty mco = case mco of MRefl | vis == coreTyLamForAllTyFlag -> MRefl | otherwise -> mk_fco (mkRepReflCo ty) MCo co -> mk_fco co where mk_fco co = MCo (mkForAllCo tcv vis coreTyLamForAllTyFlag (mkNomReflCo (varType tcv)) co) -- coreTyLamForAllTyFlag: See Note [The EtaInfo mechanism], particularly -- the (EtaInfo Invariant). (sym co) wraps a lambda that always has -- a ForAllTyFlag of coreTyLamForAllTyFlag; see Note [Required foralls in Core] -- in GHC.Core.TyCo.Rep {- ************************************************************************ * * Eta reduction * * ************************************************************************ Note [Eta reduction makes sense] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC's eta reduction transforms \x y. <fun> x y ---> <fun> We discuss when this is /sound/ in Note [Eta reduction soundness]. But even assuming it is sound, when is it /desirable/. That is what we discuss here. This test is made by `ok_fun` in tryEtaReduce. 1. We want to eta-reduce only if we get all the way to a trivial expression; we don't want to remove extra lambdas unless we are going to avoid allocating this thing altogether. Trivial means *including* casts and type lambdas: * `\x. f x |> co --> f |> (ty(x) -> co)` (provided `co` doesn't mention `x`) * `/\a. \x. f @(Maybe a) x --> /\a. f @(Maybe a)` See Note [Do not eta reduce PAPs] for why we insist on a trivial head. Of course, eta reduction is not always sound. See Note [Eta reduction soundness] for when it is. When there are multiple arguments, we might get multiple eta-redexes. Example: \x y. e x y ==> { reduce \y. (e x) y in context \x._ } \x. e x ==> { reduce \x. e x in context _ } e And (1) implies that we never want to stop with `\x. e x`, because that is not a trivial expression.
So in practice, the implementation works by considering a whole group of leading lambdas to reduce. These delicacies are why we don't simply use 'exprIsTrivial' and 'exprIsHNF' in 'tryEtaReduce'. Alas. Note [Eta reduction soundness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC's eta reduction transforms \x y. <fun> x y ---> <fun> For soundness, we obviously require that `x` and `y` do not occur free. But what /other/ restrictions are there for eta reduction to be sound? We discuss separately what it means for eta reduction to be /desirable/, in Note [Eta reduction makes sense]. Eta reduction is *not* a sound transformation in general, because it may change termination behavior if *value* lambdas are involved: `bot` /= `\x. bot x` (as can be observed by a simple `seq`) The past has shown that oversight of this fact can not only lead to endless loops or exceptions, but also straight out *segfaults*. Nevertheless, we can give the following criteria for when it is sound to perform eta reduction on an expression with n leading lambdas `\xs. e xs` (checked in 'is_eta_reduction_sound' in 'tryEtaReduce', which focuses on the case where `e` is trivial): (A) It is sound to eta-reduce n arguments as long as n does not exceed the `exprArity` of `e`. (Needs Arity analysis.) This criterion exploits information about how `e` is *defined*. Example: If `e = \x. bot` then we know it won't diverge until it is called with one argument. Hence it is safe to eta-reduce `\x. e x` to `e`. By contrast, it would be *unsound* to eta-reduce 2 args, `\x y. e x y` to `e`: `e 42` diverges when `(\x y. e x y) 42` does not. (S) It is sound to eta-reduce n arguments in an evaluation context in which all calls happen with at least n arguments. (Needs Strictness analysis.) NB: This treats evaluations like a call with 0 args. NB: This criterion exploits information about how `e` is *used*. Example: Given a function `g` like `g c = Just (c 1 2 + c 2 3)` it is safe to eta-reduce the arg in `g (\x y.
e x y)` to `g e` without knowing *anything* about `e` (perhaps it's a parameter occ itself), simply because `g` always calls its parameter with 2 arguments. It is also safe to eta-reduce just one arg, e.g., `g (\x. e x)` to `g e`. By contrast, it would be *unsound* to eta-reduce 3 args in a call site like `g (\x y z. e x y z)` to `g e`, because that diverges when `e = \x y. bot`. Could we relax to "*At least one call in the same trace* is with n args"? No. Consider what happens for ``g2 c = c True `seq` c False 42`` Here, `g2` will call `c` with 2 arguments (if there is a call at all). But it is unsound to eta-reduce the arg in `g2 (\x y. e x y)` to `g2 e` when `e = \x. if x then bot else id`, because the latter will diverge when the former would not. Fortunately, the strictness analyser will report "Not always called with two arguments" for `g2` and we won't eta-expand. See Note [Eta reduction based on evaluation context] for the implementation details. This criterion is tested extensively in T21261. (R) Note [Eta reduction in recursive RHSs] tells us that we should not eta-reduce `f` in its own RHS and describes our fix. There we have `f = \x. f x` and we should not eta-reduce to `f=f`. Which might change a terminating program (think @f `seq` e@) to a non-terminating one. (E) (See fun_arity in tryEtaReduce.) As a perhaps special case on the boundary of (A) and (S), when we know that a fun binder `f` is in WHNF, we simply assume it has arity 1 and apply (A). Example: g f = f `seq` \x. f x Here it's sound to eta-reduce `\x. f x` to `f`, because `f` can't be bottom after the `seq`. This turned up in #7542. T. If the binders are all type arguments, it's always safe to eta-reduce, regardless of the arity of f. /\a b. f @a @b --> f 2. Type and dictionary abstraction. Regardless of whether 'f' is a value, it is always sound to reduce /type lambdas/, thus: (/\a -> f a) --> f Moreover, we always want to, because it makes RULEs apply more often: This RULE: `forall g.
foldr (build (/\a -> g a))` should match `foldr (build (/\b -> ...something complex...))` and the simplest way to do so is eta-reduce `/\a -> g a` in the RULE to `g`. More debatably, we extend this to dictionary arguments too, because the type checker can insert these eta-expanded versions, with both type and dictionary lambdas; hence the slightly ad-hoc (all ok_lam bndrs). That is, we eta-reduce \(d::Num a). f d --> f regardless of f's arity. Its not clear whether or not this is important, and it is not in general sound. But that's the way it is right now. And here are a few more technical criteria for when it is *not* sound to eta-reduce that are specific to Core and GHC: (J) We may not undersaturate join points. See Note [Invariants on join points] in GHC.Core, and #20599. (B) We may not undersaturate functions with no binding. See Note [Eta expanding primops]. (W) We may not undersaturate StrictWorkerIds. See Note [CBV Function Ids] in GHC.Types.Id.Info. Here is a list of historic accidents surrounding unsound eta-reduction: * Consider f = \x.f x h y = case (case y of { True -> f `seq` True; False -> False }) of True -> ...; False -> ... If we (unsoundly) eta-reduce f to get f=f, the strictness analyser says f=bottom, and replaces the (f `seq` True) with just (f `cast` unsafe-co). [SG in 2022: I don't think worker/wrapper would do this today.] BUT, as things stand, 'f' got arity 1, and it *keeps* arity 1 (perhaps also wrongly). So CorePrep eta-expands the definition again, so that it does not terminate after all. Result: seg-fault because the boolean case actually gets a function value. See #1947. * Never *reduce* arity. For example f = \xy. g x y Then if h has arity 1 we don't want to eta-reduce because then f's arity would decrease, and that is bad [SG in 2022: I don't understand this point. There is no `h`, perhaps that should have been `g`. 
Even then, this proposed eta-reduction is invalid by criterion (A), which might actually be the point this anecdote is trying to make. Perhaps the "no arity decrease" idea is also related to Note [Arity robustness]?] Note [Do not eta reduce PAPs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ I considered eta-reducing if the result is a PAP: \x. f e1 e2 x ==> f e1 e2 This reduces clutter, sometimes a lot. See Note [Do not eta-expand PAPs] in GHC.Core.Opt.Simplify.Utils, where we are careful not to eta-expand a PAP. If eta-expanding is bad, then eta-reducing is good! Also the code generator likes eta-reduced PAPs; see GHC.CoreToStg.Prep Note [No eta reduction needed in rhsToBody]. But note that we don't want to eta-reduce \x y. f <Expensive> x y to f <Expensive> The former has arity 2, and repeats <Expensive> for every call of the function; the latter has arity 0, and shares <Expensive>. We don't want to change behaviour. Hence the call to exprIsCheap in ok_fun. I noticed this when examining #18993 and, although it is delicate, eta-reducing to a PAP happens to fix the regression in #18993. HOWEVER, if we transform \x. f y x ==> f y that might mean that f isn't saturated any more, and does not inline. This led to some other regressions. TL;DR currently we do /not/ eta reduce if the result is a PAP. Note [Eta reduction with casted arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (\(x:t3). f (x |> g)) :: t3 -> t2 where f :: t1 -> t2 g :: t3 ~ t1 This should be eta-reduced to f |> (sym g -> t2) So we need to accumulate a coercion, pushing it inward (past variable arguments only) thus: f (x |> co_arg) |> co --> (f |> (sym co_arg -> co)) x f (x:t) |> co --> (f |> (t -> co)) x f @ a |> co --> (f |> (forall a.co)) @ a f @ (g:t1~t2) |> co --> (f |> (t1~t2 => co)) @ (g:t1~t2) These are the equations for ok_arg. Note [Eta reduction with casted function] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since we are pushing a coercion inwards, it is easy to accommodate (\xy. (f x |> g) y) (\xy.
(f x y) |> g) See the `(Cast e co)` equation for `go` in `tryEtaReduce`. The eta-expander pushes those casts outwards, so you might think we won't ever see a cast here, but if we have \xy. (f x y |> g) we will call tryEtaReduce [x,y] (f x y |> g), and we'd like that to work. This happens in GHC.Core.Opt.Simplify.Utils.mkLam, where eta-expansion may be turned off (by sm_eta_expand). Note [Eta reduction based on evaluation context] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Eta reduction soundness], criterion (S) allows us to eta-reduce `g (\x y. e x y)` to `g e` when we know that `g` always calls its parameter with at least 2 arguments. So how do we read that off `g`'s demand signature? Let's take the simple example of #21261, where `g` (actually, `f`) is defined as g c = c 1 2 + c 3 4 Then this is how the pieces are put together: * Demand analysis infers `` for `g`'s demand signature * When the Simplifier next simplifies the argument in `g (\x y. e x y)`, it looks up the *evaluation context* of the argument in the form of the sub-demand `C(S,C(1,L))` and stores it in the 'SimplCont'. (Why does it drop the outer evaluation cardinality of the demand, `S`? Because it's irrelevant! When we simplify an expression, we do so under the assumption that it is currently under evaluation.) This sub-demand literally says "Whenever this expression is evaluated, it is called with at least two arguments, potentially multiple times". * Then the simplifier takes apart the lambda and simplifies the lambda group and then calls 'tryEtaReduce' when rebuilding the lambda, passing the evaluation context `C(S,C(1,L))` along. Then we simply peel off 2 call sub-demands `Cn` and see whether all of the n's (here: `S=C_1N` and `1=C_11`) were strict. And strict they are! Thus, it will eta-reduce `\x y. e x y` to `e`. Note [Eta reduction in recursive RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following recursive function: f = \x. ....g (\y. f y).... 
The recursive call of f in its own RHS seems like a fine opportunity for eta-reduction because f has arity 1. And often it is! Alas, that is unsound in general if the eta-reduction happens in a tail context. Making the arity visible in the RHS allows us to eta-reduce f = \x -> f x to f = f which means we optimise terminating programs like (f `seq` ()) into non-terminating ones. Nor is this problem just for tail calls. Consider f = id (\x -> f x) where we have (for some reason) not yet inlined `id`. We must not eta-reduce to f = id f because that will then simplify to `f = f` as before. An immediate idea might be to look at whether the called function is a local loopbreaker and refrain from eta-expanding. But that doesn't work for mutually recursive function like in #21652: f = g g* x = f x Here, g* is the loopbreaker but f isn't. What can we do? Fix 1: Zap `idArity` when analysing recursive RHSs and re-attach the info when entering the let body. Has the disadvantage that other transformations which make use of arity (such as dropping of `seq`s when arity > 0) will no longer work in the RHS. Plus it requires non-trivial refactorings to both the simple optimiser (in the way `subst_opt_bndr` is used) as well as the Simplifier (in the way `simplRecBndrs` and `simplRecJoinBndrs` is used), modifying the SimplEnv's substitution twice in the process. A very complicated stop-gap. Fix 2: Pass the set of enclosing recursive binders to `tryEtaReduce`; these are the ones we should not eta-reduce. All call-site must maintain this set. Example: rec { f1 = ....rec { g = ... (\x. g x)...(\y. f2 y)... }... ; f2 = ...f1... } when eta-reducing those inner lambdas, we need to know that we are in the rec group for {f1, f2, g}. This is very much like the solution in Note [Speculative evaluation] in GHC.CoreToStg.Prep. It is a bit tiresome to maintain this info, because it means another field in SimplEnv and SimpleOptEnv. 
We implement Fix (2) because of it isn't as complicated to maintain as (1). Plus, it is the correct fix to begin with. After all, the arity is correct, but doing the transformation isn't. The moving parts are: * A field `scRecIds` in `SimplEnv` tracks the enclosing recursive binders * We extend the `scRecIds` set in `GHC.Core.Opt.Simplify.simplRecBind` * We consult the set in `is_eta_reduction_sound` in `tryEtaReduce` The situation is very similar to Note [Speculative evaluation] which has the same fix. -} -- | `tryEtaReduce [x,y,z] e sd` returns `Just e'` if `\x y z -> e` is evaluated -- according to `sd` and can soundly and gainfully be eta-reduced to `e'`. -- See Note [Eta reduction soundness] -- and Note [Eta reduction makes sense] when that is the case. tryEtaReduce :: UnVarSet -> [Var] -> CoreExpr -> SubDemand -> Maybe CoreExpr -- Return an expression equal to (\bndrs. body) tryEtaReduce rec_ids bndrs body eval_sd = go (reverse bndrs) body (mkRepReflCo (exprType body)) where incoming_arity = count isId bndrs -- See Note [Eta reduction makes sense], point (2) go :: [Var] -- Binders, innermost first, types [a3,a2,a1] -> CoreExpr -- Of type tr -> Coercion -- Of type tr ~ ts -> Maybe CoreExpr -- Of type a1 -> a2 -> a3 -> ts -- See Note [Eta reduction with casted arguments] -- for why we have an accumulating coercion -- -- Invariant: (go bs body co) returns an expression -- equivalent to (\(reverse bs). 
(body |> co)) -- See Note [Eta reduction with casted function] go bs (Cast e co1) co2 = go bs e (co1 `mkTransCo` co2) go bs (Tick t e) co | tickishFloatable t = fmap (Tick t) $ go bs e co -- Float app ticks: \x -> Tick t (e x) ==> Tick t e go (b : bs) (App fun arg) co | Just (co', ticks) <- ok_arg b arg co (exprType fun) = fmap (flip (foldr mkTick) ticks) $ go bs fun co' -- Float arg ticks: \x -> e (Tick t x) ==> Tick t e go remaining_bndrs fun co | all isTyVar remaining_bndrs -- If all the remaining_bnrs are tyvars, then the etad_exp -- will be trivial, which is what we want. -- e.g. We might have /\a \b. f [a] b, and we want to -- eta-reduce to /\a. f [a] -- We don't want to give up on this one: see #20040 -- See Note [Eta reduction makes sense], point (1) , remaining_bndrs `ltLength` bndrs -- Only reply Just if /something/ has happened , ok_fun fun , let used_vars = exprFreeVars fun `unionVarSet` tyCoVarsOfCo co reduced_bndrs = mkVarSet (dropList remaining_bndrs bndrs) -- reduced_bndrs are the ones we are eta-reducing away , used_vars `disjointVarSet` reduced_bndrs -- Check for any of the reduced_bndrs (about to be dropped) -- free in the result, including the accumulated coercion. -- See Note [Eta reduction makes sense], intro and point (1) -- NB: don't compute used_vars from exprFreeVars (mkCast fun co) -- because the latter may be ill formed if the guard fails (#21801) = Just (mkLams (reverse remaining_bndrs) (mkCast fun co)) go _remaining_bndrs _fun _ = -- pprTrace "tER fail" (ppr _fun $$ ppr _remaining_bndrs) $ Nothing --------------- -- See Note [Eta reduction makes sense], point (1) ok_fun (App fun (Type {})) = ok_fun fun ok_fun (Cast fun _) = ok_fun fun ok_fun (Tick _ expr) = ok_fun expr ok_fun (Var fun_id) = is_eta_reduction_sound fun_id ok_fun _fun = False --------------- -- See Note [Eta reduction soundness], this is THE place to check soundness! 
is_eta_reduction_sound fun | fun `elemUnVarSet` rec_ids -- Criterion (R) = False -- Don't eta-reduce in fun in its own recursive RHSs | cantEtaReduceFun fun -- Criteria (J), (W), (B) = False -- Function can't be eta reduced to arity 0 -- without violating invariants of Core and GHC | otherwise = -- Check that eta-reduction won't make the program stricter... fun_arity fun >= incoming_arity -- Criterion (A) and (E) || all_calls_with_arity incoming_arity -- Criterion (S) || all ok_lam bndrs -- Criterion (T) all_calls_with_arity n = isStrict (fst $ peelManyCalls n eval_sd) -- See Note [Eta reduction based on evaluation context] --------------- fun_arity fun | arity > 0 = arity | isEvaldUnfolding (idUnfolding fun) = 1 -- See Note [Eta reduction soundness], criterion (E) | otherwise = 0 where arity = idArity fun --------------- ok_lam v = isTyVar v || isEvVar v -- See Note [Eta reduction makes sense], point (2) --------------- ok_arg :: Var -- Of type bndr_t -> CoreExpr -- Of type arg_t -> Coercion -- Of kind (t1~t2) -> Type -- Type (arg_t -> t1) of the function -- to which the argument is supplied -> Maybe (Coercion -- Of type (arg_t -> t1 ~ bndr_t -> t2) -- (and similarly for tyvars, coercion args) , [CoreTickish]) -- See Note [Eta reduction with casted arguments] ok_arg bndr (Type arg_ty) co fun_ty | Just tv <- getTyVar_maybe arg_ty , bndr == tv = case splitForAllForAllTyBinder_maybe fun_ty of Just (Bndr _ vis, _) -> Just (fco, []) where !fco = mkForAllCo tv vis coreTyLamForAllTyFlag kco co -- The lambda we are eta-reducing always has visibility -- 'coreTyLamForAllTyFlag' which may or may not match -- the visibility on the inner function (#24014) kco = mkNomReflCo (tyVarKind tv) Nothing -> pprPanic "tryEtaReduce: type arg to non-forall type" (text "fun:" <+> ppr bndr $$ text "arg:" <+> ppr arg_ty $$ text "fun_ty:" <+> ppr fun_ty) ok_arg bndr (Var v) co fun_ty | bndr == v , let mult = idMult bndr , Just (_af, fun_mult, _, _) <- splitFunTy_maybe fun_ty , mult `eqType` 
fun_mult -- There is no change in multiplicity, otherwise we must abort = Just (mkFunResCo Representational bndr co, []) ok_arg bndr (Cast e co_arg) co fun_ty | (ticks, Var v) <- stripTicksTop tickishFloatable e , Just (_, fun_mult, _, _) <- splitFunTy_maybe fun_ty , bndr == v , fun_mult `eqType` idMult bndr = Just (mkFunCoNoFTF Representational (multToCo fun_mult) (mkSymCo co_arg) co, ticks) -- The simplifier combines multiple casts into one, -- so we can have a simple-minded pattern match here ok_arg bndr (Tick t arg) co fun_ty | tickishFloatable t, Just (co', ticks) <- ok_arg bndr arg co fun_ty = Just (co', t:ticks) ok_arg _ _ _ _ = Nothing -- | Can we eta-reduce the given function -- See Note [Eta reduction soundness], criteria (B), (J), and (W). cantEtaReduceFun :: Id -> Bool cantEtaReduceFun fun = hasNoBinding fun -- (B) -- Don't undersaturate functions with no binding. || isJoinId fun -- (J) -- Don't undersaturate join points. -- See Note [Invariants on join points] in GHC.Core, and #20599 || (isJust (idCbvMarks_maybe fun)) -- (W) -- Don't undersaturate StrictWorkerIds. -- See Note [CBV Function Ids] in GHC.Types.Id.Info. {- ********************************************************************* * * The "push rules" * * ************************************************************************ Here we implement the "push rules" from FC papers: * The push-argument rules, where we can move a coercion past an argument. We have (fun |> co) arg and we want to transform it to (fun arg') |> co' for some suitable co' and transformed arg'. * The PushK rule for data constructors. We have (K e1 .. en) |> co and we want to transform to (K e1' .. 
en') by pushing the coercion into the arguments -} pushCoArgs :: CoercionR -> [CoreArg] -> Maybe ([CoreArg], MCoercion) pushCoArgs co [] = return ([], MCo co) pushCoArgs co (arg:args) = do { (arg', m_co1) <- pushCoArg co arg ; case m_co1 of MCo co1 -> do { (args', m_co2) <- pushCoArgs co1 args ; return (arg':args', m_co2) } MRefl -> return (arg':args, MRefl) } pushMCoArg :: MCoercionR -> CoreArg -> Maybe (CoreArg, MCoercion) pushMCoArg MRefl arg = Just (arg, MRefl) pushMCoArg (MCo co) arg = pushCoArg co arg pushCoArg :: CoercionR -> CoreArg -> Maybe (CoreArg, MCoercion) -- We have (fun |> co) arg, and we want to transform it to -- (fun arg) |> co -- This may fail, e.g. if (fun :: N) where N is a newtype -- C.f. simplCast in GHC.Core.Opt.Simplify -- 'co' is always Representational pushCoArg co arg | Type ty <- arg = do { (ty', m_co') <- pushCoTyArg co ty ; return (Type ty', m_co') } | otherwise = do { (arg_mco, m_co') <- pushCoValArg co ; let arg_mco' = checkReflexiveMCo arg_mco -- checkReflexiveMCo: see Note [Check for reflexive casts in eta expansion] -- The coercion is very often (arg_co -> res_co), but without -- the argument coercion actually being ReflCo ; return (arg `mkCastMCo` arg_mco', m_co') } pushCoTyArg :: CoercionR -> Type -> Maybe (Type, MCoercionR) -- We have (fun |> co) @ty -- Push the coercion through to return -- (fun @ty') |> co' -- 'co' is always Representational -- If the returned coercion is Nothing, then it would have been reflexive; -- it's faster not to compute it, though. pushCoTyArg co ty -- The following is inefficient - don't do `eqType` here, the coercion -- optimizer will take care of it. See #14737. -- -- | tyL `eqType` tyR -- -- = Just (ty, Nothing) | isReflCo co = Just (ty, MRefl) | isForAllTy_ty tyL = assertPpr (isForAllTy_ty tyR) (ppr co $$ ppr ty) $ Just (ty `mkCastTy` co1, MCo co2) | otherwise = Nothing where Pair tyL tyR = coercionKind co -- co :: tyL ~R tyR -- tyL = forall (a1 :: k1). ty1 -- tyR = forall (a2 :: k2). 
ty2 co1 = mkSymCo (mkSelCo SelForAll co) -- co1 :: k2 ~N k1 -- Note that SelCo extracts a Nominal equality between the -- kinds of the types related by a coercion between forall-types. -- See the SelCo case in GHC.Core.Lint. co2 = mkInstCo co (mkGReflLeftCo Nominal ty co1) -- co2 :: ty1[ (ty|>co1)/a1 ] ~R ty2[ ty/a2 ] -- Arg of mkInstCo is always nominal, hence Nominal -- | If @pushCoValArg co = Just (co_arg, co_res)@, then -- -- > (\x.body) |> co = (\y. let { x = y |> co_arg } in body) |> co_res) -- -- or, equivalently -- -- > (fun |> co) arg = (fun (arg |> co_arg)) |> co_res -- -- If the LHS is well-typed, then so is the RHS. In particular, the argument -- @arg |> co_arg@ is guaranteed to have a fixed 'RuntimeRep', in the sense of -- Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. pushCoValArg :: CoercionR -> Maybe (MCoercionR, MCoercionR) pushCoValArg co -- The following is inefficient - don't do `eqType` here, the coercion -- optimizer will take care of it. See #14737. -- -- | tyL `eqType` tyR -- -- = Just (mkRepReflCo arg, Nothing) | isReflCo co = Just (MRefl, MRefl) | isFunTy tyL , (_, co1, co2) <- decomposeFunCo co -- If co :: (tyL1 -> tyL2) ~ (tyR1 -> tyR2) -- then co1 :: tyL1 ~ tyR1 -- co2 :: tyL2 ~ tyR2 , typeHasFixedRuntimeRep new_arg_ty -- We can't push the coercion inside if it would give rise to -- a representation-polymorphic argument. 
= assertPpr (isFunTy tyL && isFunTy tyR) (vcat [ text "co:" <+> ppr co , text "old_arg_ty:" <+> ppr old_arg_ty , text "new_arg_ty:" <+> ppr new_arg_ty ]) $ Just (coToMCo (mkSymCo co1), coToMCo co2) -- Critically, coToMCo to checks for ReflCo; the whole coercion may not -- be reflexive, but either of its components might be -- We could use isReflexiveCo, but it's not clear if the benefit -- is worth the cost, and it makes no difference in #18223 | otherwise = Nothing where old_arg_ty = funArgTy tyR new_arg_ty = funArgTy tyL Pair tyL tyR = coercionKind co pushCoercionIntoLambda :: HasDebugCallStack => InScopeSet -> Var -> CoreExpr -> CoercionR -> Maybe (Var, CoreExpr) -- This implements the Push rule from the paper on coercions -- (\x. e) |> co -- ===> -- (\x'. e |> co') pushCoercionIntoLambda in_scope x e co | assert (not (isTyVar x) && not (isCoVar x)) True , Pair s1s2 t1t2 <- coercionKind co , Just {} <- splitFunTy_maybe s1s2 , Just (_, w1, t1,_t2) <- splitFunTy_maybe t1t2 , (_, co1, co2) <- decomposeFunCo co , typeHasFixedRuntimeRep t1 -- We can't push the coercion into the lambda if it would create -- a representation-polymorphic binder. = let -- Should we optimize the coercions here? -- Otherwise they might not match too well x' = x `setIdType` t1 `setIdMult` w1 in_scope' = in_scope `extendInScopeSet` x' subst = extendIdSubst (mkEmptySubst in_scope') x (mkCast (Var x') (mkSymCo co1)) -- We substitute x' for x, except we need to preserve types. -- The types are as follows: -- x :: s1, x' :: t1, co1 :: s1 ~# t1, -- so we extend the substitution with x |-> (x' |> sym co1). in Just (x', substExpr subst e `mkCast` co2) | otherwise = Nothing pushCoDataCon :: DataCon -> [CoreExpr] -> MCoercion -> Maybe (DataCon , [Type] -- Universal type args , [CoreExpr]) -- All other args incl existentials -- Implement the KPush reduction rule as described in "Down with kinds" -- The transformation applies iff we have -- (C e1 ... en) `cast` co -- where co :: (T t1 .. 
tn) ~ to_ty -- The left-hand one must be a T, because exprIsConApp returned True -- but the right-hand one might not be. (Though it usually will.) pushCoDataCon dc dc_args MRefl = Just $! (push_dc_refl dc dc_args) pushCoDataCon dc dc_args (MCo co) = push_dc_gen dc dc_args co (coercionKind co) push_dc_refl :: DataCon -> [CoreExpr] -> (DataCon, [Type], [CoreExpr]) push_dc_refl dc dc_args = (dc, map exprToType univ_ty_args, rest_args) where !(univ_ty_args, rest_args) = splitAtList (dataConUnivTyVars dc) dc_args push_dc_gen :: DataCon -> [CoreExpr] -> Coercion -> Pair Type -> Maybe (DataCon, [Type], [CoreExpr]) push_dc_gen dc dc_args co (Pair from_ty to_ty) | from_ty `eqType` to_ty -- try cheap test first = Just $! (push_dc_refl dc dc_args) | Just (to_tc, to_tc_arg_tys) <- splitTyConApp_maybe to_ty , to_tc == dataConTyCon dc -- These two tests can fail; we might see -- (C x y) `cast` (g :: T a ~ S [a]), -- where S is a type function. In fact, exprIsConApp -- will probably not be called in such circumstances, -- but there's nothing wrong with it = let tc_arity = tyConArity to_tc dc_univ_tyvars = dataConUnivTyVars dc dc_ex_tcvars = dataConExTyCoVars dc arg_tys = dataConRepArgTys dc non_univ_args = dropList dc_univ_tyvars dc_args (ex_args, val_args) = splitAtList dc_ex_tcvars non_univ_args -- Make the "Psi" from the paper omegas = decomposeCo tc_arity co (tyConRolesRepresentational to_tc) (psi_subst, to_ex_arg_tys) = liftCoSubstWithEx Representational dc_univ_tyvars omegas dc_ex_tcvars (map exprToType ex_args) -- Cast the value arguments (which include dictionaries) new_val_args = zipWith cast_arg (map scaledThing arg_tys) val_args cast_arg arg_ty arg = mkCast arg (psi_subst arg_ty) to_ex_args = map Type to_ex_arg_tys dump_doc = vcat [ppr dc, ppr dc_univ_tyvars, ppr dc_ex_tcvars, ppr arg_tys, ppr dc_args, ppr ex_args, ppr val_args, ppr co, ppr from_ty, ppr to_ty, ppr to_tc , ppr $ mkTyConApp to_tc (map exprToType $ takeList dc_univ_tyvars dc_args) ] in assertPpr (eqType 
from_ty (mkTyConApp to_tc (map exprToType $ takeList dc_univ_tyvars dc_args))) dump_doc $ assertPpr (equalLength val_args arg_tys) dump_doc $ Just (dc, to_tc_arg_tys, to_ex_args ++ new_val_args) | otherwise = Nothing collectBindersPushingCo :: CoreExpr -> ([Var], CoreExpr) -- Collect lambda binders, pushing coercions inside if possible -- E.g. (\x.e) |> g g :: -> blah -- = (\x. e |> SelCo (SelFun SelRes) g) -- -- That is, -- -- collectBindersPushingCo ((\x.e) |> g) === ([x], e |> SelCo (SelFun SelRes) g) collectBindersPushingCo e = go [] e where -- Peel off lambdas until we hit a cast. go :: [Var] -> CoreExpr -> ([Var], CoreExpr) -- The accumulator is in reverse order go bs (Lam b e) = go (b:bs) e go bs (Cast e co) = go_c bs e co go bs e = (reverse bs, e) -- We are in a cast; peel off casts until we hit a lambda. go_c :: [Var] -> CoreExpr -> CoercionR -> ([Var], CoreExpr) -- (go_c bs e c) is same as (go bs e (e |> c)) go_c bs (Cast e co1) co2 = go_c bs e (co1 `mkTransCo` co2) go_c bs (Lam b e) co = go_lam bs b e co go_c bs e co = (reverse bs, mkCast e co) -- We are in a lambda under a cast; peel off lambdas and build a -- new coercion for the body. 
go_lam :: [Var] -> Var -> CoreExpr -> CoercionR -> ([Var], CoreExpr) -- (go_lam bs b e c) is same as (go_c bs (\b.e) c) go_lam bs b e co | isTyVar b , let Pair tyL tyR = coercionKind co , assert (isForAllTy_ty tyL) $ isForAllTy_ty tyR , isReflCo (mkSelCo SelForAll co) -- See Note [collectBindersPushingCo] = go_c (b:bs) e (mkInstCo co (mkNomReflCo (mkTyVarTy b))) | isCoVar b , let Pair tyL tyR = coercionKind co , assert (isForAllTy_co tyL) $ isForAllTy_co tyR , isReflCo (mkSelCo SelForAll co) -- See Note [collectBindersPushingCo] , let cov = mkCoVarCo b = go_c (b:bs) e (mkInstCo co (mkNomReflCo (mkCoercionTy cov))) | isId b , let Pair tyL tyR = coercionKind co , assert (isFunTy tyL) $ isFunTy tyR , (co_mult, co_arg, co_res) <- decomposeFunCo co , isReflCo co_mult -- See Note [collectBindersPushingCo] , isReflCo co_arg -- See Note [collectBindersPushingCo] = go_c (b:bs) e co_res | otherwise = (reverse bs, mkCast (Lam b e) co) {- Note [collectBindersPushingCo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We just look for coercions of form % w -> blah (and similarly for foralls) to keep this function simple. We could do more elaborate stuff, but it'd involve substitution etc. -} {- ********************************************************************* * * Join points * * ********************************************************************* -} ------------------- -- | Split an expression into the given number of binders and a body, -- eta-expanding if necessary. Counts value *and* type binders. 
etaExpandToJoinPoint :: JoinArity -> CoreExpr -> ([CoreBndr], CoreExpr) etaExpandToJoinPoint join_arity expr = go join_arity [] expr where go 0 rev_bs e = (reverse rev_bs, e) go n rev_bs (Lam b e) = go (n-1) (b : rev_bs) e go n rev_bs e = case etaBodyForJoinPoint n e of (bs, e') -> (reverse rev_bs ++ bs, e') etaExpandToJoinPointRule :: JoinArity -> CoreRule -> CoreRule etaExpandToJoinPointRule _ rule@(BuiltinRule {}) = warnPprTrace True "Can't eta-expand built-in rule:" (ppr rule) -- How did a local binding get a built-in rule anyway? Probably a plugin. rule etaExpandToJoinPointRule join_arity rule@(Rule { ru_bndrs = bndrs, ru_rhs = rhs , ru_args = args }) | need_args == 0 = rule | need_args < 0 = pprPanic "etaExpandToJoinPointRule" (ppr join_arity $$ ppr rule) | otherwise = rule { ru_bndrs = bndrs ++ new_bndrs , ru_args = args ++ new_args , ru_rhs = new_rhs } -- new_rhs really ought to be occ-analysed (see GHC.Core Note -- [OccInfo in unfoldings and rules]), but it makes a module loop to -- do so; it doesn't happen often; and it doesn't really matter if -- the outer binders have bogus occurrence info; and new_rhs won't -- have dead code if rhs didn't. 
where need_args = join_arity - length args (new_bndrs, new_rhs) = etaBodyForJoinPoint need_args rhs new_args = varsToCoreExprs new_bndrs -- Adds as many binders as asked for; assumes expr is not a lambda etaBodyForJoinPoint :: Int -> CoreExpr -> ([CoreBndr], CoreExpr) etaBodyForJoinPoint need_args body = go need_args body_ty (mkEmptySubst in_scope) [] body where go 0 _ _ rev_bs e = (reverse rev_bs, e) go n ty subst rev_bs e | Just (tv, res_ty) <- splitForAllTyCoVar_maybe ty , let (subst', tv') = substVarBndr subst tv = go (n-1) res_ty subst' (tv' : rev_bs) (e `App` varToCoreExpr tv') -- The varToCoreExpr is important: `tv` might be a coercion variable | Just (_, mult, arg_ty, res_ty) <- splitFunTy_maybe ty , let (subst', b) = freshEtaId n subst (Scaled mult arg_ty) = go (n-1) res_ty subst' (b : rev_bs) (e `App` varToCoreExpr b) -- The varToCoreExpr is important: `b` might be a coercion variable | otherwise = pprPanic "etaBodyForJoinPoint" $ int need_args $$ ppr body $$ ppr (exprType body) body_ty = exprType body in_scope = mkInScopeSet (exprFreeVars body `unionVarSet` tyCoVarsOfType body_ty) -- in_scope is a bit tricky. -- - We are wrapping `body` in some value lambdas, so must not shadow -- any free vars of `body` -- - We are wrapping `body` in some type lambdas, so must not shadow any -- tyvars in body_ty. Example: body is just a variable -- (g :: forall (a::k). T k a -> Int) -- We must not shadown that `k` when adding the /\a. So treat the free vars -- of body_ty as in-scope. Showed up in #23026. -------------- freshEtaId :: Int -> Subst -> Scaled Type -> (Subst, Id) -- Make a fresh Id, with specified type (after applying substitution) -- It should be "fresh" in the sense that it's not in the in-scope set -- of the TvSubstEnv; and it should itself then be added to the in-scope -- set of the TvSubstEnv -- -- The Int is just a reasonable starting point for generating a unique; -- it does not necessarily have to be unique itself. 
freshEtaId n subst ty = (subst', eta_id') where Scaled mult' ty' = Type.substScaledTyUnchecked subst ty eta_id' = uniqAway (getSubstInScope subst) $ mkSysLocalOrCoVar (fsLit "eta") (mkBuiltinUnique n) mult' ty' -- "OrCoVar" since this can be used to eta-expand -- coercion abstractions subst' = extendSubstInScope subst eta_id' ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/CallerCC.hs0000644000000000000000000001007707346545000021311 0ustar0000000000000000{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE TupleSections #-} -- | Adds cost-centers to call sites selected with the @-fprof-caller=...@ -- flag. module GHC.Core.Opt.CallerCC ( addCallerCostCentres , CallerCcFilter(..) , NamePattern(..) , parseCallerCcFilter ) where import Data.Maybe import Control.Applicative import GHC.Utils.Monad.State.Strict import Control.Monad import GHC.Prelude import GHC.Utils.Outputable as Outputable import GHC.Driver.DynFlags import GHC.Types.CostCentre import GHC.Types.CostCentre.State import GHC.Types.Name hiding (varName) import GHC.Types.Tickish import GHC.Unit.Module.ModGuts import GHC.Types.SrcLoc import GHC.Types.Var import GHC.Unit.Types import GHC.Data.FastString import GHC.Core import GHC.Core.Opt.Monad import GHC.Core.Opt.CallerCC.Types addCallerCostCentres :: ModGuts -> CoreM ModGuts addCallerCostCentres guts = do dflags <- getDynFlags let filters = callerCcFilters dflags let env :: Env env = Env { thisModule = mg_module guts , ccState = newCostCentreState , countEntries = gopt Opt_ProfCountEntries dflags , revParents = [] , filters = filters } let guts' = guts { mg_binds = doCoreProgram env (mg_binds guts) } return guts' doCoreProgram :: Env -> CoreProgram -> CoreProgram doCoreProgram env binds = flip evalState newCostCentreState $ do mapM (doBind env) binds doBind :: Env -> CoreBind -> M CoreBind doBind 
env (NonRec b rhs) = NonRec b <$> doExpr (addParent b env) rhs doBind env (Rec bs) = Rec <$> mapM doPair bs where doPair (b,rhs) = (b,) <$> doExpr (addParent b env) rhs doExpr :: Env -> CoreExpr -> M CoreExpr doExpr env e@(Var v) | needsCallSiteCostCentre env v = do let nameDoc :: SDoc nameDoc = withUserStyle alwaysQualify DefaultDepth $ hcat (punctuate dot (map ppr (parents env))) <> parens (text "calling:" <> ppr v) ccName :: CcName ccName = mkFastString $ renderWithContext defaultSDocContext nameDoc ccIdx <- getCCIndex' ccName let count = countEntries env span = case revParents env of top:_ -> nameSrcSpan $ varName top _ -> noSrcSpan cc = NormalCC (mkExprCCFlavour ccIdx) ccName (thisModule env) span tick :: CoreTickish tick = ProfNote cc count True pure $ Tick tick e | otherwise = pure e doExpr _env e@(Lit _) = pure e doExpr env (f `App` x) = App <$> doExpr env f <*> doExpr env x doExpr env (Lam b x) = Lam b <$> doExpr env x doExpr env (Let b rhs) = Let <$> doBind env b <*> doExpr env rhs doExpr env (Case scrut b ty alts) = Case <$> doExpr env scrut <*> pure b <*> pure ty <*> mapM doAlt alts where doAlt (Alt con bs rhs) = Alt con bs <$> doExpr env rhs doExpr env (Cast expr co) = Cast <$> doExpr env expr <*> pure co doExpr env (Tick t e) = Tick t <$> doExpr env e doExpr _env e@(Type _) = pure e doExpr _env e@(Coercion _) = pure e type M = State CostCentreState getCCIndex' :: FastString -> M CostCentreIndex getCCIndex' name = state (getCCIndex name) data Env = Env { thisModule :: Module , countEntries :: !Bool , ccState :: CostCentreState , revParents :: [Id] , filters :: [CallerCcFilter] } addParent :: Id -> Env -> Env addParent i env = env { revParents = i : revParents env } parents :: Env -> [Id] parents env = reverse (revParents env) needsCallSiteCostCentre :: Env -> Id -> Bool needsCallSiteCostCentre env i = any matches (filters env) where matches :: CallerCcFilter -> Bool matches ccf = checkModule && checkFunc where checkModule = case ccfModuleName ccf of 
Just modFilt | Just iMod <- nameModule_maybe (varName i) -> moduleName iMod == modFilt | otherwise -> False Nothing -> True checkFunc = occNameMatches (ccfFuncName ccf) (getOccName i) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/CallerCC/0000755000000000000000000000000007346545000020750 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/CallerCC/Types.hs0000644000000000000000000000633607346545000022420 0ustar0000000000000000module GHC.Core.Opt.CallerCC.Types ( NamePattern(..) , CallerCcFilter(..) , occNameMatches , parseCallerCcFilter , parseNamePattern ) where import Data.Word (Word8) import Data.Maybe import Control.Applicative import Data.Either import Control.Monad import qualified Text.ParserCombinators.ReadP as P import GHC.Prelude import GHC.Utils.Outputable as Outputable import GHC.Types.Name hiding (varName) import GHC.Utils.Panic import qualified GHC.Utils.Binary as B import Data.Char import Language.Haskell.Syntax.Module.Name data NamePattern = PChar Char NamePattern | PWildcard NamePattern | PEnd instance Outputable NamePattern where ppr (PChar c rest) = char c <> ppr rest ppr (PWildcard rest) = char '*' <> ppr rest ppr PEnd = Outputable.empty instance B.Binary NamePattern where get bh = do tag <- B.get bh case tag :: Word8 of 0 -> PChar <$> B.get bh <*> B.get bh 1 -> PWildcard <$> B.get bh 2 -> pure PEnd _ -> panic "Binary(NamePattern): Invalid tag" put_ bh (PChar x y) = B.put_ bh (0 :: Word8) >> B.put_ bh x >> B.put_ bh y put_ bh (PWildcard x) = B.put_ bh (1 :: Word8) >> B.put_ bh x put_ bh PEnd = B.put_ bh (2 :: Word8) occNameMatches :: NamePattern -> OccName -> Bool occNameMatches pat = go pat . 
occNameString where go :: NamePattern -> String -> Bool go PEnd "" = True go (PChar c rest) (d:s) = d == c && go rest s go (PWildcard rest) s = go rest s || go (PWildcard rest) (tail s) go _ _ = False type Parser = P.ReadP parseNamePattern :: Parser NamePattern parseNamePattern = namePattern where namePattern = star P.<++ wildcard P.<++ char P.<++ end star = PChar '*' <$ P.string "\\*" <*> namePattern wildcard = do void $ P.char '*' PWildcard <$> namePattern char = PChar <$> P.get <*> namePattern end = PEnd <$ P.eof data CallerCcFilter = CallerCcFilter { ccfModuleName :: Maybe ModuleName , ccfFuncName :: NamePattern } instance Outputable CallerCcFilter where ppr ccf = maybe (char '*') ppr (ccfModuleName ccf) <> char '.' <> ppr (ccfFuncName ccf) instance B.Binary CallerCcFilter where get bh = CallerCcFilter <$> B.get bh <*> B.get bh put_ bh (CallerCcFilter x y) = B.put_ bh x >> B.put_ bh y parseCallerCcFilter :: String -> Either String CallerCcFilter parseCallerCcFilter inp = case P.readP_to_S parseCallerCcFilter' inp of ((result, ""):_) -> Right result _ -> Left $ "parse error on " ++ inp parseCallerCcFilter' :: Parser CallerCcFilter parseCallerCcFilter' = CallerCcFilter <$> moduleFilter <* P.char '.' <*> parseNamePattern where moduleFilter :: Parser (Maybe ModuleName) moduleFilter = (Just . mkModuleName <$> moduleName) <|> (Nothing <$ P.char '*') moduleName :: Parser String moduleName = do c <- P.satisfy isUpper cs <- P.munch1 (\c -> isUpper c || isLower c || isDigit c || c == '_') rest <- optional $ P.char '.' >> fmap ('.':) moduleName return $ c : (cs ++ fromMaybe "" rest) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/ConstantFold.hs0000644000000000000000000043037407346545000022305 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Conceptually, constant folding should be parameterized with the kind of target machine to get identical behaviour during compilation time and runtime. We cheat a little bit here... 
ToDo: check boundaries before folding, e.g. we can fold the Float addition (i1 + i2) only if it results in a valid Float. -} {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE -Wno-incomplete-uni-patterns #-} -- | Constant Folder module GHC.Core.Opt.ConstantFold ( primOpRules , builtinRules , caseRules , caseRules2 ) where import GHC.Prelude import GHC.Platform import GHC.Float import GHC.Types.Id.Make ( unboxedUnitExpr ) import GHC.Types.Id import GHC.Types.Literal import GHC.Types.Name.Occurrence ( occNameFS ) import GHC.Types.Tickish import GHC.Types.Name ( Name, nameOccName ) import GHC.Types.Basic import GHC.Core import GHC.Core.Make import GHC.Core.SimpleOpt ( exprIsConApp_maybe, exprIsLiteral_maybe ) import GHC.Core.DataCon ( DataCon,dataConTagZ, dataConTyCon, dataConWrapId, dataConWorkId ) import GHC.Core.Utils ( cheapEqExpr, exprIsHNF , stripTicksTop, stripTicksTopT, mkTicks ) import GHC.Core.Multiplicity import GHC.Core.Rules.Config import GHC.Core.Type import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.TyCon ( TyCon, tyConDataCons_maybe, tyConDataCons, tyConFamilySize , isEnumerationTyCon, isValidDTT2TyCon, isNewTyCon ) import GHC.Core.Map.Expr ( eqCoreExpr ) import GHC.Builtin.PrimOps ( PrimOp(..), tagToEnumKey ) import GHC.Builtin.PrimOps.Ids (primOpId) import GHC.Builtin.Types import GHC.Builtin.Types.Prim import GHC.Builtin.Names import GHC.Cmm.MachOp ( FMASign(..) ) import GHC.Cmm.Type ( Width(..) ) import GHC.Data.FastString import GHC.Data.Maybe ( orElse ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Control.Applicative ( Alternative(..) 
) import Control.Monad import Data.Functor (($>)) import qualified Data.ByteString as BS import Data.Ratio import Data.Word import Data.Maybe (fromMaybe, fromJust) {- Note [Constant folding] ~~~~~~~~~~~~~~~~~~~~~~~ primOpRules generates a rewrite rule for each primop These rules do what is often called "constant folding" E.g. the rules for +# might say 4 +# 5 = 9 Well, of course you'd need a lot of rules if you did it like that, so we use a BuiltinRule instead, so that we can match in any two literal values. So the rule is really more like (Lit x) +# (Lit y) = Lit (x+#y) where the (+#) on the rhs is done at compile time That is why these rules are built in here. -} primOpRules :: Name -> PrimOp -> Maybe CoreRule primOpRules nm = \case TagToEnumOp -> mkPrimOpRule nm 2 [ tagToEnumRule ] DataToTagSmallOp -> mkPrimOpRule nm 3 [ dataToTagRule ] DataToTagLargeOp -> mkPrimOpRule nm 3 [ dataToTagRule ] -- Int8 operations Int8AddOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (+)) , identity zeroI8 , addFoldingRules Int8AddOp int8Ops ] Int8SubOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (-)) , rightIdentity zeroI8 , equalArgs $> Lit zeroI8 , subFoldingRules Int8SubOp int8Ops ] Int8MulOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (*)) , zeroElem , identity oneI8 , mulFoldingRules Int8MulOp int8Ops ] Int8QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int8Op2 quot) , leftZero , rightIdentity oneI8 , equalArgs $> Lit oneI8 , quotFoldingRules int8Ops ] Int8RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int8Op2 rem) , leftZero , oneLit 1 $> Lit zeroI8 , equalArgs $> Lit zeroI8 ] Int8NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int8NegOp ] Int8SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 (const shiftL) , rightIdentity zeroI8 ] Int8SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 (const shiftR) , rightIdentity zeroI8 ] Int8SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 $ const $ shiftRightLogical @Word8 , rightIdentity zeroI8 ] -- Word8 
operations Word8AddOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (+)) , identity zeroW8 , addFoldingRules Word8AddOp word8Ops ] Word8SubOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (-)) , rightIdentity zeroW8 , equalArgs $> Lit zeroW8 , subFoldingRules Word8SubOp word8Ops ] Word8MulOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (*)) , identity oneW8 , mulFoldingRules Word8MulOp word8Ops ] Word8QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word8Op2 quot) , rightIdentity oneW8 , quotFoldingRules word8Ops ] Word8RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word8Op2 rem) , leftZero , oneLit 1 $> Lit zeroW8 , equalArgs $> Lit zeroW8 ] Word8AndOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord8 0xFF) , sameArgIdempotentCommut Word8AndOp , andFoldingRules word8Ops ] Word8OrOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (.|.)) , idempotent , identity zeroW8 , sameArgIdempotentCommut Word8OrOp , orFoldingRules word8Ops ] Word8XorOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 xor) , identity zeroW8 , equalArgs $> Lit zeroW8 ] Word8NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word8NotOp ] Word8SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord8 (const shiftL) ] Word8SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord8 $ const $ shiftRightLogical @Word8 ] -- Int16 operations Int16AddOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (+)) , identity zeroI16 , addFoldingRules Int16AddOp int16Ops ] Int16SubOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (-)) , rightIdentity zeroI16 , equalArgs $> Lit zeroI16 , subFoldingRules Int16SubOp int16Ops ] Int16MulOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (*)) , zeroElem , identity oneI16 , mulFoldingRules Int16MulOp int16Ops ] Int16QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int16Op2 quot) , leftZero , rightIdentity oneI16 , equalArgs $> Lit oneI16 , quotFoldingRules int16Ops ] Int16RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit 
(int16Op2 rem) , leftZero , oneLit 1 $> Lit zeroI16 , equalArgs $> Lit zeroI16 ] Int16NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int16NegOp ] Int16SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 (const shiftL) , rightIdentity zeroI16 ] Int16SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 (const shiftR) , rightIdentity zeroI16 ] Int16SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 $ const $ shiftRightLogical @Word16 , rightIdentity zeroI16 ] -- Word16 operations Word16AddOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (+)) , identity zeroW16 , addFoldingRules Word16AddOp word16Ops ] Word16SubOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (-)) , rightIdentity zeroW16 , equalArgs $> Lit zeroW16 , subFoldingRules Word16SubOp word16Ops ] Word16MulOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (*)) , identity oneW16 , mulFoldingRules Word16MulOp word16Ops ] Word16QuotOp-> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word16Op2 quot) , rightIdentity oneW16 , quotFoldingRules word16Ops ] Word16RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word16Op2 rem) , leftZero , oneLit 1 $> Lit zeroW16 , equalArgs $> Lit zeroW16 ] Word16AndOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord16 0xFFFF) , sameArgIdempotentCommut Word16AndOp , andFoldingRules word16Ops ] Word16OrOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (.|.)) , idempotent , identity zeroW16 , sameArgIdempotentCommut Word16OrOp , orFoldingRules word16Ops ] Word16XorOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 xor) , identity zeroW16 , equalArgs $> Lit zeroW16 ] Word16NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word16NotOp ] Word16SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord16 (const shiftL) ] Word16SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord16 $ const $ shiftRightLogical @Word16 ] -- Int32 operations Int32AddOp -> mkPrimOpRule nm 2 [ binaryLit (int32Op2 (+)) , identity zeroI32 , 
addFoldingRules Int32AddOp int32Ops ] Int32SubOp -> mkPrimOpRule nm 2 [ binaryLit (int32Op2 (-)) , rightIdentity zeroI32 , equalArgs $> Lit zeroI32 , subFoldingRules Int32SubOp int32Ops ] Int32MulOp -> mkPrimOpRule nm 2 [ binaryLit (int32Op2 (*)) , zeroElem , identity oneI32 , mulFoldingRules Int32MulOp int32Ops ] Int32QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int32Op2 quot) , leftZero , rightIdentity oneI32 , equalArgs $> Lit oneI32 , quotFoldingRules int32Ops ] Int32RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int32Op2 rem) , leftZero , oneLit 1 $> Lit zeroI32 , equalArgs $> Lit zeroI32 ] Int32NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int32NegOp ] Int32SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 (const shiftL) , rightIdentity zeroI32 ] Int32SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 (const shiftR) , rightIdentity zeroI32 ] Int32SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 $ const $ shiftRightLogical @Word32 , rightIdentity zeroI32 ] -- Word32 operations Word32AddOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (+)) , identity zeroW32 , addFoldingRules Word32AddOp word32Ops ] Word32SubOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (-)) , rightIdentity zeroW32 , equalArgs $> Lit zeroW32 , subFoldingRules Word32SubOp word32Ops ] Word32MulOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (*)) , identity oneW32 , mulFoldingRules Word32MulOp word32Ops ] Word32QuotOp-> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word32Op2 quot) , rightIdentity oneW32 , quotFoldingRules word32Ops ] Word32RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word32Op2 rem) , leftZero , oneLit 1 $> Lit zeroW32 , equalArgs $> Lit zeroW32 ] Word32AndOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord32 0xFFFFFFFF) , sameArgIdempotentCommut Word32AndOp , andFoldingRules word32Ops ] Word32OrOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (.|.)) , idempotent , identity zeroW32 , 
sameArgIdempotentCommut Word32OrOp , orFoldingRules word32Ops ] Word32XorOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 xor) , identity zeroW32 , equalArgs $> Lit zeroW32 ] Word32NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word32NotOp ] Word32SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord32 (const shiftL) ] Word32SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord32 $ const $ shiftRightLogical @Word32 ] -- Int64 operations Int64AddOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (+)) , identity zeroI64 , addFoldingRules Int64AddOp int64Ops ] Int64SubOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (-)) , rightIdentity zeroI64 , equalArgs $> Lit zeroI64 , subFoldingRules Int64SubOp int64Ops ] Int64MulOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (*)) , zeroElem , identity oneI64 , mulFoldingRules Int64MulOp int64Ops ] Int64QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int64Op2 quot) , leftZero , rightIdentity oneI64 , equalArgs $> Lit oneI64 , quotFoldingRules int64Ops ] Int64RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int64Op2 rem) , leftZero , oneLit 1 $> Lit zeroI64 , equalArgs $> Lit zeroI64 ] Int64NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int64NegOp ] Int64SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 (const shiftL) , rightIdentity zeroI64 ] Int64SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 (const shiftR) , rightIdentity zeroI64 ] Int64SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 $ const $ shiftRightLogical @Word64 , rightIdentity zeroI64 ] -- Word64 operations Word64AddOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (+)) , identity zeroW64 , addFoldingRules Word64AddOp word64Ops ] Word64SubOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (-)) , rightIdentity zeroW64 , equalArgs $> Lit zeroW64 , subFoldingRules Word64SubOp word64Ops ] Word64MulOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (*)) , identity oneW64 , mulFoldingRules Word64MulOp word64Ops ] Word64QuotOp-> mkPrimOpRule 
nm 2 [ nonZeroLit 1 >> binaryLit (word64Op2 quot) , rightIdentity oneW64 , quotFoldingRules word64Ops ] Word64RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word64Op2 rem) , leftZero , oneLit 1 $> Lit zeroW64 , equalArgs $> Lit zeroW64 ] Word64AndOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord64 0xFFFFFFFFFFFFFFFF) , sameArgIdempotentCommut Word64AndOp , andFoldingRules word64Ops ] Word64OrOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (.|.)) , idempotent , identity zeroW64 , sameArgIdempotentCommut Word64OrOp , orFoldingRules word64Ops ] Word64XorOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 xor) , identity zeroW64 , equalArgs $> Lit zeroW64 ] Word64NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word64NotOp ] Word64SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord64 (const shiftL) ] Word64SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord64 $ const $ shiftRightLogical @Word64 ] -- Int operations IntAddOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (+)) , identityPlatform zeroi , addFoldingRules IntAddOp intOps ] IntSubOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (-)) , rightIdentityPlatform zeroi , equalArgs >> retLit zeroi , subFoldingRules IntSubOp intOps ] IntAddCOp -> mkPrimOpRule nm 2 [ binaryLit (intOpC2 (+)) , identityCPlatform zeroi ] IntSubCOp -> mkPrimOpRule nm 2 [ binaryLit (intOpC2 (-)) , rightIdentityCPlatform zeroi , equalArgs >> retLitNoC zeroi ] IntMulOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (*)) , zeroElem , identityPlatform onei , mulFoldingRules IntMulOp intOps ] IntMul2Op -> mkPrimOpRule nm 2 [ do [Lit (LitNumber _ l1), Lit (LitNumber _ l2)] <- getArgs platform <- getPlatform let r = l1 * l2 pure $ mkCoreUnboxedTuple [ Lit (if platformInIntRange platform r then zeroi platform else onei platform) , mkIntLitWrap platform (r `shiftR` platformWordSizeInBits platform) , mkIntLitWrap platform r ] , zeroElem >>= \z -> pure (mkCoreUnboxedTuple [z,z,z]) -- timesInt2# 1# 
other -- ~~~> -- (# 0#, 0# -# (other >># (WORD_SIZE_IN_BITS-1)), other #) -- The second element is the sign bit -- repeated to fill a word. , identityPlatform onei >>= \other -> do platform <- getPlatform pure $ mkCoreUnboxedTuple [ Lit (zeroi platform) , mkCoreApps (Var (primOpId IntSubOp)) [ Lit (zeroi platform) , mkCoreApps (Var (primOpId IntSrlOp)) [ other , mkIntLit platform (fromIntegral (platformWordSizeInBits platform - 1)) ] ] , other ] ] IntQuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (intOp2 quot) , leftZero , rightIdentityPlatform onei , equalArgs >> retLit onei , quotFoldingRules intOps ] IntRemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (intOp2 rem) , leftZero , oneLit 1 >> retLit zeroi , equalArgs >> retLit zeroi ] IntAndOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (.&.)) , idempotent , zeroElem , identityPlatform (\p -> mkLitInt p (-1)) , sameArgIdempotentCommut IntAndOp , andFoldingRules intOps ] IntOrOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (.|.)) , idempotent , identityPlatform zeroi , sameArgIdempotentCommut IntOrOp , orFoldingRules intOps ] IntXorOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 xor) , identityPlatform zeroi , equalArgs >> retLit zeroi ] IntNotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp IntNotOp ] IntNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp IntNegOp ] IntSllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt (const shiftL) , rightIdentityPlatform zeroi ] IntSraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt (const shiftR) , rightIdentityPlatform zeroi ] IntSrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt shiftRightLogicalNative , rightIdentityPlatform zeroi ] -- Word operations WordAddOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (+)) , identityPlatform zerow , addFoldingRules WordAddOp wordOps ] WordSubOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (-)) , rightIdentityPlatform zerow , equalArgs >> retLit zerow , subFoldingRules WordSubOp wordOps ] WordAddCOp -> mkPrimOpRule nm 
2 [ binaryLit (wordOpC2 (+)) , identityCPlatform zerow ] WordSubCOp -> mkPrimOpRule nm 2 [ binaryLit (wordOpC2 (-)) , rightIdentityCPlatform zerow , equalArgs >> retLitNoC zerow ] WordMulOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (*)) , identityPlatform onew , mulFoldingRules WordMulOp wordOps ] WordQuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (wordOp2 quot) , rightIdentityPlatform onew , quotFoldingRules wordOps ] WordRemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (wordOp2 rem) , leftZero , oneLit 1 >> retLit zerow , equalArgs >> retLit zerow ] WordAndOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (.&.)) , idempotent , zeroElem , identityPlatform (\p -> mkLitWord p (platformMaxWord p)) , sameArgIdempotentCommut WordAndOp , andFoldingRules wordOps ] WordOrOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (.|.)) , idempotent , identityPlatform zerow , sameArgIdempotentCommut WordOrOp , orFoldingRules wordOps ] WordXorOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 xor) , identityPlatform zerow , equalArgs >> retLit zerow ] WordNotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp WordNotOp ] WordSllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord (const shiftL) ] WordSrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord shiftRightLogicalNative ] PopCnt8Op -> mkPrimOpRule nm 1 [ pop_count @Word8 ] PopCnt16Op -> mkPrimOpRule nm 1 [ pop_count @Word16 ] PopCnt32Op -> mkPrimOpRule nm 1 [ pop_count @Word32 ] PopCnt64Op -> mkPrimOpRule nm 1 [ pop_count @Word64 ] PopCntOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> pop_count @Word32 PW8 -> pop_count @Word64 ] Ctz8Op -> mkPrimOpRule nm 1 [ ctz @Word8 ] Ctz16Op -> mkPrimOpRule nm 1 [ ctz @Word16 ] Ctz32Op -> mkPrimOpRule nm 1 [ ctz @Word32 ] Ctz64Op -> mkPrimOpRule nm 1 [ ctz @Word64 ] CtzOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> ctz @Word32 PW8 -> ctz @Word64 ] Clz8Op -> mkPrimOpRule nm 1 [ clz @Word8 ] Clz16Op -> mkPrimOpRule nm 1 [ clz @Word16 ] Clz32Op -> mkPrimOpRule nm 1 [ clz 
@Word32 ] Clz64Op -> mkPrimOpRule nm 1 [ clz @Word64 ] ClzOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> clz @Word32 PW8 -> clz @Word64 ] -- coercions Int8ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int16ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int32ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int64ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] IntToInt8Op -> mkPrimOpRule nm 1 [ liftLit narrowInt8Lit , narrowSubsumesAnd IntAndOp IntToInt8Op 8 ] IntToInt16Op -> mkPrimOpRule nm 1 [ liftLit narrowInt16Lit , narrowSubsumesAnd IntAndOp IntToInt16Op 16 ] IntToInt32Op -> mkPrimOpRule nm 1 [ liftLit narrowInt32Lit , narrowSubsumesAnd IntAndOp IntToInt32Op 32 ] IntToInt64Op -> mkPrimOpRule nm 1 [ liftLit narrowInt64Lit ] Word8ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord8Op 0xFF ] Word16ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord16Op 0xFFFF ] Word32ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord32Op 0xFFFFFFFF ] Word64ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit ] WordToWord8Op -> mkPrimOpRule nm 1 [ liftLit narrowWord8Lit , narrowSubsumesAnd WordAndOp WordToWord8Op 8 ] WordToWord16Op -> mkPrimOpRule nm 1 [ liftLit narrowWord16Lit , narrowSubsumesAnd WordAndOp WordToWord16Op 16 ] WordToWord32Op -> mkPrimOpRule nm 1 [ liftLit narrowWord32Lit , narrowSubsumesAnd WordAndOp WordToWord32Op 32 ] WordToWord64Op -> mkPrimOpRule nm 1 [ liftLit narrowWord64Lit ] Word8ToInt8Op -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt8) ] Int8ToWord8Op -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord8) ] Word16ToInt16Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt16) ] Int16ToWord16Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord16) ] Word32ToInt32Op-> 
mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt32) ] Int32ToWord32Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord32) ] Word64ToInt64Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt64) ] Int64ToWord64Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord64) ] WordToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt) ] IntToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord) ] Narrow8IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt8) , subsumedByPrimOp Narrow8IntOp , Narrow8IntOp `subsumesPrimOp` Narrow16IntOp , Narrow8IntOp `subsumesPrimOp` Narrow32IntOp , narrowSubsumesAnd IntAndOp Narrow8IntOp 8 ] Narrow16IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt16) , subsumedByPrimOp Narrow8IntOp , subsumedByPrimOp Narrow16IntOp , Narrow16IntOp `subsumesPrimOp` Narrow32IntOp , narrowSubsumesAnd IntAndOp Narrow16IntOp 16 ] Narrow32IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt32) , subsumedByPrimOp Narrow8IntOp , subsumedByPrimOp Narrow16IntOp , subsumedByPrimOp Narrow32IntOp , removeOp32 , narrowSubsumesAnd IntAndOp Narrow32IntOp 32 ] Narrow8WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord8) , subsumedByPrimOp Narrow8WordOp , Narrow8WordOp `subsumesPrimOp` Narrow16WordOp , Narrow8WordOp `subsumesPrimOp` Narrow32WordOp , narrowSubsumesAnd WordAndOp Narrow8WordOp 8 ] Narrow16WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord16) , subsumedByPrimOp Narrow8WordOp , subsumedByPrimOp Narrow16WordOp , Narrow16WordOp `subsumesPrimOp` Narrow32WordOp , narrowSubsumesAnd WordAndOp Narrow16WordOp 16 ] Narrow32WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord32) , subsumedByPrimOp Narrow8WordOp , subsumedByPrimOp Narrow16WordOp , subsumedByPrimOp Narrow32WordOp , removeOp32 , narrowSubsumesAnd WordAndOp Narrow32WordOp 32 ] CastWord64ToDoubleOp -> mkPrimOpRule nm 1 [ 
unaryLit $ \_env -> \case LitNumber _ n | v <- castWord64ToDouble (fromInteger n) -- we can't represent those float literals in Core until #18897 is fixed , not (isNaN v || isInfinite v || isNegativeZero v) -> Just (mkDoubleLitDouble v) _ -> Nothing ] CastWord32ToFloatOp -> mkPrimOpRule nm 1 [ unaryLit $ \_env -> \case LitNumber _ n | v <- castWord32ToFloat (fromInteger n) -- we can't represent those float literals in Core until #18897 is fixed , not (isNaN v || isInfinite v || isNegativeZero v) -> Just (mkFloatLitFloat v) _ -> Nothing ] CastDoubleToWord64Op -> mkPrimOpRule nm 1 [ unaryLit $ \_env -> \case LitDouble n -> Just (mkWord64LitWord64 (castDoubleToWord64 (fromRational n))) _ -> Nothing ] CastFloatToWord32Op -> mkPrimOpRule nm 1 [ unaryLit $ \_env -> \case LitFloat n -> Just (mkWord32LitWord32 (castFloatToWord32 (fromRational n))) _ -> Nothing ] OrdOp -> mkPrimOpRule nm 1 [ liftLit charToIntLit , semiInversePrimOp ChrOp ] ChrOp -> mkPrimOpRule nm 1 [ do [Lit lit] <- getArgs guard (litFitsInChar lit) liftLit intToCharLit , semiInversePrimOp OrdOp ] FloatToIntOp -> mkPrimOpRule nm 1 [ liftLit floatToIntLit ] IntToFloatOp -> mkPrimOpRule nm 1 [ liftLit intToFloatLit ] DoubleToIntOp -> mkPrimOpRule nm 1 [ liftLit doubleToIntLit ] IntToDoubleOp -> mkPrimOpRule nm 1 [ liftLit intToDoubleLit ] -- SUP: Not sure what the standard says about precision in the following 2 cases FloatToDoubleOp -> mkPrimOpRule nm 1 [ liftLit floatToDoubleLit ] DoubleToFloatOp -> mkPrimOpRule nm 1 [ liftLit doubleToFloatLit ] -- Float FloatAddOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (+)) , identity zerof ] FloatSubOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (-)) , rightIdentity zerof ] FloatMulOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (*)) , identity onef , strengthReduction twof FloatAddOp ] FloatFMAdd -> mkPrimOpRule nm 3 (fmaRules FMAdd W32) FloatFMSub -> mkPrimOpRule nm 3 (fmaRules FMSub W32) FloatFNMAdd -> mkPrimOpRule nm 3 (fmaRules FNMAdd W32) FloatFNMSub -> 
mkPrimOpRule nm 3 (fmaRules FNMSub W32) -- zeroElem zerof doesn't hold because of NaN FloatDivOp -> mkPrimOpRule nm 2 [ guardFloatDiv >> binaryLit (floatOp2 (/)) , rightIdentity onef ] FloatNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp FloatNegOp ] FloatDecode_IntOp -> mkPrimOpRule nm 1 [ unaryLit floatDecodeOp ] -- Double DoubleAddOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (+)) , identity zerod ] DoubleSubOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (-)) , rightIdentity zerod ] DoubleMulOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (*)) , identity oned , strengthReduction twod DoubleAddOp ] DoubleFMAdd -> mkPrimOpRule nm 3 (fmaRules FMAdd W64) DoubleFMSub -> mkPrimOpRule nm 3 (fmaRules FMSub W64) DoubleFNMAdd -> mkPrimOpRule nm 3 (fmaRules FNMAdd W64) DoubleFNMSub -> mkPrimOpRule nm 3 (fmaRules FNMSub W64) -- zeroElem zerod doesn't hold because of NaN DoubleDivOp -> mkPrimOpRule nm 2 [ guardDoubleDiv >> binaryLit (doubleOp2 (/)) , rightIdentity oned ] DoubleNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp DoubleNegOp ] DoubleDecode_Int64Op -> mkPrimOpRule nm 1 [ unaryLit doubleDecodeOp ] -- Relational operators, equality Int8EqOp -> mkRelOpRule nm (==) [ litEq True ] Int8NeOp -> mkRelOpRule nm (/=) [ litEq False ] Int16EqOp -> mkRelOpRule nm (==) [ litEq True ] Int16NeOp -> mkRelOpRule nm (/=) [ litEq False ] Int32EqOp -> mkRelOpRule nm (==) [ litEq True ] Int32NeOp -> mkRelOpRule nm (/=) [ litEq False ] Int64EqOp -> mkRelOpRule nm (==) [ litEq True ] Int64NeOp -> mkRelOpRule nm (/=) [ litEq False ] IntEqOp -> mkRelOpRule nm (==) [ litEq True ] IntNeOp -> mkRelOpRule nm (/=) [ litEq False ] Word8EqOp -> mkRelOpRule nm (==) [ litEq True ] Word8NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word16EqOp -> mkRelOpRule nm (==) [ litEq True ] Word16NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word32EqOp -> mkRelOpRule nm (==) [ litEq True ] Word32NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word64EqOp -> mkRelOpRule nm (==) [ litEq 
True ] Word64NeOp -> mkRelOpRule nm (/=) [ litEq False ] WordEqOp -> mkRelOpRule nm (==) [ litEq True ] WordNeOp -> mkRelOpRule nm (/=) [ litEq False ] CharEqOp -> mkRelOpRule nm (==) [ litEq True ] CharNeOp -> mkRelOpRule nm (/=) [ litEq False ] FloatEqOp -> mkFloatingRelOpRule nm (==) FloatNeOp -> mkFloatingRelOpRule nm (/=) DoubleEqOp -> mkFloatingRelOpRule nm (==) DoubleNeOp -> mkFloatingRelOpRule nm (/=) -- Relational operators, ordering Int8GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int8GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int8LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int8LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int16GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int16GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int16LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int16LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int32GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int32GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int32LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int32LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int64GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int64GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int64LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int64LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] IntGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] IntGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] IntLeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] IntLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word8GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word8GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word8LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word8LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word16GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word16GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word16LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word16LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word32GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word32GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word32LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word32LtOp -> 
mkRelOpRule nm (<) [ boundsCmp Lt ] Word64GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word64GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word64LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word64LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] WordGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] WordGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] WordLeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] WordLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] CharGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] CharGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] CharLeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] CharLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] FloatGtOp -> mkFloatingRelOpRule nm (>) FloatGeOp -> mkFloatingRelOpRule nm (>=) FloatLeOp -> mkFloatingRelOpRule nm (<=) FloatLtOp -> mkFloatingRelOpRule nm (<) DoubleGtOp -> mkFloatingRelOpRule nm (>) DoubleGeOp -> mkFloatingRelOpRule nm (>=) DoubleLeOp -> mkFloatingRelOpRule nm (<=) DoubleLtOp -> mkFloatingRelOpRule nm (<) -- Misc AddrAddOp -> mkPrimOpRule nm 2 [ rightIdentityPlatform zeroi ] SparkOp -> mkPrimOpRule nm 4 [ sparkRule ] _ -> Nothing {- ************************************************************************ * * \subsection{Doing the business} * * ************************************************************************ -} -- useful shorthands mkPrimOpRule :: Name -> Int -> [RuleM CoreExpr] -> Maybe CoreRule mkPrimOpRule nm arity rules = Just $ mkBasicRule nm arity (msum rules) mkRelOpRule :: Name -> (forall a . 
Ord a => a -> a -> Bool) -> [RuleM CoreExpr] -> Maybe CoreRule mkRelOpRule nm cmp extra = mkPrimOpRule nm 2 $ binaryCmpLit cmp : equal_rule : extra where -- x `cmp` x does not depend on x, so -- compute it for the arbitrary value 'True' -- and use that result equal_rule = do { equalArgs ; platform <- getPlatform ; return (if cmp True True then trueValInt platform else falseValInt platform) } {- Note [Rules for floating-point comparisons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need different rules for floating-point values because for floats it is not true that x = x (for NaNs); so we do not want the equal_rule rule that mkRelOpRule uses. Note also that, in the case of equality/inequality, we do /not/ want to switch to a case-expression. For example, we do not want to convert case (eqFloat# x 3.8#) of True -> this False -> that to case x of 3.8#::Float# -> this _ -> that See #9238. Reason: comparing floating-point values for equality delicate, and we don't want to implement that delicacy in the code for case expressions. So we make it an invariant of Core that a case expression never scrutinises a Float# or Double#. This transformation is what the litEq rule does; see Note [The litEq rule: converting equality to case]. So we /refrain/ from using litEq for mkFloatingRelOpRule. -} mkFloatingRelOpRule :: Name -> (forall a . 
Ord a => a -> a -> Bool) -> Maybe CoreRule -- See Note [Rules for floating-point comparisons] mkFloatingRelOpRule nm cmp = mkPrimOpRule nm 2 [binaryCmpLit cmp] -- common constants zeroi, onei, zerow, onew :: Platform -> Literal zeroi platform = mkLitInt platform 0 onei platform = mkLitInt platform 1 zerow platform = mkLitWord platform 0 onew platform = mkLitWord platform 1 zeroI8, oneI8, zeroW8, oneW8 :: Literal zeroI8 = mkLitInt8 0 oneI8 = mkLitInt8 1 zeroW8 = mkLitWord8 0 oneW8 = mkLitWord8 1 zeroI16, oneI16, zeroW16, oneW16 :: Literal zeroI16 = mkLitInt16 0 oneI16 = mkLitInt16 1 zeroW16 = mkLitWord16 0 oneW16 = mkLitWord16 1 zeroI32, oneI32, zeroW32, oneW32 :: Literal zeroI32 = mkLitInt32 0 oneI32 = mkLitInt32 1 zeroW32 = mkLitWord32 0 oneW32 = mkLitWord32 1 zeroI64, oneI64, zeroW64, oneW64 :: Literal zeroI64 = mkLitInt64 0 oneI64 = mkLitInt64 1 zeroW64 = mkLitWord64 0 oneW64 = mkLitWord64 1 zerof, onef, twof, zerod, oned, twod :: Literal zerof = mkLitFloat 0.0 onef = mkLitFloat 1.0 twof = mkLitFloat 2.0 zerod = mkLitDouble 0.0 oned = mkLitDouble 1.0 twod = mkLitDouble 2.0 cmpOp :: Platform -> (forall a . 
Ord a => a -> a -> Bool) -> Literal -> Literal -> Maybe CoreExpr cmpOp platform cmp = go where done True = Just $ trueValInt platform done False = Just $ falseValInt platform -- These compares are at different types go (LitChar i1) (LitChar i2) = done (i1 `cmp` i2) go (LitFloat i1) (LitFloat i2) = done (i1 `cmp` i2) go (LitDouble i1) (LitDouble i2) = done (i1 `cmp` i2) go (LitNumber nt1 i1) (LitNumber nt2 i2) | nt1 /= nt2 = Nothing | otherwise = done (i1 `cmp` i2) go _ _ = Nothing -------------------------- negOp :: RuleOpts -> Literal -> Maybe CoreExpr -- Negate negOp env = \case (LitFloat 0.0) -> Nothing -- can't represent -0.0 as a Rational (LitFloat f) -> Just (mkFloatVal env (-f)) (LitDouble 0.0) -> Nothing (LitDouble d) -> Just (mkDoubleVal env (-d)) (LitNumber nt i) | litNumIsSigned nt -> Just (Lit (mkLitNumberWrap (roPlatform env) nt (-i))) _ -> Nothing complementOp :: RuleOpts -> Literal -> Maybe CoreExpr -- Binary complement complementOp env (LitNumber nt i) = Just (Lit (mkLitNumberWrap (roPlatform env) nt (complement i))) complementOp _ _ = Nothing int8Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr int8Op2 op _ (LitNumber LitNumInt8 i1) (LitNumber LitNumInt8 i2) = int8Result (fromInteger i1 `op` fromInteger i2) int8Op2 _ _ _ _ = Nothing int16Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr int16Op2 op _ (LitNumber LitNumInt16 i1) (LitNumber LitNumInt16 i2) = int16Result (fromInteger i1 `op` fromInteger i2) int16Op2 _ _ _ _ = Nothing int32Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr int32Op2 op _ (LitNumber LitNumInt32 i1) (LitNumber LitNumInt32 i2) = int32Result (fromInteger i1 `op` fromInteger i2) int32Op2 _ _ _ _ = Nothing int64Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr int64Op2 op _ (LitNumber LitNumInt64 i1) 
(LitNumber LitNumInt64 i2) = int64Result (fromInteger i1 `op` fromInteger i2) int64Op2 _ _ _ _ = Nothing intOp2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr intOp2 = intOp2' . const intOp2' :: (Integral a, Integral b) => (RuleOpts -> a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr intOp2' op env (LitNumber LitNumInt i1) (LitNumber LitNumInt i2) = let o = op env in intResult (roPlatform env) (fromInteger i1 `o` fromInteger i2) intOp2' _ _ _ _ = Nothing intOpC2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr intOpC2 op env (LitNumber LitNumInt i1) (LitNumber LitNumInt i2) = intCResult (roPlatform env) (fromInteger i1 `op` fromInteger i2) intOpC2 _ _ _ _ = Nothing shiftRightLogical :: forall t. (Integral t, Bits t) => Integer -> Int -> Integer shiftRightLogical x n = fromIntegral (fromInteger x `shiftR` n :: t) -- | Shift right, putting zeros in rather than sign-propagating as -- 'Bits.shiftR' would do. Do this by converting to the appropriate Word -- and back. Obviously this won't work for too-big values, but its ok as -- we use it here. 
shiftRightLogicalNative :: Platform -> Integer -> Int -> Integer shiftRightLogicalNative platform = case platformWordSize platform of PW4 -> shiftRightLogical @Word32 PW8 -> shiftRightLogical @Word64 -------------------------- retLit :: (Platform -> Literal) -> RuleM CoreExpr retLit l = do platform <- getPlatform return $ Lit $ l platform retLitNoC :: (Platform -> Literal) -> RuleM CoreExpr retLitNoC l = do platform <- getPlatform let lit = l platform return $ mkCoreUnboxedTuple [Lit lit, Lit (zeroi platform)] word8Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr word8Op2 op _ (LitNumber LitNumWord8 i1) (LitNumber LitNumWord8 i2) = word8Result (fromInteger i1 `op` fromInteger i2) word8Op2 _ _ _ _ = Nothing word16Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr word16Op2 op _ (LitNumber LitNumWord16 i1) (LitNumber LitNumWord16 i2) = word16Result (fromInteger i1 `op` fromInteger i2) word16Op2 _ _ _ _ = Nothing word32Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr word32Op2 op _ (LitNumber LitNumWord32 i1) (LitNumber LitNumWord32 i2) = word32Result (fromInteger i1 `op` fromInteger i2) word32Op2 _ _ _ _ = Nothing word64Op2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr word64Op2 op _ (LitNumber LitNumWord64 i1) (LitNumber LitNumWord64 i2) = word64Result (fromInteger i1 `op` fromInteger i2) word64Op2 _ _ _ _ = Nothing wordOp2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr wordOp2 op env (LitNumber LitNumWord w1) (LitNumber LitNumWord w2) = wordResult (roPlatform env) (fromInteger w1 `op` fromInteger w2) wordOp2 _ _ _ _ = Nothing wordOpC2 :: (Integral a, Integral b) => (a -> b -> Integer) -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr wordOpC2 op env (LitNumber LitNumWord w1) 
(LitNumber LitNumWord w2) = wordCResult (roPlatform env) (fromInteger w1 `op` fromInteger w2) wordOpC2 _ _ _ _ = Nothing shiftRule :: LitNumType -> (Platform -> Integer -> Int -> Integer) -> RuleM CoreExpr -- Shifts take an Int; hence third arg of op is Int -- Used for shift primops -- IntSllOp, IntSraOp, IntSrlOp :: Int# -> Int# -> Int# -- SllOp, SrlOp :: Word# -> Int# -> Word# shiftRule lit_num_ty shift_op = do platform <- getPlatform [e1, Lit (LitNumber LitNumInt shift_len)] <- getArgs bit_size <- case litNumBitSize platform lit_num_ty of Nothing -> mzero Just bs -> pure (toInteger bs) case e1 of _ | shift_len == 0 -> pure e1 -- See Note [Guarding against silly shifts] _ | shift_len < 0 || shift_len >= bit_size -> pure $ Lit $ mkLitNumberWrap platform lit_num_ty 0 -- Be sure to use lit_num_ty here, so we get a correctly typed zero. -- See #18589 Lit (LitNumber nt x) | 0 < shift_len && shift_len <= bit_size -> assert (nt == lit_num_ty) $ let op = shift_op platform -- Do the shift at type Integer, but shift length is Int. -- Using host's Int is ok even if target's Int has a different size -- because we test that shift_len <= bit_size (which is at most 64) y = x `op` fromInteger shift_len in pure $ Lit $ mkLitNumberWrap platform nt y _ -> mzero -------------------------- floatOp2 :: (Rational -> Rational -> Rational) -> RuleOpts -> Literal -> Literal -> Maybe (Expr CoreBndr) floatOp2 op env (LitFloat f1) (LitFloat f2) = Just (mkFloatVal env (f1 `op` f2)) floatOp2 _ _ _ _ = Nothing -------------------------- floatDecodeOp :: RuleOpts -> Literal -> Maybe CoreExpr floatDecodeOp env (LitFloat ((decodeFloat . 
fromRational @Float) -> (m, e))) = Just $ mkCoreUnboxedTuple [ mkIntVal (roPlatform env) (toInteger m) , mkIntVal (roPlatform env) (toInteger e) ] floatDecodeOp _ _ = Nothing -------------------------- doubleOp2 :: (Rational -> Rational -> Rational) -> RuleOpts -> Literal -> Literal -> Maybe (Expr CoreBndr) doubleOp2 op env (LitDouble f1) (LitDouble f2) = Just (mkDoubleVal env (f1 `op` f2)) doubleOp2 _ _ _ _ = Nothing -------------------------- doubleDecodeOp :: RuleOpts -> Literal -> Maybe CoreExpr doubleDecodeOp env (LitDouble ((decodeFloat . fromRational @Double) -> (m, e))) = Just $ mkCoreUnboxedTuple [ Lit (mkLitInt64Wrap (toInteger m)) , mkIntVal platform (toInteger e) ] where platform = roPlatform env doubleDecodeOp _ _ = Nothing -------------------------- -- | Constant folding rules for fused multiply-add operations. fmaRules :: FMASign -> Width -> [RuleM CoreExpr] fmaRules signs width = [ fmaLit signs width , fmaZero_z signs width , fmaOne signs width ] -- | Compute @a * b + c@ when @a@, @b@, @c@ are all literals. fmaLit :: FMASign -> Width -> RuleM CoreExpr fmaLit signs width = do env <- getRuleOpts [Lit l1, Lit l2, Lit l3] <- getArgs liftMaybe $ op env (convFloating env l1) (convFloating env l2) (convFloating env l3) where op env l1 l2 l3 = case width of W32 | LitFloat x <- l1 , LitFloat y <- l2 , LitFloat z <- l3 -> Just $ mkFloatVal env $ case signs of FMAdd -> x * y + z FMSub -> x * y - z FNMAdd -> negate ( x * y ) + z FNMSub -> negate ( x * y ) - z W64 | LitDouble x <- l1 , LitDouble y <- l2 , LitDouble z <- l3 -> Just $ mkDoubleVal env $ case signs of FMAdd -> x * y + z FMSub -> x * y - z FNMAdd -> negate ( x * y ) + z FNMSub -> negate ( x * y ) - z _ -> Nothing -- | @x * y + 0 = x * y@. fmaZero_z :: FMASign -> Width -> RuleM CoreExpr fmaZero_z signs width = do [x, y, Lit z] <- getArgs let -- TODO: we should additionally check the sign of z. -- FMAdd, FNMAdd: should be -0.0. -- FMSub, FNMSub: should be +0.0. 
ok = case width of W32 | LitFloat 0 <- z -> True W64 | LitDouble 0 <- z -> True _ -> False neg = case width of W32 -> FloatNegOp W64 -> DoubleNegOp _ -> panic "fmaZero_xy: not Float# or Double#" mul = case width of W32 -> FloatMulOp W64 -> DoubleMulOp _ -> panic "fmaZero_z: not Float# or Double#" if ok then return $ case signs of FMAdd -> Var (primOpId mul) `App` x `App` y FMSub -> Var (primOpId mul) `App` x `App` y FNMAdd -> Var (primOpId neg) `App` (Var (primOpId mul) `App` x `App` y) FNMSub -> Var (primOpId neg) `App` (Var (primOpId mul) `App` x `App` y) else mzero -- | @±1 * y + z ==> z ± y@ and @x * ±1 + z ==> z ± x@. fmaOne :: FMASign -> Width -> RuleM CoreExpr fmaOne signs width = do [x, y, z] <- getArgs let posNegOne_maybe :: Rational -> Maybe Bool posNegOne_maybe i | i == 1 = Just False | i == -1 = Just True | otherwise = Nothing ok = case width of W32 | Lit (LitFloat i) <- x , Just sgn <- posNegOne_maybe i -> Just (sgn, y) | Lit (LitFloat i) <- y , Just sgn <- posNegOne_maybe i -> Just (sgn, x) W64 | Lit (LitDouble i) <- x , Just sgn <- posNegOne_maybe i -> Just (sgn, y) | Lit (LitDouble i) <- y , Just sgn <- posNegOne_maybe i -> Just (sgn, x) _ -> Nothing neg = case width of W32 -> FloatNegOp W64 -> DoubleNegOp _ -> panic "fmaOne: not Float# or Double#" add = case width of W32 -> FloatAddOp W64 -> DoubleAddOp _ -> panic "fmaOne: not Float# or Double#" sub = case width of W32 -> FloatSubOp W64 -> DoubleSubOp _ -> panic "fmaOne: not Float# or Double#" case ok of Nothing -> mzero Just (sgn, t) -> return $ if -- t + z | ( signs == FMAdd && sgn == False ) || ( signs == FNMAdd && sgn == True ) -> Var (primOpId add) `App` t `App` z -- - t + z | signs == FMAdd || signs == FNMAdd -> Var (primOpId sub) `App` z `App` t -- t - z | ( signs == FMSub && sgn == False ) || ( signs == FNMSub && sgn == True ) -> Var (primOpId sub) `App` t `App` z -- - t - z | signs == FMSub || signs == FNMSub -> Var (primOpId neg) `App` (Var (primOpId add) `App` t `App` z) | otherwise -> 
pprPanic "fmaOne: non-exhaustive pattern match" $ vcat [ text "signs:" <+> text (show signs) , text "sign:" <+> ppr sgn ] -------------------------- {- Note [The litEq rule: converting equality to case] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This stuff turns n ==# 3# into case n of 3# -> True m -> False This is a Good Thing, because it allows case-of case things to happen, and case-default absorption to happen. For example: if (n ==# 3#) || (n ==# 4#) then e1 else e2 will transform to case n of 3# -> e1 4# -> e1 m -> e2 (modulo the usual precautions to avoid duplicating e1) -} litEq :: Bool -- True <=> equality, False <=> inequality -> RuleM CoreExpr litEq is_eq = msum [ do [Lit lit, expr] <- getArgs platform <- getPlatform do_lit_eq platform lit expr , do [expr, Lit lit] <- getArgs platform <- getPlatform do_lit_eq platform lit expr ] where do_lit_eq platform lit expr = do guard (not (litIsLifted lit)) return (mkWildCase expr (unrestricted $ literalType lit) intPrimTy [ Alt DEFAULT [] val_if_neq , Alt (LitAlt lit) [] val_if_eq]) where val_if_eq | is_eq = trueValInt platform | otherwise = falseValInt platform val_if_neq | is_eq = falseValInt platform | otherwise = trueValInt platform -- | Check if there is comparison with minBound or maxBound, that is -- always true or false. For instance, an Int cannot be smaller than its -- minBound, so we can replace such comparison with False. 
boundsCmp :: Comparison -> RuleM CoreExpr boundsCmp op = do platform <- getPlatform [a, b] <- getArgs liftMaybe $ mkRuleFn platform op a b data Comparison = Gt | Ge | Lt | Le mkRuleFn :: Platform -> Comparison -> CoreExpr -> CoreExpr -> Maybe CoreExpr mkRuleFn platform Gt (Lit lit) _ | isMinBound platform lit = Just $ falseValInt platform mkRuleFn platform Le (Lit lit) _ | isMinBound platform lit = Just $ trueValInt platform mkRuleFn platform Ge _ (Lit lit) | isMinBound platform lit = Just $ trueValInt platform mkRuleFn platform Lt _ (Lit lit) | isMinBound platform lit = Just $ falseValInt platform mkRuleFn platform Ge (Lit lit) _ | isMaxBound platform lit = Just $ trueValInt platform mkRuleFn platform Lt (Lit lit) _ | isMaxBound platform lit = Just $ falseValInt platform mkRuleFn platform Gt _ (Lit lit) | isMaxBound platform lit = Just $ falseValInt platform mkRuleFn platform Le _ (Lit lit) | isMaxBound platform lit = Just $ trueValInt platform mkRuleFn _ _ _ _ = Nothing -- | Create an Int literal expression while ensuring the given Integer is in the -- target Int range int8Result :: Integer -> Maybe CoreExpr int8Result result = Just (int8Result' result) int8Result' :: Integer -> CoreExpr int8Result' result = Lit (mkLitInt8Wrap result) -- | Create an Int literal expression while ensuring the given Integer is in the -- target Int range int16Result :: Integer -> Maybe CoreExpr int16Result result = Just (int16Result' result) int16Result' :: Integer -> CoreExpr int16Result' result = Lit (mkLitInt16Wrap result) -- | Create an Int literal expression while ensuring the given Integer is in the -- target Int range int32Result :: Integer -> Maybe CoreExpr int32Result result = Just (int32Result' result) int32Result' :: Integer -> CoreExpr int32Result' result = Lit (mkLitInt32Wrap result) intResult :: Platform -> Integer -> Maybe CoreExpr intResult platform result = Just (intResult' platform result) intResult' :: Platform -> Integer -> CoreExpr intResult' platform result = 
Lit (mkLitIntWrap platform result) -- | Create an unboxed pair of an Int literal expression, ensuring the given -- Integer is in the target Int range and the corresponding overflow flag -- (@0#@/@1#@) if it wasn't. intCResult :: Platform -> Integer -> Maybe CoreExpr intCResult platform result = Just (mkCoreUnboxedTuple [Lit lit, Lit c]) where (lit, b) = mkLitIntWrapC platform result c = if b then onei platform else zeroi platform -- | Create a Word literal expression while ensuring the given Integer is in the -- target Word range word8Result :: Integer -> Maybe CoreExpr word8Result result = Just (word8Result' result) word8Result' :: Integer -> CoreExpr word8Result' result = Lit (mkLitWord8Wrap result) -- | Create a Word literal expression while ensuring the given Integer is in the -- target Word range word16Result :: Integer -> Maybe CoreExpr word16Result result = Just (word16Result' result) word16Result' :: Integer -> CoreExpr word16Result' result = Lit (mkLitWord16Wrap result) -- | Create a Word literal expression while ensuring the given Integer is in the -- target Word range word32Result :: Integer -> Maybe CoreExpr word32Result result = Just (word32Result' result) word32Result' :: Integer -> CoreExpr word32Result' result = Lit (mkLitWord32Wrap result) -- | Create a Word literal expression while ensuring the given Integer is in the -- target Word range wordResult :: Platform -> Integer -> Maybe CoreExpr wordResult platform result = Just (wordResult' platform result) wordResult' :: Platform -> Integer -> CoreExpr wordResult' platform result = Lit (mkLitWordWrap platform result) -- | Create an unboxed pair of a Word literal expression, ensuring the given -- Integer is in the target Word range and the corresponding carry flag -- (@0#@/@1#@) if it wasn't. 
wordCResult :: Platform -> Integer -> Maybe CoreExpr wordCResult platform result = Just (mkCoreUnboxedTuple [Lit lit, Lit c]) where (lit, b) = mkLitWordWrapC platform result c = if b then onei platform else zeroi platform int64Result :: Integer -> Maybe CoreExpr int64Result result = Just (int64Result' result) int64Result' :: Integer -> CoreExpr int64Result' result = Lit (mkLitInt64Wrap result) word64Result :: Integer -> Maybe CoreExpr word64Result result = Just (word64Result' result) word64Result' :: Integer -> CoreExpr word64Result' result = Lit (mkLitWord64Wrap result) -- | 'ambient (primop x) = x', but not necessarily 'primop (ambient x) = x'. semiInversePrimOp :: PrimOp -> RuleM CoreExpr semiInversePrimOp primop = do [Var primop_id `App` e] <- getArgs matchPrimOpId primop primop_id return e subsumesPrimOp :: PrimOp -> PrimOp -> RuleM CoreExpr this `subsumesPrimOp` that = do [Var primop_id `App` e] <- getArgs matchPrimOpId that primop_id return (Var (primOpId this) `App` e) subsumedByPrimOp :: PrimOp -> RuleM CoreExpr subsumedByPrimOp primop = do [e@(Var primop_id `App` _)] <- getArgs matchPrimOpId primop primop_id return e -- | Transform `extendWordN (narrowWordN x)` into `x .&. 0xFF..FF` extendNarrowPassthrough :: PrimOp -> Integer -> RuleM CoreExpr extendNarrowPassthrough narrow_primop n = do [Var primop_id `App` x] <- getArgs matchPrimOpId narrow_primop primop_id return (Var (primOpId WordAndOp) `App` x `App` Lit (LitNumber LitNumWord n)) -- | narrow subsumes bitwise `and` with full mask (cf #16402): -- -- narrowN (x .&. m) -- m .&. (2^N-1) = 2^N-1 -- ==> narrowN x -- -- e.g. narrow16 (x .&. 0xFFFF) -- ==> narrow16 x -- narrowSubsumesAnd :: PrimOp -> PrimOp -> Int -> RuleM CoreExpr narrowSubsumesAnd and_primop narrw n = do [Var primop_id `App` x `App` y] <- getArgs matchPrimOpId and_primop primop_id let mask = bit n -1 g v (Lit (LitNumber _ m)) = do guard (m .&. 
mask == mask) return (Var (primOpId narrw) `App` v) g _ _ = mzero g x y <|> g y x idempotent :: RuleM CoreExpr idempotent = do [e1, e2] <- getArgs guard $ cheapEqExpr e1 e2 return e1 -- | Match -- (op (op v e) e) -- or (op e (op v e)) -- or (op (op e v) e) -- or (op e (op e v)) -- and return the innermost (op v e) or (op e v). sameArgIdempotentCommut :: PrimOp -> RuleM CoreExpr sameArgIdempotentCommut op = do [a,b] <- getArgs case (a,b) of (is_binop op -> Just (e1,e2), e3) | cheapEqExpr e2 e3 -> return a | cheapEqExpr e1 e3 -> return a (e3, is_binop op -> Just (e1,e2)) | cheapEqExpr e2 e3 -> return b | cheapEqExpr e1 e3 -> return b _ -> mzero {- Note [Guarding against silly shifts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this code: import Data.Bits( (.|.), shiftL ) chunkToBitmap :: [Bool] -> Word32 chunkToBitmap chunk = foldr (.|.) 0 [ 1 `shiftL` n | (True,n) <- zip chunk [0..] ] This optimises to: Shift.$wgo = \ (w_sCS :: GHC.Prim.Int#) (w1_sCT :: [GHC.Types.Bool]) -> case w1_sCT of _ { [] -> 0##; : x_aAW xs_aAX -> case x_aAW of _ { GHC.Types.False -> case w_sCS of wild2_Xh { __DEFAULT -> Shift.$wgo (GHC.Prim.+# wild2_Xh 1) xs_aAX; 9223372036854775807 -> 0## }; GHC.Types.True -> case GHC.Prim.>=# w_sCS 64 of _ { GHC.Types.False -> case w_sCS of wild3_Xh { __DEFAULT -> case Shift.$wgo (GHC.Prim.+# wild3_Xh 1) xs_aAX of ww_sCW { __DEFAULT -> GHC.Prim.or# (GHC.Prim.narrow32Word# (GHC.Prim.uncheckedShiftL# 1## wild3_Xh)) ww_sCW }; 9223372036854775807 -> GHC.Prim.narrow32Word# !!!!--> (GHC.Prim.uncheckedShiftL# 1## 9223372036854775807) }; GHC.Types.True -> case w_sCS of wild3_Xh { __DEFAULT -> Shift.$wgo (GHC.Prim.+# wild3_Xh 1) xs_aAX; 9223372036854775807 -> 0## } } } } Note the massive shift on line "!!!!". It can't happen, because we've checked that w < 64, but the optimiser didn't spot that. We DO NOT want to constant-fold this! 
Moreover, if the programmer writes (n `uncheckedShiftL` 9223372036854775807), we can't constant fold it, but if it gets to the assembler we get Error: operand type mismatch for `shl' So the best thing to do is to rewrite the shift with a call to error, when the second arg is large. However, in general we cannot do this; consider this case let x = I# (uncheckedIShiftL# n 80) in ... Here x contains an invalid shift and consequently we would like to rewrite it as follows: let x = I# (error "invalid shift") in ... This was originally done in the fix to #16449 but this breaks the let-can-float invariant (see Note [Core let-can-float invariant] in GHC.Core) as noted in #16742. For the reasons discussed under "NoEffect" in Note [Classifying primop effects] (in GHC.Builtin.PrimOps) there is no safe way to rewrite the argument of I# such that it bottoms. Consequently we instead take advantage of the fact that the result of a large shift is unspecified (see associated documentation in primops.txt.pp) and transform the invalid shift into an "obviously incorrect" value. There are two cases: - Shifting fixed-width things: the primops IntSll, Sll, etc These are handled by shiftRule. We are happy to shift by any amount up to wordSize but no more. - Shifting Bignums (Integer, Natural): these are handled by bignum_shift. Here we could in principle shift by any amount, but we arbitrary limit the shift to 4 bits; in particular we do not want shift by a huge amount, which can happen in code like that above. The two cases are more different in their code paths that is comfortable, but that is only a historical accident. 
************************************************************************ * * \subsection{Vaguely generic functions} * * ************************************************************************ -} mkBasicRule :: Name -> Int -> RuleM CoreExpr -> CoreRule -- Gives the Rule the same name as the primop itself mkBasicRule op_name n_args rm = BuiltinRule { ru_name = occNameFS (nameOccName op_name), ru_fn = op_name, ru_nargs = n_args, ru_try = runRuleM rm } newtype RuleM r = RuleM { runRuleM :: RuleOpts -> InScopeEnv -> Id -> [CoreExpr] -> Maybe r } deriving (Functor) instance Applicative RuleM where pure x = RuleM $ \_ _ _ _ -> Just x (<*>) = ap instance Monad RuleM where RuleM f >>= g = RuleM $ \env iu fn args -> case f env iu fn args of Nothing -> Nothing Just r -> runRuleM (g r) env iu fn args instance MonadFail RuleM where fail _ = mzero instance Alternative RuleM where empty = RuleM $ \_ _ _ _ -> Nothing RuleM f1 <|> RuleM f2 = RuleM $ \env iu fn args -> f1 env iu fn args <|> f2 env iu fn args instance MonadPlus RuleM getPlatform :: RuleM Platform getPlatform = roPlatform <$> getRuleOpts getWordSize :: RuleM PlatformWordSize getWordSize = platformWordSize <$> getPlatform getRuleOpts :: RuleM RuleOpts getRuleOpts = RuleM $ \rule_opts _ _ _ -> Just rule_opts liftMaybe :: Maybe a -> RuleM a liftMaybe Nothing = mzero liftMaybe (Just x) = return x liftLit :: (Literal -> Literal) -> RuleM CoreExpr liftLit f = liftLitPlatform (const f) liftLitPlatform :: (Platform -> Literal -> Literal) -> RuleM CoreExpr liftLitPlatform f = do platform <- getPlatform [Lit lit] <- getArgs return $ Lit (f platform lit) removeOp32 :: RuleM CoreExpr removeOp32 = do platform <- getPlatform case platformWordSize platform of PW4 -> do [e] <- getArgs return e PW8 -> mzero getArgs :: RuleM [CoreExpr] getArgs = RuleM $ \_ _ _ args -> Just args getInScopeEnv :: RuleM InScopeEnv getInScopeEnv = RuleM $ \_ iu _ _ -> Just iu getFunction :: RuleM Id getFunction = RuleM $ \_ _ fn _ -> Just fn isLiteral :: 
CoreExpr -> RuleM Literal isLiteral e = do env <- getInScopeEnv case exprIsLiteral_maybe env e of Nothing -> mzero Just l -> pure l -- | Match BigNat#, Integer and Natural literals isBignumLiteral :: CoreExpr -> RuleM Integer isBignumLiteral e = isNumberLiteral e <|> isIntegerLiteral e <|> isNaturalLiteral e -- | Match numeric literals isNumberLiteral :: CoreExpr -> RuleM Integer isNumberLiteral e = isLiteral e >>= \case LitNumber _ x -> pure x _ -> mzero -- | Match the application of a DataCon to a numeric literal. -- -- Can be used to match e.g.: -- IS 123# -- IP bigNatLiteral -- W# 123## isLitNumConApp :: CoreExpr -> RuleM (DataCon,Integer) isLitNumConApp e = do env <- getInScopeEnv case exprIsConApp_maybe env e of Just (_env,_fb,dc,_tys,[arg]) -> case exprIsLiteral_maybe env arg of Just (LitNumber _ i) -> pure (dc,i) _ -> mzero _ -> mzero isIntegerLiteral :: CoreExpr -> RuleM Integer isIntegerLiteral e = do (dc,i) <- isLitNumConApp e if | dc == integerISDataCon -> pure i | dc == integerINDataCon -> pure (negate i) | dc == integerIPDataCon -> pure i | otherwise -> mzero isBigIntegerLiteral :: CoreExpr -> RuleM Integer isBigIntegerLiteral e = do (dc,i) <- isLitNumConApp e if | dc == integerINDataCon -> pure (negate i) | dc == integerIPDataCon -> pure i | otherwise -> mzero isNaturalLiteral :: CoreExpr -> RuleM Integer isNaturalLiteral e = do (dc,i) <- isLitNumConApp e if | dc == naturalNSDataCon -> pure i | dc == naturalNBDataCon -> pure i | otherwise -> mzero -- return the n-th argument of this rule, if it is a literal -- argument indices start from 0 getLiteral :: Int -> RuleM Literal getLiteral n = RuleM $ \_ _ _ exprs -> case drop n exprs of (Lit l:_) -> Just l _ -> Nothing unaryLit :: (RuleOpts -> Literal -> Maybe CoreExpr) -> RuleM CoreExpr unaryLit op = do env <- getRuleOpts [Lit l] <- getArgs liftMaybe $ op env (convFloating env l) binaryLit :: (RuleOpts -> Literal -> Literal -> Maybe CoreExpr) -> RuleM CoreExpr binaryLit op = do env <- getRuleOpts [Lit 
l1, Lit l2] <- getArgs liftMaybe $ op env (convFloating env l1) (convFloating env l2) binaryCmpLit :: (forall a . Ord a => a -> a -> Bool) -> RuleM CoreExpr binaryCmpLit op = do platform <- getPlatform binaryLit (\_ -> cmpOp platform op) leftIdentity :: Literal -> RuleM CoreExpr leftIdentity id_lit = leftIdentityPlatform (const id_lit) rightIdentity :: Literal -> RuleM CoreExpr rightIdentity id_lit = rightIdentityPlatform (const id_lit) identity :: Literal -> RuleM CoreExpr identity lit = leftIdentity lit `mplus` rightIdentity lit leftIdentityPlatform :: (Platform -> Literal) -> RuleM CoreExpr leftIdentityPlatform id_lit = do platform <- getPlatform [Lit l1, e2] <- getArgs guard $ l1 == id_lit platform return e2 -- | Left identity rule for PrimOps like 'IntAddC' and 'WordAddC', where, in -- addition to the result, we have to indicate that no carry/overflow occurred. leftIdentityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr leftIdentityCPlatform id_lit = do platform <- getPlatform [Lit l1, e2] <- getArgs guard $ l1 == id_lit platform let no_c = Lit (zeroi platform) return (mkCoreUnboxedTuple [e2, no_c]) rightIdentityPlatform :: (Platform -> Literal) -> RuleM CoreExpr rightIdentityPlatform id_lit = do platform <- getPlatform [e1, Lit l2] <- getArgs guard $ l2 == id_lit platform return e1 -- | Right identity rule for PrimOps like 'IntSubC' and 'WordSubC', where, in -- addition to the result, we have to indicate that no carry/overflow occurred. 
rightIdentityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr rightIdentityCPlatform id_lit = do platform <- getPlatform [e1, Lit l2] <- getArgs guard $ l2 == id_lit platform let no_c = Lit (zeroi platform) return (mkCoreUnboxedTuple [e1, no_c]) identityPlatform :: (Platform -> Literal) -> RuleM CoreExpr identityPlatform lit = leftIdentityPlatform lit `mplus` rightIdentityPlatform lit -- | Identity rule for PrimOps like 'IntAddC' and 'WordAddC', where, in addition -- to the result, we have to indicate that no carry/overflow occurred. identityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr identityCPlatform lit = leftIdentityCPlatform lit `mplus` rightIdentityCPlatform lit leftZero :: RuleM CoreExpr leftZero = do [Lit l1, _] <- getArgs guard $ isZeroLit l1 return $ Lit l1 rightZero :: RuleM CoreExpr rightZero = do [_, Lit l2] <- getArgs guard $ isZeroLit l2 return $ Lit l2 zeroElem :: RuleM CoreExpr zeroElem = leftZero `mplus` rightZero equalArgs :: RuleM () equalArgs = do [e1, e2] <- getArgs guard $ e1 `cheapEqExpr` e2 nonZeroLit :: Int -> RuleM () nonZeroLit n = getLiteral n >>= guard . not . isZeroLit oneLit :: Int -> RuleM () oneLit n = getLiteral n >>= guard . isOneLit lift_bits_op :: forall a. (Num a, FiniteBits a) => (a -> Integer) -> RuleM CoreExpr lift_bits_op op = do platform <- getPlatform [Lit (LitNumber _ l)] <- getArgs pure $ mkWordLit platform $ op (fromInteger l :: a) pop_count :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr pop_count = lift_bits_op @a (fromIntegral . popCount) ctz :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr ctz = lift_bits_op @a (fromIntegral . countTrailingZeros) clz :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr clz = lift_bits_op @a (fromIntegral . countLeadingZeros) -- When excess precision is not requested, cut down the precision of the -- Rational value to that of Float/Double. We confuse host architecture -- and target architecture here, but it's convenient (and wrong :-). 
convFloating :: RuleOpts -> Literal -> Literal convFloating env (LitFloat f) | not (roExcessRationalPrecision env) = LitFloat (toRational (fromRational f :: Float )) convFloating env (LitDouble d) | not (roExcessRationalPrecision env) = LitDouble (toRational (fromRational d :: Double)) convFloating _ l = l guardFloatDiv :: RuleM () guardFloatDiv = do [Lit (LitFloat f1), Lit (LitFloat f2)] <- getArgs guard $ (f1 /=0 || f2 > 0) -- see Note [negative zero] && f2 /= 0 -- avoid NaN and Infinity/-Infinity guardDoubleDiv :: RuleM () guardDoubleDiv = do [Lit (LitDouble d1), Lit (LitDouble d2)] <- getArgs guard $ (d1 /=0 || d2 > 0) -- see Note [negative zero] && d2 /= 0 -- avoid NaN and Infinity/-Infinity -- Note [negative zero] -- ~~~~~~~~~~~~~~~~~~~~ -- Avoid (0 / -d), otherwise 0/(-1) reduces to -- zero, but we might want to preserve the negative zero here which -- is representable in Float/Double but not in (normalised) -- Rational. (#3676) Perhaps we should generate (0 :% (-1)) instead? strengthReduction :: Literal -> PrimOp -> RuleM CoreExpr strengthReduction two_lit add_op = do -- Note [Strength reduction] arg <- msum [ do [arg, Lit mult_lit] <- getArgs guard (mult_lit == two_lit) return arg , do [Lit mult_lit, arg] <- getArgs guard (mult_lit == two_lit) return arg ] return $ Var (primOpId add_op) `App` arg `App` arg -- Note [Strength reduction] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- This rule turns floating point multiplications of the form 2.0 * x and -- x * 2.0 into x + x addition, because addition costs less than multiplication. -- See #7116 -- Note [What's true and false] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- trueValInt and falseValInt represent true and false values returned by -- comparison primops for Char, Int, Word, Integer, Double, Float and Addr. -- True is represented as an unboxed 1# literal, while false is represented -- as 0# literal. 
-- We still need Bool data constructors (True and False) to use in a rule -- for constant folding of equal Strings trueValInt, falseValInt :: Platform -> Expr CoreBndr trueValInt platform = Lit $ onei platform -- see Note [What's true and false] falseValInt platform = Lit $ zeroi platform trueValBool, falseValBool :: Expr CoreBndr trueValBool = Var trueDataConId -- see Note [What's true and false] falseValBool = Var falseDataConId ltVal, eqVal, gtVal :: Expr CoreBndr ltVal = Var ordLTDataConId eqVal = Var ordEQDataConId gtVal = Var ordGTDataConId mkIntVal :: Platform -> Integer -> Expr CoreBndr mkIntVal platform i = Lit (mkLitInt platform i) mkFloatVal :: RuleOpts -> Rational -> Expr CoreBndr mkFloatVal env f = Lit (convFloating env (LitFloat f)) mkDoubleVal :: RuleOpts -> Rational -> Expr CoreBndr mkDoubleVal env d = Lit (convFloating env (LitDouble d)) matchPrimOpId :: PrimOp -> Id -> RuleM () matchPrimOpId op id = do op' <- liftMaybe $ isPrimOpId_maybe id guard $ op == op' {- ************************************************************************ * * \subsection{Special rules for seq, tagToEnum, dataToTag} * * ************************************************************************ Note [tagToEnum#] ~~~~~~~~~~~~~~~~~ Nasty check to ensure that tagToEnum# is applied to a type that is an enumeration TyCon. Unification may refine the type later, but this check won't see that, alas. It's crude but it works. Here's are two cases that should fail f :: forall a. a f = tagToEnum# 0 -- Can't do tagToEnum# at a type variable g :: Int g = tagToEnum# 0 -- Int is not an enumeration We used to make this check in the type inference engine, but it's quite ugly to do so, because the delayed constraint solving means that we don't really know what's going on until the end. It's very much a corner case because we don't expect the user to call tagToEnum# at all; we merely generate calls in derived instances of Enum. 
So we compromise: a rewrite rule rewrites a bad instance of tagToEnum# to an error call, and emits a warning. -} tagToEnumRule :: RuleM CoreExpr -- If data T a = A | B | C -- then tagToEnum# (T ty) 2# --> B ty tagToEnumRule = do [Type ty, Lit (LitNumber LitNumInt i)] <- getArgs case splitTyConApp_maybe ty of Just (tycon, tc_args) | isEnumerationTyCon tycon -> do let tag = fromInteger i correct_tag dc = (dataConTagZ dc) == tag (dc:rest) <- return $ filter correct_tag (tyConDataCons_maybe tycon `orElse` []) massert (null rest) return $ mkTyApps (Var (dataConWorkId dc)) tc_args -- See Note [tagToEnum#] _ -> warnPprTrace True "tagToEnum# on non-enumeration type" (ppr ty) $ return $ mkImpossibleExpr ty "tagToEnum# on non-enumeration type" ------------------------------ dataToTagRule :: RuleM CoreExpr -- Used for both dataToTagSmall# and dataToTagLarge#. -- See Note [DataToTag overview] in GHC.Tc.Instance.Class, -- particularly wrinkle DTW5. dataToTagRule = a `mplus` b where -- dataToTag (tagToEnum x) ==> x a = do [Type _lev, Type ty1, Var tag_to_enum `App` Type ty2 `App` tag] <- getArgs guard $ tag_to_enum `hasKey` tagToEnumKey guard $ ty1 `eqType` ty2 return tag -- dataToTag (K e1 e2) ==> tag-of K -- This also works (via exprIsConApp_maybe) for -- dataToTag x -- where x's unfolding is a constructor application b = do platform <- getPlatform [_lev, _ty, val_arg] <- getArgs in_scope <- getInScopeEnv (_,floats, dc,_,_) <- liftMaybe $ exprIsConApp_maybe in_scope val_arg massert (not (isNewTyCon (dataConTyCon dc))) return $ wrapFloats floats (mkIntVal platform (toInteger (dataConTagZ dc))) {- ********************************************************************* * * unsafeEqualityProof * * ********************************************************************* -} -- unsafeEqualityProof k t t ==> UnsafeRefl (Refl t) -- That is, if the two types are equal, it's not unsafe! 
unsafeEqualityProofRule :: RuleM CoreExpr
-- Replace a proof term by the real data constructor when the two type
-- arguments are (now) visibly equal; see the comment just above.
unsafeEqualityProofRule
  = do { [Type rep, Type t1, Type t2] <- getArgs
       ; guard (t1 `eqType` t2)
       ; fn <- getFunction
       ; let (_, ue) = splitForAllTyCoVars (idType fn)
             tc      = tyConAppTyCon ue  -- tycon:    UnsafeEquality
             (dc:_)  = tyConDataCons tc  -- data con: UnsafeRefl
             -- UnsafeRefl :: forall (r :: RuntimeRep) (a :: TYPE r).
             --               UnsafeEquality r a a
       ; return (mkTyApps (Var (dataConWrapId dc)) [rep, t1]) }

{- *********************************************************************
*                                                                      *
                 Rules for spark#
*                                                                      *
********************************************************************* -}

-- spark# :: forall a s . a -> State# s -> (# State# s, a #)
sparkRule :: RuleM CoreExpr
sparkRule = do -- reduce on HNF
  [Type _ty_a, Type _ty_s, a, s] <- getArgs
  guard $ exprIsHNF a
  return $ mkCoreUnboxedTuple [s, a]
  -- XXX perhaps we shouldn't do this, because a spark eliminated by
  -- this rule won't be counted as a dud at runtime?

{- ************************************************************************
*                                                                         *
\subsection{Built in rules}
*                                                                         *
************************************************************************

Note [Scoping for Builtin rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When compiling a (base-package) module that defines one of the
functions mentioned in the RHS of a built-in rule, there's a danger
that we'll see

        f = ...(eq String x)....

        ....and lower down...

        eqString = ...

Then a rewrite would give

        f = ...(eqString x)...
        ....and lower down...
        eqString = ...

and lo, eqString is not in scope.  This only really matters when we
get to code generation.  But the occurrence analyser does a GlomBinds
step when necessary, that does a new SCC analysis on the whole set of
bindings (see occurAnalysePgm), which sorts out the dependency, so all
is fine.
-}

builtinRules :: [CoreRule]
-- Rules for non-primops that can't be expressed using a RULE pragma
builtinRules
  = [BuiltinRule { ru_name = fsLit "CStringFoldrLit",
                   ru_fn = unpackCStringFoldrName,
                   ru_nargs = 4, ru_try = match_cstring_foldr_lit_C },
     BuiltinRule { ru_name = fsLit "CStringFoldrLitUtf8",
                   ru_fn = unpackCStringFoldrUtf8Name,
                   ru_nargs = 4, ru_try = match_cstring_foldr_lit_utf8 },
     BuiltinRule { ru_name = fsLit "CStringAppendLit",
                   ru_fn = unpackCStringAppendName,
                   ru_nargs = 2, ru_try = match_cstring_append_lit_C },
     BuiltinRule { ru_name = fsLit "CStringAppendLitUtf8",
                   ru_fn = unpackCStringAppendUtf8Name,
                   ru_nargs = 2, ru_try = match_cstring_append_lit_utf8 },
     BuiltinRule { ru_name = fsLit "EqString", ru_fn = eqStringName,
                   ru_nargs = 2, ru_try = match_eq_string },
     BuiltinRule { ru_name = fsLit "CStringLength", ru_fn = cstringLengthName,
                   ru_nargs = 1, ru_try = match_cstring_length },
     BuiltinRule { ru_name = fsLit "Inline", ru_fn = inlineIdName,
                   ru_nargs = 2, ru_try = \_ _ _ -> match_inline },
     mkBasicRule unsafeEqualityProofName 3 unsafeEqualityProofRule,
     -- divInt#/modInt# by a non-zero literal: constant-fold, or when the
     -- divisor is an exact power of two, rewrite to a shift/mask.
     mkBasicRule divIntName 2 $ msum
        [ nonZeroLit 1 >> binaryLit (intOp2 div)
        , leftZero
        , do
          [arg, Lit (LitNumber LitNumInt d)] <- getArgs
          Just n <- return $ exactLog2 d
          platform <- getPlatform
          return $ Var (primOpId IntSraOp) `App` arg `App` mkIntVal platform n
        ],
     mkBasicRule modIntName 2 $ msum
        [ nonZeroLit 1 >> binaryLit (intOp2 mod)
        , leftZero
        , do
          [arg, Lit (LitNumber LitNumInt d)] <- getArgs
          Just _ <- return $ exactLog2 d
          platform <- getPlatform
          return $ Var (primOpId IntAndOp)
            `App` arg `App` mkIntVal platform (d - 1)
        ]
     ]
 ++ builtinBignumRules
{-# NOINLINE builtinRules #-}
-- there is no benefit to inlining these yet, despite this, GHC produces
-- unfoldings for this regardless since the floated list entries look small.
builtinBignumRules :: [CoreRule]
-- Constant-folding rules for the bignum (Integer/Natural) operations.
-- Every rule is gated on roBignumRules via mkRule below.
builtinBignumRules =
  [ -- conversions
    lit_to_integer "Word# -> Integer"   integerFromWordName
  , lit_to_integer "Int64# -> Integer"  integerFromInt64Name
  , lit_to_integer "Word64# -> Integer" integerFromWord64Name
  , lit_to_integer "Natural -> Integer" integerFromNaturalName
  , integer_to_lit "Integer -> Word# (wrap)"   integerToWordName mkWordLitWrap
  , integer_to_lit "Integer -> Int# (wrap)"    integerToIntName  mkIntLitWrap
  , integer_to_lit "Integer -> Word64# (wrap)" integerToWord64Name (\_ -> mkWord64LitWord64 . fromInteger)
  , integer_to_lit "Integer -> Int64# (wrap)"  integerToInt64Name  (\_ -> mkInt64LitInt64 . fromInteger)
  , integer_to_lit "Integer -> Float#"  integerToFloatName  (\_ -> mkFloatLitFloat . fromInteger)
  , integer_to_lit "Integer -> Double#" integerToDoubleName (\_ -> mkDoubleLitDouble . fromInteger)
  , integer_to_natural "Integer -> Natural (clamp)" integerToNaturalClampName False True
  , integer_to_natural "Integer -> Natural (wrap)"  integerToNaturalName False False
  , integer_to_natural "Integer -> Natural (throw)" integerToNaturalThrowName True False
  , natural_to_word "Natural -> Word# (wrap)" naturalToWordName

    -- comparisons (return an unlifted Int#)
  , bignum_bin_pred "bigNatEq#" bignatEqName (==)

    -- comparisons (return an Ordering)
  , bignum_compare "bignatCompare" bignatCompareName
  , bignum_compare "bignatCompareWord#" bignatCompareWordName

    -- binary operations
  , integer_binop "integerAdd" integerAddName (+)
  , integer_binop "integerSub" integerSubName (-)
  , integer_binop "integerMul" integerMulName (*)
  , integer_binop "integerGcd" integerGcdName gcd
  , integer_binop "integerLcm" integerLcmName lcm
  , integer_binop "integerAnd" integerAndName (.&.)
  , integer_binop "integerOr"  integerOrName (.|.)
  , integer_binop "integerXor" integerXorName xor
  , natural_binop "naturalAdd" naturalAddName (+)
  , natural_binop "naturalMul" naturalMulName (*)
  , natural_binop "naturalGcd" naturalGcdName gcd
  , natural_binop "naturalLcm" naturalLcmName lcm
  , natural_binop "naturalAnd" naturalAndName (.&.)
  , natural_binop "naturalOr"  naturalOrName (.|.)
  , natural_binop "naturalXor" naturalXorName xor

    -- Natural subtraction: it's a binop but it can fail because of underflow so
    -- we have several primitives to handle here.
  , natural_sub "naturalSubUnsafe" naturalSubUnsafeName
  , natural_sub "naturalSubThrow" naturalSubThrowName
  , mkRule "naturalSub" naturalSubName 2 $ do
      [a0,a1] <- getArgs
      x <- isNaturalLiteral a0
      y <- isNaturalLiteral a1
      -- return an unboxed sum: (# (# #) | Natural #)
      let ret n v = pure $ mkCoreUnboxedSum 2 n [unboxedUnitTy,naturalTy] v
      platform <- getPlatform
      if x < y
          then ret 1 unboxedUnitExpr
          else ret 2 $ mkNaturalExpr platform (x - y)

    -- unary operations
  , bignum_unop "integerNegate" integerNegateName mkIntegerExpr negate
  , bignum_unop "integerAbs" integerAbsName mkIntegerExpr abs
  , bignum_unop "integerComplement" integerComplementName mkIntegerExpr complement
  , bignum_popcount "integerPopCount" integerPopCountName mkLitIntWrap
  , bignum_popcount "naturalPopCount" naturalPopCountName mkLitWordWrap

    -- Bits.bit
  , bignum_bit "integerBit" integerBitName mkIntegerExpr
  , bignum_bit "naturalBit" naturalBitName mkNaturalExpr

    -- Bits.testBit
  , bignum_testbit "integerTestBit" integerTestBitName
  , bignum_testbit "naturalTestBit" naturalTestBitName

    -- Bits.shift
  , bignum_shift "integerShiftL" integerShiftLName shiftL mkIntegerExpr
  , bignum_shift "integerShiftR" integerShiftRName shiftR mkIntegerExpr
  , bignum_shift "naturalShiftL" naturalShiftLName shiftL mkNaturalExpr
  , bignum_shift "naturalShiftR" naturalShiftRName shiftR mkNaturalExpr

    -- division
  , divop_one  "integerQuot"    integerQuotName    quot    mkIntegerExpr
  , divop_one  "integerRem"     integerRemName     rem     mkIntegerExpr
  , divop_one  "integerDiv"     integerDivName     div     mkIntegerExpr
  , divop_one  "integerMod"     integerModName     mod     mkIntegerExpr
  , divop_both "integerDivMod"  integerDivModName  divMod  mkIntegerExpr
  , divop_both "integerQuotRem" integerQuotRemName quotRem mkIntegerExpr
  , divop_one  "naturalQuot"    naturalQuotName    quot    mkNaturalExpr
  , divop_one  "naturalRem"     naturalRemName     rem     mkNaturalExpr
  , divop_both "naturalQuotRem" naturalQuotRemName quotRem mkNaturalExpr

    -- conversions from Rational for Float/Double literals
  , rational_to "rationalToFloat"  rationalToFloatName  mkFloatExpr
  , rational_to "rationalToDouble" rationalToDoubleName mkDoubleExpr

    -- conversions from Integer for Float/Double literals
  , integer_encode_float "integerEncodeFloat"  integerEncodeFloatName  mkFloatLitFloat
  , integer_encode_float "integerEncodeDouble" integerEncodeDoubleName mkDoubleLitDouble
  ]
  where
    -- Shared wrapper: builds a BuiltinRule that only fires when bignum
    -- constant-folding is enabled in the rule options.
    mkRule str name nargs f = BuiltinRule
      { ru_name = fsLit str
      , ru_fn = name
      , ru_nargs = nargs
      , ru_try = runRuleM $ do
          env <- getRuleOpts
          guard (roBignumRules env)
          f
      }

    integer_to_lit str name convert = mkRule str name 1 $ do
      [a0] <- getArgs
      platform <- getPlatform
      -- we only match on Big Integer literals. Small literals
      -- are matched by the "Int# -> Integer -> *" rules
      x <- isBigIntegerLiteral a0
      pure (convert platform x)

    natural_to_word str name = mkRule str name 1 $ do
      [a0] <- getArgs
      n <- isNaturalLiteral a0
      platform <- getPlatform
      pure (Lit (mkLitWordWrap platform n))

    -- thrw/clamp select the behaviour for negative inputs:
    -- throw at runtime (don't fold), clamp to 0, or wrap via abs.
    integer_to_natural str name thrw clamp = mkRule str name 1 $ do
      [a0] <- getArgs
      x <- isIntegerLiteral a0
      platform <- getPlatform
      if | x >= 0    -> pure $ mkNaturalExpr platform x
         | thrw      -> mzero
         | clamp     -> pure $ mkNaturalExpr platform 0       -- clamp to 0
         | otherwise -> pure $ mkNaturalExpr platform (abs x) -- negate/wrap

    lit_to_integer str name = mkRule str name 1 $ do
      [a0] <- getArgs
      platform <- getPlatform
      i <- isBignumLiteral a0 -- convert any numeric literal into an Integer literal
      pure (mkIntegerExpr platform i)

    integer_binop str name op = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isIntegerLiteral a0
      y <- isIntegerLiteral a1
      platform <- getPlatform
      pure (mkIntegerExpr platform (x `op` y))

    natural_binop str name op = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isNaturalLiteral a0
      y <- isNaturalLiteral a1
      platform <- getPlatform
      pure (mkNaturalExpr platform (x `op` y))

    -- Only folds when no underflow can occur (x >= y).
    natural_sub str name = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isNaturalLiteral a0
      y <- isNaturalLiteral a1
      guard (x >= y)
      platform <- getPlatform
      pure (mkNaturalExpr platform (x - y))

    bignum_bin_pred str name op = mkRule str name 2 $ do
      platform <- getPlatform
      [a0,a1] <- getArgs
      x <- isBignumLiteral a0
      y <- isBignumLiteral a1
      pure $ if x `op` y
                then trueValInt platform
                else falseValInt platform

    bignum_compare str name = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isBignumLiteral a0
      y <- isBignumLiteral a1
      pure $ case x `compare` y of
        LT -> ltVal
        EQ -> eqVal
        GT -> gtVal

    bignum_unop str name mk_lit op = mkRule str name 1 $ do
      [a0] <- getArgs
      x <- isBignumLiteral a0
      platform <- getPlatform
      pure $ mk_lit platform (op x)

    bignum_popcount str name mk_lit = mkRule str name 1 $ do
      platform <- getPlatform
      -- We use a host Int to compute the popCount. If we compile on a 32-bit
      -- host for a 64-bit target, the result may be different than if computed
      -- by the target. So we disable this rule if sizes don't match.
      guard (platformWordSizeInBits platform <= finiteBitSize (0 :: Word))
      [a0] <- getArgs
      x <- isBignumLiteral a0
      pure $ Lit (mk_lit platform (fromIntegral (popCount x)))

    bignum_bit str name mk_lit = mkRule str name 1 $ do
      [a0] <- getArgs
      platform <- getPlatform
      n <- isNumberLiteral a0
      -- Make sure n is positive and small enough to yield a decently
      -- small number. Attempting to construct the Integer for
      --    (integerBit 9223372036854775807#)
      -- would be a bad idea (#14959)
      guard (n >= 0 && n <= fromIntegral (platformWordSizeInBits platform))
      -- it's safe to convert a target Int value into a host Int value
      -- to perform the "bit" operation because n is very small (<= 64).
      pure $ mk_lit platform (bit (fromIntegral n))

    bignum_testbit str name = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      platform <- getPlatform
      x <- isBignumLiteral a0
      n <- isNumberLiteral a1
      -- ensure that we can store 'n' in a host Int
      guard (n >= 0 && n <= fromIntegral (maxBound :: Int))
      pure $ if testBit x (fromIntegral n)
                then trueValInt platform
                else falseValInt platform

    bignum_shift str name shift_op mk_lit = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isBignumLiteral a0
      n <- isNumberLiteral a1
      -- See Note [Guarding against silly shifts]
      -- Restrict constant-folding of shifts on Integers, somewhat arbitrary.
      -- We can get huge shifts in inaccessible code (#15673)
      guard (n <= 4)
      platform <- getPlatform
      pure $ mk_lit platform (x `shift_op` fromIntegral n)

    divop_one str name divop mk_lit = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      n <- isBignumLiteral a0
      d <- isBignumLiteral a1
      guard (d /= 0) -- never fold division by zero
      platform <- getPlatform
      pure $ mk_lit platform (n `divop` d)

    divop_both str name divop mk_lit = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      n <- isBignumLiteral a0
      d <- isBignumLiteral a1
      guard (d /= 0) -- never fold division by zero
      let (r,s) = n `divop` d
      platform <- getPlatform
      pure $ mkCoreUnboxedTuple [mk_lit platform r, mk_lit platform s]

    integer_encode_float :: RealFloat a => String -> Name -> (a -> CoreExpr) -> CoreRule
    integer_encode_float str name mk_lit = mkRule str name 2 $ do
      [a0,a1] <- getArgs
      x <- isIntegerLiteral a0
      y <- isNumberLiteral a1
      -- check that y (a target Int) is in the host Int range
      guard (y <= fromIntegral (maxBound :: Int))
      pure (mk_lit $ encodeFloat x (fromInteger y))

    rational_to :: RealFloat a => String -> Name -> (a -> CoreExpr) -> CoreRule
    rational_to str name mk_lit = mkRule str name 2 $ do
      -- This turns `rationalToFloat n d` where `n` and `d` are literals into
      -- a literal Float (and similarly for Double).
      [a0,a1] <- getArgs
      n <- isIntegerLiteral a0
      d <- isIntegerLiteral a1
      -- it's important to not match d == 0, because that may represent a
      -- literal "0/0" or similar, and we can't produce a literal value for
      -- NaN or +-Inf
      guard (d /= 0)
      pure $ mk_lit (fromRational (n % d))

---------------------------------------------------
-- The rules are:
--      unpackAppendCString*# "foo"# (unpackCString*# "baz"#)
--      =  unpackCString*# "foobaz"#
--
--      unpackAppendCString*# "foo"# (unpackAppendCString*# "baz"# e)
--      =  unpackAppendCString*# "foobaz"# e
--

-- CString version
match_cstring_append_lit_C :: RuleFun
match_cstring_append_lit_C = match_cstring_append_lit unpackCStringAppendIdKey unpackCStringIdKey

-- CStringUTF8 version
match_cstring_append_lit_utf8 :: RuleFun
match_cstring_append_lit_utf8 = match_cstring_append_lit unpackCStringAppendUtf8IdKey unpackCStringUtf8IdKey

{-# INLINE match_cstring_append_lit #-}
-- Worker shared by the two variants above; the two Uniques identify the
-- append and unpack functions of the relevant encoding.
match_cstring_append_lit :: Unique -> Unique -> RuleFun
match_cstring_append_lit append_key unpack_key _ env _ [lit1, e2]
  -- First shape: append of a literal onto a plain unpack.
  | Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , (strTicks, Var unpk `App` lit2) <- stripStrTopTicks env e2
  , unpk `hasKey` unpack_key
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = Just $ mkTicks strTicks
         $ Var unpk `App` Lit (LitString (s1 `BS.append` s2))

  -- Second shape: append of a literal onto another append.
  | Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , (strTicks, Var appnd `App` lit2 `App` e) <- stripStrTopTicks env e2
  , appnd `hasKey` append_key
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = Just $ mkTicks strTicks
         $ Var appnd `App` Lit (LitString (s1 `BS.append` s2)) `App` e

match_cstring_append_lit _ _ _ _ _ _ = Nothing

---------------------------------------------------
-- The rule is this:
--      unpackFoldrCString*# "foo"# c (unpackFoldrCString*# "baz"# c n)
--      =  unpackFoldrCString*# "foobaz"# c n
--
-- See also Note [String literals in GHC] in CString.hs

-- CString version
match_cstring_foldr_lit_C :: RuleFun
match_cstring_foldr_lit_C = match_cstring_foldr_lit unpackCStringFoldrIdKey

-- CStringUTF8 version
match_cstring_foldr_lit_utf8 :: RuleFun
match_cstring_foldr_lit_utf8 = match_cstring_foldr_lit unpackCStringFoldrUtf8IdKey

{-# INLINE match_cstring_foldr_lit #-}
match_cstring_foldr_lit :: Unique -> RuleFun
match_cstring_foldr_lit foldVariant _ env _
        [ Type ty1
        , lit1
        , c1
        , e2
        ]
  | (strTicks, Var unpk `App` Type ty2 `App` lit2 `App` c2 `App` n) <- stripStrTopTicks env e2
  , unpk `hasKey` foldVariant
  , Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  , eqCoreExpr c1 c2  -- both occurrences of the cons argument must coincide
  , (c1Ticks, c1') <- stripStrTopTicks env c1
  , c2Ticks <- stripStrTopTicksT c2
  = assert (ty1 `eqType` ty2) $
    Just $ mkTicks strTicks
         $ Var unpk `App` Type ty1
                    `App` Lit (LitString (s1 `BS.append` s2))
                    `App` mkTicks (c1Ticks ++ c2Ticks) c1'
                    `App` n

match_cstring_foldr_lit _ _ _ _ _ = Nothing

-- N.B. Ensure that we strip off any ticks (e.g. source notes) from the
-- argument, lest this may fail to fire when building with -g3. See #16740.
--
-- Also, look into variable's unfolding just in case the expression we look for
-- is in a top-level thunk.
-- | Strip floatable top-level ticks from an expression, looking through a
-- variable's unfolding first (the literal may hide in a top-level thunk).
stripStrTopTicks :: InScopeEnv -> CoreExpr -> ([CoreTickish], CoreExpr)
stripStrTopTicks (ISE _ id_unf) e = case e of
  Var v
    | Just rhs <- expandUnfolding_maybe (id_unf v)
    -> stripTicksTop tickishFloatable rhs
  _ -> stripTicksTop tickishFloatable e

-- | As 'stripStrTopTicks' but only returns the ticks (no unfolding lookup).
stripStrTopTicksT :: CoreExpr -> [CoreTickish]
stripStrTopTicksT e = stripTicksTopT tickishFloatable e

---------------------------------------------------
-- The rule is this:
--      eqString (unpackCString# (Lit s1)) (unpackCString# (Lit s2)) = s1==s2
-- Also  matches unpackCStringUtf8#

match_eq_string :: RuleFun
match_eq_string _ env _ [e1, e2]
  | (ticks1, Var unpk1 `App` lit1) <- stripStrTopTicks env e1
  , (ticks2, Var unpk2 `App` lit2) <- stripStrTopTicks env e2
  , unpk_key1 <- getUnique unpk1
  , unpk_key2 <- getUnique unpk2
  , unpk_key1 == unpk_key2
  -- For now we insist the literals have to agree in their encoding
  -- to keep the rule simple. But we could check if the decoded strings
  -- compare equal in here as well.
  , unpk_key1 `elem` [unpackCStringUtf8IdKey, unpackCStringIdKey]
  , Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = Just $ mkTicks (ticks1 ++ ticks2)
         $ (if s1 == s2 then trueValBool else falseValBool)
match_eq_string _ _ _ _ = Nothing

-----------------------------------------------------------------------
-- Illustration of this rule:
--
-- cstringLength# "foobar"#  -->  6
-- cstringLength# "fizz\NULzz"#  -->  4
--
-- Nota bene: Addr# literals are suffixed by a NUL byte when they are
-- compiled to read-only data sections. That's why cstringLength# is
-- well defined on Addr# literals that do not explicitly have an embedded
-- NUL byte.
--
-- See GHC issue #5218, MR 2165, and bytestring PR 191. This is particularly
-- helpful when using OverloadedStrings to create a ByteString since the
-- function computing the length of such ByteStrings can often be constant
-- folded.
match_cstring_length :: RuleFun
match_cstring_length rule_env env _ [lit1]
  | Just (LitString str) <- exprIsLiteral_maybe env lit1
    -- If elemIndex returns Just, it has the index of the first embedded NUL
    -- in the string. If no NUL bytes are present (the common case) then use
    -- full length of the byte string.
  = let len = fromMaybe (BS.length str) (BS.elemIndex 0 str)
    in Just (Lit (mkLitInt (roPlatform rule_env) (fromIntegral len)))
match_cstring_length _ _ _ _ = Nothing

{- Note [inlineId magic]
~~~~~~~~~~~~~~~~~~~~~~~~
The call 'inline f' arranges that 'f' is inlined, regardless of
its size.  More precisely, the call 'inline f' rewrites to the
right-hand side of 'f's definition.  This allows the programmer to
control inlining from a particular call site rather than the
definition site of the function.

The moving parts are simple:

* A very simple definition in the library base:GHC.Magic
     {-# NOINLINE[0] inline #-}
     inline :: a -> a
     inline x = x
  So in phase 0, 'inline' will be inlined, so its use imposes
  no overhead.

* A rewrite rule, in GHC.Core.Opt.ConstantFold, which makes
  (inline f) inline, implemented by match_inline.
  The rule for the 'inline' function is this:
     inline f_ty (f a b c) = <f's unfolding> a b c
  (if f has an unfolding, EVEN if it's a loop breaker)

  Additionally the rule looks through ticks/casts as well (#24808):
     inline f_ty (f a b c |> co) = <f's unfolding> a b c |> co
     inline f_ty (<tick> f a b c) = <tick> <f's unfolding> a b c

  It's important to allow the argument to 'inline' to have args itself
  (a) because its more forgiving to allow the programmer to write
      either  inline f a b c
      or      inline (f a b c)
  (b) because a polymorphic f wll get a type argument that the
      programmer can't avoid, so the call may look like
        inline (map @Int @Bool) g xs

  Also, don't forget about 'inline's type argument!
-}

match_inline :: [Expr CoreBndr] -> Maybe (Expr CoreBndr)
match_inline (Type _ : e : _) = go e -- Maybe Monad ahead:
  where
    go (Var f) = -- Ignore the IdUnfoldingFun here!
                 (maybeUnfoldingTemplate (realIdUnfolding f))
    go (App f a) = do { f' <- go f; pure $ App f' a }
    -- inline (f |> co)
    go (Cast e co) = do { app <- go e; pure (Cast app co) }
    -- inline (<tick> f)
    go (Tick t e) = do { app <- go e; pure (Tick t app) }
    go _ = Nothing
match_inline _ = Nothing

--------------------------------------------------------
-- Note [Constant folding through nested expressions]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- We use rewrites rules to perform constant folding. It means that we don't
-- have a global view of the expression we are trying to optimise. As a
-- consequence we only perform local (small-step) transformations that either:
--    1) reduce the number of operations
--    2) rearrange the expression to increase the odds that other rules will
--       match
--
-- We don't try to handle more complex expression optimisation cases that would
-- require a global view. For example, rewriting expressions to increase
-- sharing (e.g., Horner's method); optimisations that require local
-- transformations increasing the number of operations; rearrangements to
-- cancel/factorize terms (e.g., (a+b-a-b) isn't rearranged to reduce to 0).
--
-- We already have rules to perform constant folding on expressions with the
-- following shape (where a and/or b are literals):
--
--          D)    op
--               /  \
--              /    \
--             /      \
--            a        b
--
-- To support nested expressions, we match three other shapes of expression
-- trees:
--
-- A)   op1          B)    op1       C)    op1
--     /   \              /   \          /   \
--    /     \            /     \        /     \
--   /       \          /       \      /       \
--  a         op2      op2       c   op2       op3
--           /   \    /   \         /   \     /   \
--          /     \  /     \       /     \   /     \
--         b       c a      b     a       b c       d
--
--
-- R1) +/- simplification:
--    ops = + or -, two literals (not siblings)
--
--    Examples:
--       A: 5 + (10-x)  ==> 15-x
--       B: (10+x) + 5  ==> 15+x
--       C: (5+a)-(5-b) ==> 0+(a+b)
--
-- R2) *, `and`, `or` simplification
--    ops = *, `and`, `or` two literals (not siblings)
--
--    Examples:
--       A: 5 * (10*x)  ==> 50*x
--       B: (10*x) * 5  ==> 50*x
--       C: (5*a)*(5*b) ==> 25*(a*b)
--
-- R3) * distribution over +/-
--    op1 = *, op2 = + or -, two literals (not siblings)
--
--    This transformation doesn't reduce the number of operations but switches
--    the outer and the inner operations so that the outer is (+) or (-) instead
--    of (*). It increases the odds that other rules will match after this one.
--
--    Examples:
--       A: 5 * (10-x)  ==> 50 - (5*x)
--       B: (10+x) * 5  ==> 50 + (5*x)
--       C: Not supported as it would increase the number of operations:
--          (5+a)*(5-b) ==> 25 - 5*b + 5*a - a*b
--
-- R4) Simple factorization
--
--    op1 = + or -, op2/op3 = *,
--    one literal for each innermost * operation (except in the D case),
--    the two other terms are equals
--
--    Examples:
--       A: x - (10*x)  ==> (-9)*x
--       B: (10*x) + x  ==> 11*x
--       C: (5*x)-(x*3) ==> 2*x
--       D: x+x         ==> 2*x
--
-- R5) +/- propagation
--
--    ops = + or -, one literal
--
--    This transformation doesn't reduce the number of operations but propagates
--    the constant to the outer level. It increases the odds that other rules
--    will match after this one.
--    Examples:
--       A: x - (10-y)  ==> (x+y) - 10
--       B: (10+x) - y  ==> 10 + (x-y)
--       C: N/A (caught by the A and B cases)
--
--------------------------------------------------------

-- Rules to perform constant folding into nested expressions
--
-- See Note [Constant folding through nested expressions]

addFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
-- Entry point for (+): dispatches to addFoldingRules' trying both argument
-- orders, gated on -fnum-constant-folding (roNumConstantFolding).
addFoldingRules op num_ops = do
   massert (op == numAdd num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for + is handled here
      (addFoldingRules' platform arg1 arg2 num_ops
       <|> addFoldingRules' platform arg2 arg1 num_ops)

subFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
-- Entry point for (-): subtraction is not commutative, so one order only.
subFoldingRules op num_ops = do
   massert (op == numSub num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe (subFoldingRules' platform arg1 arg2 num_ops)

mulFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
-- Entry point for (*): tries both argument orders.
mulFoldingRules op num_ops = do
   massert (op == numMul num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for * is handled here
      (mulFoldingRules' platform arg1 arg2 num_ops
       <|> mulFoldingRules' platform arg2 arg1 num_ops)

andFoldingRules :: NumOps -> RuleM CoreExpr
-- Entry point for bitwise `and`: tries both argument orders.
andFoldingRules num_ops = do
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for `and` is handled here
      (andFoldingRules' platform arg1 arg2 num_ops
       <|> andFoldingRules' platform arg2 arg1 num_ops)

orFoldingRules :: NumOps -> RuleM CoreExpr
-- Entry point for bitwise `or`: tries both argument orders.
orFoldingRules num_ops = do
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for `or` is handled here
      (orFoldingRules' platform arg1 arg2 num_ops
       <|> orFoldingRules' platform arg2 arg1 num_ops)

quotFoldingRules :: NumOps -> RuleM CoreExpr
-- Entry point for quot/div folding (not commutative).
quotFoldingRules num_ops = do
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe (quotFoldingRules' platform arg1 arg2 num_ops)

addFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
addFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
      -- x + (-y) ==> x-y
      (x, is_neg num_ops -> Just y)
         -> Just (x `sub` y)

      -- R1) +/- simplification

      -- l1 + (l2 + x) ==> (l1+l2) + x
      (L l1, is_lit_add num_ops -> Just (l2,x))
         -> Just (mkL (l1+l2) `add` x)

      -- l1 + (l2 - x) ==> (l1+l2) - x
      (L l1, is_sub num_ops -> Just (L l2,x))
         -> Just (mkL (l1+l2) `sub` x)

      -- l1 + (x - l2) ==> (l1-l2) + x
      (L l1, is_sub num_ops -> Just (x,L l2))
         -> Just (mkL (l1-l2) `add` x)

      -- (l1 + x) + (l2 + y) ==> (l1+l2) + (x+y)
      (is_lit_add num_ops -> Just (l1,x), is_lit_add num_ops -> Just (l2,y))
         -> Just (mkL (l1+l2) `add` (x `add` y))

      -- (l1 + x) + (l2 - y) ==> (l1+l2) + (x-y)
      (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (L l2,y))
         -> Just (mkL (l1+l2) `add` (x `sub` y))

      -- (l1 + x) + (y - l2) ==> (l1-l2) + (x+y)
      (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (l1-l2) `add` (x `add` y))

      -- (l1 - x) + (l2 - y) ==> (l1+l2) - (x+y)
      (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (L l2,y))
         -> Just (mkL (l1+l2) `sub` (x `add` y))

      -- (l1 - x) + (y - l2) ==> (l1-l2) + (y-x)
      (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (l1-l2) `add` (y `sub` x))

      -- (x - l1) + (y - l2) ==> (0-l1-l2) + (x+y)
      (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (0-l1-l2) `add` (x `add` y))

      -- R4) Simple factorization

      -- x + x ==> 2 * x
      _ | Just l1 <- is_expr_mul num_ops arg1 arg2
         -> Just (mkL (l1+1) `mul` arg1)

      -- (l1 * x) + x ==> (l1+1) * x
      _ | Just l1 <- is_expr_mul num_ops arg2 arg1
         -> Just (mkL (l1+1) `mul` arg2)

      -- (l1 * x) + (l2 * x) ==> (l1+l2) * x
      (is_lit_mul num_ops -> Just (l1,x), is_expr_mul num_ops x -> Just l2)
         -> Just (mkL (l1+l2) `mul` x)

      -- R5) +/- propagation: these transformations push literals outwards
      -- with the hope that other rules can then be applied.

      -- In the following rules, x can't be a literal otherwise another
      -- rule would have combined it with the other literal in arg2. So we
      -- don't have to check this to avoid loops here.

      -- x + (l1 + y) ==> l1 + (x + y)
      (_, is_lit_add num_ops -> Just (l1,y))
         -> Just (mkL l1 `add` (arg1 `add` y))

      -- x + (l1 - y) ==> l1 + (x - y)
      (_, is_sub num_ops -> Just (L l1,y))
         -> Just (mkL l1 `add` (arg1 `sub` y))

      -- x + (y - l1) ==> (x + y) - l1
      (_, is_sub num_ops -> Just (y,L l1))
         -> Just ((arg1 `add` y) `sub` mkL l1)

      _ -> Nothing

   where
      mkL = Lit . mkNumLiteral platform num_ops
      add x y = BinOpApp x (numAdd num_ops) y
      sub x y = BinOpApp x (numSub num_ops) y
      mul x y = BinOpApp x (numMul num_ops) y

subFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
subFoldingRules' platform arg1 arg2 num_ops = case (arg1,arg2) of
      -- x - (-y) ==> x+y
      (x, is_neg num_ops -> Just y)
         -> Just (x `add` y)

      -- R1) +/- simplification

      -- l1 - (l2 + x) ==> (l1-l2) - x
      (L l1, is_lit_add num_ops -> Just (l2,x))
         -> Just (mkL (l1-l2) `sub` x)

      -- l1 - (l2 - x) ==> (l1-l2) + x
      (L l1, is_sub num_ops -> Just (L l2,x))
         -> Just (mkL (l1-l2) `add` x)

      -- l1 - (x - l2) ==> (l1+l2) - x
      (L l1, is_sub num_ops -> Just (x, L l2))
         -> Just (mkL (l1+l2) `sub` x)

      -- (l1 + x) - l2 ==> (l1-l2) + x
      (is_lit_add num_ops -> Just (l1,x), L l2)
         -> Just (mkL (l1-l2) `add` x)

      -- (l1 - x) - l2 ==> (l1-l2) - x
      (is_sub num_ops -> Just (L l1,x), L l2)
         -> Just (mkL (l1-l2) `sub` x)

      -- (x - l1) - l2 ==> x - (l1+l2)
      (is_sub num_ops -> Just (x,L l1), L l2)
         -> Just (x `sub` mkL (l1+l2))

      -- (l1 + x) - (l2 + y) ==> (l1-l2) + (x-y)
      (is_lit_add num_ops -> Just (l1,x), is_lit_add num_ops -> Just (l2,y))
         -> Just (mkL (l1-l2) `add` (x `sub` y))

      -- (l1 + x) - (l2 - y) ==> (l1-l2) + (x+y)
      (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (L l2,y))
         -> Just (mkL (l1-l2) `add` (x `add` y))

      -- (l1 + x) - (y - l2) ==> (l1+l2) + (x-y)
      (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (l1+l2) `add` (x `sub` y))

      -- (l1 - x) - (l2 + y) ==> (l1-l2) - (x+y)
      (is_sub num_ops -> Just (L l1,x), is_lit_add num_ops -> Just (l2,y))
         -> Just (mkL (l1-l2) `sub` (x `add` y))

      -- (x - l1) - (l2 + y) ==> (0-l1-l2) + (x-y)
      (is_sub num_ops -> Just (x,L l1), is_lit_add num_ops -> Just (l2,y))
         -> Just (mkL (0-l1-l2) `add` (x `sub` y))

      -- (l1 - x) - (l2 - y) ==> (l1-l2) + (y-x)
      (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (L l2,y))
         -> Just (mkL (l1-l2) `add` (y `sub` x))

      -- (l1 - x) - (y - l2) ==> (l1+l2) - (x+y)
      (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (l1+l2) `sub` (x `add` y))

      -- (x - l1) - (l2 - y) ==> (0-l1-l2) + (x+y)
      (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (L l2,y))
         -> Just (mkL (0-l1-l2) `add` (x `add` y))

      -- (x - l1) - (y - l2) ==> (l2-l1) + (x-y)
      (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (y,L l2))
         -> Just (mkL (l2-l1) `add` (x `sub` y))

      -- R4) Simple factorization

      -- x - (l1 * x) ==> (1-l1) * x
      _ | Just l1 <- is_expr_mul num_ops arg1 arg2
         -> Just (mkL (1-l1) `mul` arg1)

      -- (l1 * x) - x ==> (l1-1) * x
      _ | Just l1 <- is_expr_mul num_ops arg2 arg1
         -> Just (mkL (l1-1) `mul` arg2)

      -- (l1 * x) - (l2 * x) ==> (l1-l2) * x
      (is_lit_mul num_ops -> Just (l1,x), is_expr_mul num_ops x -> Just l2)
         -> Just (mkL (l1-l2) `mul` x)

      -- R5) +/- propagation: these transformations push literals outwards
      -- with the hope that other rules can then be applied.

      -- In the following rules, x can't be a literal otherwise another
      -- rule would have combined it with the other literal in arg2. So we
      -- don't have to check this to avoid loops here.

      -- x - (l1 + y) ==> (x - y) - l1
      (_, is_lit_add num_ops -> Just (l1,y))
         -> Just ((arg1 `sub` y) `sub` mkL l1)

      -- (l1 + x) - y ==> l1 + (x - y)
      (is_lit_add num_ops -> Just (l1,x), _)
         -> Just (mkL l1 `add` (x `sub` arg2))

      -- x - (l1 - y) ==> (x + y) - l1
      (_, is_sub num_ops -> Just (L l1,y))
         -> Just ((arg1 `add` y) `sub` mkL l1)

      -- x - (y - l1) ==> l1 + (x - y)
      (_, is_sub num_ops -> Just (y,L l1))
         -> Just (mkL l1 `add` (arg1 `sub` y))

      -- (l1 - x) - y ==> l1 - (x + y)
      (is_sub num_ops -> Just (L l1,x), _)
         -> Just (mkL l1 `sub` (x `add` arg2))

      -- (x - l1) - y ==> (x - y) - l1
      (is_sub num_ops -> Just (x,L l1), _)
         -> Just ((x `sub` arg2) `sub` mkL l1)

      _ -> Nothing

   where
      mkL = Lit . mkNumLiteral platform num_ops
      add x y = BinOpApp x (numAdd num_ops) y
      sub x y = BinOpApp x (numSub num_ops) y
      mul x y = BinOpApp x (numMul num_ops) y

mulFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
mulFoldingRules' platform arg1 arg2 num_ops = case (arg1,arg2) of
      -- (-x) * (-y) ==> x*y
      (is_neg num_ops -> Just x, is_neg num_ops -> Just y)
         -> Just (x `mul` y)

      -- l1 * (-x) ==> (-l1) * x
      (L l1, is_neg num_ops -> Just x)
         -> Just (mkL (-l1) `mul` x)

      -- l1 * (l2 * x) ==> (l1*l2) * x
      (L l1, is_lit_mul num_ops -> Just (l2,x))
         -> Just (mkL (l1*l2) `mul` x)

      -- l1 * (l2 + x) ==> (l1*l2) + (l1 * x)
      (L l1, is_lit_add num_ops -> Just (l2,x))
         -> Just (mkL (l1*l2) `add` (arg1 `mul` x))

      -- l1 * (l2 - x) ==> (l1*l2) - (l1 * x)
      (L l1, is_sub num_ops -> Just (L l2,x))
         -> Just (mkL (l1*l2) `sub` (arg1 `mul` x))

      -- l1 * (x - l2) ==> (l1 * x) - (l1*l2)
      (L l1, is_sub num_ops -> Just (x, L l2))
         -> Just ((arg1 `mul` x) `sub` mkL (l1*l2))

      -- (l1 * x) * (l2 * y) ==> (l1*l2) * (x * y)
      (is_lit_mul num_ops -> Just (l1,x), is_lit_mul num_ops -> Just (l2,y))
         -> Just (mkL (l1*l2) `mul` (x `mul` y))

      _ -> Nothing

   where
      mkL = Lit . mkNumLiteral platform num_ops
      add x y = BinOpApp x (numAdd num_ops) y
      sub x y = BinOpApp x (numSub num_ops) y
      mul x y = BinOpApp x (numMul num_ops) y

andFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
andFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
      -- R2) * `or` `and` simplifications

      -- l1 and (l2 and x) ==> (l1 and l2) and x
      (L l1, is_lit_and num_ops -> Just (l2, x))
         -> Just (mkL (l1 .&. l2) `and` x)

      -- l1 and (l2 or x) ==> (l1 and l2) or (l1 and x)
      --   does not decrease operations

      -- (l1 and x) and (l2 and y) ==> (l1 and l2) and (x and y)
      (is_lit_and num_ops -> Just (l1, x), is_lit_and num_ops -> Just (l2, y))
         -> Just (mkL (l1 .&. l2) `and` (x `and` y))

      -- (l1 and x) and (l2 or y) ==> (l1 and l2 and x) or (l1 and x and y)
      -- (l1 or x) and (l2 or y) ==> (l1 and l2) or (x and l2) or (l1 and y) or (x and y)
      --   increase operation numbers

      -- x and (y or ... or x or ... or z) ==> x
      (x, is_or_list num_ops -> Just xs)
         | any (cheapEqExpr x) xs
         -> Just x

      _ -> Nothing
   where
      mkL = Lit . mkNumLiteral platform num_ops
      -- `and` is only attempted on types that have it, hence the fromJust.
      and x y = BinOpApp x (fromJust (numAnd num_ops)) y

orFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
orFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
      -- R2) * `or` `and` simplifications

      -- l1 or (l2 or x) ==> (l1 or l2) or x
      (L l1, is_lit_or num_ops -> Just (l2, x))
         -> Just (mkL (l1 .|. l2) `or` x)

      -- l1 or (l2 and x) ==> (l1 or l2) and (l1 and x)
      --   does not decrease operations

      -- (l1 or x) or (l2 or y) ==> (l1 or l2) or (x or y)
      (is_lit_or num_ops -> Just (l1, x), is_lit_or num_ops -> Just (l2, y))
         -> Just (mkL (l1 .|. l2) `or` (x `or` y))

      -- (l1 and x) or (l2 or y) ==> (l1 and l2 and x) or (l1 and x and y)
      -- (l1 and x) or (l2 and y) ==> (l1 and l2) or (x and l2) or (l1 and y) or (x and y)
      --   increase operation numbers

      -- x or (y and ... and x and ... and z) ==> x
      (x, is_and_list num_ops -> Just xs)
         | any (cheapEqExpr x) xs
         -> Just x

      _ -> Nothing
   where
      mkL = Lit . mkNumLiteral platform num_ops
      -- `or` is only attempted on types that have it, hence the fromJust.
      or x y = BinOpApp x (fromJust (numOr num_ops)) y

quotFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
quotFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
      -- (x / l1) / l2
      --    l1 and l2 /= 0
      --    l1*l2 doesn't overflow
      --    ==> x / (l1 * l2)
      (is_div num_ops -> Just (x, L l1), L l2)
         | l1 /= 0
         , l2 /= 0
         -- check that the result of the multiplication is in range
         , Just l <- mkNumLiteralMaybe platform num_ops (l1 * l2)
         -> Just (div x (Lit l))
         -- NB: we could directly return 0 or (-1) in case of overflow,
         -- but we would need to know
         --   (1) if we're dealing with a quot or a div operation
         --   (2) if it's an underflow or an overflow.
         -- Left as future work for now.
      _ -> Nothing
   where
      div x y = BinOpApp x (fromJust (numDiv num_ops)) y

-- | Match a binary application of the given primop.
is_binop :: PrimOp -> CoreExpr -> Maybe (Arg CoreBndr, Arg CoreBndr)
is_binop op e = case e of
   BinOpApp x op' y | op == op' -> Just (x,y)
   _                            -> Nothing

-- | Match a unary application of the given primop.
is_op :: PrimOp -> CoreExpr -> Maybe (Arg CoreBndr)
is_op op e = case e of
   App (OpVal op') x | op == op' -> Just x
   _                             -> Nothing

is_add, is_sub, is_mul, is_and, is_or, is_div :: NumOps -> CoreExpr -> Maybe (CoreArg, CoreArg)
is_add num_ops e = is_binop (numAdd num_ops) e
is_sub num_ops e = is_binop (numSub num_ops) e
is_mul num_ops e = is_binop (numMul num_ops) e
is_and num_ops e = numAnd num_ops >>= \op -> is_binop op e
is_or  num_ops e = numOr  num_ops >>= \op -> is_binop op e
is_div num_ops e = numDiv num_ops >>= \op -> is_binop op e

is_neg :: NumOps -> CoreExpr -> Maybe (Arg CoreBndr)
is_neg num_ops e = numNeg num_ops >>= \op -> is_op op e

-- Return a list of operands for a given operation.
-- E.g. is_and_list (a and ... and z) => [a,...,z] for any nesting of the and
-- operation
is_list :: (CoreExpr -> Maybe (CoreArg,CoreArg)) -> CoreExpr -> Maybe [CoreArg]
is_list f e_org = case f e_org of -- do we have the operator at all?
Just (a,b) -> Just (go [a,b]) Nothing -> Nothing where go = \case [] -> [] (e:es) -> case f e of -- we can't split any more: add to the result list Nothing -> e : go es Just (a,b) -> go (a:b:es) is_and_list, is_or_list :: NumOps -> CoreExpr -> Maybe [CoreArg] is_and_list ops = is_list (is_and ops) is_or_list ops = is_list (is_or ops) -- match operation with a literal (handles commutativity) is_lit_add, is_lit_mul, is_lit_and, is_lit_or :: NumOps -> CoreExpr -> Maybe (Integer, Arg CoreBndr) is_lit_add num_ops e = is_lit' is_add num_ops e is_lit_mul num_ops e = is_lit' is_mul num_ops e is_lit_and num_ops e = is_lit' is_and num_ops e is_lit_or num_ops e = is_lit' is_or num_ops e is_lit' :: (NumOps -> CoreExpr -> Maybe (Arg CoreBndr, Arg CoreBndr)) -> NumOps -> CoreExpr -> Maybe (Integer, Arg CoreBndr) is_lit' f num_ops e = case f num_ops e of Just (L l, x ) -> Just (l,x) Just (x , L l) -> Just (l,x) _ -> Nothing -- match given "x": return 1 -- match "lit * x": return lit value (handles commutativity) is_expr_mul :: NumOps -> Expr CoreBndr -> Expr CoreBndr -> Maybe Integer is_expr_mul num_ops x e = if | x `cheapEqExpr` e -> Just 1 | Just (k,x') <- is_lit_mul num_ops e , x `cheapEqExpr` x' -> return k | otherwise -> Nothing -- | Match the application of a binary primop pattern BinOpApp :: Arg CoreBndr -> PrimOp -> Arg CoreBndr -> CoreExpr pattern BinOpApp x op y = OpVal op `App` x `App` y -- | Match a primop pattern OpVal:: PrimOp -> Arg CoreBndr pattern OpVal op <- Var (isPrimOpId_maybe -> Just op) where OpVal op = Var (primOpId op) -- | Match a literal pattern L :: Integer -> Arg CoreBndr pattern L i <- Lit (LitNumber _ i) -- | Explicit "type-class"-like dictionary for numeric primops data NumOps = NumOps { numAdd :: !PrimOp -- ^ Add two numbers , numSub :: !PrimOp -- ^ Sub two numbers , numMul :: !PrimOp -- ^ Multiply two numbers , numDiv :: !(Maybe PrimOp) -- ^ Divide two numbers , numAnd :: !(Maybe PrimOp) -- ^ And two numbers , numOr :: !(Maybe PrimOp) -- ^ Or two 
numbers , numNeg :: !(Maybe PrimOp) -- ^ Negate a number , numLitType :: !LitNumType -- ^ Literal type } -- | Create a numeric literal mkNumLiteral :: Platform -> NumOps -> Integer -> Literal mkNumLiteral platform ops i = mkLitNumberWrap platform (numLitType ops) i -- | Create a numeric literal if it is in range mkNumLiteralMaybe :: Platform -> NumOps -> Integer -> Maybe Literal mkNumLiteralMaybe platform ops i = mkLitNumberMaybe platform (numLitType ops) i int8Ops :: NumOps int8Ops = NumOps { numAdd = Int8AddOp , numSub = Int8SubOp , numMul = Int8MulOp , numDiv = Just Int8QuotOp , numAnd = Nothing , numOr = Nothing , numNeg = Just Int8NegOp , numLitType = LitNumInt8 } word8Ops :: NumOps word8Ops = NumOps { numAdd = Word8AddOp , numSub = Word8SubOp , numMul = Word8MulOp , numDiv = Just Word8QuotOp , numAnd = Just Word8AndOp , numOr = Just Word8OrOp , numNeg = Nothing , numLitType = LitNumWord8 } int16Ops :: NumOps int16Ops = NumOps { numAdd = Int16AddOp , numSub = Int16SubOp , numMul = Int16MulOp , numDiv = Just Int16QuotOp , numAnd = Nothing , numOr = Nothing , numNeg = Just Int16NegOp , numLitType = LitNumInt16 } word16Ops :: NumOps word16Ops = NumOps { numAdd = Word16AddOp , numSub = Word16SubOp , numMul = Word16MulOp , numDiv = Just Word16QuotOp , numAnd = Just Word16AndOp , numOr = Just Word16OrOp , numNeg = Nothing , numLitType = LitNumWord16 } int32Ops :: NumOps int32Ops = NumOps { numAdd = Int32AddOp , numSub = Int32SubOp , numMul = Int32MulOp , numDiv = Just Int32QuotOp , numAnd = Nothing , numOr = Nothing , numNeg = Just Int32NegOp , numLitType = LitNumInt32 } word32Ops :: NumOps word32Ops = NumOps { numAdd = Word32AddOp , numSub = Word32SubOp , numMul = Word32MulOp , numDiv = Just Word32QuotOp , numAnd = Just Word32AndOp , numOr = Just Word32OrOp , numNeg = Nothing , numLitType = LitNumWord32 } int64Ops :: NumOps int64Ops = NumOps { numAdd = Int64AddOp , numSub = Int64SubOp , numMul = Int64MulOp , numDiv = Just Int64QuotOp , numAnd = Nothing , numOr = 
Nothing , numNeg = Just Int64NegOp , numLitType = LitNumInt64 } word64Ops :: NumOps word64Ops = NumOps { numAdd = Word64AddOp , numSub = Word64SubOp , numMul = Word64MulOp , numDiv = Just Word64QuotOp , numAnd = Just Word64AndOp , numOr = Just Word64OrOp , numNeg = Nothing , numLitType = LitNumWord64 } intOps :: NumOps intOps = NumOps { numAdd = IntAddOp , numSub = IntSubOp , numMul = IntMulOp , numDiv = Just IntQuotOp , numAnd = Just IntAndOp , numOr = Just IntOrOp , numNeg = Just IntNegOp , numLitType = LitNumInt } wordOps :: NumOps wordOps = NumOps { numAdd = WordAddOp , numSub = WordSubOp , numMul = WordMulOp , numDiv = Just WordQuotOp , numAnd = Just WordAndOp , numOr = Just WordOrOp , numNeg = Nothing , numLitType = LitNumWord } -------------------------------------------------------- -- Constant folding through case-expressions -- -- cf Scrutinee Constant Folding in simplCore/GHC.Core.Opt.Simplify.Utils -------------------------------------------------------- -- | Match the scrutinee of a case and potentially return a new scrutinee and a -- function to apply to each literal alternative. caseRules :: Platform -> CoreExpr -- Scrutinee -> Maybe ( CoreExpr -- New scrutinee , AltCon -> Maybe AltCon -- How to fix up the alt pattern -- Nothing <=> Unreachable -- See Note [Unreachable caseRules alternatives] , Id -> CoreExpr) -- How to reconstruct the original scrutinee -- from the new case-binder -- e.g case e of b { -- ...; -- con bs -> rhs; -- ... } -- ==> -- case e' of b' { -- ...; -- fixup_altcon[con] bs -> let b = mk_orig[b] in rhs; -- ... 
} caseRules platform (App (App (Var f) v) (Lit l)) -- v `op` x# | Just op <- isPrimOpId_maybe f , LitNumber _ x <- l , Just adjust_lit <- adjustDyadicRight op x = Just (v, tx_lit_con platform adjust_lit , \v -> (App (App (Var f) (Var v)) (Lit l))) caseRules platform (App (App (Var f) (Lit l)) v) -- x# `op` v | Just op <- isPrimOpId_maybe f , LitNumber _ x <- l , Just adjust_lit <- adjustDyadicLeft x op = Just (v, tx_lit_con platform adjust_lit , \v -> (App (App (Var f) (Lit l)) (Var v))) caseRules platform (App (Var f) v ) -- op v | Just op <- isPrimOpId_maybe f , Just adjust_lit <- adjustUnary op = Just (v, tx_lit_con platform adjust_lit , \v -> App (Var f) (Var v)) -- See Note [caseRules for tagToEnum] caseRules platform (App (App (Var f) type_arg) v) | Just TagToEnumOp <- isPrimOpId_maybe f = Just (v, tx_con_tte platform , \v -> (App (App (Var f) type_arg) (Var v))) -- See Note [caseRules for dataToTag] caseRules _ (Var f `App` Type lev `App` Type ty `App` v) -- dataToTag x | Just op <- isPrimOpId_maybe f , op == DataToTagSmallOp || op == DataToTagLargeOp = case splitTyConApp_maybe ty of Just (tc, _) | isValidDTT2TyCon tc -> Just (v, tx_con_dtt tc , \v' -> Var f `App` Type lev `App` Type ty `App` Var v') _ -> pprTraceUserWarning warnMsg Nothing where warnMsg = vcat $ map text [ "Found dataToTag primop applied to a non-ADT type. This could" , "be a future bug in GHC, or it may be caused by an unsupported" , "use of the ghc-internal primops dataToTagSmall# and dataToTagLarge#." , "In either case, the GHC developers would like to know about it!" , "Please report this as a GHC bug: http://www.haskell.org/ghc/reportabug" ] caseRules _ _ = Nothing -- | Case rules -- -- It's important that occurrence info are present, hence the use of In* types. 
caseRules2 :: InExpr -- ^ Scutinee -> InId -- ^ Case-binder -> [InAlt] -- ^ Alternatives in standard (increasing) order -> Maybe (InExpr, InId, [InAlt]) caseRules2 scrut bndr alts -- case quotRem# x y of -- (# q, _ #) -> body -- ====> -- case quot# x y of -- q -> body -- -- case quotRem# x y of -- (# _, r #) -> body -- ====> -- case rem# x y of -- r -> body | BinOpApp x op y <- scrut , Just (quot,rem) <- is_any_quot_rem op , [Alt (DataAlt _) [q,r] body] <- alts , isDeadBinder bndr , dead_q <- isDeadBinder q , dead_r <- isDeadBinder r , dead_q || dead_r = if | dead_q -> Just $ (BinOpApp x rem y, r, [Alt DEFAULT [] body]) | dead_r -> Just $ (BinOpApp x quot y, q, [Alt DEFAULT [] body]) | otherwise -> Nothing | otherwise = Nothing -- | If the given primop is a quotRem, return the corresponding (quot,rem). is_any_quot_rem :: PrimOp -> Maybe (PrimOp, PrimOp) is_any_quot_rem = \case IntQuotRemOp -> Just (IntQuotOp , IntRemOp) Int8QuotRemOp -> Just (Int8QuotOp, Int8RemOp) Int16QuotRemOp -> Just (Int16QuotOp, Int16RemOp) Int32QuotRemOp -> Just (Int32QuotOp, Int32RemOp) -- Int64QuotRemOp doesn't exist (yet) WordQuotRemOp -> Just (WordQuotOp, WordRemOp) Word8QuotRemOp -> Just (Word8QuotOp, Word8RemOp) Word16QuotRemOp -> Just (Word16QuotOp, Word16RemOp) Word32QuotRemOp -> Just (Word32QuotOp, Word32RemOp) -- Word64QuotRemOp doesn't exist (yet) _ -> Nothing tx_lit_con :: Platform -> (Integer -> Integer) -> AltCon -> Maybe AltCon tx_lit_con _ _ DEFAULT = Just DEFAULT tx_lit_con platform adjust (LitAlt l) = Just $ LitAlt (mapLitValue platform adjust l) tx_lit_con _ _ alt = pprPanic "caseRules" (ppr alt) -- NB: mapLitValue uses mkLitIntWrap etc, to ensure that the -- literal alternatives remain in Word/Int target ranges -- (See Note [Word/Int underflow/overflow] in GHC.Types.Literal and #13172). adjustDyadicRight :: PrimOp -> Integer -> Maybe (Integer -> Integer) -- Given (x `op` lit) return a function 'f' s.t. 
f (x `op` lit) = x adjustDyadicRight op lit = case op of WordAddOp -> Just (\y -> y-lit ) IntAddOp -> Just (\y -> y-lit ) WordSubOp -> Just (\y -> y+lit ) IntSubOp -> Just (\y -> y+lit ) WordXorOp -> Just (\y -> y `xor` lit) IntXorOp -> Just (\y -> y `xor` lit) _ -> Nothing adjustDyadicLeft :: Integer -> PrimOp -> Maybe (Integer -> Integer) -- Given (lit `op` x) return a function 'f' s.t. f (lit `op` x) = x adjustDyadicLeft lit op = case op of WordAddOp -> Just (\y -> y-lit ) IntAddOp -> Just (\y -> y-lit ) WordSubOp -> Just (\y -> lit-y ) IntSubOp -> Just (\y -> lit-y ) WordXorOp -> Just (\y -> y `xor` lit) IntXorOp -> Just (\y -> y `xor` lit) _ -> Nothing adjustUnary :: PrimOp -> Maybe (Integer -> Integer) -- Given (op x) return a function 'f' s.t. f (op x) = x adjustUnary op = case op of WordNotOp -> Just (\y -> complement y) IntNotOp -> Just (\y -> complement y) IntNegOp -> Just (\y -> negate y ) _ -> Nothing tx_con_tte :: Platform -> AltCon -> Maybe AltCon tx_con_tte _ DEFAULT = Just DEFAULT tx_con_tte _ alt@(LitAlt {}) = pprPanic "caseRules" (ppr alt) tx_con_tte platform (DataAlt dc) -- See Note [caseRules for tagToEnum] = Just $ LitAlt $ mkLitInt platform $ toInteger $ dataConTagZ dc tx_con_dtt :: TyCon -> AltCon -> Maybe AltCon tx_con_dtt _ DEFAULT = Just DEFAULT tx_con_dtt tc (LitAlt (LitNumber LitNumInt i)) | tag >= 0 , tag < n_data_cons = Just (DataAlt (data_cons !! tag)) -- tag is zero-indexed, as is (!!) | otherwise = Nothing where tag = fromInteger i :: ConTagZ n_data_cons = tyConFamilySize tc data_cons = tyConDataCons tc tx_con_dtt _ alt = pprPanic "caseRules/dataToTag: bad alt" (ppr alt) {- Note [caseRules for tagToEnum] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want to transform case tagToEnum# x of False -> e1 True -> e2 into case x of 0# -> e1 1# -> e2 See #8317. This rule eliminates a lot of boilerplate. For if (x>y) then e2 else e1 we generate case tagToEnum# (x ># y) of False -> e1 True -> e2 and it is nice to then get rid of the tagToEnum#. 
Beware (#14768): avoid the temptation to map constructor 0 to DEFAULT, in the hope of getting this case (x ># y) of DEFAULT -> e1 1# -> e2 That fails utterly in the case of data Colour = Red | Green | Blue case tagToEnum x of DEFAULT -> e1 Red -> e2 We don't want to get this! case x of DEFAULT -> e1 DEFAULT -> e2 Instead, when possible, we turn one branch into DEFAULT in GHC.Core.Opt.Simplify.Utils.mkCase2; see Note [Literal cases] in that module. Note [caseRules for dataToTag] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [DataToTag overview] in GHC.Tc.Instance.Class. We want to transform case dataToTagSmall# x of DEFAULT -> e1 1# -> e2 into case x of DEFAULT -> e1 (:) _ _ -> e2 (Note the need for some wildcard binders in the 'cons' case.) This transformation often enables further optimisation via case-flattening and case-of-known-constructor and can be very important for code using derived Eq instances. We can apply this transformation only when we can easily get the constructors from the type at which dataToTagSmall# is used. And we cannot apply this transformation at "type data"-related types without breaking invariant I1 from Note [Type data declarations] in GHC.Rename.Module. That leaves exactly the types satisfying condition DTT2 from Note [DataToTag overview] in GHC.Tc.Instance.Class. All of the above applies identically for `dataToTagLarge#`. And thanks to wrinkle DTW5, there is no need to worry about large-tag arguments for `dataToTagSmall#`; those cause undefined behavior anyway. Note [Unreachable caseRules alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Take care if we see something like case dataToTag x of DEFAULT -> e1 -1# -> e2 100 -> e3 because there isn't a data constructor with tag -1 or 100. In this case the out-of-range alternative is dead code -- we know the range of tags for x. Hence caseRules returns (AltCon -> Maybe AltCon), with Nothing indicating an alternative that is unreachable. 
You may wonder how this can happen: check out #15436. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/ConstantFold.hs-boot0000644000000000000000000000025607346545000023236 0ustar0000000000000000module GHC.Core.Opt.ConstantFold where import GHC.Prelude import GHC.Core import GHC.Builtin.PrimOps import GHC.Types.Name primOpRules :: Name -> PrimOp -> Maybe CoreRule ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Monad.hs0000644000000000000000000003323307346545000020736 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE DeriveFunctor #-} module GHC.Core.Opt.Monad ( -- * Types used in core-to-core passes FloatOutSwitches(..), -- * The monad CoreM, runCoreM, mapDynFlagsCoreM, dropSimplCount, -- ** Reading from the monad getHscEnv, getModule, initRuleEnv, getExternalRuleBase, getDynFlags, getPackageFamInstEnv, getInteractiveContext, getUniqTag, getNamePprCtx, getSrcSpanM, -- ** Writing to the monad addSimplCount, -- ** Lifting into the monad liftIO, liftIOWithCount, -- ** Dealing with annotations getAnnotations, getFirstAnnotations, -- ** Screen output putMsg, putMsgS, errorMsg, msg, fatalErrorMsg, fatalErrorMsgS, debugTraceMsg, debugTraceMsgS, ) where import GHC.Prelude hiding ( read ) import GHC.Driver.DynFlags import GHC.Driver.Env import GHC.Core.Rules ( RuleBase, RuleEnv, mkRuleEnv ) import GHC.Core.Opt.Stats ( SimplCount, zeroSimplCount, plusSimplCount ) import GHC.Types.Annotations import GHC.Types.Unique.Supply import GHC.Types.Name.Env import GHC.Types.SrcLoc import GHC.Types.Error import GHC.Utils.Error ( errorDiagnostic ) import GHC.Utils.Outputable as Outputable import GHC.Utils.Logger import GHC.Utils.Monad import GHC.Data.IOEnv hiding ( liftIO, failM, failWithM ) import qualified GHC.Data.IOEnv as IOEnv import GHC.Runtime.Context ( InteractiveContext ) import GHC.Unit.Module import GHC.Unit.Module.ModGuts import GHC.Unit.External import Data.Bifunctor ( bimap ) import Data.Dynamic import Data.Maybe 
(listToMaybe) import Data.Word import Control.Monad import Control.Applicative ( Alternative(..) ) data FloatOutSwitches = FloatOutSwitches { floatOutLambdas :: Maybe Int -- ^ Just n <=> float lambdas to top level, if -- doing so will abstract over n or fewer -- value variables -- Nothing <=> float all lambdas to top level, -- regardless of how many free variables -- Just 0 is the vanilla case: float a lambda -- iff it has no free vars , floatOutConstants :: Bool -- ^ True <=> float constants to top level, -- even if they do not escape a lambda , floatOutOverSatApps :: Bool -- ^ True <=> float out over-saturated applications -- based on arity information. -- See Note [Floating over-saturated applications] -- in GHC.Core.Opt.SetLevels , floatToTopLevelOnly :: Bool -- ^ Allow floating to the top level only. , floatJoinsToTop :: Bool -- ^ Float join points to top level if possible -- See Note [Floating join point bindings] -- in GHC.Core.Opt.SetLevels } instance Outputable FloatOutSwitches where ppr = pprFloatOutSwitches pprFloatOutSwitches :: FloatOutSwitches -> SDoc pprFloatOutSwitches sw = text "FOS" <+> (braces $ sep $ punctuate comma $ [ text "Lam =" <+> ppr (floatOutLambdas sw) , text "Consts =" <+> ppr (floatOutConstants sw) , text "JoinsToTop =" <+> ppr (floatJoinsToTop sw) , text "OverSatApps =" <+> ppr (floatOutOverSatApps sw) ]) {- ************************************************************************ * * Monad and carried data structure definitions * * ************************************************************************ -} data CoreReader = CoreReader { cr_hsc_env :: HscEnv, cr_rule_base :: RuleBase, -- Home package table rules cr_module :: Module, cr_name_ppr_ctx :: NamePprCtx, cr_loc :: SrcSpan, -- Use this for log/error messages so they -- are at least tagged with the right source file cr_uniq_tag :: !Char -- Tag for creating unique values } -- Note: CoreWriter used to be defined with data, rather than newtype. 
If it -- is defined that way again, the cw_simpl_count field, at least, must be -- strict to avoid a space leak (#7702). newtype CoreWriter = CoreWriter { cw_simpl_count :: SimplCount } emptyWriter :: Bool -- ^ -ddump-simpl-stats -> CoreWriter emptyWriter dump_simpl_stats = CoreWriter { cw_simpl_count = zeroSimplCount dump_simpl_stats } plusWriter :: CoreWriter -> CoreWriter -> CoreWriter plusWriter w1 w2 = CoreWriter { cw_simpl_count = (cw_simpl_count w1) `plusSimplCount` (cw_simpl_count w2) } type CoreIOEnv = IOEnv CoreReader -- | The monad used by Core-to-Core passes to register simplification statistics. -- Also used to have common state (in the form of UniqueSupply) for generating Uniques. newtype CoreM a = CoreM { unCoreM :: CoreIOEnv (a, CoreWriter) } deriving (Functor) instance Monad CoreM where mx >>= f = CoreM $ do (x, w1) <- unCoreM mx (y, w2) <- unCoreM (f x) let w = w1 `plusWriter` w2 return $ seq w (y, w) -- forcing w before building the tuple avoids a space leak -- (#7702) instance Applicative CoreM where pure x = CoreM $ nop x (<*>) = ap m *> k = m >>= \_ -> k instance Alternative CoreM where empty = CoreM Control.Applicative.empty m <|> n = CoreM (unCoreM m <|> unCoreM n) instance MonadPlus CoreM instance MonadUnique CoreM where getUniqueSupplyM = do tag <- read cr_uniq_tag liftIO $! mkSplitUniqSupply tag getUniqueM = do tag <- read cr_uniq_tag liftIO $! 
uniqFromTag tag runCoreM :: HscEnv -> RuleBase -> Char -- ^ Mask -> Module -> NamePprCtx -> SrcSpan -> CoreM a -> IO (a, SimplCount) runCoreM hsc_env rule_base tag mod name_ppr_ctx loc m = liftM extract $ runIOEnv reader $ unCoreM m where reader = CoreReader { cr_hsc_env = hsc_env, cr_rule_base = rule_base, cr_module = mod, cr_name_ppr_ctx = name_ppr_ctx, cr_loc = loc, cr_uniq_tag = tag } extract :: (a, CoreWriter) -> (a, SimplCount) extract (value, writer) = (value, cw_simpl_count writer) {- ************************************************************************ * * Core combinators, not exported * * ************************************************************************ -} nop :: a -> CoreIOEnv (a, CoreWriter) nop x = do logger <- hsc_logger . cr_hsc_env <$> getEnv return (x, emptyWriter $ logHasDumpFlag logger Opt_D_dump_simpl_stats) read :: (CoreReader -> a) -> CoreM a read f = CoreM $ getEnv >>= (\r -> nop (f r)) write :: CoreWriter -> CoreM () write w = CoreM $ return ((), w) -- \subsection{Lifting IO into the monad} -- | Lift an 'IOEnv' operation into 'CoreM' liftIOEnv :: CoreIOEnv a -> CoreM a liftIOEnv mx = CoreM (mx >>= (\x -> nop x)) instance MonadIO CoreM where liftIO = liftIOEnv . 
IOEnv.liftIO -- | Lift an 'IO' operation into 'CoreM' while consuming its 'SimplCount' liftIOWithCount :: IO (SimplCount, a) -> CoreM a liftIOWithCount what = liftIO what >>= (\(count, x) -> addSimplCount count >> return x) {- ************************************************************************ * * Reader, writer and state accessors * * ************************************************************************ -} getHscEnv :: CoreM HscEnv getHscEnv = read cr_hsc_env getHomeRuleBase :: CoreM RuleBase getHomeRuleBase = read cr_rule_base initRuleEnv :: ModGuts -> CoreM RuleEnv initRuleEnv guts = do { hpt_rules <- getHomeRuleBase ; eps_rules <- getExternalRuleBase ; return (mkRuleEnv guts eps_rules hpt_rules) } getExternalRuleBase :: CoreM RuleBase getExternalRuleBase = eps_rule_base <$> get_eps getNamePprCtx :: CoreM NamePprCtx getNamePprCtx = read cr_name_ppr_ctx getSrcSpanM :: CoreM SrcSpan getSrcSpanM = read cr_loc addSimplCount :: SimplCount -> CoreM () addSimplCount count = write (CoreWriter { cw_simpl_count = count }) getUniqTag :: CoreM Char getUniqTag = read cr_uniq_tag -- Convenience accessors for useful fields of HscEnv -- | Adjust the dyn flags passed to the argument action mapDynFlagsCoreM :: (DynFlags -> DynFlags) -> CoreM a -> CoreM a mapDynFlagsCoreM f m = CoreM $ do !e <- getEnv let !e' = e { cr_hsc_env = hscUpdateFlags f $ cr_hsc_env e } liftIO $ runIOEnv e' $! unCoreM m -- | Drop the single count of the argument action so it doesn't effect -- the total. 
dropSimplCount :: CoreM a -> CoreM a dropSimplCount m = CoreM $ do (a, _) <- unCoreM m unCoreM $ pure a instance HasDynFlags CoreM where getDynFlags = fmap hsc_dflags getHscEnv instance HasLogger CoreM where getLogger = fmap hsc_logger getHscEnv instance HasModule CoreM where getModule = read cr_module getInteractiveContext :: CoreM InteractiveContext getInteractiveContext = hsc_IC <$> getHscEnv getPackageFamInstEnv :: CoreM PackageFamInstEnv getPackageFamInstEnv = eps_fam_inst_env <$> get_eps get_eps :: CoreM ExternalPackageState get_eps = do hsc_env <- getHscEnv liftIO $ hscEPS hsc_env {- ************************************************************************ * * Dealing with annotations * * ************************************************************************ -} -- | Get all annotations of a given type. This happens lazily, that is -- no deserialization will take place until the [a] is actually demanded and -- the [a] can also be empty (the UniqFM is not filtered). -- -- This should be done once at the start of a Core-to-Core pass that uses -- annotations. -- -- See Note [Annotations] getAnnotations :: Typeable a => ([Word8] -> a) -> ModGuts -> CoreM (ModuleEnv [a], NameEnv [a]) getAnnotations deserialize guts = do hsc_env <- getHscEnv ann_env <- liftIO $ prepareAnnotations hsc_env (Just guts) return (deserializeAnns deserialize ann_env) -- | Get at most one annotation of a given type per annotatable item. getFirstAnnotations :: Typeable a => ([Word8] -> a) -> ModGuts -> CoreM (ModuleEnv a, NameEnv a) getFirstAnnotations deserialize guts = bimap mod name <$> getAnnotations deserialize guts where mod = mapMaybeModuleEnv (const listToMaybe) name = mapMaybeNameEnv listToMaybe {- Note [Annotations] ~~~~~~~~~~~~~~~~~~ A Core-to-Core pass that wants to make use of annotations calls getAnnotations or getFirstAnnotations at the beginning to obtain a UniqFM with annotations of a specific type. This produces all annotations from interface files read so far. 
However, annotations from interface files read during the pass will not be visible until getAnnotations is called again. This is similar to how rules work and probably isn't too bad. The current implementation could be optimised a bit: when looking up annotations for a thing from the HomePackageTable, we could search directly in the module where the thing is defined rather than building one UniqFM which contains all annotations we know of. This would work because annotations can only be given to things defined in the same module. However, since we would only want to deserialise every annotation once, we would have to build a cache for every module in the HTP. In the end, it's probably not worth it as long as we aren't using annotations heavily. ************************************************************************ * * Direct screen output * * ************************************************************************ -} msg :: MessageClass -> SDoc -> CoreM () msg msg_class doc = do logger <- getLogger loc <- getSrcSpanM name_ppr_ctx <- getNamePprCtx let sty = case msg_class of MCDiagnostic _ _ _ -> err_sty MCDump -> dump_sty _ -> user_sty err_sty = mkErrStyle name_ppr_ctx user_sty = mkUserStyle name_ppr_ctx AllTheWay dump_sty = mkDumpStyle name_ppr_ctx liftIO $ logMsg logger msg_class loc (withPprStyle sty doc) -- | Output a String message to the screen putMsgS :: String -> CoreM () putMsgS = putMsg . text -- | Output a message to the screen putMsg :: SDoc -> CoreM () putMsg = msg MCInfo -- | Output an error to the screen. Does not cause the compiler to die. errorMsg :: SDoc -> CoreM () errorMsg doc = msg errorDiagnostic doc -- | Output a fatal error to the screen. Does not cause the compiler to die. fatalErrorMsgS :: String -> CoreM () fatalErrorMsgS = fatalErrorMsg . text -- | Output a fatal error to the screen. Does not cause the compiler to die. 
fatalErrorMsg :: SDoc -> CoreM () fatalErrorMsg = msg MCFatal -- | Output a string debugging message at verbosity level of @-v@ or higher debugTraceMsgS :: String -> CoreM () debugTraceMsgS = debugTraceMsg . text -- | Outputs a debugging message at verbosity level of @-v@ or higher debugTraceMsg :: SDoc -> CoreM () debugTraceMsg = msg MCDump ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/OccurAnal.hs0000644000000000000000000050661607346545000021561 0ustar0000000000000000{-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -cpp -Wno-incomplete-record-updates #-} {-# OPTIONS_GHC -fmax-worker-args=12 #-} -- The -fmax-worker-args=12 is there because the main functions -- are strict in the OccEnv, and it turned out that with the default settting -- some functions would unbox the OccEnv ad some would not, depending on how -- many /other/ arguments the function has. Inconsistent unboxing is very -- bad for performance, so I increased the limit to allow it to unbox -- consistently. {- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 ************************************************************************ * * \section[OccurAnal]{Occurrence analysis pass} * * ************************************************************************ The occurrence analyser re-typechecks a core expression, returning a new core expression with (hopefully) improved usage information. -} module GHC.Core.Opt.OccurAnal ( occurAnalysePgm, occurAnalyseExpr, zapLambdaBndrs, BinderSwapDecision(..), scrutOkForBinderSwap ) where import GHC.Prelude hiding ( head, init, last, tail ) import GHC.Core import GHC.Core.FVs import GHC.Core.Utils ( exprIsTrivial, isDefaultAlt, isExpandableApp, mkCastMCo, mkTicks ) import GHC.Core.Opt.Arity ( joinRhsArity, isOneShotBndr ) import GHC.Core.Coercion import GHC.Core.Predicate ( isDictId ) import GHC.Core.Type import GHC.Core.TyCo.FVs ( tyCoVarsOfMCo ) import GHC.Data.Maybe( orElse ) import GHC.Data.Graph.Directed ( SCC(..), Node(..) 
, stronglyConnCompFromEdgedVerticesUniq , stronglyConnCompFromEdgedVerticesUniqR ) import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Basic import GHC.Types.Tickish import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Var import GHC.Types.Demand ( argOneShots, argsOneShots, isDeadEndSig ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Builtin.Names( runRWKey ) import GHC.Unit.Module( Module ) import Data.List (mapAccumL) {- ************************************************************************ * * occurAnalysePgm, occurAnalyseExpr * * ************************************************************************ Here's the externally-callable interface: -} -- | Do occurrence analysis, and discard occurrence info returned occurAnalyseExpr :: CoreExpr -> CoreExpr occurAnalyseExpr expr = expr' where WUD _ expr' = occAnal initOccEnv expr occurAnalysePgm :: Module -- Used only in debug output -> (Id -> Bool) -- Active unfoldings -> (Activation -> Bool) -- Active rules -> [CoreRule] -- Local rules for imported Ids -> CoreProgram -> CoreProgram occurAnalysePgm this_mod active_unf active_rule imp_rules binds | isEmptyDetails final_usage = occ_anald_binds | otherwise -- See Note [Glomming] = warnPprTrace True "Glomming in" (hang (ppr this_mod <> colon) 2 (ppr final_usage)) occ_anald_glommed_binds where init_env = initOccEnv { occ_rule_act = active_rule , occ_unf_act = active_unf } WUD final_usage occ_anald_binds = go binds init_env WUD _ occ_anald_glommed_binds = occAnalRecBind init_env TopLevel imp_rule_edges (flattenBinds binds) initial_uds -- It's crucial to re-analyse the glommed-together bindings -- so that we establish the right loop breakers. 
Otherwise -- we can easily create an infinite loop (#9583 is an example) -- -- Also crucial to re-analyse the /original/ bindings -- in case the first pass accidentally discarded as dead code -- a binding that was actually needed (albeit before its -- definition site). #17724 threw this up. initial_uds = addManyOccs emptyDetails (rulesFreeVars imp_rules) -- The RULES declarations keep things alive! -- imp_rule_edges maps a top-level local binder 'f' to the -- RHS free vars of any IMP-RULE, a local RULE for an imported function, -- where 'f' appears on the LHS -- e.g. RULE foldr f = blah -- imp_rule_edges contains f :-> fvs(blah) -- We treat such RULES as extra rules for 'f' -- See Note [Preventing loops due to imported functions rules] imp_rule_edges :: ImpRuleEdges imp_rule_edges = foldr (plusVarEnv_C (++)) emptyVarEnv [ mapVarEnv (const [(act,rhs_fvs)]) $ getUniqSet $ exprsFreeIds args `delVarSetList` bndrs | Rule { ru_act = act, ru_bndrs = bndrs , ru_args = args, ru_rhs = rhs } <- imp_rules -- Not BuiltinRules; see Note [Plugin rules] , let rhs_fvs = exprFreeIds rhs `delVarSetList` bndrs ] go :: [CoreBind] -> OccEnv -> WithUsageDetails [CoreBind] go [] _ = WUD initial_uds [] go (bind:binds) env = occAnalBind env TopLevel imp_rule_edges bind (go binds) (++) {- ********************************************************************* * * IMP-RULES Local rules for imported functions * * ********************************************************************* -} type ImpRuleEdges = IdEnv [(Activation, VarSet)] -- Mapping from a local Id 'f' to info about its IMP-RULES, -- i.e. 
/local/ rules for an imported Id that mention 'f' on the LHS -- We record (a) its Activation and (b) the RHS free vars -- See Note [IMP-RULES: local rules for imported functions] noImpRuleEdges :: ImpRuleEdges noImpRuleEdges = emptyVarEnv lookupImpRules :: ImpRuleEdges -> Id -> [(Activation,VarSet)] lookupImpRules imp_rule_edges bndr = case lookupVarEnv imp_rule_edges bndr of Nothing -> [] Just vs -> vs impRulesScopeUsage :: [(Activation,VarSet)] -> UsageDetails -- Variable mentioned in RHS of an IMP-RULE for the bndr, -- whether active or not impRulesScopeUsage imp_rules_info = foldr add emptyDetails imp_rules_info where add (_,vs) usage = addManyOccs usage vs impRulesActiveFvs :: (Activation -> Bool) -> VarSet -> [(Activation,VarSet)] -> VarSet impRulesActiveFvs is_active bndr_set vs = foldr add emptyVarSet vs `intersectVarSet` bndr_set where add (act,vs) acc | is_active act = vs `unionVarSet` acc | otherwise = acc {- Note [IMP-RULES: local rules for imported functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We quite often have * A /local/ rule * for an /imported/ function like this: foo x = blah {-# RULE "map/foo" forall xs. map foo xs = xs #-} We call them IMP-RULES. They are important in practice, and occur a lot in the libraries. IMP-RULES are held in mg_rules of ModGuts, and passed in to occurAnalysePgm. Main Invariant: * Throughout, we treat an IMP-RULE that mentions 'f' on its LHS just like a RULE for f. Note [IMP-RULES: unavoidable loops] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this f = /\a. B.g a RULE B.g Int = 1 + f Int Note that * The RULE is for an imported function. * f is non-recursive Now we can get f Int --> B.g Int Inlining f --> 1 + f Int Firing RULE and so the simplifier goes into an infinite loop. This would not happen if the RULE was for a local function, because we keep track of dependencies through rules. But that is pretty much impossible to do for imported Ids. Suppose f's definition had been f = /\a. 
C.h a where (by some long and devious process), C.h eventually inlines to B.g. We could only spot such loops by exhaustively following unfoldings of C.h etc, in case we reach B.g, and hence (via the RULE) f. We regard this potential infinite loop as a *programmer* error. It's up the programmer not to write silly rules like RULE f x = f x and the example above is just a more complicated version. Note [Specialising imported functions] (referred to from Specialise) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For *automatically-generated* rules, the programmer can't be responsible for the "programmer error" in Note [IMP-RULES: unavoidable loops]. In particular, consider specialising a recursive function defined in another module. If we specialise a recursive function B.g, we get g_spec = .....(B.g Int)..... RULE B.g Int = g_spec Here, g_spec doesn't look recursive, but when the rule fires, it becomes so. And if B.g was mutually recursive, the loop might not be as obvious as it is here. To avoid this, * When specialising a function that is a loop breaker, give a NOINLINE pragma to the specialised function Note [Preventing loops due to imported functions rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider: import GHC.Base (foldr) {-# RULES "filterList" forall p. foldr (filterFB (:) p) [] = filter p #-} filter p xs = build (\c n -> foldr (filterFB c p) n xs) filterFB c p = ... f = filter p xs Note that filter is not a loop-breaker, so what happens is: f = filter p xs = {inline} build (\c n -> foldr (filterFB c p) n xs) = {inline} foldr (filterFB (:) p) [] xs = {RULE} filter p xs We are in an infinite loop. A more elaborate example (that I actually saw in practice when I went to mark GHC.List.filter as INLINABLE) is as follows. 
Say I have this module: {-# LANGUAGE RankNTypes #-} module GHCList where import Prelude hiding (filter) import GHC.Base (build) {-# INLINABLE filter #-} filter :: (a -> Bool) -> [a] -> [a] filter p [] = [] filter p (x:xs) = if p x then x : filter p xs else filter p xs {-# NOINLINE [0] filterFB #-} filterFB :: (a -> b -> b) -> (a -> Bool) -> a -> b -> b filterFB c p x r | p x = x `c` r | otherwise = r {-# RULES "filter" [~1] forall p xs. filter p xs = build (\c n -> foldr (filterFB c p) n xs) "filterList" [1] forall p. foldr (filterFB (:) p) [] = filter p #-} Then (because RULES are applied inside INLINABLE unfoldings, but inlinings are not), the unfolding given to "filter" in the interface file will be: filter p [] = [] filter p (x:xs) = if p x then x : build (\c n -> foldr (filterFB c p) n xs) else build (\c n -> foldr (filterFB c p) n xs Note that because this unfolding does not mention "filter", filter is not marked as a strong loop breaker. Therefore at a use site in another module: filter p xs = {inline} case xs of [] -> [] (x:xs) -> if p x then x : build (\c n -> foldr (filterFB c p) n xs) else build (\c n -> foldr (filterFB c p) n xs) build (\c n -> foldr (filterFB c p) n xs) = {inline} foldr (filterFB (:) p) [] xs = {RULE} filter p xs And we are in an infinite loop again, except that this time the loop is producing an infinitely large *term* (an unrolling of filter) and so the simplifier finally dies with "ticks exhausted" SOLUTION: we treat the rule "filterList" as an extra rule for 'filterFB' because it mentions 'filterFB' on the LHS. This is the Main Invariant in Note [IMP-RULES: local rules for imported functions]. So, during loop-breaker analysis: - for each active RULE for a local function 'f' we add an edge between 'f' and the local FVs of the rule RHS - for each active RULE for an *imported* function we add dependency edges between the *local* FVS of the rule LHS and the *local* FVS of the rule RHS. 
Even with this extra hack we aren't always going to get things right. For example, it might be that the rule LHS mentions an imported Id, and another module has a RULE that can rewrite that imported Id to one of our local Ids. Note [Plugin rules] ~~~~~~~~~~~~~~~~~~~ Conal Elliott (#11651) built a GHC plugin that added some BuiltinRules (for imported Ids) to the mg_rules field of ModGuts, to do some domain-specific transformations that could not be expressed with an ordinary pattern-matching CoreRule. But then we can't extract the dependencies (in imp_rule_edges) from ru_rhs etc, because a BuiltinRule doesn't have any of that stuff. So we simply assume that BuiltinRules have no dependencies, and filter them out from the imp_rule_edges comprehension. Note [Glomming] ~~~~~~~~~~~~~~~ RULES for imported Ids can make something at the top refer to something at the bottom: foo = ...(B.f @Int)... $sf = blah RULE: B.f @Int = $sf Applying this rule makes foo refer to $sf, although foo doesn't appear to depend on $sf. (And, as in Note [IMP-RULES: local rules for imported functions], the dependency might be more indirect. For example, foo might mention C.t rather than B.f, where C.t eventually inlines to B.f.) NOTICE that this cannot happen for rules whose head is a locally-defined function, because we accurately track dependencies through RULES. It only happens for rules whose head is an imported function (B.f in the example above). Solution: - When simplifying, bring all top level identifiers into scope at the start, ignoring the Rec/NonRec structure, so that when 'h' pops up in f's rhs, we find it in the in-scope set (as the simplifier generally expects). This happens in simplTopBinds. - In the occurrence analyser, if there are any out-of-scope occurrences that pop out of the top, which will happen after firing the rule: f = \x -> h x h = \y -> 3 then just glom all the bindings into a single Rec, so that the *next* iteration of the occurrence analyser will sort them all out. 
This part happens in occurAnalysePgm. This is a legitimate situation where the need for glomming doesn't point to any problems. However, when GHC is compiled with -DDEBUG, we produce a warning addressed to the GHC developers just in case we require glomming due to an out-of-order reference that is caused by some earlier transformation stage misbehaving. -} {- ************************************************************************ * * Bindings * * ************************************************************************ Note [Recursive bindings: the grand plan] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Loop breaking is surprisingly subtle. First read the section 4 of "Secrets of the GHC inliner". This describes our basic plan. We avoid infinite inlinings by choosing loop breakers, and ensuring that a loop breaker cuts each loop. See also Note [Inlining and hs-boot files] in GHC.Core.ToIface, which deals with a closely related source of infinite loops. When we come across a binding group Rec { x1 = r1; ...; xn = rn } we treat it like this (occAnalRecBind): 1. Note [Forming Rec groups] Occurrence-analyse each right hand side, and build a "Details" for each binding to capture the results. Wrap the details in a LetrecNode, ready for SCC analysis. All this is done by makeNode. The edges of this graph are the "scope edges". 2. Do SCC-analysis on these Nodes: - Each CyclicSCC will become a new Rec - Each AcyclicSCC will become a new NonRec The key property is that every free variable of a binding is accounted for by the scope edges, so that when we are done everything is still in scope. 3. For each AcyclicSCC, just make a NonRec binding. 4. For each CyclicSCC of the scope-edge SCC-analysis in (2), we identify suitable loop-breakers to ensure that inlining terminates. This is done by occAnalRec. To do so, form the loop-breaker graph, do SCC analysis. For each CyclicSCC we choose a loop breaker, delete all edges to that node, re-analyse the SCC, and iterate. 
See Note [Choosing loop breakers] for the details Note [Dead code] ~~~~~~~~~~~~~~~~ Dropping dead code for a cyclic Strongly Connected Component is done in a very simple way: the entire SCC is dropped if none of its binders are mentioned in the body; otherwise the whole thing is kept. The key observation is that dead code elimination happens after dependency analysis: so 'occAnalBind' processes SCCs instead of the original term's binding groups. Thus 'occAnalBind' does indeed drop 'f' in an example like letrec f = ...g... g = ...(...g...)... in ...g... when 'g' no longer uses 'f' at all (eg 'f' does not occur in a RULE in 'g'). 'occAnalBind' first consumes 'CyclicSCC g' and then it consumes 'AcyclicSCC f', where 'body_usage' won't contain 'f'. Note [Forming Rec groups] ~~~~~~~~~~~~~~~~~~~~~~~~~ The key point about the "Forming Rec groups" step is that it /preserves scoping/. If 'x' is mentioned, it had better be bound somewhere. So if we start with Rec { f = ...h... ; g = ...f... ; h = ...f... } we can split into SCCs Rec { f = ...h... ; h = ..f... } NonRec { g = ...f... } We put bindings {f = ef; g = eg } in a Rec group if "f uses g" and "g uses f", no matter how indirectly. We do a SCC analysis with an edge f -> g if "f mentions g". That is, g is free in: a) the rhs 'ef' b) or the RHS of a rule for f, whether active or inactive Note [Rules are extra RHSs] c) or the LHS or a rule for f, whether active or inactive Note [Rule dependency info] d) the RHS of an /active/ local IMP-RULE Note [IMP-RULES: local rules for imported functions] (b) and (c) apply regardless of the activation of the RULE, because even if the rule is inactive its free variables must be bound. But (d) doesn't need to worry about this because IMP-RULES are always notionally at the bottom of the file. * Note [Rules are extra RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A RULE for 'f' is like an extra RHS for 'f'. That way the "parent" keeps the specialised "children" alive. 
If the parent dies (because it isn't referenced any more), then the children will die too (unless they are already referenced directly). So in Example [eftInt], eftInt and eftIntFB will be put in the same Rec, even though their 'main' RHSs are both non-recursive. We must also include inactive rules, so that their free vars remain in scope. * Note [Rule dependency info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The VarSet in a RuleInfo is used for dependency analysis in the occurrence analyser. We must track free vars in *both* lhs and rhs. Hence use of idRuleVars, rather than idRuleRhsVars in occAnalBind. Why both? Consider x = y RULE f x = v+4 Then if we substitute y for x, we'd better do so in the rule's LHS too, so we'd better ensure the RULE appears to mention 'x' as well as 'v' * Note [Rules are visible in their own rec group] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want the rules for 'f' to be visible in f's right-hand side. And we'd like them to be visible in other functions in f's Rec group. E.g. in Note [Specialisation rules] we want f' rule to be visible in both f's RHS, and fs's RHS. This means that we must simplify the RULEs first, before looking at any of the definitions. This is done by Simplify.simplRecBind, when it calls addLetIdInfo. Note [TailUsageDetails when forming Rec groups] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The `TailUsageDetails` stored in the `nd_uds` field of a `NodeDetails` is computed by `occAnalLamTail` applied to the RHS, not `occAnalExpr`. That is because the binding might still become a *non-recursive join point* in the AcyclicSCC case of dependency analysis! Hence we do the delayed `adjustTailUsage` in `occAnalRec`/`tagRecBinders` to get a regular, adjusted UsageDetails. See Note [Join points and unfoldings/rules] for more details on the contract. Note [Stable unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~ None of the above stuff about RULES applies to a stable unfolding stored in a CoreUnfolding. 
The unfolding, if any, is simplified at the same time as the regular RHS of the function (ie *not* like Note [Rules are visible in their own rec group]), so it should be treated *exactly* like an extra RHS. Or, rather, when computing loop-breaker edges, * If f has an INLINE pragma, and it is active, we treat the INLINE rhs as f's rhs * If it's inactive, we treat f as having no rhs * If it has no INLINE pragma, we look at f's actual rhs There is a danger that we'll be sub-optimal if we see this f = ...f... [INLINE f = ..no f...] where f is recursive, but the INLINE is not. This can just about happen with a sufficiently odd set of rules; eg foo :: Int -> Int {-# INLINE [1] foo #-} foo x = x+1 bar :: Int -> Int {-# INLINE [1] bar #-} bar x = foo x + 1 {-# RULES "foo" [~1] forall x. foo x = bar x #-} Here the RULE makes bar recursive; but it's INLINE pragma remains non-recursive. It's tempting to then say that 'bar' should not be a loop breaker, but an attempt to do so goes wrong in two ways: a) We may get $df = ...$cfoo... $cfoo = ...$df.... [INLINE $cfoo = ...no-$df...] But we want $cfoo to depend on $df explicitly so that we put the bindings in the right order to inline $df in $cfoo and perhaps break the loop altogether. (Maybe this b) Example [eftInt] ~~~~~~~~~~~~~~~ Example (from GHC.Enum): eftInt :: Int# -> Int# -> [Int] eftInt x y = ...(non-recursive)... {-# INLINE [0] eftIntFB #-} eftIntFB :: (Int -> r -> r) -> r -> Int# -> Int# -> r eftIntFB c n x y = ...(non-recursive)... {-# RULES "eftInt" [~1] forall x y. eftInt x y = build (\ c n -> eftIntFB c n x y) "eftIntList" [1] eftIntFB (:) [] = eftInt #-} Note [Specialisation rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this group, which is typical of what SpecConstr builds: fs a = ....f (C a).... f x = ....f (C a).... {-# RULE f (C a) = fs a #-} So 'f' and 'fs' are in the same Rec group (since f refers to fs via its RULE). But watch out! 
If 'fs' is not chosen as a loop breaker, we may get an infinite loop: - the RULE is applied in f's RHS (see Note [Rules for recursive functions] in GHC.Core.Opt.Simplify - fs is inlined (say it's small) - now there's another opportunity to apply the RULE This showed up when compiling Control.Concurrent.Chan.getChanContents. Hence the transitive rule_fv_env stuff described in Note [Rules and loop breakers]. Note [Occurrence analysis for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider these two somewhat artificial programs (#22404) Program (P1) Program (P2) ------------------------------ ------------------------------------- let v = in let v = in join j = case v of (a,b) -> a in case x of in case x of A -> case v of (a,b) -> a A -> j B -> case v of (a,b) -> a B -> j C -> case v of (a,b) -> b C -> case v of (a,b) -> b D -> [] D -> [] In (P1), `v` gets allocated, as a thunk, every time this code is executed. But notice that `v` occurs at most once in any case branch; the occurrence analyser spots this and returns a OneOcc{ occ_n_br = 3 } for `v`. Then the code in GHC.Core.Opt.Simplify.Utils.postInlineUnconditionally inlines `v` at its three use sites, and discards the let-binding. That way, we avoid allocating `v` in the A,B,C branches (though we still compute it of course), and branch D doesn't involve at all. This sometimes makes a Really Big Difference. In (P2) we have shared the common RHS of A, B, in a join point `j`. We would like to inline `v` in just the same way as in (P1). But the usual strategy for let bindings is conservative and uses `andUDs` to combine usage from j's RHS to its body; as if `j` was called on every code path (once, albeit). In the case of (P2), we'll get ManyOccs for `v`. Important optimisation lost! Solving this problem makes the Simplifier less fragile. For example, the Simplifier might inline `j`, and convert (P2) into (P1)... or it might not, depending in a perhaps-fragile way on the size of the join point. 
I was motivated to implement this feature of the occurrence analyser when trying to make optimisation join points simpler and more robust (see e.g. #23627). The occurrence analyser therefore has clever code that behaves just as if you inlined `j` at all its call sites. Here is a tricky variant to keep in mind: Program (P3) ------------------------------- join j = case v of (a,b) -> a in case f v of A -> j B -> j C -> [] If you mentally inline `j` you'll see that `v` is used twice on the path through A, so it should have ManyOcc. Bear this case in mind! * We treat /non-recursive/ join points specially. Recursive join points are treated like any other letrec, as before. Moreover, we only give this special treatment to /pre-existing/ non-recursive join points, not the ones that we discover for the first time in this sweep of the occurrence analyser. * In occ_env, the new (occ_join_points :: IdEnv OccInfoEnv) maps each in-scope non-recursive join point, such as `j` above, to a "zeroed form" of its RHS's usage details. The "zeroed form" * deletes ManyOccs * maps a OneOcc to OneOcc{ occ_n_br = 0 } In our example, occ_join_points will be extended with [j :-> [v :-> OneOcc{occ_n_br=0}]] See addJoinPoint. * At an occurrence of a join point, we do everything as normal, but add in the UsageDetails from the occ_join_points. See mkOneOcc. * Crucially, at the NonRec binding of the join point, in `occAnalBind`, we use `orUDs`, not `andUDs` to combine the usage from the RHS with the usage from the body. Here are the consequences * Because of the perhaps-surprising OneOcc{occ_n_br=0} idea of the zeroed form, the occ_n_br field of a OneOcc binder still counts the number of /actual lexical occurrences/ of the variable. In Program P2, for example, `v` will end up with OneOcc{occ_n_br=2}, not occ_n_br=3. There are two lexical occurrences of `v`! (NB: `orUDs` adds occ_n_br together, so occ_n_br=1 is impossible, too.) 
* In the tricky (P3) we'll get an `andUDs` of * OneOcc{occ_n_br=0} from the occurrences of `j`) * OneOcc{occ_n_br=1} from the (f v) These are `andUDs` together in `addOccInfo`, and hence `v` gets ManyOccs, just as it should. Clever! There are a couple of tricky wrinkles (W1) Consider this example which shadows `j`: join j = rhs in in case x of { K j -> ..j..; ... } Clearly when we come to the pattern `K j` we must drop the `j` entry in occ_join_points. This is done by `drop_shadowed_joins` in `addInScope`. (W2) Consider this example which shadows `v`: join j = ...v... in case x of { K v -> ..j..; ... } We can't make j's occurrences in the K alternative give rise to an occurrence of `v` (via occ_join_points), because it'll just be deleted by the `K v` pattern. Yikes. This is rare because shadowing is rare, but it definitely can happen. Solution: when bringing `v` into scope at the `K v` pattern, chuck out of occ_join_points any elements whose UsageDetails mentions `v`. Instead, just `andUDs` all that usage in right here. This requires work in two places. * In `preprocess_env`, we detect if the newly-bound variables intersect the free vars of occ_join_points. (These free vars are conveniently simply the domain of the OccInfoEnv for that join point.) If so, we zap the entire occ_join_points. * In `postprcess_uds`, we add the chucked-out join points to the returned UsageDetails, with `andUDs`. (W3) Consider this example, which shadows `j`, but this time in an argument join j = rhs in f (case x of { K j -> ...; ... }) We can zap the entire occ_join_points when looking at the argument, because `j` can't posibly occur -- it's a join point! And the smaller occ_join_points is, the better. Smaller to look up in mkOneOcc, and more important, less looking-up when checking (W2). This is done in setNonTailCtxt. It's important /not/ to do this for join-point RHS's because of course `j` can occur there! 
NB: this is just about efficiency: it is always safe /not/ to zap the occ_join_points. (W4) What if the join point binding has a stable unfolding, or RULES? They are just alternative right-hand sides, and at each call site we will use only one of them. So again, we can use `orUDs` to combine usage info from all these alternatives RHSs. Wrinkles (W1) and (W2) are very similar to Note [Binder swap] (BS3). Note [Finding join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~ It's the occurrence analyser's job to find bindings that we can turn into join points, but it doesn't perform that transformation right away. Rather, it marks the eligible bindings as part of their occurrence data, leaving it to the simplifier (or to simpleOptPgm) to actually change the binder's 'IdDetails'. The simplifier then eta-expands the RHS if needed and then updates the occurrence sites. Dividing the work this way means that the occurrence analyser still only takes one pass, yet one can always tell the difference between a function call and a jump by looking at the occurrence (because the same pass changes the 'IdDetails' and propagates the binders to their occurrence sites). To track potential join points, we use the 'occ_tail' field of OccInfo. A value of `AlwaysTailCalled n` indicates that every occurrence of the variable is a tail call with `n` arguments (counting both value and type arguments). Otherwise 'occ_tail' will be 'NoTailCallInfo'. The tail call info flows bottom-up with the rest of 'OccInfo' until it goes on the binder. Note [Join arity prediction based on joinRhsArity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general, the join arity from tail occurrences of a join point (O) may be higher or lower than the manifest join arity of the join body (M). E.g., -- M > O: let f x y = x + y -- M = 2 in if b then f 1 else f 2 -- O = 1 ==> { Contify for join arity 1 } join f x = \y -> x + y in if b then jump f 1 else jump f 2 -- M < O let f = id -- M = 0 in if ... 
then f 12 else f 13 -- O = 1 ==> { Contify for join arity 1, eta-expand f } join f x = id x in if b then jump f 12 else jump f 13 But for *recursive* let, it is crucial that both arities match up, consider letrec f x y = if ... then f x else True in f 42 Here, M=2 but O=1. If we settled for a joinrec arity of 1, the recursive jump would not happen in a tail context! Contification is invalid here. So indeed it is crucial to demand that M=O. (Side note: Actually, we could be more specific: Let O1 be the join arity of occurrences from the letrec RHS and O2 the join arity from the let body. Then we need M=O1 and M<=O2 and could simply eta-expand the RHS to match O2 later. M=O is the specific case where we don't want to eta-expand. Neither the join points paper nor GHC does this at the moment.) We can capitalise on this observation and conclude that *if* f could become a joinrec (without eta-expansion), it will have join arity M. Now, M is just the result of 'joinRhsArity', a rather simple, local analysis. It is also the join arity inside the 'TailUsageDetails' returned by 'occAnalLamTail', so we can predict join arity without doing any fixed-point iteration or really doing any deep traversal of let body or RHS at all. We check for M in the 'adjustTailUsage' call inside 'tagRecBinders'. All this is quite apparent if you look at the contification transformation in Fig. 5 of "Compiling without Continuations" (which does not account for eta-expansion at all, mind you). The letrec case looks like this letrec f = /\as.\xs. L[us] in L'[es] ... and a bunch of conditions establishing that f only occurs in app heads of join arity (len as + len xs) inside us and es ... The syntactic form `/\as.\xs. L[us]` forces M=O iff `f` occurs in `us`. However, for non-recursive functions, this is the definition of contification from the paper: let f = /\as.\xs.u in L[es] ... conditions ... Note that u could be a lambda itself, as we have seen. No relationship between M and O to exploit here. 
Note [Join points and unfoldings/rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider let j2 y = blah let j x = j2 (x+x) {-# INLINE [2] j #-} in case e of { A -> j 1; B -> ...; C -> j 2 } Before j is inlined, we'll have occurrences of j2 in both j's RHS and in its stable unfolding. We want to discover j2 as a join point. So 'occAnalUnfolding' returns an unadjusted 'TailUsageDetails', like 'occAnalLamTail'. We adjust the usage details of the unfolding to the actual join arity using the same 'adjustTailArity' as for the RHS, see Note [Adjusting right-hand sides]. Same with rules. Suppose we have: let j :: Int -> Int j y = 2 * y let k :: Int -> Int -> Int {-# RULES "SPEC k 0" k 0 y = j y #-} k x y = x + 2 * y in case e of { A -> k 1 2; B -> k 3 5; C -> blah } We identify k as a join point, and we want j to be a join point too. Without the RULE it would be, and we don't want the RULE to mess it up. So provided the join-point arity of k matches the args of the rule we can allow the tail-call info from the RHS of the rule to propagate. * Note that the join arity of the RHS and that of the unfolding or RULE might mismatch: let j x y = j2 (x+x) {-# INLINE[2] j = \x. g #-} {-# RULE forall x y z. j x y z = h 17 #-} in j 1 2 So it is crucial that we adjust each TailUsageDetails individually with the actual join arity 2 here before we combine with `andUDs`. Here, that means losing tail call info on `g` and `h`. * Wrinkle for Rec case: We store one TailUsageDetails in the node Details for RHS, unfolding and RULE combined. Clearly, if they don't agree on their join arity, we have to do some adjusting. We choose to adjust to the join arity of the RHS, because that is likely the join arity that the join point will have; see Note [Join arity prediction based on joinRhsArity]. If the guess is correct, then tail calls in the RHS are preserved; a necessary condition for the whole binding becoming a joinrec. 
The guess can only be incorrect in the 'AcyclicSCC' case when the binding becomes a non-recursive join point with a different join arity. But then the eventual call to 'adjustTailUsage' in 'tagRecBinders'/'occAnalRec' will be with a different join arity and destroy unsound tail call info with 'markNonTail'. * Wrinkle for RULES. Suppose the example was a bit different: let j :: Int -> Int j y = 2 * y k :: Int -> Int -> Int {-# RULES "SPEC k 0" k 0 = j #-} k x y = x + 2 * y in ... If we eta-expanded the rule all would be well, but as it stands the one arg of the rule don't match the join-point arity of 2. Conceivably we could notice that a potential join point would have an "undersaturated" rule and account for it. This would mean we could make something that's been specialised a join point, for instance. But local bindings are rarely specialised, and being overly cautious about rules only costs us anything when, for some `j`: * Before specialisation, `j` has non-tail calls, so it can't be a join point. * During specialisation, `j` gets specialised and thus acquires rules. * Sometime afterward, the non-tail calls to `j` disappear (as dead code, say), and so now `j` *could* become a join point. This appears to be very rare in practice. TODO Perhaps we should gather statistics to be sure. ------------------------------------------------------------ Note [Adjusting right-hand sides] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There's a bit of a dance we need to do after analysing a lambda expression or a right-hand side. In particular, we need to a) call 'markAllNonTail' *unless* the binding is for a join point, and the TailUsageDetails from the RHS has the right join arity; e.g. join j x y = case ... 
of A -> j2 p B -> j2 q in j a b Here we want the tail calls to j2 to be tail calls of the whole expression b) call 'markAllInsideLam' *unless* the binding is for a thunk, a one-shot lambda, or a non-recursive join point Some examples, with how the free occurrences in e (assumed not to be a value lambda) get marked: inside lam non-tail-called ------------------------------------------------------------ let x = e No Yes let f = \x -> e Yes Yes let f = \x{OneShot} -> e No Yes \x -> e Yes Yes join j x = e No No joinrec j x = e Yes No There are a few other caveats; most importantly, if we're marking a binding as 'AlwaysTailCalled', it's *going* to be a join point, so we treat it as one so that the effect cascades properly. Consequently, at the time the RHS is analysed, we won't know what adjustments to make; thus 'occAnalLamTail' must return the unadjusted 'TailUsageDetails', to be adjusted by 'adjustTailUsage' once join-point-hood has been decided and eventual one-shot annotations have been added through 'markNonRecJoinOneShots'. It is not so simple to see that 'occAnalNonRecBind' and 'occAnalRecBind' indeed perform a similar sequence of steps. Thus, here is an interleaving of events of both functions, serving as a specification: 1. Call 'occAnalLamTail' to find usage information for the RHS. Recursive case: 'makeNode' Non-recursive case: 'occAnalNonRecBind' 2. (Analyse the binding's scope. Done in 'occAnalBind'/`occAnal Let{}`. Same whether recursive or not.) 3. Call 'tagNonRecBinder' or 'tagRecBinders', which decides whether to make the binding a join point. Cyclic Recursive case: 'mkLoopBreakerNodes' Acyclic Recursive case: `occAnalRec AcyclicSCC{}` Non-recursive case: 'occAnalNonRecBind' 4. Non-recursive join point: Call 'markNonRecJoinOneShots' so that e.g., FloatOut sees one-shot annotations on lambdas Acyclic Recursive case: `occAnalRec AcyclicSCC{}` calls 'adjustNonRecRhs' Non-recursive case: 'occAnalNonRecBind' calls 'adjustNonRecRhs' 5. 
Call 'adjustTailUsage' accordingly. Cyclic Recursive case: 'tagRecBinders' Acyclic Recursive case: 'adjustNonRecRhs' Non-recursive case: 'adjustNonRecRhs' -} ------------------------------------------------------------------ -- occAnalBind ------------------------------------------------------------------ occAnalBind :: OccEnv -> TopLevelFlag -> ImpRuleEdges -> CoreBind -> (OccEnv -> WithUsageDetails r) -- Scope of the bind -> ([CoreBind] -> r -> r) -- How to combine the scope with new binds -> WithUsageDetails r -- Of the whole let(rec) occAnalBind env lvl ire (Rec pairs) thing_inside combine = addInScopeList env (map fst pairs) $ \env -> let WUD body_uds body' = thing_inside env WUD bind_uds binds' = occAnalRecBind env lvl ire pairs body_uds in WUD bind_uds (combine binds' body') occAnalBind !env lvl ire (NonRec bndr rhs) thing_inside combine | isTyVar bndr -- A type let; we don't gather usage info = let !(WUD body_uds res) = addInScopeOne env bndr thing_inside in WUD body_uds (combine [NonRec bndr rhs] res) -- /Existing/ non-recursive join points -- See Note [Occurrence analysis for join points] | mb_join@(JoinPoint {}) <- idJoinPointHood bndr = -- Analyse the RHS and /then/ the body let -- Analyse the rhs first, generating rhs_uds !(rhs_uds_s, bndr', rhs') = occAnalNonRecRhs env lvl ire mb_join bndr rhs rhs_uds = foldr1 orUDs rhs_uds_s -- NB: orUDs. 
See (W4) of -- Note [Occurrence analysis for join points] -- Now analyse the body, adding the join point -- into the environment with addJoinPoint !(WUD body_uds (occ, body)) = occAnalNonRecBody env bndr' $ \env -> thing_inside (addJoinPoint env bndr' rhs_uds) in if isDeadOcc occ -- Drop dead code; see Note [Dead code] then WUD body_uds body else WUD (rhs_uds `orUDs` body_uds) -- Note `orUDs` (combine [NonRec (fst (tagNonRecBinder lvl occ bndr')) rhs'] body) -- The normal case, including newly-discovered join points -- Analyse the body and /then/ the RHS | WUD body_uds (occ,body) <- occAnalNonRecBody env bndr thing_inside = if isDeadOcc occ -- Drop dead code; see Note [Dead code] then WUD body_uds body else let -- Get the join info from the *new* decision; NB: bndr is not already a JoinId -- See Note [Join points and unfoldings/rules] -- => join arity O of Note [Join arity prediction based on joinRhsArity] (tagged_bndr, mb_join) = tagNonRecBinder lvl occ bndr !(rhs_uds_s, final_bndr, rhs') = occAnalNonRecRhs env lvl ire mb_join tagged_bndr rhs in WUD (foldr andUDs body_uds rhs_uds_s) -- Note `andUDs` (combine [NonRec final_bndr rhs'] body) ----------------- occAnalNonRecBody :: OccEnv -> Id -> (OccEnv -> WithUsageDetails r) -- Scope of the bind -> (WithUsageDetails (OccInfo, r)) occAnalNonRecBody env bndr thing_inside = addInScopeOne env bndr $ \env -> let !(WUD inner_uds res) = thing_inside env !occ = lookupLetOccInfo inner_uds bndr in WUD inner_uds (occ, res) ----------------- occAnalNonRecRhs :: OccEnv -> TopLevelFlag -> ImpRuleEdges -> JoinPointHood -> Id -> CoreExpr -> ([UsageDetails], Id, CoreExpr) occAnalNonRecRhs !env lvl imp_rule_edges mb_join bndr rhs | null rules, null imp_rule_infos = -- Fast path for common case of no rules. 
This is only worth -- 0.1% perf on average, but it's also only a line or two of code ( [adj_rhs_uds, adj_unf_uds], final_bndr_no_rules, final_rhs ) | otherwise = (adj_rhs_uds : adj_unf_uds : adj_rule_uds, final_bndr_with_rules, final_rhs ) where --------- Right hand side --------- -- For join points, set occ_encl to OccVanilla, via setTailCtxt. If we have -- join j = Just (f x) in ... -- we do not want to float the (f x) to -- let y = f x in join j = Just y in ... -- That's that OccRhs would do; but there's no point because -- j will never be scrutinised. rhs_env = mkRhsOccEnv env NonRecursive rhs_ctxt mb_join bndr rhs rhs_ctxt = mkNonRecRhsCtxt lvl bndr unf -- See Note [Join arity prediction based on joinRhsArity] -- Match join arity O from mb_join_arity with manifest join arity M as -- returned by of occAnalLamTail. It's totally OK for them to mismatch; -- hence adjust the UDs from the RHS WUD adj_rhs_uds final_rhs = adjustNonRecRhs mb_join $ occAnalLamTail rhs_env rhs final_bndr_with_rules | noBinderSwaps env = bndr -- See Note [Unfoldings and rules] | otherwise = bndr `setIdSpecialisation` mkRuleInfo rules' `setIdUnfolding` unf1 final_bndr_no_rules | noBinderSwaps env = bndr -- See Note [Unfoldings and rules] | otherwise = bndr `setIdUnfolding` unf1 --------- Unfolding --------- -- See Note [Join points and unfoldings/rules] unf = idUnfolding bndr WTUD unf_tuds unf1 = occAnalUnfolding rhs_env unf adj_unf_uds = adjustTailArity mb_join unf_tuds --------- Rules --------- -- See Note [Rules are extra RHSs] and Note [Rule dependency info] -- and Note [Join points and unfoldings/rules] rules = idCoreRules bndr rules_w_uds = map (occAnalRule rhs_env) rules rules' = map fstOf3 rules_w_uds imp_rule_infos = lookupImpRules imp_rule_edges bndr imp_rule_uds = [impRulesScopeUsage imp_rule_infos] -- imp_rule_uds: consider -- h = ... -- g = ... 
-- RULE map g = h -- Then we want to ensure that h is in scope everywhere -- that g is (since the RULE might turn g into h), so -- we make g mention h. adj_rule_uds :: [UsageDetails] adj_rule_uds = imp_rule_uds ++ [ l `andUDs` adjustTailArity mb_join r | (_,l,r) <- rules_w_uds ] mkNonRecRhsCtxt :: TopLevelFlag -> Id -> Unfolding -> OccEncl -- Precondition: Id is not a join point mkNonRecRhsCtxt lvl bndr unf | certainly_inline = OccVanilla -- See Note [Cascading inlines] | otherwise = OccRhs where certainly_inline -- See Note [Cascading inlines] = -- mkNonRecRhsCtxt is only used for non-join points, so occAnalBind -- has set the OccInfo for this binder before calling occAnalNonRecRhs case idOccInfo bndr of OneOcc { occ_in_lam = NotInsideLam, occ_n_br = 1 } -> active && not stable_unf && not top_bottoming _ -> False active = isAlwaysActive (idInlineActivation bndr) stable_unf = isStableUnfolding unf top_bottoming = isTopLevel lvl && isDeadEndId bndr ----------------- occAnalRecBind :: OccEnv -> TopLevelFlag -> ImpRuleEdges -> [(Var,CoreExpr)] -> UsageDetails -> WithUsageDetails [CoreBind] -- For a recursive group, we -- * occ-analyse all the RHSs -- * compute strongly-connected components -- * feed those components to occAnalRec -- See Note [Recursive bindings: the grand plan] occAnalRecBind !rhs_env lvl imp_rule_edges pairs body_usage = foldr (occAnalRec rhs_env lvl) (WUD body_usage []) sccs where sccs :: [SCC NodeDetails] sccs = stronglyConnCompFromEdgedVerticesUniq nodes nodes :: [LetrecNode] nodes = map (makeNode rhs_env imp_rule_edges bndr_set) pairs bndrs = map fst pairs bndr_set = mkVarSet bndrs ----------------------------- occAnalRec :: OccEnv -> TopLevelFlag -> SCC NodeDetails -> WithUsageDetails [CoreBind] -> WithUsageDetails [CoreBind] -- The NonRec case is just like a Let (NonRec ...) 
above occAnalRec !_ lvl (AcyclicSCC (ND { nd_bndr = bndr, nd_rhs = wtuds })) (WUD body_uds binds) | isDeadOcc occ -- Check for dead code: see Note [Dead code] = WUD body_uds binds | otherwise = let (bndr', mb_join) = tagNonRecBinder lvl occ bndr !(WUD rhs_uds' rhs') = adjustNonRecRhs mb_join wtuds in WUD (body_uds `andUDs` rhs_uds') (NonRec bndr' rhs' : binds) where occ = lookupLetOccInfo body_uds bndr -- The Rec case is the interesting one -- See Note [Recursive bindings: the grand plan] -- See Note [Loop breaking] occAnalRec env lvl (CyclicSCC details_s) (WUD body_uds binds) | not (any needed details_s) = -- Check for dead code: see Note [Dead code] -- NB: Only look at body_uds, ignoring uses in the SCC WUD body_uds binds | otherwise = WUD final_uds (Rec pairs : binds) where all_simple = all nd_simple details_s needed :: NodeDetails -> Bool needed (ND { nd_bndr = bndr }) = isExportedId bndr || bndr `elemVarEnv` body_env body_env = ud_env body_uds ------------------------------ -- Make the nodes for the loop-breaker analysis -- See Note [Choosing loop breakers] for loop_breaker_nodes final_uds :: UsageDetails loop_breaker_nodes :: [LoopBreakerNode] WUD final_uds loop_breaker_nodes = mkLoopBreakerNodes env lvl body_uds details_s ------------------------------ weak_fvs :: VarSet weak_fvs = mapUnionVarSet nd_weak_fvs details_s --------------------------- -- Now reconstruct the cycle pairs :: [(Id,CoreExpr)] pairs | all_simple = reOrderNodes 0 weak_fvs loop_breaker_nodes [] | otherwise = loopBreakNodes 0 weak_fvs loop_breaker_nodes [] -- In the common case when all are "simple" (no rules at all) -- the loop_breaker_nodes will include all the scope edges -- so a SCC computation would yield a single CyclicSCC result; -- and reOrderNodes deals with exactly that case. 
-- Saves a SCC analysis in a common case {- ********************************************************************* * * Loop breaking * * ********************************************************************* -} {- Note [Choosing loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Step 4 in Note [Recursive bindings: the grand plan]), occAnalRec does loop-breaking on each CyclicSCC of the original program: * mkLoopBreakerNodes: Form the loop-breaker graph for that CyclicSCC * loopBreakNodes: Do SCC analysis on it * reOrderNodes: For each CyclicSCC, pick a loop breaker * Delete edges to that loop breaker * Do another SCC analysis on that reduced SCC * Repeat To form the loop-breaker graph, we construct a new set of Nodes, the "loop-breaker nodes", with the same details but different edges, the "loop-breaker edges". The loop-breaker nodes have both more and fewer dependencies than the scope edges: More edges: If f calls g, and g has an active rule that mentions h then we add an edge from f -> h. See Note [Rules and loop breakers]. Fewer edges: we only include dependencies * only on /active/ rules, * on rule /RHSs/ (not LHSs) The scope edges, by contrast, must be much more inclusive. The nd_simple flag tracks the common case when a binding has no RULES at all, in which case the loop-breaker edges will be identical to the scope edges. Note that in Example [eftInt], *neither* eftInt *nor* eftIntFB is chosen as a loop breaker, because their RHSs don't mention each other. And indeed both can be inlined safely. Note [inl_fvs] ~~~~~~~~~~~~~~ Note that the loop-breaker graph includes edges for occurrences in /both/ the RHS /and/ the stable unfolding. Consider this, which actually occurred when compiling BooleanFormula.hs in GHC: Rec { lvl1 = go ; lvl2[StableUnf = go] = lvl1 ; go = ...go...lvl2... 
} From the point of view of infinite inlining, we need only these edges: lvl1 :-> go lvl2 :-> go -- The RHS lvl1 will never be used for inlining go :-> go, lvl2 But the danger is that, lacking any edge to lvl1, we'll put it at the end thus Rec { lvl2[ StableUnf = go] = lvl1 ; go[LoopBreaker] = ...go...lvl2... } ; lvl1[Occ=Once] = go } And now the Simplifer will try to use PreInlineUnconditionally on lvl1 (which occurs just once), but because it is last we won't actually substitute in lvl2. Sigh. To avoid this possibility, we include edges from lvl2 to /both/ its stable unfolding /and/ its RHS. Hence the defn of inl_fvs in makeNode. Maybe we could be more clever, but it's very much a corner case. Note [Weak loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~ There is a last nasty wrinkle. Suppose we have Rec { f = f_rhs RULE f [] = g h = h_rhs g = h ...more... } Remember that we simplify the RULES before any RHS (see Note [Rules are visible in their own rec group] above). So we must *not* postInlineUnconditionally 'g', even though its RHS turns out to be trivial. (I'm assuming that 'g' is not chosen as a loop breaker.) Why not? Because then we drop the binding for 'g', which leaves it out of scope in the RULE! Here's a somewhat different example of the same thing Rec { q = r ; r = ...p... ; p = p_rhs RULE p [] = q } Here the RULE is "below" q, but we *still* can't postInlineUnconditionally q, because the RULE for p is active throughout. So the RHS of r might rewrite to r = ...q... So q must remain in scope in the output program! We "solve" this by: Make q a "weak" loop breaker (OccInfo = IAmLoopBreaker True) iff q is a mentioned in the RHS of any RULE (active on not) in the Rec group Note the "active or not" comment; even if a RULE is inactive, we want its RHS free vars to stay alive (#20820)! A normal "strong" loop breaker has IAmLoopBreaker False. 
So: Inline postInlineUnconditionally strong IAmLoopBreaker False no no weak IAmLoopBreaker True yes no other yes yes The **sole** reason for this kind of loop breaker is so that postInlineUnconditionally does not fire. Ugh. Annoyingly, since we simplify the rules *first* we'll never inline q into p's RULE. That trivial binding for q will hang around until we discard the rule. Yuk. But it's rare. Note [Rules and loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we form the loop-breaker graph (Step 4 in Note [Recursive bindings: the grand plan]), we must be careful about RULEs. For a start, we want a loop breaker to cut every cycle, so inactive rules play no part; we need only consider /active/ rules. See Note [Finding rule RHS free vars] The second point is more subtle. A RULE is like an equation for 'f' that is *always* inlined if it is applicable. We do *not* disable rules for loop-breakers. It's up to whoever makes the rules to make sure that the rules themselves always terminate. See Note [Rules for recursive functions] in GHC.Core.Opt.Simplify Hence, if f's RHS (or its stable unfolding if it has one) mentions g, and g has a RULE that mentions h, and h has a RULE that mentions f then we *must* choose f to be a loop breaker. Example: see Note [Specialisation rules]. So our plan is this: Take the free variables of f's RHS, and augment it with all the variables reachable by a transitive sequence RULES from those starting points. That is the whole reason for computing rule_fv_env in mkLoopBreakerNodes. Wrinkles: * We only consider /active/ rules. See Note [Finding rule RHS free vars] * We need only consider free vars that are also binders in this Rec group. See also Note [Finding rule RHS free vars] * We only consider variables free in the *RHS* of the rule, in contrast to the way we build the Rec group in the first place (Note [Rule dependency info]) * Why "transitive sequence of rules"? 
Because active rules apply unconditionally, without checking loop-breaker-ness. See Note [Loop breaker dependencies]. Note [Finding rule RHS free vars] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this real example from Data Parallel Haskell tagZero :: Array Int -> Array Tag {-# INLINE [1] tagZeroes #-} tagZero xs = pmap (\x -> fromBool (x==0)) xs {-# RULES "tagZero" [~1] forall xs n. pmap fromBool = tagZero xs #-} So tagZero's RHS mentions pmap, and pmap's RULE mentions tagZero. However, tagZero can only be inlined in phase 1 and later, while the RULE is only active *before* phase 1. So there's no problem. To make this work, we look for the RHS free vars only for *active* rules. That's the reason for the occ_rule_act field of the OccEnv. Note [loopBreakNodes] ~~~~~~~~~~~~~~~~~~~~~ loopBreakNodes is applied to the list of nodes for a cyclic strongly connected component (there's guaranteed to be a cycle). It returns the same nodes, but a) in a better order, b) with some of the Ids having a IAmALoopBreaker pragma The "loop-breaker" Ids are sufficient to break all cycles in the SCC. This means that the simplifier can guarantee not to loop provided it never records an inlining for these no-inline guys. Furthermore, the order of the binds is such that if we neglect dependencies on the no-inline Ids then the binds are topologically sorted. This means that the simplifier will generally do a good job if it works from top bottom, recording inlinings for any Ids which aren't marked as "no-inline" as it goes. -} type Binding = (Id,CoreExpr) -- See Note [loopBreakNodes] loopBreakNodes :: Int -> VarSet -- Binders whose dependencies may be "missing" -- See Note [Weak loop breakers] -> [LoopBreakerNode] -> [Binding] -- Append these to the end -> [Binding] -- Return the bindings sorted into a plausible order, and marked with loop breakers. 
-- See Note [loopBreakNodes] loopBreakNodes depth weak_fvs nodes binds = -- pprTrace "loopBreakNodes" (ppr nodes) $ go (stronglyConnCompFromEdgedVerticesUniqR nodes) where go [] = binds go (scc:sccs) = loop_break_scc scc (go sccs) loop_break_scc scc binds = case scc of AcyclicSCC node -> nodeBinding (mk_non_loop_breaker weak_fvs) node : binds CyclicSCC nodes -> reOrderNodes depth weak_fvs nodes binds ---------------------------------- reOrderNodes :: Int -> VarSet -> [LoopBreakerNode] -> [Binding] -> [Binding] -- Choose a loop breaker, mark it no-inline, -- and call loopBreakNodes on the rest reOrderNodes _ _ [] _ = panic "reOrderNodes" reOrderNodes _ _ [node] binds = nodeBinding mk_loop_breaker node : binds reOrderNodes depth weak_fvs (node : nodes) binds = -- pprTrace "reOrderNodes" (vcat [ text "unchosen" <+> ppr unchosen -- , text "chosen" <+> ppr chosen_nodes ]) $ loopBreakNodes new_depth weak_fvs unchosen $ (map (nodeBinding mk_loop_breaker) chosen_nodes ++ binds) where (chosen_nodes, unchosen) = chooseLoopBreaker approximate_lb (snd_score (node_payload node)) [node] [] nodes approximate_lb = depth >= 2 new_depth | approximate_lb = 0 | otherwise = depth+1 -- After two iterations (d=0, d=1) give up -- and approximate, returning to d=0 nodeBinding :: (Id -> Id) -> LoopBreakerNode -> Binding nodeBinding set_id_occ (node_payload -> SND { snd_bndr = bndr, snd_rhs = rhs}) = (set_id_occ bndr, rhs) mk_loop_breaker :: Id -> Id mk_loop_breaker bndr = bndr `setIdOccInfo` occ' where occ' = strongLoopBreaker { occ_tail = tail_info } tail_info = tailCallInfo (idOccInfo bndr) mk_non_loop_breaker :: VarSet -> Id -> Id -- See Note [Weak loop breakers] mk_non_loop_breaker weak_fvs bndr | bndr `elemVarSet` weak_fvs = setIdOccInfo bndr occ' | otherwise = bndr where occ' = weakLoopBreaker { occ_tail = tail_info } tail_info = tailCallInfo (idOccInfo bndr) ---------------------------------- chooseLoopBreaker :: Bool -- True <=> Too many iterations, -- so approximate -> NodeScore -- 
Best score so far -> [LoopBreakerNode] -- Nodes with this score -> [LoopBreakerNode] -- Nodes with higher scores -> [LoopBreakerNode] -- Unprocessed nodes -> ([LoopBreakerNode], [LoopBreakerNode]) -- This loop looks for the bind with the lowest score -- to pick as the loop breaker. The rest accumulate in chooseLoopBreaker _ _ loop_nodes acc [] = (loop_nodes, acc) -- Done -- If approximate_loop_breaker is True, we pick *all* -- nodes with lowest score, else just one -- See Note [Complexity of loop breaking] chooseLoopBreaker approx_lb loop_sc loop_nodes acc (node : nodes) | approx_lb , rank sc == rank loop_sc = chooseLoopBreaker approx_lb loop_sc (node : loop_nodes) acc nodes | sc `betterLB` loop_sc -- Better score so pick this new one = chooseLoopBreaker approx_lb sc [node] (loop_nodes ++ acc) nodes | otherwise -- Worse score so don't pick it = chooseLoopBreaker approx_lb loop_sc loop_nodes (node : acc) nodes where sc = snd_score (node_payload node) {- Note [Complexity of loop breaking] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The loop-breaking algorithm knocks out one binder at a time, and performs a new SCC analysis on the remaining binders. That can behave very badly in tightly-coupled groups of bindings; in the worst case it can be (N**2)*log N, because it does a full SCC on N, then N-1, then N-2 and so on. To avoid this, we switch plans after 2 (or whatever) attempts: Plan A: pick one binder with the lowest score, make it a loop breaker, and try again Plan B: pick *all* binders with the lowest score, make them all loop breakers, and try again Since there are only a small finite number of scores, this will terminate in a constant number of iterations, rather than O(N) iterations. You might thing that it's very unlikely, but RULES make it much more likely. Here's a real example from #1969: Rec { $dm = \d.\x. op d {-# RULES forall d. $dm Int d = $s$dm1 forall d. $dm Bool d = $s$dm2 #-} dInt = MkD .... opInt ... dInt = MkD .... opBool ... 
opInt = $dm dInt opBool = $dm dBool $s$dm1 = \x. op dInt $s$dm2 = \x. op dBool } The RULES stuff means that we can't choose $dm as a loop breaker (Note [Choosing loop breakers]), so we must choose at least (say) opInt *and* opBool, and so on. The number of loop breakers is linear in the number of instance declarations. Note [Loop breakers and INLINE/INLINABLE pragmas] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Avoid choosing a function with an INLINE pramga as the loop breaker! If such a function is mutually-recursive with a non-INLINE thing, then the latter should be the loop-breaker. It's vital to distinguish between INLINE and INLINABLE (the Bool returned by hasStableCoreUnfolding_maybe). If we start with Rec { {-# INLINABLE f #-} f x = ...f... } and then worker/wrapper it through strictness analysis, we'll get Rec { {-# INLINABLE $wf #-} $wf p q = let x = (p,q) in ...f... {-# INLINE f #-} f x = case x of (p,q) -> $wf p q } Now it is vital that we choose $wf as the loop breaker, so we can inline 'f' in '$wf'. Note [DFuns should not be loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's particularly bad to make a DFun into a loop breaker. See Note [How instance declarations are translated] in GHC.Tc.TyCl.Instance We give DFuns a higher score than ordinary CONLIKE things because if there's a choice we want the DFun to be the non-loop breaker. Eg rec { sc = /\ a \$dC. $fBWrap (T a) ($fCT @ a $dC) $fCT :: forall a_afE. (Roman.C a_afE) => Roman.C (Roman.T a_afE) {-# DFUN #-} $fCT = /\a \$dC. MkD (T a) ((sc @ a $dC) |> blah) ($ctoF @ a $dC) } Here 'sc' (the superclass) looks CONLIKE, but we'll never get to it if we can't unravel the DFun first. Note [Constructor applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's really really important to inline dictionaries. 
Real example (the Enum Ordering instance from GHC.Base): rec f = \ x -> case d of (p,q,r) -> p x g = \ x -> case d of (p,q,r) -> q x d = (v, f, g) Here, f and g occur just once; but we can't inline them into d. On the other hand we *could* simplify those case expressions if we didn't stupidly choose d as the loop breaker. But we won't because constructor args are marked "Many". Inlining dictionaries is really essential to unravelling the loops in static numeric dictionaries, see GHC.Float. Note [Closure conversion] ~~~~~~~~~~~~~~~~~~~~~~~~~ We treat (\x. C p q) as a high-score candidate in the letrec scoring algorithm. The immediate motivation came from the result of a closure-conversion transformation which generated code like this: data Clo a b = forall c. Clo (c -> a -> b) c ($:) :: Clo a b -> a -> b Clo f env $: x = f env x rec { plus = Clo plus1 () ; plus1 _ n = Clo plus2 n ; plus2 Zero n = n ; plus2 (Succ m) n = Succ (plus $: m $: n) } If we inline 'plus' and 'plus1', everything unravels nicely. But if we choose 'plus1' as the loop breaker (which is entirely possible otherwise), the loop does not unravel nicely. @occAnalUnfolding@ deals with the question of bindings where the Id is marked by an INLINE pragma. For these we record that anything which occurs in its RHS occurs many times. This pessimistically assumes that this inlined binder also occurs many times in its scope, but if it doesn't we'll catch it next time round. At worst this costs an extra simplifier pass. ToDo: try using the occurrence info for the inline'd binder. [March 97] We do the same for atomic RHSs. Reason: see notes with loopBreakSCC. [June 98, SLPJ] I've undone this change; I don't understand it. See notes with loopBreakSCC. ************************************************************************ * * Making nodes * * ************************************************************************ -} -- | Digraph node as constructed by 'makeNode' and consumed by 'occAnalRec'. 
-- The Unique key is gotten from the Id. type LetrecNode = Node Unique NodeDetails -- | Node details as consumed by 'occAnalRec'. data NodeDetails = ND { nd_bndr :: Id -- Binder , nd_rhs :: !(WithTailUsageDetails CoreExpr) -- ^ RHS, already occ-analysed -- With TailUsageDetails from RHS, and RULES, and stable unfoldings, -- ignoring phase (ie assuming all are active). -- NB: Unadjusted TailUsageDetails, as if this Node becomes a -- non-recursive join point! -- See Note [TailUsageDetails when forming Rec groups] , nd_inl :: IdSet -- Free variables of the stable unfolding and the RHS -- but excluding any RULES -- This is the IdSet that may be used if the Id is inlined , nd_simple :: Bool -- True iff this binding has no local RULES -- If all nodes are simple we don't need a loop-breaker -- dep-anal before reconstructing. , nd_weak_fvs :: IdSet -- Variables bound in this Rec group that are free -- in the RHS of any rule (active or not) for this bndr -- See Note [Weak loop breakers] , nd_active_rule_fvs :: IdSet -- Variables bound in this Rec group that are free -- in the RHS of an active rule for this bndr -- See Note [Rules and loop breakers] } instance Outputable NodeDetails where ppr nd = text "ND" <> braces (sep [ text "bndr =" <+> ppr (nd_bndr nd) , text "uds =" <+> ppr uds , text "inl =" <+> ppr (nd_inl nd) , text "simple =" <+> ppr (nd_simple nd) , text "active_rule_fvs =" <+> ppr (nd_active_rule_fvs nd) ]) where WTUD uds _ = nd_rhs nd -- | Digraph with simplified and completely occurrence analysed -- 'SimpleNodeDetails', retaining just the info we need for breaking loops. type LoopBreakerNode = Node Unique SimpleNodeDetails -- | Condensed variant of 'NodeDetails' needed during loop breaking. 
data SimpleNodeDetails = SND { snd_bndr :: IdWithOccInfo -- OccInfo accurate , snd_rhs :: CoreExpr -- properly occur-analysed , snd_score :: NodeScore } instance Outputable SimpleNodeDetails where ppr nd = text "SND" <> braces (sep [ text "bndr =" <+> ppr (snd_bndr nd) , text "score =" <+> ppr (snd_score nd) ]) -- The NodeScore is compared lexicographically; -- e.g. lower rank wins regardless of size type NodeScore = ( Int -- Rank: lower => more likely to be picked as loop breaker , Int -- Size of rhs: higher => more likely to be picked as LB -- Maxes out at maxExprSize; we just use it to prioritise -- small functions , Bool ) -- Was it a loop breaker before? -- True => more likely to be picked -- Note [Loop breakers, node scoring, and stability] rank :: NodeScore -> Int rank (r, _, _) = r makeNode :: OccEnv -> ImpRuleEdges -> VarSet -> (Var, CoreExpr) -> LetrecNode -- See Note [Recursive bindings: the grand plan] makeNode !env imp_rule_edges bndr_set (bndr, rhs) = -- pprTrace "makeNode" (ppr bndr <+> ppr (sizeVarSet bndr_set)) $ DigraphNode { node_payload = details , node_key = varUnique bndr , node_dependencies = nonDetKeysUniqSet scope_fvs } -- It's OK to use nonDetKeysUniqSet here as stronglyConnCompFromEdgedVerticesR -- is still deterministic with edges in nondeterministic order as -- explained in Note [Deterministic SCC] in GHC.Data.Graph.Directed. where details = ND { nd_bndr = bndr' , nd_rhs = WTUD (TUD rhs_ja unadj_scope_uds) rhs' , nd_inl = inl_fvs , nd_simple = null rules_w_uds && null imp_rule_info , nd_weak_fvs = weak_fvs , nd_active_rule_fvs = active_rule_fvs } bndr' | noBinderSwaps env = bndr -- See Note [Unfoldings and rules] | otherwise = bndr `setIdUnfolding` unf' `setIdSpecialisation` mkRuleInfo rules' -- NB: Both adj_unf_uds and adj_rule_uds have been adjusted to match the -- JoinArity rhs_ja of unadj_rhs_uds. 
unadj_inl_uds = unadj_rhs_uds `andUDs` adj_unf_uds unadj_scope_uds = unadj_inl_uds `andUDs` adj_rule_uds -- Note [Rules are extra RHSs] -- Note [Rule dependency info] scope_fvs = udFreeVars bndr_set unadj_scope_uds -- scope_fvs: all occurrences from this binder: RHS, unfolding, -- and RULES, both LHS and RHS thereof, active or inactive inl_fvs = udFreeVars bndr_set unadj_inl_uds -- inl_fvs: vars that would become free if the function was inlined. -- We conservatively approximate that by the free vars from the RHS -- and the unfolding together. -- See Note [inl_fvs] --------- Right hand side --------- -- Constructing the edges for the main Rec computation -- See Note [Forming Rec groups] -- and Note [TailUsageDetails when forming Rec groups] -- Compared to occAnalNonRecBind, we can't yet adjust the RHS because -- (a) we don't yet know the final joinpointhood. It might not become a -- join point after all! -- (b) we don't even know whether it stays a recursive RHS after the SCC -- analysis we are about to seed! So we can't markAllInsideLam in -- advance, because if it ends up as a non-recursive join point we'll -- consider it as one-shot and don't need to markAllInsideLam. -- Instead, do the occAnalLamTail call here and postpone adjustTailUsage -- until occAnalRec. In effect, we pretend that the RHS becomes a -- non-recursive join point and fix up later with adjustTailUsage. rhs_env = mkRhsOccEnv env Recursive OccRhs (idJoinPointHood bndr) bndr rhs -- If bndr isn't an /existing/ join point (so idJoinPointHood = NotJoinPoint), -- it's safe to zap the occ_join_points, because they can't occur in RHS. WTUD (TUD rhs_ja unadj_rhs_uds) rhs' = occAnalLamTail rhs_env rhs -- The corresponding call to adjustTailUsage is in occAnalRec and tagRecBinders --------- Unfolding --------- -- See Note [Join points and unfoldings/rules] unf = realIdUnfolding bndr -- realIdUnfolding: Ignore loop-breaker-ness -- here because that is what we are setting! 
WTUD unf_tuds unf' = occAnalUnfolding rhs_env unf adj_unf_uds = adjustTailArity (JoinPoint rhs_ja) unf_tuds -- `rhs_ja` is `joinRhsArity rhs` and is the prediction for source M -- of Note [Join arity prediction based on joinRhsArity] --------- IMP-RULES -------- is_active = occ_rule_act env :: Activation -> Bool imp_rule_info = lookupImpRules imp_rule_edges bndr imp_rule_uds = impRulesScopeUsage imp_rule_info imp_rule_fvs = impRulesActiveFvs is_active bndr_set imp_rule_info --------- All rules -------- -- See Note [Join points and unfoldings/rules] -- `rhs_ja` is `joinRhsArity rhs'` and is the prediction for source M -- of Note [Join arity prediction based on joinRhsArity] rules_w_uds :: [(CoreRule, UsageDetails, UsageDetails)] rules_w_uds = [ (r,l,adjustTailArity (JoinPoint rhs_ja) rhs_wuds) | rule <- idCoreRules bndr , let (r,l,rhs_wuds) = occAnalRule rhs_env rule ] rules' = map fstOf3 rules_w_uds adj_rule_uds = foldr add_rule_uds imp_rule_uds rules_w_uds add_rule_uds (_, l, r) uds = l `andUDs` r `andUDs` uds -------- active_rule_fvs ------------ active_rule_fvs = foldr add_active_rule imp_rule_fvs rules_w_uds add_active_rule (rule, _, rhs_uds) fvs | is_active (ruleActivation rule) = udFreeVars bndr_set rhs_uds `unionVarSet` fvs | otherwise = fvs -------- weak_fvs ------------ -- See Note [Weak loop breakers] weak_fvs = foldr add_rule emptyVarSet rules_w_uds add_rule (_, _, rhs_uds) fvs = udFreeVars bndr_set rhs_uds `unionVarSet` fvs mkLoopBreakerNodes :: OccEnv -> TopLevelFlag -> UsageDetails -- for BODY of let -> [NodeDetails] -> WithUsageDetails [LoopBreakerNode] -- with OccInfo up-to-date -- See Note [Choosing loop breakers] -- This function primarily creates the Nodes for the -- loop-breaker SCC analysis. 
More specifically: -- a) tag each binder with its occurrence info -- b) add a NodeScore to each node -- c) make a Node with the right dependency edges for -- the loop-breaker SCC analysis -- d) adjust each RHS's usage details according to -- the binder's (new) shotness and join-point-hood mkLoopBreakerNodes !env lvl body_uds details_s = WUD final_uds (zipWithEqual "mkLoopBreakerNodes" mk_lb_node details_s bndrs') where WUD final_uds bndrs' = tagRecBinders lvl body_uds details_s mk_lb_node nd@(ND { nd_bndr = old_bndr, nd_inl = inl_fvs , nd_rhs = WTUD _ rhs }) new_bndr = DigraphNode { node_payload = simple_nd , node_key = varUnique old_bndr , node_dependencies = nonDetKeysUniqSet lb_deps } -- It's OK to use nonDetKeysUniqSet here as -- stronglyConnCompFromEdgedVerticesR is still deterministic with edges -- in nondeterministic order as explained in -- Note [Deterministic SCC] in GHC.Data.Graph.Directed. where simple_nd = SND { snd_bndr = new_bndr, snd_rhs = rhs, snd_score = score } score = nodeScore env new_bndr lb_deps nd lb_deps = extendFvs_ rule_fv_env inl_fvs -- See Note [Loop breaker dependencies] rule_fv_env :: IdEnv IdSet -- Maps a variable f to the variables from this group -- reachable by a sequence of RULES starting with f -- Domain is *subset* of bound vars (others have no rule fvs) -- See Note [Finding rule RHS free vars] -- Why transClosureFV? See Note [Loop breaker dependencies] rule_fv_env = transClosureFV $ mkVarEnv $ [ (b, rule_fvs) | ND { nd_bndr = b, nd_active_rule_fvs = rule_fvs } <- details_s , not (isEmptyVarSet rule_fvs) ] {- Note [Loop breaker dependencies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The loop breaker dependencies of x in a recursive group { f1 = e1; ...; fn = en } are: - The "inline free variables" of f: the fi free in f's stable unfolding and RHS; see Note [inl_fvs] - Any fi reachable from those inline free variables by a sequence of RULE rewrites. 
Remember, rule rewriting is not affected by fi being a loop breaker, so we have to take the transitive closure in case f is the only possible loop breaker in the loop. Hence rule_fv_env. We need only account for /active/ rules. -} ------------------------------------------ nodeScore :: OccEnv -> Id -- Binder with new occ-info -> VarSet -- Loop-breaker dependencies -> NodeDetails -> NodeScore nodeScore !env new_bndr lb_deps (ND { nd_bndr = old_bndr, nd_rhs = WTUD _ bind_rhs }) | not (isId old_bndr) -- A type or coercion variable is never a loop breaker = (100, 0, False) | old_bndr `elemVarSet` lb_deps -- Self-recursive things are great loop breakers = (0, 0, True) -- See Note [Self-recursion and loop breakers] | not (occ_unf_act env old_bndr) -- A binder whose inlining is inactive (e.g. has = (0, 0, True) -- a NOINLINE pragma) makes a great loop breaker | exprIsTrivial rhs = mk_score 10 -- Practically certain to be inlined -- Used to have also: && not (isExportedId bndr) -- But I found this sometimes cost an extra iteration when we have -- rec { d = (a,b); a = ...df...; b = ...df...; df = d } -- where df is the exported dictionary. 
Then df makes a really -- bad choice for loop breaker | DFunUnfolding { df_args = args } <- old_unf -- Never choose a DFun as a loop breaker -- Note [DFuns should not be loop breakers] = (9, length args, is_lb) -- Data structures are more important than INLINE pragmas -- so that dictionary/method recursion unravels | CoreUnfolding { uf_guidance = UnfWhen {} } <- old_unf = mk_score 6 | is_con_app rhs -- Data types help with cases: = mk_score 5 -- Note [Constructor applications] | isStableUnfolding old_unf , can_unfold = mk_score 3 | isOneOcc (idOccInfo new_bndr) = mk_score 2 -- Likely to be inlined | can_unfold -- The Id has some kind of unfolding = mk_score 1 | otherwise = (0, 0, is_lb) where mk_score :: Int -> NodeScore mk_score rank = (rank, rhs_size, is_lb) -- is_lb: see Note [Loop breakers, node scoring, and stability] is_lb = isStrongLoopBreaker (idOccInfo old_bndr) old_unf = realIdUnfolding old_bndr can_unfold = canUnfold old_unf rhs = case old_unf of CoreUnfolding { uf_src = src, uf_tmpl = unf_rhs } | isStableSource src -> unf_rhs _ -> bind_rhs -- 'bind_rhs' is irrelevant for inlining things with a stable unfolding rhs_size = case old_unf of CoreUnfolding { uf_guidance = guidance } | UnfIfGoodArgs { ug_size = size } <- guidance -> size _ -> cheapExprSize rhs -- Checking for a constructor application -- Cheap and cheerful; the simplifier moves casts out of the way -- The lambda case is important to spot x = /\a. C (f a) -- which comes up when C is a dictionary constructor and -- f is a default method. -- Example: the instance for Show (ST s a) in GHC.ST -- -- However we *also* treat (\x. 
C p q) as a con-app-like thing, -- Note [Closure conversion] is_con_app (Var v) = isConLikeId v is_con_app (App f _) = is_con_app f is_con_app (Lam _ e) = is_con_app e is_con_app (Tick _ e) = is_con_app e is_con_app (Let _ e) = is_con_app e -- let x = let y = blah in (a,b) is_con_app _ = False -- We will float the y out, so treat -- the x-binding as a con-app (#20941) maxExprSize :: Int maxExprSize = 20 -- Rather arbitrary cheapExprSize :: CoreExpr -> Int -- Maxes out at maxExprSize cheapExprSize e = go 0 e where go n e | n >= maxExprSize = n | otherwise = go1 n e go1 n (Var {}) = n+1 go1 n (Lit {}) = n+1 go1 n (Type {}) = n go1 n (Coercion {}) = n go1 n (Tick _ e) = go1 n e go1 n (Cast e _) = go1 n e go1 n (App f a) = go (go1 n f) a go1 n (Lam b e) | isTyVar b = go1 n e | otherwise = go (n+1) e go1 n (Let b e) = gos (go1 n e) (rhssOfBind b) go1 n (Case e _ _ as) = gos (go1 n e) (rhssOfAlts as) gos n [] = n gos n (e:es) | n >= maxExprSize = n | otherwise = gos (go1 n e) es betterLB :: NodeScore -> NodeScore -> Bool -- If n1 `betterLB` n2 then choose n1 as the loop breaker betterLB (rank1, size1, lb1) (rank2, size2, _) | rank1 < rank2 = True | rank1 > rank2 = False | size1 < size2 = False -- Make the bigger n2 into the loop breaker | size1 > size2 = True | lb1 = True -- Tie-break: if n1 was a loop breaker before, choose it | otherwise = False -- See Note [Loop breakers, node scoring, and stability] {- Note [Self-recursion and loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we have rec { f = ...f...g... ; g = .....f... } then 'f' has to be a loop breaker anyway, so we may as well choose it right away, so that g can inline freely. This is really just a cheap hack. Consider rec { f = ...g... ; g = ..f..h... ; h = ...f....} Here f or g are better loop breakers than h; but we might accidentally choose h. Finding the minimal set of loop breakers is hard. 
Note [Loop breakers, node scoring, and stability]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To choose a loop breaker, we give a NodeScore to each node in the SCC,
and pick the one with the best score (according to 'betterLB').

We need to be jolly careful (#12425, #12234) about the stability
of this choice. Suppose we have

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...f..
      False -> ..f...

In each iteration of the simplifier the occurrence analyser OccAnal
chooses a loop breaker. Suppose in iteration 1 it chooses g as the
loop breaker. That means it is free to inline f.

Suppose that GHC decides to inline f in the branches of the case, but
(for some reason; eg it is not saturated) not in the rhs of g. So we
get

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...g...g.....
      False -> ..g..g....

Now suppose that, for some reason, in the next iteration the
occurrence analyser chooses f as the loop breaker, so it can freely
inline g. And again for some reason the simplifier inlines g at its
calls in the case branches, but not in the RHS of f. Then we get

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...(...f...f...)...(...f..f..).....
      False -> ..(...f...f...)...(..f..f...)....

You can see where this is going! Each iteration of the simplifier
doubles the number of calls to f or g. No wonder GHC is slow!

(In the particular example in comment:3 of #12425, f and g are the two
mutually recursive fmap instances for CondT and Result. They are both
marked INLINE which, oddly, is why they don't inline in each other's
RHS, because the call there is not saturated.)

The root cause is that we flip-flop on our choice of loop breaker. I
always thought it didn't matter, and indeed for any single iteration
to terminate, it doesn't matter. But when we iterate, it matters a
lot!!
So The Plan is this: If there is a tie, choose the node that was a loop breaker last time round Hence the is_lb field of NodeScore -} {- ********************************************************************* * * Lambda groups * * ********************************************************************* -} {- Note [Occurrence analysis for lambda binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For value lambdas we do a special hack. Consider (\x. \y. ...x...) If we did nothing, x is used inside the \y, so would be marked as dangerous to dup. But in the common case where the abstraction is applied to two arguments this is over-pessimistic, which delays inlining x, which forces more simplifier iterations. So the occurrence analyser collaborates with the simplifier to treat a /lambda-group/ specially. A lambda-group is a contiguous run of lambda and casts, e.g. Lam x (Lam y (Cast (Lam z body) co)) * Occurrence analyser: we just mark each binder in the lambda-group (here: x,y,z) with its occurrence info in the *body* of the lambda-group. See occAnalLamTail. * Simplifier. The simplifier is careful when partially applying lambda-groups. See the call to zapLambdaBndrs in GHC.Core.Opt.Simplify.simplExprF1 GHC.Core.SimpleOpt.simple_app * Why do we take care to account for intervening casts? Answer: currently we don't do eta-expansion and cast-swizzling in a stable unfolding (see Historical-note [Eta-expansion in stable unfoldings]). So we can get f = \x. ((\y. ...x...y...) |> co) Now, since the lambdas aren't together, the occurrence analyser will say that x is OnceInLam. Now if we have a call (f e1 |> co) e2 we'll end up with let x = e1 in ...x..e2... and it'll take an extra iteration of the Simplifier to substitute for x. A thought: a lambda-group is pretty much what GHC.Core.Opt.Arity.manifestArity recognises except that the latter looks through (some) ticks. Maybe a lambda group should also look through (some) ticks? 
-} isOneShotFun :: CoreExpr -> Bool -- The top level lambdas, ignoring casts, of the expression -- are all one-shot. If there aren't any lambdas at all, this is True isOneShotFun (Lam b e) = isOneShotBndr b && isOneShotFun e isOneShotFun (Cast e _) = isOneShotFun e isOneShotFun _ = True zapLambdaBndrs :: CoreExpr -> FullArgCount -> CoreExpr -- If (\xyz. t) appears under-applied to only two arguments, -- we must zap the occ-info on x,y, because they appear under the \z -- See Note [Occurrence analysis for lambda binders] in GHc.Core.Opt.OccurAnal -- -- NB: `arg_count` includes both type and value args zapLambdaBndrs fun arg_count = -- If the lambda is fully applied, leave it alone; if not -- zap the OccInfo on the lambdas that do have arguments, -- so they beta-reduce to use-many Lets rather than used-once ones. zap arg_count fun `orElse` fun where zap :: FullArgCount -> CoreExpr -> Maybe CoreExpr -- Nothing => No need to change the occ-info -- Just e => Had to change zap 0 e | isOneShotFun e = Nothing -- All remaining lambdas are one-shot | otherwise = Just e -- in which case no need to zap zap n (Cast e co) = do { e' <- zap n e; return (Cast e' co) } zap n (Lam b e) = do { e' <- zap (n-1) e ; return (Lam (zap_bndr b) e') } zap _ _ = Nothing -- More arguments than lambdas zap_bndr b | isTyVar b = b | otherwise = zapLamIdInfo b occAnalLamTail :: OccEnv -> CoreExpr -> WithTailUsageDetails CoreExpr -- ^ See Note [Occurrence analysis for lambda binders]. -- It does the following: -- * Sets one-shot info on the lambda binder from the OccEnv, and -- removes that one-shot info from the OccEnv -- * Sets the OccEnv to OccVanilla when going under a value lambda -- * Tags each lambda with its occurrence information -- * Walks through casts -- * Package up the analysed lambda with its manifest join arity -- -- This function does /not/ do -- markAllInsideLam or -- markAllNonTail -- The caller does that, via adjustTailUsage (mostly calls go through -- adjustNonRecRhs). 
Every call to occAnalLamTail must ultimately call -- adjustTailUsage to discharge the assumed join arity. -- -- In effect, the analysis result is for a non-recursive join point with -- manifest arity and adjustTailUsage does the fixup. -- See Note [Adjusting right-hand sides] occAnalLamTail env expr = let !(WUD usage expr') = occ_anal_lam_tail env expr in WTUD (TUD (joinRhsArity expr) usage) expr' occ_anal_lam_tail :: OccEnv -> CoreExpr -> WithUsageDetails CoreExpr -- Does not markInsideLam etc for the outmost batch of lambdas occ_anal_lam_tail env expr@(Lam {}) = go env [] expr where go :: OccEnv -> [Var] -> CoreExpr -> WithUsageDetails CoreExpr go env rev_bndrs (Lam bndr body) | isTyVar bndr = go env (bndr:rev_bndrs) body -- Important: Unlike a value binder, do not modify occ_encl -- to OccVanilla, so that with a RHS like -- \(@ x) -> K @x (f @x) -- we'll see that (K @x (f @x)) is in a OccRhs, and hence refrain -- from inlining f. See the beginning of Note [Cascading inlines]. | otherwise = let (env_one_shots', bndr') = case occ_one_shots env of [] -> ([], bndr) (os : oss) -> (oss, updOneShotInfo bndr os) -- Use updOneShotInfo, not setOneShotInfo, as pre-existing -- one-shot info might be better than what we can infer, e.g. -- due to explicit use of the magic 'oneShot' function. 
-- See Note [oneShot magic] env' = env { occ_encl = OccVanilla, occ_one_shots = env_one_shots' } in go env' (bndr':rev_bndrs) body go env rev_bndrs body = addInScope env rev_bndrs $ \env -> let !(WUD usage body') = occ_anal_lam_tail env body wrap_lam body bndr = Lam (tagLamBinder usage bndr) body in WUD (usage `addLamCoVarOccs` rev_bndrs) (foldl' wrap_lam body' rev_bndrs) -- For casts, keep going in the same lambda-group -- See Note [Occurrence analysis for lambda binders] occ_anal_lam_tail env (Cast expr co) = let WUD usage expr' = occ_anal_lam_tail env expr -- usage1: see Note [Gather occurrences of coercion variables] usage1 = addManyOccs usage (coVarsOfCo co) -- usage2: see Note [Occ-anal and cast worker/wrapper] usage2 = case expr of Var {} | isRhsEnv env -> markAllMany usage1 _ -> usage1 -- usage3: you might think this was not necessary, because of -- the markAllNonTail in adjustTailUsage; but not so! For a -- join point, adjustTailUsage doesn't do this; yet if there is -- a cast, we must! Also: why markAllNonTail? See -- GHC.Core.Lint: Note Note [Join points and casts] usage3 = markAllNonTail usage2 in WUD usage3 (Cast expr' co) occ_anal_lam_tail env expr -- Not Lam, not Cast = occAnal env expr {- Note [Occ-anal and cast worker/wrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider y = e; x = y |> co If we mark y as used-once, we'll inline y into x, and the Cast worker/wrapper transform will float it straight back out again. See Note [Cast worker/wrapper] in GHC.Core.Opt.Simplify. So in this particular case we want to mark 'y' as Many. It's very ad-hoc, but it's also simple. It's also what would happen if we gave the binding for x a stable unfolding (as we usually do for wrappers, thus y = e {-# INLINE x #-} x = y |> co Now y appears twice -- once in x's stable unfolding, and once in x's RHS. So it'll get a Many occ-info. (Maybe Cast w/w should create a stable unfolding, which would obviate this Note; but that seems a bit of a heavyweight solution.) 
We only need to this in occAnalLamTail, not occAnal, because the top leve of a right hand side is handled by occAnalLamTail. -} {- ********************************************************************* * * Right hand sides * * ********************************************************************* -} occAnalUnfolding :: OccEnv -> Unfolding -> WithTailUsageDetails Unfolding -- Occurrence-analyse a stable unfolding; -- discard a non-stable one altogether and return empty usage details. occAnalUnfolding !env unf = case unf of unf@(CoreUnfolding { uf_tmpl = rhs, uf_src = src }) | isStableSource src -> let WTUD (TUD rhs_ja uds) rhs' = occAnalLamTail env rhs unf' = unf { uf_tmpl = rhs' } in WTUD (TUD rhs_ja (markAllMany uds)) unf' -- markAllMany: see Note [Occurrences in stable unfoldings] | otherwise -> WTUD (TUD 0 emptyDetails) unf -- For non-Stable unfoldings we leave them undisturbed, but -- don't count their usage because the simplifier will discard them. -- We leave them undisturbed because nodeScore uses their size info -- to guide its decisions. It's ok to leave un-substituted -- expressions in the tree because all the variables that were in -- scope remain in scope; there is no cloning etc. 
unf@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) -> let WUD uds args' = addInScopeList env bndrs $ \ env -> occAnalList env args in WTUD (TUD 0 uds) (unf { df_args = args' }) -- No need to use tagLamBinders because we -- never inline DFuns so the occ-info on binders doesn't matter unf -> WTUD (TUD 0 emptyDetails) unf occAnalRule :: OccEnv -> CoreRule -> (CoreRule, -- Each (non-built-in) rule UsageDetails, -- Usage details for LHS TailUsageDetails) -- Usage details for RHS occAnalRule env rule@(Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs }) = (rule', lhs_uds', TUD rhs_ja rhs_uds') where rule' = rule { ru_args = args', ru_rhs = rhs' } WUD lhs_uds args' = addInScopeList env bndrs $ \env -> occAnalList env args lhs_uds' = markAllManyNonTail lhs_uds WUD rhs_uds rhs' = addInScopeList env bndrs $ \env -> occAnal env rhs -- Note [Rules are extra RHSs] -- Note [Rule dependency info] rhs_uds' = markAllMany rhs_uds rhs_ja = length args -- See Note [Join points and unfoldings/rules] occAnalRule _ other_rule = (other_rule, emptyDetails, TUD 0 emptyDetails) {- Note [Occurrences in stable unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f p = BIG {-# INLINE g #-} g y = not (f y) where this is the /only/ occurrence of 'f'. So 'g' will get a stable unfolding. Now suppose that g's RHS gets optimised (perhaps by a rule or inlining f) so that it doesn't mention 'f' any more. Now the last remaining call to f is in g's Stable unfolding. But, even though there is only one syntactic occurrence of f, we do /not/ want to do preinlineUnconditionally here! The INLINE pragma says "inline exactly this RHS"; perhaps the programmer wants to expose that 'not', say. If we inline f that will make the Stable unfoldign big, and that wasn't what the programmer wanted. Another way to think about it: if we inlined g as-is into multiple call sites, now there's be multiple calls to f. Bottom line: treat all occurrences in a stable unfolding as "Many". 
We still leave tail call information intact, though, so as not to
spoil potential join points.

Note [Unfoldings and rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generally unfoldings and rules are already occurrence-analysed, so we
don't want to reconstruct their trees; we just want to analyse them to
find how they use their free variables.

EXCEPT if there is a binder-swap going on, in which case we do want to
produce a new tree.

So we have a fast-path that keeps the old tree if the occ_bs_env is
empty. This just saves a bit of allocation and reconstruction; not a
big deal.

Two tricky corners:

* Dead bindings (#22761). Suppose we have
     Unfolding = \x. let y = foo in x+1
  which includes a dead binding for `y`. In occAnalUnfolding we
  occ-anal the unfolding and produce /no/ occurrences of `foo` (since
  `y` is dead). But if we discard the occ-analysed syntax tree (which
  we do on our fast path), and use the old one, we still /have/ an
  occurrence of `foo` -- and that can lead to out-of-scope variables
  (#22761).

  Solution: always keep occ-analysed trees in unfoldings and rules, so
  they have no dead code. See Note [OccInfo in unfoldings and rules]
  in GHC.Core.

* One-shot binders. Consider
    {- f has Stable unfolding \p q -> blah
       Demand on f is LC(L,C(1,!P(L))); that is, one-shot in its
       second arg -}
    f = \x y. blah
  Now `mkRhsOccEnv` will build an OccEnv for f's RHS that has
     occ_one_shots = [NoOneShotInfo, OneShotLam]
  This will put OneShotLam on the \y. And it'll put it on the \q.
  But the noBinderSwap check will mean that we discard this new
  occ-anal'd unfolding and keep the old one, with no OneShotInfo.
  This looks a little inconsistent, but the Stable unfolding is just
  used for inlinings; OneShotInfo isn't a lot of use here.

Note [Cascading inlines]
~~~~~~~~~~~~~~~~~~~~~~~~
By default we use an OccRhs for the RHS of a binding. This tells the
occ anal that it's looking at an RHS, which has an effect in
occAnalApp.
In particular, for constructor applications, it makes the arguments appear to have NoOccInfo, so that we don't inline into them. Thus x = f y k = Just x we do not want to inline x. But there's a problem. Consider x1 = a0 : [] x2 = a1 : x1 x3 = a2 : x2 g = f x3 First time round, it looks as if x1 and x2 occur as an arg of a let-bound constructor ==> give them a many-occurrence. But then x3 is inlined (unconditionally as it happens) and next time round, x2 will be, and the next time round x1 will be Result: multiple simplifier iterations. Sigh. So, when analysing the RHS of x3 we notice that x3 will itself definitely inline the next time round, and so we analyse x3's rhs in an OccVanilla context, not OccRhs. Hence the "certainly_inline" stuff. Annoyingly, we have to approximate GHC.Core.Opt.Simplify.Utils.preInlineUnconditionally. If (a) the RHS is expandable (see isExpandableApp in occAnalApp), and (b) certainly_inline says "yes" when preInlineUnconditionally says "no" then the simplifier iterates indefinitely: x = f y k = Just x -- We decide that k is 'certainly_inline' v = ...k... -- but preInlineUnconditionally doesn't inline it inline ==> k = Just (f y) v = ...k... float ==> x1 = f y k = Just x1 v = ...k... This is worse than the slow cascade, so we only want to say "certainly_inline" if it really is certain. Look at the note with preInlineUnconditionally for the various clauses. See #24582 for an example of the two getting out of sync. 
************************************************************************ * * Expressions * * ************************************************************************ -} occAnalList :: OccEnv -> [CoreExpr] -> WithUsageDetails [CoreExpr] occAnalList !_ [] = WUD emptyDetails [] occAnalList env (e:es) = let (WUD uds1 e') = occAnal env e (WUD uds2 es') = occAnalList env es in WUD (uds1 `andUDs` uds2) (e' : es') occAnal :: OccEnv -> CoreExpr -> WithUsageDetails CoreExpr -- Gives info only about the "interesting" Ids occAnal !_ expr@(Lit _) = WUD emptyDetails expr occAnal env expr@(Var _) = occAnalApp env (expr, [], []) -- At one stage, I gathered the idRuleVars for the variable here too, -- which in a way is the right thing to do. -- But that went wrong right after specialisation, when -- the *occurrences* of the overloaded function didn't have any -- rules in them, so the *specialised* versions looked as if they -- weren't used at all. occAnal _ expr@(Type ty) = WUD (addManyOccs emptyDetails (coVarsOfType ty)) expr occAnal _ expr@(Coercion co) = WUD (addManyOccs emptyDetails (coVarsOfCo co)) expr -- See Note [Gather occurrences of coercion variables] {- Note [Gather occurrences of coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need to gather info about what coercion variables appear, for two reasons: 1. So that we can sort them into the right place when doing dependency analysis. 2. So that we know when they are surely dead. It is useful to know when they a coercion variable is surely dead, when we want to discard a case-expression, in GHC.Core.Opt.Simplify.rebuildCase. For example (#20143): case unsafeEqualityProof @blah of UnsafeRefl cv -> ...no use of cv... Here we can discard the case, since unsafeEqualityProof always terminates. But only if the coercion variable 'cv' is unused. Another example from #15696: we had something like case eq_sel d of co -> ...(typeError @(...co...) "urk")... 
Then 'd' was substituted by a dictionary, so the expression simpified to case (Coercion ) of cv -> ...(typeError @(...cv...) "urk")... We can only drop the case altogether if 'cv' is unused, which is not the case here. Conclusion: we need accurate dead-ness info for CoVars. We gather CoVar occurrences from: * The (Type ty) and (Coercion co) cases of occAnal * The type 'ty' of a lambda-binder (\(x:ty). blah) See addCoVarOccs But it is not necessary to gather CoVars from the types of other binders. * For let-binders, if the type mentions a CoVar, so will the RHS (since it has the same type) * For case-alt binders, if the type mentions a CoVar, so will the scrutinee (since it has the same type) -} occAnal env (Tick tickish body) = WUD usage' (Tick tickish body') where WUD usage body' = occAnal env body usage' | tickish `tickishScopesLike` SoftScope = usage -- For soft-scoped ticks (including SourceNotes) we don't want -- to lose join-point-hood, so we don't mess with `usage` (#24078) -- For a non-soft tick scope, we can inline lambdas only, so we -- abandon tail calls, and do markAllInsideLam too: usage_lam | Breakpoint _ _ ids _ <- tickish = -- Never substitute for any of the Ids in a Breakpoint addManyOccs usage_lam (mkVarSet ids) | otherwise = usage_lam usage_lam = markAllNonTail (markAllInsideLam usage) -- TODO There may be ways to make ticks and join points play -- nicer together, but right now there are problems: -- let j x = ... in tick (j 1) -- Making j a join point may cause the simplifier to drop t -- (if the tick is put into the continuation). So we don't -- count j 1 as a tail call. -- See #14242. 
occAnal env (Cast expr co) = let (WUD usage expr') = occAnal env expr usage1 = addManyOccs usage (coVarsOfCo co) -- usage2: see Note [Gather occurrences of coercion variables] usage2 = markAllNonTail usage1 -- usage3: calls inside expr aren't tail calls any more in WUD usage2 (Cast expr' co) occAnal env app@(App _ _) = occAnalApp env (collectArgsTicks tickishFloatable app) occAnal env expr@(Lam {}) = adjustNonRecRhs NotJoinPoint $ -- NotJoinPoint <=> markAllManyNonTail occAnalLamTail env expr occAnal env (Case scrut bndr ty alts) = let WUD scrut_usage scrut' = occAnal (setScrutCtxt env alts) scrut WUD alts_usage (tagged_bndr, alts') = addInScopeOne env bndr $ \env -> let alt_env = addBndrSwap scrut' bndr $ setTailCtxt env -- Kill off OccRhs WUD alts_usage alts' = do_alts alt_env alts tagged_bndr = tagLamBinder alts_usage bndr in WUD alts_usage (tagged_bndr, alts') total_usage = markAllNonTail scrut_usage `andUDs` alts_usage -- Alts can have tail calls, but the scrutinee can't in WUD total_usage (Case scrut' tagged_bndr ty alts') where do_alts :: OccEnv -> [CoreAlt] -> WithUsageDetails [CoreAlt] do_alts _ [] = WUD emptyDetails [] do_alts env (alt:alts) = WUD (uds1 `orUDs` uds2) (alt':alts') where WUD uds1 alt' = do_alt env alt WUD uds2 alts' = do_alts env alts do_alt !env (Alt con bndrs rhs) = addInScopeList env bndrs $ \ env -> let WUD rhs_usage rhs' = occAnal env rhs tagged_bndrs = tagLamBinders rhs_usage bndrs in -- See Note [Binders in case alternatives] WUD rhs_usage (Alt con tagged_bndrs rhs') occAnal env (Let bind body) = occAnalBind env NotTopLevel noImpRuleEdges bind (\env -> occAnal env body) mkLets occAnalArgs :: OccEnv -> CoreExpr -> [CoreExpr] -> [OneShots] -- Very commonly empty, notably prior to dmd anal -> WithUsageDetails CoreExpr -- The `fun` argument is just an accumulating parameter, -- the base for building the application we return occAnalArgs !env fun args !one_shots = go emptyDetails fun args one_shots where env_args = setNonTailCtxt encl env 
-- Make bottoming functions interesting -- See Note [Bottoming function calls] encl | Var f <- fun, isDeadEndSig (idDmdSig f) = OccScrut | otherwise = OccVanilla go uds fun [] _ = WUD uds fun go uds fun (arg:args) one_shots = go (uds `andUDs` arg_uds) (fun `App` arg') args one_shots' where !(WUD arg_uds arg') = occAnal arg_env arg !(arg_env, one_shots') | isTypeArg arg = (env_args, one_shots) | otherwise = case one_shots of [] -> (env_args, []) -- Fast path; one_shots is often empty (os : one_shots') -> (setOneShots os env_args, one_shots') {- Applications are dealt with specially because we want the "build hack" to work. Note [Bottoming function calls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider let x = (a,b) in case p of A -> ...(error x).. B -> ...(ertor x)... postInlineUnconditionally may duplicate x's binding, but sometimes it does so only if the use site IsInteresting. Pushing allocation into error branches is good, so we try to make bottoming calls look interesting, by setting occ_encl = OccScrut for such calls. The slightly-artificial test T21128 is a good example. It's probably not a huge deal. Note [Arguments of let-bound constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = let y = expensive x in let z = (True,y) in (case z of {(p,q)->q}, case z of {(p,q)->q}) We feel free to duplicate the WHNF (True,y), but that means that y may be duplicated thereby. If we aren't careful we duplicate the (expensive x) call! Constructors are rather like lambdas in this way. 
-} occAnalApp :: OccEnv -> (Expr CoreBndr, [Arg CoreBndr], [CoreTickish]) -> WithUsageDetails (Expr CoreBndr) -- Naked variables (not applied) end up here too occAnalApp !env (Var fun, args, ticks) -- Account for join arity of runRW# continuation -- See Note [Simplification of runRW#] -- -- NB: Do not be tempted to make the next (Var fun, args, tick) -- equation into an 'otherwise' clause for this equation -- The former has a bang-pattern to occ-anal the args, and -- we don't want to occ-anal them twice in the runRW# case! -- This caused #18296 | fun `hasKey` runRWKey , [t1, t2, arg] <- args , WUD usage arg' <- adjustNonRecRhs (JoinPoint 1) $ occAnalLamTail env arg = WUD usage (mkTicks ticks $ mkApps (Var fun) [t1, t2, arg']) occAnalApp env (Var fun_id, args, ticks) = WUD all_uds (mkTicks ticks app') where -- Lots of banged bindings: this is a very heavily bit of code, -- so it pays not to make lots of thunks here, all of which -- will ultimately be forced. !(fun', fun_id') = lookupBndrSwap env fun_id !(WUD args_uds app') = occAnalArgs env fun' args one_shots fun_uds = mkOneOcc env fun_id' int_cxt n_args -- NB: fun_uds is computed for fun_id', not fun_id -- See (BS1) in Note [The binder-swap substitution] all_uds = fun_uds `andUDs` final_args_uds !final_args_uds = markAllNonTail $ markAllInsideLamIf (isRhsEnv env && is_exp) $ -- isRhsEnv: see Note [OccEncl] args_uds -- We mark the free vars of the argument of a constructor or PAP -- as "inside-lambda", if it is the RHS of a let(rec). -- This means that nothing gets inlined into a constructor or PAP -- argument position, which is what we want. Typically those -- constructor arguments are just variables, or trivial expressions. -- We use inside-lam because it's like eta-expanding the PAP. 
-- -- This is the *whole point* of the isRhsEnv predicate -- See Note [Arguments of let-bound constructors] !n_val_args = valArgCount args !n_args = length args !int_cxt = case occ_encl env of OccScrut -> IsInteresting _other | n_val_args > 0 -> IsInteresting | otherwise -> NotInteresting !is_exp = isExpandableApp fun_id n_val_args -- See Note [CONLIKE pragma] in GHC.Types.Basic -- The definition of is_exp should match that in GHC.Core.Opt.Simplify.prepareRhs one_shots = argsOneShots (idDmdSig fun_id) guaranteed_val_args guaranteed_val_args = n_val_args + length (takeWhile isOneShotInfo (occ_one_shots env)) -- See Note [Sources of one-shot information], bullet point A'] occAnalApp env (fun, args, ticks) = WUD (markAllNonTail (fun_uds `andUDs` args_uds)) (mkTicks ticks app') where !(WUD args_uds app') = occAnalArgs env fun' args [] !(WUD fun_uds fun') = occAnal (addAppCtxt env args) fun -- The addAppCtxt is a bit cunning. One iteration of the simplifier -- often leaves behind beta redexes like -- (\x y -> e) a1 a2 -- Here we would like to mark x,y as one-shot, and treat the whole -- thing much like a let. We do this by pushing some OneShotLam items -- onto the context stack. addAppCtxt :: OccEnv -> [Arg CoreBndr] -> OccEnv addAppCtxt env@(OccEnv { occ_one_shots = ctxt }) args | n_val_args > 0 = env { occ_one_shots = replicate n_val_args OneShotLam ++ ctxt , occ_encl = OccVanilla } -- OccVanilla: the function part of the application -- is no longer on OccRhs or OccScrut | otherwise = env where n_val_args = valArgCount args {- Note [Sources of one-shot information] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The occurrence analyser obtains one-shot-lambda information from two sources: A: Saturated applications: eg f e1 .. en In general, given a call (f e1 .. en) we can propagate one-shot info from f's strictness signature into e1 .. en, but /only/ if n is enough to saturate the strictness signature. 
   A strictness signature like

      f :: C(1,C(1,L))LS

   means that *if f is applied to three arguments* then it will
   guarantee to call its first argument at most once, and to call the
   result of that at most once. But if f has fewer than three
   arguments, all bets are off; e.g.

      map (f (\x y. expensive) e2) xs

   Here the \x y abstraction may be called many times (once for each
   element of xs) so we should not mark x and y as one-shot. But if it
   was

      map (f (\x y. expensive) 3 2) xs

   then the first argument of f will be called at most once.

   The one-shot info, derived from f's strictness signature, is
   computed by 'argsOneShots', called in occAnalApp.

 A': Non-obviously saturated applications:
   eg  build (f (\x y -> expensive))
   where f is as above.

   In this case, f is only manifestly applied to one argument, so it
   does not look saturated. So by the previous point, we should not
   use its strictness signature to learn about the one-shotness of
   \x y. But in this case we can: build is fully applied, so we may
   use its strictness signature; and from that we learn that build
   calls its argument with two arguments *at most once*.

   So there is really only one call to f, and it will have three
   arguments. In that sense, f is saturated, and we may proceed as
   described above.

   Hence the computation of 'guaranteed_val_args' in occAnalApp, using
   '(occ_one_shots env)'. See also #13227, comment:9

 B: Let-bindings: eg
      let f = \c. let ... in \n -> blah
      in (build f, build f)
   Propagate one-shot info from the demand-info on 'f' to the lambdas
   in its RHS (which may not be syntactically at the top)

   This information must have come from a previous run of the demand
   analyser.

Previously, the demand analyser would *also* set the one-shot
information, but that code was buggy (see #11770), so doing it in only
one place, namely here, is saner.

Note [OneShots]
~~~~~~~~~~~~~~~
When analysing an expression, the occ_one_shots argument contains
information about how the function is being used.
The length of the list indicates how many arguments will eventually be passed to the analysed expression, and the OneShotInfo indicates whether this application is once or multiple times. Example: Context of f occ_one_shots when analysing f f 1 2 [OneShot, OneShot] map (f 1) [OneShot, NoOneShotInfo] build f [OneShot, OneShot] f 1 2 `seq` f 2 1 [NoOneShotInfo, OneShot] Note [Binders in case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider case x of y { (a,b) -> f y } We treat 'a', 'b' as dead, because they don't physically occur in the case alternative. (Indeed, a variable is dead iff it doesn't occur in its scope in the output of OccAnal.) It really helps to know when binders are unused. See esp the call to isDeadBinder in Simplify.mkDupableAlt In this example, though, the Simplifier will bring 'a' and 'b' back to life, because it binds 'y' to (a,b) (imagine got inlined and scrutinised y). -} {- ************************************************************************ * * OccEnv * * ************************************************************************ -} data OccEnv = OccEnv { occ_encl :: !OccEncl -- Enclosing context information , occ_one_shots :: !OneShots -- See Note [OneShots] , occ_unf_act :: Id -> Bool -- Which Id unfoldings are active , occ_rule_act :: Activation -> Bool -- Which rules are active -- See Note [Finding rule RHS free vars] -- See Note [The binder-swap substitution] -- If x :-> (y, co) is in the env, -- then please replace x by (y |> mco) -- Invariant of course: idType x = exprType (y |> mco) , occ_bs_env :: !(IdEnv (OutId, MCoercion)) -- Domain is Global and Local Ids -- Range is just Local Ids , occ_bs_rng :: !VarSet -- Vars (TyVars and Ids) free in the range of occ_bs_env -- Usage details of the RHS of in-scope non-recursive join points -- Invariant: no Id maps to an empty OccInfoEnv -- See Note [Occurrence analysis for join points] , occ_join_points :: !JoinPointInfo } type JoinPointInfo = IdEnv OccInfoEnv 
----------------------------- {- Note [OccEncl] ~~~~~~~~~~~~~~~~~ OccEncl is used to control whether to inline into constructor arguments. * OccRhs: consider let p = in let x = Just p in ...case p of ... Here `p` occurs syntactically once, but we want to mark it as InsideLam to stop `p` inlining. We want to leave the x-binding as a constructor applied to variables, so that the Simplifier can simplify that inner `case`. The OccRhs just tells occAnalApp to mark occurrences in constructor args * OccScrut: consider (case x of ...). Here we want to give `x` OneOcc with "interesting context" field int_cxt = True. The OccScrut tells occAnalApp (which deals with lone variables too) when to set this field to True. -} data OccEncl -- See Note [OccEncl] = OccRhs -- RHS of let(rec), albeit perhaps inside a type lambda | OccScrut -- Scrutintee of a case | OccVanilla -- Everything else instance Outputable OccEncl where ppr OccRhs = text "occRhs" ppr OccScrut = text "occScrut" ppr OccVanilla = text "occVanilla" -- See Note [OneShots] type OneShots = [OneShotInfo] initOccEnv :: OccEnv initOccEnv = OccEnv { occ_encl = OccVanilla , occ_one_shots = [] -- To be conservative, we say that all -- inlines and rules are active , occ_unf_act = \_ -> True , occ_rule_act = \_ -> True , occ_join_points = emptyVarEnv , occ_bs_env = emptyVarEnv , occ_bs_rng = emptyVarSet } noBinderSwaps :: OccEnv -> Bool noBinderSwaps (OccEnv { occ_bs_env = bs_env }) = isEmptyVarEnv bs_env setScrutCtxt :: OccEnv -> [CoreAlt] -> OccEnv setScrutCtxt !env alts = setNonTailCtxt encl env where encl | interesting_alts = OccScrut | otherwise = OccVanilla interesting_alts = case alts of [] -> False [alt] -> not (isDefaultAlt alt) _ -> True -- 'interesting_alts' is True if the case has at least one -- non-default alternative. That in turn influences -- pre/postInlineUnconditionally. Grep for "occ_int_cxt"! 
{- Note [The OccEnv for a right hand side] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ How do we create the OccEnv for a RHS (in mkRhsOccEnv)? For a non-join point binding, x = rhs * occ_encl: set to OccRhs; but see `mkNonRecRhsCtxt` for wrinkles * occ_join_points: zap them! * occ_one_shots: initialise from the idDemandInfo; see Note [Sources of one-shot information] For a join point binding, j x = rhs * occ_encl: Consider x = e join j = Just x We want to inline x into j right away, so we don't want to give the join point a OccRhs (#14137); we want OccVanilla. It's not a huge deal, because the FloatIn pass knows to float into join point RHSs; and the simplifier does not float things out of join point RHSs. But it's a simple, cheap thing to do. * occ_join_points: no need to zap. * occ_one_shots: we start with one-shot-info from the context, which indeed applies to the /body/ of the join point, after walking past the binders. So we add to the front a OneShotInfo for each value-binder of the join point: see `extendOneShotsForJoinPoint`. (Failing to account for the join-point binders caused #25096.) For the join point binders themselves, of a /non-recursive/ join point, we make the binder a OneShotLam. Again see `extendOneShotsForJoinPoint`. These one-shot infos then get attached to the binder by `occAnalLamTail`. 
-}

-- | Enter a non-tail context with the given enclosing-context flag.
-- One-shot info and in-scope join points do not survive into a
-- non-tail position, so both are reset here.
setNonTailCtxt :: OccEncl -> OccEnv -> OccEnv
setNonTailCtxt ctxt !env
  = env { occ_encl        = ctxt
        , occ_one_shots   = []
        , occ_join_points = zapJoinPointInfo (occ_join_points env) }

-- | Enter a tail context: reset the enclosing context to OccVanilla only.
setTailCtxt :: OccEnv -> OccEnv
setTailCtxt !env
  = env { occ_encl = OccVanilla }
    -- Preserve occ_one_shots, occ_join_points

-- Do not use OccRhs for the RHS of a join point (which is a tail ctxt):
mkRhsOccEnv :: OccEnv -> RecFlag -> OccEncl -> JoinPointHood -> Id -> CoreExpr -> OccEnv
-- See Note [The OccEnv for a right hand side]
-- For a join point:
--   - Keep occ_one_shots, occ_join_points from the context
--   - But push enough OneShotInfo onto occ_one_shots to account
--     for the join-point value binders
--   - Set occ_encl to OccVanilla
-- For non-join points
--   - Zap occ_one_shots and occ_join_points
--   - Set occ_encl to specified OccEncl
mkRhsOccEnv env@(OccEnv { occ_one_shots = ctxt_one_shots
                        , occ_join_points = ctxt_join_points })
            is_rec encl jp_hood bndr rhs
  | JoinPoint join_arity <- jp_hood
  = env { occ_encl        = OccVanilla
        , occ_one_shots   = extendOneShotsForJoinPoint is_rec join_arity
                                                       rhs ctxt_one_shots
        , occ_join_points = ctxt_join_points }
  | otherwise
  = env { occ_encl        = encl
        , occ_one_shots   = argOneShots (idDemandInfo bndr)
          -- argOneShots: see Note [Sources of one-shot information]
        , occ_join_points = zapJoinPointInfo ctxt_join_points }

zapJoinPointInfo :: JoinPointInfo -> JoinPointInfo
-- (zapJoinPointInfo jp_info) basically just returns emptyVarEnv (hence zapped).
-- See (W3) of Note [Occurrence analysis for join points]
--
-- Zapping improves efficiency, slightly; if you accidentally introduce a bug,
-- in which you zap [jx :-> uds] and then find an occurrence of jx anyway, you
-- might lose those uds, and that might mean we don't record all occurrences, and
-- that means we duplicate a redex.... a very nasty bug (which I encountered!).
-- Hence this DEBUG code which doesn't remove jx from the envt; it just gives it
-- emptyDetails, which in turn causes a panic in mkOneOcc.  That will catch this
-- bug before it does any damage.
#ifdef DEBUG
zapJoinPointInfo jp_info = mapVarEnv (\ _ -> emptyVarEnv) jp_info
#else
zapJoinPointInfo _       = emptyVarEnv
#endif

extendOneShotsForJoinPoint
  :: RecFlag -> JoinArity -> CoreExpr
  -> [OneShotInfo] -> [OneShotInfo]
-- Push enough OneShotInfos on the front of ctxt_one_shots
-- to account for the value lambdas of the join point
extendOneShotsForJoinPoint is_rec join_arity rhs ctxt_one_shots
  = go join_arity rhs
  where
    -- For a /non-recursive/ join point we can mark all
    -- its join-lambdas as one-shot; and it's a good idea to do so.
    -- But not so for recursive ones
    os = case is_rec of
           NonRecursive -> OneShotLam
           Recursive    -> NoOneShotInfo

    -- Walk down the first join_arity lambdas of the RHS, emitting
    -- `os` for each value binder (type binders get no entry)
    go 0 _ = ctxt_one_shots
    go n (Lam b rhs)
      | isId b    = os : go (n-1) rhs
      | otherwise = go (n-1) rhs
    go _ _ = []  -- Not enough lambdas.  This can legitimately happen.
                 -- e.g.    let j = case ... in j True
                 -- This will become an arity-1 join point after the
                 -- simplifier has eta-expanded it; but it may not have
                 -- enough lambdas /yet/.  (Lint checks that JoinIds do
                 -- have enough lambdas.)

-- | Install the given one-shot context, with a fast path for the
-- common empty case.
setOneShots :: OneShots -> OccEnv -> OccEnv
setOneShots os !env
  | null os   = env   -- Fast path for common case
  | otherwise = env { occ_one_shots = os }

-- | Are we currently analysing the RHS of a (non-join) let binding?
-- See Note [OccEncl]
isRhsEnv :: OccEnv -> Bool
isRhsEnv (OccEnv { occ_encl = cxt })
  = case cxt of
      OccRhs -> True
      _      -> False

addInScopeList :: OccEnv -> [Var]
               -> (OccEnv -> WithUsageDetails a) -> WithUsageDetails a
{-# INLINE addInScopeList #-}
addInScopeList env bndrs thing_inside
  | null bndrs = thing_inside env   -- E.g.
nullary constructors in a `case` | otherwise = addInScope env bndrs thing_inside addInScopeOne :: OccEnv -> Id -> (OccEnv -> WithUsageDetails a) -> WithUsageDetails a {-# INLINE addInScopeOne #-} addInScopeOne env bndr = addInScope env [bndr] addInScope :: OccEnv -> [Var] -> (OccEnv -> WithUsageDetails a) -> WithUsageDetails a {-# INLINE addInScope #-} -- This function is called a lot, so we want to inline the fast path -- so we don't have to allocate thing_inside and call it -- The bndrs must include TyVars as well as Ids, because of -- (BS3) in Note [Binder swap] -- We do not assume that the bndrs are in scope order; in fact the -- call in occ_anal_lam_tail gives them to addInScope in /reverse/ order -- Fast path when the is no environment-munging to do -- This is rather common: notably at top level, but nested too addInScope env bndrs thing_inside | isEmptyVarEnv (occ_bs_env env) , isEmptyVarEnv (occ_join_points env) , WUD uds res <- thing_inside env = WUD (delBndrsFromUDs bndrs uds) res addInScope env bndrs thing_inside = WUD uds' res where bndr_set = mkVarSet bndrs !(env', bad_joins) = preprocess_env env bndr_set !(WUD uds res) = thing_inside env' uds' = postprocess_uds bndrs bad_joins uds preprocess_env :: OccEnv -> VarSet -> (OccEnv, JoinPointInfo) preprocess_env env@(OccEnv { occ_join_points = join_points , occ_bs_rng = bs_rng_vars }) bndr_set | bad_joins = (drop_shadowed_swaps (drop_shadowed_joins env), join_points) | otherwise = (drop_shadowed_swaps env, emptyVarEnv) where drop_shadowed_swaps :: OccEnv -> OccEnv -- See Note [The binder-swap substitution] (BS3) drop_shadowed_swaps env@(OccEnv { occ_bs_env = swap_env }) | isEmptyVarEnv swap_env = env | bs_rng_vars `intersectsVarSet` bndr_set = env { occ_bs_env = emptyVarEnv, occ_bs_rng = emptyVarSet } | otherwise = env { occ_bs_env = swap_env `minusUFM` bndr_fm } drop_shadowed_joins :: OccEnv -> OccEnv -- See Note [Occurrence analysis for join points] wrinkle2 (W1) and (W2) drop_shadowed_joins env = env { 
occ_join_points = emptyVarEnv } -- bad_joins is true if it would be wrong to push occ_join_points inwards -- (a) `bndrs` includes any of the occ_join_points -- (b) `bndrs` includes any variables free in the RHSs of occ_join_points bad_joins :: Bool bad_joins = nonDetStrictFoldVarEnv_Directly is_bad False join_points bndr_fm :: UniqFM Var Var bndr_fm = getUniqSet bndr_set is_bad :: Unique -> OccInfoEnv -> Bool -> Bool is_bad uniq join_uds rest = uniq `elemUniqSet_Directly` bndr_set || not (bndr_fm `disjointUFM` join_uds) || rest postprocess_uds :: [Var] -> JoinPointInfo -> UsageDetails -> UsageDetails postprocess_uds bndrs bad_joins uds = add_bad_joins (delBndrsFromUDs bndrs uds) where add_bad_joins :: UsageDetails -> UsageDetails -- Add usage info for occ_join_points that we cannot push inwards -- because of shadowing -- See Note [Occurrence analysis for join points] wrinkle (W2) add_bad_joins uds | isEmptyVarEnv bad_joins = uds | otherwise = modifyUDEnv extend_with_bad_joins uds extend_with_bad_joins :: OccInfoEnv -> OccInfoEnv extend_with_bad_joins env = nonDetStrictFoldUFM_Directly add_bad_join env bad_joins add_bad_join :: Unique -> OccInfoEnv -> OccInfoEnv -> OccInfoEnv -- Behave like `andUDs` when adding in the bad_joins add_bad_join uniq join_env env | uniq `elemVarEnvByKey` env = plusVarEnv_C andLocalOcc env join_env | otherwise = env addJoinPoint :: OccEnv -> Id -> UsageDetails -> OccEnv addJoinPoint env bndr rhs_uds | isEmptyVarEnv zeroed_form = env | otherwise = env { occ_join_points = extendVarEnv (occ_join_points env) bndr zeroed_form } where zeroed_form = mkZeroedForm rhs_uds mkZeroedForm :: UsageDetails -> OccInfoEnv -- See Note [Occurrence analysis for join points] for "zeroed form" mkZeroedForm (UD { ud_env = rhs_occs }) = mapMaybeUFM do_one rhs_occs where do_one :: LocalOcc -> Maybe LocalOcc do_one (ManyOccL {}) = Nothing do_one occ@(OneOccL {}) = Just (occ { lo_n_br = 0 }) -------------------- transClosureFV :: VarEnv VarSet -> VarEnv VarSet -- 
If (f,g), (g,h) are in the input, then (f,h) is in the output -- as well as (f,g), (g,h) transClosureFV env | no_change = env | otherwise = transClosureFV (listToUFM_Directly new_fv_list) where (no_change, new_fv_list) = mapAccumL bump True (nonDetUFMToList env) -- It's OK to use nonDetUFMToList here because we'll forget the -- ordering by creating a new set with listToUFM bump no_change (b,fvs) | no_change_here = (no_change, (b,fvs)) | otherwise = (False, (b,new_fvs)) where (new_fvs, no_change_here) = extendFvs env fvs ------------- extendFvs_ :: VarEnv VarSet -> VarSet -> VarSet extendFvs_ env s = fst (extendFvs env s) -- Discard the Bool flag extendFvs :: VarEnv VarSet -> VarSet -> (VarSet, Bool) -- (extendFVs env s) returns -- (s `union` env(s), env(s) `subset` s) extendFvs env s | isNullUFM env = (s, True) | otherwise = (s `unionVarSet` extras, extras `subVarSet` s) where extras :: VarSet -- env(s) extras = nonDetStrictFoldUFM unionVarSet emptyVarSet $ -- It's OK to use nonDetStrictFoldUFM here because unionVarSet commutes intersectUFM_C (\x _ -> x) env (getUniqSet s) {- ************************************************************************ * * Binder swap * * ************************************************************************ Note [Binder swap] ~~~~~~~~~~~~~~~~~~ The "binder swap" transformation swaps occurrence of the scrutinee of a case for occurrences of the case-binder: (1) case x of b { pi -> ri } ==> case x of b { pi -> ri[b/x] } (2) case (x |> co) of b { pi -> ri } ==> case (x |> co) of b { pi -> ri[b |> sym co/x] } The substitution ri[b/x] etc is done by the occurrence analyser. See Note [The binder-swap substitution]. There are two reasons for making this swap: (A) It reduces the number of occurrences of the scrutinee, x. That in turn might reduce its occurrences to one, so we can inline it and save an allocation. E.g. let x = factorial y in case x of b { I# v -> ...x... 
} If we replace 'x' by 'b' in the alternative we get let x = factorial y in case x of b { I# v -> ...b... } and now we can inline 'x', thus case (factorial y) of b { I# v -> ...b... } (B) The case-binder b has unfolding information; in the example above we know that b = I# v. That in turn allows nested cases to simplify. Consider case x of b { I# v -> ...(case x of b2 { I# v2 -> rhs })... If we replace 'x' by 'b' in the alternative we get case x of b { I# v -> ...(case b of b2 { I# v2 -> rhs })... and now it is trivial to simplify the inner case: case x of b { I# v -> ...(let b2 = b in rhs)... The same can happen even if the scrutinee is a variable with a cast: see Note [Case of cast] The reason for doing these transformations /here in the occurrence analyser/ is because it allows us to adjust the OccInfo for 'x' and 'b' as we go. * Suppose the only occurrences of 'x' are the scrutinee and in the ri; then this transformation makes it occur just once, and hence get inlined right away. * If instead the Simplifier replaces occurrences of x with occurrences of b, that will mess up b's occurrence info. That in turn might have consequences. There is a danger though. Consider let v = x +# y in case (f v) of w -> ...v...v... And suppose that (f v) expands to just v. Then we'd like to use 'w' instead of 'v' in the alternative. But it may be too late; we may have substituted the (cheap) x+#y for v in the same simplifier pass that reduced (f v) to v. I think this is just too bad. CSE will recover some of it. Note [The binder-swap substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The binder-swap is implemented by the occ_bs_env field of OccEnv. There are two main pieces: * Given case x |> co of b { alts } we add [x :-> (b, sym co)] to the occ_bs_env environment; this is done by addBndrSwap. * Then, at an occurrence of a variable, we look up in the occ_bs_env to perform the swap. This is done by lookupBndrSwap. 
Some tricky corners: (BS1) We do the substitution before gathering occurrence info. So in the above example, an occurrence of x turns into an occurrence of b, and that's what we gather in the UsageDetails. It's as if the binder-swap occurred before occurrence analysis. See the computation of fun_uds in occAnalApp. (BS2) When doing a lookup in occ_bs_env, we may need to iterate, as you can see implemented in lookupBndrSwap. Why? Consider case x of a { 1# -> e1; DEFAULT -> case x of b { 2# -> e2; DEFAULT -> case x of c { 3# -> e3; DEFAULT -> ..x..a..b.. }}} At the first case addBndrSwap will extend occ_bs_env with [x :-> a] At the second case we occ-anal the scrutinee 'x', which looks up 'x in occ_bs_env, returning 'a', as it should. Then addBndrSwap will add [a :-> b] to occ_bs_env, yielding occ_bs_env = [x :-> a, a :-> b] At the third case we'll again look up 'x' which returns 'a'. But we don't want to stop the lookup there, else we'll end up with case x of a { 1# -> e1; DEFAULT -> case a of b { 2# -> e2; DEFAULT -> case a of c { 3# -> e3; DEFAULT -> ..a..b..c.. }}} Instead, we want iterate the lookup in addBndrSwap, to give case x of a { 1# -> e1; DEFAULT -> case a of b { 2# -> e2; DEFAULT -> case b of c { 3# -> e3; DEFAULT -> ..c..c..c.. }}} This makes a particular difference for case-merge, which works only if the scrutinee is the case-binder of the immediately enclosing case (Note [Merge Nested Cases] in GHC.Core.Opt.Simplify.Utils See #19581 for the bug report that showed this up. (BS3) We need care when shadowing. Suppose [x :-> b] is in occ_bs_env, and we encounter: (i) \x. blah Here we want to delete the x-binding from occ_bs_env (ii) \b. blah This is harder: we really want to delete all bindings that have 'b' free in the range. That is a bit tiresome to implement, so we compromise. We keep occ_bs_rng, which is the set of free vars of rng(occc_bs_env). If a binder shadows any of these variables, we discard all of occ_bs_env. Safe, if a bit brutal. 
NB, however: the simplifier de-shadows the code, so the next time around
this won't happen.

These checks are implemented in addInScope.
* (i) is needed only for Ids, but (ii) is needed for tyvars too (#22623)
  because if occ_bs_env has [x :-> ...a...] where `a` is a tyvar, we must
  not replace `x` by `...a...` under /\a. ...x..., or similarly under a
  case pattern match that binds `a`.

An alternative would be for the occurrence analyser to do cloning as
it goes.  In principle it could do so, but it'd make it a bit more
complicated and there is no great benefit.  The simplifier uses cloning
to get a no-shadowing situation, so the care-when-shadowing behaviour
above isn't needed for long.

(BS4) The domain of occ_bs_env can include GlobalIds.  Eg
        case M.foo of b { alts }
      We extend occ_bs_env with [M.foo :-> b].  That's fine.

(BS5) We have to apply the occ_bs_env substitution uniformly,
      including to (local) rules and unfoldings.

(BS6) We must be very careful with dictionaries.
      See Note [Care with binder-swap on dictionaries]

Note [Case of cast]
~~~~~~~~~~~~~~~~~~~
Consider        case (x `cast` co) of b { I# ->
                ... (case (x `cast` co) of {...}) ...
We'd like to eliminate the inner case.  That is the motivation for
equation (2) in Note [Binder swap].  When we get to the inner case, we
inline x, cancel the casts, and away we go.

Note [Care with binder-swap on dictionaries]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This Note explains why we need isDictId in scrutOkForBinderSwap.
Consider this tricky example (#21229, #21470):

  class Sing (b :: Bool) where sing :: Bool
  instance Sing 'True  where sing = True
  instance Sing 'False where sing = False

  f :: forall a.
Sing a => blah

  h = \ @(a :: Bool) ($dSing :: Sing a)
      let the_co = Main.N:Sing[0] :: Sing a ~R# Bool
      case ($dSing |> the_co) of wild
        True  -> f @'True (True |> sym the_co)
        False -> f @a dSing

Now do a binder-swap on the case-expression:

  h = \ @(a :: Bool) ($dSing :: Sing a)
      let the_co = Main.N:Sing[0] :: Sing a ~R# Bool
      case ($dSing |> the_co) of wild
        True  -> f @'True (True |> sym the_co)
        False -> f @a (wild |> sym the_co)

And now substitute `False` for `wild` (since wild=False in the False branch):

  h = \ @(a :: Bool) ($dSing :: Sing a)
      let the_co = Main.N:Sing[0] :: Sing a ~R# Bool
      case ($dSing |> the_co) of wild
        True  -> f @'True (True |> sym the_co)
        False -> f @a (False |> sym the_co)

And now we have a problem.  The specialiser will specialise (f @a d)
(for all types a and dictionaries d!!) with the dictionary
(False |> sym the_co), using Note [Specialising polymorphic dictionaries]
in GHC.Core.Opt.Specialise.

The real problem is the binder-swap.  It swaps a dictionary variable
$dSing (of kind Constraint) for a term variable wild (of kind Type).
And that is dangerous: a dictionary is a /singleton/ type whereas a
general term variable is not.  In this particular example, Bool is
most certainly not a singleton type!

Conclusion:
  for a /dictionary variable/ do not perform
  the clever cast version of the binder-swap

Hence the subtle isDictId in scrutOkForBinderSwap.

Note [Zap case binders in proxy bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From the original
     case x of cb(dead) { p -> ...x... }
we will get
     case x of cb(live) { p -> ...cb... }

Core Lint never expects to find an *occurrence* of an Id marked
as Dead, so we must zap the OccInfo on cb before making the
binding x = cb.  See #5028.

NB: the OccInfo on /occurrences/ really doesn't matter much; the
simplifier doesn't use it.  So this is only to satisfy the
perhaps-over-picky Lint.
-}

-- | Record a binder-swap for a case expression: given the scrutinee and
-- the case binder, extend occ_bs_env so that later occurrences of the
-- scrutinee variable are replaced by the case binder.
-- See Note [The binder-swap substitution]
addBndrSwap :: OutExpr -> Id -> OccEnv -> OccEnv
addBndrSwap scrut case_bndr
            env@(OccEnv { occ_bs_env = swap_env, occ_bs_rng = rng_vars })
  | DoBinderSwap scrut_var mco <- scrutOkForBinderSwap scrut
  , scrut_var /= case_bndr
      -- Consider: case x of x { ... }
      -- Do not add [x :-> x] to occ_bs_env, else lookupBndrSwap will loop
  = env { occ_bs_env = extendVarEnv swap_env scrut_var (case_bndr', mco)
        , occ_bs_rng = rng_vars `extendVarSet` case_bndr'
                       `unionVarSet` tyCoVarsOfMCo mco }

  | otherwise
  = env
  where
    case_bndr' = zapIdOccInfo case_bndr
                 -- See Note [Zap case binders in proxy bindings]

-- | See 'scrutOkForBinderSwap'.
data BinderSwapDecision
  = NoBinderSwap
  | DoBinderSwap OutVar MCoercion

scrutOkForBinderSwap :: OutExpr -> BinderSwapDecision
-- If (scrutOkForBinderSwap e) = DoBinderSwap v mco, then
--    v = e |> mco
-- See Note [Case of cast]
-- See Note [Care with binder-swap on dictionaries]
--
-- We use this same function in SpecConstr, and Simplify.Iteration,
-- when something binder-swap-like is happening
scrutOkForBinderSwap (Var v) = DoBinderSwap v MRefl
scrutOkForBinderSwap (Cast (Var v) co)
  | not (isDictId v)
  = DoBinderSwap v (MCo (mkSymCo co))
    -- Cast: see Note [Case of cast]
    -- isDictId: see Note [Care with binder-swap on dictionaries]
    -- The isDictId rejects a Constraint/Constraint binder-swap, perhaps
    -- over-conservatively.  But I have never seen one, so I'm leaving
    -- the code as simple as possible.  Losing the binder-swap in a
    -- rare case probably has very low impact.
scrutOkForBinderSwap (Tick _ e) = scrutOkForBinderSwap e  -- Drop ticks
scrutOkForBinderSwap _ = NoBinderSwap

lookupBndrSwap :: OccEnv -> Id -> (CoreExpr, Id)
-- See Note [The binder-swap substitution]
-- Returns an expression of the same type as Id
lookupBndrSwap env@(OccEnv { occ_bs_env = bs_env }) bndr
  = case lookupVarEnv bs_env bndr of {
       Nothing           -> (Var bndr, bndr) ;
       Just (bndr1, mco) ->

    -- Why do we iterate here?
    -- See (BS2) in Note [The binder-swap substitution]
    case lookupBndrSwap env bndr1 of
      (fun, fun_id) -> (mkCastMCo fun mco, fun_id) }


{- Historical note [Proxy let-bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to do the binder-swap transformation by introducing
a proxy let-binding, thus:

   case x of b { pi -> ri }
      ==>
   case x of b { pi -> let x = b in ri }

But that had two problems:

1. If 'x' is an imported GlobalId, we'd end up with a GlobalId
   on the LHS of a let-binding which isn't allowed.  We worked
   around this for a while by "localising" x, but it turned
   out to be very painful #16296,

2. In CorePrep we use the occurrence analyser to do dead-code
   elimination (see Note [Dead code in CorePrep]).  But that
   occasionally led to an unlifted let-binding
       case x of b { DEFAULT -> let x::Int# = b in ... }
   which disobeys one of CorePrep's output invariants (no unlifted
   let-bindings) -- see #5433.

Doing a substitution (via occ_bs_env) is much better.

Historical Note [no-case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We *used* to suppress the binder-swap in case expressions when
-fno-case-of-case is on.  Old remarks:
    "This happens in the first simplifier pass, and enhances full laziness.
    Here's the bad case:
            f = \ y -> ...(case x of I# v -> ...(case x of ...) ... )
    If we eliminate the inner case, we trap it inside the I# v -> arm,
    which might prevent some full laziness happening.  I've seen this
    in action in spectral/cichelli/Prog.hs:
             [(m,n) | m <- [1..max], n <- [1..max]]
    Hence the check for NoCaseOfCase."
However, now the full-laziness pass itself reverses the binder-swap, so this
check is no longer necessary.

Historical Note [Suppressing the case binder-swap]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This old note describes a problem that is also fixed by doing the
binder-swap in OccAnal:

    There is another situation when it might make sense to suppress the
    case-expression binder-swap.
If we have case x of w1 { DEFAULT -> case x of w2 { A -> e1; B -> e2 } ...other cases .... } We'll perform the binder-swap for the outer case, giving case x of w1 { DEFAULT -> case w1 of w2 { A -> e1; B -> e2 } ...other cases .... } But there is no point in doing it for the inner case, because w1 can't be inlined anyway. Furthermore, doing the case-swapping involves zapping w2's occurrence info (see paragraphs that follow), and that forces us to bind w2 when doing case merging. So we get case x of w1 { A -> let w2 = w1 in e1 B -> let w2 = w1 in e2 ...other cases .... } This is plain silly in the common case where w2 is dead. Even so, I can't see a good way to implement this idea. I tried not doing the binder-swap if the scrutinee was already evaluated but that failed big-time: data T = MkT !Int case v of w { MkT x -> case x of x1 { I# y1 -> case x of x2 { I# y2 -> ... Notice that because MkT is strict, x is marked "evaluated". But to eliminate the last case, we must either make sure that x (as well as x1) has unfolding MkT y1. The straightforward thing to do is to do the binder-swap. So this whole note is a no-op. It's fixed by doing the binder-swap in OccAnal because we can do the binder-swap unconditionally and still get occurrence analysis information right. ************************************************************************ * * \subsection[OccurAnal-types]{OccEnv} * * ************************************************************************ Note [UsageDetails and zapping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ On many occasions, we must modify all gathered occurrence data at once. For instance, all occurrences underneath a (non-one-shot) lambda set the 'occ_in_lam' flag to become 'True'. We could use 'mapVarEnv' to do this, but that takes O(n) time and we will do this often---in particular, there are many places where tail calls are not allowed, and each of these causes all variables to get marked with 'NoTailCallInfo'. 
Instead of relying on `mapVarEnv`, then, we carry three 'IdEnv's around along with the 'OccInfoEnv'. Each of these extra environments is a "zapped set" recording which variables have been zapped in some way. Zapping all occurrence info then simply means setting the corresponding zapped set to the whole 'OccInfoEnv', a fast O(1) operation. Note [LocalOcc] ~~~~~~~~~~~~~~~ LocalOcc is used purely internally, in the occurrence analyser. It differs from GHC.Types.Basic.OccInfo because it has only OneOcc and ManyOcc; it does not need IAmDead or IAmALoopBreaker. Note that `OneOccL` doesn't meant that it occurs /syntactially/ only once; it means that it is /used/ only once. It might occur syntactically many times. For example, in (case x of A -> y; B -> y; C -> True), * `y` is used only once * but it occurs syntactically twice -} type OccInfoEnv = IdEnv LocalOcc -- A finite map from an expression's -- free variables to their usage data LocalOcc -- See Note [LocalOcc] = OneOccL { lo_n_br :: {-# UNPACK #-} !BranchCount -- Number of syntactic occurrences , lo_tail :: !TailCallInfo -- Combining (AlwaysTailCalled 2) and (AlwaysTailCalled 3) -- gives NoTailCallInfo , lo_int_cxt :: !InterestingCxt } | ManyOccL !TailCallInfo instance Outputable LocalOcc where ppr (OneOccL { lo_n_br = n, lo_tail = tci }) = text "OneOccL" <> braces (ppr n <> comma <> ppr tci) ppr (ManyOccL tci) = text "ManyOccL" <> braces (ppr tci) localTailCallInfo :: LocalOcc -> TailCallInfo localTailCallInfo (OneOccL { lo_tail = tci }) = tci localTailCallInfo (ManyOccL tci) = tci type ZappedSet = OccInfoEnv -- Values are ignored data UsageDetails = UD { ud_env :: !OccInfoEnv , ud_z_many :: !ZappedSet -- apply 'markMany' to these , ud_z_in_lam :: !ZappedSet -- apply 'markInsideLam' to these , ud_z_tail :: !ZappedSet -- zap tail-call info for these } -- INVARIANT: All three zapped sets are subsets of ud_env instance Outputable UsageDetails where ppr ud@(UD { ud_env = env, ud_z_tail = z_tail }) = text "UD" <+> 
(braces $ fsep $ punctuate comma $ [ ppr uq <+> text ":->" <+> ppr (lookupOccInfoByUnique ud uq) | (uq, _) <- nonDetStrictFoldVarEnv_Directly do_one [] env ]) $$ nest 2 (text "ud_z_tail" <+> ppr z_tail) where do_one :: Unique -> LocalOcc -> [(Unique,LocalOcc)] -> [(Unique,LocalOcc)] do_one uniq occ occs = (uniq, occ) : occs --------------------- -- | TailUsageDetails captures the result of applying 'occAnalLamTail' -- to a function `\xyz.body`. The TailUsageDetails pairs together -- * the number of lambdas (including type lambdas: a JoinArity) -- * UsageDetails for the `body` of the lambda, unadjusted by `adjustTailUsage`. -- If the binding turns out to be a join point with the indicated join -- arity, this unadjusted usage details is just what we need; otherwise we -- need to discard tail calls. That's what `adjustTailUsage` does. data TailUsageDetails = TUD !JoinArity !UsageDetails instance Outputable TailUsageDetails where ppr (TUD ja uds) = lambda <> ppr ja <> ppr uds --------------------- data WithUsageDetails a = WUD !UsageDetails !a data WithTailUsageDetails a = WTUD !TailUsageDetails !a ------------------- -- UsageDetails API andUDs, orUDs :: UsageDetails -> UsageDetails -> UsageDetails andUDs = combineUsageDetailsWith andLocalOcc orUDs = combineUsageDetailsWith orLocalOcc mkOneOcc :: OccEnv -> Id -> InterestingCxt -> JoinArity -> UsageDetails mkOneOcc !env id int_cxt arity | not (isLocalId id) = emptyDetails | Just join_uds <- lookupVarEnv (occ_join_points env) id = -- See Note [Occurrence analysis for join points] assertPpr (not (isEmptyVarEnv join_uds)) (ppr id) $ -- We only put non-empty join-points into occ_join_points mkSimpleDetails (extendVarEnv join_uds id occ) | otherwise = mkSimpleDetails (unitVarEnv id occ) where occ = OneOccL { lo_n_br = 1, lo_int_cxt = int_cxt , lo_tail = AlwaysTailCalled arity } -- Add several occurrences, assumed not to be tail calls add_many_occ :: Var -> OccInfoEnv -> OccInfoEnv add_many_occ v env | isId v = extendVarEnv 
env v (ManyOccL NoTailCallInfo) | otherwise = env -- Give a non-committal binder info (i.e noOccInfo) because -- a) Many copies of the specialised thing can appear -- b) We don't want to substitute a BIG expression inside a RULE -- even if that's the only occurrence of the thing -- (Same goes for INLINE.) addManyOccs :: UsageDetails -> VarSet -> UsageDetails addManyOccs uds var_set | isEmptyVarSet var_set = uds | otherwise = uds { ud_env = add_to (ud_env uds) } where add_to env = nonDetStrictFoldUniqSet add_many_occ env var_set -- It's OK to use nonDetStrictFoldUniqSet here because add_many_occ commutes addLamCoVarOccs :: UsageDetails -> [Var] -> UsageDetails -- Add any CoVars free in the type of a lambda-binder -- See Note [Gather occurrences of coercion variables] addLamCoVarOccs uds bndrs = foldr add uds bndrs where add bndr uds = uds `addManyOccs` coVarsOfType (varType bndr) emptyDetails :: UsageDetails emptyDetails = mkSimpleDetails emptyVarEnv isEmptyDetails :: UsageDetails -> Bool isEmptyDetails (UD { ud_env = env }) = isEmptyVarEnv env mkSimpleDetails :: OccInfoEnv -> UsageDetails mkSimpleDetails env = UD { ud_env = env , ud_z_many = emptyVarEnv , ud_z_in_lam = emptyVarEnv , ud_z_tail = emptyVarEnv } modifyUDEnv :: (OccInfoEnv -> OccInfoEnv) -> UsageDetails -> UsageDetails modifyUDEnv f uds@(UD { ud_env = env }) = uds { ud_env = f env } delBndrsFromUDs :: [Var] -> UsageDetails -> UsageDetails -- Delete these binders from the UsageDetails delBndrsFromUDs bndrs (UD { ud_env = env, ud_z_many = z_many , ud_z_in_lam = z_in_lam, ud_z_tail = z_tail }) = UD { ud_env = env `delVarEnvList` bndrs , ud_z_many = z_many `delVarEnvList` bndrs , ud_z_in_lam = z_in_lam `delVarEnvList` bndrs , ud_z_tail = z_tail `delVarEnvList` bndrs } markAllMany, markAllInsideLam, markAllNonTail, markAllManyNonTail :: UsageDetails -> UsageDetails markAllMany ud@(UD { ud_env = env }) = ud { ud_z_many = env } markAllInsideLam ud@(UD { ud_env = env }) = ud { ud_z_in_lam = env } markAllNonTail 
ud@(UD { ud_env = env }) = ud { ud_z_tail = env }

markAllManyNonTail = markAllMany . markAllNonTail -- effectively sets to noOccInfo

-- | Conditional versions of the zapping operations above:
-- apply the zap only when the Bool is True.
markAllInsideLamIf, markAllNonTailIf :: Bool -> UsageDetails -> UsageDetails

markAllInsideLamIf  True  ud = markAllInsideLam ud
markAllInsideLamIf  False ud = ud

markAllNonTailIf True  ud = markAllNonTail ud
markAllNonTailIf False ud = ud

-- | Look up the tail-call info recorded for an Id, respecting the
-- ud_z_tail zapped set: an Id in that set gets NoTailCallInfo.
lookupTailCallInfo :: UsageDetails -> Id -> TailCallInfo
lookupTailCallInfo uds id
  | UD { ud_z_tail = z_tail, ud_env = env } <- uds
  , not (id `elemVarEnv` z_tail)
  , Just occ <- lookupVarEnv env id
  = localTailCallInfo occ
  | otherwise
  = NoTailCallInfo

udFreeVars :: VarSet -> UsageDetails -> VarSet
-- Find the subset of bndrs that are mentioned in uds
udFreeVars bndrs (UD { ud_env = env }) = restrictFreeVars bndrs env

-- | Restrict a set of binders to those with an entry in the OccInfoEnv.
restrictFreeVars :: VarSet -> OccInfoEnv -> VarSet
restrictFreeVars bndrs fvs = restrictUniqSetToUFM bndrs fvs

-------------------
-- Auxiliary functions for UsageDetails implementation

-- | Combine two UsageDetails, merging occurrence info for Ids in both
-- with the supplied function, and unioning the three zapped sets.
combineUsageDetailsWith :: (LocalOcc -> LocalOcc -> LocalOcc)
                        -> UsageDetails -> UsageDetails -> UsageDetails
{-# INLINE combineUsageDetailsWith #-}
combineUsageDetailsWith plus_occ_info
    uds1@(UD { ud_env = env1, ud_z_many = z_many1
             , ud_z_in_lam = z_in_lam1, ud_z_tail = z_tail1 })
    uds2@(UD { ud_env = env2, ud_z_many = z_many2
             , ud_z_in_lam = z_in_lam2, ud_z_tail = z_tail2 })
  | isEmptyVarEnv env1 = uds2   -- Fast paths when one side is empty
  | isEmptyVarEnv env2 = uds1
  | otherwise
  = UD { ud_env      = plusVarEnv_C plus_occ_info env1 env2
       , ud_z_many   = plusVarEnv z_many1   z_many2
       , ud_z_in_lam = plusVarEnv z_in_lam1 z_in_lam2
       , ud_z_tail   = plusVarEnv z_tail1   z_tail2 }

lookupLetOccInfo :: UsageDetails -> Id -> OccInfo
-- Don't use locally-generated occ_info for exported (visible-elsewhere)
-- things.  Instead just give noOccInfo.
-- NB: setBinderOcc will (rightly) erase any LoopBreaker info;
--     we are about to re-generate it and it shouldn't be "sticky"
lookupLetOccInfo ud id
  | isExportedId id = noOccInfo
  | otherwise       = lookupOccInfoByUnique ud (idUnique id)

lookupOccInfo :: UsageDetails -> Id -> OccInfo
lookupOccInfo ud id = lookupOccInfoByUnique ud (idUnique id)

-- | Core lookup: read an Id's occurrence info out of the UsageDetails,
-- applying the three lazily-recorded zapped sets (ud_z_many,
-- ud_z_in_lam, ud_z_tail) on the way out.
-- See Note [UsageDetails and zapping]
lookupOccInfoByUnique :: UsageDetails -> Unique -> OccInfo
lookupOccInfoByUnique (UD { ud_env      = env
                          , ud_z_many   = z_many
                          , ud_z_in_lam = z_in_lam
                          , ud_z_tail   = z_tail })
                      uniq
  = case lookupVarEnv_Directly env uniq of
      Nothing -> IAmDead
      Just (OneOccL { lo_n_br = n_br, lo_int_cxt = int_cxt
                    , lo_tail = tail_info })
          | uniq `elemVarEnvByKey` z_many
          -> ManyOccs { occ_tail = mk_tail_info tail_info }
          | otherwise
          -> OneOcc { occ_in_lam  = in_lam
                    , occ_n_br    = n_br
                    , occ_int_cxt = int_cxt
                    , occ_tail    = mk_tail_info tail_info }
          where
            in_lam | uniq `elemVarEnvByKey` z_in_lam = IsInsideLam
                   | otherwise                       = NotInsideLam

      Just (ManyOccL tail_info) -> ManyOccs { occ_tail = mk_tail_info tail_info }
  where
    mk_tail_info ti
        | uniq `elemVarEnvByKey` z_tail = NoTailCallInfo
        | otherwise                     = ti

-------------------
-- See Note [Adjusting right-hand sides]

adjustNonRecRhs :: JoinPointHood
                -> WithTailUsageDetails CoreExpr
                -> WithUsageDetails CoreExpr
-- ^ This function concentrates shared logic between occAnalNonRecBind and the
-- AcyclicSCC case of occAnalRec.
--   It returns the adjusted rhs UsageDetails combined with the body usage
adjustNonRecRhs mb_join_arity rhs_wuds@(WTUD _ rhs)
  = WUD (adjustTailUsage mb_join_arity rhs_wuds) rhs

adjustTailUsage :: JoinPointHood
                -> WithTailUsageDetails CoreExpr  -- Rhs usage, AFTER occAnalLamTail
                -> UsageDetails
adjustTailUsage mb_join_arity (WTUD (TUD rhs_ja uds) rhs)
  = -- c.f.
occAnal (Lam {}) markAllInsideLamIf (not one_shot) $ markAllNonTailIf (not exact_join) $ uds where one_shot = isOneShotFun rhs exact_join = mb_join_arity == JoinPoint rhs_ja adjustTailArity :: JoinPointHood -> TailUsageDetails -> UsageDetails adjustTailArity mb_rhs_ja (TUD ja usage) = markAllNonTailIf (mb_rhs_ja /= JoinPoint ja) usage type IdWithOccInfo = Id tagLamBinders :: UsageDetails -- Of scope -> [Id] -- Binders -> [IdWithOccInfo] -- Tagged binders tagLamBinders usage binders = map (tagLamBinder usage) binders tagLamBinder :: UsageDetails -- Of scope -> Id -- Binder -> IdWithOccInfo -- Tagged binders -- Used for lambda and case binders -- No-op on TyVars -- A lambda binder never has an unfolding, so no need to look for that tagLamBinder usage bndr = setBinderOcc (markNonTail occ) bndr -- markNonTail: don't try to make an argument into a join point where occ = lookupOccInfo usage bndr tagNonRecBinder :: TopLevelFlag -- At top level? -> OccInfo -- Of scope -> CoreBndr -- Binder -> (IdWithOccInfo, JoinPointHood) -- Tagged binder -- No-op on TyVars -- Precondition: OccInfo is not IAmDead tagNonRecBinder lvl occ bndr | okForJoinPoint lvl bndr tail_call_info , AlwaysTailCalled ar <- tail_call_info = (setBinderOcc occ bndr, JoinPoint ar) | otherwise = (setBinderOcc zapped_occ bndr, NotJoinPoint) where tail_call_info = tailCallInfo occ zapped_occ = markNonTail occ tagRecBinders :: TopLevelFlag -- At top level? -> UsageDetails -- Of body of let ONLY -> [NodeDetails] -> WithUsageDetails -- Adjusted details for whole scope, -- with binders removed [IdWithOccInfo] -- Tagged binders -- Substantially more complicated than non-recursive case. Need to adjust RHS -- details *before* tagging binders (because the tags depend on the RHSes). tagRecBinders lvl body_uds details_s = let bndrs = map nd_bndr details_s -- 1. See Note [Join arity prediction based on joinRhsArity] -- Determine possible join-point-hood of whole group, by testing for -- manifest join arity M. 
-- This (re-)asserts that makeNode had made tuds for that same arity M! unadj_uds = foldr (andUDs . test_manifest_arity) body_uds details_s test_manifest_arity ND{nd_rhs = WTUD tuds rhs} = adjustTailArity (JoinPoint (joinRhsArity rhs)) tuds will_be_joins = decideRecJoinPointHood lvl unadj_uds bndrs mb_join_arity :: Id -> JoinPointHood -- mb_join_arity: See Note [Join arity prediction based on joinRhsArity] -- This is the source O mb_join_arity bndr -- Can't use willBeJoinId_maybe here because we haven't tagged -- the binder yet (the tag depends on these adjustments!) | will_be_joins , AlwaysTailCalled arity <- lookupTailCallInfo unadj_uds bndr = JoinPoint arity | otherwise = assert (not will_be_joins) -- Should be AlwaysTailCalled if NotJoinPoint -- we are making join points! -- 2. Adjust usage details of each RHS, taking into account the -- join-point-hood decision rhs_udss' = [ adjustTailUsage (mb_join_arity bndr) rhs_wuds -- Matching occAnalLamTail in makeNode | ND { nd_bndr = bndr, nd_rhs = rhs_wuds } <- details_s ] -- 3. Compute final usage details from adjusted RHS details adj_uds = foldr andUDs body_uds rhs_udss' -- 4. Tag each binder with its adjusted details bndrs' = [ setBinderOcc (lookupLetOccInfo adj_uds bndr) bndr | bndr <- bndrs ] in WUD adj_uds bndrs' setBinderOcc :: OccInfo -> CoreBndr -> CoreBndr setBinderOcc occ_info bndr | isTyVar bndr = bndr | occ_info == idOccInfo bndr = bndr | otherwise = setIdOccInfo bndr occ_info -- | Decide whether some bindings should be made into join points or not, based -- on its occurrences. This is -- Returns `False` if they can't be join points. Note that it's an -- all-or-nothing decision, as if multiple binders are given, they're -- assumed to be mutually recursive. -- -- It must, however, be a final decision. If we say `True` for 'f', -- and then subsequently decide /not/ make 'f' into a join point, then -- the decision about another binding 'g' might be invalidated if (say) -- 'f' tail-calls 'g'. 
-- -- See Note [Invariants on join points] in "GHC.Core". decideRecJoinPointHood :: TopLevelFlag -> UsageDetails -> [CoreBndr] -> Bool decideRecJoinPointHood lvl usage bndrs = all ok bndrs -- Invariant 3: Either all are join points or none are where ok bndr = okForJoinPoint lvl bndr (lookupTailCallInfo usage bndr) okForJoinPoint :: TopLevelFlag -> Id -> TailCallInfo -> Bool -- See Note [Invariants on join points]; invariants cited by number below. -- Invariant 2 is always satisfiable by the simplifier by eta expansion. okForJoinPoint lvl bndr tail_call_info | isJoinId bndr -- A current join point should still be one! = warnPprTrace lost_join "Lost join point" lost_join_doc $ True | valid_join = True | otherwise = False where valid_join | NotTopLevel <- lvl , AlwaysTailCalled arity <- tail_call_info , -- Invariant 1 as applied to LHSes of rules all (ok_rule arity) (idCoreRules bndr) -- Invariant 2a: stable unfoldings -- See Note [Join points and INLINE pragmas] , ok_unfolding arity (realIdUnfolding bndr) -- Invariant 4: Satisfies polymorphism rule , isValidJoinPointType arity (idType bndr) = True | otherwise = False lost_join | JoinPoint ja <- idJoinPointHood bndr = not valid_join || (case tail_call_info of -- Valid join but arity differs AlwaysTailCalled ja' -> ja /= ja' _ -> False) | otherwise = False ok_rule _ BuiltinRule{} = False -- only possible with plugin shenanigans ok_rule join_arity (Rule { ru_args = args }) = args `lengthIs` join_arity -- Invariant 1 as applied to LHSes of rules -- ok_unfolding returns False if we should /not/ convert a non-join-id -- into a join-id, even though it is AlwaysTailCalled ok_unfolding join_arity (CoreUnfolding { uf_src = src, uf_tmpl = rhs }) = not (isStableSource src && join_arity > joinRhsArity rhs) ok_unfolding _ (DFunUnfolding {}) = False ok_unfolding _ _ = True lost_join_doc = vcat [ text "bndr:" <+> ppr bndr , text "tc:" <+> ppr tail_call_info , text "rules:" <+> ppr (idCoreRules bndr) , case tail_call_info of 
AlwaysTailCalled arity -> vcat [ text "ok_unf:" <+> ppr (ok_unfolding arity (realIdUnfolding bndr)) , text "ok_type:" <+> ppr (isValidJoinPointType arity (idType bndr)) ] _ -> empty ] {- Note [Join points and INLINE pragmas] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = let g = \x. not -- Arity 1 {-# INLINE g #-} in case x of A -> g True True B -> g True False C -> blah2 Here 'g' is always tail-called applied to 2 args, but the stable unfolding captured by the INLINE pragma has arity 1. If we try to convert g to be a join point, its unfolding will still have arity 1 (since it is stable, and we don't meddle with stable unfoldings), and Lint will complain (see Note [Invariants on join points], (2a), in GHC.Core. #13413. Moreover, since g is going to be inlined anyway, there is no benefit from making it a join point. If it is recursive, and uselessly marked INLINE, this will stop us making it a join point, which is annoying. But occasionally (notably in class methods; see Note [Instances and loop breakers] in GHC.Tc.TyCl.Instance) we mark recursive things as INLINE but the recursion unravels; so ignoring INLINE pragmas on recursive things isn't good either. 
See Invariant 2a of Note [Invariants on join points] in GHC.Core ************************************************************************ * * \subsection{Operations over OccInfo} * * ************************************************************************ -} markNonTail :: OccInfo -> OccInfo markNonTail IAmDead = IAmDead markNonTail occ = occ { occ_tail = NoTailCallInfo } andLocalOcc :: LocalOcc -> LocalOcc -> LocalOcc andLocalOcc occ1 occ2 = ManyOccL (tci1 `andTailCallInfo` tci2) where !tci1 = localTailCallInfo occ1 !tci2 = localTailCallInfo occ2 orLocalOcc :: LocalOcc -> LocalOcc -> LocalOcc -- (orLocalOcc occ1 occ2) is used -- when combining occurrence info from branches of a case orLocalOcc (OneOccL { lo_n_br = nbr1, lo_int_cxt = int_cxt1, lo_tail = tci1 }) (OneOccL { lo_n_br = nbr2, lo_int_cxt = int_cxt2, lo_tail = tci2 }) = OneOccL { lo_n_br = nbr1 + nbr2 , lo_int_cxt = int_cxt1 `mappend` int_cxt2 , lo_tail = tci1 `andTailCallInfo` tci2 } orLocalOcc occ1 occ2 = andLocalOcc occ1 occ2 andTailCallInfo :: TailCallInfo -> TailCallInfo -> TailCallInfo andTailCallInfo info@(AlwaysTailCalled arity1) (AlwaysTailCalled arity2) | arity1 == arity2 = info andTailCallInfo _ _ = NoTailCallInfo ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Pipeline/0000755000000000000000000000000007346545000021105 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Pipeline/Types.hs0000644000000000000000000001016007346545000022543 0ustar0000000000000000module GHC.Core.Opt.Pipeline.Types ( -- * Configuration of the core-to-core passes CorePluginPass, CoreToDo(..), bindsOnlyPass, pprPassDetails, ) where import GHC.Prelude import GHC.Core ( CoreProgram ) import GHC.Core.Opt.Monad ( CoreM, FloatOutSwitches ) import GHC.Core.Opt.Simplify ( SimplifyOpts(..) ) import GHC.Types.Basic ( CompilerPhase(..) 
) import GHC.Unit.Module.ModGuts import GHC.Utils.Outputable as Outputable {- ************************************************************************ * * The CoreToDo type and related types Abstraction of core-to-core passes to run. * * ************************************************************************ -} -- | A description of the plugin pass itself type CorePluginPass = ModGuts -> CoreM ModGuts bindsOnlyPass :: (CoreProgram -> CoreM CoreProgram) -> ModGuts -> CoreM ModGuts bindsOnlyPass pass guts = do { binds' <- pass (mg_binds guts) ; return (guts { mg_binds = binds' }) } data CoreToDo -- These are diff core-to-core passes, -- which may be invoked in any order, -- as many times as you like. = CoreDoSimplify !SimplifyOpts -- ^ The core-to-core simplifier. | CoreDoPluginPass String CorePluginPass | CoreDoFloatInwards | CoreDoFloatOutwards FloatOutSwitches | CoreLiberateCase | CoreDoPrintCore | CoreDoStaticArgs | CoreDoCallArity | CoreDoExitify | CoreDoDemand Bool -- Bool: Do worker/wrapper afterwards? -- See Note [Don't change boxity without worker/wrapper] | CoreDoCpr | CoreDoWorkerWrapper | CoreDoSpecialising | CoreDoSpecConstr | CoreCSE | CoreDoRuleCheck CompilerPhase String -- Check for non-application of rules -- matching this string | CoreDoNothing -- Useful when building up | CoreDoPasses [CoreToDo] -- lists of these things | CoreDesugar -- Right after desugaring, no simple optimisation yet! 
| CoreDesugarOpt -- CoreDesugarXXX: Not strictly a core-to-core pass, but produces -- Core output, and hence useful to pass to endPass | CoreTidy | CorePrep | CoreAddCallerCcs | CoreAddLateCcs instance Outputable CoreToDo where ppr (CoreDoSimplify _) = text "Simplifier" ppr (CoreDoPluginPass s _) = text "Core plugin: " <+> text s ppr CoreDoFloatInwards = text "Float inwards" ppr (CoreDoFloatOutwards f) = text "Float out" <> parens (ppr f) ppr CoreLiberateCase = text "Liberate case" ppr CoreDoStaticArgs = text "Static argument" ppr CoreDoCallArity = text "Called arity analysis" ppr CoreDoExitify = text "Exitification transformation" ppr (CoreDoDemand True) = text "Demand analysis (including Boxity)" ppr (CoreDoDemand False) = text "Demand analysis" ppr CoreDoCpr = text "Constructed Product Result analysis" ppr CoreDoWorkerWrapper = text "Worker Wrapper binds" ppr CoreDoSpecialising = text "Specialise" ppr CoreDoSpecConstr = text "SpecConstr" ppr CoreCSE = text "Common sub-expression" ppr CoreDesugar = text "Desugar (before optimization)" ppr CoreDesugarOpt = text "Desugar (after optimization)" ppr CoreTidy = text "Tidy Core" ppr CoreAddCallerCcs = text "Add caller cost-centres" ppr CoreAddLateCcs = text "Add late core cost-centres" ppr CorePrep = text "CorePrep" ppr CoreDoPrintCore = text "Print core" ppr (CoreDoRuleCheck {}) = text "Rule check" ppr CoreDoNothing = text "CoreDoNothing" ppr (CoreDoPasses passes) = text "CoreDoPasses" <+> ppr passes pprPassDetails :: CoreToDo -> SDoc pprPassDetails (CoreDoSimplify cfg) = vcat [ text "Max iterations =" <+> int n , ppr md ] where n = so_iterations cfg md = so_mode cfg pprPassDetails _ = Outputable.empty ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify.hs0000644000000000000000000005464207346545000021503 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Core.Opt.Simplify ( SimplifyExprOpts(..), SimplifyOpts(..) 
, simplifyExpr, simplifyPgm ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Core import GHC.Core.Rules import GHC.Core.Ppr ( pprCoreBindings, pprCoreExpr ) import GHC.Core.Opt.OccurAnal ( occurAnalysePgm, occurAnalyseExpr ) import GHC.Core.Stats ( coreBindsSize, coreBindsStats, exprSize ) import GHC.Core.Utils ( mkTicks, stripTicksTop ) import GHC.Core.Lint ( LintPassResultConfig, dumpPassResult, lintPassResult ) import GHC.Core.Opt.Simplify.Iteration ( simplTopBinds, simplExpr, simplImpRules ) import GHC.Core.Opt.Simplify.Utils ( activeRule ) import GHC.Core.Opt.Simplify.Inline ( activeUnfolding ) import GHC.Core.Opt.Simplify.Env import GHC.Core.Opt.Simplify.Monad import GHC.Core.Opt.Stats ( simplCountN ) import GHC.Core.FamInstEnv import GHC.Utils.Error ( withTiming ) import GHC.Utils.Logger as Logger import GHC.Utils.Outputable import GHC.Utils.Constants (debugIsOn) import GHC.Unit.Env ( UnitEnv, ueEPS ) import GHC.Unit.External import GHC.Unit.Module.ModGuts import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Basic import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Tickish import GHC.Types.Unique.FM import Control.Monad import Data.Foldable ( for_ ) {- ************************************************************************ * * Gentle simplification * * ************************************************************************ -} -- | Configuration record for `simplifyExpr`. -- The values of this datatype are /only/ driven by the demands of that function. 
data SimplifyExprOpts = SimplifyExprOpts { se_fam_inst :: ![FamInst] , se_mode :: !SimplMode , se_top_env_cfg :: !TopEnvConfig } simplifyExpr :: Logger -> ExternalUnitCache -> SimplifyExprOpts -> CoreExpr -> IO CoreExpr -- simplifyExpr is called by the driver to simplify an -- expression typed in at the interactive prompt simplifyExpr logger euc opts expr = withTiming logger (text "Simplify [expr]") (const ()) $ do { eps <- eucEPS euc ; ; let fam_envs = ( eps_fam_inst_env eps , extendFamInstEnvList emptyFamInstEnv $ se_fam_inst opts ) simpl_env = mkSimplEnv (se_mode opts) fam_envs top_env_cfg = se_top_env_cfg opts read_eps_rules = eps_rule_base <$> eucEPS euc read_ruleenv = updExternalPackageRules emptyRuleEnv <$> read_eps_rules ; let sz = exprSize expr ; (expr', counts) <- initSmpl logger read_ruleenv top_env_cfg sz $ simplExprGently simpl_env expr ; Logger.putDumpFileMaybe logger Opt_D_dump_simpl_stats "Simplifier statistics" FormatText (pprSimplCount counts) ; Logger.putDumpFileMaybe logger Opt_D_dump_simpl "Simplified expression" FormatCore (pprCoreExpr expr') ; return expr' } simplExprGently :: SimplEnv -> CoreExpr -> SimplM CoreExpr -- Simplifies an expression -- does occurrence analysis, then simplification -- and repeats (twice currently) because one pass -- alone leaves tons of crud. -- Used (a) for user expressions typed in at the interactive prompt -- (b) the LHS and RHS of a RULE -- (c) Template Haskell splices -- -- The name 'Gently' suggests that the SimplMode is InitialPhase, -- and in fact that is so.... but the 'Gently' in simplExprGently doesn't -- enforce that; it just simplifies the expression twice -- It's important that simplExprGently does eta reduction; see -- Note [Simplify rule LHS] above. The -- simplifier does indeed do eta reduction (it's in GHC.Core.Opt.Simplify.completeLam) -- but only if -O is on. 
simplExprGently env expr = do expr1 <- simplExpr env (occurAnalyseExpr expr) simplExpr env (occurAnalyseExpr expr1) {- ************************************************************************ * * \subsection{The driver for the simplifier} * * ************************************************************************ -} -- | Configuration record for `simplifyPgm`. -- The values of this datatype are /only/ driven by the demands of that function. data SimplifyOpts = SimplifyOpts { so_dump_core_sizes :: !Bool , so_iterations :: !Int , so_mode :: !SimplMode , so_pass_result_cfg :: !(Maybe LintPassResultConfig) -- Nothing => Do not Lint -- Just cfg => Lint like this , so_hpt_rules :: !RuleBase , so_top_env_cfg :: !TopEnvConfig } simplifyPgm :: Logger -> UnitEnv -> NamePprCtx -- For dumping -> SimplifyOpts -> ModGuts -> IO (SimplCount, ModGuts) -- New bindings simplifyPgm logger unit_env name_ppr_ctx opts guts@(ModGuts { mg_module = this_mod , mg_binds = binds, mg_rules = local_rules , mg_fam_inst_env = fam_inst_env }) = do { (termination_msg, it_count, counts_out, guts') <- do_iteration 1 [] binds local_rules ; when (logHasDumpFlag logger Opt_D_verbose_core2core && logHasDumpFlag logger Opt_D_dump_simpl_stats) $ logDumpMsg logger "Simplifier statistics for following pass" (vcat [text termination_msg <+> text "after" <+> ppr it_count <+> text "iterations", blankLine, pprSimplCount counts_out]) ; return (counts_out, guts') } where dump_core_sizes = so_dump_core_sizes opts mode = so_mode opts max_iterations = so_iterations opts top_env_cfg = so_top_env_cfg opts active_rule = activeRule mode active_unf = activeUnfolding mode -- Note the bang in !guts_no_binds. 
If you don't force `guts_no_binds` -- the old bindings are retained until the end of all simplifier iterations !guts_no_binds = guts { mg_binds = [], mg_rules = [] } hpt_rule_env :: RuleEnv hpt_rule_env = mkRuleEnv guts emptyRuleBase (so_hpt_rules opts) -- emptyRuleBase: no EPS rules yet; we will update -- them on each iteration to pick up the most up to date set do_iteration :: Int -- Counts iterations -> [SimplCount] -- Counts from earlier iterations, reversed -> CoreProgram -- Bindings -> [CoreRule] -- Local rules for imported Ids -> IO (String, Int, SimplCount, ModGuts) do_iteration iteration_no counts_so_far binds local_rules -- iteration_no is the number of the iteration we are -- about to begin, with '1' for the first | iteration_no > max_iterations -- Stop if we've run out of iterations = warnPprTrace (debugIsOn && (max_iterations > 2)) "Simplifier bailing out" ( hang (ppr this_mod <> text ", after" <+> int max_iterations <+> text "iterations" <+> (brackets $ hsep $ punctuate comma $ map (int . simplCountN) (reverse counts_so_far))) 2 (text "Size =" <+> ppr (coreBindsStats binds))) $ -- Subtract 1 from iteration_no to get the -- number of iterations we actually completed return ( "Simplifier bailed out", iteration_no - 1 , totalise counts_so_far , guts_no_binds { mg_binds = binds, mg_rules = local_rules } ) -- Try and force thunks off the binds; significantly reduces -- space usage, especially with -O. JRS, 000620. | let sz = coreBindsSize binds , () <- sz `seq` () -- Force it = do { -- Occurrence analysis let { tagged_binds = {-# SCC "OccAnal" #-} occurAnalysePgm this_mod active_unf active_rule local_rules binds } ; Logger.putDumpFileMaybe logger Opt_D_dump_occur_anal "Occurrence analysis" FormatCore (pprCoreBindings tagged_binds); -- read_eps_rules: -- We need to read rules from the EPS regularly because simplification can -- poke on IdInfo thunks, which in turn brings in new rules -- behind the scenes. 
Otherwise there's a danger we'll simply -- miss the rules for Ids hidden inside imported inlinings -- Hence just before attempting to match a rule we read the EPS -- value (via read_rule_env) and then combine it with the existing rule base. -- See `GHC.Core.Opt.Simplify.Monad.getSimplRules`. eps <- ueEPS unit_env ; let { -- base_rule_env contains -- (a) home package rules, fixed across all iterations -- (b) local rules (substituted) from `local_rules` arg to do_iteration -- Forcing base_rule_env to avoid unnecessary allocations. -- Not doing so results in +25.6% allocations of LargeRecord. ; !base_rule_env = updLocalRules hpt_rule_env local_rules ; read_eps_rules :: IO PackageRuleBase ; read_eps_rules = eps_rule_base <$> ueEPS unit_env ; read_rule_env :: IO RuleEnv ; read_rule_env = updExternalPackageRules base_rule_env <$> read_eps_rules ; fam_envs = (eps_fam_inst_env eps, fam_inst_env) ; simpl_env = mkSimplEnv mode fam_envs } ; -- Simplify the program ((binds1, rules1), counts1) <- initSmpl logger read_rule_env top_env_cfg sz $ do { (floats, env1) <- {-# SCC "SimplTopBinds" #-} simplTopBinds simpl_env tagged_binds -- Apply the substitution to rules defined in this module -- for imported Ids. 
Eg RULE map my_f = blah -- If we have a substitution my_f :-> other_f, we'd better -- apply it to the rule to, or it'll never match ; rules1 <- simplImpRules env1 local_rules ; return (getTopFloatBinds floats, rules1) } ; -- Stop if nothing happened; don't dump output -- See Note [Which transformations are innocuous] in GHC.Core.Opt.Stats if isZeroSimplCount counts1 then return ( "Simplifier reached fixed point", iteration_no , totalise (counts1 : counts_so_far) -- Include "free" ticks , guts_no_binds { mg_binds = binds1, mg_rules = rules1 } ) else do { -- Short out indirections -- We do this *after* at least one run of the simplifier -- because indirection-shorting uses the export flag on *occurrences* -- and that isn't guaranteed to be ok until after the first run propagates -- stuff from the binding site to its occurrences -- -- ToDo: alas, this means that indirection-shorting does not happen at all -- if the simplifier does nothing (not common, I know, but unsavoury) let { binds2 = {-# SCC "ZapInd" #-} shortOutIndirections binds1 } ; -- Dump the result of this iteration dump_end_iteration logger dump_core_sizes name_ppr_ctx iteration_no counts1 binds2 rules1 ; for_ (so_pass_result_cfg opts) $ \pass_result_cfg -> lintPassResult logger pass_result_cfg binds2 ; -- Loop do_iteration (iteration_no + 1) (counts1:counts_so_far) binds2 rules1 } } where -- Remember the counts_so_far are reversed totalise :: [SimplCount] -> SimplCount totalise = foldr (\c acc -> acc `plusSimplCount` c) (zeroSimplCount $ logHasDumpFlag logger Opt_D_dump_simpl_stats) dump_end_iteration :: Logger -> Bool -> NamePprCtx -> Int -> SimplCount -> CoreProgram -> [CoreRule] -> IO () dump_end_iteration logger dump_core_sizes name_ppr_ctx iteration_no counts binds rules = dumpPassResult logger dump_core_sizes name_ppr_ctx mb_flag hdr pp_counts binds rules where mb_flag | logHasDumpFlag logger Opt_D_dump_simpl_iterations = Just Opt_D_dump_simpl_iterations | otherwise = Nothing -- Show details if 
Opt_D_dump_simpl_iterations is on hdr = "Simplifier iteration=" ++ show iteration_no pp_counts = vcat [ text "---- Simplifier counts for" <+> text hdr , pprSimplCount counts , text "---- End of simplifier counts for" <+> text hdr ] {- ************************************************************************ * * Shorting out indirections * * ************************************************************************ If we have this: x_local = ...bindings... x_exported = x_local where x_exported is exported, and x_local is not, then we replace it with this: x_exported = x_local = x_exported ...bindings... Without this we never get rid of the x_exported = x_local thing. This save a gratuitous jump (from \tr{x_exported} to \tr{x_local}), and makes strictness information propagate better. This used to happen in the final phase, but it's tidier to do it here. Note [Messing up the exported Id's RULES] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We must be careful about discarding (obviously) or even merging the RULES on the exported Id. The example that went bad on me at one stage was this one: iterate :: (a -> a) -> a -> [a] [Exported] iterate = iterateList iterateFB c f x = x `c` iterateFB c f (f x) iterateList f x = x : iterateList f (f x) [Not exported] {-# RULES "iterate" forall f x. iterate f x = build (\c _n -> iterateFB c f x) "iterateFB" iterateFB (:) = iterateList #-} This got shorted out to: iterateList :: (a -> a) -> a -> [a] iterateList = iterate iterateFB c f x = x `c` iterateFB c f (f x) iterate f x = x : iterate f (f x) {-# RULES "iterate" forall f x. iterate f x = build (\c _n -> iterateFB c f x) "iterateFB" iterateFB (:) = iterate #-} And now we get an infinite loop in the rule system iterate f x -> build (\cn -> iterateFB c f x) -> iterateFB (:) f x -> iterate f x Old "solution": use rule switching-off pragmas to get rid of iterateList in the first place But in principle the user *might* want rules that only apply to the Id they say. 
And inline pragmas are similar {-# NOINLINE f #-} f = local local = Then we do not want to get rid of the NOINLINE. Hence hasShortableIdinfo. Note [Rules and indirection-zapping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem: what if x_exported has a RULE that mentions something in ...bindings...? Then the things mentioned can be out of scope! Solution a) Make sure that in this pass the usage-info from x_exported is available for ...bindings... b) If there are any such RULES, rec-ify the entire top-level. It'll get sorted out next time round Other remarks ~~~~~~~~~~~~~ If more than one exported thing is equal to a local thing (i.e., the local thing really is shared), then we do one only: \begin{verbatim} x_local = .... x_exported1 = x_local x_exported2 = x_local ==> x_exported1 = .... x_exported2 = x_exported1 \end{verbatim} We rely on prior eta reduction to simplify things like \begin{verbatim} x_exported = /\ tyvars -> x_local tyvars ==> x_exported = x_local \end{verbatim} Hence,there's a possibility of leaving unchanged something like this: \begin{verbatim} x_local = .... x_exported1 = x_local Int \end{verbatim} By the time we've thrown away the types in STG land this could be eliminated. But I don't think it's very common and it's dangerous to do this fiddling in STG land because we might eliminate a binding that's mentioned in the unfolding for something. Note [Indirection zapping and ticks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unfortunately this is another place where we need a special case for ticks. The following happens quite regularly: x_local = x_exported = tick x_local Which we want to become: x_exported = tick As it makes no sense to keep the tick and the expression on separate bindings. Note however that this might increase the ticks scoping over the execution of x_local, so we can only do this for floatable ticks. More often than not, other references will be unfoldings of x_exported, and therefore carry the tick anyway. 
-} type IndEnv = IdEnv (Id, [CoreTickish]) -- Maps local_id -> exported_id, ticks shortOutIndirections :: CoreProgram -> CoreProgram shortOutIndirections binds | isEmptyVarEnv ind_env = binds | no_need_to_flatten = binds' -- See Note [Rules and indirection-zapping] | otherwise = [Rec (flattenBinds binds')] -- for this no_need_to_flatten stuff where ind_env = makeIndEnv binds -- These exported Ids are the subjects of the indirection-elimination exp_ids = map fst $ nonDetEltsUFM ind_env -- It's OK to use nonDetEltsUFM here because we forget the ordering -- by immediately converting to a set or check if all the elements -- satisfy a predicate. exp_id_set = mkVarSet exp_ids no_need_to_flatten = all (null . ruleInfoRules . idSpecialisation) exp_ids binds' = concatMap zap binds zap (NonRec bndr rhs) = [NonRec b r | (b,r) <- zapPair (bndr,rhs)] zap (Rec pairs) = [Rec (concatMap zapPair pairs)] zapPair (bndr, rhs) | bndr `elemVarSet` exp_id_set = [] -- Kill the exported-id binding | Just (exp_id, ticks) <- lookupVarEnv ind_env bndr , (exp_id', lcl_id') <- transferIdInfo exp_id bndr = -- Turn a local-id binding into two bindings -- exp_id = rhs; lcl_id = exp_id [ (exp_id', mkTicks ticks rhs), (lcl_id', Var exp_id') ] | otherwise = [(bndr,rhs)] makeIndEnv :: [CoreBind] -> IndEnv makeIndEnv binds = foldl' add_bind emptyVarEnv binds where add_bind :: IndEnv -> CoreBind -> IndEnv add_bind env (NonRec exported_id rhs) = add_pair env (exported_id, rhs) add_bind env (Rec pairs) = foldl' add_pair env pairs add_pair :: IndEnv -> (Id,CoreExpr) -> IndEnv add_pair env (exported_id, exported) | (ticks, Var local_id) <- stripTicksTop tickishFloatable exported , shortMeOut env exported_id local_id = extendVarEnv env local_id (exported_id, ticks) add_pair env _ = env shortMeOut :: IndEnv -> Id -> Id -> Bool shortMeOut ind_env exported_id local_id -- The if-then-else stuff is just so I can get a pprTrace to see -- how often I don't get shorting out because of IdInfo stuff = if isExportedId 
exported_id && -- Only if this is exported isLocalId local_id && -- Only if this one is defined in this -- module, so that we *can* change its -- binding to be the exported thing! not (isExportedId local_id) && -- Only if this one is not itself exported, -- since the transformation will nuke it not (local_id `elemVarEnv` ind_env) -- Only if not already substituted for then if hasShortableIdInfo exported_id then True -- See Note [Messing up the exported Id's RULES] else warnPprTrace True "Not shorting out" (ppr exported_id) False else False hasShortableIdInfo :: Id -> Bool -- True if there is no user-attached IdInfo on exported_id, -- so we can safely discard it -- See Note [Messing up the exported Id's RULES] hasShortableIdInfo id = isEmptyRuleInfo (ruleInfo info) && isDefaultInlinePragma (inlinePragInfo info) && not (isStableUnfolding (realUnfoldingInfo info)) where info = idInfo id {- Note [Transferring IdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we have lcl_id = e; exp_id = lcl_id and lcl_id has useful IdInfo, we don't want to discard it by going gbl_id = e; lcl_id = gbl_id Instead, transfer IdInfo from lcl_id to exp_id, specifically * (Stable) unfolding * Strictness * Rules * Inline pragma Overwriting, rather than merging, seems to work ok. For the lcl_id we * Zap the InlinePragma. It might originally have had a NOINLINE, which we have now transferred; and we really want the lcl_id to inline now that its RHS is trivial! * Zap any Stable unfolding. agian, we want lcl_id = gbl_id to inline, replacing lcl_id by gbl_id. 
That won't happen if lcl_id has its original great big Stable unfolding -} transferIdInfo :: Id -> Id -> (Id, Id) -- See Note [Transferring IdInfo] transferIdInfo exported_id local_id = ( modifyIdInfo transfer exported_id , modifyIdInfo zap_info local_id ) where local_info = idInfo local_id transfer exp_info = exp_info `setDmdSigInfo` dmdSigInfo local_info `setCprSigInfo` cprSigInfo local_info `setUnfoldingInfo` realUnfoldingInfo local_info `setInlinePragInfo` inlinePragInfo local_info `setRuleInfo` addRuleInfo (ruleInfo exp_info) new_info new_info = setRuleInfoHead (idName exported_id) (ruleInfo local_info) -- Remember to set the function-name field of the -- rules as we transfer them from one function to another zap_info lcl_info = lcl_info `setInlinePragInfo` defaultInlinePragma `setUnfoldingInfo` noUnfolding ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/0000755000000000000000000000000007346545000021134 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/Env.hs0000644000000000000000000014502707346545000022231 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 \section[GHC.Core.Opt.Simplify.Monad]{The simplifier Monad} -} module GHC.Core.Opt.Simplify.Env ( -- * The simplifier mode SimplMode(..), updMode, smPedanticBottoms, smPlatform, -- * Environments SimplEnv(..), pprSimplEnv, -- Temp not abstract seArityOpts, seCaseCase, seCaseFolding, seCaseMerge, seCastSwizzle, seDoEtaReduction, seEtaExpand, seFloatEnable, seInline, seNames, seOptCoercionOpts, sePedanticBottoms, sePhase, sePlatform, sePreInline, seRuleOpts, seRules, seUnfoldingOpts, mkSimplEnv, extendIdSubst, extendCvIdSubst, extendTvSubst, extendCvSubst, zapSubstEnv, setSubstEnv, bumpCaseDepth, getInScope, setInScopeFromE, setInScopeFromF, setInScopeSet, modifyInScope, addNewInScopeIds, getSimplRules, enterRecGroupRHSs, reSimplifying, -- * Substitution results SimplSR(..), mkContEx, substId, lookupRecBndr, -- * Simplifying 'Id' 
binders simplNonRecBndr, simplNonRecJoinBndr, simplRecBndrs, simplRecJoinBndrs, simplBinder, simplBinders, substTy, substTyVar, getSubst, substCo, substCoVar, -- * Floats SimplFloats(..), emptyFloats, isEmptyFloats, mkRecFloats, mkFloatBind, addLetFloats, addJoinFloats, addFloats, extendFloats, wrapFloats, isEmptyJoinFloats, isEmptyLetFloats, doFloatFromRhs, getTopFloatBinds, -- * LetFloats LetFloats, FloatEnable(..), letFloatBinds, emptyLetFloats, unitLetFloat, addLetFlts, mapLetFloats, -- * JoinFloats JoinFloat, JoinFloats, emptyJoinFloats, wrapJoinFloats, wrapJoinFloatsX, unitJoinFloat, addJoinFlts ) where import GHC.Prelude import GHC.Core.Coercion.Opt ( OptCoercionOpts ) import GHC.Core.FamInstEnv ( FamInstEnv ) import GHC.Core.Opt.Arity ( ArityOpts(..) ) import GHC.Core.Opt.Simplify.Monad import GHC.Core.Rules.Config ( RuleOpts(..) ) import GHC.Core import GHC.Core.Utils import GHC.Core.Unfold import GHC.Core.TyCo.Subst (emptyIdSubstEnv) import GHC.Core.Multiplicity( Scaled(..), mkMultMul ) import GHC.Core.Make ( mkWildValBinder, mkCoreLet ) import GHC.Core.Type hiding ( substTy, substTyVar, substTyVarBndr, substCo , extendTvSubst, extendCvSubst ) import qualified GHC.Core.Coercion as Coercion import GHC.Core.Coercion hiding ( substCo, substCoVar, substCoVarBndr ) import qualified GHC.Core.Type as Type import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Id as Id import GHC.Types.Basic import GHC.Types.Unique.FM ( pprUniqFM ) import GHC.Data.OrdList import GHC.Data.Graph.UnVar import GHC.Builtin.Types import GHC.Platform ( Platform ) import GHC.Utils.Monad import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import Data.List ( intersperse, mapAccumL ) {- ************************************************************************ * * \subsubsection{The @SimplEnv@ type} * * ************************************************************************ -} {- Note [The environments of the Simplify pass] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions of the Simplify pass draw their contextual data from two environments: `SimplEnv`, which is passed to the functions as an argument, and `SimplTopEnv`, which is part of the `SimplM` monad. For both environments exist corresponding configuration records `SimplMode` and `TopEnvConfig` respectively. A configuration record denotes a unary datatype bundeling the various options and switches we provide to control the behaviour of the respective part of the Simplify pass. The value is provided by the driver using the functions found in the GHC.Driver.Config.Core.Opt.Simplify module. These configuration records are part in the environment to avoid needless copying of their values. This raises the question which data value goes in which of the four datatypes. For each value needed by the pass we ask the following two questions: * Does the value only make sense in a monadic environment? * Is it part of the configuration of the pass and provided by the user or is it it an internal value? Examples of values that make only sense in conjunction with `SimplM` are the logger and the values related to counting. As it does not make sense to use them in a pure function (the logger needs IO and counting needs access to the accumulated counts in the monad) we want these to live in `SimplTopEnv`. Other values, like the switches controlling the behaviour of the pass (e.g. whether to do case merging or not) are perfectly usable in a non-monadic setting. Indeed many of those are used in guard expressions and it would be cumbersome to query them from the monadic environment and feed them to the pure functions as an argument. Hence we conveniently store them in the `SpecEnv` environment which can be passed to pure functions as a whole. 
Now that we know in which of the two environments a particular value lives we turn to the second question to determine if the value is part of the configuration record embedded in the environment or if it is stored in an own field in the environment type. Some values like the tick factor must be provided from outside as we can neither derive it from other values provided to us nor does a constant value make sense. Other values like the maximal number of ticks are computed on environment initialization and we wish not to expose the field to the "user" or the pass -- it is an internal value. Therefore the distinction here is between "freely set by the caller" and "internally managed by the pass". Note that it doesn't matter for the decision procedure wheter a value is altered throughout an iteration of the Simplify pass: The fields sm_phase, sm_inline, sm_rules, sm_cast_swizzle and sm_eta_expand are updated locally (See the definitions of `updModeForStableUnfoldings` and `updModeForRules` in GHC.Core.Opt.Simplify.Utils) but they are still part of `SimplMode` as the caller of the Simplify pass needs to provide the initial values for those fields. The decision which value goes into which datatype can be summarized by the following table: | Usable in a | | pure setting | monadic setting | |----------------------------|--------------|-----------------| | Set by user | SimplMode | TopEnvConfig | | Computed on initialization | SimplEnv | SimplTopEnv | Note [Inline depth] ~~~~~~~~~~~~~~~~~~~ When we inline an /already-simplified/ unfolding, we * Zap the substitution environment; the inlined thing is an OutExpr * Bump the seInlineDepth in the SimplEnv Both these tasks are done in zapSubstEnv. The seInlineDepth tells us how deep in inlining we are. Currently, seInlineDepth is used for just one purpose: when we encounter a coercion we don't apply optCoercion to it if seInlineDepth>0. Reason: it has already been optimised once, no point in doing so again. 
-} data SimplEnv = SimplEnv { ----------- Static part of the environment ----------- -- Static in the sense of lexically scoped, -- wrt the original expression -- See Note [The environments of the Simplify pass] seMode :: !SimplMode , seFamEnvs :: !(FamInstEnv, FamInstEnv) -- The current substitution , seTvSubst :: TvSubstEnv -- InTyVar |--> OutType , seCvSubst :: CvSubstEnv -- InCoVar |--> OutCoercion , seIdSubst :: SimplIdSubst -- InId |--> OutExpr -- | Fast OutVarSet tracking which recursive RHSs we are analysing. -- See Note [Eta reduction in recursive RHSs] in GHC.Core.Opt.Arity. , seRecIds :: !UnVarSet ----------- Dynamic part of the environment ----------- -- Dynamic in the sense of describing the setup where -- the expression finally ends up -- The current set of in-scope variables -- They are all OutVars, and all bound in this module , seInScope :: !InScopeSet -- OutVars only , seCaseDepth :: !Int -- Depth of multi-branch case alternatives , seInlineDepth :: !Int -- 0 initially, 1 when we inline an already-simplified -- unfolding, and simplify again; and so on -- See Note [Inline depth] } seArityOpts :: SimplEnv -> ArityOpts seArityOpts env = sm_arity_opts (seMode env) seCaseCase :: SimplEnv -> Bool seCaseCase env = sm_case_case (seMode env) seCaseFolding :: SimplEnv -> Bool seCaseFolding env = sm_case_folding (seMode env) seCaseMerge :: SimplEnv -> Bool seCaseMerge env = sm_case_merge (seMode env) seCastSwizzle :: SimplEnv -> Bool seCastSwizzle env = sm_cast_swizzle (seMode env) seDoEtaReduction :: SimplEnv -> Bool seDoEtaReduction env = sm_do_eta_reduction (seMode env) seEtaExpand :: SimplEnv -> Bool seEtaExpand env = sm_eta_expand (seMode env) seFloatEnable :: SimplEnv -> FloatEnable seFloatEnable env = sm_float_enable (seMode env) seInline :: SimplEnv -> Bool seInline env = sm_inline (seMode env) seNames :: SimplEnv -> [String] seNames env = sm_names (seMode env) seOptCoercionOpts :: SimplEnv -> OptCoercionOpts seOptCoercionOpts env = sm_co_opt_opts 
(seMode env) sePedanticBottoms :: SimplEnv -> Bool sePedanticBottoms env = smPedanticBottoms (seMode env) sePhase :: SimplEnv -> CompilerPhase sePhase env = sm_phase (seMode env) sePlatform :: SimplEnv -> Platform sePlatform env = smPlatform (seMode env) sePreInline :: SimplEnv -> Bool sePreInline env = sm_pre_inline (seMode env) seRuleOpts :: SimplEnv -> RuleOpts seRuleOpts env = sm_rule_opts (seMode env) seRules :: SimplEnv -> Bool seRules env = sm_rules (seMode env) seUnfoldingOpts :: SimplEnv -> UnfoldingOpts seUnfoldingOpts env = sm_uf_opts (seMode env) -- See Note [The environments of the Simplify pass] data SimplMode = SimplMode -- See comments in GHC.Core.Opt.Simplify.Monad { sm_phase :: !CompilerPhase , sm_names :: ![String] -- ^ Name(s) of the phase , sm_rules :: !Bool -- ^ Whether RULES are enabled , sm_inline :: !Bool -- ^ Whether inlining is enabled , sm_eta_expand :: !Bool -- ^ Whether eta-expansion is enabled , sm_cast_swizzle :: !Bool -- ^ Do we swizzle casts past lambdas? 
, sm_uf_opts :: !UnfoldingOpts -- ^ Unfolding options , sm_case_case :: !Bool -- ^ Whether case-of-case is enabled , sm_pre_inline :: !Bool -- ^ Whether pre-inlining is enabled , sm_float_enable :: !FloatEnable -- ^ Whether to enable floating out , sm_do_eta_reduction :: !Bool , sm_arity_opts :: !ArityOpts , sm_rule_opts :: !RuleOpts , sm_case_folding :: !Bool , sm_case_merge :: !Bool , sm_co_opt_opts :: !OptCoercionOpts -- ^ Coercion optimiser options } instance Outputable SimplMode where ppr (SimplMode { sm_phase = p , sm_names = ss , sm_rules = r, sm_inline = i , sm_cast_swizzle = cs , sm_eta_expand = eta, sm_case_case = cc }) = text "SimplMode" <+> braces ( sep [ text "Phase =" <+> ppr p <+> brackets (text (concat $ intersperse "," ss)) <> comma , pp_flag i (text "inline") <> comma , pp_flag r (text "rules") <> comma , pp_flag eta (text "eta-expand") <> comma , pp_flag cs (text "cast-swizzle") <> comma , pp_flag cc (text "case-of-case") ]) where pp_flag f s = ppUnless f (text "no") <+> s smPedanticBottoms :: SimplMode -> Bool smPedanticBottoms opts = ao_ped_bot (sm_arity_opts opts) smPlatform :: SimplMode -> Platform smPlatform opts = roPlatform (sm_rule_opts opts) data FloatEnable -- Controls local let-floating = FloatDisabled -- Do no local let-floating | FloatNestedOnly -- Local let-floating for nested (NotTopLevel) bindings only | FloatEnabled -- Do local let-floating on all bindings {- Note [Local floating] ~~~~~~~~~~~~~~~~~~~~~ The Simplifier can perform local let-floating: it floats let-bindings out of the RHS of let-bindings. See Let-floating: moving bindings to give faster programs (ICFP'96) https://www.microsoft.com/en-us/research/publication/let-floating-moving-bindings-to-give-faster-programs/ Here's an example x = let y = v+1 in (y,true) The RHS of x is a thunk. Much better to float that y-binding out to give y = v+1 x = (y,true) Not only have we avoided building a thunk, but any (case x of (p,q) -> ...) 
in the scope of the x-binding can now be simplified. This local let-floating is done in GHC.Core.Opt.Simplify.prepareBinding, controlled by the predicate GHC.Core.Opt.Simplify.Env.doFloatFromRhs. The `FloatEnable` data type controls where local let-floating takes place; it allows you to specify that it should be done only for nested bindings; or for top-level bindings as well; or not at all. Note that all of this is quite separate from the global FloatOut pass; see GHC.Core.Opt.FloatOut. -} data SimplFloats = SimplFloats { -- Ordinary let bindings sfLetFloats :: LetFloats -- See Note [LetFloats] -- Join points , sfJoinFloats :: JoinFloats -- Handled separately; they don't go very far -- We consider these to be /inside/ sfLetFloats -- because join points can refer to ordinary bindings, -- but not vice versa -- Includes all variables bound by sfLetFloats and -- sfJoinFloats, plus at least whatever is in scope where -- these bindings land up. , sfInScope :: InScopeSet -- All OutVars } instance Outputable SimplFloats where ppr (SimplFloats { sfLetFloats = lf, sfJoinFloats = jf, sfInScope = is }) = text "SimplFloats" <+> braces (vcat [ text "lets: " <+> ppr lf , text "joins:" <+> ppr jf , text "in_scope:" <+> ppr is ]) emptyFloats :: SimplEnv -> SimplFloats emptyFloats env = SimplFloats { sfLetFloats = emptyLetFloats , sfJoinFloats = emptyJoinFloats , sfInScope = seInScope env } isEmptyFloats :: SimplFloats -> Bool -- Precondition: used only when sfJoinFloats is empty isEmptyFloats (SimplFloats { sfLetFloats = LetFloats fs _ , sfJoinFloats = js }) = assertPpr (isNilOL js) (ppr js ) $ isNilOL fs pprSimplEnv :: SimplEnv -> SDoc -- Used for debugging; selective pprSimplEnv env = vcat [text "TvSubst:" <+> ppr (seTvSubst env), text "CvSubst:" <+> ppr (seCvSubst env), text "IdSubst:" <+> id_subst_doc, text "InScope:" <+> in_scope_vars_doc ] where id_subst_doc = pprUniqFM ppr (seIdSubst env) in_scope_vars_doc = pprVarSet (getInScopeVars (seInScope env)) (vcat . 
map ppr_one) ppr_one v | isId v = ppr v <+> ppr (idUnfolding v) | otherwise = ppr v type SimplIdSubst = IdEnv SimplSR -- IdId |--> OutExpr -- See Note [Extending the IdSubstEnv] in GHC.Core.Subst -- | A substitution result. data SimplSR = DoneEx OutExpr JoinPointHood -- If x :-> DoneEx e ja is in the SimplIdSubst -- then replace occurrences of x by e -- and ja = Just a <=> x is a join-point of arity a -- See Note [Join arity in SimplIdSubst] | DoneId OutId -- If x :-> DoneId v is in the SimplIdSubst -- then replace occurrences of x by v -- and v is a join-point of arity a -- <=> x is a join-point of arity a | ContEx TvSubstEnv -- A suspended substitution CvSubstEnv SimplIdSubst InExpr -- If x :-> ContEx tv cv id e is in the SimplISubst -- then replace occurrences of x by (subst (tv,cv,id) e) instance Outputable SimplSR where ppr (DoneId v) = text "DoneId" <+> ppr v ppr (DoneEx e mj) = text "DoneEx" <> pp_mj <+> ppr e where pp_mj = case mj of NotJoinPoint -> empty JoinPoint n -> parens (int n) ppr (ContEx _tv _cv _id e) = vcat [text "ContEx" <+> ppr e {-, ppr (filter_env tv), ppr (filter_env id) -}] -- where -- fvs = exprFreeVars e -- filter_env env = filterVarEnv_Directly keep env -- keep uniq _ = uniq `elemUFM_Directly` fvs {- Note [SimplEnv invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~ seInScope: The in-scope part of Subst includes *all* in-scope TyVars and Ids The elements of the set may have better IdInfo than the occurrences of in-scope Ids, and (more important) they will have a correctly-substituted type. So we use a lookup in this set to replace occurrences The Ids in the InScopeSet are replete with their Rules, and as we gather info about the unfolding of an Id, we replace it in the in-scope set. The in-scope set is actually a mapping OutVar -> OutVar, and in case expressions we sometimes bind seIdSubst: The substitution is *apply-once* only, because InIds and OutIds can overlap. 
For example, we generally omit mappings a77 -> a77 from the substitution, when we decide not to clone a77, but it's quite legitimate to put the mapping in the substitution anyway. Furthermore, consider let x = case k of I# x77 -> ... in let y = case k of I# x77 -> ... in ... and suppose the body is strict in both x and y. Then the simplifier will pull the first (case k) to the top; so the second (case k) will cancel out, mapping x77 to, well, x77! But one is an in-Id and the other is an out-Id. Of course, the substitution *must* applied! Things in its domain simply aren't necessarily bound in the result. * substId adds a binding (DoneId new_id) to the substitution if the Id's unique has changed Note, though that the substitution isn't necessarily extended if the type of the Id changes. Why not? Because of the next point: * We *always, always* finish by looking up in the in-scope set any variable that doesn't get a DoneEx or DoneVar hit in the substitution. Reason: so that we never finish up with a "old" Id in the result. An old Id might point to an old unfolding and so on... which gives a space leak. [The DoneEx and DoneVar hits map to "new" stuff.] * It follows that substExpr must not do a no-op if the substitution is empty. substType is free to do so, however. * When we come to a let-binding (say) we generate new IdInfo, including an unfolding, attach it to the binder, and add this newly adorned binder to the in-scope set. So all subsequent occurrences of the binder will get mapped to the full-adorned binder, which is also the one put in the binding site. * The in-scope "set" usually maps x->x; we use it simply for its domain. But sometimes we have two in-scope Ids that are synonyms, and should map to the same target: x->x, y->x. Notably: case y of x { ... 
} That's why the "set" is actually a VarEnv Var Note [Join arity in SimplIdSubst] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have to remember which incoming variables are join points: the occurrences may not be marked correctly yet, and we're in change of propagating the change if OccurAnal makes something a join point). Normally the in-scope set is where we keep the latest information, but the in-scope set tracks only OutVars; if a binding is unconditionally inlined (via DoneEx), it never makes it into the in-scope set, and we need to know at the occurrence site that the variable is a join point so that we know to drop the context. Thus we remember which join points we're substituting. -} mkSimplEnv :: SimplMode -> (FamInstEnv, FamInstEnv) -> SimplEnv mkSimplEnv mode fam_envs = SimplEnv { seMode = mode , seFamEnvs = fam_envs , seInScope = init_in_scope , seTvSubst = emptyVarEnv , seCvSubst = emptyVarEnv , seIdSubst = emptyVarEnv , seRecIds = emptyUnVarSet , seCaseDepth = 0 , seInlineDepth = 0 } -- The top level "enclosing CC" is "SUBSUMED". init_in_scope :: InScopeSet init_in_scope = mkInScopeSet (unitVarSet (mkWildValBinder ManyTy unitTy)) -- See Note [WildCard binders] {- Note [WildCard binders] ~~~~~~~~~~~~~~~~~~~~~~~ The program to be simplified may have wild binders case e of wild { p -> ... } We want to *rename* them away, so that there are no occurrences of 'wild-id' (with wildCardKey). The easy way to do that is to start of with a representative Id in the in-scope set There can be *occurrences* of wild-id. For example, GHC.Core.Make.mkCoreApp transforms e (a /# b) --> case (a /# b) of wild { DEFAULT -> e wild } This is ok provided 'wild' isn't free in 'e', and that's the delicate thing. Generally, you want to run the simplifier to get rid of the wild-ids before doing much else. It's a very dark corner of GHC. Maybe it should be cleaned up. 
-} updMode :: (SimplMode -> SimplMode) -> SimplEnv -> SimplEnv updMode upd env = -- Avoid keeping env alive in case inlining fails. let mode = upd $! (seMode env) in env { seMode = mode } bumpCaseDepth :: SimplEnv -> SimplEnv bumpCaseDepth env = env { seCaseDepth = seCaseDepth env + 1 } reSimplifying :: SimplEnv -> Bool reSimplifying (SimplEnv { seInlineDepth = n }) = n>0 --------------------- extendIdSubst :: SimplEnv -> Id -> SimplSR -> SimplEnv extendIdSubst env@(SimplEnv {seIdSubst = subst}) var res = assertPpr (isId var && not (isCoVar var)) (ppr var) $ env { seIdSubst = extendVarEnv subst var res } extendTvSubst :: SimplEnv -> TyVar -> Type -> SimplEnv extendTvSubst env@(SimplEnv {seTvSubst = tsubst}) var res = assertPpr (isTyVar var) (ppr var $$ ppr res) $ env {seTvSubst = extendVarEnv tsubst var res} extendCvSubst :: SimplEnv -> CoVar -> Coercion -> SimplEnv extendCvSubst env@(SimplEnv {seCvSubst = csubst}) var co = assert (isCoVar var) $ env {seCvSubst = extendVarEnv csubst var co} extendCvIdSubst :: SimplEnv -> Id -> OutExpr -> SimplEnv extendCvIdSubst env bndr (Coercion co) = extendCvSubst env bndr co extendCvIdSubst env bndr rhs = extendIdSubst env bndr (DoneEx rhs NotJoinPoint) --------------------- getInScope :: SimplEnv -> InScopeSet getInScope env = seInScope env setInScopeSet :: SimplEnv -> InScopeSet -> SimplEnv setInScopeSet env in_scope = env {seInScope = in_scope} setInScopeFromE :: SimplEnv -> SimplEnv -> SimplEnv -- See Note [Setting the right in-scope set] setInScopeFromE rhs_env here_env = rhs_env { seInScope = seInScope here_env } setInScopeFromF :: SimplEnv -> SimplFloats -> SimplEnv setInScopeFromF env floats = env { seInScope = sfInScope floats } addNewInScopeIds :: SimplEnv -> [CoreBndr] -> SimplEnv -- The new Ids are guaranteed to be freshly allocated addNewInScopeIds env@(SimplEnv { seInScope = in_scope, seIdSubst = id_subst }) vs -- See Note [Bangs in the Simplifier] = let !in_scope1 = in_scope `extendInScopeSetList` vs !id_subst1 = 
id_subst `delVarEnvList` vs in env { seInScope = in_scope1, seIdSubst = id_subst1 } -- Why delete? Consider -- let x = a*b in (x, \x -> x+3) -- We add [x |-> a*b] to the substitution, but we must -- _delete_ it from the substitution when going inside -- the (\x -> ...)! modifyInScope :: SimplEnv -> CoreBndr -> SimplEnv -- The variable should already be in scope, but -- replace the existing version with this new one -- which has more information modifyInScope env@(SimplEnv {seInScope = in_scope}) v = env {seInScope = extendInScopeSet in_scope v} enterRecGroupRHSs :: SimplEnv -> [OutBndr] -> (SimplEnv -> SimplM (r, SimplEnv)) -> SimplM (r, SimplEnv) enterRecGroupRHSs env bndrs k = do --pprTraceM "enterRecGroupRHSs" (ppr bndrs) (r, env'') <- k env{seRecIds = extendUnVarSetList bndrs (seRecIds env)} return (r, env''{seRecIds = seRecIds env}) {- Note [Setting the right in-scope set] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider \x. (let x = e in b) arg[x] where the let shadows the lambda. Really this means something like \x1. (let x2 = e in b) arg[x1] - When we capture the 'arg' in an ApplyToVal continuation, we capture the environment, which says what 'x' is bound to, namely x1 - Then that continuation gets pushed under the let - Finally we simplify 'arg'. We want - the static, lexical environment binding x :-> x1 - the in-scopeset from "here", under the 'let' which includes both x1 and x2 It's important to have the right in-scope set, else we may rename a variable to one that is already in scope. So we must pick up the in-scope set from "here", but otherwise use the environment we captured along with 'arg'. This transfer of in-scope set is done by setInScopeFromE. 
-} --------------------- zapSubstEnv :: SimplEnv -> SimplEnv -- See Note [Inline depth] -- We call zapSubstEnv precisely when we are about to -- simplify an already-simplified term zapSubstEnv env@(SimplEnv { seInlineDepth = n }) = env { seTvSubst = emptyVarEnv, seCvSubst = emptyVarEnv, seIdSubst = emptyVarEnv , seInlineDepth = n+1 } setSubstEnv :: SimplEnv -> TvSubstEnv -> CvSubstEnv -> SimplIdSubst -> SimplEnv setSubstEnv env tvs cvs ids = env { seTvSubst = tvs, seCvSubst = cvs, seIdSubst = ids } mkContEx :: SimplEnv -> InExpr -> SimplSR mkContEx (SimplEnv { seTvSubst = tvs, seCvSubst = cvs, seIdSubst = ids }) e = ContEx tvs cvs ids e {- ************************************************************************ * * \subsection{LetFloats} * * ************************************************************************ Note [LetFloats] ~~~~~~~~~~~~~~~~ The LetFloats is a bunch of bindings, classified by a FloatFlag. The `FloatFlag` contains summary information about the bindings, see the data type declaration of `FloatFlag` Examples NonRec x (y:ys) FltLifted Rec [(x,rhs)] FltLifted NonRec x* (p:q) FltOKSpec -- RHS is WHNF. Question: why not FltLifted? 
NonRec x# (y +# 3) FltOkSpec -- Unboxed, but ok-for-spec'n NonRec x* (f y) FltCareful -- Strict binding; might fail or diverge NonRec x# (a /# b) FltCareful -- Might fail; does not satisfy let-can-float invariant NonRec x# (f y) FltCareful -- Might diverge; does not satisfy let-can-float invariant -} data LetFloats = LetFloats (OrdList OutBind) FloatFlag -- See Note [LetFloats] type JoinFloat = OutBind type JoinFloats = OrdList JoinFloat data FloatFlag = FltLifted -- All bindings are lifted and lazy *or* -- consist of a single primitive string literal -- Hence ok to float to top level, or recursive -- NB: consequence: all bindings satisfy let-can-float invariant | FltOkSpec -- All bindings are FltLifted *or* -- strict (perhaps because unlifted, -- perhaps because of a strict binder), -- *and* ok-for-speculation -- Hence ok to float out of the RHS -- of a lazy non-recursive let binding -- (but not to top level, or into a rec group) -- NB: consequence: all bindings satisfy let-can-float invariant | FltCareful -- At least one binding is strict (or unlifted) -- and not guaranteed cheap -- Do not float these bindings out of a lazy let! 
-- NB: some bindings may not satisfy let-can-float instance Outputable LetFloats where ppr (LetFloats binds ff) = ppr ff $$ ppr (fromOL binds) instance Outputable FloatFlag where ppr FltLifted = text "FltLifted" ppr FltOkSpec = text "FltOkSpec" ppr FltCareful = text "FltCareful" andFF :: FloatFlag -> FloatFlag -> FloatFlag andFF FltCareful _ = FltCareful andFF FltOkSpec FltCareful = FltCareful andFF FltOkSpec _ = FltOkSpec andFF FltLifted flt = flt doFloatFromRhs :: FloatEnable -> TopLevelFlag -> RecFlag -> Bool -> SimplFloats -> OutExpr -> Bool -- If you change this function look also at FloatIn.noFloatIntoRhs doFloatFromRhs fe lvl rec strict_bind (SimplFloats { sfLetFloats = LetFloats fs ff }) rhs = floatEnabled lvl fe && not (isNilOL fs) && want_to_float && can_float where want_to_float = isTopLevel lvl || exprIsCheap rhs || exprIsExpandable rhs -- See Note [Float when cheap or expandable] can_float = case ff of FltLifted -> True FltOkSpec -> isNotTopLevel lvl && isNonRec rec FltCareful -> isNotTopLevel lvl && isNonRec rec && strict_bind -- Whether any floating is allowed by flags. floatEnabled :: TopLevelFlag -> FloatEnable -> Bool floatEnabled _ FloatDisabled = False floatEnabled lvl FloatNestedOnly = not (isTopLevel lvl) floatEnabled _ FloatEnabled = True {- Note [Float when cheap or expandable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want to float a let from a let if the residual RHS is a) cheap, such as (\x. blah) b) expandable, such as (f b) if f is CONLIKE But there are - cheap things that are not expandable (eg \x. expensive) - expandable things that are not cheap (eg (f b) where b is CONLIKE) so we must take the 'or' of the two. 
-} emptyLetFloats :: LetFloats emptyLetFloats = LetFloats nilOL FltLifted isEmptyLetFloats :: LetFloats -> Bool isEmptyLetFloats (LetFloats fs _) = isNilOL fs emptyJoinFloats :: JoinFloats emptyJoinFloats = nilOL isEmptyJoinFloats :: JoinFloats -> Bool isEmptyJoinFloats = isNilOL unitLetFloat :: OutBind -> LetFloats -- This key function constructs a singleton float with the right form unitLetFloat bind = assert (all (not . isJoinId) (bindersOf bind)) $ LetFloats (unitOL bind) (flag bind) where flag (Rec {}) = FltLifted flag (NonRec bndr rhs) | not (isStrictId bndr) = FltLifted | exprIsTickedString rhs = FltLifted -- String literals can be floated freely. -- See Note [Core top-level string literals] in GHC.Core. | exprOkForSpeculation rhs = FltOkSpec -- Unlifted, and lifted but ok-for-spec (eg HNF) | otherwise = FltCareful unitJoinFloat :: OutBind -> JoinFloats unitJoinFloat bind = assert (all isJoinId (bindersOf bind)) $ unitOL bind mkFloatBind :: SimplEnv -> OutBind -> (SimplFloats, SimplEnv) -- Make a singleton SimplFloats, and -- extend the incoming SimplEnv's in-scope set with its binders -- These binders may already be in the in-scope set, -- but may have by now been augmented with more IdInfo mkFloatBind env bind = (floats, env { seInScope = in_scope' }) where floats | isJoinBind bind = SimplFloats { sfLetFloats = emptyLetFloats , sfJoinFloats = unitJoinFloat bind , sfInScope = in_scope' } | otherwise = SimplFloats { sfLetFloats = unitLetFloat bind , sfJoinFloats = emptyJoinFloats , sfInScope = in_scope' } -- See Note [Bangs in the Simplifier] !in_scope' = seInScope env `extendInScopeSetBind` bind extendFloats :: SimplFloats -> OutBind -> SimplFloats -- Add this binding to the floats, and extend the in-scope env too extendFloats (SimplFloats { sfLetFloats = floats , sfJoinFloats = jfloats , sfInScope = in_scope }) bind | isJoinBind bind = SimplFloats { sfInScope = in_scope' , sfLetFloats = floats , sfJoinFloats = jfloats' } | otherwise = SimplFloats { 
sfInScope = in_scope' , sfLetFloats = floats' , sfJoinFloats = jfloats } where in_scope' = in_scope `extendInScopeSetBind` bind floats' = floats `addLetFlts` unitLetFloat bind jfloats' = jfloats `addJoinFlts` unitJoinFloat bind addLetFloats :: SimplFloats -> LetFloats -> SimplFloats -- Add the let-floats for env2 to env1; -- *plus* the in-scope set for env2, which is bigger -- than that for env1 addLetFloats floats let_floats = floats { sfLetFloats = sfLetFloats floats `addLetFlts` let_floats , sfInScope = sfInScope floats `extendInScopeFromLF` let_floats } extendInScopeFromLF :: InScopeSet -> LetFloats -> InScopeSet extendInScopeFromLF in_scope (LetFloats binds _) = foldlOL extendInScopeSetBind in_scope binds addJoinFloats :: SimplFloats -> JoinFloats -> SimplFloats addJoinFloats floats join_floats = floats { sfJoinFloats = sfJoinFloats floats `addJoinFlts` join_floats , sfInScope = foldlOL extendInScopeSetBind (sfInScope floats) join_floats } addFloats :: SimplFloats -> SimplFloats -> SimplFloats -- Add both let-floats and join-floats for env2 to env1; -- *plus* the in-scope set for env2, which is bigger -- than that for env1 addFloats (SimplFloats { sfLetFloats = lf1, sfJoinFloats = jf1 }) (SimplFloats { sfLetFloats = lf2, sfJoinFloats = jf2, sfInScope = in_scope }) = SimplFloats { sfLetFloats = lf1 `addLetFlts` lf2 , sfJoinFloats = jf1 `addJoinFlts` jf2 , sfInScope = in_scope } addLetFlts :: LetFloats -> LetFloats -> LetFloats addLetFlts (LetFloats bs1 l1) (LetFloats bs2 l2) = LetFloats (bs1 `appOL` bs2) (l1 `andFF` l2) letFloatBinds :: LetFloats -> [CoreBind] letFloatBinds (LetFloats bs _) = fromOL bs addJoinFlts :: JoinFloats -> JoinFloats -> JoinFloats addJoinFlts = appOL mkRecFloats :: SimplFloats -> SimplFloats -- Flattens the floats into a single Rec group, -- They must either all be lifted LetFloats or all JoinFloats mkRecFloats floats@(SimplFloats { sfLetFloats = LetFloats bs _ff , sfJoinFloats = jbs , sfInScope = in_scope }) = assertPpr (isNilOL bs || 
isNilOL jbs) (ppr floats) $ SimplFloats { sfLetFloats = floats' , sfJoinFloats = jfloats' , sfInScope = in_scope } where -- See Note [Bangs in the Simplifier] !floats' | isNilOL bs = emptyLetFloats | otherwise = unitLetFloat (Rec (flattenBinds (fromOL bs))) !jfloats' | isNilOL jbs = emptyJoinFloats | otherwise = unitJoinFloat (Rec (flattenBinds (fromOL jbs))) wrapFloats :: SimplFloats -> OutExpr -> OutExpr -- Wrap the floats around the expression wrapFloats (SimplFloats { sfLetFloats = LetFloats bs flag , sfJoinFloats = jbs }) body = foldrOL mk_let (wrapJoinFloats jbs body) bs -- Note: Always safe to put the joins on the inside -- since the values can't refer to them where mk_let | FltCareful <- flag = mkCoreLet -- need to enforce let-can-float-invariant | otherwise = Let -- let-can-float invariant hold wrapJoinFloatsX :: SimplFloats -> OutExpr -> (SimplFloats, OutExpr) -- Wrap the sfJoinFloats of the env around the expression, -- and take them out of the SimplEnv wrapJoinFloatsX floats body = ( floats { sfJoinFloats = emptyJoinFloats } , wrapJoinFloats (sfJoinFloats floats) body ) wrapJoinFloats :: JoinFloats -> OutExpr -> OutExpr -- Wrap the sfJoinFloats of the env around the expression, -- and take them out of the SimplEnv wrapJoinFloats join_floats body = foldrOL Let body join_floats getTopFloatBinds :: SimplFloats -> [CoreBind] getTopFloatBinds (SimplFloats { sfLetFloats = lbs , sfJoinFloats = jbs}) = assert (isNilOL jbs) $ -- Can't be any top-level join bindings letFloatBinds lbs {-# INLINE mapLetFloats #-} mapLetFloats :: LetFloats -> ((Id,CoreExpr) -> (Id,CoreExpr)) -> LetFloats mapLetFloats (LetFloats fs ff) fun = LetFloats fs1 ff where app (NonRec b e) = case fun (b,e) of (b',e') -> NonRec b' e' app (Rec bs) = Rec (strictMap fun bs) !fs1 = (mapOL' app fs) -- See Note [Bangs in the Simplifier] {- ************************************************************************ * * Substitution of Vars * * 
************************************************************************ Note [Global Ids in the substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We look up even a global (eg imported) Id in the substitution. Consider case X.g_34 of b { (a,b) -> ... case X.g_34 of { (p,q) -> ...} ... } The binder-swap in the occurrence analyser will add a binding for a LocalId version of g (with the same unique though): case X.g_34 of b { (a,b) -> let g_34 = b in ... case X.g_34 of { (p,q) -> ...} ... } So we want to look up the inner X.g_34 in the substitution, where we'll find that it has been substituted by b. (Or conceivably cloned.) -} substId :: SimplEnv -> InId -> SimplSR -- Returns DoneEx only on a non-Var expression substId (SimplEnv { seInScope = in_scope, seIdSubst = ids }) v = case lookupVarEnv ids v of -- Note [Global Ids in the substitution] Nothing -> DoneId (refineFromInScope in_scope v) Just (DoneId v) -> DoneId (refineFromInScope in_scope v) Just res -> res -- DoneEx non-var, or ContEx -- Get the most up-to-date thing from the in-scope set -- Even though it isn't in the substitution, it may be in -- the in-scope set with better IdInfo. -- -- See also Note [In-scope set as a substitution] in GHC.Core.Opt.Simplify. 
refineFromInScope :: InScopeSet -> Var -> Var refineFromInScope in_scope v | isLocalId v = case lookupInScope in_scope v of Just v' -> v' Nothing -> pprPanic "refineFromInScope" (ppr in_scope $$ ppr v) -- c.f #19074 for a subtle place where this went wrong | otherwise = v lookupRecBndr :: SimplEnv -> InId -> OutId -- Look up an Id which has been put into the envt by simplRecBndrs, -- but where we have not yet done its RHS lookupRecBndr (SimplEnv { seInScope = in_scope, seIdSubst = ids }) v = case lookupVarEnv ids v of Just (DoneId v) -> v Just _ -> pprPanic "lookupRecBndr" (ppr v) Nothing -> refineFromInScope in_scope v {- ************************************************************************ * * \section{Substituting an Id binder} * * ************************************************************************ These functions are in the monad only so that they can be made strict via seq. Note [Return type for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (join j :: Char -> Int -> Int) 77 ( j x = \y. y + ord x ) (in case v of ) ( A -> j 'x' ) ( B -> j 'y' ) ( C -> ) The simplifier pushes the "apply to 77" continuation inwards to give join j :: Char -> Int j x = (\y. y + ord x) 77 in case v of A -> j 'x' B -> j 'y' C -> 77 Notice that the "apply to 77" continuation went into the RHS of the join point. And that meant that the return type of the join point changed!! That's why we pass res_ty into simplNonRecJoinBndr, and substIdBndr takes a (Just res_ty) argument so that it knows to do the type-changing thing. See also Note [Scaling join point arguments]. 
-} simplBinders :: SimplEnv -> [InBndr] -> SimplM (SimplEnv, [OutBndr]) simplBinders !env bndrs = mapAccumLM simplBinder env bndrs ------------- simplBinder :: SimplEnv -> InBndr -> SimplM (SimplEnv, OutBndr) -- Used for lambda and case-bound variables -- Clone Id if necessary, substitute type -- Return with IdInfo already substituted, but (fragile) occurrence info zapped -- The substitution is extended only if the variable is cloned, because -- we *don't* need to use it to track occurrence info. simplBinder !env bndr | isTyVar bndr = do { let (env', tv) = substTyVarBndr env bndr ; seqTyVar tv `seq` return (env', tv) } | otherwise = do { let (env', id) = substIdBndr env bndr ; seqId id `seq` return (env', id) } --------------- simplNonRecBndr :: SimplEnv -> InBndr -> SimplM (SimplEnv, OutBndr) -- A non-recursive let binder simplNonRecBndr !env id -- See Note [Bangs in the Simplifier] = do { let (!env1, id1) = substIdBndr env id ; seqId id1 `seq` return (env1, id1) } --------------- simplRecBndrs :: SimplEnv -> [InBndr] -> SimplM SimplEnv -- Recursive let binders simplRecBndrs env@(SimplEnv {}) ids -- See Note [Bangs in the Simplifier] = assert (all (not . 
isJoinId) ids) $ do { let (!env1, ids1) = mapAccumL substIdBndr env ids ; seqIds ids1 `seq` return env1 } --------------- substIdBndr :: SimplEnv -> InBndr -> (SimplEnv, OutBndr) -- Might be a coercion variable substIdBndr env bndr | isCoVar bndr = substCoVarBndr env bndr | otherwise = substNonCoVarIdBndr env bndr --------------- substNonCoVarIdBndr :: SimplEnv -> InBndr -- Env and binder to transform -> (SimplEnv, OutBndr) -- Clone Id if necessary, substitute its type -- Return an Id with its -- * Type substituted -- * UnfoldingInfo, Rules, WorkerInfo zapped -- * Fragile OccInfo (only) zapped: Note [Robust OccInfo] -- * Robust info, retained especially arity and demand info, -- so that they are available to occurrences that occur in an -- earlier binding of a letrec -- -- For the robust info, see Note [Arity robustness] -- -- Augment the substitution if the unique changed -- Extend the in-scope set with the new Id -- -- Similar to GHC.Core.Subst.substIdBndr, except that -- the type of id_subst differs -- all fragile info is zapped substNonCoVarIdBndr env id = subst_id_bndr env id (\x -> x) -- Inline to make the (OutId -> OutId) function a known call. -- This is especially important for `substNonCoVarIdBndr` which -- passes an identity lambda. {-# INLINE subst_id_bndr #-} subst_id_bndr :: SimplEnv -> InBndr -- Env and binder to transform -> (OutId -> OutId) -- Adjust the type -> (SimplEnv, OutBndr) subst_id_bndr env@(SimplEnv { seInScope = in_scope, seIdSubst = id_subst }) old_id adjust_type = assertPpr (not (isCoVar old_id)) (ppr old_id) (env { seInScope = new_in_scope, seIdSubst = new_subst }, new_id) -- It's important that both seInScope and seIdSubst are updated with -- the new_id, /after/ applying adjust_type. That's why adjust_type -- is done here. 
If we did adjust_type in simplJoinBndr (the only -- place that gives a non-identity adjust_type) we'd have to fiddle -- afresh with both seInScope and seIdSubst where -- See Note [Bangs in the Simplifier] !id1 = uniqAway in_scope old_id !id2 = substIdType env id1 !id3 = zapFragileIdInfo id2 -- Zaps rules, worker-info, unfolding -- and fragile OccInfo !new_id = adjust_type id3 -- Extend the substitution if the unique has changed, -- or there's some useful occurrence information -- See the notes with substTyVarBndr for the delSubstEnv !new_subst | new_id /= old_id = extendVarEnv id_subst old_id (DoneId new_id) | otherwise = delVarEnv id_subst old_id !new_in_scope = in_scope `extendInScopeSet` new_id ------------------------------------ seqTyVar :: TyVar -> () seqTyVar b = b `seq` () seqId :: Id -> () seqId id = seqType (idType id) `seq` idInfo id `seq` () seqIds :: [Id] -> () seqIds [] = () seqIds (id:ids) = seqId id `seq` seqIds ids {- Note [Arity robustness] ~~~~~~~~~~~~~~~~~~~~~~~ We *do* transfer the arity from the in_id of a let binding to the out_id so that its arity is visible in its RHS. Examples: * f = \x y. let g = \p q. f (p+q) in Just (...g..g...) Here we want to give `g` arity 3 and eta-expand. `findRhsArity` will have a hard time figuring that out when `f` only has arity 0 in its own RHS. * f = \x y. ....(f `seq` blah).... We want to drop the seq. * f = \x. g (\y. f y) You'd think we could eta-reduce `\y. f y` to `f` here. And indeed, that is true. Unfortunately, it is not sound in general to eta-reduce in f's RHS. Example: `f = \x. f x`. See Note [Eta reduction in recursive RHSs] for how we prevent that. Note [Robust OccInfo] ~~~~~~~~~~~~~~~~~~~~~ It's important that we *do* retain the loop-breaker OccInfo, because that's what stops the Id getting inlined infinitely, in the body of the letrec. 
-} {- ********************************************************************* * * Join points * * ********************************************************************* -} simplNonRecJoinBndr :: SimplEnv -> InBndr -> Mult -> OutType -> SimplM (SimplEnv, OutBndr) -- A non-recursive let binder for a join point; -- context being pushed inward may change the type -- See Note [Return type for join points] simplNonRecJoinBndr env id mult res_ty = do { let (env1, id1) = simplJoinBndr mult res_ty env id ; seqId id1 `seq` return (env1, id1) } simplRecJoinBndrs :: SimplEnv -> [InBndr] -> Mult -> OutType -> SimplM SimplEnv -- Recursive let binders for join points; -- context being pushed inward may change types -- See Note [Return type for join points] simplRecJoinBndrs env@(SimplEnv {}) ids mult res_ty = assert (all isJoinId ids) $ do { let (env1, ids1) = mapAccumL (simplJoinBndr mult res_ty) env ids ; seqIds ids1 `seq` return env1 } --------------- simplJoinBndr :: Mult -> OutType -> SimplEnv -> InBndr -> (SimplEnv, OutBndr) simplJoinBndr mult res_ty env id = subst_id_bndr env id (adjustJoinPointType mult res_ty) --------------- adjustJoinPointType :: Mult -> Type -- New result type -> Id -- Old join-point Id -> Id -- Adjusted join-point Id -- (adjustJoinPointType mult new_res_ty join_id) does two things: -- -- 1. Set the return type of the join_id to new_res_ty -- See Note [Return type for join points] -- -- 2. Adjust the multiplicity of arrows in join_id's type, as -- directed by 'mult'. See Note [Scaling join point arguments] -- -- INVARIANT: If any of the first n binders are foralls, those tyvars -- cannot appear in the original result type. See isValidJoinPointType. 
adjustJoinPointType mult new_res_ty join_id = assert (isJoinId join_id) $ setIdType join_id new_join_ty where join_arity = idJoinArity join_id orig_ty = idType join_id res_torc = typeTypeOrConstraint new_res_ty :: TypeOrConstraint new_join_ty = go join_arity orig_ty :: Type go :: JoinArity -> Type -> Type go n ty | n == 0 = new_res_ty | Just (arg_bndr, body_ty) <- splitPiTy_maybe ty , let body_ty' = go (n-1) body_ty = case arg_bndr of Named b -> mkForAllTy b body_ty' Anon (Scaled arg_mult arg_ty) af -> mkFunTy af' arg_mult' arg_ty body_ty' where -- Using "!": See Note [Bangs in the Simplifier] -- mkMultMul: see Note [Scaling join point arguments] !arg_mult' = arg_mult `mkMultMul` mult -- the new_res_ty might be ConstraintLike while the original -- one was TypeLike. So we may need to adjust the FunTyFlag. -- (see #23952) !af' = mkFunTyFlag (funTyFlagArgTypeOrConstraint af) res_torc | otherwise = pprPanic "adjustJoinPointType" (ppr join_arity <+> ppr orig_ty) {- Note [Scaling join point arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a join point which is linear in its variable, in some context E: E[join j :: a %1 -> a j x = x in case v of A -> j 'x' B -> ] The simplifier changes to: join j :: a %1 -> a j x = E[x] in case v of A -> j 'x' B -> E[] If E uses its argument in a nonlinear way (e.g. a case['Many]), then this is wrong: the join point has to change its type to a -> a. Otherwise, we'd get a linearity error. See also Note [Return type for join points] and Note [Join points and case-of-case]. 
-} {- ************************************************************************ * * Impedance matching to type substitution * * ************************************************************************ -} getSubst :: SimplEnv -> Subst getSubst (SimplEnv { seInScope = in_scope, seTvSubst = tv_env, seCvSubst = cv_env }) = mkTCvSubst in_scope tv_env cv_env substTy :: HasDebugCallStack => SimplEnv -> Type -> Type substTy env ty = Type.substTy (getSubst env) ty substTyVar :: SimplEnv -> TyVar -> Type substTyVar env tv = Type.substTyVar (getSubst env) tv substTyVarBndr :: SimplEnv -> TyVar -> (SimplEnv, TyVar) substTyVarBndr env tv = case Type.substTyVarBndr (getSubst env) tv of (Subst in_scope' _ tv_env' cv_env', tv') -> (env { seInScope = in_scope', seTvSubst = tv_env', seCvSubst = cv_env' }, tv') substCoVar :: SimplEnv -> CoVar -> Coercion substCoVar env tv = Coercion.substCoVar (getSubst env) tv substCoVarBndr :: SimplEnv -> CoVar -> (SimplEnv, CoVar) substCoVarBndr env cv = case Coercion.substCoVarBndr (getSubst env) cv of (Subst in_scope' _ tv_env' cv_env', cv') -> (env { seInScope = in_scope', seTvSubst = tv_env', seCvSubst = cv_env' }, cv') substCo :: SimplEnv -> Coercion -> Coercion substCo env co = Coercion.substCo (getSubst env) co ------------------ substIdType :: SimplEnv -> Id -> Id substIdType (SimplEnv { seInScope = in_scope, seTvSubst = tv_env, seCvSubst = cv_env }) id | (isEmptyVarEnv tv_env && isEmptyVarEnv cv_env) || no_free_vars = id | otherwise = Id.updateIdTypeAndMult (Type.substTyUnchecked subst) id -- The tyCoVarsOfType is cheaper than it looks -- because we cache the free tyvars of the type -- in a Note in the id's type itself where no_free_vars = noFreeVarsOfType old_ty && noFreeVarsOfType old_w subst = Subst in_scope emptyIdSubstEnv tv_env cv_env old_ty = idType id old_w = varMult id ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/Inline.hs0000644000000000000000000007401207346545000022712 0ustar0000000000000000{- (c) The University 
of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1994-1998 This module contains inlining logic used by the simplifier. -} module GHC.Core.Opt.Simplify.Inline ( -- * Cheap and cheerful inlining checks. couldBeSmallEnoughToInline, smallEnoughToInline, activeUnfolding, -- * The smart inlining decisions are made by callSiteInline callSiteInline, CallCtxt(..), ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Core.Opt.Simplify.Env import GHC.Core import GHC.Core.Unfold import GHC.Core.FVs( exprFreeIds ) import GHC.Types.Id import GHC.Types.Var.Env( InScopeSet, lookupInScope ) import GHC.Types.Var.Set import GHC.Types.Basic ( Arity, RecFlag(..), isActive ) import GHC.Utils.Logger import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Types.Name import Data.List (isPrefixOf) {- ************************************************************************ * * \subsection[considerUnfolding]{Given all the info, do (not) do the unfolding} * * ************************************************************************ We use 'couldBeSmallEnoughToInline' to avoid exporting inlinings that we ``couldn't possibly use'' on the other side. Can be overridden w/ flaggery. Just the same as smallEnoughToInline, except that it has no actual arguments. 
-} couldBeSmallEnoughToInline :: UnfoldingOpts -> Int -> CoreExpr -> Bool couldBeSmallEnoughToInline opts threshold rhs = case sizeExpr opts threshold [] body of TooBig -> False _ -> True where (_, body) = collectBinders rhs ---------------- smallEnoughToInline :: UnfoldingOpts -> Unfolding -> Bool smallEnoughToInline opts (CoreUnfolding {uf_guidance = guidance}) = case guidance of UnfIfGoodArgs {ug_size = size} -> size <= unfoldingUseThreshold opts UnfWhen {} -> True UnfNever -> False smallEnoughToInline _ _ = False {- ************************************************************************ * * \subsection{callSiteInline} * * ************************************************************************ This is the key function. It decides whether to inline a variable at a call site callSiteInline is used at call sites, so it is a bit more generous. It's a very important function that embodies lots of heuristics. A non-WHNF can be inlined if it doesn't occur inside a lambda, and occurs exactly once or occurs once in each branch of a case and is small If the thing is in WHNF, there's no danger of duplicating work, so we can inline if it occurs once, or is small NOTE: we don't want to inline top-level functions that always diverge. It just makes the code bigger. 
Tt turns out that the convenient way to prevent them inlining is to give them a NOINLINE pragma, which we do in StrictAnal.addStrictnessInfoToTopId -} callSiteInline :: SimplEnv -> Logger -> Id -- The Id -> Bool -- True if there are no arguments at all (incl type args) -> [ArgSummary] -- One for each value arg; True if it is interesting -> CallCtxt -- True <=> continuation is interesting -> Maybe CoreExpr -- Unfolding, if any callSiteInline env logger id lone_variable arg_infos cont_info = case idUnfolding id of -- idUnfolding checks for loop-breakers, returning NoUnfolding -- Things with an INLINE pragma may have an unfolding *and* -- be a loop breaker (maybe the knot is not yet untied) CoreUnfolding { uf_tmpl = unf_template , uf_cache = unf_cache , uf_guidance = guidance } | active_unf -> tryUnfolding env logger id lone_variable arg_infos cont_info unf_template unf_cache guidance | otherwise -> traceInline logger uf_opts id "Inactive unfolding:" (ppr id) Nothing NoUnfolding -> Nothing BootUnfolding -> Nothing OtherCon {} -> Nothing DFunUnfolding {} -> Nothing -- Never unfold a DFun where uf_opts = seUnfoldingOpts env active_unf = activeUnfolding (seMode env) id activeUnfolding :: SimplMode -> Id -> Bool activeUnfolding mode id | isCompulsoryUnfolding (realIdUnfolding id) = True -- Even sm_inline can't override compulsory unfoldings | otherwise = isActive (sm_phase mode) (idInlineActivation id) && sm_inline mode -- `or` isStableUnfolding (realIdUnfolding id) -- Inline things when -- (a) they are active -- (b) sm_inline says so, except that for stable unfoldings -- (ie pragmas) we inline anyway -- | Report the inlining of an identifier's RHS to the user, if requested. traceInline :: Logger -> UnfoldingOpts -> Id -> String -> SDoc -> a -> a traceInline logger opts inline_id str doc result -- We take care to ensure that doc is used in only one branch, ensuring that -- the simplifier can push its allocation into the branch. 
See Note [INLINE -- conditional tracing utilities]. | enable = logTraceMsg logger str doc result | otherwise = result where enable | logHasDumpFlag logger Opt_D_dump_verbose_inlinings = True | Just prefix <- unfoldingReportPrefix opts = prefix `isPrefixOf` occNameString (getOccName inline_id) | otherwise = False {-# INLINE traceInline #-} -- see Note [INLINE conditional tracing utilities] {- Note [Avoid inlining into deeply nested cases] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Also called "exponential inlining". Consider a function f like this: (#18730) f arg1 arg2 = case ... ... -> g arg1 ... -> g arg2 This function is small. So should be safe to inline. However sometimes this doesn't quite work out like that. Consider this code: f1 arg1 arg2 ... = ... case _foo of alt1 -> ... f2 arg1 ... alt2 -> ... f2 arg2 ... f2 arg1 arg2 ... = ... case _foo of alt1 -> ... f3 arg1 ... alt2 -> ... f3 arg2 ... f3 arg1 arg2 ... = ... ... repeats up to n times. And then f1 is applied to some arguments: foo = ... f1 ... Initially f2..fn are not interesting to inline so we don't. However we see that f1 is applied to interesting args. So it's an obvious choice to inline those: foo = ... case _foo of alt1 -> ... f2 ... alt2 -> ... f2 ... As a result we go and inline f2 both mentions of f2 in turn are now applied to interesting arguments and f2 is small: foo = ... case _foo of alt1 -> ... case _foo of alt1 -> ... f3 ... alt2 -> ... f3 ... alt2 -> ... case _foo of alt1 -> ... f3 ... alt2 -> ... f3 ... The same thing happens for each binding up to f_n, duplicating the amount of inlining done in each step. Until at some point we are either done or run out of simplifier ticks/RAM. This pattern happened #18730. To combat this we introduce one more heuristic when weighing inlining decision. We keep track of a "case-depth". Which increases each time we look inside a case expression with more than one alternative. 
We then apply a penalty to inlinings based on the case-depth at which they would be inlined. Bounding the number of inlinings in such a scenario. The heuristic can be tuned in two ways: * We can ignore the first n levels of case nestings for inlining decisions using -funfolding-case-threshold. * The penalty grows linear with the depth. It's computed as size*(depth-threshold)/scaling. Scaling can be set with -funfolding-case-scaling. Reflections and wrinkles * See also Note [Do not add unfoldings to join points at birth] in GHC.Core.Opt.Simplify.Iteration * The total case depth is really the wrong thing; it will inhibit inlining of a local function, just because there is some giant case nest further out. What we want is the /difference/ in case-depth between the binding site and the call site. That could be done quite easily by adding the case-depth to the Unfolding of the function. * What matters more than /depth/ is total /width/; that is how many alternatives are in the tree. We could perhaps multiply depth by width at each case expression. * There might be a case nest with many alternatives, but the function is called in only a handful of them. So maybe we should ignore case-depth, and instead penalise funtions that are called many times -- after all, inlining them bloats code. But in the scenario above, we are simplifying an inlined fuction, without doing a global occurrence analysis each time. So if we based the penalty on multiple occurences, we should /also/ add a penalty when simplifying an already-simplified expression. We do track this (seInlineDepth) but currently we barely use it. An advantage of using occurrences+inline depth is that it'll work when no case expressions are involved. See #15488. * Test T18730 did not involve join points. But join points are very prone to the same kind of thing. For exampe in #13253, and several related tickets, we got an exponential blowup in code size from a program that looks like this. 
let j1a x = case f y of { True -> p; False -> q } j1b x = case f y of { True -> q; False -> p } j2a x = case f (y+1) of { True -> j1a x; False -> j1b x} j2b x = case f (y+1) of { True -> j1b x; False -> j1a x} ... in case f (y+10) of { True -> j10a 7; False -> j10b 8 } The first danger is this: in Simplifier iteration 1 postInlineUnconditionally inlines the last functions, j10a and j10b (they are both small). Now we have two calls to j9a and two to j9b. In the next Simplifer iteration, postInlineUnconditionally inlines all four of these calls, leaving four calls to j8a and j8b. Etc. Happily, this probably /won't/ happen because the Simplifier works top down, so it'll inline j1a/j1b into j2a/j2b, which will make the latter bigger; so the process will stop. But we still need to stop the inline cascade described at the head of this Note. Some guidance on setting these defaults: * A low threshold (<= 2) is needed to prevent exponential cases from spiraling out of control. We picked 2 for no particular reason. * Scaling the penalty by any more than 30 means the reproducer from T18730 won't compile even with reasonably small values of n. Instead it will run out of runs/ticks. This means to positively affect the reproducer a scaling <= 30 is required. * A scaling of >= 15 still causes a few very large regressions on some nofib benchmarks. (+80% for gc/fulsom, +90% for real/ben-raytrace, +20% for spectral/fibheaps) * A scaling of >= 25 showed no regressions on nofib. However it showed a number of (small) regression for compiler perf benchmarks. The end result is that we are settling for a scaling of 30, with a threshold of 2. This gives us minimal compiler perf regressions. No nofib runtime regressions and will still avoid this pattern sometimes. This is a "safe" default, where we err on the side of compiler blowup instead of risking runtime regressions. 
For cases where the default falls short the flag can be changed to allow more/less inlining as needed on a per-module basis. -} tryUnfolding :: SimplEnv -> Logger -> Id -> Bool -> [ArgSummary] -> CallCtxt -> CoreExpr -> UnfoldingCache -> UnfoldingGuidance -> Maybe CoreExpr tryUnfolding env logger id lone_variable arg_infos cont_info unf_template unf_cache guidance = case guidance of UnfNever -> traceInline logger opts id str (text "UnfNever") Nothing UnfWhen { ug_arity = uf_arity, ug_unsat_ok = unsat_ok, ug_boring_ok = boring_ok } | enough_args && (boring_ok || some_benefit || unfoldingVeryAggressive opts) -- See Note [INLINE for small functions] (3) -> traceInline logger opts id str (mk_doc some_benefit empty True) (Just unf_template) | otherwise -> traceInline logger opts id str (mk_doc some_benefit empty False) Nothing where some_benefit = calc_some_benefit uf_arity True enough_args = (n_val_args >= uf_arity) || (unsat_ok && n_val_args > 0) UnfIfGoodArgs { ug_args = arg_discounts, ug_res = res_discount, ug_size = size } | isJoinId id, small_enough -> inline_join_point | unfoldingVeryAggressive opts -> yes | is_wf, some_benefit, small_enough -> yes | otherwise -> no where yes = traceInline logger opts id str (mk_doc some_benefit extra_doc True) (Just unf_template) no = traceInline logger opts id str (mk_doc some_benefit extra_doc False) Nothing some_benefit = calc_some_benefit (length arg_discounts) False -- depth_penalty: see Note [Avoid inlining into deeply nested cases] depth_threshold = unfoldingCaseThreshold opts depth_scaling = unfoldingCaseScaling opts depth_penalty | case_depth <= depth_threshold = 0 | otherwise = (size * (case_depth - depth_threshold)) `div` depth_scaling adjusted_size = size + depth_penalty - discount small_enough = adjusted_size <= unfoldingUseThreshold opts discount = computeDiscount arg_discounts res_discount arg_infos cont_info extra_doc = vcat [ ppWhen (isJoinId id) $ text "join" <+> fsep [ ppr (v, hasCoreUnfolding (idUnfolding v) 
, fmap (isEvaldUnfolding . idUnfolding) (lookupInScope in_scope v) , is_more_evald in_scope v) | v <- vselems (exprFreeIds unf_template) ] , text "depth based penalty =" <+> int depth_penalty , text "adjusted size =" <+> int adjusted_size ] inline_join_point -- See Note [Inlining join points] | or (zipWith scrut_arg arg_discounts arg_infos) = yes | anyVarSet (is_more_evald in_scope) $ exprFreeIds unf_template = yes | otherwise = no -- scrut_arg is True if the function body has a discount and the arg is a value scrut_arg disc ValueArg = disc > 0 scrut_arg _ _ = False where opts = seUnfoldingOpts env case_depth = seCaseDepth env inline_depth = seInlineDepth env in_scope = seInScope env -- Unpack the UnfoldingCache lazily because it may not be needed, and all -- its fields are strict; so evaluating unf_cache at all forces all the -- isWorkFree etc computations to take place. That risks wasting effort for -- Ids that are never going to inline anyway. -- See Note [UnfoldingCache] in GHC.Core UnfoldingCache{ uf_is_work_free = is_wf, uf_expandable = is_exp } = unf_cache mk_doc some_benefit extra_doc yes_or_no = vcat [ text "arg infos" <+> ppr arg_infos , text "interesting continuation" <+> ppr cont_info , text "some_benefit" <+> ppr some_benefit , text "is exp:" <+> ppr is_exp , text "is work-free:" <+> ppr is_wf , text "guidance" <+> ppr guidance , text "case depth =" <+> int case_depth , text "inline depth =" <+> int inline_depth , extra_doc , text "ANSWER =" <+> if yes_or_no then text "YES" else text "NO"] ctx = log_default_dump_context (logFlags logger) str = "Considering inlining: " ++ showSDocOneLine ctx (ppr id) n_val_args = length arg_infos -- some_benefit is used when the RHS is small enough -- and the call has enough (or too many) value -- arguments (ie n_val_args >= arity). 
But there must -- be *something* interesting about some argument, or the -- result context, to make it worth inlining calc_some_benefit :: Arity -> Bool -> Bool -- The Arity is the number of args -- expected by the unfolding calc_some_benefit uf_arity is_inline | not saturated = interesting_args -- Under-saturated -- Note [Unsaturated applications] | otherwise = interesting_args -- Saturated or over-saturated || interesting_call where saturated = n_val_args >= uf_arity over_saturated = n_val_args > uf_arity interesting_args = any nonTriv arg_infos -- NB: (any nonTriv arg_infos) looks at the -- over-saturated args too which is "wrong"; -- but if over-saturated we inline anyway. interesting_call | over_saturated = True | otherwise = case cont_info of CaseCtxt -> not (lone_variable && is_exp) -- Note [Lone variables] ValAppCtxt -> True -- Note [Cast then apply] RuleArgCtxt -> uf_arity > 0 -- See Note [RHS of lets] DiscArgCtxt -> uf_arity > 0 -- Note [Inlining in ArgCtxt] RhsCtxt NonRecursive | is_inline -> uf_arity > 0 -- See Note [RHS of lets] _other -> False -- See Note [Nested functions] vselems :: VarSet -> [Var] vselems s = nonDetStrictFoldVarSet (\v vs -> v : vs) [] s is_more_evald :: InScopeSet -> Id -> Bool -- See Note [Inlining join points] is_more_evald in_scope v | Just v1 <- lookupInScope in_scope v , idUnfolding v1 `isBetterUnfoldingThan` idUnfolding v = True | otherwise = False {- Note [RHS of lets] ~~~~~~~~~~~~~~~~~~~~~ When the call is the argument of a function with a RULE, or the RHS of a let, we are a little bit keener to inline (in tryUnfolding). For example f y = (y,y,y) g y = let x = f y in ...(case x of (a,b,c) -> ...) ... We'd inline 'f' if the call was in a case context, and it kind-of-is, only we can't see it. Also x = f v could be expensive whereas x = case v of (a,b) -> a is patently cheap and may allow more eta expansion. So, in `interesting_call` in `tryUnfolding`, we treat the RHS of a /non-recursive/ let as not-totally-boring. 
A /recursive/ let isn't going be inlined so there is much less point. Hence the (only reason for the) RecFlag in RhsCtxt We inline only if `f` has an `UnfWhen` guidance. I found that being more eager led to fruitless inlining. See Note [Seq is boring] wrinkle (SB1) in GHC.Core.Opt.Simplify.Utils. Note [Inlining join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general we /do not/ want to inline join points /even if they are small/. See Note [Duplicating join points] in GHC.Core.Opt.Simplify.Iteration. But, assuming it is small, there are various times when we /do/ want to inline a (non-recursive) join point. Namely, if either of these hold: (1) A /scrutinised/ argument (non-zero discount) has a /ValueArg/ info. Inlining will give some benefit. (2) A free variable of the RHS is * Is /not/ evaluated at the join point defn site * Is evaluated at the join point call site. This is the is_more_evald predicate. (1) is fairly obvious but (2) is less so. Here is the code for `integerGT` without (2): integerGt = \ (x :: Integer) (y :: Integer) -> join fail _ = case x of { IS x1 -> case y of { IS y1 -> case <# x1 y1 of _DEFAULT -> case ==# x1 y1 of DEFAULT -> True; 1# -> False 1# -> False IP ds1 -> False IN ds1 -> True IP x1 -> case y of { _DEFAULT -> True; IP y1 -> case bigNatCompare x1 y1 of _DEFAULT -> False; GT -> True IN x1 -> case y of { _DEFAULT -> False; IN y1 -> case bigNatCompare y1 x1 of _DEFAULT -> False; GT -> True in case x of { _DEFAULT -> jump fail GHC.Prim.(##); IS x1 -> case y of { _DEFAULT -> jump fail GHC.Prim.(##); IS y1 -> tagToEnum# @Bool (># x1 y1) If we inline `fail` we get /much/ better code. The only clue is that `x` and `y` (a) are not evaluated at the definition site, and (b) are evaluated at the call site. This predicate is `isBetterUnfoldingThan`. You might think that the variable should also be /scrutinised/ in the join-point RHS, but here are two reasons for not taking that into account. 
First, we see code somewhat like this in imaginary/wheel-sieve1: let x = in join $j = (x,y) in case z of A -> case x of P -> $j Q -> blah B -> (x,x) C -> True Here `x` can't be duplicated into the branches becuase it is used in both the join point and the A branch. But if we inline $j we get let x = in case z of A -> case x of x' P -> (x', y) Q -> blah B -> x C -> True and now we /can/ duplicate x into the branches, at which point: * it is used strictly in the A branch (evaluated, but no thunk) * it is used lazily in the B branch (still a thunk) * it is not used at all in the C branch (no thunk) Second, spectral/treejoin gets a big win from SpecConstr due to evaluated-ness. Something like this: join $j x = ...(foo fv)... in case fv of I# x -> ... jump $j True ... If we inline $j, SpecConstr sees a call (foo (I# x)) and specialises. Note [Unsaturated applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When a call is not saturated, we *still* inline if one of the arguments has interesting structure. That's sometimes very important. A good example is the Ord instance for Bool in Base: Rec { $fOrdBool =GHC.Classes.D:Ord @ Bool ... $cmin_ajX $cmin_ajX [Occ=LoopBreaker] :: Bool -> Bool -> Bool $cmin_ajX = GHC.Classes.$dmmin @ Bool $fOrdBool } But the defn of GHC.Classes.$dmmin is: $dmmin :: forall a. GHC.Classes.Ord a => a -> a -> a {- Arity: 3, HasNoCafRefs, Strictness: SLL, Unfolding: (\ @ a $dOrd :: GHC.Classes.Ord a x :: a y :: a -> case @ a GHC.Classes.<= @ a $dOrd x y of wild { GHC.Types.False -> y GHC.Types.True -> x }) -} We *really* want to inline $dmmin, even though it has arity 3, in order to unravel the recursion. Note [Things to watch] ~~~~~~~~~~~~~~~~~~~~~~ * { y = I# 3; x = y `cast` co; ...case (x `cast` co) of ... } Assume x is exported, so not inlined unconditionally. Then we want x to inline unconditionally; no reason for it not to, and doing so avoids an indirection. * { x = I# 3; ....f x.... } Make sure that x does not inline unconditionally! 
Lest we get extra allocation. Note [Nested functions] ~~~~~~~~~~~~~~~~~~~~~~~ At one time we treated a call of a non-top-level function as "interesting" (regardless of how boring the context) in the hope that inlining it would eliminate the binding, and its allocation. Specifically, in the default case of interesting_call we had _other -> not is_top && uf_arity > 0 But actually postInlineUnconditionally does some of this and overall it makes virtually no difference to nofib. So I simplified away this special case Note [Cast then apply] ~~~~~~~~~~~~~~~~~~~~~~ Consider myIndex = __inline_me ( (/\a. ) |> co ) co :: (forall a. a -> a) ~ (forall a. T a) ... /\a.\x. case ((myIndex a) |> sym co) x of { ... } ... We need to inline myIndex to unravel this; but the actual call (myIndex a) has no value arguments. The ValAppCtxt gives it enough incentive to inline. Note [Inlining in ArgCtxt] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The condition (arity > 0) here is very important, because otherwise we end up inlining top-level stuff into useless places; eg x = I# 3# f = \y. g x This can make a very big difference: it adds 16% to nofib 'integer' allocs, and 20% to 'power'. At one stage I replaced this condition by 'True' (leading to the above slow-down). The motivation was test eyeball/inline1.hs; but that seems to work ok now. NOTE: arguably, we should inline in ArgCtxt only if the result of the call is at least CONLIKE. At least for the cases where we use ArgCtxt for the RHS of a 'let', we only profit from the inlining if we get a CONLIKE thing (modulo lets). Note [Lone variables] ~~~~~~~~~~~~~~~~~~~~~ See also Note [Interaction of exprIsWorkFree and lone variables] which appears below The "lone-variable" case is important. I spent ages messing about with unsatisfactory variants, but this is nice. 
The idea is that if a variable appears all alone as an arg of lazy fn, or rhs BoringCtxt as scrutinee of a case CaseCtxt as arg of a fn ArgCtxt AND it is bound to a cheap expression then we should not inline it (unless there is some other reason, e.g. it is the sole occurrence). That is what is happening at the use of 'lone_variable' in 'interesting_call'. Why? At least in the case-scrutinee situation, turning let x = (a,b) in case x of y -> ... into let x = (a,b) in case (a,b) of y -> ... and thence to let x = (a,b) in let y = (a,b) in ... is bad if the binding for x will remain. Another example: I discovered that strings were getting inlined straight back into applications of 'error' because the latter is strict. s = "foo" f = \x -> ...(error s)... Fundamentally such contexts should not encourage inlining because, provided the RHS is "expandable" (see Note [exprIsExpandable] in GHC.Core.Utils) the context can ``see'' the unfolding of the variable (e.g. case or a RULE) so there's no gain. However, watch out: * Consider this: foo = \n. [n]) {-# INLINE foo #-} bar = foo 20 {-# INLINE bar #-} baz = \n. case bar of { (m:_) -> m + n } Here we really want to inline 'bar' so that we can inline 'foo' and the whole thing unravels as it should obviously do. This is important: in the NDP project, 'bar' generates a closure data structure rather than a list. So the non-inlining of lone_variables should only apply if the unfolding is regarded as expandable; because that is when exprIsConApp_maybe looks through the unfolding. Hence the "&& is_exp" in the CaseCtxt branch of interesting_call * Even a type application or coercion isn't a lone variable. Consider case $fMonadST @ RealWorld of { :DMonad a b c -> c } We had better inline that sucker! The case won't see through it. For now, I'm treating treating a variable applied to types in a *lazy* context "lone". The motivating example was f = /\a. \x. BIG g = /\a. \y. 
h (f a) There's no advantage in inlining f here, and perhaps a significant disadvantage. Hence some_val_args in the Stop case Note [Interaction of exprIsWorkFree and lone variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The lone-variable test says "don't inline if a case expression scrutinises a lone variable whose unfolding is cheap". It's very important that, under these circumstances, exprIsConApp_maybe can spot a constructor application. So, for example, we don't consider let x = e in (x,x) to be cheap, and that's good because exprIsConApp_maybe doesn't think that expression is a constructor application. In the 'not (lone_variable && is_wf)' test, I used to test is_value rather than is_wf, which was utterly wrong, because the above expression responds True to exprIsHNF, which is what sets is_value. This kind of thing can occur if you have {-# INLINE foo #-} foo = let x = e in (x,x) which Roman did. -} computeDiscount :: [Int] -> Int -> [ArgSummary] -> CallCtxt -> Int computeDiscount arg_discounts res_discount arg_infos cont_info = 10 -- Discount of 10 because the result replaces the call -- so we count 10 for the function itself + 10 * length actual_arg_discounts -- Discount of 10 for each arg supplied, -- because the result replaces the call + total_arg_discount + res_discount' where actual_arg_discounts = zipWith mk_arg_discount arg_discounts arg_infos total_arg_discount = sum actual_arg_discounts mk_arg_discount _ TrivArg = 0 mk_arg_discount _ NonTrivArg = 10 mk_arg_discount discount ValueArg = discount res_discount' | LT <- arg_discounts `compareLength` arg_infos = res_discount -- Over-saturated | otherwise = case cont_info of BoringCtxt -> 0 CaseCtxt -> res_discount -- Presumably a constructor ValAppCtxt -> res_discount -- Presumably a function _ -> 40 `min` res_discount -- ToDo: this 40 `min` res_discount doesn't seem right -- for DiscArgCtxt it shouldn't matter because the function will -- get the arg discount for any non-triv arg -- for 
RuleArgCtxt we do want to be keener to inline; but not only -- constructor results -- for RhsCtxt I suppose that exposing a data con is good in general -- And 40 seems very arbitrary -- -- res_discount can be very large when a function returns -- constructors; but we only want to invoke that large discount -- when there's a case continuation. -- Otherwise we, rather arbitrarily, threshold it. Yuk. -- But we want to avoid inlining large functions that return -- constructors into contexts that are simply "interesting" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/Iteration.hs0000644000000000000000000062310607346545000023436 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 \section[Simplify]{The main module of the simplifier} -} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE MultiWayIf #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Core.Opt.Simplify.Iteration ( simplTopBinds, simplExpr, simplImpRules ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Core import GHC.Core.Opt.Simplify.Monad import GHC.Core.Opt.ConstantFold import GHC.Core.Type hiding ( substCo, substTy, substTyVar, extendTvSubst, extendCvSubst ) import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.Opt.Simplify.Env import GHC.Core.Opt.Simplify.Inline import GHC.Core.Opt.Simplify.Utils import GHC.Core.Opt.OccurAnal ( occurAnalyseExpr, zapLambdaBndrs, scrutOkForBinderSwap, BinderSwapDecision (..) ) import GHC.Core.Make ( FloatBind, mkImpossibleExpr, castBottomExpr ) import qualified GHC.Core.Make import GHC.Core.Coercion hiding ( substCo, substCoVar ) import GHC.Core.Reduction import GHC.Core.Coercion.Opt ( optCoercion ) import GHC.Core.FamInstEnv ( FamInstEnv, topNormaliseType_maybe ) import GHC.Core.DataCon ( DataCon, dataConWorkId, dataConRepStrictness , dataConRepArgTys, isUnboxedTupleDataCon , StrictnessMark (..), dataConWrapId_maybe ) import GHC.Core.Opt.Stats ( Tick(..) 
) import GHC.Core.Ppr ( pprCoreExpr ) import GHC.Core.Unfold import GHC.Core.Unfold.Make import GHC.Core.Utils import GHC.Core.Opt.Arity ( ArityType, exprArity, arityTypeBotSigs_maybe , pushCoTyArg, pushCoValArg, exprIsDeadEnd , typeArity, arityTypeArity, etaExpandAT ) import GHC.Core.SimpleOpt ( exprIsConApp_maybe, joinPointBinding_maybe, joinPointBindings_maybe ) import GHC.Core.FVs ( mkRuleInfo {- exprsFreeIds -} ) import GHC.Core.Rules ( lookupRule, getRules ) import GHC.Core.Multiplicity import GHC.Types.Literal ( litIsLifted ) --, mkLitInt ) -- temporarily commented out. See #8326 import GHC.Types.SourceText import GHC.Types.Id import GHC.Types.Id.Make ( seqId ) import GHC.Types.Id.Info import GHC.Types.Name ( mkSystemVarName, isExternalName, getOccFS ) import GHC.Types.Demand import GHC.Types.Unique ( hasKey ) import GHC.Types.Basic import GHC.Types.Tickish import GHC.Types.Var ( isTyCoVar ) import GHC.Builtin.Types.Prim( realWorldStatePrimTy ) import GHC.Builtin.Names( runRWKey, seqHashKey ) import GHC.Data.Maybe ( isNothing, orElse, mapMaybe ) import GHC.Data.FastString import GHC.Unit.Module ( moduleName ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Monad ( mapAccumLM, liftIO ) import GHC.Utils.Logger import GHC.Utils.Misc import Control.Monad {- The guts of the simplifier is in this module, but the driver loop for the simplifier is in GHC.Core.Opt.Pipeline Note [The big picture] ~~~~~~~~~~~~~~~~~~~~~~ The general shape of the simplifier is this: simplExpr :: SimplEnv -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) simplBind :: SimplEnv -> InBind -> SimplM (SimplFloats, SimplEnv) * SimplEnv contains - Simplifier mode - Ambient substitution - InScopeSet * SimplFloats contains - Let-floats (which includes ok-for-spec case-floats) - Join floats - InScopeSet (including all the floats) * Expressions simplExpr :: SimplEnv -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) The result of 
simplifying an /expression/ is (floats, expr) - A bunch of floats (let bindings, join bindings) - A simplified expression. The overall result is effectively (let floats in expr) * Bindings simplBind :: SimplEnv -> InBind -> SimplM (SimplFloats, SimplEnv) The result of simplifying a binding is - A bunch of floats, the last of which is the simplified binding There may be auxiliary bindings too; see prepareRhs - An environment suitable for simplifying the scope of the binding The floats may also be empty, if the binding is inlined unconditionally; in that case the returned SimplEnv will have an augmented substitution. The returned floats and env both have an in-scope set, and they are guaranteed to be the same. Eta expansion ~~~~~~~~~~~~~~ For eta expansion, we want to catch things like case e of (a,b) -> \x -> case a of (p,q) -> \y -> r If the \x was on the RHS of a let, we'd eta expand to bring the two lambdas together. And in general that's a good thing to do. Perhaps we should eta expand wherever we find a (value) lambda? Then the eta expansion at a let RHS can concentrate solely on the PAP case. Note [In-scope set as a substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As per Note [Lookups in in-scope set], an in-scope set can act as a substitution. Specifically, it acts as a substitution from variable to variables /with the same unique/. Why do we need this? Well, during the course of the simplifier, we may want to adjust inessential properties of a variable. For instance, when performing a beta-reduction, we change (\x. e) u ==> let x = u in e We typically want to add an unfolding to `x` so that it inlines to (the simplification of) `u`. We do that by adding the unfolding to the binder `x`, which is added to the in-scope set. When simplifying occurrences of `x` (every occurrence!), they are replaced by their “updated” version from the in-scope set, hence inherit the unfolding. This happens in `SimplEnv.substId`. Another example. 
Consider case x of y { Node a b -> ...y... ; Leaf v -> ...y... } In the Node branch want y's unfolding to be (Node a b); in the Leaf branch we want y's unfolding to be (Leaf v). We achieve this by adding the appropriate unfolding to y, and re-adding it to the in-scope set. See the calls to `addBinderUnfolding` in `Simplify.addAltUnfoldings` and elsewhere. It's quite convenient. This way we don't need to manipulate the substitution all the time: every update to a binder is automatically reflected to its bound occurrences. Note [Bangs in the Simplifier] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both SimplFloats and SimplEnv do *not* generally benefit from making their fields strict. I don't know if this is because of good use of laziness or unintended side effects like closures capturing more variables after WW has run. But the end result is that we keep these lazy, but force them in some places where we know it's beneficial to the compiler. Similarly environments returned from functions aren't *always* beneficial to force. In some places they would never be demanded so forcing them early increases allocation. In other places they almost always get demanded so it's worthwhile to force them early. Would it be better to through every allocation of e.g. SimplEnv and decide wether or not to make this one strict? Absolutely! Would be a good use of someones time? Absolutely not! I made these strict that showed up during a profiled build or which I noticed while looking at core for one reason or another. The result sadly is that we end up with "random" bangs in the simplifier where we sometimes force e.g. the returned environment from a function and sometimes we don't for the same function. Depending on the context around the call. The treatment is also not very consistent. I only added bangs where I saw it making a difference either in the core or benchmarks. 
Some patterns where it would be beneficial aren't covered as a consequence
as I neither have the time to go through all of the core and some cases are
too small to show up in benchmarks.

************************************************************************
*                                                                      *
\subsection{Bindings}
*                                                                      *
************************************************************************
-}

-- | Simplify the top-level bindings of a module.
-- See Note [The big picture]
simplTopBinds :: SimplEnv -> [InBind] -> SimplM (SimplFloats, SimplEnv)
simplTopBinds env0 binds0
  = do  {  -- Put all the top-level binders into scope at the start
           -- so that if a rewrite rule has unexpectedly brought
           -- anything into scope, then we don't get a complaint about that.
           -- It's rather as if the top-level binders were imported.
           -- See Note [Glomming] in "GHC.Core.Opt.OccurAnal".
           -- See Note [Bangs in the Simplifier]
          !env1 <- {-#SCC "simplTopBinds-simplRecBndrs" #-} simplRecBndrs env0 (bindersOfBinds binds0)
        ; (floats, env2) <- {-#SCC "simplTopBinds-simpl_binds" #-} simpl_binds env1 binds0
        ; freeTick SimplifierDone
        ; return (floats, env2) }
  where
        -- We need to track the zapped top-level binders, because
        -- they should have their fragile IdInfo zapped (notably occurrence info)
        -- That's why we run down binds and bndrs' simultaneously.
        -- simpl_binds :: SimplEnv -> [InBind] -> SimplM (SimplFloats, SimplEnv)
        --
        -- Simplify each top-level binding in turn, threading the
        -- environment through and strictly accumulating the floats
        -- (see Note [Bangs in the Simplifier]).
        simpl_binds env []           = return (emptyFloats env, env)
        simpl_binds env (bind:binds) = do { (float,  env1) <- simpl_bind env bind
                                          ; (floats, env2) <- simpl_binds env1 binds
                                            -- See Note [Bangs in the Simplifier]
                                          ; let !floats1 = float `addFloats` floats
                                          ; return (floats1, env2) }

        -- A top-level Rec group goes straight to simplRecBind; a
        -- top-level NonRec first gets its (substituted) rules attached
        -- via addBndrRules, then goes to simplRecOrTopPair.
        simpl_bind env (Rec pairs)
          = simplRecBind env (BC_Let TopLevel Recursive) pairs
        simpl_bind env (NonRec b r)
          = do { let bind_cxt = BC_Let TopLevel NonRecursive
               ; (env', b') <- addBndrRules env b (lookupRecBndr env b) bind_cxt
               ; simplRecOrTopPair env' bind_cxt b b' r }

{-
************************************************************************
*                                                                      *
                        Lazy bindings
*                                                                      *
************************************************************************

simplRecBind is used for
        * recursive bindings only
-}

-- | Simplify a recursive group of bindings: first attach the
-- (substituted) rules to every binder, then simplify each pair in
-- turn inside 'enterRecGroupRHSs', and wrap the results into a
-- single recursive float with 'mkRecFloats'.
simplRecBind :: SimplEnv -> BindContext
             -> [(InId, InExpr)]
             -> SimplM (SimplFloats, SimplEnv)
simplRecBind env0 bind_cxt pairs0
  = do  { (env1, triples) <- mapAccumLM add_rules env0 pairs0
        ; let new_bndrs = map sndOf3 triples
        ; (rec_floats, env2) <- enterRecGroupRHSs env1 new_bndrs $ \env ->
                                go env triples
        ; return (mkRecFloats rec_floats, env2) }
  where
    add_rules :: SimplEnv -> (InBndr,InExpr) -> SimplM (SimplEnv, (InBndr, OutBndr, InExpr))
        -- Add the (substituted) rules to the binder
    add_rules env (bndr, rhs)
        = do { (env', bndr') <- addBndrRules env bndr (lookupRecBndr env bndr) bind_cxt
             ; return (env', (bndr, bndr', rhs)) }

    -- Simplify each (old binder, new binder, rhs) triple in turn,
    -- accumulating the floats.
    go env [] = return (emptyFloats env, env)

    go env ((old_bndr, new_bndr, rhs) : pairs)
        = do { (float, env1) <- simplRecOrTopPair env bind_cxt old_bndr new_bndr rhs
             ; (floats, env2) <- go env1 pairs
             ; return (float `addFloats` floats, env2) }

{- simplOrTopPair is used for
        * recursive bindings (whether top level or not)
        * top-level non-recursive bindings

It assumes the binder has already been simplified, but not its IdInfo.
-} simplRecOrTopPair :: SimplEnv -> BindContext -> InId -> OutBndr -> InExpr -- Binder and rhs -> SimplM (SimplFloats, SimplEnv) simplRecOrTopPair env bind_cxt old_bndr new_bndr rhs | Just env' <- preInlineUnconditionally env (bindContextLevel bind_cxt) old_bndr rhs env = {-#SCC "simplRecOrTopPair-pre-inline-uncond" #-} simplTrace "SimplBindr:inline-uncond" (ppr old_bndr) $ do { tick (PreInlineUnconditionally old_bndr) ; return ( emptyFloats env, env' ) } | otherwise = case bind_cxt of BC_Join is_rec cont -> simplTrace "SimplBind:join" (ppr old_bndr) $ simplJoinBind is_rec cont (old_bndr,env) (new_bndr,env) (rhs,env) BC_Let top_lvl is_rec -> simplTrace "SimplBind:normal" (ppr old_bndr) $ simplLazyBind top_lvl is_rec (old_bndr,env) (new_bndr,env) (rhs,env) simplTrace :: String -> SDoc -> SimplM a -> SimplM a simplTrace herald doc thing_inside = do logger <- getLogger if logHasDumpFlag logger Opt_D_verbose_core2core then logTraceMsg logger herald doc thing_inside else thing_inside -------------------------- simplLazyBind :: TopLevelFlag -> RecFlag -> (InId, SimplEnv) -- InBinder, and static env for its unfolding (if any) -> (OutId, SimplEnv) -- OutBinder, and SimplEnv after simplifying that binder -- The OutId has IdInfo (notably RULES), -- except arity, unfolding -> (InExpr, SimplEnv) -- The RHS and its static environment -> SimplM (SimplFloats, SimplEnv) -- Precondition: Ids only, no TyVars; not a JoinId -- Precondition: rhs obeys the let-can-float invariant simplLazyBind top_lvl is_rec (bndr,unf_se) (bndr1,env) (rhs,rhs_se) = assert (isId bndr ) assertPpr (not (isJoinId bndr)) (ppr bndr) $ -- pprTrace "simplLazyBind" ((ppr bndr <+> ppr bndr1) $$ ppr rhs $$ ppr (seIdSubst rhs_se)) $ do { let !rhs_env = rhs_se `setInScopeFromE` env -- See Note [Bangs in the Simplifier] (tvs, body) = case collectTyAndValBinders rhs of (tvs, [], body) | surely_not_lam body -> (tvs, body) _ -> ([], rhs) surely_not_lam (Lam {}) = False surely_not_lam (Tick t e) | not (tickishFloatable 
t) = surely_not_lam e -- eta-reduction could float surely_not_lam _ = True -- Do not do the "abstract tyvar" thing if there's -- a lambda inside, because it defeats eta-reduction -- f = /\a. \x. g a x -- should eta-reduce. ; (body_env, tvs') <- {-#SCC "simplBinders" #-} simplBinders rhs_env tvs -- See Note [Floating and type abstraction] in GHC.Core.Opt.Simplify.Utils -- Simplify the RHS ; let rhs_cont = mkRhsStop (substTy body_env (exprType body)) is_rec (idDemandInfo bndr) ; (body_floats0, body0) <- {-#SCC "simplExprF" #-} simplExprF body_env body rhs_cont -- ANF-ise a constructor or PAP rhs ; (body_floats2, body2) <- {-#SCC "prepareBinding" #-} prepareBinding env top_lvl is_rec False -- Not strict; this is simplLazyBind bndr1 body_floats0 body0 -- Subtle point: we do not need or want tvs' in the InScope set -- of body_floats2, so we pass in 'env' not 'body_env'. -- Don't want: if tvs' are in-scope in the scope of this let-binding, we may do -- more renaming than necessary => extra work (see !7777 and test T16577). -- Don't need: we wrap tvs' around the RHS anyway. 
; (rhs_floats, body3) <- if isEmptyFloats body_floats2 || null tvs then -- Simple floating {-#SCC "simplLazyBind-simple-floating" #-} return (body_floats2, body2) else -- Non-empty floats, and non-empty tyvars: do type-abstraction first {-#SCC "simplLazyBind-type-abstraction-first" #-} do { (poly_binds, body3) <- abstractFloats (seUnfoldingOpts env) top_lvl tvs' body_floats2 body2 ; let poly_floats = foldl' extendFloats (emptyFloats env) poly_binds ; return (poly_floats, body3) } ; let env1 = env `setInScopeFromF` rhs_floats ; rhs' <- rebuildLam env1 tvs' body3 rhs_cont ; (bind_float, env2) <- completeBind (BC_Let top_lvl is_rec) (bndr,unf_se) (bndr1,rhs',env1) ; return (rhs_floats `addFloats` bind_float, env2) } -------------------------- simplJoinBind :: RecFlag -> SimplCont -> (InId, SimplEnv) -- InBinder, with static env for its unfolding -> (OutId, SimplEnv) -- OutBinder; SimplEnv has the binder in scope -- The OutId has IdInfo, except arity, unfolding -> (InExpr, SimplEnv) -- The right hand side and its env -> SimplM (SimplFloats, SimplEnv) simplJoinBind is_rec cont (old_bndr, unf_se) (new_bndr, env) (rhs, rhs_se) = do { let rhs_env = rhs_se `setInScopeFromE` env ; rhs' <- simplJoinRhs rhs_env old_bndr rhs cont ; completeBind (BC_Join is_rec cont) (old_bndr, unf_se) (new_bndr, rhs', env) } -------------------------- simplAuxBind :: String -> SimplEnv -> InId -- Old binder; not a JoinId -> OutExpr -- Simplified RHS -> SimplM (SimplFloats, SimplEnv) -- A specialised variant of completeBindX used to construct non-recursive -- auxiliary bindings, notably in knownCon. -- -- The binder comes from a case expression (case binder or alternative) -- and so does not have rules, unfolding, inline pragmas etc. -- -- Precondition: rhs satisfies the let-can-float invariant simplAuxBind _str env bndr new_rhs | assertPpr (isId bndr && not (isJoinId bndr)) (ppr bndr) $ isDeadBinder bndr -- Not uncommon; e.g. 
case (a,b) of c { (p,q) -> p } = return (emptyFloats env, env) -- Here c is dead, and we avoid -- creating the binding c = (a,b) -- Next we have a fast-path for cases that would be inlined unconditionally by -- completeBind: but it seems not uncommon, and it turns to be a little more -- efficient (in compile time allocations) to do it here. -- Effectively this is just a vastly-simplified postInlineUnconditionally -- See Note [Post-inline for single-use things] in GHC.Core.Opt.Simplify.Utils -- We could instead use postInlineUnconditionally itself, but I think it's simpler -- and more direct to focus on the "hot" cases. -- e.g. auxiliary bindings have no NOLINE pragmas, RULEs, or stable unfoldings | exprIsTrivial new_rhs -- Short-cut for let x = y in ... || case (idOccInfo bndr) of OneOcc{ occ_n_br = 1, occ_in_lam = NotInsideLam } -> True _ -> False = return ( emptyFloats env , extendCvIdSubst env bndr new_rhs ) -- bndr can be a CoVar | otherwise = do { -- ANF-ise the RHS let !occ_fs = getOccFS bndr ; (anf_floats, rhs1) <- prepareRhs env NotTopLevel occ_fs new_rhs ; unless (isEmptyLetFloats anf_floats) (tick LetFloatFromLet) ; let rhs_floats = emptyFloats env `addLetFloats` anf_floats -- Simplify the binder and complete the binding ; (env1, new_bndr) <- simplBinder (env `setInScopeFromF` rhs_floats) bndr ; (bind_float, env2) <- completeBind (BC_Let NotTopLevel NonRecursive) (bndr,env) (new_bndr, rhs1, env1) ; return (rhs_floats `addFloats` bind_float, env2) } {- ********************************************************************* * * Cast worker/wrapper * * ************************************************************************ Note [Cast worker/wrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~ When we have a binding x = e |> co we want to do something very similar to worker/wrapper: $wx = e x = $wx |> co We call this making a cast worker/wrapper in tryCastWorkerWrapper. The main motivaiton is that x can be inlined freely. 
There's a chance that e will be a constructor application or function, or something like that, so moving the coercion to the usage site may well cancel the coercions and lead to further optimisation. Example: data family T a :: * data instance T Int = T Int foo :: Int -> Int -> Int foo m n = ... where t = T m go 0 = 0 go n = case t of { T m -> go (n-m) } -- This case should optimise A second reason for doing cast worker/wrapper is that the worker/wrapper pass after strictness analysis can't deal with RHSs like f = (\ a b c. blah) |> co Instead, it relies on cast worker/wrapper to get rid of the cast, leaving a simpler job for demand-analysis worker/wrapper. See #19874. Wrinkles 1. We must /not/ do cast w/w on f = g |> co otherwise it'll just keep repeating forever! You might think this is avoided because the call to tryCastWorkerWrapper is guarded by preInlineUnconditinally, but I'm worried that a loop-breaker or an exported Id might say False to preInlineUnonditionally. 2. We need to be careful with inline/noinline pragmas: rec { {-# NOINLINE f #-} f = (...g...) |> co ; g = ...f... } This is legitimate -- it tells GHC to use f as the loop breaker rather than g. Now we do the cast thing, to get something like rec { $wf = ...g... ; f = $wf |> co ; g = ...f... } Where should the NOINLINE pragma go? If we leave it on f we'll get rec { $wf = ...g... ; {-# NOINLINE f #-} f = $wf |> co ; g = ...f... } and that is bad: the whole point is that we want to inline that cast! We want to transfer the pagma to $wf: rec { {-# NOINLINE $wf #-} $wf = ...g... ; f = $wf |> co ; g = ...f... } c.f. Note [Worker/wrapper for NOINLINE functions] in GHC.Core.Opt.WorkWrap. 3. We should still do cast w/w even if `f` is INLINEABLE. E.g. {- f: Stable unfolding = -} f = (\xy. ) |> co Then we want to w/w to {- $wf: Stable unfolding = |> sym co -} $wf = \xy. f = $wf |> co Notice that the stable unfolding moves to the worker! 
Now demand analysis will work fine on $wf, whereas it has trouble with the original f. c.f. Note [Worker/wrapper for INLINABLE functions] in GHC.Core.Opt.WorkWrap. This point also applies to strong loopbreakers with INLINE pragmas, see wrinkle (4). 4. We should /not/ do cast w/w for non-loop-breaker INLINE functions (hence hasInlineUnfolding in tryCastWorkerWrapper, which responds False to loop-breakers) because they'll definitely be inlined anyway, cast and all. And if we do cast w/w for an INLINE function with arity zero, we get something really silly: we inline that "worker" right back into the wrapper! Worse than a no-op, because we have then lost the stable unfolding. All these wrinkles are exactly like worker/wrapper for strictness analysis: f is the wrapper and must inline like crazy $wf is the worker and must carry f's original pragma See Note [Worker/wrapper for INLINABLE functions] and Note [Worker/wrapper for NOINLINE functions] in GHC.Core.Opt.WorkWrap. See #17673, #18093, #18078, #19890. Note [Preserve strictness in cast w/w] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the Note [Cast worker/wrapper] transformation, keep the strictness info. Eg f = e `cast` co -- f has strictness SSL When we transform to f' = e -- f' also has strictness SSL f = f' `cast` co -- f still has strictness SSL Its not wrong to drop it on the floor, but better to keep it. Note [Preserve RuntimeRep info in cast w/w] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We must not do cast w/w when the presence of the coercion is needed in order to determine the runtime representation. Example: Suppose we have a type family: type F :: RuntimeRep type family F where F = LiftedRep together with a type `ty :: TYPE F` and a top-level binding a :: ty |> TYPE F[0] The kind of `ty |> TYPE F[0]` is `LiftedRep`, so `a` is a top-level lazy binding. However, were we to apply cast w/w, we would get: b :: ty b = ... 
a :: ty |> TYPE F[0] a = b `cast` GRefl (TYPE F[0]) Now we are in trouble because `ty :: TYPE F` does not have a known runtime representation, because we need to be able to reduce the nullary type family application `F` to find that out. Conclusion: only do cast w/w when doing so would not lose the RuntimeRep information. That is, when handling `Cast rhs co`, don't attempt cast w/w unless the kind of the type of rhs is concrete, in the sense of Note [Concrete types] in GHC.Tc.Utils.Concrete. -} tryCastWorkerWrapper :: SimplEnv -> BindContext -> InId -> OutId -> OutExpr -> SimplM (SimplFloats, SimplEnv) -- See Note [Cast worker/wrapper] tryCastWorkerWrapper env bind_cxt old_bndr bndr (Cast rhs co) | BC_Let top_lvl is_rec <- bind_cxt -- Not join points , not (isDFunId bndr) -- nor DFuns; cast w/w is no help, and we can't transform -- a DFunUnfolding in mk_worker_unfolding , not (exprIsTrivial rhs) -- Not x = y |> co; Wrinkle 1 , not (hasInlineUnfolding info) -- Not INLINE things: Wrinkle 4 , typeHasFixedRuntimeRep work_ty -- Don't peel off a cast if doing so would -- lose the underlying runtime representation. 
-- See Note [Preserve RuntimeRep info in cast w/w] , not (isOpaquePragma (idInlinePragma old_bndr)) -- Not for OPAQUE bindings -- See Note [OPAQUE pragma] = do { uniq <- getUniqueM ; let work_name = mkSystemVarName uniq occ_fs work_id = mkLocalIdWithInfo work_name ManyTy work_ty work_info is_strict = isStrictId bndr ; (rhs_floats, work_rhs) <- prepareBinding env top_lvl is_rec is_strict work_id (emptyFloats env) rhs ; work_unf <- mk_worker_unfolding top_lvl work_id work_rhs ; let work_id_w_unf = work_id `setIdUnfolding` work_unf floats = rhs_floats `addLetFloats` unitLetFloat (NonRec work_id_w_unf work_rhs) triv_rhs = Cast (Var work_id_w_unf) co ; if postInlineUnconditionally env bind_cxt old_bndr bndr triv_rhs -- Almost always True, because the RHS is trivial -- In that case we want to eliminate the binding fast -- We conservatively use postInlineUnconditionally so that we -- check all the right things then do { tick (PostInlineUnconditionally bndr) ; return ( floats , extendIdSubst (setInScopeFromF env floats) old_bndr $ DoneEx triv_rhs NotJoinPoint ) } else do { wrap_unf <- mkLetUnfolding env top_lvl VanillaSrc bndr False triv_rhs ; let bndr' = bndr `setInlinePragma` mkCastWrapperInlinePrag (idInlinePragma bndr) `setIdUnfolding` wrap_unf floats' = floats `extendFloats` NonRec bndr' triv_rhs ; return ( floats', setInScopeFromF env floats' ) } } where -- Force the occ_fs so that the old Id is not retained in the new Id. 
!occ_fs = getOccFS bndr work_ty = coercionLKind co info = idInfo bndr work_arity = arityInfo info `min` typeArity work_ty work_info = vanillaIdInfo `setDmdSigInfo` dmdSigInfo info `setCprSigInfo` cprSigInfo info `setDemandInfo` demandInfo info `setInlinePragInfo` inlinePragInfo info `setArityInfo` work_arity -- We do /not/ want to transfer OccInfo, Rules -- Note [Preserve strictness in cast w/w] -- and Wrinkle 2 of Note [Cast worker/wrapper] ----------- Worker unfolding ----------- -- Stable case: if there is a stable unfolding we have to compose with (Sym co); -- the next round of simplification will do the job -- Non-stable case: use work_rhs -- Wrinkle 3 of Note [Cast worker/wrapper] mk_worker_unfolding top_lvl work_id work_rhs = case realUnfoldingInfo info of -- NB: the real one, even for loop-breakers unf@(CoreUnfolding { uf_tmpl = unf_rhs, uf_src = src }) | isStableSource src -> return (unf { uf_tmpl = mkCast unf_rhs (mkSymCo co) }) _ -> mkLetUnfolding env top_lvl VanillaSrc work_id False work_rhs tryCastWorkerWrapper env _ _ bndr rhs -- All other bindings = do { traceSmpl "tcww:no" (vcat [ text "bndr:" <+> ppr bndr , text "rhs:" <+> ppr rhs ]) ; return (mkFloatBind env (NonRec bndr rhs)) } mkCastWrapperInlinePrag :: InlinePragma -> InlinePragma -- See Note [Cast worker/wrapper] mkCastWrapperInlinePrag (InlinePragma { inl_inline = fn_inl, inl_act = fn_act, inl_rule = rule_info }) = InlinePragma { inl_src = SourceText $ fsLit "{-# INLINE" , inl_inline = fn_inl -- See Note [Worker/wrapper for INLINABLE functions] , inl_sat = Nothing -- in GHC.Core.Opt.WorkWrap , inl_act = wrap_act -- See Note [Wrapper activation] , inl_rule = rule_info } -- in GHC.Core.Opt.WorkWrap -- RuleMatchInfo is (and must be) unaffected where -- See Note [Wrapper activation] in GHC.Core.Opt.WorkWrap -- But simpler, because we don't need to disable during InitialPhase wrap_act | isNeverActive fn_act = activateDuringFinal | otherwise = fn_act {- 
********************************************************************* * * prepareBinding, prepareRhs, makeTrivial * * ********************************************************************* -} prepareBinding :: SimplEnv -> TopLevelFlag -> RecFlag -> Bool -> Id -- Used only for its OccName; can be InId or OutId -> SimplFloats -> OutExpr -> SimplM (SimplFloats, OutExpr) -- In (prepareBinding ... bndr floats rhs), the binding is really just -- bndr = let floats in rhs -- Maybe we can ANF-ise this binding and float out; e.g. -- bndr = let a = f x in K a a (g x) -- we could float out to give -- a = f x -- tmp = g x -- bndr = K a a tmp -- That's what prepareBinding does -- Precondition: binder is not a JoinId -- Postcondition: the returned SimplFloats contains only let-floats prepareBinding env top_lvl is_rec strict_bind bndr rhs_floats rhs = do { -- Never float join-floats out of a non-join let-binding (which this is) -- So wrap the body in the join-floats right now -- Hence: rhs_floats1 consists only of let-floats let (rhs_floats1, rhs1) = wrapJoinFloatsX rhs_floats rhs -- rhs_env: add to in-scope set the binders from rhs_floats -- so that prepareRhs knows what is in scope in rhs ; let rhs_env = env `setInScopeFromF` rhs_floats1 -- Force the occ_fs so that the old Id is not retained in the new Id. !occ_fs = getOccFS bndr -- Now ANF-ise the remaining rhs ; (anf_floats, rhs2) <- prepareRhs rhs_env top_lvl occ_fs rhs1 -- Finally, decide whether or not to float ; let all_floats = rhs_floats1 `addLetFloats` anf_floats ; if doFloatFromRhs (seFloatEnable env) top_lvl is_rec strict_bind all_floats rhs2 then -- Float! 
do { tick LetFloatFromLet ; return (all_floats, rhs2) } else -- Abandon floating altogether; revert to original rhs -- Since we have already built rhs1, we just need to add -- rhs_floats1 to it return (emptyFloats env, wrapFloats rhs_floats1 rhs1) } {- Note [prepareRhs] ~~~~~~~~~~~~~~~~~~~~ prepareRhs takes a putative RHS, checks whether it's a PAP or constructor application and, if so, converts it to ANF, so that the resulting thing can be inlined more easily. Thus x = (f a, g b) becomes t1 = f a t2 = g b x = (t1,t2) We also want to deal well cases like this v = (f e1 `cast` co) e2 Here we want to make e1,e2 trivial and get x1 = e1; x2 = e2; v = (f x1 `cast` co) v2 That's what the 'go' loop in prepareRhs does -} prepareRhs :: HasDebugCallStack => SimplEnv -> TopLevelFlag -> FastString -- Base for any new variables -> OutExpr -> SimplM (LetFloats, OutExpr) -- Transforms a RHS into a better RHS by ANF'ing args -- for expandable RHSs: constructors and PAPs -- e.g x = Just e -- becomes a = e -- 'a' is fresh -- x = Just a -- See Note [prepareRhs] prepareRhs env top_lvl occ rhs0 | is_expandable = anfise rhs0 | otherwise = return (emptyLetFloats, rhs0) where -- We can't use exprIsExpandable because the WHOLE POINT is that -- we want to treat (K ) as expandable, because we are just -- about "anfise" the expression. exprIsExpandable would -- just say no! 
is_expandable = go rhs0 0 where go (Var fun) n_val_args = isExpandableApp fun n_val_args go (App fun arg) n_val_args | isTypeArg arg = go fun n_val_args | otherwise = go fun (n_val_args + 1) go (Cast rhs _) n_val_args = go rhs n_val_args go (Tick _ rhs) n_val_args = go rhs n_val_args go _ _ = False anfise :: OutExpr -> SimplM (LetFloats, OutExpr) anfise (Cast rhs co) = do { (floats, rhs') <- anfise rhs ; return (floats, Cast rhs' co) } anfise (App fun (Type ty)) = do { (floats, rhs') <- anfise fun ; return (floats, App rhs' (Type ty)) } anfise (App fun arg) = do { (floats1, fun') <- anfise fun ; (floats2, arg') <- makeTrivial env top_lvl topDmd occ arg ; return (floats1 `addLetFlts` floats2, App fun' arg') } anfise (Var fun) = return (emptyLetFloats, Var fun) anfise (Tick t rhs) -- We want to be able to float bindings past this -- tick. Non-scoping ticks don't care. | tickishScoped t == NoScope = do { (floats, rhs') <- anfise rhs ; return (floats, Tick t rhs') } -- On the other hand, for scoping ticks we need to be able to -- copy them on the floats, which in turn is only allowed if -- we can obtain non-counting ticks. 
| (not (tickishCounts t) || tickishCanSplit t) = do { (floats, rhs') <- anfise rhs ; let tickIt (id, expr) = (id, mkTick (mkNoCount t) expr) floats' = mapLetFloats floats tickIt ; return (floats', Tick t rhs') } anfise other = return (emptyLetFloats, other) makeTrivialArg :: HasDebugCallStack => SimplEnv -> ArgSpec -> SimplM (LetFloats, ArgSpec) makeTrivialArg env arg@(ValArg { as_arg = e, as_dmd = dmd }) = do { (floats, e') <- makeTrivial env NotTopLevel dmd (fsLit "arg") e ; return (floats, arg { as_arg = e' }) } makeTrivialArg _ arg = return (emptyLetFloats, arg) -- CastBy, TyArg makeTrivial :: HasDebugCallStack => SimplEnv -> TopLevelFlag -> Demand -> FastString -- ^ A "friendly name" to build the new binder from -> OutExpr -> SimplM (LetFloats, OutExpr) -- Binds the expression to a variable, if it's not trivial, returning the variable -- For the Demand argument, see Note [Keeping demand info in StrictArg Plan A] makeTrivial env top_lvl dmd occ_fs expr | exprIsTrivial expr -- Already trivial || not (bindingOk top_lvl expr expr_ty) -- Cannot trivialise -- See Note [Cannot trivialise] = return (emptyLetFloats, expr) | Cast expr' co <- expr = do { (floats, triv_expr) <- makeTrivial env top_lvl dmd occ_fs expr' ; return (floats, Cast triv_expr co) } | otherwise -- 'expr' is not of form (Cast e co) = do { (floats, expr1) <- prepareRhs env top_lvl occ_fs expr ; uniq <- getUniqueM ; let name = mkSystemVarName uniq occ_fs var = mkLocalIdWithInfo name ManyTy expr_ty id_info -- Now something very like completeBind, -- but without the postInlineUnconditionally part ; (arity_type, expr2) <- tryEtaExpandRhs env (BC_Let top_lvl NonRecursive) var expr1 -- Technically we should extend the in-scope set in 'env' with -- the 'floats' from prepareRHS; but they are all fresh, so there is -- no danger of introducing name shadowing in eta expansion ; unf <- mkLetUnfolding env top_lvl VanillaSrc var False expr2 ; let final_id = addLetBndrInfo var arity_type unf bind = NonRec final_id 
expr2 ; traceSmpl "makeTrivial" (vcat [text "final_id" <+> ppr final_id, text "rhs" <+> ppr expr2 ]) ; return ( floats `addLetFlts` unitLetFloat bind, Var final_id ) } where id_info = vanillaIdInfo `setDemandInfo` dmd expr_ty = exprType expr bindingOk :: TopLevelFlag -> CoreExpr -> Type -> Bool -- True iff we can have a binding of this expression at this level -- Precondition: the type is the type of the expression bindingOk top_lvl expr expr_ty | isTopLevel top_lvl = exprIsTopLevelBindable expr expr_ty | otherwise = True {- Note [Cannot trivialise] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider: f :: Int -> Addr# foo :: Bar foo = Bar (f 3) Then we can't ANF-ise foo, even though we'd like to, because we can't make a top-level binding for the Addr# (f 3). And if so we don't want to turn it into foo = let x = f 3 in Bar x because we'll just end up inlining x back, and that makes the simplifier loop. Better not to ANF-ise it at all. Literal strings are an exception. foo = Ptr "blob"# We want to turn this into: foo1 = "blob"# foo = Ptr foo1 See Note [Core top-level string literals] in GHC.Core. ************************************************************************ * * Completing a lazy binding * * ************************************************************************ completeBind * deals only with Ids, not TyVars * takes an already-simplified binder and RHS * is used for both recursive and non-recursive bindings * is used for both top-level and non-top-level bindings It does the following: - tries discarding a dead binding - tries PostInlineUnconditionally - add unfolding [this is the only place we add an unfolding] - add arity - extend the InScopeSet of the SimplEnv It does *not* attempt to do let-to-case. Why? Because it is used for - top-level bindings (when let-to-case is impossible) - many situations where the "rhs" is known to be a WHNF (so let-to-case is inappropriate). 
Nor does it do the atomic-argument thing -} completeBind :: BindContext -> (InId, SimplEnv) -- Old binder, and the static envt in which to simplify -- its stable unfolding (if any) -> (OutId, OutExpr, SimplEnv) -- New binder and rhs; can be a JoinId. -- And the SimplEnv with that OutId in scope. -> SimplM (SimplFloats, SimplEnv) -- completeBind may choose to do its work -- * by extending the substitution (e.g. let x = y in ...) -- * or by adding to the floats in the envt -- -- Binder /can/ be a JoinId -- Precondition: rhs obeys the let-can-float invariant completeBind bind_cxt (old_bndr, unf_se) (new_bndr, new_rhs, env) | isCoVar old_bndr = case new_rhs of Coercion co -> return (emptyFloats env, extendCvSubst env old_bndr co) _ -> return (mkFloatBind env (NonRec new_bndr new_rhs)) | otherwise = assert (isId new_bndr) $ do { let old_info = idInfo old_bndr old_unf = realUnfoldingInfo old_info -- Do eta-expansion on the RHS of the binding -- See Note [Eta-expanding at let bindings] in GHC.Core.Opt.Simplify.Utils ; (new_arity, eta_rhs) <- tryEtaExpandRhs env bind_cxt new_bndr new_rhs -- Simplify the unfolding; see Note [Environment for simplLetUnfolding] ; new_unfolding <- simplLetUnfolding (unf_se `setInScopeFromE` env) bind_cxt old_bndr eta_rhs (idType new_bndr) new_arity old_unf ; let new_bndr_w_info = addLetBndrInfo new_bndr new_arity new_unfolding -- See Note [In-scope set as a substitution] ; if postInlineUnconditionally env bind_cxt old_bndr new_bndr_w_info eta_rhs then -- Inline and discard the binding do { tick (PostInlineUnconditionally old_bndr) ; let unf_rhs = maybeUnfoldingTemplate new_unfolding `orElse` eta_rhs -- See Note [Use occ-anald RHS in postInlineUnconditionally] ; simplTrace "PostInlineUnconditionally" (ppr new_bndr <+> ppr unf_rhs) $ return ( emptyFloats env , extendIdSubst env old_bndr $ DoneEx unf_rhs (idJoinPointHood new_bndr)) } -- Use the substitution to make quite, quite sure that the -- substitution will happen, since we are going to 
discard the binding else -- Keep the binding; do cast worker/wrapper -- simplTrace "completeBind" (vcat [ text "bndrs" <+> ppr old_bndr <+> ppr new_bndr -- , text "eta_rhs" <+> ppr eta_rhs ]) $ tryCastWorkerWrapper env bind_cxt old_bndr new_bndr_w_info eta_rhs } addLetBndrInfo :: OutId -> ArityType -> Unfolding -> OutId addLetBndrInfo new_bndr new_arity_type new_unf = new_bndr `setIdInfo` info5 where new_arity = arityTypeArity new_arity_type info1 = idInfo new_bndr `setArityInfo` new_arity -- Unfolding info: Note [Setting the new unfolding] info2 = info1 `setUnfoldingInfo` new_unf -- Demand info: Note [Setting the demand info] info3 | isEvaldUnfolding new_unf = lazifyDemandInfo info2 `orElse` info2 | otherwise = info2 -- Bottoming bindings: see Note [Bottoming bindings] info4 = case arityTypeBotSigs_maybe new_arity_type of Nothing -> info3 Just (ar, str_sig, cpr_sig) -> assert (ar == new_arity) $ info3 `setDmdSigInfo` str_sig `setCprSigInfo` cpr_sig -- Zap call arity info. We have used it by now (via -- `tryEtaExpandRhs`), and the simplifier can invalidate this -- information, leading to broken code later (e.g. #13479) info5 = zapCallArityInfo info4 {- Note [Bottoming bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have let x = error "urk" in ...(case x of )... or let f = \y. error (y ++ "urk") in ...(case f "foo" of )... Then we'd like to drop the dead immediately. So it's good to propagate the info that x's (or f's) RHS is bottom to x's (or f's) IdInfo as rapidly as possible. We use tryEtaExpandRhs on every binding, and it turns out that the arity computation it performs (via GHC.Core.Opt.Arity.findRhsArity) already does a simple bottoming-expression analysis. So all we need to do is propagate that info to the binder's IdInfo. This showed up in #12150; see comment:16. There is a second reason for settting the strictness signature. Consider let -- f :: <[S]b> f = \x. error "urk" in ...(f a b c)... 
Then, in GHC.Core.Opt.Arity.findRhsArity we'll use the demand-info on `f` to eta-expand to let f = \x y z. error "urk" in ...(f a b c)... But now f's strictness signature has too short an arity; see GHC.Core.Opt.DmdAnal Note [idArity varies independently of dmdTypeDepth]. Fortuitously, the same strictness-signature-fixup code gives the function a new strictness signature with the right number of arguments. Example in stranal/should_compile/EtaExpansion. Note [Setting the demand info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the unfolding is a value, the demand info may go pear-shaped, so we nuke it. Example: let x = (a,b) in case x of (p,q) -> h p q x Here x is certainly demanded. But after we've nuked the case, we'll get just let x = (a,b) in h a b x and now x is not demanded (I'm assuming h is lazy) This really happens. Similarly let f = \x -> e in ...f..f... After inlining f at some of its call sites the original binding may (for example) be no longer strictly demanded. The solution here is a bit ad hoc... Note [Use occ-anald RHS in postInlineUnconditionally] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we postInlineUnconditionally 'f in let f = \x -> x True in ...(f blah)... then we'd like to inline the /occ-anald/ RHS for 'f'. If we use the non-occ-anald version, we'll end up with a ...(let x = blah in x True)... and hence an extra Simplifier iteration. We already /have/ the occ-anald version in the Unfolding for the Id. Well, maybe not /quite/ always. If the binder is Dead, postInlineUnconditionally will return True, but we may not have an unfolding because it's too big. Hence the belt-and-braces `orElse` in the defn of unf_rhs. The Nothing case probably never happens. Note [Environment for simplLetUnfolding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need to be rather careful about the static environment in which we simplify a stable unfolding. Consider (#24242): f x = let y_Xb = ... in let step1_Xb {Stable unfolding = ....y_Xb...} = rhs in ... 
Note that `y_Xb` and `step1_Xb` have the same unique (`Xb`). This can happen; see Note [Shadowing in Core] in GHC.Core, and Note [Shadowing in the Simplifier]. This is perfectly fine. The `y_Xb` in the stable unfolding of the non- recursive binding for `step1` refers, of course, to `let y_Xb = ....`. When simplifying the binder `step1_Xb` we'll give it a new unique, and extend the static environment with [Xb :-> step1_Xc], say. But when simplifying step1's stable unfolding, we must use static environment /before/ simplifying the binder `step1_Xb`; that is, a static envt that maps [Xb :-> y_Xb], /not/ [Xb :-> step1_Xc]. That is why we pass around a pair `(InId, SimplEnv)` for the binder, keeping track of the right environment for the unfolding of that InId. See the type of `simplLazyBind`, `simplJoinBind`, `completeBind`. This only matters when we have - A non-recursive binding for f - has a stable unfolding - and that unfolding mentions a variable y - that has the same unique as f. So triggering a bug here is really hard! ************************************************************************ * * \subsection[Simplify-simplExpr]{The main function: simplExpr} * * ************************************************************************ The reason for this OutExprStuff stuff is that we want to float *after* simplifying a RHS, not before. If we do so naively we get quadratic behaviour as things float out. To see why it's important to do it after, consider this (real) example: let t = f x in fst t ==> let t = let a = e1 b = e2 in (a,b) in fst t ==> let a = e1 b = e2 t = (a,b) in a -- Can't inline a this round, cos it appears twice ==> e1 Each of the ==> steps is a round of simplification. We'd save a whole round if we float first. This can cascade. Consider let f = g d in \x -> ...f... ==> let f = let d1 = ..d.. in \y -> e in \x -> ...f... ==> let d1 = ..d.. in \x -> ...(\y ->e)... Only in this second round can the \y be applied, and it might do the same again. 
-} simplExpr :: SimplEnv -> CoreExpr -> SimplM CoreExpr simplExpr !env (Type ty) -- See Note [Bangs in the Simplifier] = do { ty' <- simplType env ty -- See Note [Avoiding space leaks in OutType] ; return (Type ty') } simplExpr env expr = simplExprC env expr (mkBoringStop expr_out_ty) where expr_out_ty :: OutType expr_out_ty = substTy env (exprType expr) -- NB: Since 'expr' is term-valued, not (Type ty), this call -- to exprType will succeed. exprType fails on (Type ty). simplExprC :: SimplEnv -> InExpr -- A term-valued expression, never (Type ty) -> SimplCont -> SimplM OutExpr -- Simplify an expression, given a continuation simplExprC env expr cont = -- pprTrace "simplExprC" (ppr expr $$ ppr cont) $ do { (floats, expr') <- simplExprF env expr cont ; -- pprTrace "simplExprC ret" (ppr expr $$ ppr expr') $ -- pprTrace "simplExprC ret3" (ppr (seInScope env')) $ -- pprTrace "simplExprC ret4" (ppr (seLetFloats env')) $ return (wrapFloats floats expr') } -------------------------------------------------- simplExprF :: SimplEnv -> InExpr -- A term-valued expression, never (Type ty) -> SimplCont -> SimplM (SimplFloats, OutExpr) simplExprF !env e !cont -- See Note [Bangs in the Simplifier] = {- pprTrace "simplExprF" (vcat [ ppr e , text "cont =" <+> ppr cont , text "inscope =" <+> ppr (seInScope env) , text "tvsubst =" <+> ppr (seTvSubst env) , text "idsubst =" <+> ppr (seIdSubst env) , text "cvsubst =" <+> ppr (seCvSubst env) ]) $ -} simplExprF1 env e cont simplExprF1 :: HasDebugCallStack => SimplEnv -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) simplExprF1 _ (Type ty) cont = pprPanic "simplExprF: type" (ppr ty <+> text"cont: " <+> ppr cont) -- simplExprF does only with term-valued expressions -- The (Type ty) case is handled separately by simplExpr -- and by the other callers of simplExprF simplExprF1 env (Var v) cont = {-#SCC "simplIdF" #-} simplIdF env v cont simplExprF1 env (Lit lit) cont = {-#SCC "rebuild" #-} rebuild env (Lit lit) cont simplExprF1 env (Tick 
t expr) cont = {-#SCC "simplTick" #-} simplTick env t expr cont simplExprF1 env (Cast body co) cont = {-#SCC "simplCast" #-} simplCast env body co cont simplExprF1 env (Coercion co) cont = {-#SCC "simplCoercionF" #-} simplCoercionF env co cont simplExprF1 env (App fun arg) cont = {-#SCC "simplExprF1-App" #-} case arg of Type ty -> do { -- The argument type will (almost) certainly be used -- in the output program, so just force it now. -- See Note [Avoiding space leaks in OutType] arg' <- simplType env ty -- But use substTy, not simplType, to avoid forcing -- the hole type; it will likely not be needed. -- See Note [The hole type in ApplyToTy] ; let hole' = substTy env (exprType fun) ; simplExprF env fun $ ApplyToTy { sc_arg_ty = arg' , sc_hole_ty = hole' , sc_cont = cont } } _ -> -- Crucially, sc_hole_ty is a /lazy/ binding. It will -- be forced only if we need to run contHoleType. -- When these are forced, we might get quadratic behavior; -- this quadratic blowup could be avoided by drilling down -- to the function and getting its multiplicities all at once -- (instead of one-at-a-time). But in practice, we have not -- observed the quadratic behavior, so this extra entanglement -- seems not worthwhile. simplExprF env fun $ ApplyToVal { sc_arg = arg, sc_env = env , sc_hole_ty = substTy env (exprType fun) , sc_dup = NoDup, sc_cont = cont } simplExprF1 env expr@(Lam {}) cont = {-#SCC "simplExprF1-Lam" #-} simplLam env (zapLambdaBndrs expr n_args) cont -- zapLambdaBndrs: the issue here is under-saturated lambdas -- (\x1. \x2. e) arg1 -- Here x1 might have "occurs-once" occ-info, because occ-info -- is computed assuming that a group of lambdas is applied -- all at once. 
If there are too few args, we must zap the -- occ-info, UNLESS the remaining binders are one-shot where n_args = countArgs cont -- NB: countArgs counts all the args (incl type args) -- and likewise drop counts all binders (incl type lambdas) simplExprF1 env (Case scrut bndr _ alts) cont = {-#SCC "simplExprF1-Case" #-} simplExprF env scrut (Select { sc_dup = NoDup, sc_bndr = bndr , sc_alts = alts , sc_env = env, sc_cont = cont }) simplExprF1 env (Let (Rec pairs) body) cont | Just pairs' <- joinPointBindings_maybe pairs = {-#SCC "simplRecJoinPoin" #-} simplRecJoinPoint env pairs' body cont | otherwise = {-#SCC "simplRecE" #-} simplRecE env pairs body cont simplExprF1 env (Let (NonRec bndr rhs) body) cont | Type ty <- rhs -- First deal with type lets (let a = Type ty in e) = {-#SCC "simplExprF1-NonRecLet-Type" #-} assert (isTyVar bndr) $ do { ty' <- simplType env ty ; simplExprF (extendTvSubst env bndr ty') body cont } | Just env' <- preInlineUnconditionally env NotTopLevel bndr rhs env -- Because of the let-can-float invariant, it's ok to -- inline freely, or to drop the binding if it is dead. = do { tick (PreInlineUnconditionally bndr) ; simplExprF env' body cont } -- Now check for a join point. It's better to do the preInlineUnconditionally -- test first, because joinPointBinding_maybe has to eta-expand, so a trivial -- binding like { j = j2 |> co } would first be eta-expanded and then inlined -- Better to test preInlineUnconditionally first. | Just (bndr', rhs') <- joinPointBinding_maybe bndr rhs = {-#SCC "simplNonRecJoinPoint" #-} simplNonRecJoinPoint env bndr' rhs' body cont | otherwise = {-#SCC "simplNonRecE" #-} simplNonRecE env FromLet bndr (rhs, env) body cont {- Note [Avoiding space leaks in OutType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since the simplifier is run for multiple iterations, we need to ensure that any thunks in the output of one simplifier iteration are forced by the evaluation of the next simplifier iteration. 
Otherwise we may retain multiple copies of the Core program and leak a terrible amount of memory (as in #13426). The simplifier is naturally strict in the entire "Expr part" of the input Core program, because any expression may contain binders, which we must find in order to extend the SimplEnv accordingly. But types do not contain binders and so it is tempting to write things like simplExpr env (Type ty) = return (Type (substTy env ty)) -- Bad! This is Bad because the result includes a thunk (substTy env ty) which retains a reference to the whole simplifier environment; and the next simplifier iteration will not force this thunk either, because the line above is not strict in ty. So instead our strategy is for the simplifier to fully evaluate OutTypes when it emits them into the output Core program, for example simplExpr env (Type ty) = do { ty' <- simplType env ty -- Good ; return (Type ty') } where the only difference from above is that simplType calls seqType on the result of substTy. However, SimplCont can also contain OutTypes and it's not necessarily a good idea to force types on the way in to SimplCont, because they may end up not being used and forcing them could be a lot of wasted work. T5631 is a good example of this. - For ApplyToTy's sc_arg_ty, we force the type on the way in because the type will almost certainly appear as a type argument in the output program. - For the hole types in Stop and ApplyToTy, we force the type when we emit it into the output program, after obtaining it from contResultType. (The hole type in ApplyToTy is only directly used to form the result type in a new Stop continuation.) -} --------------------------------- -- Simplify a join point, adding the context. -- Context goes *inside* the lambdas. IOW, if the join point has arity n, we do: -- \x1 .. xn -> e => \x1 .. xn -> E[e] -- Note that we need the arity of the join point, since e may be a lambda -- (though this is unlikely). See Note [Join points and case-of-case]. 
simplJoinRhs :: SimplEnv -> InId -> InExpr -> SimplCont -> SimplM OutExpr simplJoinRhs env bndr expr cont | JoinPoint arity <- idJoinPointHood bndr = do { let (join_bndrs, join_body) = collectNBinders arity expr mult = contHoleScaling cont ; (env', join_bndrs') <- simplLamBndrs env (map (scaleVarBy mult) join_bndrs) ; join_body' <- simplExprC env' join_body cont ; return $ mkLams join_bndrs' join_body' } | otherwise = pprPanic "simplJoinRhs" (ppr bndr) --------------------------------- simplType :: SimplEnv -> InType -> SimplM OutType -- Kept monadic just so we can do the seqType -- See Note [Avoiding space leaks in OutType] simplType env ty = -- pprTrace "simplType" (ppr ty $$ ppr (seTvSubst env)) $ seqType new_ty `seq` return new_ty where new_ty = substTy env ty --------------------------------- simplCoercionF :: SimplEnv -> InCoercion -> SimplCont -> SimplM (SimplFloats, OutExpr) simplCoercionF env co cont = do { co' <- simplCoercion env co ; rebuild env (Coercion co') cont } simplCoercion :: SimplEnv -> InCoercion -> SimplM OutCoercion simplCoercion env co = do { let opt_co | reSimplifying env = substCo env co | otherwise = optCoercion opts subst co -- If (reSimplifying env) is True we have already simplified -- this coercion once, and we don't want do so again; doing -- so repeatedly risks non-linear behaviour -- See Note [Inline depth] in GHC.Core.Opt.Simplify.Env ; seqCo opt_co `seq` return opt_co } where subst = getSubst env opts = seOptCoercionOpts env ----------------------------------- -- | Push a TickIt context outwards past applications and cases, as -- long as this is a non-scoping tick, to let case and application -- optimisations apply. simplTick :: SimplEnv -> CoreTickish -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) simplTick env tickish expr cont -- A scoped tick turns into a continuation, so that we can spot -- (scc t (\x . e)) in simplLam and eliminate the scc. 
If we didn't do -- it this way, then it would take two passes of the simplifier to -- reduce ((scc t (\x . e)) e'). -- NB, don't do this with counting ticks, because if the expr is -- bottom, then rebuildCall will discard the continuation. -------------------------- -- | tickishScoped tickish && not (tickishCounts tickish) -- = simplExprF env expr (TickIt tickish cont) -- XXX: we cannot do this, because the simplifier assumes that -- the context can be pushed into a case with a single branch. e.g. -- scc case expensive of p -> e -- becomes -- case expensive of p -> scc e -- -- So I'm disabling this for now. It just means we will do more -- simplifier iterations that necessary in some cases. -------------------------- -- For unscoped or soft-scoped ticks, we are allowed to float in new -- cost, so we simply push the continuation inside the tick. This -- has the effect of moving the tick to the outside of a case or -- application context, allowing the normal case and application -- optimisations to fire. | tickish `tickishScopesLike` SoftScope = do { (floats, expr') <- simplExprF env expr cont ; return (floats, mkTick tickish expr') } -- Push tick inside if the context looks like this will allow us to -- do a case-of-case - see Note [case-of-scc-of-case] | Select {} <- cont, Just expr' <- push_tick_inside = simplExprF env expr' cont -- We don't want to move the tick, but we might still want to allow -- floats to pass through with appropriate wrapping (or not, see -- wrap_floats below) --- | not (tickishCounts tickish) || tickishCanSplit tickish -- = wrap_floats | otherwise = no_floating_past_tick where -- Try to push tick inside a case, see Note [case-of-scc-of-case]. 
push_tick_inside = case expr0 of Case scrut bndr ty alts -> Just $ Case (tickScrut scrut) bndr ty (map tickAlt alts) _other -> Nothing where (ticks, expr0) = stripTicksTop movable (Tick tickish expr) movable t = not (tickishCounts t) || t `tickishScopesLike` NoScope || tickishCanSplit t tickScrut e = foldr mkTick e ticks -- Alternatives get annotated with all ticks that scope in some way, -- but we don't want to count entries. tickAlt (Alt c bs e) = Alt c bs (foldr mkTick e ts_scope) ts_scope = map mkNoCount $ filter (not . (`tickishScopesLike` NoScope)) ticks no_floating_past_tick = do { let (inc,outc) = splitCont cont ; (floats, expr1) <- simplExprF env expr inc ; let expr2 = wrapFloats floats expr1 tickish' = simplTickish env tickish ; rebuild env (mkTick tickish' expr2) outc } -- Alternative version that wraps outgoing floats with the tick. This -- results in ticks being duplicated, as we don't make any attempt to -- eliminate the tick if we re-inline the binding (because the tick -- semantics allows unrestricted inlining of HNFs), so I'm not doing -- this any more. FloatOut will catch any real opportunities for -- floating. -- -- wrap_floats = -- do { let (inc,outc) = splitCont cont -- ; (env', expr') <- simplExprF (zapFloats env) expr inc -- ; let tickish' = simplTickish env tickish -- ; let wrap_float (b,rhs) = (zapIdDmdSig (setIdArity b 0), -- mkTick (mkNoCount tickish') rhs) -- -- when wrapping a float with mkTick, we better zap the Id's -- -- strictness info and arity, because it might be wrong now. -- ; let env'' = addFloats env (mapFloats env' wrap_float) -- ; rebuild env'' expr' (TickIt tickish' outc) -- } simplTickish env tickish | Breakpoint ext n ids modl <- tickish = Breakpoint ext n (mapMaybe (getDoneId . 
substId env) ids) modl | otherwise = tickish -- Push type application and coercion inside a tick splitCont :: SimplCont -> (SimplCont, SimplCont) splitCont cont@(ApplyToTy { sc_cont = tail }) = (cont { sc_cont = inc }, outc) where (inc,outc) = splitCont tail splitCont cont@(CastIt { sc_cont = tail }) = (cont { sc_cont = inc }, outc) where (inc,outc) = splitCont tail splitCont other = (mkBoringStop (contHoleType other), other) getDoneId (DoneId id) = Just id getDoneId (DoneEx (Var id) _) = Just id getDoneId (DoneEx e _) = getIdFromTrivialExpr_maybe e -- Note [substTickish] in GHC.Core.Subst getDoneId other = pprPanic "getDoneId" (ppr other) -- Note [case-of-scc-of-case] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~ -- It's pretty important to be able to transform case-of-case when -- there's an SCC in the way. For example, the following comes up -- in nofib/real/compress/Encode.hs: -- -- case scctick -- case $wcode_string_r13s wild_XC w1_s137 w2_s138 l_aje -- of _ { (# ww1_s13f, ww2_s13g, ww3_s13h #) -> -- (ww1_s13f, ww2_s13g, ww3_s13h) -- } -- of _ { (ww_s12Y, ww1_s12Z, ww2_s130) -> -- tick -- (ww_s12Y, -- ww1_s12Z, -- PTTrees.PT -- @ GHC.Types.Char @ GHC.Types.Int wild2_Xj ww2_s130 r_ajf) -- } -- -- We really want this case-of-case to fire, because then the 3-tuple -- will go away (indeed, the CPR optimisation is relying on this -- happening). But the scctick is in the way - we need to push it -- inside to expose the case-of-case. So we perform this -- transformation on the inner case: -- -- scctick c (case e of { p1 -> e1; ...; pn -> en }) -- ==> -- case (scctick c e) of { p1 -> scc c e1; ...; pn -> scc c en } -- -- So we've moved a constant amount of work out of the scc to expose -- the case. We only do this when the continuation is interesting: in -- for now, it has to be another Case (maybe generalise this later). 
{- ************************************************************************ * * \subsection{The main rebuilder} * * ************************************************************************ -} rebuild :: SimplEnv -> OutExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) -- At this point the substitution in the SimplEnv should be irrelevant; -- only the in-scope set matters rebuild env expr cont = case cont of Stop {} -> return (emptyFloats env, expr) TickIt t cont -> rebuild env (mkTick t expr) cont CastIt { sc_co = co, sc_opt = opt, sc_cont = cont } -> rebuild env (mkCast expr co') cont -- NB: mkCast implements the (Coercion co |> g) optimisation where co' = optOutCoercion env co opt Select { sc_bndr = bndr, sc_alts = alts, sc_env = se, sc_cont = cont } -> rebuildCase (se `setInScopeFromE` env) expr bndr alts cont StrictArg { sc_fun = fun, sc_cont = cont, sc_fun_ty = fun_ty } -> rebuildCall env (addValArgTo fun expr fun_ty ) cont StrictBind { sc_bndr = b, sc_body = body, sc_env = se , sc_cont = cont, sc_from = from_what } -> completeBindX (se `setInScopeFromE` env) from_what b expr body cont ApplyToTy { sc_arg_ty = ty, sc_cont = cont} -> rebuild env (App expr (Type ty)) cont ApplyToVal { sc_arg = arg, sc_env = se, sc_dup = dup_flag , sc_cont = cont, sc_hole_ty = fun_ty } -- See Note [Avoid redundant simplification] -> do { (_, _, arg') <- simplLazyArg env dup_flag fun_ty Nothing se arg ; rebuild env (App expr arg') cont } completeBindX :: SimplEnv -> FromWhat -> InId -> OutExpr -- Non-recursively bind this Id to this (simplified) expression -- (the let-can-float invariant may not be satisfied) -> InExpr -- In this body -> SimplCont -- Consumed by this continuation -> SimplM (SimplFloats, OutExpr) completeBindX env from_what bndr rhs body cont | FromBeta arg_levity <- from_what , needsCaseBindingL arg_levity rhs -- Enforcing the let-can-float-invariant = do { (env1, bndr1) <- simplNonRecBndr env bndr -- Lambda binders don't have rules ; (floats, expr') <- 
simplNonRecBody env1 from_what body cont -- Do not float floats past the Case binder below ; let expr'' = wrapFloats floats expr' case_expr = Case rhs bndr1 (contResultType cont) [Alt DEFAULT [] expr''] ; return (emptyFloats env, case_expr) } | otherwise -- Make a let-binding = do { (env1, bndr1) <- simplNonRecBndr env bndr ; (env2, bndr2) <- addBndrRules env1 bndr bndr1 (BC_Let NotTopLevel NonRecursive) ; let is_strict = isStrictId bndr2 -- isStrictId: use simplified binder because the InId bndr might not have -- a fixed runtime representation, which isStrictId doesn't expect -- c.f. Note [Dark corner with representation polymorphism] ; (rhs_floats, rhs1) <- prepareBinding env NotTopLevel NonRecursive is_strict bndr2 (emptyFloats env) rhs -- NB: it makes a surprisingly big difference (5% in compiler allocation -- in T9630) to pass 'env' rather than 'env1'. It's fine to pass 'env', -- because this is completeBindX, so bndr is not in scope in the RHS. ; let env3 = env2 `setInScopeFromF` rhs_floats ; (bind_float, env4) <- completeBind (BC_Let NotTopLevel NonRecursive) (bndr,env) (bndr2, rhs1, env3) -- Must pass env1 to completeBind in case simplBinder had to clone, -- and extended the substitution with [bndr :-> new_bndr] -- Simplify the body ; (body_floats, body') <- simplNonRecBody env4 from_what body cont ; let all_floats = rhs_floats `addFloats` bind_float `addFloats` body_floats ; return ( all_floats, body' ) } {- ************************************************************************ * * \subsection{Lambdas} * * ************************************************************************ -} {- Note [Optimising reflexivity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's important (for compiler performance) to get rid of reflexivity as soon as it appears. See #11735, #14737, and #15019. In particular, we want to behave well on * e |> co1 |> co2 where the two happen to cancel out entirely. That is quite common; e.g. a newtype wrapping and unwrapping cancel. 
* (f |> co) @t1 @t2 ... @tn x1 .. xm Here we will use pushCoTyArg and pushCoValArg successively, which build up SelCo stacks. Silly to do that if co is reflexive. However, we don't want to call isReflexiveCo too much, because it uses type equality which is expensive on big types (#14737 comment:7). A good compromise (determined experimentally) seems to be to call isReflexiveCo * when composing casts, and * at the end In investigating this I saw missed opportunities for on-the-fly coercion shrinkage. See #15090. Note [Avoid re-simplifying coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In some benchmarks (with deeply nested cases) we successively push casts onto the SimplCont. We don't want to call the coercion optimiser on each successive composition -- that's at least quadratic. So: * The CastIt constructor in SimplCont has a `sc_opt :: Bool` flag to record whether the coercion optimiser has been applied to the coercion. * In `simplCast`, when we see (Cast e co), we simplify `co` to get an OutCoercion, and built a CastIt with sc_opt=True. Actually not quite: if we are simplifying the result of inlining an unfolding (seInlineDepth > 0), then instead of /optimising/ it again, just /substitute/ which is cheaper. See `simplCoercion`. * In `addCoerce` (in `simplCast`) if we combine this new coercion with an existing once, we build a CastIt for (co1 ; co2) with sc_opt=False. * When unpacking a CastIt, in `rebuildCall` and `rebuild`, we optimise the (presumably composed) coercion if sc_opt=False; this is done by `optOutCoercion`. * When duplicating a continuation in `mkDupableContWithDmds`, before duplicating a CastIt, optimise the coercion. Otherwise we'll end up optimising it separately in the duplicate copies. 
-} optOutCoercion :: SimplEnv -> OutCoercion -> Bool -> OutCoercion -- See Note [Avoid re-simplifying coercions] optOutCoercion env co already_optimised | already_optimised = co -- See Note [Avoid re-simplifying coercions] | otherwise = optCoercion opts empty_subst co where empty_subst = mkEmptySubst (seInScope env) opts = seOptCoercionOpts env simplCast :: SimplEnv -> InExpr -> InCoercion -> SimplCont -> SimplM (SimplFloats, OutExpr) simplCast env body co0 cont0 = do { co1 <- {-#SCC "simplCast-simplCoercion" #-} simplCoercion env co0 ; cont1 <- {-#SCC "simplCast-addCoerce" #-} if isReflCo co1 then return cont0 -- See Note [Optimising reflexivity] else addCoerce co1 True cont0 -- True <=> co1 is optimised ; {-#SCC "simplCast-simplExprF" #-} simplExprF env body cont1 } where -- If the first parameter is MRefl, then simplifying revealed a -- reflexive coercion. Omit. addCoerceM :: MOutCoercion -> Bool -> SimplCont -> SimplM SimplCont addCoerceM MRefl _ cont = return cont addCoerceM (MCo co) opt cont = addCoerce co opt cont addCoerce :: OutCoercion -> Bool -> SimplCont -> SimplM SimplCont addCoerce co1 _ (CastIt { sc_co = co2, sc_cont = cont }) -- See Note [Optimising reflexivity] = addCoerce (mkTransCo co1 co2) False cont -- False: (mkTransCo co1 co2) is not fully optimised -- See Note [Avoid re-simplifying coercions] addCoerce co opt (ApplyToTy { sc_arg_ty = arg_ty, sc_cont = tail }) | Just (arg_ty', m_co') <- pushCoTyArg co arg_ty = {-#SCC "addCoerce-pushCoTyArg" #-} do { tail' <- addCoerceM m_co' opt tail ; return (ApplyToTy { sc_arg_ty = arg_ty' , sc_cont = tail' , sc_hole_ty = coercionLKind co }) } -- NB! 
As the cast goes past, the -- type of the hole changes (#16312) -- (f |> co) e ===> (f (e |> co1)) |> co2 -- where co :: (s1->s2) ~ (t1->t2) -- co1 :: t1 ~ s1 -- co2 :: s2 ~ t2 addCoerce co opt cont@(ApplyToVal { sc_arg = arg, sc_env = arg_se , sc_dup = dup, sc_cont = tail , sc_hole_ty = fun_ty }) | not opt -- pushCoValArg duplicates the coercion, so optimise first = addCoerce (optOutCoercion env co opt) True cont | Just (m_co1, m_co2) <- pushCoValArg co , fixed_rep m_co1 = {-#SCC "addCoerce-pushCoValArg" #-} do { tail' <- addCoerceM m_co2 opt tail ; case m_co1 of { MRefl -> return (cont { sc_cont = tail' , sc_hole_ty = coercionLKind co }) ; -- See Note [Avoiding simplifying repeatedly] MCo co1 -> do { (dup', arg_se', arg') <- simplLazyArg env dup fun_ty Nothing arg_se arg -- When we build the ApplyTo we can't mix the OutCoercion -- 'co' with the InExpr 'arg', so we simplify -- to make it all consistent. It's a bit messy. -- But it isn't a common case. -- Example of use: #995 ; return (ApplyToVal { sc_arg = mkCast arg' co1 , sc_env = arg_se' , sc_dup = dup' , sc_cont = tail' , sc_hole_ty = coercionLKind co }) } } } addCoerce co opt cont | isReflCo co = return cont -- Having this at the end makes a huge -- difference in T12227, for some reason -- See Note [Optimising reflexivity] | otherwise = return (CastIt { sc_co = co, sc_opt = opt, sc_cont = cont }) fixed_rep :: MCoercionR -> Bool fixed_rep MRefl = True fixed_rep (MCo co) = typeHasFixedRuntimeRep $ coercionRKind co -- Without this check, we can get an argument which does not -- have a fixed runtime representation. -- See Note [Representation polymorphism invariants] in GHC.Core -- test: typecheck/should_run/EtaExpandLevPoly simplLazyArg :: SimplEnv -> DupFlag -> OutType -- ^ Type of the function applied to this arg -> Maybe ArgInfo -- ^ Just <=> This arg `ai` occurs in an app -- `f a1 ... 
an` where we have ArgInfo on -- how `f` uses `ai`, affecting the Stop -- continuation passed to 'simplExprC' -> StaticEnv -> CoreExpr -- ^ Expression with its static envt -> SimplM (DupFlag, StaticEnv, OutExpr) simplLazyArg env dup_flag fun_ty mb_arg_info arg_env arg | isSimplified dup_flag = return (dup_flag, arg_env, arg) | otherwise = do { let arg_env' = arg_env `setInScopeFromE` env ; let arg_ty = funArgTy fun_ty ; let stop = case mb_arg_info of Nothing -> mkBoringStop arg_ty Just ai -> mkLazyArgStop arg_ty ai ; arg' <- simplExprC arg_env' arg stop ; return (Simplified, zapSubstEnv arg_env', arg') } -- Return a StaticEnv that includes the in-scope set from 'env', -- because arg' may well mention those variables (#20639) {- ************************************************************************ * * \subsection{Lambdas} * * ************************************************************************ -} simplNonRecBody :: SimplEnv -> FromWhat -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) simplNonRecBody env from_what body cont = case from_what of FromLet -> simplExprF env body cont FromBeta {} -> simplLam env body cont simplLam :: SimplEnv -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) simplLam env (Lam bndr body) cont = simpl_lam env bndr body cont simplLam env expr cont = simplExprF env expr cont simpl_lam :: HasDebugCallStack => SimplEnv -> InBndr -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) -- Type beta-reduction simpl_lam env bndr body (ApplyToTy { sc_arg_ty = arg_ty, sc_cont = cont }) = do { tick (BetaReduction bndr) ; simplLam (extendTvSubst env bndr arg_ty) body cont } -- Coercion beta-reduction simpl_lam env bndr body (ApplyToVal { sc_arg = Coercion arg_co, sc_env = arg_se , sc_cont = cont }) = assertPpr (isCoVar bndr) (ppr bndr) $ do { tick (BetaReduction bndr) ; let arg_co' = substCo (arg_se `setInScopeFromE` env) arg_co ; simplLam (extendCvSubst env bndr arg_co') body cont } -- Value beta-reduction -- This works for 
/coercion/ lambdas too simpl_lam env bndr body (ApplyToVal { sc_arg = arg, sc_env = arg_se , sc_cont = cont, sc_dup = dup , sc_hole_ty = fun_ty}) = do { tick (BetaReduction bndr) ; let from_what = FromBeta arg_levity arg_levity | isForAllTy fun_ty = assertPpr (isCoVar bndr) (ppr bndr) Unlifted | otherwise = typeLevity (funArgTy fun_ty) -- Example: (\(cv::a ~# b). blah) co -- The type of (\cv.blah) can be (forall cv. ty); see GHC.Core.Utils.mkLamType -- Using fun_ty: see Note [Dark corner with representation polymorphism] -- e.g (\r \(a::TYPE r) \(x::a). blah) @LiftedRep @Int arg -- When we come to `x=arg` we must choose lazy/strict correctly -- It's wrong to err in either direction -- But fun_ty is an OutType, so is fully substituted ; if | isSimplified dup -- Don't re-simplify if we've simplified it once -- Including don't preInlineUnconditionally -- See Note [Avoiding simplifying repeatedly] -> completeBindX env from_what bndr arg body cont | Just env' <- preInlineUnconditionally env NotTopLevel bndr arg arg_se , not (needsCaseBindingL arg_levity arg) -- Ok to test arg::InExpr in needsCaseBinding because -- exprOkForSpeculation is stable under simplification -> do { tick (PreInlineUnconditionally bndr) ; simplLam env' body cont } | otherwise -> simplNonRecE env from_what bndr (arg, arg_se) body cont } -- Discard a non-counting tick on a lambda. This may change the -- cost attribution slightly (moving the allocation of the -- lambda elsewhere), but we don't care: optimisation changes -- cost attribution all the time. 
simpl_lam env bndr body (TickIt tickish cont) | not (tickishCounts tickish) = simpl_lam env bndr body cont -- Not enough args, so there are real lambdas left to put in the result simpl_lam env bndr body cont = do { let (inner_bndrs, inner_body) = collectBinders body ; (env', bndrs') <- simplLamBndrs env (bndr:inner_bndrs) ; body' <- simplExpr env' inner_body ; new_lam <- rebuildLam env' bndrs' body' cont ; rebuild env' new_lam cont } ------------- simplLamBndr :: SimplEnv -> InBndr -> SimplM (SimplEnv, OutBndr) -- Historically this had a special case for when a lambda-binder -- could have a stable unfolding; -- see Historical Note [Case binders and join points] -- But now it is much simpler! We now only remove unfoldings. -- See Note [Never put `OtherCon` unfoldings on lambda binders] simplLamBndr env bndr = simplBinder env (zapIdUnfolding bndr) simplLamBndrs :: SimplEnv -> [InBndr] -> SimplM (SimplEnv, [OutBndr]) simplLamBndrs env bndrs = mapAccumLM simplLamBndr env bndrs ------------------ simplNonRecE :: HasDebugCallStack => SimplEnv -> FromWhat -> InId -- The binder, always an Id -- Never a join point -- The static env for its unfolding (if any) is the first parameter -> (InExpr, SimplEnv) -- Rhs of binding (or arg of lambda) -> InExpr -- Body of the let/lambda -> SimplCont -> SimplM (SimplFloats, OutExpr) -- simplNonRecE is used for -- * from=FromLet: a non-top-level non-recursive non-join-point let-expression -- * from=FromBeta: a binding arising from a beta reduction -- -- simplNonRecE env b (rhs, rhs_se) body k -- = let env in -- cont< let b = rhs_se(rhs) in body > -- -- It deals with strict bindings, via the StrictBind continuation, -- which may abort the whole process. -- -- from_what=FromLet => the RHS satisfies the let-can-float invariant -- Otherwise it may or may not satisfy it. 
simplNonRecE env from_what bndr (rhs, rhs_se) body cont | assert (isId bndr && not (isJoinId bndr) ) $ is_strict_bind = -- Evaluate RHS strictly simplExprF (rhs_se `setInScopeFromE` env) rhs (StrictBind { sc_bndr = bndr, sc_body = body, sc_from = from_what , sc_env = env, sc_cont = cont, sc_dup = NoDup }) | otherwise -- Evaluate RHS lazily = do { (env1, bndr1) <- simplNonRecBndr env bndr ; (env2, bndr2) <- addBndrRules env1 bndr bndr1 (BC_Let NotTopLevel NonRecursive) ; (floats1, env3) <- simplLazyBind NotTopLevel NonRecursive (bndr,env) (bndr2,env2) (rhs,rhs_se) ; (floats2, expr') <- simplNonRecBody env3 from_what body cont ; return (floats1 `addFloats` floats2, expr') } where is_strict_bind = case from_what of FromBeta Unlifted -> True -- If we are coming from a beta-reduction (FromBeta) we must -- establish the let-can-float invariant, so go via StrictBind -- If not, the invariant holds already, and it's optional. -- (FromBeta Lifted) or FromLet: look at the demand info _ -> seCaseCase env && isStrUsedDmd (idDemandInfo bndr) ------------------ simplRecE :: SimplEnv -> [(InId, InExpr)] -> InExpr -> SimplCont -> SimplM (SimplFloats, OutExpr) -- simplRecE is used for -- * non-top-level recursive lets in expressions -- Precondition: not a join-point binding simplRecE env pairs body cont = do { let bndrs = map fst pairs ; massert (all (not . isJoinId) bndrs) ; env1 <- simplRecBndrs env bndrs -- NB: bndrs' don't have unfoldings or rules -- We add them as we go down ; (floats1, env2) <- simplRecBind env1 (BC_Let NotTopLevel Recursive) pairs ; (floats2, expr') <- simplExprF env2 body cont ; return (floats1 `addFloats` floats2, expr') } {- Note [Dark corner with representation polymorphism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In `simplNonRecE`, the call to `needsCaseBinding` or to `isStrictId` will fail if the binder does not have a fixed runtime representation, e.g. if it is of kind (TYPE r). 
So we are careful to call `isStrictId` on the OutId, not the InId, in case we have ((\(r::RuntimeRep) \(x::TYPE r). blah) Lifted arg) That will lead to `simplNonRecE env (x::TYPE r) arg`, and we can't tell if x is lifted or unlifted from that. We only get such redexes from the compulsory inlining of a wired-in, representation-polymorphic function like `rightSection` (see GHC.Types.Id.Make). Mind you, SimpleOpt should probably have inlined such compulsory inlinings already, but belt and braces does no harm. Plus, it turns out that GHC.Driver.Main.hscCompileCoreExpr calls the Simplifier without first calling SimpleOpt, so anything involving GHCi or TH and operator sections will fall over if we don't take care here. Note [Avoiding simplifying repeatedly] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One way in which we can get exponential behaviour is if we simplify a big expression, and then re-simplify it -- and then this happens in a deeply-nested way. So we must be jolly careful about re-simplifying an expression (#13379). Example: f BIG, where f has a RULE Then * We simplify BIG before trying the rule; but the rule does not fire (forcing this simplification is why we have the RULE in this example) * We inline f = \x. g x, in `simpl_lam` * So if `simpl_lam` did preInlineUnconditionally we get (g BIG) * Now if g has a RULE we'll simplify BIG again, and this whole thing can iterate. * However, if `f` did not have a RULE, so that BIG has /not/ already been simplified, we /want/ to do preInlineUnconditionally in simpl_lam. So we go to some effort to avoid repeatedly simplifying the same thing: * ApplyToVal has a (sc_dup :: DupFlag) field which records if the argument has been evaluated. * simplArg checks this flag to avoid re-simplifying. 
* simpl_lam has: - a case for (isSimplified dup), which goes via completeBindX, and - a case for an un-simplified argument, which tries preInlineUnconditionally * We go to some efforts to avoid unnecessarily simplifying ApplyToVal, in at least two places - In simplCast/addCoerce, where we check for isReflCo - In rebuildCall we avoid simplifying arguments before we have to (see Note [Trying rewrite rules]) All that said /postInlineUnconditionally/ (called in `completeBind`) does fire in the above (f BIG) situation. See Note [Post-inline for single-use things] in Simplify.Utils. This certainly risks repeated simplification, but in practice seems to be a small win. ************************************************************************ * * Join points * * ********************************************************************* -} {- Note [Rules and unfolding for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have simplExpr (join j x = rhs ) cont ( {- RULE j (p:ps) = blah -} ) ( {- StableUnfolding j = blah -} ) (in blah ) Then we will push 'cont' into the rhs of 'j'. But we should *also* push 'cont' into the RHS of * Any RULEs for j, e.g. generated by SpecConstr * Any stable unfolding for j, e.g. the result of an INLINE pragma Simplifying rules and stable-unfoldings happens a bit after simplifying the right-hand side, so we remember whether or not it is a join point, and what 'cont' is, in a value of type MaybeJoinCont #13900 was caused by forgetting to push 'cont' into the RHS of a SpecConstr-generated RULE for a join point. 
-}

simplNonRecJoinPoint :: SimplEnv -> InId -> InExpr
                     -> InExpr -> SimplCont
                     -> SimplM (SimplFloats, OutExpr)
-- Simplify a non-recursive join binding: push the continuation into
-- both the join RHS and the body.  See Note [Join points and case-of-case]
simplNonRecJoinPoint env bndr rhs body cont
  = assert (isJoinId bndr ) $
    wrapJoinCont env cont $ \ env cont ->
    do { -- We push join_cont into the join RHS and the body;
         -- and wrap wrap_cont around the whole thing
       ; let mult   = contHoleScaling cont
             res_ty = contResultType cont
       ; (env1, bndr1)    <- simplNonRecJoinBndr env bndr mult res_ty
       ; (env2, bndr2)    <- addBndrRules env1 bndr bndr1
                                          (BC_Join NonRecursive cont)
       ; (floats1, env3)  <- simplJoinBind NonRecursive cont
                                           (bndr,env) (bndr2,env2) (rhs,env)
       ; (floats2, body') <- simplExprF env3 body cont
       ; return (floats1 `addFloats` floats2, body') }

------------------
simplRecJoinPoint :: SimplEnv -> [(InId, InExpr)]
                  -> InExpr -> SimplCont
                  -> SimplM (SimplFloats, OutExpr)
-- As simplNonRecJoinPoint, but for a recursive group of join bindings
simplRecJoinPoint env pairs body cont
  = wrapJoinCont env cont $ \ env cont ->
    do { let bndrs  = map fst pairs
             mult   = contHoleScaling cont
             res_ty = contResultType cont
       ; env1 <- simplRecJoinBndrs env bndrs mult res_ty
               -- NB: bndrs' don't have unfoldings or rules
               -- We add them as we go down
       ; (floats1, env2)  <- simplRecBind env1 (BC_Join Recursive cont) pairs
       ; (floats2, body') <- simplExprF env2 body cont
       ; return (floats1 `addFloats` floats2, body') }

--------------------
wrapJoinCont :: SimplEnv -> SimplCont
             -> (SimplEnv -> SimplCont -> SimplM (SimplFloats, OutExpr))
             -> SimplM (SimplFloats, OutExpr)
-- Deal with making the continuation duplicable if necessary,
-- and with the no-case-of-case situation.
wrapJoinCont env cont thing_inside
  | contIsStop cont        -- Common case; no need for fancy footwork
  = thing_inside env cont

  | not (seCaseCase env)
    -- See Note [Join points with -fno-case-of-case]
  = do { (floats1, expr1) <- thing_inside env (mkBoringStop (contHoleType cont))
       ; let (floats2, expr2) = wrapJoinFloatsX floats1 expr1
       ; (floats3, expr3) <- rebuild (env `setInScopeFromF` floats2) expr2 cont
       ; return (floats2 `addFloats` floats3, expr3) }

  | otherwise
    -- Normal case; see Note [Join points and case-of-case]
  = do { (floats1, cont')  <- mkDupableCont env cont
       ; (floats2, result) <- thing_inside (env `setInScopeFromF` floats1) cont'
       ; return (floats1 `addFloats` floats2, result) }

--------------------
trimJoinCont :: Id           -- Used only in error message
             -> JoinPointHood
             -> SimplCont -> SimplCont
-- Drop outer context from join point invocation (jump)
-- See Note [Join points and case-of-case]

trimJoinCont _ NotJoinPoint cont
  = cont -- Not a jump
trimJoinCont var (JoinPoint arity) cont
  = trim arity cont
  where
    trim 0 cont@(Stop {})
      = cont
    trim 0 cont
      = mkBoringStop (contResultType cont)
    trim n cont@(ApplyToVal { sc_cont = k })
      = cont { sc_cont = trim (n-1) k }
    trim n cont@(ApplyToTy { sc_cont = k })
      = cont { sc_cont = trim (n-1) k } -- join arity counts types!
    trim _ cont
      = pprPanic "completeCall" $ ppr var $$ ppr cont

{- Note [Join points and case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we perform the case-of-case transform (or otherwise push continuations
inward), we want to treat join points specially. Since they're always
tail-called and we want to maintain this invariant, we can do this (for any
evaluation context E):

  E[join j = e
    in case ... of
         A -> jump j 1
         B -> jump j 2
         C -> f 3]

    -->

  join j = E[e]
  in case ... of
       A -> jump j 1
       B -> jump j 2
       C -> E[f 3]

As is evident from the example, there are two components to this behavior:

  1. When entering the RHS of a join point, copy the context inside.
  2. When a join point is invoked, discard the outer context.
We need to be very careful here to remain consistent---neither part is
optional!

We need to make the continuation E duplicable (since we are duplicating it)
with mkDupableCont.


Note [Join points with -fno-case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose case-of-case is switched off, and we are simplifying

    case (join j x = <j-rhs> in
          case y of
             A -> j 1
             B -> j 2
             C -> e) of <outer-alts>

Usually, we'd push the outer continuation (case . of <outer-alts>) into
both the RHS and the body of the join point j.  But since we aren't doing
case-of-case we may then end up with this totally bogus result

    join x = case <j-rhs> of <outer-alts> in
    case (case y of
             A -> j 1
             B -> j 2
             C -> e) of <outer-alts>

This would be OK in the language of the paper, but not in GHC: j is no
longer a join point.  We can only do the "push continuation into the RHS
of the join point j" if we also push the continuation right down to the
/jumps/ to j, so that it can evaporate there.  If we are doing
case-of-case, we'll get to

    join x = case <j-rhs> of <outer-alts> in
    case y of
      A -> j 1
      B -> j 2
      C -> case e of <outer-alts>

which is great.

Bottom line: if case-of-case is off, we must stop pushing the continuation
inwards altogether at any join point.  Instead simplify the
(join ... in ...) with a Stop continuation, and wrap the original
continuation around the outside.  Surprisingly tricky!


************************************************************************
*                                                                      *
                     Variables
*                                                                      *
************************************************************************

Note [zapSubstEnv]
~~~~~~~~~~~~~~~~~~
When simplifying something that has already been simplified, be sure to
zap the SubstEnv.  This is VITAL.  Consider
     let x = e in
     let y = \z -> ...x... in
     \ x -> ...y...

We'll clone the inner \x, adding x->x' in the id_subst Then when we
inline y, we must *not* replace x by x' in the inlined copy!!
Note [Fast path for data constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For applications of a data constructor worker, the full glory of
rebuildCall is a waste of effort;
* They never inline, obviously
* They have no rewrite rules
* They are not strict (see Note [Data-con worker strictness]
  in GHC.Core.DataCon)
So it's fine to zoom straight to `rebuild` which just rebuilds the
call in a very straightforward way.

Some programs have a /lot/ of data constructors in the source program
(compiler/perf/T9961 is an example), so this fast path can be very
valuable.
-}

simplVar :: SimplEnv -> InVar -> SimplM OutExpr
-- Look up an InVar in the environment
simplVar env var
  -- Why $! ? See Note [Bangs in the Simplifier]
  | isTyVar var = return $! Type $! (substTyVar env var)
  | isCoVar var = return $! Coercion $! (substCoVar env var)
  | otherwise
  = case substId env var of
      ContEx tvs cvs ids e -> let env' = setSubstEnv env tvs cvs ids
                              in simplExpr env' e
      DoneId var1          -> return (Var var1)
      DoneEx e _           -> return e

simplIdF :: SimplEnv -> InId -> SimplCont -> SimplM (SimplFloats, OutExpr)
-- Simplify an occurrence of an Id in function position
simplIdF env var cont
  | isDataConWorkId var         -- See Note [Fast path for data constructors]
  = rebuild env (Var var) cont
  | otherwise
  = case substId env var of
      ContEx tvs cvs ids e -> simplExprF env' e cont
        -- Don't trimJoinCont; haven't already simplified e,
        -- so the cont is not embodied in e
        where
          env' = setSubstEnv env tvs cvs ids

      DoneId var1 ->
        do { rule_base <- getSimplRules
           ; let cont' = trimJoinCont var1 (idJoinPointHood var1) cont
                 info  = mkArgInfo env rule_base var1 cont'
           ; rebuildCall env info cont' }

      DoneEx e mb_join -> simplExprF env' e cont'
        where
          cont' = trimJoinCont var mb_join cont
          env'  = zapSubstEnv env  -- See Note [zapSubstEnv]

---------------------------------------------------------
--      Dealing with a call site

rebuildCall :: SimplEnv -> ArgInfo -> SimplCont
            -> SimplM (SimplFloats, OutExpr)

---------- Bottoming applications --------------
rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_dmds = [] }) cont
  -- When we run out of strictness args, it means
  -- that the call is definitely bottom; see GHC.Core.Opt.Simplify.Utils.mkArgInfo
  -- Then we want to discard the entire strict continuation.  E.g.
  --    * case (error "hello") of { ... }
  --    * (error "Hello") arg
  --    * f (error "Hello") where f is strict
  --    etc
  -- Then, especially in the first of these cases, we'd like to discard
  -- the continuation, leaving just the bottoming expression.  But the
  -- type might not be right, so we may have to add a coerce.
  | not (contIsTrivial cont)     -- Only do this if there is a non-trivial
                                 -- continuation to discard, else we do it
                                 -- again and again!
  = seqType cont_ty `seq`        -- See Note [Avoiding space leaks in OutType]
    return (emptyFloats env, castBottomExpr res cont_ty)
  where
    res     = argInfoExpr fun rev_args
    cont_ty = contResultType cont

---------- Try inlining, if ai_rewrite = TryInlining --------
-- In the TryInlining case we try inlining immediately, before simplifying
-- any (more) arguments. Why?  See Note [Rewrite rules and inlining].
--
-- If there are rewrite rules we'll skip this case until we have
-- simplified enough args to satisfy nr_wanted==0 in the TryRules case below
-- Then we'll try the rules, and if that fails, we'll do TryInlining
rebuildCall env info@(ArgInfo { ai_fun = fun, ai_args = rev_args
                              , ai_rewrite = TryInlining }) cont
  = do { logger <- getLogger
       ; let full_cont = pushSimplifiedRevArgs env rev_args cont
       ; mb_inline <- tryInlining env logger fun full_cont
       ; case mb_inline of
            Just expr -> do { checkedTick (UnfoldingDone fun)
                            ; let env1 = zapSubstEnv env
                            ; simplExprF env1 expr full_cont }
            Nothing -> rebuildCall env (info { ai_rewrite = TryNothing }) cont
       }

---------- Try rewrite RULES, if ai_rewrite = TryRules --------------
-- See Note [Rewrite rules and inlining]
-- See also Note [Trying rewrite rules]
rebuildCall env info@(ArgInfo { ai_fun = fun, ai_args = rev_args
                              , ai_rewrite = TryRules nr_wanted rules }) cont
  | nr_wanted == 0 || no_more_args
  = -- We've accumulated a simplified call in <fun,rev_args>
    -- so try rewrite rules; see Note [RULES apply to simplified arguments]
    -- See also Note [Rules for recursive functions]
    do { mb_match <- tryRules env rules fun (reverse rev_args) cont
       ; case mb_match of
             Just (env', rhs, cont') -> simplExprF env' rhs cont'
             Nothing -> rebuildCall env (info { ai_rewrite = TryInlining }) cont }
  where
    -- If we have run out of arguments, just try the rules; there might
    -- be some with lower arity.  Casts get in the way -- they aren't
    -- allowed on rule LHSs
    no_more_args = case cont of
                      ApplyToTy  {} -> False
                      ApplyToVal {} -> False
                      _             -> True

---------- Simplify type applications and casts --------------
rebuildCall env info (CastIt { sc_co = co, sc_opt = opt, sc_cont = cont })
  = rebuildCall env (addCastTo info co') cont
  where
    co' = optOutCoercion env co opt

rebuildCall env info (ApplyToTy { sc_arg_ty = arg_ty, sc_hole_ty = hole_ty
                                , sc_cont = cont })
  = rebuildCall env (addTyArgTo info arg_ty hole_ty) cont

---------- The runRW# rule.  Do this after absorbing all arguments ------
-- See Note [Simplification of runRW#] in GHC.CoreToSTG.Prep.
--
-- runRW# :: forall (r :: RuntimeRep) (o :: TYPE r). (State# RealWorld -> o) -> o
-- K[ runRW# rr ty body ]   -->   runRW rr' ty' (\s. K[ body s ])
rebuildCall env (ArgInfo { ai_fun = fun_id, ai_args = rev_args })
            (ApplyToVal { sc_arg = arg, sc_env = arg_se
                        , sc_cont = cont, sc_hole_ty = fun_ty })
  | fun_id `hasKey` runRWKey
  , [ TyArg { as_arg_ty = hole_ty }, TyArg {} ] <- rev_args
  -- Do this even if (contIsStop cont), or if seCaseCase is off.
  -- See Note [No eta-expansion in runRW#]
  = do { let arg_env = arg_se `setInScopeFromE` env
             overall_res_ty = contResultType cont
             -- hole_ty is the type of the current runRW# application
             (outer_cont, new_runrw_res_ty, inner_cont)
                | seCaseCase env = (mkBoringStop overall_res_ty, overall_res_ty, cont)
                | otherwise      = (cont, hole_ty, mkBoringStop hole_ty)
             -- Only when case-of-case is on. See GHC.Driver.Config.Core.Opt.Simplify
             --    Note [Case-of-case and full laziness]

       -- If the argument is a literal lambda already, take a short cut
       -- This isn't just efficiency:
       --    * If we don't do this we get a beta-redex every time, so the
       --      simplifier keeps doing more iterations.
       --    * Even more important: see Note [No eta-expansion in runRW#]
       ; arg' <- case arg of
           Lam s body -> do { (env', s') <- simplBinder arg_env s
                            ; body' <- simplExprC env' body inner_cont
                            ; return (Lam s' body') }
                         -- Important: do not try to eta-expand this lambda
                         -- See Note [No eta-expansion in runRW#]
           _ -> do { s' <- newId (fsLit "s") ManyTy realWorldStatePrimTy
                   ; let (m,_,_) = splitFunTy fun_ty
                         env'  = arg_env `addNewInScopeIds` [s']
                         cont' = ApplyToVal { sc_dup = Simplified, sc_arg = Var s'
                                            , sc_env = env', sc_cont = inner_cont
                                            , sc_hole_ty = mkVisFunTy m realWorldStatePrimTy
                                                                      new_runrw_res_ty }
                                -- cont' applies to s', then K
                   ; body' <- simplExprC env' arg cont'
                   ; return (Lam s' body') }

       ; let rr'   = getRuntimeRep new_runrw_res_ty
             call' = mkApps (Var fun_id) [mkTyArg rr', mkTyArg new_runrw_res_ty, arg']
       ; rebuild env call' outer_cont }

---------- Simplify value arguments --------------------
rebuildCall env fun_info
            (ApplyToVal { sc_arg = arg, sc_env = arg_se
                        , sc_dup = dup_flag, sc_hole_ty = fun_ty
                        , sc_cont = cont })
  -- Argument is already simplified
  | isSimplified dup_flag     -- See Note [Avoid redundant simplification]
  = rebuildCall env (addValArgTo fun_info arg fun_ty) cont

  -- Strict arguments
  | isStrictArgInfo fun_info
  , seCaseCase env
    -- Only when case-of-case is on. See GHC.Driver.Config.Core.Opt.Simplify
    --    Note [Case-of-case and full laziness]
  = -- pprTrace "Strict Arg" (ppr arg $$ ppr (seIdSubst env) $$ ppr (seInScope env)) $
    simplExprF (arg_se `setInScopeFromE` env) arg
               (StrictArg { sc_fun = fun_info, sc_fun_ty = fun_ty
                          , sc_dup = Simplified
                          , sc_cont = cont })
                -- Note [Shadowing in the Simplifier]

  -- Lazy arguments
  | otherwise
        -- DO NOT float anything outside, hence simplExprC
        -- There is no benefit (unlike in a let-binding), and we'd
        -- have to be very careful about bogus strictness through
        -- floating a demanded let.
  = do  { (_, _, arg') <- simplLazyArg env dup_flag fun_ty (Just fun_info) arg_se arg
        ; rebuildCall env (addValArgTo fun_info arg' fun_ty) cont }

---------- No further useful info, revert to generic rebuild ------------
rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args }) cont
  = rebuild env (argInfoExpr fun rev_args) cont

-----------------------------------
tryInlining :: SimplEnv -> Logger -> OutId -> SimplCont -> SimplM (Maybe OutExpr)
-- Attempt a call-site inlining of var; Nothing if it does not fire
tryInlining env logger var cont
  | Just expr <- callSiteInline env logger var lone_variable arg_infos
                                interesting_cont
  = do { dump_inline expr cont
       ; return (Just expr) }

  | otherwise
  = return Nothing

  where
    (lone_variable, arg_infos, call_cont) = contArgs cont
    interesting_cont = interestingCallContext env call_cont

    log_inlining doc
      = liftIO $ logDumpFile logger (mkDumpStyle alwaysQualify)
           Opt_D_dump_inlinings
           "" FormatText doc

    dump_inline unfolding cont
      | not (logHasDumpFlag logger Opt_D_dump_inlinings) = return ()
      | not (logHasDumpFlag logger Opt_D_verbose_core2core)
      = when (isExternalName (idName var)) $
            log_inlining $
                sep [text "Inlining done:", nest 4 (ppr var)]
      | otherwise
      = log_inlining $
           sep [text "Inlining done: " <> ppr var,
                nest 4 (vcat [text "Inlined fn: " <+> nest 2 (ppr unfolding),
                              text "Cont:  " <+> ppr cont])]

{- Note [Trying rewrite rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider an application (f e1 e2 e3) where the e1,e2,e3 are not yet
simplified.  We want to simplify enough arguments to allow the rules
to apply, but it's more efficient to avoid simplifying e2,e3 if e1 alone
is sufficient.  Example: class ops
   (+) dNumInt e2 e3
If we rewrite ((+) dNumInt) to plusInt, we can take advantage of the
latter's strictness when simplifying e2, e3.  Moreover, suppose we have
  RULE  f Int = \x. x True

Then given (f Int e1) we rewrite to
   (\x. x True) e1
without simplifying e1.  Now we can inline x into its unique call site,
and absorb the True into it all in the same pass.
If we simplified e1 first, we couldn't do that; see
Note [Avoiding simplifying repeatedly].

So we try to apply rules if either
  (a) no_more_args: we've run out of arguments that the rules can "see"
  (b) nr_wanted: none of the rules wants any more arguments

Note [RULES apply to simplified arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's very desirable to try RULES once the arguments have been simplified,
because doing so ensures that rule cascades work in one pass.  Consider
   {-# RULES g (h x) = k x
             f (k x) = x #-}
   ...f (g (h x))...
Then we want to rewrite (g (h x)) to (k x) and only then try f's rules.  If
we match f's rules against the un-simplified RHS, it won't match.  This
makes a particularly big difference when superclass selectors are involved:
        op ($p1 ($p2 (df d)))
We want all this to unravel in one sweep.

Note [Rewrite rules and inlining]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general we try to arrange that inlining is disabled (via a pragma) if
a rewrite rule should apply, so that the rule has a decent chance to fire
before we inline the function.

But it turns out that (especially when type-class specialisation or
SpecConstr is involved) it is very helpful for the rewrite rule to
"win" over inlining when both are active at once: see #21851, #22097.

The simplifier arranges to do this, as follows. In effect, the ai_rewrite
field of the ArgInfo record is the state of a little state-machine:

* mkArgInfo sets the ai_rewrite field to TryRules if there are any rewrite
  rules available for that function.

* rebuildCall simplifies arguments until enough are simplified to match the
  rule with greatest arity.  See Note [RULES apply to simplified arguments]
  and the first field of `TryRules`.

  But no more!  As soon as we have simplified enough arguments to satisfy
  the maximum-arity rules, we try the rules; see
  Note [Trying rewrite rules].
* Once we have tried rules (or immediately if there are no rules) set ai_rewrite to TryInlining, and the Simplifier will try to inline the function. We want to try this immediately (before simplifying any (more) arguments). Why? Consider f BIG where f = \x{OneOcc}. ...x... If we inline `f` before simplifying `BIG` well use preInlineUnconditionally, and we'll simplify BIG once, at x's occurrence, rather than twice. * GHC.Core.Opt.Simplify.Utils. mkRewriteCall: if there are no rules, and no unfolding, we can skip both TryRules and TryInlining, which saves work. Note [Avoid redundant simplification] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Because RULES apply to simplified arguments, there's a danger of repeatedly simplifying already-simplified arguments. An important example is that of (>>=) d e1 e2 Here e1, e2 are simplified before the rule is applied, but don't really participate in the rule firing. So we mark them as Simplified to avoid re-simplifying them. Note [Shadowing in the Simplifier] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This part of the simplifier may return an expression that has shadowing. (See Note [Shadowing in Core] in GHC.Core.hs.) Consider f (...(\a -> e)...) (case y of (a,b) -> e') where f is strict in its second arg If we simplify the innermost one first we get (...(\a -> e)...) Simplifying the second arg makes us float the case out, so we end up with case y of (a,b) -> f (...(\a -> e)...) e' So the output does not have the no-shadowing invariant. However, there is no danger of getting name-capture, because when the first arg was simplified we used an in-scope set that at least mentioned all the variables free in its static environment, and that is enough. We can't just do innermost first, or we'd end up with a dual problem: case x of (a,b) -> f e (...(\a -> e')...) I spent hours trying to recover the no-shadowing invariant, but I just could not think of an elegant way to do it. The simplifier is already knee-deep in continuations. 
We have to keep the right in-scope set around; AND we have to get the
effect that finding (error "foo") in a strict arg position will discard
the entire application and replace it with (error "foo").  Getting all this
at once is TOO HARD!

See also Note [Shadowing in prepareAlts] in GHC.Core.Opt.Simplify.Utils.

Note [No eta-expansion in runRW#]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we see `runRW# (\s. blah)` we must not attempt to eta-expand that
lambda.  Why not?  Because
* `blah` can mention join points bound outside the runRW#
* eta-expansion uses arityType, and
* `arityType` cannot cope with free join Ids:

So the simplifier spots the literal lambda, and simplifies inside it.
It's a very special lambda, because it is the one the OccAnal spots and
allows join points bound /outside/ to be called /inside/.

See Note [No free join points in arityType] in GHC.Core.Opt.Arity

************************************************************************
*                                                                      *
                Rewrite rules
*                                                                      *
************************************************************************
-}

-- | Try to fire a RULE for 'fn' applied to the already-simplified 'args'.
-- Returns @Just (env, rhs, cont)@ when a rule fires: 'rhs' is the
-- occurrence-analysed rule RHS and 'cont' carries the arguments the rule
-- did not consume.  Returns 'Nothing' if no rule matches.
tryRules :: SimplEnv -> [CoreRule]
         -> Id
         -> [ArgSpec]   -- In /normal, forward/ order
         -> SimplCont
         -> SimplM (Maybe (SimplEnv, CoreExpr, SimplCont))

tryRules env rules fn args call_cont
  | null rules
  = return Nothing

  | Just (rule, rule_rhs) <- lookupRule ropts (getUnfoldingInRuleMatch env)
                                        (activeRule (seMode env)) fn
                                        (argInfoAppArgs args) rules
  -- Fire a rule for the function
  = do { logger <- getLogger
       ; checkedTick (RuleFired (ruleName rule))
       ; let cont' = pushSimplifiedArgs zapped_env
                                        (drop (ruleArity rule) args)
                                        call_cont
                     -- (ruleArity rule) says how
                     -- many args the rule consumed

             occ_anald_rhs = occurAnalyseExpr rule_rhs
                 -- See Note [Occurrence-analyse after rule firing]
       ; dump logger rule rule_rhs
       ; return (Just (zapped_env, occ_anald_rhs, cont')) }
            -- The occ_anald_rhs and cont' are all Out things
            -- hence zapping the environment

  | otherwise  -- No rule fires
  = do { logger <- getLogger
       ; nodump logger  -- This ensures that an empty file is written
       ; return Nothing }

  where
    ropts      = seRuleOpts env
    zapped_env = zapSubstEnv env  -- See Note [zapSubstEnv]

    -- Render the module that defined the rule (BUILTIN for built-in rules)
    printRuleModule rule
      = parens (maybe (text "BUILTIN")
                      (pprModuleName . moduleName)
                      (ruleModule rule))

    -- Dump output for a fired rule: verbose under -ddump-rule-rewrites,
    -- one line per firing under -ddump-rule-firings
    dump logger rule rule_rhs
      | logHasDumpFlag logger Opt_D_dump_rule_rewrites
      = log_rule Opt_D_dump_rule_rewrites "Rule fired" $ vcat
          [ text "Rule:" <+> ftext (ruleName rule)
          , text "Module:" <+> printRuleModule rule
          , text "Before:" <+> hang (ppr fn) 2 (sep (map ppr args))
          , text "After: " <+> hang (pprCoreExpr rule_rhs) 2
                               (sep $ map ppr $ drop (ruleArity rule) args)
          , text "Cont: " <+> ppr call_cont ]

      | logHasDumpFlag logger Opt_D_dump_rule_firings
      = log_rule Opt_D_dump_rule_firings "Rule fired:" $
          ftext (ruleName rule) <+> printRuleModule rule

      | otherwise
      = return ()

    -- No rule fired: still touch the dump file(s) so an empty file appears
    nodump logger
      | logHasDumpFlag logger Opt_D_dump_rule_rewrites
      = liftIO $ touchDumpFile logger Opt_D_dump_rule_rewrites

      | logHasDumpFlag logger Opt_D_dump_rule_firings
      = liftIO $ touchDumpFile logger Opt_D_dump_rule_firings

      | otherwise
      = return ()

    log_rule flag hdr details
      = do { logger <- getLogger
           ; liftIO $ logDumpFile logger (mkDumpStyle alwaysQualify) flag "" FormatText
                    $ sep [text hdr, nest 4 details] }

-- | Try the user-defined RULES for 'seq' on a case expression
-- @case scrut of _ -> rhs@, viewed as the application @seq scrut rhs@.
trySeqRules :: SimplEnv
            -> OutExpr -> InExpr   -- Scrutinee and RHS
            -> SimplCont
            -> SimplM (Maybe (SimplEnv, CoreExpr, SimplCont))
-- See Note [User-defined RULES for seq]
trySeqRules in_env scrut rhs cont
  = do { rule_base <- getSimplRules
       ; tryRules in_env (getRules rule_base seqId) seqId out_args rule_cont }
  where
    no_cast_scrut = drop_casts scrut
    scrut_ty  = exprType no_cast_scrut
    seq_id_ty = idType seqId
                  -- forall r a (b::TYPE r). a -> b -> b
    res1_ty   = piResultTy seq_id_ty rhs_rep
                  -- forall a (b::TYPE rhs_rep). a -> b -> b
    res2_ty   = piResultTy res1_ty scrut_ty
                  -- forall (b::TYPE rhs_rep). scrut_ty -> b -> b
    res3_ty   = piResultTy res2_ty rhs_ty
                  -- scrut_ty -> rhs_ty -> rhs_ty
    res4_ty   = funResultTy res3_ty
                  -- rhs_ty -> rhs_ty
    rhs_ty    = substTy in_env (exprType rhs)
    rhs_rep   = getRuntimeRep rhs_ty
    out_args  = [ TyArg { as_arg_ty  = rhs_rep
                        , as_hole_ty = seq_id_ty }
                , TyArg { as_arg_ty  = scrut_ty
                        , as_hole_ty = res1_ty }
                , TyArg { as_arg_ty  = rhs_ty
                        , as_hole_ty = res2_ty }
                , ValArg { as_arg     = no_cast_scrut
                         , as_dmd     = seqDmd
                         , as_hole_ty = res3_ty } ]
    rule_cont = ApplyToVal { sc_dup = NoDup, sc_arg = rhs
                           , sc_env = in_env, sc_cont = cont
                           , sc_hole_ty = res4_ty }

    -- Lazily evaluated, so we don't do most of this
    drop_casts (Cast e _) = drop_casts e
    drop_casts e          = e

{- Note [User-defined RULES for seq]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given
   case (scrut |> co) of _ -> rhs
look for rules that match the expression
   seq @t1 @t2 scrut
where
   scrut :: t1
   rhs   :: t2

If you find a match, rewrite it, and apply to 'rhs'.

Notice that we can simply drop casts on the fly here, which
makes it more likely that a rule will match.

See Note [User-defined RULES for seq] in GHC.Types.Id.Make.

Note [Occurrence-analyse after rule firing]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After firing a rule, we occurrence-analyse the instantiated RHS before
simplifying it.  Usually this doesn't make much difference, but it can be
huge.  Here's an example (simplCore/should_compile/T7785)

  map f (map f (map f xs)

= -- Use build/fold form of map, twice
  map f (build (\cn. foldr (mapFB c f) n
                       (build (\cn. foldr (mapFB c f) n xs))))

= -- Apply fold/build rule
  map f (build (\cn. (\cn. foldr (mapFB c f) n xs) (mapFB c f) n))

= -- Beta-reduce
  -- Alas we have no occurrence-analysed, so we don't know
  -- that c is used exactly once
  map f (build (\cn. let c1 = mapFB c f in
                     foldr (mapFB c1 f) n xs))

= -- Use mapFB rule:   mapFB (mapFB c f) g = mapFB c (f.g)
  -- We can do this because (mapFB c n) is a PAP and hence expandable
  map f (build (\cn.
let c1 = mapFB c n in foldr (mapFB c (f.f)) n x)) This is not too bad. But now do the same with the outer map, and we get another use of mapFB, and t can interact with /both/ remaining mapFB calls in the above expression. This is stupid because actually that 'c1' binding is dead. The outer map introduces another c2. If there is a deep stack of maps we get lots of dead bindings, and lots of redundant work as we repeatedly simplify the result of firing rules. The easy thing to do is simply to occurrence analyse the result of the rule firing. Note that this occ-anals not only the RHS of the rule, but also the function arguments, which by now are OutExprs. E.g. RULE f (g x) = x+1 Call f (g BIG) --> (\x. x+1) BIG The rule binders are lambda-bound and applied to the OutExpr arguments (here BIG) which lack all internal occurrence info. Is this inefficient? Not really: we are about to walk over the result of the rule firing to simplify it, so occurrence analysis is at most a constant factor. Note, however, that the rule RHS is /already/ occ-analysed; see Note [OccInfo in unfoldings and rules] in GHC.Core. There is something unsatisfactory about doing it twice; but the rule RHS is usually very small, and this is simple. Note [Rules for recursive functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You might think that we shouldn't apply rules for a loop breaker: doing so might give rise to an infinite loop, because a RULE is rather like an extra equation for the function: RULE: f (g x) y = x+y Eqn: f a y = a-y But it's too drastic to disable rules for loop breakers. 
Even the foldr/build rule would be disabled, because foldr is recursive, and hence a loop breaker: foldr k z (build g) = g k z So it's up to the programmer: rules can cause divergence ************************************************************************ * * Rebuilding a case expression * * ************************************************************************ Note [Case elimination] ~~~~~~~~~~~~~~~~~~~~~~~ The case-elimination transformation discards redundant case expressions. Start with a simple situation: case x# of ===> let y# = x# in e y# -> e (when x#, y# are of primitive type, of course). We can't (in general) do this for algebraic cases, because we might turn bottom into non-bottom! The code in GHC.Core.Opt.Simplify.Utils.prepareAlts has the effect of generalise this idea to look for a case where we're scrutinising a variable, and we know that only the default case can match. For example: case x of 0# -> ... DEFAULT -> ...(case x of 0# -> ... DEFAULT -> ...) ... Here the inner case is first trimmed to have only one alternative, the DEFAULT, after which it's an instance of the previous case. This really only shows up in eliminating error-checking code. Note that GHC.Core.Opt.Simplify.Utils.mkCase combines identical RHSs. So case e of ===> case e of DEFAULT -> r True -> r False -> r Now again the case may be eliminated by the CaseElim transformation. This includes things like (==# a# b#)::Bool so that we simplify case ==# a# b# of { True -> x; False -> x } to just x This particular example shows up in default methods for comparison operations (e.g. in (>=) for Int.Int32) Note [Case to let transformation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a case over a lifted type has a single alternative, and is being used as a strict 'let' (all isDeadBinder bndrs), we may want to do this transformation: case e of r ===> let r = e in ...r... _ -> ...r... 
We treat the unlifted and lifted cases separately:

* Unlifted case: 'e' satisfies exprOkForSpeculation (ok-for-spec is needed to
  satisfy the let-can-float invariant).  This turns
      case a +# b of r -> ...r...
  into
      let r = a +# b in ...r...
  and thence
      .....(a +# b)....

  However, if we have
      case indexArray# a i of r -> ...r...
  we might like to do the same, and inline the (indexArray# a i).
  But indexArray# is not okForSpeculation, so we don't build a let in
  rebuildCase (lest it get floated *out*), so the inlining doesn't happen
  either.  Annoying.

* Lifted case: we need to be sure that the expression is already evaluated
  (exprIsHNF).  If it's not already evaluated
      - we risk losing exceptions, divergence or user-specified thunk-forcing
      - even if 'e' is guaranteed to converge, we don't want to create a
        thunk (call by need) instead of evaluating it right away
        (call by value)

  However, we can turn the case into a /strict/ let if the 'r' is used
  strictly in the body.  Then we won't lose divergence; and we won't build
  a thunk because the let is strict.
  See also Note [Case-to-let for strictly-used binders]

Note [Case-to-let for strictly-used binders]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have this:
   case <scrut> of r { _ -> ..r.. }
where 'r' is used strictly in (..r..), we /could/ safely transform to
   let r = <scrut> in ...r...

As a special case, we have a plain `seq` like
   case r of r1 { _ -> ...r1... }
where `r` is used strictly, we /could/ simply drop the `case` to get
   ...r....

HOWEVER, there are some serious downsides to this transformation, so
GHC doesn't do it any longer (#24251):

* Suppose the Simplifier sees
      case x of y* { __DEFAULT ->
      let z = case y of { __DEFAULT -> expr } in
      z+1 }
  The "y*" means "y is used strictly in its scope".  Now we may:
    - Eliminate the inner case because `y` is evaluated.
  Now the demand-info on `y` is not right, because `y` is no longer used
  strictly in its scope.  But it is hard to spot that without doing a new
  demand analysis.
So there is a danger that we will subsequently:
    - Eliminate the outer case because `y` is used strictly
  Yikes!  We can't eliminate both!

* It introduces space leaks (#24251).  Consider
      go 0 where go x = x `seq` go (x + 1)
  It is an infinite loop, true, but it should not leak space.  Yet if we
  drop the `seq`, it will.  Another great example is #21741.

* Dropping the outer `case` can change the error behaviour.  For example,
  we might transform
      case x of { _ -> error "bad" }   -->   error "bad"
  which might be puzzling if 'x' is currently lambda-bound, but later gets
  let-bound to (error "good").  That is OK according to the paper "A semantics
  for imprecise exceptions", but see #8900 for an example where the loss of
  this transformation bit us in practice.

* If we have (case e of x -> f x), where `f` is strict, then it looks as if
  `x` is strictly used, and we could soundly transform to
      let x = e in f x
  But if f's strictness info got worse (which can happen in obscure cases;
  see #21392) then we might have turned a non-thunk into a thunk!  Bad.

Lacking this "drop-strictly-used-seq" transformation means we can end up with
some redundant-looking evals.  For example, consider

  f x y = case x of DEFAULT ->  -- A redundant-looking eval
          case y of
            True  -> case x of { Nothing -> False; Just z -> z }
            False -> case x of { Nothing -> True;  Just z -> z }

That outer eval will be retained right through to code generation.  But,
perhaps surprisingly, that is probably a /good/ thing:

  Key point: those inner (case x) expressions will be compiled to a simple
  'if', because the code generator can see that `x` is, at those points,
  evaluated and properly tagged.

If we dropped the outer eval, both the inner (case x) expressions would need
to do a proper eval, pushing a return address, with an info table.  See the
example in #15631 where, in the Description, the (case ys) will be a simple
multi-way jump.
In fact (#24251), when I stopped GHC implementing the drop-strictly-used-seqs transformation, binary sizes fell by 1%, and a few programs actually allocated less and ran faster. A case in point is nofib/imaginary/digits-of-e2. (I'm not sure exactly why it improves so much, though.) Slightly related: Note [Empty case alternatives] in GHC.Core. Historical notes: There have been various earlier versions of this patch: * By Sept 18 the code looked like this: || scrut_is_demanded_var scrut scrut_is_demanded_var :: CoreExpr -> Bool scrut_is_demanded_var (Cast s _) = scrut_is_demanded_var s scrut_is_demanded_var (Var _) = isStrUsedDmd (idDemandInfo case_bndr) scrut_is_demanded_var _ = False This only fired if the scrutinee was a /variable/, which seems an unnecessary restriction. So in #15631 I relaxed it to allow arbitrary scrutinees. Less code, less to explain -- but the change had 0.00% effect on nofib. * Previously, in Jan 13 the code looked like this: || case_bndr_evald_next rhs case_bndr_evald_next :: CoreExpr -> Bool -- See Note [Case binder next] case_bndr_evald_next (Var v) = v == case_bndr case_bndr_evald_next (Cast e _) = case_bndr_evald_next e case_bndr_evald_next (App e _) = case_bndr_evald_next e case_bndr_evald_next (Case e _ _ _) = case_bndr_evald_next e case_bndr_evald_next _ = False This patch was part of fixing #7542. See also Note [Eta reduction soundness], criterion (E) in GHC.Core.Utils.) Further notes about case elimination ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider: test :: Integer -> IO () test = print Turns out that this compiles to: Print.test = \ eta :: Integer eta1 :: Void# -> case PrelNum.< eta PrelNum.zeroInteger of wild { __DEFAULT -> case hPutStr stdout (PrelNum.jtos eta ($w[] @ Char)) eta1 of wild1 { (# new_s, a4 #) -> PrelIO.lvl23 new_s }} Notice the strange '<' which has no effect at all. This is a funny one. It started like this: f x y = if x < 0 then jtos x else if y==0 then "" else jtos x At a particular call site we have (f v 1). 
So we inline to get if v < 0 then jtos x else if 1==0 then "" else jtos x Now simplify the 1==0 conditional: if v<0 then jtos v else jtos v Now common-up the two branches of the case: case (v<0) of DEFAULT -> jtos v Why don't we drop the case? Because it's strict in v. It's technically wrong to drop even unnecessary evaluations, and in practice they may be a result of 'seq' so we *definitely* don't want to drop those. I don't really know how to improve this situation. Note [FloatBinds from constructor wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we have FloatBinds coming from the constructor wrapper (as in Note [exprIsConApp_maybe on data constructors with wrappers]), we cannot float past them. We'd need to float the FloatBind together with the simplify floats, unfortunately the simplifier doesn't have case-floats. The simplest thing we can do is to wrap all the floats here. The next iteration of the simplifier will take care of all these cases and lets. Given data T = MkT !Bool, this allows us to simplify case $WMkT b of { MkT x -> f x } to case b of { b' -> f b' }. We could try and be more clever (like maybe wfloats only contain let binders, so we could float them). But the need for the extra complication is not clear. Note [Do not duplicate constructor applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this (#20125) let x = (a,b) in ...(case x of x' -> blah)...x...x... We want that `case` to vanish (since `x` is bound to a data con) leaving let x = (a,b) in ...(let x'=x in blah)...x..x... In rebuildCase, `exprIsConApp_maybe` will succeed on the scrutinee `x`, since is bound to (a,b). But in eliminating the case, if the scrutinee is trivial, we want to bind the case-binder to the scrutinee, /not/ to the constructor application. Hence the case_bndr_rhs in rebuildCase. This applies equally to a non-DEFAULT case alternative, say let x = (a,b) in ...(case x of x' { (p,q) -> blah })... 
This variant is handled by bind_case_bndr in knownCon.  We want to bind
x' to x, and not to a duplicated (a,b)).
-}

---------------------------------------------------------
--      Eliminate the case if possible

rebuildCase, reallyRebuildCase
   :: SimplEnv
   -> OutExpr          -- Scrutinee
   -> InId             -- Case binder
   -> [InAlt]          -- Alternatives (increasing order)
   -> SimplCont
   -> SimplM (SimplFloats, OutExpr)

--------------------------------------------------
--      1. Eliminate the case if there's a known constructor
--------------------------------------------------

rebuildCase env scrut case_bndr alts cont
  | Lit lit <- scrut    -- No need for same treatment as constructors
                        -- because literals are inlined more vigorously
  , not (litIsLifted lit)
  = do { tick (KnownBranch case_bndr)
       ; case findAlt (LitAlt lit) alts of
           Nothing             -> missingAlt env case_bndr alts cont
           Just (Alt _ bs rhs) -> simple_rhs env [] scrut bs rhs }

  | Just (in_scope', wfloats, con, ty_args, other_args)
      <- exprIsConApp_maybe (getUnfoldingInRuleMatch env) scrut
        -- Works when the scrutinee is a variable with a known unfolding
        -- as well as when it's an explicit constructor application
  , let env0 = setInScopeSet env in_scope'
  = do { tick (KnownBranch case_bndr)
       ; let scaled_wfloats = map scale_float wfloats
             -- case_bndr_unf: see Note [Do not duplicate constructor applications]
             case_bndr_rhs | exprIsTrivial scrut = scrut
                           | otherwise           = con_app
             con_app = Var (dataConWorkId con) `mkTyApps` ty_args
                                               `mkApps`   other_args
       ; case findAlt (DataAlt con) alts of
           Nothing                   -> missingAlt env0 case_bndr alts cont
           Just (Alt DEFAULT bs rhs) -> simple_rhs env0 scaled_wfloats case_bndr_rhs bs rhs
           Just (Alt _       bs rhs) -> knownCon env0 scrut scaled_wfloats con ty_args
                                                 other_args case_bndr bs rhs cont }
  where
    -- Alternative with no binders: just bind the case binder to the
    -- scrutinee (or constructor application) and simplify the RHS
    simple_rhs env wfloats case_bndr_rhs bs rhs
      = assert (null bs) $
        do { (floats1, env') <- simplAuxBind "rebuildCase" env case_bndr case_bndr_rhs
             -- scrut is a constructor application,
             -- hence satisfies let-can-float invariant
           ; (floats2, expr') <- simplExprF env' rhs cont
           ; case wfloats of
               [] -> return (floats1 `addFloats` floats2, expr')
               _  -> return
                 -- See Note [FloatBinds from constructor wrappers]
                      ( emptyFloats env,
                        GHC.Core.Make.wrapFloats wfloats $
                        wrapFloats (floats1 `addFloats` floats2) expr' )}

    -- This scales case floats by the multiplicity of the continuation hole (see
    -- Note [Scaling in case-of-case]). Let floats are _not_ scaled, because
    -- they are aliases anyway.
    scale_float (GHC.Core.Make.FloatCase scrut case_bndr con vars)
      = let
          scale_id id = scaleVarBy holeScaling id
        in
        GHC.Core.Make.FloatCase scrut (scale_id case_bndr) con (map scale_id vars)
    scale_float f = f

    holeScaling = contHoleScaling cont `mkMultMul` idMult case_bndr
     -- We are in the following situation
     --   case[p] case[q] u of { D x -> C v } of { C x -> w }
     -- And we are producing case[??] u of { D x -> w[x\v]}
     --
     -- What should the multiplicity `??` be?  In order to preserve the usage
     -- of variables in `u`, it needs to be `pq`.
     --
     -- As an illustration, consider the following
     --   case[Many] case[1] of { C x -> C x } of { C x -> (x, x) }
     -- Where C :: A %1 -> T is linear
     -- If we were to produce a case[1], like the inner case, we would get
     --   case[1] of { C x -> (x, x) }
     -- Which is ill-typed with respect to linearity.  So it needs to be a
     -- case[Many].

--------------------------------------------------
--      2. Eliminate the case if scrutinee is evaluated
--------------------------------------------------

rebuildCase env scrut case_bndr alts@[Alt _ bndrs rhs] cont
  -- See if we can get rid of the case altogether
  -- See Note [Case elimination]
  -- mkCase made sure that if all the alternatives are equal,
  -- then there is now only one (DEFAULT) rhs

  -- 2a.  Dropping the case altogether, if
  --      a) it binds nothing (so it's really just a 'seq')
  --      b) evaluating the scrutinee has no side effects
  | is_plain_seq
  , exprOkToDiscard scrut
          -- The entire case is dead, so we can drop it
          -- if the scrutinee converges without having imperative
          -- side effects or raising a Haskell exception
  = simplExprF env rhs cont

  -- 2b.  Turn the case into a let, if
  --      a) it binds only the case-binder
  --      b) unlifted case: the scrutinee is ok-for-speculation
  --           lifted case: the scrutinee is in HNF (or will later be demanded)
  -- See Note [Case to let transformation]
  | all_dead_bndrs
  , doCaseToLet scrut case_bndr
  = do { tick (CaseElim case_bndr)
       ; (floats1, env')  <- simplAuxBind "rebuildCaseAlt1" env case_bndr scrut
       ; (floats2, expr') <- simplExprF env' rhs cont
       ; return (floats1 `addFloats` floats2, expr') }

  -- 2c. Try the seq rules if
  --      a) it binds only the case binder
  --      b) a rule for seq applies
  -- See Note [User-defined RULES for seq] in GHC.Types.Id.Make
  | is_plain_seq
  = do { mb_rule <- trySeqRules env scrut rhs cont
       ; case mb_rule of
           Just (env', rule_rhs, cont') -> simplExprF env' rule_rhs cont'
           Nothing                      -> reallyRebuildCase env scrut case_bndr alts cont }

  --------------------------------------------------
  --      3. Primop-related case-rules
  --------------------------------------------------
  | Just (scrut', case_bndr', alts') <- caseRules2 scrut case_bndr alts
  = reallyRebuildCase env scrut' case_bndr' alts' cont

  where
    all_dead_bndrs = all isDeadBinder bndrs  -- bndrs are [InId]
    is_plain_seq   = all_dead_bndrs && isDeadBinder case_bndr
                     -- Evaluation *only* for effect

rebuildCase env scrut case_bndr alts cont
  = reallyRebuildCase env scrut case_bndr alts cont


doCaseToLet :: OutExpr          -- Scrutinee
            -> InId             -- Case binder
            -> Bool
-- The situation is         case scrut of b { DEFAULT -> body }
-- Can we transform thus?   let { b = scrut } in body
doCaseToLet scrut case_bndr
  | isTyCoVar case_bndr    -- Respect GHC.Core
  = isTyCoArg scrut        -- Note [Core type and coercion invariant]

  | isUnliftedType (exprType scrut)
    -- We can call isUnliftedType here: scrutinees always have a fixed
    -- RuntimeRep (see FRRCase).
    -- Note however that we must check 'scrut' (which is an 'OutExpr') and not
    -- 'case_bndr' (which is an 'InId'): see
    -- Note [Dark corner with representation polymorphism].
    -- Using `exprType` is typically cheap because `scrut` is typically a
    -- variable.  We could instead use mightBeUnliftedType (idType case_bndr),
    -- but that hurts the brain more.  Consider that if this test ever turns
    -- out to be a perf problem (which seems unlikely).
  = exprOkForSpeculation scrut

  | otherwise  -- Scrut has a lifted type
  = exprIsHNF scrut
    -- || isStrUsedDmd (idDemandInfo case_bndr)
    -- We no longer look at the demand on the case binder
    -- See Note [Case-to-let for strictly-used binders]

--------------------------------------------------
--      3. Catch-all case
--------------------------------------------------

reallyRebuildCase env scrut case_bndr alts cont
  | not (seCaseCase env)
    -- Only when case-of-case is on.
    -- See GHC.Driver.Config.Core.Opt.Simplify
    --     Note [Case-of-case and full laziness]
  = do { case_expr <- simplAlts env scrut case_bndr alts
                                (mkBoringStop (contHoleType cont))
       ; rebuild env case_expr cont }

  | otherwise
  = do { (floats, env', cont') <- mkDupableCaseCont env alts cont
       ; case_expr <- simplAlts env' scrut
                                (scaleIdBy holeScaling case_bndr)
                                (scaleAltsBy holeScaling alts)
                                cont'
       ; return (floats, case_expr) }
  where
    holeScaling = contHoleScaling cont
    -- Note [Scaling in case-of-case]

{-
simplCaseBinder checks whether the scrutinee is a variable, v.  If so,
try to eliminate uses of v in the RHSs in favour of case_bndr; that way, there's
a chance that v will now only be used once, and hence inlined.
Historical note: we used to do the "case binder swap" in the Simplifier
so there were additional complications if the scrutinee was a variable.
Now the binder-swap stuff is done in the occurrence analyser; see
"GHC.Core.Opt.OccurAnal" Note [Binder swap].

Note [knownCon occ info]
~~~~~~~~~~~~~~~~~~~~~~~~
If the case binder is not dead, then neither are the pattern bound
variables:
        case <any> of x { (a,b) ->
        case x of { (p,q) -> p } }
Here (a,b) both look dead, but come alive after the inner case is eliminated.
The point is that we bring into the envt a binding
        let x = (a,b)
after the outer case, and that makes (a,b) alive.  At least we do unless
the case binder is guaranteed dead.

Note [DataAlt occ info]
~~~~~~~~~~~~~~~~~~~~~~~
Our general goal is to preserve dead-ness occ-info on the field binders of a
case alternative.  Why?  It's generally a good idea, but one specific reason
is to support (SEQ4) of Note [seq# magic].

But we have to be careful: even if the field binder is not mentioned in the
case alternative and thus annotated IAmDead by OccurAnal, it might "come back
to life" in one of two ways:

 1. If the case binder is alive, its unfolding might bring back the field
    binder, as in Note [knownCon occ info]:
      case blah of y { I# _ -> $wf (case y of I# v -> v) }
        ==>
      case blah of y { I# v -> $wf v }

 2. Even if the case binder appears to be dead, there is the scenario in
    Note [Add unfolding for scrutinee], in which the fields come back to
    live through the unfolding of variable scrutinee, as follows:
      join j = case x of Just v -> blah v; Nothing -> ... in
      case x of Just _ -> jump j; Nothing -> ...
        ==> { inline j, unfold x to Just v, simplify }
      join j = case x of Just v -> blah v; Nothing -> ... in
      case x of Just v -> blah v; Nothing -> ...

Thus, when we are simply reconstructing a case (the common case), and the
case binder is not dead, or the scrutinee is a variable, we zap the
occurrence info on DataAlt field binders.  See `adjustFieldOccInfo`.
Note [Improving seq]
~~~~~~~~~~~~~~~~~~~~
Consider
        type family F :: * -> *
        type instance F Int = Int

We'd like to transform
        case e of (x :: F Int) { DEFAULT -> rhs }
===>
        case e `cast` co of (x'::Int)
           I# x# -> let x = x' `cast` sym co
                    in rhs

so that 'rhs' can take advantage of the form of x'.  Notice that Note
[Case of cast] (in OccurAnal) may then apply to the result.

We'd also like to eliminate empty types (#13468).  So if

    data Void
    type instance F Bool = Void

then we'd like to transform
        case (x :: F Bool) of { _ -> error "urk" }
===>
        case (x |> co) of (x' :: Void) of {}

Nota Bene: we used to have a built-in rule for 'seq' that dropped
casts, so that
    case (x |> co) of { _ -> blah }
dropped the cast; in order to improve the chances of trySeqRules firing.
But that works in the /opposite/ direction to Note [Improving seq] so
there's a danger of flip/flopping.  Better to make trySeqRules
insensitive to the cast, which it now is.

The need for [Improving seq] showed up in Roman's experiments.  Example:
  foo :: F Int -> Int -> Int
  foo t n = t `seq` bar n
     where
       bar 0 = 0
       bar n = bar (n - case t of TI i -> i)

Here we'd like to avoid repeatedly evaluating t inside the loop, by taking
advantage of the `seq`.

At one point I did this transformation in LiberateCase, but it's more robust
here.  (Otherwise, there's a danger that we'll simply drop the 'seq'
altogether, before LiberateCase gets to see it.)

Note [Scaling in case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When two cases commute, if done naively, the multiplicities will
be wrong:

  case (case u of w[1] { (x[1], y[1]) } -> f x y) of w'[Many]
    { (z[Many], t[Many]) -> z }

The multiplicities here, are correct, but if I perform a case of
case:

  case u of w[1]
    { (x[1], y[1]) -> case f x y of w'[Many] of { (z[Many], t[Many]) -> z } }

This is wrong!  Using `f x y` inside a `case … of w'[Many]` means that `x`
and `y` must have multiplicities `Many` not `1`!
The correct solution is to make all the `1`-s be `Many`-s instead:

  case u of w[Many]
    { (x[Many], y[Many]) -> case f x y of w'[Many] of { (z[Many], t[Many]) -> z } }

In general, when commuting two cases, the rule has to be:

  case (case … of x[p] {…}) of y[q] { … }
  ===>
  case … of x[p*q] { … case … of y[q] { … } }

This is materialised, in the simplifier, by the fact that every time we
simplify case alternatives with a continuation (the surrounded case (or
more!)), we must scale the entire case we are simplifying, by a scaling
factor which can be computed in the continuation (with function
`contHoleScaling`).
-}

simplAlts :: SimplEnv
          -> OutExpr         -- Scrutinee
          -> InId            -- Case binder
          -> [InAlt]         -- Non-empty
          -> SimplCont
          -> SimplM OutExpr  -- Returns the complete simplified case expression

simplAlts env0 scrut case_bndr alts cont'
  = do { traceSmpl "simplAlts" (vcat [ ppr case_bndr
                                     , text "cont':" <+> ppr cont'
                                     , text "in_scope" <+> ppr (seInScope env0)
                                     ])
       ; (env1, case_bndr1) <- simplBinder env0 case_bndr
       ; let case_bndr2 = case_bndr1 `setIdUnfolding` evaldUnfolding
             env2       = modifyInScope env1 case_bndr2
             -- See Note [Case binder evaluated-ness]
             fam_envs   = seFamEnvs env0

       ; (alt_env', scrut', case_bndr')
           <- improveSeq fam_envs env2 scrut case_bndr case_bndr2 alts

       ; (imposs_deflt_cons, in_alts) <- prepareAlts scrut' case_bndr alts
         -- NB: it's possible that the returned in_alts is empty: this is handled
         --     by the caller (rebuildCase) in the missingAlt function
         -- NB: pass case_bndr::InId, not case_bndr' :: OutId, to prepareAlts
         --     See Note [Shadowing in prepareAlts] in GHC.Core.Opt.Simplify.Utils

       ; alts' <- forM in_alts $
           simplAlt alt_env' (Just scrut') imposs_deflt_cons case_bndr'
                    (scrutOkForBinderSwap scrut) cont'

       ; let alts_ty' = contResultType cont'
       -- See Note [Avoiding space leaks in OutType]
       ; seqType alts_ty' `seq`
         mkCase (seMode env0) scrut' case_bndr' alts_ty' alts' }

------------------------------------
improveSeq :: (FamInstEnv, FamInstEnv) -> SimplEnv
           -> OutExpr -> InId -> OutId -> [InAlt]
           -> SimplM (SimplEnv, OutExpr, OutId)
-- Note [Improving seq]
improveSeq fam_envs env scrut case_bndr case_bndr1 [Alt DEFAULT _ _]
  | Just (Reduction co ty2) <- topNormaliseType_maybe fam_envs (idType case_bndr1)
  = do { case_bndr2 <- newId (fsLit "nt") ManyTy ty2
       ; let rhs  = DoneEx (Var case_bndr2 `Cast` mkSymCo co) NotJoinPoint
             env2 = extendIdSubst env case_bndr rhs
       ; return (env2, scrut `Cast` co, case_bndr2) }

improveSeq _ env scrut _ case_bndr1 _
  = return (env, scrut, case_bndr1)

------------------------------------
simplAlt :: SimplEnv
         -> Maybe OutExpr       -- The scrutinee
         -> [AltCon]            -- These constructors can't be present when
                                -- matching the DEFAULT alternative
         -> OutId               -- The case binder `bndr`
         -> BinderSwapDecision  -- DoBinderSwap v co <==> scrut = Just (v |> co),
                                -- add unfolding `v :-> bndr |> sym co`
         -> SimplCont
         -> InAlt
         -> SimplM OutAlt

simplAlt env _scrut' imposs_deflt_cons case_bndr' bndr_swap' cont' (Alt DEFAULT bndrs rhs)
  = assert (null bndrs) $
    do { let env' = addDefaultUnfoldings env case_bndr' bndr_swap' imposs_deflt_cons
       ; rhs' <- simplExprC env' rhs cont'
       ; return (Alt DEFAULT [] rhs') }

simplAlt env _scrut' _ case_bndr' bndr_swap' cont' (Alt (LitAlt lit) bndrs rhs)
  = assert (null bndrs) $
    do { let env' = addAltUnfoldings env case_bndr' bndr_swap' (Lit lit)
       ; rhs' <- simplExprC env' rhs cont'
       ; return (Alt (LitAlt lit) [] rhs') }

simplAlt env scrut' _ case_bndr' bndr_swap' cont' (Alt (DataAlt con) vs rhs)
  = do { -- See Note [Adding evaluatedness info to pattern-bound variables]
         -- and Note [DataAlt occ info]
       ; let vs_with_info = adjustFieldsIdInfo scrut' case_bndr' bndr_swap' con vs
         -- Adjust evaluated-ness and occ-info flags before `simplBinders`
         -- because the latter extends the in-scope set, which propagates this
         -- adjusted info to use sites.
       ; (env', vs') <- simplBinders env vs_with_info

                -- Bind the case-binder to (con args)
       ; let inst_tys' = tyConAppArgs (idType case_bndr')
             con_app :: OutExpr
             con_app   = mkConApp2 con inst_tys' vs'
             env''     = addAltUnfoldings env' case_bndr' bndr_swap' con_app

       ; rhs' <- simplExprC env'' rhs cont'
       ; return (Alt (DataAlt con) vs' rhs') }

{- Note [Adding evaluatedness info to pattern-bound variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
addEvals records the evaluated-ness of the bound variables of
a case pattern.  This is *important*.  Consider

     data T = T !Int !Int

     case x of { T a b -> T (a+1) b }

We really must record that b is already evaluated so that we don't
go and re-evaluate it when constructing the result.
See Note [Data-con worker strictness] in GHC.Core.DataCon

NB: simplLamBndrs preserves this eval info

In addition to handling data constructor fields with !s, addEvals
also records the fact that the result of seq# is always in WHNF.
See Note [seq# magic] in GHC.Types.Id.Make.  Example (#15226):

  case seq# v s of
    (# s', v' #) -> E

we want the compiler to be aware that v' is in WHNF in E.

Open problem: we don't record that v itself is in WHNF (and we can't
do it here).  The right thing is to do some kind of binder-swap;
see #15226 for discussion.
-}

adjustFieldsIdInfo :: Maybe OutExpr -> OutId -> BinderSwapDecision -> DataCon -> [Id] -> [Id]
-- See Note [Adding evaluatedness info to pattern-bound variables]
-- and Note [DataAlt occ info]
adjustFieldsIdInfo scrut case_bndr bndr_swap con vs
  -- Deal with seq# applications
  | Just scr <- scrut
  , isUnboxedTupleDataCon con
  , [s,x] <- vs
    -- Use stripNArgs rather than collectArgsTicks to avoid building
    -- a list of arguments only to throw it away immediately.
  , Just (Var f) <- stripNArgs 4 scr
  , f `hasKey` seqHashKey
  , let x' = setCaseBndrEvald MarkedStrict x
  = map (adjustFieldOccInfo case_bndr bndr_swap) [s, x']

-- Deal with banged datacon fields
-- This case is quite allocation sensitive to T9233 which has a large record
-- with strict fields. Hence we try not to update vs twice!
adjustFieldsIdInfo _scrut case_bndr bndr_swap con vs
  | Nothing <- dataConWrapId_maybe con
  -- A common fast path; no need to allocate the_strs when they are all lazy
  -- anyway! It shaves off 2% in T9675
  = map (adjustFieldOccInfo case_bndr bndr_swap) vs
  | otherwise
  = go vs the_strs
  where
    the_strs = dataConRepStrictness con

    go [] [] = []
    go (v:vs') strs
      | isTyVar v = v : go vs' strs
    go (v:vs') (str:strs)
      = adjustFieldOccInfo case_bndr bndr_swap (setCaseBndrEvald str v) : go vs' strs
    go _ _ = pprPanic "Simplify.adjustFieldsIdInfo"
               (ppr con $$
                ppr vs $$
                ppr_with_length (map strdisp the_strs) $$
                ppr_with_length (dataConRepArgTys con) $$
                ppr_with_length (dataConRepStrictness con))
      where
        ppr_with_length list
          = ppr list <+> parens (text "length =" <+> ppr (length list))
        strdisp :: StrictnessMark -> SDoc
        strdisp MarkedStrict    = text "MarkedStrict"
        strdisp NotMarkedStrict = text "NotMarkedStrict"

adjustFieldOccInfo :: OutId -> BinderSwapDecision -> CoreBndr -> CoreBndr
-- Kill occ info if we do binder swap and the case binder is alive;
-- see Note [DataAlt occ info]
adjustFieldOccInfo case_bndr bndr_swap field_bndr
  | isTyVar field_bndr
  = field_bndr

  | not (isDeadBinder case_bndr)  -- (1) in the Note: If the case binder is alive,
  = zapIdOccInfo field_bndr       --     the field binders might come back alive

  | DoBinderSwap{} <- bndr_swap   -- (2) in the Note: If binder swap might take place,
  = zapIdOccInfo field_bndr       --     the case binder might come back alive

  | otherwise
  = field_bndr                    -- otherwise the field binders stay dead

addDefaultUnfoldings :: SimplEnv -> OutId -> BinderSwapDecision -> [AltCon] -> SimplEnv
addDefaultUnfoldings env case_bndr bndr_swap imposs_deflt_cons
  = env2
  where
    unf = mkOtherCon imposs_deflt_cons
          -- Record the constructors that the case-binder *can't* be.
    env1 = addBinderUnfolding env case_bndr unf
    -- Also record the impossible constructors on the binder-swap
    -- variable, if a binder swap is in effect
    env2 | DoBinderSwap v _mco <- bndr_swap
         = addBinderUnfolding env1 v unf
         | otherwise = env1

-- Give the case binder (and the binder-swap variable, if any) an
-- unfolding equal to the constructor application of this alternative.
addAltUnfoldings :: SimplEnv -> OutId -> BinderSwapDecision -> OutExpr -> SimplEnv
addAltUnfoldings env case_bndr bndr_swap con_app
  = env2
  where
    con_app_unf = mk_simple_unf con_app
    env1 = addBinderUnfolding env case_bndr con_app_unf

    -- See Note [Add unfolding for scrutinee]
    env2 | DoBinderSwap v mco <- bndr_swap
         = addBinderUnfolding env1 v $
             if isReflMCo mco      -- isReflMCo: avoid calling mk_simple_unf
             then con_app_unf      --            twice in the common case
             else mk_simple_unf (mkCastMCo con_app mco)
         | otherwise = env1

    -- Force the opts, so that the whole SimplEnv isn't retained
    !opts = seUnfoldingOpts env
    mk_simple_unf = mkSimpleUnfolding opts

-- Attach an unfolding to a binder in the in-scope set; in debug builds,
-- warn if the unfolding's type does not match the binder's type.
addBinderUnfolding :: SimplEnv -> Id -> Unfolding -> SimplEnv
addBinderUnfolding env bndr unf
  | debugIsOn, Just tmpl <- maybeUnfoldingTemplate unf
  = warnPprTrace (not (eqType (idType bndr) (exprType tmpl)))
        "unfolding type mismatch"
        (ppr bndr $$ ppr (idType bndr) $$ ppr tmpl $$ ppr (exprType tmpl)) $
    modifyInScope env (bndr `setIdUnfolding` unf)
  | otherwise
  = modifyInScope env (bndr `setIdUnfolding` unf)

zapBndrOccInfo :: Bool -> Id -> Id
-- Consider  case e of b { (a,b) -> ... }
-- Then if we bind b to (a,b) in "...", and b is not dead,
-- then we must zap the deadness info on a,b
zapBndrOccInfo keep_occ_info pat_id
  | keep_occ_info = pat_id
  | otherwise     = zapIdOccInfo pat_id

{- Note [Case binder evaluated-ness]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We pin on a (OtherCon []) unfolding to the case-binder of a Case,
even though it'll be over-ridden in every case alternative with a more
informative unfolding.  Why?  Because suppose a later, less clever, pass
simply replaces all occurrences of the case binder with the binder itself;
then Lint may complain about the let-can-float invariant.
Example case e of b { DEFAULT -> let v = reallyUnsafePtrEquality# b y in .... ; K -> blah } The let-can-float invariant requires that y is evaluated in the call to reallyUnsafePtrEquality#, which it is. But we still want that to be true if we propagate binders to occurrences. This showed up in #13027. Note [Add unfolding for scrutinee] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general it's unlikely that a variable scrutinee will appear in the case alternatives case x of { ...x unlikely to appear... } because the binder-swap in OccurAnal has got rid of all such occurrences See Note [Binder swap] in "GHC.Core.Opt.OccurAnal". BUT it is still VERY IMPORTANT to add a suitable unfolding for a variable scrutinee, in simplAlt. Here's why case x of y (a,b) -> case b of c I# v -> ...(f y)... There is no occurrence of 'b' in the (...(f y)...). But y gets the unfolding (a,b), and *that* mentions b. If f has a RULE RULE f (p, I# q) = ... we want that rule to match, so we must extend the in-scope env with a suitable unfolding for 'y'. It's *essential* for rule matching; but it's also good for case-elimination -- suppose that 'f' was inlined and did multi-level case analysis, then we'd solve it in one simplifier sweep instead of two. HOWEVER, given case x of y { Just a -> r1; Nothing -> r2 } we do not want to add the unfolding x -> y to 'x', which might seem cool, since 'y' itself has different unfoldings in r1 and r2. Reason: if we did that, we'd have to zap y's deadness info and that is a very useful piece of information. So instead we add the unfolding x -> Just a, and x -> Nothing in the respective RHSs. Since this transformation is tantamount to a binder swap, we use GHC.Core.Opt.OccurAnal.scrutOkForBinderSwap to do the check. 
Exactly the same issue arises in GHC.Core.Opt.SpecConstr; see Note [Add scrutinee to ValueEnv too] in GHC.Core.Opt.SpecConstr ************************************************************************ * * \subsection{Known constructor} * * ************************************************************************ We are a bit careful with occurrence info. Here's an example (\x* -> case x of (a*, b) -> f a) (h v, e) where the * means "occurs once". This effectively becomes case (h v, e) of (a*, b) -> f a) and then let a* = h v; b = e in f a and then f (h v) All this should happen in one sweep. -} knownCon :: SimplEnv -> OutExpr -- The scrutinee -> [FloatBind] -> DataCon -> [OutType] -> [OutExpr] -- The scrutinee (in pieces) -> InId -> [InBndr] -> InExpr -- The alternative -> SimplCont -> SimplM (SimplFloats, OutExpr) knownCon env scrut dc_floats dc dc_ty_args dc_args bndr bs rhs cont = do { (floats1, env1) <- bind_args env bs dc_args ; (floats2, env2) <- bind_case_bndr env1 ; (floats3, expr') <- simplExprF env2 rhs cont ; case dc_floats of [] -> return (floats1 `addFloats` floats2 `addFloats` floats3, expr') _ -> return ( emptyFloats env -- See Note [FloatBinds from constructor wrappers] , GHC.Core.Make.wrapFloats dc_floats $ wrapFloats (floats1 `addFloats` floats2 `addFloats` floats3) expr') } where zap_occ = zapBndrOccInfo (isDeadBinder bndr) -- bndr is an InId -- Ugh! bind_args env' [] _ = return (emptyFloats env', env') bind_args env' (b:bs') (Type ty : args) = assert (isTyVar b ) bind_args (extendTvSubst env' b ty) bs' args bind_args env' (b:bs') (Coercion co : args) = assert (isCoVar b ) bind_args (extendCvSubst env' b co) bs' args bind_args env' (b:bs') (arg : args) = assert (isId b) $ do { let b' = zap_occ b -- zap_occ: the binder might be "dead", because it doesn't -- occur in the RHS; and simplAuxBind may therefore discard it. -- Nevertheless we must keep it if the case-binder is alive, -- because it may be used in the con_app. 
See Note [knownCon occ info] ; (floats1, env2) <- simplAuxBind "knownCon" env' b' arg -- arg satisfies let-can-float invariant ; (floats2, env3) <- bind_args env2 bs' args ; return (floats1 `addFloats` floats2, env3) } bind_args _ _ _ = pprPanic "bind_args" $ ppr dc $$ ppr bs $$ ppr dc_args $$ text "scrut:" <+> ppr scrut -- It's useful to bind bndr to scrut, rather than to a fresh -- binding x = Con arg1 .. argn -- because very often the scrut is a variable, so we avoid -- creating, and then subsequently eliminating, a let-binding -- BUT, if scrut is a not a variable, we must be careful -- about duplicating the arg redexes; in that case, make -- a new con-app from the args bind_case_bndr env | isDeadBinder bndr = return (emptyFloats env, env) | exprIsTrivial scrut = return (emptyFloats env , extendIdSubst env bndr (DoneEx scrut NotJoinPoint)) -- See Note [Do not duplicate constructor applications] | otherwise = do { dc_args <- mapM (simplVar env) bs -- dc_ty_args are already OutTypes, -- but bs are InBndrs ; let con_app = Var (dataConWorkId dc) `mkTyApps` dc_ty_args `mkApps` dc_args ; simplAuxBind "case-bndr" env bndr con_app } ------------------- missingAlt :: SimplEnv -> Id -> [InAlt] -> SimplCont -> SimplM (SimplFloats, OutExpr) -- This isn't strictly an error, although it is unusual. -- It's possible that the simplifier might "see" that -- an inner case has no accessible alternatives before -- it "sees" that the entire branch of an outer case is -- inaccessible. So we simply put an error case here instead. 
missingAlt env case_bndr _ cont = warnPprTrace True "missingAlt" (ppr case_bndr) $ -- See Note [Avoiding space leaks in OutType] let cont_ty = contResultType cont in seqType cont_ty `seq` return (emptyFloats env, mkImpossibleExpr cont_ty "Simplify.Iteration.missingAlt") {- ************************************************************************ * * \subsection{Duplicating continuations} * * ************************************************************************ Consider let x* = case e of { True -> e1; False -> e2 } in b where x* is a strict binding. Then mkDupableCont will be given the continuation case [] of { True -> e1; False -> e2 } ; let x* = [] in b ; stop and will split it into dupable: case [] of { True -> $j1; False -> $j2 } ; stop join floats: $j1 = e1, $j2 = e2 non_dupable: let x* = [] in b; stop Putting this back together would give let x* = let { $j1 = e1; $j2 = e2 } in case e of { True -> $j1; False -> $j2 } in b (Of course we only do this if 'e' wants to duplicate that continuation.) Note how important it is that the new join points wrap around the inner expression, and not around the whole thing. In contrast, any let-bindings introduced by mkDupableCont can wrap around the entire thing. Note [Bottom alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~ When we have case (case x of { A -> error .. ; B -> e; C -> error ..) of alts then we can just duplicate those alts because the A and C cases will disappear immediately. This is more direct than creating join points and inlining them away. See #4930. 
-}

--------------------
-- Make the continuation of a case expression duplicable, but only when
-- more than one non-bottom alternative would actually duplicate it
-- (see altsWouldDup); otherwise return it unchanged.
mkDupableCaseCont :: SimplEnv -> [InAlt] -> SimplCont
                  -> SimplM ( SimplFloats  -- Join points (if any)
                            , SimplEnv     -- Use this for the alts
                            , SimplCont)
mkDupableCaseCont env alts cont
  | altsWouldDup alts
  = do { (floats, cont) <- mkDupableCont env cont
       ; let env' = bumpCaseDepth $
                    env `setInScopeFromF` floats
       ; return (floats, env', cont) }
  | otherwise
  = return (emptyFloats env, env, cont)

altsWouldDup :: [InAlt] -> Bool -- True iff strictly > 1 non-bottom alternative
altsWouldDup []  = False        -- See Note [Bottom alternatives]
altsWouldDup [_] = False
altsWouldDup (alt:alts)
  | is_bot_alt alt = altsWouldDup alts
  | otherwise      = not (all is_bot_alt alts)
    -- otherwise case: first alt is non-bot, so all the rest must be bot
  where
    is_bot_alt (Alt _ _ rhs) = exprIsDeadEnd rhs

-------------------------
mkDupableCont :: SimplEnv
              -> SimplCont
              -> SimplM ( SimplFloats  -- Incoming SimplEnv augmented with
                                       --   extra let/join-floats and in-scope variables
                        , SimplCont)   -- dup_cont: duplicable continuation
-- Entry point: delegates to mkDupableContWithDmds with top demands.
mkDupableCont env cont
  = mkDupableContWithDmds env (repeat topDmd) cont

mkDupableContWithDmds
   :: SimplEnv -> [Demand]   -- Demands on arguments; always infinite
   -> SimplCont -> SimplM ( SimplFloats, SimplCont)

mkDupableContWithDmds env _ cont
  | contIsDupable cont
  = return (emptyFloats env, cont)

mkDupableContWithDmds _ _ (Stop {}) = panic "mkDupableCont" -- Handled by previous eqn

mkDupableContWithDmds env dmds (CastIt { sc_co = co, sc_opt = opt, sc_cont = cont })
  = do { (floats, cont') <- mkDupableContWithDmds env dmds cont
       ; return (floats, CastIt { sc_co = optOutCoercion env co opt
                                , sc_opt = True, sc_cont = cont' }) }
         -- optOutCoercion: see Note [Avoid re-simplifying coercions]

-- Duplicating ticks for now, not sure if this is good or not
mkDupableContWithDmds env dmds (TickIt t cont)
  = do { (floats, cont') <- mkDupableContWithDmds env dmds cont
       ; return (floats, TickIt t cont') }

mkDupableContWithDmds env _
    (StrictBind { sc_bndr = bndr, sc_body = body, sc_from
= from_what , sc_env = se, sc_cont = cont}) -- See Note [Duplicating StrictBind] -- K[ let x = <> in b ] --> join j x = K[ b ] -- j <> = do { let sb_env = se `setInScopeFromE` env ; (sb_env1, bndr') <- simplBinder sb_env bndr ; (floats1, join_inner) <- simplNonRecBody sb_env1 from_what body cont -- No need to use mkDupableCont before simplNonRecBody; we -- use cont once here, and then share the result if necessary ; let join_body = wrapFloats floats1 join_inner res_ty = contResultType cont ; mkDupableStrictBind env bndr' join_body res_ty } mkDupableContWithDmds env _ (StrictArg { sc_fun = fun, sc_cont = cont , sc_fun_ty = fun_ty }) -- NB: sc_dup /= OkToDup; that is caught earlier by contIsDupable | isNothing (isDataConId_maybe (ai_fun fun)) -- isDataConId: see point (DJ4) of Note [Duplicating join points] , thumbsUpPlanA cont = -- Use Plan A of Note [Duplicating StrictArg] -- pprTrace "Using plan A" (ppr (ai_fun fun) $$ text "args" <+> ppr (ai_args fun) $$ text "cont" <+> ppr cont) $ do { let (_ : dmds) = ai_dmds fun ; (floats1, cont') <- mkDupableContWithDmds env dmds cont -- Use the demands from the function to add the right -- demand info on any bindings we make for further args ; (floats_s, args') <- mapAndUnzipM (makeTrivialArg env) (ai_args fun) ; return ( foldl' addLetFloats floats1 floats_s , StrictArg { sc_fun = fun { ai_args = args' } , sc_cont = cont' , sc_fun_ty = fun_ty , sc_dup = OkToDup} ) } | otherwise = -- Use Plan B of Note [Duplicating StrictArg] -- K[ f a b <> ] --> join j x = K[ f a b x ] -- j <> do { let rhs_ty = contResultType cont (m,arg_ty,_) = splitFunTy fun_ty ; arg_bndr <- newId (fsLit "arg") m arg_ty ; let env' = env `addNewInScopeIds` [arg_bndr] ; (floats, join_rhs) <- rebuildCall env' (addValArgTo fun (Var arg_bndr) fun_ty) cont ; mkDupableStrictBind env' arg_bndr (wrapFloats floats join_rhs) rhs_ty } where thumbsUpPlanA (StrictArg {}) = False thumbsUpPlanA (StrictBind {}) = True thumbsUpPlanA (Stop {}) = True thumbsUpPlanA (Select 
{}) = True thumbsUpPlanA (CastIt { sc_cont = k }) = thumbsUpPlanA k thumbsUpPlanA (TickIt _ k) = thumbsUpPlanA k thumbsUpPlanA (ApplyToVal { sc_cont = k }) = thumbsUpPlanA k thumbsUpPlanA (ApplyToTy { sc_cont = k }) = thumbsUpPlanA k mkDupableContWithDmds env dmds (ApplyToTy { sc_cont = cont, sc_arg_ty = arg_ty, sc_hole_ty = hole_ty }) = do { (floats, cont') <- mkDupableContWithDmds env dmds cont ; return (floats, ApplyToTy { sc_cont = cont' , sc_arg_ty = arg_ty, sc_hole_ty = hole_ty }) } mkDupableContWithDmds env dmds (ApplyToVal { sc_arg = arg, sc_dup = dup, sc_env = se , sc_cont = cont, sc_hole_ty = hole_ty }) = -- e.g. [...hole...] (...arg...) -- ==> -- let a = ...arg... -- in [...hole...] a -- NB: sc_dup /= OkToDup; that is caught earlier by contIsDupable do { let (dmd:cont_dmds) = dmds -- Never fails ; (floats1, cont') <- mkDupableContWithDmds env cont_dmds cont ; let env' = env `setInScopeFromF` floats1 ; (_, se', arg') <- simplLazyArg env' dup hole_ty Nothing se arg ; (let_floats2, arg'') <- makeTrivial env NotTopLevel dmd (fsLit "karg") arg' ; let all_floats = floats1 `addLetFloats` let_floats2 ; return ( all_floats , ApplyToVal { sc_arg = arg'' , sc_env = se' `setInScopeFromF` all_floats -- Ensure that sc_env includes the free vars of -- arg'' in its in-scope set, even if makeTrivial -- has turned arg'' into a fresh variable -- See Note [StaticEnv invariant] in GHC.Core.Opt.Simplify.Utils , sc_dup = OkToDup, sc_cont = cont' , sc_hole_ty = hole_ty }) } mkDupableContWithDmds env _ (Select { sc_bndr = case_bndr, sc_alts = alts, sc_env = se, sc_cont = cont }) = -- e.g. (case [...hole...] of { pi -> ei }) -- ===> -- let ji = \xij -> ei -- in case [...hole...] 
of { pi -> ji xij } -- NB: sc_dup /= OkToDup; that is caught earlier by contIsDupable do { tick (CaseOfCase case_bndr) ; (floats, alt_env, alt_cont) <- mkDupableCaseCont (se `setInScopeFromE` env) alts cont -- NB: We call mkDupableCaseCont here to make cont duplicable -- (if necessary, depending on the number of alts) -- And this is important: see Note [Fusing case continuations] ; let cont_scaling = contHoleScaling cont -- See Note [Scaling in case-of-case] ; (alt_env', case_bndr') <- simplBinder alt_env (scaleIdBy cont_scaling case_bndr) ; alts' <- forM (scaleAltsBy cont_scaling alts) $ simplAlt alt_env' Nothing [] case_bndr' NoBinderSwap alt_cont -- Safe to say that there are no handled-cons for the DEFAULT case -- NB: simplBinder does not zap deadness occ-info, so -- a dead case_bndr' will still advertise its deadness -- This is really important because in -- case e of b { (# p,q #) -> ... } -- b is always dead, and indeed we are not allowed to bind b to (# p,q #), -- which might happen if e was an explicit unboxed pair and b wasn't marked dead. -- In the new alts we build, we have the new case binder, so it must retain -- its deadness. 
-- NB: we don't use alt_env further; it has the substEnv for -- the alternatives, and we don't want that ; (join_floats, alts'') <- mapAccumLM (mkDupableAlt env case_bndr') emptyJoinFloats alts' ; let all_floats = floats `addJoinFloats` join_floats -- Note [Duplicated env] ; return (all_floats , Select { sc_dup = OkToDup , sc_bndr = case_bndr' , sc_alts = alts'' , sc_env = zapSubstEnv se `setInScopeFromF` all_floats -- See Note [StaticEnv invariant] in GHC.Core.Opt.Simplify.Utils , sc_cont = mkBoringStop (contResultType cont) } ) } mkDupableStrictBind :: SimplEnv -> OutId -> OutExpr -> OutType -> SimplM (SimplFloats, SimplCont) mkDupableStrictBind env arg_bndr join_rhs res_ty | uncondInlineJoin [arg_bndr] join_rhs -- See point (DJ2) of Note [Duplicating join points] = return (emptyFloats env , StrictBind { sc_bndr = arg_bndr , sc_body = join_rhs , sc_env = zapSubstEnv env , sc_from = FromLet -- See Note [StaticEnv invariant] in GHC.Core.Opt.Simplify.Utils , sc_dup = OkToDup , sc_cont = mkBoringStop res_ty } ) | otherwise = do { join_bndr <- newJoinId [arg_bndr] res_ty ; let arg_info = ArgInfo { ai_fun = join_bndr , ai_rewrite = TryNothing, ai_args = [] , ai_encl = False, ai_dmds = repeat topDmd , ai_discs = repeat 0 } ; return ( addJoinFloats (emptyFloats env) $ unitJoinFloat $ NonRec join_bndr $ Lam (setOneShotLambda arg_bndr) join_rhs , StrictArg { sc_dup = OkToDup , sc_fun = arg_info , sc_fun_ty = idType join_bndr , sc_cont = mkBoringStop res_ty } ) } mkDupableAlt :: SimplEnv -> OutId -> JoinFloats -> OutAlt -> SimplM (JoinFloats, OutAlt) mkDupableAlt _env case_bndr jfloats (Alt con alt_bndrs alt_rhs_in) | uncondInlineJoin alt_bndrs alt_rhs_in -- See point (DJ2) of Note [Duplicating join points] = return (jfloats, Alt con alt_bndrs alt_rhs_in) | otherwise = do { let rhs_ty' = exprType alt_rhs_in bangs | DataAlt c <- con = dataConRepStrictness c | otherwise = [] abstracted_binders = abstract_binders alt_bndrs bangs abstract_binders :: [Var] -> [StrictnessMark] -> 
[(Id,StrictnessMark)] abstract_binders [] [] -- Abstract over the case binder too if it's used. | isDeadBinder case_bndr = [] | otherwise = [(case_bndr,MarkedStrict)] abstract_binders (alt_bndr:alt_bndrs) marks -- Abstract over all type variables just in case | isTyVar alt_bndr = (alt_bndr,NotMarkedStrict) : abstract_binders alt_bndrs marks abstract_binders (alt_bndr:alt_bndrs) (mark:marks) -- The deadness info on the new Ids is preserved by simplBinders -- We don't abstract over dead ids here. | isDeadBinder alt_bndr = abstract_binders alt_bndrs marks | otherwise = (alt_bndr,mark) : abstract_binders alt_bndrs marks abstract_binders _ _ = pprPanic "abstrict_binders - failed to abstract" (ppr $ Alt con alt_bndrs alt_rhs_in) filtered_binders = map fst abstracted_binders -- We want to make any binder with an evaldUnfolding strict in the rhs. -- See Note [Call-by-value for worker args] (which also applies to join points) rhs_with_seqs = mkStrictFieldSeqs abstracted_binders alt_rhs_in final_args = varsToCoreExprs filtered_binders -- Note [Join point abstraction] -- We make the lambdas into one-shot-lambdas. The -- join point is sure to be applied at most once, and doing so -- prevents the body of the join point being floated out by -- the full laziness pass final_bndrs = map one_shot filtered_binders one_shot v | isId v = setOneShotLambda v | otherwise = v -- No lambda binder has an unfolding, but (currently) case binders can, -- so we must zap them here. 
join_rhs = mkLams (map zapIdUnfolding final_bndrs) rhs_with_seqs ; join_bndr <- newJoinId filtered_binders rhs_ty' ; let -- join_bndr_w_unf = join_bndr `setIdUnfolding` -- mkUnfolding uf_opts VanillaSrc False False join_rhs Nothing -- See Note [Do not add unfoldings to join points at birth] join_call = mkApps (Var join_bndr) final_args alt' = Alt con alt_bndrs join_call ; return ( jfloats `addJoinFlts` unitJoinFloat (NonRec join_bndr join_rhs) , alt') } -- See Note [Duplicated env] {- Note [Do not add unfoldings to join points at birth] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this (#15360) case (case (case (case ...))) of Left x -> e1 Right y -> e2 We will make a join point for e1, e2, thus $j1a x = e1 $j1b y = e2 Now those join point calls count as "duplicable" , so we feel free to duplicate them into the loop nest. And each of those calls are then subject to callSiteInline, which might inline them, if e1, e2 are reasonably small. Now, if this applies recursive to the next `case` inwards, and so on, the net effect is that we can get an exponential number of calls to $j1a and $j1b, and an exponential number of inlinings (since each is done independently). This hit #15360 (not a complicated program!) badly. Our simple solution is this: when a join point is born, we don't give it an unfolding, so it will not be inlined at its call sites, at least not in that pass. So we end up with $j1a x = e1 $j1b y = e2 $j2a x = ...$j1a ... $j1b... $j2b x = ...$j1a ... $j1b... ... and so on... In the next iteration of the Simplifier we are into Note [Avoid inlining into deeply nested cases] in Simplify.Inline, which is still a challenge. But at least we have a chance. If we add inlinings at birth we never get that chance. Wrinkle (JU1) It turns out that the same problem shows up in a different guise, via Note [Post-inline for single-use things] in Simplify.Utils. 
I think we have something like
     case K (join $j x = <...> in jblah) of
       K y{OneOcc} -> blah
where $j is a freshly-born join point.  After case-of-known-constructor
we end up substituting (join $j x = <...> in jblah) for `y` in `blah`;
and thus we re-simplify that join binding.  In test T15630 this results
in massive duplication.

So in `simplLetUnfolding` we spot this case a bit hackily; a freshly-born
join point will have OccInfo of ManyOccs, unlike an existing join point
which will have OneOcc.  So in simplLetUnfolding we kill the unfolding of
a freshly born join point.  I can't quite articulate precisely why this is
so important.  But it makes a MASSIVE difference in T15630 (a fantastic
test case); and at worst it'll merely delay inlining join points by one
simplifier iteration.

In effect (JU1) just extends the original Note [Do not add unfoldings to
join points at birth] to occasions where we re-visit the same join-point
in the same Simplifier iteration.

Note [Fusing case continuations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's important to fuse two successive case continuations when the
first has one alternative.  That's why we call prepareCaseCont here.
Consider this, which arises from thunk splitting (see Note [Thunk
splitting] in GHC.Core.Opt.WorkWrap):

      let x* = case (case v of {pn -> rn}) of
                 I# a -> I# a
      in body

The simplifier will find
    (Var v) with continuation
            Select (pn -> rn) (
            Select [I# a -> I# a] (
            StrictBind body Stop

So we'll call mkDupableCont on
   Select [I# a -> I# a] (StrictBind body Stop)
There is just one alternative in the first Select, so we want to
simplify the rhs (I# a) with continuation (StrictBind body Stop)
Supposing that body is big, we end up with

      let $j a = <...>
      in case v of { pn -> case rn of
                             I# a -> $j a }

This is just what we want because the rn produces a box that
the case rn cancels with.

See #4957 for a fuller example.
Note [Duplicating join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In #19996 we discovered that we want to be really careful about
inlining join points.  Consider

    case (join $j x = K f x
          in case v of
               p1 -> $j x1
               p2 -> $j x2
               p3 -> $j x3) of
      K g y -> blah[g,y]

Here the join-point RHS is very small, just a constructor
application (K f x).  So we might inline it to get

    case (case v of
            p1 -> K f x1
            p2 -> K f x2
            p3 -> K f x3) of
      K g y -> blah[g,y]

But now we have to make `blah` into a join point, /abstracted/ over `g`
and `y`.  In contrast, if we /don't/ inline $j we don't need a join
point for `blah` and we'll get

    join $j x = let g=f, y=x in blah[g,y]
    in case v of
         p1 -> $j x1
         p2 -> $j x2
         p3 -> $j x3

This can make a /massive/ difference, because `blah` can see what `f`
is, instead of lambda-abstracting over it.

Beyond this, not-inlining join points reduces duplication.  In the above
example, if `blah` was small enough we'd inline it, but that duplicates
code, for no gain.  Best just to not inline the join point in the first
place.  So not-inlining join points is our default: but see
Note [Inlining join points] in GHC.Core.Opt.Simplify.Inline for when we
/do/ inline them.

To achieve this parsimonious inlining of join points, we need to do two
things: (a) create a join point even if the RHS is small; and (b) don't
do unconditional-inlining for join points.

(DJ1) Do not postInlineUnconditionally a join point, ever.  Doing
      postInlineUnconditionally is primarily to push allocation into cold
      branches; but a join point doesn't allocate, so that's a
      non-motivation.

(DJ2) In mkDupableAlt and mkDupableStrictBind, generate an alternative
      for /all/ alternatives, /except/ for ones that will definitely
      inline unconditionally straight away.  (In that case it's silly to
      make a join point in the first place; it just takes an extra
      Simplifier iteration to undo.)  This choice is made by
      GHC.Core.Unfold.uncondInlineJoin.
This plan generates a lot of join points, but makes them much more case-of-case friendly. (DJ3) When should `uncondInlineJoin` return True? * (exprIsTrivial rhs); this includes uses of unsafeEqualityProof etc; see the defn of exprIsTrivial. Also nullary constructors. * The RHS is a call ($j x y z), where the arguments are all trivial and $j is a join point: there is no point in creating an indirection. (DJ4) By the same token we want to use Plan B in Note [Duplicating StrictArg] when the RHS of the new join point is a data constructor application. See the call to isDataConId in the StrictArg case of mkDupableContWithDmds. That same Note [Duplicating StrictArg] explains why we sometimes want Plan A when the RHS of the new join point would be a non-data-constructor application (DJ5) You might worry that $j = K x y might look so small that it is inlined by the call site inliner, defeating (DJ3). But in fact - The UnfoldingGuidance for a join point is only UnfWhen (unconditional) if `uncondInlineJoin` is true; see GHC.Core.Unfold.uncondInline - `GHC.Core.Opt.Simplify.Inline.tryUnfolding` has a special case for join points, described Note [Inlining join points] in that module. Historical Note [Case binders and join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NB: this entire Note is now irrelevant. In Jun 21 we stopped adding unfoldings to lambda binders (#17530). It was always a hack and bit us in multiple small and not-so-small ways Consider this case (case .. ) of c { I# c# -> ....c.... If we make a join point with c but not c# we get $j = \c -> ....c.... But if later inlining scrutinises the c, thus $j = \c -> ... case c of { I# y -> ... } ... we won't see that 'c' has already been scrutinised. This actually happens in the 'tabulate' function in wave4main, and makes a significant difference to allocation. An alternative plan is this: $j = \c# -> let c = I# c# in ...c.... but that is bad if 'c' is *not* later scrutinised. 
So instead we do both: we pass 'c' and 'c#', and record in c's inlining
(a stable unfolding) that it's really I# c#, thus

$j = \c# -> \c[=I# c#] -> ...c....

Absence analysis may later discard 'c'.

NB: take great care when doing strictness analysis;
    see Note [Lambda-bound unfoldings] in GHC.Core.Opt.DmdAnal.

Also note that we can still end up passing stuff that isn't used.  Before
strictness analysis we have
   let $j x y c{=(x,y)} = (h c, ...)
   in ...
After strictness analysis we see that h is strict, we end up with
   let $j x y c{=(x,y)} = ($wh x y, ...)
and c is unused.

Note [Duplicated env]
~~~~~~~~~~~~~~~~~~~~~
Some of the alternatives are simplified, but have not been turned into a
join point.  So they *must* have a zapped subst-env.  So we can't use
completeNonRecX to bind the join point, because it might do
PostInlineUnconditionally, and we'd lose that when zapping the subst-env.
We could have a per-alt subst-env, but zapping it (as we do in
mkDupableCont, the Select case) is safe, and at worst delays the
join-point inlining.

Note [Funky mkLamTypes]
~~~~~~~~~~~~~~~~~~~~~~~
Notice the funky mkLamTypes.  If the constructor has existentials
it's possible that the join point will be abstracted over
type variables as well as term variables.
 Example:  Suppose we have
        data T = forall t.  C [t]
 Then faced with
        case (case e of ...) of
            C t xs::[t] -> rhs
 We get the join point
        let j :: forall t. [t] -> ...
            j = /\t \xs::[t] -> rhs
        in
        case (case e of ...) of
            C t xs::[t] -> j t xs

Note [Duplicating StrictArg]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dealing with making a StrictArg continuation duplicable has turned out
to be one of the trickiest corners of the simplifier, giving rise
to several cases in which the simplifier expanded the program's size
*exponentially*.
They include #13253 exponential inlining #10421 ditto #18140 strict constructors #18282 another nested-function call case Suppose we have a call f e1 (case x of { True -> r1; False -> r2 }) e3 and f is strict in its second argument. Then we end up in mkDupableCont with a StrictArg continuation for (f e1 <> e3). There are two ways to make it duplicable. * Plan A: move the entire call inwards, being careful not to duplicate e1 or e3, thus: let a1 = e1 a3 = e3 in case x of { True -> f a1 r1 a3 ; False -> f a1 r2 a3 } * Plan B: make a join point: join $j x = f e1 x e3 in case x of { True -> jump $j r1 ; False -> jump $j r2 } Notice that Plan B is very like the way we handle strict bindings; see Note [Duplicating StrictBind]. And Plan B is exactly what we'd get if we turned use a case expression to evaluate the strict arg: case (case x of { True -> r1; False -> r2 }) of r -> f e1 r e3 So, looking at Note [Duplicating join points], we also want Plan B when `f` is a data constructor. Plan A is often good: * The calls to `f` may well be able to inline, since they are now applied to more informative arguments, `r1`, `r2`. For example: && E (case x of { T -> F; F -> T }) Pushing the call inward (being careful not to duplicate E) we get let a = E in case x of { T -> && a F; F -> && a T } and now the (&& a F) etc can optimise. * Moreover there might be a RULE for the function that can fire when it "sees" the particular case alternative. * More specialisation can happen. Here's an example from #3116 go (n+1) (case l of 1 -> bs' _ -> Chunk p fpc (o+1) (l-1) bs') If we pushed the entire call for 'go' inside the case, we get call-pattern specialisation for 'go', which is *crucial* for this particular program. But Plan A can have terrible, terrible behaviour. Here is a classic case: f (f (f (f (f True)))) Suppose f is strict, and has a body that is small enough to inline. 
The innermost call inlines (seeing the True) to give f (f (f (f (case v of { True -> e1; False -> e2 })))) Now, suppose we naively push the entire continuation into both case branches (it doesn't look large, just f.f.f.f). We get case v of True -> f (f (f (f e1))) False -> f (f (f (f e2))) And now the process repeats, so we end up with an exponentially large number of copies of f. No good! CONCLUSION: we want Plan A in general, but do Plan B is there a danger of this nested call behaviour. The function that decides this is called thumbsUpPlanA. Note [Keeping demand info in StrictArg Plan A] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Following on from Note [Duplicating StrictArg], another common code pattern that can go bad is this: f (case x1 of { T -> F; F -> T }) (case x2 of { T -> F; F -> T }) ...etc... when f is strict in all its arguments. (It might, for example, be a strict data constructor whose wrapper has not yet been inlined.) We use Plan A (because there is no nesting) giving let a2 = case x2 of ... a3 = case x3 of ... in case x1 of { T -> f F a2 a3 ... ; F -> f T a2 a3 ... } Now we must be careful! a2 and a3 are small, and the OneOcc code in postInlineUnconditionally may inline them both at both sites; see Note Note [Inline small things to avoid creating a thunk] in Simplify.Utils. But if we do inline them, the entire process will repeat -- back to exponential behaviour. So we are careful to keep the demand-info on a2 and a3. Then they'll be /strict/ let-bindings, which will be dealt with by StrictBind. That's why contIsDupableWithDmds is careful to propagage demand info to the auxiliary bindings it creates. See the Demand argument to makeTrivial. Note [Duplicating StrictBind] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We make a StrictBind duplicable in a very similar way to that for case expressions. 
After all, let x* = e in b is similar to case e of x -> b So we potentially make a join-point for the body, thus: let x = <> in b ==> join j x = b in j <> Just like StrictArg in fact -- and indeed they share code. Note [Join point abstraction] Historical note ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NB: This note is now historical, describing how (in the past) we used to add a void argument to nullary join points. But now that "join point" is not a fuzzy concept but a formal syntactic construct (as distinguished by the JoinId constructor of IdDetails), each of these concerns is handled separately, with no need for a vestigial extra argument. Join points always have at least one value argument, for several reasons * If we try to lift a primitive-typed something out for let-binding-purposes, we will *caseify* it (!), with potentially-disastrous strictness results. So instead we turn it into a function: \v -> e where v::Void#. The value passed to this function is void, which generates (almost) no code. * CPR. We used to say "&& isUnliftedType rhs_ty'" here, but now we make the join point into a function whenever used_bndrs' is empty. This makes the join-point more CPR friendly. Consider: let j = if .. then I# 3 else I# 4 in case .. of { A -> j; B -> j; C -> ... } Now CPR doesn't w/w j because it's a thunk, so that means that the enclosing function can't w/w either, which is a lose. Here's the example that happened in practice: kgmod :: Int -> Int -> Int kgmod x y = if x > 0 && y < 0 || x < 0 && y > 0 then 78 else 5 * Let-no-escape. We want a join point to turn into a let-no-escape so that it is implemented as a jump, and one of the conditions for LNE is that it's not updatable. In CoreToStg, see Note [What is a non-escaping let] * Floating. Since a join point will be entered once, no sharing is gained by floating out, but something might be lost by doing so because it might be allocated. I have seen a case alternative like this: True -> \v -> ... 
It's a bit silly to add the realWorld dummy arg in this case, making $j = \s v -> ... True -> $j s (the \v alone is enough to make CPR happy) but I think it's rare There's a slight infelicity here: we pass the overall case_bndr to all the join points if it's used in *any* RHS, because we don't know its usage in each RHS separately ************************************************************************ * * Unfoldings * * ************************************************************************ -} simplLetUnfolding :: SimplEnv -> BindContext -> InId -> OutExpr -> OutType -> ArityType -> Unfolding -> SimplM Unfolding simplLetUnfolding env bind_cxt id new_rhs rhs_ty arity unf | isStableUnfolding unf = simplStableUnfolding env bind_cxt id rhs_ty arity unf | freshly_born_join_point id = -- This is a tricky one! -- See wrinkle (JU1) in Note [Do not add unfoldings to join points at birth] return noUnfolding | isExitJoinId id = -- See Note [Do not inline exit join points] in GHC.Core.Opt.Exitify return noUnfolding | otherwise = mkLetUnfolding env (bindContextLevel bind_cxt) VanillaSrc id is_join_point new_rhs where is_join_point = isJoinId id freshly_born_join_point id = is_join_point && isManyOccs (idOccInfo id) -- OLD: too_many_occs (OneOcc { occ_n_br = n }) = n > 10 -- See #23627 ------------------- mkLetUnfolding :: SimplEnv -> TopLevelFlag -> UnfoldingSource -> InId -> Bool -- True <=> this is a join point -> OutExpr -> SimplM Unfolding mkLetUnfolding env top_lvl src id is_join new_rhs = return (mkUnfolding uf_opts src is_top_lvl is_bottoming is_join new_rhs Nothing) -- We make an unfolding *even for loop-breakers*. -- Reason: (a) It might be useful to know that they are WHNF -- (b) In GHC.Iface.Tidy we currently assume that, if we want to -- expose the unfolding then indeed we *have* an unfolding -- to expose. (We could instead use the RHS, but currently -- we don't.) The simple thing is always to have one. 
where -- !opts: otherwise, we end up retaining all the SimpleEnv !uf_opts = seUnfoldingOpts env -- Might as well force this, profiles indicate up to -- 0.5MB of thunks just from this site. !is_top_lvl = isTopLevel top_lvl -- See Note [Force bottoming field] !is_bottoming = isDeadEndId id ------------------- simplStableUnfolding :: SimplEnv -> BindContext -> InId -> OutType -> ArityType -- Used to eta expand, but only for non-join-points -> Unfolding ->SimplM Unfolding -- Note [Setting the new unfolding] simplStableUnfolding env bind_cxt id rhs_ty id_arity unf = case unf of NoUnfolding -> return unf BootUnfolding -> return unf OtherCon {} -> return unf DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args } -> do { (env', bndrs') <- simplBinders unf_env bndrs ; args' <- mapM (simplExpr env') args ; return (mkDFunUnfolding bndrs' con args') } CoreUnfolding { uf_tmpl = expr, uf_src = src, uf_guidance = guide } | isStableSource src -> do { expr' <- case bind_cxt of BC_Join _ cont -> -- Binder is a join point -- See Note [Rules and unfolding for join points] simplJoinRhs unf_env id expr cont BC_Let _ is_rec -> -- Binder is not a join point do { let cont = mkRhsStop rhs_ty is_rec topDmd -- mkRhsStop: switch off eta-expansion at the top level ; expr' <- simplExprC unf_env expr cont ; return (eta_expand expr') } ; case guide of UnfWhen { ug_boring_ok = boring_ok } -- Happens for INLINE things -- Really important to force new_boring_ok since otherwise -- `ug_boring_ok` is a thunk chain of -- inlineBoringExprOk expr0 || inlineBoringExprOk expr1 || ... -- See #20134 -> let !new_boring_ok = boring_ok || inlineBoringOk expr' guide' = guide { ug_boring_ok = new_boring_ok } -- Refresh the boring-ok flag, in case expr' -- has got small. This happens, notably in the inlinings -- for dfuns for single-method classes; see -- Note [Single-method classes] in GHC.Tc.TyCl.Instance. -- A test case is #4138 -- But retain a previous boring_ok of True; e.g. 
see -- the way it is set in calcUnfoldingGuidanceWithArity in return (mkCoreUnfolding src is_top_lvl expr' Nothing guide') -- See Note [Top-level flag on inline rules] in GHC.Core.Unfold _other -- Happens for INLINABLE things -> mkLetUnfolding env top_lvl src id False expr' } -- If the guidance is UnfIfGoodArgs, this is an INLINABLE -- unfolding, and we need to make sure the guidance is kept up -- to date with respect to any changes in the unfolding. | otherwise -> return noUnfolding -- Discard unstable unfoldings where -- Forcing this can save about 0.5MB of max residency and the result -- is small and easy to compute so might as well force it. top_lvl = bindContextLevel bind_cxt !is_top_lvl = isTopLevel top_lvl act = idInlineActivation id unf_env = updMode (updModeForStableUnfoldings act) env -- See Note [Simplifying inside stable unfoldings] in GHC.Core.Opt.Simplify.Utils -- See Note [Eta-expand stable unfoldings] -- Use the arity from the main Id (in id_arity), rather than computing it from rhs -- Not used for join points eta_expand expr | seEtaExpand env , exprArity expr < arityTypeArity id_arity , wantEtaExpansion expr = etaExpandAT (getInScope env) id_arity expr | otherwise = expr {- Note [Eta-expand stable unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For INLINE/INLINABLE things (which get stable unfoldings) there's a danger of getting f :: Int -> Int -> Int -> Blah [ Arity = 3 -- Good arity , Unf=Stable (\xy. blah) -- Less good arity, only 2 f = \pqr. e This can happen because f's RHS is optimised more vigorously than its stable unfolding. Now suppose we have a call g = f x Because f has arity=3, g will have arity=2. But if we inline f (using its stable unfolding) g's arity will reduce to 1, because hasn't been optimised yet. This happened in the 'parsec' library, for Text.Pasec.Char.string. Generally, if we know that 'f' has arity N, it seems sensible to eta-expand the stable unfolding to arity N too. Simple and consistent. 
Wrinkles * See Historical-note [Eta-expansion in stable unfoldings] in GHC.Core.Opt.Simplify.Utils * Don't eta-expand a trivial expr, else each pass will eta-reduce it, and then eta-expand again. See Note [Which RHSs do we eta-expand?] in GHC.Core.Opt.Simplify.Utils. * Don't eta-expand join points; see Note [Do not eta-expand join points] in GHC.Core.Opt.Simplify.Utils. We uphold this because the join-point case (bind_cxt = BC_Join {}) doesn't use eta_expand. Note [Force bottoming field] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need to force bottoming, or the new unfolding holds on to the old unfolding (which is part of the id). Note [Setting the new unfolding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * If there's an INLINE pragma, we simplify the RHS gently. Maybe we should do nothing at all, but simplifying gently might get rid of more crap. * If not, we make an unfolding from the new RHS. But *only* for non-loop-breakers. Making loop breakers not have an unfolding at all means that we can avoid tests in exprIsConApp, for example. This is important: if exprIsConApp says 'yes' for a recursive thing, then we can get into an infinite loop If there's a stable unfolding on a loop breaker (which happens for INLINABLE), we hang on to the inlining. It's pretty dodgy, but the user did say 'INLINE'. May need to revisit this choice. ************************************************************************ * * Rules * * ************************************************************************ Note [Rules in a letrec] ~~~~~~~~~~~~~~~~~~~~~~~~ After creating fresh binders for the binders of a letrec, we substitute the RULES and add them back onto the binders; this is done *before* processing any of the RHSs. This is important. Manuel found cases where he really, really wanted a RULE for a recursive function to apply in that function's own right-hand side. 
See Note [Forming Rec groups] in "GHC.Core.Opt.OccurAnal" -} addBndrRules :: SimplEnv -> InBndr -> OutBndr -> BindContext -> SimplM (SimplEnv, OutBndr) -- Rules are added back into the bin addBndrRules env in_id out_id bind_cxt | null old_rules = return (env, out_id) | otherwise = do { new_rules <- simplRules env (Just out_id) old_rules bind_cxt ; let final_id = out_id `setIdSpecialisation` mkRuleInfo new_rules ; return (modifyInScope env final_id, final_id) } where old_rules = ruleInfoRules (idSpecialisation in_id) simplImpRules :: SimplEnv -> [CoreRule] -> SimplM [CoreRule] -- Simplify local rules for imported Ids simplImpRules env rules = simplRules env Nothing rules (BC_Let TopLevel NonRecursive) simplRules :: SimplEnv -> Maybe OutId -> [CoreRule] -> BindContext -> SimplM [CoreRule] simplRules env mb_new_id rules bind_cxt = mapM simpl_rule rules where simpl_rule rule@(BuiltinRule {}) = return rule simpl_rule rule@(Rule { ru_bndrs = bndrs, ru_args = args , ru_fn = fn_name, ru_rhs = rhs , ru_act = act }) = do { (env', bndrs') <- simplBinders env bndrs ; let rhs_ty = substTy env' (exprType rhs) rhs_cont = case bind_cxt of -- See Note [Rules and unfolding for join points] BC_Let {} -> mkBoringStop rhs_ty BC_Join _ cont -> assertPpr join_ok bad_join_msg cont lhs_env = updMode updModeForRules env' rhs_env = updMode (updModeForStableUnfoldings act) env' -- See Note [Simplifying the RHS of a RULE] -- Force this to avoid retaining reference to old Id !fn_name' = case mb_new_id of Just id -> idName id Nothing -> fn_name -- join_ok is an assertion check that the join-arity of the -- binder matches that of the rule, so that pushing the -- continuation into the RHS makes sense join_ok = case mb_new_id of Just id | JoinPoint join_arity <- idJoinPointHood id -> length args == join_arity _ -> False bad_join_msg = vcat [ ppr mb_new_id, ppr rule , ppr (fmap idJoinPointHood mb_new_id) ] ; args' <- mapM (simplExpr lhs_env) args ; rhs' <- simplExprC rhs_env rhs rhs_cont ; return 
(rule { ru_bndrs = bndrs' , ru_fn = fn_name' , ru_args = args' , ru_rhs = occurAnalyseExpr rhs' }) } -- Remember to occ-analyse, to drop dead code. -- See Note [OccInfo in unfoldings and rules] in GHC.Core {- Note [Simplifying the RHS of a RULE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We can simplify the RHS of a RULE much as we do the RHS of a stable unfolding. We used to use the much more conservative updModeForRules for the RHS as well as the LHS, but that seems more conservative than necesary. Allowing some inlining might, for example, eliminate a binding. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/Monad.hs0000644000000000000000000002364207346545000022535 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} {- (c) The AQUA Project, Glasgow University, 1993-1998 \section[GHC.Core.Opt.Simplify.Monad]{The simplifier Monad} -} module GHC.Core.Opt.Simplify.Monad ( -- The monad TopEnvConfig(..), SimplM, initSmpl, traceSmpl, getSimplRules, -- Unique supply MonadUnique(..), newId, newJoinId, -- Counting SimplCount, tick, freeTick, checkedTick, getSimplCount, zeroSimplCount, pprSimplCount, plusSimplCount, isZeroSimplCount ) where import GHC.Prelude import GHC.Types.Var ( Var, isId, mkLocalVar ) import GHC.Types.Name ( mkSystemVarName ) import GHC.Types.Id ( Id, mkSysLocalOrCoVarM ) import GHC.Types.Id.Info ( IdDetails(..), vanillaIdInfo, setArityInfo ) import GHC.Core.Type ( Type, Mult ) import GHC.Core.Opt.Stats import GHC.Core.Rules import GHC.Core.Utils ( mkLamTypes ) import GHC.Types.Unique.Supply import GHC.Driver.Flags import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Monad import GHC.Utils.Logger as Logger import GHC.Utils.Misc ( count ) import GHC.Utils.Panic (throwGhcExceptionIO, GhcException (..)) import GHC.Types.Basic ( IntWithInf, treatZeroAsInf, mkIntWithInf ) import Control.Monad ( ap ) import GHC.Core.Multiplicity ( pattern ManyTy ) import GHC.Exts( oneShot ) {- 
************************************************************************ * * \subsection{Monad plumbing} * * ************************************************************************ -} newtype SimplM result = SM' { unSM :: SimplTopEnv -> SimplCount -> IO (result, SimplCount)} -- We only need IO here for dump output, but since we already have it -- we might as well use it for uniques. pattern SM :: (SimplTopEnv -> SimplCount -> IO (result, SimplCount)) -> SimplM result -- This pattern synonym makes the simplifier monad eta-expand, -- which as a very beneficial effect on compiler performance -- (worth a 1-2% reduction in bytes-allocated). See #18202. -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern SM m <- SM' m where SM m = SM' (oneShot $ \env -> oneShot $ \ct -> m env ct) -- See Note [The environments of the Simplify pass] data TopEnvConfig = TopEnvConfig { te_history_size :: !Int , te_tick_factor :: !Int } data SimplTopEnv = STE { -- See Note [The environments of the Simplify pass] st_config :: !TopEnvConfig , st_logger :: !Logger , st_max_ticks :: !IntWithInf -- ^ Max #ticks in this simplifier run , st_read_ruleenv :: !(IO RuleEnv) -- ^ The action to retrieve an up-to-date EPS RuleEnv -- See Note [Overall plumbing for rules] } initSmpl :: Logger -> IO RuleEnv -> TopEnvConfig -> Int -- ^ Size of the bindings, used to limit the number of ticks we allow -> SimplM a -> IO (a, SimplCount) initSmpl logger read_ruleenv cfg size m = do -- No init count; set to 0 let simplCount = zeroSimplCount $ logHasDumpFlag logger Opt_D_dump_simpl_stats unSM m env simplCount where env = STE { st_config = cfg , st_logger = logger , st_max_ticks = computeMaxTicks cfg size , st_read_ruleenv = read_ruleenv } computeMaxTicks :: TopEnvConfig -> Int -> IntWithInf -- Compute the max simplifier ticks as -- (base-size + pgm-size) * magic-multiplier * tick-factor/100 -- where -- magic-multiplier is a constant that gives reasonable results -- base-size is a constant to deal 
with size-zero programs computeMaxTicks cfg size = treatZeroAsInf $ fromInteger ((toInteger (size + base_size) * toInteger (tick_factor * magic_multiplier)) `div` 100) where tick_factor = te_tick_factor cfg base_size = 100 magic_multiplier = 40 -- MAGIC NUMBER, multiplies the simplTickFactor -- We can afford to be generous; this is really -- just checking for loops, and shouldn't usually fire -- A figure of 20 was too small: see #5539. {-# INLINE thenSmpl #-} {-# INLINE thenSmpl_ #-} {-# INLINE returnSmpl #-} {-# INLINE mapSmpl #-} instance Functor SimplM where fmap = mapSmpl instance Applicative SimplM where pure = returnSmpl (<*>) = ap (*>) = thenSmpl_ instance Monad SimplM where (>>) = (*>) (>>=) = thenSmpl mapSmpl :: (a -> b) -> SimplM a -> SimplM b mapSmpl f m = thenSmpl m (returnSmpl . f) returnSmpl :: a -> SimplM a returnSmpl e = SM (\_st_env sc -> return (e, sc)) thenSmpl :: SimplM a -> (a -> SimplM b) -> SimplM b thenSmpl_ :: SimplM a -> SimplM b -> SimplM b thenSmpl m k = SM $ \st_env sc0 -> do (m_result, sc1) <- unSM m st_env sc0 unSM (k m_result) st_env sc1 thenSmpl_ m k = SM $ \st_env sc0 -> do (_, sc1) <- unSM m st_env sc0 unSM k st_env sc1 -- TODO: this specializing is not allowed -- {-# SPECIALIZE mapM :: (a -> SimplM b) -> [a] -> SimplM [b] #-} -- {-# SPECIALIZE mapAndUnzipM :: (a -> SimplM (b, c)) -> [a] -> SimplM ([b],[c]) #-} -- {-# SPECIALIZE mapAccumLM :: (acc -> b -> SimplM (acc,c)) -> acc -> [b] -> SimplM (acc, [c]) #-} traceSmpl :: String -> SDoc -> SimplM () traceSmpl herald doc = do logger <- getLogger liftIO $ Logger.putDumpFileMaybe logger Opt_D_dump_simpl_trace "Simpl Trace" FormatText (hang (text herald) 2 doc) {-# INLINE traceSmpl #-} -- see Note [INLINE conditional tracing utilities] {- ************************************************************************ * * \subsection{The unique supply} * * ************************************************************************ -} -- See Note [Uniques for wired-in prelude things and known 
tags] in GHC.Builtin.Uniques simplTag :: Char simplTag = 's' instance MonadUnique SimplM where getUniqueSupplyM = liftIO $ mkSplitUniqSupply simplTag getUniqueM = liftIO $ uniqFromTag simplTag instance HasLogger SimplM where getLogger = gets st_logger instance MonadIO SimplM where liftIO = liftIOWithEnv . const getSimplRules :: SimplM RuleEnv getSimplRules = liftIOWithEnv st_read_ruleenv liftIOWithEnv :: (SimplTopEnv -> IO a) -> SimplM a liftIOWithEnv m = SM (\st_env sc -> do x <- m st_env return (x, sc)) gets :: (SimplTopEnv -> a) -> SimplM a gets f = liftIOWithEnv (return . f) newId :: FastString -> Mult -> Type -> SimplM Id newId fs w ty = mkSysLocalOrCoVarM fs w ty -- | Make a join id with given type and arity but without call-by-value annotations. newJoinId :: [Var] -> Type -> SimplM Id newJoinId bndrs body_ty = do { uniq <- getUniqueM ; let name = mkSystemVarName uniq (fsLit "$j") join_id_ty = mkLamTypes bndrs body_ty -- Note [Funky mkLamTypes] arity = count isId bndrs -- arity: See Note [Invariants on join points] invariant 2b, in GHC.Core join_arity = length bndrs details = JoinId join_arity Nothing id_info = vanillaIdInfo `setArityInfo` arity ; return (mkLocalVar details name ManyTy join_id_ty id_info) } {- ************************************************************************ * * \subsection{Counting up what we've done} * * ************************************************************************ -} getSimplCount :: SimplM SimplCount getSimplCount = SM (\_st_env sc -> return (sc, sc)) tick :: Tick -> SimplM () tick t = SM (\st_env sc -> let history_size = te_history_size (st_config st_env) sc' = doSimplTick history_size t sc in sc' `seq` return ((), sc')) checkedTick :: Tick -> SimplM () -- Try to take a tick, but fail if too many checkedTick t = SM (\st_env sc -> if st_max_ticks st_env <= mkIntWithInf (simplCountN sc) then throwGhcExceptionIO $ PprProgramError "Simplifier ticks exhausted" (msg sc) else let history_size = te_history_size (st_config 
st_env) sc' = doSimplTick history_size t sc in sc' `seq` return ((), sc')) where msg sc = vcat [ text "When trying" <+> ppr t , text "To increase the limit, use -fsimpl-tick-factor=N (default 100)." , space , text "In addition try adjusting -funfolding-case-threshold=N and" , text "-funfolding-case-scaling=N for the module in question." , text "Using threshold=1 and scaling=5 should break most inlining loops." , space , text "If you need to increase the tick factor substantially, while also" , text "adjusting unfolding parameters please file a bug report and" , text "indicate the factor you needed." , space , text "If GHC was unable to complete compilation even" <+> text "with a very large factor" , text "(a thousand or more), please consult the" <+> doubleQuotes (text "Known bugs or infelicities") , text "section in the Users Guide before filing a report. There are a" , text "few situations unlikely to occur in practical programs for which" , text "simplifier non-termination has been judged acceptable." 
, space , pp_details sc , pprSimplCount sc ] pp_details sc | hasDetailedCounts sc = empty | otherwise = text "To see detailed counts use -ddump-simpl-stats" freeTick :: Tick -> SimplM () -- Record a tick, but don't add to the total tick count, which is -- used to decide when nothing further has happened freeTick t = SM (\_st_env sc -> let sc' = doFreeSimplTick t sc in sc' `seq` return ((), sc')) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Simplify/Utils.hs0000644000000000000000000034702507346545000022603 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 The simplifier utilities -} module GHC.Core.Opt.Simplify.Utils ( -- Rebuilding rebuildLam, mkCase, prepareAlts, tryEtaExpandRhs, wantEtaExpansion, -- Inlining, preInlineUnconditionally, postInlineUnconditionally, activeRule, getUnfoldingInRuleMatch, updModeForStableUnfoldings, updModeForRules, -- The BindContext type BindContext(..), bindContextLevel, -- The continuation type SimplCont(..), DupFlag(..), FromWhat(..), StaticEnv, isSimplified, contIsStop, contIsDupable, contResultType, contHoleType, contHoleScaling, contIsTrivial, contArgs, contIsRhs, countArgs, mkBoringStop, mkRhsStop, mkLazyArgStop, interestingCallContext, -- ArgInfo ArgInfo(..), ArgSpec(..), RewriteCall(..), mkArgInfo, addValArgTo, addCastTo, addTyArgTo, argInfoExpr, argInfoAppArgs, pushSimplifiedArgs, pushSimplifiedRevArgs, isStrictArgInfo, lazyArgContext, abstractFloats, -- Utilities isExitJoinId ) where import GHC.Prelude hiding (head, init, last, tail) import qualified GHC.Prelude as Partial (head) import GHC.Core import GHC.Types.Literal ( isLitRubbish ) import GHC.Core.Opt.Simplify.Env import GHC.Core.Opt.Simplify.Inline( smallEnoughToInline ) import GHC.Core.Opt.Stats ( Tick(..) 
) import qualified GHC.Core.Subst import GHC.Core.Ppr import GHC.Core.TyCo.Ppr ( pprParendType ) import GHC.Core.FVs import GHC.Core.Utils import GHC.Core.Rules( RuleEnv, getRules ) import GHC.Core.Opt.Arity import GHC.Core.Unfold import GHC.Core.Unfold.Make import GHC.Core.Opt.Simplify.Monad import GHC.Core.Type hiding( substTy ) import GHC.Core.Coercion hiding( substCo ) import GHC.Core.DataCon ( dataConWorkId, isNullaryRepDataCon ) import GHC.Core.Multiplicity import GHC.Core.Opt.ConstantFold import GHC.Types.Name import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Tickish import GHC.Types.Demand import GHC.Types.Var.Set import GHC.Types.Basic import GHC.Data.OrdList ( isNilOL ) import GHC.Data.FastString ( fsLit ) import GHC.Utils.Misc import GHC.Utils.Monad import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Monad ( when ) import Data.List ( sortBy ) import GHC.Types.Name.Env import Data.Graph {- ********************************************************************* * * The BindContext type * * ********************************************************************* -} -- What sort of binding is this? A let-binding or a join-binding? 
data BindContext = BC_Let -- A regular let-binding TopLevelFlag RecFlag | BC_Join -- A join point with continuation k RecFlag -- See Note [Rules and unfolding for join points] SimplCont -- in GHC.Core.Opt.Simplify bindContextLevel :: BindContext -> TopLevelFlag bindContextLevel (BC_Let top_lvl _) = top_lvl bindContextLevel (BC_Join {}) = NotTopLevel bindContextRec :: BindContext -> RecFlag bindContextRec (BC_Let _ rec_flag) = rec_flag bindContextRec (BC_Join rec_flag _) = rec_flag isJoinBC :: BindContext -> Bool isJoinBC (BC_Let {}) = False isJoinBC (BC_Join {}) = True {- ********************************************************************* * * The SimplCont and DupFlag types * * ************************************************************************ A SimplCont allows the simplifier to traverse the expression in a zipper-like fashion. The SimplCont represents the rest of the expression, "above" the point of interest. You can also think of a SimplCont as an "evaluation context", using that term in the way it is used for operational semantics. This is the way I usually think of it, For example you'll often see a syntax for evaluation context looking like C ::= [] | C e | case C of alts | C `cast` co That's the kind of thing we are doing here, and I use that syntax in the comments. Key points: * A SimplCont describes a *strict* context (just like evaluation contexts do). E.g. Just [] is not a SimplCont * A SimplCont describes a context that *does not* bind any variables. E.g. \x. [] is not a SimplCont -} data SimplCont = Stop -- ^ Stop[e] = e OutType -- ^ Type of the CallCtxt -- ^ Tells if there is something interesting about -- the syntactic context, and hence the inliner -- should be a bit keener (see interestingCallContext) -- Specifically: -- This is an argument of a function that has RULES -- Inlining the call might allow the rule to fire -- Never ValAppCxt (use ApplyToVal instead) -- or CaseCtxt (use Select instead) SubDemand -- ^ The evaluation context of e. 
Tells how e is evaluated. -- This fuels eta-expansion or eta-reduction without looking -- at lambda bodies, for example. -- -- See Note [Eta reduction based on evaluation context] -- The evaluation context for other SimplConts can be -- reconstructed with 'contEvalContext' | CastIt -- (CastIt co K)[e] = K[ e `cast` co ] { sc_co :: OutCoercion -- The coercion simplified -- Invariant: never an identity coercion , sc_opt :: Bool -- True <=> sc_co has had optCoercion applied to it -- See Note [Avoid re-simplifying coercions] -- in GHC.Core.Opt.Simplify.Iteration , sc_cont :: SimplCont } | ApplyToVal -- (ApplyToVal arg K)[e] = K[ e arg ] { sc_dup :: DupFlag -- See Note [DupFlag invariants] , sc_hole_ty :: OutType -- Type of the function, presumably (forall a. blah) -- See Note [The hole type in ApplyToTy] , sc_arg :: InExpr -- The argument, , sc_env :: StaticEnv -- see Note [StaticEnv invariant] , sc_cont :: SimplCont } | ApplyToTy -- (ApplyToTy ty K)[e] = K[ e ty ] { sc_arg_ty :: OutType -- Argument type , sc_hole_ty :: OutType -- Type of the function, presumably (forall a. blah) -- See Note [The hole type in ApplyToTy] , sc_cont :: SimplCont } | Select -- (Select alts K)[e] = K[ case e of alts ] { sc_dup :: DupFlag -- See Note [DupFlag invariants] , sc_bndr :: InId -- case binder , sc_alts :: [InAlt] -- Alternatives , sc_env :: StaticEnv -- See Note [StaticEnv invariant] , sc_cont :: SimplCont } -- The two strict forms have no DupFlag, because we never duplicate them | StrictBind -- (StrictBind x b K)[e] = let x = e in K[b] -- or, equivalently, = K[ (\x.b) e ] { sc_dup :: DupFlag -- See Note [DupFlag invariants] , sc_from :: FromWhat , sc_bndr :: InId , sc_body :: InExpr , sc_env :: StaticEnv -- Static env for both sc_bndr (stable unfolding thereof) -- and sc_body. Also see Note [StaticEnv invariant] , sc_cont :: SimplCont } | StrictArg -- (StrictArg (f e1 ..en) K)[e] = K[ f e1 .. 
en e ] { sc_dup :: DupFlag -- Always Simplified or OkToDup , sc_fun :: ArgInfo -- Specifies f, e1..en, Whether f has rules, etc -- plus demands and discount flags for *this* arg -- and further args -- So ai_dmds and ai_discs are never empty , sc_fun_ty :: OutType -- Type of the function (f e1 .. en), -- presumably (arg_ty -> res_ty) -- where res_ty is expected by sc_cont , sc_cont :: SimplCont } | TickIt -- (TickIt t K)[e] = K[ tick t e ] CoreTickish -- Tick tickish SimplCont type StaticEnv = SimplEnv -- Just the static part is relevant data FromWhat = FromLet | FromBeta Levity -- See Note [DupFlag invariants] data DupFlag = NoDup -- Unsimplified, might be big | Simplified -- Simplified | OkToDup -- Simplified and small isSimplified :: DupFlag -> Bool isSimplified NoDup = False isSimplified _ = True -- Invariant: the subst-env is empty perhapsSubstTy :: DupFlag -> StaticEnv -> Type -> Type perhapsSubstTy dup env ty | isSimplified dup = ty | otherwise = substTy env ty {- Note [StaticEnv invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We pair up an InExpr or InAlts with a StaticEnv, which establishes the lexical scope for that InExpr. When we simplify that InExpr/InAlts, we use - Its captured StaticEnv - Overriding its InScopeSet with the larger one at the simplification point. Why override the InScopeSet? Example: (let y = ey in f) ex By the time we simplify ex, 'y' will be in scope. However the InScopeSet in the StaticEnv is not irrelevant: it should include all the free vars of applying the substitution to the InExpr. Reason: contHoleType uses perhapsSubstTy to apply the substitution to the expression, and that (rightly) gives ASSERT failures if the InScopeSet isn't big enough. 
Note [DupFlag invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~ In both ApplyToVal { se_dup = dup, se_env = env, se_cont = k} and Select { se_dup = dup, se_env = env, se_cont = k} the following invariants hold (a) if dup = OkToDup, then continuation k is also ok-to-dup (b) if dup = OkToDup or Simplified, the subst-env is empty, or at least is always ignored; the payload is already an OutThing -} instance Outputable DupFlag where ppr OkToDup = text "ok" ppr NoDup = text "nodup" ppr Simplified = text "simpl" instance Outputable SimplCont where ppr (Stop ty interesting eval_sd) = text "Stop" <> brackets (sep $ punctuate comma pps) <+> ppr ty where pps = [ppr interesting] ++ [ppr eval_sd | eval_sd /= topSubDmd] ppr (CastIt { sc_co = co, sc_cont = cont }) = (text "CastIt" <+> pprOptCo co) $$ ppr cont ppr (TickIt t cont) = (text "TickIt" <+> ppr t) $$ ppr cont ppr (ApplyToTy { sc_arg_ty = ty, sc_cont = cont }) = (text "ApplyToTy" <+> pprParendType ty) $$ ppr cont ppr (ApplyToVal { sc_arg = arg, sc_dup = dup, sc_cont = cont, sc_hole_ty = hole_ty }) = (hang (text "ApplyToVal" <+> ppr dup <+> text "hole" <+> ppr hole_ty) 2 (pprParendExpr arg)) $$ ppr cont ppr (StrictBind { sc_bndr = b, sc_cont = cont }) = (text "StrictBind" <+> ppr b) $$ ppr cont ppr (StrictArg { sc_fun = ai, sc_cont = cont }) = (text "StrictArg" <+> ppr (ai_fun ai)) $$ ppr cont ppr (Select { sc_dup = dup, sc_bndr = bndr, sc_alts = alts, sc_cont = cont }) = (text "Select" <+> ppr dup <+> ppr bndr) $$ whenPprDebug (nest 2 $ ppr alts) $$ ppr cont {- Note [The hole type in ApplyToTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The sc_hole_ty field of ApplyToTy records the type of the "hole" in the continuation. It is absolutely necessary to compute contHoleType, but it is not used for anything else (and hence may not be evaluated). Why is it necessary for contHoleType? Consider the continuation ApplyToType Int (Stop Int) corresponding to ( @Int) :: Int What is the type of ? It could be (forall a. Int) or (forall a. 
a), and there is no way to know which, so we must record it. In a chain of applications (f @t1 @t2 @t3) we'll lazily compute exprType for (f @t1) and (f @t1 @t2), which is potentially non-linear; but it probably doesn't matter because we'll never compute them all. ************************************************************************ * * ArgInfo and ArgSpec * * ************************************************************************ -} data ArgInfo = ArgInfo { ai_fun :: OutId, -- The function ai_args :: [ArgSpec], -- ...applied to these args (which are in *reverse* order) ai_rewrite :: RewriteCall, -- What transformation to try next for this call -- See Note [Rewrite rules and inlining] in GHC.Core.Opt.Simplify.Iteration ai_encl :: Bool, -- Flag saying whether this function -- or an enclosing one has rules (recursively) -- True => be keener to inline in all args ai_dmds :: [Demand], -- Demands on remaining value arguments (beyond ai_args) -- Usually infinite, but if it is finite it guarantees -- that the function diverges after being given -- that number of args ai_discs :: [Int] -- Discounts for remaining value arguments (beyond ai_args) -- non-zero => be keener to inline -- Always infinite } data RewriteCall -- What rewriting to try next for this call -- See Note [Rewrite rules and inlining] in GHC.Core.Opt.Simplify.Iteration = TryRules FullArgCount [CoreRule] | TryInlining | TryNothing data ArgSpec = ValArg { as_dmd :: Demand -- Demand placed on this argument , as_arg :: OutExpr -- Apply to this (coercion or value); c.f. ApplyToVal , as_hole_ty :: OutType } -- Type of the function (presumably t1 -> t2) | TyArg { as_arg_ty :: OutType -- Apply to this type; c.f. ApplyToTy , as_hole_ty :: OutType } -- Type of the function (presumably forall a. blah) | CastBy OutCoercion -- Cast by this; c.f. 
CastIt -- Coercion is optimised instance Outputable ArgInfo where ppr (ArgInfo { ai_fun = fun, ai_args = args, ai_dmds = dmds }) = text "ArgInfo" <+> braces (sep [ text "fun =" <+> ppr fun , text "dmds(first 10) =" <+> ppr (take 10 dmds) , text "args =" <+> ppr args ]) instance Outputable ArgSpec where ppr (ValArg { as_arg = arg }) = text "ValArg" <+> ppr arg ppr (TyArg { as_arg_ty = ty }) = text "TyArg" <+> ppr ty ppr (CastBy c) = text "CastBy" <+> ppr c addValArgTo :: ArgInfo -> OutExpr -> OutType -> ArgInfo addValArgTo ai arg hole_ty | ArgInfo { ai_dmds = dmd:dmds, ai_discs = _:discs, ai_rewrite = rew } <- ai -- Pop the top demand and and discounts off , let arg_spec = ValArg { as_arg = arg, as_hole_ty = hole_ty, as_dmd = dmd } = ai { ai_args = arg_spec : ai_args ai , ai_dmds = dmds , ai_discs = discs , ai_rewrite = decArgCount rew } | otherwise = pprPanic "addValArgTo" (ppr ai $$ ppr arg) -- There should always be enough demands and discounts addTyArgTo :: ArgInfo -> OutType -> OutType -> ArgInfo addTyArgTo ai arg_ty hole_ty = ai { ai_args = arg_spec : ai_args ai , ai_rewrite = decArgCount (ai_rewrite ai) } where arg_spec = TyArg { as_arg_ty = arg_ty, as_hole_ty = hole_ty } addCastTo :: ArgInfo -> OutCoercion -> ArgInfo addCastTo ai co = ai { ai_args = CastBy co : ai_args ai } isStrictArgInfo :: ArgInfo -> Bool -- True if the function is strict in the next argument isStrictArgInfo (ArgInfo { ai_dmds = dmds }) | dmd:_ <- dmds = isStrUsedDmd dmd | otherwise = False argInfoAppArgs :: [ArgSpec] -> [OutExpr] argInfoAppArgs [] = [] argInfoAppArgs (CastBy {} : _) = [] -- Stop at a cast argInfoAppArgs (ValArg { as_arg = arg } : as) = arg : argInfoAppArgs as argInfoAppArgs (TyArg { as_arg_ty = ty } : as) = Type ty : argInfoAppArgs as pushSimplifiedArgs, pushSimplifiedRevArgs :: SimplEnv -> [ArgSpec] -- In normal, forward order for pushSimplifiedArgs, -- in /reverse/ order for pushSimplifiedRevArgs -> SimplCont -> SimplCont pushSimplifiedArgs env args cont = foldr 
(pushSimplifiedArg env) cont args pushSimplifiedRevArgs env args cont = foldl' (\k a -> pushSimplifiedArg env a k) cont args pushSimplifiedArg :: SimplEnv -> ArgSpec -> SimplCont -> SimplCont pushSimplifiedArg _env (TyArg { as_arg_ty = arg_ty, as_hole_ty = hole_ty }) cont = ApplyToTy { sc_arg_ty = arg_ty, sc_hole_ty = hole_ty, sc_cont = cont } pushSimplifiedArg env (ValArg { as_arg = arg, as_hole_ty = hole_ty }) cont = ApplyToVal { sc_arg = arg, sc_env = env, sc_dup = Simplified -- The SubstEnv will be ignored since sc_dup=Simplified , sc_hole_ty = hole_ty, sc_cont = cont } pushSimplifiedArg _ (CastBy c) cont = CastIt { sc_co = c, sc_cont = cont, sc_opt = True } argInfoExpr :: OutId -> [ArgSpec] -> OutExpr -- NB: the [ArgSpec] is reversed so that the first arg -- in the list is the last one in the application argInfoExpr fun rev_args = go rev_args where go [] = Var fun go (ValArg { as_arg = arg } : as) = go as `App` arg go (TyArg { as_arg_ty = ty } : as) = go as `App` Type ty go (CastBy co : as) = mkCast (go as) co decArgCount :: RewriteCall -> RewriteCall decArgCount (TryRules n rules) = TryRules (n-1) rules decArgCount rew = rew mkRewriteCall :: Id -> RuleEnv -> RewriteCall -- See Note [Rewrite rules and inlining] in GHC.Core.Opt.Simplify.Iteration -- We try to skip any unnecessary stages: -- No rules => skip TryRules -- No unfolding => skip TryInlining -- This skipping is "just" for efficiency. But rebuildCall is -- quite a heavy hammer, so skipping stages is a good plan. -- And it's extremely simple to do. 
mkRewriteCall fun rule_env | not (null rules) = TryRules n_required rules | canUnfold unf = TryInlining | otherwise = TryNothing where n_required = maximum (map ruleArity rules) rules = getRules rule_env fun unf = idUnfolding fun {- ************************************************************************ * * Functions on SimplCont * * ************************************************************************ -} mkBoringStop :: OutType -> SimplCont mkBoringStop ty = Stop ty BoringCtxt topSubDmd mkRhsStop :: OutType -> RecFlag -> Demand -> SimplCont -- See Note [RHS of lets] in GHC.Core.Unfold mkRhsStop ty is_rec bndr_dmd = Stop ty (RhsCtxt is_rec) (subDemandIfEvaluated bndr_dmd) mkLazyArgStop :: OutType -> ArgInfo -> SimplCont mkLazyArgStop ty fun_info = Stop ty (lazyArgContext fun_info) arg_sd where arg_sd = subDemandIfEvaluated (Partial.head (ai_dmds fun_info)) ------------------- contIsRhs :: SimplCont -> Maybe RecFlag contIsRhs (Stop _ (RhsCtxt is_rec) _) = Just is_rec contIsRhs (CastIt { sc_cont = k }) = contIsRhs k -- For f = e |> co, treat e as Rhs context contIsRhs _ = Nothing ------------------- contIsStop :: SimplCont -> Bool contIsStop (Stop {}) = True contIsStop _ = False contIsDupable :: SimplCont -> Bool contIsDupable (Stop {}) = True contIsDupable (ApplyToTy { sc_cont = k }) = contIsDupable k contIsDupable (ApplyToVal { sc_dup = OkToDup }) = True -- See Note [DupFlag invariants] contIsDupable (Select { sc_dup = OkToDup }) = True -- ...ditto... contIsDupable (StrictArg { sc_dup = OkToDup }) = True -- ...ditto... contIsDupable (CastIt { sc_cont = k }) = contIsDupable k contIsDupable _ = False ------------------- contIsTrivial :: SimplCont -> Bool contIsTrivial (Stop {}) = True contIsTrivial (ApplyToTy { sc_cont = k }) = contIsTrivial k -- This one doesn't look right. 
A value application is not trivial -- contIsTrivial (ApplyToVal { sc_arg = Coercion _, sc_cont = k }) = contIsTrivial k contIsTrivial (CastIt { sc_cont = k }) = contIsTrivial k contIsTrivial _ = False ------------------- contResultType :: SimplCont -> OutType contResultType (Stop ty _ _) = ty contResultType (CastIt { sc_cont = k }) = contResultType k contResultType (StrictBind { sc_cont = k }) = contResultType k contResultType (StrictArg { sc_cont = k }) = contResultType k contResultType (Select { sc_cont = k }) = contResultType k contResultType (ApplyToTy { sc_cont = k }) = contResultType k contResultType (ApplyToVal { sc_cont = k }) = contResultType k contResultType (TickIt _ k) = contResultType k contHoleType :: SimplCont -> OutType contHoleType (Stop ty _ _) = ty contHoleType (TickIt _ k) = contHoleType k contHoleType (CastIt { sc_co = co }) = coercionLKind co contHoleType (StrictBind { sc_bndr = b, sc_dup = dup, sc_env = se }) = perhapsSubstTy dup se (idType b) contHoleType (StrictArg { sc_fun_ty = ty }) = funArgTy ty contHoleType (ApplyToTy { sc_hole_ty = ty }) = ty -- See Note [The hole type in ApplyToTy] contHoleType (ApplyToVal { sc_hole_ty = ty }) = ty -- See Note [The hole type in ApplyToTy] contHoleType (Select { sc_dup = d, sc_bndr = b, sc_env = se }) = perhapsSubstTy d se (idType b) -- Computes the multiplicity scaling factor at the hole. That is, in (case [] of -- x ::(p) _ { … }) (respectively for arguments of functions), the scaling -- factor is p. And in E[G[]], the scaling factor is the product of the scaling -- factor of E and that of G. -- -- The scaling factor at the hole of E[] is used to determine how a binder -- should be scaled if it commutes with E. This appears, in particular, in the -- case-of-case transformation. 
contHoleScaling :: SimplCont -> Mult
contHoleScaling (Stop _ _ _) = OneTy
contHoleScaling (CastIt { sc_cont = k }) = contHoleScaling k
contHoleScaling (StrictBind { sc_bndr = id, sc_cont = k })
  = idMult id `mkMultMul` contHoleScaling k
contHoleScaling (Select { sc_bndr = id, sc_cont = k })
  = idMult id `mkMultMul` contHoleScaling k
contHoleScaling (StrictArg { sc_fun_ty = fun_ty, sc_cont = k })
  = w `mkMultMul` contHoleScaling k
  where
    -- w is the multiplicity on the function's value-argument arrow,
    -- i.e. how often the argument in the hole will be consumed
    (w, _, _) = splitFunTy fun_ty
contHoleScaling (ApplyToTy { sc_cont = k }) = contHoleScaling k
contHoleScaling (ApplyToVal { sc_cont = k }) = contHoleScaling k
contHoleScaling (TickIt _ k) = contHoleScaling k

-------------------
countArgs :: SimplCont -> Int
-- Count all arguments, including types, coercions,
-- and other values; skipping over casts.
countArgs (ApplyToTy { sc_cont = cont }) = 1 + countArgs cont
countArgs (ApplyToVal { sc_cont = cont }) = 1 + countArgs cont
countArgs (CastIt { sc_cont = cont }) = countArgs cont
countArgs _ = 0

countValArgs :: SimplCont -> Int
-- Count value arguments only
-- (type applications are skipped, casts are looked through)
countValArgs (ApplyToTy { sc_cont = cont }) = countValArgs cont
countValArgs (ApplyToVal { sc_cont = cont }) = 1 + countValArgs cont
countValArgs (CastIt { sc_cont = cont }) = countValArgs cont
countValArgs _ = 0

-------------------
contArgs :: SimplCont -> (Bool, [ArgSummary], SimplCont)
-- Summarises value args, discards type args and coercions
-- The returned continuation of the call is only used to
-- answer questions like "are you interesting?"
contArgs cont | lone cont = (True, [], cont) | otherwise = go [] cont where lone (ApplyToTy {}) = False -- See Note [Lone variables] in GHC.Core.Unfold lone (ApplyToVal {}) = False -- NB: even a type application or cast lone (CastIt {}) = False -- stops it being "lone" lone _ = True go args (ApplyToVal { sc_arg = arg, sc_env = se, sc_cont = k }) = go (is_interesting arg se : args) k go args (ApplyToTy { sc_cont = k }) = go args k go args (CastIt { sc_cont = k }) = go args k go args k = (False, reverse args, k) is_interesting arg se = interestingArg se arg -- Do *not* use short-cutting substitution here -- because we want to get as much IdInfo as possible -- | Describes how the 'SimplCont' will evaluate the hole as a 'SubDemand'. -- This can be more insightful than the limited syntactic context that -- 'SimplCont' provides, because the 'Stop' constructor might carry a useful -- 'SubDemand'. -- For example, when simplifying the argument `e` in `f e` and `f` has the -- demand signature ``, this function will give you back `P(S,A)` when -- simplifying `e`. -- -- PRECONDITION: Don't call with 'ApplyToVal'. We haven't thoroughly thought -- about what to do then and no call sites so far seem to care. contEvalContext :: SimplCont -> SubDemand contEvalContext k = case k of Stop _ _ sd -> sd TickIt _ k -> contEvalContext k CastIt { sc_cont = k } -> contEvalContext k ApplyToTy{ sc_cont = k } -> contEvalContext k -- ApplyToVal{sc_cont=k} -> mkCalledOnceDmd $ contEvalContext k -- Not 100% sure that's correct, . Here's an example: -- f (e x) and f :: -- then what is the evaluation context of 'e' when we simplify it? E.g., -- simpl e (ApplyToVal x $ Stop "C(S,C(1,L))") -- then it *should* be "C(1,C(S,C(1,L))", so perhaps correct after all. 
-- But for now we just panic: ApplyToVal{} -> pprPanic "contEvalContext" (ppr k) StrictArg{sc_fun=fun_info} -> subDemandIfEvaluated (Partial.head (ai_dmds fun_info)) StrictBind{sc_bndr=bndr} -> subDemandIfEvaluated (idDemandInfo bndr) Select{} -> topSubDmd -- Perhaps reconstruct the demand on the scrutinee by looking at field -- and case binder dmds, see addCaseBndrDmd. No priority right now. ------------------- mkArgInfo :: SimplEnv -> RuleEnv -> Id -> SimplCont -> ArgInfo mkArgInfo env rule_base fun cont | n_val_args < idArity fun -- Note [Unsaturated functions] = ArgInfo { ai_fun = fun, ai_args = [] , ai_rewrite = fun_rewrite , ai_encl = False , ai_dmds = vanilla_dmds , ai_discs = vanilla_discounts } | otherwise = ArgInfo { ai_fun = fun , ai_args = [] , ai_rewrite = fun_rewrite , ai_encl = fun_has_rules || contHasRules cont , ai_dmds = add_type_strictness (idType fun) arg_dmds , ai_discs = arg_discounts } where n_val_args = countValArgs cont fun_rewrite = mkRewriteCall fun rule_base fun_has_rules = case fun_rewrite of TryRules {} -> True _ -> False vanilla_discounts, arg_discounts :: [Int] vanilla_discounts = repeat 0 arg_discounts = case idUnfolding fun of CoreUnfolding {uf_guidance = UnfIfGoodArgs {ug_args = discounts}} -> discounts ++ vanilla_discounts _ -> vanilla_discounts vanilla_dmds, arg_dmds :: [Demand] vanilla_dmds = repeat topDmd arg_dmds | not (seInline env) = vanilla_dmds -- See Note [Do not expose strictness if sm_inline=False] | otherwise = -- add_type_str fun_ty $ case splitDmdSig (idDmdSig fun) of (demands, result_info) | not (demands `lengthExceeds` n_val_args) -> -- Enough args, use the strictness given. -- For bottoming functions we used to pretend that the arg -- is lazy, so that we don't treat the arg as an -- interesting context. This avoids substituting -- top-level bindings for (say) strings into -- calls to error. 
But now we are more careful about -- inlining lone variables, so its ok -- (see GHC.Core.Op.Simplify.Utils.analyseCont) if isDeadEndDiv result_info then demands -- Finite => result is bottom else demands ++ vanilla_dmds | otherwise -> warnPprTrace True "More demands than arity" (ppr fun <+> ppr (idArity fun) <+> ppr n_val_args <+> ppr demands) $ vanilla_dmds -- Not enough args, or no strictness add_type_strictness :: Type -> [Demand] -> [Demand] -- If the function arg types are strict, record that in the 'strictness bits' -- No need to instantiate because unboxed types (which dominate the strict -- types) can't instantiate type variables. -- add_type_strictness is done repeatedly (for each call); -- might be better once-for-all in the function -- But beware primops/datacons with no strictness add_type_strictness fun_ty dmds | null dmds = [] | Just (_, fun_ty') <- splitForAllTyCoVar_maybe fun_ty = add_type_strictness fun_ty' dmds -- Look through foralls | Just (_, _, arg_ty, fun_ty') <- splitFunTy_maybe fun_ty -- Add strict-type info , dmd : rest_dmds <- dmds , let dmd' | definitelyUnliftedType arg_ty = strictifyDmd dmd | otherwise -- Something that's not definitely unlifted. -- If the type is representation-polymorphic, we can't know whether -- it's strict. = dmd = dmd' : add_type_strictness fun_ty' rest_dmds | otherwise = dmds {- Note [Unsaturated functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (test eyeball/inline4) x = a:as y = f x where f has arity 2. Then we do not want to inline 'x', because it'll just be floated out again. Even if f has lots of discounts on its first argument -- it must be saturated for these to kick in Note [Do not expose strictness if sm_inline=False] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #15163 showed a case in which we had {-# INLINE [1] zip #-} zip = undefined {-# RULES "foo" forall as bs. stream (zip as bs) = ..blah... 
#-} If we expose zip's bottoming nature when simplifying the LHS of the RULE we get {-# RULES "foo" forall as bs. stream (case zip of {}) = ..blah... #-} discarding the arguments to zip. Usually this is fine, but on the LHS of a rule it's not, because 'as' and 'bs' are now not bound on the LHS. This is a pretty pathological example, so I'm not losing sleep over it, but the simplest solution was to check sm_inline; if it is False, which it is on the LHS of a rule (see updModeForRules), then don't make use of the strictness info for the function. -} {- ************************************************************************ * * Interesting arguments * * ************************************************************************ Note [Interesting call context] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want to avoid inlining an expression where there can't possibly be any gain, such as in an argument position. Hence, if the continuation is interesting (eg. a case scrutinee that isn't just a seq, application etc.) then we inline, otherwise we don't. Previously some_benefit used to return True only if the variable was applied to some value arguments. This didn't work: let x = _coerce_ (T Int) Int (I# 3) in case _coerce_ Int (T Int) x of I# y -> .... we want to inline x, but can't see that it's a constructor in a case scrutinee position, and some_benefit is False. Another example: dMonadST = _/\_ t -> :Monad (g1 _@_ t, g2 _@_ t, g3 _@_ t) .... case dMonadST _@_ x0 of (a,b,c) -> .... we'd really like to inline dMonadST here, but we *don't* want to inline if the case expression is just case x of y { DEFAULT -> ... } since we can just eliminate this case instead (x is in WHNF). Similar applies when x is bound to a lambda expression. Hence contIsInteresting looks for case expressions with just a single default case. Note [No case of case is boring] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we see case f x of we'd usually treat the context as interesting, to encourage 'f' to inline. 
But if case-of-case is off, it's really not so interesting after all,
because we are unlikely to be able to push the case expression into the
branches of any case in f's unfolding. So, to reduce unnecessary code
expansion, we just make the context look boring.  This made a small
compile-time perf improvement in perf/compiler/T6048, and it looks
plausible to me.

Note [Seq is boring]
~~~~~~~~~~~~~~~~~~~~
Suppose
  f x = case v of
          True  -> Just x
          False -> Just (x-1)

Now consider these variants of case (f x) of ...

1. [Dead case binder]: inline f
       case f x of b{-dead-} { DEFAULT -> blah[no b] }
   Inlining (f x) will allow us to avoid ever allocating (Just x),
   since the case binder `b` is dead.  We will end up with a join
   point for blah, thus
       join j = blah in
       case v of { True -> j; False -> j }
   which will turn into (case v of DEFAULT -> blah)
   All good

2. [Live case binder, live alt binders]: inline f
      case f x of b { Just y -> blah[y,b] }
   Inlining (f x) will mean we still allocate (Just x), but we also get
   to bind `y` without fetching it out of the Just, thus
      join j y b = blah[y,b]
      case v of { True  -> j x (Just x)
                ; False -> let y = x-1 in j y (Just y) }
   Inlining (f x) has a small benefit, perhaps.
   (To T14955 it makes a surprisingly large difference of ~30% to inline here.)

3. [Live case binder, dead alt binders]: maybe don't inline f
      case f x of b { DEFAULT -> blah[b] }
   Inlining (f x) will still mean we allocate (Just x).  We'd get:
      join j b = blah[b]
      case v of { True -> j (Just x); False -> j (Just (x-1)) }
   No new optimisations are revealed. Nothing is gained.
   (This is the situation in T22317.)

   A variant is when we have a data constructor with dead binders:
      case g x of b { (x{-dead-}, y{-dead-}) -> blah[b, no x, no y] }
   Instead of DEFAULT we have a single constructor alternative with all
   dead binders.  Again, no gain from inlining (f x)

4.
[Live case binder, dead alt binders]: small f
   Suppose f is CPR'd, so it looks like
       f x = case $wf x of (# a #) -> Just a
   Then even in case (3) we want to inline:
       case f x of b { DEFAULT -> blah[b] }
   -->
       case $wf x of (# a #) -> let b = Just a in blah[b]
   This is very good; we now know a lot about `b` (instead of nothing)
   and `blah` might benefit.

   Similarly if `f` has a join point
      f x = join $j y = Just y in ...
   Again the case (f x) is now consuming a constructor (Just y).

This is very like the situation described in Note [RHS of lets] in
GHC.Core.Opt.Simplify.Inline; (case e of b -> blah) is just like a
strict `let`.

Conclusion: in interestingCallContext, a case-expression (i.e. Select
continuation) usually gives a CaseCtxt (cases 1,2); but when (cases 3,4):
  * It has a non-dead case-binder
  * It has one alternative
  * All the binders in the alternative are dead
then the `case` is just a strict let-binding, so use RhsCtxt NonRecursive.
This RhsCtxt gives a small incentive for small functions to inline.
That incentive is what is needed in case (4).

Wrinkle (SB1). The 'small incentive' is implemented by `calc_some_benefit`
in GHC.Core.Opt.Simplify.Inline.tryUnfolding.  We restrict the incentive
just to functions that have unfolding guidance of `UnfWhen`, which
particularly includes wrappers created by CPR, exactly case (4) above.
Without this limitation I got too much fruitless inlining, which led to
regressions (#22317 is an example).

A good example of a function where this 'small incentive' is important
is GHC.Num.Integer where we ended up with calls like this:
    case (integerSignum a b) of r -> ...
but were failing to inline integerSignum, even though it always returns
a single constructor, so it is very helpful to inline it.

There is also an issue of confluence-of-the-simplifier.  Suppose we have
    f x = case x of r -> ...
and the Simplifier sees
    f (integerSignum a b)
Because `f` scrutinises `x`, the unfolding guidance for f gives a
discount for `x`; and that discount makes interestingCallContext for
the context `f <>` return DiscArgCtxt, which again gives that
incentive.  We don't want the incentive to disappear when we inline `f`!
-}

lazyArgContext :: ArgInfo -> CallCtxt
-- Use this for lazy arguments
lazyArgContext (ArgInfo { ai_encl = encl_rules, ai_discs = discs })
  | encl_rules                = RuleArgCtxt
  | disc:_ <- discs, disc > 0 = DiscArgCtxt -- Be keener here
  | otherwise                 = BoringCtxt  -- Nothing interesting

strictArgContext :: ArgInfo -> CallCtxt
strictArgContext (ArgInfo { ai_encl = encl_rules, ai_discs = discs })
-- Use this for strict arguments
  | encl_rules                = RuleArgCtxt
  | disc:_ <- discs, disc > 0 = DiscArgCtxt -- Be keener here
  | otherwise                 = RhsCtxt NonRecursive
  -- Why RhsCtxt? if we see f (g x), and f is strict, we
  -- want to be a bit more eager to inline g, because it may
  -- expose an eval (on x perhaps) that can be eliminated or
  -- shared. I saw this in nofib 'boyer2', RewriteFuns.onewayunify1
  -- It's worth an 18% improvement in allocation for this
  -- particular benchmark; 5% on 'mate' and 1.3% on 'multiplier'
  --
  -- Why NonRecursive? Because it's a bit like
  --   let a = g x in f a

interestingCallContext :: SimplEnv -> SimplCont -> CallCtxt
-- See Note [Interesting call context]
interestingCallContext env cont
  = interesting cont
  where
    interesting (Select {sc_alts=alts, sc_bndr=case_bndr})
      | not (seCaseCase env) = BoringCtxt -- See Note [No case of case is boring]
      | [Alt _ bs _] <- alts
      , all isDeadBinder bs
      , not (isDeadBinder case_bndr) = RhsCtxt NonRecursive
           -- See Note [Seq is boring]
      | otherwise = CaseCtxt

    interesting (ApplyToVal {}) = ValAppCtxt
        -- Can happen if we have (f Int |> co) y
        -- If f has an INLINE prag we need to give it some
        -- motivation to inline. See Note [Cast then apply]
        -- in GHC.Core.Unfold

    interesting (StrictArg { sc_fun = fun }) = strictArgContext fun
    interesting (StrictBind {})              = BoringCtxt
    interesting (Stop _ cci _)               = cci
    interesting (TickIt _ k)                 = interesting k
    interesting (ApplyToTy { sc_cont = k })  = interesting k
    interesting (CastIt { sc_cont = k })     = interesting k
        -- If this call is the arg of a strict function, the context
        -- is a bit interesting. If we inline here, we may get useful
        -- evaluation information to avoid repeated evals: e.g.
        --    x + (y * z)
        -- Here the contIsInteresting makes the '*' keener to inline,
        -- which in turn exposes a constructor which makes the '+' inline.
        -- Assuming that +,* aren't small enough to inline regardless.
        --
        -- It's also very important to inline in a strict context for things
        -- like
        --     foldr k z (f x)
        -- Here, the context of (f x) is strict, and if f's unfolding is
        -- a build it's *great* to inline it here. So we must ensure that
        -- the context for (f x) is not totally uninteresting.

contHasRules :: SimplCont -> Bool
-- If the argument has form (f x y), where x,y are boring,
-- and f is marked INLINE, then we don't want to inline f.
-- But if the context of the argument is
--      g (f x y)
-- where g has rules, then we *do* want to inline f, in case it
-- exposes a rule that might fire.  Similarly, if the context is
--      h (g (f x x))
-- where h has rules, then we do want to inline f.  So contHasRules
-- tries to see if the context of the f-call is a call to a function
-- with rules.
-- -- The ai_encl flag makes this happen; if it's -- set, the inliner gets just enough keener to inline f -- regardless of how boring f's arguments are, if it's marked INLINE -- -- The alternative would be to *always* inline an INLINE function, -- regardless of how boring its context is; but that seems overkill -- For example, it'd mean that wrapper functions were always inlined contHasRules cont = go cont where go (ApplyToVal { sc_cont = cont }) = go cont go (ApplyToTy { sc_cont = cont }) = go cont go (CastIt { sc_cont = cont }) = go cont go (StrictArg { sc_fun = fun }) = ai_encl fun go (Stop _ RuleArgCtxt _) = True go (TickIt _ c) = go c go (Select {}) = False go (StrictBind {}) = False -- ?? go (Stop _ _ _) = False {- Note [Interesting arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An argument is interesting if it deserves a discount for unfoldings with a discount in that argument position. The idea is to avoid unfolding a function that is applied only to variables that have no unfolding (i.e. they are probably lambda bound): f x y z There is little point in inlining f here. Generally, *values* (like (C a b) and (\x.e)) deserve discounts. But we must look through lets, eg (let x = e in C a b), because the let will float, exposing the value, if we inline. That makes it different to exprIsHNF. Before 2009 we said it was interesting if the argument had *any* structure at all; i.e. (hasSomeUnfolding v). But does too much inlining; see #3016. But we don't regard (f x y) as interesting, unless f is unsaturated. If it's saturated and f hasn't inlined, then it's probably not going to now! Note [Conlike is interesting] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f d = ...((*) d x y)... ... f (df d')... where df is con-like. Then we'd really like to inline 'f' so that the rule for (*) (df d) can fire. 
To do this a) we give a discount for being an argument of a
class-op (eg (*) d)
           b) we say that a con-like argument (eg (df d)) is interesting
-}

interestingArg :: SimplEnv -> CoreExpr -> ArgSummary
-- See Note [Interesting arguments]
interestingArg env e = go env 0 e
  where
    -- n is # value args to which the expression is applied
    go env n (Var v)
       = case substId env v of
           DoneId v'            -> go_var n v'
           DoneEx e _           -> go (zapSubstEnv env) n e
           ContEx tvs cvs ids e -> go (setSubstEnv env tvs cvs ids) n e

    go _ _ (Lit l)
       | isLitRubbish l        = TrivArg -- Leads to unproductive inlining in WWRec, #20035
       | otherwise             = ValueArg
    go _ _ (Type _)            = TrivArg
    go _ _ (Coercion _)        = TrivArg
    go env n (App fn (Type _)) = go env n fn
    go env n (App fn _)        = go env (n+1) fn
    go env n (Tick _ a)        = go env n a
    go env n (Cast e _)        = go env n e
    go env n (Lam v e)
       | isTyVar v             = go env n e
       | n>0                   = NonTrivArg -- (\x.b) e is NonTriv
       | otherwise             = ValueArg
    go _ _ (Case {})           = NonTrivArg
    go env n (Let b e)         = case go env' n e of
                                   ValueArg -> ValueArg
                                   _        -> NonTrivArg
                               where
                                 env' = env `addNewInScopeIds` bindersOf b

    go_var n v
       | isConLikeId v = ValueArg   -- Experimenting with 'conlike' rather than
                                    --    data constructors here
                                    -- DFuns are con-like; see Note [Conlike is interesting]
       | idArity v > n = ValueArg   -- Catches (eg) primops with arity but no unfolding
       | n > 0         = NonTrivArg -- Saturated or unknown call
       | otherwise     -- n==0, no value arguments; look for an interesting unfolding
       = case idUnfolding v of
           OtherCon []      -> NonTrivArg -- It's evaluated, but that's all we know
           OtherCon _       -> ValueArg   -- Evaluated and we know it isn't these constructors
                                          -- See Note [OtherCon and interestingArg]
           DFunUnfolding {} -> ValueArg   -- We know that idArity=0
           CoreUnfolding{ uf_cache = cache }
             | uf_is_conlike cache -> ValueArg   -- Includes constructor applications
             | uf_is_value cache   -> NonTrivArg -- Things like partial applications
             | otherwise           -> TrivArg
           BootUnfolding    -> TrivArg
           NoUnfolding      -> TrivArg

{- Note [OtherCon and interestingArg]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
interestingArg returns
  (a) NonTrivArg for an arg with an OtherCon [] unfolding
  (b) ValueArg for an arg with an OtherCon [c1,c2..] unfolding.

Reason for (a): I found (in the GHC.Num.Integer library) that I was
inlining a pretty big function when all we knew was that its arguments
were evaluated, nothing more.  That in turn makes the enclosing
function too big to inline elsewhere.

Reason for (b): we want to inline integerCompare here
   integerLt# :: Integer -> Integer -> Bool#
   integerLt# (IS x) (IS y) = x <# y
   integerLt# x y | LT <- integerCompare x y = 1#
   integerLt# _ _ = 0#

************************************************************************
*                                                                      *
                SimplMode
*                                                                      *
************************************************************************
-}

updModeForStableUnfoldings :: Activation -> SimplMode -> SimplMode
-- See Note [The environments of the Simplify pass]
updModeForStableUnfoldings unf_act current_mode
  = current_mode { sm_phase      = phaseFromActivation unf_act
                 , sm_eta_expand = False
                 , sm_inline     = True }
    -- sm_eta_expand: see Note [Eta expansion in stable unfoldings and rules]
    -- sm_rules: just inherit; sm_rules might be "off"
    --           because of -fno-enable-rewrite-rules
  where
    phaseFromActivation (ActiveAfter _ n) = Phase n
    phaseFromActivation _                 = InitialPhase

updModeForRules :: SimplMode -> SimplMode
-- See Note [Simplifying rules]
-- See Note [The environments of the Simplify pass]
updModeForRules current_mode
  = current_mode { sm_phase        = InitialPhase
                 , sm_inline       = False
                      -- See Note [Do not expose strictness if sm_inline=False]
                 , sm_rules        = False
                 , sm_cast_swizzle = False
                      -- See Note [Cast swizzling on rule LHSs]
                 , sm_eta_expand   = False }

{- Note [Simplifying rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When simplifying a rule LHS, refrain from /any/ inlining or applying
of other RULES. Doing anything to the LHS is plain confusing, because
it means that what the rule matches is not what the user
wrote. c.f. #10595, and #10528.
* sm_inline, sm_rules: inlining (or applying rules) on rule LHSs risks introducing Ticks into the LHS, which makes matching trickier. #10665, #10745. Doing this to either side confounds tools like HERMIT, which seek to reason about and apply the RULES as originally written. See #10829. See also Note [Do not expose strictness if sm_inline=False] * sm_eta_expand: the template (LHS) of a rule must only mention coercion /variables/ not arbitrary coercions. See Note [Casts in the template] in GHC.Core.Rules. Eta expansion can create new coercions; so we switch it off. There is, however, one case where we are pretty much /forced/ to transform the LHS of a rule: postInlineUnconditionally. For instance, in the case of let f = g @Int in f We very much want to inline f into the body of the let. However, to do so (and be able to safely drop f's binding) we must inline into all occurrences of f, including those in the LHS of rules. This can cause somewhat surprising results; for instance, in #18162 we found that a rule template contained ticks in its arguments, because postInlineUnconditionally substituted in a trivial expression that contains ticks. See Note [Tick annotations in RULE matching] in GHC.Core.Rules for details. Note [Cast swizzling on rule LHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the LHS of a RULE we may have (\x. blah |> CoVar cv) where `cv` is a coercion variable. Critically, we really only want coercion /variables/, not general coercions, on the LHS of a RULE. So we don't want to swizzle this to (\x. blah) |> (Refl xty `FunCo` CoVar cv) So we switch off cast swizzling in updModeForRules. Note [Eta expansion in stable unfoldings and rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SPJ Jul 22: whether or not eta-expansion is switched on in a stable unfolding, or the RHS of a RULE, seems to be a bit moot. But switching it on adds clutter, so I'm experimenting with switching off eta-expansion in such places. 
In the olden days, we really /wanted/ to switch it off. Old note: If we have a stable unfolding f :: Ord a => a -> IO () -- Unfolding template -- = /\a \(d:Ord a) (x:a). bla we do not want to eta-expand to f :: Ord a => a -> IO () -- Unfolding template -- = (/\a \(d:Ord a) (x:a) (eta:State#). bla eta) |> co because now specialisation of the overloading doesn't work properly (see Note [Specialisation shape] in GHC.Core.Opt.Specialise), #9509. So we disable eta-expansion in stable unfoldings. But this old note is no longer relevant because the specialiser has improved: see Note [Account for casts in binding] in GHC.Core.Opt.Specialise. So we seem to have a free choice. Note [Inlining in gentle mode] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Something is inlined if (i) the sm_inline flag is on, AND (ii) the thing has an INLINE pragma, AND (iii) the thing is inlinable in the earliest phase. Example of why (iii) is important: {-# INLINE [~1] g #-} g = ... {-# INLINE f #-} f x = g (g x) If we were to inline g into f's inlining, then an importing module would never be able to do f e --> g (g e) ---> RULE fires because the stable unfolding for f has had g inlined into it. On the other hand, it is bad not to do ANY inlining into an stable unfolding, because then recursive knots in instance declarations don't get unravelled. However, *sometimes* SimplGently must do no call-site inlining at all (hence sm_inline = False). Before full laziness we must be careful not to inline wrappers, because doing so inhibits floating e.g. ...(case f x of ...)... ==> ...(case (case x of I# x# -> fw x#) of ...)... ==> ...(case x of I# x# -> case fw x# of ...)... and now the redex (f x) isn't floatable any more. The no-inlining thing is also important for Template Haskell. You might be compiling in one-shot mode with -O2; but when TH compiles a splice before running it, we don't want to use -O2. 
Indeed, we don't want to inline anything, because the byte-code
interpreter might get confused about unboxed tuples and suchlike.

Note [Simplifying inside stable unfoldings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We must take care with simplification inside stable unfoldings (which come
from INLINE pragmas).

First, consider the following example
        let f = \pq -> BIG
        in
        let g = \y -> f y y
            {-# INLINE g #-}
        in ...g...g...g...g...g...
Now, if that's the ONLY occurrence of f, it might be inlined inside g,
and thence copied multiple times when g is inlined.  HENCE we treat
any occurrence in a stable unfolding as a multiple occurrence, not a single
one; see OccurAnal.addRuleUsage.

Second, we *do* want to do some modest rules/inlining stuff in stable
unfoldings, partly to eliminate senseless crap, and partly to break
the recursive knots generated by instance declarations.

However, suppose we have
        {-# INLINE <activation> f #-}
        f = <rhs>
meaning "inline f in phases p where activation <activation>(p) holds".
Then what inlinings/rules can we apply to the copy of <rhs> captured in
f's stable unfolding?  Our model is that literally <rhs> is substituted for
f when it is inlined.  So our conservative plan (implemented by
updModeForStableUnfoldings) is this:

  -------------------------------------------------------------
  When simplifying the RHS of a stable unfolding, set the phase
  to the phase in which the stable unfolding first becomes active
  -------------------------------------------------------------

That ensures that

  a) Rules/inlinings that *cease* being active before p will
     not apply to the stable unfolding, consistent with it being
     inlined in its *original* form in phase p.

  b) Rules/inlinings that only become active *after* p will
     not apply to the stable unfolding, again to be consistent with
     inlining the *original* rhs in phase p.

For example,
        {-# INLINE f #-}
        f x = ...g...

        {-# NOINLINE [1] g #-}
        g y = ...

        {-# RULE h g = ...
        #-}
Here we must not inline g into f's RHS, even when we get to phase 0,
because when f is later inlined into some other module we want the
rule for h to fire.

Similarly, consider
        {-# INLINE f #-}
        f x = ...g...

        g y = ...
and suppose that there are auto-generated specialisations and a strictness
wrapper for g.  The specialisations get activation AlwaysActive, and the
strictness wrapper get activation (ActiveAfter 0).  So the strictness
wrapper fails the test and won't be inlined into f's stable unfolding.
That means f can inline, expose the specialised call to g, so the
specialisation rules can fire.

A note about wrappers
~~~~~~~~~~~~~~~~~~~~~
It's also important not to inline a worker back into a wrapper.
A wrapper looks like
        wrapper = inline_me (\x -> ...worker... )
Normally, the inline_me prevents the worker getting inlined into
the wrapper (initially, the worker's only call site!).  But,
if the wrapper is sure to be called, the strictness analyser will
mark it 'demanded', so when the RHS is simplified, it'll get an ArgOf
continuation.
-}

getUnfoldingInRuleMatch :: SimplEnv -> InScopeEnv
-- When matching in RULE, we want to "look through" an unfolding
-- (to see a constructor) if *rules* are on, even if *inlinings*
-- are not.  A notable example is DFuns, which really we want to
-- match in rules like (op dfun) in gentle mode.  Another example
-- is 'otherwise' which we want exprIsConApp_maybe to be able
-- to see very early on
getUnfoldingInRuleMatch env
  = ISE in_scope id_unf
  where
    in_scope = seInScope env
    phase    = sePhase env
    id_unf   = whenActiveUnfoldingFun (isActive phase)
       -- When sm_rules was off we used to test for a /stable/ unfolding,
       -- but that seems wrong (#20941)

----------------------
-- | Decide whether a rewrite RULE with the given Activation may fire
-- in the current simplifier mode.
activeRule :: SimplMode -> Activation -> Bool
-- Nothing => No rules at all
activeRule mode
  | not (sm_rules mode) = \_ -> False     -- Rewriting is off
  | otherwise           = isActive (sm_phase mode)

{-
************************************************************************
*                                                                      *
                  preInlineUnconditionally
*                                                                      *
************************************************************************

preInlineUnconditionally
~~~~~~~~~~~~~~~~~~~~~~~~
@preInlineUnconditionally@ examines a bndr to see if it is used just
once in a completely safe way, so that it is safe to discard the
binding and inline its RHS at the (unique) usage site, REGARDLESS of
how big the RHS might be.  If this is the case we don't simplify the
RHS first, but just inline it un-simplified.

This is much better than first simplifying a perhaps-huge RHS and then
inlining and re-simplifying it.  Indeed, it can be at least quadratically
better.  Consider

        x1 = e1
        x2 = e2[x1]
        x3 = e3[x2]
        ...etc...
        xN = eN[xN-1]

We may end up simplifying e1 N times, e2 N-1 times, e3 N-3 times etc.
This can happen with cascades of functions too:

        f1 = \x1.e1
        f2 = \xs.e2[f1]
        f3 = \xs.e3[f3]
        ...etc...

THE MAIN INVARIANT is this:

        ----  preInlineUnconditionally invariant -----
   IF preInlineUnconditionally chooses to inline x = <rhs>
   THEN doing the inlining should not change the occurrence
        info for the free vars of <rhs>
        ----------------------------------------------

For example, it's tempting to look at trivial binding like
        x = y
and inline it unconditionally.  But suppose x is used many times,
but this is the unique occurrence of y.  Then inlining x would change
y's occurrence info, which breaks the invariant.
It matters: y might have a BIG rhs, which will now be dup'd at every occurrence of x. Even RHSs labelled InlineMe aren't caught here, because there might be no benefit from inlining at the call site. [Sept 01] Don't unconditionally inline a top-level thing, because that can simply make a static thing into something built dynamically. E.g. x = (a,b) main = \s -> h x [Remember that we treat \s as a one-shot lambda.] No point in inlining x unless there is something interesting about the call site. But watch out: if you aren't careful, some useful foldr/build fusion can be lost (most notably in spectral/hartel/parstof) because the foldr didn't see the build. Doing the dynamic allocation isn't a big deal, in fact, but losing the fusion can be. But the right thing here seems to be to do a callSiteInline based on the fact that there is something interesting about the call site (it's strict). Hmm. That seems a bit fragile. Conclusion: inline top level things gaily until FinalPhase (the last phase), at which point don't. Note [pre/postInlineUnconditionally in gentle mode] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Even in gentle mode we want to do preInlineUnconditionally. The reason is that too little clean-up happens if you don't inline use-once things. Also a bit of inlining is *good* for full laziness; it can expose constant sub-expressions. Example in spectral/mandel/Mandel.hs, where the mandelset function gets a useful let-float if you inline windowToViewport However, as usual for Gentle mode, do not inline things that are inactive in the initial stages. See Note [Gentle mode]. Note [Stable unfoldings and preInlineUnconditionally] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Surprisingly, do not pre-inline-unconditionally Ids with INLINE pragmas! Example {-# INLINE f #-} f :: Eq a => a -> a f x = ... fInt :: Int -> Int fInt = f Int dEqInt ...fInt...fInt...fInt... Here f occurs just once, in the RHS of fInt. 
But if we inline it there it might make fInt look big, and we'll lose
the opportunity to inline f at each of fInt's call sites.  The INLINE
pragma will only inline when the application is saturated for exactly
this reason; and we don't want PreInlineUnconditionally to second-guess
it.  A live example is #3736.
    c.f. Note [Stable unfoldings and postInlineUnconditionally]

NB: this only applies for INLINE things. Do /not/ switch off
preInlineUnconditionally for

* INLINABLE. It just says to GHC "inline this if you like".  If there
  is a unique occurrence, we want to inline the stable unfolding, not
  the RHS.

* NOINLINE[n] just switches off inlining until phase n.  We should
  respect that, but after phase n, just behave as usual.

* NoUserInlinePrag.  There is no pragma at all. This ends up on wrappers.
  (See #18815.)

Note [Top-level bottoming Ids]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Don't inline top-level Ids that are bottoming, even if they are used just
once, because FloatOut has gone to some trouble to extract them out.
Inlining them won't make the program run faster!

Note [Do not inline CoVars unconditionally]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Coercion variables appear inside coercions, and the RHS of a let-binding
is a term (not a coercion) so we can't necessarily inline the latter in
the former.
-}

preInlineUnconditionally
    :: SimplEnv -> TopLevelFlag -> InId
    -> InExpr -> StaticEnv      -- These two go together
    -> Maybe SimplEnv           -- Returned env has extended substitution
-- Precondition: rhs satisfies the let-can-float invariant
-- See Note [Core let-can-float invariant] in GHC.Core
-- Reason: we don't want to inline single uses, or discard dead bindings,
--         for unlifted, side-effect-ful bindings
preInlineUnconditionally env top_lvl bndr rhs rhs_env
  | not pre_inline_unconditionally           = Nothing
  | not active                               = Nothing
  | isTopLevel top_lvl && isDeadEndId bndr   = Nothing -- Note [Top-level bottoming Ids]
  | isCoVar bndr                             = Nothing -- Note [Do not inline CoVars unconditionally]
  | isExitJoinId bndr                        = Nothing -- Note [Do not inline exit join points]
                                                       -- in module Exitify
  | not (one_occ (idOccInfo bndr))           = Nothing
  | not (isStableUnfolding unf)              = Just $! (extend_subst_with rhs)

  -- See Note [Stable unfoldings and preInlineUnconditionally]
  | not (isInlinePragma inline_prag)
  , Just inl <- maybeUnfoldingTemplate unf   = Just $! (extend_subst_with inl)
  | otherwise                                = Nothing
  where
    unf = idUnfolding bndr
    extend_subst_with inl_rhs = extendIdSubst env bndr $! (mkContEx rhs_env inl_rhs)

    one_occ IAmDead = True -- Happens in ((\x.1) v)
    one_occ OneOcc{ occ_n_br = 1, occ_in_lam = NotInsideLam }
                    = isNotTopLevel top_lvl || early_phase
    one_occ OneOcc{ occ_n_br = 1, occ_in_lam = IsInsideLam, occ_int_cxt = IsInteresting }
                    = canInlineInLam rhs
    one_occ _       = False

    pre_inline_unconditionally = sePreInline env
    active = isActive (sePhase env) (inlinePragmaActivation inline_prag)
             -- See Note [pre/postInlineUnconditionally in gentle mode]
    inline_prag = idInlinePragma bndr

    -- Be very careful before inlining inside a lambda, because (a) we must not
    -- invalidate occurrence information, and (b) we want to avoid pushing a
    -- single allocation (here) into multiple allocations (inside lambda).
    -- Inlining a *function* with a single *saturated* call would be ok, mind you.
    --  || (if is_cheap && not (canInlineInLam rhs) then pprTrace "preinline" (ppr bndr <+> ppr rhs) ok else ok)
    --  where
    --      is_cheap = exprIsCheap rhs
    --      ok = is_cheap && int_cxt

    --      int_cxt         The context isn't totally boring
    -- E.g. let f = \ab.BIG in \y. map f xs
    --      Don't want to substitute for f, because then we allocate
    --      its closure every time the \y is called
    -- But: let f = \ab.BIG in \y. map (f y) xs
    --      Now we do want to substitute for f, even though it's not
    --      saturated, because we're going to allocate a closure for
    --      (f y) every time round the loop anyhow.

    -- canInlineInLam => free vars of rhs are (Once in_lam) or Many,
    -- so substituting rhs inside a lambda doesn't change the occ info.
    -- Sadly, not quite the same as exprIsHNF.
    canInlineInLam (Lit _)    = True
    canInlineInLam (Lam b e)  = isRuntimeVar b || canInlineInLam e
    canInlineInLam (Tick t e) = not (tickishIsCode t) && canInlineInLam e
    canInlineInLam _          = False
      -- not ticks.  Counting ticks cannot be duplicated, and non-counting
      -- ticks around a Lam will disappear anyway.

    early_phase = sePhase env /= FinalPhase
    -- If we don't have this early_phase test, consider
    --      x = length [1,2,3]
    -- The full laziness pass carefully floats all the cons cells to
    -- top level, and preInlineUnconditionally floats them all back in.
    -- Result is (a) static allocation replaced by dynamic allocation
    --           (b) many simplifier iterations because this tickles
    --               a related problem; only one inlining per pass
    --
    -- On the other hand, I have seen cases where top-level fusion is
    -- lost if we don't inline top level thing (e.g. string constants)
    -- Hence the test for phase zero (which is the phase for all the final
    -- simplifications).  Until phase zero we take no special notice of
    -- top level things, but then we become more leery about inlining
    -- them.
    --
    -- What exactly to check in `early_phase` above is the subject of #17910.
    --
    -- !10088 introduced an additional Simplifier iteration in LargeRecord
    -- because we first FloatOut `case unsafeEqualityProof of ... -> I# 2#`
    -- (a non-trivial value) which we immediately inline back in.
    -- Ideally, we'd never have inlined it because the binding turns out to
    -- be expandable; unfortunately we need an iteration of the Simplifier to
    -- attach the proper unfolding and can't check isExpandableUnfolding right
    -- here.
    -- (Nor can we check for `exprIsExpandable rhs`, because that needs to look
    -- at the non-existent unfolding for the `I# 2#` which is also floated out.)

{-
************************************************************************
*                                                                      *
                  postInlineUnconditionally
*                                                                      *
************************************************************************

postInlineUnconditionally
~~~~~~~~~~~~~~~~~~~~~~~~~
@postInlineUnconditionally@ decides whether to unconditionally inline
a thing based on the form of its RHS; in particular if it has a
trivial RHS.  If so, we can inline and discard the binding altogether.

NB: a loop breaker has must_keep_binding = True and non-loop-breakers
only have *forward* references. Hence, it's safe to discard the binding

NOTE: This isn't our last opportunity to inline.  We're at the binding
site right now, and we'll get another opportunity when we get to the
occurrence(s)

Note that we do this unconditional inlining only for trivial RHSs.
Don't inline even WHNFs inside lambdas; doing so may simply increase
allocation when the function is called. This isn't the last chance; see
NOTE above.

NB: Even inline pragmas (e.g. IMustBeINLINEd) are ignored here Why?
Because we don't even want to inline them into the RHS of constructor
arguments. See NOTE above

NB: At one time even NOINLINE was ignored here: if the rhs is trivial
it's best to inline it anyway.  We often get a=E; b=a from desugaring,
with both a and b marked NOINLINE.
But that seems incompatible with our new view that inlining is like a RULE,
so I'm sticking to the 'active' story for now.

NB: unconditional inlining of this sort can introduce ticks in places that
may seem surprising; for instance, the LHS of rules.  See
Note [Simplifying rules] for details.
-}

postInlineUnconditionally
    :: SimplEnv -> BindContext
    -> InId
    -> OutId        -- The binder (*not* a CoVar), including its unfolding
    -> OutExpr
    -> Bool
-- Precondition: rhs satisfies the let-can-float invariant
-- See Note [Core let-can-float invariant] in GHC.Core
-- Reason: we don't want to inline single uses, or discard dead bindings,
--         for unlifted, side-effect-ful bindings
postInlineUnconditionally env bind_cxt old_bndr bndr rhs
  | not active                  = False
  | isWeakLoopBreaker occ_info  = False -- If it's a loop-breaker of any kind, don't inline
                                        -- because it might be referred to "earlier"
  | isStableUnfolding unfolding = False -- Note [Stable unfoldings and postInlineUnconditionally]
  | isTopLevel (bindContextLevel bind_cxt)
                                = False -- Note [Top level and postInlineUnconditionally]
  | exprIsTrivial rhs           = True
  | BC_Join {} <- bind_cxt      = False -- See point (1) of Note [Duplicating join points]
                                        --     in GHC.Core.Opt.Simplify.Iteration
  | otherwise
  = case occ_info of
      OneOcc { occ_in_lam = in_lam, occ_int_cxt = int_cxt, occ_n_br = n_br }
        -- See Note [Inline small things to avoid creating a thunk]

        | n_br >= 100 -> False  -- See #23627

        | n_br == 1, NotInsideLam <- in_lam  -- One syntactic occurrence
        -> True  -- See Note [Post-inline for single-use things]

--        | is_unlifted  -- Unlifted binding, hence ok-for-spec
--        -> True        -- hence cheap to inline probably just a primop
--                       -- Not a big deal either way
--      No, this is wrong. {v = p +# q; x = K v}.
--      Don't inline v; it'll just get floated out again.  Stupid.

        | is_demanded
        -> False  -- No allocation (it'll be a case expression in the end)
                  -- so inlining duplicates code but nothing more

        | otherwise
        -> work_ok in_lam int_cxt && smallEnoughToInline uf_opts unfolding
           -- Multiple syntactic occurences; but lazy, and small enough to dup
           -- ToDo: consider discount on smallEnoughToInline if int_cxt is true

      IAmDead -> True   -- This happens; for example, the case_bndr during case of
                        -- known constructor:  case (a,b) of x { (p,q) -> ... }
                        -- Here x isn't mentioned in the RHS, so we don't want to
                        -- create the (dead) let-binding  let x = (a,b) in ...

      _ -> False

  where
    work_ok NotInsideLam _             = True
    work_ok IsInsideLam IsInteresting  = isCheapUnfolding unfolding
    work_ok IsInsideLam NotInteresting = False
    -- NotInsideLam: outside a lambda, we want to be reasonably aggressive
    --   about inlining into multiple branches of case
    --   e.g. let x = <rhs>
    --        in case y of { C1 -> ..x..; C2 -> ..x..; C3 -> ... }
    --   Inlining can be a big win if C3 is the hot-spot, even if
    --   the uses in C1, C2 are not 'interesting'
    --   An example that gets worse if you add int_cxt here is 'clausify'
    -- InsideLam: check for acceptable work duplication, using isCheapUnfolding
    --   int_cxt to prevent us inlining inside a lambda without some
    --   good reason.  See the notes on int_cxt in preInlineUnconditionally

--    is_unlifted = isUnliftedType (idType bndr)
    is_demanded = isStrUsedDmd (idDemandInfo bndr)
    occ_info    = idOccInfo old_bndr
    unfolding   = idUnfolding bndr
    uf_opts     = seUnfoldingOpts env
    phase       = sePhase env
    active      = isActive phase (idInlineActivation bndr)
        -- See Note [pre/postInlineUnconditionally in gentle mode]

{- Note [Inline small things to avoid creating a thunk]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The point of examining occ_info here is that for *non-values*
that occur outside a lambda, the call-site inliner won't have
a chance (because it doesn't know that the thing only occurs once).
The pre-inliner won't have gotten it either, if the thing occurs
in more than one branch So the main target is things like
     let x = f y in
     case v of
        True  -> case x of ...
        False -> case x of ...
This is very important in practice; e.g. wheel-sieve1 doubles
in allocation if you miss this out.  And bits of GHC itself start
to allocate more.  An egregious example is test perf/compiler/T14697,
where GHC.Driver.CmdLine.$wprocessArgs allocated hugely more.

Note [Post-inline for single-use things]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have
   let x = rhs in ...x...
and `x` is used exactly once, and not inside a lambda, then we will usually
preInlineUnconditionally.  But we can still get this situation in
postInlineUnconditionally:
     case K rhs of K x -> ...x....
Here we'll use `simplAuxBind` to bind `x` to (the already-simplified) `rhs`;
and `x` is used exactly once.  It's beneficial to inline right away; otherwise
we risk creating
    let x = rhs in ...x...
which will take another iteration of the Simplifier to eliminate.  We do this
in two places

1. In the full `postInlineUnconditionally` look for the special case
   of "one occurrence, not under a lambda", and inline unconditionally then.
   This is a bit risky: see Note [Avoiding simplifying repeatedly] in
   Simplify.Iteration.  But in practice it seems to be a small win.

2. `simplAuxBind` does a kind of poor-man's `postInlineUnconditionally`.  It
   does not need to account for many of the cases (e.g. top level) that the
   full `postInlineUnconditionally` does.  Moreover, we don't have an OutId,
   which `postInlineUnconditionally` needs.  I got a slight improvement in
   compiler performance when I added this test.

Here's an example that we don't currently handle well:
        let f = if b then Left (\x.BIG) else Right (\y.BIG)
        in \y. ....case f of {...} ....
Here f is used just once, and duplicating the case work is fine (exprIsCheap).
But
- We can't preInlineUnconditionally because that would invalidate
  the occ info for b.
- We can't postInlineUnconditionally because the RHS is big, and that risks exponential behaviour - We can't call-site inline, because the rhs is big Alas! Note [Top level and postInlineUnconditionally] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We don't do postInlineUnconditionally for top-level things (even for ones that are trivial): * Doing so will inline top-level error expressions that have been carefully floated out by FloatOut. More generally, it might replace static allocation with dynamic. * Even for trivial expressions there's a problem. Consider {-# RULE "foo" forall (xs::[T]). reverse xs = ruggle xs #-} blah xs = reverse xs ruggle = sort In one simplifier pass we might fire the rule, getting blah xs = ruggle xs but in *that* simplifier pass we must not do postInlineUnconditionally on 'ruggle' because then we'll have an unbound occurrence of 'ruggle' If the rhs is trivial it'll be inlined by callSiteInline, and then the binding will be dead and discarded by the next use of OccurAnal * There is less point, because the main goal is to get rid of local bindings used in multiple case branches. * The inliner should inline trivial things at call sites anyway. * The Id might be exported. We could check for that separately, but since we aren't going to postInlineUnconditionally /any/ top-level bindings, we don't need to test. Note [Stable unfoldings and postInlineUnconditionally] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do not do postInlineUnconditionally if the Id has a stable unfolding, otherwise we lose the unfolding. Example -- f has stable unfolding with rhs (e |> co) -- where 'e' is big f = e |> co Then there's a danger we'll optimise to f' = e f = f' |> co and now postInlineUnconditionally, losing the stable unfolding on f. Now f' won't inline because 'e' is too big. c.f. 
Note [Stable unfoldings and preInlineUnconditionally]

************************************************************************
*                                                                      *
                  Rebuilding a lambda
*                                                                      *
************************************************************************
-}

rebuildLam :: SimplEnv
           -> [OutBndr] -> OutExpr
           -> SimplCont
           -> SimplM OutExpr
-- (rebuildLam env bndrs body cont)
-- returns expr which means the same as \bndrs. body
--
-- But it tries
--      a) eta reduction, if that gives a trivial expression
--      b) eta expansion [only if there are some value lambdas]
--
-- NB: the SimplEnv already includes the [OutBndr] in its in-scope set
rebuildLam _env [] body _cont
  = return body

rebuildLam env bndrs@(bndr:_) body cont
  = {-# SCC "rebuildLam" #-} try_eta bndrs body
  where
    rec_ids  = seRecIds env
    in_scope = getInScope env  -- Includes 'bndrs'
    mb_rhs   = contIsRhs cont

    -- See Note [Eta reduction based on evaluation context]
    eval_sd = contEvalContext cont
      -- NB: cont is never ApplyToVal, because beta-reduction would
      -- have happened.  So contEvalContext can panic on ApplyToVal.

    try_eta :: [OutBndr] -> OutExpr -> SimplM OutExpr
    try_eta bndrs body
      | -- Try eta reduction
        seDoEtaReduction env
      , Just etad_lam <- tryEtaReduce rec_ids bndrs body eval_sd
      = do { tick (EtaReduction bndr)
           ; return etad_lam }

      | -- Try eta expansion
        Nothing <- mb_rhs  -- See Note [Eta expanding lambdas]
      , seEtaExpand env
      , any isRuntimeVar bndrs  -- Only when there is at least one value lambda already
      , Just body_arity <- exprEtaExpandArity (seArityOpts env) body
      = do { tick (EtaExpansion bndr)
           ; let body' = etaExpandAT in_scope body_arity body
           ; traceSmpl "eta expand" (vcat [text "before" <+> ppr body
                                          , text "after" <+> ppr body'])
           -- NB: body' might have an outer Cast, but if so
           --     mk_lams will pull it further out, past 'bndrs' to the top
           ; return (mk_lams bndrs body') }

      | otherwise
      = return (mk_lams bndrs body)

    mk_lams :: [OutBndr] -> OutExpr -> OutExpr
    -- mk_lams pulls casts and ticks to the top
    mk_lams bndrs body@(Lam {})
      = mk_lams (bndrs ++ bndrs1) body1
      where
        (bndrs1, body1) = collectBinders body

    mk_lams bndrs (Tick t expr)
      | tickishFloatable t
      = mkTick t (mk_lams bndrs expr)

    mk_lams bndrs (Cast body co)
      | -- Note [Casts and lambdas]
        seCastSwizzle env
      , not (any bad bndrs)
      = mkCast (mk_lams bndrs body) (mkPiCos Representational bndrs co)
      where
        co_vars  = tyCoVarsOfCo co
        bad bndr = isCoVar bndr && bndr `elemVarSet` co_vars

    mk_lams bndrs body
      = mkLams bndrs body

{- Note [Eta expanding lambdas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general we *do* want to eta-expand lambdas. Consider
   f (\x -> case x of (a,b) -> \s -> blah)
where 's' is a state token, and hence can be eta expanded.  This
showed up in the code for GHC.IO.Handle.Text.hPutChar, a rather
important function!

The eta-expansion will never happen unless we do it now.  (Well, it's
possible that CorePrep will do it, but CorePrep only has a half-baked
eta-expander that can't deal with casts.  So it's much better to do it
here.)
However, when the lambda is let-bound, as the RHS of a let, we have a better eta-expander (in the form of tryEtaExpandRhs), so we don't bother to try expansion in mkLam in that case; hence the contIsRhs guard. Note [Casts and lambdas] ~~~~~~~~~~~~~~~~~~~~~~~~ Consider (\(x:tx). (\(y:ty). e) `cast` co) We float the cast out, thus (\(x:tx) (y:ty). e) `cast` (tx -> co) We do this for at least three reasons: 1. There is a danger here that the two lambdas look separated, and the full laziness pass might float an expression to between the two. 2. The occurrence analyser will mark x as InsideLam if the Lam nodes are separated (see the Lam case of occAnal). By floating the cast out we put the two Lams together, so x can get a vanilla Once annotation. If this lambda is the RHS of a let, which we inline, we can do preInlineUnconditionally on that x=arg binding. With the InsideLam OccInfo, we can't do that, which results in an extra iteration of the Simplifier. 3. It may cancel with another cast. E.g (\x. e |> co1) |> co2 If we float out co1 it might cancel with co2. Similarly let f = (\x. e |> co1) in ... If we float out co1, and then do cast worker/wrapper, we get let f1 = \x.e; f = f1 |> co1 in ... and now we can inline f, hoping that co1 may cancel at a call site. TL;DR: put the lambdas together if at all possible. In general, here's the transformation: \x. e `cast` co ===> (\x. e) `cast` (tx -> co) /\a. e `cast` co ===> (/\a. e) `cast` (/\a. co) /\g. e `cast` co ===> (/\g. e) `cast` (/\g. co) (if not (g `in` co)) We call this "cast swizzling". It is controlled by sm_cast_swizzle. See also Note [Cast swizzling on rule LHSs] Wrinkles * Notice that it works regardless of 'e'. Originally it worked only if 'e' was itself a lambda, but in some cases that resulted in fruitless iteration in the simplifier. A good example was when compiling Text.ParserCombinators.ReadPrec, where we had a definition like (\x. Get `cast` g) where Get is a constructor with nonzero arity. 
  Then mkLam eta-expanded the Get, and the next iteration eta-reduced it,
  and then eta-expanded it again.

* Note also the side condition for the case of coercion binders, namely
  not (any bad bndrs).  It does not make sense to transform
          /\g. e `cast` g    ==>    (/\g.e) `cast` (/\g.g)
  because the latter is not well-kinded.

************************************************************************
*                                                                      *
                  Eta expansion
*                                                                      *
************************************************************************
-}

tryEtaExpandRhs :: SimplEnv -> BindContext -> OutId -> OutExpr
                -> SimplM (ArityType, OutExpr)
-- See Note [Eta-expanding at let bindings]
tryEtaExpandRhs env bind_cxt bndr rhs
  | seEtaExpand env       -- If Eta-expansion is on
  , wantEtaExpansion rhs  -- and we'd like to eta-expand e
  , do_eta_expand         -- and e's manifest arity is lower than
                          --     what it could be
                          --     (never true for join points)
  = -- Do eta-expansion.
    assertPpr( not (isJoinBC bind_cxt) ) (ppr bndr) $
      -- assert: this never happens for join points; see GHC.Core.Opt.Arity
      --         Note [Do not eta-expand join points]
    do { tick (EtaExpansion bndr)
       ; return (arity_type, etaExpandAT in_scope arity_type rhs) }

  | otherwise
  = return (arity_type, rhs)

  where
    in_scope   = getInScope env
    arity_opts = seArityOpts env
    is_rec     = bindContextRec bind_cxt
    (do_eta_expand, arity_type) = findRhsArity arity_opts is_rec bndr rhs

wantEtaExpansion :: CoreExpr -> Bool
-- Mostly True; but False of PAPs which will immediately eta-reduce again
-- See Note [Which RHSs do we eta-expand?]
wantEtaExpansion (Cast e _)            = wantEtaExpansion e
wantEtaExpansion (Tick _ e)            = wantEtaExpansion e
wantEtaExpansion (Lam b e) | isTyVar b = wantEtaExpansion e
wantEtaExpansion (App e _)             = wantEtaExpansion e
wantEtaExpansion (Var {})              = False
wantEtaExpansion (Lit {})              = False
wantEtaExpansion _                     = True

{- Note [Eta-expanding at let bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We now eta expand at let-bindings, which is where the payoff comes.
The most significant thing is that we can do a simple arity analysis (in GHC.Core.Opt.Arity.findRhsArity), which we can't do for free-floating lambdas One useful consequence of not eta-expanding lambdas is this example: genMap :: C a => ... {-# INLINE genMap #-} genMap f xs = ... myMap :: D a => ... {-# INLINE myMap #-} myMap = genMap Notice that 'genMap' should only inline if applied to two arguments. In the stable unfolding for myMap we'll have the unfolding (\d -> genMap Int (..d..)) We do not want to eta-expand to (\d f xs -> genMap Int (..d..) f xs) because then 'genMap' will inline, and it really shouldn't: at least as far as the programmer is concerned, it's not applied to two arguments! Note [Which RHSs do we eta-expand?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We don't eta-expand: * Trivial RHSs, e.g. f = g If we eta expand do f = \x. g x we'll just eta-reduce again, and so on; so the simplifier never terminates. * PAPs: see Note [Do not eta-expand PAPs] What about things like this? f = case y of p -> \x -> blah Here we do eta-expand. This is a change (Jun 20), but if we have really decided that f has arity 1, then putting that lambda at the top seems like a Good idea. Note [Do not eta-expand PAPs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to have old_arity = manifestArity rhs, which meant that we would eta-expand even PAPs. But this gives no particular advantage, and can lead to a massive blow-up in code size, exhibited by #9020. Suppose we have a PAP foo :: IO () foo = returnIO () Then we can eta-expand to foo = (\eta. (returnIO () |> sym g) eta) |> g where g :: IO () ~ State# RealWorld -> (# State# RealWorld, () #) But there is really no point in doing this, and it generates masses of coercions and whatnot that eventually disappear again. For T9020, GHC allocated 6.6G before, and 0.8G afterwards; and residency dropped from 1.8G to 45M. Moreover, if we eta expand f = g d ==> f = \x. 
g d x that might in turn make g inline (if it has an inline pragma), which we might not want. After all, INLINE pragmas say "inline only when saturated" so we don't want to be too gung-ho about saturating! But note that this won't eta-expand, say f = \g -> map g Does it matter not eta-expanding such functions? I'm not sure. Perhaps strictness analysis will have less to bite on? ************************************************************************ * * \subsection{Floating lets out of big lambdas} * * ************************************************************************ Note [Floating and type abstraction] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this: x = /\a. C e1 e2 We'd like to float this to y1 = /\a. e1 y2 = /\a. e2 x = /\a. C (y1 a) (y2 a) for the usual reasons: we want to inline x rather vigorously. You may think that this kind of thing is rare. But in some programs it is common. For example, if you do closure conversion you might get: data a :-> b = forall e. (e -> a -> b) :$ e f_cc :: forall a. a :-> a f_cc = /\a. (\e. id a) :$ () Now we really want to inline that f_cc thing so that the construction of the closure goes away. So I have elaborated simplLazyBind to understand right-hand sides that look like /\ a1..an. body and treat them specially. The real work is done in GHC.Core.Opt.Simplify.Utils.abstractFloats, but there is quite a bit of plumbing in simplLazyBind as well. The same transformation is good when there are lets in the body: /\abc -> let(rec) x = e in b ==> let(rec) x' = /\abc -> let x = x' a b c in e in /\abc -> let x = x' a b c in b This is good because it can turn things like: let f = /\a -> letrec g = ... g ... in g into letrec g' = /\a -> ... g' a ... in let f = /\ a -> g' a which is better. In effect, it means that big lambdas don't impede let-floating. This optimisation is CRUCIAL in eliminating the junk introduced by desugaring mutually recursive definitions. Don't eliminate it lightly! 
[May 1999] If we do this transformation *regardless* then we can end up
with some pretty silly stuff.  For example,

        let
            st = /\ s -> let { x1=r1 ; x2=r2 } in ...
        in ..
becomes
        let y1 = /\s -> r1
            y2 = /\s -> r2
            st = /\s -> ...[y1 s/x1, y2 s/x2]
        in ..

Unless the "..." is a WHNF there is really no point in doing this.
Indeed it can make things worse.  Suppose x1 is used strictly,
and is of the form

        x1* = case f y of { (a,b) -> e }

If we abstract this wrt the tyvar we then can't do the case inline
as we would normally do.

That's why the whole transformation is part of the same process that
floats let-bindings and constructor arguments out of RHSs.  In
particular, it is guarded by the doFloatFromRhs call in simplLazyBind.

Note [Which type variables to abstract over]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Abstract only over the type variables free in the rhs wrt which the
new binding is abstracted.  Several points worth noting

(AB1) The naive approach of abstracting wrt the tyvars free in the
      Id's /type/ fails.  Consider:
          /\ a b -> let t :: (a,b) = (e1, e2)
                        x :: a     = fst t
                    in ...
      Here, b isn't free in x's type, but we must nevertheless
      abstract wrt b as well, because t's type mentions b.
      Since t is floated too, we'd end up with the bogus:
           poly_t = /\ a b -> (e1, e2)
           poly_x = /\ a   -> fst (poly_t a *b*)

(AB2) We must do closeOverKinds.  Example (#10934):
         f = /\k (f:k->*) (a:k). let t = AccFailure @ (f a) in ...
      Here we want to float 't', but we must remember to abstract over
      'k' as well, even though it is not explicitly mentioned in the
      RHS, otherwise we get
         t = /\ (f:k->*) (a:k). AccFailure @ (f a)
      which is obviously bogus.

(AB3) We get the variables to abstract over by filtering down the
      main_tvs for the original function, picking only ones mentioned
      in the abstracted body.
This means: - they are automatically in dependency order, because main_tvs is - there is no issue about non-determinism - we don't gratuitously change order, which may help (in a tiny way) with CSE and/or the compiler-debugging experience (AB4) For a recursive group, it's a bit of a pain to work out the minimal set of tyvars over which to abstract: /\ a b c. let x = ...a... in letrec { p = ...x...q... q = .....p...b... } in ... Since 'x' is abstracted over 'a', the {p,q} group must be abstracted over 'a' (because x is replaced by (poly_x a)) as well as 'b'. Remember this bizarre case too: x::a = x Here, we must abstract 'x' over 'a'. Why is it worth doing this? Partly tidiness; and partly #22459 which showed that it's harder to do polymorphic specialisation well if there are dictionaries abstracted over unnecessary type variables. See Note [Weird special case for SpecDict] in GHC.Core.Opt.Specialise (AB5) We do dependency analysis on recursive groups prior to determining which variables to abstract over. This is useful, because ANFisation in prepareBinding may float out values out of a complex recursive binding, e.g., letrec { xs = g @a "blah"# ((:) 1 []) xs } in ... ==> { prepareBinding } letrec { foo = "blah"# bar = [42] xs = g @a foo bar xs } in ... and we don't want to abstract foo and bar over @a. (Why is it OK to float the unlifted `foo` there? See Note [Core top-level string literals] in GHC.Core; it is controlled by GHC.Core.Opt.Simplify.Env.unitLetFloat.) It is also necessary to do dependency analysis, because otherwise (in #24551) we might get `foo = \@_ -> "missing"#` at the top-level, and that triggers a CoreLint error because `foo` is *not* manifestly a literal string. 
-} abstractFloats :: UnfoldingOpts -> TopLevelFlag -> [OutTyVar] -> SimplFloats -> OutExpr -> SimplM ([OutBind], OutExpr) abstractFloats uf_opts top_lvl main_tvs floats body = assert (notNull body_floats) $ assert (isNilOL (sfJoinFloats floats)) $ do { let sccs = concatMap to_sccs body_floats ; (subst, float_binds) <- mapAccumLM abstract empty_subst sccs ; return (float_binds, GHC.Core.Subst.substExpr subst body) } where is_top_lvl = isTopLevel top_lvl body_floats = letFloatBinds (sfLetFloats floats) empty_subst = GHC.Core.Subst.mkEmptySubst (sfInScope floats) -- See wrinkle (AB5) in Note [Which type variables to abstract over] -- for why we need to re-do dependency analysis to_sccs :: OutBind -> [SCC (Id, CoreExpr, VarSet)] to_sccs (NonRec id e) = [AcyclicSCC (id, e, emptyVarSet)] -- emptyVarSet: abstract doesn't need it to_sccs (Rec prs) = sccs where (ids,rhss) = unzip prs sccs = depAnal (\(id,_rhs,_fvs) -> [getName id]) (\(_id,_rhs,fvs) -> nonDetStrictFoldVarSet ((:) . getName) [] fvs) -- Wrinkle (AB3) (zip3 ids rhss (map exprFreeVars rhss)) abstract :: GHC.Core.Subst.Subst -> SCC (Id, CoreExpr, VarSet) -> SimplM (GHC.Core.Subst.Subst, OutBind) abstract subst (AcyclicSCC (id, rhs, _empty_var_set)) = do { (poly_id1, poly_app) <- mk_poly1 tvs_here id ; let (poly_id2, poly_rhs) = mk_poly2 poly_id1 tvs_here rhs' !subst' = GHC.Core.Subst.extendIdSubst subst id poly_app ; return (subst', NonRec poly_id2 poly_rhs) } where rhs' = GHC.Core.Subst.substExpr subst rhs -- tvs_here: see Note [Which type variables to abstract over] tvs_here = choose_tvs (exprSomeFreeVars isTyVar rhs') abstract subst (CyclicSCC trpls) = do { (poly_ids, poly_apps) <- mapAndUnzipM (mk_poly1 tvs_here) ids ; let subst' = GHC.Core.Subst.extendSubstList subst (ids `zip` poly_apps) poly_pairs = [ mk_poly2 poly_id tvs_here rhs' | (poly_id, rhs) <- poly_ids `zip` rhss , let rhs' = GHC.Core.Subst.substExpr subst' rhs ] ; return (subst', Rec poly_pairs) } where (ids,rhss,_fvss) = unzip3 trpls -- tvs_here: 
see Note [Which type variables to abstract over] tvs_here = choose_tvs (mapUnionVarSet get_bind_fvs trpls) -- See wrinkle (AB4) in Note [Which type variables to abstract over] get_bind_fvs (id,_rhs,rhs_fvs) = tyCoVarsOfType (idType id) `unionVarSet` get_rec_rhs_tvs rhs_fvs get_rec_rhs_tvs rhs_fvs = nonDetStrictFoldVarSet get_tvs emptyVarSet rhs_fvs -- nonDet is safe because of wrinkle (AB3) get_tvs :: Var -> VarSet -> VarSet get_tvs var free_tvs | isTyVar var -- CoVars have been substituted away = extendVarSet free_tvs var | isCoVar var -- CoVars can be free in the RHS, but they are never let-bound; = free_tvs -- Do not call lookupIdSubst_maybe, though (#23426) -- because it has a non-CoVar precondition | Just poly_app <- GHC.Core.Subst.lookupIdSubst_maybe subst var = -- 'var' is like 'x' in (AB4) exprSomeFreeVars isTyVar poly_app `unionVarSet` free_tvs | otherwise = free_tvs choose_tvs free_tvs = filter (`elemVarSet` all_free_tvs) main_tvs -- (AB3) where all_free_tvs = closeOverKinds free_tvs -- (AB2) mk_poly1 :: [TyVar] -> Id -> SimplM (Id, CoreExpr) mk_poly1 tvs_here var = do { uniq <- getUniqueM ; let poly_name = setNameUnique (idName var) uniq -- Keep same name poly_ty = mkInfForAllTys tvs_here (idType var) -- But new type of course poly_id = transferPolyIdInfo var tvs_here $ -- Note [transferPolyIdInfo] in GHC.Types.Id mkLocalId poly_name (idMult var) poly_ty ; return (poly_id, mkTyApps (Var poly_id) (mkTyVarTys tvs_here)) } -- In the olden days, it was crucial to copy the occInfo of the original var, -- because we were looking at occurrence-analysed but as yet unsimplified code! -- In particular, we mustn't lose the loop breakers. BUT NOW we are looking -- at already simplified code, so it doesn't matter -- -- It's even right to retain single-occurrence or dead-var info: -- Suppose we started with /\a -> let x = E in B -- where x occurs once in B. Then we transform to: -- let x' = /\a -> E in /\a -> let x* = x' a in B -- where x* has an INLINE prag on it. 
Now, once x* is inlined, -- the occurrences of x' will be just the occurrences originally -- pinned on x. mk_poly2 :: Id -> [TyVar] -> CoreExpr -> (Id, CoreExpr) mk_poly2 poly_id tvs_here rhs = (poly_id `setIdUnfolding` unf, poly_rhs) where poly_rhs = mkLams tvs_here rhs unf = mkUnfolding uf_opts VanillaSrc is_top_lvl False False poly_rhs Nothing -- We want the unfolding. Consider -- let -- x = /\a. let y = ... in Just y -- in body -- Then we float the y-binding out (via abstractFloats and addPolyBind) -- but 'x' may well then be inlined in 'body' in which case we'd like the -- opportunity to inline 'y' too. {- Note [Abstract over coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a coercion variable (g :: a ~ Int) is free in the RHS, then so is the type variable a. Rather than sort this mess out, we simply bale out and abstract wrt all the type variables if any of them are coercion variables. Historical note: if you use let-bindings instead of a substitution, beware of this: -- Suppose we start with: -- -- x = /\ a -> let g = G in E -- -- Then we'll float to get -- -- x = let poly_g = /\ a -> G -- in /\ a -> let g = poly_g a in E -- -- But now the occurrence analyser will see just one occurrence -- of poly_g, not inside a lambda, so the simplifier will -- PreInlineUnconditionally poly_g back into g! Badk to square 1! -- (I used to think that the "don't inline lone occurrences" stuff -- would stop this happening, but since it's the *only* occurrence, -- PreInlineUnconditionally kicks in first!) -- -- Solution: put an INLINE note on g's RHS, so that poly_g seems -- to appear many times. (NB: mkInlineMe eliminates -- such notes on trivial RHSs, so do it manually.) ************************************************************************ * * prepareAlts * * ************************************************************************ prepareAlts tries these things: 1. filterAlts: eliminate alternatives that cannot match, including the DEFAULT alternative. 
Here "cannot match" includes knowledge from GADTs

2. refineDefaultAlt: if the DEFAULT alternative can match only one
   possible constructor, then make that constructor explicit. e.g.
       case e of x { DEFAULT -> rhs }
      ===>
       case e of x { (a,b) -> rhs }
   where the type is a single constructor type.  This gives better code
   when rhs also scrutinises x or e.
   See GHC.Core.Utils Note [Refine DEFAULT case alternatives]

3. combineIdenticalAlts: combine identical alternatives into a DEFAULT.
   See CoreUtils Note [Combine identical alternatives], which also
   says why we do this on InAlts not on OutAlts

4. Returns a list of the constructors that cannot hold in the
   DEFAULT alternative (if there is one)

It's a good idea to do this stuff before simplifying the alternatives,
to avoid simplifying alternatives we know can't happen, and to come up
with the list of constructors that are handled, to put into the IdInfo
of the case binder, for use when simplifying the alternatives.

Eliminating the default alternative in (1) isn't so obvious, but it can
happen:

data Colour = Red | Green | Blue

f x = case x of
        Red -> ..
        Green -> ..
        DEFAULT -> h x

h y = case y of
        Blue -> ..
        DEFAULT -> [ case y of ... ]

If we inline h into f, the default case of the inlined h can't happen.
If we don't notice this, we may end up filtering out *all* the cases
of the inner case y, which gives us nowhere to go!

Note [Shadowing in prepareAlts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note that we pass case_bndr::InId to prepareAlts; an /InId/, not an
/OutId/.  This is vital, because `refineDefaultAlt` uses `tys` to build
a new /InAlt/.  If you pass an OutId, we'll end up applying the
substitution twice: disaster (#23012).

However this does mean that filling in the default alt might be delayed
by a simplifier cycle, because an InId has less info than an OutId.
Test simplCore/should_compile/simpl013 apparently shows this up,
although I'm not sure exactly how.
-}

prepareAlts :: OutExpr -> InId -> [InAlt] -> SimplM ([AltCon], [InAlt])
-- Prepare the alternatives of a case expression /before/ simplifying
-- them: filter impossible alternatives, refine a catch-all DEFAULT,
-- and merge identical alternatives; also return the constructors that
-- cannot match the DEFAULT (for the case binder's IdInfo).
--
-- The returned alternatives can be empty, none are possible
--
-- Note that case_bndr is an InId; see Note [Shadowing in prepareAlts]
prepareAlts scrut case_bndr alts
  | Just (tc, tys) <- splitTyConApp_maybe (idType case_bndr)
    -- Only data/newtype scrutinees are interesting; see fall-through below
  = do { us <- getUniquesM
       ; let (idcs1, alts1)
                = filterAlts tc tys imposs_cons alts
             (yes2, alts2)
                = refineDefaultAlt us (idMult case_bndr) tc tys idcs1 alts1
                -- The multiplicity on case_bndr's is the multiplicity of
                -- the case expression.  The newly introduced patterns in
                -- refineDefaultAlt must be scaled by this multiplicity
             (yes3, idcs3, alts3)
                = combineIdenticalAlts idcs1 alts2
             -- "idcs" stands for "impossible default data constructors"
             -- i.e. the constructors that can't match the default case
       ; when yes2 $ tick (FillInCaseDefault case_bndr)
       ; when yes3 $ tick (AltMerge case_bndr)
       ; return (idcs3, alts3) }

  | otherwise  -- Not a data type, so nothing interesting happens
  = return ([], alts)
  where
    -- Constructors the scrutinee is already known not to be,
    -- harvested from its unfolding (OtherCon)
    imposs_cons = case scrut of
                    Var v -> otherCons (idUnfolding v)
                    _     -> []

{- Note [Merging nested cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The basic case-merge stuff is described in Note [Merge Nested Cases]
in GHC.Core.Utils

We do it here in `prepareAlts` (on InAlts) rather than after (on
OutAlts) for two reasons:

* It "belongs" here with `filterAlts`, `refineDefaultAlt` and
  `combineIdenticalAlts`.

* In test perf/compiler/T22428 I found that I was getting extra
  Simplifier iterations:
    1. Create a join point
    2. That join point gets inlined at all call sites, so it is now
       dead.
    3. Case-merge happened, but left behind some trivial bindings
       (see `mergeCaseAlts`)
    4. Get rid of the trivial bindings

  The first two seem reasonable.  It's imaginable that we could do
  better on (3), by making case-merge join-point-aware, but it's not
  trivial.  But the fourth is just stupid.  Rather than always do an
  extra iteration, it's better to do the transformation on the
  input-end of the Simplifier.
-} {- ************************************************************************ * * mkCase * * ************************************************************************ mkCase tries these things * Note [Eliminate Identity Case] * Note [Scrutinee Constant Folding] Note [Eliminate Identity Case] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ case e of ===> e True -> True; False -> False and similar friends. Note [Scrutinee Constant Folding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ case x op# k# of _ { ===> case x of _ { a1# -> e1 (a1# inv_op# k#) -> e1 a2# -> e2 (a2# inv_op# k#) -> e2 ... ... DEFAULT -> ed DEFAULT -> ed where (x op# k#) inv_op# k# == x And similarly for commuted arguments and for some unary operations. The purpose of this transformation is not only to avoid an arithmetic operation at runtime but to allow other transformations to apply in cascade. Example with the "Merge Nested Cases" optimization (from #12877): main = case t of t0 0## -> ... DEFAULT -> case t0 `minusWord#` 1## of t1 0## -> ... DEFAULT -> case t1 `minusWord#` 1## of t2 0## -> ... DEFAULT -> case t2 `minusWord#` 1## of _ 0## -> ... DEFAULT -> ... becomes: main = case t of _ 0## -> ... 1## -> ... 2## -> ... 3## -> ... DEFAULT -> ... There are some wrinkles. Wrinkle 1: Do not apply caseRules if there is just a single DEFAULT alternative, unless the case-binder is dead. Example: case e +# 3# of b { DEFAULT -> rhs } If we applied the transformation here we would (stupidly) get case e of b' { DEFAULT -> let b = b' +# 3# in rhs } and now the process may repeat, because that let will really be a case. But if the original case binder b is dead, we instead get case e of b' { DEFAULT -> rhs } and there is no such problem. See Note [Example of case-merging and caseRules] for a compelling example of why this dead-binder business can be really important. Wrinkle 2: The type of the scrutinee might change. E.g. 
case tagToEnum (x :: Int#) of (b::Bool) False -> e1 True -> e2 ==> case x of (b'::Int#) DEFAULT -> e1 1# -> e2 Wrinkle 3: The case binder may be used in the right hand sides, so we need to make a local binding for it, if it is alive. e.g. case e +# 10# of b DEFAULT -> blah...b... 44# -> blah2...b... ===> case e of b' DEFAULT -> let b = b' +# 10# in blah...b... 34# -> let b = 44# in blah2...b... Note that in the non-DEFAULT cases we know what to bind 'b' to, whereas in the DEFAULT case we must reconstruct the original value. But NB: we use b'; we do not duplicate 'e'. Wrinkle 4: In dataToTag we might need to make up some fake binders; see Note [caseRules for dataToTag] in GHC.Core.Opt.ConstantFold Note [Example of case-merging and caseRules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The case-transformation rules are quite powerful. Here's a subtle example from #22375. We start with data T = A | B | ... deriving Eq f :: T -> String f x = if | x==A -> "one" | x==B -> "two" | ... In Core after a bit of simplification we get: f x = case dataToTagLarge# x of a# { _DEFAULT -> case a# of _DEFAULT -> case dataToTagLarge# x of b# { _DEFAULT -> case b# of _DEFAULT -> ... 1# -> "two" } 0# -> "one" } Now consider what mkCase does to these case expressions. The case-merge transformation Note [Merge Nested Cases] does this (affecting both pairs of cases): f x = case dataToTagLarge# x of a# { _DEFAULT -> case dataToTagLarge# x of b# { _DEFAULT -> ... 1# -> "two" } 0# -> "one" } Now Note [caseRules for dataToTag] does its work, again on both dataToTagLarge# cases: f x = case x of x1 { _DEFAULT -> case dataToTagLarge# x1 of a# { _DEFAULT -> case x of x2 { _DEFAULT -> case dataToTagLarge# x2 of b# { _DEFAULT -> ... } B -> "two" }} A -> "one" } The new dataToTagLarge# calls come from the "reconstruct scrutinee" part of caseRules (note that a# and b# were not dead in the original program before all this merging). 
However, since a# and b# /are/ in fact dead in the resulting program, we are left with redundant dataToTagLarge# calls. But they are easily eliminated by doing caseRules again, in the next Simplifier iteration, this time noticing that a# and b# are dead. Hence the "dead-binder" sub-case of Wrinkle 1 of Note [Scrutinee Constant Folding] above. Once we do this we get f x = case x of x1 { _DEFAULT -> case x1 of x2 { _DEFAULT -> case x1 of x2 { _DEFAULT -> case x2 of x3 { _DEFAULT -> ... } B -> "two" }} A -> "one" } and now we can do case-merge again, getting the desired f x = case x of A -> "one" B -> "two" ... -} mkCase, mkCase1, mkCase2, mkCase3 :: SimplMode -> OutExpr -> OutId -> OutType -> [OutAlt] -- Alternatives in standard (increasing) order -> SimplM OutExpr -------------------------------------------------- -- 1. Merge Nested Cases -- See Note [Merge Nested Cases] -- Note [Example of case-merging and caseRules] -- Note [Cascading case merge] -------------------------------------------------- mkCase mode scrut outer_bndr alts_ty alts | sm_case_merge mode , Just (joins, alts') <- mergeCaseAlts outer_bndr alts = do { tick (CaseMerge outer_bndr) ; case_expr <- mkCase1 mode scrut outer_bndr alts_ty alts' ; return (mkLets joins case_expr) } -- mkCase1: don't call mkCase recursively! -- Firstly, there's no point, because inner alts have already had -- mkCase applied to them, so they won't have a case in their default -- Secondly, if you do, you get an infinite loop, because the bindCaseBndr -- in munge_rhs may put a case into the DEFAULT branch! | otherwise = mkCase1 mode scrut outer_bndr alts_ty alts -------------------------------------------------- -- 2. 
Eliminate Identity Case -- See Note [Eliminate Identity Case] -------------------------------------------------- mkCase1 _mode scrut case_bndr _ alts@(Alt _ _ rhs1 : alts') -- Identity case | all identity_alt alts = do { tick (CaseIdentity case_bndr) ; return (mkTicks ticks $ re_cast scrut rhs1) } where ticks = concatMap (\(Alt _ _ rhs) -> stripTicksT tickishFloatable rhs) alts' identity_alt (Alt con args rhs) = check_eq rhs con args check_eq (Cast rhs co) con args -- See Note [RHS casts] = not (any (`elemVarSet` tyCoVarsOfCo co) args) && check_eq rhs con args check_eq (Tick t e) alt args = tickishFloatable t && check_eq e alt args check_eq (Lit lit) (LitAlt lit') _ = lit == lit' check_eq (Var v) _ _ | v == case_bndr = True check_eq (Var v) (DataAlt con) args | null arg_tys, null args = v == dataConWorkId con -- Optimisation only check_eq rhs (DataAlt con) args = cheapEqExpr' tickishFloatable rhs $ mkConApp2 con arg_tys args check_eq _ _ _ = False arg_tys = tyConAppArgs (idType case_bndr) -- Note [RHS casts] -- ~~~~~~~~~~~~~~~~ -- We've seen this: -- case e of x { _ -> x `cast` c } -- And we definitely want to eliminate this case, to give -- e `cast` c -- So we throw away the cast from the RHS, and reconstruct -- it at the other end. All the RHS casts must be the same -- if (all identity_alt alts) holds. -- -- Don't worry about nested casts, because the simplifier combines them re_cast scrut (Cast rhs co) = Cast (re_cast scrut rhs) co re_cast scrut _ = scrut mkCase1 mode scrut bndr alts_ty alts = mkCase2 mode scrut bndr alts_ty alts -------------------------------------------------- -- 2. 
Scrutinee Constant Folding -- See Note [Scrutinee Constant Folding] -------------------------------------------------- mkCase2 mode scrut bndr alts_ty alts | -- See Note [Scrutinee Constant Folding] case alts of [Alt DEFAULT _ _] -> isDeadBinder bndr -- see wrinkle 1 _ -> True , sm_case_folding mode , Just (scrut', tx_con, mk_orig) <- caseRules (smPlatform mode) scrut = do { bndr' <- newId (fsLit "lwild") ManyTy (exprType scrut') ; alts' <- mapMaybeM (tx_alt tx_con mk_orig bndr') alts -- mapMaybeM: discard unreachable alternatives -- See Note [Unreachable caseRules alternatives] -- in GHC.Core.Opt.ConstantFold ; mkCase3 mode scrut' bndr' alts_ty $ add_default (re_sort alts') } | otherwise = mkCase3 mode scrut bndr alts_ty alts where -- We need to keep the correct association between the scrutinee and its -- binder if the latter isn't dead. Hence we wrap rhs of alternatives with -- "let bndr = ... in": -- -- case v + 10 of y =====> case v of y' -- 20 -> e1 10 -> let y = 20 in e1 -- DEFAULT -> e2 DEFAULT -> let y = y' + 10 in e2 -- -- This wrapping is done in tx_alt; we use mk_orig, returned by caseRules, -- to construct an expression equivalent to the original one, for use -- in the DEFAULT case tx_alt :: (AltCon -> Maybe AltCon) -> (Id -> CoreExpr) -> Id -> CoreAlt -> SimplM (Maybe CoreAlt) tx_alt tx_con mk_orig new_bndr (Alt con bs rhs) = case tx_con con of Nothing -> return Nothing Just con' -> do { bs' <- mk_new_bndrs new_bndr con' ; return (Just (Alt con' bs' rhs')) } where rhs' | isDeadBinder bndr = rhs | otherwise = bindNonRec bndr orig_val rhs orig_val = case con of DEFAULT -> mk_orig new_bndr LitAlt l -> Lit l DataAlt dc -> mkConApp2 dc (tyConAppArgs (idType bndr)) bs mk_new_bndrs new_bndr (DataAlt dc) | not (isNullaryRepDataCon dc) = -- For non-nullary data cons we must invent some fake binders -- See Note [caseRules for dataToTag] in GHC.Core.Opt.ConstantFold do { us <- getUniquesM ; let (ex_tvs, arg_ids) = dataConRepInstPat us (idMult new_bndr) dc 
(tyConAppArgs (idType new_bndr)) ; return (ex_tvs ++ arg_ids) } mk_new_bndrs _ _ = return [] re_sort :: [CoreAlt] -> [CoreAlt] -- Sort the alternatives to re-establish -- GHC.Core Note [Case expression invariants] re_sort alts = sortBy cmpAlt alts add_default :: [CoreAlt] -> [CoreAlt] -- See Note [Literal cases] add_default (Alt (LitAlt {}) bs rhs : alts) = Alt DEFAULT bs rhs : alts add_default alts = alts {- Note [Literal cases] ~~~~~~~~~~~~~~~~~~~~~~~ If we have case tagToEnum (a ># b) of False -> e1 True -> e2 then caseRules for TagToEnum will turn it into case tagToEnum (a ># b) of 0# -> e1 1# -> e2 Since the case is exhaustive (all cases are) we can convert it to case tagToEnum (a ># b) of DEFAULT -> e1 1# -> e2 This may generate slightly better code (although it should not, since all cases are exhaustive) and/or optimise better. I'm not certain that it's necessary, but currently we do make this change. We do it here, NOT in the TagToEnum rules (see "Beware" in Note [caseRules for tagToEnum] in GHC.Core.Opt.ConstantFold) -} -------------------------------------------------- -- Catch-all -------------------------------------------------- mkCase3 _mode scrut bndr alts_ty alts = return (Case scrut bndr alts_ty alts) -- See Note [Exitification] and Note [Do not inline exit join points] in -- GHC.Core.Opt.Exitify -- This lives here (and not in Id) because occurrence info is only valid on -- InIds, so it's crucial that isExitJoinId is only called on freshly -- occ-analysed code. It's not a generic function you can call anywhere. isExitJoinId :: Var -> Bool isExitJoinId id = isJoinId id && isOneOcc (idOccInfo id) && occ_in_lam (idOccInfo id) == IsInsideLam {- Note [Dead binders] ~~~~~~~~~~~~~~~~~~~~ Note that dead-ness is maintained by the simplifier, so that it is accurate after simplification as well as before. 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Opt/Stats.hs0000644000000000000000000003150407346545000020775 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE DerivingVia #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Core.Opt.Stats ( SimplCount, doSimplTick, doFreeSimplTick, simplCountN, pprSimplCount, plusSimplCount, zeroSimplCount, isZeroSimplCount, hasDetailedCounts, Tick(..) ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Error import GHC.Utils.Outputable as Outputable import GHC.Data.FastString import Data.List (sortOn) import Data.List.NonEmpty (NonEmpty(..)) import qualified Data.List.NonEmpty as NE import Data.Ord import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Map.Strict as MapStrict import GHC.Utils.Panic (throwGhcException, GhcException(..)) getVerboseSimplStats :: (Bool -> SDoc) -> SDoc getVerboseSimplStats = getPprDebug -- For now, anyway zeroSimplCount :: Bool -- ^ -ddump-simpl-stats -> SimplCount isZeroSimplCount :: SimplCount -> Bool hasDetailedCounts :: SimplCount -> Bool pprSimplCount :: SimplCount -> SDoc doSimplTick :: Int -- ^ History size of the elaborate counter -> Tick -> SimplCount -> SimplCount doFreeSimplTick :: Tick -> SimplCount -> SimplCount plusSimplCount :: SimplCount -> SimplCount -> SimplCount data SimplCount = VerySimplCount !Int -- Used when don't want detailed stats | SimplCount { ticks :: !Int, -- Total ticks details :: !TickCounts, -- How many of each type n_log :: !Int, -- N log1 :: [Tick], -- Last N events; <= opt_HistorySize, -- most recent first log2 :: [Tick] -- Last opt_HistorySize events before that -- Having log1, log2 lets us accumulate the -- recent history reasonably efficiently } type TickCounts = Map Tick Int simplCountN :: SimplCount -> Int simplCountN (VerySimplCount n) = n simplCountN (SimplCount { ticks = n }) = n zeroSimplCount dump_simpl_stats -- This is where we decide whether to do -- the 
VerySimpl version or the full-stats version | dump_simpl_stats = SimplCount {ticks = 0, details = Map.empty, n_log = 0, log1 = [], log2 = []} | otherwise = VerySimplCount 0 isZeroSimplCount (VerySimplCount n) = n==0 isZeroSimplCount (SimplCount { ticks = n }) = n==0 hasDetailedCounts (VerySimplCount {}) = False hasDetailedCounts (SimplCount {}) = True doFreeSimplTick tick sc@SimplCount { details = dts } = sc { details = dts `addTick` tick } doFreeSimplTick _ sc = sc doSimplTick history_size tick sc@(SimplCount { ticks = tks, details = dts, n_log = nl, log1 = l1 }) | nl >= history_size = sc1 { n_log = 1, log1 = [tick], log2 = l1 } | otherwise = sc1 { n_log = nl+1, log1 = tick : l1 } where sc1 = sc { ticks = tks+1, details = dts `addTick` tick } doSimplTick _ _ (VerySimplCount n) = VerySimplCount (n+1) addTick :: TickCounts -> Tick -> TickCounts addTick fm tick = MapStrict.insertWith (+) tick 1 fm plusSimplCount sc1@(SimplCount { ticks = tks1, details = dts1 }) sc2@(SimplCount { ticks = tks2, details = dts2 }) = log_base { ticks = tks1 + tks2 , details = MapStrict.unionWith (+) dts1 dts2 } where -- A hackish way of getting recent log info log_base | null (log1 sc2) = sc1 -- Nothing at all in sc2 | null (log2 sc2) = sc2 { log2 = log1 sc1 } | otherwise = sc2 plusSimplCount (VerySimplCount n) (VerySimplCount m) = VerySimplCount (n+m) plusSimplCount lhs rhs = throwGhcException . 
PprProgramError "plusSimplCount" $ vcat [ text "lhs" , pprSimplCount lhs , text "rhs" , pprSimplCount rhs ] -- We use one or the other consistently pprSimplCount (VerySimplCount n) = text "Total ticks:" <+> int n pprSimplCount (SimplCount { ticks = tks, details = dts, log1 = l1, log2 = l2 }) = vcat [text "Total ticks: " <+> int tks, blankLine, pprTickCounts dts, getVerboseSimplStats $ \dbg -> if dbg then vcat [blankLine, text "Log (most recent first)", nest 4 (vcat (map ppr l1) $$ vcat (map ppr l2))] else Outputable.empty ] {- Note [Which transformations are innocuous] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ At one point (Jun 18) I wondered if some transformations (ticks) might be "innocuous", in the sense that they do not unlock a later transformation that does not occur in the same pass. If so, we could refrain from bumping the overall tick-count for such innocuous transformations, and perhaps terminate the simplifier one pass earlier. But alas I found that virtually nothing was innocuous! This Note just records what I learned, in case anyone wants to try again. These transformations are not innocuous: *** NB: I think these ones could be made innocuous EtaExpansion LetFloatFromLet LetFloatFromLet x = K (let z = e2 in Just z) prepareRhs transforms to x2 = let z=e2 in Just z x = K xs And now more let-floating can happen in the next pass, on x2 PreInlineUnconditionally Example in spectral/cichelli/Auxil hinsert = ...let lo = e in let j = ...lo... in case x of False -> () True -> case lo of I# lo' -> ...j... When we PreInlineUnconditionally j, lo's occ-info changes to once, so it can be PreInlineUnconditionally in the next pass, and a cascade of further things can happen. PostInlineUnconditionally let x = e in let y = ...x.. in case .. of { A -> ...x...y... B -> ...x...y... } Current postinlineUnconditinaly will inline y, and then x; sigh. 
But PostInlineUnconditionally might also unlock subsequent transformations for the same reason as PreInlineUnconditionally, so it's probably not innocuous anyway. One annoying variant is this. CaseMerge introduces auxiliary bindings let b = b' in ... This takes another full run of the simplifier to elimiante. But if the PostInlineUnconditionally, replacing b with b', is the only thing that happens in a Simplifier run, that probably really is innocuous. Perhaps an opportunity here. KnownBranch, BetaReduction: May drop chunks of code, and thereby enable PreInlineUnconditionally for some let-binding which now occurs once EtaExpansion: Example in imaginary/digits-of-e1 fail = \void. e where e :: IO () --> etaExpandRhs fail = \void. (\s. (e |> g) s) |> sym g where g :: IO () ~ S -> (S,()) --> Next iteration of simplify fail1 = \void. \s. (e |> g) s fail = fail1 |> Void# -> sym g And now inline 'fail' CaseMerge: case x of y { DEFAULT -> case y of z { pi -> ei } alts2 } ---> CaseMerge case x of { pi -> let z = y in ei ; alts2 } The "let z=y" case-binder-swap gets dealt with in the next pass -} pprTickCounts :: Map Tick Int -> SDoc pprTickCounts counts = vcat (map pprTickGroup groups) where groups :: [NonEmpty (Tick, Int)] -- Each group shares a common tag -- toList returns common tags adjacent groups = NE.groupWith (tickToTag . fst) (Map.toList counts) pprTickGroup :: NonEmpty (Tick, Int) -> SDoc pprTickGroup group@((tick1,_) :| _) = hang (int (sum (fmap snd group)) <+> pprTickType tick1) 2 (vcat [ int n <+> pprTickCts tick -- flip as we want largest first | (tick,n) <- sortOn (Down . 
snd) (NE.toList group)]) data Tick -- See Note [Which transformations are innocuous] = PreInlineUnconditionally Id | PostInlineUnconditionally Id | UnfoldingDone Id | RuleFired FastString -- Rule name | LetFloatFromLet | EtaExpansion Id -- LHS binder | EtaReduction Id -- Binder on outer lambda | BetaReduction Id -- Lambda binder | CaseOfCase Id -- Bndr on *inner* case | KnownBranch Id -- Case binder | CaseMerge Id -- Binder on outer case | AltMerge Id -- Case binder | CaseElim Id -- Case binder | CaseIdentity Id -- Case binder | FillInCaseDefault Id -- Case binder | SimplifierDone -- Ticked at each iteration of the simplifier instance Outputable Tick where ppr tick = pprTickType tick <+> pprTickCts tick instance Eq Tick where a == b = case a `cmpTick` b of EQ -> True _ -> False instance Ord Tick where compare = cmpTick tickToTag :: Tick -> Int tickToTag (PreInlineUnconditionally _) = 0 tickToTag (PostInlineUnconditionally _) = 1 tickToTag (UnfoldingDone _) = 2 tickToTag (RuleFired _) = 3 tickToTag LetFloatFromLet = 4 tickToTag (EtaExpansion _) = 5 tickToTag (EtaReduction _) = 6 tickToTag (BetaReduction _) = 7 tickToTag (CaseOfCase _) = 8 tickToTag (KnownBranch _) = 9 tickToTag (CaseMerge _) = 10 tickToTag (CaseElim _) = 11 tickToTag (CaseIdentity _) = 12 tickToTag (FillInCaseDefault _) = 13 tickToTag SimplifierDone = 16 tickToTag (AltMerge _) = 17 pprTickType :: Tick -> SDoc pprTickType (PreInlineUnconditionally _) = text "PreInlineUnconditionally" pprTickType (PostInlineUnconditionally _)= text "PostInlineUnconditionally" pprTickType (UnfoldingDone _) = text "UnfoldingDone" pprTickType (RuleFired _) = text "RuleFired" pprTickType LetFloatFromLet = text "LetFloatFromLet" pprTickType (EtaExpansion _) = text "EtaExpansion" pprTickType (EtaReduction _) = text "EtaReduction" pprTickType (BetaReduction _) = text "BetaReduction" pprTickType (CaseOfCase _) = text "CaseOfCase" pprTickType (KnownBranch _) = text "KnownBranch" pprTickType (CaseMerge _) = text "CaseMerge" 
pprTickType (AltMerge _) = text "AltMerge" pprTickType (CaseElim _) = text "CaseElim" pprTickType (CaseIdentity _) = text "CaseIdentity" pprTickType (FillInCaseDefault _) = text "FillInCaseDefault" pprTickType SimplifierDone = text "SimplifierDone" pprTickCts :: Tick -> SDoc pprTickCts (PreInlineUnconditionally v) = ppr v pprTickCts (PostInlineUnconditionally v)= ppr v pprTickCts (UnfoldingDone v) = ppr v pprTickCts (RuleFired v) = ppr v pprTickCts LetFloatFromLet = Outputable.empty pprTickCts (EtaExpansion v) = ppr v pprTickCts (EtaReduction v) = ppr v pprTickCts (BetaReduction v) = ppr v pprTickCts (CaseOfCase v) = ppr v pprTickCts (KnownBranch v) = ppr v pprTickCts (CaseMerge v) = ppr v pprTickCts (AltMerge v) = ppr v pprTickCts (CaseElim v) = ppr v pprTickCts (CaseIdentity v) = ppr v pprTickCts (FillInCaseDefault v) = ppr v pprTickCts _ = Outputable.empty cmpTick :: Tick -> Tick -> Ordering cmpTick a b = case (tickToTag a `compare` tickToTag b) of GT -> GT EQ -> cmpEqTick a b LT -> LT cmpEqTick :: Tick -> Tick -> Ordering cmpEqTick (PreInlineUnconditionally a) (PreInlineUnconditionally b) = a `compare` b cmpEqTick (PostInlineUnconditionally a) (PostInlineUnconditionally b) = a `compare` b cmpEqTick (UnfoldingDone a) (UnfoldingDone b) = a `compare` b cmpEqTick (RuleFired a) (RuleFired b) = a `uniqCompareFS` b cmpEqTick (EtaExpansion a) (EtaExpansion b) = a `compare` b cmpEqTick (EtaReduction a) (EtaReduction b) = a `compare` b cmpEqTick (BetaReduction a) (BetaReduction b) = a `compare` b cmpEqTick (CaseOfCase a) (CaseOfCase b) = a `compare` b cmpEqTick (KnownBranch a) (KnownBranch b) = a `compare` b cmpEqTick (CaseMerge a) (CaseMerge b) = a `compare` b cmpEqTick (AltMerge a) (AltMerge b) = a `compare` b cmpEqTick (CaseElim a) (CaseElim b) = a `compare` b cmpEqTick (CaseIdentity a) (CaseIdentity b) = a `compare` b cmpEqTick (FillInCaseDefault a) (FillInCaseDefault b) = a `compare` b cmpEqTick _ _ = EQ 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/PatSyn.hs0000644000000000000000000004363407346545000020362 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[PatSyn]{@PatSyn@: Pattern synonyms} -} module GHC.Core.PatSyn ( -- * Main data types PatSyn, PatSynMatcher, PatSynBuilder, mkPatSyn, -- ** Type deconstruction patSynName, patSynArity, patSynIsInfix, patSynResultType, isVanillaPatSyn, patSynArgs, patSynMatcher, patSynBuilder, patSynUnivTyVarBinders, patSynExTyVars, patSynExTyVarBinders, patSynSig, patSynSigBndr, patSynInstArgTys, patSynInstResTy, patSynFieldLabels, patSynFieldType, pprPatSynType ) where import GHC.Prelude import GHC.Core.Type import GHC.Core.TyCo.Ppr import GHC.Types.Name import GHC.Types.Unique import GHC.Types.Basic import GHC.Types.Var import GHC.Types.FieldLabel import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import qualified Data.Data as Data import Data.Function import Data.List (find) {- ************************************************************************ * * \subsection{Pattern synonyms} * * ************************************************************************ -} -- | Pattern Synonym -- -- See Note [Pattern synonym representation] -- See Note [Pattern synonym signature contexts] data PatSyn = MkPatSyn { psName :: Name, psUnique :: Unique, -- Cached from Name psArgs :: [FRRType], -- ^ Argument types psArity :: Arity, -- == length psArgs psInfix :: Bool, -- True <=> declared infix psFieldLabels :: [FieldLabel], -- List of fields for a -- record pattern synonym -- INVARIANT: either empty if no -- record pat syn or same length as -- psArgs -- Universally-quantified type variables psUnivTyVars :: [InvisTVBinder], -- Required dictionaries (may mention psUnivTyVars) psReqTheta :: ThetaType, -- Existentially-quantified type vars psExTyVars :: [InvisTVBinder], -- Provided dictionaries 
(may mention psUnivTyVars or psExTyVars) psProvTheta :: ThetaType, -- Result type psResultTy :: Type, -- Mentions only psUnivTyVars -- See Note [Pattern synonym result type] -- See Note [Matchers and builders for pattern synonyms] -- See Note [Keep Ids out of PatSyn] psMatcher :: PatSynMatcher, psBuilder :: PatSynBuilder } type PatSynMatcher = (Name, Type, Bool) -- Matcher function. -- If Bool is True then prov_theta and arg_tys are empty -- and type is -- forall (p :: RuntimeRep) (r :: TYPE p) univ_tvs. -- req_theta -- => res_ty -- -> (forall ex_tvs. Void# -> r) -- -> (Void# -> r) -- -> r -- -- Otherwise type is -- forall (p :: RuntimeRep) (r :: TYPE r) univ_tvs. -- req_theta -- => res_ty -- -> (forall ex_tvs. prov_theta => arg_tys -> r) -- -> (Void# -> r) -- -> r type PatSynBuilder = Maybe (Name, Type, Bool) -- Nothing => uni-directional pattern synonym -- Just (builder, is_unlifted) => bi-directional -- Builder function, of type -- forall univ_tvs, ex_tvs. (req_theta, prov_theta) -- => arg_tys -> res_ty -- See Note [Builder for pattern synonyms with unboxed type] {- Note [Pattern synonym signature contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a pattern synonym signature we write pattern P :: req => prov => t1 -> ... tn -> res_ty Note that the "required" context comes first, then the "provided" context. Moreover, the "required" context must not mention existentially-bound type variables; that is, ones not mentioned in res_ty. See lots of discussion in #10928. If there is no "provided" context, you can omit it; but you can't omit the "required" part (unless you omit both). Example 1: pattern P1 :: (Num a, Eq a) => b -> Maybe (a,b) pattern P1 x = Just (3,x) We require (Num a, Eq a) to match the 3; there is no provided context. Example 2: data T2 where MkT2 :: (Num a, Eq a) => a -> a -> T2 pattern P2 :: () => (Num a, Eq a) => a -> T2 pattern P2 x = MkT2 3 x When we match against P2 we get a Num dictionary provided. We can use that to check the match against 3. 
Example 3: pattern P3 :: Eq a => a -> b -> T3 b This signature is illegal because the (Eq a) is a required constraint, but it mentions the existentially-bound variable 'a'. You can see it's existential because it doesn't appear in the result type (T3 b). Note [Pattern synonym result type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T a b = MkT b a pattern P :: a -> T [a] Bool pattern P x = MkT True [x] P's psResultTy is (T a Bool), and it really only matches values of type (T [a] Bool). For example, this is ill-typed f :: T p q -> String f (P x) = "urk" This is different to the situation with GADTs: data S a where MkS :: Int -> S Bool Now MkS (and pattern synonyms coming from MkS) can match a value of type (S a), not just (S Bool); we get type refinement. That in turn means that if you have a pattern P x :: T [ty] Bool it's not entirely straightforward to work out the instantiation of P's universal tyvars. You have to /match/ the type of the pattern, (T [ty] Bool) against the psResultTy for the pattern synonym, T [a] Bool to get the instantiation a := ty. This is very unlike DataCons, where univ tyvars match 1-1 the arguments of the TyCon. Side note: I (SG) get the impression that instantiated return types should generate a *required* constraint for pattern synonyms, rather than a *provided* constraint like it's the case for GADTs. For example, I'd expect these declarations to have identical semantics: pattern Just42 :: Maybe Int pattern Just42 = Just 42 pattern Just'42 :: (a ~ Int) => Maybe a pattern Just'42 = Just 42 The latter generates the proper required constraint, the former does not. Also rather different to GADTs is the fact that Just42 doesn't have any universally quantified type variables, whereas Just'42 or MkS above has. Note [Keep Ids out of PatSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We carefully arrange that PatSyn does not contain the Ids for the matcher and builder. We want PatSyn, like TyCon and DataCon, to be completely immutable. 
But, the matcher and builder are relatively sophisticated functions, and we want to get their final IdInfo in the same way as any other Id, so we'd have to update the Ids in the PatSyn too. Rather than try to tidy PatSyns (which is easy to forget and is a bit tricky, see #19074), it seems cleaner to make them entirely immutable, like TyCons and Classes. To that end PatSynBuilder and PatSynMatcher contain Names not Ids. Which, it turns out, is absolutely fine. c.f. DefMethInfo in Class, which contains the Name, but not the Id, of the default method. Note [Pattern synonym representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following pattern synonym declaration pattern P x = MkT [x] (Just 42) where data T a where MkT :: (Show a, Ord b) => [b] -> a -> T a so pattern P has type b -> T (Maybe t) with the following typeclass constraints: requires: (Eq t, Num t) provides: (Show (Maybe t), Ord b) In this case, the fields of MkPatSyn will be set as follows: psArgs = [b] psArity = 1 psInfix = False psUnivTyVars = [t] psExTyVars = [b] psProvTheta = (Show (Maybe t), Ord b) psReqTheta = (Eq t, Num t) psResultTy = T (Maybe t) Note [Matchers and builders for pattern synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For each pattern synonym P, we generate * a "matcher" function, used to desugar uses of P in patterns, which implements pattern matching * A "builder" function (for bidirectional pattern synonyms only), used to desugar uses of P in expressions, which constructs P-values. For the above example, the matcher function has type: $mP :: forall (r :: ?) t. (Eq t, Num t) => T (Maybe t) -> (forall b. 
(Show (Maybe t), Ord b) => b -> r) -> (Void# -> r) -> r with the following implementation: $mP @r @t $dEq $dNum scrut cont fail = case scrut of MkT @b $dShow $dOrd [x] (Just 42) -> cont @b $dShow $dOrd x _ -> fail Void# Notice that the return type 'r' has an open kind, so that it can be instantiated by an unboxed type; for example where we see f (P x) = 3# The extra Void# argument for the failure continuation is needed so that it is lazy even when the result type is unboxed. For the same reason, if the pattern has no arguments, an extra Void# argument is added to the success continuation as well. For *bidirectional* pattern synonyms, we also generate a "builder" function which implements the pattern synonym in an expression context. For our running example, it will be: $bP :: forall t b. (Eq t, Num t, Show (Maybe t), Ord b) => b -> T (Maybe t) $bP x = MkT [x] (Just 42) NB: the existential/universal and required/provided split does not apply to the builder since you are only putting stuff in, not getting stuff out. Injectivity of bidirectional pattern synonyms is checked in tcPatToExpr which walks the pattern and returns its corresponding expression when available. Note [Builder for pattern synonyms with unboxed type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For bidirectional pattern synonyms that have no arguments and have an unboxed type, we add an extra Void# argument to the builder, else it would be a top-level declaration with an unboxed type. pattern P = 0# $bP :: Void# -> Int# $bP _ = 0# This means that when typechecking an occurrence of P in an expression, we must remember that the builder has this void argument. This is done by GHC.Tc.TyCl.PatSyn.patSynBuilderOcc. Note [Pattern synonyms and the data type Type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The type of a pattern synonym is of the form (See Note [Pattern synonym signatures] in GHC.Tc.Gen.Sig): forall univ_tvs. req => forall ex_tvs. prov => ... 
We cannot in general represent this by a value of type Type: - if ex_tvs is empty, then req and prov cannot be distinguished from each other - if req is empty, then univ_tvs and ex_tvs cannot be distinguished from each other, and moreover, prov is seen as the "required" context (as it is the only context) ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq PatSyn where (==) = (==) `on` getUnique (/=) = (/=) `on` getUnique instance Uniquable PatSyn where getUnique = psUnique instance NamedThing PatSyn where getName = patSynName instance Outputable PatSyn where ppr = ppr . getName instance OutputableBndr PatSyn where pprInfixOcc = pprInfixName . getName pprPrefixOcc = pprPrefixName . getName instance Data.Data PatSyn where -- don't traverse? toConstr _ = abstractConstr "PatSyn" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "PatSyn" {- ************************************************************************ * * \subsection{Construction} * * ************************************************************************ -} -- | Build a new pattern synonym mkPatSyn :: Name -> Bool -- ^ Is the pattern synonym declared infix? -> ([InvisTVBinder], ThetaType) -- ^ Universally-quantified type -- variables and required dicts -> ([InvisTVBinder], ThetaType) -- ^ Existentially-quantified type -- variables and provided dicts -> [FRRType] -- ^ Original arguments -> Type -- ^ Original result type -> PatSynMatcher -- ^ Matcher -> PatSynBuilder -- ^ Builder -> [FieldLabel] -- ^ Names of fields for -- a record pattern synonym -> PatSyn -- NB: The univ and ex vars are both in PiTyVarBinder form and TyVar form for -- convenience. All the TyBinders should be Named! 
mkPatSyn name declared_infix (univ_tvs, req_theta) (ex_tvs, prov_theta) orig_args orig_res_ty matcher builder field_labels = MkPatSyn {psName = name, psUnique = getUnique name, psUnivTyVars = univ_tvs, psExTyVars = ex_tvs, psProvTheta = prov_theta, psReqTheta = req_theta, psInfix = declared_infix, psArgs = orig_args, psArity = length orig_args, psResultTy = orig_res_ty, psMatcher = matcher, psBuilder = builder, psFieldLabels = field_labels } -- | The 'Name' of the 'PatSyn', giving it a unique, rooted identification patSynName :: PatSyn -> Name patSynName = psName -- | Should the 'PatSyn' be presented infix? patSynIsInfix :: PatSyn -> Bool patSynIsInfix = psInfix -- | Arity of the pattern synonym patSynArity :: PatSyn -> Arity patSynArity = psArity -- | Is this a \'vanilla\' pattern synonym (no existentials, no provided constraints)? isVanillaPatSyn :: PatSyn -> Bool isVanillaPatSyn ps = null (psExTyVars ps) && null (psProvTheta ps) patSynArgs :: PatSyn -> [Type] patSynArgs = psArgs patSynFieldLabels :: PatSyn -> [FieldLabel] patSynFieldLabels = psFieldLabels -- | Extract the type for any given labelled field of the 'DataCon' patSynFieldType :: PatSyn -> FieldLabelString -> Type patSynFieldType ps label = case find ((== label) . flLabel . 
fst) (psFieldLabels ps `zip` psArgs ps) of Just (_, ty) -> ty Nothing -> pprPanic "dataConFieldType" (ppr ps <+> ppr label) patSynUnivTyVarBinders :: PatSyn -> [InvisTVBinder] patSynUnivTyVarBinders = psUnivTyVars patSynExTyVars :: PatSyn -> [TyVar] patSynExTyVars ps = binderVars (psExTyVars ps) patSynExTyVarBinders :: PatSyn -> [InvisTVBinder] patSynExTyVarBinders = psExTyVars patSynSigBndr :: PatSyn -> ([InvisTVBinder], ThetaType, [InvisTVBinder], ThetaType, [Scaled Type], Type) patSynSigBndr (MkPatSyn { psUnivTyVars = univ_tvs, psExTyVars = ex_tvs , psProvTheta = prov, psReqTheta = req , psArgs = arg_tys, psResultTy = res_ty }) = (univ_tvs, req, ex_tvs, prov, map unrestricted arg_tys, res_ty) patSynSig :: PatSyn -> ([TyVar], ThetaType, [TyVar], ThetaType, [Scaled Type], Type) patSynSig ps = let (u_tvs, req, e_tvs, prov, arg_tys, res_ty) = patSynSigBndr ps in (binderVars u_tvs, req, binderVars e_tvs, prov, arg_tys, res_ty) patSynMatcher :: PatSyn -> PatSynMatcher patSynMatcher = psMatcher patSynBuilder :: PatSyn -> PatSynBuilder patSynBuilder = psBuilder patSynResultType :: PatSyn -> Type patSynResultType = psResultTy patSynInstArgTys :: PatSyn -> [Type] -> [Type] -- Return the types of the argument patterns -- e.g. data D a = forall b. MkD a b (b->a) -- pattern P f x y = MkD (x,True) y f -- D :: forall a. forall b. a -> b -> (b->a) -> D a -- P :: forall c. forall b. (b->(c,Bool)) -> c -> b -> P c -- patSynInstArgTys P [Int,bb] = [bb->(Int,Bool), Int, bb] -- NB: the inst_tys should be both universal and existential patSynInstArgTys (MkPatSyn { psName = name, psUnivTyVars = univ_tvs , psExTyVars = ex_tvs, psArgs = arg_tys }) inst_tys = assertPpr (tyvars `equalLength` inst_tys) (text "patSynInstArgTys" <+> ppr name $$ ppr tyvars $$ ppr inst_tys) $ map (substTyWith tyvars inst_tys) arg_tys where tyvars = binderVars (univ_tvs ++ ex_tvs) patSynInstResTy :: PatSyn -> [Type] -> Type -- Return the type of whole pattern -- E.g. 
pattern P x y = Just (x,x,y) -- P :: a -> b -> Just (a,a,b) -- (patSynInstResTy P [Int,Bool] = Maybe (Int,Int,Bool) -- NB: unlike patSynInstArgTys, the inst_tys should be just the *universal* tyvars patSynInstResTy (MkPatSyn { psName = name, psUnivTyVars = univ_tvs , psResultTy = res_ty }) inst_tys = assertPpr (univ_tvs `equalLength` inst_tys) (text "patSynInstResTy" <+> ppr name $$ ppr univ_tvs $$ ppr inst_tys) $ substTyWith (binderVars univ_tvs) inst_tys res_ty -- | Print the type of a pattern synonym. The foralls are printed explicitly pprPatSynType :: PatSyn -> SDoc pprPatSynType (MkPatSyn { psUnivTyVars = univ_tvs, psReqTheta = req_theta , psExTyVars = ex_tvs, psProvTheta = prov_theta , psArgs = orig_args, psResultTy = orig_res_ty }) = sep [ pprForAll $ tyVarSpecToBinders univ_tvs , pprThetaArrowTy req_theta , ppWhen insert_empty_ctxt $ parens empty <+> darrow , pprType sigma_ty ] where sigma_ty = mkInvisForAllTys ex_tvs $ mkInvisFunTys prov_theta $ mkVisFunTysMany orig_args orig_res_ty insert_empty_ctxt = null req_theta && not (null prov_theta && null ex_tvs) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Ppr.hs0000644000000000000000000005765707346545000017717 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {- these are needed for the Outputable instance for GenTickish, since we need XTickishId to be Outputable. This should immediately resolve to something like Id. 
-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} {- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1996-1998 Printing of Core syntax -} module GHC.Core.Ppr ( pprCoreExpr, pprParendExpr, pprCoreBinding, pprCoreBindings, pprCoreAlt, pprCoreBindingWithSize, pprCoreBindingsWithSize, pprCoreBinder, pprCoreBinders, pprId, pprIds, pprRule, pprRules, pprOptCo, pprOcc, pprOccWithTick ) where import GHC.Prelude import GHC.Core import GHC.Core.Stats (exprStats) import GHC.Types.Fixity (LexicalFixity(..)) import GHC.Types.Literal( pprLiteral ) import GHC.Types.Name( pprInfixName, pprPrefixName ) import GHC.Types.Var import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand import GHC.Types.Cpr import GHC.Core.DataCon import GHC.Core.TyCon import GHC.Core.TyCo.Ppr import GHC.Core.Coercion import GHC.Types.Basic import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Types.SrcLoc ( pprUserRealSpan ) import GHC.Types.Tickish {- ************************************************************************ * * \subsection{Public interfaces for Core printing (excluding instances)} * * ************************************************************************ @pprParendCoreExpr@ puts parens around non-atomic Core expressions. 
-} pprCoreBindings :: OutputableBndr b => [Bind b] -> SDoc pprCoreBinding :: OutputableBndr b => Bind b -> SDoc pprCoreExpr :: OutputableBndr b => Expr b -> SDoc pprParendExpr :: OutputableBndr b => Expr b -> SDoc pprCoreBindings = pprTopBinds noAnn pprCoreBinding = pprTopBind noAnn pprCoreBindingsWithSize :: [CoreBind] -> SDoc pprCoreBindingWithSize :: CoreBind -> SDoc pprCoreBindingsWithSize = pprTopBinds sizeAnn pprCoreBindingWithSize = pprTopBind sizeAnn instance OutputableBndr b => Outputable (Bind b) where ppr bind = ppr_bind noAnn bind instance OutputableBndr b => Outputable (Expr b) where ppr expr = pprCoreExpr expr instance OutputableBndr b => Outputable (Alt b) where ppr expr = pprCoreAlt expr {- ************************************************************************ * * \subsection{The guts} * * ************************************************************************ -} -- | A function to produce an annotation for a given right-hand-side type Annotation b = Expr b -> SDoc -- | Annotate with the size of the right-hand-side sizeAnn :: CoreExpr -> SDoc sizeAnn e = text "-- RHS size:" <+> ppr (exprStats e) -- | No annotation noAnn :: Expr b -> SDoc noAnn _ = empty pprTopBinds :: OutputableBndr a => Annotation a -- ^ generate an annotation to place before the -- binding -> [Bind a] -- ^ bindings to show -> SDoc -- ^ the pretty result pprTopBinds ann binds = vcat (map (pprTopBind ann) binds) pprTopBind :: OutputableBndr a => Annotation a -> Bind a -> SDoc pprTopBind ann (NonRec binder expr) = ppr_binding ann (binder,expr) $$ blankLine pprTopBind _ (Rec []) = text "Rec { }" pprTopBind ann (Rec (b:bs)) = vcat [text "Rec {", ppr_binding ann b, vcat [blankLine $$ ppr_binding ann b | b <- bs], text "end Rec }", blankLine] ppr_bind :: OutputableBndr b => Annotation b -> Bind b -> SDoc ppr_bind ann (NonRec val_bdr expr) = ppr_binding ann (val_bdr, expr) ppr_bind ann (Rec binds) = vcat (map pp binds) where pp bind = ppr_binding ann bind <> semi ppr_binding :: 
OutputableBndr b => Annotation b -> (b, Expr b) -> SDoc ppr_binding ann (val_bdr, expr) = vcat [ ann expr , ppUnlessOption sdocSuppressTypeSignatures (pprBndr LetBind val_bdr) , pp_bind ] where pp_val_bdr = pprPrefixOcc val_bdr pp_bind = case bndrIsJoin_maybe val_bdr of NotJoinPoint -> pp_normal_bind JoinPoint ar -> pp_join_bind ar pp_normal_bind = hang pp_val_bdr 2 (equals <+> pprCoreExpr expr) -- For a join point of join arity n, we want to print j = \x1 ... xn -> e -- as "j x1 ... xn = e" to differentiate when a join point returns a -- lambda (the first rendering looks like a nullary join point returning -- an n-argument function). pp_join_bind join_arity | bndrs `lengthAtLeast` join_arity = hang (pp_val_bdr <+> sep (map (pprBndr LambdaBind) lhs_bndrs)) 2 (equals <+> pprCoreExpr rhs) | otherwise -- Yikes! A join-binding with too few lambda -- Lint will complain, but we don't want to crash -- the pretty-printer else we can't see what's wrong -- So refer to printing j = e = pp_normal_bind where (bndrs, body) = collectBinders expr (lhs_bndrs, rest) = splitAt join_arity bndrs rhs = mkLams rest body pprParendExpr expr = ppr_expr parens expr pprCoreExpr expr = ppr_expr noParens expr noParens :: SDoc -> SDoc noParens pp = pp pprOptCo :: Coercion -> SDoc -- Print a coercion optionally; i.e. honouring -dsuppress-coercions pprOptCo co = sdocOption sdocSuppressCoercions $ \case True -> angleBrackets (text "Co:" <> int (coercionSize co)) <+> dcolon <+> co_type False -> parens $ sep [ppr co, dcolon <+> co_type] where co_type = sdocOption sdocSuppressCoercionTypes $ \case True -> text "..." 
False -> ppr (coercionType co) ppr_id_occ :: (SDoc -> SDoc) -> Id -> SDoc ppr_id_occ add_par id | isJoinId id = add_par ((text "jump") <+> pp_id) | otherwise = pp_id where pp_id = ppr id -- We could use pprPrefixOcc to print (+) etc, but this is -- Core where we don't print things infix anyway, so doing -- so just adds extra redundant parens ppr_expr :: OutputableBndr b => (SDoc -> SDoc) -> Expr b -> SDoc -- The function adds parens in context that need -- an atomic value (e.g. function args) ppr_expr add_par (Var id) = ppr_id_occ add_par id ppr_expr add_par (Type ty) = add_par (text "TYPE:" <+> ppr ty) -- Weird ppr_expr add_par (Coercion co) = add_par (text "CO:" <+> ppr co) ppr_expr add_par (Lit lit) = pprLiteral add_par lit ppr_expr add_par (Cast expr co) = add_par $ sep [pprParendExpr expr, text "`cast`" <+> pprOptCo co] ppr_expr add_par expr@(Lam _ _) = let (bndrs, body) = collectBinders expr in add_par $ hang (text "\\" <+> sep (map (pprBndr LambdaBind) bndrs) <+> arrow) 2 (pprCoreExpr body) ppr_expr add_par expr@(App {}) = sdocOption sdocSuppressTypeApplications $ \supp_ty_app -> case collectArgs expr of { (fun, args) -> let pp_args = sep (map pprArg args) val_args = dropWhile isTypeArg args -- Drop the type arguments for tuples pp_tup_args = pprWithCommas pprCoreExpr val_args args' | supp_ty_app = val_args | otherwise = args parens | null args' = id | otherwise = add_par in case fun of Var f -> case isDataConWorkId_maybe f of -- Notice that we print the *worker* -- for tuples in paren'd format. 
Just dc | saturated , Just sort <- tyConTuple_maybe tc -> tupleParens sort pp_tup_args where tc = dataConTyCon dc saturated = val_args `lengthIs` idArity f _ -> parens (hang fun_doc 2 pp_args) where fun_doc = ppr_id_occ noParens f _ -> parens (hang (pprParendExpr fun) 2 pp_args) } ppr_expr add_par (Case expr _ ty []) -- Empty Case = add_par $ sep [text "case" <+> pprCoreExpr expr <+> whenPprDebug (text "return" <+> ppr ty), text "of {}"] ppr_expr add_par (Case expr var ty [Alt con args rhs]) -- Single alt Case = sdocOption sdocPrintCaseAsLet $ \case True -> add_par $ -- See Note [Print case as let] sep [ sep [ text "let! {" <+> ppr_case_pat con args <+> text "~" <+> ppr_bndr var , text "<-" <+> ppr_expr id expr <+> text "} in" ] , pprCoreExpr rhs ] False -> add_par $ sep [sep [sep [ text "case" <+> pprCoreExpr expr , whenPprDebug (text "return" <+> ppr ty) , text "of" <+> ppr_bndr var ] , char '{' <+> ppr_case_pat con args <+> arrow ] , pprCoreExpr rhs , char '}' ] where ppr_bndr = pprBndr CaseBind ppr_expr add_par (Case expr var ty alts) -- Multi alt Case = add_par $ sep [sep [text "case" <+> pprCoreExpr expr <+> whenPprDebug (text "return" <+> ppr ty), text "of" <+> ppr_bndr var <+> char '{'], nest 2 (vcat (punctuate semi (map pprCoreAlt alts))), char '}' ] where ppr_bndr = pprBndr CaseBind -- special cases: let ... in let ... 
-- ("disgusting" SLPJ) {- ppr_expr add_par (Let bind@(NonRec val_bdr rhs@(Let _ _)) body) = add_par $ vcat [ hsep [text "let {", (pprBndr LetBind val_bdr $$ ppr val_bndr), equals], nest 2 (pprCoreExpr rhs), text "} in", pprCoreExpr body ] ppr_expr add_par (Let bind@(NonRec val_bdr rhs) expr@(Let _ _)) = add_par (hang (text "let {") 2 (hsep [ppr_binding (val_bdr,rhs), text "} in"]) $$ pprCoreExpr expr) -} -- General case (recursive case, too) ppr_expr add_par (Let bind expr) = add_par $ sep [hang (keyword bind <+> char '{') 2 (ppr_bind noAnn bind <+> text "} in"), pprCoreExpr expr] where keyword (NonRec b _) | isJoinPoint (bndrIsJoin_maybe b) = text "join" | otherwise = text "let" keyword (Rec pairs) | ((b,_):_) <- pairs , isJoinPoint (bndrIsJoin_maybe b) = text "joinrec" | otherwise = text "letrec" ppr_expr add_par (Tick tickish expr) = sdocOption sdocSuppressTicks $ \case -- Only hide non-runtime relevant ticks. True | not (tickishIsCode tickish) -> ppr_expr add_par expr _ -> add_par (sep [ppr tickish, pprCoreExpr expr]) pprCoreAlt :: OutputableBndr a => Alt a -> SDoc pprCoreAlt (Alt con args rhs) = hang (ppr_case_pat con args <+> arrow) 2 (pprCoreExpr rhs) ppr_case_pat :: OutputableBndr a => AltCon -> [a] -> SDoc ppr_case_pat (DataAlt dc) args | Just sort <- tyConTuple_maybe tc = tupleParens sort (pprWithCommas ppr_bndr args) where ppr_bndr = pprBndr CasePatBind tc = dataConTyCon dc ppr_case_pat con args = ppr con <+> (fsep (map ppr_bndr args)) where ppr_bndr = pprBndr CasePatBind -- | Pretty print the argument in a function application. pprArg :: OutputableBndr a => Expr a -> SDoc pprArg (Type ty) = ppUnlessOption sdocSuppressTypeApplications (text "@" <> pprParendType ty) pprArg (Coercion co) = text "@~" <> pprOptCo co pprArg expr = pprParendExpr expr {- Note [Print case as let] ~~~~~~~~~~~~~~~~~~~~~~~~ Single-branch case expressions are very common: case x of y { I# x' -> case p of q { I# p' -> ... 
} } These are, in effect, just strict let's, with pattern matching. With -dppr-case-as-let we print them as such: let! { I# x' ~ y <- x } in let! { I# p' ~ q <- p } in ... Other printing bits-and-bobs used with the general @pprCoreBinding@ and @pprCoreExpr@ functions. Note [Binding-site specific printing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ pprCoreBinder and pprTypedLamBinder receive a BindingSite argument to adjust the information printed. Let-bound binders are printed with their full type and idInfo. Case-bound variables (both the case binder and pattern variables) are printed without a type and without their unfolding. Furthermore, a dead case-binder is completely ignored, while otherwise, dead binders are printed as "_". -} -- These instances are sadly orphans instance OutputableBndr Var where pprBndr = pprCoreBinder pprInfixOcc = pprInfixName . varName pprPrefixOcc = pprPrefixName . varName bndrIsJoin_maybe = idJoinPointHood instance Outputable b => OutputableBndr (TaggedBndr b) where pprBndr _ b = ppr b -- Simple pprInfixOcc b = ppr b pprPrefixOcc b = ppr b bndrIsJoin_maybe (TB b _) = idJoinPointHood b pprOcc :: OutputableBndr a => LexicalFixity -> a -> SDoc pprOcc Infix = pprInfixOcc pprOcc Prefix = pprPrefixOcc pprOccWithTick :: OutputableBndr a => LexicalFixity -> PromotionFlag -> a -> SDoc pprOccWithTick fixity prom op | isPromoted prom = quote (pprOcc fixity op) | otherwise = pprOcc fixity op pprCoreBinder :: BindingSite -> Var -> SDoc pprCoreBinder LetBind binder | isTyVar binder = pprKindedTyVarBndr binder | otherwise = pprTypedLetBinder binder $$ ppIdInfo binder (idInfo binder) -- Lambda bound type variables are preceded by "@" pprCoreBinder bind_site bndr = getPprDebug $ \debug -> pprTypedLamBinder bind_site debug bndr pprCoreBinders :: [Var] -> SDoc -- Print as lambda-binders, i.e. 
with their type pprCoreBinders vs = sep (map (pprCoreBinder LambdaBind) vs) pprUntypedBinder :: Var -> SDoc pprUntypedBinder binder | isTyVar binder = text "@" <> ppr binder -- NB: don't print kind | otherwise = pprIdBndr binder pprTypedLamBinder :: BindingSite -> Bool -> Var -> SDoc -- For lambda and case binders, show the unfolding info (usually none) pprTypedLamBinder bind_site debug_on var = sdocOption sdocSuppressTypeSignatures $ \suppress_sigs -> case () of _ | not debug_on -- Show case-bound wild binders only if debug is on , CaseBind <- bind_site , isDeadBinder var -> empty | not debug_on -- Even dead binders can be one-shot , isDeadBinder var -> char '_' <+> ppWhen (isId var) (pprIdBndrInfo (idInfo var)) | not debug_on -- No parens, no kind info , CaseBind <- bind_site -> pprUntypedBinder var | not debug_on , CasePatBind <- bind_site -> pprUntypedBinder var | suppress_sigs -> pprUntypedBinder var | isTyVar var -> parens (pprKindedTyVarBndr var) | otherwise -> parens (hang (pprIdBndr var) 2 (vcat [ dcolon <+> pprType (idType var) , pp_unf])) where unf_info = realUnfoldingInfo (idInfo var) pp_unf | hasSomeUnfolding unf_info = text "Unf=" <> ppr unf_info | otherwise = empty pprTypedLetBinder :: Var -> SDoc -- Print binder with a type or kind signature (not paren'd) pprTypedLetBinder binder = sdocOption sdocSuppressTypeSignatures $ \suppress_sigs -> case () of _ | isTyVar binder -> pprKindedTyVarBndr binder | suppress_sigs -> pprIdBndr binder | otherwise -> hang (pprIdBndr binder) 2 (dcolon <+> pprType (idType binder)) pprKindedTyVarBndr :: TyVar -> SDoc -- Print a type variable binder with its kind (but not if *) pprKindedTyVarBndr tyvar = text "@" <> pprTyVar tyvar -- pprId x prints x :: ty pprId :: Id -> SDoc pprId x = ppr x <+> dcolon <+> ppr (idType x) pprIds :: [Id] -> SDoc pprIds xs = sep (map pprId xs) -- pprIdBndr does *not* print the type -- When printing any Id binder in debug mode, we print its inline pragma and one-shot-ness pprIdBndr :: Id -> 
SDoc pprIdBndr id = pprPrefixOcc id <+> pprIdBndrInfo (idInfo id) pprIdBndrInfo :: IdInfo -> SDoc pprIdBndrInfo info = ppUnlessOption sdocSuppressIdInfo (info `seq` doc) -- The seq is useful for poking on black holes where prag_info = inlinePragInfo info occ_info = occInfo info dmd_info = demandInfo info lbv_info = oneShotInfo info has_prag = not (isDefaultInlinePragma prag_info) has_occ = not (isNoOccInfo occ_info) has_dmd = not $ isTopDmd dmd_info has_lbv = not (hasNoOneShotInfo lbv_info) doc = showAttributes [ (has_prag, text "InlPrag=" <> pprInlineDebug prag_info) , (has_occ, text "Occ=" <> ppr occ_info) , (has_dmd, text "Dmd=" <> ppr dmd_info) , (has_lbv , text "OS=" <> ppr lbv_info) ] instance Outputable IdInfo where ppr info = showAttributes [ (has_prag, text "InlPrag=" <> pprInlineDebug prag_info) , (has_occ, text "Occ=" <> ppr occ_info) , (has_dmd, text "Dmd=" <> ppr dmd_info) , (has_lbv , text "OS=" <> ppr lbv_info) , (has_arity, text "Arity=" <> int arity) , (has_called_arity, text "CallArity=" <> int called_arity) , (has_caf_info, text "Caf=" <> ppr caf_info) , (has_str_info, text "Str=" <> pprStrictness str_info) , (has_unf, text "Unf=" <> ppr unf_info) , (has_rules, text "RULES:" <+> vcat (map pprRule rules)) ] where prag_info = inlinePragInfo info has_prag = not (isDefaultInlinePragma prag_info) occ_info = occInfo info has_occ = not (isManyOccs occ_info) dmd_info = demandInfo info has_dmd = not $ isTopDmd dmd_info lbv_info = oneShotInfo info has_lbv = not (hasNoOneShotInfo lbv_info) arity = arityInfo info has_arity = arity /= 0 called_arity = callArityInfo info has_called_arity = called_arity /= 0 caf_info = cafInfo info has_caf_info = not (mayHaveCafRefs caf_info) str_info = dmdSigInfo info has_str_info = not (isNopSig str_info) unf_info = realUnfoldingInfo info has_unf = hasSomeUnfolding unf_info rules = ruleInfoRules (ruleInfo info) has_rules = not (null rules) {- ----------------------------------------------------- -- IdDetails and IdInfo 
----------------------------------------------------- -} ppIdInfo :: Id -> IdInfo -> SDoc ppIdInfo id info = ppUnlessOption sdocSuppressIdInfo $ showAttributes [ (True, pp_scope <> ppr (idDetails id)) , (has_arity, text "Arity=" <> int arity) , (has_called_arity, text "CallArity=" <> int called_arity) , (has_caf_info, text "Caf=" <> ppr caf_info) , (has_str_info, text "Str=" <> pprStrictness str_info) , (has_cpr_info, text "Cpr=" <> ppr cpr_info) , (has_unf, text "Unf=" <> ppr unf_info) , (not (null rules), text "RULES:" <+> vcat (map pprRule rules)) ] -- Inline pragma, occ, demand, one-shot info -- printed out with all binders (when debug is on); -- see GHC.Core.Ppr.pprIdBndr where pp_scope | isGlobalId id = text "GblId" | isExportedId id = text "LclIdX" | otherwise = text "LclId" arity = arityInfo info has_arity = arity /= 0 called_arity = callArityInfo info has_called_arity = called_arity /= 0 caf_info = cafInfo info has_caf_info = not (mayHaveCafRefs caf_info) str_info = dmdSigInfo info has_str_info = not (isNopSig str_info) cpr_info = cprSigInfo info has_cpr_info = cpr_info /= topCprSig unf_info = realUnfoldingInfo info has_unf = hasSomeUnfolding unf_info rules = ruleInfoRules (ruleInfo info) showAttributes :: [(Bool,SDoc)] -> SDoc showAttributes stuff | null docs = empty | otherwise = brackets (sep (punctuate comma docs)) where docs = [d | (True,d) <- stuff] {- ----------------------------------------------------- -- Unfolding and UnfoldingGuidance ----------------------------------------------------- -} instance Outputable UnfoldingGuidance where ppr UnfNever = text "NEVER" ppr (UnfWhen { ug_arity = arity, ug_unsat_ok = unsat_ok, ug_boring_ok = boring_ok }) = text "ALWAYS_IF" <> parens (text "arity=" <> int arity <> comma <> text "unsat_ok=" <> ppr unsat_ok <> comma <> text "boring_ok=" <> ppr boring_ok) ppr (UnfIfGoodArgs { ug_args = cs, ug_size = size, ug_res = discount }) = hsep [ text "IF_ARGS", brackets (hsep (map int cs)), int size, int discount ] 
instance Outputable Unfolding where ppr NoUnfolding = text "No unfolding" ppr BootUnfolding = text "No unfolding (from boot)" ppr (OtherCon cs) = text "OtherCon" <+> ppr cs ppr (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }) = hang (text "DFun:" <+> char '\\' <+> sep (map (pprBndr LambdaBind) bndrs) <+> arrow) 2 (ppr con <+> sep (map ppr args)) ppr (CoreUnfolding { uf_src = src , uf_tmpl=rhs, uf_is_top=top , uf_cache=cache, uf_guidance=g }) = text "Unf" <> braces (pp_info $$ pp_rhs) where pp_info = fsep $ punctuate comma [ text "Src=" <> ppr src , text "TopLvl=" <> ppr top , ppr cache , text "Guidance=" <> ppr g ] pp_tmpl = ppUnlessOption sdocSuppressUnfoldings (text "Tmpl=" <+> ppr rhs) pp_rhs | isStableSource src = pp_tmpl | otherwise = empty -- Don't print the RHS or we get a quadratic -- blowup in the size of the printout! instance Outputable UnfoldingCache where ppr (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) = fsep $ punctuate comma [ text "Value=" <> ppr hnf , text "ConLike=" <> ppr conlike , text "WorkFree=" <> ppr wf , text "Expandable=" <> ppr exp ] {- ----------------------------------------------------- -- Rules ----------------------------------------------------- -} instance Outputable CoreRule where ppr = pprRule pprRules :: [CoreRule] -> SDoc pprRules rules = vcat (map pprRule rules) pprRule :: CoreRule -> SDoc pprRule (BuiltinRule { ru_fn = fn, ru_name = name}) = text "Built in rule for" <+> ppr fn <> colon <+> doubleQuotes (ftext name) pprRule (Rule { ru_name = name, ru_act = act, ru_fn = fn, ru_bndrs = tpl_vars, ru_args = tpl_args, ru_rhs = rhs }) = hang (doubleQuotes (ftext name) <+> ppr act) 4 (sep [text "forall" <+> pprCoreBinders tpl_vars <> dot, nest 2 (ppr fn <+> sep (map pprArg tpl_args)), nest 2 (text "=" <+> pprCoreExpr rhs) ]) {- ----------------------------------------------------- -- Tickish ----------------------------------------------------- -} 
instance Outputable (XTickishId pass) => Outputable (GenTickish pass) where ppr (HpcTick modl ix) = hcat [text "hpc<", ppr modl, comma, ppr ix, text ">"] ppr (Breakpoint _ext ix vars modl) = hcat [text "break<", ppr modl, comma, ppr ix, text ">", parens (hcat (punctuate comma (map ppr vars)))] ppr (ProfNote { profNoteCC = cc, profNoteCount = tick, profNoteScope = scope }) = case (tick,scope) of (True,True) -> hcat [text "scctick<", ppr cc, char '>'] (True,False) -> hcat [text "tick<", ppr cc, char '>'] _ -> hcat [text "scc<", ppr cc, char '>'] ppr (SourceNote span _) = hcat [ text "src<", pprUserRealSpan True span, char '>'] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Ppr.hs-boot0000644000000000000000000000042307346545000020633 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} module GHC.Core.Ppr where import {-# SOURCE #-} GHC.Core import {-# SOURCE #-} GHC.Types.Var (Var) import GHC.Utils.Outputable (OutputableBndr, Outputable) instance OutputableBndr b => Outputable (Expr b) instance OutputableBndr Var ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Predicate.hs0000644000000000000000000003654307346545000021045 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {- Describes predicates as they are considered by the solver. 
-} module GHC.Core.Predicate ( Pred(..), classifyPredType, isPredTy, isEvVarType, -- Equality predicates EqRel(..), eqRelRole, isEqPrimPred, isNomEqPred, isReprEqPrimPred, isEqPred, isCoVarType, getEqPredTys, getEqPredTys_maybe, getEqPredRole, predTypeEqRel, mkPrimEqPred, mkReprPrimEqPred, mkPrimEqPredRole, mkNomPrimEqPred, -- Class predicates mkClassPred, isDictTy, typeDeterminesValue, isClassPred, isEqualityClass, isCTupleClass, getClassPredTys, getClassPredTys_maybe, classMethodTy, classMethodInstTy, -- Implicit parameters isIPLikePred, mentionsIP, isIPTyCon, isIPClass, isCallStackTy, isCallStackPred, isCallStackPredTy, isExceptionContextPred, isIPPred_maybe, -- Evidence variables DictId, isEvVar, isDictId ) where import GHC.Prelude import GHC.Core.Type import GHC.Core.Class import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Types.Var import GHC.Core.Coercion import GHC.Core.Multiplicity ( scaledThing ) import GHC.Builtin.Names import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.FastString -- | A predicate in the solver. The solver tries to prove Wanted predicates -- from Given ones. data Pred -- | A typeclass predicate. = ClassPred Class [Type] -- | A type equality predicate, (t1 ~#N t2) or (t1 ~#R t2) | EqPred EqRel Type Type -- | An irreducible predicate. | IrredPred PredType -- | A quantified predicate. 
-- -- See Note [Quantified constraints] in GHC.Tc.Solver.Solve | ForAllPred [TyVar] [PredType] PredType -- NB: There is no TuplePred case -- Tuple predicates like (Eq a, Ord b) are just treated -- as ClassPred, as if we had a tuple class with two superclasses -- class (c1, c2) => CTuple2 c1 c2 classifyPredType :: PredType -> Pred classifyPredType ev_ty = case splitTyConApp_maybe ev_ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqReprPrimTyConKey -> EqPred ReprEq ty1 ty2 | tc `hasKey` eqPrimTyConKey -> EqPred NomEq ty1 ty2 Just (tc, tys) | Just clas <- tyConClass_maybe tc -> ClassPred clas tys _ | (tvs, rho) <- splitForAllTyCoVars ev_ty , (theta, pred) <- splitFunTys rho , not (null tvs && null theta) -> ForAllPred tvs (map scaledThing theta) pred | otherwise -> IrredPred ev_ty -- --------------------- Dictionary types --------------------------------- mkClassPred :: Class -> [Type] -> PredType mkClassPred clas tys = mkTyConApp (classTyCon clas) tys isDictTy :: Type -> Bool -- True of dictionaries (Eq a) and -- dictionary functions (forall a. Eq a => Eq [a]) -- See Note [Type determines value] -- See #24370 (and the isDictId call in GHC.HsToCore.Binds.decomposeRuleLhs) -- for why it's important to catch dictionary bindings isDictTy ty = isClassPred pred where (_, pred) = splitInvisPiTys ty typeDeterminesValue :: Type -> Bool -- See Note [Type determines value] typeDeterminesValue ty = isDictTy ty && not (isIPLikePred ty) getClassPredTys :: HasDebugCallStack => PredType -> (Class, [Type]) getClassPredTys ty = case getClassPredTys_maybe ty of Just (clas, tys) -> (clas, tys) Nothing -> pprPanic "getClassPredTys" (ppr ty) getClassPredTys_maybe :: PredType -> Maybe (Class, [Type]) getClassPredTys_maybe ty = case splitTyConApp_maybe ty of Just (tc, tys) | Just clas <- tyConClass_maybe tc -> Just (clas, tys) _ -> Nothing classMethodTy :: Id -> Type -- Takes a class selector op :: forall a. 
C a => meth_ty -- and returns the type of its method, meth_ty -- The selector can be a superclass selector, in which case -- you get back a superclass classMethodTy sel_id = funResultTy $ -- meth_ty dropForAlls $ -- C a => meth_ty varType sel_id -- forall a. C n => meth_ty classMethodInstTy :: Id -> [Type] -> Type -- Takes a class selector op :: forall a b. C a b => meth_ty -- and the types [ty1, ty2] at which it is instantiated, -- returns the instantiated type of its method, meth_ty[t1/a,t2/b] -- The selector can be a superclass selector, in which case -- you get back a superclass classMethodInstTy sel_id arg_tys = funResultTy $ piResultTys (varType sel_id) arg_tys {- Note [Type determines value] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Only specialise on non-impicit-parameter predicates, because these are the ones whose *type* determines their *value*. In particular, with implicit params, the type args *don't* say what the value of the implicit param is! See #7101. So we treat implicit params just like ordinary arguments for the purposes of specialisation. Note that we still want to specialise functions with implicit params if they have *other* dicts which are class params; see #17930. -} -- --------------------- Equality predicates --------------------------------- -- | A choice of equality relation. This is separate from the type 'Role' -- because 'Phantom' does not define a (non-trivial) equality relation. 
data EqRel = NomEq | ReprEq deriving (Eq, Ord) instance Outputable EqRel where ppr NomEq = text "nominal equality" ppr ReprEq = text "representational equality" eqRelRole :: EqRel -> Role eqRelRole NomEq = Nominal eqRelRole ReprEq = Representational getEqPredTys :: PredType -> (Type, Type) getEqPredTys ty = case splitTyConApp_maybe ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqPrimTyConKey || tc `hasKey` eqReprPrimTyConKey -> (ty1, ty2) _ -> pprPanic "getEqPredTys" (ppr ty) getEqPredTys_maybe :: PredType -> Maybe (Role, Type, Type) getEqPredTys_maybe ty = case splitTyConApp_maybe ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqPrimTyConKey -> Just (Nominal, ty1, ty2) | tc `hasKey` eqReprPrimTyConKey -> Just (Representational, ty1, ty2) _ -> Nothing getEqPredRole :: PredType -> Role -- Precondition: the PredType is (s ~#N t) or (s ~#R t) getEqPredRole ty = eqRelRole (predTypeEqRel ty) -- | Get the equality relation relevant for a pred type -- Returns NomEq for dictionary predicates, etc predTypeEqRel :: PredType -> EqRel predTypeEqRel ty | isReprEqPrimPred ty = ReprEq | otherwise = NomEq {------------------------------------------- Predicates on PredType --------------------------------------------} {- Note [Evidence for quantified constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The superclass mechanism in GHC.Tc.Solver.Dict.makeSuperClasses risks taking a quantified constraint like (forall a. C a => a ~ b) and generate superclass evidence (forall a. C a => a ~# b) This is a funny thing: neither isPredTy nor isCoVarType are true of it. So we are careful not to generate it in the first place: see Note [Equality superclasses in quantified constraints] in GHC.Tc.Solver.Dict. -} -- | Does this type classify a core (unlifted) Coercion? -- At either role nominal or representational -- (t1 ~# t2) or (t1 ~R# t2) -- See Note [Types for coercions, predicates, and evidence] in "GHC.Core.TyCo.Rep" isCoVarType :: Type -> Bool -- ToDo: should we check saturation? 
isCoVarType ty = isEqPrimPred ty isEvVarType :: Type -> Bool -- True of (a) predicates, of kind Constraint, such as (Eq t), and (s ~ t) -- (b) coercion types, such as (s ~# t) or (s ~R# t) -- See Note [Types for coercions, predicates, and evidence] in GHC.Core.TyCo.Rep -- See Note [Evidence for quantified constraints] isEvVarType ty = isCoVarType ty || isPredTy ty isEqPrimPred :: PredType -> Bool -- True of (s ~# t) (s ~R# t) isEqPrimPred ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` eqPrimTyConKey || tc `hasKey` eqReprPrimTyConKey | otherwise = False isReprEqPrimPred :: PredType -> Bool isReprEqPrimPred ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` eqReprPrimTyConKey | otherwise = False isNomEqPred :: PredType -> Bool -- A nominal equality, primitive or not (s ~# t), (s ~ t), or (s ~~ t) isNomEqPred ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` eqPrimTyConKey || tc `hasKey` heqTyConKey || tc `hasKey` eqTyConKey | otherwise = False isClassPred :: PredType -> Bool isClassPred ty = case tyConAppTyCon_maybe ty of Just tc -> isClassTyCon tc _ -> False isEqPred :: PredType -> Bool isEqPred ty -- True of (s ~ t) and (s ~~ t) -- ToDo: should we check saturation? 
| Just tc <- tyConAppTyCon_maybe ty , Just cls <- tyConClass_maybe tc = isEqualityClass cls | otherwise = False isEqualityClass :: Class -> Bool -- True of (~), (~~), and Coercible -- These all have a single primitive-equality superclass, either (~N# or ~R#) isEqualityClass cls = cls `hasKey` heqTyConKey || cls `hasKey` eqTyConKey || cls `hasKey` coercibleTyConKey isCTupleClass :: Class -> Bool isCTupleClass cls = isTupleTyCon (classTyCon cls) {- ********************************************************************* * * Implicit parameters * * ********************************************************************* -} isIPTyCon :: TyCon -> Bool isIPTyCon tc = tc `hasKey` ipClassKey -- Class and its corresponding TyCon have the same Unique isIPClass :: Class -> Bool isIPClass cls = cls `hasKey` ipClassKey -- | Decomposes a predicate if it is an implicit parameter. Does not look in -- superclasses. See also [Local implicit parameters]. isIPPred_maybe :: Class -> [Type] -> Maybe (Type, Type) isIPPred_maybe cls tys | isIPClass cls , [t1,t2] <- tys = Just (t1,t2) | otherwise = Nothing -- --------------------- ExceptionContext predicates -------------------------- -- | Is a 'PredType' an @ExceptionContext@ implicit parameter? -- -- If so, return the name of the parameter. isExceptionContextPred :: Class -> [Type] -> Maybe FastString isExceptionContextPred cls tys | [ty1, ty2] <- tys , isIPClass cls , isExceptionContextTy ty2 = isStrLitTy ty1 | otherwise = Nothing -- | Is a type a 'CallStack'? 
isExceptionContextTy :: Type -> Bool isExceptionContextTy ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` exceptionContextTyConKey | otherwise = False -- --------------------- CallStack predicates --------------------------------- isCallStackPredTy :: Type -> Bool -- True of HasCallStack, or IP "blah" CallStack isCallStackPredTy ty | Just (tc, tys) <- splitTyConApp_maybe ty , Just cls <- tyConClass_maybe tc , Just {} <- isCallStackPred cls tys = True | otherwise = False -- | Is a 'PredType' a 'CallStack' implicit parameter? -- -- If so, return the name of the parameter. isCallStackPred :: Class -> [Type] -> Maybe FastString isCallStackPred cls tys | [ty1, ty2] <- tys , isIPClass cls , isCallStackTy ty2 = isStrLitTy ty1 | otherwise = Nothing -- | Is a type a 'CallStack'? isCallStackTy :: Type -> Bool isCallStackTy ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` callStackTyConKey | otherwise = False -- --------------------- isIPLike and mentionsIP -------------------------- -- See Note [Local implicit parameters] isIPLikePred :: Type -> Bool -- Is `pred`, or any of its superclasses, an implicit parameter? -- See Note [Local implicit parameters] isIPLikePred pred = mentions_ip_pred initIPRecTc Nothing pred mentionsIP :: Type -> Class -> [Type] -> Bool -- Is (cls tys) an implicit parameter with key `str_ty`, or -- is any of its superclasses such at thing. 
-- See Note [Local implicit parameters] mentionsIP str_ty cls tys = mentions_ip initIPRecTc (Just str_ty) cls tys mentions_ip :: RecTcChecker -> Maybe Type -> Class -> [Type] -> Bool mentions_ip rec_clss mb_str_ty cls tys | Just (str_ty', _) <- isIPPred_maybe cls tys = case mb_str_ty of Nothing -> True Just str_ty -> str_ty `eqType` str_ty' | otherwise = or [ mentions_ip_pred rec_clss mb_str_ty (classMethodInstTy sc_sel_id tys) | sc_sel_id <- classSCSelIds cls ] mentions_ip_pred :: RecTcChecker -> Maybe Type -> Type -> Bool mentions_ip_pred rec_clss mb_str_ty ty | Just (cls, tys) <- getClassPredTys_maybe ty , let tc = classTyCon cls , Just rec_clss' <- if isTupleTyCon tc then Just rec_clss else checkRecTc rec_clss tc = mentions_ip rec_clss' mb_str_ty cls tys | otherwise = False -- Includes things like (D []) where D is -- a Constraint-ranged family; #7785 initIPRecTc :: RecTcChecker initIPRecTc = setRecTcMaxBound 1 initRecTc {- Note [Local implicit parameters] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also wrinkle (SIP1) in Note [Shadowing of implicit parameters] in GHC.Tc.Solver.Dict. The function isIPLikePred tells if this predicate, or any of its superclasses, is an implicit parameter. Why are implicit parameters special? Unlike normal classes, we can have local instances for implicit parameters, in the form of let ?x = True in ... So in various places we must be careful not to assume that any value of the right type will do; we must carefully look for the innermost binding. So isIPLikePred checks whether this is an implicit parameter, or has a superclass that is an implicit parameter. Several wrinkles * We must be careful with superclasses, as #18649 showed. Haskell doesn't allow an implicit parameter as a superclass class (?x::a) => C a where ... but with a constraint tuple we might have (% Eq a, ?x::Int %) and /its/ superclasses, namely (Eq a) and (?x::Int), /do/ include an implicit parameter. With ConstraintKinds this can apply to /any/ class, e.g. 
class sc => C sc where ... Then (C (?x::Int)) has (?x::Int) as a superclass. So we must instantiate and check each superclass, one by one, in hasIPSuperClasses. * With -XUndecidableSuperClasses, the superclass hunt can go on forever, so we need a RecTcChecker to cut it off. * Another apparent additional complexity involves type families. For example, consider type family D (v::*->*) :: Constraint type instance D [] = () f :: D v => v Char -> Int If we see a call (f "foo"), we'll pass a "dictionary" () |> (g :: () ~ D []) and it's good to specialise f at this dictionary. So the question is: can an implicit parameter "hide inside" a type-family constraint like (D a). Well, no. We don't allow type instance D Maybe = ?x:Int Hence the umbrella 'otherwise' case in is_ip_like_pred. See #7785. Small worries (Sept 20): * I don't see what stops us having that 'type instance'. Indeed I think nothing does. * I'm a little concerned about type variables; such a variable might be instantiated to an implicit parameter. I don't think this matters in the cases for which isIPLikePred is used, and it's pretty obscure anyway. * The superclass hunt stops when it encounters the same class again, but in principle we could have the same class, differently instantiated, and the second time it could have an implicit parameter I'm going to treat these as problems for another day. They are all exotic. 
-} {- ********************************************************************* * * Evidence variables * * ********************************************************************* -} isEvVar :: Var -> Bool isEvVar var = isEvVarType (varType var) isDictId :: Id -> Bool isDictId id = isDictTy (varType id) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Reduction.hs0000644000000000000000000010225107346545000021067 0ustar0000000000000000 module GHC.Core.Reduction ( -- * Reductions Reduction(..), ReductionN, ReductionR, HetReduction(..), Reductions(..), mkReduction, mkReductions, mkHetReduction, coercionRedn, reductionOriginalType, downgradeRedn, mkSubRedn, mkTransRedn, mkCoherenceRightRedn, mkCoherenceRightMRedn, mkCastRedn1, mkCastRedn2, mkReflRedn, mkGReflRightRedn, mkGReflRightMRedn, mkGReflLeftRedn, mkGReflLeftMRedn, mkAppRedn, mkAppRedns, mkFunRedn, mkForAllRedn, mkHomoForAllRedn, mkTyConAppRedn, mkClassPredRedn, mkProofIrrelRedn, mkReflCoRedn, homogeniseHetRedn, unzipRedns, -- * Rewriting type arguments ArgsReductions(..), simplifyArgsWorker ) where import GHC.Prelude import GHC.Core.Class ( Class(classTyCon) ) import GHC.Core.Coercion import GHC.Core.Predicate ( mkClassPred ) import GHC.Core.TyCon ( TyCon ) import GHC.Core.Type import GHC.Data.Pair ( Pair(Pair) ) import GHC.Data.List.Infinite ( Infinite (..) 
) import qualified GHC.Data.List.Infinite as Inf import GHC.Types.Var ( VarBndr(..), setTyVarKind ) import GHC.Types.Var.Env ( mkInScopeSet ) import GHC.Types.Var.Set ( TyCoVarSet ) import GHC.Utils.Misc ( HasDebugCallStack, equalLength ) import GHC.Utils.Outputable import GHC.Utils.Panic ( assertPpr ) {- %************************************************************************ %* * Reductions %* * %************************************************************************ Note [The Reduction type] ~~~~~~~~~~~~~~~~~~~~~~~~~ Many functions in the type-checker rewrite a type, using Given type equalitie or type-family reductions, and return a Reduction, which is just a pair of the coercion and the RHS type of the coercion: data Reduction = Reduction Coercion !Type The order of the arguments to the constructor serves as a reminder of what the Type is. In Reduction co ty `ty` appears to the right of `co`, reminding us that we must have: co :: unrewritten_ty ~ ty Example functions that use this datatype: GHC.Core.FamInstEnv.topNormaliseType_maybe :: FamInstEnvs -> Type -> Maybe Reduction GHC.Tc.Solver.Rewrite.rewrite :: CtEvidence -> TcType -> TcS Reduction Having Reduction as a data type, with a strict Type field, rather than using a pair (Coercion,Type) gives several advantages (see #20161) * The strictness in Type improved performance in rewriting of type families (around 2.5% improvement in T9872), * Compared to the situation before, it gives improved consistency around orientation of rewritings, as a Reduction is always left-to-right (the coercion's RHS type is always the type stored in the 'Reduction'). No more 'mkSymCo's needed to convert between left-to-right and right-to-left. One could imagine storing the LHS type of the coercion in the Reduction as well, but in fact `reductionOriginalType` is very seldom used, so it's not worth it. -} -- | A 'Reduction' is the result of an operation that rewrites a type @ty_in@. 
-- The 'Reduction' includes the rewritten type @ty_out@ and a 'Coercion' @co@ -- such that @co :: ty_in ~ ty_out@, where the role of the coercion is determined -- by the context. That is, the LHS type of the coercion is the original type -- @ty_in@, while its RHS type is the rewritten type @ty_out@. -- -- A Reduction is always homogeneous, unless it is wrapped inside a 'HetReduction', -- which separately stores the kind coercion. -- -- See Note [The Reduction type]. data Reduction = Reduction { reductionCoercion :: Coercion , reductionReducedType :: !Type } -- N.B. the 'Coercion' field must be lazy: see for instance GHC.Tc.Solver.Rewrite.rewrite_tyvar2 -- which returns an error in the 'Coercion' field when dealing with a Derived constraint -- (which is OK as this Coercion gets ignored later). -- We might want to revisit the strictness once Deriveds are removed. -- | Stores a heterogeneous reduction. -- -- The stored kind coercion must relate the kinds of the -- stored reduction. That is, in @HetReduction (Reduction co xi) kco@, -- we must have: -- -- > co :: ty ~ xi -- > kco :: typeKind ty ~ typeKind xi data HetReduction = HetReduction Reduction MCoercionN -- N.B. strictness annotations don't seem to make a difference here -- | Create a heterogeneous reduction. -- -- Pre-condition: the provided kind coercion (second argument) -- relates the kinds of the stored reduction. -- That is, if the coercion stored in the 'Reduction' is of the form -- -- > co :: ty ~ xi -- -- Then the kind coercion supplied must be of the form: -- -- > kco :: typeKind ty ~ typeKind xi mkHetReduction :: Reduction -- ^ heterogeneous reduction -> MCoercionN -- ^ kind coercion -> HetReduction mkHetReduction redn mco = HetReduction redn mco {-# INLINE mkHetReduction #-} -- | Homogenise a heterogeneous reduction. 
-- -- Given @HetReduction (Reduction co xi) kco@, with -- -- > co :: ty ~ xi -- > kco :: typeKind(ty) ~ typeKind(xi) -- -- this returns the homogeneous reduction: -- -- > hco :: ty ~ ( xi |> sym kco ) homogeniseHetRedn :: Role -> HetReduction -> Reduction homogeniseHetRedn role (HetReduction redn kco) = mkCoherenceRightMRedn role redn (mkSymMCo kco) {-# INLINE homogeniseHetRedn #-} -- | Create a 'Reduction' from a pair of a 'Coercion' and a 'Type. -- -- Pre-condition: the RHS type of the coercion matches the provided type -- (perhaps up to zonking). -- -- Use 'coercionRedn' when you only have the coercion. mkReduction :: Coercion -> Type -> Reduction mkReduction co ty = Reduction co ty {-# INLINE mkReduction #-} instance Outputable Reduction where ppr redn = braces $ vcat [ text "reductionOriginalType:" <+> ppr (reductionOriginalType redn) , text " reductionReducedType:" <+> ppr (reductionReducedType redn) , text " reductionCoercion:" <+> ppr (reductionCoercion redn) ] -- | A 'Reduction' in which the 'Coercion' has 'Nominal' role. type ReductionN = Reduction -- | A 'Reduction' in which the 'Coercion' has 'Representational' role. type ReductionR = Reduction -- | Get the original, unreduced type corresponding to a 'Reduction'. -- -- This is obtained by computing the LHS kind of the stored coercion, -- which may be slow. reductionOriginalType :: Reduction -> Type reductionOriginalType = coercionLKind . reductionCoercion {-# INLINE reductionOriginalType #-} -- | Turn a 'Coercion' into a 'Reduction' -- by inspecting the RHS type of the coercion. -- -- Prefer using 'mkReduction' when you already know -- the RHS type of the coercion, to avoid computing it anew. coercionRedn :: Coercion -> Reduction coercionRedn co = Reduction co (coercionRKind co) {-# INLINE coercionRedn #-} -- | Downgrade the role of the coercion stored in the 'Reduction'. 
downgradeRedn :: Role -- ^ desired role -> Role -- ^ current role -> Reduction -> Reduction downgradeRedn new_role old_role redn@(Reduction co _) = redn { reductionCoercion = downgradeRole new_role old_role co } {-# INLINE downgradeRedn #-} -- | Downgrade the role of the coercion stored in the 'Reduction', -- from 'Nominal' to 'Representational'. mkSubRedn :: Reduction -> Reduction mkSubRedn redn@(Reduction co _) = redn { reductionCoercion = mkSubCo co } {-# INLINE mkSubRedn #-} -- | Compose a reduction with a coercion on the left. -- -- Pre-condition: the provided coercion's RHS type must match the LHS type -- of the coercion that is stored in the reduction. mkTransRedn :: Coercion -> Reduction -> Reduction mkTransRedn co1 redn@(Reduction co2 _) = redn { reductionCoercion = co1 `mkTransCo` co2 } {-# INLINE mkTransRedn #-} -- | The reflexive reduction. mkReflRedn :: Role -> Type -> Reduction mkReflRedn r ty = mkReduction (mkReflCo r ty) ty -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the rewritten type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @ty ~res_co~> (ty |> mco)@ -- at the given 'Role'. mkGReflRightRedn :: Role -> Type -> CoercionN -> Reduction mkGReflRightRedn role ty co = mkReduction (mkGReflRightCo role ty co) (mkCastTy ty co) {-# INLINE mkGReflRightRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the rewritten type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @ty ~res_co~> (ty |> mco)@ -- at the given 'Role'. mkGReflRightMRedn :: Role -> Type -> MCoercionN -> Reduction mkGReflRightMRedn role ty mco = mkReduction (mkGReflRightMCo role ty mco) (mkCastTyMCo ty mco) {-# INLINE mkGReflRightMRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the original (non-rewritten) type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @(ty |> mco) ~res_co~> ty@ -- at the given 'Role'. 
mkGReflLeftRedn :: Role -> Type -> CoercionN -> Reduction mkGReflLeftRedn role ty co = mkReduction (mkGReflLeftCo role ty co) ty {-# INLINE mkGReflLeftRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the original (non-rewritten) type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @(ty |> mco) ~res_co~> ty@ -- at the given 'Role'. mkGReflLeftMRedn :: Role -> Type -> MCoercionN -> Reduction mkGReflLeftMRedn role ty mco = mkReduction (mkGReflLeftMCo role ty mco) ty {-# INLINE mkGReflLeftMRedn #-} -- | Apply a cast to the result of a 'Reduction'. -- -- Given a 'Reduction' @ty1 ~co1~> (ty2 :: k2)@ and a kind coercion @kco@ -- with LHS kind @k2@, produce a new 'Reduction' @ty1 ~co2~> ( ty2 |> kco )@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). mkCoherenceRightRedn :: Role -> Reduction -> CoercionN -> Reduction mkCoherenceRightRedn r (Reduction co1 ty2) kco = mkReduction (mkCoherenceRightCo r ty2 kco co1) (mkCastTy ty2 kco) {-# INLINE mkCoherenceRightRedn #-} -- | Apply a cast to the result of a 'Reduction', using an 'MCoercionN'. -- -- Given a 'Reduction' @ty1 ~co1~> (ty2 :: k2)@ and a kind coercion @mco@ -- with LHS kind @k2@, produce a new 'Reduction' @ty1 ~co2~> ( ty2 |> mco )@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). mkCoherenceRightMRedn :: Role -> Reduction -> MCoercionN -> Reduction mkCoherenceRightMRedn r (Reduction co1 ty2) kco = mkReduction (mkCoherenceRightMCo r ty2 kco co1) (mkCastTyMCo ty2 kco) {-# INLINE mkCoherenceRightMRedn #-} -- | Apply a cast to a 'Reduction', casting both the original and the reduced type. -- -- Given @cast_co@ and 'Reduction' @ty ~co~> xi@, this function returns -- the 'Reduction' @(ty |> cast_co) ~return_co~> (xi |> cast_co)@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). 
-- -- Pre-condition: the 'Type' passed in is the same as the LHS type -- of the coercion stored in the 'Reduction'. mkCastRedn1 :: Role -> Type -- ^ original type -> CoercionN -- ^ coercion to cast with -> Reduction -- ^ rewritten type, with rewriting coercion -> Reduction mkCastRedn1 r ty cast_co (Reduction co xi) -- co :: ty ~r ty' -- return_co :: (ty |> cast_co) ~r (ty' |> cast_co) = mkReduction (castCoercionKind1 co r ty xi cast_co) (mkCastTy xi cast_co) {-# INLINE mkCastRedn1 #-} -- | Apply casts on both sides of a 'Reduction' (of the given 'Role'). -- -- Use 'mkCastRedn1' when you want to cast both the original and reduced types -- in a 'Reduction' using the same coercion. -- -- Pre-condition: the 'Type' passed in is the same as the LHS type -- of the coercion stored in the 'Reduction'. mkCastRedn2 :: Role -> Type -- ^ original type -> CoercionN -- ^ coercion to cast with on the left -> Reduction -- ^ rewritten type, with rewriting coercion -> CoercionN -- ^ coercion to cast with on the right -> Reduction mkCastRedn2 r ty cast_co (Reduction nco nty) cast_co' = mkReduction (castCoercionKind2 nco r ty nty cast_co cast_co') (mkCastTy nty cast_co') {-# INLINE mkCastRedn2 #-} -- | Apply one 'Reduction' to another. -- -- Combines 'mkAppCo' and 'mkAppTy`. mkAppRedn :: Reduction -> Reduction -> Reduction mkAppRedn (Reduction co1 ty1) (Reduction co2 ty2) = mkReduction (mkAppCo co1 co2) (mkAppTy ty1 ty2) {-# INLINE mkAppRedn #-} -- | Create a function 'Reduction'. -- -- Combines 'mkFunCo' and 'mkFunTy'. mkFunRedn :: Role -> FunTyFlag -> ReductionN -- ^ multiplicity reduction -> Reduction -- ^ argument reduction -> Reduction -- ^ result reduction -> Reduction mkFunRedn r af (Reduction w_co w_ty) (Reduction arg_co arg_ty) (Reduction res_co res_ty) = mkReduction (mkFunCo r af w_co arg_co res_co) (mkFunTy af w_ty arg_ty res_ty) {-# INLINE mkFunRedn #-} -- | Create a 'Reduction' associated to a Π type, -- from a kind 'Reduction' and a body 'Reduction'. 
-- -- Combines 'mkForAllCo' and 'mkForAllTy'. mkForAllRedn :: ForAllTyFlag -> TyVar -> ReductionN -- ^ kind reduction -> Reduction -- ^ body reduction -> Reduction mkForAllRedn vis tv1 (Reduction h ki') (Reduction co ty) = mkReduction (mkForAllCo tv1 vis vis h co) (mkForAllTy (Bndr tv2 vis) ty) where tv2 = setTyVarKind tv1 ki' {-# INLINE mkForAllRedn #-} -- | Create a 'Reduction' of a quantified type from a -- 'Reduction' of the body. -- -- Combines 'mkHomoForAllCos' and 'mkForAllTys'. mkHomoForAllRedn :: [TyVarBinder] -> Reduction -> Reduction mkHomoForAllRedn bndrs (Reduction co ty) = mkReduction (mkHomoForAllCos bndrs co) (mkForAllTys bndrs ty) {-# INLINE mkHomoForAllRedn #-} -- | Create a 'Reduction' from a coercion between coercions. -- -- Combines 'mkProofIrrelCo' and 'mkCoercionTy'. mkProofIrrelRedn :: Role -- ^ role of the created coercion, "r" -> CoercionN -- ^ co :: phi1 ~N phi2 -> Coercion -- ^ g1 :: phi1 -> Coercion -- ^ g2 :: phi2 -> Reduction -- ^ res_co :: g1 ~r g2 mkProofIrrelRedn role co g1 g2 = mkReduction (mkProofIrrelCo role co g1 g2) (mkCoercionTy g2) {-# INLINE mkProofIrrelRedn #-} -- | Create a reflexive 'Reduction' whose RHS is the given 'Coercion', -- with the specified 'Role'. mkReflCoRedn :: Role -> Coercion -> Reduction mkReflCoRedn role co = mkReduction (mkReflCo role co_ty) co_ty where co_ty = mkCoercionTy co {-# INLINE mkReflCoRedn #-} -- | A collection of 'Reduction's where the coercions and the types are stored separately. -- -- Use 'unzipRedns' to obtain 'Reductions' from a list of 'Reduction's. -- -- This datatype is used in 'mkAppRedns', 'mkClassPredRedns' and 'mkTyConAppRedn', -- which expect separate types and coercions. -- -- Invariant: the two stored lists are of the same length, -- and the RHS type of each coercion is the corresponding type. data Reductions = Reductions [Coercion] [Type] -- | Create 'Reductions' from individual lists of coercions and types. 
-- -- The lists should be of the same length, and the RHS type of each coercion -- should match the specified type in the other list. mkReductions :: [Coercion] -> [Type] -> Reductions mkReductions cos tys = Reductions cos tys {-# INLINE mkReductions #-} -- | Combines 'mkAppCos' and 'mkAppTys'. mkAppRedns :: Reduction -> Reductions -> Reduction mkAppRedns (Reduction co ty) (Reductions cos tys) = mkReduction (mkAppCos co cos) (mkAppTys ty tys) {-# INLINE mkAppRedns #-} -- | 'TyConAppCo' for 'Reduction's: combines 'mkTyConAppCo' and `mkTyConApp`. mkTyConAppRedn :: Role -> TyCon -> Reductions -> Reduction mkTyConAppRedn role tc (Reductions cos tys) = mkReduction (mkTyConAppCo role tc cos) (mkTyConApp tc tys) {-# INLINE mkTyConAppRedn #-} -- | Reduce the arguments of a 'Class' 'TyCon'. mkClassPredRedn :: Class -> Reductions -> Reduction mkClassPredRedn cls (Reductions cos tys) = mkReduction (mkTyConAppCo Nominal (classTyCon cls) cos) (mkClassPred cls tys) {-# INLINE mkClassPredRedn #-} -- | Obtain 'Reductions' from a list of 'Reduction's by unzipping. unzipRedns :: [Reduction] -> Reductions unzipRedns = foldr accRedn (Reductions [] []) where accRedn :: Reduction -> Reductions -> Reductions accRedn (Reduction co xi) (Reductions cos xis) = Reductions (co:cos) (xi:xis) {-# INLINE unzipRedns #-} -- NB: this function is currently used in two locations: -- -- - GHC.Tc.Gen.Foreign.normaliseFfiType', with one call of the form: -- -- unzipRedns <$> zipWithM f tys roles -- -- - GHC.Tc.Solver.Monad.breakTyEqCycle_maybe, with two calls of the form: -- -- unzipRedns <$> mapM f tys -- -- It is possible to write 'mapAndUnzipM' functions to handle these cases, -- but the above locations aren't performance critical, so it was deemed -- to not be worth it. 
{- %************************************************************************ %* * Simplifying types %* * %************************************************************************ The function below morally belongs in GHC.Tc.Solver.Rewrite, but it is used also in FamInstEnv, and so lives here. Note [simplifyArgsWorker] ~~~~~~~~~~~~~~~~~~~~~~~~~ Invariant (F2) of Note [Rewriting] in GHC.Tc.Solver.Rewrite says that rewriting is homogeneous. This causes some trouble when rewriting a function applied to a telescope of arguments, perhaps with dependency. For example, suppose type family F :: forall (j :: Type) (k :: Type). Maybe j -> Either j k -> Bool -> [k] and we wish to rewrite the args of (with kind applications explicit) F @a @b (Just @a c) (Right @a @b d) False where all variables are skolems and a :: Type b :: Type c :: a d :: b [G] aco :: a ~ fa [G] bco :: b ~ fb [G] cco :: c ~ fc [G] dco :: d ~ fd The first step is to rewrite all the arguments. This is done before calling simplifyArgsWorker. We start from a b Just @a c Right @a @b d False and get left-to-right reductions whose coercions are as follows: co1 :: a ~ fa co2 :: b ~ fb co3 :: (Just @a c) ~ (Just @fa (fc |> aco) |> co6) co4 :: (Right @a @b d) ~ (Right @fa @fb (fd |> bco) |> co7) co5 :: False ~ False where co6 = Maybe (sym aco) :: Maybe fa ~ Maybe a co7 = Either (sym aco) (sym bco) :: Either fa fb ~ Either a b We now process the rewritten args in left-to-right order. The first two args need no further processing. But now consider the third argument. Let f3 = the rewritten result, Just fa (fc |> aco) |> co6. This f3 rewritten argument has kind (Maybe a), due to homogeneity of rewriting (F2). And yet, when we build the application (F @fa @fb ...), we need this argument to have kind (Maybe fa), not (Maybe a). We must cast this argument. The coercion to use is determined by the kind of F: we see in F's kind that the third argument has kind Maybe j. 
Critically, we also know that the argument corresponding to j (in our example, a) rewrote with a coercion co1. We can thus know the coercion needed for the 3rd argument is (Maybe co1), thus building (f3 |> Maybe co1) More generally, we must use the Lifting Lemma, as implemented in Coercion.liftCoSubst. As we work left-to-right, any variable that is a dependent parameter (j and k, in our example) gets mapped in a lifting context to the coercion that is output from rewriting the corresponding argument (co1 and co2, in our example). Then, after rewriting later arguments, we lift the kind of these arguments in the lifting context that we've be building up. This coercion is then used to keep the result of rewriting well-kinded. Working through our example, this is what happens: 1. Extend the (empty) LC with [j |-> co1]. No new casting must be done, because the binder associated with the first argument has a closed type (no variables). 2. Extend the LC with [k |-> co2]. No casting to do. 3. Lifting the kind (Maybe j) with our LC yields co8 :: Maybe a ~ Maybe fa. Use (f3 |> co8) as the argument to F. 4. Lifting the kind (Either j k) with our LC yields co9 :: Either a b ~ Either fa fb. Use (f4 |> co9) as the 4th argument to F, where f4 is the rewritten form of argument 4, written above. 5. We lift Bool with our LC, getting ; casting has no effect. We're now almost done, but the new application F @fa @fb (f3 |> co8) (f4 |> co9) False has the wrong kind. Its kind is [fb], instead of the original [b]. So we must use our LC one last time to lift the result kind [k], getting res_co :: [fb] ~ [b], and we cast our result. Accordingly, the final result is F @fa @fb (Just @fa (fc |> aco) |> Maybe (sym aco) |> sym (Maybe (sym aco))) (Right @fa @fb (fd |> bco) |> Either (sym aco) (sym bco) |> sym (Either (sym aco) (sym bco))) False |> [sym bco] The res_co (in this case, [sym bco]) is the third component of the tuple returned by simplifyArgsWorker. 
Note [Last case in simplifyArgsWorker] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In writing simplifyArgsWorker's `go`, we know here that args cannot be empty, because that case is first. We've run out of binders. But perhaps inner_ki is a tyvar that has been instantiated with a Π-type. Here is an example. a :: forall (k :: Type). k -> k Proxy :: forall j. j -> Type type family Star axStar :: Star ~ Type type family NoWay :: Bool axNoWay :: NoWay ~ False bo :: Type [G] bc :: bo ~ Bool (in inert set) co :: (forall j. j -> Type) ~ (forall (j :: Star). (j |> axStar) -> Star) co = forall (j :: sym axStar). ( -> sym axStar) We are rewriting: a (forall (j :: Star). (j |> axStar) -> Star) -- 1 (Proxy |> co) -- 2 (bo |> sym axStar) -- 3 (NoWay |> sym bc) -- 4 :: Star First, we rewrite all the arguments (before simplifyArgsWorker), like so: co1 :: (forall (j :: Star). (j |> axStar) -> Star) ~ (forall j. j -> Type) -- 1 co2 :: (Proxy |> co) ~ (Proxy |> co) -- 2 co3 :: (bo |> sym axStar) ~ (Bool |> sym axStar) -- 3 co4 :: (NoWay |> sym bc) ~ (False |> sym bc) -- 4 Then we do the process described in Note [simplifyArgsWorker]. 1. Lifting Type (the kind of the first arg) gives us a reflexive coercion, so we don't use it. But we do build a lifting context [k -> co1] (where co1 is a result of rewriting an argument, written above). 2. Lifting k gives us co1, so the second argument becomes (Proxy |> co |> co1). This is not a dependent argument, so we don't extend the lifting context. Now we need to deal with argument (3). The way we normally proceed is to lift the kind of the binder, to see whether it's dependent. But here, the remainder of the kind of `a` that we're left with after processing two arguments is just `k`. The way forward is look up k in the lifting context, getting co1. If we're at all well-typed, co1 will be a coercion between Π-types, with at least one binder. So, let's decompose co1 with decomposePiCos. 
This decomposition needs arguments to use to instantiate any kind parameters. Look at the type of co1. If we just decomposed it, we would end up with coercions whose types include j, which is out of scope here. Accordingly, decomposePiCos takes a list of types whose kinds are the *unrewritten* types in the decomposed coercion. (See comments on decomposePiCos.) Because the rewritten types have unrewritten kinds (because rewriting is homogeneous), passing the list of rewritten types to decomposePiCos just won't do: later arguments' kinds won't be as expected. So we need to get the *unrewritten* types to pass to decomposePiCos. We can do this easily enough by taking the kind of the argument coercions, passed in originally. (Alternative 1: We could re-engineer decomposePiCos to deal with this situation. But that function is already gnarly, and other call sites of decomposePiCos would suffer from the change, even though they are much more common than this one.) (Alternative 2: We could avoid calling decomposePiCos entirely, integrating its behavior into simplifyArgsWorker. This would work, I think, but then all of the complication of decomposePiCos would end up layered on top of all the complication here. Please, no.) (Alternative 3: We could pass the unrewritten arguments into simplifyArgsWorker so that we don't have to recreate them. But that would complicate the interface of this function to handle a very dark, dark corner case. Better to keep our demons to ourselves here instead of exposing them to callers. This decision is easily reversed if there is ever any performance trouble due to the call of coercionKind.) So we now call decomposePiCos co1 (Pair (forall (j :: Star). (j |> axStar) -> Star) (forall j. 
j -> Type)) [bo |> sym axStar, NoWay |> sym bc] to get co5 :: Star ~ Type co6 :: (j |> axStar) ~ (j |> co5), substituted to (bo |> sym axStar |> axStar) ~ (bo |> sym axStar |> co5) == bo ~ bo res_co :: Type ~ Star We then use these casts on (the rewritten) (3) and (4) to get (Bool |> sym axStar |> co5 :: Type) -- (C3) (False |> sym bc |> co6 :: bo) -- (C4) We can simplify to Bool -- (C3) (False |> sym bc :: bo) -- (C4) Of course, we still must do the processing in Note [simplifyArgsWorker] to finish the job. We thus want to recur. Our new function kind is the left-hand type of co1 (gotten, recall, by lifting the variable k that was the return kind of the original function). Why the left-hand type (as opposed to the right-hand type)? Because we have casted all the arguments according to decomposePiCos, which gets us from the right-hand type to the left-hand one. We thus recur with that new function kind, zapping our lifting context, because we have essentially applied it. This recursive call returns ([Bool, False], [...], Refl). The Bool and False are the correct arguments we wish to return. But we must be careful about the result coercion: our new, rewritten application will have kind Type, but we want to make sure that the result coercion casts this back to Star. (Why? Because we started with an application of kind Star, and rewriting is homogeneous.) So, we have to twiddle the result coercion appropriately. Let's check whether this is well-typed. We know a :: forall (k :: Type). k -> k a (forall j. j -> Type) :: (forall j. j -> Type) -> forall j. j -> Type a (forall j. j -> Type) Proxy :: forall j. j -> Type a (forall j. j -> Type) Proxy Bool :: Bool -> Type a (forall j. j -> Type) Proxy Bool False :: Type a (forall j. j -> Type) Proxy Bool False |> res_co :: Star as desired. Whew. Historical note: I (Richard E) once thought that the final part of the kind had to be a variable k (as in the example above). 
But it might not be: it could be an application of a variable. Here is the example: let f :: forall (a :: Type) (b :: a -> Type). b (Any @a) k :: Type x :: k rewrite (f @Type @((->) k) x) After instantiating [a |-> Type, b |-> ((->) k)], we see that `b (Any @a)` is `k -> Any @a`, and thus the third argument of `x :: k` is well-kinded. -} -- | Stores 'Reductions' as well as a kind coercion. -- -- Used when rewriting arguments to a type function @f@. -- -- Invariant: -- when the stored reductions are of the form -- co_i :: ty_i ~ xi_i, -- the kind coercion is of the form -- kco :: typeKind (f ty_1 ... ty_n) ~ typeKind (f xi_1 ... xi_n) -- -- The type function @f@ depends on context. data ArgsReductions = ArgsReductions {-# UNPACK #-} !Reductions !MCoercionN -- The strictness annotations and UNPACK pragma here are crucial -- to getting good performance in simplifyArgsWorker's tight loop. -- This is shared between the rewriter and the normaliser in GHC.Core.FamInstEnv. -- See Note [simplifyArgsWorker] {-# INLINE simplifyArgsWorker #-} -- NB. INLINE yields a ~1% decrease in allocations in T9872d compared to INLINEABLE -- This function is only called in two locations, so the amount of code duplication -- should be rather reasonable despite the size of the function. simplifyArgsWorker :: HasDebugCallStack => [PiTyBinder] -> Kind -- the binders & result kind (not a Π-type) of the function applied to the args -- list of binders can be shorter or longer than the list of args -> TyCoVarSet -- free vars of the args -> Infinite Role-- list of roles, r -> [Reduction] -- rewritten type arguments, arg_i -- each comes with the coercion used to rewrite it, -- arg_co_i :: ty_i ~ arg_i -> ArgsReductions -- Returns ArgsReductions (Reductions cos xis) res_co, where co_i :: ty_i ~ xi_i, -- and res_co :: kind (f ty_1 ... ty_n) ~ kind (f xi_1 ... xi_n), where f is the function -- that we are applying. -- Precondition: if f :: forall bndrs. 
inner_ki (where bndrs and inner_ki are passed in), -- then (f ty_1 ... ty_n) is well kinded. Note that (f arg_1 ... arg_n) might *not* be well-kinded. -- Massaging the arg_i in order to make the function application well-kinded is what this -- function is all about. That is, (f xi_1 ... xi_n), where xi_i are the returned arguments, -- *is* well kinded. simplifyArgsWorker orig_ki_binders orig_inner_ki orig_fvs orig_roles orig_simplified_args = go orig_lc orig_ki_binders orig_inner_ki orig_roles orig_simplified_args where orig_lc = emptyLiftingContext $ mkInScopeSet orig_fvs go :: LiftingContext -- mapping from tyvars to rewriting coercions -> [PiTyBinder] -- Unsubsted binders of function's kind -> Kind -- Unsubsted result kind of function (not a Pi-type) -> Infinite Role -- Roles at which to rewrite these ... -> [Reduction] -- rewritten arguments, with their rewriting coercions -> ArgsReductions go !lc binders inner_ki _ [] -- The !lc makes the function strict in the lifting context -- which means GHC can unbox that pair. A modest win. = ArgsReductions (mkReductions [] []) kind_co where final_kind = mkPiTys binders inner_ki kind_co | noFreeVarsOfType final_kind = MRefl | otherwise = MCo $ liftCoSubst Nominal lc final_kind go lc (binder:binders) inner_ki (Inf role roles) (arg_redn:arg_redns) = -- We rewrite an argument ty with arg_redn = Reduction arg_co arg -- By Note [Rewriting] in GHC.Tc.Solver.Rewrite invariant (F2), -- typeKind(ty) = typeKind(arg). -- However, it is possible that arg will be used as an argument to a function -- whose kind is different, if earlier arguments have been rewritten. -- We thus need to compose the reduction with a kind coercion to ensure -- well-kindedness (see the call to mkCoherenceRightRedn below). 
-- -- The bangs here have been observed to improve performance -- significantly in optimized builds; see #18502 let !kind_co = liftCoSubst Nominal lc (piTyBinderType binder) !(Reduction casted_co casted_xi) = mkCoherenceRightRedn role arg_redn kind_co -- now, extend the lifting context with the new binding !new_lc | Just tv <- namedPiTyBinder_maybe binder = extendLiftingContextAndInScope lc tv casted_co | otherwise = lc !(ArgsReductions (Reductions cos xis) final_kind_co) = go new_lc binders inner_ki roles arg_redns in ArgsReductions (Reductions (casted_co:cos) (casted_xi:xis)) final_kind_co -- See Note [Last case in simplifyArgsWorker] go lc [] inner_ki roles arg_redns = let co1 = liftCoSubst Nominal lc inner_ki co1_kind = coercionKind co1 unrewritten_tys = map reductionOriginalType arg_redns (arg_cos, res_co) = decomposePiCos co1 co1_kind unrewritten_tys casted_args = assertPpr (equalLength arg_redns arg_cos) (ppr arg_redns $$ ppr arg_cos) $ zipWith3 mkCoherenceRightRedn (Inf.toList roles) arg_redns arg_cos -- In general decomposePiCos can return fewer cos than tys, -- but not here; because we're well typed, there will be enough -- binders. Note that decomposePiCos does substitutions, so even -- if the original substitution results in something ending with -- ... -> k, that k will be substituted to perhaps reveal more -- binders. zapped_lc = zapLiftingContext lc Pair rewritten_kind _ = co1_kind (bndrs, new_inner) = splitPiTys rewritten_kind ArgsReductions redns_out res_co_out = go zapped_lc bndrs new_inner roles casted_args in ArgsReductions redns_out (res_co `mkTransMCoR` res_co_out) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/RoughMap.hs0000644000000000000000000005153207346545000020662 0ustar0000000000000000-- | 'RoughMap' is an approximate finite map data structure keyed on -- @['RoughMatchTc']@. This is useful when keying maps on lists of 'Type's -- (e.g. an instance head). module GHC.Core.RoughMap ( -- * RoughMatchTc RoughMatchTc(..) 
, isRoughWildcard , typeToRoughMatchTc , RoughMatchLookupTc(..) , typeToRoughMatchLookupTc , roughMatchTcToLookup , roughMatchTcs , roughMatchTcsLookup , instanceCantMatch -- * RoughMap , RoughMap , emptyRM , lookupRM , lookupRM' , insertRM , filterRM , filterMatchingRM , elemsRM , sizeRM , foldRM , unionRM ) where import GHC.Prelude import GHC.Data.Bag import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.Type import GHC.Utils.Outputable import GHC.Types.Name import GHC.Types.Name.Env import GHC.Builtin.Types.Prim( cONSTRAINTTyConName, tYPETyConName ) import Control.Monad (join) import Data.Data (Data) import GHC.Utils.Panic {- Note [RoughMap] ~~~~~~~~~~~~~~~ We often want to compute whether one type matches another. That is, given `ty1` and `ty2`, we want to know whether `ty1` is a substitution instance of `ty2`. We can bail out early by taking advantage of the following observation: If `ty2` is headed by a generative type constructor, say `tc`, but `ty1` is not headed by that same type constructor, then `ty1` does not match `ty2`. The idea is that we can use a `RoughMap` as a pre-filter, to produce a short-list of candidates to examine more closely. This means we can avoid computing a full substitution if we represent types as applications of known generative type constructors. So, after type synonym expansion, we classify application heads into two categories ('RoughMatchTc') - `RM_KnownTc tc`: the head is the generative type constructor `tc`, - `RM_Wildcard`: anything else. A (RoughMap val) is semantically a list of (key,[val]) pairs, where key :: [RoughMatchTc] So, writing # for `OtherTc`, and Int for `KnownTc "Int"`, we might have [ ([#, Int, Maybe, #, Int], v1) , ([Int, #, List], v2 ] This map is stored as a trie, so looking up a key is very fast. See Note [Matching a RoughMap] and Note [Simple Matching Semantics] for details on lookup. 
We lookup a key of type [RoughMatchLookupTc], and return the list of all values whose keys "match": Given the above map, here are the results of some lookups: Lookup key Result ------------------------- [Int, Int] [v1,v2] -- Matches because the prefix of both entries matches [Int,Int,List] [v2] [Bool] [] Notice that a single key can map to /multiple/ values. E.g. if we started with (Maybe Int, val1) and (Maybe Bool, val2), we'd generate a RoughMap that is semantically the list [( Maybe, [val1,val2] )] Note [RoughMap and beta reduction] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is one tricky case we have to account for when matching a rough map due to Note [Eta reduction for data families] in `GHC.Core.Coercion.Axiom`: Consider that the user has written a program containing a data family: > data family Fam a b > data instance Fam Int a = SomeType -- known henceforth as FamIntInst The LHS of this instance will be eta reduced, as described in Note [Eta reduction for data families]. Consequently, we will end up with a `FamInst` with `fi_tcs = [KnownTc Int]`. Naturally, we need RoughMap to return this instance when queried for an instance with template, e.g., `[KnownTc Fam, KnownTc Int, KnownTc Char]`. This explains the third clause of the mightMatch specification in Note [Simple Matching Semantics]. As soon as the lookup key runs out, the remaining instances might match. This only matters for the data-family case of a FamInstEnv (see Note [Over-saturated matches] in GHC.Core.FamInstEnv; it's irrelevantfor ClsInstEnv and for type-family instances. But we use RoughMaps for all cases, so we are conservative. Note [Matching a RoughMap] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The /lookup key/ into a rough map (RoughMatchLookupTc) is slightly different to the /insertion key/ (RoughMatchTc). Like the insertion key each lookup argument is classified to a simpler key which describes what could match that position. 
There are three possibilities: * RML_KnownTc Name: The argument is headed by a known type constructor. Example: 'Bool' is classified as 'RML_KnownTc Bool' and '[Int]' is classified as `RML_KnownTc []` * RML_NoKnownTc: The argument is definitely not headed by any known type constructor. Example: For instance matching 'a[sk], a[tau]' and 'F a[sk], F a[tau]' are classified as 'RML_NoKnownTc', for family instance matching no examples. * RML_WildCard: The argument could match anything, we don't know enough about it. For instance matching no examples, for type family matching, things to do with variables. The interesting case for instance matching is the second case, because it does not appear in an insertion key. The second case arises in two situations: 1. The head of the application is a type variable. The type variable definitely doesn't match with any of the KnownTC instances so we can discard them all. For example: Show a[sk] or Show (a[sk] b[sk]). One place constraints like this arise is when typechecking derived instances. 2. The head of the application is a known type family. For example: F a[sk]. The application of F is stuck, and because F is a type family it won't match any KnownTC instance so it's safe to discard all these instances. Of course, these two cases can still match instances of the form `forall a . Show a =>`, and those instances are retained as they are classified as RM_WildCard instances. Note [Matches vs Unifiers] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The lookupRM' function returns a pair of potential /matches/ and potential /unifiers/. The potential matches is likely to be much smaller than the bag of potential unifiers due to the reasoning about rigid type variables described in Note [Matching a RoughMap]. On the other hand, the instances captured by the RML_NoKnownTC case can still potentially unify with any instance (depending on the substitution of said rigid variable) so they can't be discounted from the list of potential unifiers. 
This is achieved by the RML_NoKnownTC case continuing the lookup for unifiers by replacing RML_NoKnownTC with RML_LookupOtherTC. This distinction between matches and unifiers is also important for type families. During normal type family lookup, we care about matches and when checking for consistency we care about the unifiers. This is evident in the code as `lookup_fam_inst_env` is parameterised over a lookup function which either performs matching checking or unification checking. In addition to this, we only care whether there are zero or non-zero potential unifiers, even if we have many candidates, the search can stop before consulting each candidate. We only need the full list of unifiers when displaying error messages. Therefore the list is computed lazily so much work can be avoided constructing the list in the first place. Note [Simple Matching Semantics] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose `rm` is a RoughMap representing a set of (key,vals) pairs, where key::[RoughMapTc] and val::a. Suppose I look up a key lk :: [RoughMapLookupTc] in `rm` Then I get back (matches, unifiers) where matches = [ vals | (key,vals) <- rm, key `mightMatch` lk ] unifiers = [ vals | (key,vals) <- rm, key `mightUnify` lk ] Where mightMatch is defined like this: mightMatch :: [RoughMapTc] -> [RoughMapLookupTc] -> Bool mightMatch [] [] = True -- A perfectly sized match might match mightMatch key [] = True -- A shorter lookup key matches everything mightMatch [] (_:_) = True -- If the lookup key is longer, then still might match -- Note [RoughMap and beta reduction] mightMatch (k:ks) (lk:lks) = = case (k,lk) of -- Standard case, matching on a specific known TyCon. (RM_KnownTc n1, RML_KnownTc n2) -> n1==n2 && mightMatch ks lks -- For example, if the key for 'Show Bool' is [RM_KnownTc Show, RM_KnownTc Bool] ---and we match against (Show a[sk]) [RM_KnownTc Show, RML_NoKnownTc] -- then Show Bool can never match Show a[sk] so return False. 
(RM_KnownTc _, RML_NoKnownTc) -> False -- Wildcard cases don't inform us anything about the match. (RM_WildCard, _ ) -> mightMatch ks lks (_, RML_WildCard) -> mightMatch ks lks -- Might unify is very similar to mightMatch apart from RML_NoKnownTc may -- unify with any instance. mightUnify :: [RoughMapTc] -> [RoughMapLookupTc] -> Bool mightUnify [] [] = True -- A perfectly sized match might unify mightUnify key [] = True -- A shorter lookup key matches everything mightUnify [] (_:_) = True mightUnify (k:ks) (lk:lks) = = case (k,lk) of (RM_KnownTc n1, RML_KnownTc n2) -> n1==n2 && mightUnify ks lks (RM_KnownTc _, RML_NoKnownTc) -> mightUnify (k:ks) (RML_WildCard:lks) (RM_WildCard, _ ) -> mightUnify ks lks (_, RML_WildCard) -> mightUnify ks lks The guarantee that RoughMap provides is that if insert_ty `tcMatchTy` lookup_ty then definitely typeToRoughMatchTc insert_ty `mightMatch` typeToRoughMatchLookupTc lookup_ty but not vice versa this statement encodes the intuition that the RoughMap is used as a quick pre-filter to remove instances from the matching pool. The contrapositive states that if the RoughMap reports that the instance doesn't match then `tcMatchTy` will report that the types don't match as well. -} {- ********************************************************************* * * Rough matching * * ********************************************************************* -} {- Note [Rough matching in class and family instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider instance C (Maybe [Tree a]) Bool and suppose we are looking up C Bool Bool We can very quickly rule the instance out, because the first argument is headed by Maybe, whereas in the constraint we are looking up has first argument headed by Bool. These "headed by" TyCons are called the "rough match TyCons" of the constraint or instance. They are used for a quick filter, to check when an instance cannot possibly match. 
The main motivation is to avoid sucking in whole instance
declarations that are utterly useless.  See GHC.Core.InstEnv
Note [ClsInst laziness and the rough-match fields].

INVARIANT: a rough-match TyCons `tc` is always a real, generative tycon,
like Maybe or Either, including a newtype or a data family, both of
which are generative. It replies True to `isGenerativeTyCon tc Nominal`.

But it is never
    - A type synonym
      E.g. Int and (S Bool) might match
           if (S Bool) is a synonym for Int

    - A type family (#19336)
      E.g.   (Just a) and (F a) might match if (F a) reduces to (Just a)
             albeit perhaps only after 'a' is instantiated.
-}

-- Key for insertion into a RoughMap
data RoughMatchTc
  = RM_KnownTc Name  -- INVARIANT: Name refers to a TyCon tc that responds
                     -- true to `isGenerativeTyCon tc Nominal`. See
                     -- Note [Rough matching in class and family instances]
  | RM_WildCard      -- e.g. type variable at the head
  deriving( Data )

-- Key for lookup into a RoughMap
-- See Note [Matching a RoughMap]
data RoughMatchLookupTc
  = RML_KnownTc Name -- ^ The position only matches the specified KnownTc
  | RML_NoKnownTc    -- ^ The position definitely doesn't match any KnownTc
  | RML_WildCard     -- ^ The position can match anything
  deriving ( Data )

instance Outputable RoughMatchLookupTc where
  ppr (RML_KnownTc nm) = text "RML_KnownTc" <+> ppr nm
  ppr RML_NoKnownTc = text "RML_NoKnownTC"
    -- NOTE(review): the rendered string "RML_NoKnownTC" differs in
    -- capitalisation from the constructor name RML_NoKnownTc; looks like
    -- a typo in the debug output — confirm before changing, since it is
    -- a runtime string.
  ppr RML_WildCard = text "_"

instance Outputable RoughMatchTc where
  ppr (RM_KnownTc nm) = text "KnownTc" <+> ppr nm
  ppr RM_WildCard = text "OtherTc"

instanceCantMatch :: [RoughMatchTc] -> [RoughMatchTc] -> Bool
-- (instanceCantMatch tcs1 tcs2) returns True if tcs1 cannot
-- possibly be instantiated to actual, nor vice versa;
-- False is non-committal
instanceCantMatch (mt : ts) (ma : as) = itemCantMatch mt ma || instanceCantMatch ts as
instanceCantMatch _ _ = False  -- Safe
  -- Safe: when either key list runs out we can draw no conclusion, so we
  -- conservatively answer False ("might match").

-- | Can these two rough-match keys definitely not match each other?
-- Only two distinct known TyCons are certainly apart; a wildcard on
-- either side is non-committal.
itemCantMatch :: RoughMatchTc -> RoughMatchTc -> Bool
itemCantMatch (RM_KnownTc t) (RM_KnownTc a) = t /= a
itemCantMatch _ _ = False

-- | Convert an insertion key into the corresponding lookup key.
-- Note there is no RML_NoKnownTc image: insertion keys are never
-- "definitely not any known TyCon".
roughMatchTcToLookup :: RoughMatchTc -> RoughMatchLookupTc
roughMatchTcToLookup (RM_KnownTc n) = RML_KnownTc n
roughMatchTcToLookup RM_WildCard = RML_WildCard

isRoughWildcard :: RoughMatchTc -> Bool
isRoughWildcard RM_WildCard = True
isRoughWildcard (RM_KnownTc {}) = False

-- | Compute the insertion keys for a list of types (one key per type).
roughMatchTcs :: [Type] -> [RoughMatchTc]
roughMatchTcs tys = map typeToRoughMatchTc tys

-- | Compute the lookup keys for a list of types (one key per type).
roughMatchTcsLookup :: [Type] -> [RoughMatchLookupTc]
roughMatchTcsLookup tys = map typeToRoughMatchLookupTc tys

typeToRoughMatchLookupTc :: Type -> RoughMatchLookupTc
typeToRoughMatchLookupTc ty
  -- Expand synonyms first, as explained in Note [Rough matching in class and family instances].
  -- Failing to do so led to #22985.
  | Just ty' <- coreView ty = typeToRoughMatchLookupTc ty'
  | CastTy ty' _ <- ty = typeToRoughMatchLookupTc ty'
  | otherwise =
      case splitAppTys ty of
        -- Case 1: Head of application is a type variable, does not match any KnownTc.
        (TyVarTy {}, _) -> RML_NoKnownTc
        (TyConApp tc _, _)
          -- Case 2: Head of application is a known type constructor, hence KnownTc.
          | not (isTypeFamilyTyCon tc) -> RML_KnownTc $! roughMatchTyConName tc
              -- ($!): force the key eagerly so the thunk does not retain tc
          -- Case 3: Head is a type family so it's stuck and therefore doesn't match
          -- any KnownTc
          | isTypeFamilyTyCon tc -> RML_NoKnownTc
        -- Fallthrough: Otherwise, anything might match this position
        _ -> RML_WildCard

-- | Compute the insertion key for a type: the head TyCon when it is a
-- generative constructor application, otherwise a wildcard.
typeToRoughMatchTc :: Type -> RoughMatchTc
typeToRoughMatchTc ty
  | Just (ty', _) <- splitCastTy_maybe ty = typeToRoughMatchTc ty'
  | Just (tc,_) <- splitTyConApp_maybe ty
  , not (isTypeFamilyTyCon tc)            = RM_KnownTc $! roughMatchTyConName tc
      -- See Note [Rough matching in class and family instances]
  | otherwise                             = RM_WildCard

roughMatchTyConName :: TyCon -> Name
roughMatchTyConName tc
  | tc_name == cONSTRAINTTyConName
  = tYPETyConName   -- TYPE and CONSTRAINT are not apart, so they must use
                    -- the same rough-map key. We arbitrarily use TYPE.
    -- See Note [Type and Constraint are not apart]
    -- wrinkle (W1) in GHC.Builtin.Types.Prim
  | otherwise
  = assertPpr (isGenerativeTyCon tc Nominal) (ppr tc)
    tc_name
  where
    tc_name = tyConName tc

-- | Trie of @[RoughMatchTc]@
--
-- *Examples*
-- @
-- insert [OtherTc] 1
-- insert [OtherTc] 2
-- lookup [OtherTc] == [1,2]
-- @
data RoughMap a
  = RMEmpty  -- An optimised (finite) form of emptyRM
             -- Invariant: Empty RoughMaps are always represented with RMEmpty
  | RM { rm_empty :: Bag a
            -- Keyed by an empty [RoughMapTc]
       , rm_known :: DNameEnv (RoughMap a)
            -- Keyed by (RM_KnownTc tc : rm_tcs)
            -- DNameEnv: see Note [InstEnv determinism] in GHC.Core.InstEnv
       , rm_wild :: RoughMap a }
            -- Keyed by (RM_WildCard : rm_tcs)
  deriving (Functor)

instance Outputable a => Outputable (RoughMap a) where
  ppr (RM empty known unknown) =
    vcat [ text "RM"
         , nest 2 (vcat [ text "Empty:" <+> ppr empty
                        , text "Known:" <+> ppr known
                        , text "Unknown:" <+> ppr unknown ]) ]
  ppr RMEmpty = text "{}"

emptyRM :: RoughMap a
emptyRM = RMEmpty

-- | Order of result is deterministic.
lookupRM :: [RoughMatchLookupTc] -> RoughMap a -> [a]
lookupRM tcs rm = bagToList (fst $ lookupRM' tcs rm)

-- | N.B. Returns a 'Bag' for matches, which allows us to avoid rebuilding all of the lists
-- we find in 'rm_empty', which would otherwise be necessary due to '++' if we
-- returned a list. We use a list for unifiers because the tail is computed lazily and
-- we often only care about the first couple of potential unifiers. Constructing a
-- bag forces the tail which performs much too much work.
--
-- See Note [Matching a RoughMap]
-- See Note [Matches vs Unifiers]
lookupRM' :: [RoughMatchLookupTc] -> RoughMap a -> ( Bag a -- Potential matches
                                                   , [a] ) -- Potential unifiers
lookupRM' _ RMEmpty   -- The RoughMap is empty
  = (emptyBag, [])
lookupRM' [] rm       -- See Note [Simple Matching Semantics] about why
  = (listToBag m, m)  -- we return everything when the lookup key runs out
  where
    m = elemsRM rm
lookupRM' (RML_KnownTc tc : tcs) rm
  = let -- Entries stored under a wildcard at this position can also
        -- match/unify with a known TyCon.
        (common_m, common_u) = lookupRM' tcs (rm_wild rm)
        (m, u) = maybe (emptyBag, []) (lookupRM' tcs) (lookupDNameEnv (rm_known rm) tc)
    in ( rm_empty rm `unionBags` common_m `unionBags` m
       , bagToList (rm_empty rm) ++ common_u ++ u )
-- A RML_NoKnownTc does **not** match any RM_KnownTc but can unify
lookupRM' (RML_NoKnownTc : tcs) rm
  = let (u_m, _u_u) = lookupRM' tcs (rm_wild rm)
    in ( rm_empty rm `unionBags` u_m               -- Definitely don't match
       , snd $ lookupRM' (RML_WildCard : tcs) rm ) -- But could unify..
lookupRM' (RML_WildCard : tcs) rm
  = -- pprTrace "RM wild" (ppr tcs $$ ppr (eltsDNameEnv (rm_known rm))) $
    let (m, u) = foldDNameEnv add_one (emptyBag, []) (rm_known rm)
        (u_m, u_u) = lookupRM' tcs (rm_wild rm)
    in ( rm_empty rm `unionBags` u_m `unionBags` m
       , bagToList (rm_empty rm) ++ u_u ++ u )
  where
    add_one :: RoughMap a -> (Bag a, [a]) -> (Bag a, [a])
    -- The lazy pattern ~(m2, u2) matters: the unifier list is meant to be
    -- consumed lazily (see the haddock above), so the accumulator must not
    -- be forced while folding over rm_known.
    add_one rm ~(m2, u2) = (m1 `unionBags` m2, u1 ++ u2)
      where
        (m1,u1) = lookupRM' tcs rm

unionRM :: RoughMap a -> RoughMap a -> RoughMap a
unionRM RMEmpty a = a
unionRM a RMEmpty = a
unionRM a b =
  RM { rm_empty = rm_empty a `unionBags` rm_empty b
     , rm_known = plusDNameEnv_C unionRM (rm_known a) (rm_known b)
     , rm_wild = rm_wild a `unionRM` rm_wild b
     }

insertRM :: [RoughMatchTc] -> a -> RoughMap a -> RoughMap a
insertRM k v RMEmpty =
    -- Expand the optimised empty representation into a real RM and retry.
    insertRM k v $ RM { rm_empty = emptyBag
                      , rm_known = emptyDNameEnv
                      , rm_wild = emptyRM }
insertRM [] v rm@(RM {}) =
    -- See Note [Simple Matching Semantics]
    rm { rm_empty = v `consBag` rm_empty rm }
insertRM (RM_KnownTc k : ks) v rm@(RM {}) =
    rm { rm_known = alterDNameEnv f (rm_known rm) k }
  where
    f Nothing = Just $ (insertRM ks v emptyRM)
    f (Just m) = Just $ (insertRM ks v m)
insertRM (RM_WildCard : ks) v rm@(RM {}) =
    rm { rm_wild = insertRM ks v (rm_wild rm) }

filterRM :: (a -> Bool) -> RoughMap a -> RoughMap a
filterRM _ RMEmpty = RMEmpty
filterRM pred rm =
    normalise $ RM {
      rm_empty = filterBag pred (rm_empty rm),
      rm_known = mapDNameEnv (filterRM pred) (rm_known rm),
      rm_wild = filterRM pred (rm_wild rm)
    }

-- | Place a 'RoughMap' in normal form, turning all empty 'RM's into
-- 'RMEmpty's. Necessary after removing items.
normalise :: RoughMap a -> RoughMap a
normalise RMEmpty = RMEmpty
normalise (RM empty known RMEmpty)
  | isEmptyBag empty
  , isEmptyDNameEnv known = RMEmpty
normalise rm = rm

-- | Filter all elements that might match a particular key with the given
-- predicate.
filterMatchingRM :: (a -> Bool) -> [RoughMatchTc] -> RoughMap a -> RoughMap a
filterMatchingRM _    _  RMEmpty = RMEmpty
filterMatchingRM pred [] rm = filterRM pred rm
filterMatchingRM pred (RM_KnownTc tc : tcs) rm =
    normalise $ RM {
      rm_empty = filterBag pred (rm_empty rm),
      rm_known = alterDNameEnv (join . fmap (dropEmpty . filterMatchingRM pred tcs)) (rm_known rm) tc,
      rm_wild = filterMatchingRM pred tcs (rm_wild rm)
    }
filterMatchingRM pred (RM_WildCard : tcs) rm =
    normalise $ RM {
      rm_empty = filterBag pred (rm_empty rm),
      rm_known = mapDNameEnv (filterMatchingRM pred tcs) (rm_known rm),
      rm_wild = filterMatchingRM pred tcs (rm_wild rm)
    }

-- | Turn a map that has become empty after filtering into Nothing,
-- so alterDNameEnv can delete the entry.
dropEmpty :: RoughMap a -> Maybe (RoughMap a)
dropEmpty RMEmpty = Nothing
dropEmpty rm = Just rm

elemsRM :: RoughMap a -> [a]
elemsRM = foldRM (:) []

foldRM :: (a -> b -> b) -> b -> RoughMap a -> b
foldRM f = go
  where
    -- N.B. local worker ensures that the loop can be specialised to the fold
    -- function.
go z RMEmpty = z go z (RM{ rm_wild = unk, rm_known = known, rm_empty = empty}) = foldr f (foldDNameEnv (flip go) (go z unk) known ) empty nonDetStrictFoldRM :: (b -> a -> b) -> b -> RoughMap a -> b nonDetStrictFoldRM f = go where -- N.B. local worker ensures that the loop can be specialised to the fold -- function. go !z RMEmpty = z go z rm@(RM{}) = foldl' f (nonDetStrictFoldDNameEnv (flip go) (go z (rm_wild rm)) (rm_known rm) ) (rm_empty rm) sizeRM :: RoughMap a -> Int sizeRM = nonDetStrictFoldRM (\acc _ -> acc + 1) 0 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Rules.hs0000644000000000000000000023714107346545000020234 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[CoreRules]{Rewrite rules} -} -- | Functions for collecting together and applying rewrite rules to a module. -- The 'CoreRule' datatype itself is declared elsewhere. module GHC.Core.Rules ( -- ** Looking up rules lookupRule, matchExprs, -- ** RuleBase, RuleEnv RuleBase, RuleEnv(..), mkRuleEnv, emptyRuleEnv, updExternalPackageRules, addLocalRules, updLocalRules, emptyRuleBase, mkRuleBase, extendRuleBaseList, pprRuleBase, -- ** Checking rule applications ruleCheckProgram, -- ** Manipulating 'RuleInfo' rules extendRuleInfo, addRuleInfo, addIdSpecialisations, addRulesToId, -- ** RuleBase and RuleEnv -- * Misc. CoreRule helpers rulesOfBinds, getRules, pprRulesForUser, -- * Making rules mkRule, mkSpecRule, roughTopNames ) where import GHC.Prelude import GHC.Unit.Module ( Module ) import GHC.Unit.Module.Env import GHC.Unit.Module.ModGuts( ModGuts(..) ) import GHC.Unit.Module.Deps( Dependencies(..) 
) import GHC.Driver.DynFlags( DynFlags ) import GHC.Driver.Ppr( showSDoc ) import GHC.Core -- All of it import GHC.Core.Subst import GHC.Core.SimpleOpt ( exprIsLambda_maybe ) import GHC.Core.FVs ( exprFreeVars, bindFreeVars , rulesFreeVarsDSet, orphNamesOfExprs ) import GHC.Core.Utils ( exprType, mkTick, mkTicks , stripTicksTopT, stripTicksTopE , isJoinBind, mkCastMCo ) import GHC.Core.Ppr ( pprRules ) import GHC.Core.Unify as Unify ( ruleMatchTyKiX ) import GHC.Core.Type as Type ( Type, extendTvSubst, extendCvSubst , substTy, getTyVar_maybe ) import GHC.Core.TyCo.Ppr( pprParendType ) import GHC.Core.Coercion as Coercion import GHC.Core.Tidy ( tidyRules ) import GHC.Core.Map.Expr ( eqCoreExpr ) import GHC.Core.Opt.Arity( etaExpandToJoinPointRule ) import GHC.Core.Make ( mkCoreLams ) import GHC.Core.Opt.OccurAnal( occurAnalyseExpr ) import GHC.Tc.Utils.TcType ( tcSplitTyConApp_maybe ) import GHC.Builtin.Types ( anyTypeOfKind ) import GHC.Types.Id import GHC.Types.Id.Info ( RuleInfo( RuleInfo ) ) import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name ( Name, NamedThing(..), nameIsLocalOrFrom ) import GHC.Types.Name.Set import GHC.Types.Name.Env import GHC.Types.Name.Occurrence( occNameFS ) import GHC.Types.Unique.FM import GHC.Types.Tickish import GHC.Types.Basic import GHC.Data.FastString import GHC.Data.Maybe import GHC.Data.Bag import GHC.Data.List.SetOps( hasNoDups ) import GHC.Utils.FV( filterFV, fvVarSet ) import GHC.Utils.Misc as Utils import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import Data.List (sortBy, mapAccumL, isPrefixOf) import Data.Function ( on ) import Control.Monad ( guard ) {- Note [Overall plumbing for rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * After the desugarer: - The ModGuts initially contains mg_rules :: [CoreRule] of locally-declared rules for imported Ids. - Locally-declared rules for locally-declared Ids are attached to the IdInfo for that Id. 
     See Note [Attach rules to local ids] in GHC.HsToCore.Binds

* GHC.Iface.Tidy strips off all the rules from local Ids and adds them to
  mg_rules, so that the ModGuts has *all* the locally-declared rules.

* The HomePackageTable contains a ModDetails for each home package module.
  Each contains md_rules :: [CoreRule] of rules declared in that module.
  The HomePackageTable grows as ghc --make does its up-sweep.  In batch
  mode (ghc -c), the HPT is empty; all imported modules are treated by
  the "external" route, discussed next, regardless of which package they
  come from.

* The ExternalPackageState has a single eps_rule_base :: RuleBase for
  Ids in other packages.  This RuleBase simply grows monotonically, as
  ghc --make compiles one module after another.

  During simplification, interface files may get demand-loaded,
  as the simplifier explores the unfoldings for Ids it has in
  its hand.  (Via an unsafePerformIO; the EPS is really a cache.)
  That in turn may make the EPS rule-base grow.  In contrast, the
  HPT never grows in this way.

* The result of all this is that during Core-to-Core optimisation
  there are four sources of rules:

    (a) Rules in the IdInfo of the Id they are a rule for.  These are
        easy: fast to look up, and if you apply a substitution then
        it'll be applied to the IdInfo as a matter of course.

    (b) Rules declared in this module for imported Ids, kept in the
        ModGuts. If you do a substitution, you'd better apply the
        substitution to these.  There are seldom many of these.

    (c) Rules declared in the HomePackageTable.  These never change.

    (d) Rules in the ExternalPackageTable.  These can grow in response
        to lazy demand-loading of interfaces.

* At the moment (c) is carried in a reader-monad way by the
  GHC.Core.Opt.Monad.  The HomePackageTable doesn't have a single
  RuleBase because technically we should only be able to "see" rules
  "below" this module; so we generate a RuleBase for (c) by combining
  rules from all the modules "below" us.
That's why we can't just select the home-package RuleBase from HscEnv. [NB: we are inconsistent here. We should do the same for external packages, but we don't. Same for type-class instances.] * So in the outer simplifier loop (simplifyPgmIO), we combine (b & c) into a single RuleBase, reading (b) from the ModGuts, (c) from the GHC.Core.Opt.Monad, and just before doing rule matching we read (d) from its mutable variable and combine it with the results from (b & c). In a single simplifier run new rules can be added into the EPS so it matters to keep an up-to-date view of which rules have been loaded. For examples of where this went wrong and caused cryptic performance regressions see T19790 and !6735. ************************************************************************ * * \subsection[specialisation-IdInfo]{Specialisation info about an @Id@} * * ************************************************************************ A CoreRule holds details of one rule for an Id, which includes its specialisations. For example, if a rule for f is RULE "f" forall @a @b d. f @(List a) @b d = f' a b then when we find an application of f to matching types, we simply replace it by the matching RHS: f (List Int) Bool dict ===> f' Int Bool All the stuff about how many dictionaries to discard, and what types to apply the specialised function to, are handled by the fact that the Rule contains a template for the result of the specialisation. -} mkRule :: Module -> Bool -> Bool -> RuleName -> Activation -> Name -> [CoreBndr] -> [CoreExpr] -> CoreExpr -> CoreRule -- ^ Used to make 'CoreRule' for an 'Id' defined in the module being -- compiled. 
See also 'GHC.Core.CoreRule' mkRule this_mod is_auto is_local name act fn bndrs args rhs = Rule { ru_name = name , ru_act = act , ru_fn = fn , ru_bndrs = bndrs , ru_args = args , ru_rhs = occurAnalyseExpr rhs -- See Note [OccInfo in unfoldings and rules] , ru_rough = roughTopNames args , ru_origin = this_mod , ru_orphan = orph , ru_auto = is_auto , ru_local = is_local } where -- Compute orphanhood. See Note [Orphans] in GHC.Core.InstEnv -- A rule is an orphan only if none of the variables -- mentioned on its left-hand side are locally defined lhs_names = extendNameSet (orphNamesOfExprs args) fn -- Since rules get eventually attached to one of the free names -- from the definition when compiling the ABI hash, we should make -- it deterministic. This chooses the one with minimal OccName -- as opposed to uniq value. local_lhs_names = filterNameSet (nameIsLocalOrFrom this_mod) lhs_names orph = chooseOrphanAnchor local_lhs_names -------------- mkSpecRule :: DynFlags -> Module -> Bool -> Activation -> SDoc -> Id -> [CoreBndr] -> [CoreExpr] -> CoreExpr -> CoreRule -- Make a specialisation rule, for Specialise or SpecConstr mkSpecRule dflags this_mod is_auto inl_act herald fn bndrs args rhs = case idJoinPointHood fn of JoinPoint join_arity -> etaExpandToJoinPointRule join_arity rule NotJoinPoint -> rule where rule = mkRule this_mod is_auto is_local rule_name inl_act -- Note [Auto-specialisation and RULES] (idName fn) bndrs args rhs is_local = isLocalId fn rule_name = mkSpecRuleName dflags herald fn args mkSpecRuleName :: DynFlags -> SDoc -> Id -> [CoreExpr] -> FastString mkSpecRuleName dflags herald fn args = mkFastString $ showSDoc dflags $ herald <+> ftext (occNameFS (getOccName fn)) -- This name ends up in interface files, so use occNameFS. 
-- Otherwise uniques end up there, making builds -- less deterministic (See #4012 comment:61 ff) <+> hsep (mapMaybe ppr_call_key_ty args) where ppr_call_key_ty :: CoreExpr -> Maybe SDoc ppr_call_key_ty (Type ty) = case getTyVar_maybe ty of Just {} -> Just (text "@_") Nothing -> Just $ char '@' <> pprParendType ty ppr_call_key_ty _ = Nothing -------------- roughTopNames :: [CoreExpr] -> [Maybe Name] -- ^ Find the \"top\" free names of several expressions. -- Such names are either: -- -- 1. The function finally being applied to in an application chain -- (if that name is a GlobalId: see "GHC.Types.Var#globalvslocal"), or -- -- 2. The 'TyCon' if the expression is a 'Type' -- -- This is used for the fast-match-check for rules; -- if the top names don't match, the rest can't roughTopNames args = map roughTopName args roughTopName :: CoreExpr -> Maybe Name roughTopName (Type ty) = case tcSplitTyConApp_maybe ty of Just (tc,_) -> Just (getName tc) Nothing -> Nothing roughTopName (Coercion _) = Nothing roughTopName (App f _) = roughTopName f roughTopName (Var f) | isGlobalId f -- Note [Care with roughTopName] , isDataConWorkId f || idArity f > 0 = Just (idName f) roughTopName (Tick t e) | tickishFloatable t = roughTopName e roughTopName _ = Nothing ruleCantMatch :: [Maybe Name] -> [Maybe Name] -> Bool -- ^ @ruleCantMatch tpl actual@ returns True only if @actual@ -- definitely can't match @tpl@ by instantiating @tpl@. -- It's only a one-way match; unlike instance matching we -- don't consider unification. 
--
-- Notice that
--      @ruleCantMatch [Nothing] [Just n2] = False@
--      Reason: a template variable can be instantiated by a constant
-- Also:
--      @ruleCantMatch [Just n1] [Nothing] = False@
--      Reason: a local variable @v@ in the actuals might
ruleCantMatch (Just n1 : ts) (Just n2 : as) = n1 /= n2 || ruleCantMatch ts as
ruleCantMatch (_       : ts) (_       : as) = ruleCantMatch ts as
ruleCantMatch _              _              = False
  -- When either list is exhausted we must be non-committal: the rule
  -- might still match, so only a definite Name mismatch answers True.

{- Note [Care with roughTopName]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
    module M where { x = a:b }
    module N where { ...f x...
                     RULE f (p:q) = ... }
You'd expect the rule to match, because the matcher can
look through the unfolding of 'x'.

So we must avoid roughTopName returning 'M.x' for the call (f x), or else
it'll say "can't match" and we won't even try!!

However, suppose we have
         RULE g (M.h x) = ...
         foo = ...(g (M.k v))....
where k is a *function* exported by M.  We never really match
functions (lambdas) except by name, so in this case it seems like
a good idea to treat 'M.k' as a roughTopName of the call.
-} pprRulesForUser :: [CoreRule] -> SDoc -- (a) tidy the rules -- (b) sort them into order based on the rule name -- (c) suppress uniques (unless -dppr-debug is on) -- This combination makes the output stable so we can use in testing -- It's here rather than in GHC.Core.Ppr because it calls tidyRules pprRulesForUser rules = withPprStyle defaultUserStyle $ pprRules $ sortBy (lexicalCompareFS `on` ruleName) $ tidyRules emptyTidyEnv rules {- ************************************************************************ * * RuleInfo: the rules in an IdInfo * * ************************************************************************ -} extendRuleInfo :: RuleInfo -> [CoreRule] -> RuleInfo extendRuleInfo (RuleInfo rs1 fvs1) rs2 = RuleInfo (rs2 ++ rs1) (rulesFreeVarsDSet rs2 `unionDVarSet` fvs1) addRuleInfo :: RuleInfo -> RuleInfo -> RuleInfo addRuleInfo (RuleInfo rs1 fvs1) (RuleInfo rs2 fvs2) = RuleInfo (rs1 ++ rs2) (fvs1 `unionDVarSet` fvs2) addIdSpecialisations :: Id -> [CoreRule] -> Id addIdSpecialisations id rules | null rules = id | otherwise = setIdSpecialisation id $ extendRuleInfo (idSpecialisation id) rules addRulesToId :: RuleBase -> Id -> Id -- Add rules in the RuleBase to the rules in the Id addRulesToId rule_base bndr | Just rules <- lookupNameEnv rule_base (idName bndr) = bndr `addIdSpecialisations` rules | otherwise = bndr -- | Gather all the rules for locally bound identifiers from the supplied bindings rulesOfBinds :: [CoreBind] -> [CoreRule] rulesOfBinds binds = concatMap (concatMap idCoreRules . bindersOf) binds {- ************************************************************************ * * RuleBase * * ************************************************************************ -} -- | Gathers a collection of 'CoreRule's. 
Maps (the name of) an 'Id' to its rules type RuleBase = NameEnv [CoreRule] -- The rules are unordered; -- we sort out any overlaps on lookup emptyRuleBase :: RuleBase emptyRuleBase = emptyNameEnv mkRuleBase :: [CoreRule] -> RuleBase mkRuleBase rules = extendRuleBaseList emptyRuleBase rules extendRuleBaseList :: RuleBase -> [CoreRule] -> RuleBase extendRuleBaseList rule_base new_guys = foldl' extendRuleBase rule_base new_guys extendRuleBase :: RuleBase -> CoreRule -> RuleBase extendRuleBase rule_base rule = extendNameEnv_Acc (:) Utils.singleton rule_base (ruleIdName rule) rule pprRuleBase :: RuleBase -> SDoc pprRuleBase rules = pprUFM rules $ \rss -> vcat [ pprRules (tidyRules emptyTidyEnv rs) | rs <- rss ] -- | A full rule environment which we can apply rules from. Like a 'RuleBase', -- but it also includes the set of visible orphans we use to filter out orphan -- rules which are not visible (even though we can see them...) -- See Note [Orphans] in GHC.Core data RuleEnv = RuleEnv { re_local_rules :: !RuleBase -- Rules from this module , re_home_rules :: !RuleBase -- Rule from the home package -- (excl this module) , re_eps_rules :: !RuleBase -- Rules from other packages -- see Note [External package rules] , re_visible_orphs :: !ModuleSet } mkRuleEnv :: ModGuts -> RuleBase -> RuleBase -> RuleEnv mkRuleEnv (ModGuts { mg_module = this_mod , mg_deps = deps , mg_rules = local_rules }) eps_rules hpt_rules = RuleEnv { re_local_rules = mkRuleBase local_rules , re_home_rules = hpt_rules , re_eps_rules = eps_rules , re_visible_orphs = mkModuleSet vis_orphs } where vis_orphs = this_mod : dep_orphs deps updExternalPackageRules :: RuleEnv -> RuleBase -> RuleEnv -- Completely over-ride the external rules in RuleEnv updExternalPackageRules rule_env eps_rules = rule_env { re_eps_rules = eps_rules } updLocalRules :: RuleEnv -> [CoreRule] -> RuleEnv -- Completely over-ride the local rules in RuleEnv updLocalRules rule_env local_rules = rule_env { re_local_rules = mkRuleBase 
local_rules } addLocalRules :: RuleEnv -> [CoreRule] -> RuleEnv -- Add new local rules addLocalRules rule_env rules = rule_env { re_local_rules = extendRuleBaseList (re_local_rules rule_env) rules } emptyRuleEnv :: RuleEnv emptyRuleEnv = RuleEnv { re_local_rules = emptyNameEnv , re_home_rules = emptyNameEnv , re_eps_rules = emptyNameEnv , re_visible_orphs = emptyModuleSet } getRules :: RuleEnv -> Id -> [CoreRule] -- Given a RuleEnv and an Id, find the visible rules for that Id -- See Note [Where rules are found] -- -- This function is quite heavily used, so it's worth trying to make it efficient getRules (RuleEnv { re_local_rules = local_rule_base , re_home_rules = home_rule_base , re_eps_rules = eps_rule_base , re_visible_orphs = orphs }) fn | Just {} <- isDataConId_maybe fn -- Short cut for data constructor workers = [] -- and wrappers, which never have any rules | Just export_flag <- isLocalId_maybe fn = -- LocalIds can't have rules in the local_rule_base (used for imported fns) -- nor external packages; but there can (just) be rules in another module -- in the home package, if it is exported case export_flag of NotExported -> idCoreRules fn Exported -> case get home_rule_base of [] -> idCoreRules fn home_rules -> drop_orphs home_rules ++ idCoreRules fn | otherwise = -- This case expression is a fast path, to avoid calling the -- recursive (++) in the common case where there are no rules at all case (get local_rule_base, get home_rule_base, get eps_rule_base) of ([], [], []) -> idCoreRules fn (local_rules, home_rules, eps_rules) -> local_rules ++ drop_orphs home_rules ++ drop_orphs eps_rules ++ idCoreRules fn where fn_name = idName fn drop_orphs [] = [] -- Fast path; avoid invoking recursive filter drop_orphs xs = filter (ruleIsVisible orphs) xs get rb = lookupNameEnv rb fn_name `orElse` [] ruleIsVisible :: ModuleSet -> CoreRule -> Bool ruleIsVisible _ BuiltinRule{} = True ruleIsVisible vis_orphs Rule { ru_orphan = orph, ru_origin = origin } = notOrphan orph || 
origin `elemModuleSet` vis_orphs {- Note [Where rules are found] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The rules for an Id come from two places: (a) the ones it is born with, stored inside the Id itself (idCoreRules fn), (b) rules added in other modules, stored in the global RuleBase (imp_rules) It's tempting to think that - LocalIds have only (a) - non-LocalIds have only (b) but that isn't quite right: - PrimOps and ClassOps are born with a bunch of rules inside the Id, even when they are imported - The rules in GHC.Core.Opt.ConstantFold.builtinRules should be active even in the module defining the Id (when it's a LocalId), but the rules are kept in the global RuleBase Note [External package rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Note [Overall plumbing for rules], it is explained that the final RuleBase which we must consider is combined from 4 different sources. During simplifier runs, the fourth source of rules is constantly being updated as new interfaces are loaded into the EPS. Therefore just before we check to see if any rules match we get the EPS RuleBase and combine it with the existing RuleBase and then perform exactly 1 lookup into the new map. It is more efficient to avoid combining the environments and store the uncombined environments as we can instead perform 1 lookup into each environment and then combine the results. Essentially we use the identity: > lookupNameEnv n (plusNameEnv_C (++) rb1 rb2) > = lookupNameEnv n rb1 ++ lookupNameEnv n rb2 The latter being more efficient as we don't construct an intermediate map. -} {- ************************************************************************ * * Matching * * ************************************************************************ -} -- | The main rule matching function. Attempts to apply all (active) -- supplied rules to this instance of an application in a given -- context, returning the rule applied and the resulting expression if -- successful. 
lookupRule :: RuleOpts -> InScopeEnv -> (Activation -> Bool) -- When rule is active -> Id -- Function head -> [CoreExpr] -- Args -> [CoreRule] -- Rules -> Maybe (CoreRule, CoreExpr) -- See Note [Extra args in the target] -- See comments on matchRule lookupRule opts rule_env@(ISE in_scope _) is_active fn args rules = -- pprTrace "lookupRule" (ppr fn <+> ppr args $$ ppr rules $$ ppr in_scope) $ case go [] rules of [] -> Nothing (m:ms) -> Just (findBest in_scope (fn,args') m ms) where rough_args = map roughTopName args -- Strip ticks from arguments, see Note [Tick annotations in RULE -- matching]. We only collect ticks if a rule actually matches - -- this matters for performance tests. args' = map (stripTicksTopE tickishFloatable) args ticks = concatMap (stripTicksTopT tickishFloatable) args go :: [(CoreRule,CoreExpr)] -> [CoreRule] -> [(CoreRule,CoreExpr)] go ms [] = ms go ms (r:rs) | Just e <- matchRule opts rule_env is_active fn args' rough_args r = go ((r,mkTicks ticks e):ms) rs | otherwise = -- pprTrace "match failed" (ppr r $$ ppr args $$ -- ppr [ (arg_id, unfoldingTemplate unf) -- | Var arg_id <- args -- , let unf = idUnfolding arg_id -- , isCheapUnfolding unf] ) go ms rs findBest :: InScopeSet -> (Id, [CoreExpr]) -> (CoreRule,CoreExpr) -> [(CoreRule,CoreExpr)] -> (CoreRule,CoreExpr) -- All these pairs matched the expression -- Return the pair the most specific rule -- The (fn,args) is just for overlap reporting findBest _ _ (rule,ans) [] = (rule,ans) findBest in_scope target (rule1,ans1) ((rule2,ans2):prs) | isMoreSpecific in_scope rule1 rule2 = findBest in_scope target (rule1,ans1) prs | isMoreSpecific in_scope rule2 rule1 = findBest in_scope target (rule2,ans2) prs | debugIsOn = let pp_rule rule = ifPprDebug (ppr rule) (doubleQuotes (ftext (ruleName rule))) in pprTrace "Rules.findBest: rule overlap (Rule 1 wins)" (vcat [ whenPprDebug $ text "Expression to match:" <+> ppr fn <+> sep (map ppr args) , text "Rule 1:" <+> pp_rule rule1 , text "Rule 2:" <+> 
pp_rule rule2]) $ findBest in_scope target (rule1,ans1) prs | otherwise = findBest in_scope target (rule1,ans1) prs where (fn,args) = target isMoreSpecific :: InScopeSet -> CoreRule -> CoreRule -> Bool -- The call (rule1 `isMoreSpecific` rule2) -- sees if rule2 can be instantiated to look like rule1 -- See Note [isMoreSpecific] isMoreSpecific _ (BuiltinRule {}) _ = False isMoreSpecific _ (Rule {}) (BuiltinRule {}) = True isMoreSpecific in_scope (Rule { ru_bndrs = bndrs1, ru_args = args1 }) (Rule { ru_bndrs = bndrs2, ru_args = args2 }) = isJust (matchExprs in_scope_env bndrs2 args2 args1) where full_in_scope = in_scope `extendInScopeSetList` bndrs1 in_scope_env = ISE full_in_scope noUnfoldingFun -- noUnfoldingFun: don't expand in templates noBlackList :: Activation -> Bool noBlackList _ = False -- Nothing is black listed {- Note [isMoreSpecific] ~~~~~~~~~~~~~~~~~~~~~~~~ The call (rule1 `isMoreSpecific` rule2) sees if rule2 can be instantiated to look like rule1. Wrinkle: * We take the view that a BuiltinRule is less specific than anything else, because we want user-defined rules to "win" In particular, class ops have a built-in rule, but we prefer any user-specific rules to win: eg (#4397) truncate :: (RealFrac a, Integral b) => a -> b {-# RULES "truncate/Double->Int" truncate = double2Int #-} double2Int :: Double -> Int We want the specific RULE to beat the built-in class-op rule Note [Extra args in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we find a matching rule, we return (Just (rule, rhs)), /but/ the rule firing has only consumed as many of the input args as the ruleArity says. The unused arguments are handled by the code in GHC.Core.Opt.Simplify.tryRules, using the arity of the returned rule. E.g. Rule "foo": forall a b. f p1 p2 = rhs Target: f e1 e2 e3 Then lookupRule returns Just (Rule "foo", rhs), where Rule "foo" has ruleArity 2. 
The real rewrite is f e1 e2 e3 ==> rhs e3 You might think it'd be cleaner for lookupRule to deal with the leftover arguments, by applying 'rhs' to them, but the main call in the Simplifier works better as it is. Reason: the 'args' passed to lookupRule are the result of a lazy substitution Historical note: At one stage I tried to match even if there are more args in the /template/ than the target. I now think this is probably a bad idea. Should the template (map f xs) match (map g)? I think not. For a start, in general eta expansion wastes work. SLPJ July 99 -} ------------------------------------ matchRule :: RuleOpts -> InScopeEnv -> (Activation -> Bool) -> Id -> [CoreExpr] -> [Maybe Name] -> CoreRule -> Maybe CoreExpr -- If (matchRule rule args) returns Just (name,rhs) -- then (f args) matches the rule, and the corresponding -- rewritten RHS is rhs -- -- The returned expression is occurrence-analysed -- -- Example -- -- The rule -- forall f g x. map f (map g x) ==> map (f . g) x -- is stored -- CoreRule "map/map" -- [f,g,x] -- tpl_vars -- [f,map g x] -- tpl_args -- map (f.g) x) -- rhs -- -- Then the expression -- map e1 (map e2 e3) e4 -- results in a call to -- matchRule the_rule [e1,map e2 e3,e4] -- = Just ("map/map", (\f,g,x -> rhs) e1 e2 e3) -- -- NB: The 'surplus' argument e4 in the input is simply dropped. 
-- See Note [Extra args in the target] matchRule opts rule_env _is_active fn args _rough_args (BuiltinRule { ru_try = match_fn }) -- Built-in rules can't be switched off, it seems = case match_fn opts rule_env fn args of Nothing -> Nothing Just expr -> Just expr matchRule _ rule_env is_active _ args rough_args (Rule { ru_name = rule_name, ru_act = act, ru_rough = tpl_tops , ru_bndrs = tpl_vars, ru_args = tpl_args, ru_rhs = rhs }) | not (is_active act) = Nothing | ruleCantMatch tpl_tops rough_args = Nothing | otherwise = matchN rule_env rule_name tpl_vars tpl_args args rhs --------------------------------------- matchN :: InScopeEnv -> RuleName -> [Var] -> [CoreExpr] -> [CoreExpr] -> CoreExpr -- ^ Target; can have more elements than the template -> Maybe CoreExpr -- For a given match template and context, find bindings to wrap around -- the entire result and what should be substituted for each template variable. -- -- Fail if there are too few actual arguments from the target to match the template -- -- See Note [Extra args in the target] -- If there are too /many/ actual arguments, we simply ignore the -- trailing ones, returning the result of applying the rule to a prefix -- of the actual arguments. 
matchN ise _rule_name tmpl_vars tmpl_es target_es rhs = do { (bind_wrapper, matched_es) <- matchExprs ise tmpl_vars tmpl_es target_es ; return (bind_wrapper $ mkLams tmpl_vars rhs `mkApps` matched_es) } matchExprs :: InScopeEnv -> [Var] -> [CoreExpr] -> [CoreExpr] -> Maybe (BindWrapper, [CoreExpr]) -- 1-1 with the [Var] matchExprs (ISE in_scope id_unf) tmpl_vars tmpl_es target_es = do { rule_subst <- match_exprs init_menv emptyRuleSubst tmpl_es target_es ; let (_, matched_es) = mapAccumL (lookup_tmpl rule_subst) (mkEmptySubst in_scope) $ tmpl_vars `zip` tmpl_vars1 ; let bind_wrapper = rs_binds rule_subst -- Floated bindings; see Note [Matching lets] ; return (bind_wrapper, matched_es) } where (init_rn_env, tmpl_vars1) = mapAccumL rnBndrL (mkRnEnv2 in_scope) tmpl_vars -- See Note [Cloning the template binders] init_menv = RV { rv_tmpls = mkVarSet tmpl_vars1 , rv_lcl = init_rn_env , rv_fltR = mkEmptySubst (rnInScopeSet init_rn_env) , rv_unf = id_unf } lookup_tmpl :: RuleSubst -> Subst -> (InVar,OutVar) -> (Subst, CoreExpr) -- Need to return a RuleSubst solely for the benefit of fake_ty lookup_tmpl (RS { rs_tv_subst = tv_subst, rs_id_subst = id_subst }) tcv_subst (tmpl_var, tmpl_var1) | isId tmpl_var1 = case lookupVarEnv id_subst tmpl_var1 of Just e | Coercion co <- e -> (Type.extendCvSubst tcv_subst tmpl_var1 co, Coercion co) | otherwise -> (tcv_subst, e) Nothing | Just refl_co <- isReflCoVar_maybe tmpl_var1 , let co = Coercion.substCo tcv_subst refl_co -> -- See Note [Unbound RULE binders] (Type.extendCvSubst tcv_subst tmpl_var1 co, Coercion co) | otherwise -> unbound tmpl_var | otherwise = (Type.extendTvSubst tcv_subst tmpl_var1 ty', Type ty') where ty' = case lookupVarEnv tv_subst tmpl_var1 of Just ty -> ty Nothing -> fake_ty -- See Note [Unbound RULE binders] fake_ty = anyTypeOfKind (Type.substTy tcv_subst (tyVarKind tmpl_var1)) -- This substitution is the sole reason we accumulate -- TCvSubst in lookup_tmpl unbound tmpl_var = pprPanic "Template variable unbound 
in rewrite rule" $ vcat [ text "Variable:" <+> ppr tmpl_var <+> dcolon <+> ppr (varType tmpl_var) , text "Rule bndrs:" <+> ppr tmpl_vars , text "LHS args:" <+> ppr tmpl_es , text "Actual args:" <+> ppr target_es ] ---------------------- match_exprs :: RuleMatchEnv -> RuleSubst -> [CoreExpr] -- Templates -> [CoreExpr] -- Targets -> Maybe RuleSubst -- If the targets are longer than templates, succeed, simply ignoring -- the leftover targets. This matters in the call in matchN. -- -- Precondition: corresponding elements of es1 and es2 have the same -- type, assuming earlier elements match. -- Example: f :: forall v. v -> blah -- match_exprs [Type a, y::a] [Type Int, 3] -- Then, after matching Type a against Type Int, -- the type of (y::a) matches that of (3::Int) match_exprs _ subst [] _ = Just subst match_exprs renv subst (e1:es1) (e2:es2) = do { subst' <- match renv subst e1 e2 MRefl ; match_exprs renv subst' es1 es2 } match_exprs _ _ _ _ = Nothing {- Note [Unbound RULE binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It can be the case that the binder in a rule is not actually bound on the LHS: * Type variables. Type synonyms with phantom args can give rise to unbound template type variables. Consider this (#10689, simplCore/should_compile/T10689): type Foo a b = b f :: Eq a => a -> Bool f x = x==x {-# RULES "foo" forall (x :: Foo a Char). f x = True #-} finkle = f 'c' The rule looks like forall (a::*) (d::Eq Char) (x :: Foo a Char). f (Foo a Char) d x = True Matching the rule won't bind 'a', and legitimately so. We fudge by pretending that 'a' is bound to (Any :: *). * Coercion variables. On the LHS of a RULE for a local binder we might have RULE forall (c :: a~b). f (x |> c) = e Now, if that binding is inlined, so that a=b=Int, we'd get RULE forall (c :: Int~Int). f (x |> c) = e and now when we simplify the LHS (Simplify.simplRule) we optCoercion (look at the CoVarCo case) will turn that 'c' into Refl: RULE forall (c :: Int~Int). 
f (x |> ) = e and then perhaps drop it altogether. Now 'c' is unbound. It's tricky to be sure this never happens, so instead I say it's OK to have an unbound coercion binder in a RULE provided its type is (c :: t~t). Then, when the RULE fires we can substitute for c. This actually happened (in a RULE for a local function) in #13410, and also in test T10602. Note [Cloning the template binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following match (example 1): Template: forall x. f x Target: f (x+1) This should succeed, because the template variable 'x' has nothing to do with the 'x' in the target. Likewise this one (example 2): Template: forall x. f (\x.x) Target: f (\y.y) We achieve this simply by using rnBndrL to clone the template binders if they are already in scope. ------ Historical note ------- At one point I tried simply adding the template binders to the in-scope set /without/ cloning them, but that failed in a horribly obscure way in #14777. Problem was that during matching we look up target-term variables in the in-scope set (see Note [Lookup in-scope]). If a target-term variable happens to name-clash with a template variable, that lookup will find the template variable, which is /utterly/ bogus. In #14777, this transformed a term variable into a type variable, and then crashed when we wanted its idInfo. ------ End of historical note ------- ************************************************************************ * * The main matcher * * ********************************************************************* -} data RuleMatchEnv = RV { rv_lcl :: RnEnv2 -- Renamings for *local bindings* -- (lambda/case) , rv_tmpls :: VarSet -- Template variables -- (after applying envL of rv_lcl) , rv_fltR :: Subst -- Renamings for floated let-bindings -- (domain disjoint from envR of rv_lcl) -- See Note [Matching lets] -- N.B. The InScopeSet of rv_fltR is always ignored; -- see (4) in Note [Matching lets]. 
, rv_unf :: IdUnfoldingFun } {- Note [rv_lcl in RuleMatchEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider matching Template: \x->f Target: \f->f where 'f' is free in the template. When we meet the lambdas we must remember to rename f :-> f' in the target, as well as x :-> f in the template. The rv_lcl::RnEnv2 does that. Similarly, consider matching Template: {a} \b->b Target: \a->3 We must rename the \a. Otherwise when we meet the lambdas we might substitute [b :-> a] in the template, and then erroneously succeed in matching what looks like the template variable 'a' against 3. So we must add the template vars to the in-scope set before starting; see `init_menv` in `matchN`. -} -- * The domain of the TvSubstEnv and IdSubstEnv are the template -- variables passed into the match. -- -- * The BindWrapper in a RuleSubst are the bindings floated out -- from nested matches; see the Let case of match, below -- data RuleSubst = RS { -- Substitution; applied only to the template, not the target -- Domain is the template variables -- Range never includes template variables rs_tv_subst :: TvSubstEnv , rs_id_subst :: IdSubstEnv -- Floated bindings , rs_binds :: BindWrapper -- Floated bindings , rs_bndrs :: [Var] -- Variables bound by floated lets } type BindWrapper = CoreExpr -> CoreExpr -- See Notes [Matching lets] and [Matching cases] -- we represent the floated bindings as a core-to-core function emptyRuleSubst :: RuleSubst emptyRuleSubst = RS { rs_tv_subst = emptyVarEnv, rs_id_subst = emptyVarEnv , rs_binds = \e -> e, rs_bndrs = [] } {- Note [Casts in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As far as possible we don't want casts in the target to get in the way of matching. E.g. * (let bind in e) |> co * (case e of alts) |> co * (\ a b. f a b) |> co In the first two cases we want to float the cast inwards so we can match on the let/case. This is not important in practice because the Simplifier does this anyway. 
But the third case /is/ important: we don't want the cast to get in the way of eta-reduction. See Note [Cancel reflexive casts] for a real life example. The most convenient thing is to make 'match' take an MCoercion argument, thus: * The main matching function match env subst template target mco matches template ~ (target |> mco) * Invariant: typeof( subst(template) ) = typeof( target |> mco ) Note that for applications (e1 e2) ~ (d1 d2) |> co where 'co' is non-reflexive, we simply fail. You might wonder about (e1 e2) ~ ((d1 |> co1) d2) |> co2 but the Simplifier pushes the casts in an application to the right, if it can, so this doesn't really arise. Note [Casts in the template] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This Note concerns `matchTemplateCast`. Consider the definition f x = e, and SpecConstr on call pattern f ((e1,e2) |> co) The danger is that we'll make a RULE RULE forall a,b,g. f ((a,b)|> g) = $sf a b g $sf a b g = e[ ((a,b)|> g) / x ] This requires the rule-matcher to bind the coercion variable `g`. That is Very Deeply Suspicious: * It would be unreasonable to match on a structured coercion in a pattern, such as RULE forall g. f (x |> Sym g) = ... because the structure of a coercion is arbitrary and may change -- it's their /type/ that matters. * We considered insisting that in a template, in a cast (e |> co), the cast `co` is always a /variable/ cv. That looks a bit more plausible, but #23209 (and related tickets) shows that it's very fragile. For example suppose `e` is a variable `f`, and the simplifier has an unconditional substitution [f :-> g |> co2] Now the rule LHS becomes (f |> (co2 ; cv)); not a coercion variable any more! In short, it is Very Deeply Suspicious for a rule to quantify over a coercion variable. And SpecConstr no longer does so: see Note [SpecConstr and casts] in SpecConstr. It is, however, OK for a cast to appear in a template. 
For example newtype N a = MkN (a,a) -- Axiom ax:N a :: (a,a) ~R N a f :: N a -> bah RULE forall b x:b y:b. f @b ((x,y) |> (axN @b)) = ... When matching we can just move these casts to the other side: match (tmpl |> co) tgt --> match tmpl (tgt |> sym co) See matchTemplateCast. Wrinkles: (CT1) We need to be careful about scoping, and to match left-to-right, so that we know the substitution [a :-> b] before we meet (co :: (a,a) ~R N a), and so we can apply that substitution (CT2) Annoyingly, we still want to support one case in which the RULE quantifies over a coercion variable: the dreaded map/coerce RULE. See Note [Getting the map/coerce RULE to work] in GHC.Core.SimpleOpt. Since that can happen, matchTemplateCast laboriously checks whether the coercion mentions a template coercion variable; and if so does the Very Deeply Suspicious `match_co` instead. It works fine for map/coerce, where the coercion is always a variable and will (robustly) remain so. See also * Note [Coercion arguments] * Note [Matching coercion variables] in GHC.Core.Unify. * Note [Cast swizzling on rule LHSs] in GHC.Core.Opt.Simplify.Utils: sm_cast_swizzle is switched off in the template of a RULE Note [Coercion arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~ What if we have (f (Coercion co)) in the template, where the 'co' is a coercion argument to f? Right now we have nothing in place to ensure that a coercion /argument/ in the template is a variable. We really should, perhaps by abstracting over that variable. C.f. the treatment of dictionaries in GHC.HsToCore.Binds.decomposeRuleLhs. For now, though, we simply behave badly, by failing in match_co. We really should never rely on matching the structure of a coercion (which is just a proof). 
-} ---------------------- match :: RuleMatchEnv -> RuleSubst -- Substitution applies to template only -> CoreExpr -- Template -> CoreExpr -- Target -> MCoercion -> Maybe RuleSubst -- Postcondition (TypeInv): if matching succeeds, then -- typeof( subst(template) ) = typeof( target |> mco ) -- But this is /not/ a pre-condition! The types of template and target -- may differ, see the (App e1 e2) case -- -- Invariant (CoInv): if mco :: ty ~ ty, then it is MRefl, not MCo co -- See Note [Cancel reflexive casts] -- -- See the notes with Unify.match, which matches types -- Everything is very similar for terms ------------------------ Ticks --------------------- -- We look through certain ticks. See Note [Tick annotations in RULE matching] match renv subst e1 (Tick t e2) mco | tickishFloatable t = match renv subst' e1 e2 mco | otherwise = Nothing where subst' = subst { rs_binds = rs_binds subst . mkTick t } match renv subst e@(Tick t e1) e2 mco | tickishFloatable t -- Ignore floatable ticks in rule template. = match renv subst e1 e2 mco | otherwise = pprPanic "Tick in rule" (ppr e) ------------------------ Types --------------------- match renv subst (Type ty1) (Type ty2) _mco = match_ty renv subst ty1 ty2 ------------------------ Coercions --------------------- -- See Note [Coercion arguments] for why this isn't really right match renv subst (Coercion co1) (Coercion co2) MRefl = match_co renv subst co1 co2 -- The MCo case corresponds to matching co ~ (co2 |> co3) -- and I have no idea what to do there -- or even if it can occur -- Failing seems the simplest thing to do; it's certainly safe. 
------------------------ Casts --------------------- -- See Note [Casts in the template] -- Note [Casts in the target] -- Note [Cancel reflexive casts] match renv subst e1 (Cast e2 co2) mco = match renv subst e1 e2 (checkReflexiveMCo (mkTransMCoR co2 mco)) -- checkReflexiveMCo: cancel casts if possible -- This is important: see Note [Cancel reflexive casts] match renv subst (Cast e1 co1) e2 mco = matchTemplateCast renv subst e1 co1 e2 mco ------------------------ Literals --------------------- match _ subst (Lit lit1) (Lit lit2) mco | lit1 == lit2 = assertPpr (isReflMCo mco) (ppr mco) $ Just subst ------------------------ Variables --------------------- -- The Var case follows closely what happens in GHC.Core.Unify.match match renv subst (Var v1) e2 mco = match_var renv subst v1 (mkCastMCo e2 mco) match renv subst e1 (Var v2) mco -- Note [Expanding variables] | not (inRnEnvR rn_env v2) -- Note [Do not expand locally-bound variables] , Just e2' <- expandUnfolding_maybe (rv_unf renv v2') = match (renv { rv_lcl = nukeRnEnvR rn_env }) subst e1 e2' mco where v2' = lookupRnInScope rn_env v2 rn_env = rv_lcl renv -- Notice that we look up v2 in the in-scope set -- See Note [Lookup in-scope] -- No need to apply any renaming first (hence no rnOccR) -- because of the not-inRnEnvR ------------------------ Applications --------------------- -- See Note [Matching higher order patterns] match renv@(RV { rv_tmpls = tmpls, rv_lcl = rn_env }) subst e1@App{} e2 MRefl -- Like the App case we insist on Refl here -- See Note [Casts in the target] | (Var f, args) <- collectArgs e1 , let f' = rnOccL rn_env f -- See similar rnOccL in match_var , f' `elemVarSet` tmpls -- (HOP1) , Just vs2 <- traverse arg_as_lcl_var args -- (HOP2), (HOP3) , hasNoDups vs2 -- (HOP4) , not can_decompose_app_instead = match_tmpl_var renv subst f' (mkCoreLams vs2 e2) -- match_tmpl_var checks (HOP5) and (HOP6) where arg_as_lcl_var :: CoreExpr -> Maybe Var arg_as_lcl_var (Var v) | Just v' <- rnOccL_maybe rn_env v , 
not (v' `elemVarSet` tmpls) -- rnEnvL contains the template variables = Just (to_target v') -- to_target: see (W1) -- in Note [Matching higher order patterns] arg_as_lcl_var _ = Nothing can_decompose_app_instead -- Template (e1 v), target (e2 v), and v # fvs(e2) = case (e1, e2) of -- See (W2) in Note [Matching higher order patterns] (App _ (Var v1), App f2 (Var v2)) -> rnOccL rn_env v1 == rnOccR rn_env v2 && not (v2 `elemVarSet` exprFreeVars f2) _ -> False ---------------- -- to_target: see (W1) in Note [Matching higher order patterns] to_target :: Var -> Var -- From canonical variable back to target-expr variable to_target v = lookupVarEnv rev_envR v `orElse` v rev_envR :: VarEnv Var -- Inverts rnEnvR: from canonical variable -- back to target-expr variable rev_envR = nonDetStrictFoldVarEnv_Directly add_one emptyVarEnv (rnEnvR rn_env) add_one uniq var env = extendVarEnv env var (var `setVarUnique` uniq) {- Note [Matching higher order patterns] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Higher order patterns provide a limited form of higher order matching. See GHC Proposal #555 https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0555-template-patterns.rst and #22465 for more details and related work. Consider the potential match: Template: forall f. foo (\x -> f x) Target: foo (\x -> x*2 + x) The expression `x*2 + x` in the target is not literally an application of a function to the variable `x`, so the simple application rule does not apply. However, we can match them modulo beta equivalence with the substitution: [f :-> \x -> x*2 + x] The general problem of higher order matching is tricky to implement, but the subproblem which we call /higher order pattern matching/ is sufficient for the given example and much easier to implement. Design: We start with terminology. * /Template variables/. The forall'd variables are called the template variables. In the example match above, `f` is a template variable. * /Local binders/. 
The local binders of a rule are the variables bound inside the template. In the example match above, `x` is a local binder. Note that local binders can be term variables and type variables. A /higher order pattern/ (HOP) is a sub-expression of the template, of form (f x y z) where: * (HOP1) f is a template variable * (HOP2) x, y, z are local binders (like y in rule "wombat" above; see definitions). * (HOP3) The arguments x, y, z are term variables * (HOP4) The arguments x, y, z are distinct (no duplicates) Matching of higher order patterns (HOP-matching). A higher order pattern (f x y z) (in the template) matches any target expression e provided: * (HOP5) The target has the same type as the template * (HOP6) No local binder is free in e, other than x, y, z. If these two condition hold, the higher order pattern (f x y z) matches the target expression e, yielding the substitution [f :-> \x y z. e]. Notice that this substitution is type preserving, and the RHS of the substitution has no free local binders. HOP matching is small enough to be done in-line in the `match` function. Two wrinkles: (W1) Consider the potential match: Template: forall f. foo (\x -> f x) Target: foo (\y -> (y, y)) During matching we make `x` the canonical variable for the lambdas and then we see: Template: f x rnEnvL = [] Target: (y, y) rnEnvR = [y :-> x] We could bind [f :-> \x. (x,x)], by applying rnEnvR substitution to the target expression. But that is tiresome (a) because it involves a traversal, and (b) because rnEnvR is a VarEnv Var, and we don't have a substitution function for that. So instead, we invert rnEnvR, and apply it to the binders, to get [f :-> \y. (y,y)]. This is done by `to_target` in the HOP-matching case. It takes a little bit of thinking to be sure this will work right in the case of shadowing. E.g. Template (\x y. f x y) Target (\p p. p*p) Here rnEnvR will be just [p :-> y], so after inversion we'll get [f :-> \x p. p*p] but that is fine. 
(W2) This wrinkle concerns the overlap between the new HOP rule and the existing decompose-application rule. See 3.1 of GHC Proposal #555 for a discussion. Consider potential match: Template: forall f. foo (\x y. Just (f y x)) Target: foo (\p q. Just (h (1+q) p))) During matching we will encounter: Template: f x y Target: h (1+q) p rnEnvR = [p:->x, q:->y] The rnEnvR renaming `[p:->x, q:->y]` is done by the matcher (today) on the fly, to make the bound variables of the template and target "line up". But now we can: * Either use the new HOP rule to succeed with [f :-> \x y. h (1+x) y] * Or use the existing decompose-application rule to match (f x) against (h (1+q)) and `y` against `p`. This will succeed with [f :-> \y. h (1+y)] Note that the result of the HOP rule will always be eta-equivalent to the result of the decompose-application rule. But the proposal specifies that we should use the decompose-application rule because it involves less eta-expansion. But take care: Template: forall f. foo (\x y. Just (f y x)) Target: foo (\p q. Just (h (p+q) p))) Then during matching we will encounter: Template: f x y Target: h (p+q) p rnEnvR = [p:->x, q:->y] Now, we cannot use the decompose-application rule, because p is free in (h (p+q)). So, we can only use the new HOP rule. (W3) You might wonder if a HOP can have /type/ arguments, thus (in Core) RULE forall h. f (\(MkT @b (d::Num b) (x::b)) -> h @b d x) = ... where the HOP is (h @b d x). In principle this might be possible, but it seems fragile; e.g. we would still need to insist that the (invisible) @b was a type variable. And since `h` gets a polymoprhic type, that type would have to be declared by the programmer. Maybe one day. But for now, we insist (in `arg_as_lcl_var`)that a HOP has only term-variable arguments. -} -- Note the match on MRefl! 
We fail if there is a cast in the target -- (e1 e2) ~ (d1 d2) |> co -- See Note [Cancel reflexive casts]: in the Cast equations for 'match' -- we aggressively ensure that if MCo is reflective, it really is MRefl. match renv subst (App f1 a1) (App f2 a2) MRefl = do { subst' <- match renv subst f1 f2 MRefl ; match renv subst' a1 a2 MRefl } ------------------------ Float lets --------------------- match renv subst e1 (Let bind e2) mco | -- pprTrace "match:Let" (vcat [ppr bind, ppr $ okToFloat (rv_lcl renv) (bindFreeVars bind)]) $ not (isJoinBind bind) -- can't float join point out of argument position , okToFloat (rv_lcl renv) (bindFreeVars bind) -- See Note [Matching lets] = match (renv { rv_fltR = flt_subst' , rv_lcl = rv_lcl renv `extendRnInScopeSetList` new_bndrs }) -- We are floating the let-binding out, as if it had enclosed -- the entire target from Day 1. So we must add its binders to -- the in-scope set (#20200) (subst { rs_binds = rs_binds subst . Let bind' , rs_bndrs = new_bndrs ++ rs_bndrs subst }) e1 e2 mco | otherwise = Nothing where in_scope = rnInScopeSet (rv_lcl renv) `extendInScopeSetList` rs_bndrs subst -- in_scope: see (4) in Note [Matching lets] flt_subst = rv_fltR renv `setInScope` in_scope (flt_subst', bind') = substBind flt_subst bind new_bndrs = bindersOf bind' ------------------------ Lambdas --------------------- match renv subst (Lam x1 e1) e2 mco | let casted_e2 = mkCastMCo e2 mco in_scope = extendInScopeSetSet (rnInScopeSet (rv_lcl renv)) (exprFreeVars casted_e2) in_scope_env = ISE in_scope (rv_unf renv) -- extendInScopeSetSet: The InScopeSet of rn_env is not necessarily -- a superset of the free vars of e2; it is only guaranteed a superset of -- applying the (rnEnvR rn_env) substitution to e2. But exprIsLambda_maybe -- wants an in-scope set that includes all the free vars of its argument. 
-- Hence adding adding (exprFreeVars casted_e2) to the in-scope set (#23630) , Just (x2, e2', ts) <- exprIsLambda_maybe in_scope_env casted_e2 -- See Note [Lambdas in the template] = let renv' = rnMatchBndr2 renv x1 x2 subst' = subst { rs_binds = rs_binds subst . flip (foldr mkTick) ts } in match renv' subst' e1 e2' MRefl match renv subst e1 e2@(Lam {}) mco | Just (renv', e2') <- eta_reduce renv e2 -- See Note [Eta reduction in the target] = match renv' subst e1 e2' mco {- Note [Lambdas in the template] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we match Template: (\x. blah_template) Target: (\y. blah_target) then we want to match inside the lambdas, using rv_lcl to match up x and y. But what about this? Template (\x. (blah1 |> cv)) Target (\y. blah2) |> co This happens quite readily, because the Simplifier generally moves casts outside lambdas: see Note [Casts and lambdas] in GHC.Core.Opt.Simplify.Utils. So, tiresomely, we want to push `co` back inside, which is what `exprIsLambda_maybe` does. But we've stripped off that cast, so now we need to put it back, hence mkCastMCo. Unlike the target, where we attempt eta-reduction, we do not attempt to eta-reduce the template, and may therefore fail on Template: \x. f True x Target f True It's not especially easy to deal with eta reducing the template, and never happens, because no one write eta-expanded left-hand-sides. -} ------------------------ Case expression --------------------- {- Disabled: see Note [Matching cases] below match renv (tv_subst, id_subst, binds) e1 (Case scrut case_bndr ty [(con, alt_bndrs, rhs)]) | exprOkForSpeculation scrut -- See Note [Matching cases] , okToFloat rn_env bndrs (exprFreeVars scrut) = match (renv { me_env = rn_env' }) (tv_subst, id_subst, binds . 
case_wrap) e1 rhs where rn_env = me_env renv rn_env' = extendRnInScopeList rn_env bndrs bndrs = case_bndr : alt_bndrs case_wrap rhs' = Case scrut case_bndr ty [(con, alt_bndrs, rhs')] -} match renv subst (Case e1 x1 ty1 alts1) (Case e2 x2 ty2 alts2) mco = do { subst1 <- match_ty renv subst ty1 ty2 ; subst2 <- match renv subst1 e1 e2 MRefl ; let renv' = rnMatchBndr2 renv x1 x2 ; match_alts renv' subst2 alts1 alts2 mco -- Alts are both sorted } -- Everything else fails match _ _ _e1 _e2 _mco = -- pprTrace "Failing at" ((text "e1:" <+> ppr _e1) $$ (text "e2:" <+> ppr _e2)) $ Nothing ------------- eta_reduce :: RuleMatchEnv -> CoreExpr -> Maybe (RuleMatchEnv, CoreExpr) -- See Note [Eta reduction in the target] eta_reduce renv e@(Lam {}) = go renv id [] e where go :: RuleMatchEnv -> BindWrapper -> [Var] -> CoreExpr -> Maybe (RuleMatchEnv, CoreExpr) go renv bw vs (Let b e) = go renv (bw . Let b) vs e go renv bw vs (Lam v e) = go renv' bw (v':vs) e where (rn_env', v') = rnBndrR (rv_lcl renv) v renv' = renv { rv_lcl = rn_env' } go renv bw (v:vs) (App f arg) | Var a <- arg, v == rnOccR (rv_lcl renv) a = go renv bw vs f | Type ty <- arg, Just tv <- getTyVar_maybe ty , v == rnOccR (rv_lcl renv) tv = go renv bw vs f go renv bw [] e = Just (renv, bw e) go _ _ (_:_) _ = Nothing eta_reduce _ _ = Nothing {- Note [Eta reduction in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we are faced with this (#19790) Template {x} f x Target (\a b c. let blah in f x a b c) You might wonder why we have an eta-expanded target (see first subtle point below), but regardless of how it came about, we'd like eta-expansion not to impede matching. So eta_reduce does on-the-fly eta-reduction of the target expression. Given (\a b c. let blah in e a b c), it returns (let blah in e). Subtle points: * Consider a target: \x. f x In the main eta-reducer we do not eta-reduce this, because doing so might reduce the arity of the expression (from 1 to zero, because of ). 
But for rule-matching we /do/ want to match template (f a) against target (\x. f x), with a := This is a compelling reason for not relying on the Simplifier's eta-reducer. * The Lam case of eta_reduce renames as it goes. Consider (\x. \x. f x x). We should not eta-reduce this. As we go we rename the first x to x1, and the second to x2; then both argument x's are x2. * eta_reduce does /not/ need to check that the bindings 'blah' and expression 'e' don't mention a b c; but it /does/ extend the rv_lcl RnEnv2 (see rn_bndr in eta_reduce). * If 'blah' mentions the binders, the let-float rule won't fire; and * if 'e' mentions the binders we'll also fail to match e.g. because of the exprFreeVars test in match_tmpl_var. Example: Template: {x} f a -- Some top-level 'a' Target: (\a b. f a a b) -- The \a shadows top level 'a' Then eta_reduce will /succeed/, with (rnEnvR = [a :-> a'], f a) The returned RnEnv will map [a :-> a'], where a' is fresh. (There is no need to rename 'b' because (in this example) it is not in scope. So it's as if we'd returned (f a') from eta_reduce; the renaming applied to the target is simply deferred. Note [Cancel reflexive casts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is an example (from #19790) which we want to catch (f x) ~ (\a b. (f x |> co) a b) |> sym co where f :: Int -> Stream co :: Stream ~ T1 -> T2 -> T3 when we eta-reduce (\a b. blah a b) to 'blah', we'll get (f x) ~ (f x) |> co |> sym co and we really want to spot that the co/sym-co cancels out. Hence * We keep an invariant that the MCoercion is always MRefl if the MCoercion is reflexive * We maintain this invariant via the call to checkReflexiveMCo in the Cast case of 'match'. 
-} ------------- matchTemplateCast :: RuleMatchEnv -> RuleSubst -> CoreExpr -> Coercion -> CoreExpr -> MCoercion -> Maybe RuleSubst matchTemplateCast renv subst e1 co1 e2 mco | isEmptyVarSet $ fvVarSet $ filterFV (`elemVarSet` rv_tmpls renv) $ -- Check that the coercion does not tyCoFVsOfCo substed_co -- mention any of the template variables = -- This is the good path -- See Note [Casts in the template] match renv subst e1 e2 (checkReflexiveMCo (mkTransMCoL mco (mkSymCo substed_co))) | otherwise = -- This is the Deeply Suspicious Path do { let co2 = case mco of MRefl -> mkRepReflCo (exprType e2) MCo co2 -> co2 ; subst1 <- match_co renv subst co1 co2 -- If match_co succeeds, then (exprType e1) = (exprType e2) -- Hence the MRefl in the next line ; match renv subst1 e1 e2 MRefl } where substed_co = substCo current_subst co1 current_subst :: Subst current_subst = mkTCvSubst (rnInScopeSet (rv_lcl renv)) (rs_tv_subst subst) emptyCvSubstEnv -- emptyCvSubstEnv: ugh! -- If there were any CoVar substitutions they would be in -- rs_id_subst; but we don't expect there to be any; see -- Note [Casts in the template] match_co :: RuleMatchEnv -> RuleSubst -> Coercion -> Coercion -> Maybe RuleSubst -- We only match if the template is a coercion variable or Refl: -- see Note [Casts in the template] -- Like 'match' it is /not/ guaranteed that -- coercionKind template = coercionKind target -- But if match_co succeeds, it /is/ guaranteed that -- coercionKind (subst template) = coercionKind target match_co renv subst co1 co2 | Just cv <- getCoVar_maybe co1 = match_var renv subst cv (Coercion co2) | Just (ty1, r1) <- isReflCo_maybe co1 = do { (ty2, r2) <- isReflCo_maybe co2 ; guard (r1 == r2) ; match_ty renv subst ty1 ty2 } | debugIsOn = pprTrace "match_co: needs more cases" (ppr co1 $$ ppr co2) Nothing -- Currently just deals with CoVarCo and Refl | otherwise = Nothing ------------- rnMatchBndr2 :: RuleMatchEnv -> Var -> Var -> RuleMatchEnv rnMatchBndr2 renv x1 x2 = renv { rv_lcl = 
rnBndr2 (rv_lcl renv) x1 x2 , rv_fltR = delBndr (rv_fltR renv) x2 } ------------------------------------------ match_alts :: RuleMatchEnv -> RuleSubst -> [CoreAlt] -- Template -> [CoreAlt] -> MCoercion -- Target -> Maybe RuleSubst match_alts _ subst [] [] _ = return subst match_alts renv subst (Alt c1 vs1 r1:alts1) (Alt c2 vs2 r2:alts2) mco | c1 == c2 = do { subst1 <- match renv' subst r1 r2 mco ; match_alts renv subst1 alts1 alts2 mco } where renv' = foldl' mb renv (vs1 `zip` vs2) mb renv (v1,v2) = rnMatchBndr2 renv v1 v2 match_alts _ _ _ _ _ = Nothing ------------------------------------------ okToFloat :: RnEnv2 -> VarSet -> Bool okToFloat rn_env bind_fvs = allVarSet not_captured bind_fvs where not_captured fv = not (inRnEnvR rn_env fv) ------------------------------------------ match_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target -> Maybe RuleSubst match_var renv@(RV { rv_tmpls = tmpls, rv_lcl = rn_env, rv_fltR = flt_env }) subst v1 e2 | v1' `elemVarSet` tmpls = match_tmpl_var renv subst v1' e2 | otherwise -- v1' is not a template variable; check for an exact match with e2 = case e2 of -- Remember, envR of rn_env is disjoint from rv_fltR Var v2 | Just v2' <- rnOccR_maybe rn_env v2 -> -- v2 was bound by a nested lambda or case if v1' == v2' then Just subst else Nothing -- v2 is not bound nestedly; it is free -- in the whole expression being matched -- So it will be in the InScopeSet for flt_env (#20200) | Var v2' <- lookupIdSubst flt_env v2 , v1' == v2' -> Just subst | otherwise -> Nothing _ -> Nothing where v1' = rnOccL rn_env v1 -- If the template is -- forall x. f x (\x -> x) = ... -- Then the x inside the lambda isn't the -- template x, so we must rename first! 
------------------------------------------ match_tmpl_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target -> Maybe RuleSubst match_tmpl_var renv@(RV { rv_lcl = rn_env, rv_fltR = flt_env }) subst@(RS { rs_id_subst = id_subst, rs_bndrs = let_bndrs }) v1' e2 -- anyInRnEnvR is lazy in the 2nd arg which allows us to avoid computing fvs -- if the right side of the env is empty. | anyInRnEnvR rn_env (exprFreeVars e2) = Nothing -- Skolem-escape failure -- e.g. match forall a. (\x -> a) against (\y -> y) | Just e1' <- lookupVarEnv id_subst v1' = if eqCoreExpr e1' e2' then Just subst else Nothing | otherwise -- See Note [Matching variable types] = do { subst' <- match_ty renv subst (idType v1') (exprType e2) ; return (subst' { rs_id_subst = id_subst' }) } where -- e2' is the result of applying flt_env to e2 e2' | null let_bndrs = e2 | otherwise = substExpr flt_env e2 id_subst' = extendVarEnv (rs_id_subst subst) v1' e2' -- No further renaming to do on e2', -- because no free var of e2' is in the rnEnvR of the envt ------------------------------------------ match_ty :: RuleMatchEnv -> RuleSubst -> Type -- Template -> Type -- Target -> Maybe RuleSubst -- Matching Core types: use the matcher in GHC.Tc.Utils.TcType. -- Notice that we treat newtypes as opaque. For example, suppose -- we have a specialised version of a function at a newtype, say -- newtype T = MkT Int -- We only want to replace (f T) with f', not (f Int). match_ty (RV { rv_tmpls = tmpls, rv_lcl = rn_env }) subst@(RS { rs_tv_subst = tv_subst }) ty1 ty2 = do { tv_subst' <- Unify.ruleMatchTyKiX tmpls rn_env tv_subst ty1 ty2 -- NB: ruleMatchTyKiX applis tv_subst to ty1 only -- and of course only binds 'tmpls' ; return (subst { rs_tv_subst = tv_subst' }) } {- Note [Matching variable types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When matching x ~ e, where 'x' is a template variable, we must check that x's type matches e's type, to establish (TypeInv). For example forall (c::Char->Int) (x::Char). 
  f (c x) = "RULE FIRED"
We must not match on, say (f (pred (3::Int))).

It's actually quite difficult to come up with an example that shows you need
type matching, esp since matching is left-to-right, so type args get matched
first.  But it's possible (e.g. simplrun008) and this is the Right Thing to do.

An alternative would be to make (TypeInv) into a /pre-condition/.  It is
threatened only by the App rule.  So the alternative, when matching an
application (e1 e2) ~ (d1 d2), would be to collect args of the application
chain, match the types of the head, then match arg-by-arg.

However that alternative seems a bit more complicated.  And by matching
types at variables we do one match_ty for each template variable, rather
than one for each application chain.  Usually there are fewer template
variables, although for simple rules it could be the other way around.

Note [Expanding variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is another Very Important rule: if the term being matched is a
variable, we expand it so long as its unfolding is "expandable". (Its
occurrence information is not necessarily up to date, so we don't use
it.)  By "expandable" we mean a WHNF or a "constructor-like" application.
This is the key reason for "constructor-like" Ids.  If we have
     {-# NOINLINE [1] CONLIKE g #-}
     {-# RULE f (g x) = h x #-}
then in the term
   let v = g 3 in ....(f v)....
we want to make the rule fire, to replace (f v) with (h 3).

Note [Do not expand locally-bound variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Do *not* expand locally-bound variables, else there's a worry that the
unfolding might mention variables that are themselves renamed.
Example
          case x of y { (p,q) -> ...y... }
Don't expand 'y' to (p,q) because p,q might themselves have been renamed.
Essentially we only expand unfoldings that are "outside" the entire match.

Hence, (a) the guard (not (isLocallyBoundR v2))
       (b) when we expand we nuke the renaming envt (nukeRnEnvR).
Note [Tick annotations in RULE matching]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to unconditionally look through ticks in both template and
expression being matched. This is actually illegal for counting or
cost-centre-scoped ticks, because we have no place to put them without
changing entry counts and/or costs. So now we just fail the match in
these cases.

On the other hand, where we are allowed to insert new cost into the
tick scope, we can float them upwards to the rule application site.

Moreover, we may encounter ticks in the template of a rule. There are a few
ways in which these may be introduced (e.g. #18162, #17619). Such ticks are
ignored by the matcher. See Note [Simplifying rules] in
GHC.Core.Opt.Simplify.Utils for details.

cf Note [Tick annotations in call patterns] in GHC.Core.Opt.SpecConstr

Note [Matching lets]
~~~~~~~~~~~~~~~~~~~~
Matching a let-expression.  Consider
        RULE forall x.  f (g x) = <rhs>
and target expression
        f (let { w=R } in g E))
Then we'd like the rule to match, to generate
        let { w=R } in (\x. <rhs>) E
In effect, we want to float the let-binding outward, to enable
the match to happen.  This is the WHOLE REASON for accumulating
bindings in the RuleSubst

We can only do this if the free variables of R are not bound by the
part of the target expression outside the let binding; e.g.
        f (\v. let w = v+1 in g E)
Here we obviously cannot float the let-binding for w.  Hence the
use of okToFloat.

There are a couple of tricky points:
  (a) What if floating the binding captures a variable that is
      free in the entire expression?
        f (let v = x+1 in v) v
        --> NOT!
        let v = x+1 in f (x+1) v

  (b) What if the let shadows a local binding?
        f (\v -> (v, let v = x+1 in (v,v)))
        --> NOT!
        let v = x+1 in f (\v -> (v, (v,v)))

  (c) What if two non-nested let bindings bind the same variable?
        f (let v = e1 in b1) (let v = e2 in b2)
        --> NOT!
        let v = e1 in let v = e2 in (f b2 b2)
      See testsuite test `T4814`.
Our cunning plan is this: (1) Along with the growing substitution for template variables we maintain a growing set of floated let-bindings (rs_binds) plus the set of variables thus bound (rs_bndrs). (2) The RnEnv2 in the MatchEnv binds only the local binders in the term (lambdas, case), not the floated let-bndrs. (3) When we encounter a `let` in the term to be matched, in the Let case of `match`, we use `okToFloat` to check that it does not mention any locally bound (lambda, case) variables. If so we fail. (4) In the Let case of `match`, we use GHC.Core.Subst.substBind to freshen the binding (which, remember (3), mentions no locally bound variables), in a lexically-scoped way (via rv_fltR in MatchEnv). The subtle point is that we want an in-scope set for this substitution that includes /two/ sets: * The in-scope variables at this point, so that we avoid using those local names for the floated binding; points (a) and (b) above. * All "earlier" floated bindings, so that we avoid using the same name for two different floated bindings; point (c) above. Because we have to compute the in-scope set here, the in-scope set stored in `rv_fltR` is always ignored; we leave it only because it's convenient to have `rv_fltR :: Subst` (with an always-ignored `InScopeSet`) rather than storing three separate substitutions. (5) We apply that freshening substitution, in a lexically-scoped way to the term, although lazily; this is the rv_fltR field. See #4814, which is an issue resulting from getting this wrong. Note [Matching cases] ~~~~~~~~~~~~~~~~~~~~~ {- NOTE: This idea is currently disabled. It really only works if the primops involved are OkForSpeculation, and, since they have side effects readIntOfAddr and touch are not. Maybe we'll get back to this later . -} Consider f (case readIntOffAddr# p# i# realWorld# of { (# s#, n# #) -> case touch# fp s# of { _ -> I# n# } } ) This happened in a tight loop generated by stream fusion that Roman encountered. 
We'd like to treat this just like the let case, because the primops concerned are ok-for-speculation. That is, we'd like to behave as if it had been case readIntOffAddr# p# i# realWorld# of { (# s#, n# #) -> case touch# fp s# of { _ -> f (I# n# } } ) Note [Lookup in-scope] ~~~~~~~~~~~~~~~~~~~~~~ Consider this example foo :: Int -> Maybe Int -> Int foo 0 (Just n) = n foo m (Just n) = foo (m-n) (Just n) SpecConstr sees this fragment: case w_smT of wild_Xf [Just A] { Data.Maybe.Nothing -> lvl_smf; Data.Maybe.Just n_acT [Just S(L)] -> case n_acT of wild1_ams [Just A] { GHC.Base.I# y_amr [Just L] -> $wfoo_smW (GHC.Prim.-# ds_Xmb y_amr) wild_Xf }}; and correctly generates the rule RULES: "SC:$wfoo1" [0] __forall {y_amr [Just L] :: GHC.Prim.Int# sc_snn :: GHC.Prim.Int#} $wfoo_smW sc_snn (Data.Maybe.Just @ GHC.Base.Int (GHC.Base.I# y_amr)) = $s$wfoo_sno y_amr sc_snn ;] BUT we must ensure that this rule matches in the original function! Note that the call to $wfoo is $wfoo_smW (GHC.Prim.-# ds_Xmb y_amr) wild_Xf During matching we expand wild_Xf to (Just n_acT). But then we must also expand n_acT to (I# y_amr). And we can only do that if we look up n_acT in the in-scope set, because in wild_Xf's unfolding it won't have an unfolding at all. That is why the 'lookupRnInScope' call in the (Var v2) case of 'match' is so important. ************************************************************************ * * Rule-check the program * * ************************************************************************ We want to know what sites have rules that could have fired but didn't. This pass runs over the tree (without changing it) and reports such. 
-} -- | Report partial matches for rules beginning with the specified -- string for the purposes of error reporting ruleCheckProgram :: RuleOpts -- ^ Rule options -> CompilerPhase -- ^ Rule activation test -> String -- ^ Rule pattern -> (Id -> [CoreRule]) -- ^ Rules for an Id -> CoreProgram -- ^ Bindings to check in -> SDoc -- ^ Resulting check message ruleCheckProgram ropts phase rule_pat rules binds | isEmptyBag results = text "Rule check results: no rule application sites" | otherwise = vcat [text "Rule check results:", line, vcat [ p $$ line | p <- bagToList results ] ] where line = text (replicate 20 '-') env = RuleCheckEnv { rc_is_active = isActive phase , rc_id_unf = idUnfolding -- Not quite right -- Should use activeUnfolding , rc_pattern = rule_pat , rc_rules = rules , rc_ropts = ropts , rc_in_scope = emptyInScopeSet } results = go env binds go _ [] = emptyBag go env (bind:binds) = let (env', ds) = ruleCheckBind env bind in ds `unionBags` go env' binds data RuleCheckEnv = RuleCheckEnv { rc_is_active :: Activation -> Bool , rc_id_unf :: IdUnfoldingFun , rc_pattern :: String , rc_rules :: Id -> [CoreRule] , rc_ropts :: RuleOpts , rc_in_scope :: InScopeSet } extendInScopeRC :: RuleCheckEnv -> Var -> RuleCheckEnv extendInScopeRC env@(RuleCheckEnv { rc_in_scope = in_scope }) v = env { rc_in_scope = in_scope `extendInScopeSet` v } extendInScopeListRC :: RuleCheckEnv -> [Var] -> RuleCheckEnv extendInScopeListRC env@(RuleCheckEnv { rc_in_scope = in_scope }) vs = env { rc_in_scope = in_scope `extendInScopeSetList` vs } ruleCheckBind :: RuleCheckEnv -> CoreBind -> (RuleCheckEnv, Bag SDoc) -- The Bag returned has one SDoc for each call site found ruleCheckBind env (NonRec b r) = (env `extendInScopeRC` b, ruleCheck env r) ruleCheckBind env (Rec prs) = (env', unionManyBags (map (ruleCheck env') rhss)) where (bs, rhss) = unzip prs env' = env `extendInScopeListRC` bs ruleCheck :: RuleCheckEnv -> CoreExpr -> Bag SDoc ruleCheck _ (Var _) = emptyBag ruleCheck _ (Lit _) = 
emptyBag ruleCheck _ (Type _) = emptyBag ruleCheck _ (Coercion _) = emptyBag ruleCheck env (App f a) = ruleCheckApp env (App f a) [] ruleCheck env (Tick _ e) = ruleCheck env e ruleCheck env (Cast e _) = ruleCheck env e ruleCheck env (Let bd e) = let (env', ds) = ruleCheckBind env bd in ds `unionBags` ruleCheck env' e ruleCheck env (Lam b e) = ruleCheck (env `extendInScopeRC` b) e ruleCheck env (Case e b _ as) = ruleCheck env e `unionBags` unionManyBags [ruleCheck (env `extendInScopeListRC` (b:bs)) r | Alt _ bs r <- as] ruleCheckApp :: RuleCheckEnv -> Expr CoreBndr -> [Arg CoreBndr] -> Bag SDoc ruleCheckApp env (App f a) as = ruleCheck env a `unionBags` ruleCheckApp env f (a:as) ruleCheckApp env (Var f) as = ruleCheckFun env f as ruleCheckApp env other _ = ruleCheck env other ruleCheckFun :: RuleCheckEnv -> Id -> [CoreExpr] -> Bag SDoc -- Produce a report for all rules matching the predicate -- saying why it doesn't match the specified application ruleCheckFun env fn args | null name_match_rules = emptyBag | otherwise = unitBag (ruleAppCheck_help env fn args name_match_rules) where name_match_rules = filter match (rc_rules env fn) match rule = rc_pattern env `isPrefixOf` unpackFS (ruleName rule) ruleAppCheck_help :: RuleCheckEnv -> Id -> [CoreExpr] -> [CoreRule] -> SDoc ruleAppCheck_help env fn args rules = -- The rules match the pattern, so we want to print something vcat [text "Expression:" <+> ppr (mkApps (Var fn) args), vcat (map check_rule rules)] where in_scope = rc_in_scope env n_args = length args i_args = args `zip` [1::Int ..] 
rough_args = map roughTopName args check_rule rule = rule_herald rule <> colon <+> rule_info (rc_ropts env) rule rule_herald (BuiltinRule { ru_name = name }) = text "Builtin rule" <+> doubleQuotes (ftext name) rule_herald (Rule { ru_name = name }) = text "Rule" <+> doubleQuotes (ftext name) rule_info opts rule | Just _ <- matchRule opts (ISE emptyInScopeSet (rc_id_unf env)) noBlackList fn args rough_args rule = text "matches (which is very peculiar!)" rule_info _ (BuiltinRule {}) = text "does not match" rule_info _ (Rule { ru_act = act, ru_bndrs = rule_bndrs, ru_args = rule_args}) | not (rc_is_active env act) = text "active only in later phase" | n_args < n_rule_args = text "too few arguments" | n_mismatches == n_rule_args = text "no arguments match" | n_mismatches == 0 = text "all arguments match (considered individually), but rule as a whole does not" | otherwise = text "arguments" <+> ppr mismatches <+> text "do not match (1-indexing)" where n_rule_args = length rule_args n_mismatches = length mismatches mismatches = [i | (rule_arg, (arg,i)) <- rule_args `zip` i_args, not (isJust (match_fn rule_arg arg))] match_fn rule_arg arg = match renv emptyRuleSubst rule_arg arg MRefl where renv = RV { rv_lcl = mkRnEnv2 in_scope , rv_tmpls = mkVarSet rule_bndrs , rv_fltR = mkEmptySubst in_scope , rv_unf = rc_id_unf env } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Rules/0000755000000000000000000000000007346545000017670 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Rules/Config.hs0000644000000000000000000000074207346545000021434 0ustar0000000000000000module GHC.Core.Rules.Config where import GHC.Prelude import GHC.Platform -- | Rule options data RuleOpts = RuleOpts { roPlatform :: !Platform -- ^ Target platform , roNumConstantFolding :: !Bool -- ^ Enable more advanced numeric constant folding , roExcessRationalPrecision :: !Bool -- ^ Cut down precision of Rational values to that of Float/Double if disabled , roBignumRules :: !Bool -- ^ Enable 
rules for bignums } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Seq.hs0000644000000000000000000000757407346545000017677 0ustar0000000000000000-- | -- Various utilities for forcing Core structures -- -- It can often be useful to force various parts of the AST. This module -- provides a number of @seq@-like functions to accomplish this. module GHC.Core.Seq ( -- * Utilities for forcing Core structures seqExpr, seqExprs, seqUnfolding, seqRules, megaSeqIdInfo, seqRuleInfo, seqBinds, ) where import GHC.Prelude import GHC.Core import GHC.Types.Id.Info import GHC.Types.Demand( seqDemand, seqDmdSig ) import GHC.Types.Cpr( seqCprSig ) import GHC.Types.Basic( seqOccInfo ) import GHC.Types.Tickish import GHC.Types.Var.Set( seqDVarSet ) import GHC.Types.Var( varType, tyVarKind ) import GHC.Core.Type( seqType, isTyVar ) import GHC.Core.Coercion( seqCo ) import GHC.Types.Id( idInfo ) -- | Evaluate all the fields of the 'IdInfo' that are generally demanded by the -- compiler megaSeqIdInfo :: IdInfo -> () megaSeqIdInfo info = seqRuleInfo (ruleInfo info) `seq` -- Omitting this improves runtimes a little, presumably because -- some unfoldings are not calculated at all -- seqUnfolding (realUnfoldingInfo info) `seq` seqDemand (demandInfo info) `seq` seqDmdSig (dmdSigInfo info) `seq` seqCprSig (cprSigInfo info) `seq` seqCaf (cafInfo info) `seq` seqOneShot (oneShotInfo info) `seq` seqOccInfo (occInfo info) seqOneShot :: OneShotInfo -> () seqOneShot l = l `seq` () seqRuleInfo :: RuleInfo -> () seqRuleInfo (RuleInfo rules fvs) = seqRules rules `seq` seqDVarSet fvs seqCaf :: CafInfo -> () seqCaf c = c `seq` () seqRules :: [CoreRule] -> () seqRules [] = () seqRules (Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs } : rules) = seqBndrs bndrs `seq` seqExprs (rhs:args) `seq` seqRules rules seqRules (BuiltinRule {} : rules) = seqRules rules seqExpr :: CoreExpr -> () seqExpr (Var v) = v `seq` () seqExpr (Lit lit) = lit `seq` () seqExpr (App f a) = seqExpr f `seq` seqExpr a seqExpr (Lam 
b e) = seqBndr b `seq` seqExpr e seqExpr (Let b e) = seqBind b `seq` seqExpr e seqExpr (Case e b t as) = seqExpr e `seq` seqBndr b `seq` seqType t `seq` seqAlts as seqExpr (Cast e co) = seqExpr e `seq` seqCo co seqExpr (Tick n e) = seqTickish n `seq` seqExpr e seqExpr (Type t) = seqType t seqExpr (Coercion co) = seqCo co seqExprs :: [CoreExpr] -> () seqExprs [] = () seqExprs (e:es) = seqExpr e `seq` seqExprs es seqTickish :: CoreTickish -> () seqTickish ProfNote{ profNoteCC = cc } = cc `seq` () seqTickish HpcTick{} = () seqTickish Breakpoint{ breakpointFVs = ids } = seqBndrs ids seqTickish SourceNote{} = () seqBndr :: CoreBndr -> () seqBndr b | isTyVar b = seqType (tyVarKind b) | otherwise = seqType (varType b) `seq` megaSeqIdInfo (idInfo b) seqBndrs :: [CoreBndr] -> () seqBndrs [] = () seqBndrs (b:bs) = seqBndr b `seq` seqBndrs bs seqBinds :: [Bind CoreBndr] -> () seqBinds bs = foldr (seq . seqBind) () bs seqBind :: Bind CoreBndr -> () seqBind (NonRec b e) = seqBndr b `seq` seqExpr e seqBind (Rec prs) = seqPairs prs seqPairs :: [(CoreBndr, CoreExpr)] -> () seqPairs [] = () seqPairs ((b,e):prs) = seqBndr b `seq` seqExpr e `seq` seqPairs prs seqAlts :: [CoreAlt] -> () seqAlts [] = () seqAlts (Alt c bs e:alts) = c `seq` seqBndrs bs `seq` seqExpr e `seq` seqAlts alts seqUnfolding :: Unfolding -> () seqUnfolding (CoreUnfolding { uf_tmpl = e, uf_is_top = top, uf_cache = cache, uf_guidance = g}) = seqExpr e `seq` top `seq` cache `seq` seqGuidance g -- The unf_cache :: UnfoldingCache field is a strict data type, -- so it is sufficient to use plain `seq` for this field -- See Note [UnfoldingCache] in GHC.Core seqUnfolding _ = () seqGuidance :: UnfoldingGuidance -> () seqGuidance (UnfIfGoodArgs ns n b) = n `seq` sum ns `seq` b `seq` () seqGuidance _ = () ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/SimpleOpt.hs0000644000000000000000000016607407346545000021064 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 
1992-1998 -} module GHC.Core.SimpleOpt ( SimpleOpts (..), defaultSimpleOpts, -- ** Simple expression optimiser simpleOptPgm, simpleOptExpr, simpleOptExprWith, -- ** Join points joinPointBinding_maybe, joinPointBindings_maybe, -- ** Predicates on expressions exprIsConApp_maybe, exprIsLiteral_maybe, exprIsLambda_maybe, ) where import GHC.Prelude import GHC.Core import GHC.Core.Opt.Arity import GHC.Core.Subst import GHC.Core.Utils import GHC.Core.FVs import GHC.Core.Unfold import GHC.Core.Unfold.Make import GHC.Core.Make ( FloatBind(..), mkWildValBinder ) import GHC.Core.Opt.OccurAnal( occurAnalyseExpr, occurAnalysePgm, zapLambdaBndrs ) import GHC.Core.DataCon import GHC.Core.Coercion.Opt ( optCoercion, OptCoercionOpts (..) ) import GHC.Core.Type hiding ( substTy, extendTvSubst, extendCvSubst, extendTvSubstList , isInScope, substTyVarBndr, cloneTyVarBndr ) import GHC.Core.Predicate( isCoVarType ) import GHC.Core.Coercion hiding ( substCo, substCoVarBndr ) import GHC.Types.Literal import GHC.Types.Id import GHC.Types.Id.Info ( realUnfoldingInfo, setUnfoldingInfo, setRuleInfo, IdInfo (..) 
) import GHC.Types.Var ( isNonCoVarId ) import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Demand( etaConvertDmdSig, topSubDmd ) import GHC.Types.Tickish import GHC.Types.Basic import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Unit.Module ( Module ) import GHC.Utils.Encoding import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Data.Maybe ( orElse ) import GHC.Data.Graph.UnVar import Data.List (mapAccumL) import qualified Data.ByteString as BS {- ************************************************************************ * * The Simple Optimiser * * ************************************************************************ Note [The simple optimiser] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The simple optimiser is a lightweight, pure (non-monadic) function that rapidly does a lot of simple optimisations, including - inlining things that occur just once, or whose RHS turns out to be trivial - beta reduction - case of known constructor - dead code elimination It does NOT do any call-site inlining; it only inlines a function if it can do so unconditionally, dropping the binding. It thereby guarantees to leave no un-reduced beta-redexes. It is careful to follow the guidance of "Secrets of the GHC inliner", and in particular the pre-inline-unconditionally and post-inline-unconditionally story, to do effective beta reduction on functions called precisely once, without repeatedly optimising the same expression. In fact, the simple optimiser is a good example of this little dance in action; the full Simplifier is a lot more complicated. -} -- | Simple optimiser options data SimpleOpts = SimpleOpts { so_uf_opts :: !UnfoldingOpts -- ^ Unfolding options , so_co_opts :: !OptCoercionOpts -- ^ Coercion optimiser options , so_eta_red :: !Bool -- ^ Eta reduction on? } -- | Default options for the Simple optimiser. 
defaultSimpleOpts :: SimpleOpts defaultSimpleOpts = SimpleOpts { so_uf_opts = defaultUnfoldingOpts , so_co_opts = OptCoercionOpts { optCoercionEnabled = False } , so_eta_red = False } simpleOptExpr :: HasDebugCallStack => SimpleOpts -> CoreExpr -> CoreExpr -- See Note [The simple optimiser] -- Do simple optimisation on an expression -- The optimisation is very straightforward: just -- inline non-recursive bindings that are used only once, -- or where the RHS is trivial -- -- We also inline bindings that bind a Eq# box: see -- See Note [Getting the map/coerce RULE to work]. -- -- Also we convert functions to join points where possible (as -- the occurrence analyser does most of the work anyway). -- -- The result is NOT guaranteed occurrence-analysed, because -- in (let x = y in ....) we substitute for x; so y's occ-info -- may change radically -- -- Note that simpleOptExpr is a pure function that we want to be able to call -- from lots of places, including ones that don't have DynFlags (e.g to optimise -- unfoldings of statically defined Ids via mkCompulsoryUnfolding). It used to -- fetch its options directly from the DynFlags, however, so some callers had to -- resort to using unsafeGlobalDynFlags (a global mutable variable containing -- the DynFlags). It has been modified to take its own SimpleOpts that may be -- created from DynFlags, but not necessarily. simpleOptExpr opts expr = -- pprTrace "simpleOptExpr" (ppr init_subst $$ ppr expr) simpleOptExprWith opts init_subst expr where init_subst = mkEmptySubst (mkInScopeSet (exprFreeVars expr)) -- It's potentially important to make a proper in-scope set -- Consider let x = ..y.. in \y. ...x... -- Then we should remember to clone y before substituting -- for x. It's very unlikely to occur, because we probably -- won't *be* substituting for x if it occurs inside a -- lambda. 
-- -- It's a bit painful to call exprFreeVars, because it makes -- three passes instead of two (occ-anal, and go) simpleOptExprWith :: HasDebugCallStack => SimpleOpts -> Subst -> InExpr -> OutExpr -- See Note [The simple optimiser] simpleOptExprWith opts subst expr = simple_opt_expr init_env (occurAnalyseExpr expr) where init_env = (emptyEnv opts) { soe_subst = subst } ---------------------- simpleOptPgm :: SimpleOpts -> Module -> CoreProgram -> [CoreRule] -> (CoreProgram, [CoreRule], CoreProgram) -- See Note [The simple optimiser] simpleOptPgm opts this_mod binds rules = (reverse binds', rules', occ_anald_binds) where occ_anald_binds = occurAnalysePgm this_mod (\_ -> True) {- All unfoldings active -} (\_ -> False) {- No rules active -} rules binds (final_env, binds') = foldl' do_one (emptyEnv opts, []) occ_anald_binds final_subst = soe_subst final_env rules' = substRulesForImportedIds final_subst rules -- We never unconditionally inline into rules, -- hence paying just a substitution do_one (env, binds') bind = case simple_opt_bind env bind TopLevel of (env', Nothing) -> (env', binds') (env', Just bind') -> (env', bind':binds') -- In these functions the substitution maps InVar -> OutExpr ---------------------- type SimpleClo = (SimpleOptEnv, InExpr) data SimpleOptEnv = SOE { soe_opts :: {-# UNPACK #-} !SimpleOpts -- ^ Simplifier options , soe_inl :: IdEnv SimpleClo -- ^ Deals with preInlineUnconditionally; things -- that occur exactly once and are inlined -- without having first been simplified , soe_subst :: Subst -- ^ Deals with cloning; includes the InScopeSet , soe_rec_ids :: !UnVarSet -- ^ Fast OutVarSet tracking which recursive RHSs we are analysing. 
-- See Note [Eta reduction in recursive RHSs] } instance Outputable SimpleOptEnv where ppr (SOE { soe_inl = inl, soe_subst = subst }) = text "SOE {" <+> vcat [ text "soe_inl =" <+> ppr inl , text "soe_subst =" <+> ppr subst ] <+> text "}" emptyEnv :: SimpleOpts -> SimpleOptEnv emptyEnv opts = SOE { soe_inl = emptyVarEnv , soe_subst = emptySubst , soe_rec_ids = emptyUnVarSet , soe_opts = opts } soeZapSubst :: SimpleOptEnv -> SimpleOptEnv soeZapSubst env@(SOE { soe_subst = subst }) = env { soe_inl = emptyVarEnv, soe_subst = zapSubst subst } soeInScope :: SimpleOptEnv -> InScopeSet soeInScope (SOE { soe_subst = subst }) = getSubstInScope subst soeSetInScope :: InScopeSet -> SimpleOptEnv -> SimpleOptEnv soeSetInScope in_scope env2@(SOE { soe_subst = subst2 }) = env2 { soe_subst = setInScope subst2 in_scope } enterRecGroupRHSs :: SimpleOptEnv -> [OutBndr] -> (SimpleOptEnv -> (SimpleOptEnv, r)) -> (SimpleOptEnv, r) enterRecGroupRHSs env bndrs k = (env'{soe_rec_ids = soe_rec_ids env}, r) where (env', r) = k env{soe_rec_ids = extendUnVarSetList bndrs (soe_rec_ids env)} --------------- simple_opt_clo :: HasDebugCallStack => InScopeSet -> SimpleClo -> OutExpr simple_opt_clo in_scope (e_env, e) = simple_opt_expr (soeSetInScope in_scope e_env) e simple_opt_expr :: HasDebugCallStack => SimpleOptEnv -> InExpr -> OutExpr simple_opt_expr env expr = go expr where rec_ids = soe_rec_ids env subst = soe_subst env in_scope = getSubstInScope subst in_scope_env = ISE in_scope alwaysActiveUnfoldingFun --------------- go (Var v) | Just clo <- lookupVarEnv (soe_inl env) v = simple_opt_clo in_scope clo | otherwise = lookupIdSubst (soe_subst env) v go (App e1 e2) = simple_app env e1 [(env,e2)] go (Type ty) = Type (substTyUnchecked subst ty) go (Coercion co) = Coercion (go_co co) go (Lit lit) = Lit lit go (Tick tickish e) = mkTick (substTickish subst tickish) (go e) go (Cast e co) = mk_cast (go e) (go_co co) go (Let bind body) = case simple_opt_bind env bind NotTopLevel of (env', Nothing) -> 
simple_opt_expr env' body (env', Just bind) -> Let bind (simple_opt_expr env' body) go lam@(Lam {}) = go_lam env [] lam go (Case e b ty as) | isDeadBinder b , Just (_, [], con, _tys, es) <- exprIsConApp_maybe in_scope_env e' -- We don't need to be concerned about floats when looking for coerce. , Just (Alt altcon bs rhs) <- findAlt (DataAlt con) as = case altcon of DEFAULT -> go rhs _ -> foldr wrapLet (simple_opt_expr env' rhs) mb_prs where (env', mb_prs) = mapAccumL (simple_out_bind NotTopLevel) env $ zipEqual "simpleOptExpr" bs es -- See Note [Getting the map/coerce RULE to work] | isDeadBinder b , [Alt DEFAULT _ rhs] <- as , isCoVarType (varType b) , (Var fun, _args) <- collectArgs e , fun `hasKey` coercibleSCSelIdKey -- without this last check, we get #11230 = go rhs | otherwise = Case e' b' (substTyUnchecked subst ty) (map (go_alt env') as) where e' = go e (env', b') = subst_opt_bndr env b ---------------------- go_co co = optCoercion (so_co_opts (soe_opts env)) subst co ---------------------- go_alt env (Alt con bndrs rhs) = Alt con bndrs' (simple_opt_expr env' rhs) where (env', bndrs') = subst_opt_bndrs env bndrs ---------------------- -- go_lam tries eta reduction -- It is quite important that it does so. I tried removing this code and -- got a lot of regressions, e.g., +11% ghc/alloc in T18223 and many -- run/alloc increases. Presumably RULEs are affected. go_lam env bs' (Lam b e) = go_lam env' (b':bs') e where (env', b') = subst_opt_bndr env b go_lam env bs' e | so_eta_red (soe_opts env) , Just etad_e <- tryEtaReduce rec_ids bs e' topSubDmd = etad_e | otherwise = mkLams bs e' where bs = reverse bs' e' = simple_opt_expr env e mk_cast :: CoreExpr -> CoercionR -> CoreExpr -- Like GHC.Core.Utils.mkCast, but does a full reflexivity check. 
-- mkCast doesn't do that because the Simplifier does (in simplCast)
-- But in SimpleOpt it's nice to kill those nested casts (#18112)
mk_cast (Cast e co1) co2 = mk_cast e (co1 `mkTransCo` co2)
mk_cast (Tick t e) co    = Tick t (mk_cast e co)
mk_cast e co | isReflexiveCo co = e
             | otherwise        = Cast e co

----------------------
-- simple_app collects arguments for beta reduction
simple_app :: HasDebugCallStack => SimpleOptEnv -> InExpr -> [SimpleClo] -> CoreExpr

simple_app env (Var v) as
  -- An inlining recorded in soe_inl takes priority
  | Just (env', e) <- lookupVarEnv (soe_inl env) v
  = simple_app (soeSetInScope (soeInScope env) env') e as

  | let unf = idUnfolding v
  , isCompulsoryUnfolding (idUnfolding v)
  , isAlwaysActive (idInlineActivation v)
    -- See Note [Unfold compulsory unfoldings in RULE LHSs]
  = simple_app (soeZapSubst env) (unfoldingTemplate unf) as

  | otherwise
  , let out_fn = lookupIdSubst (soe_subst env) v
  = finish_app env out_fn as

simple_app env (App e1 e2) as
  = simple_app env e1 ((env, e2) : as)

simple_app env e@(Lam {}) as@(_:_)
  = do_beta env (zapLambdaBndrs e n_args) as
    -- Be careful to zap the lambda binders if necessary
    -- c.f. the Lam case of simplExprF1 in GHC.Core.Opt.Simplify
    -- Lacking this zap caused #19347, when we had a redex
    --   (\ a b. K a b) e1 e2
    -- where (as it happens) the eta-expanded K is produced by
    -- Note [Typechecking data constructors] in GHC.Tc.Gen.Head
  where
    n_args = length as

    do_beta env (Lam b body) (a:as)
      | -- simpl binder before looking at its type
        -- See Note [Dark corner with representation polymorphism]
        needsCaseBinding (idType b') (snd a)
        -- This arg must not be inlined (side-effects) and cannot be let-bound,
        -- due to the let-can-float invariant. So simply case-bind it here.
      , let a' = simple_opt_clo (soeInScope env) a
      = mkDefaultCase a' b' $ do_beta env' body as

      | (env'', mb_pr) <- simple_bind_pair env' b (Just b') a NotTopLevel
      = wrapLet mb_pr $ do_beta env'' body as

      where
        (env', b') = subst_opt_bndr env b

    do_beta env body as = simple_app env body as

simple_app env (Tick t e) as
  -- Okay to do "(Tick t e) x ==> Tick t (e x)"?
  | t `tickishScopesLike` SoftScope
  = mkTick t $ simple_app env e as

-- (let x = e in b) a1 .. an  =>  let x = e in (b a1 .. an)
-- The let might appear there as a result of inlining
-- e.g.   let f = let x = e in b
--        in f a1 a2
--   (#13208)
-- However, do /not/ do this transformation for join points
--    See Note [simple_app and join points]
simple_app env (Let bind body) args
  = case simple_opt_bind env bind NotTopLevel of
      (env', Nothing)   -> simple_app env' body args
      (env', Just bind')
        | isJoinBind bind' -> finish_app env expr' args
        | otherwise        -> Let bind' (simple_app env' body args)
        where
          expr' = Let bind' (simple_opt_expr env' body)

simple_app env e as
  = finish_app env (simple_opt_expr env e) as

-- finish_app: no more beta reduction is possible; apply the collected
-- argument closures to the (already-optimised) function expression
finish_app :: HasDebugCallStack => SimpleOptEnv -> OutExpr -> [SimpleClo] -> OutExpr
-- See Note [Eliminate casts in function position]
finish_app env (Cast (Lam x e) co) as@(_:_)
  | not (isTyVar x) && not (isCoVar x)
  , assert (not $ x `elemVarSet` tyCoVarsOfCo co) True
  , Just (x',e') <- pushCoercionIntoLambda (soeInScope env) x e co
  = simple_app (soeZapSubst env) (Lam x' e') as

finish_app env fun args
  = foldl mk_app fun args
  where
    in_scope = soeInScope env
    mk_app fun arg = App fun (simple_opt_clo in_scope arg)

----------------------
-- simple_opt_bind: optimise a binding, returning the extended environment
-- and (Just bind') for a binding to keep, or Nothing if the binding was
-- absorbed into the substitution/inline environment
simple_opt_bind :: SimpleOptEnv -> InBind -> TopLevelFlag
                -> (SimpleOptEnv, Maybe OutBind)
simple_opt_bind env (NonRec b r) top_level
  = (env', case mb_pr of
             Nothing    -> Nothing
             Just (b,r) -> Just (NonRec b r))
  where
    -- Turn the binding into a join point if it is always tail-called
    (b', r') = joinPointBinding_maybe b r `orElse` (b, r)
    (env', mb_pr) = simple_bind_pair env b' Nothing (env,r') top_level

simple_opt_bind env (Rec prs) top_level
  = (env2, res_bind)
  where
res_bind = Just (Rec (reverse rev_prs')) prs' = joinPointBindings_maybe prs `orElse` prs (env1, bndrs') = subst_opt_bndrs env (map fst prs') (env2, rev_prs') = enterRecGroupRHSs env1 bndrs' $ \env -> foldl' do_pr (env, []) (prs' `zip` bndrs') do_pr (env, prs) ((b,r), b') = (env', case mb_pr of Just pr -> pr : prs Nothing -> prs) where (env', mb_pr) = simple_bind_pair env b (Just b') (env,r) top_level ---------------------- simple_bind_pair :: SimpleOptEnv -> InVar -> Maybe OutVar -> SimpleClo -> TopLevelFlag -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) -- (simple_bind_pair subst in_var out_rhs) -- either extends subst with (in_var -> out_rhs) -- or returns Nothing simple_bind_pair env@(SOE { soe_inl = inl_env, soe_subst = subst }) in_bndr mb_out_bndr clo@(rhs_env, in_rhs) top_level | Type ty <- in_rhs -- let a::* = TYPE ty in , let out_ty = substTyUnchecked (soe_subst rhs_env) ty = assertPpr (isTyVar in_bndr) (ppr in_bndr $$ ppr in_rhs) $ (env { soe_subst = extendTvSubst subst in_bndr out_ty }, Nothing) | Coercion co <- in_rhs , let out_co = optCoercion (so_co_opts (soe_opts env)) (soe_subst rhs_env) co = assert (isCoVar in_bndr) (env { soe_subst = extendCvSubst subst in_bndr out_co }, Nothing) | assertPpr (isNonCoVarId in_bndr) (ppr in_bndr) -- The previous two guards got rid of tyvars and coercions -- See Note [Core type and coercion invariant] in GHC.Core pre_inline_unconditionally = (env { soe_inl = extendVarEnv inl_env in_bndr clo }, Nothing) | otherwise = simple_out_bind_pair env in_bndr mb_out_bndr out_rhs occ active stable_unf top_level where stable_unf = isStableUnfolding (idUnfolding in_bndr) active = isAlwaysActive (idInlineActivation in_bndr) occ = idOccInfo in_bndr in_scope = getSubstInScope subst out_rhs | JoinPoint join_arity <- idJoinPointHood in_bndr = simple_join_rhs join_arity | otherwise = simple_opt_clo in_scope clo simple_join_rhs join_arity -- See Note [Preserve join-binding arity] = mkLams join_bndrs' (simple_opt_expr env_body join_body) where 
env0 = soeSetInScope in_scope rhs_env (join_bndrs, join_body) = collectNBinders join_arity in_rhs (env_body, join_bndrs') = subst_opt_bndrs env0 join_bndrs pre_inline_unconditionally :: Bool pre_inline_unconditionally | isExportedId in_bndr = False | stable_unf = False | not active = False -- Note [Inline prag in simplOpt] | not (safe_to_inline occ) = False | otherwise = True -- Unconditionally safe to inline safe_to_inline :: OccInfo -> Bool safe_to_inline IAmALoopBreaker{} = False safe_to_inline IAmDead = True safe_to_inline OneOcc{ occ_in_lam = NotInsideLam , occ_n_br = 1 } = True safe_to_inline OneOcc{} = False safe_to_inline ManyOccs{} = False do_beta_by_substitution :: Id -> CoreExpr -> Bool -- True <=> you can inline (bndr = rhs) by substitution -- See Note [Exploit occ-info in exprIsConApp_maybe] do_beta_by_substitution bndr rhs = exprIsTrivial rhs -- Can duplicate || safe_to_inline (idOccInfo bndr) -- Occurs at most once do_case_elim :: CoreExpr -> Id -> [Id] -> Bool do_case_elim scrut case_bndr alt_bndrs = exprIsHNF scrut && safe_to_inline (idOccInfo case_bndr) && all isDeadBinder alt_bndrs ------------------- simple_out_bind :: TopLevelFlag -> SimpleOptEnv -> (InVar, OutExpr) -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) simple_out_bind top_level env@(SOE { soe_subst = subst }) (in_bndr, out_rhs) | Type out_ty <- out_rhs = assertPpr (isTyVar in_bndr) (ppr in_bndr $$ ppr out_ty $$ ppr out_rhs) (env { soe_subst = extendTvSubst subst in_bndr out_ty }, Nothing) | Coercion out_co <- out_rhs = assert (isCoVar in_bndr) (env { soe_subst = extendCvSubst subst in_bndr out_co }, Nothing) | otherwise = simple_out_bind_pair env in_bndr Nothing out_rhs (idOccInfo in_bndr) True False top_level ------------------- simple_out_bind_pair :: SimpleOptEnv -> InId -> Maybe OutId -> OutExpr -> OccInfo -> Bool -> Bool -> TopLevelFlag -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) simple_out_bind_pair env in_bndr mb_out_bndr out_rhs occ_info active stable_unf top_level | assertPpr 
(isNonCoVarId in_bndr) (ppr in_bndr) -- Type and coercion bindings are caught earlier -- See Note [Core type and coercion invariant] post_inline_unconditionally = ( env' { soe_subst = extendIdSubst (soe_subst env) in_bndr out_rhs } , Nothing) | otherwise = ( env', Just (out_bndr, out_rhs) ) where (env', bndr1) = case mb_out_bndr of Just out_bndr -> (env, out_bndr) Nothing -> subst_opt_bndr env in_bndr out_bndr = add_info env' in_bndr top_level out_rhs bndr1 post_inline_unconditionally :: Bool post_inline_unconditionally | isExportedId in_bndr = False -- Note [Exported Ids and trivial RHSs] | stable_unf = False -- Note [Stable unfoldings and postInlineUnconditionally] | not active = False -- in GHC.Core.Opt.Simplify.Utils | is_loop_breaker = False -- If it's a loop-breaker of any kind, don't inline -- because it might be referred to "earlier" | exprIsTrivial out_rhs = True | coercible_hack = True | otherwise = False is_loop_breaker = isWeakLoopBreaker occ_info -- See Note [Getting the map/coerce RULE to work] coercible_hack | (Var fun, args) <- collectArgs out_rhs , Just dc <- isDataConWorkId_maybe fun , dc `hasKey` heqDataConKey || dc `hasKey` coercibleDataConKey = all exprIsTrivial args | otherwise = False {- Note [Exported Ids and trivial RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We obviously do not want to unconditionally inline an Id that is exported. In GHC.Core.Opt.Simplify.Utils, Note [Top level and postInlineUnconditionally], we explain why we don't inline /any/ top-level things unconditionally, even trivial ones. But we do here! Why? In the simple optimiser * We do no rule rewrites * We do no call-site inlining Those differences obviate the reasons for not inlining a trivial rhs, and increase the benefit for doing so. So we unconditionally inline trivial rhss here. 
Note [Eliminate casts in function position]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider the following program:

  type R :: Type -> RuntimeRep
  type family R a where { R Float = FloatRep; R Double = DoubleRep }
  type F :: forall (a :: Type) -> TYPE (R a)
  type family F a where { F Float = Float# ; F Double = Double# }

  type N :: forall (a :: Type) -> TYPE (R a)
  newtype N a = MkN (F a)

As MkN is a newtype, its unfolding is a lambda which wraps its argument
in a cast:

  MkN :: forall (a :: Type). F a -> N a
  MkN = /\a \(x::F a). x |> co_ax    -- recall that F a :: TYPE (R a)

This is a representation-polymorphic lambda, in which the binder has an
unknown representation (R a). We can't compile such a lambda on its own,
but we can compile instantiations, such as `MkN @Float` or `MkN @Double`.

Our strategy to avoid running afoul of the representation-polymorphism
invariants of Note [Representation polymorphism invariants] in GHC.Core
is thus:

1. Give the newtype a compulsory unfolding (it has no binding, as we can't
   define lambdas with representation-polymorphic value binders in source
   Haskell).
2. Rely on the optimiser to beta-reduce away any representation-polymorphic
   value binders.

For example, consider the application

  MkN @Float 34.0#

After inlining MkN we'll get

  ((/\a \(x:F a). x |> co_ax) @Float) |> co 34#

where co :: (F Float -> N Float) ~ (Float# -> N Float)

But to actually beta-reduce that lambda, we need to push the 'co' inside
the `\x` with pushCoercionIntoLambda. Hence the extra equation for
Cast-of-Lam in finish_app.

This is regrettably delicate.

Note [Preserve join-binding arity]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Be careful /not/ to eta-reduce the RHS of a join point, lest we lose
the join-point arity invariant.  #15108 was caused by simplifying the
RHS with simple_opt_expr, which does eta-reduction.  Solution: simplify
the RHS of a join point by simplifying under the lambdas (which of
course should be there).

Note [simple_app and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general for let-bindings we can do this:
    (let { x = e } in b) a  ==>  let { x = e } in b a

But not for join points!  For two reasons:

- We would need to push the continuation into the RHS:
    (join { j = e } in b) a  ==>  let { j' = e a } in b[j'/j] a
                                      NB ----^^
  and also change the type of j, hence j'.
  That's a bit sophisticated for the very simple optimiser.

- We might end up with something like
    join { j' = e a } in
    (case blah of        )
    (  True  -> j' void# ) a
    (  False -> blah     )
  and now the call to j' doesn't look like a tail call, and Lint may
  reject.  I say "may" because this is /explicitly/ allowed in the
  "Compiling without Continuations" paper (Section 3, "Managing \Delta").
  But GHC currently does not allow this slightly-more-flexible form.
  See GHC.Core Note [Join points are less general than the paper].

The simple thing to do is to disable this transformation for join points
in the simple optimiser.

Note [The Let-Unfoldings Invariant]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A program has the Let-Unfoldings property iff:

- For every let-bound variable f, whether top-level or nested, whether
  recursive or not:
  - Both the binding Id of f, and every occurrence Id of f, has an
    idUnfolding.
  - For non-INLINE things, that unfolding will be f's right hand side.
  - For INLINE things (which have a "stable" unfolding) that unfolding is
    semantically equivalent to f's RHS, but derived from the original RHS
    of f rather than its current RHS.

Informally, we can say that in a program that has the Let-Unfoldings
property, all let-bound Id's have an explicit unfolding attached to them.

Currently, the simplifier guarantees the Let-Unfoldings invariant for
anything it outputs.
-}

----------------------
subst_opt_bndrs :: SimpleOptEnv -> [InVar] -> (SimpleOptEnv, [OutVar])
subst_opt_bndrs env bndrs = mapAccumL subst_opt_bndr env bndrs

subst_opt_bndr :: SimpleOptEnv -> InVar -> (SimpleOptEnv, OutVar)
subst_opt_bndr env bndr
  | isTyVar bndr  = (env { soe_subst = subst_tv }, tv')
  | isCoVar bndr  = (env { soe_subst = subst_cv }, cv')
  | otherwise     = subst_opt_id_bndr env bndr
  where
    subst           = soe_subst env
    (subst_tv, tv') = substTyVarBndr subst bndr
    (subst_cv, cv') = substCoVarBndr subst bndr

subst_opt_id_bndr :: SimpleOptEnv -> InId -> (SimpleOptEnv, OutId)
-- Nuke all fragile IdInfo, unfolding, and RULES; it gets added back later by
-- add_info.
--
-- Rather like SimplEnv.substIdBndr
--
-- It's important to zap fragile OccInfo (which GHC.Core.Subst.substIdBndr
-- carefully does not do) because simplOptExpr invalidates it
subst_opt_id_bndr env@(SOE { soe_subst = subst, soe_inl = inl }) old_id
  = (env { soe_subst = new_subst, soe_inl = new_inl }, new_id)
  where
    Subst in_scope id_subst tv_subst cv_subst = subst

    id1    = uniqAway in_scope old_id
    id2    = updateIdTypeAndMult (substTyUnchecked subst) id1
    new_id = zapFragileIdInfo id2
             -- Zaps rules, unfolding, and fragile OccInfo
             -- The unfolding and rules will get added back later, by add_info

    new_in_scope = in_scope `extendInScopeSet` new_id

    no_change = new_id == old_id

    -- Extend the substitution if the unique has changed,
    -- See the notes with substTyVarBndr for the delSubstEnv
    new_id_subst
      | no_change = delVarEnv id_subst old_id
      | otherwise = extendVarEnv id_subst old_id (Var new_id)

    new_subst = Subst new_in_scope new_id_subst tv_subst cv_subst
    new_inl   = delVarEnv inl old_id

----------------------
add_info :: SimpleOptEnv -> InVar -> TopLevelFlag -> OutExpr -> OutVar -> OutVar
add_info env old_bndr top_level new_rhs new_bndr
 | isTyVar old_bndr = new_bndr
 | otherwise        = lazySetIdInfo new_bndr new_info
 where
   subst    = soe_subst env
   uf_opts  = so_uf_opts (soe_opts env)
   old_info = idInfo old_bndr

   -- Add back in the rules and unfolding which were
   -- removed by zapFragileIdInfo in subst_opt_id_bndr.
   --
   -- See Note [The Let-Unfoldings Invariant]
   new_info = idInfo new_bndr `setRuleInfo`      new_rules
                              `setUnfoldingInfo` new_unfolding

   old_rules = ruleInfo old_info
   new_rules = substRuleInfo subst new_bndr old_rules

   old_unfolding = realUnfoldingInfo old_info
   new_unfolding | isStableUnfolding old_unfolding
                 = substUnfolding subst old_unfolding
                 | otherwise
                 = unfolding_from_rhs

   unfolding_from_rhs = mkUnfolding uf_opts VanillaSrc
                                    (isTopLevel top_level)
                                    False -- may be bottom or not
                                    False -- Not a join point
                                    new_rhs
                                    Nothing

-- Wrap the (binder, rhs) pair, if present, as a non-recursive let
-- around the body
wrapLet :: Maybe (Id,CoreExpr) -> CoreExpr -> CoreExpr
wrapLet Nothing      body = body
wrapLet (Just (b,r)) body = Let (NonRec b r) body

{- Note [Inline prag in simplOpt]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If there's an INLINE/NOINLINE pragma that restricts the phase in which the
binder can be inlined, we don't inline here; after all, we don't know what
phase we're in.  Here's an example

  foo :: Int -> Int -> Int
  {-# INLINE foo #-}
  foo m n = inner m
     where
       {-# INLINE [1] inner #-}
       inner m = m+n

  bar :: Int -> Int
  bar n = foo n 1

When inlining 'foo' in 'bar' we want the let-binding for 'inner'
to remain visible until Phase 1

Note [Unfold compulsory unfoldings in RULE LHSs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When the user writes `RULES map coerce = coerce` as a rule, the rule
will only ever match if simpleOptExpr replaces coerce by its unfolding
on the LHS, because that is the core that the rule matching engine
will find. So do that for everything that has a compulsory
unfolding. Also see Note [Desugaring coerce as cast] in GHC.HsToCore.

However, we don't want to inline 'seq', which happens to also have a
compulsory unfolding, so we do this unfolding only for things
that are always-active.  See Note [User-defined RULES for seq] in
GHC.Types.Id.Make.
Note [Getting the map/coerce RULE to work]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We wish to allow the "map/coerce" RULE to fire:

  {-# RULES "map/coerce" map coerce = coerce #-}

The naive core produced for this is

  forall a b (dict :: Coercible * a b).
    map @a @b (coerce @a @b @dict) = coerce @[a] @[b] @dict'

  where dict' :: Coercible [a] [b]
        dict' = ...

This matches literal uses of `map coerce` in code, but that's not what we
want. We want it to match, say, `map MkAge` (where newtype Age = MkAge Int)
too. Achieving all this is surprisingly tricky:

(MC1) We must compulsorily unfold MkAge to a cast.
      See Note [Compulsory newtype unfolding] in GHC.Types.Id.Make

(MC2) We must compulsorily unfolding coerce on the rule LHS, yielding

        forall a b (dict :: Coercible * a b).
          map @a @b (\(x :: a) -> case dict of
            MkCoercible (co :: a ~R# b) -> x |> co) = ...

      Getting better. But this isn't exactly what gets produced. This is
      because Coercible essentially has ~R# as a superclass, and superclasses
      get eagerly extracted during solving. So we get this:

        forall a b (dict :: Coercible * a b).
          case Coercible_SCSel @* @a @b dict of
            _ [Dead] -> map @a @b (\(x :: a) -> case dict of
                          MkCoercible (co :: a ~R# b) -> x |> co) = ...

      Unfortunately, this still abstracts over a Coercible dictionary.
      We really want it to abstract over the ~R# evidence. So, we have
      Desugar.unfold_coerce, which transforms the above to (Desugar)

        forall a b (co :: a ~R# b).
          let dict = MkCoercible @* @a @b co in
          case Coercible_SCSel @* @a @b dict of
            _ [Dead] -> map @a @b (\(x :: a) -> case dict of
               MkCoercible (co :: a ~R# b) -> x |> co) = let dict = ... in ...

      See Note [Desugaring coerce as cast] in GHC.HsToCore

(MC3) Now, we need simpleOptExpr to fix this up. It does so by taking three
      separate actions:

      1. Inline certain non-recursive bindings. The choice whether to inline
         is made in simple_bind_pair. Note the rather specific check for
         MkCoercible in there.

      2. Stripping case expressions like the Coercible_SCSel one.
         See the `Case` case of simple_opt_expr's `go` function.

      3. Look for case expressions that unpack something that was just packed
         and inline them. This is also done in simple_opt_expr's `go`
         function.

(MC4) The map/coerce rule is the only compelling reason for having a RULE
      that quantifies over a coercion variable, something that is otherwise
      Very Deeply Suspicious.  See Note [Casts in the template] in
      GHC.Core.Rules.  Ugh!

This is all a fair amount of special-purpose hackery, but it's for a good
cause. And it won't hurt other RULES and such that it comes across.


************************************************************************
*                                                                      *
                Join points
*                                                                      *
************************************************************************
-}

{- Note [Strictness and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have

   let f = \x.  if x>200 then e1 else e1

and we know that f is strict in x.  Then if we subsequently discover that
f is an arity-2 join point, we'll eta-expand it to

   let f = \x y.  if x>200 then e1 else e1

and now it's only strict if applied to two arguments.  So we should adjust
the strictness info.

A more common case is when

   f = \x. error ".."

and again its arity increases (#15517)
-}

-- | Returns Just (bndr,rhs) if the binding is a join point:
-- If it's a JoinId, just return it
-- If it's not yet a JoinId but is always tail-called,
--    make it into a JoinId and return it.
-- In the latter case, eta-expand the RHS if necessary, to make the -- lambdas explicit, as is required for join points -- -- Precondition: the InBndr has been occurrence-analysed, -- so its OccInfo is valid joinPointBinding_maybe :: InBndr -> InExpr -> Maybe (InBndr, InExpr) joinPointBinding_maybe bndr rhs | not (isId bndr) = Nothing | isJoinId bndr = Just (bndr, rhs) | AlwaysTailCalled join_arity <- tailCallInfo (idOccInfo bndr) , (bndrs, body) <- etaExpandToJoinPoint join_arity rhs , let str_sig = idDmdSig bndr str_arity = count isId bndrs -- Strictness demands are for Ids only join_bndr = bndr `asJoinId` join_arity `setIdDmdSig` etaConvertDmdSig str_arity str_sig = Just (join_bndr, mkLams bndrs body) | otherwise = Nothing joinPointBindings_maybe :: [(InBndr, InExpr)] -> Maybe [(InBndr, InExpr)] joinPointBindings_maybe bndrs = mapM (uncurry joinPointBinding_maybe) bndrs {- ********************************************************************* * * exprIsConApp_maybe * * ************************************************************************ Note [exprIsConApp_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~ exprIsConApp_maybe is a very important function. There are two principal uses: * case e of { .... } * cls_op e, where cls_op is a class operation In both cases you want to know if e is of form (C e1..en) where C is a data constructor. However e might not *look* as if Note [exprIsConApp_maybe on literal strings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See #9400 and #13317. Conceptually, a string literal "abc" is just ('a':'b':'c':[]), but in Core they are represented as unpackCString# "abc"# by GHC.Core.Make.mkStringExprFS, or unpackCStringUtf8# when the literal contains multi-byte UTF8 characters. For optimizations we want to be able to treat it as a list, so they can be decomposed when used in a case-statement. exprIsConApp_maybe detects those calls to unpackCString# and returns: Just (':', [Char], ['a', unpackCString# "bc"]). 
We need to be careful about UTF8 strings here. ""# contains an encoded
ByteString, so we call utf8UnconsByteString to correctly deal with the
encoding and splitting.

We must also be careful about
   lvl = "foo"#
   ...(unpackCString# lvl)...
to ensure that we see through the let-binding for 'lvl'. Hence the
(exprIsLiteral_maybe .. arg) in the guard before the call to
dealWithStringLiteral.

The tests for this function are in T9400.

Note [Push coercions in exprIsConApp_maybe]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In #13025 I found a case where we had
    op (df @t1 @t2)     -- op is a ClassOp
where
    df = (/\a b. K e1 e2) |> g

To get this to come out we need to simplify on the fly
   ((/\a b. K e1 e2) |> g) @t1 @t2

Hence the use of pushCoArgs.

Note [exprIsConApp_maybe on data constructors with wrappers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Problem:
- some data constructors have wrappers
- these wrappers inline late (see MkId Note [Activation for data constructor
  wrappers])
- but we still want case-of-known-constructor to fire early.

Example:
   data T = MkT !Int
   $WMkT n = case n of n' -> MkT n'   -- Wrapper for MkT
   foo x = case $WMkT e of MkT y -> blah

Here we want the case-of-known-constructor transformation to fire, giving
   foo x = case e of x' -> let y = x' in blah

Here's how exprIsConApp_maybe achieves this:

0.  Start with scrutinee = $WMkT e

1.  Inline $WMkT on-the-fly.  That's why data-constructor wrappers are
    marked as expandable. (See GHC.Core.Utils.isExpandableApp.)  Now we have
      scrutinee = (\n. case n of n' -> MkT n') e

2.  Beta-reduce the application, generating a floated 'let'.
    See Note [beta-reduction in exprIsConApp_maybe] below.  Now we have
      scrutinee = case n of n' -> MkT n'
      with floats {Let n = e}

3.  Float the "case x of x' ->" binding out.  Now we have
      scrutinee = MkT n'
      with floats {Let n = e; case n of n' ->}

And now we have a known-constructor MkT that we can return.

Notice that both (2) and (3) require exprIsConApp_maybe to gather and return
a bunch of floats, both let and case bindings.

Note that this strategy introduces some subtle scenarios where a data-con
wrapper can be replaced by a data-con worker earlier than we'd like, see
Note [exprIsConApp_maybe for data-con wrappers: tricky corner].

Note [beta-reduction in exprIsConApp_maybe]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The unfolding of a definition (_e.g._ a let-bound variable or a datacon
wrapper) is typically a function. For instance, take the wrapper for MkT in
Note [exprIsConApp_maybe on data constructors with wrappers]:

    $WMkT n = case n of { n' -> T n' }

If `exprIsConApp_maybe` is trying to analyse `$WMkT arg`, upon unfolding of
$WMkT, it will see

   (\n -> case n of { n' -> T n' }) arg

In order to make progress, `exprIsConApp_maybe` must perform a
beta-reduction.

We don't want to blindly substitute `arg` in the body of the function,
because it duplicates work. We can (and, in fact, used to) substitute `arg`
in the body, but only when `arg` is a variable (or something equally
work-free).

But, because of Note [exprIsConApp_maybe on data constructors with wrappers],
'exprIsConApp_maybe' now returns floats. So, instead, we can beta-reduce
_always_:

    (\x -> body) arg

Is transformed into

   let x = arg in body

Which, effectively, means emitting a float `let x = arg` and recursively
analysing the body.

For newtypes, this strategy requires that their wrappers have compulsory
unfoldings. Suppose we have
   newtype T a b where
     MkT :: a -> T b a   -- Note args swapped

This defines a worker function MkT, a wrapper function $WMkT, and an axT:
   $WMkT :: forall a b. a -> T b a
   $WMkT = /\b a. \(x:a). MkT a b x     -- A real binding

   MkT :: forall a b. a -> T a b
   MkT = /\a b. \(x:a). x |> (ax a b)   -- A compulsory unfolding

   axiom axT :: a ~R# T a b

Now we are optimising
   case $WMkT (I# 3) |> sym axT of I# y -> ...
we clearly want to simplify this.
If $WMkT did not have a compulsory unfolding, we would end up with
    let a = I# 3 in case a of I# y -> ...
because in general, we do this on-the-fly beta-reduction
    (\x. e) blah  -->  let x = blah in e
and then float the let.  (Substitution would risk duplicating 'blah'.)

But if the case-of-known-constructor doesn't actually fire (i.e.
exprIsConApp_maybe does not return Just) then nothing happens, and nothing
will happen the next time either.

See test T16254, which checks the behavior of newtypes.

Note [Exploit occ-info in exprIsConApp_maybe]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose (#23159) we have a simple data constructor wrapper like this (this
one might have come from a data family instance):
  $WK x y = K x y |> co
Now suppose the simplifier sees
   case ($WK e1 e2) |> co2 of K p q -> case q of ...
`exprIsConApp_maybe` expands the wrapper on the fly
(see Note [beta-reduction in exprIsConApp_maybe]). It effectively expands
that ($WK e1 e2) to
   let x = e1; y = e2 in K x y |> co
So the Simplifier might end up producing this:
   let x = e1; y = e2
   in case x of ...
But suppose `q` was used just once in the body of the `K p q` alternative;
we don't want to wait a whole Simplifier iteration to inline that `x`.
(e1 might be another constructor for example.)  This would happen if, in
`exprIsConApp_maybe`, we created a let for every (non-trivial) argument.

So let's not do that when the binder is used just once! Instead, take
advantage of the occurrence-info on `x` and `y` in the unfolding of `$WK`.
Since in `$WK` both `x` and `y` occur once, we want to effectively expand
`($WK e1 e2)` to `(K e1 e2 |> co)`.  Hence in `do_beta_by_substitution` we
say "yes" if
  (a) the RHS is trivial (so we can duplicate it);
      see call to `exprIsTrivial`
or
  (b) the binder occurs at most once (so there is no worry about
      duplication); see call to `safe_to_inline`.

To see this in action, look at testsuite/tests/perf/compiler/T15703.
The initial Simplifier run takes 5 iterations without (b), but only 3 when
we add (b).

Note [Don't float join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
exprIsConApp_maybe should succeed on
   let v = e in Just v
returning [x=e] as one of the [FloatBind].  But it must NOT succeed on
   join j x = rhs in Just v
because join-points can't be gaily floated.  Consider
   case (join j x = rhs in Just v) of
     K p q -> blah
We absolutely must not "simplify" this to
   join j x = rhs
   in blah
because j's return type is (Maybe t), quite different to blah's.

You might think this could never happen, because j can't be tail-called in
the body if the body returns a constructor.  But in !3113 we had a /dead/
join point (which is not illegal), and its return type was wonky.

The simple thing is not to float a join point.  The next iteration of the
simplifier will sort everything out.  And if there is a join point, the
chances are that the body is not a constructor application, so failing
faster is good.

Note [exprIsConApp_maybe for data-con wrappers: tricky corner]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generally speaking

  * exprIsConApp_maybe honours the inline phase; that is, it does not look
    inside the unfolding for an Id unless its unfolding is active in this
    phase. That phase-sensitivity is expressed in the InScopeEnv
    (specifically, the IdUnfoldingFun component of the InScopeEnv) passed
    to exprIsConApp_maybe.

  * Data-constructor wrappers are active only in phase 0 (the last phase);
    see Note [Activation for data constructor wrappers] in
    GHC.Types.Id.Make.

On the face of it that means that exprIsConApp_maybe won't look inside data
constructor wrappers until phase 0. But that seems pretty Bad. So we cheat.
For data con wrappers we unconditionally look inside its unfolding,
regardless of phase, so that we get case-of-known-constructor to fire in
every phase.

Perhaps unsurprisingly, this cheating can backfire.
An example: data T = C !A B foo p q = let x = C e1 e2 in seq x $ f x {-# RULE "wurble" f (C a b) = b #-} In Core, the RHS of foo is let x = $WC e1 e2 in case x of y { C _ _ -> f x } and after doing a binder swap and inlining x, we have: case $WC e1 e2 of y { C _ _ -> f y } Case-of-known-constructor fires, but now we have to reconstruct a binding for `y` (which was dead before the binder swap) on the RHS of the case alternative. Naturally, we’ll use the worker: case e1 of a { DEFAULT -> let y = C a e2 in f y } and after inlining `y`, we have: case e1 of a { DEFAULT -> f (C a e2) } Now we might hope the "wurble" rule would fire, but alas, it will not: we have replaced $WC with C, but the (desugared) rule matches on $WC! We weren’t supposed to inline $WC yet for precisely that reason (see Note [Activation for data constructor wrappers]), but our cheating in exprIsConApp_maybe came back to bite us. This is rather unfortunate, especially since this can happen inside stable unfoldings as well as ordinary code (which really happened, see !3041). But there is no obvious solution except to delay case-of-known-constructor on data-con wrappers, and that cure would be worse than the disease. This Note exists solely to document the problem. -} data ConCont = CC [CoreExpr] MCoercion -- Substitution already applied -- | Returns @Just ([b1..bp], dc, [t1..tk], [x1..xn])@ if the argument -- expression is a *saturated* constructor application of the form @let b1 in -- .. let bp in dc t1..tk x1 .. xn@, where t1..tk are the -- *universally-quantified* type args of 'dc'. Floats can also be (and most -- likely are) single-alternative case expressions. Why does -- 'exprIsConApp_maybe' return floats? We may have to look through lets and -- cases to detect that we are in the presence of a data constructor wrapper. In -- this case, we need to return the lets and cases that we traversed. See Note -- [exprIsConApp_maybe on data constructors with wrappers]. 
Data constructor wrappers -- are unfolded late, but we really want to trigger case-of-known-constructor as -- early as possible. See also Note [Activation for data constructor wrappers] -- in "GHC.Types.Id.Make". -- -- We also return the incoming InScopeSet, augmented with -- the binders from any [FloatBind] that we return exprIsConApp_maybe :: HasDebugCallStack => InScopeEnv -> CoreExpr -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) exprIsConApp_maybe ise@(ISE in_scope id_unf) expr = go (Left in_scope) [] expr (CC [] MRefl) where go :: Either InScopeSet Subst -- Left in-scope means "empty substitution" -- Right subst means "apply this substitution to the CoreExpr" -- NB: in the call (go subst floats expr cont) -- the substitution applies to 'expr', but /not/ to 'floats' or 'cont' -> [FloatBind] -> CoreExpr -> ConCont -- Notice that the floats here are in reverse order -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) go subst floats (Tick t expr) cont | not (tickishIsCode t) = go subst floats expr cont go subst floats (Cast expr co1) (CC args m_co2) | Just (args', m_co1') <- pushCoArgs (subst_co subst co1) args -- See Note [Push coercions in exprIsConApp_maybe] = go subst floats expr (CC args' (m_co1' `mkTransMCo` m_co2)) go subst floats (App fun arg) (CC args mco) | let arg_type = exprType arg , not (isTypeArg arg) && needsCaseBinding arg_type arg -- An unlifted argument that’s not ok for speculation must not simply be -- put into the args, as these are going to be substituted into the case -- alternatives, and possibly lost on the way. -- -- Instead, we need need to -- make sure they are evaluated right here (using a case float), and -- the case binder can then be substituted into the case alternaties. 
-- -- Example: -- Simplifying case Mk# exp of Mk# a → rhs -- will use exprIsConApp_maybe (Mk# exp) -- -- Bad: returning (Mk#, [exp]) with no floats -- simplifier produces rhs[exp/a], changing semantics if exp is not ok-for-spec -- Good: returning (Mk#, [x]) with a float of case exp of x { DEFAULT -> [] } -- simplifier produces case exp of a { DEFAULT -> exp[x/a] } = let arg' = subst_expr subst arg bndr = uniqAway (subst_in_scope subst) (mkWildValBinder ManyTy arg_type) float = FloatCase arg' bndr DEFAULT [] subst' = subst_extend_in_scope subst bndr in go subst' (float:floats) fun (CC (Var bndr : args) mco) | otherwise = go subst floats fun (CC (subst_expr subst arg : args) mco) go subst floats (Lam bndr body) (CC (arg:args) mco) | do_beta_by_substitution bndr arg = go (extend subst bndr arg) floats body (CC args mco) | otherwise = let (subst', bndr') = subst_bndr subst bndr float = FloatLet (NonRec bndr' arg) in go subst' (float:floats) body (CC args mco) go subst floats (Let (NonRec bndr rhs) expr) cont | not (isJoinId bndr) -- Crucial guard! 
See Note [Don't float join points] = let rhs' = subst_expr subst rhs (subst', bndr') = subst_bndr subst bndr float = FloatLet (NonRec bndr' rhs') in go subst' (float:floats) expr cont go subst floats (Case scrut b _ [Alt con vars expr]) cont | do_case_elim scrut' b vars -- See Note [Case elim in exprIsConApp_maybe] = go (extend subst b scrut') floats expr cont | otherwise = let (subst', b') = subst_bndr subst b (subst'', vars') = subst_bndrs subst' vars float = FloatCase scrut' b' con vars' in go subst'' (float:floats) expr cont where scrut' = subst_expr subst scrut go (Right sub) floats (Var v) cont = go (Left (getSubstInScope sub)) floats (lookupIdSubst sub v) cont go (Left in_scope) floats (Var fun) cont@(CC args mco) | Just con <- isDataConWorkId_maybe fun , count isValArg args == idArity fun = succeedWith in_scope floats $ pushCoDataCon con args mco -- Look through data constructor wrappers: they inline late (See Note -- [Activation for data constructor wrappers]) but we want to do -- case-of-known-constructor optimisation eagerly (see Note -- [exprIsConApp_maybe on data constructors with wrappers]). | isDataConWrapId fun , let rhs = uf_tmpl (realIdUnfolding fun) = go (Left in_scope) floats rhs cont -- Look through dictionary functions; see Note [Unfolding DFuns] | DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = dfun_args } <- unfolding , bndrs `equalLength` args -- See Note [DFun arity check] , let in_scope' = extend_in_scope (exprsFreeVars dfun_args) subst = mkOpenSubst in_scope' (bndrs `zip` args) -- We extend the in-scope set here to silence warnings from -- substExpr when it finds not-in-scope Ids in dfun_args. 
-- simplOptExpr initialises the in-scope set with exprFreeVars, -- but that doesn't account for DFun unfoldings = succeedWith in_scope floats $ pushCoDataCon con (map (substExpr subst) dfun_args) mco -- Look through unfoldings, but only arity-zero one; -- if arity > 0 we are effectively inlining a function call, -- and that is the business of callSiteInline. -- In practice, without this test, most of the "hits" were -- CPR'd workers getting inlined back into their wrappers, | idArity fun == 0 , Just rhs <- expandUnfolding_maybe unfolding , let in_scope' = extend_in_scope (exprFreeVars rhs) = go (Left in_scope') floats rhs cont -- See Note [exprIsConApp_maybe on literal strings] | (fun `hasKey` unpackCStringIdKey) || (fun `hasKey` unpackCStringUtf8IdKey) , [arg] <- args , Just (LitString str) <- exprIsLiteral_maybe ise arg = succeedWith in_scope floats $ dealWithStringLiteral fun str mco where unfolding = id_unf fun extend_in_scope unf_fvs | isLocalId fun = in_scope `extendInScopeSetSet` unf_fvs | otherwise = in_scope -- A GlobalId has no (LocalId) free variables; and the -- in-scope set tracks only LocalIds go _ _ _ _ = Nothing succeedWith :: InScopeSet -> [FloatBind] -> Maybe (DataCon, [Type], [CoreExpr]) -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) succeedWith in_scope rev_floats x = do { (con, tys, args) <- x ; let floats = reverse rev_floats ; return (in_scope, floats, con, tys, args) } ---------------------------- -- Operations on the (Either InScopeSet GHC.Core.Subst) -- The Left case is wildly dominant subst_in_scope (Left in_scope) = in_scope subst_in_scope (Right s) = getSubstInScope s subst_extend_in_scope (Left in_scope) v = Left (in_scope `extendInScopeSet` v) subst_extend_in_scope (Right s) v = Right (s `extendSubstInScope` v) subst_co (Left {}) co = co subst_co (Right s) co = GHC.Core.Subst.substCo s co subst_expr (Left {}) e = e subst_expr (Right s) e = substExpr s e subst_bndr msubst bndr = (Right subst', bndr') where (subst', 
bndr') = substBndr subst bndr subst = case msubst of Left in_scope -> mkEmptySubst in_scope Right subst -> subst subst_bndrs subst bs = mapAccumL subst_bndr subst bs extend (Left in_scope) v e = Right (extendSubst (mkEmptySubst in_scope) v e) extend (Right s) v e = Right (extendSubst s v e) -- See Note [exprIsConApp_maybe on literal strings] dealWithStringLiteral :: Var -> BS.ByteString -> MCoercion -> Maybe (DataCon, [Type], [CoreExpr]) -- This is not possible with user-supplied empty literals, GHC.Core.Make.mkStringExprFS -- turns those into [] automatically, but just in case something else in GHC -- generates a string literal directly. dealWithStringLiteral fun str mco = case utf8UnconsByteString str of Nothing -> pushCoDataCon nilDataCon [Type charTy] mco Just (char, charTail) -> let char_expr = mkConApp charDataCon [mkCharLit char] -- In singleton strings, just add [] instead of unpackCstring# ""#. rest = if BS.null charTail then mkConApp nilDataCon [Type charTy] else App (Var fun) (Lit (LitString charTail)) in pushCoDataCon consDataCon [Type charTy, char_expr, rest] mco {- Note [Case elim in exprIsConApp_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have data K a = MkK !a $WMkK x = case x of y -> K y -- Wrapper for MkK ...case $WMkK v of K w -> We call `exprIsConApp_maybe` on ($WMkK v); we inline the wrapper and beta-reduce, so we get to exprIsConApp_maybe (case v of y -> K y) So we may float the case, and end up with case v of y -> [y/w] But if `v` is already evaluated, the next run of the Simplifier will eliminate the case, and we may then make more progress with . Better to do it in one iteration. Hence the `do_case_elim` check in `exprIsConApp_maybe`. Note [Unfolding DFuns] ~~~~~~~~~~~~~~~~~~~~~~ DFuns look like df :: forall a b. (Eq a, Eq b) -> Eq (a,b) df a b d_a d_b = MkEqD (a,b) ($c1 a b d_a d_b) ($c2 a b d_a d_b) So to split it up we just need to apply the ops $c1, $c2 etc to the very same args as the dfun. 
It takes a little more work to compute the type arguments to the dictionary constructor. Note [DFun arity check] ~~~~~~~~~~~~~~~~~~~~~~~ Here we check that the total number of supplied arguments (including type args) matches what the dfun is expecting. This may be *less* than the ordinary arity of the dfun: see Note [DFun unfoldings] in GHC.Core -} exprIsLiteral_maybe :: InScopeEnv -> CoreExpr -> Maybe Literal -- Same deal as exprIsConApp_maybe, but much simpler -- Nevertheless we do need to look through unfoldings for -- string literals, which are vigorously hoisted to top level -- and not subsequently inlined exprIsLiteral_maybe env@(ISE _ id_unf) e = case e of Lit l -> Just l Tick _ e' -> exprIsLiteral_maybe env e' -- dubious? Var v -> expandUnfolding_maybe (id_unf v) >>= exprIsLiteral_maybe env _ -> Nothing {- Note [exprIsLambda_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~~ exprIsLambda_maybe will, given an expression `e`, try to turn it into the form `Lam v e'` (returned as `Just (v,e')`). Besides using lambdas, it looks through casts (using the Push rule), and it unfolds function calls if the unfolding has a greater arity than arguments are present. Currently, it is used in GHC.Core.Rules.match, and is required to make "map coerce = coerce" match. -} exprIsLambda_maybe :: HasDebugCallStack => InScopeEnv -> CoreExpr -> Maybe (Var, CoreExpr,[CoreTickish]) -- See Note [exprIsLambda_maybe] -- The simple case: It is a lambda already exprIsLambda_maybe _ (Lam x e) = Just (x, e, []) -- Still straightforward: Ticks that we can float out of the way exprIsLambda_maybe ise (Tick t e) | tickishFloatable t , Just (x, e, ts) <- exprIsLambda_maybe ise e = Just (x, e, t:ts) -- Also possible: A casted lambda. Push the coercion inside exprIsLambda_maybe ise@(ISE in_scope_set _) (Cast casted_e co) | Just (x, e,ts) <- exprIsLambda_maybe ise casted_e -- Only do value lambdas. 
-- this implies that x is not in scope in gamma (makes this code simpler) , not (isTyVar x) && not (isCoVar x) , assert (not $ x `elemVarSet` tyCoVarsOfCo co) True , Just (x',e') <- pushCoercionIntoLambda in_scope_set x e co , let res = Just (x',e',ts) = --pprTrace "exprIsLambda_maybe:Cast" (vcat [ppr casted_e,ppr co,ppr res)]) res -- Another attempt: See if we find a partial unfolding exprIsLambda_maybe ise@(ISE in_scope_set id_unf) e | (Var f, as, ts) <- collectArgsTicks tickishFloatable e , idArity f > count isValArg as -- Make sure there is hope to get a lambda , Just rhs <- expandUnfolding_maybe (id_unf f) -- Optimize, for beta-reduction , let e' = simpleOptExprWith defaultSimpleOpts (mkEmptySubst in_scope_set) (rhs `mkApps` as) -- Recurse, because of possible casts , Just (x', e'', ts') <- exprIsLambda_maybe ise e' , let res = Just (x', e'', ts++ts') = -- pprTrace "exprIsLambda_maybe:Unfold" (vcat [ppr e, ppr (x',e'')]) res exprIsLambda_maybe _ _e = -- pprTrace "exprIsLambda_maybe:Fail" (vcat [ppr _e]) Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/SimpleOpt.hs-boot0000644000000000000000000000040707346545000022010 0ustar0000000000000000module GHC.Core.SimpleOpt where import GHC.Core import {-# SOURCE #-} GHC.Core.Unfold import GHC.Utils.Misc (HasDebugCallStack) data SimpleOpts so_uf_opts :: SimpleOpts -> UnfoldingOpts simpleOptExpr :: HasDebugCallStack => SimpleOpts -> CoreExpr -> CoreExpr ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Stats.hs0000644000000000000000000001137207346545000020234 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-2015 -} -- | Functions to computing the statistics reflective of the "size" -- of a Core expression module GHC.Core.Stats ( -- * Expression and bindings size coreBindsSize, exprSize, CoreStats(..), coreBindsStats, exprStats, ) where import GHC.Prelude import GHC.Types.Basic import GHC.Core import GHC.Utils.Outputable import GHC.Core.Coercion import 
GHC.Types.Tickish import GHC.Types.Var import GHC.Core.Type(Type, typeSize) import GHC.Types.Id (isJoinId) data CoreStats = CS { cs_tm :: !Int -- Terms , cs_ty :: !Int -- Types , cs_co :: !Int -- Coercions , cs_vb :: !Int -- Local value bindings , cs_jb :: !Int } -- Local join bindings instance Outputable CoreStats where ppr (CS { cs_tm = i1, cs_ty = i2, cs_co = i3, cs_vb = i4, cs_jb = i5 }) = braces (sep [text "terms:" <+> intWithCommas i1 <> comma, text "types:" <+> intWithCommas i2 <> comma, text "coercions:" <+> intWithCommas i3 <> comma, text "joins:" <+> intWithCommas i5 <> char '/' <> intWithCommas (i4 + i5) ]) plusCS :: CoreStats -> CoreStats -> CoreStats plusCS (CS { cs_tm = p1, cs_ty = q1, cs_co = r1, cs_vb = v1, cs_jb = j1 }) (CS { cs_tm = p2, cs_ty = q2, cs_co = r2, cs_vb = v2, cs_jb = j2 }) = CS { cs_tm = p1+p2, cs_ty = q1+q2, cs_co = r1+r2, cs_vb = v1+v2 , cs_jb = j1+j2 } zeroCS, oneTM :: CoreStats zeroCS = CS { cs_tm = 0, cs_ty = 0, cs_co = 0, cs_vb = 0, cs_jb = 0 } oneTM = zeroCS { cs_tm = 1 } sumCS :: (a -> CoreStats) -> [a] -> CoreStats sumCS f = foldl' (\s a -> plusCS s (f a)) zeroCS coreBindsStats :: [CoreBind] -> CoreStats coreBindsStats = sumCS (bindStats TopLevel) bindStats :: TopLevelFlag -> CoreBind -> CoreStats bindStats top_lvl (NonRec v r) = bindingStats top_lvl v r bindStats top_lvl (Rec prs) = sumCS (\(v,r) -> bindingStats top_lvl v r) prs bindingStats :: TopLevelFlag -> Var -> CoreExpr -> CoreStats bindingStats top_lvl v r = letBndrStats top_lvl v `plusCS` exprStats r bndrStats :: Var -> CoreStats bndrStats v = oneTM `plusCS` tyStats (varType v) letBndrStats :: TopLevelFlag -> Var -> CoreStats letBndrStats top_lvl v | isTyVar v || isTopLevel top_lvl = bndrStats v | isJoinId v = oneTM { cs_jb = 1 } `plusCS` ty_stats | otherwise = oneTM { cs_vb = 1 } `plusCS` ty_stats where ty_stats = tyStats (varType v) exprStats :: CoreExpr -> CoreStats exprStats (Var {}) = oneTM exprStats (Lit {}) = oneTM exprStats (Type t) = tyStats t exprStats 
(Coercion c) = coStats c exprStats (App f a) = exprStats f `plusCS` exprStats a exprStats (Lam b e) = bndrStats b `plusCS` exprStats e exprStats (Let b e) = bindStats NotTopLevel b `plusCS` exprStats e exprStats (Case e b _ as) = exprStats e `plusCS` bndrStats b `plusCS` sumCS altStats as exprStats (Cast e co) = coStats co `plusCS` exprStats e exprStats (Tick _ e) = exprStats e altStats :: CoreAlt -> CoreStats altStats (Alt _ bs r) = altBndrStats bs `plusCS` exprStats r altBndrStats :: [Var] -> CoreStats -- Charge one for the alternative, not for each binder altBndrStats vs = oneTM `plusCS` sumCS (tyStats . varType) vs tyStats :: Type -> CoreStats tyStats ty = zeroCS { cs_ty = typeSize ty } coStats :: Coercion -> CoreStats coStats co = zeroCS { cs_co = coercionSize co } coreBindsSize :: [CoreBind] -> Int -- We use coreBindsStats for user printout -- but this one is a quick and dirty basis for -- the simplifier's tick limit coreBindsSize bs = sum (map bindSize bs) exprSize :: CoreExpr -> Int -- ^ A measure of the size of the expressions, strictly greater than 0 -- Counts *leaves*, not internal nodes. Types and coercions are not counted. exprSize (Var _) = 1 exprSize (Lit _) = 1 exprSize (App f a) = exprSize f + exprSize a exprSize (Lam b e) = bndrSize b + exprSize e exprSize (Let b e) = bindSize b + exprSize e exprSize (Case e b _ as) = exprSize e + bndrSize b + 1 + sum (map altSize as) exprSize (Cast e _) = 1 + exprSize e exprSize (Tick n e) = tickSize n + exprSize e exprSize (Type _) = 1 exprSize (Coercion _) = 1 tickSize :: CoreTickish -> Int tickSize (ProfNote _ _ _) = 1 tickSize _ = 1 bndrSize :: Var -> Int bndrSize _ = 1 bndrsSize :: [Var] -> Int bndrsSize = sum . 
map bndrSize bindSize :: CoreBind -> Int bindSize (NonRec b e) = bndrSize b + exprSize e bindSize (Rec prs) = sum (map pairSize prs) pairSize :: (Var, CoreExpr) -> Int pairSize (b,e) = bndrSize b + exprSize e altSize :: CoreAlt -> Int altSize (Alt _ bs e) = bndrsSize bs + exprSize e ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Subst.hs0000644000000000000000000006650307346545000020244 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Utility functions on @Core@ syntax -} module GHC.Core.Subst ( -- * Main data types Subst(..), -- Implementation exported for supercompiler's Renaming.hs only TvSubstEnv, IdSubstEnv, InScopeSet, -- ** Substituting into expressions and related types deShadowBinds, substRuleInfo, substRulesForImportedIds, substTyUnchecked, substCo, substExpr, substExprSC, substBind, substBindSC, substUnfolding, substUnfoldingSC, lookupIdSubst, lookupIdSubst_maybe, substIdType, substIdOcc, substTickish, substDVarSet, substIdInfo, -- ** Operations on substitutions emptySubst, mkEmptySubst, mkTCvSubst, mkOpenSubst, isEmptySubst, extendIdSubst, extendIdSubstList, extendTCvSubst, extendTvSubstList, extendIdSubstWithClone, extendSubst, extendSubstList, extendSubstWithVar, extendSubstInScope, extendSubstInScopeList, extendSubstInScopeSet, isInScope, setInScope, getSubstInScope, extendTvSubst, extendCvSubst, delBndr, delBndrs, zapSubst, -- ** Substituting and cloning binders substBndr, substBndrs, substRecBndrs, substTyVarBndr, substCoVarBndr, cloneBndr, cloneBndrs, cloneIdBndr, cloneIdBndrs, cloneRecIdBndrs, ) where import GHC.Prelude import GHC.Core import GHC.Core.FVs import GHC.Core.Seq import GHC.Core.Utils -- We are defining local versions import GHC.Core.Type hiding ( substTy ) import GHC.Core.Coercion ( tyCoFVsOfCo, mkCoVarCo, substCoVarBndr ) import GHC.Types.Var.Set import GHC.Types.Var.Env as InScopeSet import GHC.Types.Id import GHC.Types.Name ( Name ) import GHC.Types.Var import 
GHC.Types.Tickish import GHC.Types.Id.Info import GHC.Types.Unique.Supply import GHC.Builtin.Names import GHC.Data.Maybe import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Functor.Identity (Identity (..)) import Data.List (mapAccumL) {- ************************************************************************ * * \subsection{Substitutions} * * ************************************************************************ -} {- Note [Extending the IdSubstEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We make a different choice for Ids than we do for TyVars. For TyVars, see Note [Extending the TvSubstEnv and CvSubstEnv] in GHC.Core.TyCo.Subst. For Ids, we have a different invariant The IdSubstEnv is extended *only* when the Unique on an Id changes Otherwise, we just extend the InScopeSet In consequence: * If all subst envs are empty, substExpr would be a no-op, so substExprSC ("short cut") does nothing. However, substExpr still goes ahead and substitutes. Reason: we may want to replace existing Ids with new ones from the in-scope set, to avoid space leaks. * In substIdBndr, we extend the IdSubstEnv only when the unique changes * If the CvSubstEnv, TvSubstEnv and IdSubstEnv are all empty, substExpr does nothing (Note that the above rule for substIdBndr maintains this property. If the incoming envts are both empty, then substituting the type and IdInfo can't change anything.) * In lookupIdSubst, we *must* look up the Id in the in-scope set, because it may contain non-trivial changes. Example: (/\a. \x:a. ...x...) Int We extend the TvSubstEnv with [a |-> Int]; but x's unique does not change so we only extend the in-scope set. Then we must look up in the in-scope set when we find the occurrence of x. * The requirement to look up the Id in the in-scope set means that we must NOT take no-op short cut when the IdSubst is empty. We must still look up every Id in the in-scope set. 
* (However, we don't need to do so for expressions found in the IdSubst itself, whose range is assumed to be correct wrt the in-scope set.) Why do we make a different choice for the IdSubstEnv than the TvSubstEnv and CvSubstEnv? * For Ids, we change the IdInfo all the time (e.g. deleting the unfolding), and adding it back later, so using the TyVar convention would entail extending the substitution almost all the time * The simplifier wants to look up in the in-scope set anyway, in case it can see a better unfolding from an enclosing case expression * For TyVars, only coercion variables can possibly change, and they are easy to spot -} ---------------------------- -- We keep GHC.Core.Subst separate from GHC.Core.TyCo.Subst to avoid creating -- circular dependencies. Functions in this file that don't depend on -- the definition of CoreExpr can be moved to GHC.Core.TyCo.Subst, as long -- as it does not require importing too many additional hs-boot files and -- cause a significant drop in performance. 
-- | Add a substitution for an 'Id' to the 'Subst': you must ensure that the in-scope set is -- such that TyCoSubst Note [The substitution invariant] -- holds after extending the substitution like this extendIdSubst :: Subst -> Id -> CoreExpr -> Subst -- ToDo: add an ASSERT that fvs(subst-result) is already in the in-scope set extendIdSubst (Subst in_scope ids tvs cvs) v r = assertPpr (isNonCoVarId v) (ppr v $$ ppr r) $ Subst in_scope (extendVarEnv ids v r) tvs cvs extendIdSubstWithClone :: Subst -> Id -> Id -> Subst extendIdSubstWithClone (Subst in_scope ids tvs cvs) v v' = assertPpr (isNonCoVarId v) (ppr v $$ ppr v') $ Subst (extendInScopeSetSet in_scope new_in_scope) (extendVarEnv ids v (varToCoreExpr v')) tvs cvs where new_in_scope = tyCoVarsOfType (varType v') `extendVarSet` v' -- | Adds multiple 'Id' substitutions to the 'Subst': see also 'extendIdSubst' extendIdSubstList :: Subst -> [(Id, CoreExpr)] -> Subst extendIdSubstList (Subst in_scope ids tvs cvs) prs = assert (all (isNonCoVarId . fst) prs) $ Subst in_scope (extendVarEnvList ids prs) tvs cvs -- | Add a substitution appropriate to the thing being substituted -- (whether an expression, type, or coercion). See also -- 'extendIdSubst', 'extendTvSubst', 'extendCvSubst' extendSubst :: Subst -> Var -> CoreArg -> Subst extendSubst subst var arg = case arg of Type ty -> assert (isTyVar var) $ extendTvSubst subst var ty Coercion co -> assert (isCoVar var) $ extendCvSubst subst var co _ -> assert (isId var) $ extendIdSubst subst var arg extendSubstWithVar :: Subst -> Var -> Var -> Subst extendSubstWithVar subst v1 v2 | isTyVar v1 = assert (isTyVar v2) $ extendTvSubst subst v1 (mkTyVarTy v2) | isCoVar v1 = assert (isCoVar v2) $ extendCvSubst subst v1 (mkCoVarCo v2) | otherwise = assert (isId v2) $ extendIdSubst subst v1 (Var v2) -- | Add a substitution as appropriate to each of the terms being -- substituted (whether expressions, types, or coercions). See also -- 'extendSubst'. 
extendSubstList :: Subst -> [(Var,CoreArg)] -> Subst extendSubstList subst [] = subst extendSubstList subst ((var,rhs):prs) = extendSubstList (extendSubst subst var rhs) prs -- | Find the substitution for an 'Id' in the 'Subst' -- The Id should not be a CoVar lookupIdSubst :: HasDebugCallStack => Subst -> Id -> CoreExpr lookupIdSubst (Subst in_scope ids _ _) v | assertPpr (isId v && not (isCoVar v)) (ppr v) not (isLocalId v) = Var v | Just e <- lookupVarEnv ids v = e | Just v' <- lookupInScope in_scope v = Var v' -- Vital! See Note [Extending the IdSubstEnv] -- If v isn't in the InScopeSet, we panic, because -- it's a bad bug and we really want to know | otherwise = pprPanic "lookupIdSubst" (ppr v $$ ppr in_scope) lookupIdSubst_maybe :: HasDebugCallStack => Subst -> Id -> Maybe CoreExpr -- Just look up in the substitution; do not check the in-scope set lookupIdSubst_maybe (Subst _ ids _ _) v = assertPpr (isId v && not (isCoVar v)) (ppr v) $ lookupVarEnv ids v delBndr :: Subst -> Var -> Subst delBndr (Subst in_scope ids tvs cvs) v | isCoVar v = Subst in_scope ids tvs (delVarEnv cvs v) | isTyVar v = Subst in_scope ids (delVarEnv tvs v) cvs | otherwise = Subst in_scope (delVarEnv ids v) tvs cvs delBndrs :: Subst -> [Var] -> Subst delBndrs (Subst in_scope ids tvs cvs) vs = Subst in_scope (delVarEnvList ids vs) (delVarEnvList tvs vs) (delVarEnvList cvs vs) -- Easiest thing is just delete all from all! -- | Simultaneously substitute for a bunch of variables -- No left-right shadowing -- ie the substitution for (\x \y. 
e) a1 a2 -- so neither x nor y scope over a1 a2 mkOpenSubst :: InScopeSet -> [(Var,CoreArg)] -> Subst mkOpenSubst in_scope pairs = Subst in_scope (mkVarEnv [(id,e) | (id, e) <- pairs, isId id]) (mkVarEnv [(tv,ty) | (tv, Type ty) <- pairs]) (mkVarEnv [(v,co) | (v, Coercion co) <- pairs]) ------------------------------ {- ************************************************************************ * * Substituting expressions * * ************************************************************************ -} substExprSC :: HasDebugCallStack => Subst -> CoreExpr -> CoreExpr -- Just like substExpr, but a no-op if the substitution is empty -- Note that this does /not/ replace occurrences of free vars with -- their canonical representatives in the in-scope set substExprSC subst orig_expr | isEmptySubst subst = orig_expr | otherwise = -- pprTrace "enter subst-expr" (doc $$ ppr orig_expr) $ substExpr subst orig_expr -- | substExpr applies a substitution to an entire 'CoreExpr'. Remember, -- you may only apply the substitution /once/: -- See Note [Substitutions apply only once] in "GHC.Core.TyCo.Subst" -- -- Do *not* attempt to short-cut in the case of an empty substitution! -- See Note [Extending the IdSubstEnv] substExpr :: HasDebugCallStack => Subst -> CoreExpr -> CoreExpr -- HasDebugCallStack so we can track failures in lookupIdSubst substExpr subst expr = go expr where go (Var v) = lookupIdSubst subst v go (Type ty) = Type (substTyUnchecked subst ty) go (Coercion co) = Coercion (substCo subst co) go (Lit lit) = Lit lit go (App fun arg) = App (go fun) (go arg) go (Tick tickish e) = mkTick (substTickish subst tickish) (go e) go (Cast e co) = Cast (go e) (substCo subst co) -- Do not optimise even identity coercions -- Reason: substitution applies to the LHS of RULES, and -- if you "optimise" an identity coercion, you may -- lose a binder. 
We optimise the LHS of rules at -- construction time go (Lam bndr body) = Lam bndr' (substExpr subst' body) where (subst', bndr') = substBndr subst bndr go (Let bind body) = Let bind' (substExpr subst' body) where (subst', bind') = substBind subst bind go (Case scrut bndr ty alts) = Case (go scrut) bndr' (substTyUnchecked subst ty) (map (go_alt subst') alts) where (subst', bndr') = substBndr subst bndr go_alt subst (Alt con bndrs rhs) = Alt con bndrs' (substExpr subst' rhs) where (subst', bndrs') = substBndrs subst bndrs -- | Apply a substitution to an entire 'CoreBind', additionally returning an updated 'Subst' -- that should be used by subsequent substitutions. substBind, substBindSC :: HasDebugCallStack => Subst -> CoreBind -> (Subst, CoreBind) substBindSC subst bind -- Short-cut if the substitution is empty | not (isEmptySubst subst) = substBind subst bind | otherwise = case bind of NonRec bndr rhs -> (subst', NonRec bndr' rhs) where (subst', bndr') = substBndr subst bndr Rec pairs -> (subst', Rec (bndrs' `zip` rhss')) where (bndrs, rhss) = unzip pairs (subst', bndrs') = substRecBndrs subst bndrs rhss' | isEmptySubst subst' = rhss | otherwise = map (substExpr subst') rhss substBind subst (NonRec bndr rhs) = (subst', NonRec bndr' (substExpr subst rhs)) where (subst', bndr') = substBndr subst bndr substBind subst (Rec pairs) = (subst', Rec (bndrs' `zip` rhss')) where (bndrs, rhss) = unzip pairs (subst', bndrs') = substRecBndrs subst bndrs rhss' = map (substExpr subst') rhss -- | De-shadowing the program is sometimes a useful pre-pass. It can be done simply -- by running over the bindings with an empty substitution, because substitution -- returns a result that has no-shadowing guaranteed. -- -- (Actually, within a single /type/ there might still be shadowing, because -- 'substTy' is a no-op for the empty substitution, but that's probably OK.) 
-- -- [Aug 09] This function is not used in GHC at the moment, but seems so -- short and simple that I'm going to leave it here deShadowBinds :: CoreProgram -> CoreProgram deShadowBinds binds = snd (mapAccumL substBind emptySubst binds) {- ************************************************************************ * * Substituting binders * * ************************************************************************ Remember that substBndr and friends are used when doing expression substitution only. Their only business is substitution, so they preserve all IdInfo (suitably substituted). For example, we *want* to preserve occ info in rules. -} -- | Substitutes a 'Var' for another one according to the 'Subst' given, returning -- the result and an updated 'Subst' that should be used by subsequent substitutions. -- 'IdInfo' is preserved by this process, although it is substituted into appropriately. substBndr :: Subst -> Var -> (Subst, Var) substBndr subst bndr | isTyVar bndr = substTyVarBndr subst bndr | isCoVar bndr = substCoVarBndr subst bndr | otherwise = substIdBndr (text "var-bndr") subst subst bndr -- | Applies 'substBndr' to a number of 'Var's, accumulating a new 'Subst' left-to-right substBndrs :: Traversable f => Subst -> f Var -> (Subst, f Var) substBndrs = mapAccumL substBndr {-# INLINE substBndrs #-} -- | Substitute in a mutually recursive group of 'Id's substRecBndrs :: Traversable f => Subst -> f Id -> (Subst, f Id) substRecBndrs subst bndrs = (new_subst, new_bndrs) where -- Here's the reason we need to pass rec_subst to subst_id (new_subst, new_bndrs) = mapAccumL (substIdBndr (text "rec-bndr") new_subst) subst bndrs {-# SPECIALIZE substRecBndrs :: Subst -> [Id] -> (Subst, [Id]) #-} {-# SPECIALIZE substRecBndrs :: Subst -> Identity Id -> (Subst, Identity Id) #-} substIdBndr :: SDoc -> Subst -- ^ Substitution to use for the IdInfo -> Subst -> Id -- ^ Substitution and Id to transform -> (Subst, Id) -- ^ Transformed pair -- NB: unfolding may be zapped 
substIdBndr _doc rec_subst subst@(Subst in_scope env tvs cvs) old_id = -- pprTrace "substIdBndr" (doc $$ ppr old_id $$ ppr in_scope) $ (Subst new_in_scope new_env tvs cvs, new_id) where id1 = uniqAway in_scope old_id -- id1 is cloned if necessary id2 | no_type_change = id1 | otherwise = updateIdTypeAndMult (substTyUnchecked subst) id1 old_ty = idType old_id old_w = idMult old_id no_type_change = (isEmptyVarEnv tvs && isEmptyVarEnv cvs) || (noFreeVarsOfType old_ty && noFreeVarsOfType old_w) -- new_id has the right IdInfo -- The lazy-set is because we're in a loop here, with -- rec_subst, when dealing with a mutually-recursive group !new_id = maybeModifyIdInfo mb_new_info id2 mb_new_info = substIdInfo rec_subst id2 (idInfo id2) -- NB: unfolding info may be zapped -- Extend the substitution if the unique has changed -- See the notes with substTyVarBndr for the delVarEnv !new_in_scope = in_scope `InScopeSet.extendInScopeSet` new_id -- Forcing new_in_scope improves T9675 by 1.7% !new_env | no_change = delVarEnv env old_id | otherwise = extendVarEnv env old_id (Var new_id) no_change = id1 == old_id -- See Note [Extending the IdSubstEnv] -- it's /not/ necessary to check mb_new_info and no_type_change {- Now a variant that unconditionally allocates a new unique. It also unconditionally zaps the OccInfo. -} -- | Very similar to 'substBndr', but it always allocates a new 'Unique' for -- each variable in its output. It substitutes the IdInfo though. 
-- Discards non-Stable unfoldings cloneIdBndr :: Subst -> UniqSupply -> Id -> (Subst, Id) cloneIdBndr subst us old_id = clone_id subst subst (old_id, uniqFromSupply us) -- | Applies 'cloneIdBndr' to a number of 'Id's, accumulating a final -- substitution from left to right -- Discards non-Stable unfoldings cloneIdBndrs :: Subst -> UniqSupply -> [Id] -> (Subst, [Id]) cloneIdBndrs subst us ids = mapAccumL (clone_id subst) subst (ids `zip` uniqsFromSupply us) cloneBndrs :: MonadUnique m => Subst -> [Var] -> m (Subst, [Var]) -- Works for all kinds of variables (typically case binders) -- not just Ids cloneBndrs subst vs = do us <- getUniquesM pure $ mapAccumL (\subst (v, u) -> cloneBndr subst u v) subst (vs `zip` us) cloneBndr :: Subst -> Unique -> Var -> (Subst, Var) cloneBndr subst uniq v | isTyVar v = cloneTyVarBndr subst v uniq | otherwise = clone_id subst subst (v,uniq) -- Works for coercion variables too -- | Clone a mutually recursive group of 'Id's cloneRecIdBndrs :: MonadUnique m => Subst -> [Id] -> m (Subst, [Id]) cloneRecIdBndrs subst ids = do us <- getUniquesM let (subst', ids') = mapAccumL (clone_id subst') subst (ids `zip` us) pure (subst', ids') -- Just like substIdBndr, except that it always makes a new unique -- It is given the unique to use -- Discards non-Stable unfoldings clone_id :: Subst -- Substitution for the IdInfo -> Subst -> (Id, Unique) -- Substitution and Id to transform -> (Subst, Id) -- Transformed pair clone_id rec_subst subst@(Subst in_scope idvs tvs cvs) (old_id, uniq) = (Subst new_in_scope new_idvs tvs new_cvs, new_id) where id1 = setVarUnique old_id uniq id2 = substIdType subst id1 !new_id = maybeModifyIdInfo (substIdInfo rec_subst id2 (idInfo old_id)) id2 !new_in_scope = in_scope `InScopeSet.extendInScopeSet` new_id -- Forcing new_in_scope improves T9675 by 1.7% (!new_idvs, !new_cvs) | isCoVar old_id = (idvs, extendVarEnv cvs old_id (mkCoVarCo new_id)) | otherwise = (extendVarEnv idvs old_id (Var new_id), cvs) {- 
************************************************************************ * * Types and Coercions * * ************************************************************************ -} {- ************************************************************************ * * \section{IdInfo substitution} * * ************************************************************************ -} substIdType :: Subst -> Id -> Id substIdType subst@(Subst _ _ tv_env cv_env) id | (isEmptyVarEnv tv_env && isEmptyVarEnv cv_env) || (noFreeVarsOfType old_ty && noFreeVarsOfType old_w) = id | otherwise = updateIdTypeAndMult (substTyUnchecked subst) id -- The tyCoVarsOfType is cheaper than it looks -- because we cache the free tyvars of the type -- in a Note in the id's type itself where old_ty = idType id old_w = varMult id ------------------ -- | Substitute into some 'IdInfo' with regard to the supplied new 'Id'. -- Discards unfoldings, unless they are Stable substIdInfo :: Subst -> Id -> IdInfo -> Maybe IdInfo substIdInfo subst new_id info | nothing_to_do = Nothing | otherwise = Just (info `setRuleInfo` substRuleInfo subst new_id old_rules `setUnfoldingInfo` substUnfolding subst old_unf) where old_rules = ruleInfo info old_unf = realUnfoldingInfo info nothing_to_do = isEmptyRuleInfo old_rules && not (hasCoreUnfolding old_unf) ------------------ -- | Substitutes for the 'Id's within an unfolding -- NB: substUnfolding /discards/ any unfolding without -- without a Stable source. 
This is usually what we want, -- but it may be a bit unexpected substUnfolding, substUnfoldingSC :: Subst -> Unfolding -> Unfolding -- Seq'ing on the returned Unfolding is enough to cause -- all the substitutions to happen completely substUnfoldingSC subst unf -- Short-cut version | isEmptySubst subst = unf | otherwise = substUnfolding subst unf substUnfolding subst df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) = df { df_bndrs = bndrs', df_args = args' } where (subst',bndrs') = substBndrs subst bndrs args' = map (substExpr subst') args substUnfolding subst unf@(CoreUnfolding { uf_tmpl = tmpl, uf_src = src }) -- Retain stable unfoldings | not (isStableSource src) -- Zap an unstable unfolding, to save substitution work = NoUnfolding | otherwise -- But keep a stable one! = seqExpr new_tmpl `seq` unf { uf_tmpl = new_tmpl } where new_tmpl = substExpr subst tmpl substUnfolding _ unf = unf -- NoUnfolding, OtherCon ------------------ substIdOcc :: Subst -> Id -> Id -- These Ids should not be substituted to non-Ids substIdOcc subst v = case lookupIdSubst subst v of Var v' -> v' other -> pprPanic "substIdOcc" (vcat [ppr v <+> ppr other, ppr subst]) ------------------ -- | Substitutes for the 'Id's within the 'RuleInfo' given the new function 'Id' substRuleInfo :: Subst -> Id -> RuleInfo -> RuleInfo substRuleInfo subst new_id (RuleInfo rules rhs_fvs) = RuleInfo (map (substRule subst subst_ru_fn) rules) (substDVarSet subst rhs_fvs) where subst_ru_fn = const (idName new_id) ------------------ substRulesForImportedIds :: Subst -> [CoreRule] -> [CoreRule] substRulesForImportedIds subst rules = map (substRule subst not_needed) rules where not_needed name = pprPanic "substRulesForImportedIds" (ppr name) ------------------ substRule :: Subst -> (Name -> Name) -> CoreRule -> CoreRule -- The subst_ru_fn argument is applied to substitute the ru_fn field -- of the rule: -- - Rules for *imported* Ids never change ru_fn -- - Rules for *local* Ids are in the IdInfo for that Id, 
-- and the ru_fn field is simply replaced by the new name -- of the Id substRule _ _ rule@(BuiltinRule {}) = rule substRule subst subst_ru_fn rule@(Rule { ru_bndrs = bndrs, ru_args = args , ru_fn = fn_name, ru_rhs = rhs , ru_local = is_local }) = rule { ru_bndrs = bndrs' , ru_fn = if is_local then subst_ru_fn fn_name else fn_name , ru_args = map (substExpr subst') args , ru_rhs = substExpr subst' rhs } -- Do NOT optimise the RHS (previously we did simplOptExpr here) -- See Note [Substitute lazily] where (subst', bndrs') = substBndrs subst bndrs ------------------ substDVarSet :: HasDebugCallStack => Subst -> DVarSet -> DVarSet substDVarSet subst@(Subst _ _ tv_env cv_env) fvs = mkDVarSet $ fst $ foldr subst_fv ([], emptyVarSet) $ dVarSetElems fvs where subst_fv :: Var -> ([Var], VarSet) -> ([Var], VarSet) subst_fv fv acc | isTyVar fv , let fv_ty = lookupVarEnv tv_env fv `orElse` mkTyVarTy fv = tyCoFVsOfType fv_ty (const True) emptyVarSet $! acc | isCoVar fv , let fv_co = lookupVarEnv cv_env fv `orElse` mkCoVarCo fv = tyCoFVsOfCo fv_co (const True) emptyVarSet $! acc | otherwise , let fv_expr = lookupIdSubst subst fv = exprFVs fv_expr (const True) emptyVarSet $! acc ------------------ -- | Drop free vars from the breakpoint if they have a non-variable substitution. substTickish :: Subst -> CoreTickish -> CoreTickish substTickish subst (Breakpoint ext n ids modl) = Breakpoint ext n (mapMaybe do_one ids) modl where do_one = getIdFromTrivialExpr_maybe . lookupIdSubst subst substTickish _subst other = other {- Note [Substitute lazily] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions that substitute over IdInfo must be pretty lazy, because they are knot-tied by substRecBndrs. One case in point was #10627 in which a rule for a function 'f' referred to 'f' (at a different type) on the RHS. But instead of just substituting in the rhs of the rule, we were calling simpleOptExpr, which looked at the idInfo for 'f'; result <>. 
In any case we don't need to optimise the RHS of rules, or unfoldings, because the simplifier will do that. Another place this went wrong was in `substRuleInfo`, which would immediately force the lazy call to substExpr, which led to an infinite loop (as reported by #20112). This time the call stack looked something like: * `substRecBndrs` * `substIdBndr` * `substIdInfo` * `substRuleInfo` * `substRule` * `substExpr` * `mkTick` * `isSaturatedConApp` * Look at `IdInfo` for thing we are currently substituting because the rule is attached to `transpose` and mentions it in the `RHS` of the rule. and the rule was {-# RULES "transpose/overlays1" forall xs. transpose (overlays1 xs) = overlays1 (fmap transpose xs) #-} This rule was attached to `transpose`, but also mentions itself in the RHS so we have to be careful to not force the `IdInfo` for transpose when dealing with the RHS of the rule. Note [substTickish] ~~~~~~~~~~~~~~~~~~~~~~ A Breakpoint contains a list of Ids. What happens if we ever want to substitute an expression for one of these Ids? First, we ensure that we only ever substitute trivial expressions for these Ids, by marking them as NoOccInfo in the occurrence analyser. Then, when substituting for the Id, we unwrap any type applications and abstractions to get back to an Id, with getIdFromTrivialExpr. Second, we have to ensure that we never try to substitute a literal for an Id in a breakpoint. We ensure this by never storing an Id with an unlifted type in a Breakpoint - see GHC.HsToCore.Ticks.mkTickish. Breakpoints can't handle free variables with unlifted types anyway. These measures are only reliable with unoptimized code. Since we can now enable optimizations for GHCi with @-fno-unoptimized-core-for-interpreter -O@, nontrivial expressions can be substituted, e.g. by specializations. Therefore we resort to discarding free variables from breakpoints when this situation occurs. 
-} {- Note [Worker inlining] ~~~~~~~~~~~~~~~~~~~~~~ A worker can get substituted away entirely. - it might be trivial - it might simply be very small We do not treat an InlWrapper as an 'occurrence' in the occurrence analyser, so it's possible that the worker is not even in scope any more. In all these cases we simply drop the special case, returning to InlVanilla. The WARN is just so I can see if it happens a lot. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Tidy.hs0000644000000000000000000004125207346545000020047 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1996-1998 This module contains "tidying" code for *nested* expressions, bindings, rules. The code for *top-level* bindings is in GHC.Iface.Tidy. -} module GHC.Core.Tidy ( tidyExpr, tidyRules, tidyCbvInfoTop, tidyBndrs ) where import GHC.Prelude import GHC.Core import GHC.Core.Type import GHC.Core.Seq ( seqUnfolding ) import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand ( zapDmdEnvSig, isStrUsedDmd ) import GHC.Core.Coercion ( tidyCo ) import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique (getUnique) import GHC.Types.Unique.FM import GHC.Types.Name hiding (tidyNameOcc) import GHC.Types.Name.Set import GHC.Types.SrcLoc import GHC.Types.Tickish import GHC.Data.Maybe import GHC.Utils.Misc import Data.List (mapAccumL) import GHC.Utils.Outputable import GHC.Types.RepType (typePrimRep) import GHC.Utils.Panic import GHC.Types.Basic (isMarkedCbv, CbvMark (..)) import GHC.Core.Utils (shouldUseCbvForId) {- ************************************************************************ * * \subsection{Tidying expressions, rules} * * ************************************************************************ -} tidyBind :: TidyEnv -> CoreBind -> (TidyEnv, CoreBind) tidyBind env (NonRec bndr rhs) = -- pprTrace "tidyBindNonRec" (ppr bndr) $ let cbv_bndr = (tidyCbvInfoLocal bndr rhs) (env', bndr') = tidyLetBndr env env cbv_bndr tidy_rhs = 
(tidyExpr env' rhs) in (env', NonRec bndr' tidy_rhs) tidyBind env (Rec prs) = -- pprTrace "tidyBindRec" (ppr $ map fst prs) $ let cbv_bndrs = map ((\(bnd,rhs) -> tidyCbvInfoLocal bnd rhs)) prs (_bndrs, rhss) = unzip prs (env', bndrs') = mapAccumL (tidyLetBndr env') env cbv_bndrs in map (tidyExpr env') rhss =: \ rhss' -> (env', Rec (zip bndrs' rhss')) -- Note [Attaching CBV Marks to ids] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- See Note [CBV Function Ids] for the *why*. -- Before tidy, we turn all worker functions into worker like ids. -- This way we can later tell if we can assume the existence of a wrapper. This also applies to -- specialized versions of functions generated by SpecConstr for which we, in a sense, -- consider the unspecialized version to be the wrapper. -- During tidy we take the demands on the arguments for these ids and compute -- CBV (call-by-value) semantics for each individual argument. -- The marks themselves then are put onto the function id itself. -- This means the code generator can get the full calling convention by only looking at the function -- itself without having to inspect the RHS. -- -- The actual logic is in computeCbvInfo and takes: -- * The function id -- * The functions rhs -- And gives us back the function annotated with the marks. -- We call it in: -- * tidyTopPair for top level bindings -- * tidyBind for local bindings. -- -- Not that we *have* to look at the untidied rhs. -- During tidying some knot-tying occurs which can blow up -- if we look at the post-tidy types of the arguments here. -- However we only care if the types are unlifted and that doesn't change during tidy. -- so we can just look at the untidied types. -- -- If the id is boot-exported we don't use a cbv calling convention via marks, -- as the boot file won't contain them. Which means code calling boot-exported -- ids might expect these ids to have a vanilla calling convention even if we -- determine a different one here. 
-- To be able to avoid this we pass a set of boot exported ids for this module around. -- For non top level ids we can skip this. Local ids are never boot-exported -- as boot files don't have unfoldings. So there this isn't a concern. -- See also Note [CBV Function Ids] -- See Note [CBV Function Ids] tidyCbvInfoTop :: HasDebugCallStack => NameSet -> Id -> CoreExpr -> Id tidyCbvInfoTop boot_exports id rhs -- Can't change calling convention for boot exported things | elemNameSet (idName id) boot_exports = id | otherwise = computeCbvInfo id rhs -- See Note [CBV Function Ids] tidyCbvInfoLocal :: HasDebugCallStack => Id -> CoreExpr -> Id tidyCbvInfoLocal id rhs = computeCbvInfo id rhs -- | For a binding we: -- * Look at the args -- * Mark any argument as call-by-value if: -- - It's argument to a worker and demanded strictly -- - Unless it's an unlifted type already -- * Update the id -- See Note [CBV Function Ids] -- See Note [Attaching CBV Marks to ids] computeCbvInfo :: HasCallStack => Id -- The function -> CoreExpr -- It's RHS -> Id -- computeCbvInfo fun_id rhs = fun_id computeCbvInfo fun_id rhs | is_wkr_like || isJoinPoint mb_join_id , valid_unlifted_worker val_args = -- pprTrace "computeCbvInfo" -- (text "fun" <+> ppr fun_id $$ -- text "arg_tys" <+> ppr (map idType val_args) $$ -- text "prim_rep" <+> ppr (map typePrimRep_maybe $ map idType val_args) $$ -- text "rrarg" <+> ppr (map isRuntimeVar val_args) $$ -- text "cbv_marks" <+> ppr cbv_marks $$ -- text "out_id" <+> ppr cbv_bndr $$ -- ppr rhs) cbv_bndr | otherwise = fun_id where mb_join_id = idJoinPointHood fun_id is_wkr_like = isWorkerLikeId fun_id val_args = filter isId lam_bndrs -- When computing CbvMarks, we limit the arity of join points to -- the JoinArity, because that's the arity we are going to use -- when calling it. There may be more lambdas than that on the RHS. 
lam_bndrs | JoinPoint join_arity <- mb_join_id = fst $ collectNBinders join_arity rhs | otherwise = fst $ collectBinders rhs cbv_marks = -- assert: CBV marks are only set during tidy so none should be present already. assertPpr (maybe True null $ idCbvMarks_maybe fun_id) (ppr fun_id <+> (ppr $ idCbvMarks_maybe fun_id) $$ ppr rhs) $ map mkMark val_args cbv_bndr | any isMarkedCbv cbv_marks = cbv_marks `seqList` setIdCbvMarks fun_id cbv_marks -- seqList: avoid retaining the original rhs | otherwise = -- pprTraceDebug "computeCbvInfo: Worker seems to take unboxed tuple/sum types!" -- (ppr fun_id <+> ppr rhs) asNonWorkerLikeId fun_id -- We don't set CBV marks on functions which take unboxed tuples or sums as -- arguments. Doing so would require us to compute the result of unarise -- here in order to properly determine argument positions at runtime. -- -- In practice this doesn't matter much. Most "interesting" functions will -- get a W/W split which will eliminate unboxed tuple arguments, and unboxed -- sums are rarely used. But we could change this in the future and support -- unboxed sums/tuples as well. 
valid_unlifted_worker args = -- pprTrace "valid_unlifted" (ppr fun_id $$ ppr args) $ all isSingleUnarisedArg args isSingleUnarisedArg v | isUnboxedSumType ty = False | isUnboxedTupleType ty = isSimplePrimRep (typePrimRep ty) | otherwise = isSimplePrimRep (typePrimRep ty) where ty = idType v isSimplePrimRep [] = True isSimplePrimRep [_] = True isSimplePrimRep _ = False mkMark arg | not $ shouldUseCbvForId arg = NotMarkedCbv -- We can only safely use cbv for strict arguments | (isStrUsedDmd (idDemandInfo arg)) , not (isDeadEndId fun_id) = MarkedCbv | otherwise = NotMarkedCbv ------------ Expressions -------------- tidyExpr :: TidyEnv -> CoreExpr -> CoreExpr tidyExpr env (Var v) = Var (tidyVarOcc env v) tidyExpr env (Type ty) = Type (tidyType env ty) tidyExpr env (Coercion co) = Coercion (tidyCo env co) tidyExpr _ (Lit lit) = Lit lit tidyExpr env (App f a) = App (tidyExpr env f) (tidyExpr env a) tidyExpr env (Tick t e) = Tick (tidyTickish env t) (tidyExpr env e) tidyExpr env (Cast e co) = Cast (tidyExpr env e) (tidyCo env co) tidyExpr env (Let b e) = tidyBind env b =: \ (env', b') -> Let b' (tidyExpr env' e) tidyExpr env (Case e b ty alts) = tidyBndr env b =: \ (env', b) -> Case (tidyExpr env e) b (tidyType env ty) (map (tidyAlt env') alts) tidyExpr env (Lam b e) = tidyBndr env b =: \ (env', b) -> Lam b (tidyExpr env' e) ------------ Case alternatives -------------- tidyAlt :: TidyEnv -> CoreAlt -> CoreAlt tidyAlt env (Alt con vs rhs) = tidyBndrs env vs =: \ (env', vs) -> (Alt con vs (tidyExpr env' rhs)) ------------ Tickish -------------- tidyTickish :: TidyEnv -> CoreTickish -> CoreTickish tidyTickish env (Breakpoint ext ix ids modl) = Breakpoint ext ix (map (tidyVarOcc env) ids) modl tidyTickish _ other_tickish = other_tickish ------------ Rules -------------- tidyRules :: TidyEnv -> [CoreRule] -> [CoreRule] tidyRules _ [] = [] tidyRules env (rule : rules) = tidyRule env rule =: \ rule -> tidyRules env rules =: \ rules -> (rule : rules) tidyRule :: TidyEnv -> 
CoreRule -> CoreRule tidyRule _ rule@(BuiltinRule {}) = rule tidyRule env rule@(Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs, ru_fn = fn, ru_rough = mb_ns }) = tidyBndrs env bndrs =: \ (env', bndrs) -> map (tidyExpr env') args =: \ args -> rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = tidyExpr env' rhs, ru_fn = tidyNameOcc env fn, ru_rough = map (fmap (tidyNameOcc env')) mb_ns } {- ************************************************************************ * * \subsection{Tidying non-top-level binders} * * ************************************************************************ -} tidyNameOcc :: TidyEnv -> Name -> Name -- In rules and instances, we have Names, and we must tidy them too -- Fortunately, we can lookup in the VarEnv with a name tidyNameOcc (_, var_env) n = case lookupUFM_Directly var_env (getUnique n) of Nothing -> n Just v -> idName v tidyVarOcc :: TidyEnv -> Var -> Var tidyVarOcc (_, var_env) v = lookupVarEnv var_env v `orElse` v -- tidyBndr is used for lambda and case binders tidyBndr :: TidyEnv -> Var -> (TidyEnv, Var) tidyBndr env var | isTyCoVar var = tidyVarBndr env var | otherwise = tidyIdBndr env var tidyBndrs :: TidyEnv -> [Var] -> (TidyEnv, [Var]) tidyBndrs env vars = mapAccumL tidyBndr env vars -- Non-top-level variables, not covars tidyIdBndr :: TidyEnv -> Id -> (TidyEnv, Id) tidyIdBndr env@(tidy_env, var_env) id = -- Do this pattern match strictly, otherwise we end up holding on to -- stuff in the OccName. 
case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') -> let -- Give the Id a fresh print-name, *and* rename its type -- The SrcLoc isn't important now, -- though we could extract it from the Id -- ty' = tidyType env (idType id) mult' = tidyType env (idMult id) name' = mkInternalName (idUnique id) occ' noSrcSpan id' = mkLocalIdWithInfo name' mult' ty' new_info var_env' = extendVarEnv var_env id id' -- Note [Tidy IdInfo] new_info = vanillaIdInfo `setOccInfo` occInfo old_info `setUnfoldingInfo` new_unf -- see Note [Preserve OneShotInfo] `setOneShotInfo` oneShotInfo old_info old_info = idInfo id old_unf = realUnfoldingInfo old_info new_unf = trimUnfolding old_unf -- See Note [Preserve evaluatedness] in ((tidy_env', var_env'), id') } tidyLetBndr :: TidyEnv -- Knot-tied version for unfoldings -> TidyEnv -- The one to extend -> Id -> (TidyEnv, Id) -- Used for local (non-top-level) let(rec)s -- Just like tidyIdBndr above, but with more IdInfo tidyLetBndr rec_tidy_env env@(tidy_env, var_env) id = case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') -> let ty' = tidyType env (idType id) mult' = tidyType env (idMult id) name' = mkInternalName (idUnique id) occ' noSrcSpan details = idDetails id id' = mkLocalVar details name' mult' ty' new_info var_env' = extendVarEnv var_env id id' -- Note [Tidy IdInfo] -- We need to keep around any interesting strictness and -- demand info because later on we may need to use it when -- converting to A-normal form. -- eg. -- f (g x), where f is strict in its argument, will be converted -- into case (g x) of z -> f z by CorePrep, but only if f still -- has its strictness info. -- -- Similarly for the demand info - on a let binder, this tells -- CorePrep to turn the let into a case. -- But: Remove the usage demand here -- (See Note [Zapping DmdEnv after Demand Analyzer] in GHC.Core.Opt.WorkWrap) -- -- Similarly arity info for eta expansion in CorePrep -- Don't attempt to recompute arity here; this is just tidying! 
-- Trying to do so led to #17294 -- -- Set inline-prag info so that we preserve it across -- separate compilation boundaries old_info = idInfo id new_info = vanillaIdInfo `setOccInfo` occInfo old_info `setArityInfo` arityInfo old_info `setDmdSigInfo` zapDmdEnvSig (dmdSigInfo old_info) `setDemandInfo` demandInfo old_info `setInlinePragInfo` inlinePragInfo old_info `setUnfoldingInfo` new_unf old_unf = realUnfoldingInfo old_info new_unf = tidyNestedUnfolding rec_tidy_env old_unf in ((tidy_env', var_env'), id') } ------------ Unfolding -------------- tidyNestedUnfolding :: TidyEnv -> Unfolding -> Unfolding tidyNestedUnfolding _ NoUnfolding = NoUnfolding tidyNestedUnfolding _ BootUnfolding = BootUnfolding tidyNestedUnfolding _ (OtherCon {}) = evaldUnfolding tidyNestedUnfolding tidy_env df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) = df { df_bndrs = bndrs', df_args = map (tidyExpr tidy_env') args } where (tidy_env', bndrs') = tidyBndrs tidy_env bndrs tidyNestedUnfolding tidy_env unf@(CoreUnfolding { uf_tmpl = unf_rhs, uf_src = src, uf_cache = cache }) | isStableSource src = seqIt $ unf { uf_tmpl = tidyExpr tidy_env unf_rhs } -- Preserves OccInfo -- This seqIt avoids a space leak: otherwise the uf_cache -- field may retain a reference to the pre-tidied -- expression forever (GHC.CoreToIface doesn't look at -- them) -- Discard unstable unfoldings, but see Note [Preserve evaluatedness] | uf_is_value cache = evaldUnfolding | otherwise = noUnfolding where seqIt unf = seqUnfolding unf `seq` unf {- Note [Tidy IdInfo] ~~~~~~~~~~~~~~~~~~ All nested Ids now have the same IdInfo, namely vanillaIdInfo, which should save some space; except that we preserve occurrence info for two reasons: (a) To make printing tidy core nicer (b) Because we tidy RULES and unfoldings, which may then propagate via --make into the compilation of the next module, and we want the benefit of that occurrence analysis when we use the rule or or inline the function. 
In particular, it's vital not to lose loop-breaker info, else we get an infinite inlining loop Note that tidyLetBndr puts more IdInfo back. Note [Preserve evaluatedness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT !Bool ....(case v of MkT y -> let z# = case y of True -> 1# False -> 2# in ...) The z# binding is ok because the RHS is ok-for-speculation, but Lint will complain unless it can *see* that. So we preserve the evaluated-ness on 'y' in tidyBndr. (Another alternative would be to tidy unboxed lets into cases, but that seems more indirect and surprising.) Note [Preserve OneShotInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We keep the OneShotInfo because we want it to propagate into the interface. Not all OneShotInfo is determined by a compiler analysis; some is added by a call of GHC.Exts.oneShot, which is then discarded before the end of the optimisation pipeline, leaving only the OneShotInfo on the lambda. Hence we must preserve this info in inlinings. See Note [oneShot magic] in GHC.Types.Id.Make. This applies to lambda binders only, hence it is stored in IfaceLamBndr. 
-} (=:) :: a -> (a -> b) -> b m =: k = m `seq` k m ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/0000755000000000000000000000000007346545000017454 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Compare.hs0000644000000000000000000010520407346545000021400 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1998 {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE MagicHash #-} -- | Type equality and comparison module GHC.Core.TyCo.Compare ( -- * Type equality eqType, eqTypeIgnoringMultiplicity, eqTypeX, eqTypes, eqVarBndrs, pickyEqType, tcEqType, tcEqKind, tcEqTypeNoKindCheck, tcEqTyConApps, mayLookIdentical, -- * Type comparison nonDetCmpType, -- * Visiblity comparision eqForAllVis, cmpForAllVis ) where import GHC.Prelude import GHC.Core.Type( typeKind, coreView, tcSplitAppTyNoView_maybe, splitAppTyNoView_maybe , isLevityTy, isRuntimeRepTy, isMultiplicityTy ) import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Core.TyCon import GHC.Core.Multiplicity( MultiplicityFlag(..) ) import GHC.Types.Var import GHC.Types.Unique import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Base (reallyUnsafePtrEquality#) import qualified Data.Semigroup as S {- GHC.Core.TyCo.Compare overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module implements type equality and comparison It uses a few functions from GHC.Core.Type, notably `typeKind`, so it currently sits "on top of" GHC.Core.Type. -} {- ********************************************************************* * * Type equality We don't use (==) from class Eq, partly so that we know where type equality is called, and partly because there are multiple variants. 
* * ********************************************************************* -} {- Note [Computing equality on types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module implements type equality, notably `eqType`. This is "definitional equality" or just "equality" for short. There are several places within GHC that depend on the precise choice of definitional equality used. If we change that definition, all these places must be updated. This Note merely serves as a place for all these places to refer to, so searching for references to this Note will find every place that needs to be updated. * See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep. * See Historical Note [Typechecker equality vs definitional equality] below Note [Casts and coercions in type comparision] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As (EQTYPE) in Note [Non-trivial definitional equality] says, our general plan, implemented by `fullEq`, is: (1) ignore both casts and coercions when comparing types, (2) instead, compare the /kinds/ of the two types, as well as the types themselves If possible we want to avoid step (2), comparing the kinds; doing so involves calling `typeKind` and doing another comparision. When can we avoid doing so? Answer: we can certainly avoid doing so if the types we are comparing have no casts or coercions. But we can do better. Consider eqType (TyConApp T [s1, ..., sn]) (TyConApp T [t1, .., tn]) We are going to call (eqType s1 t1), (eqType s2 t2) etc. The kinds of `s1` and `t1` must be equal, because these TyConApps are well-kinded, and both TyConApps are headed by the same T. So the first recursive call to `eqType` certainly doesn't need to check kinds. If that call returns False, we stop. Otherwise, we know that `s1` and `t1` are themselves equal (not just their kinds). This makes the kinds of `s2` and `t2` to be equal, because those kinds come from the kind of T instantiated with `s1` and `t1` -- which are the same. 
Thus we do not need to check the kinds of `s2` and `t2`. By induction, we don't need to check the kinds of *any* of the types in a TyConApp, and we also do not need to check the kinds of the TyConApps themselves. Conclusion: * casts and coercions under a TyConApp don't matter -- even including type synonyms * In step (2), use `hasCasts` to tell if there are any casts to worry about. It does not look very deep, because TyConApps and FunTys are so common, and it doesn't allocate. The only recursive cases are AppTy and ForAllTy. Alternative implementation. Instead of `hasCasts`, we could make the generic_eq_type function return data EqResult = NotEq | EqWithNoCasts | EqWithCasts Practically free; but stylistically I prefer useing `hasCasts`: * `generic_eq_type` can just uses familiar booleans * There is a lot more branching with the three-value variant. * It separates concerns. No need to think about cast-tracking when doing the equality comparison. * Indeed sometimes we omit the kind check unconditionally, so tracking it is just wasted work. I did try both; there was no perceptible perf difference so I chose `hasCasts` version. Note [Equality on AppTys] ~~~~~~~~~~~~~~~~~~~~~~~~~ In our cast-ignoring equality, we want to say that the following two are equal: (Maybe |> co) (Int |> co') ~? Maybe Int But the left is an AppTy while the right is a TyConApp. The solution is to use splitAppTyNoView_maybe to break up the TyConApp into its pieces and then continue. Easy to do, but also easy to forget to do. Note [Comparing type synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the task of testing equality between two 'Type's of the form TyConApp tc tys1 = TyConApp tc tys2 where `tc` is a type synonym. A naive way to perform this comparison these would first expand the synonym and then compare the resulting expansions. However, this is obviously wasteful and the RHS of `tc` may be large. We'd prefer to compare `tys1 = tys2`. When is that sound? 
Precisely when the synonym is not /forgetful/; that is, all its type variables appear in its RHS -- see `GHC.Core.TyCon.isForgetfulSynTyCon`. Of course, if we find that the TyCons are *not* equal then we still need to perform the expansion as their RHSs may still be equal. This works fine for /equality/, but not for /comparison/. Consider type S a b = (b, a) Now consider S Int Bool `compare` S Char Char The ordering may depend on whether we expand the synonym or not, and we don't want the result to depend on that. So for comparison we stick to /nullary/ synonyms only, which is still useful. We perform this optimisation in a number of places: * GHC.Core.TyCo.Compare.eqType (works for non-nullary synonyms) * GHC.Core.Map.TYpe.eqDeBruijnType (works for non-nullary synonyms) * GHC.Core.Types.nonDetCmpType (nullary only) This optimisation is especially helpful for the ubiquitous GHC.Types.Type, since GHC prefers to use the type synonym over @TYPE 'LiftedRep@ applications whenever possible. See Note [Using synonyms to compress types] in GHC.Core.Type for details. Currently-missed opportunity (#25009): * In the case of forgetful synonyms, we could still compare the args, pairwise, and then compare the RHS's with a suitably extended RnEnv2. That would avoid comparing the same arg repeatedly. e.g. type S a b = (a,a) Compare S y ~ S y If we expand, we end up compare with itself twice. But since forgetful synonyms are rare, we have not tried this. Note [Type comparisons using object pointer comparisons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Quite often we substitute the type from a definition site into occurrences without a change. This means for code like: \x -> (x,x,x) The type of every `x` will often be represented by a single object in the heap. We can take advantage of this by shortcutting the equality check if two types are represented by the same pointer under the hood. In some cases this reduces compiler allocations by ~2%. 
See Note [Pointer comparison operations] in GHC.Builtin.primops.txt.pp Note [Respecting multiplicity when comparing types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally speaking, we respect multiplicities (i.e. the linear part of the type system) when comparing types. Doing so is of course crucial during typechecking. But for reasons described in Note [Linting linearity] in GHC.Core.Lint, it is hard to ensure that Core is always type-correct when it comes to linearity. So * `eqTypeIgnoringMultiplicity` provides a way to compare types that /ignores/ multiplicities * We use this multiplicity-blind comparison very occasionally, notably - in Core Lint: see Note [Linting linearity] in GHC.Core.Lint - in rule matching: see Note [Rewrite rules ignore multiplicities in FunTy] in GHC.Core.Unify -} tcEqKind :: HasDebugCallStack => Kind -> Kind -> Bool tcEqKind = tcEqType tcEqType :: HasDebugCallStack => Type -> Type -> Bool tcEqType = eqType -- | Just like 'tcEqType', but will return True for types of different kinds -- as long as their non-coercion structure is identical. tcEqTypeNoKindCheck :: Type -> Type -> Bool tcEqTypeNoKindCheck = eqTypeNoKindCheck -- | Check whether two TyConApps are the same; if the number of arguments -- are different, just checks the common prefix of arguments. 
tcEqTyConApps :: TyCon -> [Type] -> TyCon -> [Type] -> Bool tcEqTyConApps tc1 args1 tc2 args2 = tc1 == tc2 && and (zipWith tcEqTypeNoKindCheck args1 args2) -- No kind check necessary: if both arguments are well typed, then -- any difference in the kinds of later arguments would show up -- as differences in earlier (dependent) arguments -- | Type equality on lists of types, looking through type synonyms eqTypes :: [Type] -> [Type] -> Bool eqTypes [] [] = True eqTypes (t1:ts1) (t2:ts2) = eqType t1 t2 && eqTypes ts1 ts2 eqTypes _ _ = False eqVarBndrs :: HasCallStack => RnEnv2 -> [Var] -> [Var] -> Maybe RnEnv2 -- Check that the var lists are the same length -- and have matching kinds; if so, extend the RnEnv2 -- Returns Nothing if they don't match eqVarBndrs env [] [] = Just env eqVarBndrs env (tv1:tvs1) (tv2:tvs2) | eqTypeX env (varType tv1) (varType tv2) = eqVarBndrs (rnBndr2 env tv1 tv2) tvs1 tvs2 eqVarBndrs _ _ _= Nothing initRnEnv :: Type -> Type -> RnEnv2 initRnEnv ta tb = mkRnEnv2 $ mkInScopeSet $ tyCoVarsOfType ta `unionVarSet` tyCoVarsOfType tb eqTypeNoKindCheck :: Type -> Type -> Bool eqTypeNoKindCheck ty1 ty2 = eq_type_expand_respect ty1 ty2 -- | Type equality comparing both visible and invisible arguments, -- expanding synonyms and respecting multiplicities. eqType :: HasCallStack => Type -> Type -> Bool eqType ta tb = fullEq eq_type_expand_respect ta tb -- | Compare types with respect to a (presumably) non-empty 'RnEnv2'. eqTypeX :: HasCallStack => RnEnv2 -> Type -> Type -> Bool eqTypeX env ta tb = fullEq (eq_type_expand_respect_x env) ta tb eqTypeIgnoringMultiplicity :: Type -> Type -> Bool -- See Note [Respecting multiplicity when comparing types] eqTypeIgnoringMultiplicity ta tb = fullEq eq_type_expand_ignore ta tb -- | Like 'pickyEqTypeVis', but returns a Bool for convenience pickyEqType :: Type -> Type -> Bool -- Check when two types _look_ the same, _including_ synonyms. 
-- So (pickyEqType String [Char]) returns False
-- This ignores kinds and coercions, because this is used only for printing.
pickyEqType ta tb = eq_type_keep_respect ta tb

{- Note [Specialising type equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The type equality predicates in Type are hit pretty hard by GHC.
Consequently we take pains to ensure that these paths are compiled to
efficient, minimally-allocating code.

Plan:

* The main workhorse is `inline_generic_eq_type_x`. It is /non-recursive/
  and is marked INLINE.

* `inline_generic_eq_type_x` has various parameters that control what it
  does:
    * syn_flag::SynFlag             whether type synonyms are expanded or kept.
    * mult_flag::MultiplicityFlag   whether multiplicities are ignored or
                                    respected
    * mb_env::Maybe RnEnv2          an optional RnEnv2.

* `inline_generic_eq_type_x` has a handful of call sites, namely the ones
  in `eq_type_expand_respect`, `eq_type_expand_respect_x` etc.  It inlines
  at all these sites, specialising to the data values passed for the
  control parameters.

* All /other/ calls to `inline_generic_eq_type_x` go via
      generic_eq_type_x = inline_generic_eq_type_x
      {-# NOINLINE generic_eq_type_x #-}
  The idea is that all calls to `generic_eq_type_x` are specialised by the
  RULES, so this NOINLINE version is seldom, if ever, actually called.

* For each specialised copy of `inline_generic_eq_type_x`, there is a
  corresponding rewrite RULE that rewrites a call to (generic_eq_type_x
  args) into the appropriate specialised version.  See #19226.
-}

-- | This flag controls whether we expand synonyms during comparison
data SynFlag = ExpandSynonyms | KeepSynonyms

-- The six specialised entry points.  Each one fixes the three control
-- parameters of inline_generic_eq_type_x so that the INLINE can
-- specialise; see Note [Specialising type equality].
eq_type_expand_respect, eq_type_expand_ignore, eq_type_keep_respect
  :: Type -> Type -> Bool
eq_type_expand_respect_x, eq_type_expand_ignore_x, eq_type_keep_respect_x
  :: RnEnv2 -> Type -> Type -> Bool

eq_type_expand_respect       = inline_generic_eq_type_x ExpandSynonyms RespectMultiplicities Nothing
eq_type_expand_respect_x env = inline_generic_eq_type_x ExpandSynonyms RespectMultiplicities (Just env)
eq_type_expand_ignore        = inline_generic_eq_type_x ExpandSynonyms IgnoreMultiplicities  Nothing
eq_type_expand_ignore_x env  = inline_generic_eq_type_x ExpandSynonyms IgnoreMultiplicities  (Just env)
eq_type_keep_respect         = inline_generic_eq_type_x KeepSynonyms   RespectMultiplicities Nothing
eq_type_keep_respect_x env   = inline_generic_eq_type_x KeepSynonyms   RespectMultiplicities (Just env)

{-# RULES
"eqType1" generic_eq_type_x ExpandSynonyms RespectMultiplicities Nothing
            = eq_type_expand_respect
"eqType2" forall env. generic_eq_type_x ExpandSynonyms RespectMultiplicities (Just env)
            = eq_type_expand_respect_x env
"eqType3" generic_eq_type_x ExpandSynonyms IgnoreMultiplicities Nothing
            = eq_type_expand_ignore
"eqType4" forall env. generic_eq_type_x ExpandSynonyms IgnoreMultiplicities (Just env)
            = eq_type_expand_ignore_x env
"eqType5" generic_eq_type_x KeepSynonyms RespectMultiplicities Nothing
            = eq_type_keep_respect
"eqType6" forall env. generic_eq_type_x KeepSynonyms RespectMultiplicities (Just env)
            = eq_type_keep_respect_x env
 #-}

---------------------------------------------------------------

-- | Real worker for 'eqType'. No kind check!
-- Inline it at the (handful of local) call sites
-- The "generic" bit refers to the flag parameterisation
-- See Note [Specialising type equality].
generic_eq_type_x, inline_generic_eq_type_x
  :: SynFlag -> MultiplicityFlag -> Maybe RnEnv2 -> Type -> Type -> Bool

{-# NOINLINE generic_eq_type_x #-}
generic_eq_type_x = inline_generic_eq_type_x
-- See Note [Computing equality on types] in Type

{-# INLINE inline_generic_eq_type_x #-}
-- This non-recursive function can inline at its (few) call sites. The
-- recursion goes via generic_eq_type_x, which is the loop-breaker.
inline_generic_eq_type_x syn_flag mult_flag mb_env
  = \ t1 t2 -> t1 `seq` t2 `seq`
    let
      go = generic_eq_type_x syn_flag mult_flag mb_env
           -- Abbreviation for recursive calls

      gos []       []       = True
      gos (t1:ts1) (t2:ts2) = go t1 t2 && gos ts1 ts2
      gos _        _        = False
    in
    case (t1,t2) of
      _ | 1# <- reallyUnsafePtrEquality# t1 t2 -> True
      -- See Note [Type comparisons using object pointer comparisons]

      -- First try a TyConApp/TyConApp match /before/ expanding synonyms,
      -- so that (T a ~ T a) for a synonym T needn't expand at all.
      (TyConApp tc1 tys1, TyConApp tc2 tys2)
        | tc1 == tc2, not (isForgetfulSynTyCon tc1)
          -- See Note [Comparing type synonyms]
        -> gos tys1 tys2

      _ | ExpandSynonyms <- syn_flag, Just t1' <- coreView t1 -> go t1' t2
        | ExpandSynonyms <- syn_flag, Just t2' <- coreView t2 -> go t1 t2'

      (TyConApp tc1 ts1, TyConApp tc2 ts2)
        | tc1 == tc2 -> gos ts1 ts2
        | otherwise  -> False

      (TyVarTy tv1, TyVarTy tv2)
        -> case mb_env of
              Nothing  -> tv1 == tv2
              Just env -> rnOccL env tv1 == rnOccR env tv2

      (LitTy lit1, LitTy lit2) -> lit1 == lit2

      (CastTy t1' _, _) -> go t1' t2   -- Ignore casts
      (_, CastTy t2' _) -> go t1 t2'   -- Ignore casts

      (CoercionTy {}, CoercionTy {}) -> True   -- Ignore coercions

      -- Make sure we handle all FunTy cases since falling through to the
      -- AppTy case means that tcSplitAppTyNoView_maybe may see an unzonked
      -- kind variable, which causes things to blow up.
      -- See Note [Equality on FunTys] in GHC.Core.TyCo.Rep: we must check
      -- kinds here
      (FunTy _ w1 arg1 res1, FunTy _ w2 arg2 res2)
        -> fullEq go arg1 arg2
           && fullEq go res1 res2
           && (case mult_flag of
                 RespectMultiplicities -> go w1 w2
                 IgnoreMultiplicities  -> True)

      -- See Note [Equality on AppTys] in GHC.Core.Type
      (AppTy s1 t1', _)
        | Just (s2, t2') <- tcSplitAppTyNoView_maybe t2
        -> go s1 s2 && go t1' t2'
      (_, AppTy s2 t2')
        | Just (s1, t1') <- tcSplitAppTyNoView_maybe t1
        -> go s1 s2 && go t1' t2'

      (ForAllTy (Bndr tv1 vis1) body1, ForAllTy (Bndr tv2 vis2) body2)
        -> case mb_env of
             -- No env yet: build one covering both types' free vars and
             -- retry the same comparison with that env in hand
             Nothing -> generic_eq_type_x syn_flag mult_flag
                                          (Just (initRnEnv t1 t2)) t1 t2
             Just env
               | vis1 `eqForAllVis` vis2
                 -- See Note [ForAllTy and type equality]
               -> go (varType tv1) (varType tv2)   -- Always do kind-check
                  && generic_eq_type_x syn_flag mult_flag
                                       (Just (rnBndr2 env tv1 tv2)) body1 body2
               | otherwise -> False

      _ -> False

fullEq :: (Type -> Type -> Bool) -> Type -> Type -> Bool
-- Do "full equality" including the kind check
-- See Note [Casts and coercions in type comparision]
{-# INLINE fullEq #-}
fullEq eq ty1 ty2
  = case eq ty1 ty2 of
      False -> False
      True | hasCasts ty1 || hasCasts ty2
           -- Only when a cast/coercion is present can the kinds differ
           -> eq (typeKind ty1) (typeKind ty2)
           | otherwise
           -> True

hasCasts :: Type -> Bool
-- Fast, does not look deep, does not allocate
hasCasts (CastTy {})     = True
hasCasts (CoercionTy {}) = True
hasCasts (AppTy t1 t2)   = hasCasts t1 || hasCasts t2
hasCasts (ForAllTy _ ty) = hasCasts ty
hasCasts _               = False   -- TyVarTy, TyConApp, FunTy, LitTy

{- *********************************************************************
*                                                                      *
                Comparing ForAllTyFlags
*                                                                      *
********************************************************************* -}

-- | Do these denote the same level of visibility? 'Required'
-- arguments are visible, others are not. So this function
-- equates 'Specified' and 'Inferred'. Used for printing.
eqForAllVis :: ForAllTyFlag -> ForAllTyFlag -> Bool
-- See Note [ForAllTy and type equality]
eqForAllVis Required      Required      = True
eqForAllVis (Invisible _) (Invisible _) = True
eqForAllVis _             _             = False

-- | A total order on 'ForAllTyFlag's that is consistent with 'eqForAllVis':
-- 'Required' sorts before 'Invisible', and 'Specified' vs 'Inferred'
-- (the two 'Invisible' variants) are not distinguished.
cmpForAllVis :: ForAllTyFlag -> ForAllTyFlag -> Ordering
-- See Note [ForAllTy and type equality]
cmpForAllVis Required      Required       = EQ
cmpForAllVis Required      (Invisible {}) = LT
cmpForAllVis (Invisible _) Required       = GT
cmpForAllVis (Invisible _) (Invisible _)  = EQ

{- Note [ForAllTy and type equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we compare (ForAllTy (Bndr tv1 vis1) ty1)
         and    (ForAllTy (Bndr tv2 vis2) ty2)
what should we do about `vis1` vs `vis2`?

We had a long debate about this: see #22762 and GHC Proposal 558.  Here
is the conclusion.

* In Haskell, we really do want (forall a. ty) and (forall a -> ty) to be
  distinct types, not interchangeable.  The latter requires a type
  argument, but the former does not.  See GHC Proposal 558.

* We /really/ do not want the typechecker and Core to have different
  notions of equality.  That is, we don't want `tcEqType` and `eqType` to
  differ.  Why not?  Not so much because of code duplication but because
  it is virtually impossible to cleave the two apart.  Here is one
  particularly awkward code path:  The type checker calls `substTy`, which
  calls `mkAppTy`, which calls `mkCastTy`, which calls `isReflexiveCo`,
  which calls `eqType`.

* Moreover the resolution of the TYPE vs CONSTRAINT story was to make the
  typechecker and Core have a single notion of equality.

* So in GHC:
  - `tcEqType` and `eqType` implement the same equality
  - (forall a. ty) and (forall a -> ty) are distinct types in both Core
    and typechecker
  - That is, both `eqType` and `tcEqType` distinguish them.

* But /at representational role/ we can relate the types.  That is,
      (forall a.
ty) ~R (forall a -> ty) After all, since types are erased, they are represented the same way. See Note [ForAllCo] and the typing rule for ForAllCo given there * What about (forall a. ty) and (forall {a}. ty)? See Note [Comparing visibility]. Note [Comparing visibility] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are sure that we want to distinguish (forall a. ty) and (forall a -> ty); see Note [ForAllTy and type equality]. But we have /three/ settings for the ForAllTyFlag: * Specified: forall a. ty * Inferred: forall {a}. ty * Required: forall a -> ty We could (and perhaps should) distinguish all three. But for now we distinguish Required from Specified/Inferred, and ignore the distinction between Specified and Inferred. The answer doesn't matter too much, provided we are consistent. And we are consistent because we always compare ForAllTyFlags with * `eqForAllVis` * `cmpForAllVis`. (You can only really check this by inspecting all pattern matches on ForAllTyFlags.) So if we change the decision, we just need to change those functions. Why don't we distinguish all three? Should GHC type-check the following program (adapted from #15740)? {-# LANGUAGE PolyKinds, ... #-} data D a type family F :: forall k. k -> Type type instance F = D Due to the way F is declared, any instance of F must have a right-hand side whose kind is equal to `forall k. k -> Type`. The kind of D is `forall {k}. k -> Type`, which is very close, but technically uses distinct Core: ----------------------------------------------------------- | Source Haskell | Core | ----------------------------------------------------------- | forall k. <...> | ForAllTy (Bndr k Specified) (<...>) | | forall {k}. <...> | ForAllTy (Bndr k Inferred) (<...>) | ----------------------------------------------------------- We could deem these kinds to be unequal, but that would imply rejecting programs like the one above. 
Whether a kind variable binder ends up being specified or inferred can be
somewhat subtle, however, especially for kinds that aren't explicitly
written out in the source code (like in D above).

For now, we decide that the specified/inferred status of an invisible type
variable binder does not affect GHC's notion of equality.  That is, we
have the following:

  --------------------------------------------------
  | Type 1            | Type 2            | Equal? |
  --------------------|-----------------------------
  | forall k. <...>   | forall k. <...>   | Yes    |
  |                   | forall {k}. <...> | Yes    |
  |                   | forall k -> <...> | No     |
  --------------------------------------------------
  | forall {k}. <...> | forall k. <...>   | Yes    |
  |                   | forall {k}. <...> | Yes    |
  |                   | forall k -> <...> | No     |
  --------------------------------------------------
  | forall k -> <...> | forall k. <...>   | No     |
  |                   | forall {k}. <...> | No     |
  |                   | forall k -> <...> | Yes    |
  --------------------------------------------------

Examples: T16946, T15079.

Historical Note [Typechecker equality vs definitional equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This Note describes some history, in case there are vestiges of this
history lying around in the code.

Summary: prior to summer 2022, GHC had two notions of equality over Core
types.  But now there is only one: definitional equality, or just equality
for short.

The old setup was:

* Definitional equality, as implemented by GHC.Core.Type.eqType.
  See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep.

* Typechecker equality, as implemented by tcEqType.
  GHC.Tc.Solver.Equality.canonicaliseEquality also respects typechecker
  equality.

Typechecker equality implied definitional equality: if two types are equal
according to typechecker equality, then they are also equal according to
definitional equality.  The converse is not always true, as typechecker
equality is finer-grained than definitional equality in two places:

* Constraint vs Type.
Definitional equality equated Type and Constraint, but typechecker treats them as distinct types. * Unlike definitional equality, which does not care about the ForAllTyFlag of a ForAllTy, typechecker equality treats Required type variable binders as distinct from Invisible type variable binders. See Note [ForAllTy and type equality] ************************************************************************ * * Comparison for types Not so heavily used, less carefully optimised * * ************************************************************************ -- Now here comes the real worker Note [nonDetCmpType nondeterminism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nonDetCmpType is implemented in terms of nonDetCmpTypeX. nonDetCmpTypeX uses nonDetCmpTc which compares TyCons by their Unique value. Using Uniques for ordering leads to nondeterminism. We hit the same problem in the TyVarTy case, comparing type variables is nondeterministic, note the call to nonDetCmpVar in nonDetCmpTypeX. See Note [Unique Determinism] for more details. -} nonDetCmpType :: Type -> Type -> Ordering {-# INLINE nonDetCmpType #-} nonDetCmpType !t1 !t2 -- See Note [Type comparisons using object pointer comparisons] | 1# <- reallyUnsafePtrEquality# t1 t2 = EQ nonDetCmpType (TyConApp tc1 []) (TyConApp tc2 []) | tc1 == tc2 = EQ nonDetCmpType t1 t2 -- we know k1 and k2 have the same kind, because they both have kind *. = nonDetCmpTypeX rn_env t1 t2 where rn_env = mkRnEnv2 (mkInScopeSet (tyCoVarsOfTypes [t1, t2])) -- | An ordering relation between two 'Type's (known below as @t1 :: k1@ -- and @t2 :: k2@) data TypeOrdering = TLT -- ^ @t1 < t2@ | TEQ -- ^ @t1 ~ t2@ and there are no casts in either, -- therefore we can conclude @k1 ~ k2@ | TEQX -- ^ @t1 ~ t2@ yet one of the types contains a cast so -- they may differ in kind. 
| TGT -- ^ @t1 > t2@ deriving (Eq, Ord, Enum, Bounded) nonDetCmpTypeX :: RnEnv2 -> Type -> Type -> Ordering -- Main workhorse -- See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep -- See Note [Computing equality on types] -- Always respects multiplicities, unlike eqType nonDetCmpTypeX env orig_t1 orig_t2 = case go env orig_t1 orig_t2 of -- If there are casts then we also need to do a comparison of -- the kinds of the types being compared TEQX -> toOrdering $ go env k1 k2 ty_ordering -> toOrdering ty_ordering where k1 = typeKind orig_t1 k2 = typeKind orig_t2 toOrdering :: TypeOrdering -> Ordering toOrdering TLT = LT toOrdering TEQ = EQ toOrdering TEQX = EQ toOrdering TGT = GT liftOrdering :: Ordering -> TypeOrdering liftOrdering LT = TLT liftOrdering EQ = TEQ liftOrdering GT = TGT thenCmpTy :: TypeOrdering -> TypeOrdering -> TypeOrdering thenCmpTy TEQ rel = rel thenCmpTy TEQX rel = hasCast rel thenCmpTy rel _ = rel hasCast :: TypeOrdering -> TypeOrdering hasCast TEQ = TEQX hasCast rel = rel -- Returns both the resulting ordering relation between -- the two types and whether either contains a cast. 
go :: RnEnv2 -> Type -> Type -> TypeOrdering go _ (TyConApp tc1 []) (TyConApp tc2 []) | tc1 == tc2 = TEQ -- See Note [Comparing type synonyms] go env t1 t2 | Just t1' <- coreView t1 = go env t1' t2 | Just t2' <- coreView t2 = go env t1 t2' go env (TyVarTy tv1) (TyVarTy tv2) = liftOrdering $ rnOccL env tv1 `nonDetCmpVar` rnOccR env tv2 go env (ForAllTy (Bndr tv1 vis1) t1) (ForAllTy (Bndr tv2 vis2) t2) = liftOrdering (vis1 `cmpForAllVis` vis2) -- See Note [ForAllTy and type equality] `thenCmpTy` go env (varType tv1) (varType tv2) `thenCmpTy` go (rnBndr2 env tv1 tv2) t1 t2 -- See Note [Equality on AppTys] go env (AppTy s1 t1) ty2 | Just (s2, t2) <- splitAppTyNoView_maybe ty2 = go env s1 s2 `thenCmpTy` go env t1 t2 go env ty1 (AppTy s2 t2) | Just (s1, t1) <- splitAppTyNoView_maybe ty1 = go env s1 s2 `thenCmpTy` go env t1 t2 go env (FunTy _ w1 s1 t1) (FunTy _ w2 s2 t2) -- NB: nonDepCmpTypeX does the kind check requested by -- Note [Equality on FunTys] in GHC.Core.TyCo.Rep = liftOrdering (nonDetCmpTypeX env s1 s2 S.<> nonDetCmpTypeX env t1 t2) `thenCmpTy` go env w1 w2 -- Comparing multiplicities last because the test is usually true go env (TyConApp tc1 tys1) (TyConApp tc2 tys2) = liftOrdering (tc1 `nonDetCmpTc` tc2) `thenCmpTy` gos env tys1 tys2 go _ (LitTy l1) (LitTy l2) = liftOrdering (nonDetCmpTyLit l1 l2) go env (CastTy t1 _) t2 = hasCast $ go env t1 t2 go env t1 (CastTy t2 _) = hasCast $ go env t1 t2 go _ (CoercionTy {}) (CoercionTy {}) = TEQ -- Deal with the rest: TyVarTy < CoercionTy < AppTy < LitTy < TyConApp < ForAllTy go _ ty1 ty2 = liftOrdering $ (get_rank ty1) `compare` (get_rank ty2) where get_rank :: Type -> Int get_rank (CastTy {}) = pprPanic "nonDetCmpTypeX.get_rank" (ppr [ty1,ty2]) get_rank (TyVarTy {}) = 0 get_rank (CoercionTy {}) = 1 get_rank (AppTy {}) = 3 get_rank (LitTy {}) = 4 get_rank (TyConApp {}) = 5 get_rank (FunTy {}) = 6 get_rank (ForAllTy {}) = 7 gos :: RnEnv2 -> [Type] -> [Type] -> TypeOrdering gos _ [] [] = TEQ gos _ [] _ = TLT gos _ _ [] 
= TGT gos env (ty1:tys1) (ty2:tys2) = go env ty1 ty2 `thenCmpTy` gos env tys1 tys2 ------------- -- | Compare two 'TyCon's. -- See Note [nonDetCmpType nondeterminism] nonDetCmpTc :: TyCon -> TyCon -> Ordering nonDetCmpTc tc1 tc2 = u1 `nonDetCmpUnique` u2 where u1 = tyConUnique tc1 u2 = tyConUnique tc2 {- ********************************************************************* * * mayLookIdentical * * ********************************************************************* -} mayLookIdentical :: Type -> Type -> Bool -- | Returns True if the /visible/ part of the types -- might look equal, even if they are really unequal (in the invisible bits) -- -- This function is very similar to tc_eq_type but it is much more -- heuristic. Notably, it is always safe to return True, even with types -- that might (in truth) be unequal -- this affects error messages only -- (Originally this test was done by eqType with an extra flag, but the result -- was hard to understand.) mayLookIdentical orig_ty1 orig_ty2 = go orig_env orig_ty1 orig_ty2 where orig_env = mkRnEnv2 $ mkInScopeSet $ tyCoVarsOfTypes [orig_ty1, orig_ty2] go :: RnEnv2 -> Type -> Type -> Bool go env (TyConApp tc1 ts1) (TyConApp tc2 ts2) | tc1 == tc2, not (isForgetfulSynTyCon tc1) -- See Note [Comparing type synonyms] = gos env (tyConBinders tc1) ts1 ts2 go env t1 t2 | Just t1' <- coreView t1 = go env t1' t2 go env t1 t2 | Just t2' <- coreView t2 = go env t1 t2' go env (TyVarTy tv1) (TyVarTy tv2) = rnOccL env tv1 == rnOccR env tv2 go _ (LitTy lit1) (LitTy lit2) = lit1 == lit2 go env (CastTy t1 _) t2 = go env t1 t2 go env t1 (CastTy t2 _) = go env t1 t2 go _ (CoercionTy {}) (CoercionTy {}) = True go env (ForAllTy (Bndr tv1 vis1) ty1) (ForAllTy (Bndr tv2 vis2) ty2) = vis1 `eqForAllVis` vis2 -- See Note [ForAllTy and type equality] && go (rnBndr2 env tv1 tv2) ty1 ty2 -- Visible stuff only: ignore kinds of binders -- If we have (forall (r::RunTimeRep). ty1 ~ blah) then respond -- with True. 
Reason: the type pretty-printer defaults RuntimeRep -- foralls (see Ghc.Iface.Type.hideNonStandardTypes). That can make, -- say (forall r. TYPE r -> Type) into (Type -> Type), so it looks the -- same as a very different type (#24553). By responding True, we -- tell GHC (see calls of mayLookIdentical) to display without defaulting. -- See Note [Showing invisible bits of types in error messages] -- in GHC.Tc.Errors.Ppr go _ (ForAllTy b _) _ | isDefaultableBndr b = True go _ _ (ForAllTy b _) | isDefaultableBndr b = True go env (FunTy _ w1 arg1 res1) (FunTy _ w2 arg2 res2) = go env arg1 arg2 && go env res1 res2 && go env w1 w2 -- Visible stuff only: ignore agg kinds -- See Note [Equality on AppTys] in GHC.Core.Type go env (AppTy s1 t1) ty2 | Just (s2, t2) <- tcSplitAppTyNoView_maybe ty2 = go env s1 s2 && go env t1 t2 go env ty1 (AppTy s2 t2) | Just (s1, t1) <- tcSplitAppTyNoView_maybe ty1 = go env s1 s2 && go env t1 t2 go env (TyConApp tc1 ts1) (TyConApp tc2 ts2) = tc1 == tc2 && gos env (tyConBinders tc1) ts1 ts2 go _ _ _ = False gos :: RnEnv2 -> [TyConBinder] -> [Type] -> [Type] -> Bool gos _ _ [] [] = True gos env bs (t1:ts1) (t2:ts2) | (invisible, bs') <- case bs of [] -> (False, []) (b:bs) -> (isInvisibleTyConBinder b, bs) = (invisible || go env t1 t2) && gos env bs' ts1 ts2 gos _ _ _ _ = False isDefaultableBndr :: ForAllTyBinder -> Bool -- This function should line up with the defaulting done -- by GHC.Iface.Type.defaultIfaceTyVarsOfKind -- See Note [Showing invisible bits of types in error messages] -- in GHC.Tc.Errors.Ppr isDefaultableBndr (Bndr tv vis) = isInvisibleForAllTyFlag vis && is_defaultable (tyVarKind tv) where is_defaultable ki = isLevityTy ki || isRuntimeRepTy ki || isMultiplicityTy ki ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/FVs.hs0000644000000000000000000016104007346545000020510 0ustar0000000000000000{-# LANGUAGE MultiWayIf #-} module GHC.Core.TyCo.FVs ( shallowTyCoVarsOfType, shallowTyCoVarsOfTypes, tyCoVarsOfType, tyCoVarsOfTypes, 
tyCoVarsOfTypeDSet, tyCoVarsOfTypesDSet, tyCoFVsBndr, tyCoFVsVarBndr, tyCoFVsVarBndrs, tyCoFVsOfType, tyCoVarsOfTypeList, tyCoFVsOfTypes, tyCoVarsOfTypesList, deepTcvFolder, shallowTyCoVarsOfTyVarEnv, shallowTyCoVarsOfCoVarEnv, shallowTyCoVarsOfCo, shallowTyCoVarsOfCos, tyCoVarsOfCo, tyCoVarsOfCos, tyCoVarsOfMCo, coVarsOfType, coVarsOfTypes, coVarsOfCo, coVarsOfCos, tyCoVarsOfCoDSet, tyCoFVsOfCo, tyCoFVsOfCos, tyCoVarsOfCoList, coVarsOfCoDSet, coVarsOfCosDSet, almostDevoidCoVarOfCo, -- Injective free vars injectiveVarsOfType, injectiveVarsOfTypes, isInjectiveInType, invisibleVarsOfType, invisibleVarsOfTypes, -- Any and No Free vars anyFreeVarsOfType, anyFreeVarsOfTypes, anyFreeVarsOfCo, noFreeVarsOfType, noFreeVarsOfTypes, noFreeVarsOfCo, -- * Free type constructors tyConsOfType, tyConsOfTypes, -- * Free vars with visible/invisible separate visVarsOfTypes, visVarsOfType, -- * Occurrence-check expansion occCheckExpand, -- * Well-scoped free variables scopedSort, tyCoVarsOfTypeWellScoped, tyCoVarsOfTypesWellScoped, -- * Closing over kinds closeOverKindsDSet, closeOverKindsList, closeOverKinds, -- * Raw materials Endo(..), runTyCoVars ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.Type( partitionInvisibleTypes, coreView, rewriterView ) import {-# SOURCE #-} GHC.Core.Coercion( coercionLKind ) import GHC.Builtin.Types.Prim( funTyFlagTyCon ) import Data.Monoid as DM ( Any(..) 
) import GHC.Core.TyCo.Rep import GHC.Core.TyCon import GHC.Core.Coercion.Axiom( CoAxiomRule(..), BuiltInFamRewrite(..), coAxiomTyCon ) import GHC.Utils.FV import GHC.Types.Var import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.Pair import Data.Semigroup {- %************************************************************************ %* * Free variables of types and coercions %* * %************************************************************************ -} {- Note [Shallow and deep free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Definitions * Shallow free variables of a type: the variables affected by substitution. Specifically, the (TyVarTy tv) and (CoVar cv) that appear - In the type and coercions appearing in the type - In shallow free variables of the kind of a Forall binder but NOT in the kind of the /occurrences/ of a type variable. * Deep free variables of a type: shallow free variables, plus the deep free variables of the kinds of those variables. That is, deepFVs( t ) = closeOverKinds( shallowFVs( t ) ) Examples: Type Shallow Deep --------------------------------- (a : (k:Type)) {a} {a,k} forall (a:(k:Type)). a {k} {k} (a:k->Type) (b:k) {a,b} {a,b,k} -} {- Note [Free variables of types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The family of functions tyCoVarsOfType, tyCoVarsOfTypes etc, returns a VarSet that is closed over the types of its variables. More precisely, if S = tyCoVarsOfType( t ) and (a:k) is in S then tyCoVarsOftype( k ) is a subset of S Example: The tyCoVars of this ((a:* -> k) Int) is {a, k}. We could /not/ close over the kinds of the variable occurrences, and instead do so at call sites, but it seems that we always want to do so, so it's easiest to do it here. It turns out that getting the free variables of types is performance critical, so we profiled several versions, exploring different implementation strategies. 1. 
Baseline version: uses FV naively. Essentially: tyCoVarsOfType ty = fvVarSet $ tyCoFVsOfType ty This is not nice, because FV introduces some overhead to implement determinism, and through its "interesting var" function, neither of which we need here, so they are a complete waste. 2. UnionVarSet version: instead of reusing the FV-based code, we simply used VarSets directly, trying to avoid the overhead of FV. E.g.: -- FV version: tyCoFVsOfType (AppTy fun arg) a b c = (tyCoFVsOfType fun `unionFV` tyCoFVsOfType arg) a b c -- UnionVarSet version: tyCoVarsOfType (AppTy fun arg) = (tyCoVarsOfType fun `unionVarSet` tyCoVarsOfType arg) This looks deceptively similar, but while FV internally builds a list- and set-generating function, the VarSet functions manipulate sets directly, and the latter performs a lot worse than the naive FV version. 3. Accumulator-style VarSet version: this is what we use now. We do use VarSet as our data structure, but delegate the actual work to a new ty_co_vars_of_... family of functions, which use accumulator style and the "in-scope set" filter found in the internals of FV, but without the determinism overhead. See #14880. Note [Closing over free variable kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ tyCoVarsOfType and tyCoFVsOfType, while traversing a type, will also close over free variable kinds. In previous GHC versions, this happened naively: whenever we would encounter an occurrence of a free type variable, we would close over its kind. This, however is wrong for two reasons (see #14880): 1. Efficiency. If we have Proxy (a::k) -> Proxy (a::k) -> Proxy (a::k), then we don't want to have to traverse k more than once. 2. Correctness. Imagine we have forall k. b -> k, where b has kind k, for some k bound in an outer scope. If we look at b's kind inside the forall, we'll collect that k is free and then remove k from the set of free variables. This is plain wrong. We must instead compute that b is free and then conclude that b's kind is free. 
An obvious first approach is to move the closing-over-kinds from the occurrences of a type variable to after finding the free vars - however, this turns out to introduce performance regressions, and isn't even entirely correct. In fact, it isn't even important *when* we close over kinds; what matters is that we handle each type var exactly once, and that we do it in the right context. So the next approach we tried was to use the "in-scope set" part of FV or the equivalent argument in the accumulator-style `ty_co_vars_of_type` function, to say "don't bother with variables we have already closed over". This should work fine in theory, but the code is complicated and doesn't perform well. But there is a simpler way, which is implemented here. Consider the two points above: 1. Efficiency: we now have an accumulator, so the second time we encounter 'a', we'll ignore it, certainly not looking at its kind - this is why pre-checking set membership before inserting ends up not only being faster, but also being correct. 2. Correctness: we have an "in-scope set" (I think we should call it it a "bound-var set"), specifying variables that are bound by a forall in the type we are traversing; we simply ignore these variables, certainly not looking at their kind. So now consider: forall k. b -> k where b :: k->Type is free; but of course, it's a different k! When looking at b -> k we'll have k in the bound-var set. So we'll ignore the k. But suppose this is our first encounter with b; we want the free vars of its kind. But we want to behave as if we took the free vars of its kind at the end; that is, with no bound vars in scope. So the solution is easy. 
The old code was this: ty_co_vars_of_type (TyVarTy v) is acc | v `elemVarSet` is = acc | v `elemVarSet` acc = acc | otherwise = ty_co_vars_of_type (tyVarKind v) is (extendVarSet acc v) Now all we need to do is take the free vars of tyVarKind v *with an empty bound-var set*, thus: ty_co_vars_of_type (TyVarTy v) is acc | v `elemVarSet` is = acc | v `elemVarSet` acc = acc | otherwise = ty_co_vars_of_type (tyVarKind v) emptyVarSet (extendVarSet acc v) ^^^^^^^^^^^ And that's it. This works because a variable is either bound or free. If it is bound, then we won't look at it at all. If it is free, then all the variables free in its kind are free -- regardless of whether some local variable has the same Unique. So if we're looking at a variable occurrence at all, then all variables in its kind are free. Note [Free vars and synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When finding free variables we generally do not expand synonyms. So given type T a = Int the type (T [b]) will return `b` as a free variable, even though expanding the synonym would get rid of it. Expanding synonyms might lead to types that look ill-scoped; an alternative we have not explored. But see `occCheckExpand` in this module for a function that does, selectively, expand synonyms to reduce free-var occurences. 
-} {- ********************************************************************* * * Endo for free variables * * ********************************************************************* -} {- Note [Accumulating parameter free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We can use foldType to build an accumulating-parameter version of a free-var finder, thus: fvs :: Type -> TyCoVarSet fvs ty = appEndo (foldType folder ty) emptyVarSet Recall that foldType :: TyCoFolder env a -> env -> Type -> a newtype Endo a = Endo (a -> a) -- In Data.Monoid instance Monoid a => Monoid (Endo a) where (Endo f) `mappend` (Endo g) = Endo (f.g) appEndo :: Endo a -> a -> a appEndo (Endo f) x = f x So `mappend` for Endos is just function composition. It's very important that, after optimisation, we end up with * an arity-three function * that is strict in the accumulator fvs env (TyVarTy v) acc | v `elemVarSet` env = acc | v `elemVarSet` acc = acc | otherwise = acc `extendVarSet` v fvs env (AppTy t1 t2) = fvs env t1 (fvs env t2 acc) ... The "strict in the accumulator" part is to ensure that in the AppTy equation we don't build a thunk for (fvs env t2 acc). The optimiser does do all this, but not very robustly. It depends critically on the basic arity-2 function not being exported, so that all its calls are visibly to three arguments. This analysis is done by the Call Arity pass. TL;DR: check this regularly! 
-}

-- | Extract the accumulated variable set from an 'Endo'-based traversal
-- by applying it to the empty set.
runTyCoVars :: Endo TyCoVarSet -> TyCoVarSet
{-# INLINE runTyCoVars #-}
runTyCoVars f = appEndo f emptyVarSet

{- *********************************************************************
*                                                                      *
          Deep free variables
          See Note [Shallow and deep free variables]
*                                                                      *
********************************************************************* -}

tyCoVarsOfType :: Type -> TyCoVarSet
-- The "deep" TyCoVars of the type
tyCoVarsOfType ty = runTyCoVars (deep_ty ty)
-- Alternative:
--   tyCoVarsOfType ty = closeOverKinds (shallowTyCoVarsOfType ty)

tyCoVarsOfTypes :: [Type] -> TyCoVarSet
-- The "deep" TyCoVars of the types
tyCoVarsOfTypes tys = runTyCoVars (deep_tys tys)
-- Alternative:
--   tyCoVarsOfTypes tys = closeOverKinds (shallowTyCoVarsOfTypes tys)

tyCoVarsOfCo :: Coercion -> TyCoVarSet
-- The "deep" TyCoVars of the coercion
-- See Note [Free variables of types]
tyCoVarsOfCo co = runTyCoVars (deep_co co)

tyCoVarsOfMCo :: MCoercion -> TyCoVarSet
tyCoVarsOfMCo MRefl    = emptyVarSet
tyCoVarsOfMCo (MCo co) = tyCoVarsOfCo co

tyCoVarsOfCos :: [Coercion] -> TyCoVarSet
tyCoVarsOfCos cos = runTyCoVars (deep_cos cos)

-- The four mutually recursive deep-FV workers, built in one go by
-- foldTyCo; see Note [Accumulating parameter free variables]
deep_ty  :: Type       -> Endo TyCoVarSet
deep_tys :: [Type]     -> Endo TyCoVarSet
deep_co  :: Coercion   -> Endo TyCoVarSet
deep_cos :: [Coercion] -> Endo TyCoVarSet
(deep_ty, deep_tys, deep_co, deep_cos) = foldTyCo deepTcvFolder emptyVarSet

deepTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet)
deepTcvFolder = TyCoFolder { tcf_view = noView
                             -- See Note [Free vars and synonyms]
                           , tcf_tyvar = do_tcv, tcf_covar = do_tcv
                           , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tcv is v = Endo do_it
      where
        -- See Note [Closing over free variable kinds]: on the first
        -- encounter of a free var, recurse into its kind with an
        -- /empty/ bound-var set (deep_ty starts afresh)
        do_it acc | v `elemVarSet` is  = acc
                  | v `elemVarSet` acc = acc
                  | otherwise          = appEndo (deep_ty (varType v)) $
                                         acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole is hole  = do_tcv is (coHoleCoVar hole)
      -- See Note [CoercionHoles and coercion free variables]
      -- in GHC.Core.TyCo.Rep

{- *********************************************************************
*                                                                      *
          Shallow free
          variables
          See Note [Shallow and deep free variables]
*                                                                      *
********************************************************************* -}

shallowTyCoVarsOfType :: Type -> TyCoVarSet
-- See Note [Free variables of types]
shallowTyCoVarsOfType ty = runTyCoVars (shallow_ty ty)

shallowTyCoVarsOfTypes :: [Type] -> TyCoVarSet
shallowTyCoVarsOfTypes tys = runTyCoVars (shallow_tys tys)

shallowTyCoVarsOfCo :: Coercion -> TyCoVarSet
shallowTyCoVarsOfCo co = runTyCoVars (shallow_co co)

shallowTyCoVarsOfCos :: [Coercion] -> TyCoVarSet
shallowTyCoVarsOfCos cos = runTyCoVars (shallow_cos cos)

-- | Returns free variables of types, including kind variables as
-- a non-deterministic set. For type synonyms it does /not/ expand the
-- synonym.
shallowTyCoVarsOfTyVarEnv :: TyVarEnv Type -> TyCoVarSet
-- See Note [Free variables of types]
shallowTyCoVarsOfTyVarEnv tys = shallowTyCoVarsOfTypes (nonDetEltsUFM tys)
  -- It's OK to use nonDetEltsUFM here because we immediately
  -- forget the ordering by returning a set

shallowTyCoVarsOfCoVarEnv :: CoVarEnv Coercion -> TyCoVarSet
shallowTyCoVarsOfCoVarEnv cos = shallowTyCoVarsOfCos (nonDetEltsUFM cos)
  -- It's OK to use nonDetEltsUFM here because we immediately
  -- forget the ordering by returning a set

shallow_ty  :: Type       -> Endo TyCoVarSet
shallow_tys :: [Type]     -> Endo TyCoVarSet
shallow_co  :: Coercion   -> Endo TyCoVarSet
shallow_cos :: [Coercion] -> Endo TyCoVarSet
(shallow_ty, shallow_tys, shallow_co, shallow_cos) = foldTyCo shallowTcvFolder emptyVarSet

-- | Folder for the "shallow" free variables: unlike 'deepTcvFolder',
-- it does not recurse into the kinds of the variables it finds.
shallowTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet)
shallowTcvFolder = TyCoFolder { tcf_view = noView  -- See Note [Free vars and synonyms]
                              , tcf_tyvar = do_tcv, tcf_covar = do_tcv
                              , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tcv is v = Endo do_it
      where
        do_it acc | v `elemVarSet` is  = acc   -- Bound here; not free
                  | v `elemVarSet` acc = acc   -- Already collected
                  | otherwise          = acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole _ _  = mempty   -- Ignore coercion holes

{-
*********************************************************************
*                                                                      *
          Free coercion variables
*                                                                      *
********************************************************************* -}

{- Note [Finding free coercion variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here we are only interested in the free /coercion/ variables.
We can achieve this through a slightly different TyCo folder.

Notice that we look deeply, into kinds.

See #14880.
-}

-- See Note [Finding free coercion variables]
coVarsOfType  :: Type       -> CoVarSet
coVarsOfTypes :: [Type]     -> CoVarSet
coVarsOfCo    :: Coercion   -> CoVarSet
coVarsOfCos   :: [Coercion] -> CoVarSet

coVarsOfType  ty  = runTyCoVars (deep_cv_ty ty)
coVarsOfTypes tys = runTyCoVars (deep_cv_tys tys)
coVarsOfCo    co  = runTyCoVars (deep_cv_co co)
coVarsOfCos   cos = runTyCoVars (deep_cv_cos cos)

deep_cv_ty  :: Type       -> Endo CoVarSet
deep_cv_tys :: [Type]     -> Endo CoVarSet
deep_cv_co  :: Coercion   -> Endo CoVarSet
deep_cv_cos :: [Coercion] -> Endo CoVarSet
(deep_cv_ty, deep_cv_tys, deep_cv_co, deep_cv_cos) = foldTyCo deepCoVarFolder emptyVarSet

-- | Folder collecting only coercion variables (but looking deeply,
-- into the kinds of the covars it finds).
deepCoVarFolder :: TyCoFolder TyCoVarSet (Endo CoVarSet)
deepCoVarFolder = TyCoFolder { tcf_view = noView
                             , tcf_tyvar = do_tyvar, tcf_covar = do_covar
                             , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tyvar _ _  = mempty
      -- This do_tyvar means we won't see any CoVars in this
      -- TyVar's kind. This may be wrong; but it's the way it's
      -- always been.  And it's awkward to change, because
      -- the tyvar won't end up in the accumulator, so
      -- we'd look repeatedly.  Blargh.

    do_covar is v = Endo do_it
      where
        do_it acc | v `elemVarSet` is  = acc   -- Bound here; not free
                  | v `elemVarSet` acc = acc   -- Already collected
                  | otherwise          = appEndo (deep_cv_ty (varType v)) $
                                         -- Recurse into v's kind
                                         acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole is hole  = do_covar is (coHoleCoVar hole)
                       -- See Note [CoercionHoles and coercion free variables]
                       -- in GHC.Core.TyCo.Rep

------- Same again, but for DCoVarSet ----------
-- But this time the free vars are shallow
coVarsOfCosDSet :: [Coercion] -> DCoVarSet
coVarsOfCosDSet cos = fvDVarSetSome isCoVar (tyCoFVsOfCos cos)

coVarsOfCoDSet :: Coercion -> DCoVarSet
coVarsOfCoDSet co = fvDVarSetSome isCoVar (tyCoFVsOfCo co)

{- *********************************************************************
*                                                                      *
          Closing over kinds
*                                                                      *
********************************************************************* -}

------------- Closing over kinds -----------------

closeOverKinds :: TyCoVarSet -> TyCoVarSet
-- For each element of the input set,
-- add the deep free variables of its kind
closeOverKinds vs = nonDetStrictFoldVarSet do_one vs vs
  where
    do_one v acc = appEndo (deep_ty (varType v)) acc

{- --------------- Alternative version 1 (using FV) ------------
closeOverKinds = fvVarSet . closeOverKindsFV . nonDetEltsUniqSet
-}

{- ---------------- Alternative version 2 -------------

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a non-deterministic set.
closeOverKinds :: TyCoVarSet -> TyCoVarSet
closeOverKinds vs
  = go vs vs
  where
    go :: VarSet  -- Work list
       -> VarSet  -- Accumulator, always a superset of wl
       -> VarSet
    go wl acc
      | isEmptyVarSet wl = acc
      | otherwise        = go wl_kvs (acc `unionVarSet` wl_kvs)
      where
        k v inner_acc = ty_co_vars_of_type (varType v) acc inner_acc
        wl_kvs = nonDetFoldVarSet k emptyVarSet wl
        -- wl_kvs = union of shallow free vars of the kinds of wl
        --          but don't bother to collect vars in acc

-}

{- ---------------- Alternative version 3 -------------
-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a non-deterministic set.
closeOverKinds :: TyVarSet -> TyVarSet
closeOverKinds vs = close_over_kinds vs emptyVarSet

close_over_kinds :: TyVarSet  -- Work list
                 -> TyVarSet  -- Accumulator
                 -> TyVarSet
-- Precondition: in any call (close_over_kinds wl acc)
--  for every tv in acc, the shallow kind-vars of tv
--  are either in the work list wl, or in acc
-- Postcondition: result is the deep free vars of (wl `union` acc)
close_over_kinds wl acc
  = nonDetFoldVarSet do_one acc wl
  where
    do_one :: Var -> TyVarSet -> TyVarSet
    -- (do_one v acc) adds v and its deep free-vars to acc
    do_one v acc | v `elemVarSet` acc
                 = acc
                 | otherwise
                 = close_over_kinds (shallowTyCoVarsOfType (varType v)) $
                   acc `extendVarSet` v
-}

{- *********************************************************************
*                                                                      *
          The FV versions return deterministic results
*                                                                      *
********************************************************************* -}

-- | Given a list of tyvars returns a deterministic FV computation that
-- returns the given tyvars with the kind variables free in the kinds of the
-- given tyvars.
closeOverKindsFV :: [TyVar] -> FV
closeOverKindsFV tvs =
  mapUnionFV (tyCoFVsOfType . tyVarKind) tvs `unionFV` mkFVs tvs

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a deterministically ordered list.
closeOverKindsList :: [TyVar] -> [TyVar]
closeOverKindsList tvs = fvVarList $ closeOverKindsFV tvs

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a deterministic set.
closeOverKindsDSet :: DTyVarSet -> DTyVarSet
closeOverKindsDSet = fvDVarSet . closeOverKindsFV . dVarSetElems

-- | `tyCoFVsOfType` that returns free variables of a type in a deterministic
-- set. For explanation of why using `VarSet` is not deterministic see
-- Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfTypeDSet :: Type -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfTypeDSet ty = fvDVarSet $ tyCoFVsOfType ty

-- | `tyCoFVsOfType` that returns free variables of a type in deterministic
-- order. For explanation of why using `VarSet` is not deterministic see
-- Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfTypeList :: Type -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfTypeList ty = fvVarList $ tyCoFVsOfType ty

-- | Returns free variables of types, including kind variables as
-- a deterministic set. For type synonyms it does /not/ expand the
-- synonym.
tyCoVarsOfTypesDSet :: [Type] -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfTypesDSet tys = fvDVarSet $ tyCoFVsOfTypes tys

-- | Returns free variables of types, including kind variables as
-- a deterministically ordered list. For type synonyms it does /not/ expand the
-- synonym.
tyCoVarsOfTypesList :: [Type] -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfTypesList tys = fvVarList $ tyCoFVsOfTypes tys

-- | The worker for `tyCoFVsOfType` and `tyCoFVsOfTypeList`.
-- The previous implementation used `unionVarSet` which is O(n+m) and can
-- make the function quadratic.
-- It's exported, so that it can be composed with
-- other functions that compute free variables.
-- See Note [FV naming conventions] in "GHC.Utils.FV".
--
-- Eta-expanded because that makes it run faster (apparently)
-- See Note [FV eta expansion] in "GHC.Utils.FV" for explanation.
tyCoFVsOfType :: Type -> FV
-- See Note [Free variables of types]
tyCoFVsOfType (TyVarTy v)        f bound_vars (acc_list, acc_set)
  | not (f v) = (acc_list, acc_set)
  | v `elemVarSet` bound_vars = (acc_list, acc_set)
  | v `elemVarSet` acc_set = (acc_list, acc_set)
  | otherwise = tyCoFVsOfType (tyVarKind v) f
                              emptyVarSet   -- See Note [Closing over free variable kinds]
                              (v:acc_list, extendVarSet acc_set v)
tyCoFVsOfType (TyConApp _ tys)   f bound_vars acc = tyCoFVsOfTypes tys f bound_vars acc
    -- See Note [Free vars and synonyms]
tyCoFVsOfType (LitTy {})         f bound_vars acc = emptyFV f bound_vars acc
tyCoFVsOfType (AppTy fun arg)    f bound_vars acc = (tyCoFVsOfType fun `unionFV` tyCoFVsOfType arg) f bound_vars acc
tyCoFVsOfType (FunTy _ w arg res) f bound_vars acc = (tyCoFVsOfType w `unionFV` tyCoFVsOfType arg `unionFV` tyCoFVsOfType res) f bound_vars acc
tyCoFVsOfType (ForAllTy bndr ty) f bound_vars acc = tyCoFVsBndr bndr (tyCoFVsOfType ty) f bound_vars acc
tyCoFVsOfType (CastTy ty co)     f bound_vars acc = (tyCoFVsOfType ty `unionFV` tyCoFVsOfCo co) f bound_vars acc
tyCoFVsOfType (CoercionTy co)    f bound_vars acc = tyCoFVsOfCo co f bound_vars acc

tyCoFVsBndr :: ForAllTyBinder -> FV -> FV
-- Free vars of (forall b. <thing with fvs>)
tyCoFVsBndr (Bndr tv _) fvs = tyCoFVsVarBndr tv fvs

tyCoFVsVarBndrs :: [Var] -> FV -> FV
tyCoFVsVarBndrs vars fvs = foldr tyCoFVsVarBndr fvs vars

tyCoFVsVarBndr :: Var -> FV -> FV
tyCoFVsVarBndr var fvs
  = tyCoFVsOfType (varType var)   -- Free vars of its type/kind
    `unionFV` delFV var fvs       -- Delete it from the thing-inside

tyCoFVsOfTypes :: [Type] -> FV
-- See Note [Free variables of types]
tyCoFVsOfTypes (ty:tys) fv_cand in_scope acc = (tyCoFVsOfType ty `unionFV` tyCoFVsOfTypes tys) fv_cand in_scope acc
tyCoFVsOfTypes []       fv_cand in_scope acc = emptyFV fv_cand in_scope acc

-- | Get a deterministic set of the vars free in a coercion
tyCoVarsOfCoDSet :: Coercion -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfCoDSet co = fvDVarSet $ tyCoFVsOfCo co

tyCoVarsOfCoList :: Coercion -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfCoList co = fvVarList $ tyCoFVsOfCo co

tyCoFVsOfMCo :: MCoercion -> FV
tyCoFVsOfMCo MRefl    = emptyFV
tyCoFVsOfMCo (MCo co) = tyCoFVsOfCo co

tyCoFVsOfCo :: Coercion -> FV
-- Extracts type and coercion variables from a coercion
-- See Note [Free variables of types]
tyCoFVsOfCo (Refl ty) fv_cand in_scope acc
  = tyCoFVsOfType ty fv_cand in_scope acc
tyCoFVsOfCo (GRefl _ ty mco) fv_cand in_scope acc
  = (tyCoFVsOfType ty `unionFV` tyCoFVsOfMCo mco) fv_cand in_scope acc
tyCoFVsOfCo (TyConAppCo _ _ cos) fv_cand in_scope acc = tyCoFVsOfCos cos fv_cand in_scope acc
tyCoFVsOfCo (AppCo co arg) fv_cand in_scope acc
  = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCo arg) fv_cand in_scope acc
tyCoFVsOfCo (ForAllCo { fco_tcv = tv, fco_kind = kind_co, fco_body = co }) fv_cand in_scope acc
  = (tyCoFVsVarBndr tv (tyCoFVsOfCo co) `unionFV` tyCoFVsOfCo kind_co) fv_cand in_scope acc
tyCoFVsOfCo (FunCo { fco_mult = w, fco_arg = co1, fco_res = co2 }) fv_cand in_scope acc
  = (tyCoFVsOfCo co1 `unionFV` tyCoFVsOfCo co2 `unionFV` tyCoFVsOfCo w) fv_cand in_scope acc
tyCoFVsOfCo (CoVarCo v) fv_cand in_scope acc
  = tyCoFVsOfCoVar v fv_cand in_scope acc
tyCoFVsOfCo (HoleCo h) fv_cand in_scope acc
  = tyCoFVsOfCoVar (coHoleCoVar h) fv_cand in_scope acc
    -- See Note [CoercionHoles and coercion free variables]
tyCoFVsOfCo (AxiomCo _ cs) fv_cand in_scope acc = tyCoFVsOfCos cs fv_cand in_scope acc
tyCoFVsOfCo (UnivCo { uco_lty = t1, uco_rty = t2, uco_deps = deps}) fv_cand in_scope acc
  = (tyCoFVsOfCos deps `unionFV` tyCoFVsOfType t1
                       `unionFV` tyCoFVsOfType t2) fv_cand in_scope acc
tyCoFVsOfCo (SymCo co)        fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (TransCo co1 co2) fv_cand in_scope acc = (tyCoFVsOfCo co1 `unionFV` tyCoFVsOfCo co2) fv_cand in_scope acc
tyCoFVsOfCo (SelCo _ co)      fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (LRCo _ co)       fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (InstCo co arg)   fv_cand in_scope acc = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCo arg) fv_cand in_scope acc
tyCoFVsOfCo (KindCo co)       fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (SubCo co)        fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc

tyCoFVsOfCoVar :: CoVar -> FV
tyCoFVsOfCoVar v fv_cand in_scope acc
  = (unitFV v `unionFV` tyCoFVsOfType (varType v)) fv_cand in_scope acc

tyCoFVsOfCos :: [Coercion] -> FV
tyCoFVsOfCos []       fv_cand in_scope acc = emptyFV fv_cand in_scope acc
tyCoFVsOfCos (co:cos) fv_cand in_scope acc = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCos cos) fv_cand in_scope acc

----- Whether a covar is /Almost Devoid/ in a type or coercion ----

-- | Given a covar and a coercion, returns True if covar is almost devoid in
-- the coercion. That is, covar can only appear in Refl and GRefl.
-- See (FC6) in Note [ForAllCo] in "GHC.Core.TyCo.Rep"
almostDevoidCoVarOfCo :: CoVar -> Coercion -> Bool
almostDevoidCoVarOfCo cv co =
  almost_devoid_co_var_of_co co cv

-- | Worker for 'almostDevoidCoVarOfCo': True if the covar appears,
-- at most, inside Refl/GRefl sub-coercions.
almost_devoid_co_var_of_co :: Coercion -> CoVar -> Bool
almost_devoid_co_var_of_co (Refl {}) _  = True   -- covar is allowed in Refl and
almost_devoid_co_var_of_co (GRefl {}) _ = True   -- GRefl, so we don't look into
                                                 -- the coercions
almost_devoid_co_var_of_co (TyConAppCo _ _ cos) cv
  = almost_devoid_co_var_of_cos cos cv
almost_devoid_co_var_of_co (AppCo co arg) cv
  = almost_devoid_co_var_of_co co cv
  && almost_devoid_co_var_of_co arg cv
almost_devoid_co_var_of_co (ForAllCo { fco_tcv = v, fco_kind = kind_co, fco_body = co }) cv
  = almost_devoid_co_var_of_co kind_co cv
  && (v == cv || almost_devoid_co_var_of_co co cv)
    -- The binder v shadows cv in the body
almost_devoid_co_var_of_co (FunCo { fco_mult = w, fco_arg = co1, fco_res = co2 }) cv
  = almost_devoid_co_var_of_co w cv
  && almost_devoid_co_var_of_co co1 cv
  && almost_devoid_co_var_of_co co2 cv
almost_devoid_co_var_of_co (CoVarCo v) cv = v /= cv
almost_devoid_co_var_of_co (HoleCo h)  cv = (coHoleCoVar h) /= cv
almost_devoid_co_var_of_co (AxiomCo _ cs) cv
  = almost_devoid_co_var_of_cos cs cv
almost_devoid_co_var_of_co (UnivCo { uco_lty = t1, uco_rty = t2, uco_deps = deps }) cv
  = almost_devoid_co_var_of_cos deps cv
  && almost_devoid_co_var_of_type t1 cv
  && almost_devoid_co_var_of_type t2 cv
almost_devoid_co_var_of_co (SymCo co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (TransCo co1 co2) cv
  = almost_devoid_co_var_of_co co1 cv
  && almost_devoid_co_var_of_co co2 cv
almost_devoid_co_var_of_co (SelCo _ co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (LRCo _ co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (InstCo co arg) cv
  = almost_devoid_co_var_of_co co cv
  && almost_devoid_co_var_of_co arg cv
almost_devoid_co_var_of_co (KindCo co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (SubCo co) cv
  = almost_devoid_co_var_of_co co cv
-- | True if the covar is almost devoid in every coercion in the list.
-- (`all` short-circuits exactly like the original (&&) chain.)
almost_devoid_co_var_of_cos :: [Coercion] -> CoVar -> Bool
almost_devoid_co_var_of_cos cos covar
  = all (\co -> almost_devoid_co_var_of_co co covar) cos

-- | True if the covar is almost devoid in the type; that is, every
-- occurrence of the covar inside the type is within a Refl or GRefl
-- coercion.  See 'almostDevoidCoVarOfCo'.
almost_devoid_co_var_of_type :: Type -> CoVar -> Bool
almost_devoid_co_var_of_type ty covar = go_ty ty
  where
    go_ty (TyVarTy _)         = True
    go_ty (TyConApp _ tys)    = almost_devoid_co_var_of_types tys covar
    go_ty (LitTy {})          = True
    go_ty (AppTy fun arg)     = go_ty fun && go_ty arg
    go_ty (FunTy _ w arg res) = go_ty w && go_ty arg && go_ty res
    go_ty (ForAllTy (Bndr v _) body)
      -- The binder v shadows covar in the body
      = go_ty (varType v) && (v == covar || go_ty body)
    go_ty (CastTy t co)       = go_ty t && almost_devoid_co_var_of_co co covar
    go_ty (CoercionTy co)     = almost_devoid_co_var_of_co co covar

-- | True if the covar is almost devoid in every type in the list.
almost_devoid_co_var_of_types :: [Type] -> CoVar -> Bool
almost_devoid_co_var_of_types tys covar
  = all (\ty -> almost_devoid_co_var_of_type ty covar) tys

{-
%************************************************************************
%*                                                                      *
                 Free tyvars, but with visible/invisible info
%*                                                                      *
%************************************************************************
-}

-- | Retrieve the free variables in this type, splitting them based
-- on whether they are used visibly or invisibly. Invisible ones come
-- first.
visVarsOfType :: Type -> Pair TyCoVarSet
visVarsOfType orig_ty = Pair invis_vars vis_vars
  where
    Pair invis_vars1 vis_vars = go orig_ty
    invis_vars = invis_vars1 `minusVarSet` vis_vars
      -- A var used both visibly and invisibly counts as visible only

    go (TyVarTy tv)      = Pair (tyCoVarsOfType $ tyVarKind tv) (unitVarSet tv)
    go (AppTy t1 t2)     = go t1 `mappend` go t2
    go (TyConApp tc tys) = go_tc tc tys
    go (FunTy _ w t1 t2) = go w `mappend` go t1 `mappend` go t2
    go (ForAllTy (Bndr tv _) ty)
      = ((`delVarSet` tv) <$> go ty) `mappend`
        (invisible (tyCoVarsOfType $ varType tv))
    go (LitTy {}) = mempty
    go (CastTy ty co) = go ty `mappend` invisible (tyCoVarsOfCo co)
    go (CoercionTy co) = invisible $ tyCoVarsOfCo co

    invisible vs = Pair vs emptyVarSet

    go_tc tc tys = let (invis, vis) = partitionInvisibleTypes tc tys in
                   invisible (tyCoVarsOfTypes invis) `mappend` foldMap go vis

visVarsOfTypes :: [Type] -> Pair TyCoVarSet
visVarsOfTypes = foldMap visVarsOfType

{- *********************************************************************
*                                                                      *
                 Injective free vars
*                                                                      *
********************************************************************* -}

isInjectiveInType :: TyVar -> Type -> Bool
-- True <=> tv /definitely/ appears injectively in ty
-- A bit more efficient than (tv `elemVarSet` injectiveTyVarsOfType ty)
-- Ignore occurrence in coercions, and even in injective positions of
-- type families.
isInjectiveInType tv ty
  = go ty
  where
    go ty | Just ty' <- rewriterView ty = go ty'
    go (TyVarTy tv')              = tv' == tv
    go (AppTy f a)                = go f || go a
    go (FunTy _ w ty1 ty2)        = go w || go ty1 || go ty2
    go (TyConApp tc tys)          = go_tc tc tys
    go (ForAllTy (Bndr tv' _) ty) = go (tyVarKind tv') ||
                                    (tv /= tv' && go ty)
    go LitTy{}                    = False
    go (CastTy ty _)              = go ty
    go CoercionTy{}               = False

    go_tc tc tys | isTypeFamilyTyCon tc = False
                 | otherwise            = any go tys

-- | Returns the free variables of a 'Type' that are in injective positions.
-- Specifically, it finds the free variables while:
--
-- * Expanding type synonyms
--
-- * Ignoring the coercion in @(ty |> co)@
--
-- * Ignoring the non-injective fields of a 'TyConApp'
--
--
-- For example, if @F@ is a non-injective type family, then:
--
-- @
-- injectiveTyVarsOf( Either c (Maybe (a, F b c)) ) = {a,c}
-- @
--
-- If @'injectiveVarsOfType' ty = itvs@, then knowing @ty@ fixes @itvs@.
-- More formally, if
-- @a@ is in @'injectiveVarsOfType' ty@
-- and  @S1(ty) ~ S2(ty)@,
-- then @S1(a)  ~ S2(a)@,
-- where @S1@ and @S2@ are arbitrary substitutions.
--
-- See @Note [When does a tycon application need an explicit kind signature?]@.
injectiveVarsOfType :: Bool   -- ^ Should we look under injective type families?
                              -- See Note [Coverage condition for injective type families]
                              -- in "GHC.Tc.Instance.Family".
                    -> Type -> FV
injectiveVarsOfType look_under_tfs = go
  where
    go ty | Just ty' <- rewriterView ty = go ty'
    go (TyVarTy v)               = unitFV v `unionFV` go (tyVarKind v)
    go (AppTy f a)               = go f `unionFV` go a
    go (FunTy _ w ty1 ty2)       = go w `unionFV` go ty1 `unionFV` go ty2
    go (TyConApp tc tys)         = go_tc tc tys
    go (ForAllTy (Bndr tv _) ty) = go (tyVarKind tv) `unionFV` delFV tv (go ty)
    go LitTy{}                   = emptyFV
    go (CastTy ty _)             = go ty
    go CoercionTy{}              = emptyFV

    go_tc tc tys
      | isTypeFamilyTyCon tc
      = if | look_under_tfs
           , Injective flags <- tyConInjectivityInfo tc
           -> mapUnionFV go $
              filterByList (flags ++ repeat True) tys
              -- Oversaturated arguments to a tycon are
              -- always injective, hence the repeat True
           | otherwise  -- No injectivity info for this type family
           -> emptyFV
      | otherwise       -- Data type, injective in all positions
      = mapUnionFV go tys

-- | Returns the free variables of a 'Type' that are in injective positions.
-- Specifically, it finds the free variables while:
--
-- * Expanding type synonyms
--
-- * Ignoring the coercion in @(ty |> co)@
--
-- * Ignoring the non-injective fields of a 'TyConApp'
--
-- See @Note [When does a tycon application need an explicit kind signature?]@.
injectiveVarsOfTypes :: Bool -- ^ look under injective type families?
                             -- See Note [Coverage condition for injective type families]
                             -- in "GHC.Tc.Instance.Family".
                     -> [Type] -> FV
injectiveVarsOfTypes look_under_tfs = mapUnionFV (injectiveVarsOfType look_under_tfs)

{- *********************************************************************
*                                                                      *
                 Invisible vars
*                                                                      *
********************************************************************* -}

-- | Returns the set of variables that are used invisibly anywhere within
-- the given type. A variable will be included even if it is used both visibly
-- and invisibly. An invisible use site includes:
--   * In the kind of a variable
--   * In the kind of a bound variable in a forall
--   * In a coercion
--   * In a Specified or Inferred argument to a function
-- See Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in "GHC.Core.TyCo.Rep"
invisibleVarsOfType :: Type -> FV
invisibleVarsOfType = go
  where
    go ty | Just ty' <- coreView ty = go ty'
    go (TyVarTy v)         = go (tyVarKind v)
    go (AppTy f a)         = go f `unionFV` go a
    go (FunTy _ w ty1 ty2) = go w `unionFV` go ty1 `unionFV` go ty2
    go (TyConApp tc tys)   = tyCoFVsOfTypes invisibles `unionFV`
                             invisibleVarsOfTypes visibles
      where (invisibles, visibles) = partitionInvisibleTypes tc tys
    go (ForAllTy tvb ty)   = tyCoFVsBndr tvb $ go ty
    go LitTy{}             = emptyFV
    go (CastTy ty co)      = tyCoFVsOfCo co `unionFV` go ty
    go (CoercionTy co)     = tyCoFVsOfCo co

-- | Like 'invisibleVarsOfType', but for many types.
invisibleVarsOfTypes :: [Type] -> FV
invisibleVarsOfTypes = mapUnionFV invisibleVarsOfType

{- *********************************************************************
*                                                                      *
                 Any free vars
*                                                                      *
********************************************************************* -}

{-# INLINE afvFolder #-}   -- so that specialization to (const True) works
afvFolder :: (TyCoVar -> Bool) -> TyCoFolder TyCoVarSet DM.Any
-- 'afvFolder' is short for "any-free-var folder", good for checking
-- if any free var of a type satisfies a predicate `check_fv`
afvFolder check_fv = TyCoFolder { tcf_view = noView  -- See Note [Free vars and synonyms]
                                , tcf_tyvar = do_tcv, tcf_covar = do_tcv
                                , tcf_hole = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tcv is tv = Any (not (tv `elemVarSet` is) && check_fv tv)
    do_hole _ _  = Any False    -- I'm unsure; probably never happens
    do_bndr is tv _ = is `extendVarSet` tv

anyFreeVarsOfType :: (TyCoVar -> Bool) -> Type -> Bool
anyFreeVarsOfType check_fv ty = DM.getAny (f ty)
  where (f, _, _, _) = foldTyCo (afvFolder check_fv) emptyVarSet

anyFreeVarsOfTypes :: (TyCoVar -> Bool) -> [Type] -> Bool
anyFreeVarsOfTypes check_fv tys = DM.getAny (f tys)
  where (_, f, _, _) = foldTyCo (afvFolder check_fv) emptyVarSet

anyFreeVarsOfCo :: (TyCoVar -> Bool) -> Coercion -> Bool
anyFreeVarsOfCo check_fv co = DM.getAny (f co)
  where (_, _, f, _) = foldTyCo (afvFolder check_fv) emptyVarSet

noFreeVarsOfType :: Type -> Bool
noFreeVarsOfType ty = not $ DM.getAny (f ty)
  where (f, _, _, _) = foldTyCo (afvFolder (const True)) emptyVarSet

noFreeVarsOfTypes :: [Type] -> Bool
noFreeVarsOfTypes tys = not $ DM.getAny (f tys)
  where (_, f, _, _) = foldTyCo (afvFolder (const True)) emptyVarSet

noFreeVarsOfCo :: Coercion -> Bool
noFreeVarsOfCo co = not $ DM.getAny (f co)
  where (_, _, f, _) = foldTyCo (afvFolder (const True)) emptyVarSet

{- *********************************************************************
*                                                                      *
                 scopedSort
*                                                                      *
********************************************************************* -}

{- Note
[ScopedSort]
~~~~~~~~~~~~~~~~~~~~
Consider

  foo :: Proxy a -> Proxy (b :: k) -> Proxy (a :: k2) -> ()

This function type is implicitly generalised over [a, b, k, k2]. These
variables will be Specified; that is, they will be available for visible
type application. This is because they are written in the type signature
by the user.

However, we must ask: what order will they appear in? In cases without
dependency, this is easy: we just use the lexical left-to-right ordering
of first occurrence. With dependency, we cannot get off the hook so
easily.

We thus state:

 * These variables appear in the order as given by ScopedSort, where
   the input to ScopedSort is the left-to-right order of first occurrence.

Note that this applies only to *implicit* quantification, without a
`forall`. If the user writes a `forall`, then we just use the order given.

ScopedSort is defined thusly (as proposed in #15743):
  * Work left-to-right through the input list, with a cursor.
  * If variable v at the cursor is depended on by any earlier variable w,
    move v immediately before the leftmost such w.

INVARIANT: The prefix of variables before the cursor form a valid telescope.

Note that ScopedSort makes sense only after type inference is done and all
types/kinds are fully settled and zonked.
-}

-- | Do a topological sort on a list of tyvars,
--   so that binders occur before occurrences
-- E.g. given  [ a::k, k::*, b::k ]
-- it'll return a well-scoped list [ k::*, a::k, b::k ]
--
-- This is a deterministic sorting operation
-- (that is, doesn't depend on Uniques).
--
-- It is also meant to be stable: that is, variables should not
-- be reordered unnecessarily. This is specified in Note [ScopedSort]
-- See also Note [Ordering of implicit variables] in "GHC.Rename.HsType"
scopedSort :: [TyCoVar] -> [TyCoVar]
scopedSort = go [] []
  where
    go :: [TyCoVar]    -- already sorted, in reverse order
       -> [TyCoVarSet] -- each set contains all the variables which must be placed
                       -- before the tv corresponding to the set; they are accumulations
                       -- of the fvs in the sorted tvs' kinds
                       -- This list is in 1-to-1 correspondence with the sorted tyvars
                       -- INVARIANT:
                       --   all (\tl -> all (`subVarSet` head tl) (tail tl)) (tails fv_list)
                       -- That is, each set in the list is a superset of all later sets.
       -> [TyCoVar]    -- yet to be sorted
       -> [TyCoVar]
    go acc _fv_list [] = reverse acc
    go acc  fv_list (tv:tvs)
      = go acc' fv_list' tvs
      where
        (acc', fv_list') = insert tv acc fv_list

    insert :: TyCoVar       -- var to insert
           -> [TyCoVar]     -- sorted list, in reverse order
           -> [TyCoVarSet]  -- list of fvs, as above
           -> ([TyCoVar], [TyCoVarSet])   -- augmented lists
    insert tv []     []         = ([tv], [tyCoVarsOfType (tyVarKind tv)])
    insert tv (a:as) (fvs:fvss)
      | tv `elemVarSet` fvs
        -- a depends on tv: keep inserting tv further to the left
      , (as', fvss') <- insert tv as fvss
      = (a:as', fvs `unionVarSet` fv_tv : fvss')

      | otherwise
        -- tv can be placed here, just before a
      = (tv:a:as, fvs `unionVarSet` fv_tv : fvs : fvss)
      where
        fv_tv = tyCoVarsOfType (tyVarKind tv)

    -- lists not in correspondence
    insert _ _ _ = panic "scopedSort"

-- | Get the free vars of a type in scoped order
tyCoVarsOfTypeWellScoped :: Type -> [TyVar]
tyCoVarsOfTypeWellScoped = scopedSort . tyCoVarsOfTypeList

-- | Get the free vars of types in scoped order
tyCoVarsOfTypesWellScoped :: [Type] -> [TyVar]
tyCoVarsOfTypesWellScoped = scopedSort . tyCoVarsOfTypesList

{- ************************************************************************
*                                                                         *
                 Free type constructors
*                                                                         *
************************************************************************ -}

{- Note [tyConsOfType]
~~~~~~~~~~~~~~~~~~~~~~
It is slightly odd to find the TyCons of a type.
Especially since, via a type family reduction or axiom, a type that
doesn't mention T might start to mention T.

This function is used in only three places:
 * In GHC.Tc.Validity.validDerivPred, when identifying "exotic" predicates.
 * In GHC.Tc.Errors.Ppr.pprTcSolverReportMsg, when trying to print a helpful
   error about overlapping instances
 * In utils/dump-decls/Main.hs, an ill-documented module.

None seem critical. Currently tyConsOfType looks inside coercions, but
perhaps it doesn't even need to do that.
-}

-- | All type constructors occurring in the type; looking through type
--   synonyms, but not newtypes.
--   When it finds a Class, it returns the class TyCon.
tyConsOfType :: Type -> UniqSet TyCon
tyConsOfType ty
  = go ty
  where
    go :: Type -> UniqSet TyCon  -- The UniqSet does duplicate elim
    go ty | Just ty' <- coreView ty = go ty'
    go (TyVarTy {})              = emptyUniqSet
    go (LitTy {})                = emptyUniqSet
    go (TyConApp tc tys)         = go_tc tc `unionUniqSets` tyConsOfTypes tys
    go (AppTy a b)               = go a `unionUniqSets` go b
    go (FunTy af w a b)          = go w `unionUniqSets`
                                   go a `unionUniqSets` go b
                                   `unionUniqSets` go_tc (funTyFlagTyCon af)
                                   -- The FunTy's own tycon counts too
    go (ForAllTy (Bndr tv _) ty) = go ty `unionUniqSets` go (varType tv)
    go (CastTy ty co)            = go ty `unionUniqSets` go_co co
    go (CoercionTy co)           = go_co co

    go_co (Refl ty)              = go ty
    go_co (GRefl _ ty mco)       = go ty `unionUniqSets` go_mco mco
    go_co (TyConAppCo _ tc args) = go_tc tc `unionUniqSets` go_cos args
    go_co (AppCo co arg)         = go_co co `unionUniqSets` go_co arg
    go_co (ForAllCo { fco_kind = kind_co, fco_body = co })
                                 = go_co kind_co `unionUniqSets` go_co co
    go_co (FunCo { fco_mult = m, fco_arg = a, fco_res = r })
                                 = go_co m `unionUniqSets` go_co a `unionUniqSets` go_co r
    go_co (AxiomCo ax args)      = go_ax ax `unionUniqSets` go_cos args
    go_co (UnivCo { uco_lty = t1, uco_rty = t2, uco_deps = cos })
                                 = go t1 `unionUniqSets` go t2 `unionUniqSets` go_cos cos
    go_co (CoVarCo {})           = emptyUniqSet
    go_co (HoleCo {})            = emptyUniqSet
    go_co (SymCo co)             = go_co co
    go_co (TransCo co1 co2)      = go_co co1 `unionUniqSets` go_co co2
    go_co (SelCo _ co)           = go_co co
    go_co (LRCo _ co)            = go_co co
    go_co (InstCo co arg)        = go_co co `unionUniqSets` go_co arg
    go_co (KindCo co)            = go_co co
    go_co (SubCo co)             = go_co co

    go_mco MRefl    = emptyUniqSet
    go_mco (MCo co) = go_co co

    go_cos cos = foldr (unionUniqSets . go_co) emptyUniqSet cos

    go_tc tc = unitUniqSet tc

    go_ax (UnbranchedAxiom ax) = go_tc $ coAxiomTyCon ax
    go_ax (BranchedAxiom ax _) = go_tc $ coAxiomTyCon ax
    go_ax (BuiltInFamRew bif)  = go_tc $ bifrw_fam_tc bif
    go_ax (BuiltInFamInj {})   = emptyUniqSet  -- A free-floating axiom

tyConsOfTypes :: [Type] -> UniqSet TyCon
tyConsOfTypes tys = foldr (unionUniqSets . tyConsOfType) emptyUniqSet tys

{- **********************************************************************
*                                                                       *
           Occurs check expansion
%*                                                                      *
%********************************************************************* -}

{- Note [Occurs check expansion]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(occurCheckExpand tv xi) expands synonyms in xi just enough to get rid
of occurrences of tv outside type function arguments, if that is
possible; otherwise, it returns Nothing.

For example, suppose we have
  type F a b = [a]
Then
  occCheckExpand b (F Int b) = Just [Int]
but
  occCheckExpand a (F a Int) = Nothing

We don't promise to do the absolute minimum amount of expanding
necessary, but we try not to do expansions we don't need to.  We
prefer doing inner expansions first.  For example,
  type F a b = (a, Int, a, [a])
  type G b   = Char
We have
  occCheckExpand b (F (G b)) = Just (F Char)
even though we could also expand F to get rid of b.

Note [Occurrence checking: look inside kinds]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we are considering unifying
   (alpha :: *) ~ Int -> (beta :: alpha -> alpha)
This may be an error (what is that alpha doing inside beta's kind?),
but we must not make the mistake of actually unifying or we'll
build an infinite data structure.
So when looking for occurrences of alpha in the rhs, we must look in the kinds of type variables that occur there. occCheckExpand tries to expand type synonyms to remove unnecessary occurrences of a variable, and thereby get past an occurs-check failure. This is good; but we can't do it in the /kind/ of a variable /occurrence/ For example #18451 built an infinite type: type Const a b = a data SameKind :: k -> k -> Type type T (k :: Const Type a) = forall (b :: k). SameKind a b We have b :: k k :: Const Type a a :: k (must be same as b) So if we aren't careful, a's kind mentions a, which is bad. And expanding an /occurrence/ of 'a' doesn't help, because the /binding site/ is the master copy and all the occurrences should match it. Here's a related example: f :: forall a b (c :: Const Type b). Proxy '[a, c] The list means that 'a' gets the same kind as 'c'; but that kind mentions 'b', so the binders are out of order. Bottom line: in occCheckExpand, do not expand inside the kinds of occurrences. See bad_var_occ in occCheckExpand. And see #18451 for more debate. -} occCheckExpand :: [Var] -> Type -> Maybe Type -- See Note [Occurs check expansion] -- We may have needed to do some type synonym unfolding in order to -- get rid of the variable (or forall), so we also return the unfolded -- version of the type, which is guaranteed to be syntactically free -- of the given type variable. If the type is already syntactically -- free of the variable, then the same type is returned. occCheckExpand vs_to_avoid ty | null vs_to_avoid -- Efficient shortcut = Just ty -- Can happen, eg. 
GHC.Core.Utils.mkSingleAltCase | otherwise = go (mkVarSet vs_to_avoid, emptyVarEnv) ty where go :: (VarSet, VarEnv TyCoVar) -> Type -> Maybe Type -- The VarSet is the set of variables we are trying to avoid -- The VarEnv carries mappings necessary -- because of kind expansion go (as, env) ty@(TyVarTy tv) | Just tv' <- lookupVarEnv env tv = return (mkTyVarTy tv') | bad_var_occ as tv = Nothing | otherwise = return ty go _ ty@(LitTy {}) = return ty go cxt (AppTy ty1 ty2) = do { ty1' <- go cxt ty1 ; ty2' <- go cxt ty2 ; return (AppTy ty1' ty2') } go cxt ty@(FunTy _ w ty1 ty2) = do { w' <- go cxt w ; ty1' <- go cxt ty1 ; ty2' <- go cxt ty2 ; return (ty { ft_mult = w', ft_arg = ty1', ft_res = ty2' }) } go cxt@(as, env) (ForAllTy (Bndr tv vis) body_ty) = do { ki' <- go cxt (varType tv) ; let tv' = setVarType tv ki' env' = extendVarEnv env tv tv' as' = as `delVarSet` tv ; body' <- go (as', env') body_ty ; return (ForAllTy (Bndr tv' vis) body') } -- For a type constructor application, first try expanding away the -- offending variable from the arguments. If that doesn't work, next -- see if the type constructor is a type synonym, and if so, expand -- it and try again. 
go cxt ty@(TyConApp tc tys) = case mapM (go cxt) tys of Just tys' -> return (TyConApp tc tys') Nothing | Just ty' <- coreView ty -> go cxt ty' | otherwise -> Nothing -- Failing that, try to expand a synonym go cxt (CastTy ty co) = do { ty' <- go cxt ty ; co' <- go_co cxt co ; return (CastTy ty' co') } go cxt (CoercionTy co) = do { co' <- go_co cxt co ; return (CoercionTy co') } ------------------ bad_var_occ :: VarSet -> Var -> Bool -- Works for TyVar and CoVar -- See Note [Occurrence checking: look inside kinds] bad_var_occ vs_to_avoid v = v `elemVarSet` vs_to_avoid || tyCoVarsOfType (varType v) `intersectsVarSet` vs_to_avoid ------------------ go_mco _ MRefl = return MRefl go_mco ctx (MCo co) = MCo <$> go_co ctx co ------------------ go_co cxt (Refl ty) = do { ty' <- go cxt ty ; return (Refl ty') } go_co cxt (GRefl r ty mco) = do { mco' <- go_mco cxt mco ; ty' <- go cxt ty ; return (GRefl r ty' mco') } -- Note: Coercions do not contain type synonyms go_co cxt (TyConAppCo r tc args) = do { args' <- mapM (go_co cxt) args ; return (TyConAppCo r tc args') } go_co cxt (AppCo co arg) = do { co' <- go_co cxt co ; arg' <- go_co cxt arg ; return (AppCo co' arg') } go_co cxt (SymCo co) = do { co' <- go_co cxt co ; return (SymCo co') } go_co cxt (TransCo co1 co2) = do { co1' <- go_co cxt co1 ; co2' <- go_co cxt co2 ; return (TransCo co1' co2') } go_co cxt (SelCo n co) = do { co' <- go_co cxt co ; return (SelCo n co') } go_co cxt (LRCo lr co) = do { co' <- go_co cxt co ; return (LRCo lr co') } go_co cxt (InstCo co arg) = do { co' <- go_co cxt co ; arg' <- go_co cxt arg ; return (InstCo co' arg') } go_co cxt (KindCo co) = do { co' <- go_co cxt co ; return (KindCo co') } go_co cxt (SubCo co) = do { co' <- go_co cxt co ; return (SubCo co') } go_co cxt@(as, env) co@(ForAllCo { fco_tcv = tv, fco_kind = kind_co, fco_body = body_co }) = do { kind_co' <- go_co cxt kind_co ; let tv' = setVarType tv $ coercionLKind kind_co' env' = extendVarEnv env tv tv' as' = as `delVarSet` tv ; 
body' <- go_co (as', env') body_co ; return (co { fco_tcv = tv', fco_kind = kind_co', fco_body = body' }) } go_co cxt co@(FunCo { fco_mult = w, fco_arg = co1 ,fco_res = co2 }) = do { co1' <- go_co cxt co1 ; co2' <- go_co cxt co2 ; w' <- go_co cxt w ; return (co { fco_mult = w', fco_arg = co1', fco_res = co2' })} go_co (as,env) co@(CoVarCo c) | Just c' <- lookupVarEnv env c = return (CoVarCo c') | bad_var_occ as c = Nothing | otherwise = return co go_co (as,_) co@(HoleCo h) | bad_var_occ as (ch_co_var h) = Nothing | otherwise = return co go_co cxt (AxiomCo ax cs) = do { cs' <- mapM (go_co cxt) cs ; return (AxiomCo ax cs') } go_co cxt co@(UnivCo { uco_lty = ty1, uco_rty = ty2, uco_deps = cos }) = do { ty1' <- go cxt ty1 ; ty2' <- go cxt ty2 ; cos' <- mapM (go_co cxt) cos ; return (co { uco_lty = ty1', uco_rty = ty2', uco_deps = cos' }) } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/FVs.hs-boot0000644000000000000000000000033407346545000021447 0ustar0000000000000000module GHC.Core.TyCo.FVs where import GHC.Prelude ( Bool ) import GHC.Types.Var.Set( TyCoVarSet ) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type ) noFreeVarsOfType :: Type -> Bool tyCoVarsOfType :: Type -> TyCoVarSetghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Ppr.hs0000644000000000000000000003037207346545000020556 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} -- | Pretty-printing types and coercions. 
module GHC.Core.TyCo.Ppr ( -- * Precedence PprPrec(..), topPrec, sigPrec, opPrec, funPrec, appPrec, maybeParen, -- * Pretty-printing types pprType, pprParendType, pprTidiedType, pprPrecType, pprPrecTypeX, pprTypeApp, pprTCvBndr, pprTCvBndrs, pprSigmaType, pprTheta, pprParendTheta, pprForAll, pprUserForAll, pprTyVar, pprTyVars, pprThetaArrowTy, pprClassPred, pprKind, pprParendKind, pprTyLit, pprDataCons, pprWithInvisibleBitsWhen, pprWithTYPE, pprSourceTyCon, -- * Pretty-printing coercions pprCo, pprParendCo, debugPprType, ) where import GHC.Prelude import {-# SOURCE #-} GHC.CoreToIface ( toIfaceTypeX, toIfaceTyLit, toIfaceForAllBndrs , toIfaceTyCon, toIfaceTcArgs, toIfaceCoercionX ) import {-# SOURCE #-} GHC.Core.DataCon ( dataConFullSig , dataConUserTyVarBinders, DataCon ) import GHC.Core.Type ( pickyIsLiftedTypeKind, pattern OneTy, pattern ManyTy, splitForAllReqTyBinders, splitForAllInvisTyBinders ) import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Tidy import GHC.Core.TyCo.FVs import GHC.Core.Class import GHC.Types.Var import GHC.Core.Multiplicity( pprArrowWithMultiplicity ) import GHC.Iface.Type import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Basic ( PprPrec(..), topPrec, sigPrec, opPrec , funPrec, appPrec, maybeParen ) {- %************************************************************************ %* * Pretty-printing types Defined very early because of debug printing in assertions %* * %************************************************************************ @pprType@ is the standard @Type@ printer; the overloaded @ppr@ function is defined to use this. @pprParendType@ is the same, except it puts parens around the type, except for the atomic cases. @pprParendType@ works just by setting the initial context precedence very high. Note that any function which pretty-prints a @Type@ first converts the @Type@ to an @IfaceType@. 
See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr. See Note [Precedence in types] in GHC.Types.Basic. -} pprType, pprParendType, pprTidiedType :: Type -> SDoc pprType = pprPrecType topPrec pprParendType = pprPrecType appPrec -- already pre-tidied pprTidiedType = pprIfaceType . toIfaceTypeX emptyVarSet pprPrecType :: PprPrec -> Type -> SDoc pprPrecType = pprPrecTypeX emptyTidyEnv pprPrecTypeX :: TidyEnv -> PprPrec -> Type -> SDoc pprPrecTypeX env prec ty = getPprStyle $ \sty -> getPprDebug $ \debug -> if debug -- Use debugPprType when in then debug_ppr_ty prec ty -- when in debug-style else pprPrecIfaceType prec (tidyToIfaceTypeStyX env ty sty) -- NB: debug-style is used for -dppr-debug -- dump-style is used for -ddump-tc-trace etc tidyToIfaceTypeStyX :: TidyEnv -> Type -> PprStyle -> IfaceType tidyToIfaceTypeStyX env ty sty | userStyle sty = tidyToIfaceTypeX env ty | otherwise = toIfaceTypeX (tyCoVarsOfType ty) ty -- in latter case, don't tidy, as we'll be printing uniques. pprTyLit :: TyLit -> SDoc pprTyLit = pprIfaceTyLit . toIfaceTyLit pprKind, pprParendKind :: Kind -> SDoc pprKind = pprType pprParendKind = pprParendType tidyToIfaceType :: Type -> IfaceType tidyToIfaceType = tidyToIfaceTypeX emptyTidyEnv tidyToIfaceTypeX :: TidyEnv -> Type -> IfaceType -- It's vital to tidy before converting to an IfaceType -- or nested binders will become indistinguishable! -- -- Also for the free type variables, tell toIfaceTypeX to -- leave them as IfaceFreeTyVar. This is super-important -- for debug printing. tidyToIfaceTypeX env ty = toIfaceTypeX (mkVarSet free_tcvs) (tidyType env' ty) -- NB: if the type has /already/ been tidied (for example by the typechecker) -- the tidy step here is a no-op. 
See Note [Tidying is idempotent] -- in GHC.Core.TyCo.Tidy where env' = tidyFreeTyCoVars env free_tcvs free_tcvs = tyCoVarsOfTypeList ty ------------ pprCo, pprParendCo :: Coercion -> SDoc pprCo co = getPprStyle $ \ sty -> pprIfaceCoercion (tidyToIfaceCoSty co sty) pprParendCo co = getPprStyle $ \ sty -> pprParendIfaceCoercion (tidyToIfaceCoSty co sty) tidyToIfaceCoSty :: Coercion -> PprStyle -> IfaceCoercion tidyToIfaceCoSty co sty | userStyle sty = tidyToIfaceCo co | otherwise = toIfaceCoercionX (tyCoVarsOfCo co) co -- in latter case, don't tidy, as we'll be printing uniques. tidyToIfaceCo :: Coercion -> IfaceCoercion -- It's vital to tidy before converting to an IfaceType -- or nested binders will become indistinguishable! -- -- Also for the free type variables, tell toIfaceCoercionX to -- leave them as IfaceFreeCoVar. This is super-important -- for debug printing. tidyToIfaceCo co = toIfaceCoercionX (mkVarSet free_tcvs) (tidyCo env co) where env = tidyFreeTyCoVars emptyTidyEnv free_tcvs free_tcvs = scopedSort $ tyCoVarsOfCoList co ------------ pprClassPred :: Class -> [Type] -> SDoc pprClassPred clas tys = pprTypeApp (classTyCon clas) tys ------------ pprTheta :: ThetaType -> SDoc pprTheta = pprIfaceContext topPrec . map tidyToIfaceType pprParendTheta :: ThetaType -> SDoc pprParendTheta = pprIfaceContext appPrec . map tidyToIfaceType pprThetaArrowTy :: ThetaType -> SDoc pprThetaArrowTy = pprIfaceContextArr . map tidyToIfaceType ------------------ pprSigmaType :: Type -> SDoc pprSigmaType = pprIfaceSigmaType ShowForAllWhen . tidyToIfaceType pprForAll :: [ForAllTyBinder] -> SDoc pprForAll tvs = pprIfaceForAll (toIfaceForAllBndrs tvs) -- | Print a user-level forall; see @Note [When to print foralls]@ in -- "GHC.Iface.Type". pprUserForAll :: [ForAllTyBinder] -> SDoc pprUserForAll = pprUserIfaceForAll . toIfaceForAllBndrs pprTCvBndrs :: [ForAllTyBinder] -> SDoc pprTCvBndrs tvs = sep (map pprTCvBndr tvs) pprTCvBndr :: ForAllTyBinder -> SDoc pprTCvBndr = pprTyVar . 
binderVar pprTyVars :: [TyVar] -> SDoc pprTyVars tvs = sep (map pprTyVar tvs) pprTyVar :: TyVar -> SDoc -- Print a type variable binder with its kind (but not if *) -- Here we do not go via IfaceType, because the duplication with -- pprIfaceTvBndr is minimal, and the loss of uniques etc in -- debug printing is disastrous pprTyVar tv | pickyIsLiftedTypeKind kind = ppr tv -- See Note [Suppressing * kinds] | otherwise = parens (ppr tv <+> dcolon <+> ppr kind) where kind = tyVarKind tv {- Note [Suppressing * kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally we want to print forall a. a->a not forall (a::*). a->a or forall (a::Type). a->a That is, for brevity we suppress a kind ascription of '*' (or Type). But what if the kind is (Const Type x)? type Const p q = p Then (Const Type x) is just a long way of saying Type. But it may be jolly confusing to suppress the 'x'. Suppose we have (polykinds/T18451a) foo :: forall a b (c :: Const Type b). Proxy '[a, c] Then this error message • These kind and type variables: a b (c :: Const Type b) are out of dependency order. Perhaps try this ordering: (b :: k) (a :: Const (*) b) (c :: Const (*) b) would be much less helpful if we suppressed the kind ascription on 'a'. Hence the use of pickyIsLiftedTypeKind. -} ----------------- debugPprType :: Type -> SDoc -- ^ debugPprType is a simple pretty printer that prints a type -- without going through IfaceType. It does not format as prettily -- as the normal route, but it's much more direct, and that can -- be useful for debugging. E.g. with -dppr-debug it prints the -- kind on type-variable /occurrences/ which the normal route -- fundamentally cannot do. 
debugPprType ty = debug_ppr_ty topPrec ty debug_ppr_ty :: PprPrec -> Type -> SDoc debug_ppr_ty _ (LitTy l) = ppr l debug_ppr_ty _ (TyVarTy tv) = ppr tv -- With -dppr-debug we get (tv :: kind) debug_ppr_ty prec (FunTy { ft_af = af, ft_mult = mult, ft_arg = arg, ft_res = res }) = maybeParen prec funPrec $ sep [debug_ppr_ty funPrec arg, arr <+> debug_ppr_ty prec res] where arr = pprArrowWithMultiplicity af $ case mult of OneTy -> Left True ManyTy -> Left False _ -> Right (debug_ppr_ty appPrec mult) debug_ppr_ty prec (TyConApp tc tys) | null tys = ppr tc | otherwise = maybeParen prec appPrec $ hang (ppr tc) 2 (sep (map (debug_ppr_ty appPrec) tys)) debug_ppr_ty _ (AppTy t1 t2) = hang (debug_ppr_ty appPrec t1) -- Print parens so we see ((a b) c) 2 (debug_ppr_ty appPrec t2) -- so that we can distinguish -- TyConApp from AppTy debug_ppr_ty prec (CastTy ty co) = maybeParen prec topPrec $ hang (debug_ppr_ty topPrec ty) 2 (text "|>" <+> ppr co) debug_ppr_ty _ (CoercionTy co) = parens (text "CO" <+> ppr co) -- Invisible forall: forall {k} (a :: k). t debug_ppr_ty prec t | (bndrs, body) <- splitForAllInvisTyBinders t , not (null bndrs) = maybeParen prec funPrec $ sep [ text "forall" <+> fsep (map ppr_bndr bndrs) <> dot, ppr body ] where -- (ppr tv) will print the binder kind-annotated -- when in debug-style ppr_bndr (Bndr tv InferredSpec) = braces (ppr tv) ppr_bndr (Bndr tv SpecifiedSpec) = ppr tv -- Visible forall: forall x y -> t debug_ppr_ty prec t | (bndrs, body) <- splitForAllReqTyBinders t , not (null bndrs) = maybeParen prec funPrec $ sep [ text "forall" <+> fsep (map ppr_bndr bndrs) <+> arrow, ppr body ] where -- (ppr tv) will print the binder kind-annotated -- when in debug-style ppr_bndr (Bndr tv ()) = ppr tv -- Impossible case: neither visible nor invisible forall. 
debug_ppr_ty _ ForAllTy{} = panic "debug_ppr_ty: neither splitForAllInvisTyBinders nor splitForAllReqTyBinders returned any binders" {- Note [Infix type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ With TypeOperators you can say f :: (a ~> b) -> b and the (~>) is considered a type variable. However, the type pretty-printer in this module will just see (a ~> b) as App (App (TyVarTy "~>") (TyVarTy "a")) (TyVarTy "b") So it'll print the type in prefix form. To avoid confusion we must remember to parenthesise the operator, thus (~>) a b -> b See #2766. -} pprDataCons :: TyCon -> SDoc pprDataCons = sepWithVBars . fmap pprDataConWithArgs . tyConDataCons where sepWithVBars [] = empty sepWithVBars docs = sep (punctuate (space <> vbar) docs) pprDataConWithArgs :: DataCon -> SDoc pprDataConWithArgs dc = sep [forAllDoc, thetaDoc, ppr dc <+> argsDoc] where (_univ_tvs, _ex_tvs, _eq_spec, theta, arg_tys, _res_ty) = dataConFullSig dc user_bndrs = tyVarSpecToBinders $ dataConUserTyVarBinders dc forAllDoc = pprUserForAll user_bndrs thetaDoc = pprThetaArrowTy theta argsDoc = hsep (fmap pprParendType (map scaledThing arg_tys)) pprTypeApp :: TyCon -> [Type] -> SDoc pprTypeApp tc tys = pprIfaceTypeApp topPrec (toIfaceTyCon tc) (toIfaceTcArgs tc tys) -- TODO: toIfaceTcArgs seems rather wasteful here ------------------ -- | Display all foralls, runtime-reps, and kind information -- when provided 'Bool' argument is 'True'. See GHC.Tc.Errors.Ppr -- Note [Showing invisible bits of types in error messages] pprWithInvisibleBitsWhen :: Bool -> SDoc -> SDoc pprWithInvisibleBitsWhen b = updSDocContext $ \ctx -> if b then ctx { sdocPrintExplicitKinds = True , sdocPrintExplicitRuntimeReps = True } else ctx -- | This variant preserves any use of TYPE in a type, effectively -- locally setting -fprint-explicit-runtime-reps. 
pprWithTYPE :: Type -> SDoc pprWithTYPE ty = updSDocContext (\ctx -> ctx { sdocPrintExplicitRuntimeReps = True }) $ ppr ty -- | Pretty prints a 'TyCon', using the family instance in case of a -- representation tycon. For example: -- -- > data T [a] = ... -- -- In that case we want to print @T [a]@, where @T@ is the family 'TyCon' pprSourceTyCon :: TyCon -> SDoc pprSourceTyCon tycon | Just (fam_tc, tys) <- tyConFamInst_maybe tycon = ppr $ fam_tc `TyConApp` tys -- can't be FunTyCon | otherwise = ppr tycon ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Ppr.hs-boot0000644000000000000000000000052507346545000021514 0ustar0000000000000000module GHC.Core.TyCo.Ppr where import {-# SOURCE #-} GHC.Types.Var ( TyVar ) import {-# SOURCE #-} GHC.Core.TyCo.Rep (Type, Kind, Coercion, TyLit) import GHC.Utils.Outputable ( SDoc ) pprType :: Type -> SDoc debugPprType :: Type -> SDoc pprKind :: Kind -> SDoc pprCo :: Coercion -> SDoc pprTyLit :: TyLit -> SDoc pprTyVar :: TyVar -> SDoc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Rep.hs0000644000000000000000000023721707346545000020552 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# OPTIONS_HADDOCK not-home #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[GHC.Core.TyCo.Rep]{Type and Coercion - friends' interface} Note [The Type-related module hierarchy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC.Core.Class GHC.Core.Coercion.Axiom GHC.Core.TyCon imports GHC.Core.{Class, Coercion.Axiom} GHC.Core.TyCo.Rep imports GHC.Core.{Class, Coercion.Axiom, TyCon} GHC.Core.TyCo.Ppr imports GHC.Core.TyCo.Rep GHC.Core.TyCo.FVs imports GHC.Core.TyCo.Rep GHC.Core.TyCo.Subst imports GHC.Core.TyCo.{Rep, FVs, Ppr} GHC.Core.TyCo.Tidy imports GHC.Core.TyCo.{Rep, FVs} GHC.Builtin.Types.Prim imports GHC.Core.TyCo.Rep ( including mkTyConTy ) GHC.Core.Coercion imports GHC.Core.Type -} -- We expose the relevant stuff from this module via the Type module module GHC.Core.TyCo.Rep ( 
-- * Types Type(..), TyLit(..), KindOrType, Kind, RuntimeRepType, LevityType, KnotTied, PredType, ThetaType, FRRType, -- Synonyms ForAllTyFlag(..), FunTyFlag(..), -- * Coercions Coercion(..), CoSel(..), FunSel(..), UnivCoProvenance(..), CoercionHole(..), coHoleCoVar, setCoHoleCoVar, isHeteroKindCoHole, CoercionN, CoercionR, CoercionP, KindCoercion, MCoercion(..), MCoercionR, MCoercionN, -- * Functions over types mkNakedTyConTy, mkTyVarTy, mkTyVarTys, mkTyCoVarTy, mkTyCoVarTys, mkFunTy, mkNakedFunTy, mkVisFunTy, mkScaledFunTys, mkInvisFunTy, mkInvisFunTys, tcMkVisFunTy, tcMkInvisFunTy, tcMkScaledFunTy, tcMkScaledFunTys, mkForAllTy, mkForAllTys, mkInvisForAllTys, mkPiTy, mkPiTys, mkVisFunTyMany, mkVisFunTysMany, nonDetCmpTyLit, cmpTyLit, -- * Functions over coercions pickLR, -- ** Analyzing types TyCoFolder(..), foldTyCo, noView, -- * Sizes typeSize, typesSize, coercionSize, -- * Multiplicities Scaled(..), scaledMult, scaledThing, mapScaledType, Mult ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType, pprCo, pprTyLit ) import {-# SOURCE #-} GHC.Builtin.Types import {-# SOURCE #-} GHC.Core.TyCo.FVs( tyCoVarsOfType ) -- Use in assertions import {-# SOURCE #-} GHC.Core.Type( chooseFunTyFlag, typeKind, typeTypeOrConstraint ) -- Transitively pulls in a LOT of stuff, better to break the loop -- friends: import GHC.Types.Var import GHC.Types.Var.Set( elemVarSet ) import GHC.Core.TyCon import GHC.Core.Coercion.Axiom -- others import GHC.Builtin.Names import GHC.Types.Basic ( LeftOrRight(..), pickLR ) import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Binary -- libraries import qualified Data.Data as Data hiding ( TyCon ) import Data.IORef ( IORef ) -- for CoercionHole import Control.DeepSeq {- ********************************************************************** * * Type * * ********************************************************************** -} -- | The key representation of types 
within the compiler type KindOrType = Type -- See Note [Arguments to type constructors] -- | The key type representing kinds in the compiler. type Kind = Type -- | Type synonym used for types of kind RuntimeRep. type RuntimeRepType = Type -- | Type synonym used for types of kind Levity. type LevityType = Type -- A type with a syntactically fixed RuntimeRep, in the sense -- of Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. type FRRType = Type -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Type -- See Note [Non-trivial definitional equality] = TyVarTy Var -- ^ Vanilla type or kind variable (*never* a coercion variable) | AppTy Type Type -- ^ Type application to something other than a 'TyCon'. Parameters: -- -- 1) Function: must /not/ be a 'TyConApp' or 'CastTy', -- must be another 'AppTy', or 'TyVarTy' -- See Note [Respecting definitional equality] \(EQ1) about the -- no 'CastTy' requirement -- -- 2) Argument type | TyConApp TyCon [KindOrType] -- ^ Application of a 'TyCon', including newtypes /and/ synonyms. -- Invariant: saturated applications of 'FunTyCon' must -- use 'FunTy' and saturated synonyms must use their own -- constructors. However, /unsaturated/ 'FunTyCon's -- do appear as 'TyConApp's. -- Parameters: -- -- 1) Type constructor being applied to. -- -- 2) Type arguments. Might not have enough type arguments -- here to saturate the constructor. -- Even type synonyms are not necessarily saturated; -- for example unsaturated type synonyms -- can appear as the right hand side of a type synonym. | ForAllTy -- See Note [ForAllTy] {-# UNPACK #-} !ForAllTyBinder Type -- ^ A Π type. -- See Note [Why ForAllTy can quantify over a coercion variable] -- INVARIANT: If the binder is a coercion variable, it must -- be mentioned in the Type. 
-- See Note [Unused coercion variable in ForAllTy] | FunTy -- ^ FUN m t1 t2 Very common, so an important special case -- See Note [Function types] { ft_af :: FunTyFlag -- Is this (->/FUN) or (=>) or (==>)? -- This info is fully specified by the kinds in -- ft_arg and ft_res -- Note [FunTyFlag] in GHC.Types.Var , ft_mult :: Mult -- Multiplicity; always Many for (=>) and (==>) , ft_arg :: Type -- Argument type , ft_res :: Type } -- Result type | LitTy TyLit -- ^ Type literals are similar to type constructors. | CastTy Type KindCoercion -- ^ A kind cast. The coercion is always nominal. -- INVARIANT: The cast is never reflexive \(EQ2) -- INVARIANT: The Type is not a CastTy (use TransCo instead) \(EQ3) -- INVARIANT: The Type is not a ForAllTy over a tyvar \(EQ4) -- See Note [Respecting definitional equality] | CoercionTy Coercion -- ^ Injection of a Coercion into a type -- This should only ever be used in the RHS of an AppTy, -- in the list of a TyConApp, when applying a promoted -- GADT data constructor deriving Data.Data instance Outputable Type where ppr = pprType -- NOTE: Other parts of the code assume that type literals do not contain -- types or type variables. 
data TyLit = NumTyLit Integer | StrTyLit FastString | CharTyLit Char deriving (Eq, Data.Data) -- Non-determinism arises due to uniqCompareFS nonDetCmpTyLit :: TyLit -> TyLit -> Ordering nonDetCmpTyLit = cmpTyLitWith NonDetFastString -- Slower than nonDetCmpTyLit but deterministic cmpTyLit :: TyLit -> TyLit -> Ordering cmpTyLit = cmpTyLitWith LexicalFastString {-# INLINE cmpTyLitWith #-} cmpTyLitWith :: Ord r => (FastString -> r) -> TyLit -> TyLit -> Ordering cmpTyLitWith _ (NumTyLit x) (NumTyLit y) = compare x y cmpTyLitWith w (StrTyLit x) (StrTyLit y) = compare (w x) (w y) cmpTyLitWith _ (CharTyLit x) (CharTyLit y) = compare x y cmpTyLitWith _ a b = compare (tag a) (tag b) where tag :: TyLit -> Int tag NumTyLit{} = 0 tag StrTyLit{} = 1 tag CharTyLit{} = 2 instance Outputable TyLit where ppr = pprTyLit {- Note [Function types] ~~~~~~~~~~~~~~~~~~~~~~~~ FunTy is the constructor for a function type. Here are the details: * The primitive function type constructor FUN has kind FUN :: forall (m :: Multiplicity) -> forall {r1 :: RuntimeRep} {r2 :: RuntimeRep}. TYPE r1 -> TYPE r2 -> Type mkTyConApp ensures that we convert a saturated application TyConApp FUN [m,r1,r2,t1,t2] into FunTy FTF_T_T m t1 t2 dropping the 'r1' and 'r2' arguments; they are easily recovered from 't1' and 't2'. The FunTyFlag is always FTF_T_T, because we build constraint arrows (=>) with e.g. mkPhiTy and friends, never `mkTyConApp funTyCon args`. * For the time being its RuntimeRep quantifiers are left inferred. This is to allow for it to evolve. * Because the RuntimeRep args came first historically (that is, the arrow type constructor gained these arguments before gaining the Multiplicity argument), we wanted to be able to say type (->) = FUN Many which we do in library module GHC.Types. This means that the Multiplicity argument must precede the RuntimeRep arguments -- and it means changing the name of the primitive constructor from (->) to FUN. 
* The multiplicity argument is dependent, because Typeable does not support a type such as `Multiplicity -> forall {r1 r2 :: RuntimeRep}. ...`. There is a plan to change the argument order and make the multiplicity argument nondependent in #20164. * Re the ft_af field: see Note [FunTyFlag] in GHC.Types.Var See Note [Types for coercions, predicates, and evidence] This visibility info makes no difference in Core; it matters only when we regard the type as a Haskell source type. Note [Types for coercions, predicates, and evidence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We treat differently: (a) Predicate types Test: isPredTy Binders: DictIds Kind: Constraint Examples: (Eq a), and (a ~ b) (b) Coercion types are primitive, unboxed equalities Test: isCoVarTy Binders: CoVars (can appear in coercions) Kind: TYPE (TupleRep []) Examples: (t1 ~# t2) or (t1 ~R# t2) (c) Evidence types is the type of evidence manipulated by the type constraint solver. Test: isEvVarType Binders: EvVars Kind: Constraint or TYPE (TupleRep []) Examples: all coercion types and predicate types Coercion types and predicate types are mutually exclusive, but evidence types are a superset of both. When treated as a user type, - Predicates (of kind Constraint) are invisible and are implicitly instantiated - Coercion types, and non-pred evidence types (i.e. not of kind Constraint), are just regular old types, are visible, and are not implicitly instantiated. In a FunTy { ft_af = af } and af = FTF_C_T or FTF_C_C, the argument type is always a Predicate type. Note [Weird typing rule for ForAllTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here are the typing rules for ForAllTy: tyvar : Type inner : TYPE r tyvar does not occur in r ------------------------------------ ForAllTy (Bndr tyvar vis) inner : TYPE r inner : TYPE r ------------------------------------ ForAllTy (Bndr covar vis) inner : Type Note that the kind of the result depends on whether the binder is a tyvar or a covar. 
The kind of a forall-over-tyvar is the same as the kind of the inner type. This is because quantification over types is erased before runtime. By contrast, the kind of a forall-over-covar is always Type, because a forall-over-covar is compiled into a function taking a 0-bit-wide erased coercion argument. Because the tyvar form above includes r in its result, we must be careful not to let any variables escape -- thus the last premise of the rule above. Note [Arguments to type constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Because of kind polymorphism, in addition to type application we now have kind instantiation. We reuse the same notations to do so. For example: Just (* -> *) Maybe Right * Nat Zero are represented by: TyConApp (PromotedDataCon Just) [* -> *, Maybe] TyConApp (PromotedDataCon Right) [*, Nat, (PromotedDataCon Zero)] Important note: Nat is used as a *kind* and not as a type. This can be confusing, since type-level Nat and kind-level Nat are identical. We use the kind of (PromotedDataCon Right) to know if its arguments are kinds or types. This kind instantiation only happens in TyConApp currently. Note [Non-trivial definitional equality] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Is ((IO |> co1) Int |> co2) equal to (IO Int)? Assume co1 :: (Type->Type) ~ (Type->Wombat) co2 :: Wombat ~ Type Well, yes. The casts are just getting in the way. See also Note [Respecting definitional equality]. So we do this: (EQTYPE) The `eqType` function, which defines Core's type equality relation, - /ignores/ casts, and - /ignores/ coercion arguments - /provided/ two types have the same kind This allows us to be a little sloppier in keeping track of coercions, which is a good thing. It also means that eqType does not depend on eqCoercion, which is also a good thing. Why is this sensible? That is, why is something different than α-equivalence appropriate for the implementation of eqType? Anything smaller than ~ and homogeneous is an appropriate definition for equality. 
The type safety of FC depends only on ~. Let's say η : τ ~ σ. Any expression of type τ can be transmuted to one of type σ at any point by casting. The same is true of expressions of type σ. So in some sense, τ and σ are interchangeable. But let's be more precise. If we examine the typing rules of FC (say, those in https://richarde.dev/papers/2015/equalities/equalities.pdf) there are several places where the same metavariable is used in two different premises to a rule. (For example, see Ty_App.) There is an implicit equality check here. What definition of equality should we use? By convention, we use α-equivalence. Take any rule with one (or more) of these implicit equality checks. Then there is an admissible rule that uses ~ instead of the implicit check, adding in casts as appropriate. The only problem here is that ~ is heterogeneous. To make the kinds work out in the admissible rule that uses ~, it is necessary to homogenize the coercions. That is, if we have η : (τ : κ1) ~ (σ : κ2), then we don't use η; we use η |> kind η, which is homogeneous. The effect of this all is that eqType, the implementation of the implicit equality check, can use any homogeneous relation that is smaller than ~, as those rules must also be admissible. A more drawn out argument around all of this is presented in Section 7.2 of Richard E's thesis (http://richarde.dev/papers/2016/thesis/eisenberg-thesis.pdf). What would go wrong if we insisted on the casts matching? See the beginning of Section 8 in the unpublished paper above. Theoretically, nothing at all goes wrong. But in practical terms, getting the coercions right proved to be nightmarish. And types would explode: during kind-checking, we often produce reflexive kind coercions. When we try to cast by these, mkCastTy just discards them. 
But if we used an eqType that distinguished between Int and Int |> <*>, then we couldn't discard -- the output of kind-checking would be enormous, and we would need enormous casts with lots of CoherenceCo's to straighten them out. Would anything go wrong if eqType looked through type families? No, not at all. But that makes eqType rather hard to implement. Thus, the guideline for eqType is that it should be the largest easy-to-implement relation that is still smaller than ~ and homogeneous. The precise choice of relation is somewhat incidental, as long as the smart constructors and destructors in Type respect whatever relation is chosen. Another helpful principle with eqType is this: (EQ) If (t1 `eqType` t2) then I can replace t1 by t2 anywhere. This principle also tells us that eqType must relate only types with the same kinds. Interestingly, it must be the case that the free variables of t1 and t2 might be different, even if t1 `eqType` t2. A simple example of this is if we have both cv1 :: k1 ~ k2 and cv2 :: k1 ~ k2 in the environment. Then t1 = t |> cv1 and t2 = t |> cv2 are eqType; yet cv1 is in the free vars of t1 and cv2 is in the free vars of t2. Unless we choose to implement eqType to be just α-equivalence, this wrinkle around free variables remains. Yet not all is lost: we can say that any two equal types share the same *relevant* free variables. Here, a relevant variable is a shallow free variable (see Note [Shallow and deep free variables] in GHC.Core.TyCo.FVs) that does not appear within a coercion. Note that type variables can appear within coercions (in, say, a Refl node), but that coercion variables cannot appear outside a coercion. We do not (yet) have a function to extract relevant free variables, but it would not be hard to write if the need arises. Note [Respecting definitional equality] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Non-trivial definitional equality] introduces the property (EQ). How is this upheld? 
Any function that pattern matches on all the constructors will have to consider
the possibility of CastTy. Presumably, those functions will handle CastTy
appropriately and we'll be OK.

More dangerous are the splitXXX functions. Let's focus on splitTyConApp.
We don't want it to fail on (T a b c |> co). Happily, if we have
  (T a b c |> co) `eqType` (T d e f)
then co must be reflexive. Why? eqType checks that the kinds are equal, as
well as checking that (a `eqType` d), (b `eqType` e), and (c `eqType` f).
By the kind check, we know that (T a b c |> co) and (T d e f) have the same
kind. So the only way that co could be non-reflexive is for (T a b c) to have
a different kind than (T d e f). But because T's kind is closed (all tycon
kinds are closed), the only way for this to happen is that one of the arguments
has to differ, leading to a contradiction. Thus, co is reflexive.

Accordingly, by eliminating reflexive casts, splitTyConApp need not worry
about outermost casts to uphold (EQ). Eliminating reflexive casts is done
in mkCastTy. This is (EQ2) below.

Unfortunately, that's not the end of the story. Consider comparing
  (T a b c)      =?       (T a b |> (co -> <Type>)) (c |> co)
These two types have the same kind (Type), but the left type is a TyConApp
while the right type is not. To handle this case, we say that the right-hand
type is ill-formed, requiring an AppTy never to have a casted TyConApp on its
left. It is easy enough to pull around the coercions to maintain this
invariant, as done in Type.mkAppTy. In the example above, trying to form the
right-hand type will instead yield (T a b (c |> co |> sym co) |> <Type>).
Both the casts there are reflexive and will be dropped. Huzzah.

This idea of pulling coercions to the right works for splitAppTy as well.

However, there is one hiccup: it's possible that a coercion doesn't relate two
Pi-types. For example, if we have @type family Fun a b where Fun a b = a -> b@,
then we might have (T :: Fun Type Type) and (T |> axFun) Int.
That axFun can't be pulled to the right. But we don't need to pull it: (T |> axFun) Int is not `eqType` to any proper TyConApp -- thus, leaving it where it is doesn't violate our (EQ) property. In order to detect reflexive casts reliably, we must make sure not to have nested casts: we update (t |> co1 |> co2) to (t |> (co1 `TransCo` co2)). This is (EQ3) below. One other troublesome case is ForAllTy. See Note [Weird typing rule for ForAllTy]. The kind of the body is the same as the kind of the ForAllTy. Accordingly, ForAllTy tv (ty |> co) and (ForAllTy tv ty) |> co are `eqType`. But only the first can be split by splitForAllTy. So we forbid the second form, instead pushing the coercion inside to get the first form. This is done in mkCastTy. In sum, in order to uphold (EQ), we need the following invariants: (EQ1) No decomposable CastTy to the left of an AppTy, where a "decomposable cast" is one that relates either a FunTy to a FunTy, or a ForAllTy to a ForAllTy. (EQ2) No reflexive casts in CastTy. (EQ3) No nested CastTys. (EQ4) No CastTy over (ForAllTy (Bndr tyvar vis) body). See Note [Weird typing rule for ForAllTy] These invariants are all documented above, in the declaration for Type. Note [Equality on FunTys] ~~~~~~~~~~~~~~~~~~~~~~~~~ A (FunTy vis mult arg res) is just an abbreviation for a TyConApp funTyCon [mult, arg_rep, res_rep, arg, res] where arg :: TYPE arg_rep res :: TYPE res_rep Note that the vis field of a FunTy appears nowhere in the equivalent TyConApp. In Core, this is OK, because we no longer care about the visibility of the argument in a FunTy (the vis distinguishes between arg -> res and arg => res). In the type-checker, we are careful not to decompose FunTys with an invisible argument. See also Note [Decomposing fat arrow c=>t] in GHC.Core.Type. In order to compare FunTys while respecting how they could expand into TyConApps, we must check the kinds of the arg and the res. 
Note [ForAllTy] ~~~~~~~~~~~~~~~ A (ForAllTy (Bndr tcv vis) ty) can quantify over a TyVar or, less commonly, a CoVar. See Note [Why ForAllTy can quantify over a coercion variable] for why we need the latter. (FT1) Invariant: See Note [Weird typing rule for ForAllTy] (FT2) Invariant: in (ForAllTy (Bndr tcv vis) ty), if tcv is a CoVar, then vis = coreTyLamForAllTyFlag. Visibility is not important for coercion abstractions, because they are not user-visible. (FT3) Invariant: see Note [Unused coercion variable in ForAllTy] Note [Why ForAllTy can quantify over a coercion variable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ForAllTyBinder in a ForAllTy can be (most often) a TyVar or (rarely) a CoVar. We support quantifying over a CoVar here in order to support a homogeneous (~#) relation (someday -- not yet implemented). Here is the example: type (:~~:) :: forall k1 k2. k1 -> k2 -> Type data a :~~: b where HRefl :: a :~~: a Assuming homogeneous equality (that is, with (~#) :: forall k. k -> k -> TYPE (TupleRep '[]) ) after rejigging to make equalities explicit, we get a constructor that looks like HRefl :: forall k1 k2 (a :: k1) (b :: k2). forall (cv :: k1 ~# k2). (a |> cv) ~# b => (:~~:) k1 k2 a b Note that we must cast `a` by a cv bound in the same type in order to make this work out. See also https://gitlab.haskell.org/ghc/ghc/-/wikis/dependent-haskell/phase2 which gives a general road map that covers this space. Having this feature in Core does *not* mean we have it in source Haskell. See #15710 about that. Note [Unused coercion variable in ForAllTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have \(co:t1 ~# t2). e What type should we give to the above expression? (1) forall (co:t1 ~# t2) -> t (2) (t1 ~# t2) -> t If co is used in t, (1) should be the right choice. if co is not used in t, we would like to have (1) and (2) equivalent. However, we want to keep eqType simple and don't want eqType (1) (2) to return True in any case. 
We decide to always construct (2) if co is not used in t. Thus in mkLamType, we check whether the variable is a coercion variable (of type (t1 ~# t2), and whether it is un-used in the body. If so, it returns a FunTy instead of a ForAllTy. There are cases we want to skip the check. For example, the check is unnecessary when it is known from the context that the input variable is a type variable. In those cases, we use mkForAllTy. Note [Weird typing rule for ForAllTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is the (truncated) typing rule for the dependent ForAllTy: inner : TYPE r tyvar is not free in r ---------------------------------------- ForAllTy (Bndr tyvar vis) inner : TYPE r Note that the kind of `inner` is the kind of the overall ForAllTy. This is necessary because every ForAllTy over a type variable is erased at runtime. Thus the runtime representation of a ForAllTy (as encoded, via TYPE rep, in the kind) must be the same as the representation of the body. We must check for skolem-escape, though. The skolem-escape would prevent a definition like undefined :: forall (r :: RuntimeRep) (a :: TYPE r). a because the type's kind (TYPE r) mentions the out-of-scope r. Luckily, the real type of undefined is undefined :: forall (r :: RuntimeRep) (a :: TYPE r). HasCallStack => a and that HasCallStack constraint neatly sidesteps the potential skolem-escape problem. If the bound variable is a coercion variable: inner : TYPE r covar is free in inner ------------------------------------ ForAllTy (Bndr covar vis) inner : Type Here, the kind of the ForAllTy is just Type, because coercion abstractions are *not* erased. The "covar is free in inner" premise is solely to maintain the representation invariant documented in Note [Unused coercion variable in ForAllTy]. Though there is surface similarity between this free-var check and the one in the tyvar rule, these two restrictions are truly unrelated. -} -- | A type labeled 'KnotTied' might have knot-tied tycons in it. 
See -- Note [Type checking recursive type and class declarations] in -- "GHC.Tc.TyCl" type KnotTied ty = ty {- ********************************************************************** * * PredType * * ********************************************************************** -} -- | A type of the form @p@ of constraint kind represents a value whose type is -- the Haskell predicate @p@, where a predicate is what occurs before -- the @=>@ in a Haskell type. -- -- We use 'PredType' as documentation to mark those types that we guarantee to -- have this kind. -- -- It can be expanded into its representation, but: -- -- * The type checker must treat it as opaque -- -- * The rest of the compiler treats it as transparent -- -- Consider these examples: -- -- > f :: (Eq a) => a -> Int -- > g :: (?x :: Int -> Int) => a -> Int -- > h :: (r\l) => {r} => {l::Int | r} -- -- Here the @Eq a@ and @?x :: Int -> Int@ and @r\l@ are all called \"predicates\" type PredType = Type -- | A collection of 'PredType's type ThetaType = [PredType] {- (We don't support TREX records yet, but the setup is designed to expand to allow them.) A Haskell qualified type, such as that for f,g,h above, is represented using * a FunTy for the double arrow * with a type of kind Constraint as the function argument The predicate really does turn into a real extra argument to the function. If the argument has type (p :: Constraint) then the predicate p is represented by evidence of type p. 
%************************************************************************
%*                                                                      *
                        Simple constructors
%*                                                                      *
%************************************************************************

These functions are here so that they can be used by GHC.Builtin.Types.Prim,
which in turn is imported by Type
-}

mkTyVarTy :: TyVar -> Type
mkTyVarTy v = assertPpr (isTyVar v) (ppr v <+> dcolon <+> ppr (tyVarKind v)) $
              TyVarTy v

mkTyVarTys :: [TyVar] -> [Type]
mkTyVarTys = map mkTyVarTy -- a common use of mkTyVarTy

-- | Make a type from a 'TyCoVar': a 'TyVarTy' for a type variable, or a
-- 'CoercionTy' wrapping a 'CoVarCo' for a coercion variable.
mkTyCoVarTy :: TyCoVar -> Type
mkTyCoVarTy v
  | isTyVar v = TyVarTy v
  | otherwise = CoercionTy (CoVarCo v)

mkTyCoVarTys :: [TyCoVar] -> [Type]
mkTyCoVarTys = map mkTyCoVarTy

infixr 3 `mkFunTy`, `mkInvisFunTy`, `mkVisFunTyMany`

mkNakedFunTy :: FunTyFlag -> Kind -> Kind -> Kind
-- See Note [Naked FunTy] in GHC.Builtin.Types
-- Always Many multiplicity; kinds have no linearity
mkNakedFunTy af arg res
  = FunTy { ft_af = af, ft_mult = manyDataConTy
          , ft_arg = arg, ft_res = res }

mkFunTy :: HasDebugCallStack => FunTyFlag -> Mult -> Type -> Type -> Type
-- Assertion-checked: the supplied FunTyFlag must agree with the one
-- recomputed from the argument and result kinds.
mkFunTy af mult arg res
  = assertPpr (af == chooseFunTyFlag arg res)
              (vcat [ text "af" <+> ppr af
                    , text "chooseAAF" <+> ppr (chooseFunTyFlag arg res)
                    , text "arg" <+> ppr arg <+> dcolon <+> ppr (typeKind arg)
                    , text "res" <+> ppr res <+> dcolon <+> ppr (typeKind res) ]) $
    FunTy { ft_af   = af
          , ft_mult = mult
          , ft_arg  = arg
          , ft_res  = res }

mkInvisFunTy :: HasDebugCallStack => Type -> Type -> Type
mkInvisFunTy arg res
  = mkFunTy (invisArg (typeTypeOrConstraint res)) manyDataConTy arg res

mkInvisFunTys :: HasDebugCallStack => [Type] -> Type -> Type
mkInvisFunTys args res
  = foldr (mkFunTy af manyDataConTy) res args
  where
    af = invisArg (typeTypeOrConstraint res)

mkVisFunTy :: HasDebugCallStack => Mult -> Type -> Type -> Type
-- Always TypeLike, user-specified multiplicity.
mkVisFunTy = mkFunTy visArgTypeLike

-- | Make nested arrow types
-- | Special, common, case: Arrow type with mult Many
mkVisFunTyMany :: HasDebugCallStack => Type -> Type -> Type
-- Always TypeLike, multiplicity Many
mkVisFunTyMany = mkVisFunTy manyDataConTy

mkVisFunTysMany :: [Type] -> Type -> Type
-- Always TypeLike, multiplicity Many
mkVisFunTysMany tys ty = foldr mkVisFunTyMany ty tys

---------------
mkScaledFunTy :: HasDebugCallStack => FunTyFlag -> Scaled Type -> Type -> Type
mkScaledFunTy af (Scaled mult arg) res = mkFunTy af mult arg res

mkScaledFunTys :: HasDebugCallStack => [Scaled Type] -> Type -> Type
-- All visible args
-- Result type can be TypeLike or ConstraintLike
-- Example of the latter: dataConWrapperType for the data con of a class
mkScaledFunTys tys ty = foldr (mkScaledFunTy af) ty tys
  where
    af = visArg (typeTypeOrConstraint ty)

---------------
-- | Like 'mkTyCoForAllTy', but does not check the occurrence of the binder
-- See Note [Unused coercion variable in ForAllTy]
mkForAllTy :: ForAllTyBinder -> Type -> Type
mkForAllTy bndr body
  = assertPpr (good_bndr bndr) (ppr bndr <+> ppr body) $
    ForAllTy bndr body
  where
    -- Check ForAllTy invariants
    good_bndr (Bndr cv vis)
      | isCoVar cv = vis == coreTyLamForAllTyFlag   -- See (FT2) in Note [ForAllTy]
                     && (cv `elemVarSet` tyCoVarsOfType body)
                        -- See (FT3) in Note [ForAllTy]
      | otherwise  = True

-- | Wraps foralls over the type using the provided 'TyCoVar's from left to right
mkForAllTys :: [ForAllTyBinder] -> Type -> Type
mkForAllTys tyvars ty = foldr ForAllTy ty tyvars

-- | Wraps foralls over the type using the provided 'InvisTVBinder's from left to right
mkInvisForAllTys :: [InvisTVBinder] -> Type -> Type
mkInvisForAllTys tyvars = mkForAllTys (tyVarSpecToBinders tyvars)

mkPiTy :: HasDebugCallStack => PiTyBinder -> Type -> Type
mkPiTy (Anon ty1 af) ty2 = mkScaledFunTy af ty1 ty2
mkPiTy (Named bndr)  ty  = mkForAllTy bndr ty

mkPiTys :: HasDebugCallStack => [PiTyBinder] -> Type -> Type
mkPiTys tbs ty = foldr mkPiTy ty tbs

-- | 'mkNakedTyConTy' creates a nullary 'TyConApp'. In general you
-- should rather use 'GHC.Core.Type.mkTyConTy', which picks the shared
-- nullary TyConApp from inside the TyCon (via tyConNullaryTy). But
-- we have to build the TyConApp tc [] in that TyCon field; that's
-- what 'mkNakedTyConTy' is for.
mkNakedTyConTy :: TyCon -> Type
mkNakedTyConTy tycon = TyConApp tycon []

tcMkVisFunTy :: Mult -> Type -> Type -> Type
-- Always TypeLike result, user-specified multiplicity.
-- Does not have the assert-checking in mkFunTy: used by the typechecker
-- to avoid looking at the result kind, which may not be zonked
tcMkVisFunTy mult arg res
  = FunTy { ft_af = visArgTypeLike, ft_mult = mult
          , ft_arg = arg, ft_res = res }

tcMkInvisFunTy :: TypeOrConstraint -> Type -> Type -> Type
-- Always invisible (constraint) argument, result specified by res_torc
-- Does not have the assert-checking in mkFunTy: used by the typechecker
-- to avoid looking at the result kind, which may not be zonked
tcMkInvisFunTy res_torc arg res
  = FunTy { ft_af = invisArg res_torc, ft_mult = manyDataConTy
          , ft_arg = arg, ft_res = res }

tcMkScaledFunTys :: [Scaled Type] -> Type -> Type
-- All visible args
-- Result type must be TypeLike
-- No mkFunTy assert checking; result kind may not be zonked
tcMkScaledFunTys tys ty = foldr tcMkScaledFunTy ty tys

-- Scaled-argument version of 'tcMkVisFunTy'; likewise no assert checking
tcMkScaledFunTy :: Scaled Type -> Type -> Type
tcMkScaledFunTy (Scaled mult arg) res = tcMkVisFunTy mult arg res

{- %************************************************************************
%*                                                                      *
                        Coercions
%*                                                                      *
%************************************************************************
-}

-- | A 'Coercion' is concrete evidence of the equality/convertibility
-- of two types.

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
data Coercion
  -- Each constructor has a "role signature", indicating the way roles are
  -- propagated through coercions.
  --    -  P, N, and R stand for coercions of the given role
  --    -  e stands for a coercion of a specific unknown role
  --       (think "role polymorphism")
  --    -  "e" stands for an explicit role parameter indicating role e.
  --    -  _ stands for a parameter that is not a Role or Coercion.

  -- These ones mirror the shape of types
  = -- Refl :: _ -> N
    -- A special case reflexivity for a very common case: Nominal reflexivity
    -- If you need Representational, use (GRefl Representational ty MRefl)
    -- not (SubCo (Refl ty))
    Refl Type  -- See Note [Refl invariant]

  -- GRefl :: "e" -> _ -> Maybe N -> e
  -- See Note [Generalized reflexive coercion]
  | GRefl Role Type MCoercionN  -- See Note [Refl invariant]
      -- Use (Refl ty), not (GRefl Nominal ty MRefl)
      -- Use (GRefl Representational _ _), not (SubCo (GRefl Nominal _ _))

  -- These ones simply lift the correspondingly-named
  -- Type constructors into Coercions

  -- TyConAppCo :: "e" -> _ -> ?? -> e
  -- See Note [TyConAppCo roles]
  | TyConAppCo Role TyCon [Coercion]  -- lift TyConApp
      -- The TyCon is never a synonym;
      -- we expand synonyms eagerly
      -- But it can be a type function
      -- TyCon is never a saturated (->); use FunCo instead

  | AppCo Coercion CoercionN  -- lift AppTy
      -- AppCo :: e -> N -> e

  -- See Note [ForAllCo]
  | ForAllCo { fco_tcv  :: TyCoVar
             , fco_visL :: !ForAllTyFlag  -- Visibility of coercionLKind
             , fco_visR :: !ForAllTyFlag  -- Visibility of coercionRKind
                  -- See (FC7) of Note [ForAllCo]
             , fco_kind :: KindCoercion
             , fco_body :: Coercion }
      -- ForAllCo :: _ -> N -> e -> e

  | FunCo  -- FunCo :: "e" -> N/P -> e -> e -> e
           -- See Note [FunCo] for fco_afl, fco_afr
      { fco_role :: Role
      , fco_afl  :: FunTyFlag  -- Arrow for coercionLKind
      , fco_afr  :: FunTyFlag  -- Arrow for coercionRKind
      , fco_mult :: CoercionN
      , fco_arg, fco_res :: Coercion }
      -- (if the role "e" is Phantom, the first coercion is, too)
      -- the first coercion is for the multiplicity

  -- These are special
  | CoVarCo CoVar  -- :: _ -> (N or R)
      -- result role depends on the tycon of the variable's type

  | AxiomCo CoAxiomRule [Coercion]
      -- The coercion arguments always *precisely* saturate
      -- arity of (that branch of) the CoAxiom. If there are
      -- any left over, we use AppCo.
      -- See Note [Coercion axioms applied to coercions]
      -- The roles of the argument coercions are determined
      -- by the cab_roles field of the relevant branch of the CoAxiom

  | UnivCo  -- See Note [UnivCo]
            -- Of kind (lty ~role rty)
      { uco_prov :: UnivCoProvenance
      , uco_role :: Role
      , uco_lty, uco_rty :: Type
      , uco_deps :: [Coercion]  -- Coercions on which it depends
           -- See Note [The importance of tracking UnivCo dependencies]
      }

  | SymCo Coercion             -- :: e -> e
  | TransCo Coercion Coercion  -- :: e -> e -> e

  | SelCo CoSel Coercion  -- See Note [SelCo]

  | LRCo LeftOrRight CoercionN  -- Decomposes (t_left t_right)
      -- :: _ -> N -> N
  | InstCo Coercion CoercionN
      -- :: e -> N -> e
      -- See Note [InstCo roles]

  -- Extract a kind coercion from a (heterogeneous) type coercion
  -- NB: all kind coercions are Nominal
  | KindCo Coercion
      -- :: e -> N

  | SubCo CoercionN  -- Turns a ~N into a ~R
      -- :: N -> R

  | HoleCo CoercionHole  -- ^ See Note [Coercion holes]
                         -- Only present during typechecking
  deriving Data.Data

data CoSel  -- See Note [SelCo]
  = SelTyCon Int Role  -- Decomposes (T co1 ... con); zero-indexed
      -- Invariant: Given: SelCo (SelTyCon i r) co
      --            we have r == tyConRole (coercionRole co) tc
      --            and tc1 == tc2
      --            where T tc1 _ = coercionLKind co
      --                  T tc2 _ = coercionRKind co
      -- See Note [SelCo]
  | SelFun FunSel  -- Decomposes (co1 -> co2)
  | SelForAll      -- Decomposes (forall a. co)
  deriving( Eq, Data.Data, Ord )

data FunSel  -- See Note [SelCo]
  = SelMult  -- Multiplicity
  | SelArg   -- Argument of function
  | SelRes   -- Result of function
  deriving( Eq, Data.Data, Ord )

type CoercionN = Coercion     -- always nominal
type CoercionR = Coercion     -- always representational
type CoercionP = Coercion     -- always phantom
type KindCoercion = CoercionN -- always nominal

instance Outputable Coercion where
  ppr = pprCo

instance Outputable CoSel where
  ppr (SelTyCon n r) = text "Tc" <> parens (int n <> comma <> pprOneCharRole r)
  ppr SelForAll      = text "All"
  ppr (SelFun fs)    = text "Fun" <> parens (ppr fs)

-- | Render a 'Role' as a single character: N, R, or P.
pprOneCharRole :: Role -> SDoc
pprOneCharRole Nominal          = char 'N'
pprOneCharRole Representational = char 'R'
pprOneCharRole Phantom          = char 'P'

instance Outputable FunSel where
  ppr SelMult = text "mult"
  ppr SelArg  = text "arg"
  ppr SelRes  = text "res"

instance Binary CoSel where
  put_ bh (SelTyCon n r)   = do { putByte bh 0; put_ bh n; put_ bh r }
  put_ bh SelForAll        = putByte bh 1
  put_ bh (SelFun SelMult) = putByte bh 2
  put_ bh (SelFun SelArg)  = putByte bh 3
  put_ bh (SelFun SelRes)  = putByte bh 4

  get bh = do { h <- getByte bh
              ; case h of
                  0 -> do { n <- get bh; r <- get bh; return (SelTyCon n r) }
                  1 -> return SelForAll
                  2 -> return (SelFun SelMult)
                  3 -> return (SelFun SelArg)
                  _ -> return (SelFun SelRes) }

instance NFData CoSel where
  rnf (SelTyCon n r) = n `seq` r `seq` ()
  rnf SelForAll      = ()
  rnf (SelFun fs)    = fs `seq` ()

-- | A semantically more meaningful type to represent what may or may not be a
-- useful 'Coercion'.
data MCoercion
  = MRefl         -- A trivial Reflexivity coercion
  | MCo Coercion  -- Other coercions
  deriving Data.Data

type MCoercionR = MCoercion
type MCoercionN = MCoercion

instance Outputable MCoercion where
  ppr MRefl    = text "MRefl"
  ppr (MCo co) = text "MCo" <+> ppr co

{- Note [Refl invariant]
~~~~~~~~~~~~~~~~~~~~~~~~
Invariant 1: Refl lifting
Refl (similar for GRefl r ty MRefl) is always lifted as far as possible.
For example (Refl T) (Refl a) (Refl b) is normalised (by mkAppCo) to (Refl (T a b)). You might think that a consequences is: Every identity coercion has Refl at the root But that's not quite true because of coercion variables. Consider g where g :: Int~Int Left h where h :: Maybe Int ~ Maybe Int etc. So the consequence is only true of coercions that have no coercion variables. Invariant 2: TyConAppCo An application of (Refl T) to some coercions, at least one of which is NOT the identity, is normalised to TyConAppCo. (They may not be fully saturated however.) TyConAppCo coercions (like all coercions other than Refl) are NEVER the identity. Note [Generalized reflexive coercion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRefl is a generalized reflexive coercion (see #15192). It wraps a kind coercion, which might be reflexive (MRefl) or any coercion (MCo co). The typing rules for GRefl: ty : k1 ------------------------------------ GRefl r ty MRefl: ty ~r ty ty : k1 co :: k1 ~ k2 ------------------------------------ GRefl r ty (MCo co) : ty ~r ty |> co Consider we have g1 :: s ~r t s :: k1 g2 :: k1 ~ k2 and we want to construct a coercions co which has type (s |> g2) ~r t We can define co = Sym (GRefl r s g2) ; g1 It is easy to see that Refl == GRefl Nominal ty MRefl :: ty ~n ty A nominal reflexive coercion is quite common, so we keep the special form Refl to save allocation. Note [SelCo] ~~~~~~~~~~~~ The Coercion form SelCo allows us to decompose a structural coercion, one between ForallTys, or TyConApps, or FunTys. There are three forms, split by the CoSel field inside the SelCo: SelTyCon, SelForAll, and SelFun. 
* SelTyCon: co : (T s1..sn) ~r0 (T t1..tn) T is a data type, not a newtype, nor an arrow type r = tyConRole tc r0 i i < n (i is zero-indexed) ---------------------------------- SelCo (SelTyCon i r) co : si ~r ti "Not a newtype": see Note [SelCo and newtypes] "Not an arrow type": see SelFun below See Note [SelCo Cached Roles] * SelForAll: co : forall (a:k1).t1 ~r0 forall (a:k2).t2 ---------------------------------- SelCo SelForAll co : k1 ~N k2 NB: SelForAll always gives a Nominal coercion. * The SelFun form, for functions, has three sub-forms for the three components of the function type (multiplicity, argument, result). co : (s1 %{m1}-> t1) ~r0 (s2 %{m2}-> t2) r = funRole r0 SelMult ---------------------------------- SelCo (SelFun SelMult) co : m1 ~r m2 co : (s1 %{m1}-> t1) ~r0 (s2 %{m2}-> t2) r = funRole r0 SelArg ---------------------------------- SelCo (SelFun SelArg) co : s1 ~r s2 co : (s1 %{m1}-> t1) ~r0 (s2 %{m2}-> t2) r = funRole r0 SelRes ---------------------------------- SelCo (SelFun SelRes) co : t1 ~r t2 Note [FunCo] ~~~~~~~~~~~~ Just as FunTy has a ft_af :: FunTyFlag field, FunCo (which connects two function types) has two FunTyFlag fields: funco_afl, funco_afr :: FunTyFlag In all cases, the FunTyFlag is recoverable from the kinds of the argument and result types/coercions; but experiments show that it's better to cache it. Why does FunCo need /two/ flags? If we have a single method class, implemented as a newtype class C a where { op :: [a] -> a } then we can have a coercion co :: C Int ~R ([Int]->Int) So now we can define FunCo co : (C Int => Bool) ~R (([Int]->Int) -> Bool) Notice that the left and right arrows are different! Hence two flags, one for coercionLKind and one for coercionRKind. Note [Coercion axioms applied to coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The reason coercion axioms can be applied to coercions and not just types is to allow for better optimization. 
There are some cases where we need to be able to "push transitivity inside"
an axiom in order to expose further opportunities for optimization. For
example, suppose we have

  C a : t[a] ~ F a
  g   : b ~ c

and we want to optimize

  sym (C b) ; t[g] ; C c

which has the kind

  F b ~ F c

(stopping through t[b] and t[c] along the way).

We'd like to optimize this to just F g -- but how? The key is that we need
to allow axioms to be instantiated by *coercions*, not just by types. Then
we can (in certain cases) push transitivity inside the axiom instantiations,
and then react opposite-polarity instantiations of the same axiom. In this
case, e.g., we match t[g] against the LHS of (C c)'s kind, to obtain the
substitution  a |-> g  (note this operation is sort of the dual of lifting!)
and hence end up with

  C g : t[b] ~ F c

which indeed has the same kind as  t[g] ; C c.

Now we have

  sym (C b) ; C g

which can be optimized to F g.

Note [Required foralls in Core]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider the CoreExpr
   (Lam a e) where `a` is a TyVar, and (e::e_ty).
It has type
   forall a. e_ty
Note the Specified visibility of (forall a. e_ty); the Core type just isn't
able to express more than one visibility, and we pick `Specified`.
See `exprType` and `mkLamType` in GHC.Core.Utils, and
`GHC.Type.Var.coreLamForAllTyFlag`.

So how can we ever get a term of type (forall a -> e_ty)?
Answer: /only/ via a cast built with ForAllCo. See
`GHC.Tc.Types.Evidence.mkWpForAllCast`. This does not seem very satisfying,
but it does the job.

An alternative would be to put a visibility flag into `Lam` (a huge change),
or into a `TyVar` (a more plausible change), but we leave that for the future.

See also Note [ForAllTy and type equality] in GHC.Core.TyCo.Compare.

Note [ForAllCo]
~~~~~~~~~~~~~~~
See also Note [ForAllTy and type equality] in GHC.Core.TyCo.Compare.

Constructing coercions between forall-types can be a bit tricky, because
the kinds of the bound tyvars can be different.
The typing rule is: kind_co : k1 ~N k2 tv1:k1 |- co : t1 ~r t2 if r=N, then vis1=vis2 ------------------------------------ ForAllCo (tv1:k1) vis1 vis2 kind_co co : forall (tv1:k1) . t1 ~r forall (tv1:k2) . (t2[tv1 |-> (tv1:k2) |> sym kind_co]) Several things to note here (FC1) First, the TyCoVar stored in a ForAllCo is really just a convenience: this field should be a Name, as its kind is redundant. Thinking of the field as a Name is helpful in understanding what a ForAllCo means. The kind of TyCoVar always matches the left-hand kind of the coercion. * The idea is that kind_co gives the two kinds of the tyvar. See how, in the conclusion, tv1 is assigned kind k1 on the left but kind k2 on the right. * Of course, a type variable can't have different kinds at the same time. So, in `co` itself we use (tv1 : k1); hence the premise tv1:k1 |- co : t1 ~r t2 * The last wrinkle is that we need to fix the kinds in the conclusion. In t2, tv1 is assumed to have kind k1, but it has kind k2 in the conclusion of the rule. So we do a kind-fixing substitution, replacing (tv1:k1) with (tv1:k2) |> sym kind_co. This substitution is slightly bizarre, because it mentions the same name with different kinds, but it *is* well-kinded, noting that `(tv1:k2) |> sym kind_co` has kind k1. We could instead store just a Name in the ForAllCo, and it might even be more efficient to do so. But we can't add Names to, e.g., VarSets, and there generally is just an impedance mismatch in a bunch of places. So we use tv1. When we need tv2, we can use setTyVarKind. (FC2) Note that the kind coercion must be Nominal; and that the role `r` of the final coercion is the same as that of the body coercion. (FC3) A ForAllCo allows casting between visibilities. For example: ForAllCo a Required Specified (SubCo (Refl ty)) : (forall a -> ty) ~R (forall a. ty) But you can only cast between visiblities at Representational role; Hence the premise if r=N, then vis1=vis2 in the typing rule. 
See also Note [ForAllTy and type equality] in GHC.Core.TyCo.Compare. (FC4) See Note [Required foralls in Core]. (FC5) In a /type/, in (ForAllTy cv ty) where cv is a CoVar, we insist that `cv` must appear free in `ty`; see Note [Unused coercion variable in ForAllTy] in GHC.Core.TyCo.Rep for the motivation. If it does not appear free, use FunTy. However we do /not/ impose the same restriction on ForAllCo in /coercions/. Instead, in coercionLKind and coercionRKind, we use mkTyCoForAllTy to perform the check and construct a FunTy when necessary. Why? * For a coercion, all that matters is its kind, So ForAllCo vs FunCo does not make a difference. * Even if cv occurs in body_co, it is possible that cv does not occur in the kind of body_co. Therefore the check in coercionKind is inevitable. (FC6) Invariant: in a ForAllCo where fco_tcv is a coercion variable, `cv`, we insist that `cv` appears only in positions that are erased. In fact we use a conservative approximation of this: we require that (almostDevoidCoVarOfCo cv fco_body) holds. This function checks that `cv` appers only within the type in a Refl node and under a GRefl node (including in the Coercion stored in a GRefl). It's possible other places are OK, too, but this is a safe approximation. Why all this fuss? See Section 5.8.5.2 of Richard's thesis. The idea is that we cannot prove that the type system is consistent with unrestricted use of this cv; the consistency proof uses an untyped rewrite relation that works over types with all coercions and casts removed. So, we can allow the cv to appear only in positions that are erased. Sadly, with heterogeneous equality, this restriction might be able to be violated; Richard's thesis is unable to prove that it isn't. Specifically, the liftCoSubst function might create an invalid coercion. Because a violation of the restriction might lead to a program that "goes wrong", it is checked all the time, even in a production compiler and without -dcore-lint. 
We *have* proved that the problem does not occur with homogeneous equality, so this check can be dropped once ~# is made to be homogeneous. (FC7) Invariant: in a ForAllCo, if fco_tcv is a CoVar, then fco_visL = fco_visR = coreTyLamForAllTyFlag c.f. (FT2) in Note [ForAllTy] Note [Predicate coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have g :: a~b How can we coerce between types ([c]~a) => [a] -> c and ([c]~b) => [b] -> c where the equality predicate *itself* differs? Answer: we simply treat (~) as an ordinary type constructor, so these types really look like ((~) [c] a) -> [a] -> c ((~) [c] b) -> [b] -> c So the coercion between the two is obviously ((~) [c] g) -> [g] -> c Another way to see this to say that we simply collapse predicates to their representation type (see Type.coreView and Type.predTypeRep). This collapse is done by mkPredCo; there is no PredCo constructor in Coercion. This is important because we need Nth to work on predicates too: SelCo (SelTyCon 1) ((~) [c] g) = g See Simplify.simplCoercionF, which generates such selections. Note [Roles] ~~~~~~~~~~~~ Roles are a solution to the GeneralizedNewtypeDeriving problem, articulated in #1496. The full story is in docs/core-spec/core-spec.pdf. Also, see https://gitlab.haskell.org/ghc/ghc/wikis/roles-implementation Here is one way to phrase the problem: Given: newtype Age = MkAge Int type family F x type instance F Age = Bool type instance F Int = Char This compiles down to: axAge :: Age ~ Int axF1 :: F Age ~ Bool axF2 :: F Int ~ Char Then, we can make: (sym (axF1) ; F axAge ; axF2) :: Bool ~ Char Yikes! The solution is _roles_, as articulated in "Generative Type Abstraction and Type-level Computation" (POPL 2010), available at http://www.seas.upenn.edu/~sweirich/papers/popl163af-weirich.pdf The specification for roles has evolved somewhat since that paper. For the current full details, see the documentation in docs/core-spec. Here are some highlights. 
We label every equality with a notion of type equivalence, of which there are three options: Nominal, Representational, and Phantom. A ground type is nominally equivalent only with itself. A newtype (which is considered a ground type in Haskell) is representationally equivalent to its representation. Anything is "phantomly" equivalent to anything else. We use "N", "R", and "P" to denote the equivalences. The axioms above would be: axAge :: Age ~R Int axF1 :: F Age ~N Bool axF2 :: F Age ~N Char Then, because transitivity applies only to coercions proving the same notion of equivalence, the above construction is impossible. However, there is still an escape hatch: we know that any two types that are nominally equivalent are representationally equivalent as well. This is what the form SubCo proves -- it "demotes" a nominal equivalence into a representational equivalence. So, it would seem the following is possible: sub (sym axF1) ; F axAge ; sub axF2 :: Bool ~R Char -- WRONG What saves us here is that the arguments to a type function F, lifted into a coercion, *must* prove nominal equivalence. So, (F axAge) is ill-formed, and we are safe. Roles are attached to parameters to TyCons. When lifting a TyCon into a coercion (through TyConAppCo), we need to ensure that the arguments to the TyCon respect their roles. For example: data T a b = MkT a (F b) If we know that a1 ~R a2, then we know (T a1 b) ~R (T a2 b). But, if we know that b1 ~R b2, we know nothing about (T a b1) and (T a b2)! This is because the type function F branches on b's *name*, not representation. So, we say that 'a' has role Representational and 'b' has role Nominal. The third role, Phantom, is for parameters not used in the type's definition. Given the following definition data Q a = MkQ Int the Phantom role allows us to say that (Q Bool) ~R (Q Char), because we can construct the coercion Bool ~P Char (using UnivCo). See the paper cited above for more examples and information. 
Note [TyConAppCo roles] ~~~~~~~~~~~~~~~~~~~~~~~ The TyConAppCo constructor has a role parameter, indicating the role at which the coercion proves equality. The choice of this parameter affects the required roles of the arguments of the TyConAppCo. To help explain it, assume the following definition: type instance F Int = Bool -- Axiom axF : F Int ~N Bool newtype Age = MkAge Int -- Axiom axAge : Age ~R Int data Foo a = MkFoo a -- Role on Foo's parameter is Representational TyConAppCo Nominal Foo axF : Foo (F Int) ~N Foo Bool For (TyConAppCo Nominal) all arguments must have role Nominal. Why? So that Foo Age ~N Foo Int does *not* hold. TyConAppCo Representational Foo (SubCo axF) : Foo (F Int) ~R Foo Bool TyConAppCo Representational Foo axAge : Foo Age ~R Foo Int For (TyConAppCo Representational), all arguments must have the roles corresponding to the result of tyConRoles on the TyCon. This is the whole point of having roles on the TyCon to begin with. So, we can have Foo Age ~R Foo Int, if Foo's parameter has role R. If a Representational TyConAppCo is over-saturated (which is otherwise fine), the spill-over arguments must all be at Nominal. This corresponds to the behavior for AppCo. TyConAppCo Phantom Foo (UnivCo Phantom Int Bool) : Foo Int ~P Foo Bool All arguments must have role Phantom. This one isn't strictly necessary for soundness, but this choice removes ambiguity. The rules here dictate the roles of the parameters to mkTyConAppCo (should be checked by Lint). Note [SelCo and newtypes] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have newtype N a = MkN Int type role N representational This yields axiom NTCo:N :: forall a. N a ~R Int We can then build co :: forall a b. N a ~R N b co = NTCo:N a ; sym (NTCo:N b) for any `a` and `b`. Because of the role annotation on N, if we use SelCo, we'll get out a representational coercion. That is: SelCo (SelTyCon 0 r) co :: forall a b. a ~r b Yikes! Clearly, this is terrible. 
The solution is simple: forbid SelCo to be used on newtypes if the internal coercion is representational. See the SelCo equation for GHC.Core.Lint.lintCoercion. This is not just some corner case discovered by a segfault somewhere; it was discovered in the proof of soundness of roles and described in the "Safe Coercions" paper (ICFP '14). Note [SelCo Cached Roles] ~~~~~~~~~~~~~~~~~~~~~~~~~ Why do we cache the role of SelCo in the SelCo constructor? Because computing role(Nth i co) involves figuring out that co :: T tys1 ~ T tys2 using coercionKind, and finding (coercionRole co), and then looking at the tyConRoles of T. Avoiding bad asymptotic behaviour here means we have to compute the kind and role of a coercion simultaneously, which makes the code complicated and inefficient. This only happens for SelCo. Caching the role solves the problem, and allows coercionKind and coercionRole to be simple. See #11735 Note [InstCo roles] ~~~~~~~~~~~~~~~~~~~ Here is (essentially) the typing rule for InstCo: g :: (forall a. t1) ~r (forall a. t2) w :: s1 ~N s2 ------------------------------- InstCo InstCo g w :: (t1 [a |-> s1]) ~r (t2 [a |-> s2]) Note that the Coercion w *must* be nominal. This is necessary because the variable a might be used in a "nominal position" (that is, a place where role inference would require a nominal role) in t1 or t2. If we allowed w to be representational, we could get bogus equalities. A more nuanced treatment might be able to relax this condition somewhat, by checking if t1 and/or t2 use their bound variables in nominal ways. If not, having w be representational is OK. %************************************************************************ %* * UnivCo %* * %************************************************************************ Note [UnivCo] ~~~~~~~~~~~~~ A UnivCo is a coercion whose proof does not directly express its role and kind (indeed for some UnivCos, like PluginProv, there /is/ no proof). 
The different kinds of UnivCo are described by UnivCoProvenance. Really each is entirely separate, but they all share the need to represent these fields: UnivCo { uco_prov :: UnivCoProvenance , uco_role :: Role , uco_lty, uco_rty :: Type , uco_deps :: [Coercion] -- Coercions on which it depends Here, * uco_role, uco_lty, uco_rty express the type of the coercion * uco_prov says where it came from * uco_deps specifies the coercions on which this proof (which is not explicity given) depends. See Note [The importance of tracking UnivCo dependencies] -} -- | For simplicity, we have just one UnivCo that represents a coercion from -- some type to some other type, with (in general) no restrictions on the -- type. The UnivCoProvenance specifies more exactly what the coercion really -- is and why a program should (or shouldn't!) trust the coercion. -- It is reasonable to consider each constructor of 'UnivCoProvenance' -- as a totally independent coercion form; their only commonality is -- that they don't tell you what types they coercion between. (That info -- is in the 'UnivCo' constructor of 'Coercion'. data UnivCoProvenance = PhantomProv -- ^ See Note [Phantom coercions]. Only in Phantom -- roled coercions | ProofIrrelProv -- ^ From the fact that any two coercions are -- considered equivalent. See Note [ProofIrrelProv]. -- Can be used in Nominal or Representational coercions | PluginProv String -- ^ From a plugin, which asserts that this coercion is sound. -- The string and the variable set are for the use by the plugin. deriving (Eq, Ord, Data.Data) -- Why Ord? 
See Note [Ord instance of IfaceType] in GHC.Iface.Type instance Outputable UnivCoProvenance where ppr PhantomProv = text "(phantom)" ppr ProofIrrelProv = text "(proof irrel)" ppr (PluginProv str) = parens (text "plugin" <+> brackets (text str)) instance NFData UnivCoProvenance where rnf p = p `seq` () instance Binary UnivCoProvenance where put_ bh PhantomProv = putByte bh 1 put_ bh ProofIrrelProv = putByte bh 2 put_ bh (PluginProv a) = putByte bh 3 >> put_ bh a get bh = do tag <- getByte bh case tag of 1 -> return PhantomProv 2 -> return ProofIrrelProv 3 -> do a <- get bh return $ PluginProv a _ -> panic ("get UnivCoProvenance " ++ show tag) {- Note [Phantom coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T a = T1 | T2 Then we have T s ~R T t for any old s,t. The witness for this is (TyConAppCo T Rep co), where (co :: s ~P t) is a phantom coercion built with PhantomProv. The role of the UnivCo is always Phantom. The Coercion stored is the (nominal) kind coercion between the types kind(s) ~N kind (t) Note [ProofIrrelProv] ~~~~~~~~~~~~~~~~~~~~~ A ProofIrrelProv is a coercion between coercions. For example: data G a where MkG :: G Bool In core, we get G :: * -> * MkG :: forall (a :: *). (a ~# Bool) -> G a Now, consider 'MkG -- that is, MkG used in a type -- and suppose we want a proof that ('MkG a1 co1) ~ ('MkG a2 co2). This will have to be TyConAppCo Nominal MkG [co3, co4] where co3 :: co1 ~ co2 co4 :: a1 ~ a2 Note that co1 :: a1 ~ Bool co2 :: a2 ~ Bool Here, co3 = UnivCo ProofIrrelProv Nominal (CoercionTy co1) (CoercionTy co2) [co5] where co5 :: (a1 ~# Bool) ~# (a2 ~# Bool) co5 = TyConAppCo Nominal (~#) [, , co4, ] Note [The importance of tracking UnivCo dependencies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is vital that `UnivCo` (a coercion that lacks a proper proof) tracks the coercions on which it depends. To see why, consider this program: type S :: Nat -> Nat data T (a::Nat) where T1 :: T 0 T2 :: ... 
f :: T a -> S (a+1) -> S 1 f = /\a (x:T a) (y:a). case x of T1 (gco : a ~# 0) -> y |> wco For this to typecheck we need `wco :: S (a+1) ~# S 1`, given that `gco : a ~# 0`. To prove that we need to know that `a+1 = 1` if `a=0`, which a plugin might know. So it solves `wco` by providing a `UnivCo (PluginProv "my-plugin") (a+1) 1 [gco]`. But the `uco_deps` in `PluginProv` must mention `gco`! Why? Otherwise we might float the entire expression (y |> wco) out of the the case alternative for `T1` which brings `gco` into scope. If this happens then we aren't far from a segmentation fault or much worse. See #23923 for a real-world example of this happening. So it is /crucial/ for the `UnivCo` to mention, in `uco_deps`, the coercion variables used by the plugin to justify the `UnivCo` that it builds. You should think of it like `TyConAppCo`: the `UnivCo` proof constructor is applied to a list of coercions, just as `TyConAppCo` is It's very convenient to record a full coercion, not just a set of free coercion variables, because during typechecking those coercions might contain coercion holes `HoleCo`, which get filled in later. -} {- ********************************************************************** %* * Coercion holes %* * %********************************************************************* -} -- | A coercion to be filled in by the type-checker. See Note [Coercion holes] data CoercionHole = CoercionHole { ch_co_var :: CoVar -- See Note [CoercionHoles and coercion free variables] , ch_ref :: IORef (Maybe Coercion) , ch_hetero_kind :: Bool -- True <=> arises from a kind-level equality -- See Note [Equalities with incompatible kinds] -- in GHC.Tc.Solver.Equality, wrinkle (EIK2) } coHoleCoVar :: CoercionHole -> CoVar coHoleCoVar = ch_co_var isHeteroKindCoHole :: CoercionHole -> Bool isHeteroKindCoHole = ch_hetero_kind setCoHoleCoVar :: CoercionHole -> CoVar -> CoercionHole setCoHoleCoVar h cv = h { ch_co_var = cv } instance Data.Data CoercionHole where -- don't traverse? 
toConstr _ = abstractConstr "CoercionHole" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "CoercionHole" instance Outputable CoercionHole where ppr (CoercionHole { ch_co_var = cv, ch_hetero_kind = hk }) = braces (ppr cv <> ppWhen hk (text "[hk]")) instance Uniquable CoercionHole where getUnique (CoercionHole { ch_co_var = cv }) = getUnique cv {- Note [Coercion holes] ~~~~~~~~~~~~~~~~~~~~~~~~ During typechecking, constraint solving for type classes works by - Generate an evidence Id, d7 :: Num a - Wrap it in a Wanted constraint, [W] d7 :: Num a - Use the evidence Id where the evidence is needed - Solve the constraint later - When solved, add an enclosing let-binding let d7 = .... in .... which actually binds d7 to the (Num a) evidence For equality constraints we use a different strategy. See Note [The equality types story] in GHC.Builtin.Types.Prim for background on equality constraints. - For /boxed/ equality constraints, (t1 ~N t2) and (t1 ~R t2), it's just like type classes above. (Indeed, boxed equality constraints *are* classes.) - But for /unboxed/ equality constraints (t1 ~R# t2) and (t1 ~N# t2) we use a different plan For unboxed equalities: - Generate a CoercionHole, a mutable variable just like a unification variable - Wrap the CoercionHole in a Wanted constraint; see GHC.Tc.Utils.TcEvDest - Use the CoercionHole in a Coercion, via HoleCo - Solve the constraint later - When solved, fill in the CoercionHole by side effect, instead of doing the let-binding thing The main reason for all this is that there may be no good place to let-bind the evidence for unboxed equalities: - We emit constraints for kind coercions, to be used to cast a type's kind. These coercions then must be used in types. Because they might appear in a top-level type, there is no place to bind these (unlifted) coercions in the usual way. - A coercion for (forall a. t1) ~ (forall a. t2) will look like forall a. 
(coercion for t1~t2) But the coercion for (t1~t2) may mention 'a', and we don't have let-bindings within coercions. We could add them, but coercion holes are easier. - Moreover, nothing is lost from the lack of let-bindings. For dictionaries want to achieve sharing to avoid recomputing the dictionary. But coercions are entirely erased, so there's little benefit to sharing. Indeed, even if we had a let-binding, we always inline types and coercions at every use site and drop the binding. Other notes about HoleCo: * INVARIANT: CoercionHole and HoleCo are used only during type checking, and should never appear in Core. Just like unification variables; a Type can contain a TcTyVar, but only during type checking. If, one day, we use type-level information to separate out forms that can appear during type-checking vs forms that can appear in core proper, holes in Core will be ruled out. * See Note [CoercionHoles and coercion free variables] * Coercion holes can be compared for equality like other coercions: by looking at the types coerced. Note [CoercionHoles and coercion free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why does a CoercionHole contain a CoVar, as well as reference to fill in? Because we want to treat that CoVar as a free variable of the coercion. See #14584, and Note [What prevents a constraint from floating] in GHC.Tc.Solver, item (4): forall k. [W] co1 :: t1 ~# t2 |> co2 [W] co2 :: k ~# * Here co2 is a CoercionHole. But we /must/ know that it is free in co1, because that's all that stops it floating outside the implication. 
-} {- ********************************************************************* * * foldType and foldCoercion * * ********************************************************************* -} {- Note [foldType] ~~~~~~~~~~~~~~~~~~ foldType is a bit more powerful than perhaps it looks: * You can fold with an accumulating parameter, via TyCoFolder env (Endo a) Recall newtype Endo a = Endo (a->a) * You can fold monadically with a monad M, via TyCoFolder env (M a) provided you have instance .. => Monoid (M a) Note [mapType vs foldType] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We define foldType here, but mapType in module Type. Why? * foldType is used in GHC.Core.TyCo.FVs for finding free variables. It's a very simple function that analyses a type, but does not construct one. * mapType constructs new types, and so it needs to call the "smart constructors", mkAppTy, mkCastTy, and so on. These are sophisticated functions, and can't be defined here in GHC.Core.TyCo.Rep. Note [Specialising foldType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We inline foldType at every call site (there are not many), so that it becomes specialised for the particular monoid *and* TyCoFolder at that site. This is just for efficiency, but walking over types is done a *lot* in GHC, so worth optimising. We were worried that TyCoFolder env (Endo a) might not eta-expand. Recall newtype Endo a = Endo (a->a). In particular, given fvs :: Type -> TyCoVarSet fvs ty = appEndo (foldType tcf emptyVarSet ty) emptyVarSet tcf :: TyCoFolder enf (Endo a) tcf = TyCoFolder { tcf_tyvar = do_tv, ... } where do_tvs is tv = Endo do_it where do_it acc | tv `elemVarSet` is = acc | tv `elemVarSet` acc = acc | otherwise = acc `extendVarSet` tv we want to end up with fvs ty = go emptyVarSet ty emptyVarSet where go env (TyVarTy tv) acc = acc `extendVarSet` tv ..etc.. And indeed this happens. - Selections from 'tcf' are done at compile time - 'go' is nicely eta-expanded. 
We were also worried about deep_fvs :: Type -> TyCoVarSet deep_fvs ty = appEndo (foldType deep_tcf emptyVarSet ty) emptyVarSet deep_tcf :: TyCoFolder enf (Endo a) deep_tcf = TyCoFolder { tcf_tyvar = do_tv, ... } where do_tvs is tv = Endo do_it where do_it acc | tv `elemVarSet` is = acc | tv `elemVarSet` acc = acc | otherwise = deep_fvs (varType tv) `unionVarSet` acc `extendVarSet` tv Here deep_fvs and deep_tcf are mutually recursive, unlike fvs and tcf. But, amazingly, we get good code here too. GHC is careful not to mark TyCoFolder data constructor for deep_tcf as a loop breaker, so the record selections still cancel. And eta expansion still happens too. Note [Use explicit recursion in foldTyCo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In foldTyCo you'll see things like: go_tys _ [] = mempty go_tys env (t:ts) = go_ty env t `mappend` go_tys env ts where we use /explicit recursion/. You might wonder about using foldl instead: go_tys env = foldl (\t acc -> go_ty env t `mappend` acc) mempty Or maybe foldl', or foldr. But don't do that for two reasons (see #24591) * We sometimes instantiate `a` to (Endo VarSet). Remembering newtype Endo a = Endo (a->a) after inlining `foldTyCo` bodily, the explicit recursion looks like go_tys _ [] = \acc -> acc go_tys env (t:ts) = \acc -> go_ty env t (go_tys env ts acc) The strictness analyser has no problem spotting that this function is strict in `acc`, provided `go_ty` is. But in the foldl form that is /much/ less obvious, and the strictness analyser fails utterly. Result: lots and lots of thunks get built. In !12037, Mikolaj found that GHC allocated /six times/ as much heap on test perf/compiler/T9198 as a result of this single problem! * Second, while I think that using `foldr` would be fine (simple experiments in #24591 suggest as much), it builds a local loop (with env free) and I'm not 100% confident it'll be lambda lifted in the end. It seems more direct just to write the code we want. 
On the other hand in `go_cvs` we might hope that the `foldr` will fuse with the `dVarSetElems` so I have used `foldr`. -} data TyCoFolder env a = TyCoFolder { tcf_view :: Type -> Maybe Type -- Optional "view" function -- E.g. expand synonyms , tcf_tyvar :: env -> TyVar -> a -- Does not automatically recur , tcf_covar :: env -> CoVar -> a -- into kinds of variables , tcf_hole :: env -> CoercionHole -> a -- ^ What to do with coercion holes. -- See Note [Coercion holes] in "GHC.Core.TyCo.Rep". , tcf_tycobinder :: env -> TyCoVar -> ForAllTyFlag -> env -- ^ The returned env is used in the extended scope } {-# INLINE foldTyCo #-} -- See Note [Specialising foldType] foldTyCo :: Monoid a => TyCoFolder env a -> env -> (Type -> a, [Type] -> a, Coercion -> a, [Coercion] -> a) foldTyCo (TyCoFolder { tcf_view = view , tcf_tyvar = tyvar , tcf_tycobinder = tycobinder , tcf_covar = covar , tcf_hole = cohole }) env = (go_ty env, go_tys env, go_co env, go_cos env) where go_ty env ty | Just ty' <- view ty = go_ty env ty' go_ty env (TyVarTy tv) = tyvar env tv go_ty env (AppTy t1 t2) = go_ty env t1 `mappend` go_ty env t2 go_ty _ (LitTy {}) = mempty go_ty env (CastTy ty co) = go_ty env ty `mappend` go_co env co go_ty env (CoercionTy co) = go_co env co go_ty env (FunTy _ w arg res) = go_ty env w `mappend` go_ty env arg `mappend` go_ty env res go_ty env (TyConApp _ tys) = go_tys env tys go_ty env (ForAllTy (Bndr tv vis) inner) = let !env' = tycobinder env tv vis -- Avoid building a thunk here in go_ty env (varType tv) `mappend` go_ty env' inner -- See Note [Use explicit recursion in foldTyCo] go_tys _ [] = mempty go_tys env (t:ts) = go_ty env t `mappend` go_tys env ts -- See Note [Use explicit recursion in foldTyCo] go_cos _ [] = mempty go_cos env (c:cs) = go_co env c `mappend` go_cos env cs go_co env (Refl ty) = go_ty env ty go_co env (GRefl _ ty MRefl) = go_ty env ty go_co env (GRefl _ ty (MCo co)) = go_ty env ty `mappend` go_co env co go_co env (TyConAppCo _ _ args) = go_cos env args 
go_co env (AppCo c1 c2) = go_co env c1 `mappend` go_co env c2 go_co env (CoVarCo cv) = covar env cv go_co env (AxiomCo _ cos) = go_cos env cos go_co env (HoleCo hole) = cohole env hole go_co env (UnivCo { uco_lty = t1, uco_rty = t2, uco_deps = deps }) = go_ty env t1 `mappend` go_ty env t2 `mappend` go_cos env deps go_co env (SymCo co) = go_co env co go_co env (TransCo c1 c2) = go_co env c1 `mappend` go_co env c2 go_co env (SelCo _ co) = go_co env co go_co env (LRCo _ co) = go_co env co go_co env (InstCo co arg) = go_co env co `mappend` go_co env arg go_co env (KindCo co) = go_co env co go_co env (SubCo co) = go_co env co go_co env (FunCo { fco_mult = cw, fco_arg = c1, fco_res = c2 }) = go_co env cw `mappend` go_co env c1 `mappend` go_co env c2 go_co env (ForAllCo tv _vis1 _vis2 kind_co co) = go_co env kind_co `mappend` go_ty env (varType tv) `mappend` go_co env' co where env' = tycobinder env tv Inferred -- | A view function that looks through nothing. noView :: Type -> Maybe Type noView _ = Nothing {- ********************************************************************* * * typeSize, coercionSize * * ********************************************************************* -} -- NB: We put typeSize/coercionSize here because they are mutually -- recursive, and have the CPR property. If we have mutual -- recursion across a hi-boot file, we don't get the CPR property -- and these functions allocate a tremendous amount of rubbish. -- It's not critical (because typeSize is really only used in -- debug mode, but I tripped over an example (T5642) in which -- typeSize was one of the biggest single allocators in all of GHC. -- And it's easy to fix, so I did. -- NB: typeSize does not respect `eqType`, in that two types that -- are `eqType` may return different sizes. This is OK, because this -- function is used only in reporting, not decision-making. typeSize :: Type -> Int -- The size of the syntax tree of a type. No special treatment -- for type synonyms or type families. 
typeSize (LitTy {}) = 1 typeSize (TyVarTy {}) = 1 typeSize (AppTy t1 t2) = typeSize t1 + typeSize t2 typeSize (FunTy _ _ t1 t2) = typeSize t1 + typeSize t2 typeSize (ForAllTy (Bndr tv _) t) = typeSize (varType tv) + typeSize t typeSize (TyConApp _ ts) = 1 + typesSize ts typeSize (CastTy ty co) = typeSize ty + coercionSize co typeSize (CoercionTy co) = coercionSize co typesSize :: [Type] -> Int typesSize tys = foldr ((+) . typeSize) 0 tys coercionSize :: Coercion -> Int coercionSize (Refl ty) = typeSize ty coercionSize (GRefl _ ty MRefl) = typeSize ty coercionSize (GRefl _ ty (MCo co)) = 1 + typeSize ty + coercionSize co coercionSize (TyConAppCo _ _ args) = 1 + sum (map coercionSize args) coercionSize (AppCo co arg) = coercionSize co + coercionSize arg coercionSize (ForAllCo { fco_kind = h, fco_body = co }) = 1 + coercionSize co + coercionSize h coercionSize (FunCo _ _ _ w c1 c2) = 1 + coercionSize c1 + coercionSize c2 + coercionSize w coercionSize (CoVarCo _) = 1 coercionSize (HoleCo _) = 1 coercionSize (AxiomCo _ cs) = 1 + sum (map coercionSize cs) coercionSize (UnivCo { uco_lty = t1, uco_rty = t2 }) = 1 + typeSize t1 + typeSize t2 coercionSize (SymCo co) = 1 + coercionSize co coercionSize (TransCo co1 co2) = 1 + coercionSize co1 + coercionSize co2 coercionSize (SelCo _ co) = 1 + coercionSize co coercionSize (LRCo _ co) = 1 + coercionSize co coercionSize (InstCo co arg) = 1 + coercionSize co + coercionSize arg coercionSize (KindCo co) = 1 + coercionSize co coercionSize (SubCo co) = 1 + coercionSize co {- ************************************************************************ * * Multiplicities * * ************************************************************************ These definitions are here to avoid module loops, and to keep GHC.Core.Multiplicity above this module. -} -- | A shorthand for data with an attached 'Mult' element (the multiplicity). 
data Scaled a = Scaled !Mult a deriving (Data.Data) -- You might think that this would be a natural candidate for -- Functor, Traversable but Krzysztof says (!3674) "it was too easy -- to accidentally lift functions (substitutions, zonking etc.) from -- Type -> Type to Scaled Type -> Scaled Type, ignoring -- multiplicities and causing bugs". So we don't. -- -- Being strict in a is worse for performance, so we are only strict on the -- Mult part of scaled. instance (Outputable a) => Outputable (Scaled a) where ppr (Scaled _cnt t) = ppr t -- Do not print the multiplicity here because it tends to be too verbose scaledMult :: Scaled a -> Mult scaledMult (Scaled m _) = m scaledThing :: Scaled a -> a scaledThing (Scaled _ t) = t -- | Apply a function to both the Mult and the Type in a 'Scaled Type' mapScaledType :: (Type -> Type) -> Scaled Type -> Scaled Type mapScaledType f (Scaled m t) = Scaled (f m) (f t) {- | Mult is a type alias for Type. Mult must contain Type because multiplicity variables are mere type variables (of kind Multiplicity) in Haskell. So the simplest implementation is to make Mult be Type. Multiplicities can be formed with: - One: GHC.Types.One (= oneDataCon) - Many: GHC.Types.Many (= manyDataCon) - Multiplication: GHC.Types.MultMul (= multMulTyCon) So that Mult feels a bit more structured, we provide pattern synonyms and smart constructors for these. 
-} type Mult = Type ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Rep.hs-boot0000644000000000000000000000177307346545000021507 0ustar0000000000000000{-# LANGUAGE NoPolyKinds #-} module GHC.Core.TyCo.Rep where import GHC.Utils.Outputable ( Outputable ) import Data.Data ( Data ) import {-# SOURCE #-} GHC.Types.Var( Var, VarBndr, FunTyFlag ) import {-# SOURCE #-} GHC.Core.TyCon ( TyCon ) import Language.Haskell.Syntax.Specificity (ForAllTyFlag) data Type data Coercion data FunSel data CoSel data UnivCoProvenance data TyLit data MCoercion data Scaled a scaledThing :: Scaled a -> a type Mult = Type type PredType = Type type RuntimeRepType = Type type Kind = Type type ThetaType = [PredType] type CoercionN = Coercion type MCoercionN = MCoercion mkForAllTy :: VarBndr Var ForAllTyFlag -> Type -> Type mkNakedTyConTy :: TyCon -> Type mkNakedFunTy :: FunTyFlag -> Type -> Type -> Type -- To support Data instances in GHC.Core.Coercion.Axiom instance Data Type -- To support instances PiTyBinder in Var instance Data a => Data (Scaled a) -- To support debug pretty-printing instance Outputable Type instance Outputable a => Outputable (Scaled a) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCo/Subst.hs0000644000000000000000000013336607346545000021124 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 Type and Coercion - friends' interface -} -- | Substitution into types and coercions. 
module GHC.Core.TyCo.Subst ( -- * Substitutions Subst(..), TvSubstEnv, CvSubstEnv, IdSubstEnv, emptyIdSubstEnv, emptyTvSubstEnv, emptyCvSubstEnv, composeTCvSubst, emptySubst, mkEmptySubst, isEmptyTCvSubst, isEmptySubst, mkTCvSubst, mkTvSubst, mkCvSubst, mkIdSubst, getTvSubstEnv, getIdSubstEnv, getCvSubstEnv, getSubstInScope, setInScope, getSubstRangeTyCoFVs, isInScope, elemSubst, notElemSubst, zapSubst, extendSubstInScope, extendSubstInScopeList, extendSubstInScopeSet, extendTCvSubst, extendTCvSubstWithClone, extendCvSubst, extendCvSubstWithClone, extendTvSubst, extendTvSubstWithClone, extendTvSubstList, extendTvSubstAndInScope, extendTCvSubstList, unionSubst, zipTyEnv, zipCoEnv, zipTvSubst, zipCvSubst, zipTCvSubst, mkTvSubstPrs, substTyWith, substTyWithCoVars, substTysWith, substTysWithCoVars, substCoWith, substTy, substTyAddInScope, substScaledTy, substTyUnchecked, substTysUnchecked, substScaledTysUnchecked, substThetaUnchecked, substTyWithUnchecked, substScaledTyUnchecked, substCoUnchecked, substCoWithUnchecked, substTyWithInScope, substTys, substScaledTys, substTheta, lookupTyVar, substCo, substCos, substCoVar, substCoVars, lookupCoVar, cloneTyVarBndr, cloneTyVarBndrs, substVarBndr, substVarBndrs, substTyVarBndr, substTyVarBndrs, substCoVarBndr, substDCoVarSet, substTyVar, substTyVars, substTyVarToTyVar, substTyCoVars, substTyCoBndr, substForAllCoBndr, substVarBndrUsing, substForAllCoBndrUsing, checkValidSubst, isValidTCvSubst, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.Type ( mkCastTy, mkAppTy, isCoercionTy, mkTyConApp, getTyVar_maybe ) import {-# SOURCE #-} GHC.Core.Coercion ( mkCoVarCo, mkKindCo, mkSelCo, mkTransCo , mkNomReflCo, mkSubCo, mkSymCo , mkFunCo2, mkForAllCo, mkUnivCo , mkAxiomCo, mkAppCo, mkGReflCo , mkInstCo, mkLRCo, mkTyConAppCo , mkCoercionType , coercionKind, coercionLKind, coVarTypesRole ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprTyVar ) import {-# SOURCE #-} GHC.Core.Ppr ( ) -- instance Outputable CoreExpr import {-# 
SOURCE #-} GHC.Core ( CoreExpr ) import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Types.Basic( SwapFlag(..), isSwapped, pickSwap, notSwapped ) import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Data.Pair import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Types.Unique.Supply import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Utils.Outputable import GHC.Utils.Panic import Data.List (mapAccumL) {- %************************************************************************ %* * Substitutions Data type defined here to avoid unnecessary mutual recursion %* * %************************************************************************ -} -- | Type & coercion & id substitution -- -- The "Subst" data type defined in this module contains substitution -- for tyvar, covar and id. However, operations on IdSubstEnv (mapping -- from "Id" to "CoreExpr") that require the definition of the "Expr" -- data type are defined in GHC.Core.Subst to avoid circular module -- dependency. data Subst = Subst InScopeSet -- Variables in scope (both Ids and TyVars) /after/ -- applying the substitution IdSubstEnv -- Substitution from NcIds to CoreExprs TvSubstEnv -- Substitution from TyVars to Types CvSubstEnv -- Substitution from CoVars to Coercions -- INVARIANT 1: See Note [The substitution invariant] -- This is what lets us deal with name capture properly -- -- INVARIANT 2: The substitution is apply-once; -- see Note [Substitutions apply only once] -- -- INVARIANT 3: See Note [Extending the IdSubstEnv] in "GHC.Core.Subst" -- and Note [Extending the TvSubstEnv and CvSubstEnv] -- -- INVARIANT 4: See Note [Substituting types, coercions, and expressions] -- | A substitution of 'Expr's for non-coercion 'Id's type IdSubstEnv = IdEnv CoreExpr -- Domain is NonCoVarIds, i.e. 
not coercions -- | A substitution of 'Type's for 'TyVar's -- and 'Kind's for 'KindVar's type TvSubstEnv = TyVarEnv Type -- NB: A TvSubstEnv is used -- both inside a TCvSubst (with the apply-once invariant -- discussed in Note [Substitutions apply only once], -- and also independently in the middle of matching, -- and unification (see Types.Unify). -- So you have to look at the context to know if it's idempotent or -- apply-once or whatever -- | A substitution of 'Coercion's for 'CoVar's type CvSubstEnv = CoVarEnv Coercion {- Note [The substitution invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When calling (substTy subst ty) it should be the case that the in-scope set in the substitution is a superset of both: (SIa) The free vars of the range of the substitution (SIb) The free vars of ty minus the domain of the substitution * Reason for (SIa). Consider substTy [a :-> Maybe b] (forall b. b->a) we must rename the forall b, to get forall b2. b2 -> Maybe b Making 'b' part of the in-scope set forces this renaming to take place. * Reason for (SIb). Consider substTy [a :-> Maybe b] (forall b. (a,b,x)) Then if we use the in-scope set {b}, satisfying (SIa), there is a danger we will rename the forall'd variable to 'x' by mistake, getting this: forall x. (Maybe b, x, x) Breaking (SIb) caused the bug from #11371. Note: if the free vars of the range of the substitution are freshly created, then the problems of (SIa) can't happen, and so it would be sound to ignore (SIa). Note [Substitutions apply only once] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use TCvSubsts to instantiate things, and we might instantiate forall a b. ty with the types [a, b], or [b, a]. So the substitution might go [a->b, b->a]. A similar situation arises in Core when we find a beta redex like (/\ a /\ b -> e) b a Then we also end up with a substitution that permutes type variables. Other variations happen to; for example [a -> (a, b)]. 
******************************************************** *** So a substitution must be applied precisely once *** ******************************************************** A TCvSubst is not idempotent, but, unlike the non-idempotent substitution we use during unifications, it must not be repeatedly applied. Note [Extending the TvSubstEnv and CvSubstEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See #tcvsubst_invariant# for the invariants that must hold. This invariant allows a short-cut when the subst envs are empty: if the TvSubstEnv and CvSubstEnv are empty --- i.e. (isEmptyTCvSubst subst) holds --- then (substTy subst ty) does nothing. For example, consider: (/\a. /\b:(a~Int). ...b..) Int We substitute Int for 'a'. The Unique of 'b' does not change, but nevertheless we add 'b' to the TvSubstEnv, because b's kind does change This invariant has several crucial consequences: * In substVarBndr, we need extend the TvSubstEnv - if the unique has changed - or if the kind has changed * In substTyVar, we do not need to consult the in-scope set; the TvSubstEnv is enough * In substTy, substTheta, we can short-circuit when the TvSubstEnv is empty Note [Substituting types, coercions, and expressions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Types and coercions are mutually recursive, and either may have variables "belonging" to the other. Thus, every time we wish to substitute in a type, we may also need to substitute in a coercion, and vice versa. Likewise, expressions may contain type variables or coercion variables. However, we use different constructors for constructing expression variables, coercion variables, and type variables, so we carry three VarEnvs for each variable type. Note that it would be possible to use the CoercionTy constructor and the Type constructor to combine these environments, but that seems like a false economy. Note that the domain of the VarEnvs must be respected, despite the fact that TyVar, Id, and CoVar are all type synonyms of the Var type. 
For example, TvSubstEnv should *never* map a CoVar (built with the Id constructor) and the CvSubstEnv should *never* map a TyVar. Furthermore, the range of the TvSubstEnv should *never* include a type headed with CoercionTy. -} emptyIdSubstEnv :: IdSubstEnv emptyIdSubstEnv = emptyVarEnv emptyTvSubstEnv :: TvSubstEnv emptyTvSubstEnv = emptyVarEnv emptyCvSubstEnv :: CvSubstEnv emptyCvSubstEnv = emptyVarEnv -- | Composes two substitutions, applying the second one provided first, -- like in function composition. This function leaves IdSubstEnv untouched -- because IdSubstEnv is not used during substitution for types. composeTCvSubst :: Subst -> Subst -> Subst composeTCvSubst subst1@(Subst is1 ids1 tenv1 cenv1) (Subst is2 _ tenv2 cenv2) = Subst is3 ids1 tenv3 cenv3 where is3 = is1 `unionInScope` is2 tenv3 = tenv1 `plusVarEnv` mapVarEnv (substTy extended_subst1) tenv2 cenv3 = cenv1 `plusVarEnv` mapVarEnv (substCo extended_subst1) cenv2 -- Make sure the in-scope set in the first substitution is wide enough to -- cover the free variables in the range of the second substitution before -- applying it (#22235). extended_subst1 = subst1 `setInScope` is3 emptySubst :: Subst emptySubst = Subst emptyInScopeSet emptyVarEnv emptyVarEnv emptyVarEnv mkEmptySubst :: InScopeSet -> Subst mkEmptySubst in_scope = Subst in_scope emptyVarEnv emptyVarEnv emptyVarEnv isEmptySubst :: Subst -> Bool isEmptySubst (Subst _ id_env tv_env cv_env) = isEmptyVarEnv id_env && isEmptyVarEnv tv_env && isEmptyVarEnv cv_env -- | Checks whether the tyvar and covar environments are empty. -- This function should be used over 'isEmptySubst' when substituting -- for types, because types currently do not contain expressions; we can -- safely disregard the expression environment when deciding whether -- to skip a substitution. 
Using 'isEmptyTCvSubst' gives us a non-trivial -- performance boost (up to 70% less allocation for T18223) isEmptyTCvSubst :: Subst -> Bool isEmptyTCvSubst (Subst _ _ tv_env cv_env) = isEmptyVarEnv tv_env && isEmptyVarEnv cv_env mkTCvSubst :: InScopeSet -> TvSubstEnv -> CvSubstEnv -> Subst mkTCvSubst in_scope tvs cvs = Subst in_scope emptyIdSubstEnv tvs cvs mkIdSubst :: InScopeSet -> IdSubstEnv -> Subst mkIdSubst in_scope ids = Subst in_scope ids emptyTvSubstEnv emptyCvSubstEnv mkTvSubst :: InScopeSet -> TvSubstEnv -> Subst -- ^ Make a TCvSubst with specified tyvar subst and empty covar subst mkTvSubst in_scope tenv = Subst in_scope emptyIdSubstEnv tenv emptyCvSubstEnv mkCvSubst :: InScopeSet -> CvSubstEnv -> Subst -- ^ Make a TCvSubst with specified covar subst and empty tyvar subst mkCvSubst in_scope cenv = Subst in_scope emptyIdSubstEnv emptyTvSubstEnv cenv getIdSubstEnv :: Subst -> IdSubstEnv getIdSubstEnv (Subst _ ids _ _) = ids getTvSubstEnv :: Subst -> TvSubstEnv getTvSubstEnv (Subst _ _ tenv _) = tenv getCvSubstEnv :: Subst -> CvSubstEnv getCvSubstEnv (Subst _ _ _ cenv) = cenv -- | Find the in-scope set: see Note [The substitution invariant] getSubstInScope :: Subst -> InScopeSet getSubstInScope (Subst in_scope _ _ _) = in_scope setInScope :: Subst -> InScopeSet -> Subst setInScope (Subst _ ids tvs cvs) in_scope = Subst in_scope ids tvs cvs -- | Returns the free variables of the types in the range of a substitution as -- a non-deterministic set. 
getSubstRangeTyCoFVs :: Subst -> VarSet getSubstRangeTyCoFVs (Subst _ _ tenv cenv) = tenvFVs `unionVarSet` cenvFVs where tenvFVs = shallowTyCoVarsOfTyVarEnv tenv cenvFVs = shallowTyCoVarsOfCoVarEnv cenv isInScope :: Var -> Subst -> Bool isInScope v (Subst in_scope _ _ _) = v `elemInScopeSet` in_scope elemSubst :: Var -> Subst -> Bool elemSubst v (Subst _ ids tenv cenv) | isTyVar v = v `elemVarEnv` tenv | isCoVar v = v `elemVarEnv` cenv | otherwise = v `elemVarEnv` ids notElemSubst :: Var -> Subst -> Bool notElemSubst v = not . elemSubst v -- | Remove all substitutions that might have been built up -- while preserving the in-scope set -- originally called zapSubstEnv zapSubst :: Subst -> Subst zapSubst (Subst in_scope _ _ _) = Subst in_scope emptyVarEnv emptyVarEnv emptyVarEnv -- | Add the 'Var' to the in-scope set extendSubstInScope :: Subst -> Var -> Subst extendSubstInScope (Subst in_scope ids tvs cvs) v = Subst (in_scope `extendInScopeSet` v) ids tvs cvs -- | Add the 'Var's to the in-scope set: see also 'extendInScope' extendSubstInScopeList :: Subst -> [Var] -> Subst extendSubstInScopeList (Subst in_scope ids tvs cvs) vs = Subst (in_scope `extendInScopeSetList` vs) ids tvs cvs -- | Add the 'Var's to the in-scope set: see also 'extendInScope' extendSubstInScopeSet :: Subst -> VarSet -> Subst extendSubstInScopeSet (Subst in_scope ids tvs cvs) vs = Subst (in_scope `extendInScopeSetSet` vs) ids tvs cvs extendTCvSubst :: Subst -> TyCoVar -> Type -> Subst extendTCvSubst subst v ty | isTyVar v = extendTvSubst subst v ty | CoercionTy co <- ty = extendCvSubst subst v co | otherwise = pprPanic "extendTCvSubst" (ppr v <+> text "|->" <+> ppr ty) extendTCvSubstWithClone :: Subst -> TyCoVar -> TyCoVar -> Subst extendTCvSubstWithClone subst tcv | isTyVar tcv = extendTvSubstWithClone subst tcv | otherwise = extendCvSubstWithClone subst tcv -- | Add a substitution for a 'TyVar' to the 'Subst' -- The 'TyVar' *must* be a real TyVar, and not a CoVar -- You must ensure that the 
in-scope set is such that -- Note [The substitution invariant] holds -- after extending the substitution like this. extendTvSubst :: Subst -> TyVar -> Type -> Subst extendTvSubst (Subst in_scope ids tvs cvs) tv ty = assert (isTyVar tv) $ Subst in_scope ids (extendVarEnv tvs tv ty) cvs extendTvSubstWithClone :: Subst -> TyVar -> TyVar -> Subst -- Adds a new tv -> tv mapping, /and/ extends the in-scope set with the clone -- Does not look in the kind of the new variable; -- those variables should be in scope already extendTvSubstWithClone (Subst in_scope idenv tenv cenv) tv tv' = Subst (extendInScopeSet in_scope tv') idenv (extendVarEnv tenv tv (mkTyVarTy tv')) cenv -- | Add a substitution from a 'CoVar' to a 'Coercion' to the 'Subst': -- you must ensure that the in-scope set satisfies -- Note [The substitution invariant] -- after extending the substitution like this extendCvSubst :: Subst -> CoVar -> Coercion -> Subst extendCvSubst (Subst in_scope ids tvs cvs) v r = assert (isCoVar v) $ Subst in_scope ids tvs (extendVarEnv cvs v r) extendCvSubstWithClone :: Subst -> CoVar -> CoVar -> Subst extendCvSubstWithClone (Subst in_scope ids tenv cenv) cv cv' = Subst (extendInScopeSetSet in_scope new_in_scope) ids tenv (extendVarEnv cenv cv (mkCoVarCo cv')) where new_in_scope = tyCoVarsOfType (varType cv') `extendVarSet` cv' extendTvSubstAndInScope :: Subst -> TyVar -> Type -> Subst -- Also extends the in-scope set extendTvSubstAndInScope (Subst in_scope ids tenv cenv) tv ty = Subst (in_scope `extendInScopeSetSet` tyCoVarsOfType ty) ids (extendVarEnv tenv tv ty) cenv -- | Adds multiple 'TyVar' substitutions to the 'Subst': see also 'extendTvSubst' extendTvSubstList :: Subst -> [(TyVar,Type)] -> Subst extendTvSubstList subst vrs = foldl' extend subst vrs where extend subst (v, r) = extendTvSubst subst v r extendTCvSubstList :: Subst -> [Var] -> [Type] -> Subst extendTCvSubstList subst tvs tys = foldl2 extendTCvSubst subst tvs tys unionSubst :: Subst -> Subst -> Subst -- Works 
when the ranges are disjoint unionSubst (Subst in_scope1 ids1 tenv1 cenv1) (Subst in_scope2 ids2 tenv2 cenv2) = assert (ids1 `disjointVarEnv` ids2 && tenv1 `disjointVarEnv` tenv2 && cenv1 `disjointVarEnv` cenv2 ) Subst (in_scope1 `unionInScope` in_scope2) (ids1 `plusVarEnv` ids2) (tenv1 `plusVarEnv` tenv2) (cenv1 `plusVarEnv` cenv2) -- | Generates the in-scope set for the 'Subst' from the types in the incoming -- environment. No CoVars or Ids, please! zipTvSubst :: HasDebugCallStack => [TyVar] -> [Type] -> Subst zipTvSubst tvs tys = mkTvSubst (mkInScopeSet (shallowTyCoVarsOfTypes tys)) tenv where tenv = zipTyEnv tvs tys -- | Generates the in-scope set for the 'Subst' from the types in the incoming -- environment. No TyVars, please! zipCvSubst :: HasDebugCallStack => [CoVar] -> [Coercion] -> Subst zipCvSubst cvs cos = mkCvSubst (mkInScopeSet (shallowTyCoVarsOfCos cos)) cenv where cenv = zipCoEnv cvs cos zipTCvSubst :: HasDebugCallStack => [TyCoVar] -> [Type] -> Subst zipTCvSubst tcvs tys = zip_tcvsubst tcvs tys $ mkEmptySubst $ mkInScopeSet $ shallowTyCoVarsOfTypes tys where zip_tcvsubst :: [TyCoVar] -> [Type] -> Subst -> Subst zip_tcvsubst (tv:tvs) (ty:tys) subst = zip_tcvsubst tvs tys (extendTCvSubst subst tv ty) zip_tcvsubst [] [] subst = subst -- empty case zip_tcvsubst _ _ _ = pprPanic "zipTCvSubst: length mismatch" (ppr tcvs <+> ppr tys) -- | Generates the in-scope set for the 'TCvSubst' from the types in the -- incoming environment. No CoVars, please! 
The InScopeSet is just a thunk -- so with a bit of luck it'll never be evaluated mkTvSubstPrs :: [(TyVar, Type)] -> Subst mkTvSubstPrs [] = emptySubst mkTvSubstPrs prs = assertPpr onlyTyVarsAndNoCoercionTy (text "prs" <+> ppr prs) $ mkTvSubst in_scope tenv where tenv = mkVarEnv prs in_scope = mkInScopeSet $ shallowTyCoVarsOfTypes $ map snd prs onlyTyVarsAndNoCoercionTy = and [ isTyVar tv && not (isCoercionTy ty) | (tv, ty) <- prs ] -- | The InScopeSet is just a thunk so with a bit of luck it'll never be evaluated zipTyEnv :: HasDebugCallStack => [TyVar] -> [Type] -> TvSubstEnv zipTyEnv tyvars tys | debugIsOn , not (all isTyVar tyvars && (tyvars `equalLength` tys)) = pprPanic "zipTyEnv" (ppr tyvars $$ ppr tys) | otherwise = assert (all (not . isCoercionTy) tys ) zipToUFM tyvars tys -- There used to be a special case for when -- ty == TyVarTy tv -- (a not-uncommon case) in which case the substitution was dropped. -- But the type-tidier changes the print-name of a type variable without -- changing the unique, and that led to a bug. Why? Pre-tidying, we had -- a type {Foo t}, where Foo is a one-method class. So Foo is really a newtype. -- And it happened that t was the type variable of the class. Post-tiding, -- it got turned into {Foo t2}. The ext-core printer expanded this using -- sourceTypeRep, but that said "Oh, t == t2" because they have the same unique, -- and so generated a rep type mentioning t not t2. 
-- -- Simplest fix is to nuke the "optimisation" zipCoEnv :: HasDebugCallStack => [CoVar] -> [Coercion] -> CvSubstEnv zipCoEnv cvs cos | debugIsOn , not (all isCoVar cvs) = pprPanic "zipCoEnv" (ppr cvs <+> ppr cos) | otherwise = mkVarEnv (zipEqual "zipCoEnv" cvs cos) -- Pretty printing, for debugging only instance Outputable Subst where ppr (Subst in_scope ids tvs cvs) = text " in_scope_doc $$ text " IdSubst =" <+> ppr ids $$ text " TvSubst =" <+> ppr tvs $$ text " CvSubst =" <+> ppr cvs <> char '>' where in_scope_doc = pprVarSet (getInScopeVars in_scope) (braces . fsep . map ppr) {- %************************************************************************ %* * Performing type or kind substitutions %* * %************************************************************************ Note [Sym and ForAllCo] ~~~~~~~~~~~~~~~~~~~~~~~ In OptCoercion, we try to push "sym" out to the leaves of a coercion. But, how do we push sym into a ForAllCo? It's a little ugly. Ignoring visibility, here is the typing rule (see Note [ForAllCo] in GHC.Core.TyCo.Rep). h : k1 ~# k2 (tv : k1) |- g : ty1 ~# ty2 ---------------------------- ForAllCo tv h g : (ForAllTy (tv : k1) ty1) ~# (ForAllTy (tv : k2) (ty2[tv |-> tv |> sym h])) Here is what we want: ForAllCo tv h' g' : (ForAllTy (tv : k2) (ty2[tv |-> tv |> sym h])) ~# (ForAllTy (tv : k1) ty1) Because the kinds of the type variables to the right of the colon are the kinds coerced by h', we know (h' : k2 ~# k1). Thus, (h' = sym h). Now, we can rewrite ty1 to be (ty1[tv |-> tv |> sym h' |> h']). We thus want ForAllCo tv h' g' : (ForAllTy (tv : k2) (ty2[tv |-> tv |> h'])) ~# (ForAllTy (tv : k1) (ty1[tv |-> tv |> h'][tv |-> tv |> sym h'])) We thus see that we want g' : ty2[tv |-> tv |> h'] ~# ty1[tv |-> tv |> h'] and thus g' = sym (g[tv |-> tv |> h']). 
Putting it all together, we get this: sym (ForAllCo tv h g) ==> ForAllCo tv (sym h) (sym g[tv |-> tv |> sym h]) Note [Substituting in a coercion hole] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It seems highly suspicious to be substituting in a coercion that still has coercion holes. Yet, this can happen in a situation like this: f :: forall k. k :~: Type -> () f Refl = let x :: forall (a :: k). [a] -> ... x = ... When we check x's type signature, we require that k ~ Type. We indeed know this due to the Refl pattern match, but the eager unifier can't make use of givens. So, when we're done looking at x's type, a coercion hole will remain. Then, when we're checking x's definition, we skolemise x's type (in order to, e.g., bring the scoped type variable `a` into scope). This requires performing a substitution for the fresh skolem variables. This substitution needs to affect the kind of the coercion hole, too -- otherwise, the kind will have an out-of-scope variable in it. More problematically in practice (we won't actually notice the out-of-scope variable ever), skolems in the kind might have too high a level, triggering a failure to uphold the invariant that no free variables in a type have a higher level than the ambient level in the type checker. In the event of having free variables in the hole's kind, I'm pretty sure we'll always have an erroneous program, so we don't need to worry what will happen when the hole gets filled in. After all, a hole relating a locally-bound type variable will be unable to be solved. This is why it's OK not to look through the IORef of a coercion hole during substitution. -} -- | Type substitution, see 'zipTvSubst' substTyWith :: HasDebugCallStack => [TyVar] -> [Type] -> Type -> Type -- Works only if the domain of the substitution is a -- superset of the type being substituted into substTyWith tvs tys = {-#SCC "substTyWith" #-} assert (tvs `equalLength` tys ) substTy (zipTvSubst tvs tys) -- | Type substitution, see 'zipTvSubst'. 
Disables sanity checks. -- The problems that the sanity checks in substTy catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substTyUnchecked to -- substTy and remove this function. Please don't use in new code. substTyWithUnchecked :: [TyVar] -> [Type] -> Type -> Type substTyWithUnchecked tvs tys = assert (tvs `equalLength` tys ) substTyUnchecked (zipTvSubst tvs tys) -- | Substitute tyvars within a type using a known 'InScopeSet'. -- Pre-condition: the 'in_scope' set should satisfy Note [The substitution -- invariant]; specifically it should include the free vars of 'tys', -- and of 'ty' minus the domain of the subst. substTyWithInScope :: HasDebugCallStack => InScopeSet -> [TyVar] -> [Type] -> Type -> Type substTyWithInScope in_scope tvs tys ty = assert (tvs `equalLength` tys ) substTy (mkTvSubst in_scope tenv) ty where tenv = zipTyEnv tvs tys -- | Coercion substitution, see 'zipTvSubst' substCoWith :: HasDebugCallStack => [TyVar] -> [Type] -> Coercion -> Coercion substCoWith tvs tys = assert (tvs `equalLength` tys ) substCo (zipTvSubst tvs tys) -- | Coercion substitution, see 'zipTvSubst'. Disables sanity checks. -- The problems that the sanity checks in substCo catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substCoUnchecked to -- substCo and remove this function. Please don't use in new code. 
substCoWithUnchecked :: [TyVar] -> [Type] -> Coercion -> Coercion substCoWithUnchecked tvs tys = assert (tvs `equalLength` tys ) substCoUnchecked (zipTvSubst tvs tys) -- | Substitute covars within a type substTyWithCoVars :: [CoVar] -> [Coercion] -> Type -> Type substTyWithCoVars cvs cos = substTy (zipCvSubst cvs cos) -- | Type substitution, see 'zipTvSubst' substTysWith :: HasDebugCallStack => [TyVar] -> [Type] -> [Type] -> [Type] substTysWith tvs tys = assert (tvs `equalLength` tys ) substTys (zipTvSubst tvs tys) -- | Type substitution, see 'zipTvSubst' substTysWithCoVars :: HasDebugCallStack => [CoVar] -> [Coercion] -> [Type] -> [Type] substTysWithCoVars cvs cos = assert (cvs `equalLength` cos ) substTys (zipCvSubst cvs cos) -- | Substitute within a 'Type' after adding the free variables of the type -- to the in-scope set. This is useful for the case when the free variables -- aren't already in the in-scope set or easily available. -- See also Note [The substitution invariant]. substTyAddInScope :: HasDebugCallStack => Subst -> Type -> Type substTyAddInScope subst ty = substTy (extendSubstInScopeSet subst $ tyCoVarsOfType ty) ty -- | When calling `substTy` it should be the case that the in-scope set in -- the substitution is a superset of the free vars of the range of the -- substitution. -- See also Note [The substitution invariant]. -- TODO: take into account ids and rename as isValidSubst isValidTCvSubst :: Subst -> Bool isValidTCvSubst (Subst in_scope _ tenv cenv) = (tenvFVs `varSetInScope` in_scope) && (cenvFVs `varSetInScope` in_scope) where tenvFVs = shallowTyCoVarsOfTyVarEnv tenv cenvFVs = shallowTyCoVarsOfCoVarEnv cenv -- | This checks if the substitution satisfies the invariant from -- Note [The substitution invariant]. 
checkValidSubst :: HasDebugCallStack => Subst -> [Type] -> [Coercion] -> a -> a checkValidSubst subst@(Subst in_scope _ tenv cenv) tys cos a = assertPpr (isValidTCvSubst subst) (text "in_scope" <+> ppr in_scope $$ text "tenv" <+> ppr tenv $$ text "tenvFVs" <+> ppr (shallowTyCoVarsOfTyVarEnv tenv) $$ text "cenv" <+> ppr cenv $$ text "cenvFVs" <+> ppr (shallowTyCoVarsOfCoVarEnv cenv) $$ text "tys" <+> ppr tys $$ text "cos" <+> ppr cos) $ assertPpr tysCosFVsInScope (text "in_scope" <+> ppr in_scope $$ text "tenv" <+> ppr tenv $$ text "cenv" <+> ppr cenv $$ text "tys" <+> ppr tys $$ text "cos" <+> ppr cos $$ text "needInScope" <+> ppr needInScope) a where substDomain = nonDetKeysUFM tenv ++ nonDetKeysUFM cenv -- It's OK to use nonDetKeysUFM here, because we only use this list to -- remove some elements from a set needInScope = (shallowTyCoVarsOfTypes tys `unionVarSet` shallowTyCoVarsOfCos cos) `delListFromUniqSet_Directly` substDomain tysCosFVsInScope = needInScope `varSetInScope` in_scope -- | Substitute within a 'Type' -- The substitution has to satisfy the invariants described in -- Note [The substitution invariant]. substTy :: HasDebugCallStack => Subst -> Type -> Type substTy subst ty | isEmptyTCvSubst subst = ty | otherwise = checkValidSubst subst [ty] [] $ subst_ty subst ty -- | Substitute within a 'Type' disabling the sanity checks. -- The problems that the sanity checks in substTy catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substTyUnchecked to -- substTy and remove this function. Please don't use in new code. 
substTyUnchecked :: Subst -> Type -> Type substTyUnchecked subst ty | isEmptyTCvSubst subst = ty | otherwise = subst_ty subst ty substScaledTy :: HasDebugCallStack => Subst -> Scaled Type -> Scaled Type substScaledTy subst scaled_ty = mapScaledType (substTy subst) scaled_ty substScaledTyUnchecked :: HasDebugCallStack => Subst -> Scaled Type -> Scaled Type substScaledTyUnchecked subst scaled_ty = mapScaledType (substTyUnchecked subst) scaled_ty -- | Substitute within several 'Type's -- The substitution has to satisfy the invariants described in -- Note [The substitution invariant]. substTys :: HasDebugCallStack => Subst -> [Type] -> [Type] substTys subst tys | isEmptyTCvSubst subst = tys | otherwise = checkValidSubst subst tys [] $ map (subst_ty subst) tys substScaledTys :: HasDebugCallStack => Subst -> [Scaled Type] -> [Scaled Type] substScaledTys subst scaled_tys | isEmptyTCvSubst subst = scaled_tys | otherwise = checkValidSubst subst (map scaledMult scaled_tys ++ map scaledThing scaled_tys) [] $ map (mapScaledType (subst_ty subst)) scaled_tys -- | Substitute within several 'Type's disabling the sanity checks. -- The problems that the sanity checks in substTys catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substTysUnchecked to -- substTys and remove this function. Please don't use in new code. substTysUnchecked :: Subst -> [Type] -> [Type] substTysUnchecked subst tys | isEmptyTCvSubst subst = tys | otherwise = map (subst_ty subst) tys substScaledTysUnchecked :: Subst -> [Scaled Type] -> [Scaled Type] substScaledTysUnchecked subst tys | isEmptyTCvSubst subst = tys | otherwise = map (mapScaledType (subst_ty subst)) tys -- | Substitute within a 'ThetaType' -- The substitution has to satisfy the invariants described in -- Note [The substitution invariant]. 
substTheta :: HasDebugCallStack => Subst -> ThetaType -> ThetaType substTheta = substTys -- | Substitute within a 'ThetaType' disabling the sanity checks. -- The problems that the sanity checks in substTys catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substThetaUnchecked to -- substTheta and remove this function. Please don't use in new code. substThetaUnchecked :: Subst -> ThetaType -> ThetaType substThetaUnchecked = substTysUnchecked subst_ty :: Subst -> Type -> Type -- subst_ty is the main workhorse for type substitution -- -- Note that the in_scope set is poked only if we hit a forall -- so it may often never be fully computed subst_ty subst ty = go ty where go (TyVarTy tv) = substTyVar subst tv go (AppTy fun arg) = (mkAppTy $! (go fun)) $! (go arg) -- The mkAppTy smart constructor is important -- we might be replacing (a Int), represented with App -- by [Int], represented with TyConApp go ty@(TyConApp tc []) = tc `seq` ty -- avoid allocation in this common case go (TyConApp tc tys) = (mkTyConApp $! tc) $! strictMap go tys -- NB: mkTyConApp, not TyConApp. -- mkTyConApp has optimizations. -- See Note [Using synonyms to compress types] -- in GHC.Core.Type go ty@(FunTy { ft_mult = mult, ft_arg = arg, ft_res = res }) = let !mult' = go mult !arg' = go arg !res' = go res in ty { ft_mult = mult', ft_arg = arg', ft_res = res' } go (ForAllTy (Bndr tv vis) ty) = case substVarBndrUnchecked subst tv of (subst', tv') -> (ForAllTy $! ((Bndr $! tv') vis)) $! (subst_ty subst' ty) go (LitTy n) = LitTy $! n go (CastTy ty co) = (mkCastTy $! (go ty)) $! (subst_co subst co) go (CoercionTy co) = CoercionTy $! 
(subst_co subst co) substTyVar :: Subst -> TyVar -> Type substTyVar (Subst _ _ tenv _) tv = assert (isTyVar tv) $ case lookupVarEnv tenv tv of Just ty -> ty Nothing -> TyVarTy tv substTyVarToTyVar :: HasDebugCallStack => Subst -> TyVar -> TyVar -- Apply the substitution, expecting the result to be a TyVarTy substTyVarToTyVar (Subst _ _ tenv _) tv = assert (isTyVar tv) $ case lookupVarEnv tenv tv of Just ty -> case getTyVar_maybe ty of Just tv -> tv Nothing -> pprPanic "substTyVarToTyVar" (ppr tv $$ ppr ty) Nothing -> tv substTyVars :: Subst -> [TyVar] -> [Type] substTyVars subst = map $ substTyVar subst substTyCoVars :: Subst -> [TyCoVar] -> [Type] substTyCoVars subst = map $ substTyCoVar subst substTyCoVar :: Subst -> TyCoVar -> Type substTyCoVar subst tv | isTyVar tv = substTyVar subst tv | otherwise = CoercionTy $ substCoVar subst tv lookupTyVar :: Subst -> TyVar -> Maybe Type -- See Note [Extending the TvSubstEnv and CvSubstEnv] lookupTyVar (Subst _ _ tenv _) tv = assert (isTyVar tv ) lookupVarEnv tenv tv -- | Substitute within a 'Coercion' -- The substitution has to satisfy the invariants described in -- Note [The substitution invariant]. substCo :: HasDebugCallStack => Subst -> Coercion -> Coercion substCo subst co | isEmptyTCvSubst subst = co | otherwise = checkValidSubst subst [] [co] $ subst_co subst co -- | Substitute within a 'Coercion' disabling sanity checks. -- The problems that the sanity checks in substCo catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substCoUnchecked to -- substCo and remove this function. Please don't use in new code. substCoUnchecked :: Subst -> Coercion -> Coercion substCoUnchecked subst co | isEmptyTCvSubst subst = co | otherwise = subst_co subst co -- | Substitute within several 'Coercion's -- The substitution has to satisfy the invariants described in -- Note [The substitution invariant]. 
substCos :: HasDebugCallStack => Subst -> [Coercion] -> [Coercion]
substCos subst cos
  | isEmptyTCvSubst subst = cos
  | otherwise = checkValidSubst subst [] cos $ map (subst_co subst) cos

subst_co :: Subst -> Coercion -> Coercion
-- The workhorse for coercion substitution, mirroring subst_ty;
-- strict applications ($!) avoid building thunks that retain the old coercion
subst_co subst co
  = go co
  where
    go_ty :: Type -> Type
    go_ty = subst_ty subst

    go_mco :: MCoercion -> MCoercion
    go_mco MRefl    = MRefl
    go_mco (MCo co) = MCo (go co)

    go :: Coercion -> Coercion
    go (Refl ty)             = mkNomReflCo $! (go_ty ty)
    go (GRefl r ty mco)      = (mkGReflCo r $! (go_ty ty)) $! (go_mco mco)
    go (TyConAppCo r tc args)= mkTyConAppCo r tc $! go_cos args
    go (AxiomCo con cos)     = mkAxiomCo con $! go_cos cos
    go (AppCo co arg)        = (mkAppCo $! go co) $! go arg
    go (ForAllCo tv visL visR kind_co co)
      -- The binder is substituted first; the extended substitution
      -- is then applied to the body
      = case substForAllCoBndrUnchecked subst tv kind_co of
          (subst', tv', kind_co')
            -> ((mkForAllCo $! tv') visL visR $! kind_co') $!
               subst_co subst' co
    go (FunCo r afl afr w co1 co2) = ((mkFunCo2 r afl afr $! go w) $! go co1) $! go co2
    go (CoVarCo cv)          = substCoVar subst cv
    go (UnivCo { uco_prov = p, uco_role = r
               , uco_lty = t1, uco_rty = t2, uco_deps = deps })
      = ((((mkUnivCo $! p) $! go_cos deps) $! r) $! (go_ty t1)) $! (go_ty t2)
    go (SymCo co)            = mkSymCo $! (go co)
    go (TransCo co1 co2)     = (mkTransCo $! (go co1)) $! (go co2)
    go (SelCo d co)          = mkSelCo d $! (go co)
    go (LRCo lr co)          = mkLRCo lr $! (go co)
    go (InstCo co arg)       = (mkInstCo $! (go co)) $! go arg
    go (KindCo co)           = mkKindCo $! (go co)
    go (SubCo co)            = mkSubCo $! (go co)
    go (HoleCo h)            = HoleCo $! go_hole h

    -- Force the spine of the substituted list before returning it
    go_cos cos = let cos' = map go cos
                 in cos' `seqList` cos'

    -- See Note [Substituting in a coercion hole]
    go_hole h@(CoercionHole { ch_co_var = cv })
      = h { ch_co_var = updateVarType go_ty cv }

-- | Perform a substitution within a 'DVarSet' of free variables,
-- returning the shallow free coercion variables.
substDCoVarSet :: Subst -> DCoVarSet -> DCoVarSet
substDCoVarSet subst cvs = coVarsOfCosDSet $ map (substCoVar subst) $
                           dVarSetElems cvs

substForAllCoBndr :: Subst -> TyCoVar -> KindCoercion
                  -> (Subst, TyCoVar, Coercion)
substForAllCoBndr subst
  = substForAllCoBndrUsing NotSwapped (substCo subst) subst

-- | Like 'substForAllCoBndr', but disables sanity checks.
-- The problems that the sanity checks in substCo catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substCoUnchecked to
-- substCo and remove this function. Please don't use in new code.
substForAllCoBndrUnchecked :: Subst -> TyCoVar -> KindCoercion
                           -> (Subst, TyCoVar, Coercion)
substForAllCoBndrUnchecked subst
  = substForAllCoBndrUsing NotSwapped (substCoUnchecked subst) subst

-- See Note [Sym and ForAllCo]
substForAllCoBndrUsing :: SwapFlag  -- Apply sym to binder?
                       -> (Coercion -> Coercion)  -- transformation to kind co
                       -> Subst -> TyCoVar -> KindCoercion
                       -> (Subst, TyCoVar, KindCoercion)
-- Dispatch on whether the binder is a type or coercion variable
substForAllCoBndrUsing sym sco subst old_var
  | isTyVar old_var = substForAllCoTyVarBndrUsing sym sco subst old_var
  | otherwise       = substForAllCoCoVarBndrUsing sym sco subst old_var

substForAllCoTyVarBndrUsing :: SwapFlag  -- Apply sym to binder?
                            -> (Coercion -> Coercion)  -- transformation to kind co
                            -> Subst -> TyVar -> KindCoercion
                            -> (Subst, TyVar, KindCoercion)
substForAllCoTyVarBndrUsing sym sco (Subst in_scope idenv tenv cenv) old_var old_kind_co
  = assert (isTyVar old_var )
    ( Subst (in_scope `extendInScopeSet` new_var) idenv new_env cenv
    , new_var, new_kind_co )
  where
    -- When swapped, the binder must be cast by the (new) kind coercion;
    -- see Note [Sym and ForAllCo]
    new_env | no_change, notSwapped sym = delVarEnv tenv old_var
            | isSwapped sym = extendVarEnv tenv old_var $
                              TyVarTy new_var `CastTy` new_kind_co
            | otherwise = extendVarEnv tenv old_var (TyVarTy new_var)

    no_kind_change = noFreeVarsOfCo old_kind_co
    no_change = no_kind_change && (new_var == old_var)

    new_kind_co | no_kind_change = old_kind_co
                | otherwise      = sco old_kind_co

    new_ki1 = coercionLKind new_kind_co
    -- We could do substitution to (tyVarKind old_var). We don't do so because
    -- we already substituted new_kind_co, which contains the kind information
    -- we want. We don't want to do substitution once more. Also, in most cases,
    -- new_kind_co is a Refl, in which case coercionKind is really fast.

    new_var = uniqAway in_scope (setTyVarKind old_var new_ki1)

substForAllCoCoVarBndrUsing :: SwapFlag  -- Apply sym to binder?
                            -> (Coercion -> Coercion)  -- transformation to kind co
                            -> Subst -> CoVar -> KindCoercion
                            -> (Subst, CoVar, KindCoercion)
substForAllCoCoVarBndrUsing sym sco (Subst in_scope idenv tenv cenv)
                            old_var old_kind_co
  = assert (isCoVar old_var )
    ( Subst (in_scope `extendInScopeSet` new_var) idenv tenv new_cenv
    , new_var, new_kind_co )
  where
    new_cenv | no_change, notSwapped sym = delVarEnv cenv old_var
             | otherwise = extendVarEnv cenv old_var (mkCoVarCo new_var)

    no_kind_change = noFreeVarsOfCo old_kind_co
    no_change = no_kind_change && (new_var == old_var)

    new_kind_co | no_kind_change = old_kind_co
                | otherwise      = sco old_kind_co

    -- The two sides of the kind coercion give the candidate types
    -- for the new binder; pickSwap selects according to sym
    Pair h1 h2 = coercionKind new_kind_co

    new_var = uniqAway in_scope $ mkCoVar (varName old_var) new_var_type

    new_var_type = pickSwap sym h1 h2

-- | Look up a 'CoVar' in the substitution; an unmapped variable is
-- returned unchanged (wrapped in 'CoVarCo')
substCoVar :: Subst -> CoVar -> Coercion
substCoVar (Subst _ _ _ cenv) cv
  = case lookupVarEnv cenv cv of
      Just co -> co
      Nothing -> CoVarCo cv

substCoVars :: Subst -> [CoVar] -> [Coercion]
substCoVars subst cvs = map (substCoVar subst) cvs

lookupCoVar :: Subst -> Var -> Maybe Coercion
lookupCoVar (Subst _ _ _ cenv) v = lookupVarEnv cenv v

substTyVarBndr :: HasDebugCallStack => Subst -> TyVar -> (Subst, TyVar)
substTyVarBndr = substTyVarBndrUsing substTy

substTyVarBndrs :: HasDebugCallStack => Subst -> [TyVar] -> (Subst, [TyVar])
substTyVarBndrs = mapAccumL substTyVarBndr

substVarBndr :: HasDebugCallStack => Subst -> TyCoVar -> (Subst, TyCoVar)
substVarBndr = substVarBndrUsing substTy

substVarBndrs :: HasDebugCallStack => Subst -> [TyCoVar] -> (Subst, [TyCoVar])
substVarBndrs = mapAccumL substVarBndr

substCoVarBndr :: HasDebugCallStack => Subst -> CoVar -> (Subst, CoVar)
substCoVarBndr = substCoVarBndrUsing substTy

-- | Like 'substVarBndr', but disables sanity checks.
-- The problems that the sanity checks in substTy catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substTyUnchecked to
-- substTy and remove this function. Please don't use in new code.
substVarBndrUnchecked :: Subst -> TyCoVar -> (Subst, TyCoVar)
substVarBndrUnchecked = substVarBndrUsing substTyUnchecked

substVarBndrUsing :: (Subst -> Type -> Type)
                  -> Subst -> TyCoVar -> (Subst, TyCoVar)
-- Dispatch on whether the binder is a type or coercion variable
substVarBndrUsing subst_fn subst v
  | isTyVar v = substTyVarBndrUsing subst_fn subst v
  | otherwise = substCoVarBndrUsing subst_fn subst v

-- | Substitute a tyvar in a binding position, returning an
-- extended subst and a new tyvar.
-- Use the supplied function to substitute in the kind
substTyVarBndrUsing
  :: (Subst -> Type -> Type)  -- ^ Use this to substitute in the kind
  -> Subst -> TyVar -> (Subst, TyVar)
substTyVarBndrUsing subst_fn subst@(Subst in_scope idenv tenv cenv) old_var
  = assertPpr _no_capture (pprTyVar old_var $$ pprTyVar new_var $$ ppr subst) $
    assert (isTyVar old_var )
    (Subst (in_scope `extendInScopeSet` new_var) idenv new_env cenv, new_var)
  where
    new_env | no_change = delVarEnv tenv old_var
            | otherwise = extendVarEnv tenv old_var (TyVarTy new_var)

    _no_capture = not (new_var `elemVarSet` shallowTyCoVarsOfTyVarEnv tenv)
    -- Assertion check that we are not capturing something in the substitution

    old_ki = tyVarKind old_var
    no_kind_change = noFreeVarsOfType old_ki -- verify that kind is closed
    no_change = no_kind_change && (new_var == old_var)
        -- no_change means that the new_var is identical in
        -- all respects to the old_var (same unique, same kind)
        -- See Note [Extending the TvSubstEnv and CvSubstEnv]
        --
        -- In that case we don't need to extend the substitution
        -- to map old to new. But instead we must zap any
        -- current substitution for the variable. For example:
        --      (\x.e) with id_subst = [x |-> e']
        -- Here we must simply zap the substitution for x

    new_var | no_kind_change = uniqAway in_scope old_var
            | otherwise = uniqAway in_scope $
                          setTyVarKind old_var (subst_fn subst old_ki)
        -- The uniqAway part makes sure the new variable is not already in scope

-- | Substitute a covar in a binding position, returning an
-- extended subst and a new covar.
-- Use the supplied function to substitute in the kind
substCoVarBndrUsing :: (Subst -> Type -> Type)
                    -> Subst -> CoVar -> (Subst, CoVar)
substCoVarBndrUsing subst_fn subst@(Subst in_scope idenv tenv cenv) old_var
  = assert (isCoVar old_var)
    (Subst (in_scope `extendInScopeSet` new_var) idenv tenv new_cenv, new_var)
  where
    new_co         = mkCoVarCo new_var
    no_kind_change = noFreeVarsOfTypes [t1, t2]
    no_change      = new_var == old_var && no_kind_change

    new_cenv | no_change = delVarEnv cenv old_var
             | otherwise = extendVarEnv cenv old_var new_co

    new_var = uniqAway in_scope subst_old_var
    subst_old_var = mkCoVar (varName old_var) new_var_type

    (t1, t2, role) = coVarTypesRole old_var
    t1' = subst_fn subst t1
    t2' = subst_fn subst t2
    new_var_type = mkCoercionType role t1' t2'
                  -- It's important to do the substitution for coercions,
                  -- because they can have free type variables

-- | Clone a type variable binder, giving it the supplied unique,
-- and extend the substitution to map the old variable to the clone
cloneTyVarBndr :: Subst -> TyVar -> Unique -> (Subst, TyVar)
cloneTyVarBndr subst@(Subst in_scope id_env tv_env cv_env) tv uniq
  = assertPpr (isTyVar tv) (ppr tv) -- I think it's only called on TyVars
    ( Subst (extendInScopeSet in_scope tv')
            id_env
            (extendVarEnv tv_env tv (mkTyVarTy tv'))
            cv_env
    , tv')
  where
    old_ki = tyVarKind tv
    no_kind_change = noFreeVarsOfType old_ki -- verify that kind is closed

    tv1 | no_kind_change = tv
        | otherwise = setTyVarKind tv (substTy subst old_ki)

    tv' = setVarUnique tv1 uniq

-- | Clone a list of type variable binders, threading the substitution
-- and drawing fresh uniques from the supply
cloneTyVarBndrs :: Subst -> [TyVar] -> UniqSupply -> (Subst, [TyVar])
cloneTyVarBndrs subst [] _usupply = (subst, [])
cloneTyVarBndrs subst (t:ts) usupply = (subst'', tv:tvs)
  where
    (uniq, usupply') = takeUniqFromSupply usupply
    (subst' , tv )   = cloneTyVarBndr subst t uniq
    (subst'', tvs)   = cloneTyVarBndrs subst' ts usupply'

-- | Substitute within a 'PiTyBinder', extending the substitution
-- when the binder is named
substTyCoBndr :: Subst -> PiTyBinder -> (Subst, PiTyBinder)
substTyCoBndr subst (Anon ty af)          = (subst, Anon (substScaledTy subst ty) af)
substTyCoBndr subst (Named (Bndr tv vis)) = (subst', Named (Bndr tv' vis))
  where
    (subst', tv') = substVarBndr subst tv
In our example: the free vars are a_1 and a_4:
If we first tidy [a_33 :-> "a"] then
tidyVarBndrs :: TidyEnv -> [TyCoVar] -> (TidyEnv, [TyCoVar])
tidyVarBndrs tidy_env tvs
  = mapAccumL tidyVarBndr (avoidNameClashes tvs tidy_env) tvs

-- | Tidy a single binder: pick a fresh OccName, tidy its kind,
-- and record the renaming in the returned environment
tidyVarBndr :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
tidyVarBndr tidy_env@(occ_env, subst) var
  = case tidyOccName occ_env (getHelpfulOccName var) of
      (occ_env', occ') -> ((occ_env', subst'), var')
        where
          subst' = extendVarEnv subst var var'
          var'   = updateVarType (tidyType tidy_env) (setVarName var name')
          name'  = tidyNameOcc name occ'
          name   = varName var

avoidNameClashes :: [TyCoVar] -> TidyEnv -> TidyEnv
-- Seed the occ_env with clashes among the names, see
-- Note [Tidying multiple names at once] in GHC.Types.Name.Occurrence
avoidNameClashes tvs (occ_env, subst)
  = (avoidClashesOccEnv occ_env occs, subst)
  where
    occs = map getHelpfulOccName tvs

getHelpfulOccName :: TyCoVar -> OccName
-- A TcTyVar with a System Name is probably a
-- unification variable; when we tidy them we give them a trailing
-- "0" (or 1 etc) so that they don't take precedence for the
-- un-modified name. Plus, indicating a unification variable in
-- this way is a helpful clue for users
getHelpfulOccName tv
  | isSystemName name, isTcTyVar tv
  = mkTyVarOccFS (occNameFS occ `appendFS` fsLit "0")
  | otherwise
  = occ
  where
    name = varName tv
    occ  = getOccName name

tidyForAllTyBinder :: TidyEnv -> VarBndr TyCoVar vis
                   -> (TidyEnv, VarBndr TyCoVar vis)
tidyForAllTyBinder tidy_env (Bndr tv vis)
  = (tidy_env', Bndr tv' vis)
  where
    (tidy_env', tv') = tidyVarBndr tidy_env tv

tidyForAllTyBinders :: TidyEnv -> [VarBndr TyCoVar vis]
                    -> (TidyEnv, [VarBndr TyCoVar vis])
tidyForAllTyBinders tidy_env tvbs
  = mapAccumL tidyForAllTyBinder
              (avoidNameClashes (binderVars tvbs) tidy_env) tvbs

---------------
tidyFreeTyCoVars :: TidyEnv -> [TyCoVar] -> TidyEnv
-- ^ Add the free 'TyVar's to the env in tidy form,
-- so that we can tidy the type they are free in
-- Precondition: input free vars are closed over kinds and
-- This function does a scopedSort, so that tidied variables
-- have tidied kinds.
-- See Note [Tidying is idempotent]
tidyFreeTyCoVars tidy_env tyvars
  = fst (tidyFreeTyCoVarsX tidy_env tyvars)

---------------
tidyFreeTyCoVarsX :: TidyEnv -> [TyCoVar] -> (TidyEnv, [TyCoVar])
-- Precondition: input free vars are closed over kinds and
-- This function does a scopedSort, so that tidied variables
-- have tidied kinds.
-- See Note [Tidying is idempotent]
tidyFreeTyCoVarsX env tyvars = mapAccumL tidyFreeTyCoVarX env $
                               scopedSort tyvars

---------------
tidyFreeTyCoVarX :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
-- ^ Treat a new 'TyCoVar' as a binder, and give it a fresh tidy name
-- using the environment if one has not already been allocated. See
-- also 'tidyVarBndr'
-- See Note [Tidying is idempotent]
tidyFreeTyCoVarX env@(_, subst) tyvar
  = case lookupVarEnv subst tyvar of
      Just tyvar' -> (env, tyvar')       -- Already substituted
      Nothing     -> tidyVarBndr env tyvar  -- Treat it as a binder

---------------
-- | Tidy an occurrence of a variable: use its tidied form if one has
-- been allocated, otherwise just tidy its kind
tidyTyCoVarOcc :: TidyEnv -> TyCoVar -> TyCoVar
tidyTyCoVarOcc env@(_, subst) tcv
  = case lookupVarEnv subst tcv of
      Nothing   -> updateVarType (tidyType env) tcv
      Just tcv' -> tcv'

---------------

{- Note [Strictness in tidyType and friends]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since the result of tidying will be inserted into the HPT, a potentially
long-lived structure, we generally want to avoid pieces of the old AST
being retained by the thunks produced by tidying.

For this reason we take great care to ensure that all pieces of the tidied AST
are evaluated strictly.  So you will see lots of strict applications ($!) and
uses of `strictMap` in `tidyType`, `tidyTypes` and `tidyCo`.

In the case of tidying of lists (e.g. lists of arguments) we prefer to use
`strictMap f xs` rather than `seqList (map f xs)` as the latter will
unnecessarily allocate a thunk, which will then be almost-immediately
evaluated, for each list element.

Making `tidyType` strict has a rather large effect on performance: see #14738.
Sometimes as much as a 5% reduction in allocation.
-}

-- | Tidy a list of Types
--
-- See Note [Strictness in tidyType and friends]
tidyTypes :: TidyEnv -> [Type] -> [Type]
tidyTypes env tys = strictMap (tidyType env) tys

---------------

-- | Tidy a Type
--
-- See Note [Strictness in tidyType and friends]
tidyType :: TidyEnv -> Type -> Type
tidyType _   t@(LitTy {})          = t -- Preserve sharing
tidyType env (TyVarTy tv)          = TyVarTy $! tidyTyCoVarOcc env tv
tidyType _   t@(TyConApp _ [])     = t -- Preserve sharing if possible
tidyType env (TyConApp tycon tys)  = TyConApp tycon $! tidyTypes env tys
tidyType env (AppTy fun arg)       = (AppTy $! (tidyType env fun)) $! (tidyType env arg)
tidyType env (CastTy ty co)        = (CastTy $! tidyType env ty) $! (tidyCo env co)
tidyType env (CoercionTy co)       = CoercionTy $! (tidyCo env co)
tidyType env ty@(FunTy _ w arg res) = let { !w'   = tidyType env w
                                          ; !arg' = tidyType env arg
                                          ; !res' = tidyType env res }
                                      in ty { ft_mult = w', ft_arg = arg', ft_res = res' }
tidyType env (ty@(ForAllTy{}))     = tidyForAllType env ty

-- | Tidy a forall-type, tidying the binders before the body
tidyForAllType :: TidyEnv -> Type -> Type
tidyForAllType env ty
  = (mkForAllTys' $! (zip tcvs' vis)) $! tidyType body_env body_ty
  where
    (tcvs, vis, body_ty) = splitForAllTyCoVars' ty
    (body_env, tcvs') = tidyVarBndrs env tcvs

-- The following two functions differ from mkForAllTys and splitForAllTyCoVars in that
-- they expect/preserve the ForAllTyFlag argument. These belong to "GHC.Core.Type", but
-- how should they be named?
mkForAllTys' :: [(TyCoVar, ForAllTyFlag)] -> Type -> Type
mkForAllTys' tvvs ty = foldr strictMkForAllTy ty tvvs
  where
    strictMkForAllTy (tv,vis) ty = (ForAllTy $! ((Bndr $! tv) $! vis)) $! ty

splitForAllTyCoVars' :: Type -> ([TyCoVar], [ForAllTyFlag], Type)
splitForAllTyCoVars' ty = go ty [] []
  where
    go (ForAllTy (Bndr tv vis) ty) tvs viss = go ty (tv:tvs) (vis:viss)
    go ty                          tvs viss = (reverse tvs, reverse viss, ty)

---------------
tidyAvoiding :: [OccName] -> (TidyEnv -> a -> TidyEnv) -> a -> TidyEnv
-- Initialise an empty TidyEnv with some bound vars to avoid,
-- run the do_tidy function, and then remove the bound vars again.
-- See Note [tidyAvoiding] tidyAvoiding bound_var_avoids do_tidy thing = (occs' `delTidyOccEnvList` bound_var_avoids, vars') where (occs', vars') = do_tidy init_tidy_env thing init_tidy_env = mkEmptyTidyEnv (initTidyOccEnv bound_var_avoids) --------------- trimTidyEnv :: TidyEnv -> [TyCoVar] -> TidyEnv trimTidyEnv (occ_env, var_env) tcvs = (trimTidyOccEnv occ_env (map getOccName tcvs), var_env) --------------- -- | Grabs the free type variables, tidies them -- and then uses 'tidyType' to work over the type itself tidyOpenTypesX :: TidyEnv -> [Type] -> (TidyEnv, [Type]) -- See Note [Tidying open types] tidyOpenTypesX env tys = (env1, tidyTypes inner_env tys) where free_tcvs :: [TyCoVar] -- Closed over kinds free_tcvs = tyCoVarsOfTypesList tys (env1, free_tcvs') = tidyFreeTyCoVarsX env free_tcvs inner_env = trimTidyEnv env1 free_tcvs' --------------- tidyOpenTypeX :: TidyEnv -> Type -> (TidyEnv, Type) -- See Note [Tidying open types] tidyOpenTypeX env ty = (env1, tidyType inner_env ty) where free_tcvs = tyCoVarsOfTypeList ty (env1, free_tcvs') = tidyFreeTyCoVarsX env free_tcvs inner_env = trimTidyEnv env1 free_tcvs' --------------- tidyOpenTypes :: TidyEnv -> [Type] -> [Type] tidyOpenTypes env ty = snd (tidyOpenTypesX env ty) tidyOpenType :: TidyEnv -> Type -> Type tidyOpenType env ty = snd (tidyOpenTypeX env ty) --------------- -- | Calls 'tidyType' on a top-level type (i.e. with an empty tidying environment) tidyTopType :: Type -> Type tidyTopType ty = tidyType emptyTidyEnv ty --------------- -- | Tidy a Coercion -- -- See Note [Strictness in tidyType and friends] tidyCo :: TidyEnv -> Coercion -> Coercion tidyCo env co = go co where go_mco MRefl = MRefl go_mco (MCo co) = MCo $! go co go (Refl ty) = Refl $! tidyType env ty go (GRefl r ty mco) = (GRefl r $! tidyType env ty) $! go_mco mco go (TyConAppCo r tc cos) = TyConAppCo r tc $! strictMap go cos go (AppCo co1 co2) = (AppCo $! go co1) $! go co2 go (ForAllCo tv visL visR h co) = ((((ForAllCo $! tvp) $! visL) $! 
visR) $! (go h)) $! (tidyCo envp co) where (envp, tvp) = tidyVarBndr env tv -- the case above duplicates a bit of work in tidying h and the kind -- of tv. But the alternative is to use coercionKind, which seems worse. go (FunCo r afl afr w co1 co2) = ((FunCo r afl afr $! go w) $! go co1) $! go co2 go (CoVarCo cv) = CoVarCo $! go_cv cv go (HoleCo h) = HoleCo $! go_hole h go (AxiomCo ax cos) = AxiomCo ax $ strictMap go cos go co@(UnivCo { uco_lty = t1, uco_rty = t2 }) = co { uco_lty = tidyType env t1, uco_rty = tidyType env t2 } -- Don't bother to tidy the uco_deps field go (SymCo co) = SymCo $! go co go (TransCo co1 co2) = (TransCo $! go co1) $! go co2 go (SelCo d co) = SelCo d $! go co go (LRCo lr co) = LRCo lr $! go co go (InstCo co ty) = (InstCo $! go co) $! go ty go (KindCo co) = KindCo $! go co go (SubCo co) = SubCo $! go co go_cv cv = tidyTyCoVarOcc env cv go_hole (CoercionHole cv r h) = (CoercionHole $! go_cv cv) r h -- Tidy even the holes; tidied types should have tidied kinds tidyCos :: TidyEnv -> [Coercion] -> [Coercion] tidyCos env = strictMap (tidyCo env) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCon.hs0000644000000000000000000035144007346545000020175 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveDataTypeable #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 The @TyCon@ datatype -} module GHC.Core.TyCon( -- * Main TyCon data types TyCon, AlgTyConRhs(..), visibleDataCons, AlgTyConFlav(..), isNoParent, FamTyConFlav(..), Role(..), Injectivity(..), PromDataConInfo(..), TyConFlavour(..), -- * TyConBinder TyConBinder, TyConBndrVis(..), mkNamedTyConBinder, mkNamedTyConBinders, mkRequiredTyConBinder, mkAnonTyConBinder, mkAnonTyConBinders, tyConBinderForAllTyFlag, tyConBndrVisForAllTyFlag, isNamedTyConBinder, isVisibleTyConBinder, isInvisibleTyConBinder, isVisibleTcbVis, isInvisSpecTcbVis, -- ** Field labels tyConFieldLabels, 
lookupTyConFieldLabel, -- ** Constructing TyCons mkAlgTyCon, mkClassTyCon, mkPrimTyCon, mkTupleTyCon, mkSumTyCon, mkDataTyConRhs, mkLevPolyDataTyConRhs, mkSynonymTyCon, mkFamilyTyCon, mkPromotedDataCon, mkTcTyCon, noTcTyConScopedTyVars, -- ** Predicates on TyCons isAlgTyCon, isVanillaAlgTyCon, isClassTyCon, isFamInstTyCon, isPrimTyCon, isTupleTyCon, isUnboxedTupleTyCon, isBoxedTupleTyCon, isUnboxedSumTyCon, isPromotedTupleTyCon, isLiftedAlgTyCon, isTypeSynonymTyCon, tyConMustBeSaturated, isPromotedDataCon, isPromotedDataCon_maybe, isDataKindsPromotedDataCon, isKindTyCon, isKindName, isLiftedTypeKindTyConName, isTauTyCon, isFamFreeTyCon, isForgetfulSynTyCon, isDataTyCon, isTypeDataTyCon, isEnumerationTyCon, isNewTyCon, isAbstractTyCon, isFamilyTyCon, isOpenFamilyTyCon, isTypeFamilyTyCon, isDataFamilyTyCon, isOpenTypeFamilyTyCon, isClosedSynFamilyTyConWithAxiom_maybe, tyConInjectivityInfo, isBuiltInSynFamTyCon_maybe, isGadtSyntaxTyCon, isInjectiveTyCon, isGenerativeTyCon, isGenInjAlgRhs, isTyConAssoc, tyConAssoc_maybe, tyConFlavourAssoc_maybe, isImplicitTyCon, isTyConWithSrcDataCons, isTcTyCon, setTcTyConKind, tcHasFixedRuntimeRep, isConcreteTyCon, isValidDTT2TyCon, -- ** Extracting information out of TyCons tyConName, tyConSkolem, tyConKind, tyConUnique, tyConTyVars, tyConVisibleTyVars, tyConCType_maybe, tyConDataCons, tyConDataCons_maybe, tyConSingleDataCon_maybe, tyConSingleDataCon, tyConAlgDataCons_maybe, tyConSingleAlgDataCon_maybe, tyConFamilySize, tyConStupidTheta, tyConArity, tyConNullaryTy, mkTyConTy, tyConRoles, tyConFlavour, tyConTuple_maybe, tyConClass_maybe, tyConATs, tyConFamInst_maybe, tyConFamInstSig_maybe, tyConFamilyCoercion_maybe, tyConFamilyResVar_maybe, synTyConDefn_maybe, synTyConRhs_maybe, famTyConFlav_maybe, algTyConRhs, newTyConRhs, newTyConEtadArity, newTyConEtadRhs, unwrapNewTyCon_maybe, unwrapNewTyConEtad_maybe, newTyConDataCon_maybe, algTcFields, tyConPromDataConInfo, tyConBinders, tyConResKind, tyConInvisTVBinders, tcTyConScopedTyVars, 
isMonoTcTyCon, tyConHasClosedResKind, mkTyConTagMap, -- ** Manipulating TyCons ExpandSynResult(..), expandSynTyCon_maybe, newTyConCo, newTyConCo_maybe, pprPromotionQuote, mkTyConKind, -- ** Predicated on TyConFlavours tcFlavourIsOpen, -- * Runtime type representation TyConRepName, tyConRepName_maybe, mkPrelTyConRepName, tyConRepModOcc, -- * Primitive representations of Types PrimRep(..), PrimElemRep(..), Levity(..), PrimOrVoidRep(..), primElemRepToPrimRep, isGcPtrRep, primRepSizeB, primRepSizeW64_B, primElemRepSizeB, primElemRepSizeW64_B, primRepIsFloat, primRepsCompatible, primRepCompatible, primRepIsWord, primRepIsInt, ) where import GHC.Prelude import GHC.Platform import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Kind, Type, PredType, mkForAllTy, mkNakedFunTy, mkNakedTyConTy ) import {-# SOURCE #-} GHC.Core.TyCo.FVs ( noFreeVarsOfType ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType ) import {-# SOURCE #-} GHC.Builtin.Types ( runtimeRepTyCon, constraintKind, levityTyCon , multiplicityTyCon , vecCountTyCon, vecElemTyCon ) import {-# SOURCE #-} GHC.Core.DataCon ( DataCon, dataConFieldLabels , dataConTyCon, dataConFullSig , isUnboxedSumDataCon, isTypeDataCon ) import {-# SOURCE #-} GHC.Core.Type ( isLiftedTypeKind ) import GHC.Builtin.Uniques ( tyConRepNameUnique , dataConTyRepNameUnique ) import GHC.Utils.Binary import GHC.Types.Var import GHC.Types.Var.Set import GHC.Core.Class import GHC.Types.Basic import GHC.Types.ForeignCall import GHC.Types.Name import GHC.Types.Name.Env import GHC.Core.Coercion.Axiom import GHC.Builtin.Names import GHC.Data.Maybe import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString.Env import GHC.Types.FieldLabel import GHC.Settings.Constants import GHC.Utils.Misc import GHC.Types.Unique.Set import GHC.Unit.Module import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import qualified Data.Data as Data {- ----------------------------------------------- Notes about type families 
----------------------------------------------- Note [Type synonym families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Type synonym families, also known as "type functions", map directly onto the type functions in FC: type family F a :: Type type instance F Int = Bool ..etc... * Reply "yes" to isTypeFamilyTyCon, and isFamilyTyCon * From the user's point of view (F Int) and Bool are simply equivalent types. * A Haskell 98 type synonym is a degenerate form of a type synonym family. * Type functions can't appear in the LHS of a type function: type instance F (F Int) = ... -- BAD! * Translation of type family decl: type family F a :: Type translates to a FamilyTyCon 'F', whose FamTyConFlav is OpenSynFamilyTyCon type family G a :: Type where G Int = Bool G Bool = Char G a = () translates to a FamilyTyCon 'G', whose FamTyConFlav is ClosedSynFamilyTyCon, with the appropriate CoAxiom representing the equations We also support injective type families -- see Note [Injective type families] Note [Data type families] ~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [Wrappers for data instance tycons] in GHC.Types.Id.Make * Data type families are declared thus data family T a :: Type data instance T Int = T1 | T2 Bool Here T is the "family TyCon". * Reply "yes" to isDataFamilyTyCon, and isFamilyTyCon * The user does not see any "equivalent types" as they did with type synonym families. They just see constructors with types T1 :: T Int T2 :: Bool -> T Int * Here's the FC version of the above declarations: data T a data R:TInt = T1 | T2 Bool axiom ax_ti : T Int ~R R:TInt Note that this is a *representational* coercion The R:TInt is the "representation TyCons". It has an AlgTyConFlav of DataFamInstTyCon T [Int] ax_ti * The axiom ax_ti may be eta-reduced; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom * Data family instances may have a different arity than the data family. 
These axioms come into play
The big thing is that you should not think of a data family T as a *type function* at all, not even an injective one! We can't allow even injective type functions on the LHS of a type function: type family injective G a :: Type type instance F (G Int) = Bool is no good, even if G is injective, because consider type instance G Int = Bool type instance F Bool = Char So a data type family is not an injective type function. It's just a data type with some axioms that connect it to other data types. * The tyConTyVars of the representation tycon are the tyvars that the user wrote in the patterns. This is important in GHC.Tc.Deriv, where we bring these tyvars into scope before type-checking the deriving clause. This fact is arranged for in TcInstDecls.tcDataFamInstDecl. Note [Associated families and their parent class] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *Associated* families are just like *non-associated* families, except that they have a famTcParent field of (Just cls_tc), which identifies the parent class. However there is an important sharing relationship between * the tyConTyVars of the parent Class * the tyConTyVars of the associated TyCon class C a b where data T p a type F a q b Here the 'a' and 'b' are shared with the 'Class'; that is, they have the same Unique. This is important. In an instance declaration we expect * all the shared variables to be instantiated the same way * the non-shared variables of the associated type should not be instantiated at all instance C [x] (Tree y) where data T p [x] = T1 x | T2 p type F [x] q (Tree y) = (x,y,q) Note [TyCon Role signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every tycon has a role signature, assigning a role to each of the tyConTyVars (or of equal length to the tyConArity, if there are no tyConTyVars). An example demonstrates these best: say we have a tycon T, with parameters a at nominal, b at representational, and c at phantom. 
Then, to prove representational equality between T a1 b1 c1 and T a2 b2 c2, we need to have nominal equality between a1 and a2, representational equality between b1 and b2, and nothing in particular (i.e., phantom equality) between c1 and c2. This might happen, say, with the following declaration: data T a b c where MkT :: b -> T Int b c Data and class tycons have their roles inferred (see inferRoles in GHC.Tc.TyCl.Utils), as do vanilla synonym tycons. Family tycons have all parameters at role N, though it is conceivable that we could relax this restriction. (->)'s and tuples' parameters are at role R. Each primitive tycon declares its roles; it's worth noting that (~#)'s parameters are at role N. Promoted data constructors' type arguments are at role R. All kind arguments are at role N. Note [Unboxed tuple RuntimeRep vars] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The contents of an unboxed tuple may have any representation. Accordingly, the kind of the unboxed tuple constructor is runtime-representation polymorphic. Type constructor (2 kind arguments) (#,#) :: forall (q :: RuntimeRep) (r :: RuntimeRep). TYPE q -> TYPE r -> TYPE (TupleRep [q, r]) Data constructor (4 type arguments) (#,#) :: forall (q :: RuntimeRep) (r :: RuntimeRep) (a :: TYPE q) (b :: TYPE r). a -> b -> (# a, b #) These extra tyvars (q and r) cause some delicate processing around tuples, where we need to manually insert RuntimeRep arguments. The same situation happens with unboxed sums: each alternative has its own RuntimeRep. For boxed tuples, there is no representation polymorphism, and therefore we add RuntimeReps only for the unboxed version. Type constructor (no kind arguments) (,) :: Type -> Type -> Type Data constructor (2 type arguments) (,) :: forall a b. 
a -> b -> (a, b) Note [Injective type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow injectivity annotations for type families (both open and closed): type family F (a :: k) (b :: k) = r | r -> a type family G a b = res | res -> a b where ... Injectivity information is stored in the `famTcInj` field of `FamilyTyCon`. `famTcInj` maybe stores a list of Bools, where each entry corresponds to a single element of `tyConTyVars` (both lists should have identical length). If no injectivity annotation was provided `famTcInj` is Nothing. From this follows an invariant that if `famTcInj` is a Just then at least one element in the list must be True. See also: * [Injectivity annotation] in GHC.Hs.Decls * [Renaming injectivity annotation] in GHC.Rename.Module * [Verifying injectivity annotation] in GHC.Core.FamInstEnv * [Type inference for type families with injectivity] in GHC.Tc.Solver.Equality Note [Sharing nullary TyConApps] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nullary type constructor applications are extremely common. For this reason each TyCon carries with it a @TyConApp tycon []@. This ensures that 'mkTyConTy' does not need to allocate and eliminates quite a bit of heap residency. Furthermore, we use 'mkTyConTy' in the nullary case of 'mkTyConApp', ensuring that this function also benefits from sharing. This optimisation improves allocations in the Cabal test by around 0.3% and decreased cache misses measurably. See #19367. 
************************************************************************
*                                                                      *
                TyConBinder
*                                                                      *
************************************************************************
-}

-- | A 'TyCon' binder: a type variable together with its visibility
-- ('TyConBndrVis').  These make up the telescope on the LHS of a type
-- declaration; see Note [The binders/kind/arity fields of a TyCon].
type TyConBinder = VarBndr TyVar TyConBndrVis

-- | Visibility of a 'TyConBinder': either a named, forall-bound variable
-- or an ordinary (anonymous) visible argument.
data TyConBndrVis
  = NamedTCB ForAllTyFlag  -- ^ A named, forall-bound variable (invisible or not)
  | AnonTCB                -- ^ an ordinary, visible type argument

instance Outputable TyConBndrVis where
  ppr (NamedTCB flag) = ppr flag
  ppr AnonTCB         = text "AnonTCB"

mkAnonTyConBinder :: TyVar -> TyConBinder
-- Make a visible anonymous TyCon binder
mkAnonTyConBinder tv = assert (isTyVar tv) $
                       Bndr tv AnonTCB

-- | Make visible anonymous TyCon binders for a list of type variables.
mkAnonTyConBinders :: [TyVar] -> [TyConBinder]
mkAnonTyConBinders tvs = map mkAnonTyConBinder tvs

mkNamedTyConBinder :: ForAllTyFlag -> TyVar -> TyConBinder
-- The odd argument order supports currying
mkNamedTyConBinder vis tv = assert (isTyVar tv) $
                            Bndr tv (NamedTCB vis)

mkNamedTyConBinders :: ForAllTyFlag -> [TyVar] -> [TyConBinder]
-- The odd argument order supports currying
mkNamedTyConBinders vis tvs = map (mkNamedTyConBinder vis) tvs

-- | Make a Required TyConBinder. It chooses between NamedTCB and
-- AnonTCB based on whether the tv is mentioned in the dependent set
mkRequiredTyConBinder :: TyCoVarSet  -- these are used dependently
                      -> TyVar
                      -> TyConBinder
mkRequiredTyConBinder dep_set tv
  | tv `elemVarSet` dep_set = mkNamedTyConBinder Required tv
  | otherwise               = mkAnonTyConBinder tv

-- | Extract the 'ForAllTyFlag' of a 'TyConBinder'.
tyConBinderForAllTyFlag :: TyConBinder -> ForAllTyFlag
tyConBinderForAllTyFlag (Bndr _ vis) = tyConBndrVisForAllTyFlag vis

-- | The 'ForAllTyFlag' for a 'TyConBndrVis'; anonymous binders are Required.
tyConBndrVisForAllTyFlag :: TyConBndrVis -> ForAllTyFlag
tyConBndrVisForAllTyFlag (NamedTCB vis) = vis
tyConBndrVisForAllTyFlag AnonTCB        = Required

isNamedTyConBinder :: TyConBinder -> Bool
-- Identifies kind variables
-- E.g.  data T k (a:k) = blah
-- Here 'k' is a NamedTCB, a variable used in the kind of other binders
isNamedTyConBinder (Bndr _ (NamedTCB {})) = True
isNamedTyConBinder _                      = False

isVisibleTyConBinder :: VarBndr tv TyConBndrVis -> Bool
-- Works for IfaceTyConBinder too
isVisibleTyConBinder (Bndr _ tcb_vis) = isVisibleTcbVis tcb_vis

-- | Is this 'TyConBndrVis' visible?  Anonymous binders always are; named
-- binders are visible iff their 'ForAllTyFlag' is.
isVisibleTcbVis :: TyConBndrVis -> Bool
isVisibleTcbVis (NamedTCB vis) = isVisibleForAllTyFlag vis
isVisibleTcbVis AnonTCB        = True

-- | Is this an invisible Specified binder?
isInvisSpecTcbVis :: TyConBndrVis -> Bool
isInvisSpecTcbVis (NamedTCB Specified) = True
isInvisSpecTcbVis _                    = False

isInvisibleTyConBinder :: VarBndr tv TyConBndrVis -> Bool
-- Works for IfaceTyConBinder too
isInvisibleTyConBinder tcb = not (isVisibleTyConBinder tcb)

-- Build the 'tyConKind' from the binders and the result kind.
-- Keep in sync with 'mkTyConKind' in GHC.Iface.Type.
-- Named binders become foralls in the kind; anonymous binders become arrows.
mkTyConKind :: [TyConBinder] -> Kind -> Kind
mkTyConKind bndrs res_kind = foldr mk res_kind bndrs
  where
    mk :: TyConBinder -> Kind -> Kind
    mk (Bndr tv (NamedTCB vis)) k = mkForAllTy (Bndr tv vis) k
    mk (Bndr tv AnonTCB)        k = mkNakedFunTy FTF_T_T (varType tv) k
    -- mkNakedFunTy: see Note [Naked FunTy] in GHC.Builtin.Types

-- | (mkTyConTy tc) returns (TyConApp tc [])
-- but arranges to share that TyConApp among all calls
-- See Note [Sharing nullary TyConApps]
-- So it's just an alias for tyConNullaryTy!
mkTyConTy :: TyCon -> Type
mkTyConTy tycon = tyConNullaryTy tycon

tyConInvisTVBinders :: [TyConBinder]   -- From the TyCon
                    -> [InvisTVBinder] -- Suitable for the foralls of a term function
-- See Note [Building TyVarBinders from TyConBinders]
tyConInvisTVBinders tc_bndrs
  = map mk_binder tc_bndrs
  where
    -- Anonymous and Required binders both become Specified;
    -- already-invisible binders keep their specificity.
    mk_binder (Bndr tv tc_vis) = mkTyVarBinder vis tv
      where
        vis = case tc_vis of
                AnonTCB                  -> SpecifiedSpec
                NamedTCB Required        -> SpecifiedSpec
                NamedTCB (Invisible vis) -> vis

-- Returns only tyvars, as covars are always inferred
tyConVisibleTyVars :: TyCon -> [TyVar]
tyConVisibleTyVars tc
  = [ tv | Bndr tv vis <- tyConBinders tc
         , isVisibleTcbVis vis ]

{- Note [Building TyVarBinders from TyConBinders]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We sometimes need to build the quantified type of a value from
the TyConBinders of a type or class.  For that we need not
TyConBinders but TyVarBinders (used in forall-type)   E.g:

*  From   data T a = MkT (Maybe a)
   we are going to make a data constructor with type
           MkT :: forall a. Maybe a -> T a
   See the ForAllTyBinders passed to buildDataCon

* From    class C a where { op :: a -> Maybe a }
  we are going to make a default method
          $dmop :: forall a. C a => a -> Maybe a
  See the ForAllTyBinders passed to mkSigmaTy in mkDefaultMethodType

Both of these are user-callable.  (NB: default methods are not callable
directly by the user but rather via the code generated by 'deriving',
which uses visible type application; see mkDefMethBind.)

Since they are user-callable we must get their type-argument visibility
information right; and that info is in the TyConBinders.
Here is an example:

  data App a b = MkApp (a b) -- App :: forall {k}. (k->Type) -> k -> Type

The TyCon has

  tyConTyBinders = [ Named (Bndr (k :: Type) Inferred), Anon (k->Type), Anon k ]

The TyConBinders for App line up with App's kind, given above.

But the DataCon MkApp has the type
  MkApp :: forall {k} (a:k->Type) (b:k).
a b -> App k a b That is, its ForAllTyBinders should be dataConUnivTyVarBinders = [ Bndr (k:Type) Inferred , Bndr (a:k->Type) Specified , Bndr (b:k) Specified ] So tyConTyVarBinders converts TyCon's TyConBinders into TyVarBinders: - variable names from the TyConBinders - but changing Anon/Required to Specified The last part about Required->Specified comes from this: data T k (a :: k) b = MkT (a b) Here k is Required in T's kind, but we didn't have Required binders in types of terms before the advent of the new, experimental RequiredTypeArguments extension. So we historically changed Required to Specified when making MkT's PiTyBinders and now continue to do so to avoid a breaking change. -} {- Note [The binders/kind/arity fields of a TyCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All TyCons have this group of fields tyConBinders :: [TyConBinder] tyConResKind :: Kind tyConTyVars :: [TyVar] -- Cached = binderVars tyConBinders -- NB: Currently (Aug 2018), TyCons that own this -- field really only contain TyVars. So it is -- [TyVar] instead of [TyCoVar]. tyConKind :: Kind -- Cached = mkTyConKind tyConBinders tyConResKind tyConArity :: Arity -- Cached = length tyConBinders They fit together like so: * tyConBinders gives the telescope of type variables on the LHS of the type declaration. For example: type App a (b :: k) = a b tyConBinders = [ Bndr (k::Type) (NamedTCB Inferred) , Bndr (a:k->Type) AnonTCB , Bndr (b:k) AnonTCB ] Note that there are three binders here, including the kind variable k. See Note [tyConBinders and lexical scoping] * See Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep for what the visibility flag means. * Each TyConBinder in tyConBinders has a TyVar, and that TyVar may scope over some other part of the TyCon's definition. 
Eg type T a = a -> a we have tyConBinders = [ Bndr (a:Type) AnonTCB ] synTcRhs = a -> a So the 'a' scopes over the synTcRhs * From the tyConBinders and tyConResKind we can get the tyConKind E.g for our App example: App :: forall k. (k->Type) -> k -> Type We get a 'forall' in the kind for each NamedTCB, and an arrow for each AnonTCB tyConKind is the full kind of the TyCon, not just the result kind * For type families, tyConArity is the arguments this TyCon must be applied to, to be considered saturated. Here we mean "applied to in the actual Type", not surface syntax; i.e. including implicit kind variables. So it's just (length tyConBinders) * For an algebraic data type, or data instance, the tyConResKind is always (TYPE r); that is, the tyConBinders are enough to saturate the type constructor. I'm not quite sure why we have this invariant, but it's enforced by splitTyConKind Note [tyConBinders and lexical scoping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a TyCon, and a PolyTcTyCon, we obey the following rule: The Name of the TyConBinder is precisely the lexically scoped Name from the original declaration (precisely = both OccName and Unique) For example, data T a (b :: wombat) = MkT We will get tyConBinders of [k, wombat, a::k, b::wombat] The 'k' is made up; the user didn't specify it. But for the kind of 'b' we must use 'wombat'. Why do we have this invariant? * Similarly, when typechecking default definitions for class methods, in GHC.Tc.TyCl.Class.tcClassDecl2, we only have the (final) Class available; but the variables bound in that class must be in scope. Example (#19738): type P :: k -> Type data P a = MkP type T :: k -> Constraint class T (a :: j) where f :: P a f = MkP @j @a -- 'j' must be in scope when we typecheck 'f' * When typechecking `deriving` clauses for top-level data declarations, the tcTyConScopedTyVars are brought into scope in through the `di_scoped_tvs` field of GHC.Tc.Deriv.DerivInfo. 
Example (#16731): class C x1 x2 type T :: a -> Type data T (x :: z) deriving (C z) When typechecking `C z`, we want `z` to map to `a`, which is exactly what the tcTyConScopedTyVars for T give us. -} instance OutputableBndr tv => Outputable (VarBndr tv TyConBndrVis) where ppr (Bndr v bi) = ppr bi <+> parens (pprBndr LetBind v) instance Binary TyConBndrVis where put_ bh AnonTCB = do { putByte bh 0 } put_ bh (NamedTCB vis) = do { putByte bh 1; put_ bh vis } get bh = do { h <- getByte bh ; case h of 0 -> return AnonTCB _ -> do { vis <- get bh; return (NamedTCB vis) } } {- ********************************************************************* * * The TyCon type * * ************************************************************************ -} -- | TyCons represent type constructors. Type constructors are introduced by -- things such as: -- -- 1) Data declarations: @data Foo = ...@ creates the @Foo@ type constructor of -- kind @Type@ -- -- 2) Type synonyms: @type Foo = ...@ creates the @Foo@ type constructor -- -- 3) Newtypes: @newtype Foo a = MkFoo ...@ creates the @Foo@ type constructor -- of kind @Type -> Type@ -- -- 4) Class declarations: @class Foo where@ creates the @Foo@ type constructor -- of kind @Constraint@ -- -- This data type also encodes a number of primitive, built in type constructors -- such as those for function and tuple types. -- -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data TyCon = TyCon { tyConUnique :: !Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. 
tyConName :: !Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConResKind :: Kind, -- ^ Result kind tyConHasClosedResKind :: Bool, -- Cached values tyConTyVars :: [TyVar], -- ^ TyVar binders tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ tyConRoles :: [Role], -- ^ The role for each type variable -- This list has length = tyConArity -- See also Note [TyCon Role signatures] tyConDetails :: !TyConDetails } data TyConDetails = -- | Algebraic data types, from -- - @data@ declarations -- - @newtype@ declarations -- - data instance declarations -- - type instance declarations -- - the TyCon generated by a class declaration -- - boxed tuples -- - unboxed tuples -- - constraint tuples -- - unboxed sums -- Data/newtype/type /families/ are handled by 'FamilyTyCon'. -- See 'AlgTyConRhs' for more information. AlgTyCon { -- The tyConTyVars scope over: -- -- 1. The 'algTcStupidTheta' -- 2. The cached types in algTyConRhs.NewTyCon -- 3. The family instance types if present -- -- Note that it does /not/ scope over the data -- constructors. tyConCType :: Maybe CType,-- ^ The C type that should be used -- for this type when using the FFI -- and CAPI algTcGadtSyntax :: Bool, -- ^ Was the data type declared with GADT -- syntax? If so, that doesn't mean it's a -- true GADT; only that the "where" form -- was used. This field is used only to -- guide pretty-printing algTcStupidTheta :: [PredType], -- ^ The \"stupid theta\" for the data -- type (always empty for GADTs). A -- \"stupid theta\" is the context to -- the left of an algebraic type -- declaration, e.g. @Eq a@ in the -- declaration @data Eq a => T a ...@. -- See @Note [The stupid context]@ in -- "GHC.Core.DataCon". 
algTcRhs :: AlgTyConRhs, -- ^ Contains information about the -- data constructors of the algebraic type algTcFields :: FieldLabelEnv, -- ^ Maps a label to information -- about the field algTcFlavour :: AlgTyConFlav -- ^ The flavour of this algebraic tycon. -- Gives the class or family declaration -- 'TyCon' for derived 'TyCon's representing -- class or family instances, respectively. } -- | Represents type synonyms | SynonymTyCon { -- tyConTyVars scope over: synTcRhs synTcRhs :: Type, -- ^ Contains information about the expansion -- of the synonym synIsTau :: Bool, -- True <=> the RHS of this synonym does not -- have any foralls, after expanding any -- nested synonyms synIsFamFree :: Bool, -- True <=> the RHS of this synonym does not mention -- any type synonym families (data families -- are fine), again after expanding any -- nested synonyms synIsForgetful :: Bool, -- See Note [Forgetful type synonyms] -- True <= at least one argument is not mentioned -- in the RHS (or is mentioned only under -- forgetful synonyms) -- Test is conservative, so True does not guarantee -- forgetfulness. False conveys definite information -- (definitely not forgetful); True is always safe. synIsConcrete :: Bool -- True <= If 'tys' are concrete then the expansion -- of (S tys) is definitely concrete -- But False is always safe } -- | Represents families (both type and data) -- Argument roles are all Nominal | FamilyTyCon { -- tyConTyVars connect an associated family TyCon -- with its parent class; see GHC.Tc.Validity.checkConsistentFamInst famTcResVar :: Maybe Name, -- ^ Name of result type variable, used -- for pretty-printing with --show-iface -- and for reifying TyCon in Template -- Haskell famTcFlav :: FamTyConFlav, -- ^ Type family flavour: open, closed, -- abstract, built-in. 
See comments for -- FamTyConFlav famTcParent :: Maybe TyCon, -- ^ For *associated* type/data families -- The class tycon in which the family is declared -- See Note [Associated families and their parent class] famTcInj :: Injectivity -- ^ is this a type family injective in -- its type variables? Nothing if no -- injectivity annotation was given } -- | Primitive types; cannot be defined in Haskell. This includes -- the usual suspects (such as @Int#@) as well as foreign-imported -- types and kinds (@*@, @#@, and @?@) | PrimTyCon { primRepName :: TyConRepName -- ^ The 'Typeable' representation. -- A cached version of -- @'mkPrelTyConRepName' ('tyConName' tc)@. } -- | Represents promoted data constructor. -- The kind of a promoted data constructor is the *wrapper* type of -- the original data constructor. This type must not have constraints -- (as checked in GHC.Tc.Gen.HsType.tcTyVar). | PromotedDataCon { -- See Note [Promoted data constructors] dataCon :: DataCon, -- ^ Corresponding data constructor tcRepName :: TyConRepName, promDcInfo :: PromDataConInfo -- ^ See comments with 'PromDataConInfo' } -- | These exist only during type-checking. -- See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in "GHC.Tc.TyCl" | TcTyCon { -- NB: the tyConArity of a TcTyCon must match -- the number of Required (positional, user-specified) -- arguments to the type constructor; see the use -- of tyConArity in generaliseTcTyCon tctc_scoped_tvs :: [(Name,TcTyVar)], -- ^ Scoped tyvars over the tycon's body -- The range is always a skolem or TcTyVar, be -- MonoTcTyCon only: see Note [Scoped tyvars in a TcTyCon] tctc_is_poly :: Bool, -- ^ Is this TcTyCon already generalized? -- Used only to make zonking more efficient tctc_flavour :: TyConFlavour TyCon -- ^ What sort of 'TyCon' this represents. } {- Note [Scoped tyvars in a TcTyCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The tcTyConScopedTyVars field records the lexicial-binding connection between the original, user-specified Name (i.e. 
thing in scope) and the TcTyVar that the Name is bound to. Order *does* matter; the tcTyConScopedTyVars list consists of specified_tvs ++ required_tvs where * specified ones first * required_tvs the same as tyConTyVars * tyConArity = length required_tvs tcTyConScopedTyVars are used only for MonoTcTyCons, not PolyTcTyCons. See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in GHC.Tc.TyCl Note [Representation-polymorphic TyCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To check for representation-polymorphism directly in the typechecker, e.g. when using GHC.Tc.Utils.TcMType.checkTypeHasFixedRuntimeRep, we need to compute whether a type has a syntactically fixed RuntimeRep, as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. It's useful to have a quick way to check whether a saturated application of a type constructor has a fixed RuntimeRep. That is, we want to know, given a TyCon 'T' of arity 'n', does T a_1 ... a_n always have a fixed RuntimeRep? That is, is it always the case that this application has a kind of the form T a_1 ... a_n :: TYPE rep in which 'rep' is a concrete 'RuntimeRep'? ('Concrete' in the sense of Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete: it contains no type-family applications or type variables.) To answer this question, we have 'tcHasFixedRuntimeRep'. If 'tcHasFixedRuntimeRep' returns 'True', it means we're sure that every saturated application of `T` has a fixed RuntimeRep. However, if it returns 'False', we don't know: perhaps some application might not have a fixed RuntimeRep. Examples: - For type families, we won't know in general whether an application will have a fixed RuntimeRep: type F :: k -> k type family F a where {..} `tcHasFixedRuntimeRep F = False' - For newtypes, we're usually OK: newtype N a b c = MkN Int No matter what arguments we apply `N` to, we always get something of kind `Type`, which has a fixed RuntimeRep. Thus `tcHasFixedRuntimeRep N = True`. 
However, with `-XUnliftedNewtypes`, we can have representation-polymorphic newtypes: type UN :: TYPE rep -> TYPE rep newtype UN a = MkUN a `tcHasFixedRuntimeRep UN = False` For example, `UN @Int8Rep Int8#` is represented by an 8-bit value, while `UN @LiftedRep Int` is represented by a heap pointer. To distinguish whether we are dealing with a representation-polymorphic newtype, we keep track of which situation we are in using the 'nt_fixed_rep' field of the 'NewTyCon' constructor of 'AlgTyConRhs', and read this field to compute 'tcHasFixedRuntimeRep'. - A similar story can be told for datatypes: we're usually OK, except with `-XUnliftedDatatypes` which allows for levity polymorphism, e.g.: type UC :: TYPE (BoxedRep l) -> TYPE (BoxedRep l) type UC a = MkUC a `tcHasFixedRuntimeRep UC = False` Here, we keep track of whether we are dealing with a levity-polymorphic unlifted datatype using the 'data_fixed_lev' field of the 'DataTyCon' constructor of 'AlgTyConRhs'. N.B.: technically, the representation of a datatype is fixed, as it is always a pointer. However, we currently require that we know the specific `RuntimeRep`: knowing that it's `BoxedRep l` for a type-variable `l` isn't enough. See #15532. -} -- | Represents right-hand-sides of 'TyCon's for algebraic types data AlgTyConRhs -- | Says that we know nothing about this data type, except that -- it's represented by a pointer. Used when we export a data type -- abstractly into an .hi file. = AbstractTyCon -- | Information about those 'TyCon's derived from a @data@ -- declaration. This includes data types with no constructors at -- all. | DataTyCon { data_cons :: [DataCon], -- ^ The data type constructors; can be empty if the -- user declares the type to have no constructors -- -- INVARIANT: Kept in order of increasing 'DataCon' -- tag (see the tag assignment in mkTyConTagMap) data_cons_size :: Int, -- ^ Cached value: length data_cons is_enum :: Bool, -- ^ Cached value: is this an enumeration type? 
-- See Note [Enumeration types] is_type_data :: Bool, -- from a "type data" declaration -- See Note [Type data declarations] in GHC.Rename.Module data_fixed_lev :: Bool -- ^ 'True' if the data type constructor has -- a known, fixed levity when fully applied -- to its arguments, False otherwise. -- -- This can only be 'False' with UnliftedDatatypes, -- e.g. -- -- > data A :: TYPE (BoxedRep l) where { MkA :: Int -> A } -- -- This boolean is cached to make it cheaper to check -- for levity and representation-polymorphism in -- tcHasFixedRuntimeRep. } | TupleTyCon { -- A boxed, unboxed, or constraint tuple data_con :: DataCon, -- NB: it can be an *unboxed* tuple tup_sort :: TupleSort -- ^ Is this a boxed, unboxed or constraint -- tuple? } -- | An unboxed sum type. | SumTyCon { data_cons :: [DataCon], data_cons_size :: Int -- ^ Cached value: length data_cons } -- | Information about those 'TyCon's derived from a @newtype@ declaration | NewTyCon { data_con :: DataCon, -- ^ The unique constructor for the @newtype@. -- It has no existentials nt_rhs :: Type, -- ^ Cached value: the argument type of the -- constructor, which is just the representation -- type of the 'TyCon' (remember that @newtype@s -- do not exist at runtime so need a different -- representation type). -- -- The free 'TyVar's of this type are the -- 'tyConTyVars' from the corresponding 'TyCon' nt_etad_rhs :: ([TyVar], Type), -- ^ Same as the 'nt_rhs', but this time eta-reduced. -- Hence the list of 'TyVar's in this field may be -- shorter than the declared arity of the 'TyCon'. -- See Note [Newtype eta] nt_co :: CoAxiom Unbranched, -- The axiom coercion that creates the @newtype@ -- from the representation 'Type'. The axiom witnesses -- a representational coercion: -- nt_co :: N ty1 ~R# rep_tys -- See Note [Newtype coercions] -- Invariant: arity = #tvs in nt_etad_rhs; -- See Note [Newtype eta] -- Watch out! If any newtypes become transparent -- again check #1072. 
nt_fixed_rep :: Bool -- ^ 'True' if the newtype has a known, fixed representation -- when fully applied to its arguments, 'False' otherwise. -- This can only ever be 'False' with UnliftedNewtypes. -- -- Example: -- -- > newtype N (a :: TYPE r) = MkN a -- -- Invariant: nt_fixed_rep nt = tcHasFixedRuntimeRep (nt_rhs nt) -- -- This boolean is cached to make it cheaper to check if a -- variable binding is representation-polymorphic -- in tcHasFixedRuntimeRep. } mkSumTyConRhs :: [DataCon] -> AlgTyConRhs mkSumTyConRhs data_cons = SumTyCon data_cons (length data_cons) -- | Create an 'AlgTyConRhs' from the data constructors, -- for a potentially levity-polymorphic datatype (with `UnliftedDatatypes`). mkLevPolyDataTyConRhs :: Bool -- ^ whether the 'DataCon' has a fixed levity -> Bool -- ^ True if this is a "type data" declaration -- See Note [Type data declarations] -- in GHC.Rename.Module -> [DataCon] -> AlgTyConRhs mkLevPolyDataTyConRhs fixed_lev type_data cons = DataTyCon { data_cons = cons, data_cons_size = length cons, is_enum = not (null cons) && all is_enum_con cons, -- See Note [Enumeration types] in GHC.Core.TyCon is_type_data = type_data, data_fixed_lev = fixed_lev } where is_enum_con con | (_univ_tvs, ex_tvs, eq_spec, theta, arg_tys, _res) <- dataConFullSig con = null ex_tvs && null eq_spec && null theta && null arg_tys -- | Create an 'AlgTyConRhs' from the data constructors. -- -- Use 'mkLevPolyDataConRhs' if the datatype can be levity-polymorphic -- or if it comes from a "data type" declaration mkDataTyConRhs :: [DataCon] -> AlgTyConRhs mkDataTyConRhs = mkLevPolyDataTyConRhs True False -- | Some promoted datacons signify extra info relevant to GHC. For example, -- the `IntRep` constructor of `RuntimeRep` corresponds to the 'IntRep' -- constructor of 'PrimRep'. This data structure allows us to store this -- information right in the 'TyCon'. The other approach would be to look -- up things like `RuntimeRep`'s `PrimRep` by known-key every time. 
-- See also Note [Getting from RuntimeRep to PrimRep] in "GHC.Types.RepType" data PromDataConInfo = NoPromInfo -- ^ an ordinary promoted data con | RuntimeRep ([Type] -> [PrimRep]) -- ^ A constructor of `RuntimeRep`. The argument to the function should -- be the list of arguments to the promoted datacon. | VecCount Int -- ^ A constructor of `VecCount` | VecElem PrimElemRep -- ^ A constructor of `VecElem` | Levity Levity -- ^ A constructor of `Levity` -- | Extract those 'DataCon's that we are able to learn about. Note -- that visibility in this sense does not correspond to visibility in -- the context of any particular user program! visibleDataCons :: AlgTyConRhs -> [DataCon] visibleDataCons (AbstractTyCon {}) = [] visibleDataCons (DataTyCon{ data_cons = cs }) = cs visibleDataCons (NewTyCon{ data_con = c }) = [c] visibleDataCons (TupleTyCon{ data_con = c }) = [c] visibleDataCons (SumTyCon{ data_cons = cs }) = cs -- | Describes the flavour of an algebraic type constructor. For -- classes and data families, this flavour includes a reference to -- the parent 'TyCon'. data AlgTyConFlav = -- | An ordinary algebraic type constructor. This includes unlifted and -- representation-polymorphic datatypes and newtypes and unboxed tuples, -- but NOT unboxed sums; see UnboxedSumTyCon. VanillaAlgTyCon TyConRepName -- For Typeable -- | An unboxed sum type constructor. This is distinct from VanillaAlgTyCon -- because we currently don't allow unboxed sums to be Typeable since -- there are too many of them. See #13276. | UnboxedSumTyCon -- | Type constructors representing a class dictionary. -- See Note [ATyCon for classes] in "GHC.Core.TyCo.Rep" | ClassTyCon Class -- INVARIANT: the classTyCon of this Class is the -- current tycon TyConRepName -- | Type constructors representing an *instance* of a *data* family. -- Parameters: -- -- 1) The type family in question -- -- 2) Instance types; free variables are the 'tyConTyVars' -- of the current 'TyCon' (not the family one). 
INVARIANT: -- the number of types matches the arity of the family 'TyCon' -- -- 3) A 'CoTyCon' identifying the representation -- type with the type instance family | DataFamInstTyCon -- See Note [Data type families] (CoAxiom Unbranched) -- The coercion axiom. -- A *Representational* coercion, -- of kind T ty1 ty2 ~R R:T a b c -- where T is the family TyCon, -- and R:T is the representation TyCon (ie this one) -- and a,b,c are the tyConTyVars of this TyCon -- -- BUT may be eta-reduced; see -- Note [Eta reduction for data families] in -- GHC.Core.Coercion.Axiom -- Cached fields of the CoAxiom, but adjusted to -- use the tyConTyVars of this TyCon TyCon -- The family TyCon [Type] -- Argument types (mentions the tyConTyVars of this TyCon) -- No shorter in length than the tyConTyVars of the family TyCon -- How could it be longer? See [Arity of data families] in GHC.Core.FamInstEnv -- E.g. data instance T [a] = ... -- gives a representation tycon: -- data R:TList a = ... -- axiom co a :: T [a] ~ R:TList a -- with R:TList's algTcFlavour = DataFamInstTyCon T [a] co instance Outputable AlgTyConFlav where ppr (VanillaAlgTyCon {}) = text "Vanilla ADT" ppr (UnboxedSumTyCon {}) = text "Unboxed sum" ppr (ClassTyCon cls _) = text "Class parent" <+> ppr cls ppr (DataFamInstTyCon _ tc tys) = text "Family parent (family instance)" <+> ppr tc <+> sep (map pprType tys) -- | Checks the invariants of a 'AlgTyConFlav' given the appropriate type class -- name, if any okParent :: Name -> AlgTyConFlav -> Bool okParent _ (VanillaAlgTyCon {}) = True okParent _ (UnboxedSumTyCon {}) = True okParent tc_name (ClassTyCon cls _) = tc_name == tyConName (classTyCon cls) okParent _ (DataFamInstTyCon _ fam_tc tys) = tys `lengthAtLeast` tyConArity fam_tc isNoParent :: AlgTyConFlav -> Bool isNoParent (VanillaAlgTyCon {}) = True isNoParent _ = False -------------------- data Injectivity = NotInjective | Injective [Bool] -- 1-1 with tyConTyVars (incl kind vars) deriving( Eq ) -- | Information pertaining to 
the expansion of a type synonym (@type@) data FamTyConFlav = -- | Represents an open type family without a fixed right hand -- side. Additional instances can appear at any time. -- -- These are introduced by either a top level declaration: -- -- > data family T a :: Type -- -- Or an associated data type declaration, within a class declaration: -- -- > class C a b where -- > data T b :: Type DataFamilyTyCon TyConRepName -- | An open type synonym family e.g. @type family F x y :: Type -> Type@ | OpenSynFamilyTyCon -- | A closed type synonym family e.g. -- @type family F x where { F Int = Bool }@ | ClosedSynFamilyTyCon (Maybe (CoAxiom Branched)) -- See Note [Closed type families] -- | A closed type synonym family declared in an hs-boot file with -- type family F a where .. | AbstractClosedSynFamilyTyCon -- | Built-in type family used by the TypeNats solver | BuiltInSynFamTyCon BuiltInSynFamily instance Outputable FamTyConFlav where ppr (DataFamilyTyCon n) = text "data family" <+> ppr n ppr OpenSynFamilyTyCon = text "open type family" ppr (ClosedSynFamilyTyCon Nothing) = text "closed type family" ppr (ClosedSynFamilyTyCon (Just coax)) = text "closed type family" <+> ppr coax ppr AbstractClosedSynFamilyTyCon = text "abstract closed type family" ppr (BuiltInSynFamTyCon _) = text "built-in type family" {- Note [Closed type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * In an open type family you can add new instances later. This is the usual case. * In a closed type family you can only put equations where the family is defined. A non-empty closed type family has a single axiom with multiple branches, stored in the 'ClosedSynFamilyTyCon' constructor. A closed type family with no equations does not have an axiom, because there is nothing for the axiom to prove! Note [Promoted data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All data constructors can be promoted to become a type constructor, via the PromotedDataCon alternative in GHC.Core.TyCon. 
* The TyCon promoted from a DataCon has the *same* Name and Unique as the DataCon. Eg. If the data constructor Data.Maybe.Just(unique 78) is promoted to a TyCon whose name is Data.Maybe.Just(unique 78) * We promote the *user* type of the DataCon. Eg data T = MkT {-# UNPACK #-} !(Bool, Bool) The promoted kind is 'MkT :: (Bool,Bool) -> T *not* 'MkT :: Bool -> Bool -> T * Similarly for GADTs: data G a where MkG :: forall b. b -> G [b] The promoted data constructor has kind 'MkG :: forall b. b -> G [b] *not* 'MkG :: forall a b. (a ~# [b]) => b -> G a Note [Enumeration types] ~~~~~~~~~~~~~~~~~~~~~~~~ We define datatypes with no constructors to *not* be enumerations; this fixes #2578, Otherwise we end up generating an empty table for __closure_tbl which is used by tagToEnum# to map Int# to constructors in an enumeration. The empty table apparently upset the linker. Moreover, all the data constructor must be enumerations, meaning they have type (forall abc. T a b c). GADTs are not enumerations. For example consider data T a where T1 :: T Int T2 :: T Bool T3 :: T a What would [T1 ..] be? [T1,T3] :: T Int? Easiest thing is to exclude them. See #4528. Note [Newtype coercions] ~~~~~~~~~~~~~~~~~~~~~~~~ The NewTyCon field nt_co is a CoAxiom which is used for coercing from the representation type of the newtype, to the newtype itself. For example, newtype T a = MkT (a -> a) the NewTyCon for T will contain nt_co = CoT where CoT :: forall a. T a ~ a -> a. We might also eta-contract the axiom: see Note [Newtype eta]. Note [Newtype eta] ~~~~~~~~~~~~~~~~~~ Consider newtype Parser a = MkParser (IO a) deriving Monad Are these two types equal? That is, does a coercion exist between them? Monad Parser Monad IO (We need this coercion to make the derived instance for Monad Parser.) Well, yes. But to see that easily we eta-reduce the RHS type of Parser, in this case to IO, so that even unsaturated applications of Parser will work right. So instead of axParser :: forall a. 
   Parser a ~ IO a

we generate an eta-reduced axiom

  axParser :: Parser ~ IO

This eta reduction is done when the type constructor is built, in
GHC.Tc.TyCl.Build.mkNewTyConRhs, and cached in NewTyCon.

Here's an example that I think showed up in practice.
Source code:
  newtype T a = MkT [a]
  newtype Foo m = MkFoo (forall a. m a -> Int)

  w1 :: Foo []
  w1 = ...

  w2 :: Foo T
  w2 = MkFoo (\(MkT x) -> case w1 of MkFoo f -> f x)

After desugaring, and discarding the data constructors for the
newtypes, we would like to get:

  w2 = w1 `cast` Foo axT

so that w2 and w1 share the same code.  To do this, the coercion axiom
axT must have

  kind:  axT :: T ~ []
  arity: 0

See also Note [Newtype eta and homogeneous axioms] in GHC.Tc.TyCl.Build.

************************************************************************
*                                                                      *
                 TyConRepName
*                                                                      *
********************************************************************* -}

-- | The Name of the top-level declaration for the Typeable world
--      $tcMaybe :: Data.Typeable.Internal.TyCon
--      $tcMaybe = TyCon { tyConName = "Maybe", ... }
type TyConRepName = Name

-- | Fetch the 'TyConRepName' cached in the 'TyCon', if it has one.
-- Unboxed sums and data-family-instance representation tycons have no
-- Typeable representation (see #13276 for the sum case).
tyConRepName_maybe :: TyCon -> Maybe TyConRepName
tyConRepName_maybe (TyCon { tyConDetails = details }) = get_rep_nm details
  where
    get_rep_nm (PrimTyCon { primRepName = rep_nm })
      = Just rep_nm
    get_rep_nm (AlgTyCon { algTcFlavour = parent })
      = case parent of
          VanillaAlgTyCon rep_nm -> Just rep_nm
          UnboxedSumTyCon        -> Nothing
          ClassTyCon _ rep_nm    -> Just rep_nm
          DataFamInstTyCon {}    -> Nothing
    get_rep_nm (FamilyTyCon { famTcFlav = DataFamilyTyCon rep_nm })
      = Just rep_nm
    get_rep_nm (PromotedDataCon { dataCon = dc, tcRepName = rep_nm })
      | isUnboxedSumDataCon dc  -- see #13276
      = Nothing
      | otherwise
      = Just rep_nm
    get_rep_nm _ = Nothing

-- | Make a 'Name' for the 'Typeable' representation of the given wired-in type
mkPrelTyConRepName :: Name -> TyConRepName
-- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable".
mkPrelTyConRepName tc_name  -- Prelude tc_name is always External,
                            -- so nameModule will work
  = mkExternalName rep_uniq rep_mod rep_occ (nameSrcSpan tc_name)
  where
    name_occ  = nameOccName tc_name
    name_mod  = nameModule  tc_name
    name_uniq = nameUnique  tc_name
    -- Pick the unique domain by whether the promoted thing is a tycon
    -- or a data constructor
    rep_uniq | isTcOcc name_occ = tyConRepNameUnique   name_uniq
             | otherwise        = dataConTyRepNameUnique name_uniq
    (rep_mod, rep_occ) = tyConRepModOcc name_mod name_occ

-- | The name (and defining module) for the Typeable representation (TyCon) of a
-- type constructor.
--
-- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable".
tyConRepModOcc :: Module -> OccName -> (Module, OccName)
tyConRepModOcc tc_module tc_occ
  = (rep_module, mkTyConRepOcc tc_occ)
  where
    -- GHC.Prim has no Haskell code, so its Typeable bindings live in GHC.Types
    rep_module | tc_module == gHC_PRIM = gHC_TYPES
               | otherwise             = tc_module

{- *********************************************************************
*                                                                      *
                        PrimRep
*                                                                      *
************************************************************************

Note [rep swamp]
~~~~~~~~~~~~~~~~
GHC has a rich selection of types that represent "primitive types" of
one kind or another.  Each of them makes a different set of
distinctions, and mostly the differences are for good reasons,
although it's probably true that we could merge some of these.

Roughly in order of "includes more information":

 - A Width ("GHC.Cmm.Type") is simply a binary value with the specified
   number of bits.  It may represent a signed or unsigned integer, a
   floating-point value, or an address.

     data Width = W8 | W16 | W32 | W64 | W128

 - Size, which is used in the native code generator, is Width +
   floating point information.

     data Size = II8 | II16 | II32 | II64 | FF32 | FF64

   it is necessary because e.g. the instruction to move a 64-bit float
   on x86 (movsd) is different from the instruction to move a 64-bit
   integer (movq), so the mov instruction is parameterised by Size.

 - CmmType wraps Width with more information: GC ptr, float, or
   other value.
data CmmType = CmmType CmmCat Width data CmmCat -- "Category" (not exported) = GcPtrCat -- GC pointer | BitsCat -- Non-pointer | FloatCat -- Float It is important to have GcPtr information in Cmm, since we generate info tables containing pointerhood for the GC from this. As for why we have float (and not signed/unsigned) here, see Note [Signed vs unsigned]. - ArgRep makes only the distinctions necessary for the call and return conventions of the STG machine. It is essentially CmmType + void. - PrimRep makes a few more distinctions than ArgRep: it divides non-GC-pointers into signed/unsigned and addresses, information that is necessary for passing these values to foreign functions. There's another tension here: whether the type encodes its size in bytes, or whether its size depends on the machine word size. Width and CmmType have the size built-in, whereas ArgRep and PrimRep do not. This means to turn an ArgRep/PrimRep into a CmmType requires DynFlags. On the other hand, CmmType includes some "nonsense" values, such as CmmType GcPtrCat W32 on a 64-bit machine. The PrimRep type is closely related to the user-visible RuntimeRep type. See Note [RuntimeRep and PrimRep] in GHC.Types.RepType. -} -- | A 'PrimRep' is an abstraction of a /non-void/ type. -- (Use 'PrimRepOrVoidRep' if you want void types too.) -- It contains information that the code generator needs -- in order to pass arguments, return results, -- and store values of this type. See also Note [RuntimeRep and PrimRep] in -- "GHC.Types.RepType" and Note [VoidRep] in "GHC.Types.RepType". 
data PrimRep
  -- Unpacking of sum types is only supported since 9.6.1
#if MIN_VERSION_GLASGOW_HASKELL(9,6,0,0)
  = BoxedRep {-# UNPACK #-} !(Maybe Levity) -- ^ Boxed, heap value
#else
  = BoxedRep !(Maybe Levity) -- ^ Boxed, heap value
#endif
  | Int8Rep       -- ^ Signed, 8-bit value
  | Int16Rep      -- ^ Signed, 16-bit value
  | Int32Rep      -- ^ Signed, 32-bit value
  | Int64Rep      -- ^ Signed, 64 bit value
  | IntRep        -- ^ Signed, word-sized value
  | Word8Rep      -- ^ Unsigned, 8 bit value
  | Word16Rep     -- ^ Unsigned, 16 bit value
  | Word32Rep     -- ^ Unsigned, 32 bit value
  | Word64Rep     -- ^ Unsigned, 64 bit value
  | WordRep       -- ^ Unsigned, word-sized value
  | AddrRep       -- ^ A pointer, but /not/ to a Haskell value (use 'BoxedRep')
  | FloatRep
  | DoubleRep
  | VecRep Int PrimElemRep    -- ^ A vector
  deriving( Data.Data, Eq, Ord, Show )

-- | 'PrimRep' extended with a distinguished void representation.
data PrimOrVoidRep = VoidRep | NVRep PrimRep
  -- See Note [VoidRep] in GHC.Types.RepType
  deriving (Data.Data, Eq, Ord, Show)

-- | Element representations of a 'VecRep' vector.
data PrimElemRep
  = Int8ElemRep
  | Int16ElemRep
  | Int32ElemRep
  | Int64ElemRep
  | Word8ElemRep
  | Word16ElemRep
  | Word32ElemRep
  | Word64ElemRep
  | FloatElemRep
  | DoubleElemRep
  deriving( Data.Data, Eq, Ord, Show, Enum )

instance Outputable PrimRep where
  ppr r = text (show r)

instance Outputable PrimElemRep where
  ppr r = text (show r)

-- NB: the byte tags below (0..16) are a stable serialisation format;
-- the 'put_' and 'get' tables must stay in sync.
instance Binary PrimRep where
  put_ bh (BoxedRep ml)  = case ml of
    -- cheaper storage of the levity than using
    -- the Binary (Maybe Levity) instance
    Nothing       -> putByte bh 0
    Just Lifted   -> putByte bh 1
    Just Unlifted -> putByte bh 2
  put_ bh Int8Rep        = putByte bh 3
  put_ bh Int16Rep       = putByte bh 4
  put_ bh Int32Rep       = putByte bh 5
  put_ bh Int64Rep       = putByte bh 6
  put_ bh IntRep         = putByte bh 7
  put_ bh Word8Rep       = putByte bh 8
  put_ bh Word16Rep      = putByte bh 9
  put_ bh Word32Rep      = putByte bh 10
  put_ bh Word64Rep      = putByte bh 11
  put_ bh WordRep        = putByte bh 12
  put_ bh AddrRep        = putByte bh 13
  put_ bh FloatRep       = putByte bh 14
  put_ bh DoubleRep      = putByte bh 15
  put_ bh (VecRep n per) = putByte bh 16 *> put_ bh n *> put_ bh per
  get bh = do
    h <- getByte bh
    case h of
      0  -> pure $ BoxedRep Nothing
      1  -> pure $ BoxedRep (Just Lifted)
      2  -> pure $ BoxedRep (Just Unlifted)
      3  -> pure Int8Rep
      4  -> pure Int16Rep
      5  -> pure Int32Rep
      6  -> pure Int64Rep
      7  -> pure IntRep
      8  -> pure Word8Rep
      9  -> pure Word16Rep
      10 -> pure Word32Rep
      11 -> pure Word64Rep
      12 -> pure WordRep
      13 -> pure AddrRep
      14 -> pure FloatRep
      15 -> pure DoubleRep
      16 -> VecRep <$> get bh <*> get bh
      _  -> pprPanic "Binary:PrimRep" (int (fromIntegral h))

instance Binary PrimElemRep where
  put_ bh per = putByte bh (fromIntegral (fromEnum per))
  get bh = toEnum . fromIntegral <$> getByte bh

-- | Is this representation a GC-traversed pointer?
isGcPtrRep :: PrimRep -> Bool
isGcPtrRep (BoxedRep _) = True
isGcPtrRep _            = False

-- A PrimRep is compatible with another iff one can be coerced to the other.
-- See Note [Bad unsafe coercion] in GHC.Core.Lint for when are two types coercible.
primRepCompatible :: Platform -> PrimRep -> PrimRep -> Bool
primRepCompatible platform rep1 rep2 =
    (isUnboxed rep1 == isUnboxed rep2) &&
    (primRepSizeB platform rep1 == primRepSizeB platform rep2) &&
    (primRepIsFloat rep1 == primRepIsFloat rep2)
  where
    isUnboxed = not . isGcPtrRep

-- More general version of `primRepCompatible` for types represented by zero or
-- more than one PrimReps.
primRepsCompatible :: Platform -> [PrimRep] -> [PrimRep] -> Bool
primRepsCompatible platform reps1 reps2 =
    length reps1 == length reps2 &&
    and (zipWith (primRepCompatible platform) reps1 reps2)

-- | The size of a 'PrimRep' in bytes.
--
-- This applies also when used in a constructor, where we allow packing the
-- fields. For instance, in @data Foo = Foo Float# Float#@ the two fields will
-- take only 8 bytes, which for 64-bit arch will be equal to 1 word.
-- See also mkVirtHeapOffsetsWithPadding for details of how data fields are
-- laid out.
primRepSizeB :: Platform -> PrimRep -> Int
primRepSizeB platform = \case
   IntRep           -> platformWordSizeInBytes platform
   WordRep          -> platformWordSizeInBytes platform
   Int8Rep          -> 1
   Int16Rep         -> 2
   Int32Rep         -> 4
   Int64Rep         -> 8
   Word8Rep         -> 1
   Word16Rep        -> 2
   Word32Rep        -> 4
   Word64Rep        -> 8
   FloatRep         -> fLOAT_SIZE
   DoubleRep        -> dOUBLE_SIZE
   AddrRep          -> platformWordSizeInBytes platform
   BoxedRep _       -> platformWordSizeInBytes platform
   (VecRep len rep) -> len * primElemRepSizeB platform rep

-- | Like primRepSizeB but assumes pointers/words are 8 words wide.
--
-- This can be useful to compute the size of a rep as if we were compiling
-- for a 64bit platform.
primRepSizeW64_B :: PrimRep -> Int
primRepSizeW64_B = \case
   IntRep           -> 8
   WordRep          -> 8
   Int8Rep          -> 1
   Int16Rep         -> 2
   Int32Rep         -> 4
   Int64Rep         -> 8
   Word8Rep         -> 1
   Word16Rep        -> 2
   Word32Rep        -> 4
   Word64Rep        -> 8
   FloatRep         -> fLOAT_SIZE
   DoubleRep        -> dOUBLE_SIZE
   AddrRep          -> 8
   BoxedRep{}       -> 8
   (VecRep len rep) -> len * primElemRepSizeW64_B rep

-- | The size of a vector element, in bytes, on the given platform.
primElemRepSizeB :: Platform -> PrimElemRep -> Int
primElemRepSizeB platform = primRepSizeB platform . primElemRepToPrimRep

-- | Like primElemRepSizeB but assumes pointers/words are 8 words wide.
--
-- This can be useful to compute the size of a rep as if we were compiling
-- for a 64bit platform.
primElemRepSizeW64_B :: PrimElemRep -> Int
primElemRepSizeW64_B = primRepSizeW64_B . primElemRepToPrimRep

-- | The scalar 'PrimRep' corresponding to a vector-element rep.
primElemRepToPrimRep :: PrimElemRep -> PrimRep
primElemRepToPrimRep Int8ElemRep   = Int8Rep
primElemRepToPrimRep Int16ElemRep  = Int16Rep
primElemRepToPrimRep Int32ElemRep  = Int32Rep
primElemRepToPrimRep Int64ElemRep  = Int64Rep
primElemRepToPrimRep Word8ElemRep  = Word8Rep
primElemRepToPrimRep Word16ElemRep = Word16Rep
primElemRepToPrimRep Word32ElemRep = Word32Rep
primElemRepToPrimRep Word64ElemRep = Word64Rep
primElemRepToPrimRep FloatElemRep  = FloatRep
primElemRepToPrimRep DoubleElemRep = DoubleRep

-- | Return if Rep stands for floating type,
-- returns Nothing for vector types.
primRepIsFloat :: PrimRep -> Maybe Bool
primRepIsFloat  FloatRep     = Just True
primRepIsFloat  DoubleRep    = Just True
primRepIsFloat  (VecRep _ _) = Nothing
primRepIsFloat  _            = Just False

-- Rep is one of the word reps.
primRepIsWord :: PrimRep -> Bool
primRepIsWord WordRep     = True
primRepIsWord (Word8Rep)  = True
primRepIsWord (Word16Rep) = True
primRepIsWord (Word32Rep) = True
primRepIsWord (Word64Rep) = True
primRepIsWord _           = False

-- Rep is one of the int reps.
primRepIsInt :: PrimRep -> Bool
primRepIsInt (IntRep)   = True
primRepIsInt (Int8Rep)  = True
primRepIsInt (Int16Rep) = True
primRepIsInt (Int32Rep) = True
primRepIsInt (Int64Rep) = True
primRepIsInt _          = False

{-
************************************************************************
*                                                                      *
                             Field labels
*                                                                      *
************************************************************************
-}

-- | The labels for the fields of this particular 'TyCon'
tyConFieldLabels :: TyCon -> [FieldLabel]
tyConFieldLabels tc = dFsEnvElts $ tyConFieldLabelEnv tc

-- | The labels for the fields of this particular 'TyCon'
-- (empty for anything other than an algebraic tycon)
tyConFieldLabelEnv :: TyCon -> FieldLabelEnv
tyConFieldLabelEnv (TyCon { tyConDetails = details })
  | AlgTyCon { algTcFields = fields } <- details = fields
  | otherwise                                    = emptyDFsEnv

-- | Look up a field label belonging to this 'TyCon'
lookupTyConFieldLabel :: FieldLabelString -> TyCon -> Maybe FieldLabel
lookupTyConFieldLabel lbl tc = lookupDFsEnv (tyConFieldLabelEnv tc) (field_label lbl)

-- | Make a map from strings to FieldLabels from all the data
-- constructors of this algebraic tycon
fieldsOfAlgTcRhs :: AlgTyConRhs -> FieldLabelEnv
fieldsOfAlgTcRhs rhs = mkDFsEnv [ (field_label $ flLabel fl, fl)
                                | fl <- dataConsFields (visibleDataCons rhs) ]
  where
    -- Duplicates in this list will be removed by 'mkFsEnv'
    dataConsFields dcs = concatMap dataConFieldLabels dcs

{-
************************************************************************
*                                                                      *
\subsection{TyCon Construction}
*                                                                      *
************************************************************************

  Note: the TyCon constructors all take a Kind as one argument,
  even though they could, in principle, work out their Kind from
  their other arguments.  But to do so they need functions from
  Types, and that makes a nasty module mutual-recursion.  And they
  aren't called from many places.  So we compromise, and move their
  Kind calculation to the call site.
-}

-- | Build a 'TyCon', computing and caching the derived fields
-- (kind, arity, nullary type, tyvars) from the given pieces.
mkTyCon :: Name -> [TyConBinder] -> Kind -> [Role] -> TyConDetails -> TyCon
mkTyCon name binders res_kind roles details
  = tc
  where
    -- Recursive binding because of tcNullaryTy
    tc = TyCon { tyConName   = name
               , tyConUnique = nameUnique name
               , tyConBinders = binders
               , tyConResKind = res_kind
               , tyConRoles   = roles
               , tyConDetails = details

                 -- Cached things
               , tyConKind  = mkTyConKind binders res_kind
               , tyConArity = length binders
               , tyConNullaryTy = mkNakedTyConTy tc
               , tyConHasClosedResKind = noFreeVarsOfType res_kind
               , tyConTyVars = binderVars binders }

-- | This is the making of an algebraic 'TyCon'.
mkAlgTyCon :: Name
           -> [TyConBinder]  -- ^ Binders of the 'TyCon'
           -> Kind           -- ^ Result kind
           -> [Role]         -- ^ The roles for each TyVar
           -> Maybe CType    -- ^ The C type this type corresponds to
                             --   when using the CAPI FFI
           -> [PredType]     -- ^ Stupid theta: see 'algTcStupidTheta'
           -> AlgTyConRhs    -- ^ Information about data constructors
           -> AlgTyConFlav   -- ^ What flavour is it?
                             --   (e.g. vanilla, type family)
           -> Bool           -- ^ Was the 'TyCon' declared with GADT syntax?
           -> TyCon
mkAlgTyCon name binders res_kind roles cType stupid rhs parent gadt_syn
  = mkTyCon name binders res_kind roles $
    AlgTyCon { tyConCType       = cType
             , algTcStupidTheta = stupid
             , algTcRhs         = rhs
             , algTcFields      = fieldsOfAlgTcRhs rhs
               -- Sanity-check the parent/name relationship; see 'okParent'
             , algTcFlavour     = assertPpr (okParent name parent)
                                            (ppr name $$ ppr parent) parent
             , algTcGadtSyntax  = gadt_syn }

-- | Simpler specialization of 'mkAlgTyCon' for classes
mkClassTyCon :: Name -> [TyConBinder]
             -> [Role] -> AlgTyConRhs -> Class
             -> Name -> TyCon
mkClassTyCon name binders roles rhs clas tc_rep_name
  = mkAlgTyCon name binders constraintKind roles Nothing [] rhs
               (ClassTyCon clas tc_rep_name)
               False

mkTupleTyCon :: Name
             -> [TyConBinder]
             -> Kind         -- ^ Result kind of the 'TyCon'
             -> DataCon
             -> TupleSort    -- ^ Whether the tuple is boxed or unboxed
             -> AlgTyConFlav
             -> TyCon
mkTupleTyCon name binders res_kind con sort parent
  = mkTyCon name binders res_kind (constRoles binders Representational) $
    AlgTyCon { tyConCType       = Nothing
             , algTcGadtSyntax  = False
             , algTcStupidTheta = []
             , algTcRhs         = TupleTyCon { data_con = con
                                             , tup_sort = sort }
             , algTcFields      = emptyDFsEnv
             , algTcFlavour     = parent }

-- | One copy of the given role per binder.
constRoles :: [TyConBinder] -> Role -> [Role]
constRoles bndrs role = [role | _ <- bndrs]

mkSumTyCon :: Name
           -> [TyConBinder]
           -> Kind       -- ^ Kind of the resulting 'TyCon'
           -> [DataCon]
           -> AlgTyConFlav
           -> TyCon
mkSumTyCon name binders res_kind cons parent
  = mkTyCon name binders res_kind (constRoles binders Representational) $
    AlgTyCon { tyConCType       = Nothing
             , algTcGadtSyntax  = False
             , algTcStupidTheta = []
             , algTcRhs         = mkSumTyConRhs cons
             , algTcFields      = emptyDFsEnv
             , algTcFlavour     = parent }

-- | Makes a tycon suitable for use during type-checking. It stores
-- a variety of details about the definition of the TyCon, but no
-- right-hand side. It lives only during the type-checking of a
-- mutually-recursive group of tycons; it is then zonked to a proper
-- TyCon in zonkTcTyCon.
-- See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in "GHC.Tc.TyCl"
mkTcTyCon :: Name
          -> [TyConBinder]
          -> Kind                -- ^ /result/ kind only
          -> [(Name,TcTyVar)]    -- ^ Scoped type variables;
          -> Bool                -- ^ Is this TcTyCon generalised already?
          -> TyConFlavour TyCon  -- ^ What sort of 'TyCon' this represents
          -> TyCon
mkTcTyCon name binders res_kind scoped_tvs poly flav
  = mkTyCon name binders res_kind (constRoles binders Nominal) $
    TcTyCon { tctc_scoped_tvs = scoped_tvs
            , tctc_is_poly    = poly
            , tctc_flavour    = flav }

-- | No scoped type variables (to be used with mkTcTyCon).
noTcTyConScopedTyVars :: [(Name, TcTyVar)]
noTcTyConScopedTyVars = []

-- | Create a primitive 'TyCon', such as @Int#@, @Type@ or @RealWorld@
-- Primitive TyCons are marshalable iff not lifted.
-- If you'd like to change this, modify marshalablePrimTyCon.
mkPrimTyCon :: Name -> [TyConBinder]
            -> Kind   -- ^ /result/ kind
                      -- Must answer 'True' to 'isFixedRuntimeRepKind' (i.e., no representation polymorphism).
                      -- (If you need a representation-polymorphic PrimTyCon,
                      -- change tcHasFixedRuntimeRep, marshalablePrimTyCon, reifyTyCon for PrimTyCons.)
            -> [Role]
            -> TyCon
mkPrimTyCon name binders res_kind roles
  = mkTyCon name binders res_kind roles $
    PrimTyCon { primRepName = mkPrelTyConRepName name }

-- | Create a type synonym 'TyCon'.  The four Booleans are stored as
-- 'synIsTau', 'synIsFamFree', 'synIsForgetful' and 'synIsConcrete'
-- respectively.
mkSynonymTyCon :: Name -> [TyConBinder] -> Kind   -- ^ /result/ kind
               -> [Role] -> Type
               -> Bool   -- ^ 'synIsTau'
               -> Bool   -- ^ 'synIsFamFree'
               -> Bool   -- ^ 'synIsForgetful'
               -> Bool   -- ^ 'synIsConcrete'
               -> TyCon
mkSynonymTyCon name binders res_kind roles rhs is_tau is_fam_free is_forgetful is_concrete
  = mkTyCon name binders res_kind roles $
    SynonymTyCon { synTcRhs       = rhs
                 , synIsTau       = is_tau
                 , synIsFamFree   = is_fam_free
                 , synIsForgetful = is_forgetful
                 , synIsConcrete  = is_concrete }

-- | Create a type family 'TyCon'
mkFamilyTyCon :: Name -> [TyConBinder] -> Kind   -- ^ /result/ kind
              -> Maybe Name -> FamTyConFlav
              -> Maybe Class -> Injectivity -> TyCon
mkFamilyTyCon name binders res_kind resVar flav parent inj
  = mkTyCon name binders res_kind (constRoles binders Nominal) $
    FamilyTyCon { famTcResVar = resVar
                , famTcFlav   = flav
                , famTcParent = classTyCon <$> parent
                , famTcInj    = inj }

-- | Create a promoted data constructor 'TyCon'
-- Somewhat dodgily, we give it the same Name
-- as the data constructor itself; when we pretty-print
-- the TyCon we add a quote; see the Outputable TyCon instance
mkPromotedDataCon :: DataCon -> Name -> TyConRepName
                  -> [TyConBinder] -> Kind -> [Role]
                  -> PromDataConInfo -> TyCon
mkPromotedDataCon con name rep_name binders res_kind roles rep_info
  = mkTyCon name binders res_kind roles $
    PromotedDataCon { dataCon    = con
                    , tcRepName  = rep_name
                    , promDcInfo = rep_info }

-- | Test if the 'TyCon' is algebraic but abstract (invisible data constructors)
isAbstractTyCon :: TyCon -> Bool
isAbstractTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = AbstractTyCon {} } <- details = True
  | otherwise                                           = False

-- | Does this 'TyCon' represent something that cannot be defined in Haskell?
isPrimTyCon :: TyCon -> Bool
isPrimTyCon (TyCon { tyConDetails = details }) =
  case details of
    PrimTyCon {} -> True
    _            -> False

-- | Returns @True@ if the supplied 'TyCon' resulted from either a
-- @data@ or @newtype@ declaration
isAlgTyCon :: TyCon -> Bool
isAlgTyCon (TyCon { tyConDetails = details }) =
  case details of
    AlgTyCon {} -> True
    _           -> False

-- | Returns @True@ for vanilla AlgTyCons -- that is, those created
-- with a @data@ or @newtype@ declaration.
isVanillaAlgTyCon :: TyCon -> Bool
isVanillaAlgTyCon (TyCon { tyConDetails = details }) =
  case details of
    AlgTyCon { algTcFlavour = VanillaAlgTyCon _ } -> True
    _                                             -> False

-- | Returns @True@ if a boxed type headed by the given @TyCon@
-- satisfies condition DTT2 of Note [DataToTag overview] in
-- GHC.Tc.Instance.Class
isValidDTT2TyCon :: TyCon -> Bool
isValidDTT2TyCon tc = isDataTyCon tc

isDataTyCon :: TyCon -> Bool
-- ^ Returns @True@ for data types that are /definitely/ represented by
-- heap-allocated constructors.  These are scrutinised by Core-level
-- @case@ expressions, and they get info tables allocated for them.
--
-- Generally, the function will be true for all @data@ types and false
-- for @newtype@s, unboxed tuples, unboxed sums and type family
-- 'TyCon's. But it is not guaranteed to return @True@ in all cases
-- that it could.
--
-- NB: for a data type family, only the /instance/ 'TyCon's
--     get an info table.  The family declaration 'TyCon' does not
isDataTyCon (TyCon { tyConDetails = details }) =
  case details of
    AlgTyCon { algTcRhs = rhs } ->
      case rhs of
        TupleTyCon { tup_sort = sort } -> isBoxed (tupleSortBoxity sort)
        SumTyCon {}                    -> False
        -- Constructors from "type data" declarations exist only at
        -- the type level.
        -- See Note [Type data declarations] in GHC.Rename.Module.
        DataTyCon { is_type_data = type_data } -> not type_data
        NewTyCon {}                    -> False
        AbstractTyCon {}               -> False  -- We don't know, so return False
    _ -> False

-- | Was this 'TyCon' declared as "type data"?
-- See Note [Type data declarations] in GHC.Rename.Module.
isTypeDataTyCon :: TyCon -> Bool
isTypeDataTyCon (TyCon { tyConDetails = details })
  | AlgTyCon {algTcRhs = DataTyCon {is_type_data = type_data }} <- details
  = type_data
  | otherwise
  = False

-- | 'isInjectiveTyCon' is true of 'TyCon's for which this property holds
-- (where r is the role passed in):
--   If (T a1 b1 c1) ~r (T a2 b2 c2), then (a1 ~r1 a2), (b1 ~r2 b2), and (c1 ~r3 c2)
-- (where r1, r2, and r3, are the roles given by tyConRolesX tc r)
-- See also Note [Decomposing TyConApp equalities] in "GHC.Tc.Solver.Equality"
isInjectiveTyCon :: TyCon -> Role -> Bool
isInjectiveTyCon (TyCon { tyConDetails = details }) role
  = go details role
  where
    go _ Phantom = True -- Vacuously; (t1 ~P t2) holds for all t1, t2!

    go (AlgTyCon {})               Nominal          = True
    go (AlgTyCon {algTcRhs = rhs}) Representational = isGenInjAlgRhs rhs
    go (SynonymTyCon {})           _                = False
    go (FamilyTyCon { famTcFlav = DataFamilyTyCon _ })
                                   Nominal          = True
    go (FamilyTyCon { famTcInj = Injective inj })
                                   Nominal          = and inj
    go (FamilyTyCon {})            _                = False
    go (PrimTyCon {})              _                = True
    go (PromotedDataCon {})        _                = True
    go (TcTyCon {})                _                = True
      -- Reply True for TcTyCon to minimise knock on type errors
      -- See (W1) in Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in GHC.Tc.TyCl

-- | 'isGenerativeTyCon' is true of 'TyCon's for which this property holds
-- (where r is the role passed in):
--   If (T tys ~r t), then (t's head ~r T).
-- See also Note [Decomposing TyConApp equalities] in "GHC.Tc.Solver.Equality"
--
-- NB: at Nominal role, isGenerativeTyCon is simple:
--    isGenerativeTyCon tc Nominal
--      = not (isTypeFamilyTyCon tc || isSynonymTyCon tc)
isGenerativeTyCon :: TyCon -> Role -> Bool
isGenerativeTyCon tc@(TyCon { tyConDetails = details }) role =
  case details of
    -- A data family is generative at Nominal role only;
    -- every other family is never generative.
    FamilyTyCon { famTcFlav = flav }
      | DataFamilyTyCon _ <- flav, Nominal <- role -> True
      | otherwise                                  -> False
    -- In all other cases, injectivity implies generativity.
    _ -> isInjectiveTyCon tc role

-- | Is this an 'AlgTyConRhs' of a 'TyCon' that is generative and injective
-- with respect to representational equality?
isGenInjAlgRhs :: AlgTyConRhs -> Bool
isGenInjAlgRhs rhs =
  case rhs of
    TupleTyCon {}    -> True
    SumTyCon {}      -> True
    DataTyCon {}     -> True
    AbstractTyCon {} -> False
    NewTyCon {}      -> False

-- | Is this the 'TyCon' of a @newtype@?
isNewTyCon :: TyCon -> Bool
isNewTyCon (TyCon { tyConDetails = details }) =
  case details of
    AlgTyCon { algTcRhs = NewTyCon {} } -> True
    _                                   -> False

-- | Take a 'TyCon' apart into the 'TyVar's it scopes over, the 'Type' it
-- expands into, and (possibly) a coercion from the representation type to the
-- @newtype@.
-- Returns @Nothing@ if this is not possible.
unwrapNewTyCon_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyCon_maybe (TyCon { tyConTyVars = tvs, tyConDetails = details }) =
  case details of
    AlgTyCon { algTcRhs = NewTyCon { nt_co = co, nt_rhs = rhs } }
      -> Just (tvs, rhs, co)
    _ -> Nothing

-- | As 'unwrapNewTyCon_maybe', but returns the eta-reduced tyvars and RHS.
unwrapNewTyConEtad_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyConEtad_maybe (TyCon { tyConDetails = details }) =
  case details of
    AlgTyCon { algTcRhs = NewTyCon { nt_co = co, nt_etad_rhs = (tvs, rhs) } }
      -> Just (tvs, rhs, co)
    _ -> Nothing

-- | Is this a 'TyCon' representing a regular H98 type synonym (@type@)?
{-# INLINE isTypeSynonymTyCon #-}  -- See Note [Inlining coreView] in GHC.Core.Type
isTypeSynonymTyCon :: TyCon -> Bool
isTypeSynonymTyCon (TyCon { tyConDetails = details })
  | SynonymTyCon {} <- details = True
  | otherwise                  = False

-- | Read the 'synIsTau' flag of a type synonym; anything that is not a
-- synonym counts as True.
isTauTyCon :: TyCon -> Bool
isTauTyCon (TyCon { tyConDetails = details })
  | SynonymTyCon { synIsTau = is_tau } <- details = is_tau
  | otherwise                                     = True

-- | Is this tycon neither a type family nor a synonym that expands
-- to a type family?
isFamFreeTyCon :: TyCon -> Bool
isFamFreeTyCon (TyCon { tyConDetails = details })
  | SynonymTyCon { synIsFamFree = fam_free } <- details = fam_free
  | FamilyTyCon { famTcFlav = flav } <- details         = isDataFamFlav flav
  | otherwise                                           = True

-- | Is this a forgetful type synonym? If this is a type synonym whose
-- RHS does not mention one (or more) of its bound variables, returns
-- True. Thus, False means that all bound variables appear on the RHS;
-- True may not mean anything, as the test to set this flag is
-- conservative.
--
-- See Note [Forgetful type synonyms]
isForgetfulSynTyCon :: TyCon -> Bool
isForgetfulSynTyCon (TyCon { tyConDetails = details })
  | SynonymTyCon { synIsForgetful = forget } <- details = forget
  | otherwise                                           = False

{- Note [Forgetful type synonyms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A type synonyms is /forgetful/ if its RHS fails to mention one (or more)
of its bound variables.

Forgetfulness is conservative:
  * A non-forgetful synonym /guarantees/ to mention all its bound
    variables in its RHS.
  * It is always safe to classify a synonym as forgetful.

Examples:
     type R = Int          -- Not forgetful
     type S a = Int        -- Forgetful
     type T1 a = Int -> S a   -- Forgetful
     type T2 a = a -> S a     -- Not forgetful
     type T3 a = Int -> F a   -- Not forgetful
          where type family F a

* R shows that nullary synonyms are not forgetful.

* T2 shows that forgetfulness needs to account for uses of forgetful
  synonyms.  `a` appears on the RHS, but only under a forgetful S

* T3 shows that non-forgetfulness is not the same as injectivity.  T3
  mentions its bound variable on its RHS, but under a type family.  So
  it is entirely possible that T3 Int ~ T3 Bool

* Since type synonyms are non-recursive, we don't need a fixpoint
  analysis to determine forgetfulness.  It's rather easy -- see
  `GHC.Core.Type.buildSynTyCon`, which is a bit over-conservative for
  over-saturated synonyms.
-}

-- As for newtypes, it is in some contexts important to distinguish between
-- closed synonyms and synonym families, as synonym families have no unique
-- right hand side to which a synonym family application can expand.
--

-- | True iff we can decompose (T a b c) into ((T a b) c)
--   I.e. is it injective and generative w.r.t nominal equality?
--   That is, if (T a b) ~N d e f, is it always the case that
--   (T ~N d), (a ~N e) and (b ~N f)?
-- Specifically NOT true of synonyms (open and otherwise)
--
-- It'd be unusual to call tyConMustBeSaturated on a regular H98
-- type synonym, because you should probably have expanded it first
-- But regardless, it's not decomposable
tyConMustBeSaturated :: TyCon -> Bool
tyConMustBeSaturated = tcFlavourMustBeSaturated . tyConFlavour

-- | Is this an algebraic 'TyCon' declared with the GADT syntax?
isGadtSyntaxTyCon :: TyCon -> Bool
isGadtSyntaxTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcGadtSyntax = res } <- details = res
  | otherwise                                     = False

-- | Is this an algebraic 'TyCon' which is just an enumeration of values?
isEnumerationTyCon :: TyCon -> Bool
-- See Note [Enumeration types] in GHC.Core.TyCon
isEnumerationTyCon (TyCon { tyConArity = arity, tyConDetails = details })
  | AlgTyCon { algTcRhs = rhs } <- details
  = case rhs of
      DataTyCon { is_enum = res } -> res
      TupleTyCon {}               -> arity == 0
      _                           -> False
  | otherwise = False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family?
isFamilyTyCon :: TyCon -> Bool
isFamilyTyCon (TyCon { tyConDetails = details }) =
  case details of
    FamilyTyCon {} -> True
    _              -> False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family with
-- instances?
isOpenFamilyTyCon :: TyCon -> Bool
isOpenFamilyTyCon (TyCon { tyConDetails = details }) =
  case details of
    FamilyTyCon { famTcFlav = flav } ->
      case flav of
        OpenSynFamilyTyCon -> True
        DataFamilyTyCon {} -> True
        _                  -> False
    _ -> False

-- | Is this a type family 'TyCon' (whether open or closed)?
isTypeFamilyTyCon :: TyCon -> Bool
isTypeFamilyTyCon (TyCon { tyConDetails = details }) =
  case details of
    FamilyTyCon { famTcFlav = flav } -> not (isDataFamFlav flav)
    _                                -> False

-- | Is this a data family 'TyCon'?
isDataFamilyTyCon :: TyCon -> Bool
isDataFamilyTyCon (TyCon { tyConDetails = details }) =
  case details of
    FamilyTyCon { famTcFlav = flav } -> isDataFamFlav flav
    _                                -> False

-- | Is this an open type family TyCon?
isOpenTypeFamilyTyCon :: TyCon -> Bool
isOpenTypeFamilyTyCon (TyCon { tyConDetails = details }) =
  case details of
    FamilyTyCon { famTcFlav = OpenSynFamilyTyCon } -> True
    _                                              -> False

-- | Is this a non-empty closed type family? Returns 'Nothing' for
-- abstract or empty closed families.
isClosedSynFamilyTyConWithAxiom_maybe :: TyCon -> Maybe (CoAxiom Branched)
isClosedSynFamilyTyConWithAxiom_maybe (TyCon { tyConDetails = details })
  | FamilyTyCon {famTcFlav = ClosedSynFamilyTyCon mb} <- details = mb
  | otherwise                                                    = Nothing

-- | Fetch the built-in family operations, if this is a built-in family.
isBuiltInSynFamTyCon_maybe :: TyCon -> Maybe BuiltInSynFamily
isBuiltInSynFamTyCon_maybe (TyCon { tyConDetails = details })
  | FamilyTyCon {famTcFlav = BuiltInSynFamTyCon ops} <- details = Just ops
  | otherwise                                                   = Nothing

-- | Extract type variable naming the result of injective type family
tyConFamilyResVar_maybe :: TyCon -> Maybe Name
tyConFamilyResVar_maybe (TyCon { tyConDetails = details })
  | FamilyTyCon {famTcResVar = res} <- details = res
  | otherwise                                  = Nothing

-- | @'tyConInjectivityInfo' tc@ returns @'Injective' is@ if @tc@ is an
-- injective tycon (where @is@ states for which 'tyConBinders' @tc@ is
-- injective), or 'NotInjective' otherwise.
tyConInjectivityInfo :: TyCon -> Injectivity
tyConInjectivityInfo tc@(TyCon { tyConDetails = details })
  | FamilyTyCon { famTcInj = inj } <- details
  = inj
  | isInjectiveTyCon tc Nominal
    -- Injective in every argument
  = Injective (replicate (tyConArity tc) True)
  | otherwise
  = NotInjective

isDataFamFlav :: FamTyConFlav -> Bool
isDataFamFlav (DataFamilyTyCon {}) = True   -- Data family
isDataFamFlav _                    = False  -- Type synonym family

-- | Is this TyCon for an associated type?
isTyConAssoc :: TyCon -> Bool
isTyConAssoc = isJust . tyConAssoc_maybe

-- | Get the enclosing class TyCon (if there is one) for the given TyCon.
tyConAssoc_maybe :: TyCon -> Maybe TyCon
tyConAssoc_maybe = tyConFlavourAssoc_maybe . tyConFlavour

-- The unit tycon didn't used to be classed as a tuple tycon
-- but I thought that was silly so I've undone it
-- If it can't be for some reason, it should be a AlgTyCon
isTupleTyCon :: TyCon -> Bool
-- ^ Does this 'TyCon' represent a tuple?
--
-- NB: when compiling @Data.Tuple@, the tycons won't reply @True@ to
-- 'isTupleTyCon', because they are built as 'AlgTyCons'.  However they
-- get spat into the interface file as tuple tycons, so I don't think
-- it matters.
isTupleTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = TupleTyCon {} } <- details = True
  | otherwise                                        = False

-- | The tuple sort (boxed/unboxed/constraint), if this is a tuple tycon.
tyConTuple_maybe :: TyCon -> Maybe TupleSort
tyConTuple_maybe (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = rhs } <- details
  , TupleTyCon { tup_sort = sort} <- rhs
  = Just sort
  | otherwise = Nothing

-- | Is this the 'TyCon' for an unboxed tuple?
isUnboxedTupleTyCon :: TyCon -> Bool
isUnboxedTupleTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = rhs } <- details
  , TupleTyCon { tup_sort = sort } <- rhs
  = not (isBoxed (tupleSortBoxity sort))
  | otherwise = False

-- | Is this the 'TyCon' for a boxed tuple?
isBoxedTupleTyCon :: TyCon -> Bool
isBoxedTupleTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = rhs } <- details
  , TupleTyCon { tup_sort = sort } <- rhs
  = isBoxed (tupleSortBoxity sort)
  | otherwise = False

-- | Is this the 'TyCon' for an unboxed sum?
isUnboxedSumTyCon :: TyCon -> Bool
isUnboxedSumTyCon (TyCon { tyConDetails = details })
  | AlgTyCon { algTcRhs = rhs } <- details
  , SumTyCon {} <- rhs
  = True
  | otherwise = False

-- | Is this an algebraic tycon whose result kind is lifted?
isLiftedAlgTyCon :: TyCon -> Bool
isLiftedAlgTyCon (TyCon { tyConResKind = res_kind, tyConDetails = details })
  | AlgTyCon {} <- details
  = isLiftedTypeKind res_kind
  | otherwise = False

-- | Retrieves the promoted DataCon if this is a PromotedDataCon;
isPromotedDataCon_maybe :: TyCon -> Maybe DataCon
isPromotedDataCon_maybe (TyCon { tyConDetails = details })
  | PromotedDataCon { dataCon = dc } <- details = Just dc
  | otherwise                                   = Nothing

-- | Is this the 'TyCon' for a /promoted/ tuple?
isPromotedTupleTyCon :: TyCon -> Bool
isPromotedTupleTyCon tyCon
  | Just dataCon <- isPromotedDataCon_maybe tyCon
  , isTupleTyCon (dataConTyCon dataCon) = True
  | otherwise                           = False

-- | Is this a PromotedDataCon?
isPromotedDataCon :: TyCon -> Bool isPromotedDataCon (TyCon { tyConDetails = details }) | PromotedDataCon {} <- details = True | otherwise = False -- | This function identifies PromotedDataCon's from data constructors in -- `data T = K1 | K2`, promoted by -XDataKinds. These type constructors -- are printed with a tick mark 'K1 and 'K2, and similarly have a tick -- mark added to their OccName's. -- -- In contrast, constructors in `type data T = K1 | K2` are printed and -- represented with their original undecorated names. -- See Note [Type data declarations] in GHC.Rename.Module isDataKindsPromotedDataCon :: TyCon -> Bool isDataKindsPromotedDataCon (TyCon { tyConDetails = details }) | PromotedDataCon { dataCon = dc } <- details = not (isTypeDataCon dc) | otherwise = False -- | Is this 'TyCon' really meant for use at the kind level? That is, -- should it be permitted without @DataKinds@? isKindTyCon :: TyCon -> Bool isKindTyCon = isKindUniquable -- | This is 'Name' really meant for use at the kind level? That is, -- should it be permitted wihout @DataKinds@? isKindName :: Name -> Bool isKindName = isKindUniquable -- | The workhorse for 'isKindTyCon' and 'isKindName'. isKindUniquable :: Uniquable a => a -> Bool isKindUniquable thing = getUnique thing `memberUniqueSet` kindTyConKeys -- | These TyCons should be allowed at the kind level, even without -- -XDataKinds. kindTyConKeys :: UniqueSet kindTyConKeys = fromListUniqueSet $ -- Make sure to keep this in sync with the following: -- -- - The Overview section in docs/users_guide/exts/data_kinds.rst in the GHC -- User's Guide. -- -- - The typecheck/should_compile/T22141f.hs test case, which ensures that all -- of these can successfully be used without DataKinds. 
[ liftedTypeKindTyConKey, liftedRepTyConKey, constraintKindTyConKey, tYPETyConKey, cONSTRAINTTyConKey ] ++ concatMap tycon_with_datacons [ runtimeRepTyCon, levityTyCon , multiplicityTyCon , vecCountTyCon, vecElemTyCon ] where tycon_with_datacons tc = getUnique tc : map getUnique (tyConDataCons tc) isLiftedTypeKindTyConName :: Name -> Bool isLiftedTypeKindTyConName = (`hasKey` liftedTypeKindTyConKey) -- | Identifies implicit tycons that, in particular, do not go into interface -- files (because they are implicitly reconstructed when the interface is -- read). -- -- Note that: -- -- * Associated families are implicit, as they are re-constructed from -- the class declaration in which they reside, and -- -- * Family instances are /not/ implicit as they represent the instance body -- (similar to a @dfun@ does that for a class instance). -- -- * Tuples are implicit iff they have a wired-in name -- (namely: boxed and unboxed tuples are wired-in and implicit, -- but constraint tuples are not) isImplicitTyCon :: TyCon -> Bool isImplicitTyCon (TyCon { tyConName = name, tyConDetails = details }) = go details where go (PrimTyCon {}) = True go (PromotedDataCon {}) = True go (SynonymTyCon {}) = False go (TcTyCon {}) = False go (FamilyTyCon { famTcParent = parent }) = isJust parent go (AlgTyCon { algTcRhs = rhs }) | TupleTyCon {} <- rhs = isWiredInName name | SumTyCon {} <- rhs = True | otherwise = False tyConCType_maybe :: TyCon -> Maybe CType tyConCType_maybe (TyCon { tyConDetails = details }) | AlgTyCon { tyConCType = mb_ctype} <- details = mb_ctype | otherwise = Nothing -- | Does this 'TyCon' have a syntactically fixed RuntimeRep when fully applied, -- as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete? -- -- False is safe. True means we're sure. -- Does only a quick check, based on the TyCon's category. 
-- -- See Note [Representation-polymorphic TyCons] tcHasFixedRuntimeRep :: TyCon -> Bool tcHasFixedRuntimeRep tc@(TyCon { tyConDetails = details }) | AlgTyCon { algTcRhs = rhs } <- details = case rhs of AbstractTyCon {} -> False -- An abstract TyCon might not have a fixed runtime representation. -- Note that this is an entirely different matter from the concreteness -- of the 'TyCon', in the sense of 'isConcreteTyCon'. DataTyCon { data_fixed_lev = fixed_lev } -> fixed_lev -- A datatype might not have a fixed levity with UnliftedDatatypes (#20423). -- NB: the current representation-polymorphism checks require that -- the representation be fully-known, including levity variables. -- This might be relaxed in the future (#15532). TupleTyCon { tup_sort = tuple_sort } -> isBoxed (tupleSortBoxity tuple_sort) || -- (# #) also has fixed rep. tyConArity tc == 0 SumTyCon {} -> False -- only unboxed sums here NewTyCon { nt_fixed_rep = fixed_rep } -> fixed_rep -- A newtype might not have a fixed runtime representation -- with UnliftedNewtypes (#17360) | SynonymTyCon {} <- details = False -- conservative choice | FamilyTyCon{} <- details = False | PrimTyCon{} <- details = True | TcTyCon{} <- details = False | PromotedDataCon{} <- details = pprPanic "tcHasFixedRuntimeRep datacon" (ppr tc) -- | Is this 'TyCon' concrete? -- More specifically, if 'tys' are all concrete, is (T tys) concrete? -- (for synonyms this requires us to look at the RHS) -- Used for representation polymorphism checks. 
-- See Note [Concrete types] in GHC.Tc.Utils.Concrete isConcreteTyCon :: TyCon -> Bool isConcreteTyCon tc@(TyCon { tyConDetails = details }) = case details of AlgTyCon {} -> True -- Includes AbstractTyCon PrimTyCon {} -> True PromotedDataCon {} -> True FamilyTyCon {} -> False SynonymTyCon { synIsConcrete = is_conc } -> is_conc TcTyCon {} -> pprPanic "isConcreteTyCon" (ppr tc) -- isConcreteTyCon is only used on "real" tycons {- ----------------------------------------------- -- TcTyCon ----------------------------------------------- -} -- | Is this a TcTyCon? (That is, one only used during type-checking?) isTcTyCon :: TyCon -> Bool isTcTyCon (TyCon { tyConDetails = details }) | TcTyCon {} <- details = True | otherwise = False setTcTyConKind :: TyCon -> Kind -> TyCon -- Update the Kind of a TcTyCon -- The new kind is always a zonked version of its previous -- kind, so we don't need to update any other fields. -- See Note [The Purely Kinded Type Invariant (PKTI)] in GHC.Tc.Gen.HsType setTcTyConKind tc kind = assert (isMonoTcTyCon tc) $ let tc' = tc { tyConKind = kind , tyConNullaryTy = mkNakedTyConTy tc' } -- See Note [Sharing nullary TyConApps] in tc' isMonoTcTyCon :: TyCon -> Bool isMonoTcTyCon (TyCon { tyConDetails = details }) | TcTyCon { tctc_is_poly = is_poly } <- details = not is_poly | otherwise = False tcTyConScopedTyVars :: TyCon -> [(Name,TcTyVar)] tcTyConScopedTyVars tc@(TyCon { tyConDetails = details }) | TcTyCon { tctc_scoped_tvs = scoped_tvs } <- details = scoped_tvs | otherwise = pprPanic "tcTyConScopedTyVars" (ppr tc) {- ----------------------------------------------- -- Expand type-constructor applications ----------------------------------------------- -} data ExpandSynResult tyco = NoExpansion | ExpandsSyn [(TyVar,tyco)] Type [tyco] expandSynTyCon_maybe :: TyCon -> [tyco] -- ^ Arguments to 'TyCon' -> ExpandSynResult tyco -- ^ Returns a 'TyVar' substitution, the body -- type of the synonym (not yet substituted) -- and any arguments remaining from 
the -- application -- ^ Expand a type synonym application -- Return Nothing if the TyCon is not a synonym, -- or if not enough arguments are supplied expandSynTyCon_maybe (TyCon { tyConTyVars = tvs, tyConArity = arity , tyConDetails = details }) tys | SynonymTyCon { synTcRhs = rhs } <- details = if arity == 0 then ExpandsSyn [] rhs tys -- Avoid a bit of work in the case of nullary synonyms else case tys `listLengthCmp` arity of GT -> ExpandsSyn (tvs `zip` tys) rhs (drop arity tys) EQ -> ExpandsSyn (tvs `zip` tys) rhs [] LT -> NoExpansion | otherwise = NoExpansion ---------------- -- | Check if the tycon actually refers to a proper `data` or `newtype` -- with user defined constructors rather than one from a class or other -- construction. -- NB: This is only used in GHC.Tc.Gen.Export.checkPatSynParent to determine if an -- exported tycon can have a pattern synonym bundled with it, e.g., -- module Foo (TyCon(.., PatSyn)) where isTyConWithSrcDataCons :: TyCon -> Bool isTyConWithSrcDataCons (TyCon { tyConDetails = details }) | AlgTyCon { algTcRhs = rhs, algTcFlavour = parent } <- details , let isSrcParent = isNoParent parent = case rhs of DataTyCon {} -> isSrcParent NewTyCon {} -> isSrcParent TupleTyCon {} -> isSrcParent _ -> False | FamilyTyCon { famTcFlav = DataFamilyTyCon {} } <- details = True -- #14058 | otherwise = False -- | As 'tyConDataCons_maybe', but returns the empty list of constructors if no -- constructors could be found tyConDataCons :: TyCon -> [DataCon] -- It's convenient for tyConDataCons to return the -- empty list for type synonyms etc tyConDataCons tycon = tyConDataCons_maybe tycon `orElse` [] -- | Determine the 'DataCon's originating from the given 'TyCon', if the 'TyCon' -- is the sort that can have any constructors (note: this does not include -- abstract algebraic types) tyConDataCons_maybe :: TyCon -> Maybe [DataCon] tyConDataCons_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = rhs} <- details = case rhs of DataTyCon { 
data_cons = cons } -> Just cons NewTyCon { data_con = con } -> Just [con] TupleTyCon { data_con = con } -> Just [con] SumTyCon { data_cons = cons } -> Just cons _ -> Nothing tyConDataCons_maybe _ = Nothing -- | If the given 'TyCon' has a /single/ data constructor, i.e. it is a @data@ -- type with one alternative, a tuple type or a @newtype@ then that constructor -- is returned. If the 'TyCon' has more than one constructor, or represents a -- primitive or function type constructor then @Nothing@ is returned. tyConSingleDataCon_maybe :: TyCon -> Maybe DataCon tyConSingleDataCon_maybe (TyCon { tyConDetails = details }) | AlgTyCon { algTcRhs = rhs } <- details = case rhs of DataTyCon { data_cons = [c] } -> Just c TupleTyCon { data_con = c } -> Just c NewTyCon { data_con = c } -> Just c _ -> Nothing | otherwise = Nothing -- | Like 'tyConSingleDataCon_maybe', but panics if 'Nothing'. tyConSingleDataCon :: TyCon -> DataCon tyConSingleDataCon tc = case tyConSingleDataCon_maybe tc of Just c -> c Nothing -> pprPanic "tyConDataCon" (ppr tc) -- | Like 'tyConSingleDataCon_maybe', but returns 'Nothing' for newtypes. tyConSingleAlgDataCon_maybe :: TyCon -> Maybe DataCon tyConSingleAlgDataCon_maybe tycon | isNewTyCon tycon = Nothing | otherwise = tyConSingleDataCon_maybe tycon -- | Returns @Just dcs@ if the given 'TyCon' is a @data@ type, a tuple type -- or a sum type with data constructors dcs. If the 'TyCon' has more than one -- constructor, or represents a primitive or function type constructor then -- @Nothing@ is returned. -- -- Like 'tyConDataCons_maybe', but returns 'Nothing' for newtypes. tyConAlgDataCons_maybe :: TyCon -> Maybe [DataCon] tyConAlgDataCons_maybe tycon | isNewTyCon tycon = Nothing | otherwise = tyConDataCons_maybe tycon -- | Determine the number of value constructors a 'TyCon' has. 
Panics if the -- 'TyCon' is not algebraic or a tuple tyConFamilySize :: TyCon -> Int tyConFamilySize tc@(TyCon { tyConDetails = details }) | AlgTyCon { algTcRhs = rhs } <- details = case rhs of DataTyCon { data_cons_size = size } -> size NewTyCon {} -> 1 TupleTyCon {} -> 1 SumTyCon { data_cons_size = size } -> size _ -> pprPanic "tyConFamilySize 1" (ppr tc) | otherwise = pprPanic "tyConFamilySize 2" (ppr tc) -- | Extract an 'AlgTyConRhs' with information about data constructors from an -- algebraic or tuple 'TyCon'. Panics for any other sort of 'TyCon' algTyConRhs :: TyCon -> AlgTyConRhs algTyConRhs tc@(TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = rhs} <- details = rhs | otherwise = pprPanic "algTyConRhs" (ppr tc) -- | Extract the bound type variables and type expansion of a type synonym -- 'TyCon'. Panics if the 'TyCon' is not a synonym newTyConRhs :: TyCon -> ([TyVar], Type) newTyConRhs tc@(TyCon { tyConTyVars = tvs, tyConDetails = details }) | AlgTyCon { algTcRhs = NewTyCon { nt_rhs = rhs }} <- details = (tvs, rhs) | otherwise = pprPanic "newTyConRhs" (ppr tc) -- | The number of type parameters that need to be passed to a newtype to -- resolve it. May be less than in the definition if it can be eta-contracted. newTyConEtadArity :: TyCon -> Int newTyConEtadArity tc@(TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = NewTyCon { nt_etad_rhs = tvs_rhs }} <- details = length (fst tvs_rhs) | otherwise = pprPanic "newTyConEtadArity" (ppr tc) -- | Extract the bound type variables and type expansion of an eta-contracted -- type synonym 'TyCon'. 
Panics if the 'TyCon' is not a synonym newTyConEtadRhs :: TyCon -> ([TyVar], Type) newTyConEtadRhs tc@(TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = NewTyCon { nt_etad_rhs = tvs_rhs }} <- details = tvs_rhs | otherwise = pprPanic "newTyConEtadRhs" (ppr tc) -- | Extracts the @newtype@ coercion from such a 'TyCon', which can be used to -- construct something with the @newtype@s type from its representation type -- (right hand side). If the supplied 'TyCon' is not a @newtype@, returns -- @Nothing@ newTyConCo_maybe :: TyCon -> Maybe (CoAxiom Unbranched) newTyConCo_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = NewTyCon { nt_co = co }} <- details = Just co | otherwise = Nothing newTyConCo :: TyCon -> CoAxiom Unbranched newTyConCo tc = case newTyConCo_maybe tc of Just co -> co Nothing -> pprPanic "newTyConCo" (ppr tc) newTyConDataCon_maybe :: TyCon -> Maybe DataCon newTyConDataCon_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcRhs = NewTyCon { data_con = con }} <- details = Just con | otherwise = Nothing -- | Find the \"stupid theta\" of the 'TyCon'. A \"stupid theta\" is the context -- to the left of an algebraic type declaration, e.g. @Eq a@ in the declaration -- @data Eq a => T a ...@. See @Note [The stupid context]@ in "GHC.Core.DataCon". tyConStupidTheta :: TyCon -> [PredType] tyConStupidTheta tc@(TyCon { tyConDetails = details }) | AlgTyCon {algTcStupidTheta = stupid} <- details = stupid | PrimTyCon {} <- details = [] | otherwise = pprPanic "tyConStupidTheta" (ppr tc) -- | Extract the 'TyVar's bound by a vanilla type synonym -- and the corresponding (unsubstituted) right hand side. synTyConDefn_maybe :: TyCon -> Maybe ([TyVar], Type) synTyConDefn_maybe (TyCon { tyConTyVars = tyvars, tyConDetails = details }) | SynonymTyCon {synTcRhs = ty} <- details = Just (tyvars, ty) | otherwise = Nothing -- | Extract the information pertaining to the right hand side of a type synonym -- (@type@) declaration. 
synTyConRhs_maybe :: TyCon -> Maybe Type synTyConRhs_maybe (TyCon { tyConDetails = details }) | SynonymTyCon {synTcRhs = rhs} <- details = Just rhs | otherwise = Nothing -- | Extract the flavour of a type family (with all the extra information that -- it carries) famTyConFlav_maybe :: TyCon -> Maybe FamTyConFlav famTyConFlav_maybe (TyCon { tyConDetails = details }) | FamilyTyCon {famTcFlav = flav} <- details = Just flav | otherwise = Nothing -- | Is this 'TyCon' that for a class instance? isClassTyCon :: TyCon -> Bool isClassTyCon (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = ClassTyCon {}} <- details = True | otherwise = False -- | If this 'TyCon' is that for a class instance, return the class it is for. -- Otherwise returns @Nothing@ tyConClass_maybe :: TyCon -> Maybe Class tyConClass_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = ClassTyCon clas _} <- details = Just clas | otherwise = Nothing -- | Return the associated types of the 'TyCon', if any tyConATs :: TyCon -> [TyCon] tyConATs (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = ClassTyCon clas _} <- details = classATs clas | otherwise = [] ---------------------------------------------------------------------------- -- | Is this 'TyCon' that for a data family instance? isFamInstTyCon :: TyCon -> Bool isFamInstTyCon (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = DataFamInstTyCon {} } <- details = True | otherwise = False tyConFamInstSig_maybe :: TyCon -> Maybe (TyCon, [Type], CoAxiom Unbranched) tyConFamInstSig_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = DataFamInstTyCon ax f ts } <- details = Just (f, ts, ax) | otherwise = Nothing -- | If this 'TyCon' is that of a data family instance, return the family in question -- and the instance types. 
Otherwise, return @Nothing@ tyConFamInst_maybe :: TyCon -> Maybe (TyCon, [Type]) tyConFamInst_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = DataFamInstTyCon _ f ts } <- details = Just (f, ts) | otherwise = Nothing -- | If this 'TyCon' is that of a data family instance, return a 'TyCon' which -- represents a coercion identifying the representation type with the type -- instance family. Otherwise, return @Nothing@ tyConFamilyCoercion_maybe :: TyCon -> Maybe (CoAxiom Unbranched) tyConFamilyCoercion_maybe (TyCon { tyConDetails = details }) | AlgTyCon {algTcFlavour = DataFamInstTyCon ax _ _ } <- details = Just ax | otherwise = Nothing -- | Extract any 'RuntimeRepInfo' from this TyCon tyConPromDataConInfo :: TyCon -> PromDataConInfo tyConPromDataConInfo (TyCon { tyConDetails = details }) | PromotedDataCon { promDcInfo = rri } <- details = rri | otherwise = NoPromInfo -- could panic in that second case. But Douglas Adams told me not to. {- Note [Constructor tag allocation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When typechecking we need to allocate constructor tags to constructors. They are allocated based on the position in the data_cons field of TyCon, with the first constructor getting fIRST_TAG. We used to pay linear cost per constructor, with each constructor looking up its relative index in the constructor list. That was quadratic and prohibitive for large data types with more than 10k constructors. The current strategy is to build a NameEnv with a mapping from constructor's Name to ConTag and pass it down to buildDataCon for efficient lookup. Relevant ticket: #14657 -} mkTyConTagMap :: TyCon -> NameEnv ConTag mkTyConTagMap tycon = mkNameEnv $ map getName (tyConDataCons tycon) `zip` [fIRST_TAG..] 
-- See Note [Constructor tag allocation] {- ************************************************************************ * * \subsection[TyCon-instances]{Instance declarations for @TyCon@} * * ************************************************************************ @TyCon@s are compared by comparing their @Unique@s. -} instance Eq TyCon where a == b = getUnique a == getUnique b a /= b = getUnique a /= getUnique b instance Uniquable TyCon where getUnique tc = tyConUnique tc instance Outputable TyCon where -- At the moment a promoted TyCon has the same Name as its -- corresponding TyCon, so we add the quote to distinguish it here ppr tc = pprPromotionQuote tc <> ppr (tyConName tc) <> pp_tc where pp_tc = getPprStyle $ \sty -> getPprDebug $ \debug -> if ((debug || dumpStyle sty) && isTcTyCon tc) then text "[tc]" else empty tyConFlavour :: TyCon -> TyConFlavour TyCon tyConFlavour (TyCon { tyConDetails = details }) | AlgTyCon { algTcFlavour = parent, algTcRhs = rhs } <- details = case parent of ClassTyCon {} -> ClassFlavour _ -> case rhs of TupleTyCon { tup_sort = sort } -> TupleFlavour (tupleSortBoxity sort) SumTyCon {} -> SumFlavour DataTyCon {} -> DataTypeFlavour NewTyCon {} -> NewtypeFlavour AbstractTyCon {} -> AbstractTypeFlavour | FamilyTyCon { famTcFlav = flav, famTcParent = parent } <- details = case flav of DataFamilyTyCon{} -> OpenFamilyFlavour IAmData parent OpenSynFamilyTyCon -> OpenFamilyFlavour IAmType parent ClosedSynFamilyTyCon{} -> ClosedTypeFamilyFlavour AbstractClosedSynFamilyTyCon -> ClosedTypeFamilyFlavour BuiltInSynFamTyCon{} -> ClosedTypeFamilyFlavour | SynonymTyCon {} <- details = TypeSynonymFlavour | PrimTyCon {} <- details = BuiltInTypeFlavour | PromotedDataCon {} <- details = PromotedDataConFlavour | TcTyCon { tctc_flavour = flav } <-details = flav -- | Can this flavour of 'TyCon' appear unsaturated? 
tcFlavourMustBeSaturated :: TyConFlavour tc -> Bool tcFlavourMustBeSaturated ClassFlavour = False tcFlavourMustBeSaturated DataTypeFlavour = False tcFlavourMustBeSaturated NewtypeFlavour = False tcFlavourMustBeSaturated TupleFlavour{} = False tcFlavourMustBeSaturated SumFlavour = False tcFlavourMustBeSaturated AbstractTypeFlavour {} = False tcFlavourMustBeSaturated BuiltInTypeFlavour = False tcFlavourMustBeSaturated PromotedDataConFlavour = False tcFlavourMustBeSaturated (OpenFamilyFlavour td _)= case td of { IAmData -> False; IAmType -> True } tcFlavourMustBeSaturated TypeSynonymFlavour = True tcFlavourMustBeSaturated ClosedTypeFamilyFlavour = True -- | Is this flavour of 'TyCon' an open type family or a data family? tcFlavourIsOpen :: TyConFlavour tc -> Bool tcFlavourIsOpen OpenFamilyFlavour{} = True tcFlavourIsOpen ClosedTypeFamilyFlavour = False tcFlavourIsOpen ClassFlavour = False tcFlavourIsOpen DataTypeFlavour = False tcFlavourIsOpen NewtypeFlavour = False tcFlavourIsOpen TupleFlavour{} = False tcFlavourIsOpen SumFlavour = False tcFlavourIsOpen AbstractTypeFlavour {} = False tcFlavourIsOpen BuiltInTypeFlavour = False tcFlavourIsOpen PromotedDataConFlavour = False tcFlavourIsOpen TypeSynonymFlavour = False pprPromotionQuote :: TyCon -> SDoc -- Promoted data constructors already have a tick in their OccName pprPromotionQuote tc = getPprStyle $ \sty -> let name = getOccName tc ticked = isDataKindsPromotedDataCon tc && promTick sty (PromotedItemDataCon name) in if ticked then char '\'' else empty instance NamedThing TyCon where getName = tyConName instance Data.Data TyCon where -- don't traverse? 
toConstr _ = abstractConstr "TyCon" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "TyCon" instance Binary Injectivity where put_ bh NotInjective = putByte bh 0 put_ bh (Injective xs) = putByte bh 1 >> put_ bh xs get bh = do { h <- getByte bh ; case h of 0 -> return NotInjective _ -> do { xs <- get bh ; return (Injective xs) } } -- | Returns whether or not this 'TyCon' is definite, or a hole -- that may be filled in at some later point. See Note [Skolem abstract data] tyConSkolem :: TyCon -> Bool tyConSkolem = isHoleName . tyConName -- Note [Skolem abstract data] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Skolem abstract data arises from data declarations in an hsig file. -- -- The best analogy is to interpret the types declared in signature files as -- elaborating to universally quantified type variables; e.g., -- -- unit p where -- signature H where -- data T -- data S -- module M where -- import H -- f :: (T ~ S) => a -> b -- f x = x -- -- elaborates as (with some fake structural types): -- -- p :: forall t s. { f :: forall a b. t ~ s => a -> b } -- p = { f = \x -> x } -- ill-typed -- -- It is clear that inside p, t ~ s is not provable (and -- if we tried to write a function to cast t to s, that -- would not work), but if we call p @Int @Int, clearly Int ~ Int -- is provable. The skolem variables are all distinct from -- one another, but we can't make assumptions like "f is -- inaccessible", because the skolem variables will get -- instantiated eventually! -- -- Skolem abstractness can apply to "non-abstract" data as well): -- -- unit p where -- signature H1 where -- data T = MkT -- signature H2 where -- data T = MkT -- module M where -- import qualified H1 -- import qualified H2 -- f :: (H1.T ~ H2.T) => a -> b -- f x = x -- -- This is why the test is on the original name of the TyCon, -- not whether it is abstract or not. 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCon.hs-boot0000644000000000000000000000073407346545000021133 0ustar0000000000000000module GHC.Core.TyCon where import GHC.Prelude import GHC.Types.Unique ( Uniquable ) import {-# SOURCE #-} GHC.Types.Name import GHC.Utils.Outputable data TyCon instance Uniquable TyCon instance Outputable TyCon type TyConRepName = Name isNewTyCon :: TyCon -> Bool isTupleTyCon :: TyCon -> Bool isUnboxedTupleTyCon :: TyCon -> Bool tyConRepName_maybe :: TyCon -> Maybe TyConRepName mkPrelTyConRepName :: Name -> TyConRepName tyConName :: TyCon -> Name ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCon/0000755000000000000000000000000007346545000017632 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCon/Env.hs0000644000000000000000000001243507346545000020723 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[TyConEnv]{@TyConEnv@: tyCon environments} -} {-# LANGUAGE ScopedTypeVariables #-} module GHC.Core.TyCon.Env ( -- * TyCon environment (map) TyConEnv, -- ** Manipulating these environments mkTyConEnv, mkTyConEnvWith, emptyTyConEnv, isEmptyTyConEnv, unitTyConEnv, nonDetTyConEnvElts, extendTyConEnv_C, extendTyConEnv_Acc, extendTyConEnv, extendTyConEnvList, extendTyConEnvList_C, filterTyConEnv, anyTyConEnv, plusTyConEnv, plusTyConEnv_C, plusTyConEnv_CD, plusTyConEnv_CD2, alterTyConEnv, lookupTyConEnv, lookupTyConEnv_NF, delFromTyConEnv, delListFromTyConEnv, elemTyConEnv, mapTyConEnv, disjointTyConEnv, DTyConEnv, emptyDTyConEnv, isEmptyDTyConEnv, lookupDTyConEnv, delFromDTyConEnv, filterDTyConEnv, mapDTyConEnv, mapMaybeDTyConEnv, adjustDTyConEnv, alterDTyConEnv, extendDTyConEnv, foldDTyConEnv ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Core.TyCon (TyCon) import GHC.Data.Maybe {- ************************************************************************ * * \subsection{TyCon environment} * * 
************************************************************************ -} -- | TyCon Environment type TyConEnv a = UniqFM TyCon a -- Domain is TyCon emptyTyConEnv :: TyConEnv a isEmptyTyConEnv :: TyConEnv a -> Bool mkTyConEnv :: [(TyCon,a)] -> TyConEnv a mkTyConEnvWith :: (a -> TyCon) -> [a] -> TyConEnv a nonDetTyConEnvElts :: TyConEnv a -> [a] alterTyConEnv :: (Maybe a-> Maybe a) -> TyConEnv a -> TyCon -> TyConEnv a extendTyConEnv_C :: (a->a->a) -> TyConEnv a -> TyCon -> a -> TyConEnv a extendTyConEnv_Acc :: (a->b->b) -> (a->b) -> TyConEnv b -> TyCon -> a -> TyConEnv b extendTyConEnv :: TyConEnv a -> TyCon -> a -> TyConEnv a plusTyConEnv :: TyConEnv a -> TyConEnv a -> TyConEnv a plusTyConEnv_C :: (a->a->a) -> TyConEnv a -> TyConEnv a -> TyConEnv a plusTyConEnv_CD :: (a->a->a) -> TyConEnv a -> a -> TyConEnv a -> a -> TyConEnv a plusTyConEnv_CD2 :: (Maybe a->Maybe a->a) -> TyConEnv a -> TyConEnv a -> TyConEnv a extendTyConEnvList :: TyConEnv a -> [(TyCon,a)] -> TyConEnv a extendTyConEnvList_C :: (a->a->a) -> TyConEnv a -> [(TyCon,a)] -> TyConEnv a delFromTyConEnv :: TyConEnv a -> TyCon -> TyConEnv a delListFromTyConEnv :: TyConEnv a -> [TyCon] -> TyConEnv a elemTyConEnv :: TyCon -> TyConEnv a -> Bool unitTyConEnv :: TyCon -> a -> TyConEnv a lookupTyConEnv :: TyConEnv a -> TyCon -> Maybe a lookupTyConEnv_NF :: TyConEnv a -> TyCon -> a filterTyConEnv :: (elt -> Bool) -> TyConEnv elt -> TyConEnv elt anyTyConEnv :: (elt -> Bool) -> TyConEnv elt -> Bool mapTyConEnv :: (elt1 -> elt2) -> TyConEnv elt1 -> TyConEnv elt2 disjointTyConEnv :: TyConEnv a -> TyConEnv a -> Bool nonDetTyConEnvElts x = nonDetEltsUFM x emptyTyConEnv = emptyUFM isEmptyTyConEnv = isNullUFM unitTyConEnv x y = unitUFM x y extendTyConEnv x y z = addToUFM x y z extendTyConEnvList x l = addListToUFM x l lookupTyConEnv x y = lookupUFM x y alterTyConEnv = alterUFM mkTyConEnv l = listToUFM l mkTyConEnvWith f = mkTyConEnv . 
map (\a -> (f a, a)) elemTyConEnv x y = elemUFM x y plusTyConEnv x y = plusUFM x y plusTyConEnv_C f x y = plusUFM_C f x y plusTyConEnv_CD f x d y b = plusUFM_CD f x d y b plusTyConEnv_CD2 f x y = plusUFM_CD2 f x y extendTyConEnv_C f x y z = addToUFM_C f x y z mapTyConEnv f x = mapUFM f x extendTyConEnv_Acc x y z a b = addToUFM_Acc x y z a b extendTyConEnvList_C x y z = addListToUFM_C x y z delFromTyConEnv x y = delFromUFM x y delListFromTyConEnv x y = delListFromUFM x y filterTyConEnv x y = filterUFM x y anyTyConEnv f x = nonDetFoldUFM ((||) . f) False x disjointTyConEnv x y = disjointUFM x y lookupTyConEnv_NF env n = expectJust "lookupTyConEnv_NF" (lookupTyConEnv env n) -- | Deterministic TyCon Environment -- -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why -- we need DTyConEnv. type DTyConEnv a = UniqDFM TyCon a emptyDTyConEnv :: DTyConEnv a emptyDTyConEnv = emptyUDFM isEmptyDTyConEnv :: DTyConEnv a -> Bool isEmptyDTyConEnv = isNullUDFM lookupDTyConEnv :: DTyConEnv a -> TyCon -> Maybe a lookupDTyConEnv = lookupUDFM delFromDTyConEnv :: DTyConEnv a -> TyCon -> DTyConEnv a delFromDTyConEnv = delFromUDFM filterDTyConEnv :: (a -> Bool) -> DTyConEnv a -> DTyConEnv a filterDTyConEnv = filterUDFM mapDTyConEnv :: (a -> b) -> DTyConEnv a -> DTyConEnv b mapDTyConEnv = mapUDFM mapMaybeDTyConEnv :: (a -> Maybe b) -> DTyConEnv a -> DTyConEnv b mapMaybeDTyConEnv = mapMaybeUDFM adjustDTyConEnv :: (a -> a) -> DTyConEnv a -> TyCon -> DTyConEnv a adjustDTyConEnv = adjustUDFM alterDTyConEnv :: (Maybe a -> Maybe a) -> DTyConEnv a -> TyCon -> DTyConEnv a alterDTyConEnv = alterUDFM extendDTyConEnv :: DTyConEnv a -> TyCon -> a -> DTyConEnv a extendDTyConEnv = addToUDFM foldDTyConEnv :: (elt -> a -> a) -> a -> DTyConEnv elt -> a foldDTyConEnv = foldUDFM ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/TyCon/RecWalk.hs0000644000000000000000000000632207346545000021521 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, 
Glasgow University, 1992-1998 Check for recursive type constructors. -} module GHC.Core.TyCon.RecWalk ( -- * Recursion breaking RecTcChecker, initRecTc, defaultRecTcMaxBound, setRecTcMaxBound, checkRecTc ) where import GHC.Prelude import GHC.Core.TyCon import GHC.Core.TyCon.Env import GHC.Utils.Outputable {- ************************************************************************ * * Walking over recursive TyCons * * ************************************************************************ Note [Expanding newtypes and products] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When expanding a type to expose a data-type constructor, we need to be careful about newtypes, lest we fall into an infinite loop. Here are the key examples: newtype Id x = MkId x newtype Fix f = MkFix (f (Fix f)) newtype T = MkT (T -> T) Type Expansion -------------------------- T T -> T Fix Maybe Maybe (Fix Maybe) Id (Id Int) Int Fix Id NO NO NO Notice that * We can expand T, even though it's recursive. * We can expand Id (Id Int), even though the Id shows up twice at the outer level, because Id is non-recursive So, when expanding, we keep track of when we've seen a recursive newtype at outermost level; and bail out if we see it again. We sometimes want to do the same for product types, so that the strictness analyser doesn't unbox infinitely deeply. More precisely, we keep a *count* of how many times we've seen it. This is to account for data instance T (a,b) = MkT (T a) (T b) Then (#10482) if we have a type like T (Int,(Int,(Int,(Int,Int)))) we can still unbox deeply enough during strictness analysis. We have to treat T as potentially recursive, but it's still good to be able to unwrap multiple layers. The function that manages all this is checkRecTc. 
-} data RecTcChecker = RC !Int (TyConEnv Int) -- The upper bound, and the number of times -- we have encountered each TyCon instance Outputable RecTcChecker where ppr (RC n env) = text "RC:" <> int n <+> ppr env -- | Initialise a 'RecTcChecker' with 'defaultRecTcMaxBound'. initRecTc :: RecTcChecker initRecTc = RC defaultRecTcMaxBound emptyTyConEnv -- | The default upper bound (100) for the number of times a 'RecTcChecker' is -- allowed to encounter each 'TyCon'. defaultRecTcMaxBound :: Int defaultRecTcMaxBound = 100 -- Should we have a flag for this? -- | Change the upper bound for the number of times a 'RecTcChecker' is allowed -- to encounter each 'TyCon'. setRecTcMaxBound :: Int -> RecTcChecker -> RecTcChecker setRecTcMaxBound new_bound (RC _old_bound rec_nts) = RC new_bound rec_nts checkRecTc :: RecTcChecker -> TyCon -> Maybe RecTcChecker -- Nothing => Recursion detected -- Just rec_tcs => Keep going checkRecTc (RC bound rec_nts) tc = case lookupTyConEnv rec_nts tc of Just n | n >= bound -> Nothing | otherwise -> Just (RC bound (extendTyConEnv rec_nts tc (n+1))) Nothing -> Just (RC bound (extendTyConEnv rec_nts tc 1)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Type.hs0000644000000000000000000041362707346545000020070 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1998 -- -- Type - public interface {-# LANGUAGE FlexibleContexts, PatternSynonyms, ViewPatterns, MultiWayIf, RankNTypes #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- | Main functions for manipulating types and type-related things module GHC.Core.Type ( -- Note some of this is just re-exports from TyCon.. 
-- * Main data types representing Types -- $type_classification -- $representation_types Type, ForAllTyFlag(..), FunTyFlag(..), Specificity(..), KindOrType, PredType, ThetaType, FRRType, Var, TyVar, isTyVar, TyCoVar, PiTyBinder, ForAllTyBinder, TyVarBinder, Mult, Scaled, KnotTied, RuntimeRepType, -- ** Constructing and deconstructing types mkTyVarTy, mkTyVarTys, getTyVar, getTyVar_maybe, repGetTyVar_maybe, getCastedTyVar_maybe, tyVarKind, varType, mkAppTy, mkAppTys, splitAppTy, splitAppTys, splitAppTysNoView, splitAppTy_maybe, splitAppTyNoView_maybe, tcSplitAppTyNoView_maybe, mkFunTy, mkVisFunTy, mkVisFunTyMany, mkVisFunTysMany, mkScaledFunTys, mkInvisFunTy, mkInvisFunTys, tcMkVisFunTy, tcMkScaledFunTys, tcMkInvisFunTy, splitFunTy, splitFunTy_maybe, splitFunTys, funResultTy, funArgTy, funTyConAppTy_maybe, funTyFlagTyCon, tyConAppFunTy_maybe, tyConAppFunCo_maybe, mkFunctionType, mkScaledFunctionTys, chooseFunTyFlag, mkTyConApp, mkTyConTy, tyConAppTyCon_maybe, tyConAppTyConPicky_maybe, tyConAppArgs_maybe, tyConAppTyCon, tyConAppArgs, splitTyConApp_maybe, splitTyConAppNoView_maybe, splitTyConApp, tcSplitTyConApp, tcSplitTyConApp_maybe, mkForAllTy, mkForAllTys, mkInvisForAllTys, mkTyCoInvForAllTys, mkSpecForAllTy, mkSpecForAllTys, mkVisForAllTys, mkTyCoForAllTy, mkTyCoForAllTys, mkTyCoInvForAllTy, mkInfForAllTy, mkInfForAllTys, splitForAllTyCoVars, splitForAllTyVars, splitForAllReqTyBinders, splitForAllInvisTyBinders, splitForAllForAllTyBinders, splitForAllForAllTyBinder_maybe, splitForAllTyCoVar_maybe, splitForAllTyCoVar, splitForAllTyVar_maybe, splitForAllCoVar_maybe, splitPiTy_maybe, splitPiTy, splitPiTys, getRuntimeArgTys, mkTyConBindersPreferAnon, mkPiTy, mkPiTys, piResultTy, piResultTys, applyTysX, dropForAlls, mkFamilyTyConApp, buildSynTyCon, mkNumLitTy, isNumLitTy, mkStrLitTy, isStrLitTy, mkCharLitTy, isCharLitTy, isLitTy, isPredTy, getRuntimeRep, splitRuntimeRep_maybe, kindRep_maybe, kindRep, getLevity, levityType_maybe, mkCastTy, mkCoercionTy, 
splitCastTy_maybe, ErrorMsgType, userTypeError_maybe, deepUserTypeError_maybe, pprUserTypeErrorTy, coAxNthLHS, stripCoercionTy, splitInvisPiTys, splitInvisPiTysN, invisibleTyBndrCount, filterOutInvisibleTypes, filterOutInferredTypes, partitionInvisibleTypes, partitionInvisibles, tyConForAllTyFlags, appTyForAllTyFlags, -- ** Analyzing types TyCoMapper(..), mapTyCo, mapTyCoX, TyCoFolder(..), foldTyCo, noView, -- (Newtypes) newTyConInstRhs, -- ** Binders mkForAllTyBinder, mkForAllTyBinders, mkTyVarBinder, mkTyVarBinders, tyVarSpecToBinders, isAnonPiTyBinder, binderVar, binderVars, binderType, binderFlag, binderFlags, piTyBinderType, namedPiTyBinder_maybe, anonPiTyBinderType_maybe, isVisibleForAllTyFlag, isInvisibleForAllTyFlag, isVisiblePiTyBinder, isInvisiblePiTyBinder, isNamedPiTyBinder, tyConBindersPiTyBinders, -- ** Predicates on types isTyVarTy, isFunTy, isCoercionTy, isCoercionTy_maybe, isForAllTy, isForAllTy_ty, isForAllTy_co, isForAllTy_invis_ty, isPiTy, isTauTy, isFamFreeTy, isAtomicTy, isValidJoinPointType, tyConAppNeedsKindSig, -- * Space-saving construction mkTYPEapp, mkTYPEapp_maybe, mkCONSTRAINTapp, mkCONSTRAINTapp_maybe, mkBoxedRepApp_maybe, mkTupleRepApp_maybe, typeOrConstraintKind, -- *** Levity and boxity sORTKind_maybe, typeTypeOrConstraint, typeLevity, typeLevity_maybe, tyConIsTYPEorCONSTRAINT, isLiftedTypeKind, isUnliftedTypeKind, pickyIsLiftedTypeKind, isLiftedRuntimeRep, isUnliftedRuntimeRep, runtimeRepLevity_maybe, isBoxedRuntimeRep, isLiftedLevity, isUnliftedLevity, isUnliftedType, isBoxedType, isUnboxedTupleType, isUnboxedSumType, kindBoxedRepLevity_maybe, mightBeLiftedType, mightBeUnliftedType, definitelyLiftedType, definitelyUnliftedType, isAlgType, isDataFamilyAppType, isPrimitiveType, isStrictType, isTerminatingType, isLevityTy, isLevityVar, isRuntimeRepTy, isRuntimeRepVar, isRuntimeRepKindedTy, dropRuntimeRepArgs, -- * Multiplicity isMultiplicityTy, isMultiplicityVar, unrestricted, linear, tymult, mkScaled, irrelevantMult, scaledSet, 
pattern OneTy, pattern ManyTy, isOneTy, isManyTy, isLinearType, -- * Main data types representing Kinds Kind, -- ** Finding the kind of a type typeKind, typeHasFixedRuntimeRep, tcIsLiftedTypeKind, isConstraintKind, isConstraintLikeKind, returnsConstraintKind, tcIsBoxedTypeKind, isTypeLikeKind, -- ** Common Kind liftedTypeKind, unliftedTypeKind, -- * Type free variables tyCoFVsOfType, tyCoFVsBndr, tyCoFVsVarBndr, tyCoFVsVarBndrs, tyCoVarsOfType, tyCoVarsOfTypes, tyCoVarsOfTypeDSet, coVarsOfType, coVarsOfTypes, anyFreeVarsOfType, anyFreeVarsOfTypes, noFreeVarsOfType, expandTypeSynonyms, expandSynTyConApp_maybe, typeSize, occCheckExpand, -- ** Closing over kinds closeOverKindsDSet, closeOverKindsList, closeOverKinds, -- * Well-scoped lists of variables scopedSort, tyCoVarsOfTypeWellScoped, tyCoVarsOfTypesWellScoped, -- * Forcing evaluation of types seqType, seqTypes, -- * Other views onto Types coreView, coreFullView, rewriterView, tyConsOfType, -- * Main type substitution data types TvSubstEnv, -- Representation widely visible IdSubstEnv, Subst(..), -- Representation visible to a few friends -- ** Manipulating type substitutions emptyTvSubstEnv, emptySubst, mkEmptySubst, mkTCvSubst, zipTvSubst, mkTvSubstPrs, zipTCvSubst, notElemSubst, getTvSubstEnv, zapSubst, getSubstInScope, setInScope, getSubstRangeTyCoFVs, extendSubstInScope, extendSubstInScopeList, extendSubstInScopeSet, extendTCvSubst, extendCvSubst, extendTvSubst, extendTvSubstList, extendTvSubstAndInScope, extendTCvSubstList, extendTvSubstWithClone, extendTCvSubstWithClone, isInScope, composeTCvSubst, zipTyEnv, zipCoEnv, isEmptySubst, unionSubst, isEmptyTCvSubst, -- ** Performing substitution on types and kinds substTy, substTys, substScaledTy, substScaledTys, substTyWith, substTysWith, substTheta, substTyAddInScope, substTyUnchecked, substTysUnchecked, substScaledTyUnchecked, substScaledTysUnchecked, substThetaUnchecked, substTyWithUnchecked, substCo, substCoUnchecked, substCoWithUnchecked, substTyVarBndr, 
substTyVarBndrs, substTyVar, substTyVars, substVarBndr, substVarBndrs, substTyCoBndr, substTyVarToTyVar, cloneTyVarBndr, cloneTyVarBndrs, lookupTyVar, -- * Tidying type related things up for printing tidyType, tidyTypes, tidyOpenType, tidyOpenTypes, tidyOpenTypeX, tidyOpenTypesX, tidyVarBndr, tidyVarBndrs, tidyFreeTyCoVars, tidyFreeTyCoVarX, tidyFreeTyCoVarsX, tidyTyCoVarOcc, tidyTopType, tidyForAllTyBinder, tidyForAllTyBinders, -- * Kinds isTYPEorCONSTRAINT, isConcreteType, isFixedRuntimeRepKind, ) where import GHC.Prelude import GHC.Types.Basic -- We import the representation and primitive functions from GHC.Core.TyCo.Rep. -- Many things are reexported, but not the representation! import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst import GHC.Core.TyCo.Tidy import GHC.Core.TyCo.FVs -- friends: import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Core.TyCon import GHC.Builtin.Types.Prim import {-# SOURCE #-} GHC.Builtin.Types ( charTy, naturalTy , typeSymbolKind, liftedTypeKind, unliftedTypeKind , constraintKind, zeroBitTypeKind , manyDataConTy, oneDataConTy , liftedRepTy, unliftedRepTy, zeroBitRepTy ) import GHC.Types.Name( Name ) import GHC.Builtin.Names import GHC.Core.Coercion.Axiom import {-# SOURCE #-} GHC.Core.Coercion ( mkNomReflCo, mkGReflCo, mkReflCo , mkTyConAppCo, mkAppCo , mkForAllCo, mkFunCo2, mkAxiomCo, mkUnivCo , mkSymCo, mkTransCo, mkSelCo, mkLRCo, mkInstCo , mkKindCo, mkSubCo, mkFunCo, funRole , decomposePiCos, coercionKind , coercionRKind, coercionType , isReflexiveCo, seqCo , topNormaliseNewType_maybe ) import {-# SOURCE #-} GHC.Tc.Utils.TcType ( isConcreteTyVar ) -- others import GHC.Utils.Misc import GHC.Utils.FV import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Data.Maybe ( orElse, isJust, firstJust ) -- $type_classification -- #type_classification# -- -- Types are any, but at least one, of: -- -- [Boxed] Iff its representation is a pointer to an object on the -- GC'd heap. 
Operationally, heap objects can be entered as -- a means of evaluation. -- -- [Lifted] Iff it has bottom as an element: An instance of a -- lifted type might diverge when evaluated. -- GHC Haskell's unboxed types are unlifted. -- An unboxed, but lifted type is not very useful. -- (Example: A byte-represented type, where evaluating 0xff -- computes the 12345678th collatz number modulo 0xff.) -- Only lifted types may be unified with a type variable. -- -- [Algebraic] Iff it is a type with one or more constructors, whether -- declared with @data@ or @newtype@. -- An algebraic type is one that can be deconstructed -- with a case expression. There are algebraic types that -- are not lifted types, like unlifted data types or -- unboxed tuples. -- -- [Data] Iff it is a type declared with @data@, or a boxed tuple. -- There are also /unlifted/ data types. -- -- [Primitive] Iff it is a built-in type that can't be expressed in Haskell. -- -- [Unlifted] Anything that isn't lifted is considered unlifted. -- -- Currently, all primitive types are unlifted, but that's not necessarily -- the case: for example, @Int@ could be primitive. -- -- Some primitive types are unboxed, such as @Int#@, whereas some are boxed -- but unlifted (such as @ByteArray#@). The only primitive types that we -- classify as algebraic are the unboxed tuples. -- -- Some examples of type classifications that may make this a bit clearer are: -- -- @ -- Type primitive boxed lifted algebraic -- ----------------------------------------------------------------------------- -- Int# Yes No No No -- ByteArray# Yes Yes No No -- (\# a, b \#) Yes No No Yes -- (\# a | b \#) Yes No No Yes -- ( a, b ) No Yes Yes Yes -- [a] No Yes Yes Yes -- @ -- $representation_types -- A /source type/ is a type that is a separate type as far as the type checker is -- concerned, but which has a more low-level representation as far as Core-to-Core -- passes and the rest of the back end is concerned. 
-- -- You don't normally have to worry about this, as the utility functions in -- this module will automatically convert a source into a representation type -- if they are spotted, to the best of its abilities. If you don't want this -- to happen, use the equivalent functions from the "TcType" module. {- ************************************************************************ * * Type representation * * ************************************************************************ -} rewriterView :: Type -> Maybe Type -- Unwrap a type synonym only when either: -- The type synonym is forgetful, or -- the type synonym mentions a type family in its expansion -- See Note [Rewriting synonyms] {-# INLINE rewriterView #-} rewriterView (TyConApp tc tys) | isTypeSynonymTyCon tc , isForgetfulSynTyCon tc || not (isFamFreeTyCon tc) = expandSynTyConApp_maybe tc tys rewriterView _other = Nothing coreView :: Type -> Maybe Type -- ^ This function strips off the /top layer only/ of a type synonym -- application (if any) its underlying representation type. -- Returns 'Nothing' if there is nothing to look through. -- -- This function does not look through type family applications. -- -- By being non-recursive and inlined, this case analysis gets efficiently -- joined onto the case analysis that the caller is already doing coreView (TyConApp tc tys) = expandSynTyConApp_maybe tc tys coreView _ = Nothing -- See Note [Inlining coreView]. {-# INLINE coreView #-} coreFullView, core_full_view :: Type -> Type -- ^ Iterates 'coreView' until there is no more to synonym to expand. -- NB: coreFullView is non-recursive and can be inlined; -- core_full_view is the recursive one -- See Note [Inlining coreView]. 
coreFullView ty@(TyConApp tc _) | isTypeSynonymTyCon tc = core_full_view ty coreFullView ty = ty {-# INLINE coreFullView #-} core_full_view ty | Just ty' <- coreView ty = core_full_view ty' | otherwise = ty ----------------------------------------------- -- | @expandSynTyConApp_maybe tc tys@ expands the RHS of type synonym @tc@ -- instantiated at arguments @tys@, or returns 'Nothing' if @tc@ is not a -- synonym. expandSynTyConApp_maybe :: TyCon -> [Type] -> Maybe Type {-# INLINE expandSynTyConApp_maybe #-} -- This INLINE will inline the call to expandSynTyConApp_maybe in coreView, -- which will eliminate the allocation Just/Nothing in the result -- Don't be tempted to make `expand_syn` (which is NOINLINE) return the -- Just/Nothing, else you'll increase allocation expandSynTyConApp_maybe tc arg_tys | Just (tvs, rhs) <- synTyConDefn_maybe tc , arg_tys `saturates` tyConArity tc = Just $! (expand_syn tvs rhs arg_tys) -- Why strict application? Because every client of this function will evaluat -- that (expand_syn ...) thunk, so it's more efficient not to build a thunk. -- Mind you, this function is always INLINEd, so the client context is probably -- enough to avoid thunk construction and so the $! is just belt-and-braces. | otherwise = Nothing saturates :: [Type] -> Arity -> Bool saturates _ 0 = True saturates [] _ = False saturates (_:tys) n = assert( n >= 0 ) $ saturates tys (n-1) -- Arities are always positive; the assertion just checks -- that, to avoid an infinite loop in the bad case -- | A helper for 'expandSynTyConApp_maybe' to avoid inlining this cold path -- into call-sites. -- -- Precondition: the call is saturated or over-saturated; -- i.e. length tvs <= length arg_tys expand_syn :: [TyVar] -- ^ the variables bound by the synonym -> Type -- ^ the RHS of the synonym -> [Type] -- ^ the type arguments the synonym is instantiated at. -> Type {-# NOINLINE expand_syn #-} -- We never want to inline this cold-path. 
expand_syn tvs rhs arg_tys -- No substitution necessary if either tvs or tys is empty -- This is both more efficient, and steers clear of an infinite -- loop; see Note [Care using synonyms to compress types] | null arg_tys = assert (null tvs) rhs | null tvs = mkAppTys rhs arg_tys | otherwise = go empty_subst tvs arg_tys where empty_subst = mkEmptySubst in_scope in_scope = mkInScopeSet $ shallowTyCoVarsOfTypes $ arg_tys -- The free vars of 'rhs' should all be bound by 'tenv', -- so we only need the free vars of tys -- See also Note [The substitution invariant] in GHC.Core.TyCo.Subst. go subst [] tys | null tys = rhs' -- Exactly Saturated | otherwise = mkAppTys rhs' tys -- Its important to use mkAppTys, rather than (foldl AppTy), -- because the function part might well return a -- partially-applied type constructor; indeed, usually will! where rhs' = substTy subst rhs go subst (tv:tvs) (ty:tys) = go (extendTvSubst subst tv ty) tvs tys go _ (_:_) [] = pprPanic "expand_syn" (ppr tvs $$ ppr rhs $$ ppr arg_tys) -- Under-saturated, precondition failed {- Note [Inlining coreView] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is very common to have a function f :: Type -> ... f ty | Just ty' <- coreView ty = f ty' f (TyVarTy ...) = ... f ... = ... If f is not otherwise recursive, the initial call to coreView causes f to become recursive, which kills the possibility of inlining. Instead, for non-recursive functions, we prefer to use coreFullView, which guarantees to unwrap top-level type synonyms. It can be inlined and is efficient and non-allocating in its fast path. For this to really be fast, all calls made on its fast path must also be inlined, linked back to this Note. -} {- ********************************************************************* * * expandTypeSynonyms * * ********************************************************************* -} expandTypeSynonyms :: Type -> Type -- ^ Expand out all type synonyms. 
Actually, it'd suffice to expand out -- just the ones that discard type variables (e.g. type Funny a = Int) -- But we don't know which those are currently, so we just expand all. -- -- 'expandTypeSynonyms' only expands out type synonyms mentioned in the type, -- not in the kinds of any TyCon or TyVar mentioned in the type. -- -- Keep this synchronized with 'synonymTyConsOfType' expandTypeSynonyms ty = go (mkEmptySubst in_scope) ty where in_scope = mkInScopeSet (tyCoVarsOfType ty) go subst (TyConApp tc tys) | ExpandsSyn tenv rhs tys' <- expandSynTyCon_maybe tc expanded_tys = let subst' = mkTvSubst in_scope (mkVarEnv tenv) -- Make a fresh substitution; rhs has nothing to -- do with anything that has happened so far -- NB: if you make changes here, be sure to build an -- /idempotent/ substitution, even in the nested case -- type T a b = a -> b -- type S x y = T y x -- (#11665) in mkAppTys (go subst' rhs) tys' | otherwise = TyConApp tc expanded_tys where expanded_tys = (map (go subst) tys) go _ (LitTy l) = LitTy l go subst (TyVarTy tv) = substTyVar subst tv go subst (AppTy t1 t2) = mkAppTy (go subst t1) (go subst t2) go subst ty@(FunTy _ mult arg res) = ty { ft_mult = go subst mult, ft_arg = go subst arg, ft_res = go subst res } go subst (ForAllTy (Bndr tv vis) t) = let (subst', tv') = substVarBndrUsing go subst tv in ForAllTy (Bndr tv' vis) (go subst' t) go subst (CastTy ty co) = mkCastTy (go subst ty) (go_co subst co) go subst (CoercionTy co) = mkCoercionTy (go_co subst co) go_mco _ MRefl = MRefl go_mco subst (MCo co) = MCo (go_co subst co) go_co subst (Refl ty) = mkNomReflCo (go subst ty) go_co subst (GRefl r ty mco) = mkGReflCo r (go subst ty) (go_mco subst mco) -- NB: coercions are always expanded upon creation go_co subst (TyConAppCo r tc args) = mkTyConAppCo r tc (map (go_co subst) args) go_co subst (AppCo co arg) = mkAppCo (go_co subst co) (go_co subst arg) go_co subst (ForAllCo { fco_tcv = tv, fco_visL = visL, fco_visR = visR , fco_kind = kind_co, fco_body = 
co }) = let (subst', tv', kind_co') = go_cobndr subst tv kind_co in mkForAllCo tv' visL visR kind_co' (go_co subst' co) go_co subst (FunCo r afl afr w co1 co2) = mkFunCo2 r afl afr (go_co subst w) (go_co subst co1) (go_co subst co2) go_co subst (CoVarCo cv) = substCoVar subst cv go_co subst (AxiomCo ax cs) = mkAxiomCo ax (map (go_co subst) cs) go_co subst co@(UnivCo { uco_lty = lty, uco_rty = rty }) = co { uco_lty = go subst lty, uco_rty = go subst rty } go_co subst (SymCo co) = mkSymCo (go_co subst co) go_co subst (TransCo co1 co2) = mkTransCo (go_co subst co1) (go_co subst co2) go_co subst (SelCo n co) = mkSelCo n (go_co subst co) go_co subst (LRCo lr co) = mkLRCo lr (go_co subst co) go_co subst (InstCo co arg) = mkInstCo (go_co subst co) (go_co subst arg) go_co subst (KindCo co) = mkKindCo (go_co subst co) go_co subst (SubCo co) = mkSubCo (go_co subst co) go_co _ (HoleCo h) = pprPanic "expandTypeSynonyms hit a hole" (ppr h) -- the "False" and "const" are to accommodate the type of -- substForAllCoBndrUsing, which is general enough to -- handle coercion optimization (which sometimes swaps the -- order of a coercion) go_cobndr subst = substForAllCoBndrUsing NotSwapped (go_co subst) subst {- Notes on type synonyms ~~~~~~~~~~~~~~~~~~~~~~~~~ The various "split" functions (splitFunTy, splitRhoTy, splitForAllTy) try to return type synonyms wherever possible. Thus type Foo a = a -> a we want splitFunTys (a -> Foo a) = ([a], Foo a) not ([a], a -> a) The reason is that we then get better (shorter) type signatures in interfaces. Notably this plays a role in tcTySigs in GHC.Tc.Gen.Bind. -} {- ********************************************************************* * * Random functions (todo: organise) * * ********************************************************************* -} -- | An INLINE helper for function such as 'kindRep_maybe' below. 
-- -- @isTyConKeyApp_maybe key ty@ returns @Just tys@ iff -- the type @ty = T tys@, where T's unique = key -- key must not be `fUNTyConKey`; to test for functions, use `splitFunTy_maybe`. -- Thanks to this fact, we don't have to pattern match on `FunTy` here. isTyConKeyApp_maybe :: Unique -> Type -> Maybe [Type] isTyConKeyApp_maybe key ty | TyConApp tc args <- coreFullView ty , tc `hasKey` key = Just args | otherwise = Nothing {-# INLINE isTyConKeyApp_maybe #-} -- | Extract the RuntimeRep classifier of a type from its kind. For example, -- @kindRep * = LiftedRep@; Panics if this is not possible. -- Treats * and Constraint as the same kindRep :: HasDebugCallStack => Kind -> RuntimeRepType kindRep k = case kindRep_maybe k of Just r -> r Nothing -> pprPanic "kindRep" (ppr k) -- | Given a kind (TYPE rr) or (CONSTRAINT rr), extract its RuntimeRep classifier rr. -- For example, @kindRep_maybe * = Just LiftedRep@ -- Returns 'Nothing' if the kind is not of form (TYPE rr) kindRep_maybe :: HasDebugCallStack => Kind -> Maybe RuntimeRepType kindRep_maybe kind | Just (_, rep) <- sORTKind_maybe kind = Just rep | otherwise = Nothing -- | Returns True if the argument is (lifted) Type or Constraint -- See Note [TYPE and CONSTRAINT] in GHC.Builtin.Types.Prim isLiftedTypeKind :: Kind -> Bool isLiftedTypeKind kind = case kindRep_maybe kind of Just rep -> isLiftedRuntimeRep rep Nothing -> False -- | Returns True if the kind classifies unlifted types (like 'Int#') and False -- otherwise. Note that this returns False for representation-polymorphic -- kinds, which may be specialized to a kind that classifies unlifted types. 
isUnliftedTypeKind :: Kind -> Bool isUnliftedTypeKind kind = case kindRep_maybe kind of Just rep -> isUnliftedRuntimeRep rep Nothing -> False pickyIsLiftedTypeKind :: Kind -> Bool -- Checks whether the kind is literally -- TYPE LiftedRep -- or TYPE ('BoxedRep 'Lifted) -- or Type -- without expanding type synonyms or anything -- Used only when deciding whether to suppress the ":: *" in -- (a :: *) when printing kinded type variables -- See Note [Suppressing * kinds] in GHC.Core.TyCo.Ppr pickyIsLiftedTypeKind kind | TyConApp tc [arg] <- kind , tc `hasKey` tYPETyConKey , TyConApp rr_tc rr_args <- arg = case rr_args of [] -> rr_tc `hasKey` liftedRepTyConKey [rr_arg] | rr_tc `hasKey` boxedRepDataConKey , TyConApp lev [] <- rr_arg , lev `hasKey` liftedDataConKey -> True _ -> False | TyConApp tc [] <- kind , tc `hasKey` liftedTypeKindTyConKey = True | otherwise = False -- | Check whether a kind is of the form `TYPE (BoxedRep Lifted)` -- or `TYPE (BoxedRep Unlifted)`. -- -- Returns: -- -- - `Just Lifted` for `TYPE (BoxedRep Lifted)` and `Type`, -- - `Just Unlifted` for `TYPE (BoxedRep Unlifted)` and `UnliftedType`, -- - `Nothing` for anything else, e.g. `TYPE IntRep`, `TYPE (BoxedRep l)`, etc. kindBoxedRepLevity_maybe :: Type -> Maybe Levity kindBoxedRepLevity_maybe ty | Just rep <- kindRep_maybe ty , isBoxedRuntimeRep rep = runtimeRepLevity_maybe rep | otherwise = Nothing -- | Check whether a type of kind 'RuntimeRep' is lifted. -- -- 'isLiftedRuntimeRep' is: -- -- * True of @LiftedRep :: RuntimeRep@ -- * False of type variables, type family applications, -- and of other reps such as @IntRep :: RuntimeRep@. isLiftedRuntimeRep :: RuntimeRepType -> Bool isLiftedRuntimeRep rep = runtimeRepLevity_maybe rep == Just Lifted -- | Check whether a type of kind 'RuntimeRep' is unlifted. -- -- * True of definitely unlifted 'RuntimeRep's such as -- 'UnliftedRep', 'IntRep', 'FloatRep', ... -- * False of 'LiftedRep', -- * False for type variables and type family applications. 
isUnliftedRuntimeRep :: RuntimeRepType -> Bool isUnliftedRuntimeRep rep = runtimeRepLevity_maybe rep == Just Unlifted -- | An INLINE helper for functions such as 'isLiftedLevity' and 'isUnliftedLevity'. -- -- Checks whether the type is a nullary 'TyCon' application, -- for a 'TyCon' with the given 'Unique'. isNullaryTyConKeyApp :: Unique -> Type -> Bool isNullaryTyConKeyApp key ty | Just args <- isTyConKeyApp_maybe key ty = assert (null args) True | otherwise = False {-# INLINE isNullaryTyConKeyApp #-} isLiftedLevity :: Type -> Bool isLiftedLevity = isNullaryTyConKeyApp liftedDataConKey isUnliftedLevity :: Type -> Bool isUnliftedLevity = isNullaryTyConKeyApp unliftedDataConKey -- | Is this the type 'Levity'? isLevityTy :: Type -> Bool isLevityTy = isNullaryTyConKeyApp levityTyConKey -- | Is this the type 'RuntimeRep'? isRuntimeRepTy :: Type -> Bool isRuntimeRepTy = isNullaryTyConKeyApp runtimeRepTyConKey -- | Is a tyvar of type 'RuntimeRep'? isRuntimeRepVar :: TyVar -> Bool isRuntimeRepVar = isRuntimeRepTy . tyVarKind -- | Is a tyvar of type 'Levity'? isLevityVar :: TyVar -> Bool isLevityVar = isLevityTy . tyVarKind -- | Is this the type 'Multiplicity'? isMultiplicityTy :: Type -> Bool isMultiplicityTy = isNullaryTyConKeyApp multiplicityTyConKey -- | Is a tyvar of type 'Multiplicity'? isMultiplicityVar :: TyVar -> Bool isMultiplicityVar = isMultiplicityTy . tyVarKind -------------------------------------------- -- Splitting RuntimeRep -------------------------------------------- -- | (splitRuntimeRep_maybe rr) takes a Type rr :: RuntimeRep, and -- returns the (TyCon,[Type]) for the RuntimeRep, if possible, where -- the TyCon is one of the promoted DataCons of RuntimeRep. 
-- Remember: the unique on TyCon that is a a promoted DataCon is the -- same as the unique on the DataCon -- See Note [Promoted data constructors] in GHC.Core.TyCon -- May not be possible if `rr` is a type variable or type -- family application splitRuntimeRep_maybe :: RuntimeRepType -> Maybe (TyCon, [Type]) splitRuntimeRep_maybe rep | TyConApp rr_tc args <- coreFullView rep , isPromotedDataCon rr_tc -- isPromotedDataCon: be careful of type families (F tys) :: RuntimeRep, = Just (rr_tc, args) | otherwise = Nothing -- | See 'isBoxedRuntimeRep_maybe'. isBoxedRuntimeRep :: RuntimeRepType -> Bool isBoxedRuntimeRep rep = isJust (isBoxedRuntimeRep_maybe rep) -- | `isBoxedRuntimeRep_maybe (rep :: RuntimeRep)` returns `Just lev` if `rep` -- expands to `Boxed lev` and returns `Nothing` otherwise. -- -- Types with this runtime rep are represented by pointers on the GC'd heap. isBoxedRuntimeRep_maybe :: RuntimeRepType -> Maybe LevityType isBoxedRuntimeRep_maybe rep | Just (rr_tc, args) <- splitRuntimeRep_maybe rep , rr_tc `hasKey` boxedRepDataConKey , [lev] <- args = Just lev | otherwise = Nothing -- | Check whether a type (usually of kind 'RuntimeRep') is lifted, unlifted, -- or unknown. Returns Nothing if the type isn't of kind 'RuntimeRep'. -- -- `runtimeRepLevity_maybe rr` returns: -- -- * `Just Lifted` if `rr` is `LiftedRep :: RuntimeRep` -- * `Just Unlifted` if `rr` is definitely unlifted, e.g. `IntRep` -- * `Nothing` if not known (e.g. it's a type variable or a type family application). runtimeRepLevity_maybe :: RuntimeRepType -> Maybe Levity runtimeRepLevity_maybe rep | Just (rr_tc, args) <- splitRuntimeRep_maybe rep = -- NB: args might be non-empty e.g. 
TupleRep [r1, .., rn] if (rr_tc `hasKey` boxedRepDataConKey) then case args of [lev] -> levityType_maybe lev _ -> Nothing -- Type isn't of kind RuntimeRep -- The latter case happens via the call to isLiftedRuntimeRep -- in GHC.Tc.Errors.Ppr.pprMismatchMsg (#22742) else Just Unlifted -- Avoid searching all the unlifted RuntimeRep type cons -- In the RuntimeRep data type, only LiftedRep is lifted | otherwise = Nothing -------------------------------------------- -- Splitting Levity -------------------------------------------- -- | `levityType_maybe` takes a Type of kind Levity, and returns its levity -- May not be possible for a type variable or type family application levityType_maybe :: LevityType -> Maybe Levity levityType_maybe lev | TyConApp lev_tc args <- coreFullView lev = if | lev_tc `hasKey` liftedDataConKey -> assert( null args) $ Just Lifted | lev_tc `hasKey` unliftedDataConKey -> assert( null args) $ Just Unlifted | otherwise -> Nothing | otherwise = Nothing {- ********************************************************************* * * mapType * * ************************************************************************ These functions do a map-like operation over types, performing some operation on all variables and binding sites. Primarily used for zonking. Note [Efficiency for ForAllCo case of mapTyCoX] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As noted in Note [ForAllCo] in GHC.Core.TyCo.Rep, a ForAllCo is a bit redundant. It stores a TyCoVar and a Coercion, where the kind of the TyCoVar always matches the left-hand kind of the coercion. This is convenient lots of the time, but not when mapping a function over a coercion. The problem is that tcm_tybinder will affect the TyCoVar's kind and mapCoercion will affect the Coercion, and we hope that the results will be the same. Even if they are the same (which should generally happen with correct algorithms), then there is an efficiency issue. 
In particular, this problem seems to make what should be a linear algorithm into a potentially exponential one. But it's only going to be bad in the case where there's lots of foralls in the kinds of other foralls. Like this: forall a : (forall b : (forall c : ...). ...). ... This construction seems unlikely. So we'll do the inefficient, easy way for now. Note [Specialising mappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ These INLINE pragmas are indispensable. mapTyCo and mapTyCoX are used to implement zonking, and it's vital that they get specialised to the TcM monad and the particular mapper in use. Even specialising to the monad alone made a 20% allocation difference in perf/compiler/T5030. See Note [Specialising foldType] in "GHC.Core.TyCo.Rep" for more details of this idiom. -} -- | This describes how a "map" operation over a type/coercion should behave data TyCoMapper env m = TyCoMapper { tcm_tyvar :: env -> TyVar -> m Type , tcm_covar :: env -> CoVar -> m Coercion , tcm_hole :: env -> CoercionHole -> m Coercion -- ^ What to do with coercion holes. -- See Note [Coercion holes] in "GHC.Core.TyCo.Rep". , tcm_tycobinder :: forall r. env -> TyCoVar -> ForAllTyFlag -> (env -> TyCoVar -> m r) -> m r -- ^ The returned env is used in the extended scope , tcm_tycon :: TyCon -> m TyCon -- ^ This is used only for TcTyCons -- a) To zonk TcTyCons -- b) To turn TcTyCons into TyCons. -- See Note [Type checking recursive type and class declarations] -- in "GHC.Tc.TyCl" } {-# INLINE mapTyCo #-} -- See Note [Specialising mappers] mapTyCo :: Monad m => TyCoMapper () m -> ( Type -> m Type , [Type] -> m [Type] , Coercion -> m Coercion , [Coercion] -> m [Coercion] ) mapTyCo mapper = case mapTyCoX mapper of (go_ty, go_tys, go_co, go_cos) -> (go_ty (), go_tys (), go_co (), go_cos ()) {-# INLINE mapTyCoX #-} -- See Note [Specialising mappers] mapTyCoX :: forall m env. 
Monad m => TyCoMapper env m -> ( env -> Type -> m Type , env -> [Type] -> m [Type] , env -> Coercion -> m Coercion , env -> [Coercion] -> m [Coercion] ) mapTyCoX (TyCoMapper { tcm_tyvar = tyvar , tcm_tycobinder = tycobinder , tcm_tycon = tycon , tcm_covar = covar , tcm_hole = cohole }) = (go_ty, go_tys, go_co, go_cos) where -- See Note [Use explicit recursion in mapTyCo] go_tys !_ [] = return [] go_tys !env (ty:tys) = (:) <$> go_ty env ty <*> go_tys env tys go_ty !env (TyVarTy tv) = tyvar env tv go_ty !env (AppTy t1 t2) = mkAppTy <$> go_ty env t1 <*> go_ty env t2 go_ty !_ ty@(LitTy {}) = return ty go_ty !env (CastTy ty co) = mkCastTy <$> go_ty env ty <*> go_co env co go_ty !env (CoercionTy co) = CoercionTy <$> go_co env co go_ty !env ty@(FunTy _ w arg res) = do { w' <- go_ty env w; arg' <- go_ty env arg; res' <- go_ty env res ; return (ty { ft_mult = w', ft_arg = arg', ft_res = res' }) } go_ty !env ty@(TyConApp tc tys) | isTcTyCon tc = do { tc' <- tycon tc ; mkTyConApp tc' <$> go_tys env tys } -- Not a TcTyCon | null tys -- Avoid allocation in this very = return ty -- common case (E.g. 
Int, LiftedRep etc) | otherwise = mkTyConApp tc <$> go_tys env tys go_ty !env (ForAllTy (Bndr tv vis) inner) = do { tycobinder env tv vis $ \env' tv' -> do ; inner' <- go_ty env' inner ; return $ ForAllTy (Bndr tv' vis) inner' } -- See Note [Use explicit recursion in mapTyCo] go_cos !_ [] = return [] go_cos !env (co:cos) = (:) <$> go_co env co <*> go_cos env cos go_mco !_ MRefl = return MRefl go_mco !env (MCo co) = MCo <$> (go_co env co) go_co :: env -> Coercion -> m Coercion go_co !env (Refl ty) = Refl <$> go_ty env ty go_co !env (GRefl r ty mco) = mkGReflCo r <$> go_ty env ty <*> go_mco env mco go_co !env (AppCo c1 c2) = mkAppCo <$> go_co env c1 <*> go_co env c2 go_co !env (FunCo r afl afr cw c1 c2) = mkFunCo2 r afl afr <$> go_co env cw <*> go_co env c1 <*> go_co env c2 go_co !env (CoVarCo cv) = covar env cv go_co !env (HoleCo hole) = cohole env hole go_co !env (UnivCo { uco_prov = p, uco_role = r , uco_lty = t1, uco_rty = t2, uco_deps = deps }) = mkUnivCo <$> pure p <*> go_cos env deps <*> pure r <*> go_ty env t1 <*> go_ty env t2 go_co !env (SymCo co) = mkSymCo <$> go_co env co go_co !env (TransCo c1 c2) = mkTransCo <$> go_co env c1 <*> go_co env c2 go_co !env (AxiomCo r cos) = mkAxiomCo r <$> go_cos env cos go_co !env (SelCo i co) = mkSelCo i <$> go_co env co go_co !env (LRCo lr co) = mkLRCo lr <$> go_co env co go_co !env (InstCo co arg) = mkInstCo <$> go_co env co <*> go_co env arg go_co !env (KindCo co) = mkKindCo <$> go_co env co go_co !env (SubCo co) = mkSubCo <$> go_co env co go_co !env co@(TyConAppCo r tc cos) | isTcTyCon tc = do { tc' <- tycon tc ; mkTyConAppCo r tc' <$> go_cos env cos } -- Not a TcTyCon | null cos -- Avoid allocation in this very = return co -- common case (E.g. 
Int, LiftedRep etc) | otherwise = mkTyConAppCo r tc <$> go_cos env cos go_co !env (ForAllCo { fco_tcv = tv, fco_visL = visL, fco_visR = visR , fco_kind = kind_co, fco_body = co }) = do { kind_co' <- go_co env kind_co ; tycobinder env tv visL $ \env' tv' -> do ; co' <- go_co env' co ; return $ mkForAllCo tv' visL visR kind_co' co' } -- See Note [Efficiency for ForAllCo case of mapTyCoX] {- Note [Use explicit recursion in mapTyCo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use explicit recursion in `mapTyCo`, rather than calling, say, `strictFoldDVarSet`, for exactly the same reason as in Note [Use explicit recursion in foldTyCo] in GHC.Core.TyCo.Rep. We are in a monadic context, and using too-clever higher order functions makes the strictness analyser produce worse results. We could probably use `foldr`, since it is inlined bodily, fairly early; but I'm doing the simple thing and inlining it by hand. See !12037 for performance glitches caused by using `strictFoldDVarSet` (which is definitely not inlined bodily). -} {- ********************************************************************* * * TyVarTy * * ********************************************************************* -} -- | Attempts to obtain the type variable underlying a 'Type', and panics with the -- given message if this is not a type variable type. See also 'getTyVar_maybe' getTyVar :: HasDebugCallStack => Type -> TyVar getTyVar ty = case getTyVar_maybe ty of Just tv -> tv Nothing -> pprPanic "getTyVar" (ppr ty) -- | Attempts to obtain the type variable underlying a 'Type' getTyVar_maybe :: Type -> Maybe TyVar getTyVar_maybe = repGetTyVar_maybe . 
coreFullView -- | Attempts to obtain the type variable underlying a 'Type', without -- any expansion repGetTyVar_maybe :: Type -> Maybe TyVar repGetTyVar_maybe (TyVarTy tv) = Just tv repGetTyVar_maybe _ = Nothing isTyVarTy :: Type -> Bool isTyVarTy ty = isJust (getTyVar_maybe ty) -- | If the type is a tyvar, possibly under a cast, returns it, along -- with the coercion. Thus, the co is :: kind tv ~N kind ty getCastedTyVar_maybe :: Type -> Maybe (TyVar, CoercionN) getCastedTyVar_maybe ty = case coreFullView ty of CastTy (TyVarTy tv) co -> Just (tv, co) TyVarTy tv -> Just (tv, mkReflCo Nominal (tyVarKind tv)) _ -> Nothing {- ********************************************************************* * * AppTy * * ********************************************************************* -} {- We need to be pretty careful with AppTy to make sure we obey the invariant that a TyConApp is always visibly so. mkAppTy maintains the invariant: use it. Note [Decomposing fat arrow c=>t] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Can we unify (a b) with (Eq a => ty)? If we do so, we end up with a partial application like ((=>) (Eq a)) which doesn't make sense in source Haskell. In contrast, we *can* unify (a b) with (t1 -> t2). Here's an example (#9858) of how you might do it: i :: (Typeable a, Typeable b) => Proxy (a b) -> TypeRep i p = typeRep p j = i (Proxy :: Proxy (Eq Int => Int)) The type (Proxy (Eq Int => Int)) is only accepted with -XImpredicativeTypes, but suppose we want that. But then in the call to 'i', we end up decomposing (Eq Int => Int), and we definitely don't want that. We are willing to split (t1 -=> t2) because the argument is still of kind Type, not Constraint. So the criterion is isVisibleFunArg. In Core there is no real reason to avoid such decomposition. But for now I've put the test in splitAppTyNoView_maybe, which applies throughout, because the other calls to splitAppTy are in GHC.Core.Unify, which is also used by the type checker (e.g. 
when matching type-function equations). -} -- | Applies a type to another, as in e.g. @k a@ mkAppTy :: Type -> Type -> Type -- See Note [Respecting definitional equality], invariant (EQ1). mkAppTy (CastTy fun_ty co) arg_ty | ([arg_co], res_co) <- decomposePiCos co (coercionKind co) [arg_ty] = (fun_ty `mkAppTy` (arg_ty `mkCastTy` arg_co)) `mkCastTy` res_co mkAppTy (TyConApp tc tys) ty2 = mkTyConApp tc (tys ++ [ty2]) mkAppTy ty1 ty2 = AppTy ty1 ty2 -- Note that the TyConApp could be an -- under-saturated type synonym. GHC allows that; e.g. -- type Foo k = k a -> k a -- type Id x = x -- foo :: Foo Id -> Foo Id -- -- Here Id is partially applied in the type sig for Foo, -- but once the type synonyms are expanded all is well -- -- Moreover in GHC.Tc.Types.tcInferTyApps we build up a type -- (T t1 t2 t3) one argument at a type, thus forming -- (T t1), (T t1 t2), etc mkAppTys :: Type -> [Type] -> Type mkAppTys ty1 [] = ty1 mkAppTys (CastTy fun_ty co) arg_tys -- much more efficient then nested mkAppTy -- Why do this? See (EQ1) of -- Note [Respecting definitional equality] -- in GHC.Core.TyCo.Rep = foldl' AppTy ((mkAppTys fun_ty casted_arg_tys) `mkCastTy` res_co) leftovers where (arg_cos, res_co) = decomposePiCos co (coercionKind co) arg_tys (args_to_cast, leftovers) = splitAtList arg_cos arg_tys casted_arg_tys = zipWith mkCastTy args_to_cast arg_cos mkAppTys (TyConApp tc tys1) tys2 = mkTyConApp tc (tys1 ++ tys2) mkAppTys ty1 tys2 = foldl' AppTy ty1 tys2 ------------- splitAppTy_maybe :: Type -> Maybe (Type, Type) -- ^ Attempt to take a type application apart, whether it is a -- function, type constructor, or plain type application. Note -- that type family applications are NEVER unsaturated by this! splitAppTy_maybe = splitAppTyNoView_maybe . 
coreFullView splitAppTy :: Type -> (Type, Type) -- ^ Attempts to take a type application apart, as in 'splitAppTy_maybe', -- and panics if this is not possible splitAppTy ty = splitAppTy_maybe ty `orElse` pprPanic "splitAppTy" (ppr ty) ------------- splitAppTyNoView_maybe :: HasDebugCallStack => Type -> Maybe (Type,Type) -- ^ Does the AppTy split as in 'splitAppTy_maybe', but assumes that -- any coreView stuff is already done splitAppTyNoView_maybe (AppTy ty1 ty2) = Just (ty1, ty2) splitAppTyNoView_maybe (FunTy af w ty1 ty2) | Just (tc, tys) <- funTyConAppTy_maybe af w ty1 ty2 , Just (tys', ty') <- snocView tys = Just (TyConApp tc tys', ty') splitAppTyNoView_maybe (TyConApp tc tys) | not (tyConMustBeSaturated tc) || tys `lengthExceeds` tyConArity tc , Just (tys', ty') <- snocView tys = Just (TyConApp tc tys', ty') -- Never create unsaturated type family apps! splitAppTyNoView_maybe _other = Nothing tcSplitAppTyNoView_maybe :: Type -> Maybe (Type,Type) -- ^ Just like splitAppTyNoView_maybe, but does not split (c => t) -- See Note [Decomposing fat arrow c=>t] tcSplitAppTyNoView_maybe ty | FunTy { ft_af = af } <- ty , not (isVisibleFunArg af) -- See Note [Decomposing fat arrow c=>t] = Nothing | otherwise = splitAppTyNoView_maybe ty ------------- splitAppTys :: HasDebugCallStack => Type -> (Type, [Type]) -- ^ Recursively splits a type as far as is possible, leaving a residual -- type being applied to and the type arguments applied to it. Never fails, -- even if that means returning an empty list of type applications. 
splitAppTys ty = split ty ty [] where split orig_ty ty args | Just ty' <- coreView ty = split orig_ty ty' args split _ (AppTy ty arg) args = split ty ty (arg:args) split _ (TyConApp tc tc_args) args = let -- keep type families saturated n | tyConMustBeSaturated tc = tyConArity tc | otherwise = 0 (tc_args1, tc_args2) = splitAt n tc_args in (TyConApp tc tc_args1, tc_args2 ++ args) split _ (FunTy af w ty1 ty2) args | Just (tc,tys) <- funTyConAppTy_maybe af w ty1 ty2 = assert (null args ) (TyConApp tc [], tys) split orig_ty _ args = (orig_ty, args) -- | Like 'splitAppTys', but doesn't look through type synonyms splitAppTysNoView :: HasDebugCallStack => Type -> (Type, [Type]) splitAppTysNoView ty = split ty [] where split (AppTy ty arg) args = split ty (arg:args) split (TyConApp tc tc_args) args = let n | tyConMustBeSaturated tc = tyConArity tc | otherwise = 0 (tc_args1, tc_args2) = splitAt n tc_args in (TyConApp tc tc_args1, tc_args2 ++ args) split (FunTy af w ty1 ty2) args | Just (tc, tys) <- funTyConAppTy_maybe af w ty1 ty2 = assert (null args ) (TyConApp tc [], tys) split ty args = (ty, args) {- ********************************************************************* * * LitTy * * ********************************************************************* -} mkNumLitTy :: Integer -> Type mkNumLitTy n = LitTy (NumTyLit n) -- | Is this a numeric literal. We also look through type synonyms. isNumLitTy :: Type -> Maybe Integer isNumLitTy ty | LitTy (NumTyLit n) <- coreFullView ty = Just n | otherwise = Nothing mkStrLitTy :: FastString -> Type mkStrLitTy s = LitTy (StrTyLit s) -- | Is this a symbol literal. We also look through type synonyms. isStrLitTy :: Type -> Maybe FastString isStrLitTy ty | LitTy (StrTyLit s) <- coreFullView ty = Just s | otherwise = Nothing mkCharLitTy :: Char -> Type mkCharLitTy c = LitTy (CharTyLit c) -- | Is this a char literal? We also look through type synonyms. 
isCharLitTy :: Type -> Maybe Char
isCharLitTy ty
  | LitTy (CharTyLit s) <- coreFullView ty = Just s
  | otherwise                              = Nothing

-- | Is this a type literal (symbol, numeric, or char)?
isLitTy :: Type -> Maybe TyLit
isLitTy ty
  | LitTy l <- coreFullView ty = Just l
  | otherwise                  = Nothing

-- | A type of kind 'ErrorMessage' (from the 'GHC.TypeError' module).
type ErrorMsgType = Type

-- | Is this type a custom user error?
-- If so, give us the error message.
-- Only looks at the /head/ of the type (after expanding synonyms);
-- use 'deepUserTypeError_maybe' to search inside the type.
userTypeError_maybe :: Type -> Maybe ErrorMsgType
userTypeError_maybe ty
  | Just ty' <- coreView ty = userTypeError_maybe ty'
userTypeError_maybe (TyConApp tc (_kind : msg : _))
  | tyConName tc == errorMessageTypeErrorFamName
    -- There may be more than 2 arguments, if the type error is
    -- used as a type constructor (e.g. at kind `Type -> Type`).
  = Just msg
userTypeError_maybe _ = Nothing

deepUserTypeError_maybe :: Type -> Maybe ErrorMsgType
-- Look for custom user error, deeply inside the type
deepUserTypeError_maybe ty
  | Just ty' <- coreView ty = deepUserTypeError_maybe ty'
    -- Recurse with the /deep/ variant.  Previously this clause called
    -- 'userTypeError_maybe', which checks only the head of the expansion,
    -- so a TypeError buried inside a synonym's expansion was missed;
    -- the deep TyConApp case below performs the same head check first,
    -- so this recursion is a strict superset of the old behaviour.
deepUserTypeError_maybe (TyConApp tc tys)
  | tyConName tc == errorMessageTypeErrorFamName
  , _kind : msg : _ <- tys
    -- There may be more than 2 arguments, if the type error is
    -- used as a type constructor (e.g. at kind `Type -> Type`).
  = Just msg
  | tyConMustBeSaturated tc
    -- Don't go looking for user type errors
    -- inside type family arguments (see #20241).
  = foldr (firstJust . deepUserTypeError_maybe) Nothing (drop (tyConArity tc) tys)
  | otherwise
  = foldr (firstJust . deepUserTypeError_maybe) Nothing tys
deepUserTypeError_maybe (ForAllTy _ ty) = deepUserTypeError_maybe ty
deepUserTypeError_maybe (FunTy { ft_arg = arg, ft_res = res })
  = deepUserTypeError_maybe arg `firstJust` deepUserTypeError_maybe res
deepUserTypeError_maybe (AppTy t1 t2)
  = deepUserTypeError_maybe t1 `firstJust` deepUserTypeError_maybe t2
deepUserTypeError_maybe (CastTy ty _) = deepUserTypeError_maybe ty
deepUserTypeError_maybe _  -- TyVarTy, CoercionTy, LitTy
  = Nothing

-- | Render a type corresponding to a user type error into a SDoc.
pprUserTypeErrorTy :: ErrorMsgType -> SDoc
pprUserTypeErrorTy ty =
  case splitTyConApp_maybe ty of

    -- Text "Something"
    Just (tc,[txt])
      | tyConName tc == typeErrorTextDataConName
      , Just str <- isStrLitTy txt
      -> ftext str

    -- ShowType t
    Just (tc,[_k,t])
      | tyConName tc == typeErrorShowTypeDataConName
      -> ppr t

    -- t1 :<>: t2
    Just (tc,[t1,t2])
      | tyConName tc == typeErrorAppendDataConName
      -> pprUserTypeErrorTy t1 <> pprUserTypeErrorTy t2

    -- t1 :$$: t2
    Just (tc,[t1,t2])
      | tyConName tc == typeErrorVAppendDataConName
      -> pprUserTypeErrorTy t1 $$ pprUserTypeErrorTy t2

    -- An unevaluated type function
    _ -> ppr ty

{- *********************************************************************
*                                                                      *
                      FunTy
*                                                                      *
********************************************************************* -}

{- Note [Representation of function types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions (e.g. Int -> Char) can be thought of as being applications
of funTyCon (known in Haskell surface syntax as (->)),
(note that `RuntimeRep' quantifiers are left inferred)

    (->) :: forall {r1 :: RuntimeRep} {r2 :: RuntimeRep}
                   (a :: TYPE r1) (b :: TYPE r2).
            a -> b -> Type

However, for efficiency's sake we represent saturated applications of (->)
with FunTy. For instance, the type,

    (->) r1 r2 a b

is equivalent to,

    FunTy (Anon a) b

Note how the RuntimeReps are implied in the FunTy representation.
For this reason we must be careful when reconstructing the TyConApp representation (see, for instance, splitTyConApp_maybe). In the compiler we maintain the invariant that all saturated applications of (->) are represented with FunTy. See #11714. -} ----------------------------------------------- funTyConAppTy_maybe :: FunTyFlag -> Type -> Type -> Type -> Maybe (TyCon, [Type]) -- ^ Given the components of a FunTy -- figure out the corresponding TyConApp. funTyConAppTy_maybe af mult arg res | Just arg_rep <- getRuntimeRep_maybe arg , Just res_rep <- getRuntimeRep_maybe res -- If you're changing the lines below, you'll probably want to adapt the -- `fUNTyCon` case of GHC.Core.Unify.unify_ty correspondingly. , let args | isFUNArg af = [mult, arg_rep, res_rep, arg, res] | otherwise = [ arg_rep, res_rep, arg, res] = Just $ (funTyFlagTyCon af, args) | otherwise = Nothing tyConAppFunTy_maybe :: HasDebugCallStack => TyCon -> [Type] -> Maybe Type -- ^ Return Just if this TyConApp should be represented as a FunTy tyConAppFunTy_maybe tc tys | Just (af, mult, arg, res) <- ty_con_app_fun_maybe manyDataConTy tc tys = Just (FunTy { ft_af = af, ft_mult = mult, ft_arg = arg, ft_res = res }) | otherwise = Nothing tyConAppFunCo_maybe :: HasDebugCallStack => Role -> TyCon -> [Coercion] -> Maybe Coercion -- ^ Return Just if this TyConAppCo should be represented as a FunCo tyConAppFunCo_maybe r tc cos | Just (af, mult, arg, res) <- ty_con_app_fun_maybe mult_refl tc cos = Just (mkFunCo r af mult arg res) | otherwise = Nothing where mult_refl = mkReflCo (funRole r SelMult) manyDataConTy ty_con_app_fun_maybe :: (HasDebugCallStack, Outputable a) => a -> TyCon -> [a] -> Maybe (FunTyFlag, a, a, a) {-# INLINE ty_con_app_fun_maybe #-} -- Specialise this function for its two call sites ty_con_app_fun_maybe many_ty_co tc args | tc_uniq == fUNTyConKey = fUN_case | tc_uniq == tcArrowTyConKey = non_FUN_case FTF_T_C | tc_uniq == ctArrowTyConKey = non_FUN_case FTF_C_T | tc_uniq == ccArrowTyConKey = 
non_FUN_case FTF_C_C | otherwise = Nothing where tc_uniq = tyConUnique tc fUN_case | (w:_r1:_r2:a1:a2:rest) <- args = assertPpr (null rest) (ppr tc <+> ppr args) $ Just (FTF_T_T, w, a1, a2) | otherwise = Nothing non_FUN_case ftf | (_r1:_r2:a1:a2:rest) <- args = assertPpr (null rest) (ppr tc <+> ppr args) $ Just (ftf, many_ty_co, a1, a2) | otherwise = Nothing mkFunctionType :: HasDebugCallStack => Mult -> Type -> Type -> Type -- ^ This one works out the FunTyFlag from the argument type -- See GHC.Types.Var Note [FunTyFlag] mkFunctionType mult arg_ty res_ty = FunTy { ft_af = af, ft_arg = arg_ty, ft_res = res_ty , ft_mult = assertPpr mult_ok (ppr [mult, arg_ty, res_ty]) $ mult } where af = chooseFunTyFlag arg_ty res_ty mult_ok = isVisibleFunArg af || isManyTy mult mkScaledFunctionTys :: [Scaled Type] -> Type -> Type -- ^ Like mkFunctionType, compute the FunTyFlag from the arguments mkScaledFunctionTys arg_tys res_ty = foldr mk res_ty arg_tys where mk (Scaled mult arg_ty) res_ty = mkFunTy (chooseFunTyFlag arg_ty res_ty) mult arg_ty res_ty chooseFunTyFlag :: HasDebugCallStack => Type -> Type -> FunTyFlag -- ^ See GHC.Types.Var Note [FunTyFlag] chooseFunTyFlag arg_ty res_ty = mkFunTyFlag (typeTypeOrConstraint arg_ty) (typeTypeOrConstraint res_ty) splitFunTy :: Type -> (Mult, Type, Type) -- ^ Attempts to extract the multiplicity, argument and result types from a type, -- and panics if that is not possible. 
See also 'splitFunTy_maybe' splitFunTy ty = case splitFunTy_maybe ty of Just (_af, mult, arg, res) -> (mult,arg,res) Nothing -> pprPanic "splitFunTy" (ppr ty) {-# INLINE splitFunTy_maybe #-} splitFunTy_maybe :: Type -> Maybe (FunTyFlag, Mult, Type, Type) -- ^ Attempts to extract the multiplicity, argument and result types from a type splitFunTy_maybe ty | FunTy af w arg res <- coreFullView ty = Just (af, w, arg, res) | otherwise = Nothing splitFunTys :: Type -> ([Scaled Type], Type) splitFunTys ty = split [] ty ty where -- common case first split args _ (FunTy _ w arg res) = split (Scaled w arg : args) res res split args orig_ty ty | Just ty' <- coreView ty = split args orig_ty ty' split args orig_ty _ = (reverse args, orig_ty) funResultTy :: HasDebugCallStack => Type -> Type -- ^ Extract the function result type and panic if that is not possible funResultTy ty | FunTy { ft_res = res } <- coreFullView ty = res | otherwise = pprPanic "funResultTy" (ppr ty) funArgTy :: HasDebugCallStack => Type -> Type -- ^ Extract the function argument type and panic if that is not possible funArgTy ty | FunTy { ft_arg = arg } <- coreFullView ty = arg | otherwise = pprPanic "funArgTy" (ppr ty) -- ^ Just like 'piResultTys' but for a single argument -- Try not to iterate 'piResultTy', because it's inefficient to substitute -- one variable at a time; instead use 'piResultTys" piResultTy :: HasDebugCallStack => Type -> Type -> Type piResultTy ty arg = case piResultTy_maybe ty arg of Just res -> res Nothing -> pprPanic "piResultTy" (ppr ty $$ ppr arg) piResultTy_maybe :: Type -> Type -> Maybe Type -- We don't need a 'tc' version, because -- this function behaves the same for Type and Constraint piResultTy_maybe ty arg = case coreFullView ty of FunTy { ft_res = res } -> Just res ForAllTy (Bndr tv _) res -> let empty_subst = mkEmptySubst $ mkInScopeSet $ tyCoVarsOfTypes [arg,res] in Just (substTy (extendTCvSubst empty_subst tv arg) res) _ -> Nothing -- | (piResultTys f_ty [ty1, .., tyn]) 
gives the type of (f ty1 .. tyn) -- where f :: f_ty -- 'piResultTys' is interesting because: -- 1. 'f_ty' may have more for-alls than there are args -- 2. Less obviously, it may have fewer for-alls -- For case 2. think of: -- piResultTys (forall a.a) [forall b.b, Int] -- This really can happen, but only (I think) in situations involving -- undefined. For example: -- undefined :: forall a. a -- Term: undefined @(forall b. b->b) @Int -- This term should have type (Int -> Int), but notice that -- there are more type args than foralls in 'undefined's type. -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint -- This is a heavily used function (e.g. from typeKind), -- so we pay attention to efficiency, especially in the special case -- where there are no for-alls so we are just dropping arrows from -- a function type/kind. piResultTys :: HasDebugCallStack => Type -> [Type] -> Type piResultTys ty [] = ty piResultTys ty orig_args@(arg:args) | FunTy { ft_res = res } <- ty = piResultTys res args | ForAllTy (Bndr tcv _) res <- ty = -- Both type and coercion variables go (extendTCvSubst init_subst tcv arg) res args | Just ty' <- coreView ty = piResultTys ty' orig_args | otherwise = pprPanic "piResultTys1" (ppr ty $$ ppr orig_args) where init_subst = mkEmptySubst $ mkInScopeSet (tyCoVarsOfTypes (ty:orig_args)) go :: Subst -> Type -> [Type] -> Type go subst ty [] = substTyUnchecked subst ty go subst ty all_args@(arg:args) | FunTy { ft_res = res } <- ty = go subst res args | ForAllTy (Bndr tv _) res <- ty = go (extendTCvSubst subst tv arg) res args | Just ty' <- coreView ty = go subst ty' all_args | not (isEmptyTCvSubst subst) -- See Note [Care with kind instantiation] = go init_subst (substTy subst ty) all_args | otherwise = -- We have not run out of arguments, but the function doesn't -- have the right kind to apply to them; so panic. 
-- Without the explicit isEmptyVarEnv test, an ill-kinded type -- would give an infinite loop, which is very unhelpful -- c.f. #15473 pprPanic "piResultTys2" (ppr ty $$ ppr orig_args $$ ppr all_args) applyTysX :: HasDebugCallStack => [TyVar] -> Type -> [Type] -> Type -- applyTysX beta-reduces (/\tvs. body_ty) arg_tys -- Assumes that (/\tvs. body_ty) is closed applyTysX tvs body_ty arg_tys = assertPpr (tvs `leLength` arg_tys) pp_stuff $ assertPpr (tyCoVarsOfType body_ty `subVarSet` mkVarSet tvs) pp_stuff $ mkAppTys (substTyWith tvs arg_tys_prefix body_ty) arg_tys_rest where pp_stuff = vcat [ppr tvs, ppr body_ty, ppr arg_tys] (arg_tys_prefix, arg_tys_rest) = splitAtList tvs arg_tys {- Note [Care with kind instantiation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have T :: forall k. k and we are finding the kind of T (forall b. b -> b) * Int Then T (forall b. b->b) :: k[ k :-> forall b. b->b] :: forall b. b -> b So T (forall b. b->b) * :: (b -> b)[ b :-> *] :: * -> * In other words we must instantiate the forall! Similarly (#15428) S :: forall k f. k -> f k and we are finding the kind of S * (* ->) Int Bool We have S * (* ->) :: (k -> f k)[ k :-> *, f :-> (* ->)] :: * -> * -> * So again we must instantiate. The same thing happens in GHC.CoreToIface.toIfaceAppArgsX. -} {- ********************************************************************* * * TyConApp * * ********************************************************************* -} -- splitTyConApp "looks through" synonyms, because they don't -- mean a distinct type, but all other type-constructor applications -- including functions are returned as Just .. -- | Retrieve the tycon heading this type, if there is one. Does /not/ -- look through synonyms. tyConAppTyConPicky_maybe :: Type -> Maybe TyCon tyConAppTyConPicky_maybe (TyConApp tc _) = Just tc tyConAppTyConPicky_maybe (FunTy { ft_af = af }) = Just (funTyFlagTyCon af) tyConAppTyConPicky_maybe _ = Nothing -- | The same as @fst . 
splitTyConApp@ -- We can short-cut the FunTy case {-# INLINE tyConAppTyCon_maybe #-} tyConAppTyCon_maybe :: Type -> Maybe TyCon tyConAppTyCon_maybe ty = case coreFullView ty of TyConApp tc _ -> Just tc FunTy { ft_af = af } -> Just (funTyFlagTyCon af) _ -> Nothing tyConAppTyCon :: HasDebugCallStack => Type -> TyCon tyConAppTyCon ty = tyConAppTyCon_maybe ty `orElse` pprPanic "tyConAppTyCon" (ppr ty) -- | The same as @snd . splitTyConApp@ tyConAppArgs_maybe :: Type -> Maybe [Type] tyConAppArgs_maybe ty = case splitTyConApp_maybe ty of Just (_, tys) -> Just tys Nothing -> Nothing tyConAppArgs :: HasDebugCallStack => Type -> [Type] tyConAppArgs ty = tyConAppArgs_maybe ty `orElse` pprPanic "tyConAppArgs" (ppr ty) -- | Attempts to tease a type apart into a type constructor and the application -- of a number of arguments to that constructor. Panics if that is not possible. -- See also 'splitTyConApp_maybe' splitTyConApp :: Type -> (TyCon, [Type]) splitTyConApp ty = splitTyConApp_maybe ty `orElse` pprPanic "splitTyConApp" (ppr ty) -- | Attempts to tease a type apart into a type constructor and the application -- of a number of arguments to that constructor splitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) splitTyConApp_maybe ty = splitTyConAppNoView_maybe (coreFullView ty) splitTyConAppNoView_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) -- Same as splitTyConApp_maybe but without looking through synonyms splitTyConAppNoView_maybe ty = case ty of FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res} -> funTyConAppTy_maybe af w arg res TyConApp tc tys -> Just (tc, tys) _ -> Nothing -- | tcSplitTyConApp_maybe splits a type constructor application into -- its type constructor and applied types. -- -- Differs from splitTyConApp_maybe in that it does *not* split types -- headed with (=>), as that's not a TyCon in the type-checker. 
-- -- Note that this may fail (in funTyConAppTy_maybe) in the case -- of a 'FunTy' with an argument of unknown kind 'FunTy' -- (e.g. `FunTy (a :: k) Int`, since the kind of @a@ isn't of -- the form `TYPE rep`. This isn't usually a problem but may -- be temporarily the case during canonicalization: -- see Note [Decomposing FunTy] in GHC.Tc.Solver.Equality -- and Note [The Purely Kinded Type Invariant (PKTI)] in GHC.Tc.Gen.HsType, -- Wrinkle around FunTy -- -- Consequently, you may need to zonk your type before -- using this function. tcSplitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) -- Defined here to avoid module loops between Unify and TcType. tcSplitTyConApp_maybe ty = case coreFullView ty of FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res} | isVisibleFunArg af -- Visible args only -- See Note [Decomposing fat arrow c=>t] -> funTyConAppTy_maybe af w arg res TyConApp tc tys -> Just (tc, tys) _ -> Nothing tcSplitTyConApp :: Type -> (TyCon, [Type]) tcSplitTyConApp ty = tcSplitTyConApp_maybe ty `orElse` pprPanic "tcSplitTyConApp" (ppr ty) --------------------------- newTyConInstRhs :: TyCon -> [Type] -> Type -- ^ Unwrap one 'layer' of newtype on a type constructor and its -- arguments, using an eta-reduced version of the @newtype@ if possible. -- This requires tys to have at least @newTyConInstArity tycon@ elements. newTyConInstRhs tycon tys = assertPpr (tvs `leLength` tys) (ppr tycon $$ ppr tys $$ ppr tvs) $ applyTysX tvs rhs tys where (tvs, rhs) = newTyConEtadRhs tycon {- ********************************************************************* * * CastTy * * ********************************************************************* -} splitCastTy_maybe :: Type -> Maybe (Type, Coercion) splitCastTy_maybe ty | CastTy ty' co <- coreFullView ty = Just (ty', co) | otherwise = Nothing -- | Make a 'CastTy'. The Coercion must be nominal. Checks the -- Coercion for reflexivity, dropping it if it's reflexive. 
-- See @Note [Respecting definitional equality]@ in "GHC.Core.TyCo.Rep"
mkCastTy :: Type -> Coercion -> Type
mkCastTy orig_ty co
  | isReflexiveCo co = orig_ty  -- (EQ2) from the Note
    -- NB: Do the slow check here. This is important to keep the splitXXX
    -- functions working properly. Otherwise, we may end up with something
    -- like (((->) |> something_reflexive_but_not_obviously_so) biz baz)
    -- fails under splitFunTy_maybe. This happened with the cheaper check
    -- in test dependent/should_compile/dynamic-paper.
mkCastTy orig_ty co = mk_cast_ty orig_ty co

-- | Like 'mkCastTy', but avoids checking the coercion for reflexivity,
-- as that can be expensive.
-- Worker for 'mkCastTy': pushes the cast through synonyms, nested casts,
-- and foralls so that invariants (EQ3) and (EQ4) of
-- Note [Respecting definitional equality] in GHC.Core.TyCo.Rep hold.
mk_cast_ty :: Type -> Coercion -> Type
mk_cast_ty orig_ty co = go orig_ty
  where
    go :: Type -> Type
    -- See Note [Using coreView in mk_cast_ty]
    -- Expand synonyms first: the invariants must hold on the expansion too.
    go ty | Just ty' <- coreView ty = go ty'

    -- Collapse nested casts into one transitive coercion.
    go (CastTy ty co1)  -- (EQ3) from the Note
      = mkCastTy ty (co1 `mkTransCo` co)
          -- call mkCastTy again for the reflexivity check

    -- Push the cast under a forall, provided the bound tyvar is not
    -- captured by the coercion's free variables (alpha-rename if it is).
    go (ForAllTy (Bndr tv vis) inner_ty)  -- (EQ4) from the Note
      -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep.
      | isTyVar tv
      , let fvs = tyCoVarsOfCo co
      = -- have to make sure that pushing the co in doesn't capture the bound var!
        if tv `elemVarSet` fvs
        then let empty_subst   = mkEmptySubst (mkInScopeSet fvs)
                 (subst, tv')  = substVarBndr empty_subst tv
             in ForAllTy (Bndr tv' vis) (substTy subst inner_ty `mk_cast_ty` co)
        else ForAllTy (Bndr tv vis) (inner_ty `mk_cast_ty` co)

    go _ = CastTy orig_ty co  -- NB: orig_ty: preserve synonyms if possible

{- Note [Using coreView in mk_cast_ty]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Invariants (EQ3) and (EQ4) of Note [Respecting definitional equality]
in GHC.Core.TyCo.Rep must apply regardless of type synonyms. For instance,
consider this example (#19742):

   type EqSameNat = () |> co
   useNatEq :: EqSameNat |> sym co

(Those casts aren't visible in the user-source code, of course; see
#19742 for what the user might write.)
The type `EqSameNat |> sym co` looks as if it satisfies (EQ3), as it has no nested casts, but if we expand EqSameNat, we see that it doesn't. And then Bad Things happen. The solution is easy: just use `coreView` when establishing (EQ3) and (EQ4) in `mk_cast_ty`. -} {- ********************************************************************* * * CoercionTy CoercionTy allows us to inject coercions into types. A CoercionTy should appear only in the right-hand side of an application. * * ********************************************************************* -} mkCoercionTy :: Coercion -> Type mkCoercionTy = CoercionTy isCoercionTy :: Type -> Bool isCoercionTy (CoercionTy _) = True isCoercionTy _ = False isCoercionTy_maybe :: Type -> Maybe Coercion isCoercionTy_maybe (CoercionTy co) = Just co isCoercionTy_maybe _ = Nothing stripCoercionTy :: Type -> Coercion stripCoercionTy (CoercionTy co) = co stripCoercionTy ty = pprPanic "stripCoercionTy" (ppr ty) {- ********************************************************************* * * ForAllTy * * ********************************************************************* -} tyConBindersPiTyBinders :: [TyConBinder] -> [PiTyBinder] -- Return the tyConBinders in PiTyBinder form tyConBindersPiTyBinders = map to_tyb where to_tyb (Bndr tv (NamedTCB vis)) = Named (Bndr tv vis) to_tyb (Bndr tv AnonTCB) = Anon (tymult (varType tv)) FTF_T_T -- | Make a dependent forall over a TyCoVar mkTyCoForAllTy :: TyCoVar -> ForAllTyFlag -> Type -> Type mkTyCoForAllTy tv vis ty | isCoVar tv , not (tv `elemVarSet` tyCoVarsOfType ty) -- Maintain ForAllTy's invariants -- See Note [Unused coercion variable in ForAllTy] in GHC.Core.TyCo.Rep = mkVisFunTyMany (varType tv) ty | otherwise = ForAllTy (mkForAllTyBinder vis tv) ty -- | Make a dependent forall over a TyCoVar mkTyCoForAllTys :: [ForAllTyBinder] -> Type -> Type mkTyCoForAllTys bndrs ty = foldr (\(Bndr var vis) -> mkTyCoForAllTy var vis) ty bndrs -- | Make a dependent forall over an 'Inferred' variable 
mkTyCoInvForAllTy :: TyCoVar -> Type -> Type
-- ^ Make a dependent forall over an 'Inferred' variable (tyvar or covar).
mkTyCoInvForAllTy v body = mkTyCoForAllTy v Inferred body

-- | Like 'mkTyCoInvForAllTy', but tv should be a tyvar
mkInfForAllTy :: TyVar -> Type -> Type
mkInfForAllTy v body
  = assert (isTyVar v) $
    ForAllTy (Bndr v Inferred) body

-- | Like 'mkForAllTys', but assumes all variables are dependent and
-- 'Inferred', a common case
mkTyCoInvForAllTys :: [TyCoVar] -> Type -> Type
mkTyCoInvForAllTys vs body = foldr mkTyCoInvForAllTy body vs

-- | Like 'mkTyCoInvForAllTys', but tvs should be a list of tyvar
mkInfForAllTys :: [TyVar] -> Type -> Type
mkInfForAllTys vs body = foldr mkInfForAllTy body vs

-- | Like 'mkForAllTy', but assumes the variable is dependent and 'Specified',
-- a common case
mkSpecForAllTy :: TyVar -> Type -> Type
mkSpecForAllTy v body
  = assert (isTyVar v) $
    -- covar is always Inferred, so input should be tyvar
    ForAllTy (Bndr v Specified) body

-- | Like 'mkForAllTys', but assumes all variables are dependent and
-- 'Specified', a common case
mkSpecForAllTys :: [TyVar] -> Type -> Type
mkSpecForAllTys vs body = foldr mkSpecForAllTy body vs

-- | Like mkForAllTys, but assumes all variables are dependent and visible
mkVisForAllTys :: [TyVar] -> Type -> Type
mkVisForAllTys vs
  = assert (all isTyVar vs) $
    -- covar is always Inferred, so all inputs should be tyvar
    mkForAllTys [ Bndr v Required | v <- vs ]

-- | Given a list of type-level vars and the free vars of a result kind,
-- makes PiTyBinders, preferring anonymous binders
-- if the variable is, in fact, not dependent.
-- e.g.    mkTyConBindersPreferAnon [(k:*),(b:k),(c:k)] (k->k)
-- We want (k:*) Named, (b:k) Anon, (c:k) Anon
--
-- All non-coercion binders are /visible/.
mkTyConBindersPreferAnon :: [TyVar] -- ^ binders -> TyCoVarSet -- ^ free variables of result -> [TyConBinder] mkTyConBindersPreferAnon vars inner_tkvs = assert (all isTyVar vars) fst (go vars) where go :: [TyVar] -> ([TyConBinder], VarSet) -- also returns the free vars go [] = ([], inner_tkvs) go (v:vs) | v `elemVarSet` fvs = ( Bndr v (NamedTCB Required) : binders , fvs `delVarSet` v `unionVarSet` kind_vars ) | otherwise = ( Bndr v AnonTCB : binders , fvs `unionVarSet` kind_vars ) where (binders, fvs) = go vs kind_vars = tyCoVarsOfType $ tyVarKind v -- | Take a ForAllTy apart, returning the binders and result type splitForAllForAllTyBinders :: Type -> ([ForAllTyBinder], Type) splitForAllForAllTyBinders ty = split ty ty [] where split _ (ForAllTy b res) bs = split res res (b:bs) split orig_ty ty bs | Just ty' <- coreView ty = split orig_ty ty' bs split orig_ty _ bs = (reverse bs, orig_ty) {-# INLINE splitForAllForAllTyBinders #-} -- | Take a ForAllTy apart, returning the list of tycovars and the result type. -- This always succeeds, even if it returns only an empty list. Note that the -- result type returned may have free variables that were bound by a forall. splitForAllTyCoVars :: Type -> ([TyCoVar], Type) splitForAllTyCoVars ty = split ty ty [] where split _ (ForAllTy (Bndr tv _) ty) tvs = split ty ty (tv:tvs) split orig_ty ty tvs | Just ty' <- coreView ty = split orig_ty ty' tvs split orig_ty _ tvs = (reverse tvs, orig_ty) -- | Like 'splitForAllTyCoVars', but split only for tyvars. -- This always succeeds, even if it returns only an empty list. Note that the -- result type returned may have free variables that were bound by a forall. 
splitForAllTyVars :: Type -> ([TyVar], Type)
splitForAllTyVars ty = split ty ty []
  where
    split _ (ForAllTy (Bndr tv _) ty) tvs
      | isTyVar tv                               = split ty ty (tv:tvs)
    split orig_ty ty tvs | Just ty' <- coreView ty = split orig_ty ty' tvs
    split orig_ty _ tvs                            = (reverse tvs, orig_ty)

-- | Like 'splitForAllTyCoVars', but only splits 'ForAllTy's with 'Required' type
-- variable binders. Furthermore, each returned tyvar is annotated with '()'.
splitForAllReqTyBinders :: Type -> ([ReqTyBinder], Type)
splitForAllReqTyBinders ty = split ty ty []
  where
    split _ (ForAllTy (Bndr tv Required) ty) tvs   = split ty ty (Bndr tv ():tvs)
    split orig_ty ty tvs | Just ty' <- coreView ty = split orig_ty ty' tvs
    split orig_ty _ tvs                            = (reverse tvs, orig_ty)

-- | Like 'splitForAllTyCoVars', but only splits 'ForAllTy's with 'Invisible' type
-- variable binders. Furthermore, each returned tyvar is annotated with its
-- 'Specificity'.
splitForAllInvisTyBinders :: Type -> ([InvisTyBinder], Type)
splitForAllInvisTyBinders ty = split ty ty []
  where
    split _ (ForAllTy (Bndr tv (Invisible spec)) ty) tvs = split ty ty (Bndr tv spec:tvs)
    split orig_ty ty tvs | Just ty' <- coreView ty       = split orig_ty ty' tvs
    split orig_ty _ tvs                                  = (reverse tvs, orig_ty)

-- | Checks whether this is a proper forall (with a named binder)
isForAllTy :: Type -> Bool
isForAllTy ty
  | ForAllTy {} <- coreFullView ty = True
  | otherwise                      = False

-- | Like `isForAllTy`, but returns True only if it is a tyvar binder
isForAllTy_ty :: Type -> Bool
isForAllTy_ty ty
  | ForAllTy (Bndr tv _) _ <- coreFullView ty
  , isTyVar tv = True
  | otherwise  = False

-- | Like `isForAllTy`, but returns True only if it is an inferred tyvar binder
isForAllTy_invis_ty :: Type -> Bool
isForAllTy_invis_ty ty
  | ForAllTy (Bndr tv (Invisible InferredSpec)) _ <- coreFullView ty
  , isTyVar tv = True
  | otherwise  = False

-- | Like `isForAllTy`, but returns True only if it is a covar binder
isForAllTy_co :: Type -> Bool
isForAllTy_co ty
  | ForAllTy (Bndr tv _) _ <- coreFullView ty
  , isCoVar tv = True
  | otherwise  = False

-- | Is this a function or forall?
isPiTy :: Type -> Bool
isPiTy ty = case coreFullView ty of
  ForAllTy {} -> True
  FunTy {}    -> True
  _           -> False

-- | Is this a function?
-- Note: `forall {b}. Show b => b -> IO b` will not be considered a function by this function.
-- It would merely be a forall wrapping a function type.
isFunTy :: Type -> Bool
isFunTy ty
  | FunTy {} <- coreFullView ty = True
  | otherwise                   = False

-- | Take a forall type apart, or panics if that is not possible.
splitForAllTyCoVar :: Type -> (TyCoVar, Type)
splitForAllTyCoVar ty
  | Just answer <- splitForAllTyCoVar_maybe ty = answer
  | otherwise                                  = pprPanic "splitForAllTyCoVar" (ppr ty)

-- | Drops all ForAllTys
dropForAlls :: Type -> Type
dropForAlls ty = go ty
  where
    go (ForAllTy _ res)            = go res
    go ty | Just ty' <- coreView ty = go ty'
    go res                         = res

-- | Attempts to take a ForAllTy apart, returning the full ForAllTyBinder
splitForAllForAllTyBinder_maybe :: Type -> Maybe (ForAllTyBinder, Type)
splitForAllForAllTyBinder_maybe ty
  | ForAllTy bndr inner_ty <- coreFullView ty = Just (bndr, inner_ty)
  | otherwise                                 = Nothing

-- | Attempts to take a ForAllTy apart, returning the Var
splitForAllTyCoVar_maybe :: Type -> Maybe (TyCoVar, Type)
splitForAllTyCoVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty = Just (tv, inner_ty)
  | otherwise                                        = Nothing

-- | Attempts to take a ForAllTy apart, but only if the binder is a TyVar
splitForAllTyVar_maybe :: Type -> Maybe (TyVar, Type)
splitForAllTyVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty
  , isTyVar tv = Just (tv, inner_ty)
  | otherwise  = Nothing

-- | Like 'splitForAllTyCoVar_maybe', but only returns Just if it is a covar binder.
splitForAllCoVar_maybe :: Type -> Maybe (CoVar, Type)
splitForAllCoVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty
  , isCoVar tv = Just (tv, inner_ty)
  | otherwise  = Nothing

-- | Attempts to take a forall type apart; works with proper foralls and
-- functions
{-# INLINE splitPiTy_maybe #-}  -- callers will immediately deconstruct
splitPiTy_maybe :: Type -> Maybe (PiTyBinder, Type)
splitPiTy_maybe ty = case coreFullView ty of
  ForAllTy bndr ty -> Just (Named bndr, ty)
  FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res}
                   -> Just (Anon (mkScaled w arg) af, res)
  _                -> Nothing

-- | Takes a forall type apart, or panics
splitPiTy :: Type -> (PiTyBinder, Type)
splitPiTy ty
  | Just answer <- splitPiTy_maybe ty = answer
  | otherwise                         = pprPanic "splitPiTy" (ppr ty)

-- | Split off all PiTyBinders to a type, splitting both proper foralls
-- and functions
splitPiTys :: Type -> ([PiTyBinder], Type)
splitPiTys ty = split ty ty []
  where
    split _ (ForAllTy b res) bs = split res res (Named b : bs)
    split _ (FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res }) bs
      = split res res (Anon (Scaled w arg) af : bs)
    split orig_ty ty bs | Just ty' <- coreView ty = split orig_ty ty' bs
    split orig_ty _ bs = (reverse bs, orig_ty)

-- | Extracts a list of run-time arguments from a function type,
-- looking through newtypes to the right of arrows.
--
-- Examples:
--
-- @
--   newtype Identity a = I a
--
--   getRuntimeArgTys (Int -> Bool -> Double) == [(Int, FTF_T_T), (Bool, FTF_T_T)]
--   getRuntimeArgTys (Identity Int -> Bool -> Double) == [(Identity Int, FTF_T_T), (Bool, FTF_T_T)]
--   getRuntimeArgTys (Int -> Identity (Bool -> Identity Double)) == [(Int, FTF_T_T), (Bool, FTF_T_T)]
--   getRuntimeArgTys (forall a. Show a => Identity a -> a -> Int -> Bool)
--     == [(Show a, FTF_C_T), (Identity a, FTF_T_T),(a, FTF_T_T),(Int, FTF_T_T)]
-- @
--
-- Note that, in the last case, the returned types might mention an out-of-scope
-- type variable. This function is used only when we really care about the /kinds/
-- of the returned types, so this is OK.
--
-- **Warning**: this function can return an infinite list. For example:
--
-- @
--   newtype N a = MkN (a -> N a)
--   getRuntimeArgTys (N a) == repeat (a, FTF_T_T)
-- @
getRuntimeArgTys :: Type -> [(Scaled Type, FunTyFlag)]
getRuntimeArgTys = go
  where
    go :: Type -> [(Scaled Type, FunTyFlag)]
    go (ForAllTy _ res) = go res
    go (FunTy { ft_mult = w, ft_arg = arg, ft_res = res, ft_af = af })
      = (Scaled w arg, af) : go res
    go ty
      | Just ty' <- coreView ty
      = go ty'
      | Just (_,ty') <- topNormaliseNewType_maybe ty
      = go ty'
      | otherwise
      = []

invisibleTyBndrCount :: Type -> Int
-- Returns the number of leading invisible forall'd binders in the type
-- Includes invisible predicate arguments; e.g. for
--    e.g.  forall {k}. (k ~ *) => k -> k
-- returns 2 not 1
invisibleTyBndrCount ty = length (fst (splitInvisPiTys ty))

-- | Like 'splitPiTys', but returns only *invisible* binders, including constraints.
-- Stops at the first visible binder.
splitInvisPiTys :: Type -> ([PiTyBinder], Type)
splitInvisPiTys ty = split ty ty []
  where
    split _ (ForAllTy b res) bs
      | Bndr _ vis <- b
      , isInvisibleForAllTyFlag vis   = split res res (Named b : bs)
    split _ (FunTy { ft_af = af, ft_mult = mult, ft_arg = arg, ft_res = res }) bs
      | isInvisibleFunArg af          = split res res (Anon (mkScaled mult arg) af : bs)
    split orig_ty ty bs
      | Just ty' <- coreView ty       = split orig_ty ty' bs
    split orig_ty _ bs                = (reverse bs, orig_ty)

splitInvisPiTysN :: Int -> Type -> ([PiTyBinder], Type)
-- ^ Same as 'splitInvisPiTys', but stop when
--   - you have found @n@ 'PiTyBinder's,
--   - or you run out of invisible binders
splitInvisPiTysN n ty = split n ty ty []
  where
    split n orig_ty ty bs
      | n == 0                  = (reverse bs, orig_ty)
      | Just ty' <- coreView ty = split n orig_ty ty' bs
      | ForAllTy b res <- ty
      , Bndr _ vis <- b
      , isInvisibleForAllTyFlag vis = split (n-1) res res (Named b : bs)
      | FunTy { ft_af = af, ft_mult = mult, ft_arg = arg, ft_res = res } <- ty
      , isInvisibleFunArg af    = split (n-1) res res (Anon (Scaled mult arg) af : bs)
      | otherwise               = (reverse bs, orig_ty)

-- | Given a 'TyCon' and a list of argument types, filter out any invisible
-- (i.e., 'Inferred' or 'Specified') arguments.
filterOutInvisibleTypes :: TyCon -> [Type] -> [Type]
filterOutInvisibleTypes tc tys = snd $ partitionInvisibleTypes tc tys

-- | Given a 'TyCon' and a list of argument types, filter out any 'Inferred'
-- arguments.
filterOutInferredTypes :: TyCon -> [Type] -> [Type]
filterOutInferredTypes tc tys =
  filterByList (map (/= Inferred) $ tyConForAllTyFlags tc tys) tys

-- | Given a 'TyCon' and a list of argument types, partition the arguments
-- into:
--
-- 1. 'Inferred' or 'Specified' (i.e., invisible) arguments and
--
-- 2. 'Required' (i.e., visible) arguments
partitionInvisibleTypes :: TyCon -> [Type] -> ([Type], [Type])
partitionInvisibleTypes tc tys =
  partitionByList (map isInvisibleForAllTyFlag $ tyConForAllTyFlags tc tys) tys

-- | Given a list of things paired with their visibilities, partition the
-- things into (invisible things, visible things).
partitionInvisibles :: [(a, ForAllTyFlag)] -> ([a], [a])
partitionInvisibles = partitionWith pick_invis
  where
    pick_invis :: (a, ForAllTyFlag) -> Either a a
    pick_invis (thing, vis) | isInvisibleForAllTyFlag vis = Left thing
                            | otherwise                   = Right thing

-- | Given a 'TyCon' and a list of argument types to which the 'TyCon' is
-- applied, determine each argument's visibility
-- ('Inferred', 'Specified', or 'Required').
--
-- Wrinkle: consider the following scenario:
--
-- > T :: forall k. k -> k
-- > tyConForAllTyFlags T [forall m. m -> m -> m, S, R, Q]
--
-- After substituting, we get
--
-- > T (forall m. m -> m -> m) :: (forall m. m -> m -> m) -> forall n. n -> n -> n
--
-- Thus, the first argument is invisible, @S@ is visible, @R@ is invisible again,
-- and @Q@ is visible.
tyConForAllTyFlags :: TyCon -> [Type] -> [ForAllTyFlag]
tyConForAllTyFlags tc = fun_kind_arg_flags (tyConKind tc)

-- | Given a 'Type' and a list of argument types to which the 'Type' is
-- applied, determine each argument's visibility
-- ('Inferred', 'Specified', or 'Required').
--
-- Most of the time, the arguments will be 'Required', but not always. Consider
-- @f :: forall a. a -> Type@. In @f Type Bool@, the first argument (@Type@) is
-- 'Specified' and the second argument (@Bool@) is 'Required'. It is precisely
-- this sort of higher-rank situation in which 'appTyForAllTyFlags' comes in handy,
-- since @f Type Bool@ would be represented in Core using 'AppTy's.
-- (See also #15792).
appTyForAllTyFlags :: Type -> [Type] -> [ForAllTyFlag]
appTyForAllTyFlags ty = fun_kind_arg_flags (typeKind ty)

-- | Given a function kind and a list of argument types (where each argument's
-- kind aligns with the corresponding position in the argument kind), determine
-- each argument's visibility ('Inferred', 'Specified', or 'Required').
fun_kind_arg_flags :: Kind -> [Type] -> [ForAllTyFlag]
fun_kind_arg_flags = go emptySubst
  where
    go subst ki arg_tys
      | Just ki' <- coreView ki = go subst ki' arg_tys
    go _ _ [] = []
    go subst (ForAllTy (Bndr tv argf) res_ki) (arg_ty:arg_tys)
      = argf : go subst' res_ki arg_tys
      where
        subst' = extendTvSubst subst tv arg_ty
    go subst (TyVarTy tv) arg_tys
      | Just ki <- lookupTyVar subst tv = go subst ki arg_tys
    -- This FunTy case is important to handle kinds with nested foralls, such
    -- as this kind (inspired by #16518):
    --
    --   forall {k1} k2. k1 -> k2 -> forall k3. k3 -> Type
    --
    -- Here, we want to get the following ForAllTyFlags:
    --
    -- [Inferred,   Specified, Required, Required, Specified, Required]
    -- forall {k1}. forall k2. k1 ->     k2 ->     forall k3. k3 ->     Type
    go subst (FunTy{ft_af = af, ft_res = res_ki}) (_:arg_tys)
      = argf : go subst res_ki arg_tys
      where
        argf | isVisibleFunArg af = Required
             | otherwise          = Inferred
    go _ _ arg_tys = map (const Required) arg_tys
                     -- something is ill-kinded. But this can happen
                     -- when printing errors. Assume everything is Required.

-- @isTauTy@ tests if a type has no foralls or (=>)
isTauTy :: Type -> Bool
isTauTy ty | Just ty' <- coreView ty = isTauTy ty'
isTauTy (TyVarTy _)       = True
isTauTy (LitTy {})        = True
isTauTy (TyConApp tc tys) = all isTauTy tys && isTauTyCon tc
isTauTy (AppTy a b)       = isTauTy a && isTauTy b
isTauTy (FunTy { ft_af = af, ft_mult = w, ft_arg = a, ft_res = b })
  | isInvisibleFunArg af  = False                           -- e.g., Eq a => b
  | otherwise             = isTauTy w && isTauTy a && isTauTy b  -- e.g., a -> b
isTauTy (ForAllTy {})     = False
isTauTy (CastTy ty _)     = isTauTy ty
isTauTy (CoercionTy _)    = False  -- Not sure about this

isAtomicTy :: Type -> Bool
-- True if the type is just a single token, and can be printed compactly
-- Used when deciding how to lay out type error messages; see the
-- call in GHC.Tc.Errors
isAtomicTy (TyVarTy {})    = True
isAtomicTy (LitTy {})      = True
isAtomicTy (TyConApp _ []) = True

isAtomicTy ty | isLiftedTypeKind ty = True
   -- 'Type' prints compactly as *
   -- See GHC.Iface.Type.ppr_kind_type

isAtomicTy _ = False

{-
************************************************************************
*                                                                      *
   \subsection{Type families}
*                                                                      *
************************************************************************
-}

mkFamilyTyConApp :: TyCon -> [Type] -> Type
-- ^ Given a family instance TyCon and its arg types, return the
-- corresponding family type.  E.g:
--
-- > data family T a
-- > data instance T (Maybe b) = MkT b
--
-- Where the instance tycon is :RTL, so:
--
-- > mkFamilyTyConApp :RTL Int  =  T (Maybe Int)
mkFamilyTyConApp tc tys
  | Just (fam_tc, fam_tys) <- tyConFamInst_maybe tc
  , let tvs = tyConTyVars tc
        fam_subst = assertPpr (tvs `equalLength` tys) (ppr tc <+> ppr tys) $
                    zipTvSubst tvs tys
  = mkTyConApp fam_tc (substTys fam_subst fam_tys)
  | otherwise
  = mkTyConApp tc tys

-- | Get the type on the LHS of a coercion induced by a type/data
-- family instance.
coAxNthLHS :: CoAxiom br -> Int -> Type
coAxNthLHS ax ind =
  mkTyConApp (coAxiomTyCon ax) (coAxBranchLHS (coAxiomNthBranch ax ind))

-- | True if the type contains no type-family applications (after
-- looking through synonyms with 'coreView')
isFamFreeTy :: Type -> Bool
isFamFreeTy ty | Just ty' <- coreView ty = isFamFreeTy ty'
isFamFreeTy (TyVarTy _)       = True
isFamFreeTy (LitTy {})        = True
isFamFreeTy (TyConApp tc tys) = all isFamFreeTy tys && isFamFreeTyCon tc
isFamFreeTy (AppTy a b)       = isFamFreeTy a && isFamFreeTy b
isFamFreeTy (FunTy _ w a b)   = isFamFreeTy w && isFamFreeTy a && isFamFreeTy b
isFamFreeTy (ForAllTy _ ty)   = isFamFreeTy ty
isFamFreeTy (CastTy ty _)     = isFamFreeTy ty
isFamFreeTy (CoercionTy _)    = False  -- Not sure about this

buildSynTyCon :: Name -> [KnotTied TyConBinder] -> Kind   -- ^ /result/ kind
              -> [Role] -> KnotTied Type -> TyCon
-- This function is here because here is where we have
--   isFamFree and isTauTy
buildSynTyCon name binders res_kind roles rhs
  = mkSynonymTyCon name binders res_kind roles rhs
                   is_tau is_fam_free is_forgetful is_concrete
  where
    qtvs                = mkVarSet (map binderVar binders)
    is_tau              = isTauTy rhs
    is_fam_free         = isFamFreeTy rhs
    is_concrete         = isConcreteTypeWith qtvs rhs
    is_forgetful        = not (qtvs `subVarSet` expanded_rhs_tyvars)
    expanded_rhs_tyvars = tyCoVarsOfType (expandTypeSynonyms rhs)
         -- See Note [Forgetful type synonyms] in GHC.Core.TyCon
         -- To find out if this TyCon is forgetful, expand the synonyms in its RHS
         -- and check that all of the binders are free in the expanded type.
         -- We really only need to expand the /forgetful/ synonyms on the RHS,
         -- but we don't currently have a function to do that.
         -- Failing to expand the RHS led to #25094, e.g.
         --     type Bucket a b c = Key (a,b,c)
         --     type Key x = Any
         -- Here Bucket is definitely forgetful!

{-
************************************************************************
*                                                                      *
   \subsection{Liftedness}
*                                                                      *
************************************************************************
-}

-- | Tries to compute the 'Levity' of the given type. Returns either
-- a definite 'Levity', or 'Nothing' if we aren't sure (e.g. the
-- type is representation-polymorphic).
--
-- Panics if the kind does not have the shape @TYPE r@.
typeLevity_maybe :: HasDebugCallStack => Type -> Maybe Levity
typeLevity_maybe ty = runtimeRepLevity_maybe (getRuntimeRep ty)

typeLevity :: HasDebugCallStack => Type -> Levity
typeLevity ty = case typeLevity_maybe ty of
                  Just lev -> lev
                  Nothing  -> pprPanic "typeLevity" (ppr ty)

-- | Is the given type definitely unlifted?
-- See "Type#type_classification" for what an unlifted type is.
--
-- Panics on representation-polymorphic types; See 'mightBeUnliftedType' for
-- a more approximate predicate that behaves better in the presence of
-- representation polymorphism.
isUnliftedType :: HasDebugCallStack => Type -> Bool
-- isUnliftedType returns True for forall'd unlifted types:
--      x :: forall a. Int#
-- I found bindings like these were getting floated to the top level.
isUnliftedType ty
  = case typeLevity_maybe ty of
      Just Lifted   -> False
      Just Unlifted -> True
      Nothing       ->
        pprPanic "isUnliftedType" (ppr ty <+> dcolon <+> ppr (typeKind ty))

-- | Returns:
--
-- * 'False' if the type is /guaranteed/ unlifted or
-- * 'True' if it lifted, OR we aren't sure
--    (e.g. in a representation-polymorphic case)
mightBeLiftedType :: Type -> Bool
mightBeLiftedType = mightBeLifted . typeLevity_maybe

definitelyLiftedType :: Type -> Bool
definitelyLiftedType = not . mightBeUnliftedType

-- | Returns:
--
-- * 'False' if the type is /guaranteed/ lifted or
-- * 'True' if it is unlifted, OR we aren't sure
--    (e.g. in a representation-polymorphic case)
mightBeUnliftedType :: Type -> Bool
mightBeUnliftedType = mightBeUnlifted . typeLevity_maybe

definitelyUnliftedType :: Type -> Bool
definitelyUnliftedType = not . mightBeLiftedType

-- | See "Type#type_classification" for what a boxed type is.
-- Panics on representation-polymorphic types; See 'mightBeUnliftedType' for
-- a more approximate predicate that behaves better in the presence of
-- representation polymorphism.
isBoxedType :: Type -> Bool
isBoxedType ty = isBoxedRuntimeRep (getRuntimeRep ty)

-- | Is this a type of kind RuntimeRep? (e.g. LiftedRep)
isRuntimeRepKindedTy :: Type -> Bool
isRuntimeRepKindedTy = isRuntimeRepTy . typeKind

-- | Drops prefix of RuntimeRep constructors in 'TyConApp's. Useful for e.g.
-- dropping 'LiftedRep arguments of unboxed tuple TyCon applications:
--
--   dropRuntimeRepArgs [ 'LiftedRep, 'IntRep
--                      , String, Int# ] == [String, Int#]
--
dropRuntimeRepArgs :: [Type] -> [Type]
dropRuntimeRepArgs = dropWhile isRuntimeRepKindedTy

-- | Extract the RuntimeRep classifier of a type. For instance,
-- @getRuntimeRep_maybe Int = Just LiftedRep@. Returns 'Nothing' if this is not
-- possible.
getRuntimeRep_maybe :: HasDebugCallStack => Type -> Maybe RuntimeRepType
getRuntimeRep_maybe = kindRep_maybe . typeKind

-- | Extract the RuntimeRep classifier of a type. For instance,
-- @getRuntimeRep_maybe Int = LiftedRep@. Panics if this is not possible.
getRuntimeRep :: HasDebugCallStack => Type -> RuntimeRepType
getRuntimeRep ty =
  case getRuntimeRep_maybe ty of
    Just r  -> r
    Nothing -> pprPanic "getRuntimeRep" (ppr ty <+> dcolon <+> ppr (typeKind ty))

-- | Extract the 'Levity' of a type. For example, @getLevity_maybe Int = Just Lifted@,
-- @getLevity (Array# Int) = Just Unlifted@, @getLevity Float# = Nothing@.
--
-- Returns 'Nothing' if this is not possible. Does not look through type family applications.
getLevity_maybe :: HasDebugCallStack => Type -> Maybe Type
getLevity_maybe ty
  | Just rep <- getRuntimeRep_maybe ty
  -- Directly matching on TyConApp after expanding type synonyms
  -- saves allocations compared to `splitTyConApp_maybe`. See #22254.
  -- Given that this is a pretty hot function we make use of the fact
  -- and use isTyConKeyApp_maybe instead.
  , Just [lev] <- isTyConKeyApp_maybe boxedRepDataConKey rep
  = Just lev
  | otherwise
  = Nothing

-- | Extract the 'Levity' of a type. For example, @getLevity Int = Lifted@,
-- or @getLevity (Array# Int) = Unlifted@.
--
-- Panics if this is not possible. Does not look through type family applications.
getLevity :: HasDebugCallStack => Type -> Type
getLevity ty
  | Just lev <- getLevity_maybe ty
  = lev
  | otherwise
  = pprPanic "getLevity" (ppr ty <+> dcolon <+> ppr (typeKind ty))

isUnboxedTupleType :: Type -> Bool
isUnboxedTupleType ty
  = tyConAppTyCon (getRuntimeRep ty) `hasKey` tupleRepDataConKey
  -- NB: Do not use typePrimRep, as that can't tell the difference between
  -- unboxed tuples and unboxed sums


isUnboxedSumType :: Type -> Bool
isUnboxedSumType ty
  = tyConAppTyCon (getRuntimeRep ty) `hasKey` sumRepDataConKey

-- | See "Type#type_classification" for what an algebraic type is.
-- Should only be applied to /types/, as opposed to e.g. partially
-- saturated type constructors
isAlgType :: Type -> Bool
isAlgType ty
  = case splitTyConApp_maybe ty of
      Just (tc, ty_args) -> assert (ty_args `lengthIs` tyConArity tc )
                            isAlgTyCon tc
      _other             -> False

-- | Check whether a type is a data family type
isDataFamilyAppType :: Type -> Bool
isDataFamilyAppType ty = case tyConAppTyCon_maybe ty of
                           Just tc -> isDataFamilyTyCon tc
                           _       -> False

-- | Computes whether an argument (or let right hand side) should
-- be computed strictly or lazily, based only on its type.
-- Currently, it's just 'isUnliftedType'.
-- Panics on representation-polymorphic types.
isStrictType :: HasDebugCallStack => Type -> Bool
isStrictType = isUnliftedType

isTerminatingType :: HasDebugCallStack => Type -> Bool
-- ^ True <=> a term of this type cannot be bottom
-- This identifies the types described by
--    Note [NON-BOTTOM-DICTS invariant] in GHC.Core
-- NB: unlifted types are not terminating types!
--     e.g. you can write a term (loop 1)::Int# that diverges.
isTerminatingType ty = case tyConAppTyCon_maybe ty of
    Just tc -> isClassTyCon tc && not (isNewTyCon tc)
    _       -> False

isPrimitiveType :: Type -> Bool
-- ^ Returns true of types that are opaque to Haskell.
isPrimitiveType ty = case splitTyConApp_maybe ty of
                        Just (tc, ty_args) -> assert (ty_args `lengthIs` tyConArity tc )
                                              isPrimTyCon tc
                        _                  -> False

{-
************************************************************************
*                                                                      *
   \subsection{Join points}
*                                                                      *
************************************************************************
-}

-- | Determine whether a type could be the type of a join point of given total
-- arity, according to the polymorphism rule. A join point cannot be polymorphic
-- in its return type, since given
--   join j @a @b x y z = e1 in e2,
-- the types of e1 and e2 must be the same, and a and b are not in scope for e2.
-- (See Note [The polymorphism rule of join points] in "GHC.Core".) Returns False
-- also if the type simply doesn't have enough arguments.
--
-- Note that we need to know how many arguments (type *and* value) the putative
-- join point takes; for instance, if
--   j :: forall a. a -> Int
-- then j could be a binary join point returning an Int, but it could *not* be a
-- unary join point returning a -> Int.
--
-- TODO: See Note [Excess polymorphism and join points]
isValidJoinPointType :: JoinArity -> Type -> Bool
isValidJoinPointType arity ty
  = valid_under emptyVarSet arity ty
  where
    valid_under tvs arity ty
      | arity == 0
      -- No bound tyvar may escape into the result type
      = tvs `disjointVarSet` tyCoVarsOfType ty
      | Just (t, ty') <- splitForAllTyCoVar_maybe ty
      = valid_under (tvs `extendVarSet` t) (arity-1) ty'
      | Just (_, _, _, res_ty) <- splitFunTy_maybe ty
      = valid_under tvs (arity-1) res_ty
      | otherwise
      = False

{- Note [Excess polymorphism and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In principle, if a function would be a join point except that it fails
the polymorphism rule (see Note [The polymorphism rule of join points] in
GHC.Core), it can still be made a join point with some effort. This is because
all tail calls must return the same type (they return to the same context!), and
thus if the return type depends on an argument, that argument must always be the
same.

For instance, consider:

  let f :: forall a. a -> Char -> [a]
      f @a x c = ... f @a y 'a' ...
  in ... f @Int 1 'b' ... f @Int 2 'c' ...

(where the calls are tail calls). `f` fails the polymorphism rule because its
return type is [a], where [a] is bound. But since the type argument is always
'Int', we can rewrite it as:

  let f' :: Int -> Char -> [Int]
      f' x c = ... f' y 'a' ...
  in ... f' 1 'b' ... f' 2 'c' ...

and now we can make f' a join point:

  join f' :: Int -> Char -> [Int]
       f' x c = ... jump f' y 'a' ...
  in ... jump f' 1 'b' ... jump f' 2 'c' ...

It's not clear that this comes up often, however. TODO: Measure how often and
add this analysis if necessary.  See #14620.


************************************************************************
*                                                                      *
   \subsection{Sequencing on types}
*                                                                      *
************************************************************************
-}

-- | Fully force a type (head and all components), returning unit
seqType :: Type -> ()
seqType (LitTy n)                   = n `seq` ()
seqType (TyVarTy tv)                = tv `seq` ()
seqType (AppTy t1 t2)               = seqType t1 `seq` seqType t2
seqType (FunTy _ w t1 t2)           = seqType w `seq` seqType t1 `seq` seqType t2
seqType (TyConApp tc tys)           = tc `seq` seqTypes tys
seqType (ForAllTy (Bndr tv _) ty)   = seqType (varType tv) `seq` seqType ty
seqType (CastTy ty co)              = seqType ty `seq` seqCo co
seqType (CoercionTy co)             = seqCo co

-- | 'seqType' over a list of types
seqTypes :: [Type] -> ()
seqTypes []       = ()
seqTypes (ty:tys) = seqType ty `seq` seqTypes tys

{-
************************************************************************
*                                                                      *
                The kind of a type
*                                                                      *
************************************************************************

Note [Kinding rules for types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here are the key kinding rules for types

          torc1 is TYPE or CONSTRAINT
          torc2 is TYPE or CONSTRAINT
          t1 : torc1 rep1
          t2 : torc2 rep2
   (FUN)  ----------------
          t1 -> t2 : torc2 LiftedRep
          -- In fact the arrow varies with torc1/torc2
          -- See Note [Function type constructors and FunTy]
          -- in GHC.Builtin.Types.Prim

          torc is TYPE or CONSTRAINT
          ty : body_torc rep
          ki : Type
          `a` is a type variable
          `a` is not free in rep
(FORALL1) -----------------------
          forall (a::ki). ty : body_torc rep

          torc is TYPE or CONSTRAINT
          ty : body_torc rep
          `c` is a coercion variable
          `c` is not free in rep
          `c` is free in ty    -- Surprise 1!
(FORALL2) -------------------------
          forall (cv::k1 ~#{N,R} k2). ty : body_torc LiftedRep
                                           -- Surprise 2!

Note that:
* (FORALL1) rejects (forall (a::Maybe). blah)

* (FORALL2) Surprise 1: See GHC.Core.TyCo.Rep
            Note [Unused coercion variable in ForAllTy]

* (FORALL2) Surprise 2: coercion abstractions are not erased, so this
  must be LiftedRep, just like (FUN). (FORALL2) is just a dependent
  form of (FUN).
Note [Phantom type variables in kinds]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider

  type K (r :: RuntimeRep) = Type   -- Note 'r' is unused
  data T r :: K r                   -- T :: forall r -> K r
  foo :: forall r. T r

The body of the forall in foo's type has kind (K r), and
normally it would make no sense to have
   forall r. (ty :: K r)
because the kind of the forall would escape the binding
of 'r'.  But in this case it's fine because (K r) expands
to Type, so we explicitly /permit/ the type
   forall r. T r

To accommodate such a type, in typeKind (forall a.ty) we use
occCheckExpand to expand any type synonyms in the kind of 'ty'
to eliminate 'a'.  See kinding rule (FORALL) in
Note [Kinding rules for types]

See also
 * GHC.Core.Type.occCheckExpand
 * GHC.Core.Utils.coreAltsType
 * GHC.Tc.Validity.checkEscapingKind
all of which grapple with the same problem.

See #14939.
-}

-----------------------------
typeKind :: HasDebugCallStack => Type -> Kind
-- No need to expand synonyms
typeKind (TyConApp tc tys) = piResultTys (tyConKind tc) tys
typeKind (LitTy l)         = typeLiteralKind l
typeKind (FunTy { ft_af = af }) = case funTyFlagResultTypeOrConstraint af of
                                    TypeLike       -> liftedTypeKind
                                    ConstraintLike -> constraintKind
typeKind (TyVarTy tyvar)   = tyVarKind tyvar
typeKind (CastTy _ty co)   = coercionRKind co
typeKind (CoercionTy co)   = coercionType co

typeKind (AppTy fun arg)
  = go fun [arg]
  where
    -- Accumulate the type arguments, so we can call piResultTys,
    -- rather than a succession of calls to piResultTy (which is
    -- asymptotically costly as the number of arguments increases)
    go (AppTy fun arg) args = go fun (arg:args)
    go fun             args = piResultTys (typeKind fun) args

typeKind ty@(ForAllTy {})
  = assertPpr (not (null tcvs)) (ppr ty) $
       -- If tcvs is empty somehow we'll get an infinite loop!
    case occCheckExpand tcvs body_kind of
      -- We must make sure tvs do not occur in kind,
      -- as they would be out of scope!
      -- See Note [Phantom type variables in kinds]
      Nothing -> pprPanic "typeKind"
                  (ppr ty $$ ppr tcvs $$ ppr body <+> dcolon <+> ppr body_kind)

      Just k' | all isTyVar tcvs -> k'                     -- Rule (FORALL1)
              | otherwise        -> lifted_kind_from_body  -- Rule (FORALL2)
  where
    (tcvs, body) = splitForAllTyCoVars ty
                   -- Important: splits both TyVar and CoVar binders
    body_kind = typeKind body

    lifted_kind_from_body  -- Implements (FORALL2)
      = case sORTKind_maybe body_kind of
          Just (ConstraintLike, _) -> constraintKind
          Just (TypeLike,       _) -> liftedTypeKind
          Nothing                  -> pprPanic "typeKind" (ppr body_kind)

---------------------------------------------
sORTKind_maybe :: Kind -> Maybe (TypeOrConstraint, Type)
-- Sees if the argument is of form (TYPE rep) or (CONSTRAINT rep)
-- and if so returns which, and the runtime rep
--
-- This is a "hot" function.  Do not call splitTyConApp_maybe here,
-- to avoid the faff with FunTy
sORTKind_maybe (TyConApp tc tys)
  -- First, short-cuts for Type and Constraint that do no allocation
  | tc_uniq == liftedTypeKindTyConKey = assert( null tys ) $ Just (TypeLike,       liftedRepTy)
  | tc_uniq == constraintKindTyConKey = assert( null tys ) $ Just (ConstraintLike, liftedRepTy)
  | tc_uniq == tYPETyConKey           = get_rep TypeLike
  | tc_uniq == cONSTRAINTTyConKey     = get_rep ConstraintLike
  | Just ty' <- expandSynTyConApp_maybe tc tys = sORTKind_maybe ty'
  where
    !tc_uniq = tyConUnique tc
       -- This bang on tc_uniq is important.  It means that sORTKind_maybe starts
       -- by evaluating tc_uniq, and then ends up with a single case with a 4-way branch

    get_rep torc = case tys of
                     (rep:_reps) -> assert (null _reps) $ Just (torc, rep)
                     []          -> Nothing

sORTKind_maybe _ = Nothing

typeTypeOrConstraint :: HasDebugCallStack => Type -> TypeOrConstraint
-- Precondition: expects a type that classifies values.
-- Returns whether it is TypeLike or ConstraintLike.
-- Equivalent to calling sORTKind_maybe, but faster in the FunTy case
typeTypeOrConstraint ty
  = case coreFullView ty of
      FunTy { ft_af = af } -> funTyFlagResultTypeOrConstraint af
      ty' | Just (torc, _) <- sORTKind_maybe (typeKind ty')
          -> torc
          | otherwise
          -> pprPanic "typeOrConstraint" (ppr ty <+> dcolon <+> ppr (typeKind ty))

isPredTy :: HasDebugCallStack => Type -> Bool
-- Precondition: expects a type that classifies values
-- See Note [Types for coercions, predicates, and evidence] in GHC.Core.TyCo.Rep
-- Returns True for types of kind (CONSTRAINT _), False for ones of kind (TYPE _)
isPredTy ty = case typeTypeOrConstraint ty of
                  TypeLike       -> False
                  ConstraintLike -> True

-- | Does this classify a type allowed to have values? Responds True to things
-- like *, TYPE Lifted, TYPE IntRep, TYPE v, Constraint.
isTYPEorCONSTRAINT :: Kind -> Bool
-- ^ True of a kind `TYPE _` or `CONSTRAINT _`
isTYPEorCONSTRAINT k = isJust (sORTKind_maybe k)

tyConIsTYPEorCONSTRAINT :: TyCon -> Bool
tyConIsTYPEorCONSTRAINT tc
  = tc_uniq == tYPETyConKey || tc_uniq == cONSTRAINTTyConKey
  where
    !tc_uniq = tyConUnique tc

isConstraintLikeKind :: Kind -> Bool
-- True of (CONSTRAINT _)
isConstraintLikeKind kind
  = case sORTKind_maybe kind of
      Just (ConstraintLike, _) -> True
      _                        -> False

isConstraintKind :: Kind -> Bool
-- True of (CONSTRAINT LiftedRep)
isConstraintKind kind
  = case sORTKind_maybe kind of
      Just (ConstraintLike, rep) -> isLiftedRuntimeRep rep
      _                          -> False

tcIsLiftedTypeKind :: Kind -> Bool
-- ^ Is this kind equivalent to 'Type' i.e. TYPE LiftedRep?
tcIsLiftedTypeKind kind
  | Just (TypeLike, rep) <- sORTKind_maybe kind
  = isLiftedRuntimeRep rep
  | otherwise
  = False

tcIsBoxedTypeKind :: Kind -> Bool
-- ^ Is this kind equivalent to @TYPE (BoxedRep l)@ for some @l :: Levity@?
tcIsBoxedTypeKind kind
  | Just (TypeLike, rep) <- sORTKind_maybe kind
  = isBoxedRuntimeRep rep
  | otherwise
  = False

-- | Is this kind equivalent to @TYPE r@ (for some unknown r)?
-- -- This considers 'Constraint' to be distinct from @*@. isTypeLikeKind :: Kind -> Bool isTypeLikeKind kind = case sORTKind_maybe kind of Just (TypeLike, _) -> True _ -> False returnsConstraintKind :: Kind -> Bool -- True <=> the Kind ultimately returns a Constraint -- E.g. * -> Constraint -- forall k. k -> Constraint returnsConstraintKind kind | Just kind' <- coreView kind = returnsConstraintKind kind' returnsConstraintKind (ForAllTy _ ty) = returnsConstraintKind ty returnsConstraintKind (FunTy { ft_res = ty }) = returnsConstraintKind ty returnsConstraintKind kind = isConstraintLikeKind kind -------------------------- typeLiteralKind :: TyLit -> Kind typeLiteralKind (NumTyLit {}) = naturalTy typeLiteralKind (StrTyLit {}) = typeSymbolKind typeLiteralKind (CharTyLit {}) = charTy -- | Returns True if a type has a syntactically fixed runtime rep, -- as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. -- -- This function is equivalent to `isFixedRuntimeRepKind . typeKind` -- but much faster. -- -- __Precondition:__ The type has kind @('TYPE' blah)@ typeHasFixedRuntimeRep :: HasDebugCallStack => Type -> Bool typeHasFixedRuntimeRep = go where go (TyConApp tc _) | tcHasFixedRuntimeRep tc = True go (FunTy {}) = True go (LitTy {}) = True go (ForAllTy _ ty) = go ty go ty = isFixedRuntimeRepKind (typeKind ty) -- | Checks that a kind of the form 'Type', 'Constraint' -- or @'TYPE r@ is concrete. See 'isConcreteType'. -- -- __Precondition:__ The type has kind `TYPE blah` or `CONSTRAINT blah` isFixedRuntimeRepKind :: HasDebugCallStack => Kind -> Bool isFixedRuntimeRepKind k = assertPpr (isTYPEorCONSTRAINT k) (ppr k) $ -- the isLiftedTypeKind check is necessary b/c of Constraint isConcreteType k -- | Tests whether the given type is concrete, i.e. it -- whether it consists only of concrete type constructors, -- concrete type variables, and applications. -- -- See Note [Concrete types] in GHC.Tc.Utils.Concrete. 
isConcreteType :: Type -> Bool isConcreteType = isConcreteTypeWith emptyVarSet isConcreteTypeWith :: TyVarSet -> Type -> Bool -- See Note [Concrete types] in GHC.Tc.Utils.Concrete. -- For this "With" version we pass in a set of TyVars to be considered -- concrete. This supports mkSynonymTyCon, which needs to test the RHS -- for concreteness, under the assumption that the binders are instantiated -- to concrete types isConcreteTypeWith conc_tvs = go where go (TyVarTy tv) = isConcreteTyVar tv || tv `elemVarSet` conc_tvs go (AppTy ty1 ty2) = go ty1 && go ty2 go (TyConApp tc tys) = go_tc tc tys go ForAllTy{} = False go (FunTy _ w t1 t2) = go w && go (typeKind t1) && go t1 && go (typeKind t2) && go t2 go LitTy{} = True go CastTy{} = False go CoercionTy{} = False go_tc tc tys | isForgetfulSynTyCon tc -- E.g. type S a = Int -- Then (S x) is concrete even if x isn't , Just ty' <- expandSynTyConApp_maybe tc tys = go ty' -- Apart from forgetful synonyms, isConcreteTyCon -- is enough; no need to expand. This is good for e.g -- type LiftedRep = BoxedRep Lifted | isConcreteTyCon tc = all go tys | otherwise -- E.g. type families = False {- %************************************************************************ %* * Pretty-printing %* * %************************************************************************ Most pretty-printing is either in GHC.Core.TyCo.Rep or GHC.Iface.Type. -} -- | Does a 'TyCon' (that is applied to some number of arguments) need to be -- ascribed with an explicit kind signature to resolve ambiguity if rendered as -- a source-syntax type? -- (See @Note [When does a tycon application need an explicit kind signature?]@ -- for a full explanation of what this function checks for.) tyConAppNeedsKindSig :: Bool -- ^ Should specified binders count towards injective positions in -- the kind of the TyCon? (If you're using visible kind -- applications, then you want True here. -> TyCon -> Int -- ^ The number of args the 'TyCon' is applied to. 
-> Bool -- ^ Does @T t_1 ... t_n@ need a kind signature? (Where @n@ is the -- number of arguments) tyConAppNeedsKindSig spec_inj_pos tc n_args | LT <- listLengthCmp tc_binders n_args = False | otherwise = let (dropped_binders, remaining_binders) = splitAt n_args tc_binders result_kind = mkTyConKind remaining_binders tc_res_kind result_vars = tyCoVarsOfType result_kind dropped_vars = fvVarSet $ mapUnionFV injective_vars_of_binder dropped_binders in not (subVarSet result_vars dropped_vars) where tc_binders = tyConBinders tc tc_res_kind = tyConResKind tc -- Returns the variables that would be fixed by knowing a TyConBinder. See -- Note [When does a tycon application need an explicit kind signature?] -- for a more detailed explanation of what this function does. injective_vars_of_binder :: TyConBinder -> FV injective_vars_of_binder (Bndr tv vis) = case vis of AnonTCB -> injectiveVarsOfType False -- conservative choice (varType tv) NamedTCB argf | source_of_injectivity argf -> unitFV tv `unionFV` injectiveVarsOfType False (varType tv) _ -> emptyFV source_of_injectivity Required = True source_of_injectivity Specified = spec_inj_pos source_of_injectivity Inferred = False {- Note [When does a tycon application need an explicit kind signature?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a couple of places in GHC where we convert Core Types into forms that more closely resemble user-written syntax. These include: 1. Template Haskell Type reification (see, for instance, GHC.Tc.Gen.Splice.reify_tc_app) 2. Converting Types to LHsTypes (such as in Haddock.Convert in haddock) This conversion presents a challenge: how do we ensure that the resulting type has enough kind information so as not to be ambiguous? To better motivate this question, consider the following Core type: -- Foo :: Type -> Type type Foo = Proxy Type There is nothing ambiguous about the RHS of Foo in Core. 
But if we were to, say, reify it into a TH Type, then it's tempting to just drop the invisible Type argument and simply return `Proxy`. But now we've lost crucial kind information: we don't know if we're dealing with `Proxy Type` or `Proxy Bool` or `Proxy Int` or something else! We've inadvertently introduced ambiguity. Unlike in other situations in GHC, we can't just turn on -fprint-explicit-kinds, as we need to produce something which has the same structure as a source-syntax type. Moreover, we can't rely on visible kind application, since the first kind argument to Proxy is inferred, not specified. Our solution is to annotate certain tycons with their kinds whenever they appear in applied form in order to resolve the ambiguity. For instance, we would reify the RHS of Foo like so: type Foo = (Proxy :: Type -> Type) We need to devise an algorithm that determines precisely which tycons need these explicit kind signatures. We certainly don't want to annotate _every_ tycon with a kind signature, or else we might end up with horribly bloated types like the following: (Either :: Type -> Type -> Type) (Int :: Type) (Char :: Type) We only want to annotate tycons that absolutely require kind signatures in order to resolve some sort of ambiguity, and nothing more. Suppose we have a tycon application (T ty_1 ... ty_n). Why might this type require a kind signature? It might require it when we need to fill in any of T's omitted arguments. By "omitted argument", we mean one that is dropped when reifying ty_1 ... ty_n. Sometimes, the omitted arguments are inferred and specified arguments (e.g., TH reification in GHC.Tc.Gen.Splice), and sometimes the omitted arguments are only the inferred ones (e.g., in situations where specified arguments are reified through visible kind application). 
Regardless, the key idea is that _some_ arguments are going to be omitted after reification, and the only mechanism we have at our disposal for filling them in is through explicit kind signatures. What do we mean by "fill in"? Let's consider this small example: T :: forall {k}. Type -> (k -> Type) -> k Moreover, we have this application of T: T @{j} Int aty When we reify this type, we omit the inferred argument @{j}. Is it fixed by the other (non-inferred) arguments? Yes! If we know the kind of (aty :: blah), then we'll generate an equality constraint (kappa -> Type) and, assuming we can solve it, that will fix `kappa`. (Here, `kappa` is the unification variable that we instantiate `k` with.) Therefore, for any application of a tycon T to some arguments, the Question We Must Answer is: * Given the first n arguments of T, do the kinds of the non-omitted arguments fill in the omitted arguments? (This is still a bit hand-wavy, but we'll refine this question incrementally as we explain more of the machinery underlying this process.) Answering this question is precisely the role that the `injectiveVarsOfType` and `injective_vars_of_binder` functions exist to serve. If an omitted argument `a` appears in the set returned by `injectiveVarsOfType ty`, then knowing `ty` determines (i.e., fills in) `a`. (More on `injective_vars_of_binder` in a bit.) More formally, if `a` is in `injectiveVarsOfType ty` and S1(ty) ~ S2(ty), then S1(a) ~ S2(a), where S1 and S2 are arbitrary substitutions. For example, is `F` is a non-injective type family, then injectiveVarsOfType(Either c (Maybe (a, F b c))) = {a, c} Now that we know what this function does, here is a second attempt at the Question We Must Answer: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. Do the injective variables of these binders fill in the remainder of T's kind? Alright, we're getting closer. 
Next, we need to clarify what the injective variables of a tycon binder are. This the role that the `injective_vars_of_binder` function serves. Here is what this function does for each form of tycon binder: * Anonymous binders are injective positions. For example, in the promoted data constructor '(:): '(:) :: forall a. a -> [a] -> [a] The second and third tyvar binders (of kinds `a` and `[a]`) are both anonymous, so if we had '(:) 'True '[], then the kinds of 'True and '[] would contribute to the kind of '(:) 'True '[]. Therefore, injective_vars_of_binder(_ :: a) = injectiveVarsOfType(a) = {a}. (Similarly, injective_vars_of_binder(_ :: [a]) = {a}.) * Named binders: - Inferred binders are never injective positions. For example, in this data type: data Proxy a Proxy :: forall {k}. k -> Type If we had Proxy 'True, then the kind of 'True would not contribute to the kind of Proxy 'True. Therefore, injective_vars_of_binder(forall {k}. ...) = {}. - Required binders are injective positions. For example, in this data type: data Wurble k (a :: k) :: k Wurble :: forall k -> k -> k The first tyvar binder (of kind `forall k`) has required visibility, so if we had Wurble (Maybe a) Nothing, then the kind of Maybe a would contribute to the kind of Wurble (Maybe a) Nothing. Hence, injective_vars_of_binder(forall a -> ...) = {a}. - Specified binders /might/ be injective positions, depending on how you approach things. Continuing the '(:) example: '(:) :: forall a. a -> [a] -> [a] Normally, the (forall a. ...) tyvar binder wouldn't contribute to the kind of '(:) 'True '[], since it's not explicitly instantiated by the user. But if visible kind application is enabled, then this is possible, since the user can write '(:) @Bool 'True '[]. (In that case, injective_vars_of_binder(forall a. ...) = {a}.) 
There are some situations where using visible kind application is appropriate and others where it is not (e.g., TH reification), so the `injective_vars_of_binder` function is parameterized by a Bool which decides if specified binders should be counted towards injective positions or not. Now that we've defined injective_vars_of_binder, we can refine the Question We Must Answer once more: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. For each such binder b_i, take the union of all injective_vars_of_binder(b_i). Is this set a superset of the free variables of the remainder of T's kind? If the answer to this question is "no", then (T ty_1 ... ty_n) needs an explicit kind signature, since T's kind has kind variables leftover that aren't fixed by the non-omitted arguments. One last sticking point: what does "the remainder of T's kind" mean? You might be tempted to think that it corresponds to all of the arguments in the kind of T that would normally be instantiated by omitted arguments. But this isn't quite right, strictly speaking. Consider the following (silly) example: S :: forall {k}. Type -> Type And suppose we have this application of S: S Int Bool The Int argument would be omitted, and injective_vars_of_binder(_ :: Type) = {}. This is not a superset of {k}, which might suggest that (S Bool) needs an explicit kind signature. But (S Bool :: Type) doesn't actually fix `k`! This is because the kind signature only affects the /result/ of the application, not all of the individual arguments. So adding a kind signature here won't make a difference. Therefore, the fourth (and final) iteration of the Question We Must Answer is: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. For each such binder b_i, take the union of all injective_vars_of_binder(b_i). 
Is this set a superset of the free variables of the kind of (T ty_1 ... ty_n)? Phew, that was a lot of work! How can be sure that this is correct? That is, how can we be sure that in the event that we leave off a kind annotation, that one could infer the kind of the tycon application from its arguments? It's essentially a proof by induction: if we can infer the kinds of every subtree of a type, then the whole tycon application will have an inferrable kind--unless, of course, the remainder of the tycon application's kind has uninstantiated kind variables. What happens if T is oversaturated? That is, if T's kind has fewer than n arguments, in the case that the concrete application instantiates a result kind variable with an arrow kind? If we run out of arguments, we do not attach a kind annotation. This should be a rare case, indeed. Here is an example: data T1 :: k1 -> k2 -> * data T2 :: k1 -> k2 -> * type family G (a :: k) :: k type instance G T1 = T2 type instance F Char = (G T1 Bool :: (* -> *) -> *) -- F from above Here G's kind is (forall k. k -> k), and the desugared RHS of that last instance of F is (G (* -> (* -> *) -> *) (T1 * (* -> *)) Bool). According to the algorithm above, there are 3 arguments to G so we should peel off 3 arguments in G's kind. But G's kind has only two arguments. This is the rare special case, and we choose not to annotate the application of G with a kind signature. After all, we needn't do this, since that instance would be reified as: type instance F Char = G (T1 :: * -> (* -> *) -> *) Bool So the kind of G isn't ambiguous anymore due to the explicit kind annotation on its argument. See #8953 and test th/T8953. 
-} {- ************************************************************************ * * Multiplicities * * ************************************************************************ These functions would prefer to be in GHC.Core.Multiplicity, but they some are used elsewhere in this module, and wanted to bring their friends here with them. -} unrestricted, linear, tymult :: a -> Scaled a -- | Scale a payload by Many unrestricted = Scaled ManyTy -- | Scale a payload by One linear = Scaled OneTy -- | Scale a payload by Many; used for type arguments in core tymult = Scaled ManyTy irrelevantMult :: Scaled a -> a irrelevantMult = scaledThing mkScaled :: Mult -> a -> Scaled a mkScaled = Scaled scaledSet :: Scaled a -> b -> Scaled b scaledSet (Scaled m _) b = Scaled m b pattern OneTy :: Mult pattern OneTy <- (isOneTy -> True) where OneTy = oneDataConTy pattern ManyTy :: Mult pattern ManyTy <- (isManyTy -> True) where ManyTy = manyDataConTy isManyTy :: Mult -> Bool isManyTy ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` manyDataConKey isManyTy _ = False isOneTy :: Mult -> Bool isOneTy ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` oneDataConKey isOneTy _ = False isLinearType :: Type -> Bool -- ^ @isLinear t@ returns @True@ of a if @t@ is a type of (curried) function -- where at least one argument is linear (or otherwise non-unrestricted). We use -- this function to check whether it is safe to eta reduce an Id in CorePrep. It -- is always safe to return 'True', because 'True' deactivates the optimisation. 
isLinearType ty = case ty of FunTy _ ManyTy _ res -> isLinearType res FunTy _ _ _ _ -> True ForAllTy _ res -> isLinearType res _ -> False {- ********************************************************************* * * Space-saving construction * * ********************************************************************* -} {- Note [Using synonyms to compress types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Was: [Prefer Type over TYPE (BoxedRep Lifted)] The Core of nearly any program will have numerous occurrences of the Types TyConApp BoxedRep [TyConApp Lifted []] -- Synonym LiftedRep TyConApp BoxedRep [TyConApp Unlifted []] -- Synonym UnliftedREp TyConApp TYPE [TyConApp LiftedRep []] -- Synonym Type TyConApp TYPE [TyConApp UnliftedRep []] -- Synonym UnliftedType While investigating #17292 we found that these constituted a majority of all TyConApp constructors on the heap: (From a sample of 100000 TyConApp closures) 0x45f3523 - 28732 - `Type` 0x420b840702 - 9629 - generic type constructors 0x42055b7e46 - 9596 0x420559b582 - 9511 0x420bb15a1e - 9509 0x420b86c6ba - 9501 0x42055bac1e - 9496 0x45e68fd - 538 - `TYPE ...` Consequently, we try hard to ensure that operations on such types are efficient. Specifically, we strive to a. Avoid heap allocation of such types; use a single static TyConApp b. Use a small (shallow in the tree-depth sense) representation for such types Goal (b) is particularly useful as it makes traversals (e.g. free variable traversal, substitution, and comparison) more efficient. Comparison in particular takes special advantage of nullary type synonym applications (e.g. things like @TyConApp typeTyCon []@). See * Note [Comparing type synonyms] in "GHC.Core.TyCo.Compare" * Note [Unifying type synonyms] in "GHC.Core.Unify" To accomplish these we use a number of tricks, implemented by mkTyConApp. 1. 
Instead of (TyConApp BoxedRep [TyConApp Lifted []]), we prefer a statically-allocated (TyConApp LiftedRep []) where `LiftedRep` is a type synonym: type LiftedRep = BoxedRep Lifted Similarly for UnliftedRep 2. Instead of (TyConApp TYPE [TyConApp LiftedRep []]) we prefer the statically-allocated (TyConApp Type []) where `Type` is a type synonym type Type = TYPE LiftedRep Similarly for UnliftedType These serve goal (b) since there are no applied type arguments to traverse, e.g., during comparison. 3. We have a single, statically allocated top-level binding to represent `TyConApp GHC.Types.Type []` (namely 'GHC.Builtin.Types.Prim.liftedTypeKind'), ensuring that we don't need to allocate such types (goal (a)). See functions mkTYPEapp and mkBoxedRepApp 4. We use the sharing mechanism described in Note [Sharing nullary TyConApps] in GHC.Core.TyCon to ensure that we never need to allocate such nullary applications (goal (a)). See #17958, #20541 -} -- | A key function: builds a 'TyConApp' or 'FunTy' as appropriate to -- its arguments. Applies its arguments to the constructor from left to right. 
mkTyConApp :: TyCon -> [Type] -> Type mkTyConApp tycon [] = -- See Note [Sharing nullary TyConApps] in GHC.Core.TyCon mkTyConTy tycon mkTyConApp tycon tys@(ty1:rest) | Just fun_ty <- tyConAppFunTy_maybe tycon tys = fun_ty -- See Note [Using synonyms to compress types] | key == tYPETyConKey , Just ty <- mkTYPEapp_maybe ty1 = assert (null rest) ty | key == cONSTRAINTTyConKey , Just ty <- mkCONSTRAINTapp_maybe ty1 = assert (null rest) ty -- See Note [Using synonyms to compress types] | key == boxedRepDataConTyConKey , Just ty <- mkBoxedRepApp_maybe ty1 = assert (null rest) ty | key == tupleRepDataConTyConKey , Just ty <- mkTupleRepApp_maybe ty1 = assert (null rest) ty -- The catch-all case | otherwise = TyConApp tycon tys where key = tyConUnique tycon {- Note [Care using synonyms to compress types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using a synonym to compress a types has a tricky wrinkle. Consider coreView applied to (TyConApp LiftedRep []) * coreView expands the LiftedRep synonym: type LiftedRep = BoxedRep Lifted * Danger: we might apply the empty substitution to the RHS of the synonym. And substTy calls mkTyConApp BoxedRep [Lifted]. And mkTyConApp compresses that back to LiftedRep. Loop! * Solution: in expandSynTyConApp_maybe, don't call substTy for nullary type synonyms. That's more efficient anyway. -} mkTYPEapp :: RuntimeRepType -> Type mkTYPEapp rr = case mkTYPEapp_maybe rr of Just ty -> ty Nothing -> TyConApp tYPETyCon [rr] mkTYPEapp_maybe :: RuntimeRepType -> Maybe Type -- ^ Given a @RuntimeRep@, applies @TYPE@ to it. -- On the fly it rewrites -- TYPE LiftedRep --> liftedTypeKind (a synonym) -- TYPE UnliftedRep --> unliftedTypeKind (ditto) -- TYPE ZeroBitRep --> zeroBitTypeKind (ditto) -- NB: no need to check for TYPE (BoxedRep Lifted), TYPE (BoxedRep Unlifted) -- because those inner types should already have been rewritten -- to LiftedRep and UnliftedRep respectively, by mkTyConApp -- -- see Note [TYPE and CONSTRAINT] in GHC.Builtin.Types.Prim. 
-- See Note [Using synonyms to compress types] in GHC.Core.Type {-# NOINLINE mkTYPEapp_maybe #-} mkTYPEapp_maybe (TyConApp tc args) | key == liftedRepTyConKey = assert (null args) $ Just liftedTypeKind -- TYPE LiftedRep | key == unliftedRepTyConKey = assert (null args) $ Just unliftedTypeKind -- TYPE UnliftedRep | key == zeroBitRepTyConKey = assert (null args) $ Just zeroBitTypeKind -- TYPE ZeroBitRep where key = tyConUnique tc mkTYPEapp_maybe _ = Nothing ------------------ mkCONSTRAINTapp :: RuntimeRepType -> Type -- ^ Just like mkTYPEapp mkCONSTRAINTapp rr = case mkCONSTRAINTapp_maybe rr of Just ty -> ty Nothing -> TyConApp cONSTRAINTTyCon [rr] mkCONSTRAINTapp_maybe :: RuntimeRepType -> Maybe Type -- ^ Just like mkTYPEapp_maybe {-# NOINLINE mkCONSTRAINTapp_maybe #-} mkCONSTRAINTapp_maybe (TyConApp tc args) | tc `hasKey` liftedRepTyConKey = assert (null args) $ Just constraintKind -- CONSTRAINT LiftedRep mkCONSTRAINTapp_maybe _ = Nothing ------------------ mkBoxedRepApp_maybe :: LevityType -> Maybe Type -- ^ Given a `Levity`, apply `BoxedRep` to it -- On the fly, rewrite -- BoxedRep Lifted --> liftedRepTy (a synonym) -- BoxedRep Unlifted --> unliftedRepTy (ditto) -- See Note [TYPE and CONSTRAINT] in GHC.Builtin.Types.Prim. -- See Note [Using synonyms to compress types] in GHC.Core.Type {-# NOINLINE mkBoxedRepApp_maybe #-} mkBoxedRepApp_maybe (TyConApp tc args) | key == liftedDataConKey = assert (null args) $ Just liftedRepTy -- BoxedRep Lifted | key == unliftedDataConKey = assert (null args) $ Just unliftedRepTy -- BoxedRep Unlifted where key = tyConUnique tc mkBoxedRepApp_maybe _ = Nothing mkTupleRepApp_maybe :: Type -> Maybe Type -- ^ Given a `[RuntimeRep]`, apply `TupleRep` to it -- On the fly, rewrite -- TupleRep [] -> zeroBitRepTy (a synonym) -- See Note [TYPE and CONSTRAINT] in GHC.Builtin.Types.Prim. 
-- See Note [Using synonyms to compress types] in GHC.Core.Type {-# NOINLINE mkTupleRepApp_maybe #-} mkTupleRepApp_maybe (TyConApp tc args) | key == nilDataConKey = assert (isSingleton args) $ Just zeroBitRepTy -- ZeroBitRep where key = tyConUnique tc mkTupleRepApp_maybe _ = Nothing typeOrConstraintKind :: TypeOrConstraint -> RuntimeRepType -> Kind typeOrConstraintKind TypeLike rep = mkTYPEapp rep typeOrConstraintKind ConstraintLike rep = mkCONSTRAINTapp rep ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Type.hs-boot0000644000000000000000000000230507346545000021014 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} module GHC.Core.Type where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Core.TyCo.Rep( Type, Coercion ) import GHC.Utils.Misc import GHC.Types.Var( FunTyFlag, TyVar ) import GHC.Types.Basic( TypeOrConstraint ) isPredTy :: HasDebugCallStack => Type -> Bool isCoercionTy :: Type -> Bool mkAppTy :: Type -> Type -> Type mkCastTy :: Type -> Coercion -> Type mkTyConApp :: TyCon -> [Type] -> Type mkCoercionTy :: Coercion -> Type piResultTy :: HasDebugCallStack => Type -> Type -> Type typeKind :: HasDebugCallStack => Type -> Type typeTypeOrConstraint :: HasDebugCallStack => Type -> TypeOrConstraint coreView :: Type -> Maybe Type rewriterView :: Type -> Maybe Type isRuntimeRepTy :: Type -> Bool isLevityTy :: Type -> Bool isMultiplicityTy :: Type -> Bool isLiftedTypeKind :: Type -> Bool splitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) tyConAppTyCon_maybe :: Type -> Maybe TyCon getTyVar_maybe :: Type -> Maybe TyVar getLevity :: HasDebugCallStack => Type -> Type partitionInvisibleTypes :: TyCon -> [Type] -> ([Type], [Type]) chooseFunTyFlag :: HasDebugCallStack => Type -> Type -> FunTyFlag ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Unfold.hs0000644000000000000000000011377707346545000020401 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 
1994-1998 Core-syntax unfoldings Unfoldings (which can travel across module boundaries) are in Core syntax (namely @CoreExpr@s). The type @Unfolding@ sits ``above'' simply-Core-expressions unfoldings, capturing ``higher-level'' things we know about a binding, usually things that the simplifier found out (e.g., ``it's a literal''). In the corner of a @CoreUnfolding@ unfolding, you will find, unsurprisingly, a Core expression. -} module GHC.Core.Unfold ( Unfolding, UnfoldingGuidance, -- Abstract types ExprSize(..), sizeExpr, ArgSummary(..), nonTriv, CallCtxt(..), UnfoldingOpts (..), defaultUnfoldingOpts, updateCreationThreshold, updateUseThreshold, updateFunAppDiscount, updateDictDiscount, updateVeryAggressive, updateCaseScaling, updateCaseThreshold, updateReportPrefix, inlineBoringOk, calcUnfoldingGuidance, uncondInlineJoin ) where import GHC.Prelude import GHC.Core import GHC.Core.Utils import GHC.Core.DataCon import GHC.Core.Type import GHC.Types.Id import GHC.Types.Literal import GHC.Types.Id.Info import GHC.Types.RepType ( isZeroBitTy ) import GHC.Types.Basic ( Arity, RecFlag ) import GHC.Types.ForeignCall import GHC.Types.Tickish import GHC.Builtin.PrimOps import GHC.Builtin.Names import GHC.Data.Bag import GHC.Utils.Misc import GHC.Utils.Outputable import qualified Data.ByteString as BS -- | Unfolding options data UnfoldingOpts = UnfoldingOpts { unfoldingCreationThreshold :: !Int -- ^ Threshold above which unfoldings are not *created* , unfoldingUseThreshold :: !Int -- ^ Threshold above which unfoldings are not *inlined* , unfoldingFunAppDiscount :: !Int -- ^ Discount for lambdas that are used (applied) , unfoldingDictDiscount :: !Int -- ^ Discount for dictionaries , unfoldingVeryAggressive :: !Bool -- ^ Force inlining in many more cases , unfoldingCaseThreshold :: !Int -- ^ Don't consider depth up to x , unfoldingCaseScaling :: !Int -- ^ Penalize depth with 1/x , unfoldingReportPrefix :: !(Maybe String) -- ^ Only report inlining decisions for names with this 
prefix } defaultUnfoldingOpts :: UnfoldingOpts defaultUnfoldingOpts = UnfoldingOpts { unfoldingCreationThreshold = 750 -- The unfoldingCreationThreshold threshold must be reasonably high -- to take account of possible discounts. -- E.g. 450 is not enough in 'fulsom' for Interval.sqr to -- inline into Csg.calc (The unfolding for sqr never makes it -- into the interface file.) , unfoldingUseThreshold = 90 -- Last adjusted upwards in #18282, when I reduced -- the result discount for constructors. , unfoldingFunAppDiscount = 60 -- Be fairly keen to inline a function if that means -- we'll be able to pick the right method from a dictionary , unfoldingDictDiscount = 30 -- Be fairly keen to inline a function if that means -- we'll be able to pick the right method from a dictionary , unfoldingVeryAggressive = False -- Only apply scaling once we are deeper than threshold cases -- in an RHS. , unfoldingCaseThreshold = 2 -- Penalize depth with (size*depth)/scaling , unfoldingCaseScaling = 30 -- Don't filter inlining decision reports , unfoldingReportPrefix = Nothing } -- Helpers for "GHC.Driver.Session" updateCreationThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCreationThreshold n opts = opts { unfoldingCreationThreshold = n } updateUseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateUseThreshold n opts = opts { unfoldingUseThreshold = n } updateFunAppDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateFunAppDiscount n opts = opts { unfoldingFunAppDiscount = n } updateDictDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateDictDiscount n opts = opts { unfoldingDictDiscount = n } updateVeryAggressive :: Bool -> UnfoldingOpts -> UnfoldingOpts updateVeryAggressive n opts = opts { unfoldingVeryAggressive = n } updateCaseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseThreshold n opts = opts { unfoldingCaseThreshold = n } updateCaseScaling :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseScaling n opts = opts { unfoldingCaseScaling = n } 
updateReportPrefix :: Maybe String -> UnfoldingOpts -> UnfoldingOpts updateReportPrefix n opts = opts { unfoldingReportPrefix = n } data ArgSummary = TrivArg -- Nothing interesting | NonTrivArg -- Arg has structure | ValueArg -- Arg is a con-app or PAP -- ..or con-like. Note [Conlike is interesting] instance Outputable ArgSummary where ppr TrivArg = text "TrivArg" ppr NonTrivArg = text "NonTrivArg" ppr ValueArg = text "ValueArg" nonTriv :: ArgSummary -> Bool nonTriv TrivArg = False nonTriv _ = True data CallCtxt = BoringCtxt | RhsCtxt RecFlag -- Rhs of a let-binding; see Note [RHS of lets] | DiscArgCtxt -- Argument of a function with non-zero arg discount | RuleArgCtxt -- We are somewhere in the argument of a function with rules | ValAppCtxt -- We're applied to at least one value arg -- This arises when we have ((f x |> co) y) -- Then the (f x) has argument 'x' but in a ValAppCtxt | CaseCtxt -- We're the scrutinee of a case -- that decomposes its scrutinee instance Outputable CallCtxt where ppr CaseCtxt = text "CaseCtxt" ppr ValAppCtxt = text "ValAppCtxt" ppr BoringCtxt = text "BoringCtxt" ppr (RhsCtxt ir)= text "RhsCtxt" <> parens (ppr ir) ppr DiscArgCtxt = text "DiscArgCtxt" ppr RuleArgCtxt = text "RuleArgCtxt" {- Note [Calculate unfolding guidance on the non-occ-anal'd expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice that we give the non-occur-analysed expression to calcUnfoldingGuidance. In some ways it'd be better to occur-analyse first; for example, sometimes during simplification, there's a large let-bound thing which has been substituted, and so is now dead; so 'expr' contains two copies of the thing while the occurrence-analysed expression doesn't. Nevertheless, we *don't* and *must not* occ-analyse before computing the size because a) The size computation bales out after a while, whereas occurrence analysis does not. b) Residency increases sharply if you occ-anal first. I'm not 100% sure why, but it's a large effect. 
Compiling Cabal went from residency of 534M to over 800M with this one change. This can occasionally mean that the guidance is very pessimistic; it gets fixed up next round. And it should be rare, because large let-bound things that are dead are usually caught by preInlineUnconditionally ************************************************************************ * * \subsection{The UnfoldingGuidance type} * * ************************************************************************ -} inlineBoringOk :: CoreExpr -> Bool -- See Note [INLINE for small functions] -- True => the result of inlining the expression is -- no bigger than the expression itself -- eg (\x y -> f y x) -- This is a quick and dirty version. It doesn't attempt -- to deal with (\x y z -> x (y z)) -- The really important one is (x `cast` c) inlineBoringOk e = go 0 e where go :: Int -> CoreExpr -> Bool go credit (Lam x e) | isId x = go (credit+1) e | otherwise = go credit e -- See Note [Count coercion arguments in boring contexts] go credit (App f (Type {})) = go credit f go credit (App f a) | credit > 0 , exprIsTrivial a = go (credit-1) f go credit (Tick _ e) = go credit e -- dubious go credit (Cast e _) = go credit e go credit (Case e b _ alts) | null alts = go credit e -- EmptyCase is like e | Just rhs <- isUnsafeEqualityCase e b alts = go credit rhs -- See Note [Inline unsafeCoerce] go _ (Var {}) = boringCxtOk go _ (Lit l) = litIsTrivial l && boringCxtOk go _ _ = boringCxtNotOk calcUnfoldingGuidance :: UnfoldingOpts -> Bool -- Definitely a top-level, bottoming binding -> Bool -- True <=> join point -> CoreExpr -- Expression to look at -> UnfoldingGuidance calcUnfoldingGuidance opts is_top_bottoming is_join (Tick t expr) | not (tickishIsCode t) -- non-code ticks don't matter for unfolding = calcUnfoldingGuidance opts is_top_bottoming is_join expr calcUnfoldingGuidance opts is_top_bottoming is_join expr = case sizeExpr opts bOMB_OUT_SIZE val_bndrs body of TooBig -> UnfNever SizeIs size cased_bndrs 
scrut_discount | uncondInline is_join expr bndrs n_val_bndrs body size -> UnfWhen { ug_unsat_ok = unSaturatedOk , ug_boring_ok = boringCxtOk , ug_arity = n_val_bndrs } -- Note [INLINE for small functions] | is_top_bottoming -> UnfNever -- See Note [Do not inline top-level bottoming functions] | otherwise -> UnfIfGoodArgs { ug_args = map (mk_discount cased_bndrs) val_bndrs , ug_size = size , ug_res = scrut_discount } where (bndrs, body) = collectBinders expr bOMB_OUT_SIZE = unfoldingCreationThreshold opts -- Bomb out if size gets bigger than this val_bndrs = filter isId bndrs n_val_bndrs = length val_bndrs mk_discount :: Bag (Id,Int) -> Id -> Int mk_discount cbs bndr = foldl' combine 0 cbs where combine acc (bndr', disc) | bndr == bndr' = acc `plus_disc` disc | otherwise = acc plus_disc :: Int -> Int -> Int plus_disc | isFunTy (idType bndr) = max | otherwise = (+) -- See Note [Function and non-function discounts] {- Note [Inline unsafeCoerce] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We really want to inline unsafeCoerce, even when applied to boring arguments. It doesn't look as if its RHS is smaller than the call unsafeCoerce x = case unsafeEqualityProof @a @b of UnsafeRefl -> x but that case is discarded in CoreToStg -- see Note [Implementing unsafeCoerce] in base:Unsafe.Coerce. Moreover, if we /don't/ inline it, we may be left with f (unsafeCoerce x) which will build a thunk -- bad, bad, bad. Conclusion: we really want inlineBoringOk to be True of the RHS of unsafeCoerce. And it really is, because we regard case unsafeEqualityProof @a @b of UnsafeRefl -> rhs as trivial iff rhs is. This is (U4) in Note [Implementing unsafeCoerce]. Note [Computing the size of an expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The basic idea of sizeExpr is obvious enough: count nodes. But getting the heuristics right has taken a long time. Here's the basic strategy: * Variables, literals: 0 (Exception for string literals, see litSize.) * Function applications (f e1 .. 
en): 1 + #value args * Constructor applications: 1, regardless of #args * Let(rec): 1 + size of components * Note, cast: 0 Examples Size Term -------------- 0 42# 0 x 0 True 2 f x 1 Just x 4 f (g x) Notice that 'x' counts 0, while (f x) counts 2. That's deliberate: there's a function call to account for. Notice also that constructor applications are very cheap, because exposing them to a caller is so valuable. [25/5/11] All sizes are now multiplied by 10, except for primops (which have sizes like 1 or 4. This makes primops look fantastically cheap, and seems to be almost universally beneficial. Done partly as a result of #4978. Note [Do not inline top-level bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The FloatOut pass has gone to some trouble to float out calls to 'error' and similar friends. See Note [Bottoming floats] in GHC.Core.Opt.SetLevels. Do not re-inline them! But we *do* still inline if they are very small (the uncondInline stuff). Note [INLINE for small functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider {-# INLINE f #-} f x = Just x g y = f y Then f's RHS is no larger than its LHS, so we should inline it into even the most boring context. In general, f the function is sufficiently small that its body is as small as the call itself, the inline unconditionally, regardless of how boring the context is. Things to note: (1) We inline *unconditionally* if inlined thing is smaller (using sizeExpr) than the thing it's replacing. Notice that (f x) --> (g 3) -- YES, unconditionally (f x) --> x : [] -- YES, *even though* there are two -- arguments to the cons x --> g 3 -- NO x --> Just v -- NO It's very important not to unconditionally replace a variable by a non-atomic term. (2) We do this even if the thing isn't saturated, else we end up with the silly situation that f x y = x ...map (f 3)... doesn't inline. 
Even in a boring context, inlining without being saturated will give a lambda instead of a PAP, and will be more efficient at runtime. (3) However, when the function's arity > 0, we do insist that it has at least one value argument at the call site. (This check is made in the UnfWhen case of callSiteInline.) Otherwise we find this: f = /\a \x:a. x d = /\b. MkD (f b) If we inline f here we get d = /\b. MkD (\x:b. x) and then prepareRhs floats out the argument, abstracting the type variables, so we end up with the original again! (4) We must be much more cautious about arity-zero things. Consider let x = y +# z in ... In *size* terms primops look very small, because the generate a single instruction, but we do not want to unconditionally replace every occurrence of x with (y +# z). So we only do the unconditional-inline thing for *trivial* expressions. NB: you might think that PostInlineUnconditionally would do this but it doesn't fire for top-level things; see GHC.Core.Opt.Simplify.Utils Note [Top level and postInlineUnconditionally] Note [Count coercion arguments in boring contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In inlineBoringOK, we ignore type arguments when deciding whether an expression is okay to inline into boring contexts. This is good, since if we have a definition like let y = x @Int in f y y there’s no reason not to inline y at both use sites — no work is actually duplicated. It may seem like the same reasoning applies to coercion arguments, and indeed, in #17182 we changed inlineBoringOK to treat coercions the same way. However, this isn’t a good idea: unlike type arguments, which have no runtime representation, coercion arguments *do* have a runtime representation (albeit the zero-width VoidRep, see Note [Coercion tokens] in "GHC.CoreToStg"). 
This caused trouble in #17787 for DataCon wrappers for nullary GADT constructors: the wrappers would be inlined and each use of the constructor would lead to a separate allocation instead of just sharing the wrapper closure. The solution: don’t ignore coercion arguments after all. -} uncondInline :: Bool -> CoreExpr -> [Var] -> Arity -> CoreExpr -> Int -> Bool -- Inline unconditionally if there no size increase -- Size of call is arity (+1 for the function) -- See Note [INLINE for small functions] uncondInline is_join rhs bndrs arity body size | is_join = uncondInlineJoin bndrs body | arity > 0 = size <= 10 * (arity + 1) -- See Note [INLINE for small functions] (1) | otherwise = exprIsTrivial rhs -- See Note [INLINE for small functions] (4) uncondInlineJoin :: [Var] -> CoreExpr -> Bool -- See Note [Duplicating join points] point (DJ3) in GHC.Core.Opt.Simplify.Iteration uncondInlineJoin _bndrs body | exprIsTrivial body = True -- Nullary constructors, literals | (Var v, args) <- collectArgs body , all exprIsTrivial args , isJoinId v -- Indirection to another join point; always inline = True | otherwise = False sizeExpr :: UnfoldingOpts -> Int -- Bomb out if it gets bigger than this -> [Id] -- Arguments; we're interested in which of these -- get case'd -> CoreExpr -> ExprSize -- Note [Computing the size of an expression] -- Forcing bOMB_OUT_SIZE early prevents repeated -- unboxing of the Int argument. 
sizeExpr opts !bOMB_OUT_SIZE top_args expr = size_up expr where size_up (Cast e _) = size_up e size_up (Tick _ e) = size_up e size_up (Type _) = sizeZero -- Types cost nothing size_up (Coercion _) = sizeZero size_up (Lit lit) = sizeN (litSize lit) size_up (Var f) | isZeroBitId f = sizeZero -- Make sure we get constructor discounts even -- on nullary constructors | otherwise = size_up_call f [] 0 size_up (App fun arg) | isTyCoArg arg = size_up fun | otherwise = size_up arg `addSizeNSD` size_up_app fun [arg] (if isZeroBitExpr arg then 1 else 0) size_up (Lam b e) | isId b && not (isZeroBitId b) = lamScrutDiscount opts (size_up e `addSizeN` 10) | otherwise = size_up e size_up (Let (NonRec binder rhs) body) = size_up_rhs (binder, rhs) `addSizeNSD` size_up body `addSizeN` size_up_alloc binder size_up (Let (Rec pairs) body) = foldr (addSizeNSD . size_up_rhs) (size_up body `addSizeN` sum (map (size_up_alloc . fst) pairs)) pairs size_up (Case e _ _ alts) | null alts = size_up e -- case e of {} never returns, so take size of scrutinee size_up (Case e _ _ alts) -- Now alts is non-empty | Just v <- is_top_arg e -- We are scrutinising an argument variable = let alt_sizes = map size_up_alt alts -- alts_size tries to compute a good discount for -- the case when we are scrutinising an argument variable alts_size (SizeIs tot tot_disc tot_scrut) -- Size of all alternatives (SizeIs max _ _) -- Size of biggest alternative = SizeIs tot (unitBag (v, 20 + tot - max) `unionBags` tot_disc) tot_scrut -- If the variable is known, we produce a -- discount that will take us back to 'max', -- the size of the largest alternative The -- 1+ is a little discount for reduced -- allocation in the caller -- -- Notice though, that we return tot_disc, -- the total discount from all branches. I -- think that's right. 
alts_size tot_size _ = tot_size in alts_size (foldr1 addAltSize alt_sizes) -- alts is non-empty (foldr1 maxSize alt_sizes) -- Good to inline if an arg is scrutinised, because -- that may eliminate allocation in the caller -- And it eliminates the case itself where is_top_arg (Var v) | v `elem` top_args = Just v is_top_arg (Cast e _) = is_top_arg e is_top_arg _ = Nothing size_up (Case e _ _ alts) = size_up e `addSizeNSD` foldr (addAltSize . size_up_alt) case_size alts where case_size | is_inline_scrut e, lengthAtMost alts 1 = sizeN (-10) | otherwise = sizeZero -- Normally we don't charge for the case itself, but -- we charge one per alternative (see size_up_alt, -- below) to account for the cost of the info table -- and comparisons. -- -- However, in certain cases (see is_inline_scrut -- below), no code is generated for the case unless -- there are multiple alts. In these cases we -- subtract one, making the first alt free. -- e.g. case x# +# y# of _ -> ... should cost 1 -- case touch# x# of _ -> ... should cost 0 -- (see #4978) -- -- I would like to not have the "lengthAtMost alts 1" -- condition above, but without that some programs got worse -- (spectral/hartel/event and spectral/para). I don't fully -- understand why. 
(SDM 24/5/11) -- unboxed variables, inline primops and unsafe foreign calls -- are all "inline" things: is_inline_scrut (Var v) = isUnliftedType (idType v) -- isUnliftedType is OK here: scrutinees have a fixed RuntimeRep (search for FRRCase) is_inline_scrut scrut | (Var f, _) <- collectArgs scrut = case idDetails f of FCallId fc -> not (isSafeForeignCall fc) PrimOpId op _ -> not (primOpOutOfLine op) _other -> False | otherwise = False size_up_rhs (bndr, rhs) | JoinPoint join_arity <- idJoinPointHood bndr -- Skip arguments to join point , (_bndrs, body) <- collectNBinders join_arity rhs = size_up body | otherwise = size_up rhs ------------ -- size_up_app is used when there's ONE OR MORE value args size_up_app (App fun arg) args voids | isTyCoArg arg = size_up_app fun args voids | isZeroBitExpr arg = size_up_app fun (arg:args) (voids + 1) | otherwise = size_up arg `addSizeNSD` size_up_app fun (arg:args) voids size_up_app (Var fun) args voids = size_up_call fun args voids size_up_app (Tick _ expr) args voids = size_up_app expr args voids size_up_app (Cast expr _) args voids = size_up_app expr args voids size_up_app other args voids = size_up other `addSizeN` callSize (length args) voids -- if the lhs is not an App or a Var, or an invisible thing like a -- Tick or Cast, then we should charge for a complete call plus the -- size of the lhs itself. 
------------ size_up_call :: Id -> [CoreExpr] -> Int -> ExprSize size_up_call fun val_args voids = case idDetails fun of FCallId _ -> sizeN (callSize (length val_args) voids) DataConWorkId dc -> conSize dc (length val_args) PrimOpId op _ -> primOpSize op (length val_args) ClassOpId {} -> classOpSize opts top_args val_args _ -> funSize opts top_args fun (length val_args) voids ------------ size_up_alt (Alt _con _bndrs rhs) = size_up rhs `addSizeN` 10 -- Don't charge for args, so that wrappers look cheap -- (See comments about wrappers with Case) -- -- IMPORTANT: *do* charge 1 for the alternative, else we -- find that giant case nests are treated as practically free -- A good example is Foreign.C.Error.errnoToIOError ------------ -- Cost to allocate binding with given binder size_up_alloc bndr | isTyVar bndr -- Doesn't exist at runtime || isJoinId bndr -- Not allocated at all || not (isBoxedType (idType bndr)) -- Doesn't live in heap = 0 | otherwise = 10 ------------ -- These addSize things have to be here because -- I don't want to give them bOMB_OUT_SIZE as an argument addSizeN TooBig _ = TooBig addSizeN (SizeIs n xs d) m = mkSizeIs bOMB_OUT_SIZE (n + m) xs d -- addAltSize is used to add the sizes of case alternatives addAltSize TooBig _ = TooBig addAltSize _ TooBig = TooBig addAltSize (SizeIs n1 xs d1) (SizeIs n2 ys d2) = mkSizeIs bOMB_OUT_SIZE (n1 + n2) (xs `unionBags` ys) (d1 + d2) -- Note [addAltSize result discounts] -- This variant ignores the result discount from its LEFT argument -- It's used when the second argument isn't part of the result addSizeNSD TooBig _ = TooBig addSizeNSD _ TooBig = TooBig addSizeNSD (SizeIs n1 xs _) (SizeIs n2 ys d2) = mkSizeIs bOMB_OUT_SIZE (n1 + n2) (xs `unionBags` ys) d2 -- Ignore d1 -- don't count expressions such as State# RealWorld -- exclude join points, because they can be rep-polymorphic -- and typePrimRep will crash isZeroBitId id = not (isJoinId id) && isZeroBitTy (idType id) isZeroBitExpr (Var id) = isZeroBitId id 
isZeroBitExpr (Tick _ e) = isZeroBitExpr e isZeroBitExpr _ = False -- | Finds a nominal size of a string literal. litSize :: Literal -> Int -- Used by GHC.Core.Unfold.sizeExpr litSize (LitNumber LitNumBigNat _) = 100 litSize (LitString str) = 10 + 10 * ((BS.length str + 3) `div` 4) -- If size could be 0 then @f "x"@ might be too small -- [Sept03: make literal strings a bit bigger to avoid fruitless -- duplication of little strings] litSize _other = 0 -- Must match size of nullary constructors -- Key point: if x |-> 4, then x must inline unconditionally -- (eg via case binding) classOpSize :: UnfoldingOpts -> [Id] -> [CoreExpr] -> ExprSize -- See Note [Conlike is interesting] classOpSize _ _ [] = sizeZero classOpSize opts top_args (arg1 : other_args) = SizeIs size arg_discount 0 where size = 20 + (10 * length other_args) -- If the class op is scrutinising a lambda bound dictionary then -- give it a discount, to encourage the inlining of this function -- The actual discount is rather arbitrarily chosen arg_discount = case arg1 of Var dict | dict `elem` top_args -> unitBag (dict, unfoldingDictDiscount opts) _other -> emptyBag -- | The size of a function call callSize :: Int -- ^ number of value args -> Int -- ^ number of value args that are void -> Int callSize n_val_args voids = 10 * (1 + n_val_args - voids) -- The 1+ is for the function itself -- Add 1 for each non-trivial arg; -- the allocation cost, as in let(rec) -- | The size of a jump to a join point jumpSize :: Int -- ^ number of value args -> Int -- ^ number of value args that are void -> Int jumpSize _n_val_args _voids = 0 -- Jumps are small, and we don't want penalise them -- Old version: -- 2 * (1 + n_val_args - voids) -- A jump is 20% the size of a function call. Making jumps free reopens -- bug #6048, but making them any more expensive loses a 21% improvement in -- spectral/puzzle. TODO Perhaps adjusting the default threshold would be a -- better solution? 
funSize :: UnfoldingOpts -> [Id] -> Id -> Int -> Int -> ExprSize -- Size for functions that are not constructors or primops -- Note [Function applications] funSize opts top_args fun n_val_args voids | fun `hasKey` buildIdKey = buildSize | fun `hasKey` augmentIdKey = augmentSize | otherwise = SizeIs size arg_discount res_discount where some_val_args = n_val_args > 0 is_join = isJoinId fun size | is_join = jumpSize n_val_args voids | not some_val_args = 0 | otherwise = callSize n_val_args voids -- DISCOUNTS -- See Note [Function and non-function discounts] arg_discount | some_val_args && fun `elem` top_args = unitBag (fun, unfoldingFunAppDiscount opts) | otherwise = emptyBag -- If the function is an argument and is applied -- to some values, give it an arg-discount res_discount | idArity fun > n_val_args = unfoldingFunAppDiscount opts | otherwise = 0 -- If the function is partially applied, show a result discount -- XXX maybe behave like ConSize for eval'd variable conSize :: DataCon -> Int -> ExprSize conSize dc n_val_args | n_val_args == 0 = SizeIs 0 emptyBag 10 -- Like variables -- See Note [Unboxed tuple size and result discount] | isUnboxedTupleDataCon dc = SizeIs 0 emptyBag 10 -- See Note [Constructor size and result discount] | otherwise = SizeIs 10 emptyBag 10 {- Note [Constructor size and result discount] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Treat a constructors application as size 10, regardless of how many arguments it has; we are keen to expose them (and we charge separately for their args). We can't treat them as size zero, else we find that (Just x) has size 0, which is the same as a lone variable; and hence 'v' will always be replaced by (Just x), where v is bound to Just x. The "result discount" is applied if the result of the call is scrutinised (say by a case). For a constructor application that will mean the constructor application will disappear, so we don't need to charge it to the function. 
So the discount should at least match the cost of the constructor application, namely 10. Historical note 1: Until Jun 2020 we gave it a "bit of extra incentive" via a discount of 10*(1 + n_val_args), but that was FAR too much (#18282). In particular, consider a huge case tree like let r = case y1 of Nothing -> B1 a b c Just v1 -> case y2 of Nothing -> B1 c b a Just v2 -> ... If conSize gives a cost of 10 (regardless of n_val_args) and a discount of 10, that'll make each alternative RHS cost zero. We charge 10 for each case alternative (see size_up_alt). If we give a bigger discount (say 20) in conSize, we'll make the case expression cost *nothing*, and that can make a huge case tree cost nothing. This leads to massive, sometimes exponential inlinings (#18282). In short, don't give a discount that give a negative size to a sub-expression! Historical note 2: Much longer ago, Simon M tried a MUCH bigger discount: (10 * (10 + n_val_args)), and said it was an "unambiguous win", but its terribly dangerous because a function with many many case branches, each finishing with a constructor, can have an arbitrarily large discount. This led to terrible code bloat: see #6099. Note [Unboxed tuple size and result discount] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ However, unboxed tuples count as size zero. I found occasions where we had f x y z = case op# x y z of { s -> (# s, () #) } and f wasn't getting inlined. I tried giving unboxed tuples a *result discount* of zero (see the commented-out line). Why? When returned as a result they do not allocate, so maybe we don't want to charge so much for them. If you have a non-zero discount here, we find that workers often get inlined back into wrappers, because it look like f x = case $wf x of (# a,b #) -> (a,b) and we are keener because of the case. However while this change shrank binary sizes by 0.5% it also made spectral/boyer allocate 5% more. All other changes were very small. 
So it's not a big deal but I didn't adopt the idea. When fixing #18282 (see Note [Constructor size and result discount]) I changed the result discount to be just 10, not 10*(1+n_val_args). Note [Function and non-function discounts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want a discount if the function is applied. A good example is monadic combinators with continuation arguments, where inlining is quite important. But we don't want a big discount when a function is called many times (see the detailed comments with #6048) because if the function is big it won't be inlined at its many call sites and no benefit results. Indeed, we can get exponentially big inlinings this way; that is what #6048 is about. On the other hand, for data-valued arguments, if there are lots of case expressions in the body, each one will get smaller if we apply the function to a constructor application, so we *want* a big discount if the argument is scrutinised by many case expressions. Conclusion: - For functions, take the max of the discounts - For data values, take the sum of the discounts Note [Literal integer size] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Literal integers *can* be big (mkInteger [...coefficients...]), but need not be (IS n). We just use an arbitrary big-ish constant here so that, in particular, we don't inline top-level defns like n = IS 5 There's no point in doing so -- any optimisations will see the IS through n's unfolding. Nor will a big size inhibit unfoldings functions that mention a literal Integer, because the float-out pass will float all those constants to top level. 
-} primOpSize :: PrimOp -> Int -> ExprSize primOpSize op n_val_args = if primOpOutOfLine op then sizeN (op_size + n_val_args) else sizeN op_size where op_size = primOpCodeSize op buildSize :: ExprSize buildSize = SizeIs 0 emptyBag 40 -- We really want to inline applications of build -- build t (\cn -> e) should cost only the cost of e (because build will be inlined later) -- Indeed, we should add a result_discount because build is -- very like a constructor. We don't bother to check that the -- build is saturated (it usually is). The "-2" discounts for the \c n, -- The "4" is rather arbitrary. augmentSize :: ExprSize augmentSize = SizeIs 0 emptyBag 40 -- Ditto (augment t (\cn -> e) ys) should cost only the cost of -- e plus ys. The -2 accounts for the \cn -- When we return a lambda, give a discount if it's used (applied) lamScrutDiscount :: UnfoldingOpts -> ExprSize -> ExprSize lamScrutDiscount opts (SizeIs n vs _) = SizeIs n vs (unfoldingFunAppDiscount opts) lamScrutDiscount _ TooBig = TooBig {- Note [addAltSize result discounts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When adding the size of alternatives, we *add* the result discounts too, rather than take the *maximum*. For a multi-branch case, this gives a discount for each branch that returns a constructor, making us keener to inline. I did try using 'max' instead, but it makes nofib 'rewrite' and 'puzzle' allocate significantly more, and didn't make binary sizes shrink significantly either. Note [Discounts and thresholds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constants for discounts and thresholds are defined in 'UnfoldingOpts'. They are: unfoldingCreationThreshold At a definition site, if the unfolding is bigger than this, we may discard it altogether unfoldingUseThreshold At a call site, if the unfolding, less discounts, is smaller than this, then it's small enough inline unfoldingDictDiscount The discount for each occurrence of a dictionary argument as an argument of a class method. 
Should be pretty small else big functions may get inlined unfoldingFunAppDiscount Discount for a function argument that is applied. Quite large, because if we inline we avoid the higher-order call. unfoldingVeryAggressive If True, the compiler ignores all the thresholds and inlines very aggressively. It still adheres to arity, simplifier phase control and loop breakers. Historical Note: Before April 2020 we had another factor, ufKeenessFactor, which would scale the discounts before they were subtracted from the size. This was justified with the following comment: -- We multiply the raw discounts (args_discount and result_discount) -- ty opt_UnfoldingKeenessFactor because the former have to do with -- *size* whereas the discounts imply that there's some extra -- *efficiency* to be gained (e.g. beta reductions, case reductions) -- by inlining. However, this is highly suspect since it means that we subtract a *scaled* size from an absolute size, resulting in crazy (e.g. negative) scores in some cases (#15304). We consequently killed off ufKeenessFactor and bumped up the ufUseThreshold to compensate. 
Note [Function applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a function application (f a b) - If 'f' is an argument to the function being analysed, and there's at least one value arg, record a FunAppDiscount for f - If the application if a PAP (arity > 2 in this example) record a *result* discount (because inlining with "extra" args in the call may mean that we now get a saturated application) Code for manipulating sizes -} -- | The size of a candidate expression for unfolding data ExprSize = TooBig | SizeIs { _es_size_is :: {-# UNPACK #-} !Int -- ^ Size found , _es_args :: !(Bag (Id,Int)) -- ^ Arguments cased herein, and discount for each such , _es_discount :: {-# UNPACK #-} !Int -- ^ Size to subtract if result is scrutinised by a case -- expression } instance Outputable ExprSize where ppr TooBig = text "TooBig" ppr (SizeIs a _ c) = brackets (int a <+> int c) -- subtract the discount before deciding whether to bale out. eg. we -- want to inline a large constructor application into a selector: -- tup = (a_1, ..., a_99) -- x = case tup of ... 
-- mkSizeIs :: Int -> Int -> Bag (Id, Int) -> Int -> ExprSize mkSizeIs max n xs d | (n - d) > max = TooBig | otherwise = SizeIs n xs d maxSize :: ExprSize -> ExprSize -> ExprSize maxSize TooBig _ = TooBig maxSize _ TooBig = TooBig maxSize s1@(SizeIs n1 _ _) s2@(SizeIs n2 _ _) | n1 > n2 = s1 | otherwise = s2 sizeZero :: ExprSize sizeN :: Int -> ExprSize sizeZero = SizeIs 0 emptyBag 0 sizeN n = SizeIs n emptyBag 0 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Unfold.hs-boot0000644000000000000000000000106507346545000021324 0ustar0000000000000000module GHC.Core.Unfold where import GHC.Prelude data UnfoldingOpts defaultUnfoldingOpts :: UnfoldingOpts updateCreationThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateUseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateFunAppDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateDictDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateVeryAggressive :: Bool -> UnfoldingOpts -> UnfoldingOpts updateCaseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseScaling :: Int -> UnfoldingOpts -> UnfoldingOpts ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Unfold/0000755000000000000000000000000007346545000020025 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Unfold/Make.hs0000644000000000000000000005053107346545000021242 0ustar0000000000000000{-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Unfolding creation module GHC.Core.Unfold.Make ( noUnfolding , mkUnfolding , mkCoreUnfolding , mkFinalUnfolding , mkFinalUnfolding' , mkSimpleUnfolding , mkWorkerUnfolding , mkInlineUnfoldingWithArity, mkInlineUnfoldingNoArity , mkInlinableUnfolding , mkWrapperUnfolding , mkCompulsoryUnfolding, mkCompulsoryUnfolding' , mkDFunUnfolding , mkDataConUnfolding , specUnfolding , certainlyWillInline ) where import GHC.Prelude import GHC.Core import GHC.Core.Unfold import GHC.Core.Opt.OccurAnal ( occurAnalyseExpr ) import GHC.Core.Opt.Arity ( manifestArity ) import GHC.Core.DataCon import 
GHC.Core.Utils import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand ( DmdSig, isDeadEndSig ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Data.Maybe ( fromMaybe ) -- the very simple optimiser is used to optimise unfoldings import {-# SOURCE #-} GHC.Core.SimpleOpt mkFinalUnfolding :: UnfoldingOpts -> UnfoldingSource -> DmdSig -> CoreExpr -> Unfolding -- "Final" in the sense that this is a GlobalId that will not be further -- simplified; so the unfolding should be occurrence-analysed mkFinalUnfolding opts src strict_sig expr = mkFinalUnfolding' opts src strict_sig expr Nothing -- See Note [Tying the 'CoreUnfolding' knot] for why interfaces need -- to pass a precomputed 'UnfoldingCache' mkFinalUnfolding' :: UnfoldingOpts -> UnfoldingSource -> DmdSig -> CoreExpr -> Maybe UnfoldingCache -> Unfolding -- "Final" in the sense that this is a GlobalId that will not be further -- simplified; so the unfolding should be occurrence-analysed mkFinalUnfolding' opts src strict_sig expr = mkUnfolding opts src True {- Top level -} (isDeadEndSig strict_sig) False {- Not a join point -} expr -- | Same as 'mkCompulsoryUnfolding' but simplifies the unfolding first mkCompulsoryUnfolding' :: SimpleOpts -> CoreExpr -> Unfolding mkCompulsoryUnfolding' opts expr = mkCompulsoryUnfolding (simpleOptExpr opts expr) -- | Used for things that absolutely must be unfolded mkCompulsoryUnfolding :: CoreExpr -> Unfolding mkCompulsoryUnfolding expr = mkCoreUnfolding CompulsorySrc True expr Nothing (UnfWhen { ug_arity = 0 -- Arity of unfolding doesn't matter , ug_unsat_ok = unSaturatedOk, ug_boring_ok = boringCxtOk }) -- Note [Top-level flag on inline rules] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Slight hack: note that mk_inline_rules conservatively sets the -- top-level flag to True. It gets set more accurately by the simplifier -- Simplify.simplUnfolding. 
mkSimpleUnfolding :: UnfoldingOpts -> CoreExpr -> Unfolding mkSimpleUnfolding !opts rhs = mkUnfolding opts VanillaSrc False False False rhs Nothing mkDFunUnfolding :: [Var] -> DataCon -> [CoreExpr] -> Unfolding mkDFunUnfolding bndrs con ops = DFunUnfolding { df_bndrs = bndrs , df_con = con , df_args = map occurAnalyseExpr ops } -- See Note [OccInfo in unfoldings and rules] in GHC.Core mkDataConUnfolding :: CoreExpr -> Unfolding -- Used for non-newtype data constructors with non-trivial wrappers mkDataConUnfolding expr = mkCoreUnfolding StableSystemSrc True expr Nothing guide -- No need to simplify the expression where guide = UnfWhen { ug_arity = manifestArity expr , ug_unsat_ok = unSaturatedOk , ug_boring_ok = False } mkWrapperUnfolding :: SimpleOpts -> CoreExpr -> Arity -> Unfolding -- Make the unfolding for the wrapper in a worker/wrapper split -- after demand/CPR analysis mkWrapperUnfolding opts expr arity = mkCoreUnfolding StableSystemSrc True (simpleOptExpr opts expr) Nothing (UnfWhen { ug_arity = arity , ug_unsat_ok = unSaturatedOk , ug_boring_ok = boringCxtNotOk }) mkWorkerUnfolding :: SimpleOpts -> (CoreExpr -> CoreExpr) -> Unfolding -> Unfolding -- See Note [Worker/wrapper for INLINABLE functions] in GHC.Core.Opt.WorkWrap mkWorkerUnfolding opts work_fn (CoreUnfolding { uf_src = src, uf_tmpl = tmpl , uf_is_top = top_lvl }) | isStableSource src = mkCoreUnfolding src top_lvl new_tmpl Nothing guidance where new_tmpl = simpleOptExpr opts (work_fn tmpl) guidance = calcUnfoldingGuidance (so_uf_opts opts) False False new_tmpl mkWorkerUnfolding _ _ _ = noUnfolding -- | Make an INLINE unfolding that may be used unsaturated -- (ug_unsat_ok = unSaturatedOk) and that is reported as having its -- manifest arity (the number of outer lambdas applications will -- resolve before doing any work). 
mkInlineUnfoldingNoArity :: SimpleOpts -> UnfoldingSource -> CoreExpr -> Unfolding mkInlineUnfoldingNoArity opts src expr = mkCoreUnfolding src True -- Note [Top-level flag on inline rules] expr' Nothing guide where expr' = simpleOptExpr opts expr guide = UnfWhen { ug_arity = manifestArity expr' , ug_unsat_ok = unSaturatedOk , ug_boring_ok = boring_ok } boring_ok = inlineBoringOk expr' -- | Make an INLINE unfolding that will be used once the RHS has been saturated -- to the given arity. mkInlineUnfoldingWithArity :: SimpleOpts -> UnfoldingSource -> Arity -> CoreExpr -> Unfolding mkInlineUnfoldingWithArity opts src arity expr = mkCoreUnfolding src True -- Note [Top-level flag on inline rules] expr' Nothing guide where expr' = simpleOptExpr opts expr guide = UnfWhen { ug_arity = arity , ug_unsat_ok = needSaturated , ug_boring_ok = boring_ok } -- See Note [INLINE pragmas and boring contexts] as to why we need to look -- at the arity here. boring_ok | arity == 0 = True | otherwise = inlineBoringOk expr' mkInlinableUnfolding :: SimpleOpts -> UnfoldingSource -> CoreExpr -> Unfolding mkInlinableUnfolding opts src expr = mkUnfolding (so_uf_opts opts) src False False False expr' Nothing where expr' = simpleOptExpr opts expr specUnfolding :: SimpleOpts -> [Var] -> (CoreExpr -> CoreExpr) -> [CoreArg] -- LHS arguments in the RULE -> Unfolding -> Unfolding -- See Note [Specialising unfoldings] -- specUnfolding spec_bndrs spec_args unf -- = \spec_bndrs. unf spec_args -- specUnfolding opts spec_bndrs spec_app rule_lhs_args df@(DFunUnfolding { df_bndrs = old_bndrs, df_con = con, df_args = args }) = assertPpr (rule_lhs_args `equalLength` old_bndrs) (ppr df $$ ppr rule_lhs_args) $ -- For this ASSERT see Note [Specialising DFuns] in GHC.Core.Opt.Specialise mkDFunUnfolding spec_bndrs con (map spec_arg args) -- For DFunUnfoldings we transform -- \obs. MkD ... -- to -- \sbs. MkD ((\obs. ) spec_args) ... 
ditto where spec_arg arg = simpleOptExpr opts $ spec_app (mkLams old_bndrs arg) -- The beta-redexes created by spec_app will be -- simplified away by simplOptExpr specUnfolding opts spec_bndrs spec_app rule_lhs_args (CoreUnfolding { uf_src = src, uf_tmpl = tmpl , uf_is_top = top_lvl , uf_guidance = old_guidance }) | isStableSource src -- See Note [Specialising unfoldings] , UnfWhen { ug_arity = old_arity } <- old_guidance = mkCoreUnfolding src top_lvl new_tmpl Nothing (old_guidance { ug_arity = old_arity - arity_decrease }) where new_tmpl = simpleOptExpr opts $ mkLams spec_bndrs $ spec_app tmpl -- The beta-redexes created by spec_app -- will be simplified away by simplOptExpr arity_decrease = count isValArg rule_lhs_args - count isId spec_bndrs specUnfolding _ _ _ _ _ = noUnfolding {- Note [Specialising unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we specialise a function for some given type-class arguments, we use specUnfolding to specialise its unfolding. Some important points: * If the original function has a DFunUnfolding, the specialised one must do so too! Otherwise we lose the magic rules that make it interact with ClassOps * For a /stable/ CoreUnfolding, we specialise the unfolding, no matter how big, iff it has UnfWhen guidance. This happens for INLINE functions, and for wrappers. For these, it would be very odd if a function marked INLINE was specialised (because of some local use), and then forever after (including importing modules) the specialised version wasn't INLINEd! After all, the programmer said INLINE. * However, for a stable CoreUnfolding with guidance UnfoldIfGoodArgs, which arises from INLINABLE functions, we drop the unfolding. See #4874 for persuasive examples. Suppose we have {-# INLINABLE f #-} f :: Ord a => [a] -> Int f xs = letrec f' = ...f'... in f' Then, when f is specialised and optimised we might get wgo :: [Int] -> Int# wgo = ...wgo... 
f_spec :: [Int] -> Int f_spec xs = case wgo xs of { r -> I# r } and we clearly want to inline f_spec at call sites. But if we still have the big, un-optimised of f (albeit specialised) captured in the stable unfolding for f_spec, we won't get that optimisation. This happens with Control.Monad.liftM3, and can cause a lot more allocation as a result (nofib n-body shows this). Moreover, keeping the stable unfolding isn't much help, because the specialised function (probably) isn't overloaded any more. TL;DR: we simply drop the stable unfolding when specialising. It's not really a complete solution; ignoring specialisation for now, INLINABLE functions don't get properly strictness analysed, for example. Moreover, it means that the specialised function has an INLINEABLE pragma, but no stable unfolding. But it works well for examples involving specialisation, which is the dominant use of INLINABLE. Note [Honour INLINE on 0-ary bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider x = {-# INLINE x #-} f y = ...x... The semantics of an INLINE pragma is inline x at every call site, provided it is saturated; that is, applied to at least as many arguments as appear on the LHS of the Haskell source definition. (This source-code-derived arity is stored in the `ug_arity` field of the `UnfoldingGuidance`.) In the example, x's ug_arity is 0, so we should inline it at every use site. It's rare to have such an INLINE pragma (usually INLINE is on functions), but it's occasionally very important (#15578, #15519). In #15519 we had something like x = case (g a b) of I# r -> T r {-# INLINE x #-} f y = ...(h x).... where h is strict. So we got f y = ...(case g a b of I# r -> h (T r))... and that in turn allowed SpecConstr to ramp up performance. How do we deliver on this? By adjusting the ug_boring_ok flag in mkInlineUnfoldingWithArity; see Note [INLINE pragmas and boring contexts] NB: there is a real risk that full laziness will float it right back out again. 
Consider again x = factorial 200 {-# INLINE x #-} f y = ...x... After inlining we get f y = ...(factorial 200)... but it's entirely possible that full laziness will do lvl23 = factorial 200 f y = ...lvl23... That's a problem for another day. Note [INLINE pragmas and boring contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An INLINE pragma uses mkInlineUnfoldingWithArity to build the unfolding. That sets the ug_boring_ok flag to False if the function is not tiny (inlineBoringOK), so that even INLINE functions are not inlined in an utterly boring context. E.g. \x y. Just (f y x) Nothing is gained by inlining f here, even if it has an INLINE pragma. But for 0-ary bindings, we want to inline regardless; see Note [Honour INLINE on 0-ary bindings]. I'm a bit worried that it's possible for the same kind of problem to arise for non-0-ary functions too, but let's wait and see. -} mkUnfolding :: UnfoldingOpts -> UnfoldingSource -> Bool -- Is top-level -> Bool -- Definitely a bottoming binding -- (only relevant for top-level bindings) -> Bool -- True <=> join point -> CoreExpr -> Maybe UnfoldingCache -> Unfolding -- Calculates unfolding guidance -- Occurrence-analyses the expression before capturing it mkUnfolding opts src top_lvl is_bottoming is_join expr cache = mkCoreUnfolding src top_lvl expr cache guidance where is_top_bottoming = top_lvl && is_bottoming guidance = calcUnfoldingGuidance opts is_top_bottoming is_join expr -- NB: *not* (calcUnfoldingGuidance (occurAnalyseExpr expr))! -- See Note [Calculate unfolding guidance on the non-occ-anal'd expression] mkCoreUnfolding :: UnfoldingSource -> Bool -> CoreExpr -> Maybe UnfoldingCache -> UnfoldingGuidance -> Unfolding -- Occurrence-analyses the expression before capturing it mkCoreUnfolding src top_lvl expr precomputed_cache guidance = CoreUnfolding { uf_tmpl = cache `seq` occurAnalyseExpr expr -- occAnalyseExpr: see Note [OccInfo in unfoldings and rules] in GHC.Core -- See #20905 for what a discussion of this 'seq'. 
-- We are careful to make sure we only -- have one copy of an unfolding around at once. -- Note [Thoughtful forcing in mkCoreUnfolding] , uf_src = src , uf_is_top = top_lvl , uf_cache = cache , uf_guidance = guidance } where is_value = exprIsHNF expr is_conlike = exprIsConLike expr is_work_free = exprIsWorkFree expr is_expandable = exprIsExpandable expr recomputed_cache = UnfoldingCache { uf_is_value = is_value , uf_is_conlike = is_conlike , uf_is_work_free = is_work_free , uf_expandable = is_expandable } cache = fromMaybe recomputed_cache precomputed_cache ---------------- certainlyWillInline :: UnfoldingOpts -> IdInfo -> CoreExpr -> Maybe Unfolding -- ^ Sees if the unfolding is pretty certain to inline. -- If so, return a *stable* unfolding for it, that will always inline. -- The CoreExpr is the WW'd and simplified RHS. In contrast, the unfolding -- template might not have been WW'd yet. certainlyWillInline opts fn_info rhs' = case fn_unf of CoreUnfolding { uf_guidance = guidance, uf_src = src } | noinline -> Nothing -- See Note [Worker/wrapper for NOINLINE functions] | otherwise -> case guidance of UnfNever -> Nothing UnfWhen {} -> Just (fn_unf { uf_src = src', uf_tmpl = tmpl' }) -- INLINE functions have UnfWhen UnfIfGoodArgs { ug_size = size, ug_args = args } -> do_cunf size args src' tmpl' where src' | isCompulsorySource src = src -- Do not change InlineCompulsory! | otherwise = StableSystemSrc tmpl' | isStableSource src = uf_tmpl fn_unf | otherwise = occurAnalyseExpr rhs' -- Do not overwrite stable unfoldings! DFunUnfolding {} -> Just fn_unf -- Don't w/w DFuns; it never makes sense -- to do so, and even if it is currently a -- loop breaker, it may not be later _other_unf -> Nothing where noinline = isNoInlinePragma (inlinePragInfo fn_info) fn_unf = unfoldingInfo fn_info -- NB: loop-breakers never inline -- The UnfIfGoodArgs case seems important. If we w/w small functions -- binary sizes go up by 10%! (This is with SplitObjs.) -- I'm not totally sure why. 
-- INLINABLE functions come via this path -- See Note [certainlyWillInline: INLINABLE] do_cunf size args src' tmpl' | arityInfo fn_info > 0 -- See Note [certainlyWillInline: be careful of thunks] , not (isDeadEndSig (dmdSigInfo fn_info)) -- Do not unconditionally inline a bottoming functions even if -- it seems smallish. We've carefully lifted it out to top level, -- so we don't want to re-inline it. , let unf_arity = length args , size - (10 * (unf_arity + 1)) <= unfoldingUseThreshold opts = Just (fn_unf { uf_src = src' , uf_tmpl = tmpl' , uf_guidance = UnfWhen { ug_arity = unf_arity , ug_unsat_ok = unSaturatedOk , ug_boring_ok = inlineBoringOk tmpl' } }) -- Note the "unsaturatedOk". A function like f = \ab. a -- will certainly inline, even if partially applied (f e), so we'd -- better make sure that the transformed inlining has the same property | otherwise = Nothing {- Note [certainlyWillInline: be careful of thunks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Don't claim that thunks will certainly inline, because that risks work duplication. Even if the work duplication is not great (eg is_cheap holds), it can make a big difference in an inner loop In #5623 we found that the WorkWrap phase thought that y = case x of F# v -> F# (v +# v) was certainlyWillInline, so the addition got duplicated. Note that we check arityInfo instead of the arity of the unfolding to detect this case. This is so that we don't accidentally fail to inline small partial applications, like `f = g 42` (where `g` recurses into `f`) where g has arity 2 (say). Here there is no risk of work duplication, and the RHS is tiny, so certainlyWillInline should return True. But `unf_arity` is zero! However f's arity, gotten from `arityInfo fn_info`, is 1. Failing to say that `f` will inline forces W/W to generate a potentially huge worker for f that will immediately cancel with `g`'s wrapper anyway, causing unnecessary churn in the Simplifier while arriving at the same result. 
Note [certainlyWillInline: INLINABLE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ certainlyWillInline /must/ return Nothing for a large INLINABLE thing, even though we have a stable inlining, so that strictness w/w takes place. It makes a big difference to efficiency, and the w/w pass knows how to transfer the INLINABLE info to the worker; see WorkWrap Note [Worker/wrapper for INLINABLE functions] Note [Thoughtful forcing in mkCoreUnfolding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Core expressions retained in unfoldings is one of biggest uses of memory when compiling a program. Therefore we have to be careful about retaining copies of old or redundant templates (see !6202 for a particularly bad case). With that in mind we want to maintain the invariant that each unfolding only references a single CoreExpr. One place where we have to be careful is in mkCoreUnfolding. * The template of the unfolding is the result of performing occurrence analysis (Note [OccInfo in unfoldings and rules] in GHC.Core) * Predicates are applied to the unanalysed expression Therefore if we are not thoughtful about forcing you can end up in a situation where the template is forced but not all the predicates are forced so the unfolding will retain both the old and analysed expressions. I investigated this using ghc-debug and it was clear this situation did often arise: ``` (["ghc:GHC.Core:Lam","ghc-prim:GHC.Types:True","THUNK_1_0","THUNK_1_0","THUNK_1_0"],Count 4307) ``` Here the predicates are unforced but the template is forced. Therefore we basically had two options in order to fix this: 1. Perform the predicates on the analysed expression. 2. Force the predicates to remove retainer to the old expression if we force the template. Option 1 is bad because occurrence analysis is expensive and destroys any sharing of the unfolding with the actual program. 
(Testing this approach showed peak 25G memory usage) Therefore we got for Option 2 which performs a little more work but compensates by reducing memory pressure. The result of fixing this led to a 1G reduction in peak memory usage (12G -> 11G) when compiling a very large module (peak 3 million terms). For more discussion see #20905. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Unify.hs0000644000000000000000000026177007346545000020241 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE ScopedTypeVariables, PatternSynonyms, MultiWayIf #-} {-# LANGUAGE DeriveFunctor #-} module GHC.Core.Unify ( tcMatchTy, tcMatchTyKi, tcMatchTys, tcMatchTyKis, tcMatchTyX, tcMatchTysX, tcMatchTyKisX, tcMatchTyX_BM, ruleMatchTyKiX, -- Side-effect free unification tcUnifyTy, tcUnifyTyKi, tcUnifyTys, tcUnifyTyKis, tcUnifyTysFG, tcUnifyTyWithTFs, BindFun, BindFlag(..), matchBindFun, alwaysBindFun, UnifyResult, UnifyResultM(..), MaybeApartReason(..), typesCantMatch, typesAreApart, -- Matching a type against a lifted type (coercion) liftCoMatch, -- The core flattening algorithm flattenTys, flattenTysX, ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name( Name, mkSysTvName, mkSystemVarName ) import GHC.Builtin.Names( tYPETyConKey, cONSTRAINTTyConKey ) import GHC.Core.Type hiding ( getTvSubstEnv ) import GHC.Core.Coercion hiding ( getCvSubstEnv ) import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Compare ( eqType, tcEqType ) import GHC.Core.TyCo.FVs ( tyCoVarsOfCoList, tyCoFVsOfTypes ) import GHC.Core.TyCo.Subst ( mkTvSubst, emptyIdSubstEnv ) import GHC.Core.Map.Type import GHC.Utils.FV( FV, fvVarList ) import GHC.Utils.Misc import GHC.Data.Pair import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Exts( oneShot ) import GHC.Utils.Panic import GHC.Data.FastString import Data.List ( mapAccumL ) import Control.Monad import 
qualified Data.Semigroup as S import GHC.Builtin.Types.Prim (fUNTyCon) import GHC.Core.Multiplicity {- Unification is much tricker than you might think. 1. The substitution we generate binds the *template type variables* which are given to us explicitly. 2. We want to match in the presence of foralls; e.g (forall a. t1) ~ (forall b. t2) That is what the RnEnv2 is for; it does the alpha-renaming that makes it as if a and b were the same variable. Initialising the RnEnv2, so that it can generate a fresh binder when necessary, entails knowing the free variables of both types. 3. We must be careful not to bind a template type variable to a locally bound variable. E.g. (forall a. x) ~ (forall b. b) where x is the template type variable. Then we do not want to bind x to a/b! This is a kind of occurs check. The necessary locals accumulate in the RnEnv2. Note [tcMatchTy vs tcMatchTyKi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module offers two variants of matching: with kinds and without. The TyKi variant takes two types, of potentially different kinds, and matches them. Along the way, it necessarily also matches their kinds. The Ty variant instead assumes that the kinds are already eqType and so skips matching up the kinds. How do you choose between them? 1. If you know that the kinds of the two types are eqType, use the Ty variant. It is more efficient, as it does less work. 2. If the kinds of variables in the template type might mention type families, use the Ty variant (and do other work to make sure the kinds work out). These pure unification functions do a straightforward syntactic unification and do no complex reasoning about type families. Note that the types of the variables in instances can indeed mention type families, so instance lookup must use the Ty variant. (Nothing goes terribly wrong -- no panics -- if there might be type families in kinds in the TyKi variant. You just might get match failure even though a reducing a type family would lead to success.) 3. 
Otherwise, if you're sure that the variable kinds do not mention type families and you're not already sure that the kind of the template equals the kind of the target, then use the TyKi version. -} -- | Some unification functions are parameterised by a 'BindFun', which -- says whether or not to allow a certain unification to take place. -- A 'BindFun' takes the 'TyVar' involved along with the 'Type' it will -- potentially be bound to. -- -- It is possible for the variable to actually be a coercion variable -- (Note [Matching coercion variables]), but only when one-way matching. -- In this case, the 'Type' will be a 'CoercionTy'. type BindFun = TyCoVar -> Type -> BindFlag -- | @tcMatchTy t1 t2@ produces a substitution (over fvs(t1)) -- @s@ such that @s(t1)@ equals @t2@. -- The returned substitution might bind coercion variables, -- if the variable is an argument to a GADT constructor. -- -- Precondition: typeKind ty1 `eqType` typeKind ty2 -- -- We don't pass in a set of "template variables" to be bound -- by the match, because tcMatchTy (and similar functions) are -- always used on top-level types, so we can bind any of the -- free variables of the LHS. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTy :: Type -> Type -> Maybe Subst tcMatchTy ty1 ty2 = tcMatchTys [ty1] [ty2] tcMatchTyX_BM :: BindFun -> Subst -> Type -> Type -> Maybe Subst tcMatchTyX_BM bind_me subst ty1 ty2 = tc_match_tys_x bind_me False subst [ty1] [ty2] -- | Like 'tcMatchTy', but allows the kinds of the types to differ, -- and thus matches them as well. 
-- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKi :: Type -> Type -> Maybe Subst tcMatchTyKi ty1 ty2 = tc_match_tys alwaysBindFun True [ty1] [ty2] -- | This is similar to 'tcMatchTy', but extends a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyX :: Subst -- ^ Substitution to extend -> Type -- ^ Template -> Type -- ^ Target -> Maybe Subst tcMatchTyX subst ty1 ty2 = tc_match_tys_x alwaysBindFun False subst [ty1] [ty2] -- | Like 'tcMatchTy' but over a list of types. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTys :: [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe Subst -- ^ One-shot; in principle the template -- variables could be free in the target tcMatchTys tys1 tys2 = tc_match_tys alwaysBindFun False tys1 tys2 -- | Like 'tcMatchTyKi' but over a list of types. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKis :: [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe Subst -- ^ One-shot substitution tcMatchTyKis tys1 tys2 = tc_match_tys alwaysBindFun True tys1 tys2 -- | Like 'tcMatchTys', but extending a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTysX :: Subst -- ^ Substitution to extend -> [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe Subst -- ^ One-shot substitution tcMatchTysX subst tys1 tys2 = tc_match_tys_x alwaysBindFun False subst tys1 tys2 -- | Like 'tcMatchTyKis', but extending a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKisX :: Subst -- ^ Substitution to extend -> [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe Subst -- ^ One-shot substitution tcMatchTyKisX subst tys1 tys2 = tc_match_tys_x alwaysBindFun True subst tys1 tys2 -- | Same as tc_match_tys_x, but starts with an empty substitution tc_match_tys :: BindFun -> Bool -- ^ match kinds? 
-> [Type] -> [Type] -> Maybe Subst tc_match_tys bind_me match_kis tys1 tys2 = tc_match_tys_x bind_me match_kis (mkEmptySubst in_scope) tys1 tys2 where in_scope = mkInScopeSet (tyCoVarsOfTypes tys1 `unionVarSet` tyCoVarsOfTypes tys2) -- | Worker for 'tcMatchTysX' and 'tcMatchTyKisX' tc_match_tys_x :: BindFun -> Bool -- ^ match kinds? -> Subst -> [Type] -> [Type] -> Maybe Subst tc_match_tys_x bind_me match_kis (Subst in_scope id_env tv_env cv_env) tys1 tys2 = case tc_unify_tys bind_me False -- Matching, not unifying False -- Not an injectivity check match_kis RespectMultiplicities (mkRnEnv2 in_scope) tv_env cv_env tys1 tys2 of Unifiable (tv_env', cv_env') -> Just $ Subst in_scope id_env tv_env' cv_env' _ -> Nothing -- | This one is called from the expression matcher, -- which already has a MatchEnv in hand ruleMatchTyKiX :: TyCoVarSet -- ^ template variables -> RnEnv2 -> TvSubstEnv -- ^ type substitution to extend -> Type -- ^ Template -> Type -- ^ Target -> Maybe TvSubstEnv ruleMatchTyKiX tmpl_tvs rn_env tenv tmpl target -- See Note [Kind coercions in Unify] = case tc_unify_tys (matchBindFun tmpl_tvs) False False True -- <-- this means to match the kinds IgnoreMultiplicities -- See Note [Rewrite rules ignore multiplicities in FunTy] rn_env tenv emptyCvSubstEnv [tmpl] [target] of Unifiable (tenv', _) -> Just tenv' _ -> Nothing -- | Allow binding only for any variable in the set. Variables may -- be bound to any type. -- Used when doing simple matching; e.g. 
can we find a substitution -- -- @ -- S = [a :-> t1, b :-> t2] such that -- S( Maybe (a, b->Int ) = Maybe (Bool, Char -> Int) -- @ matchBindFun :: TyCoVarSet -> BindFun matchBindFun tvs tv _ty | tv `elemVarSet` tvs = BindMe | otherwise = Apart -- | Allow the binding of any variable to any type alwaysBindFun :: BindFun alwaysBindFun _tv _ty = BindMe {- ************************************************************************ * * GADTs * * ************************************************************************ Note [Pruning dead case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T a where T1 :: T Int T2 :: T a newtype X = MkX Int newtype Y = MkY Char type family F a type instance F Bool = Int Now consider case x of { T1 -> e1; T2 -> e2 } The question before the house is this: if I know something about the type of x, can I prune away the T1 alternative? Suppose x::T Char. It's impossible to construct a (T Char) using T1, Answer = YES we can prune the T1 branch (clearly) Suppose x::T (F a), where 'a' is in scope. Then 'a' might be instantiated to 'Bool', in which case x::T Int, so ANSWER = NO (clearly) We see here that we want precisely the apartness check implemented within tcUnifyTysFG. So that's what we do! Two types cannot match if they are surely apart. Note that since we are simply dropping dead code, a conservative test suffices. -} -- | Given a list of pairs of types, are any two members of a pair surely -- apart, even after arbitrary type function evaluation and substitution? 
typesCantMatch :: [(Type,Type)] -> Bool -- See Note [Pruning dead case alternatives] typesCantMatch prs = any (uncurry typesAreApart) prs typesAreApart :: Type -> Type -> Bool typesAreApart t1 t2 = case tcUnifyTysFG alwaysBindFun [t1] [t2] of SurelyApart -> True _ -> False {- ************************************************************************ * * Unification * * ************************************************************************ Note [Fine-grained unification] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do the types (x, x) and ([y], y) unify? The answer is seemingly "no" -- no substitution to finite types makes these match. But, a substitution to *infinite* types can unify these two types: [x |-> [[[...]]], y |-> [[[...]]] ]. Why do we care? Consider these two type family instances: type instance F x x = Int type instance F [y] y = Bool If we also have type instance Looper = [Looper] then the instances potentially overlap. The solution is to use unification over infinite terms. This is possible (see [1] for lots of gory details), but a full algorithm is a little more power than we need. Instead, we make a conservative approximation and just omit the occurs check. [1]: http://research.microsoft.com/en-us/um/people/simonpj/papers/ext-f/axioms-extended.pdf tcUnifyTys considers an occurs-check problem as the same as general unification failure. tcUnifyTysFG ("fine-grained") returns one of three results: success, occurs-check failure ("MaybeApart"), or general failure ("SurelyApart"). See also #8162. It's worth noting that unification in the presence of infinite types is not complete. This means that, sometimes, a closed type family does not reduce when it should. See test case indexed-types/should_fail/Overlap15 for an example. Note [Unification result] ~~~~~~~~~~~~~~~~~~~~~~~~~ When unifying t1 ~ t2, we return * Unifiable s, if s is a substitution such that s(t1) is syntactically the same as s(t2), modulo type-synonym expansion. 
* SurelyApart, if there is no substitution s such that s(t1) = s(t2), where "=" includes type-family reductions. * MaybeApart mar s, when we aren't sure. `mar` is a MaybeApartReason. Examples * [a] ~ Maybe b: SurelyApart, because [] and Maybe can't unify * [(a,Int)] ~ [(Bool,b)]: Unifiable * [F Int] ~ [Bool]: MaybeApart MARTypeFamily, because F Int might reduce to Bool (the unifier does not try this) * a ~ Maybe a: MaybeApart MARInfinite. Not Unifiable clearly, but not SurelyApart either; consider a := Loop where type family Loop where Loop = Maybe Loop There is the possibility that two types are MaybeApart for *both* reasons: * (a, F Int) ~ (Maybe a, Bool) What reason should we use? The *only* consumer of the reason is described in Note [Infinitary substitution in lookup] in GHC.Core.InstEnv. The goal there is identify which instances might match a target later (but don't match now) -- except that we want to ignore the possibility of infinitary substitutions. So let's examine a concrete scenario: class C a b c instance C a (Maybe a) Bool -- other instances, including one that will actually match [W] C b b (F Int) Do we want the instance as a future possibility? No. The only way that instance can match is in the presence of an infinite type (infinitely nested Maybes). We thus say that MARInfinite takes precedence, so that InstEnv treats this case as an infinitary substitution case; the fact that a type family is involved is only incidental. We thus define the Semigroup instance for MaybeApartReason to prefer MARInfinite. Note [The substitution in MaybeApart] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The constructor MaybeApart carries data with it, typically a TvSubstEnv. Why? Because consider unifying these: (a, a, Int) ~ (b, [b], Bool) If we go left-to-right, we start with [a |-> b]. Then, on the middle terms, we apply the subst we have so far and discover that we need [b |-> [b]]. 
Because this fails the occurs check, we say that the types are MaybeApart (see above Note [Fine-grained unification]). But, we can't stop there! Because if we continue, we discover that Int is SurelyApart from Bool, and therefore the types are apart. This has practical consequences for the ability for closed type family applications to reduce. See test case indexed-types/should_compile/Overlap14. Note [Rewrite rules ignore multiplicities in FunTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following (higher-order) rule: m :: Bool -> Bool -> Bool {-# RULES "m" forall f. m (f True) = f #-} let x = m ((,) @Bool @Bool True True) The rewrite rule expects an `f :: Bool -> Bool`, but `(,) @Bool @Bool True :: Bool %1 -> Bool` is linear (see Note [Data constructors are linear by default] in GHC.Core.Multiplicity) Should the rule match? Yes! According to the principles laid out in Note [Linting linearity] in GHC.Core.Lint, optimisation shouldn't be constrained by linearity. However, when matching the template variable `f` to `(,) True`, we do check that their types unify (see Note [Matching variable types] in GHC.Core.Rules). So when unifying types for the sake of rule-matching, the unification algorithm must be able to ignore multiplicities altogether. How is this done? (1) The `um_arr_mult` field of `UMEnv` recordsw when we are doing rule-matching, and hence want to ignore multiplicities. (2) The field is set to True in by `ruleMatchTyKiX`. (3) It is consulted when matching `FunTy` in `unify_ty`. Wrinkle in (3). In `unify_tc_app`, in `unify_ty`, `FunTy` is handled as if it was a regular type constructor. In this case, and when the types being unified are *function* arrows, but not constraint arrows, then the first argument is a multiplicity. We select this situation by comparing the type constructor with fUNTyCon. In this case, and this case only, we can safely drop the first argument (using the tail function) and unify the rest. 
-} -- | Simple unification of two types; all type variables are bindable -- Precondition: the kinds are already equal tcUnifyTy :: Type -> Type -- All tyvars are bindable -> Maybe Subst -- A regular one-shot (idempotent) substitution tcUnifyTy t1 t2 = tcUnifyTys alwaysBindFun [t1] [t2] -- | Like 'tcUnifyTy', but also unifies the kinds tcUnifyTyKi :: Type -> Type -> Maybe Subst tcUnifyTyKi t1 t2 = tcUnifyTyKis alwaysBindFun [t1] [t2] -- | Unify two types, treating type family applications as possibly unifying -- with anything and looking through injective type family applications. -- Precondition: kinds are the same tcUnifyTyWithTFs :: Bool -- ^ True <=> do two-way unification; -- False <=> do one-way matching. -- See end of sec 5.2 from the paper -> InScopeSet -- Should include the free tyvars of both Type args -> Type -> Type -- Types to unify -> Maybe Subst -- This algorithm is an implementation of the "Algorithm U" presented in -- the paper "Injective type families for Haskell", Figures 2 and 3. -- The code is incorporated with the standard unifier for convenience, but -- its operation should match the specification in the paper. tcUnifyTyWithTFs twoWay in_scope t1 t2 = case tc_unify_tys alwaysBindFun twoWay True False RespectMultiplicities rn_env emptyTvSubstEnv emptyCvSubstEnv [t1] [t2] of Unifiable (tv_subst, _cv_subst) -> Just $ maybe_fix tv_subst MaybeApart _reason (tv_subst, _cv_subst) -> Just $ maybe_fix tv_subst -- we want to *succeed* in questionable cases. This is a -- pre-unification algorithm. SurelyApart -> Nothing where rn_env = mkRnEnv2 in_scope maybe_fix | twoWay = niFixSubst in_scope | otherwise = mkTvSubst in_scope -- when matching, don't confuse -- domain with range ----------------- tcUnifyTys :: BindFun -> [Type] -> [Type] -> Maybe Subst -- ^ A regular one-shot (idempotent) substitution -- that unifies the erased types. 
See comments -- for 'tcUnifyTysFG' -- The two types may have common type variables, and indeed do so in the -- second call to tcUnifyTys in GHC.Tc.Instance.FunDeps.checkClsFD tcUnifyTys bind_fn tys1 tys2 = case tcUnifyTysFG bind_fn tys1 tys2 of Unifiable result -> Just result _ -> Nothing -- | Like 'tcUnifyTys' but also unifies the kinds tcUnifyTyKis :: BindFun -> [Type] -> [Type] -> Maybe Subst tcUnifyTyKis bind_fn tys1 tys2 = case tcUnifyTyKisFG bind_fn tys1 tys2 of Unifiable result -> Just result _ -> Nothing -- This type does double-duty. It is used in the UM (unifier monad) and to -- return the final result. See Note [Fine-grained unification] type UnifyResult = UnifyResultM Subst -- | See Note [Unification result] data UnifyResultM a = Unifiable a -- the subst that unifies the types | MaybeApart MaybeApartReason a -- the subst has as much as we know -- it must be part of a most general unifier -- See Note [The substitution in MaybeApart] | SurelyApart deriving Functor -- | Why are two types 'MaybeApart'? 'MARInfinite' takes precedence: -- This is used (only) in Note [Infinitary substitution in lookup] in GHC.Core.InstEnv -- As of Feb 2022, we never differentiate between MARTypeFamily and MARTypeVsConstraint; -- it's really only MARInfinite that's interesting here. data MaybeApartReason = MARTypeFamily -- ^ matching e.g. F Int ~? Bool | MARInfinite -- ^ matching e.g. a ~? Maybe a | MARTypeVsConstraint -- ^ matching Type ~? 
Constraint or the arrow types -- See Note [Type and Constraint are not apart] in GHC.Builtin.Types.Prim instance Outputable MaybeApartReason where ppr MARTypeFamily = text "MARTypeFamily" ppr MARInfinite = text "MARInfinite" ppr MARTypeVsConstraint = text "MARTypeVsConstraint" instance Semigroup MaybeApartReason where -- see end of Note [Unification result] for why MARTypeFamily <> r = r MARInfinite <> _ = MARInfinite MARTypeVsConstraint <> r = r instance Applicative UnifyResultM where pure = Unifiable (<*>) = ap instance Monad UnifyResultM where SurelyApart >>= _ = SurelyApart MaybeApart r1 x >>= f = case f x of Unifiable y -> MaybeApart r1 y MaybeApart r2 y -> MaybeApart (r1 S.<> r2) y SurelyApart -> SurelyApart Unifiable x >>= f = f x -- | @tcUnifyTysFG bind_tv tys1 tys2@ attempts to find a substitution @s@ (whose -- domain elements all respond 'BindMe' to @bind_tv@) such that -- @s(tys1)@ and that of @s(tys2)@ are equal, as witnessed by the returned -- Coercions. This version requires that the kinds of the types are the same, -- if you unify left-to-right. tcUnifyTysFG :: BindFun -> [Type] -> [Type] -> UnifyResult tcUnifyTysFG bind_fn tys1 tys2 = tc_unify_tys_fg False bind_fn tys1 tys2 tcUnifyTyKisFG :: BindFun -> [Type] -> [Type] -> UnifyResult tcUnifyTyKisFG bind_fn tys1 tys2 = tc_unify_tys_fg True bind_fn tys1 tys2 tc_unify_tys_fg :: Bool -> BindFun -> [Type] -> [Type] -> UnifyResult tc_unify_tys_fg match_kis bind_fn tys1 tys2 = do { (env, _) <- tc_unify_tys bind_fn True False match_kis RespectMultiplicities rn_env emptyTvSubstEnv emptyCvSubstEnv tys1 tys2 ; return $ niFixSubst in_scope env } where in_scope = mkInScopeSet $ tyCoVarsOfTypes tys1 `unionVarSet` tyCoVarsOfTypes tys2 rn_env = mkRnEnv2 in_scope -- | This function is actually the one to call the unifier -- a little -- too general for outside clients, though. 
tc_unify_tys :: BindFun -> AmIUnifying -- ^ True <=> unify; False <=> match -> Bool -- ^ True <=> doing an injectivity check -> Bool -- ^ True <=> treat the kinds as well -> MultiplicityFlag -- ^ see Note [Rewrite rules ignore multiplicities in FunTy] in GHC.Core.Unify -> RnEnv2 -> TvSubstEnv -- ^ substitution to extend -> CvSubstEnv -> [Type] -> [Type] -> UnifyResultM (TvSubstEnv, CvSubstEnv) -- NB: It's tempting to ASSERT here that, if we're not matching kinds, then -- the kinds of the types should be the same. However, this doesn't work, -- as the types may be a dependent telescope, where later types have kinds -- that mention variables occurring earlier in the list of types. Here's an -- example (from typecheck/should_fail/T12709): -- template: [rep :: RuntimeRep, a :: TYPE rep] -- target: [LiftedRep :: RuntimeRep, Int :: TYPE LiftedRep] -- We can see that matching the first pair will make the kinds of the second -- pair equal. Yet, we still don't need a separate pass to unify the kinds -- of these types, so it's appropriate to use the Ty variant of unification. -- See also Note [tcMatchTy vs tcMatchTyKi]. 
tc_unify_tys bind_fn unif inj_check match_kis match_mults rn_env tv_env cv_env tys1 tys2 = initUM tv_env cv_env $ do { when match_kis $ unify_tys env kis1 kis2 ; unify_tys env tys1 tys2 ; (,) <$> getTvSubstEnv <*> getCvSubstEnv } where env = UMEnv { um_bind_fun = bind_fn , um_skols = emptyVarSet , um_unif = unif , um_inj_tf = inj_check , um_arr_mult = match_mults , um_rn_env = rn_env } kis1 = map typeKind tys1 kis2 = map typeKind tys2 instance Outputable a => Outputable (UnifyResultM a) where ppr SurelyApart = text "SurelyApart" ppr (Unifiable x) = text "Unifiable" <+> ppr x ppr (MaybeApart r x) = text "MaybeApart" <+> ppr r <+> ppr x {- ************************************************************************ * * Non-idempotent substitution * * ************************************************************************ Note [Non-idempotent substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During unification we use a TvSubstEnv/CvSubstEnv pair that is (a) non-idempotent (b) loop-free; ie repeatedly applying it yields a fixed point Note [Finding the substitution fixpoint] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Finding the fixpoint of a non-idempotent substitution arising from a unification is much trickier than it looks, because of kinds. Consider T k (H k (f:k)) ~ T * (g:*) If we unify, we get the substitution [ k -> * , g -> H k (f:k) ] To make it idempotent we don't want to get just [ k -> * , g -> H * (f:k) ] We also want to substitute inside f's kind, to get [ k -> * , g -> H k (f:*) ] If we don't do this, we may apply the substitution to something, and get an ill-formed type, i.e. one where typeKind will fail. This happened, for example, in #9106. It gets worse. In #14164 we wanted to take the fixpoint of this substitution [ xs_asV :-> F a_aY6 (z_aY7 :: a_aY6) (rest_aWF :: G a_aY6 (z_aY7 :: a_aY6)) , a_aY6 :-> a_aXQ ] We have to apply the substitution for a_aY6 two levels deep inside the invocation of F! 
We don't have a function that recursively applies substitutions inside the kinds of variable occurrences (and probably rightly so). So, we work as follows: 1. Start with the current substitution (which we are trying to fixpoint [ xs :-> F a (z :: a) (rest :: G a (z :: a)) , a :-> b ] 2. Take all the free vars of the range of the substitution: {a, z, rest, b} NB: the free variable finder closes over the kinds of variable occurrences 3. If none are in the domain of the substitution, stop. We have found a fixpoint. 4. Remove the variables that are bound by the substitution, leaving {z, rest, b} 5. Do a topo-sort to put them in dependency order: [ b :: *, z :: a, rest :: G a z ] 6. Apply the substitution left-to-right to the kinds of these tyvars, extending it each time with a new binding, so we finish up with [ xs :-> ..as before.. , a :-> b , b :-> b :: * , z :-> z :: b , rest :-> rest :: G b (z :: b) ] Note that rest now has the right kind 7. Apply this extended substitution (once) to the range of the /original/ substitution. (Note that we do the extended substitution would go on forever if you tried to find its fixpoint, because it maps z to z.) 8. And go back to step 1 In Step 6 we use the free vars from Step 2 as the initial in-scope set, because all of those variables appear in the range of the substitution, so they must all be in the in-scope set. But NB that the type substitution engine does not look up variables in the in-scope set; it is used only to ensure no shadowing. -} niFixSubst :: InScopeSet -> TvSubstEnv -> Subst -- Find the idempotent fixed point of the non-idempotent substitution -- This is surprisingly tricky: -- see Note [Finding the substitution fixpoint] -- ToDo: use laziness instead of iteration? 
niFixSubst in_scope tenv | not_fixpoint = niFixSubst in_scope (mapVarEnv (substTy subst) tenv) | otherwise = subst where range_fvs :: FV range_fvs = tyCoFVsOfTypes (nonDetEltsUFM tenv) -- It's OK to use nonDetEltsUFM here because the -- order of range_fvs, range_tvs is immaterial range_tvs :: [TyVar] range_tvs = fvVarList range_fvs not_fixpoint = any in_domain range_tvs in_domain tv = tv `elemVarEnv` tenv free_tvs = scopedSort (filterOut in_domain range_tvs) -- See Note [Finding the substitution fixpoint], Step 6 subst = foldl' add_free_tv (mkTvSubst in_scope tenv) free_tvs add_free_tv :: Subst -> TyVar -> Subst add_free_tv subst tv = extendTvSubst subst tv (mkTyVarTy tv') where tv' = updateTyVarKind (substTy subst) tv niSubstTvSet :: TvSubstEnv -> TyCoVarSet -> TyCoVarSet -- Apply the non-idempotent substitution to a set of type variables, -- remembering that the substitution isn't necessarily idempotent -- This is used in the occurs check, before extending the substitution niSubstTvSet tsubst tvs = nonDetStrictFoldUniqSet (unionVarSet . get) emptyVarSet tvs -- It's OK to use a non-deterministic fold here because we immediately forget -- the ordering by creating a set. where get tv | Just ty <- lookupVarEnv tsubst tv = niSubstTvSet tsubst (tyCoVarsOfType ty) | otherwise = unitVarSet tv {- ************************************************************************ * * unify_ty: the main workhorse * * ************************************************************************ Note [Specification of unification] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The pure unifier, unify_ty, defined in this module, tries to work out a substitution to make two types say True to eqType. 
NB: eqType is itself not purely syntactic; it accounts for CastTys; see Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep Unlike the "impure unifiers" in the typechecker (the eager unifier in GHC.Tc.Utils.Unify, and the constraint solver itself in GHC.Tc.Solver.Equality), the pure unifier does /not/ work up to ~. The algorithm implemented here is rather delicate, and we depend on it to uphold certain properties. This is a summary of these required properties. Notation: θ,φ substitutions ξ type-function-free types τ,σ other types τ♭ type τ, flattened ≡ eqType (U1) Soundness. If (unify τ₁ τ₂) = Unifiable θ, then θ(τ₁) ≡ θ(τ₂). θ is a most general unifier for τ₁ and τ₂. (U2) Completeness. If (unify ξ₁ ξ₂) = SurelyApart, then there exists no substitution θ such that θ(ξ₁) ≡ θ(ξ₂). These two properties are stated as Property 11 in the "Closed Type Families" paper (POPL'14). Below, this paper is called [CTF]. (U3) Apartness under substitution. If (unify ξ τ♭) = SurelyApart, then (unify ξ θ(τ)♭) = SurelyApart, for any θ. (Property 12 from [CTF]) (U4) Apart types do not unify. If (unify ξ τ♭) = SurelyApart, then there exists no θ such that θ(ξ) = θ(τ). (Property 13 from [CTF]) THEOREM. Completeness w.r.t ~ If (unify τ₁♭ τ₂♭) = SurelyApart, then there exists no proof that (τ₁ ~ τ₂). PROOF. See appendix of [CTF]. The unification algorithm is used for type family injectivity, as described in the "Injective Type Families" paper (Haskell'15), called [ITF]. When run in this mode, it has the following properties. (I1) If (unify σ τ) = SurelyApart, then σ and τ are not unifiable, even after arbitrary type family reductions. Note that σ and τ are not flattened here. (I2) If (unify σ τ) = MaybeApart θ, and if some φ exists such that φ(σ) ~ φ(τ), then φ extends θ. Furthermore, the RULES matching algorithm requires this property, but only when using this algorithm for matching: (M1) If (match σ τ) succeeds with θ, then all matchable tyvars in σ are bound in θ. 
Property M1 means that we must extend the substitution with, say (a ↦ a) when appropriate during matching. See also Note [Self-substitution when matching]. (M2) Completeness of matching. If θ(σ) = τ, then (match σ τ) = Unifiable φ, where θ is an extension of φ. Sadly, property M2 and I2 conflict. Consider type family F1 a b where F1 Int Bool = Char F1 Double String = Char Consider now two matching problems: P1. match (F1 a Bool) (F1 Int Bool) P2. match (F1 a Bool) (F1 Double String) In case P1, we must find (a ↦ Int) to satisfy M2. In case P2, we must /not/ find (a ↦ Double), in order to satisfy I2. (Note that the correct mapping for I2 is (a ↦ Int). There is no way to discover this, but we mustn't map a to anything else!) We thus must parameterize the algorithm over whether it's being used for an injectivity check (refrain from looking at non-injective arguments to type families) or not (do indeed look at those arguments). This is implemented by the um_inj_tf field of UMEnv. (It's all a question of whether or not to include equation (7) from Fig. 2 of [ITF].) This extra parameter is a bit fiddly, perhaps, but seemingly less so than having two separate, almost-identical algorithms. Note [Self-substitution when matching] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ What should happen when we're *matching* (not unifying) a1 with a1? We should get a substitution [a1 |-> a1]. A successful match should map all the template variables (except ones that disappear when expanding synonyms). But when unifying, we don't want to do this, because we'll then fall into a loop. This arrangement affects the code in three places: - If we're matching a refined template variable, don't recur. Instead, just check for equality. That is, if we know [a |-> Maybe a] and are matching (a ~? Maybe Int), we want to just fail. - Skip the occurs check when matching. This comes up in two places, because matching against variables is handled separately from matching against full-on types. 
Note that this arrangement was provoked by a real failure, where the same unique ended up in the template as in the target. (It was a rule firing when compiling Data.List.NonEmpty.) Note [Matching coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this: type family F a data G a where MkG :: F a ~ Bool => G a type family Foo (x :: G a) :: F a type instance Foo MkG = False We would like that to be accepted. For that to work, we need to introduce a coercion variable on the left and then use it on the right. Accordingly, at use sites of Foo, we need to be able to use matching to figure out the value for the coercion. (See the desugared version: axFoo :: [a :: *, c :: F a ~ Bool]. Foo (MkG c) = False |> (sym c) ) We never want this action to happen during *unification* though, when all bets are off. Note [Kind coercions in Unify] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We wish to match/unify while ignoring casts. But, we can't just ignore them completely, or we'll end up with ill-kinded substitutions. For example, say we're matching `a` with `ty |> co`. If we just drop the cast, we'll return [a |-> ty], but `a` and `ty` might have different kinds. We can't just match/unify their kinds, either, because this might gratuitously fail. After all, `co` is the witness that the kinds are the same -- they may look nothing alike. So, we pass a kind coercion to the match/unify worker. This coercion witnesses the equality between the substed kind of the left-hand type and the substed kind of the right-hand type. Note that we do not unify kinds at the leaves (as we did previously). We thus have Hence: (Unification Kind Invariant) ----------------------------------- In the call unify_ty ty1 ty2 kco it must be that subst(kco) :: subst(kind(ty1)) ~N subst(kind(ty2)) where `subst` is the ambient substitution in the UM monad. 
And in the call unify_tys tys1 tys2 (which has no kco), after we unify any prefix of tys1,tys2, the kinds of the head of the remaining tys1,tys2 are identical after substitution. This implies, for example, that the kinds of the head of tys1,tys2 are identical after substitution. To get this coercion, we first have to match/unify the kinds before looking at the types. Happily, we need look only one level up, as all kinds are guaranteed to have kind *. When we're working with type applications (either TyConApp or AppTy) we need to worry about establishing INVARIANT, as the kinds of the function & arguments aren't (necessarily) included in the kind of the result. When unifying two TyConApps, this is easy, because the two TyCons are the same. Their kinds are thus the same. As long as we unify left-to-right, we'll be sure to unify types' kinds before the types themselves. (For example, think about Proxy :: forall k. k -> *. Unifying the first args matches up the kinds of the second args.) For AppTy, we must unify the kinds of the functions, but once these are unified, we can continue unifying arguments without worrying further about kinds. The interface to this module includes both "...Ty" functions and "...TyKi" functions. The former assume that INVARIANT is already established, either because the kinds are the same or because the list of types being passed in are the well-typed arguments to some type constructor (see two paragraphs above). The latter take a separate pre-pass over the kinds to establish INVARIANT. Sometimes, it's important not to take the second pass, as it caused #12442. We thought, at one point, that this was all unnecessary: why should casts be in types in the first place? But they are sometimes. In dependent/should_compile/KindEqualities2, we see, for example the constraint Num (Int |> (blah ; sym blah)). We naturally want to find a dictionary for that constraint, which requires dealing with coercions in this manner. 
Note [Matching in the presence of casts (1)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When matching, it is crucial that no variables from the template end up in the range of the matching substitution (obviously!). When unifying, that's not a constraint; instead we take the fixpoint of the substitution at the end. So what should we do with this, when matching? unify_ty (tmpl |> co) tgt kco Previously, wrongly, we pushed 'co' in the (horrid) accumulating 'kco' argument like this: unify_ty (tmpl |> co) tgt kco = unify_ty tmpl tgt (kco ; co) But that is obviously wrong because 'co' (from the template) ends up in 'kco', which in turn ends up in the range of the substitution. This all came up in #13910. Because we match tycon arguments left-to-right, the ambient substitution will already have a matching substitution for any kinds; so there is an easy fix: just apply the substitution-so-far to the coercion from the LHS. Note that * When matching, the first arg of unify_ty is always the template; we never swap round. * The above argument is distressingly indirect. We seek a better way. * One better way is to ensure that type patterns (the template in the matching process) have no casts. See #14119. Note [Matching in the presence of casts (2)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is another wrinkle (#17395). Suppose (T :: forall k. k -> Type) and we are matching tcMatchTy (T k (a::k)) (T j (b::j)) Then we'll match k :-> j, as expected. But then in unify_tys we invoke unify_tys env (a::k) (b::j) (Refl j) Although we have unified k and j, it's very important that we put (Refl j), /not/ (Refl k) as the fourth argument to unify_tys. If we put (Refl k) we'd end up with the substitution a :-> b |> Refl k which is bogus because one of the template variables, k, appears in the range of the substitution. Eek. Similar care is needed in unify_ty_app. Note [Polykinded tycon applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose T :: forall k. 
Type -> K and we are unifying ty1: T @Type Int :: Type ty2: T @(Type->Type) Int Int :: Type These two TyConApps have the same TyCon at the front but they (legitimately) have different numbers of arguments. They are surelyApart, so we can report that without looking any further (see #15704). Note [Unifying type applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unifying type applications is quite subtle, as we found in #23134 and #22647, when type families are involved. Suppose type family F a :: Type -> Type type family G k :: k = r | r -> k and consider these examples: * F Int ~ F Char, where F is injective Since F is injective, we can reduce this to Int ~ Char, therefore SurelyApart. * F Int ~ F Char, where F is not injective Without injectivity, return MaybeApart. * G Type ~ G (Type -> Type) Int Even though G is injective and the arguments to G are different, we cannot deduce apartness because the RHS is oversaturated. For example, G might be defined as G Type = Maybe Int G (Type -> Type) = Maybe So we return MaybeApart. * F Int Bool ~ F Int Char -- SurelyApart (since Bool is apart from Char) F Int Bool ~ Maybe a -- MaybeApart F Int Bool ~ a b -- MaybeApart F Int Bool ~ Char -> Bool -- MaybeApart An oversaturated type family can match an application, whether it's a TyConApp, AppTy or FunTy. Decompose. * F Int ~ a b We cannot decompose a saturated, or under-saturated type family application. We return MaybeApart. To handle all those conditions, unify_ty goes through the following checks in sequence, where Fn is a type family of arity n: * (C1) Fn x_1 ... x_n ~ Fn y_1 .. y_n A saturated application. Here we can unify arguments in which Fn is injective. * (C2) Fn x_1 ... x_n ~ anything, anything ~ Fn x_1 ... x_n A saturated type family can match anything - we return MaybeApart. * (C3) Fn x_1 ... x_m ~ a b, a b ~ Fn x_1 ... x_m where m > n An oversaturated type family can be decomposed. * (C4) Fn x_1 ... x_m ~ anything, anything ~ Fn x_1 ... 
x_m, where m > n If we couldn't decompose in the previous step, we return SurelyApart. Afterwards, the rest of the code doesn't have to worry about type families. Note [Unifying type synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the task of unifying two 'Type's of the form TyConApp tc [] ~ TyConApp tc [] where `tc` is a type synonym. A naive way to perform this comparison these would first expand the synonym and then compare the resulting expansions. However, this is obviously wasteful and the RHS of `tc` may be large; it is much better to rather compare the TyCons directly. Consequently, before expanding type synonyms in type comparisons we first look for a nullary TyConApp and simply compare the TyCons if we find one. Of course, if we find that the TyCons are *not* equal then we still need to perform the expansion as their RHSs may still be unifiable. E.g type T = S (a->a) type S a = [a] and consider T Int ~ S (Int -> Int) We can't decompose non-nullary synonyms. E.g. type R a = F a -- Where F is a type family and consider R (a->a) ~ R Int We can't conclude that (a->) ~ Int. (There is a currently-missed opportunity here; if we knew that R was /injective/, perhaps we could decompose.) We perform the nullary-type-synonym optimisation in a number of places: * GHC.Core.Unify.unify_ty * GHC.Tc.Solver.Equality.can_eq_nc' * GHC.Tc.Utils.Unify.uType This optimisation is especially helpful for the ubiquitous GHC.Types.Type, since GHC prefers to use the type synonym over @TYPE 'LiftedRep@ applications whenever possible. See Note [Using synonyms to compress types] in GHC.Core.Type for details. c.f. 
Note [Comparing type synonyms] in GHC.Core.TyCo.Compare -} -------------- unify_ty: the main workhorse ----------- type AmIUnifying = Bool -- True <=> Unifying -- False <=> Matching unify_ty :: UMEnv -> Type -> Type -- Types to be unified and a co -> CoercionN -- A coercion between their kinds -- See Note [Kind coercions in Unify] -> UM () -- Precondition: see (Unification Kind Invariant) -- -- See Note [Specification of unification] -- Respects newtypes, PredTypes -- See Note [Computing equality on types] in GHC.Core.Type unify_ty _env (TyConApp tc1 []) (TyConApp tc2 []) _kco -- See Note [Unifying type synonyms] | tc1 == tc2 = return () unify_ty env ty1 ty2 kco -- Now handle the cases we can "look through": synonyms and casts. | Just ty1' <- coreView ty1 = unify_ty env ty1' ty2 kco | Just ty2' <- coreView ty2 = unify_ty env ty1 ty2' kco | CastTy ty1' co <- ty1 = if um_unif env then unify_ty env ty1' ty2 (co `mkTransCo` kco) else -- See Note [Matching in the presence of casts (1)] do { subst <- getSubst env ; let co' = substCo subst co ; unify_ty env ty1' ty2 (co' `mkTransCo` kco) } | CastTy ty2' co <- ty2 = unify_ty env ty1 ty2' (kco `mkTransCo` mkSymCo co) unify_ty env (TyVarTy tv1) ty2 kco = uVar env tv1 ty2 kco unify_ty env ty1 (TyVarTy tv2) kco | um_unif env -- If unifying, can swap args = uVar (umSwapRn env) tv2 ty1 (mkSymCo kco) unify_ty env ty1 ty2 _kco -- Handle non-oversaturated type families first -- See Note [Unifying type applications] -- -- (C1) If we have T x1 ... xn ~ T y1 ... 
yn, use injectivity information of T -- Note that both sides must not be oversaturated | Just (tc1, tys1) <- isSatTyFamApp mb_tc_app1 , Just (tc2, tys2) <- isSatTyFamApp mb_tc_app2 , tc1 == tc2 = do { let inj = case tyConInjectivityInfo tc1 of NotInjective -> repeat False Injective bs -> bs (inj_tys1, noninj_tys1) = partitionByList inj tys1 (inj_tys2, noninj_tys2) = partitionByList inj tys2 ; unify_tys env inj_tys1 inj_tys2 ; unless (um_inj_tf env) $ -- See (end of) Note [Specification of unification] don'tBeSoSure MARTypeFamily $ unify_tys env noninj_tys1 noninj_tys2 } | Just _ <- isSatTyFamApp mb_tc_app1 -- (C2) A (not-over-saturated) type-family application = maybeApart MARTypeFamily -- behaves like a type variable; might match | Just _ <- isSatTyFamApp mb_tc_app2 -- (C2) A (not-over-saturated) type-family application -- behaves like a type variable; might unify -- but doesn't match (as in the TyVarTy case) = if um_unif env then maybeApart MARTypeFamily else surelyApart -- Handle oversaturated type families. -- -- They can match an application (TyConApp/FunTy/AppTy), this is handled -- the same way as in the AppTy case below. -- -- If there is no application, an oversaturated type family can only -- match a type variable or a saturated type family, -- both of which we handled earlier. So we can say surelyApart. | Just (tc1, _) <- mb_tc_app1 , isTypeFamilyTyCon tc1 = if | Just (ty1a, ty1b) <- tcSplitAppTyNoView_maybe ty1 , Just (ty2a, ty2b) <- tcSplitAppTyNoView_maybe ty2 -> unify_ty_app env ty1a [ty1b] ty2a [ty2b] -- (C3) | otherwise -> surelyApart -- (C4) | Just (tc2, _) <- mb_tc_app2 , isTypeFamilyTyCon tc2 = if | Just (ty1a, ty1b) <- tcSplitAppTyNoView_maybe ty1 , Just (ty2a, ty2b) <- tcSplitAppTyNoView_maybe ty2 -> unify_ty_app env ty1a [ty1b] ty2a [ty2b] -- (C3) | otherwise -> surelyApart -- (C4) -- At this point, neither tc1 nor tc2 can be a type family. 
| Just (tc1, tys1) <- mb_tc_app1 , Just (tc2, tys2) <- mb_tc_app2 , tc1 == tc2 = do { massertPpr (isInjectiveTyCon tc1 Nominal) (ppr tc1) ; unify_tc_app tc1 tys1 tys2 } -- TYPE and CONSTRAINT are not Apart -- See Note [Type and Constraint are not apart] in GHC.Builtin.Types.Prim -- NB: at this point we know that the two TyCons do not match | Just (tc1,_) <- mb_tc_app1, let u1 = tyConUnique tc1 , Just (tc2,_) <- mb_tc_app2, let u2 = tyConUnique tc2 , (u1 == tYPETyConKey && u2 == cONSTRAINTTyConKey) || (u2 == tYPETyConKey && u1 == cONSTRAINTTyConKey) = maybeApart MARTypeVsConstraint -- We don't bother to look inside; wrinkle (W3) in GHC.Builtin.Types.Prim -- Note [Type and Constraint are not apart] -- The arrow types are not Apart -- See Note [Type and Constraint are not apart] in GHC.Builtin.Types.Prim -- wrinkle (W2) -- NB1: at this point we know that the two TyCons do not match -- NB2: In the common FunTy/FunTy case you might wonder if we want to go via -- splitTyConApp_maybe. But yes we do: we need to look at those implied -- kind argument in order to satisfy (Unification Kind Invariant) | FunTy {} <- ty1 , FunTy {} <- ty2 = maybeApart MARTypeVsConstraint -- We don't bother to look inside; wrinkle (W3) in GHC.Builtin.Types.Prim -- Note [Type and Constraint are not apart] where mb_tc_app1 = splitTyConApp_maybe ty1 mb_tc_app2 = splitTyConApp_maybe ty2 unify_tc_app tc tys1 tys2 | tc == fUNTyCon , IgnoreMultiplicities <- um_arr_mult env , (_mult1 : no_mult_tys1) <- tys1 , (_mult2 : no_mult_tys2) <- tys2 = -- We're comparing function arrow types here (not constraint arrow -- types!), and they have at least one argument, which is the arrow's -- multiplicity annotation. The flag `um_arr_mult` instructs us to -- ignore multiplicities in this very case. This is a little tricky: see -- point (3) in Note [Rewrite rules ignore multiplicities in FunTy]. unify_tys env no_mult_tys1 no_mult_tys2 | otherwise = unify_tys env tys1 tys2 -- Applications need a bit of care! 
-- They can match FunTy and TyConApp, so use splitAppTy_maybe -- NB: we've already dealt with type variables, -- so if one type is an App the other one jolly well better be too unify_ty env (AppTy ty1a ty1b) ty2 _kco | Just (ty2a, ty2b) <- tcSplitAppTyNoView_maybe ty2 = unify_ty_app env ty1a [ty1b] ty2a [ty2b] unify_ty env ty1 (AppTy ty2a ty2b) _kco | Just (ty1a, ty1b) <- tcSplitAppTyNoView_maybe ty1 = unify_ty_app env ty1a [ty1b] ty2a [ty2b] unify_ty _ (LitTy x) (LitTy y) _kco | x == y = return () unify_ty env (ForAllTy (Bndr tv1 _) ty1) (ForAllTy (Bndr tv2 _) ty2) kco = do { unify_ty env (varType tv1) (varType tv2) (mkNomReflCo liftedTypeKind) ; let env' = umRnBndr2 env tv1 tv2 ; unify_ty env' ty1 ty2 kco } -- See Note [Matching coercion variables] unify_ty env (CoercionTy co1) (CoercionTy co2) kco = do { c_subst <- getCvSubstEnv ; case co1 of CoVarCo cv | not (um_unif env) , not (cv `elemVarEnv` c_subst) , let (_, co_l, co_r) = decomposeFunCo kco -- Because the coercion is used in a type, it should be safe to -- ignore the multiplicity coercion. 
-- cv :: t1 ~ t2 -- co2 :: s1 ~ s2 -- co_l :: t1 ~ s1 -- co_r :: t2 ~ s2 rhs_co = co_l `mkTransCo` co2 `mkTransCo` mkSymCo co_r , BindMe <- tvBindFlag env cv (CoercionTy rhs_co) -> do { checkRnEnv env (tyCoVarsOfCo co2) ; extendCvEnv cv rhs_co } _ -> return () } unify_ty _ _ _ _ = surelyApart unify_ty_app :: UMEnv -> Type -> [Type] -> Type -> [Type] -> UM () unify_ty_app env ty1 ty1args ty2 ty2args | Just (ty1', ty1a) <- splitAppTyNoView_maybe ty1 , Just (ty2', ty2a) <- splitAppTyNoView_maybe ty2 = unify_ty_app env ty1' (ty1a : ty1args) ty2' (ty2a : ty2args) | otherwise = do { let ki1 = typeKind ty1 ki2 = typeKind ty2 -- See Note [Kind coercions in Unify] ; unify_ty env ki1 ki2 (mkNomReflCo liftedTypeKind) ; unify_ty env ty1 ty2 (mkNomReflCo ki2) -- Very important: 'ki2' not 'ki1' -- See Note [Matching in the presence of casts (2)] ; unify_tys env ty1args ty2args } unify_tys :: UMEnv -> [Type] -> [Type] -> UM () -- Precondition: see (Unification Kind Invariant) unify_tys env orig_xs orig_ys = go orig_xs orig_ys where go [] [] = return () go (x:xs) (y:ys) -- See Note [Kind coercions in Unify] = do { unify_ty env x y (mkNomReflCo $ typeKind y) -- Very important: 'y' not 'x' -- See Note [Matching in the presence of casts (2)] ; go xs ys } go _ _ = surelyApart -- Possibly different saturations of a polykinded tycon -- See Note [Polykinded tycon applications] isSatTyFamApp :: Maybe (TyCon, [Type]) -> Maybe (TyCon, [Type]) -- Return the argument if we have a saturated type family application -- If it is /over/ saturated then we return False. E.g. -- unify_ty (F a b) (c d) where F has arity 1 -- we definitely want to decompose that type application! 
(#22647) isSatTyFamApp tapp@(Just (tc, tys)) | isTypeFamilyTyCon tc && not (tys `lengthExceeds` tyConArity tc) -- Not over-saturated = tapp isSatTyFamApp _ = Nothing --------------------------------- uVar :: UMEnv -> InTyVar -- Variable to be unified -> Type -- with this Type -> Coercion -- :: kind tv ~N kind ty -> UM () uVar env tv1 ty kco = do { -- Apply the ambient renaming let tv1' = umRnOccL env tv1 -- Check to see whether tv1 is refined by the substitution ; subst <- getTvSubstEnv ; case (lookupVarEnv subst tv1') of Just ty' | um_unif env -- Unifying, so call -> unify_ty env ty' ty kco -- back into unify | otherwise -> -- Matching, we don't want to just recur here. -- this is because the range of the subst is the target -- type, not the template type. So, just check for -- normal type equality. unless ((ty' `mkCastTy` kco) `tcEqType` ty) $ surelyApart -- NB: it's important to use `tcEqType` instead of `eqType` here, -- otherwise we might not reject a substitution -- which unifies `Type` with `Constraint`, e.g. -- a call to tc_unify_tys with arguments -- -- tys1 = [k,k] -- tys2 = [Type, Constraint] -- -- See test cases: T11715b, T20521. 
Nothing -> uUnrefined env tv1' ty ty kco } -- No, continue uUnrefined :: UMEnv -> OutTyVar -- variable to be unified -> Type -- with this Type -> Type -- (version w/ expanded synonyms) -> Coercion -- :: kind tv ~N kind ty -> UM () -- We know that tv1 isn't refined uUnrefined env tv1' ty2 ty2' kco | Just ty2'' <- coreView ty2' = uUnrefined env tv1' ty2 ty2'' kco -- Unwrap synonyms -- This is essential, in case we have -- type Foo a = a -- and then unify a ~ Foo a | TyVarTy tv2 <- ty2' = do { let tv2' = umRnOccR env tv2 ; unless (tv1' == tv2' && um_unif env) $ do -- If we are unifying a ~ a, just return immediately -- Do not extend the substitution -- See Note [Self-substitution when matching] -- Check to see whether tv2 is refined { subst <- getTvSubstEnv ; case lookupVarEnv subst tv2 of { Just ty' | um_unif env -> uUnrefined env tv1' ty' ty' kco ; _ -> do { -- So both are unrefined -- Bind one or the other, depending on which is bindable ; let rhs1 = ty2 `mkCastTy` mkSymCo kco rhs2 = ty1 `mkCastTy` kco b1 = tvBindFlag env tv1' rhs1 b2 = tvBindFlag env tv2' rhs2 ty1 = mkTyVarTy tv1' ; case (b1, b2) of (BindMe, _) -> bindTv env tv1' rhs1 (_, BindMe) | um_unif env -> bindTv (umSwapRn env) tv2 rhs2 _ | tv1' == tv2' -> return () -- How could this happen? If we're only matching and if -- we're comparing forall-bound variables. _ -> surelyApart }}}} uUnrefined env tv1' ty2 _ kco -- ty2 is not a type variable = case tvBindFlag env tv1' rhs of Apart -> surelyApart BindMe -> bindTv env tv1' rhs where rhs = ty2 `mkCastTy` mkSymCo kco bindTv :: UMEnv -> OutTyVar -> Type -> UM () -- OK, so we want to extend the substitution with tv := ty -- But first, we must do a couple of checks bindTv env tv1 ty2 = do { let free_tvs2 = tyCoVarsOfType ty2 -- Make sure tys mentions no local variables -- E.g. (forall a. b) ~ (forall a. [a]) -- We should not unify b := [a]! 
; checkRnEnv env free_tvs2 -- Occurs check, see Note [Fine-grained unification] -- Make sure you include 'kco' (which ty2 does) #14846 ; occurs <- occursCheck env tv1 free_tvs2 ; if occurs then maybeApart MARInfinite else extendTvEnv tv1 ty2 } occursCheck :: UMEnv -> TyVar -> VarSet -> UM Bool occursCheck env tv free_tvs | um_unif env = do { tsubst <- getTvSubstEnv ; return (tv `elemVarSet` niSubstTvSet tsubst free_tvs) } | otherwise -- Matching; no occurs check = return False -- See Note [Self-substitution when matching] {- %************************************************************************ %* * Binding decisions * * ************************************************************************ -} data BindFlag = BindMe -- ^ A regular type variable | Apart -- ^ Declare that this type variable is /apart/ from the -- type provided. That is, the type variable will never -- be instantiated to that type. -- See also Note [Binding when looking up instances] -- in GHC.Core.InstEnv. deriving Eq -- NB: It would be conceivable to have an analogue to MaybeApart here, -- but there is not yet a need. {- ************************************************************************ * * Unification monad * * ************************************************************************ -} data UMEnv = UMEnv { um_unif :: AmIUnifying , um_inj_tf :: Bool -- Checking for injectivity? -- See (end of) Note [Specification of unification] , um_arr_mult :: MultiplicityFlag -- Whether to unify multiplicity arguments when unifying arrows. -- See Note [Rewrite rules ignore multiplicities in FunTy] , um_rn_env :: RnEnv2 -- Renaming InTyVars to OutTyVars; this eliminates -- shadowing, and lines up matching foralls on the left -- and right , um_skols :: TyVarSet -- OutTyVars bound by a forall in this unification; -- Do not bind these in the substitution! 
-- See the function tvBindFlag , um_bind_fun :: BindFun -- User-supplied BindFlag function, -- for variables not in um_skols } data UMState = UMState { um_tv_env :: TvSubstEnv , um_cv_env :: CvSubstEnv } newtype UM a = UM' { unUM :: UMState -> UnifyResultM (UMState, a) } -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern UM :: (UMState -> UnifyResultM (UMState, a)) -> UM a -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern UM m <- UM' m where UM m = UM' (oneShot m) {-# COMPLETE UM #-} instance Functor UM where fmap f (UM m) = UM (\s -> fmap (\(s', v) -> (s', f v)) (m s)) instance Applicative UM where pure a = UM (\s -> pure (s, a)) (<*>) = ap instance Monad UM where {-# INLINE (>>=) #-} -- See Note [INLINE pragmas and (>>)] in GHC.Utils.Monad m >>= k = UM (\state -> do { (state', v) <- unUM m state ; unUM (k v) state' }) instance MonadFail UM where fail _ = UM (\_ -> SurelyApart) -- failed pattern match initUM :: TvSubstEnv -- subst to extend -> CvSubstEnv -> UM a -> UnifyResultM a initUM subst_env cv_subst_env um = case unUM um state of Unifiable (_, subst) -> Unifiable subst MaybeApart r (_, subst) -> MaybeApart r subst SurelyApart -> SurelyApart where state = UMState { um_tv_env = subst_env , um_cv_env = cv_subst_env } tvBindFlag :: UMEnv -> OutTyVar -> Type -> BindFlag tvBindFlag env tv rhs | tv `elemVarSet` um_skols env = Apart | otherwise = um_bind_fun env tv rhs getTvSubstEnv :: UM TvSubstEnv getTvSubstEnv = UM $ \state -> Unifiable (state, um_tv_env state) getCvSubstEnv :: UM CvSubstEnv getCvSubstEnv = UM $ \state -> Unifiable (state, um_cv_env state) getSubst :: UMEnv -> UM Subst getSubst env = do { tv_env <- getTvSubstEnv ; cv_env <- getCvSubstEnv ; let in_scope = rnInScopeSet (um_rn_env env) ; return (mkTCvSubst in_scope tv_env cv_env) } extendTvEnv :: TyVar -> Type -> UM () extendTvEnv tv ty = UM $ \state -> Unifiable (state { um_tv_env = extendVarEnv (um_tv_env state) tv ty }, ()) extendCvEnv :: CoVar -> 
Coercion -> UM () extendCvEnv cv co = UM $ \state -> Unifiable (state { um_cv_env = extendVarEnv (um_cv_env state) cv co }, ()) umRnBndr2 :: UMEnv -> TyCoVar -> TyCoVar -> UMEnv umRnBndr2 env v1 v2 = env { um_rn_env = rn_env', um_skols = um_skols env `extendVarSet` v' } where (rn_env', v') = rnBndr2_var (um_rn_env env) v1 v2 checkRnEnv :: UMEnv -> VarSet -> UM () checkRnEnv env varset | isEmptyVarSet skol_vars = return () | varset `disjointVarSet` skol_vars = return () | otherwise = surelyApart where skol_vars = um_skols env -- NB: That isEmptyVarSet guard is a critical optimization; -- it means we don't have to calculate the free vars of -- the type, often saving quite a bit of allocation. -- | Converts any SurelyApart to a MaybeApart don'tBeSoSure :: MaybeApartReason -> UM () -> UM () don'tBeSoSure r um = UM $ \ state -> case unUM um state of SurelyApart -> MaybeApart r (state, ()) other -> other umRnOccL :: UMEnv -> TyVar -> TyVar umRnOccL env v = rnOccL (um_rn_env env) v umRnOccR :: UMEnv -> TyVar -> TyVar umRnOccR env v = rnOccR (um_rn_env env) v umSwapRn :: UMEnv -> UMEnv umSwapRn env = env { um_rn_env = rnSwap (um_rn_env env) } maybeApart :: MaybeApartReason -> UM () maybeApart r = UM (\state -> MaybeApart r (state, ())) surelyApart :: UM a surelyApart = UM (\_ -> SurelyApart) {- %************************************************************************ %* * Matching a (lifted) type against a coercion %* * %************************************************************************ This section defines essentially an inverse to liftCoSubst. It is defined here to avoid a dependency from Coercion on this module. -} data MatchEnv = ME { me_tmpls :: TyVarSet , me_env :: RnEnv2 } -- | 'liftCoMatch' is sort of inverse to 'liftCoSubst'. In particular, if -- @liftCoMatch vars ty co == Just s@, then @liftCoSubst s ty == co@, -- where @==@ there means that the result of 'liftCoSubst' has the same -- type as the original co; but may be different under the hood. 
-- That is, it matches a type against a coercion of the same -- "shape", and returns a lifting substitution which could have been -- used to produce the given coercion from the given type. -- Note that this function is incomplete -- it might return Nothing -- when there does indeed exist a possible lifting context. -- -- This function is incomplete in that it doesn't respect the equality -- in `eqType`. That is, it's possible that this will succeed for t1 and -- fail for t2, even when t1 `eqType` t2. That's because it depends on -- there being a very similar structure between the type and the coercion. -- This incompleteness shouldn't be all that surprising, especially because -- it depends on the structure of the coercion, which is a silly thing to do. -- -- The lifting context produced doesn't have to be exacting in the roles -- of the mappings. This is because any use of the lifting context will -- also require a desired role. Thus, this algorithm prefers mapping to -- nominal coercions where it can do so. liftCoMatch :: TyCoVarSet -> Type -> Coercion -> Maybe LiftingContext liftCoMatch tmpls ty co = do { cenv1 <- ty_co_match menv emptyVarEnv ki ki_co ki_ki_co ki_ki_co ; cenv2 <- ty_co_match menv cenv1 ty co (mkNomReflCo co_lkind) (mkNomReflCo co_rkind) ; return (LC (mkEmptySubst in_scope) cenv2) } where menv = ME { me_tmpls = tmpls, me_env = mkRnEnv2 in_scope } in_scope = mkInScopeSet (tmpls `unionVarSet` tyCoVarsOfCo co) -- Like tcMatchTy, assume all the interesting variables -- in ty are in tmpls ki = typeKind ty ki_co = promoteCoercion co ki_ki_co = mkNomReflCo liftedTypeKind Pair co_lkind co_rkind = coercionKind ki_co -- | 'ty_co_match' does all the actual work for 'liftCoMatch'. 
ty_co_match :: MatchEnv -- ^ ambient helpful info -> LiftCoEnv -- ^ incoming subst -> Type -- ^ ty, type to match -> Coercion -- ^ co :: lty ~r rty, coercion to match against -> Coercion -- ^ :: kind(lsubst(ty)) ~N kind(lty) -> Coercion -- ^ :: kind(rsubst(ty)) ~N kind(rty) -> Maybe LiftCoEnv -- ^ Just env ==> liftCoSubst Nominal env ty == co, modulo roles. -- Also: Just env ==> lsubst(ty) == lty and rsubst(ty) == rty, -- where lsubst = lcSubstLeft(env) and rsubst = lcSubstRight(env) ty_co_match menv subst ty co lkco rkco | Just ty' <- coreView ty = ty_co_match menv subst ty' co lkco rkco -- handle Refl case: | tyCoVarsOfType ty `isNotInDomainOf` subst , Just (ty', _) <- isReflCo_maybe co , ty `eqType` ty' -- Why `eqType` and not `tcEqType`? Because this function is only used -- during coercion optimisation, after type-checking has finished. = Just subst where isNotInDomainOf :: VarSet -> VarEnv a -> Bool isNotInDomainOf set env = noneSet (\v -> elemVarEnv v env) set noneSet :: (Var -> Bool) -> VarSet -> Bool noneSet f = allVarSet (not . 
f) ty_co_match menv subst ty co lkco rkco | CastTy ty' co' <- ty -- See Note [Matching in the presence of casts (1)] = let empty_subst = mkEmptySubst (rnInScopeSet (me_env menv)) substed_co_l = substCo (liftEnvSubstLeft empty_subst subst) co' substed_co_r = substCo (liftEnvSubstRight empty_subst subst) co' in ty_co_match menv subst ty' co (substed_co_l `mkTransCo` lkco) (substed_co_r `mkTransCo` rkco) | SymCo co' <- co = swapLiftCoEnv <$> ty_co_match menv (swapLiftCoEnv subst) ty co' rkco lkco -- Match a type variable against a non-refl coercion ty_co_match menv subst (TyVarTy tv1) co lkco rkco | Just co1' <- lookupVarEnv subst tv1' -- tv1' is already bound to co1 = if eqCoercionX (nukeRnEnvL rn_env) co1' co then Just subst else Nothing -- no match since tv1 matches two different coercions | tv1' `elemVarSet` me_tmpls menv -- tv1' is a template var = if any (inRnEnvR rn_env) (tyCoVarsOfCoList co) then Nothing -- occurs check failed else Just $ extendVarEnv subst tv1' $ castCoercionKind co (mkSymCo lkco) (mkSymCo rkco) | otherwise = Nothing where rn_env = me_env menv tv1' = rnOccL rn_env tv1 -- just look through SubCo's. We don't really care about roles here. ty_co_match menv subst ty (SubCo co) lkco rkco = ty_co_match menv subst ty co lkco rkco ty_co_match menv subst (AppTy ty1a ty1b) co _lkco _rkco | Just (co2, arg2) <- splitAppCo_maybe co -- c.f. 
Unify.match on AppTy = ty_co_match_app menv subst ty1a [ty1b] co2 [arg2] ty_co_match menv subst ty1 (AppCo co2 arg2) _lkco _rkco | Just (ty1a, ty1b) <- splitAppTyNoView_maybe ty1 -- yes, the one from Type, not TcType; this is for coercion optimization = ty_co_match_app menv subst ty1a [ty1b] co2 [arg2] ty_co_match menv subst (TyConApp tc1 tys) (TyConAppCo _ tc2 cos) _lkco _rkco = ty_co_match_tc menv subst tc1 tys tc2 cos ty_co_match menv subst (FunTy { ft_mult = w, ft_arg = ty1, ft_res = ty2 }) (FunCo { fco_mult = co_w, fco_arg = co1, fco_res = co2 }) _lkco _rkco = ty_co_match_args menv subst [w, rep1, rep2, ty1, ty2] [co_w, co1_rep, co2_rep, co1, co2] where rep1 = getRuntimeRep ty1 rep2 = getRuntimeRep ty2 co1_rep = mkRuntimeRepCo co1 co2_rep = mkRuntimeRepCo co2 -- NB: we include the RuntimeRep arguments in the matching; -- not doing so caused #21205. ty_co_match menv subst (ForAllTy (Bndr tv1 vis1t) ty1) (ForAllCo tv2 vis1c vis2c kind_co2 co2) lkco rkco | isTyVar tv1 && isTyVar tv2 , vis1t == vis1c && vis1c == vis2c -- Is this necessary? -- Is this visibility check necessary? @rae says: yes, I think the -- check is necessary, if we're caring about visibility (and we are). -- But ty_co_match is a dark and not important corner. = do { subst1 <- ty_co_match menv subst (tyVarKind tv1) kind_co2 ki_ki_co ki_ki_co ; let rn_env0 = me_env menv rn_env1 = rnBndr2 rn_env0 tv1 tv2 menv' = menv { me_env = rn_env1 } ; ty_co_match menv' subst1 ty1 co2 lkco rkco } where ki_ki_co = mkNomReflCo liftedTypeKind -- ty_co_match menv subst (ForAllTy (Bndr cv1 _) ty1) -- (ForAllCo cv2 kind_co2 co2) -- lkco rkco -- | isCoVar cv1 && isCoVar cv2 -- We seems not to have enough information for this case -- 1. 
Given: -- cv1 :: (s1 :: k1) ~r (s2 :: k2) -- kind_co2 :: (s1' ~ s2') ~N (t1 ~ t2) -- eta1 = mkSelCo (SelTyCon 2 role) (downgradeRole r Nominal kind_co2) -- :: s1' ~ t1 -- eta2 = mkSelCo (SelTyCon 3 role) (downgradeRole r Nominal kind_co2) -- :: s2' ~ t2 -- Wanted: -- subst1 <- ty_co_match menv subst s1 eta1 kco1 kco2 -- subst2 <- ty_co_match menv subst1 s2 eta2 kco3 kco4 -- Question: How do we get kcoi? -- 2. Given: -- lkco :: <*> -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep -- rkco :: <*> -- Wanted: -- ty_co_match menv' subst2 ty1 co2 lkco' rkco' -- Question: How do we get lkco' and rkco'? ty_co_match _ subst (CoercionTy {}) _ _ _ = Just subst -- don't inspect coercions ty_co_match menv subst ty (GRefl r t (MCo co)) lkco rkco = ty_co_match menv subst ty (GRefl r t MRefl) lkco (rkco `mkTransCo` mkSymCo co) ty_co_match menv subst ty co1 lkco rkco | Just (CastTy t co, r) <- isReflCo_maybe co1 -- In @pushRefl@, pushing reflexive coercion inside CastTy will give us -- t |> co ~ t ; ; t ~ t |> co -- But transitive coercions are not helpful. Therefore we deal -- with it here: we do recursion on the smaller reflexive coercion, -- while propagating the correct kind coercions. 
= let kco' = mkSymCo co in ty_co_match menv subst ty (mkReflCo r t) (lkco `mkTransCo` kco') (rkco `mkTransCo` kco') ty_co_match menv subst ty co lkco rkco | Just co' <- pushRefl co = ty_co_match menv subst ty co' lkco rkco | otherwise = Nothing ty_co_match_tc :: MatchEnv -> LiftCoEnv -> TyCon -> [Type] -> TyCon -> [Coercion] -> Maybe LiftCoEnv ty_co_match_tc menv subst tc1 tys1 tc2 cos2 = do { guard (tc1 == tc2) ; ty_co_match_args menv subst tys1 cos2 } ty_co_match_app :: MatchEnv -> LiftCoEnv -> Type -> [Type] -> Coercion -> [Coercion] -> Maybe LiftCoEnv ty_co_match_app menv subst ty1 ty1args co2 co2args | Just (ty1', ty1a) <- splitAppTyNoView_maybe ty1 , Just (co2', co2a) <- splitAppCo_maybe co2 = ty_co_match_app menv subst ty1' (ty1a : ty1args) co2' (co2a : co2args) | otherwise = do { subst1 <- ty_co_match menv subst ki1 ki2 ki_ki_co ki_ki_co ; let Pair lkco rkco = mkNomReflCo <$> coercionKind ki2 ; subst2 <- ty_co_match menv subst1 ty1 co2 lkco rkco ; ty_co_match_args menv subst2 ty1args co2args } where ki1 = typeKind ty1 ki2 = promoteCoercion co2 ki_ki_co = mkNomReflCo liftedTypeKind ty_co_match_args :: MatchEnv -> LiftCoEnv -> [Type] -> [Coercion] -> Maybe LiftCoEnv ty_co_match_args menv subst (ty:tys) (arg:args) = do { let Pair lty rty = coercionKind arg lkco = mkNomReflCo (typeKind lty) rkco = mkNomReflCo (typeKind rty) ; subst' <- ty_co_match menv subst ty arg lkco rkco ; ty_co_match_args menv subst' tys args } ty_co_match_args _ subst [] [] = Just subst ty_co_match_args _ _ _ _ = Nothing pushRefl :: Coercion -> Maybe Coercion pushRefl co = case (isReflCo_maybe co) of Just (AppTy ty1 ty2, Nominal) -> Just (AppCo (mkReflCo Nominal ty1) (mkNomReflCo ty2)) Just (FunTy af w ty1 ty2, r) -> Just (FunCo r af af (mkReflCo r w) (mkReflCo r ty1) (mkReflCo r ty2)) Just (TyConApp tc tys, r) -> Just (TyConAppCo r tc (zipWith mkReflCo (tyConRoleListX r tc) tys)) Just (ForAllTy (Bndr tv vis) ty, r) -> Just (ForAllCo { fco_tcv = tv, fco_visL = vis, fco_visR = vis , 
fco_kind = mkNomReflCo (varType tv) , fco_body = mkReflCo r ty }) _ -> Nothing {- ************************************************************************ * * Flattening * * ************************************************************************ Note [Flattening type-family applications when matching instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in "Closed type families with overlapping equations" http://research.microsoft.com/en-us/um/people/simonpj/papers/ext-f/axioms-extended.pdf we need to flatten core types before unifying them, when checking for "surely-apart" against earlier equations of a closed type family. Flattening means replacing all top-level uses of type functions with fresh variables, *taking care to preserve sharing*. That is, the type (Either (F a b) (F a b)) should flatten to (Either c c), never (Either c d). Here is a nice example of why it's all necessary: type family F a b where F Int Bool = Char F a b = Double type family G a -- open, no instances How do we reduce (F (G Float) (G Float))? The first equation clearly doesn't match, while the second equation does. But, before reducing, we must make sure that the target can never become (F Int Bool). Well, no matter what G Float becomes, it certainly won't become *both* Int and Bool, so indeed we're safe reducing (F (G Float) (G Float)) to Double. This is necessary not only to get more reductions (which we might be willing to give up on), but for substitutivity. If we have (F x x), we can see that (F x x) can reduce to Double. So, it had better be the case that (F blah blah) can reduce to Double, no matter what (blah) is! Flattening as done below ensures this. We also use this flattening operation to check for class instances. If we have instance C (Maybe b) instance {-# OVERLAPPING #-} C (Maybe Bool) [W] C (Maybe (F a)) we want to know that the second instance might match later. 
So we flatten the (F a) in the target before trying to unify with instances. (This is done in GHC.Core.InstEnv.lookupInstEnv'.) The algorithm works by building up a TypeMap TyVar, mapping type family applications to fresh variables. This mapping must be threaded through all the function calls, as any entry in the mapping must be propagated to all future nodes in the tree. The algorithm also must track the set of in-scope variables, in order to make fresh variables as it flattens. (We are far from a source of fresh Uniques.) See Wrinkle 2, below. There are wrinkles, of course: 1. The flattening algorithm must account for the possibility of inner `forall`s. (A `forall` seen here can happen only because of impredicativity. However, the flattening operation is an algorithm in Core, which is impredicative.) Suppose we have (forall b. F b) -> (forall b. F b). Of course, those two bs are entirely unrelated, and so we should certainly not flatten the two calls F b to the same variable. Instead, they must be treated separately. We thus carry a substitution that freshens variables; we must apply this substitution (in `coreFlattenTyFamApp`) before looking up an application in the environment. Note that the range of the substitution contains only TyVars, never anything else. For the sake of efficiency, we only apply this substitution when absolutely necessary. Namely: * We do not perform the substitution at all if it is empty. * We only need to worry about the arguments of a type family that are within the arity of said type family, so we can get away with not applying the substitution to any oversaturated type family arguments. * Importantly, we do /not/ achieve this substitution by recursively flattening the arguments, as this would be wrong. Consider `F (G a)`, where F and G are type families. We might decide that `F (G a)` flattens to `beta`. Later, the substitution is non-empty (but does not map `a`) and so we flatten `G a` to `gamma` and try to flatten `F gamma`. 
Of course, `F gamma` is unknown, and so we flatten it to `delta`, but it really should have been `beta`! Argh! Moral of the story: instead of flattening the arguments, just substitute them directly. 2. There are two different reasons we might add a variable to the in-scope set as we work: A. We have just invented a new flattening variable. B. We have entered a `forall`. Annoying here is that in-scope variable source (A) must be threaded through the calls. For example, consider (F b -> forall c. F c). Suppose that, when flattening F b, we invent a fresh variable c. Now, when we encounter (forall c. F c), we need to know c is already in scope so that we locally rename c to c'. However, if we don't thread through the in-scope set from one argument of (->) to the other, we won't know this and might get very confused. In contrast, source (B) increases only as we go deeper, as in-scope sets normally do. However, even here we must be careful. The TypeMap TyVar that contains mappings from type family applications to freshened variables will be threaded through both sides of (forall b. F b) -> (forall b. F b). We thus must make sure that the two `b`s don't get renamed to the same b1. (If they did, then looking up `F b1` would yield the same flatten var for each.) So, even though `forall`-bound variables should really be in the in-scope set only when they are in scope, we retain these variables even outside of their scope. This ensures that, if we encounter a fresh `forall`-bound b, we will rename it to b2, not b1. Note that keeping a larger in-scope set than strictly necessary is always OK, as in-scope sets are only ever used to avoid collisions. Sadly, the freshening substitution described in (1) really mustn't bind variables outside of their scope: note that its domain is the *unrenamed* variables. This means that the substitution gets "pushed down" (like a reader monad) while the in-scope set gets threaded (like a state monad). 
Because a Subst contains its own in-scope set, we don't carry a Subst; instead, we just carry a TvSubstEnv down, tying it to the InScopeSet traveling separately as necessary. 3. Consider `F ty_1 ... ty_n`, where F is a type family with arity k: type family F ty_1 ... ty_k :: res_k It's tempting to just flatten `F ty_1 ... ty_n` to `alpha`, where alpha is a flattening skolem. But we must instead flatten it to `alpha ty_(k+1) ... ty_n`—that is, by only flattening up to the arity of the type family. Why is this better? Consider the following concrete example from #16995: type family Param :: Type -> Type type family LookupParam (a :: Type) :: Type where LookupParam (f Char) = Bool LookupParam x = Int foo :: LookupParam (Param ()) foo = 42 In order for `foo` to typecheck, `LookupParam (Param ())` must reduce to `Int`. But if we flatten `Param ()` to `alpha`, then GHC can't be sure if `alpha` is apart from `f Char`, so it won't fall through to the second equation. But since the `Param` type family has arity 0, we can instead flatten `Param ()` to `alpha ()`, about which GHC knows with confidence is apart from `f Char`, permitting the second equation to be reached. Not only does this allow more programs to be accepted, it's also important for correctness. Not doing this was the root cause of the Core Lint error in #16995. flattenTys is defined here because of module dependencies. 
-} data FlattenEnv = FlattenEnv { fe_type_map :: TypeMap (TyVar, TyCon, [Type]) -- domain: exactly-saturated type family applications -- range: (fresh variable, type family tycon, args) , fe_in_scope :: InScopeSet } -- See Note [Flattening type-family applications when matching instances] emptyFlattenEnv :: InScopeSet -> FlattenEnv emptyFlattenEnv in_scope = FlattenEnv { fe_type_map = emptyTypeMap , fe_in_scope = in_scope } updateInScopeSet :: FlattenEnv -> (InScopeSet -> InScopeSet) -> FlattenEnv updateInScopeSet env upd = env { fe_in_scope = upd (fe_in_scope env) } flattenTys :: InScopeSet -> [Type] -> [Type] -- See Note [Flattening type-family applications when matching instances] flattenTys in_scope tys = fst (flattenTysX in_scope tys) flattenTysX :: InScopeSet -> [Type] -> ([Type], TyVarEnv (TyCon, [Type])) -- See Note [Flattening type-family applications when matching instances] -- NB: the returned types mention the fresh type variables -- in the domain of the returned env, whose range includes -- the original type family applications. Building a substitution -- from this information and applying it would yield the original -- types -- almost. The problem is that the original type might -- have something like (forall b. F a b); the returned environment -- can't really sensibly refer to that b. So it may include a locally- -- bound tyvar in its range. Currently, the only usage of this env't -- checks whether there are any meta-variables in it -- (in GHC.Tc.Solver.Monad.mightEqualLater), so this is all OK. 
flattenTysX in_scope tys = let (env, result) = coreFlattenTys emptyTvSubstEnv (emptyFlattenEnv in_scope) tys in (result, build_env (fe_type_map env)) where build_env :: TypeMap (TyVar, TyCon, [Type]) -> TyVarEnv (TyCon, [Type]) build_env env_in = foldTM (\(tv, tc, tys) env_out -> extendVarEnv env_out tv (tc, tys)) env_in emptyVarEnv coreFlattenTys :: TvSubstEnv -> FlattenEnv -> [Type] -> (FlattenEnv, [Type]) coreFlattenTys subst = mapAccumL (coreFlattenTy subst) coreFlattenTy :: TvSubstEnv -> FlattenEnv -> Type -> (FlattenEnv, Type) coreFlattenTy subst = go where go env ty | Just ty' <- coreView ty = go env ty' go env (TyVarTy tv) | Just ty <- lookupVarEnv subst tv = (env, ty) | otherwise = let (env', ki) = go env (tyVarKind tv) in (env', mkTyVarTy $ setTyVarKind tv ki) go env (AppTy ty1 ty2) = let (env1, ty1') = go env ty1 (env2, ty2') = go env1 ty2 in (env2, AppTy ty1' ty2') go env (TyConApp tc tys) -- NB: Don't just check if isFamilyTyCon: this catches *data* families, -- which are generative and thus can be preserved during flattening | not (isGenerativeTyCon tc Nominal) = coreFlattenTyFamApp subst env tc tys | otherwise = let (env', tys') = coreFlattenTys subst env tys in (env', mkTyConApp tc tys') go env ty@(FunTy { ft_mult = mult, ft_arg = ty1, ft_res = ty2 }) = let (env1, ty1') = go env ty1 (env2, ty2') = go env1 ty2 (env3, mult') = go env2 mult in (env3, ty { ft_mult = mult', ft_arg = ty1', ft_res = ty2' }) go env (ForAllTy (Bndr tv vis) ty) = let (env1, subst', tv') = coreFlattenVarBndr subst env tv (env2, ty') = coreFlattenTy subst' env1 ty in (env2, ForAllTy (Bndr tv' vis) ty') go env ty@(LitTy {}) = (env, ty) go env (CastTy ty co) = let (env1, ty') = go env ty (env2, co') = coreFlattenCo subst env1 co in (env2, CastTy ty' co') go env (CoercionTy co) = let (env', co') = coreFlattenCo subst env co in (env', CoercionTy co') -- when flattening, we don't care about the contents of coercions. 
-- so, just return a fresh variable of the right (flattened) type coreFlattenCo :: TvSubstEnv -> FlattenEnv -> Coercion -> (FlattenEnv, Coercion) coreFlattenCo subst env co = (env2, mkCoVarCo covar) where (env1, kind') = coreFlattenTy subst env (coercionType co) covar = mkFlattenFreshCoVar (fe_in_scope env1) kind' -- Add the covar to the FlattenEnv's in-scope set. -- See Note [Flattening type-family applications when matching instances], wrinkle 2A. env2 = updateInScopeSet env1 (flip extendInScopeSet covar) coreFlattenVarBndr :: TvSubstEnv -> FlattenEnv -> TyCoVar -> (FlattenEnv, TvSubstEnv, TyVar) coreFlattenVarBndr subst env tv = (env2, subst', tv') where -- See Note [Flattening type-family applications when matching instances], wrinkle 2B. kind = varType tv (env1, kind') = coreFlattenTy subst env kind tv' = uniqAway (fe_in_scope env1) (setVarType tv kind') subst' = extendVarEnv subst tv (mkTyVarTy tv') env2 = updateInScopeSet env1 (flip extendInScopeSet tv') coreFlattenTyFamApp :: TvSubstEnv -> FlattenEnv -> TyCon -- type family tycon -> [Type] -- args, already flattened -> (FlattenEnv, Type) coreFlattenTyFamApp tv_subst env fam_tc fam_args = case lookupTypeMap type_map fam_ty of Just (tv, _, _) -> (env', mkAppTys (mkTyVarTy tv) leftover_args') Nothing -> let tyvar_name = mkFlattenFreshTyName fam_tc tv = uniqAway in_scope $ mkTyVar tyvar_name (typeKind fam_ty) ty' = mkAppTys (mkTyVarTy tv) leftover_args' env'' = env' { fe_type_map = extendTypeMap type_map fam_ty (tv, fam_tc, sat_fam_args) , fe_in_scope = extendInScopeSet in_scope tv } in (env'', ty') where arity = tyConArity fam_tc tcv_subst = Subst (fe_in_scope env) emptyIdSubstEnv tv_subst emptyVarEnv (sat_fam_args, leftover_args) = assert (arity <= length fam_args) $ splitAt arity fam_args -- Apply the substitution before looking up an application in the -- environment. See Note [Flattening type-family applications when matching instances], -- wrinkle 1. 
-- NB: substTys short-cuts the common case when the substitution is empty. sat_fam_args' = substTys tcv_subst sat_fam_args (env', leftover_args') = coreFlattenTys tv_subst env leftover_args -- `fam_tc` may be over-applied to `fam_args` (see -- Note [Flattening type-family applications when matching instances] -- wrinkle 3), so we split it into the arguments needed to saturate it -- (sat_fam_args') and the rest (leftover_args') fam_ty = mkTyConApp fam_tc sat_fam_args' FlattenEnv { fe_type_map = type_map , fe_in_scope = in_scope } = env' mkFlattenFreshTyName :: Uniquable a => a -> Name mkFlattenFreshTyName unq = mkSysTvName (getUnique unq) (fsLit "flt") mkFlattenFreshCoVar :: InScopeSet -> Kind -> CoVar mkFlattenFreshCoVar in_scope kind = let uniq = unsafeGetFreshLocalUnique in_scope name = mkSystemVarName uniq (fsLit "flc") in mkCoVar name kind ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/UsageEnv.hs0000644000000000000000000000671107346545000020654 0ustar0000000000000000module GHC.Core.UsageEnv ( Usage(..) , UsageEnv , addUE , addUsage , bottomUE , deleteUE , lookupUE , popUE , scaleUE , scaleUsage , supUE , supUEs , singleUsageUE , zeroUE ) where import Data.Foldable import GHC.Prelude import GHC.Core.Multiplicity import GHC.Types.Var import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable import GHC.Utils.Panic -- -- * Usage environments -- -- The typechecker and the linter output usage environments. See Note [Usages] -- in Multiplicity. Every absent name being considered to map to 'Zero' of -- 'Bottom' depending on a flag. See Note [Zero as a usage] in Multiplicity, see -- Note [Bottom as a usage] in Multiplicity. 
data Usage = Zero | Bottom | MUsage Mult instance Outputable Usage where ppr Zero = text "0" ppr Bottom = text "Bottom" ppr (MUsage x) = ppr x addUsage :: Usage -> Usage -> Usage addUsage Zero x = x addUsage x Zero = x addUsage Bottom x = x addUsage x Bottom = x addUsage (MUsage x) (MUsage y) = MUsage $ mkMultAdd x y scaleUsage :: Mult -> Usage -> Usage scaleUsage OneTy Bottom = Bottom scaleUsage _ Zero = Zero scaleUsage x Bottom = MUsage x scaleUsage x (MUsage y) = MUsage $ mkMultMul x y -- For now, we use extra multiplicity Bottom for empty case. data UsageEnv = UsageEnv !(NameEnv Mult) Bool -- | Record a single usage of an Id, i.e. {n: 1} -- Exception: We do not record external names (both GlobalIds and top-level LocalIds) -- because they're not relevant to linearity checking. singleUsageUE :: Id -> UsageEnv singleUsageUE x | isExternalName n = zeroUE | otherwise = UsageEnv (unitNameEnv n OneTy) False where n = getName x zeroUE, bottomUE :: UsageEnv zeroUE = UsageEnv emptyNameEnv False bottomUE = UsageEnv emptyNameEnv True addUE :: UsageEnv -> UsageEnv -> UsageEnv addUE (UsageEnv e1 b1) (UsageEnv e2 b2) = UsageEnv (plusNameEnv_C mkMultAdd e1 e2) (b1 || b2) scaleUE :: Mult -> UsageEnv -> UsageEnv scaleUE OneTy ue = ue scaleUE w (UsageEnv e _) = UsageEnv (mapNameEnv (mkMultMul w) e) False supUE :: UsageEnv -> UsageEnv -> UsageEnv supUE (UsageEnv e1 False) (UsageEnv e2 False) = UsageEnv (plusNameEnv_CD mkMultSup e1 ManyTy e2 ManyTy) False supUE (UsageEnv e1 b1) (UsageEnv e2 b2) = UsageEnv (plusNameEnv_CD2 combineUsage e1 e2) (b1 && b2) where combineUsage (Just x) (Just y) = mkMultSup x y combineUsage Nothing (Just x) | b1 = x | otherwise = ManyTy combineUsage (Just x) Nothing | b2 = x | otherwise = ManyTy combineUsage Nothing Nothing = pprPanic "supUE" (ppr e1 <+> ppr e2) -- Note: If you are changing this logic, check 'mkMultSup' in Multiplicity as well. 
supUEs :: [UsageEnv] -> UsageEnv supUEs = foldr supUE bottomUE deleteUE :: NamedThing n => UsageEnv -> n -> UsageEnv deleteUE (UsageEnv e b) x = UsageEnv (delFromNameEnv e (getName x)) b -- | |lookupUE x env| returns the multiplicity assigned to |x| in |env|, if |x| is not -- bound in |env|, then returns |Zero| or |Bottom|. lookupUE :: NamedThing n => UsageEnv -> n -> Usage lookupUE (UsageEnv e has_bottom) x = case lookupNameEnv e (getName x) of Just w -> MUsage w Nothing -> if has_bottom then Bottom else Zero popUE :: NamedThing n => UsageEnv -> n -> (Usage, UsageEnv) popUE ue x = (lookupUE ue x, deleteUE ue x) instance Outputable UsageEnv where ppr (UsageEnv ne b) = text "UsageEnv:" <+> ppr ne <+> ppr b ghc-lib-parser-9.12.2.20250421/compiler/GHC/Core/Utils.hs0000644000000000000000000034710207346545000020241 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Utility functions on @Core@ syntax -} -- | Commonly useful utilities for manipulating the Core language module GHC.Core.Utils ( -- * Constructing expressions mkCast, mkCastMCo, mkPiMCo, mkTick, mkTicks, mkTickNoHNF, tickHNFArgs, bindNonRec, needsCaseBinding, needsCaseBindingL, mkAltExpr, mkDefaultCase, mkSingleAltCase, -- * Taking expressions apart findDefault, addDefault, findAlt, isDefaultAlt, mergeAlts, mergeCaseAlts, trimConArgs, filterAlts, combineIdenticalAlts, refineDefaultAlt, scaleAltsBy, -- * Properties of expressions exprType, coreAltType, coreAltsType, mkLamType, mkLamTypes, mkFunctionType, exprIsTrivial, getIdFromTrivialExpr, getIdFromTrivialExpr_maybe, trivial_expr_fold, exprIsDupable, exprIsCheap, exprIsExpandable, exprIsCheapX, CheapAppFun, exprIsHNF, exprOkForSpeculation, exprOkToDiscard, exprOkForSpecEval, exprIsWorkFree, exprIsConLike, isCheapApp, isExpandableApp, isSaturatedConApp, exprIsTickedString, exprIsTickedString_maybe, exprIsTopLevelBindable, altsAreExhaustive, etaExpansionTick, -- * Equality cheapEqExpr, 
cheapEqExpr', diffBinds, -- * Manipulating data constructors and types exprToType, applyTypeToArgs, dataConRepInstPat, dataConRepFSInstPat, isEmptyTy, normSplitTyConApp_maybe, -- * Working with ticks stripTicksTop, stripTicksTopE, stripTicksTopT, stripTicksE, stripTicksT, -- * InScopeSet things which work over CoreBinds mkInScopeSetBndrs, extendInScopeSetBind, extendInScopeSetBndrs, -- * StaticPtr collectMakeStaticArgs, -- * Join points isJoinBind, -- * Tag inference mkStrictFieldSeqs, shouldStrictifyIdForCbv, shouldUseCbvForId, -- * unsafeEqualityProof isUnsafeEqualityCase, -- * Dumping stuff dumpIdInfoOfProgram ) where import GHC.Prelude import GHC.Platform import GHC.Core import GHC.Core.Ppr import GHC.Core.FVs( bindFreeVars ) import GHC.Core.DataCon import GHC.Core.Type as Type import GHC.Core.Predicate( isCoVarType ) import GHC.Core.FamInstEnv import GHC.Core.TyCo.Compare( eqType, eqTypeX ) import GHC.Core.Coercion import GHC.Core.Reduction import GHC.Core.TyCon import GHC.Core.Multiplicity import GHC.Builtin.Names ( makeStaticName, unsafeEqualityProofIdKey, unsafeReflDataConKey ) import GHC.Builtin.PrimOps import GHC.Types.Var import GHC.Types.SrcLoc import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name import GHC.Types.Literal import GHC.Types.Tickish import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Basic( Arity ) import GHC.Types.Unique import GHC.Types.Unique.Set import GHC.Types.Demand import GHC.Types.RepType (isZeroBitTy) import GHC.Data.FastString import GHC.Data.Maybe import GHC.Data.List.SetOps( minusList ) import GHC.Data.OrdList import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import Data.ByteString ( ByteString ) import Data.Function ( on ) import Data.List ( sort, sortBy, partition, zipWith4, mapAccumL ) import Data.Ord ( comparing ) import Control.Monad ( guard ) import qualified Data.Set as Set {- 
************************************************************************ * * \subsection{Find the type of a Core atom/expression} * * ************************************************************************ -} exprType :: HasDebugCallStack => CoreExpr -> Type -- ^ Recover the type of a well-typed Core expression. Fails when -- applied to the actual 'GHC.Core.Type' expression as it cannot -- really be said to have a type exprType (Var var) = idType var exprType (Lit lit) = literalType lit exprType (Coercion co) = coercionType co exprType (Let bind body) | NonRec tv rhs <- bind -- See Note [Type bindings] , Type ty <- rhs = substTyWithUnchecked [tv] [ty] (exprType body) | otherwise = exprType body exprType (Case _ _ ty _) = ty exprType (Cast _ co) = coercionRKind co exprType (Tick _ e) = exprType e exprType (Lam binder expr) = mkLamType binder (exprType expr) exprType e@(App _ _) = case collectArgs e of (fun, args) -> applyTypeToArgs (pprCoreExpr e) (exprType fun) args exprType (Type ty) = pprPanic "exprType" (ppr ty) coreAltType :: CoreAlt -> Type -- ^ Returns the type of the alternatives right hand side coreAltType alt@(Alt _ bs rhs) = case occCheckExpand bs rhs_ty of -- Note [Existential variables and silly type synonyms] Just ty -> ty Nothing -> pprPanic "coreAltType" (pprCoreAlt alt $$ ppr rhs_ty) where rhs_ty = exprType rhs coreAltsType :: [CoreAlt] -> Type -- ^ Returns the type of the first alternative, which should be the same as for all alternatives coreAltsType (alt:_) = coreAltType alt coreAltsType [] = panic "coreAltsType" mkLamType :: HasDebugCallStack => Var -> Type -> Type -- ^ Makes a @(->)@ type or an implicit forall type, depending -- on whether it is given a type variable or a term variable. -- This is used, for example, when producing the type of a lambda. 
-- mkLamTypes :: [Var] -> Type -> Type -- ^ 'mkLamType' for multiple type or value arguments mkLamType v body_ty | isTyVar v = mkForAllTy (Bndr v coreTyLamForAllTyFlag) body_ty -- coreTyLamForAllTyFlag: see Note [Required foralls in Core] -- in GHC.Core.TyCo.Rep | isCoVar v , v `elemVarSet` tyCoVarsOfType body_ty -- See Note [Unused coercion variable in ForAllTy] in GHC.Core.TyCo.Rep = mkForAllTy (Bndr v coreTyLamForAllTyFlag) body_ty | otherwise = mkFunctionType (varMult v) (varType v) body_ty mkLamTypes vs ty = foldr mkLamType ty vs {- Note [Type bindings] ~~~~~~~~~~~~~~~~~~~~ Core does allow type bindings, although such bindings are not much used, except in the output of the desugarer. Example: let a = Int in (\x:a. x) Given this, exprType must be careful to substitute 'a' in the result type (#8522). Note [Existential variables and silly type synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = forall a. T (Funny a) type Funny a = Bool f :: T -> Bool f (T x) = x Now, the type of 'x' is (Funny a), where 'a' is existentially quantified. That means that 'exprType' and 'coreAltsType' may give a result that *appears* to mention an out-of-scope type variable. See #3409 for a more real-world example. Various possibilities suggest themselves: - Ignore the problem, and make Lint not complain about such variables - Expand all type synonyms (or at least all those that discard arguments) This is tricky, because at least for top-level things we want to retain the type the user originally specified. - Expand synonyms on the fly, when the problem arises. That is what we are doing here. It's not too expensive, I think. Note that there might be existentially quantified coercion variables, too. -} applyTypeToArgs :: HasDebugCallStack => SDoc -> Type -> [CoreExpr] -> Type -- ^ Determines the type resulting from applying an expression with given type --- to given argument expressions. 
-- The first argument is just for debugging, and gives some context applyTypeToArgs pp_e op_ty args = go op_ty args where go op_ty [] = op_ty go op_ty (Type ty : args) = go_ty_args op_ty [ty] args go op_ty (Coercion co : args) = go_ty_args op_ty [mkCoercionTy co] args go op_ty (_ : args) | Just (_, _, _, res_ty) <- splitFunTy_maybe op_ty = go res_ty args go _ args = pprPanic "applyTypeToArgs" (panic_msg args) -- go_ty_args: accumulate type arguments so we can -- instantiate all at once with piResultTys go_ty_args op_ty rev_tys (Type ty : args) = go_ty_args op_ty (ty:rev_tys) args go_ty_args op_ty rev_tys (Coercion co : args) = go_ty_args op_ty (mkCoercionTy co : rev_tys) args go_ty_args op_ty rev_tys args = go (piResultTys op_ty (reverse rev_tys)) args panic_msg as = vcat [ text "Expression:" <+> pp_e , text "Type:" <+> ppr op_ty , text "Args:" <+> ppr args , text "Args':" <+> ppr as ] mkCastMCo :: CoreExpr -> MCoercionR -> CoreExpr mkCastMCo e MRefl = e mkCastMCo e (MCo co) = Cast e co -- We are careful to use (MCo co) only when co is not reflexive -- Hence (Cast e co) rather than (mkCast e co) mkPiMCo :: Var -> MCoercionR -> MCoercionR mkPiMCo _ MRefl = MRefl mkPiMCo v (MCo co) = MCo (mkPiCo Representational v co) {- ********************************************************************* * * Casts * * ********************************************************************* -} -- | Wrap the given expression in the coercion safely, dropping -- identity coercions and coalescing nested coercions mkCast :: HasDebugCallStack => CoreExpr -> CoercionR -> CoreExpr mkCast expr co = assertPpr (coercionRole co == Representational) (text "coercion" <+> ppr co <+> text "passed to mkCast" <+> ppr expr <+> text "has wrong role" <+> ppr (coercionRole co)) $ warnPprTrace (not (coercionLKind co `eqType` exprType expr)) "Trying to coerce" (text "(" <> ppr expr $$ text "::" <+> ppr (exprType expr) <> text ")" $$ ppr co $$ ppr (coercionType co) $$ callStackDoc) $ case expr of Cast expr co2 
-> mkCast expr (mkTransCo co2 co) Tick t expr -> Tick t (mkCast expr co) Coercion e_co | isCoVarType (coercionRKind co) -- The guard here checks that g has a (~#) on both sides, -- otherwise decomposeCo fails. Can in principle happen -- with unsafeCoerce -> Coercion (mkCoCast e_co co) _ | isReflCo co -> expr | otherwise -> Cast expr co {- ********************************************************************* * * Attaching ticks * * ********************************************************************* -} -- | Wraps the given expression in the source annotation, dropping the -- annotation if possible. mkTick :: CoreTickish -> CoreExpr -> CoreExpr mkTick t orig_expr = mkTick' id id orig_expr where -- Some ticks (cost-centres) can be split in two, with the -- non-counting part having laxer placement properties. canSplit = tickishCanSplit t && tickishPlace (mkNoCount t) /= tickishPlace t -- mkTick' handles floating of ticks *into* the expression. -- In this function, `top` is applied after adding the tick, and `rest` before. -- This will result in applications that look like (top $ Tick t $ rest expr). -- If we want to push the tick deeper, we pre-compose `top` with a function -- adding the tick. mkTick' :: (CoreExpr -> CoreExpr) -- apply after adding tick (float through) -> (CoreExpr -> CoreExpr) -- apply before adding tick (float with) -> CoreExpr -- current expression -> CoreExpr mkTick' top rest expr = case expr of -- Cost centre ticks should never be reordered relative to each -- other. Therefore we can stop whenever two collide. Tick t2 e | ProfNote{} <- t2, ProfNote{} <- t -> top $ Tick t $ rest expr -- Otherwise we assume that ticks of different placements float -- through each other. | tickishPlace t2 /= tickishPlace t -> mkTick' (top . Tick t2) rest e -- For annotations this is where we make sure to not introduce -- redundant ticks. | tickishContains t t2 -> mkTick' top rest e | tickishContains t2 t -> orig_expr | otherwise -> mkTick' top (rest . 
Tick t2) e -- Ticks don't care about types, so we just float all ticks -- through them. Note that it's not enough to check for these -- cases top-level. While mkTick will never produce Core with type -- expressions below ticks, such constructs can be the result of -- unfoldings. We therefore make an effort to put everything into -- the right place no matter what we start with. Cast e co -> mkTick' (top . flip Cast co) rest e Coercion co -> Coercion co Lam x e -- Always float through type lambdas. Even for non-type lambdas, -- floating is allowed for all but the most strict placement rule. | not (isRuntimeVar x) || tickishPlace t /= PlaceRuntime -> mkTick' (top . Lam x) rest e -- If it is both counting and scoped, we split the tick into its -- two components, often allowing us to keep the counting tick on -- the outside of the lambda and push the scoped tick inside. -- The point of this is that the counting tick can probably be -- floated, and the lambda may then be in a position to be -- beta-reduced. | canSplit -> top $ Tick (mkNoScope t) $ rest $ Lam x $ mkTick (mkNoCount t) e App f arg -- Always float through type applications. | not (isRuntimeArg arg) -> mkTick' (top . flip App arg) rest f -- We can also float through constructor applications, placement -- permitting. Again we can split. | isSaturatedConApp expr && (tickishPlace t==PlaceCostCentre || canSplit) -> if tickishPlace t == PlaceCostCentre then top $ rest $ tickHNFArgs t expr else top $ Tick (mkNoScope t) $ rest $ tickHNFArgs (mkNoCount t) expr Var x | notFunction && tickishPlace t == PlaceCostCentre -> orig_expr | notFunction && canSplit -> top $ Tick (mkNoScope t) $ rest expr where -- SCCs can be eliminated on variables provided the variable -- is not a function. In these cases the SCC makes no difference: -- the cost of evaluating the variable will be attributed to its -- definition site. 
When the variable refers to a function, however, -- an SCC annotation on the variable affects the cost-centre stack -- when the function is called, so we must retain those. notFunction = not (isFunTy (idType x)) Lit{} | tickishPlace t == PlaceCostCentre -> orig_expr -- Catch-all: Annotate where we stand _any -> top $ Tick t $ rest expr mkTicks :: [CoreTickish] -> CoreExpr -> CoreExpr mkTicks ticks expr = foldr mkTick expr ticks isSaturatedConApp :: CoreExpr -> Bool isSaturatedConApp e = go e [] where go (App f a) as = go f (a:as) go (Var fun) args = isConLikeId fun && idArity fun == valArgCount args go (Cast f _) as = go f as go _ _ = False mkTickNoHNF :: CoreTickish -> CoreExpr -> CoreExpr mkTickNoHNF t e | exprIsHNF e = tickHNFArgs t e | otherwise = mkTick t e -- push a tick into the arguments of a HNF (call or constructor app) tickHNFArgs :: CoreTickish -> CoreExpr -> CoreExpr tickHNFArgs t e = push t e where push t (App f (Type u)) = App (push t f) (Type u) push t (App f arg) = App (push t f) (mkTick t arg) push _t e = e -- | Strip ticks satisfying a predicate from top of an expression stripTicksTop :: (CoreTickish -> Bool) -> Expr b -> ([CoreTickish], Expr b) stripTicksTop p = go [] where go ts (Tick t e) | p t = go (t:ts) e go ts other = (reverse ts, other) -- | Strip ticks satisfying a predicate from top of an expression, -- returning the remaining expression stripTicksTopE :: (CoreTickish -> Bool) -> Expr b -> Expr b stripTicksTopE p = go where go (Tick t e) | p t = go e go other = other -- | Strip ticks satisfying a predicate from top of an expression, -- returning the ticks stripTicksTopT :: (CoreTickish -> Bool) -> Expr b -> [CoreTickish] stripTicksTopT p = go [] where go ts (Tick t e) | p t = go (t:ts) e go ts _ = ts -- | Completely strip ticks satisfying a predicate from an -- expression. Note this is O(n) in the size of the expression! 

stripTicksE :: (CoreTickish -> Bool) -> Expr b -> Expr b
stripTicksE p expr = go expr
  where go (App e a)        = App (go e) (go a)
        go (Lam b e)        = Lam b (go e)
        go (Let b e)        = Let (go_bs b) (go e)
        go (Case e b t as)  = Case (go e) b t (map go_a as)
        go (Cast e c)       = Cast (go e) c
        go (Tick t e)
          | p t             = go e
          | otherwise       = Tick t (go e)
        go other            = other
        -- Recurse into binding groups and alternatives too
        go_bs (NonRec b e)  = NonRec b (go e)
        go_bs (Rec bs)      = Rec (map go_b bs)
        go_b (b, e)         = (b, go e)
        go_a (Alt c bs e)   = Alt c bs (go e)

-- | Collect every tick satisfying the predicate, anywhere in the
-- expression.  Like 'stripTicksE', this is O(n) in the size of the
-- expression.
stripTicksT :: (CoreTickish -> Bool) -> Expr b -> [CoreTickish]
stripTicksT p expr = fromOL $ go expr
  where go (App e a)       = go e `appOL` go a
        go (Lam _ e)       = go e
        go (Let b e)       = go_bs b `appOL` go e
        go (Case e _ _ as) = go e `appOL` concatOL (map go_a as)
        go (Cast e _)      = go e
        go (Tick t e)
          | p t            = t `consOL` go e
          | otherwise      = go e
        go _               = nilOL
        go_bs (NonRec _ e) = go e
        go_bs (Rec bs)     = concatOL (map go_b bs)
        go_b (_, e)        = go e
        go_a (Alt _ _ e)   = go e

{-
************************************************************************
*                                                                      *
\subsection{Other expression construction}
*                                                                      *
************************************************************************
-}

bindNonRec :: HasDebugCallStack => Id -> CoreExpr -> CoreExpr -> CoreExpr
-- ^ @bindNonRec x r b@ produces either:
--
-- > let x = r in b
--
-- or:
--
-- > case r of x { _DEFAULT_ -> b }
--
-- depending on whether we have to use a @case@ or @let@
-- binding for the expression (see 'needsCaseBinding').
-- It's used by the desugarer to avoid building bindings
-- that give Core Lint a heart attack, although actually
-- the simplifier deals with them perfectly well.
See -- also 'GHC.Core.Make.mkCoreLet' bindNonRec bndr rhs body | isTyVar bndr = let_bind | isCoVar bndr = if isCoArg rhs then let_bind {- See Note [Binding coercions] -} else case_bind | isJoinId bndr = let_bind | needsCaseBinding (idType bndr) rhs = case_bind | otherwise = let_bind where case_bind = mkDefaultCase rhs bndr body let_bind = Let (NonRec bndr rhs) body -- | `needsCaseBinding` tests whether we have to use a @case@ rather than @let@ -- binding for this expression as per the invariants of 'CoreExpr': see -- "GHC.Core#let_can_float_invariant" -- (needsCaseBinding ty rhs) requires that `ty` has a well-defined levity, else -- `typeLevity ty` will fail; but that should be the case because -- `needsCaseBinding` is only called once typechecking is complete needsCaseBinding :: HasDebugCallStack => Type -> CoreExpr -> Bool needsCaseBinding ty rhs = needsCaseBindingL (typeLevity ty) rhs needsCaseBindingL :: Levity -> CoreExpr -> Bool -- True <=> make a case expression instead of a let -- These can arise either from the desugarer, -- or from beta reductions: (\x.e) (x +# y) needsCaseBindingL Lifted _rhs = False needsCaseBindingL Unlifted rhs = not (exprOkForSpeculation rhs) mkAltExpr :: AltCon -- ^ Case alternative constructor -> [CoreBndr] -- ^ Things bound by the pattern match -> [Type] -- ^ The type arguments to the case alternative -> CoreExpr -- ^ This guy constructs the value that the scrutinee must have -- given that you are in one particular branch of a case mkAltExpr (DataAlt con) args inst_tys = mkConApp con (map Type inst_tys ++ varsToCoreExprs args) mkAltExpr (LitAlt lit) [] [] = Lit lit mkAltExpr (LitAlt _) _ _ = panic "mkAltExpr LitAlt" mkAltExpr DEFAULT _ _ = panic "mkAltExpr DEFAULT" mkDefaultCase :: CoreExpr -> Id -> CoreExpr -> CoreExpr -- Make (case x of y { DEFAULT -> e } mkDefaultCase scrut case_bndr body = Case scrut case_bndr (exprType body) [Alt DEFAULT [] body] mkSingleAltCase :: CoreExpr -> Id -> AltCon -> [Var] -> CoreExpr -> CoreExpr -- 
Use this function if possible, when building a case, -- because it ensures that the type on the Case itself -- doesn't mention variables bound by the case -- See Note [Care with the type of a case expression] mkSingleAltCase scrut case_bndr con bndrs body = Case scrut case_bndr case_ty [Alt con bndrs body] where body_ty = exprType body case_ty -- See Note [Care with the type of a case expression] | Just body_ty' <- occCheckExpand bndrs body_ty = body_ty' | otherwise = pprPanic "mkSingleAltCase" (ppr scrut $$ ppr bndrs $$ ppr body_ty) {- Note [Care with the type of a case expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a phantom type synonym type S a = Int and we want to form the case expression case x of K (a::*) -> (e :: S a) We must not make the type field of the case-expression (S a) because 'a' isn't in scope. Hence the call to occCheckExpand. This caused issue #17056. NB: this situation can only arise with type synonyms, which can falsely "mention" type variables that aren't "really there", and which can be eliminated by expanding the synonym. Note [Binding coercions] ~~~~~~~~~~~~~~~~~~~~~~~~ Consider binding a CoVar, c = e. Then, we must satisfy Note [Core type and coercion invariant] in GHC.Core, which allows only (Coercion co) on the RHS. ************************************************************************ * * Operations over case alternatives * * ************************************************************************ The default alternative must be first, if it exists at all. This makes it easy to find, though it makes matching marginally harder. 
-}

-- | Extract the default case alternative
findDefault :: [Alt b] -> ([Alt b], Maybe (Expr b))
findDefault (Alt DEFAULT args rhs : alts) = assert (null args) (alts, Just rhs)
findDefault alts                          = (alts, Nothing)

-- | Re-attach a default alternative (if any) at the front of the
-- alternative list; partial inverse of 'findDefault'
addDefault :: [Alt b] -> Maybe (Expr b) -> [Alt b]
addDefault alts Nothing    = alts
addDefault alts (Just rhs) = Alt DEFAULT [] rhs : alts

isDefaultAlt :: Alt b -> Bool
isDefaultAlt (Alt DEFAULT _ _) = True
isDefaultAlt _                 = False

-- | Find the case alternative corresponding to a particular
-- constructor; returns Nothing if no such alternative exists
-- (a Nothing result is legitimate: see Note [Unreachable code])
findAlt :: AltCon -> [Alt b] -> Maybe (Alt b)
    -- A "Nothing" result *is* legitimate
    -- See Note [Unreachable code]
findAlt con alts
  = case alts of
      (deflt@(Alt DEFAULT _ _):alts) -> go alts (Just deflt)
      _                              -> go alts Nothing
  where
    go []                          deflt = deflt
    go (alt@(Alt con1 _ _) : alts) deflt
      = case con `cmpAltCon` con1 of
          LT -> deflt   -- Missed it already; the alts are in increasing order
          EQ -> Just alt
          GT -> assert (not (con1 == DEFAULT)) $ go alts deflt

{- Note [Unreachable code]
~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible (although unusual) for GHC to find a case expression
that cannot match.  For example:

     data Col = Red | Green | Blue
     x = Red
     f v = case x of
              Red -> ...
              _ -> ...(case x of { Green -> e1; Blue -> e2 })...

Suppose that for some silly reason, x isn't substituted in the case
expression.  (Perhaps there's a NOINLINE on it, or profiling SCC stuff
gets in the way; cf #3118.)  Then the full-laziness pass might produce
this

     x = Red
     lvl = case x of { Green -> e1; Blue -> e2 }
     f v = case x of
             Red -> ...
             _ -> ...lvl...

Now if x gets inlined, we won't be able to find a matching alternative
for 'Red'.  That's because 'lvl' is unreachable.  So rather than crashing
we generate (error "Inaccessible alternative").

Similar things can happen (augmented by GADTs) when the Simplifier
filters down the matching alternatives in GHC.Core.Opt.Simplify.rebuildCase.
-}

---------------------------------
mergeCaseAlts :: Id -> [CoreAlt] -> Maybe ([CoreBind], [CoreAlt])
-- See Note [Merge Nested Cases]
-- If the DEFAULT alternative of the outer case itself scrutinises the
-- same variable (or is a small tagToEnum# application of it), merge the
-- two cases into one, returning any bindings floated out of the DEFAULT
-- rhs together with the merged alternative list.
mergeCaseAlts outer_bndr (Alt DEFAULT _ deflt_rhs : outer_alts)
  | Just (joins, inner_alts) <- go deflt_rhs
  = Just (joins, mergeAlts outer_alts inner_alts)
        -- NB: mergeAlts gives priority to the left
        --      case x of
        --        A -> e1
        --        DEFAULT -> case x of
        --                      A -> e2
        --                      B -> e3
        -- When we merge, we must ensure that e1 takes
        -- precedence over e2 as the value for A!
  where
    go :: CoreExpr -> Maybe ([CoreBind], [CoreAlt])
    -- Whizzo: we can merge!
    go (Case (Var inner_scrut_var) inner_bndr _ inner_alts)
      | inner_scrut_var == outer_bndr
      , not (inner_bndr == outer_bndr)   -- Avoid shadowing
      , let wrap_let rhs' = Let (NonRec inner_bndr (Var outer_bndr)) rhs'
                -- inner_bndr is never dead!  It's the scrutinee!
                -- The let is OK even for unboxed binders
                -- See Note [Merge Nested Cases] wrinkle (MC2)
            do_one (Alt con bndrs rhs)
              | any (== outer_bndr) bndrs = Nothing
              | otherwise                 = Just (Alt con bndrs (wrap_let rhs))
      = do { alts' <- mapM do_one inner_alts
           ; return ([], alts') }

    -- Deal with tagToEnum#  See Note [Merge Nested Cases] wrinkle (MC3)
    go (App (App (Var f) (Type type_arg)) (Var v))
      | v == outer_bndr
      , Just TagToEnumOp <- isPrimOpId_maybe f
      , Just tc <- tyConAppTyCon_maybe type_arg
      , Just (dc1:dcs) <- tyConDataCons_maybe tc  -- At least one data constructor
      , dcs `lengthAtMost` 3                      -- Arbitrary
      = return ( [], mk_alts dc1 dcs)
      where
        mk_lit dc = mkLitIntUnchecked $ toInteger $ dataConTagZ dc
        mk_rhs dc = Var (dataConWorkId dc)
        mk_alts dc1 dcs = Alt DEFAULT [] (mk_rhs dc1)
                        : [Alt (LitAlt (mk_lit dc)) [] (mk_rhs dc) | dc <- dcs]

    -- Float out let/join bindings
    -- See Note [Merge Nested Cases] wrinkle (MC4)
    go (Let bind body)
      | null outer_alts || isJoinBind bind
      = do { (joins, alts) <- go body
             -- Check for capture; but only if we could otherwise do a merge
           ; let capture = outer_bndr `elem` bindersOf bind
                        || outer_bndr `elemVarSet` bindFreeVars bind
           ; guard (not capture)
           ; return (bind:joins, alts ) }
      | otherwise
      = Nothing

    -- We don't want ticks to get in the way; just push them inwards.
    -- (This happens when you add SourceTicks e.g. GHC.Num.Integer.integerLt#)
    go (Tick t body)
      = do { (joins, alts) <- go body
           ; return (joins, [Alt con bs (Tick t rhs) | Alt con bs rhs <- alts]) }

    go _ = Nothing

mergeCaseAlts _ _ = Nothing

---------------------------------
mergeAlts :: [Alt a] -> [Alt a] -> [Alt a]
-- ^ Merge alternatives preserving order; alternatives in
-- the first argument shadow ones in the second
mergeAlts [] as2 = as2
mergeAlts as1 [] = as1
mergeAlts (a1:as1) (a2:as2)
  = case a1 `cmpAlt` a2 of
        LT -> a1 : mergeAlts as1      (a2:as2)
        EQ -> a1 : mergeAlts as1      as2       -- Discard a2
        GT -> a2 : mergeAlts (a1:as1) as2

---------------------------------
trimConArgs :: AltCon -> [CoreArg] -> [CoreArg]
-- ^ Given:
--
-- > case (C a b x y) of
-- >        C b x y -> ...
--
-- We want to drop the leading type argument of the scrutinee
-- leaving the arguments to match against the pattern
trimConArgs DEFAULT      args = assert (null args) []
trimConArgs (LitAlt _)   args = assert (null args) []
trimConArgs (DataAlt dc) args = dropList (dataConUnivTyVars dc) args

filterAlts :: TyCon    -- ^ Type constructor of scrutinee's type (used to prune possibilities)
           -> [Type]   -- ^ And its type arguments
           -> [AltCon] -- ^ 'imposs_cons': constructors known to be impossible due to the form of the scrutinee
           -> [Alt b]  -- ^ Alternatives
           -> ([AltCon], [Alt b])
-- Returns:
--  1. Constructors that will never be encountered by the
--     *default* case (if any).  A superset of imposs_cons
--  2. The new alternatives, trimmed by
--       a) remove imposs_cons
--       b) remove constructors which can't match because of GADTs
--
-- NB: the final list of alternatives may be empty:
-- This is a tricky corner case.  If the data type has no constructors,
-- which GHC allows, or if the imposs_cons covers all constructors (after taking
-- account of GADTs), then no alternatives can match.
--
-- If callers need to preserve the invariant that there is always at least one branch
-- in a "case" statement then they will need to manually add a dummy case branch that just
-- calls "error" or similar.
filterAlts _tycon inst_tys imposs_cons alts
  = imposs_deflt_cons `seqList` (imposs_deflt_cons, addDefault trimmed_alts maybe_deflt)
    -- Very important to force `imposs_deflt_cons` as that forces `alt_cons`, which
    -- is essentially as retaining `alts_wo_default` or any `Alt b` for that matter
    -- leads to a huge space leak (see #22102 and !8896)
  where
    (alts_wo_default, maybe_deflt) = findDefault alts
    alt_cons     = [con | Alt con _ _ <- alts_wo_default]
    trimmed_alts = filterOut (impossible_alt inst_tys) alts_wo_default

    imposs_cons_set   = Set.fromList imposs_cons
    imposs_deflt_cons = imposs_cons ++ filterOut (`Set.member` imposs_cons_set) alt_cons
         -- "imposs_deflt_cons" are handled
         --   EITHER by the context,
         --   OR by a non-DEFAULT branch in this case expression.

    -- An alternative is impossible if its constructor was ruled out by the
    -- caller, or (for a DataAlt) if GADT type refinement shows it cannot match
    impossible_alt :: [Type] -> Alt b -> Bool
    impossible_alt _ (Alt con _ _) | con `Set.member` imposs_cons_set = True
    impossible_alt inst_tys (Alt (DataAlt con) _ _) = dataConCannotMatch inst_tys con
    impossible_alt _  _                             = False

-- | Refine the default alternative to a 'DataAlt', if there is a unique way to do so.
-- See Note [Refine DEFAULT case alternatives]
refineDefaultAlt :: [Unique]  -- ^ Uniques for constructing new binders
                 -> Mult      -- ^ Multiplicity annotation of the case expression
                 -> TyCon     -- ^ Type constructor of scrutinee's type
                 -> [Type]    -- ^ Type arguments of scrutinee's type
                 -> [AltCon]  -- ^ Constructors that cannot match the DEFAULT (if any)
                 -> [CoreAlt]
                 -> (Bool, [CoreAlt])  -- ^ 'True', if a default alt was replaced with a 'DataAlt'
refineDefaultAlt us mult tycon tys imposs_deflt_cons all_alts
  | Alt DEFAULT _ rhs : rest_alts <- all_alts
  , isAlgTyCon tycon    -- It's a data type, tuple, or unboxed tuples.
  
, not (isNewTyCon tycon) -- Exception 1 in Note [Refine DEFAULT case alternatives] , not (isTypeDataTyCon tycon) -- Exception 2 in Note [Refine DEFAULT case alternatives] , Just all_cons <- tyConDataCons_maybe tycon , let imposs_data_cons = mkUniqSet [con | DataAlt con <- imposs_deflt_cons] -- We now know it's a data type, so we can use -- UniqSet rather than Set (more efficient) impossible con = con `elementOfUniqSet` imposs_data_cons || dataConCannotMatch tys con = case filterOut impossible all_cons of -- Eliminate the default alternative -- altogether if it can't match: [] -> (False, rest_alts) -- It matches exactly one constructor, so fill it in: [con] -> (True, mergeAlts rest_alts [Alt (DataAlt con) (ex_tvs ++ arg_ids) rhs]) -- We need the mergeAlts to keep the alternatives in the right order where (ex_tvs, arg_ids) = dataConRepInstPat us mult con tys -- It matches more than one, so do nothing _ -> (False, all_alts) | debugIsOn, isAlgTyCon tycon, null (tyConDataCons tycon) , not (isFamilyTyCon tycon || isAbstractTyCon tycon) -- Check for no data constructors -- This can legitimately happen for abstract types and type families, -- so don't report that = (False, all_alts) | otherwise -- The common case = (False, all_alts) {- Note [Merge Nested Cases] ~~~~~~~~~~~~~~~~~~~~~~~~~ case e of b { ==> case e of b { p1 -> rhs1 p1 -> rhs1 ... ... pm -> rhsm pm -> rhsm _ -> case b of b' { pn -> let b'=b in rhsn pn -> rhsn ... ... po -> let b'=b in rhso po -> rhso _ -> let b'=b in rhsd _ -> rhsd } which merges two cases in one case when -- the default alternative of the outer case scrutinises the same variable as the outer case. This transformation is called Case Merging. It avoids that the same variable is scrutinised multiple times. Wrinkles (MC1) Historical note. I tried making `mergeCaseAlts` "looks though" an inner single-alternative case-on-variable. For example case x of { ...outer-alts... 

                  DEFAULT -> case y of (a,b) ->
                             case x of { A -> rhs1; B -> rhs2 }
  ===>
      case x of
        ...outer-alts...
        A -> case y of (a,b) -> rhs1
        B -> case y of (a,b) -> rhs2

  This duplicates the `case y` but it removes the case x; so it is a win
  in terms of execution time (combining the cases on x) at the cost of
  perhaps duplicating the `case y`.  A case in point is integerEq, which
  is defined thus
     integerEq :: Integer -> Integer -> Bool
     integerEq !x !y = isTrue# (integerEq# x y)
  which becomes
     integerEq
       = \ (x :: Integer) (y_aAL :: Integer) ->
           case x of x1 { __DEFAULT ->
           case y of y1 { __DEFAULT ->
           case x1 of {
             IS x2 -> case y1 of {
                        __DEFAULT -> GHC.Types.False;
                        IS y2 -> tagToEnum# @Bool (==# x2 y2) };
             IP x2 -> ...
             IN x2 -> ...
  We want to merge the outer `case x` with the inner `case x1`.  But (a)
  this is all a bit dubious: see #24251, and (b) it is hard to combine
  with (MC4)

  So I'm not doing this any more.  If we want to do it, we'll handle it
  separately: #24251.  End of historical note

(MC2) The auxiliary bindings b'=b are annoying, because they force another
  simplifier pass, but there seems no easy way to avoid them.  See
  Note [Which transformations are innocuous] in GHC.Core.Opt.Stats.

(MC3) Consider
     case f x of (r::Int#) -> tagToEnum# r :: Bool
  `mergeCaseAlts` has a special case to treat this as if it was
     case f x of r -> case r of { 0# -> False; 1# -> True }
  which can be merged to
     case f x of { 0# -> False; 1# -> True }
  To see why this is important, return to
     case f x of (r::Int#) -> tagToEnum# r :: Bool
  and suppose `f` inlines to a case expression.  Then we get
     let $j r = tagToEnum# r
     case .. of { .. jump $j 0#; ...jump $j 1# ... }
  Now if the entire expression is consumed by another case-expression, that
  outer case will only see (tagToEnum# r) which it can't do much with.
  Whereas the result of the above case-merge generates much better code:
  no branching on Int#

(MC4) Consider
    case f x of r -> join $j y = ... in
                     case r of { ...alts ...
}
  This is pretty common, and it is a pity for it to defeat the case-merge
  transformation; and it makes the optimiser fragile to inlining decisions
  for join points.  So `mergeCaseAlts` floats out any join points.

  It doesn't float out non-join-points unless the /outer/ case has just one
  alternative; doing so would risk more allocation

(MC5) See Note [Cascading case merge]

See also Note [Example of case-merging and caseRules] in
GHC.Core.Opt.Simplify.Utils

Note [Cascading case merge]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Case merging should cascade in one sweep, because the Simplifier tries it
/after/ simplifying (and hence case-merging) the inner case.  For example

      case e of a {
        DEFAULT -> case a of b
                      DEFAULT -> case b of c {
                                     DEFAULT -> e
                                     A -> ea
                      B -> eb
        C -> ec
==> {simplify inner case}
      case e of a {
        DEFAULT -> case a of b
                      DEFAULT -> let c = b in e
                      A -> let c = b in ea
                      B -> eb
        C -> ec
==> {case-merge on outer case}
      case e of a {
        DEFAULT -> let b = a in let c = b in e
        A -> let b = a in let c = b in ea
        B -> let b = a in eb
        C -> ec

However here's a tricky case that we still don't catch, and I don't see
how to catch it in one pass:

  case x of c1 { I# a1 ->
  case a1 of c2 ->
    0 -> ...
    DEFAULT -> case x of c3 { I# a2 ->
               case a2 of ...

After occurrence analysis (and its binder-swap) we get this

  case x of c1 { I# a1 ->
  let x = c1 in     -- Binder-swap addition
  case a1 of c2 ->
    0 -> ...
    DEFAULT -> case x of c3 { I# a2 ->
               case a2 of ...

When we simplify the inner case x, we'll see that x=c1=I# a1.  So we'll
bind a2 to a1, and get

  case x of c1 { I# a1 ->
  case a1 of c2 ->
    0 -> ...
    DEFAULT -> case a1 of ...

This is correct, but we can't do a case merge in this sweep because
c2 /= a1.  Reason: the binding c1=I# a1 went inwards without getting
changed to c1=I# c2.

I don't think this is worth fixing, even if I knew how.  It'll all come
out in the next pass anyway.

Note [Refine DEFAULT case alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
refineDefaultAlt replaces the DEFAULT alt with a constructor if there
is one possible value it could be.

The simplest example being
    foo :: () -> ()
    foo x = case x of !_ -> ()
which rewrites to
    foo :: () -> ()
    foo x = case x of () -> ()

There are two reasons in general why replacing a DEFAULT alternative
with a specific constructor is desirable.

1. We can simplify inner expressions.  For example

       data Foo = Foo1 ()

       test :: Foo -> ()
       test x = case x of
                  DEFAULT -> mid (case x of
                                    Foo1 x1 -> x1)

   refineDefaultAlt fills in the DEFAULT here with `Foo1 ip1` and then
   x becomes bound to `Foo1 ip1` so is inlined into the other case
   which causes the KnownBranch optimisation to kick in.  If we don't
   refine DEFAULT to `Foo1 ip1`, we are left with both case expressions.

2. combineIdenticalAlts does a better job.  For example (Simon Jacobi)

       data D = C0 | C1 | C2

       case e of
         DEFAULT -> e0
         C0      -> e1
         C1      -> e1

   When we apply combineIdenticalAlts to this expression, it can't
   combine the alts for C0 and C1, as we already have a default case.
   But if we apply refineDefaultAlt first, we get
       case e of
         C0 -> e1
         C1 -> e1
         C2 -> e0
   and combineIdenticalAlts can turn that into
       case e of
         DEFAULT -> e1
         C2      -> e0

   It isn't obvious that refineDefaultAlt does this but if you look
   at its one call site in GHC.Core.Opt.Simplify.Utils then the
   `imposs_deflt_cons` argument is populated with constructors which
   are matched elsewhere.

There are two exceptions where we avoid refining a DEFAULT case:

* Exception 1: Newtypes

  We can have a newtype, if we are just doing an eval:

    case x of { DEFAULT -> e }

  And we don't want to fill in a default for them!

* Exception 2: `type data` declarations

  The data constructors for a `type data` declaration (see
  Note [Type data declarations] in GHC.Rename.Module) do not exist at the
  value level.  Nevertheless, it is possible to strictly evaluate a value
  whose type is a `type data` declaration.
Test case type-data/should_compile/T2294b.hs contains an example: type data T a where A :: T Int f :: T a -> () f !x = () We want to generate the following Core for f: f = \(@a) (x :: T a) -> case x of __DEFAULT -> () Namely, we do _not_ want to match on `A`, as it doesn't exist at the value level! See wrinkle (W2b) in Note [Type data declarations] in GHC.Rename.Module Note [Combine identical alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If several alternatives are identical, merge them into a single DEFAULT alternative. I've occasionally seen this making a big difference: case e of =====> case e of C _ -> f x D v -> ....v.... D v -> ....v.... DEFAULT -> f x DEFAULT -> f x The point is that we merge common RHSs, at least for the DEFAULT case. [One could do something more elaborate but I've never seen it needed.] To avoid an expensive test, we just merge branches equal to the *first* alternative; this picks up the common cases a) all branches equal b) some branches equal to the DEFAULT (which occurs first) The case where Combine Identical Alternatives transformation showed up was like this (base/Foreign/C/Err/Error.hs): x | p `is` 1 -> e1 | p `is` 2 -> e2 ...etc... where @is@ was something like p `is` n = p /= (-1) && p == n This gave rise to a horrible sequence of cases case p of (-1) -> $j p 1 -> e1 DEFAULT -> $j p and similarly in cascade for all the join points! Note [Combine identical alternatives: wrinkles] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * It's important that we try to combine alternatives *before* simplifying them, rather than after. Reason: because Simplify.simplAlt may zap the occurrence info on the binders in the alternatives, which in turn defeats combineIdenticalAlts use of isDeadBinder (see #7360). You can see this in the call to combineIdenticalAlts in GHC.Core.Opt.Simplify.Utils.prepareAlts. Here the alternatives have type InAlt (the "In" meaning input) rather than OutAlt. 
* combineIdenticalAlts does not work well for nullary constructors
        case x of y
           []    -> f []
           (_:_) -> f y
  Here we won't see that [] and y are the same. Sigh! This problem is
  solved in CSE, in GHC.Core.Opt.CSE.combineAlts, which does a better version
  of combineIdenticalAlts. But sadly it doesn't have the occurrence info we
  have here.  (See Note [Combine case alts: awkward corner] in
  GHC.Core.Opt.CSE.)

Note [Care with impossible-constructors when combining alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have (#10538)
   data T = A | B | C | D

      case x::T of   (Imposs-default-cons {A,B})
         DEFAULT -> e1
         A -> e2
         B -> e1

When calling combineIdenticalAlts, we'll have computed that the
"impossible constructors" for the DEFAULT alt is {A,B}, since if x is
A or B we'll take the other alternatives.  But suppose we combine B
into the DEFAULT, to get

      case x::T of   (Imposs-default-cons {A})
         DEFAULT -> e1
         A -> e2

Then we must be careful to trim the impossible constructors to just {A},
else we risk compiling 'e1' wrong!

Not only that, but we take care when there is no DEFAULT beforehand,
because we are introducing one.  Consider

   case x of   (Imposs-default-cons {A,B,C})
     A -> e1
     B -> e2
     C -> e1

Then when combining the A and C alternatives we get

   case x of   (Imposs-default-cons {B})
     DEFAULT -> e1
     B -> e2

Note that we have a new DEFAULT branch that we didn't have before.  So
we need to delete from the "impossible-default-constructors" all the
known-con alternatives that we have eliminated. (In #11172 we missed
the first one.)
-}

combineIdenticalAlts :: [AltCon]    -- Constructors that cannot match DEFAULT
                     -> [CoreAlt]
                     -> (Bool,      -- True <=> something happened
                         [AltCon],  -- New constructors that cannot match DEFAULT
                         [CoreAlt]) -- New alternatives
-- See Note [Combine identical alternatives]
-- True <=> we did some combining, result is a single DEFAULT alternative
combineIdenticalAlts imposs_deflt_cons (Alt con1 bndrs1 rhs1 : rest_alts)
  | all isDeadBinder bndrs1    -- Remember the default
  , not (null elim_rest)       -- alternative comes first
  -- We merged at least one later alternative into alt1; the merged group
  -- becomes a single new DEFAULT alternative, placed first
  = (True, imposs_deflt_cons', deflt_alt : filtered_rest)
  where
    (elim_rest, filtered_rest) = partition identical_to_alt1 rest_alts
    -- Re-attach the floatable ticks stripped from the eliminated
    -- alternatives, so tick information is not lost in the merge
    deflt_alt = Alt DEFAULT [] (mkTicks (concat tickss) rhs1)

    -- See Note [Care with impossible-constructors when combining alternatives]
    imposs_deflt_cons' = imposs_deflt_cons `minusList` elim_cons
    elim_cons = elim_con1 ++ map (\(Alt con _ _) -> con) elim_rest
    elim_con1 = case con1 of     -- Don't forget con1!
                  DEFAULT -> []
                  _       -> [con1]

    -- Cheap equality up to floatable ticks: RHSs that differ only in
    -- ticks that may be floated still count as identical
    cheapEqTicked e1 e2 = cheapEqExpr' tickishFloatable e1 e2
    -- An alternative merges with alt1 only if its binders are all dead
    -- (so its RHS cannot mention them) and its RHS matches rhs1
    identical_to_alt1 (Alt _con bndrs rhs)
      = all isDeadBinder bndrs && rhs `cheapEqTicked` rhs1
    tickss = map (\(Alt _ _ rhs) -> stripTicksT tickishFloatable rhs) elim_rest

-- Nothing combinable (or alt1's binders are live): return input unchanged
combineIdenticalAlts imposs_cons alts
  = (False, imposs_cons, alts)

-- Scales the multiplicity of the binders of a list of case alternatives. That
-- is, in [C x1…xn -> u], the multiplicity of x1…xn is scaled.
scaleAltsBy :: Mult -> [CoreAlt] -> [CoreAlt] scaleAltsBy w alts = map scaleAlt alts where scaleAlt :: CoreAlt -> CoreAlt scaleAlt (Alt con bndrs rhs) = Alt con (map scaleBndr bndrs) rhs scaleBndr :: CoreBndr -> CoreBndr scaleBndr b = scaleVarBy w b {- ********************************************************************* * * exprIsTrivial * * ************************************************************************ Note [exprIsTrivial] ~~~~~~~~~~~~~~~~~~~~ @exprIsTrivial@ is true of expressions we are unconditionally happy to duplicate; simple variables and constants, and type applications. Note that primop Ids aren't considered trivial unless Note [Variables are trivial] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There used to be a gruesome test for (hasNoBinding v) in the Var case: exprIsTrivial (Var v) | hasNoBinding v = idArity v == 0 The idea here is that a constructor worker, like \$wJust, is really short for (\x -> \$wJust x), because \$wJust has no binding. So it should be treated like a lambda. Ditto unsaturated primops. But now constructor workers are not "have-no-binding" Ids. And completely un-applied primops and foreign-call Ids are sufficiently rare that I plan to allow them to be duplicated and put up with saturating them. Note [Tick trivial] ~~~~~~~~~~~~~~~~~~~ Ticks are only trivial if they are pure annotations. If we treat "tick x" as trivial, it will be inlined inside lambdas and the entry count will be skewed, for example. Furthermore "scc x" will turn into just "x" in mkTick. Note [Empty case is trivial] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The expression (case (x::Int) Bool of {}) is just a type-changing case used when we are sure that 'x' will not return. See Note [Empty case alternatives] in GHC.Core. If the scrutinee is trivial, then so is the whole expression; and the CoreToSTG pass in fact drops the case expression leaving only the scrutinee. Having more trivial expressions is good. 
Moreover, if we don't treat it as trivial we may land up with let-bindings like let v = case x of {} in ... and after CoreToSTG that gives let v = x in ... and that confuses the code generator (#11155). So best to kill it off at source. -} {-# INLINE trivial_expr_fold #-} trivial_expr_fold :: (Id -> r) -> (Literal -> r) -> r -> r -> CoreExpr -> r -- ^ The worker function for Note [exprIsTrivial] and Note [getIdFromTrivialExpr] -- This is meant to have the code of both functions in one place and make it -- easy to derive custom predicates. -- -- (trivial_expr_fold k_id k_triv k_not_triv e) -- * returns (k_id x) if `e` is a variable `x` (with trivial wrapping) -- * returns (k_lit x) if `e` is a trivial literal `l` (with trivial wrapping) -- * returns k_triv if `e` is a literal, type, or coercion (with trivial wrapping) -- * returns k_not_triv otherwise -- -- where "trivial wrapping" is -- * Type application or abstraction -- * Ticks other than `tickishIsCode` -- * `case e of {}` an empty case trivial_expr_fold k_id k_lit k_triv k_not_triv = go where -- If you change this function, be sure to change SetLevels.notWorthFloating -- as well! -- (Or yet better: Come up with a way to share code with this function.) 
go (Var v) = k_id v -- See Note [Variables are trivial] go (Lit l) | litIsTrivial l = k_lit l go (Type _) = k_triv go (Coercion _) = k_triv go (App f t) | not (isRuntimeArg t) = go f go (Lam b e) | not (isRuntimeVar b) = go e go (Tick t e) | not (tickishIsCode t) = go e -- See Note [Tick trivial] go (Cast e _) = go e go (Case e b _ as) | null as = go e -- See Note [Empty case is trivial] | Just rhs <- isUnsafeEqualityCase e b as = go rhs -- See (U2) of Note [Implementing unsafeCoerce] in base:Unsafe.Coerce go _ = k_not_triv exprIsTrivial :: CoreExpr -> Bool exprIsTrivial e = trivial_expr_fold (const True) (const True) True False e {- Note [getIdFromTrivialExpr] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ When substituting in a breakpoint we need to strip away the type cruft from a trivial expression and get back to the Id. The invariant is that the expression we're substituting was originally trivial according to exprIsTrivial, AND the expression is not a literal. See Note [substTickish] for how breakpoint substitution preserves this extra invariant. We also need this functionality in CorePrep to extract out Id of a function which we are saturating. However, in this case we don't know if the variable actually refers to a literal; thus we use 'getIdFromTrivialExpr_maybe' to handle this case. See test T12076lit for an example where this matters. 
-}

getIdFromTrivialExpr :: HasDebugCallStack => CoreExpr -> Id
-- See Note [getIdFromTrivialExpr]
getIdFromTrivialExpr e
  = trivial_expr_fold id (const panic) panic panic e
  where
    -- Reaching any non-Id case violates the caller's obligation that the
    -- expression is trivial and not a literal (see the Note)
    panic = pprPanic "getIdFromTrivialExpr" (ppr e)

-- Total variant: returns Nothing instead of panicking when the trivial
-- expression wraps a literal (or otherwise contains no Id)
getIdFromTrivialExpr_maybe :: CoreExpr -> Maybe Id
getIdFromTrivialExpr_maybe e
  = trivial_expr_fold Just (const Nothing) Nothing Nothing e

{- *********************************************************************
*                                                                      *
             exprIsDupable
*                                                                      *
************************************************************************

Note [exprIsDupable]
~~~~~~~~~~~~~~~~~~~~
@exprIsDupable@ is true of expressions that can be duplicated at a modest
cost in code size.  This will only happen in different case branches,
so there's no issue about duplicating work.

That is, exprIsDupable returns True of (f x) even if
f is very very expensive to call.

Its only purpose is to avoid fruitless let-binding
and then inlining of case join points
-}

exprIsDupable :: Platform -> CoreExpr -> Bool
exprIsDupable platform e
  = isJust (go dupAppSize e)
  where
    -- The Int is the remaining size budget; Nothing <=> budget exhausted,
    -- i.e. the expression is too big to duplicate
    go :: Int -> CoreExpr -> Maybe Int
    go n (Type {})     = Just n      -- Types and coercions are free
    go n (Coercion {}) = Just n
    go n (Var {})      = decrement n
    go n (Tick _ e)    = go n e      -- Ticks and casts don't count
    go n (Cast e _)    = go n e
    go n (App f a) | Just n' <- go n a = go n' f
    go n (Lit lit) | litIsDupable platform lit = decrement n
    go _ _ = Nothing

    decrement :: Int -> Maybe Int
    decrement 0 = Nothing
    decrement n = Just (n-1)

dupAppSize :: Int
dupAppSize = 8   -- Size of term we are prepared to duplicate
                 -- This is *just* big enough to make test MethSharing
                 -- inline enough join points.  Really it should be
                 -- smaller, and could be if we fixed #4960.
{- ************************************************************************ * * exprIsCheap, exprIsExpandable * * ************************************************************************ Note [exprIsWorkFree] ~~~~~~~~~~~~~~~~~~~~~ exprIsWorkFree is used when deciding whether to inline something; we don't inline it if doing so might duplicate work, by peeling off a complete copy of the expression. Here we do not want even to duplicate a primop (#5623): eg let x = a #+ b in x +# x we do not want to inline/duplicate x Previously we were a bit more liberal, which led to the primop-duplicating problem. However, being more conservative did lead to a big regression in one nofib benchmark, wheel-sieve1. The situation looks like this: let noFactor_sZ3 :: GHC.Types.Int -> GHC.Types.Bool noFactor_sZ3 = case s_adJ of _ { GHC.Types.I# x_aRs -> case GHC.Prim.<=# x_aRs 2 of _ { GHC.Types.False -> notDivBy ps_adM qs_adN; GHC.Types.True -> lvl_r2Eb }} go = \x. ...(noFactor (I# y))....(go x')... The function 'noFactor' is heap-allocated and then called. Turns out that 'notDivBy' is strict in its THIRD arg, but that is invisible to the caller of noFactor, which therefore cannot do w/w and heap-allocates noFactor's argument. At the moment (May 12) we are just going to put up with this, because the previous more aggressive inlining (which treated 'noFactor' as work-free) was duplicating primops, which in turn was making inner loops of array calculations runs slow (#5623) Note [Case expressions are work-free] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Are case-expressions work-free? Consider let v = case x of (p,q) -> p go = \y -> ...case v of ... Should we inline 'v' at its use site inside the loop? At the moment we do. I experimented with saying that case are *not* work-free, but that increased allocation slightly. It's a fairly small effect, and at the moment we go for the slightly more aggressive version which treats (case x of ....) as work-free if the alternatives are. 
Moreover it improves arities of overloaded functions where there is only dictionary selection (no construction) involved Note [exprIsCheap] ~~~~~~~~~~~~~~~~~~ See also Note [Interaction of exprIsWorkFree and lone variables] in GHC.Core.Unfold @exprIsCheap@ looks at a Core expression and returns \tr{True} if it is obviously in weak head normal form, or is cheap to get to WHNF. Note that that's not the same as exprIsDupable; an expression might be big, and hence not dupable, but still cheap. By ``cheap'' we mean a computation we're willing to: push inside a lambda, or inline at more than one place That might mean it gets evaluated more than once, instead of being shared. The main examples of things which aren't WHNF but are ``cheap'' are: * case e of pi -> ei (where e, and all the ei are cheap) * let x = e in b (where e and b are cheap) * op x1 ... xn (where op is a cheap primitive operator) * error "foo" (because we are happy to substitute it inside a lambda) Notice that a variable is considered 'cheap': we can push it inside a lambda, because sharing will make sure it is only evaluated once. Note [exprIsCheap and exprIsHNF] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that exprIsHNF does not imply exprIsCheap. Eg let x = fac 20 in Just x This responds True to exprIsHNF (you can discard a seq), but False to exprIsCheap. Note [Arguments and let-bindings exprIsCheapX] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ What predicate should we apply to the argument of an application, or the RHS of a let-binding? We used to say "exprIsTrivial arg" due to concerns about duplicating nested constructor applications, but see #4978. So now we just recursively use exprIsCheapX. We definitely want to treat let and app the same. The principle here is that let x = blah in f x should behave equivalently to f blah This in turn means that the 'letrec g' does not prevent eta expansion in this (which it previously was): f = \x. let v = case x of True -> letrec g = \w. blah in g False -> \x. 
x in \w. v True -} -------------------- exprIsWorkFree :: CoreExpr -> Bool -- See Note [exprIsWorkFree] exprIsWorkFree e = exprIsCheapX isWorkFreeApp e exprIsCheap :: CoreExpr -> Bool exprIsCheap e = exprIsCheapX isCheapApp e exprIsCheapX :: CheapAppFun -> CoreExpr -> Bool {-# INLINE exprIsCheapX #-} -- allow specialization of exprIsCheap and exprIsWorkFree -- instead of having an unknown call to ok_app exprIsCheapX ok_app e = ok e where ok e = go 0 e -- n is the number of value arguments go n (Var v) = ok_app v n go _ (Lit {}) = True go _ (Type {}) = True go _ (Coercion {}) = True go n (Cast e _) = go n e go n (Case scrut _ _ alts) = ok scrut && and [ go n rhs | Alt _ _ rhs <- alts ] go n (Tick t e) | tickishCounts t = False | otherwise = go n e go n (Lam x e) | isRuntimeVar x = n==0 || go (n-1) e | otherwise = go n e go n (App f e) | isRuntimeArg e = go (n+1) f && ok e | otherwise = go n f go n (Let (NonRec _ r) e) = go n e && ok r go n (Let (Rec prs) e) = go n e && all (ok . snd) prs -- Case: see Note [Case expressions are work-free] -- App, Let: see Note [Arguments and let-bindings exprIsCheapX] {- Note [exprIsExpandable] ~~~~~~~~~~~~~~~~~~~~~~~~~~ An expression is "expandable" if we are willing to duplicate it, if doing so might make a RULE or case-of-constructor fire. Consider let x = (a,b) y = build g in ....(case x of (p,q) -> rhs)....(foldr k z y).... We don't inline 'x' or 'y' (see Note [Lone variables] in GHC.Core.Unfold), but we do want * the case-expression to simplify (via exprIsConApp_maybe, exprIsLiteral_maybe) * the foldr/build RULE to fire (by expanding the unfolding during rule matching) So we classify the unfolding of a let-binding as "expandable" (via the uf_expandable field) if we want to do this kind of on-the-fly expansion. Specifically: * True of constructor applications (K a b) * True of applications of a "CONLIKE" Id; see Note [CONLIKE pragma] in GHC.Types.Basic. (NB: exprIsCheap might not be true of this) * False of case-expressions. 
If we have let x = case ... in ...(case x of ...)... we won't simplify. We have to inline x. See #14688. * False of let-expressions (same reason); and in any case we float lets out of an RHS if doing so will reveal an expandable application (see SimplEnv.doFloatFromRhs). * Take care: exprIsExpandable should /not/ be true of primops. I found this in test T5623a: let q = /\a. Ptr a (a +# b) in case q @ Float of Ptr v -> ...q... q's inlining should not be expandable, else exprIsConApp_maybe will say that (q @ Float) expands to (Ptr a (a +# b)), and that will duplicate the (a +# b) primop, which we should not do lightly. (It's quite hard to trigger this bug, but T13155 does so for GHC 8.0.) -} ------------------------------------- exprIsExpandable :: CoreExpr -> Bool -- See Note [exprIsExpandable] exprIsExpandable e = ok e where ok e = go 0 e -- n is the number of value arguments go n (Var v) = isExpandableApp v n go _ (Lit {}) = True go _ (Type {}) = True go _ (Coercion {}) = True go n (Cast e _) = go n e go n (Tick t e) | tickishCounts t = False | otherwise = go n e go n (Lam x e) | isRuntimeVar x = n==0 || go (n-1) e | otherwise = go n e go n (App f e) | isRuntimeArg e = go (n+1) f && ok e | otherwise = go n f go _ (Case {}) = False go _ (Let {}) = False ------------------------------------- type CheapAppFun = Id -> Arity -> Bool -- Is an application of this function to n *value* args -- always cheap, assuming the arguments are cheap? 
-- True mainly of data constructors, partial applications; -- but with minor variations: -- isWorkFreeApp -- isCheapApp isWorkFreeApp :: CheapAppFun isWorkFreeApp fn n_val_args | n_val_args == 0 -- No value args = True | n_val_args < idArity fn -- Partial application = True | otherwise = case idDetails fn of DataConWorkId {} -> True PrimOpId op _ -> primOpIsWorkFree op _ -> False isCheapApp :: CheapAppFun isCheapApp fn n_val_args | isWorkFreeApp fn n_val_args = True | isDeadEndId fn = True -- See Note [isCheapApp: bottoming functions] | otherwise = case idDetails fn of DataConWorkId {} -> True -- Actually handled by isWorkFreeApp RecSelId {} -> n_val_args == 1 -- See Note [Record selection] ClassOpId {} -> n_val_args == 1 PrimOpId op _ -> primOpIsCheap op _ -> False -- In principle we should worry about primops -- that return a type variable, since the result -- might be applied to something, but I'm not going -- to bother to check the number of args isExpandableApp :: CheapAppFun isExpandableApp fn n_val_args | isWorkFreeApp fn n_val_args = True | otherwise = case idDetails fn of RecSelId {} -> n_val_args == 1 -- See Note [Record selection] ClassOpId {} -> n_val_args == 1 PrimOpId {} -> False _ | isDeadEndId fn -> False -- See Note [isExpandableApp: bottoming functions] | isConLikeId fn -> True | all_args_are_preds -> True | otherwise -> False where -- See if all the arguments are PredTys (implicit params or classes) -- If so we'll regard it as expandable; see Note [Expandable overloadings] all_args_are_preds = all_pred_args n_val_args (idType fn) all_pred_args n_val_args ty | n_val_args == 0 = True | Just (bndr, ty) <- splitPiTy_maybe ty = case bndr of Named {} -> all_pred_args n_val_args ty Anon _ af -> isInvisibleFunArg af && all_pred_args (n_val_args-1) ty | otherwise = False {- Note [isCheapApp: bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ I'm not sure why we have a special case for bottoming functions in isCheapApp. Maybe we don't need it. 
Note [isExpandableApp: bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's important that isExpandableApp does not respond True to bottoming functions. Recall undefined :: HasCallStack => a Suppose isExpandableApp responded True to (undefined d), and we had: x = undefined Then Simplify.prepareRhs would ANF the RHS: d = x = undefined d This is already bad: we gain nothing from having x bound to (undefined var), unlike the case for data constructors. Worse, we get the simplifier loop described in OccurAnal Note [Cascading inlines]. Suppose x occurs just once; OccurAnal.occAnalNonRecRhs decides x will certainly_inline; so we end up inlining d right back into x; but in the end x doesn't inline because it is bottom (preInlineUnconditionally); so the process repeats.. We could elaborate the certainly_inline logic some more, but it's better just to treat bottoming bindings as non-expandable, because ANFing them is a bad idea in the first place. Note [Record selection] ~~~~~~~~~~~~~~~~~~~~~~~~~~ I'm experimenting with making record selection look cheap, so we will substitute it inside a lambda. Particularly for dictionary field selection. BUT: Take care with (sel d x)! The (sel d) might be cheap, but there's no guarantee that (sel d x) will be too. Hence (n_val_args == 1) Note [Expandable overloadings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose the user wrote this {-# RULE forall x. foo (negate x) = h x #-} f x = ....(foo (negate x)).... They'd expect the rule to fire. But since negate is overloaded, we might get this: f = \d -> let n = negate d in \x -> ...foo (n x)... So we treat the application of a function (negate in this case) to a *dictionary* as expandable. In effect, every function is CONLIKE when it's applied only to dictionaries. 
************************************************************************ * * exprOkForSpeculation * * ************************************************************************ -} ----------------------------- -- | To a first approximation, 'exprOkForSpeculation' returns True of -- an expression that is: -- -- * Safe to evaluate even if normal order eval might not -- evaluate the expression at all, and -- -- * Safe /not/ to evaluate even if normal order would do so -- -- More specifically, this means that: -- * A: Evaluation of the expression reaches weak-head-normal-form, -- * B: soon, -- * C: without causing a write side effect (e.g. writing a mutable variable). -- -- In particular, an expression that may -- * throw a synchronous Haskell exception, or -- * risk an unchecked runtime exception (e.g. array -- out of bounds, divide by zero) -- is /not/ considered OK-for-speculation, as these violate condition A. -- -- For 'exprOkToDiscard', condition A is weakened to allow expressions -- that might risk an unchecked runtime exception but must otherwise -- reach weak-head-normal-form. -- (Note that 'exprOkForSpeculation' implies 'exprOkToDiscard') -- -- But in fact both functions are a bit more conservative than the above, -- in at least the following ways: -- -- * W1: We do not take advantage of already-evaluated lifted variables. -- As a result, 'exprIsHNF' DOES NOT imply 'exprOkForSpeculation'; -- if @y@ is a case-binder of lifted type, then @exprIsHNF y@ is -- 'True', while @exprOkForSpeculation y@ is 'False'. -- See Note [exprOkForSpeculation and evaluated variables] for why. -- * W2: Read-effects on mutable variables are currently also included. -- See Note [Classifying primop effects] "GHC.Builtin.PrimOps". -- * W3: Currently, 'exprOkForSpeculation' always returns 'False' for -- let-expressions. Lets can be stacked deeply, so we just give up. 
-- In any case, the argument of 'exprOkForSpeculation' is usually in -- a strict context, so any lets will have been floated away. -- -- -- As an example of the considerations in this test, consider: -- -- > let x = case y# +# 1# of { r# -> I# r# } -- > in E -- -- being translated to: -- -- > case y# +# 1# of { r# -> -- > let x = I# r# -- > in E -- > } -- -- We can only do this if the @y# +# 1#@ is ok for speculation: it has no -- side effects, and can't diverge or raise an exception. -- -- -- See also Note [Classifying primop effects] in "GHC.Builtin.PrimOps" -- and Note [Transformations affected by primop effects]. -- -- 'exprOkForSpeculation' is used to define Core's let-can-float -- invariant. (See Note [Core let-can-float invariant] in -- "GHC.Core".) It is therefore frequently called on arguments of -- unlifted type, especially via 'needsCaseBinding'. But it is -- sometimes called on expressions of lifted type as well. For -- example, see Note [Speculative evaluation] in "GHC.CoreToStg.Prep". exprOkForSpeculation, exprOkToDiscard :: CoreExpr -> Bool exprOkForSpeculation = expr_ok fun_always_ok primOpOkForSpeculation exprOkToDiscard = expr_ok fun_always_ok primOpOkToDiscard fun_always_ok :: Id -> Bool fun_always_ok _ = True -- | A special version of 'exprOkForSpeculation' used during -- Note [Speculative evaluation]. When the predicate arg `fun_ok` returns False -- for `b`, then `b` is never considered ok-for-spec. 
exprOkForSpecEval :: (Id -> Bool) -> CoreExpr -> Bool exprOkForSpecEval fun_ok = expr_ok fun_ok primOpOkForSpeculation expr_ok :: (Id -> Bool) -> (PrimOp -> Bool) -> CoreExpr -> Bool expr_ok _ _ (Lit _) = True expr_ok _ _ (Type _) = True expr_ok _ _ (Coercion _) = True expr_ok fun_ok primop_ok (Var v) = app_ok fun_ok primop_ok v [] expr_ok fun_ok primop_ok (Cast e _) = expr_ok fun_ok primop_ok e expr_ok fun_ok primop_ok (Lam b e) | isTyVar b = expr_ok fun_ok primop_ok e | otherwise = True -- Tick annotations that *tick* cannot be speculated, because these -- are meant to identify whether or not (and how often) the particular -- source expression was evaluated at runtime. expr_ok fun_ok primop_ok (Tick tickish e) | tickishCounts tickish = False | otherwise = expr_ok fun_ok primop_ok e expr_ok _ _ (Let {}) = False -- See W3 in the Haddock comment for exprOkForSpeculation expr_ok fun_ok primop_ok (Case scrut bndr _ alts) = -- See Note [exprOkForSpeculation: case expressions] expr_ok fun_ok primop_ok scrut && isUnliftedType (idType bndr) -- OK to call isUnliftedType: binders always have a fixed RuntimeRep && all (\(Alt _ _ rhs) -> expr_ok fun_ok primop_ok rhs) alts && altsAreExhaustive alts expr_ok fun_ok primop_ok other_expr | (expr, args) <- collectArgs other_expr = case stripTicksTopE (not . tickishCounts) expr of Var f -> app_ok fun_ok primop_ok f args -- 'LitRubbish' is the only literal that can occur in the head of an -- application and will not be matched by the above case (Var /= Lit). 
-- See Note [How a rubbish literal can be the head of an application] -- in GHC.Types.Literal Lit lit | debugIsOn, not (isLitRubbish lit) -> pprPanic "Non-rubbish lit in app head" (ppr lit) | otherwise -> True _ -> False ----------------------------- app_ok :: (Id -> Bool) -> (PrimOp -> Bool) -> Id -> [CoreExpr] -> Bool app_ok fun_ok primop_ok fun args | not (fun_ok fun) = False -- This code path is only taken for Note [Speculative evaluation] | idArity fun > n_val_args -- Partial application: just check passing the arguments is OK = args_ok | otherwise = case idDetails fun of DFunId new_type -> not new_type -- DFuns terminate, unless the dict is implemented -- with a newtype in which case they may not DataConWorkId {} -> args_ok -- The strictness of the constructor has already -- been expressed by its "wrapper", so we don't need -- to take the arguments into account -- Well, we thought so. But it's definitely wrong! -- See #20749 and Note [How untagged pointers can -- end up in strict fields] in GHC.Stg.InferTags ClassOpId _ is_terminating_result | is_terminating_result -- See Note [exprOkForSpeculation and type classes] -> assertPpr (n_val_args == 1) (ppr fun $$ ppr args) $ True -- assert: terminating result type => can't be applied; -- c.f the _other case below PrimOpId op _ | primOpIsDiv op , Lit divisor <- last args -- there can be 2 args (most div primops) or 3 args -- (WordQuotRem2Op), hence the use of last/init -> not (isZeroLit divisor) && all (expr_ok fun_ok primop_ok) (init args) -- Special case for dividing operations that fail -- In general they are NOT ok-for-speculation -- (which primop_ok will catch), but they ARE OK -- if the divisor is definitely non-zero. -- Often there is a literal divisor, and this -- can get rid of a thunk in an inner loop | otherwise -> primop_ok op && args_ok _other -- Unlifted and terminating types; -- Also c.f. 
the Var case of exprIsHNF | isTerminatingType fun_ty -- See Note [exprOkForSpeculation and type classes] || definitelyUnliftedType fun_ty -> assertPpr (n_val_args == 0) (ppr fun $$ ppr args) True -- Both terminating types (e.g. Eq a), and unlifted types (e.g. Int#) -- are non-functions and so will have no value args. The assert is -- just to check this. -- (If we added unlifted function types this would change, -- and we'd need to actually test n_val_args == 0.) -- Functions that terminate fast without raising exceptions etc -- See (U12) of Note [Implementing unsafeCoerce] | fun `hasKey` unsafeEqualityProofIdKey -> True | otherwise -> False -- NB: even in the nullary case, do /not/ check -- for evaluated-ness of the fun; -- see Note [exprOkForSpeculation and evaluated variables] where fun_ty = idType fun n_val_args = valArgCount args (arg_tys, _) = splitPiTys fun_ty -- Even if a function call itself is OK, any unlifted -- args are still evaluated eagerly and must be checked args_ok = and (zipWith arg_ok arg_tys args) arg_ok :: PiTyVarBinder -> CoreExpr -> Bool arg_ok (Named _) _ = True -- A type argument arg_ok (Anon ty _) arg -- A term argument | definitelyLiftedType (scaledThing ty) = True -- lifted args are not evaluated eagerly | otherwise = expr_ok fun_ok primop_ok arg ----------------------------- altsAreExhaustive :: [Alt b] -> Bool -- True <=> the case alternatives are definitely exhaustive -- False <=> they may or may not be altsAreExhaustive [] = True -- The scrutinee never returns; see Note [Empty case alternatives] in GHC.Core altsAreExhaustive (Alt con1 _ _ : alts) = case con1 of DEFAULT -> True LitAlt {} -> False DataAlt c -> alts `lengthIs` (tyConFamilySize (dataConTyCon c) - 1) -- It is possible to have an exhaustive case that does not -- enumerate all constructors, notably in a GADT match, but -- we behave conservatively here -- I don't think it's important -- enough to deserve special treatment -- | Should we look past this tick when eta-expanding 
the given function? -- -- See Note [Ticks and mandatory eta expansion] -- Takes the function we are applying as argument. etaExpansionTick :: Id -> GenTickish pass -> Bool etaExpansionTick id t = hasNoBinding id && ( tickishFloatable t || isProfTick t ) {- Note [exprOkForSpeculation and type classes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (#22745, #15205) \(d :: C a b). case eq_sel (sc_sel d) of (co :: t1 ~# t2) [Dead] -> blah We know that * eq_sel's argument (sc_sel d) has dictionary type, so it definitely terminates (again Note [NON-BOTTOM-DICTS invariant] in GHC.Core) * eq_sel is simply a superclass selector, and hence is fast * The field that eq_sel picks is of unlifted type, and hence can't be bottom (remember the dictionary argument itself is non-bottom) So we can treat (eq_sel (sc_sel d)) as ok-for-speculation. We must check a) That the function is a class-op, with IdDetails of ClassOpId b) That the result type of the class-op is terminating or unlifted. E.g. for class C a => D a where ... class C a where { op :: a -> a } Since C is represented by a newtype, (sc_sel (d :: D a)) might not be terminating. Rather than repeatedly test if the result of the class-op is a terminating/unlifted type, we cache it as a field of ClassOpId. See GHC.Types.Id.Make.mkDictSelId for where this field is initialised. Note [exprOkForSpeculation: case expressions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ exprOkForSpeculation accepts very special case expressions. Reason: (a ==# b) is ok-for-speculation, but the litEq rules in GHC.Core.Opt.ConstantFold convert it (a ==# 3#) to case a of { DEFAULT -> 0#; 3# -> 1# } for excellent reasons described in GHC.Core.Opt.ConstantFold Note [The litEq rule: converting equality to case]. So, annoyingly, we want that case expression to be ok-for-speculation too. Bother. But we restrict it sharply: * We restrict it to unlifted scrutinees. Consider this: case x of y { DEFAULT -> ... 
(let v::Int# = case y of { True -> e1 ; False -> e2 } in ...) ... Does the RHS of v satisfy the let-can-float invariant? Previously we said yes, on the grounds that y is evaluated. But the binder-swap done by GHC.Core.Opt.SetLevels would transform the inner alternative to DEFAULT -> ... (let v::Int# = case x of { ... } in ...) .... which does /not/ satisfy the let-can-float invariant, because x is not evaluated. See Note [Binder-swap during float-out] in GHC.Core.Opt.SetLevels. To avoid this awkwardness it seems simpler to stick to unlifted scrutinees where the issue does not arise. * We restrict it to exhaustive alternatives. A non-exhaustive case manifestly isn't ok-for-speculation. for example, this is a valid program (albeit a slightly dodgy one) let v = case x of { B -> ...; C -> ... } in case x of A -> ... _ -> ...v...v.... Should v be considered ok-for-speculation? Its scrutinee may be evaluated, but the alternatives are incomplete so we should not evaluate it strictly. Now, all this is for lifted types, but it'd be the same for any finite unlifted type. We don't have many of them, but we might add unlifted algebraic types in due course. ----- Historical note: #15696: -------- Previously GHC.Core.Opt.SetLevels used exprOkForSpeculation to guide floating of single-alternative cases; it now uses exprIsHNF Note [Floating single-alternative cases]. But in those days, consider case e of x { DEAFULT -> ...(case x of y A -> ... _ -> ...(case (case x of { B -> p; C -> p }) of I# r -> blah)... If GHC.Core.Opt.SetLevels considers the inner nested case as ok-for-speculation it can do case-floating (in GHC.Core.Opt.SetLevels). So we'd float to: case e of x { DEAFULT -> case (case x of { B -> p; C -> p }) of I# r -> ...(case x of y A -> ... _ -> ...blah...)... which is utterly bogus (seg fault); see #5453. 
----- Historical note: #3717: -------- foo :: Int -> Int foo 0 = 0 foo n = (if n < 5 then 1 else 2) `seq` foo (n-1) In earlier GHCs, we got this: T.$wfoo = \ (ww :: GHC.Prim.Int#) -> case ww of ds { __DEFAULT -> case (case <# ds 5 of _ { GHC.Types.False -> lvl1; GHC.Types.True -> lvl}) of _ { __DEFAULT -> T.$wfoo (GHC.Prim.-# ds_XkE 1) }; 0 -> 0 } Before join-points etc we could only get rid of two cases (which are redundant) by recognising that the (case <# ds 5 of { ... }) is ok-for-speculation, even though it has /lifted/ type. But now join points do the job nicely. ------- End of historical note ------------ Note [exprOkForSpeculation and evaluated variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider these examples: * case x of y { DEFAULT -> ....y.... } Should 'y' (alone) be considered ok-for-speculation? * case x of y { DEFAULT -> ....let z = dataToTagLarge# y... } Should (dataToTagLarge# y) be considered ok-for-spec? Recall that dataToTagLarge# :: forall a. a -> Int# must always evaluate its argument. (See also Note [DataToTag overview].) You could argue 'yes', because in the case alternative we know that 'y' is evaluated. But the binder-swap transformation, which is extremely useful for float-out, changes these expressions to case x of y { DEFAULT -> ....x.... } case x of y { DEFAULT -> ....let z = dataToTagLarge# x... } And now the expression does not obey the let-can-float invariant! Yikes! Moreover we really might float (dataToTagLarge# x) outside the case, and then it really, really doesn't obey the let-can-float invariant. The solution is simple: exprOkForSpeculation does not try to take advantage of the evaluated-ness of (lifted) variables. And it returns False (always) for primops that perform evaluation. We achieve the latter by marking the relevant primops as "ThrowsException" or "ReadWriteEffect"; see also Note [Classifying primop effects] in GHC.Builtin.PrimOps. 
Note that exprIsHNF /can/ and does take advantage of evaluated-ness; it doesn't have the trickiness of the let-can-float invariant to worry about. ************************************************************************ * * exprIsHNF, exprIsConLike * * ************************************************************************ -} -- Note [exprIsHNF] See also Note [exprIsCheap and exprIsHNF] -- ~~~~~~~~~~~~~~~~ -- | exprIsHNF returns true for expressions that are certainly /already/ -- evaluated to /head/ normal form. This is used to decide whether it's ok -- to perform case-to-let for lifted expressions, which changes: -- -- > case x of x' { _ -> e } -- -- into: -- -- > let x' = x in e -- -- and in so doing makes the binding lazy. -- -- So, it does /not/ treat variables as evaluated, unless they say they are. -- However, it /does/ treat partial applications and constructor applications -- as values, even if their arguments are non-trivial, provided the argument -- type is lifted. For example, both of these are values: -- -- > (:) (f x) (map f xs) -- > map (...redex...) -- -- because 'seq' on such things completes immediately. -- -- For unlifted argument types, we have to be careful: -- -- > C (f x :: Int#) -- -- Suppose @f x@ diverges; then @C (f x)@ is not a value. -- We check for this using needsCaseBinding below exprIsHNF :: CoreExpr -> Bool -- True => Value-lambda, constructor, PAP exprIsHNF = exprIsHNFlike isDataConWorkId isEvaldUnfolding -- | Similar to 'exprIsHNF' but includes CONLIKE functions as well as -- data constructors. Conlike arguments are considered interesting by the -- inliner. exprIsConLike :: CoreExpr -> Bool -- True => lambda, conlike, PAP exprIsConLike = exprIsHNFlike isConLikeId isConLikeUnfolding -- | Returns true for values or value-like expressions. These are lambdas, -- constructors / CONLIKE functions (as determined by the function argument) -- or PAPs. 
-- exprIsHNFlike :: HasDebugCallStack => (Var -> Bool) -> (Unfolding -> Bool) -> CoreExpr -> Bool exprIsHNFlike is_con is_con_unf = is_hnf_like where is_hnf_like (Var v) -- NB: There are no value args at this point = id_app_is_value v 0 -- Catches nullary constructors, -- so that [] and () are values, for example -- and (e.g.) primops that don't have unfoldings || is_con_unf (idUnfolding v) -- Check the thing's unfolding; it might be bound to a value -- or to a guaranteed-evaluated variable (isEvaldUnfolding) -- Contrast with Note [exprOkForSpeculation and evaluated variables] -- We don't look through loop breakers here, which is a bit conservative -- but otherwise I worry that if an Id's unfolding is just itself, -- we could get an infinite loop || definitelyUnliftedType (idType v) -- Unlifted binders are always evaluated (#20140) is_hnf_like (Lit l) = not (isLitRubbish l) -- Regarding a LitRubbish as ConLike leads to unproductive inlining in -- WWRec, see #20035 is_hnf_like (Type _) = True -- Types are honorary Values; -- we don't mind copying them is_hnf_like (Coercion _) = True -- Same for coercions is_hnf_like (Lam b e) = isRuntimeVar b || is_hnf_like e is_hnf_like (Tick tickish e) = not (tickishCounts tickish) && is_hnf_like e -- See Note [exprIsHNF Tick] is_hnf_like (Cast e _) = is_hnf_like e is_hnf_like (App e a) | isValArg a = app_is_value e 1 | otherwise = is_hnf_like e is_hnf_like (Let _ e) = is_hnf_like e -- Lazy let(rec)s don't affect us is_hnf_like (Case e b _ as) | Just rhs <- isUnsafeEqualityCase e b as = is_hnf_like rhs is_hnf_like _ = False -- 'n' is the number of value args to which the expression is applied -- And n>0: there is at least one value argument app_is_value :: CoreExpr -> Int -> Bool app_is_value (Var f) nva = id_app_is_value f nva app_is_value (Tick _ f) nva = app_is_value f nva app_is_value (Cast f _) nva = app_is_value f nva app_is_value (App f a) nva | isValArg a = app_is_value f (nva + 1) && not (needsCaseBinding (exprType a) a) 
-- For example f (x /# y) where f has arity two, and the first -- argument is unboxed. This is not a value! -- But f 34# is a value. -- NB: Check app_is_value first, the arity check is cheaper | otherwise = app_is_value f nva app_is_value _ _ = False id_app_is_value id n_val_args = is_con id || idArity id > n_val_args {- Note [exprIsHNF Tick] ~~~~~~~~~~~~~~~~~~~~~ We can discard source annotations on HNFs as long as they aren't tick-like: scc c (\x . e) => \x . e scc c (C x1..xn) => C x1..xn So we regard these as HNFs. Tick annotations that tick are not regarded as HNF if the expression they surround is HNF, because the tick is there to tell us that the expression was evaluated, so we don't want to discard a seq on it. -} -- | Can we bind this 'CoreExpr' at the top level? exprIsTopLevelBindable :: CoreExpr -> Type -> Bool -- See Note [Core top-level string literals] -- Precondition: exprType expr = ty -- Top-level literal strings can't even be wrapped in ticks -- see Note [Core top-level string literals] in "GHC.Core" exprIsTopLevelBindable expr ty = not (mightBeUnliftedType ty) -- Note that 'expr' may not have a fixed runtime representation here, -- consequently we must use 'mightBeUnliftedType' rather than 'isUnliftedType', -- as the latter would panic. || exprIsTickedString expr -- | Check if the expression is zero or more Ticks wrapped around a literal -- string. exprIsTickedString :: CoreExpr -> Bool exprIsTickedString = isJust . exprIsTickedString_maybe -- | Extract a literal string from an expression that is zero or more Ticks -- wrapped around a literal string. Returns Nothing if the expression has a -- different shape. -- Used to "look through" Ticks in places that need to handle literal strings. 
exprIsTickedString_maybe :: CoreExpr -> Maybe ByteString exprIsTickedString_maybe (Lit (LitString bs)) = Just bs exprIsTickedString_maybe (Tick t e) -- we don't tick literals with CostCentre ticks, compare to mkTick | tickishPlace t == PlaceCostCentre = Nothing | otherwise = exprIsTickedString_maybe e exprIsTickedString_maybe _ = Nothing {- ************************************************************************ * * Instantiating data constructors * * ************************************************************************ These InstPat functions go here to avoid circularity between DataCon and Id -} dataConRepInstPat :: [Unique] -> Mult -> DataCon -> [Type] -> ([TyCoVar], [Id]) dataConRepFSInstPat :: [FastString] -> [Unique] -> Mult -> DataCon -> [Type] -> ([TyCoVar], [Id]) dataConRepInstPat = dataConInstPat (repeat ((fsLit "ipv"))) dataConRepFSInstPat = dataConInstPat dataConInstPat :: [FastString] -- A long enough list of FSs to use for names -> [Unique] -- An equally long list of uniques, at least one for each binder -> Mult -- The multiplicity annotation of the case expression: scales the multiplicity of variables -> DataCon -> [Type] -- Types to instantiate the universally quantified tyvars -> ([TyCoVar], [Id]) -- Return instantiated variables -- dataConInstPat arg_fun fss us mult con inst_tys returns a tuple -- (ex_tvs, arg_ids), -- -- ex_tvs are intended to be used as binders for existential type args -- -- arg_ids are intended to be used as binders for value arguments, -- and their types have been instantiated with inst_tys and ex_tys -- The arg_ids include both evidence and -- programmer-specified arguments (both after rep-ing) -- -- Example. -- The following constructor T1 -- -- data T a where -- T1 :: forall b. Int -> b -> T(a,b) -- ... -- -- has representation type -- forall a. forall a1. forall b. 
(a ~ (a1,b)) => -- Int -> b -> T a -- -- dataConInstPat fss us T1 (a1',b') will return -- -- ([a1'', b''], [c :: (a1', b')~(a1'', b''), x :: Int, y :: b'']) -- -- where the double-primed variables are created with the FastStrings and -- Uniques given as fss and us dataConInstPat fss uniqs mult con inst_tys = assert (univ_tvs `equalLength` inst_tys) $ (ex_bndrs, arg_ids) where univ_tvs = dataConUnivTyVars con ex_tvs = dataConExTyCoVars con arg_tys = dataConRepArgTys con arg_strs = dataConRepStrictness con -- 1-1 with arg_tys n_ex = length ex_tvs -- split the Uniques and FastStrings (ex_uniqs, id_uniqs) = splitAt n_ex uniqs (ex_fss, id_fss) = splitAt n_ex fss -- Make the instantiating substitution for universals univ_subst = zipTvSubst univ_tvs inst_tys -- Make existential type variables, applying and extending the substitution (full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst (zip3 ex_tvs ex_fss ex_uniqs) mk_ex_var :: Subst -> (TyCoVar, FastString, Unique) -> (Subst, TyCoVar) mk_ex_var subst (tv, fs, uniq) = (Type.extendTCvSubstWithClone subst tv new_tv , new_tv) where new_tv | isTyVar tv = mkTyVar (mkSysTvName uniq fs) kind | otherwise = mkCoVar (mkSystemVarName uniq fs) kind kind = Type.substTyUnchecked subst (varType tv) -- Make value vars, instantiating types arg_ids = zipWith4 mk_id_var id_uniqs id_fss arg_tys arg_strs mk_id_var uniq fs (Scaled m ty) str = setCaseBndrEvald str $ -- See Note [Mark evaluated arguments] mkUserLocalOrCoVar (mkVarOccFS fs) uniq (mult `mkMultMul` m) (Type.substTy full_subst ty) noSrcSpan {- Note [Mark evaluated arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When pattern matching on a constructor with strict fields, the binder can have an 'evaldUnfolding'. Moreover, it *should* have one, so that when loading an interface file unfolding like: data T = MkT !Int f x = case x of { MkT y -> let v::Int# = case y of I# n -> n+1 in ... } we don't want Lint to complain. 
The 'y' is evaluated, so the case in the RHS of the binding for 'v' is fine. But only if we *know* that 'y' is evaluated. c.f. add_evals in GHC.Core.Opt.Simplify.simplAlt ************************************************************************ * * Equality * * ************************************************************************ -} -- | A cheap equality test which bales out fast! -- If it returns @True@ the arguments are definitely equal, -- otherwise, they may or may not be equal. cheapEqExpr :: Expr b -> Expr b -> Bool cheapEqExpr = cheapEqExpr' (const False) -- | Cheap expression equality test, can ignore ticks by type. cheapEqExpr' :: (CoreTickish -> Bool) -> Expr b -> Expr b -> Bool {-# INLINE cheapEqExpr' #-} cheapEqExpr' ignoreTick e1 e2 = go e1 e2 where go (Var v1) (Var v2) = v1 == v2 go (Lit lit1) (Lit lit2) = lit1 == lit2 go (Type t1) (Type t2) = t1 `eqType` t2 go (Coercion c1) (Coercion c2) = c1 `eqCoercion` c2 go (App f1 a1) (App f2 a2) = f1 `go` f2 && a1 `go` a2 go (Cast e1 t1) (Cast e2 t2) = e1 `go` e2 && t1 `eqCoercion` t2 go (Tick t1 e1) e2 | ignoreTick t1 = go e1 e2 go e1 (Tick t2 e2) | ignoreTick t2 = go e1 e2 go (Tick t1 e1) (Tick t2 e2) = t1 == t2 && e1 `go` e2 go _ _ = False -- Used by diffBinds, which is itself only used in GHC.Core.Lint.lintAnnots eqTickish :: RnEnv2 -> CoreTickish -> CoreTickish -> Bool eqTickish env (Breakpoint lext lid lids lmod) (Breakpoint rext rid rids rmod) = lid == rid && map (rnOccL env) lids == map (rnOccR env) rids && lext == rext && lmod == rmod eqTickish _ l r = l == r -- | Finds differences between core bindings, see @diffExpr@. -- -- The main problem here is that while we expect the binds to have the -- same order in both lists, this is not guaranteed. To do this -- properly we'd either have to do some sort of unification or check -- all possible mappings, which would be seriously expensive. So -- instead we simply match single bindings as far as we can. 
This -- leaves us just with mutually recursive and/or mismatching bindings, -- which we then speculatively match by ordering them. It's by no means -- perfect, but gets the job done well enough. -- -- Only used in GHC.Core.Lint.lintAnnots diffBinds :: Bool -> RnEnv2 -> [(Var, CoreExpr)] -> [(Var, CoreExpr)] -> ([SDoc], RnEnv2) diffBinds top env binds1 = go (length binds1) env binds1 where go _ env [] [] = ([], env) go _fuel env [] binds2 -- No binds remaining to compare on the left? Bail out early. = (warn env [] binds2, env) go _fuel env binds1 [] -- No binds remaining to compare on the right? Bail out early. = (warn env binds1 [], env) go fuel env binds1@(bind1:_) binds2@(_:_) -- Iterated over all binds without finding a match? Then -- try speculatively matching binders by order. | fuel == 0 = if not $ env `inRnEnvL` fst bind1 then let env' = uncurry (rnBndrs2 env) $ unzip $ zip (sort $ map fst binds1) (sort $ map fst binds2) in go (length binds1) env' binds1 binds2 -- If we have already tried that, give up else (warn env binds1 binds2, env) go fuel env ((bndr1,expr1):binds1) binds2 | let matchExpr (bndr,expr) = (isTyVar bndr || not top || null (diffIdInfo env bndr bndr1)) && null (diffExpr top (rnBndr2 env bndr1 bndr) expr1 expr) , (binds2l, (bndr2,_):binds2r) <- break matchExpr binds2 = go (length binds1) (rnBndr2 env bndr1 bndr2) binds1 (binds2l ++ binds2r) | otherwise -- No match, so push back (FIXME O(n^2)) = go (fuel-1) env (binds1++[(bndr1,expr1)]) binds2 -- We have tried everything, but couldn't find a good match. So -- now we just return the comparison results when we pair up -- the binds in a pseudo-random order. 
warn env binds1 binds2 = concatMap (uncurry (diffBind env)) (zip binds1' binds2') ++ unmatched "unmatched left-hand:" (drop l binds1') ++ unmatched "unmatched right-hand:" (drop l binds2') where binds1' = sortBy (comparing fst) binds1 binds2' = sortBy (comparing fst) binds2 l = min (length binds1') (length binds2') unmatched _ [] = [] unmatched txt bs = [text txt $$ ppr (Rec bs)] diffBind env (bndr1,expr1) (bndr2,expr2) | ds@(_:_) <- diffExpr top env expr1 expr2 = locBind "in binding" bndr1 bndr2 ds -- Special case for TyVar, which we checked were bound to the same types in -- diffExpr, but don't have any IdInfo we would panic if called diffIdInfo. -- These let-bound types are created temporarily by the simplifier but inlined -- immediately. | isTyVar bndr1 && isTyVar bndr2 = [] | otherwise = diffIdInfo env bndr1 bndr2 -- | Finds differences between core expressions, modulo alpha and -- renaming. Setting @top@ means that the @IdInfo@ of bindings will be -- checked for differences as well. diffExpr :: Bool -> RnEnv2 -> CoreExpr -> CoreExpr -> [SDoc] diffExpr _ env (Var v1) (Var v2) | rnOccL env v1 == rnOccR env v2 = [] diffExpr _ _ (Lit lit1) (Lit lit2) | lit1 == lit2 = [] diffExpr _ env (Type t1) (Type t2) | eqTypeX env t1 t2 = [] diffExpr _ env (Coercion co1) (Coercion co2) | eqCoercionX env co1 co2 = [] diffExpr top env (Cast e1 co1) (Cast e2 co2) | eqCoercionX env co1 co2 = diffExpr top env e1 e2 diffExpr top env (Tick n1 e1) e2 | not (tickishIsCode n1) = diffExpr top env e1 e2 diffExpr top env e1 (Tick n2 e2) | not (tickishIsCode n2) = diffExpr top env e1 e2 diffExpr top env (Tick n1 e1) (Tick n2 e2) | eqTickish env n1 n2 = diffExpr top env e1 e2 -- The error message of failed pattern matches will contain -- generated names, which are allowed to differ. 
diffExpr _ _ (App (App (Var absent) _) _) (App (App (Var absent2) _) _) | isDeadEndId absent && isDeadEndId absent2 = [] diffExpr top env (App f1 a1) (App f2 a2) = diffExpr top env f1 f2 ++ diffExpr top env a1 a2 diffExpr top env (Lam b1 e1) (Lam b2 e2) | eqTypeX env (varType b1) (varType b2) -- False for Id/TyVar combination = diffExpr top (rnBndr2 env b1 b2) e1 e2 diffExpr top env (Let bs1 e1) (Let bs2 e2) = let (ds, env') = diffBinds top env (flattenBinds [bs1]) (flattenBinds [bs2]) in ds ++ diffExpr top env' e1 e2 diffExpr top env (Case e1 b1 t1 a1) (Case e2 b2 t2 a2) | equalLength a1 a2 && not (null a1) || eqTypeX env t1 t2 -- See Note [Empty case alternatives] in GHC.Data.TrieMap = diffExpr top env e1 e2 ++ concat (zipWith diffAlt a1 a2) where env' = rnBndr2 env b1 b2 diffAlt (Alt c1 bs1 e1) (Alt c2 bs2 e2) | c1 /= c2 = [text "alt-cons " <> ppr c1 <> text " /= " <> ppr c2] | otherwise = diffExpr top (rnBndrs2 env' bs1 bs2) e1 e2 diffExpr _ _ e1 e2 = [fsep [ppr e1, text "/=", ppr e2]] -- | Find differences in @IdInfo@. We will especially check whether -- the unfoldings match, if present (see @diffUnfold@). diffIdInfo :: RnEnv2 -> Var -> Var -> [SDoc] diffIdInfo env bndr1 bndr2 | arityInfo info1 == arityInfo info2 && cafInfo info1 == cafInfo info2 && oneShotInfo info1 == oneShotInfo info2 && inlinePragInfo info1 == inlinePragInfo info2 && occInfo info1 == occInfo info2 && demandInfo info1 == demandInfo info2 && callArityInfo info1 == callArityInfo info2 = locBind "in unfolding of" bndr1 bndr2 $ diffUnfold env (realUnfoldingInfo info1) (realUnfoldingInfo info2) | otherwise = locBind "in Id info of" bndr1 bndr2 [fsep [pprBndr LetBind bndr1, text "/=", pprBndr LetBind bndr2]] where info1 = idInfo bndr1; info2 = idInfo bndr2 -- | Find differences in unfoldings. Note that we will not check for -- differences of @IdInfo@ in unfoldings, as this is generally -- redundant, and can lead to an exponential blow-up in complexity. 
diffUnfold :: RnEnv2 -> Unfolding -> Unfolding -> [SDoc] diffUnfold _ NoUnfolding NoUnfolding = [] diffUnfold _ BootUnfolding BootUnfolding = [] diffUnfold _ (OtherCon cs1) (OtherCon cs2) | cs1 == cs2 = [] diffUnfold env (DFunUnfolding bs1 c1 a1) (DFunUnfolding bs2 c2 a2) | c1 == c2 && equalLength bs1 bs2 = concatMap (uncurry (diffExpr False env')) (zip a1 a2) where env' = rnBndrs2 env bs1 bs2 diffUnfold env (CoreUnfolding t1 _ _ c1 g1) (CoreUnfolding t2 _ _ c2 g2) | c1 == c2 && g1 == g2 = diffExpr False env t1 t2 diffUnfold _ uf1 uf2 = [fsep [ppr uf1, text "/=", ppr uf2]] -- | Add location information to diff messages locBind :: String -> Var -> Var -> [SDoc] -> [SDoc] locBind loc b1 b2 diffs = map addLoc diffs where addLoc d = d $$ nest 2 (parens (text loc <+> bindLoc)) bindLoc | b1 == b2 = ppr b1 | otherwise = ppr b1 <> char '/' <> ppr b2 {- ********************************************************************* * * \subsection{Determining non-updatable right-hand-sides} * * ************************************************************************ Top-level constructor applications can usually be allocated statically, but they can't if the constructor, or any of the arguments, come from another DLL (because we can't refer to static labels in other DLLs). If this happens we simply make the RHS into an updatable thunk, and 'execute' it rather than allocating it statically. -} {- ************************************************************************ * * \subsection{Type utilities} * * ************************************************************************ -} -- | True if the type has no non-bottom elements, e.g. when it is an empty -- datatype, or a GADT with non-satisfiable type parameters, e.g. Int :~: Bool. -- See Note [Bottoming expressions] -- -- See Note [No alternatives lint check] for another use of this function. isEmptyTy :: Type -> Bool isEmptyTy ty -- Data types where, given the particular type parameters, no data -- constructor matches, are empty. 
-- This includes data types with no constructors, e.g. Data.Void.Void. | Just (tc, inst_tys) <- splitTyConApp_maybe ty , Just dcs <- tyConDataCons_maybe tc , all (dataConCannotMatch inst_tys) dcs = True | otherwise = False -- | If @normSplitTyConApp_maybe _ ty = Just (tc, tys, co)@ -- then @ty |> co = tc tys@. It's 'splitTyConApp_maybe', but looks through -- coercions via 'topNormaliseType_maybe'. Hence the \"norm\" prefix. normSplitTyConApp_maybe :: FamInstEnvs -> Type -> Maybe (TyCon, [Type], Coercion) normSplitTyConApp_maybe fam_envs ty | let Reduction co ty1 = topNormaliseType_maybe fam_envs ty `orElse` (mkReflRedn Representational ty) , Just (tc, tc_args) <- splitTyConApp_maybe ty1 = Just (tc, tc_args, co) normSplitTyConApp_maybe _ _ = Nothing {- ***************************************************** * * InScopeSet things * ***************************************************** -} extendInScopeSetBind :: InScopeSet -> CoreBind -> InScopeSet extendInScopeSetBind (InScope in_scope) binds = InScope $ foldBindersOfBindStrict extendVarSet in_scope binds extendInScopeSetBndrs :: InScopeSet -> [CoreBind] -> InScopeSet extendInScopeSetBndrs (InScope in_scope) binds = InScope $ foldBindersOfBindsStrict extendVarSet in_scope binds mkInScopeSetBndrs :: [CoreBind] -> InScopeSet mkInScopeSetBndrs binds = foldBindersOfBindsStrict extendInScopeSet emptyInScopeSet binds {- ***************************************************** * * StaticPtr * ***************************************************** -} -- | @collectMakeStaticArgs (makeStatic t srcLoc e)@ yields -- @Just (makeStatic, t, srcLoc, e)@. -- -- Returns @Nothing@ for every other expression. 
collectMakeStaticArgs :: CoreExpr -> Maybe (CoreExpr, Type, CoreExpr, CoreExpr) collectMakeStaticArgs e | (fun@(Var b), [Type t, loc, arg], _) <- collectArgsTicks (const True) e , idName b == makeStaticName = Just (fun, t, loc, arg) collectMakeStaticArgs _ = Nothing {- ************************************************************************ * * \subsection{Join points} * * ************************************************************************ -} -- | Does this binding bind a join point (or a recursive group of join points)? isJoinBind :: CoreBind -> Bool isJoinBind (NonRec b _) = isJoinId b isJoinBind (Rec ((b, _) : _)) = isJoinId b isJoinBind _ = False dumpIdInfoOfProgram :: Bool -> (IdInfo -> SDoc) -> CoreProgram -> SDoc dumpIdInfoOfProgram dump_locals ppr_id_info binds = vcat (map printId ids) where ids = sortBy (stableNameCmp `on` getName) (concatMap getIds binds) getIds (NonRec i _) = [ i ] getIds (Rec bs) = map fst bs -- By default only include full info for exported ids, unless we run in the verbose -- pprDebug mode. printId id | isExportedId id || dump_locals = ppr id <> colon <+> (ppr_id_info (idInfo id)) | otherwise = empty {- ************************************************************************ * * \subsection{Tag inference things} * * ************************************************************************ -} {- Note [Call-by-value for worker args] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we unbox a constructor with strict fields we want to preserve the information that some of the arguments came out of strict fields and therefore should be already properly tagged, however we can't express this directly in core. Instead what we do is generate a worker like this: data T = MkT A !B foo = case T of MkT a b -> $wfoo a b $wfoo a b = case b of b' -> rhs[b/b'] This makes the worker strict in b causing us to use a more efficient calling convention for `b` where the caller needs to ensure `b` is properly tagged and evaluated before it's passed to $wfoo. 
See Note [CBV Function Ids]. Usually the argument will be known to be properly tagged at the call site so there is no additional work for the caller and the worker can be more efficient since it can assume the presence of a tag. This is especially true for recursive functions like this: -- myPred expect it's argument properly tagged myPred !x = ... loop :: MyPair -> Int loop (MyPair !x !y) = case x of A -> 1 B -> 2 _ -> loop (MyPair (myPred x) (myPred y)) Here we would ordinarily not be strict in y after unboxing. However if we pass it as a regular argument then this means on every iteration of loop we will incur an extra seq on y before we can pass it to `myPred` which isn't great! That is in STG after tag inference we get: Rec { Find.$wloop [InlPrag=[2], Occ=LoopBreaker] :: Find.MyEnum -> Find.MyEnum -> GHC.Prim.Int# [GblId[StrictWorker([!, ~])], Arity=2, Str=<1L>, Unf=OtherCon []] = {} \r [x y] case x of x' [Occ=Once1] { __DEFAULT -> case y of y' [Occ=Once1] { __DEFAULT -> case Find.$wmyPred y' of pred_y [Occ=Once1] { __DEFAULT -> case Find.$wmyPred x' of pred_x [Occ=Once1] { __DEFAULT -> Find.$wloop pred_x pred_y; }; }; Find.A -> 1#; Find.B -> 2#; }; end Rec } Here comes the tricky part: If we make $wloop strict in both x/y and we get: Rec { Find.$wloop [InlPrag=[2], Occ=LoopBreaker] :: Find.MyEnum -> Find.MyEnum -> GHC.Prim.Int# [GblId[StrictWorker([!, !])], Arity=2, Str=<1L>, Unf=OtherCon []] = {} \r [x y] case y of y' [Occ=Once1] { __DEFAULT -> case x of x' [Occ=Once1] { __DEFAULT -> case Find.$wmyPred y' of pred_y [Occ=Once1] { __DEFAULT -> case Find.$wmyPred x' of pred_x [Occ=Once1] { __DEFAULT -> Find.$wloop pred_x pred_y; }; }; Find.A -> 1#; Find.B -> 2#; }; end Rec } Here both x and y are known to be tagged in the function body since we pass strict worker args using unlifted cbv. This means the seqs on x and y both become no-ops and compared to the first version the seq on `y` disappears at runtime. 
The downside is that the caller of $wfoo potentially has to evaluate `y` once if we can't prove it isn't already evaluated. But y coming out of a strict field is in WHNF so safe to evaluated. And most of the time it will be properly tagged+evaluated already at the call site because of the Strict Field Invariant! See Note [Strict Field Invariant] for more in this. This makes GHC itself around 1% faster despite doing slightly more work! So this is generally quite good. We only apply this when we think there is a benefit in doing so however. There are a number of cases in which it would be useless to insert an extra seq. ShouldStrictifyIdForCbv tries to identify these to avoid churn in the simplifier. See Note [Which Ids should be strictified] for details on this. -} mkStrictFieldSeqs :: [(Id,StrictnessMark)] -> CoreExpr -> (CoreExpr) mkStrictFieldSeqs args rhs = foldr addEval rhs args where case_ty = exprType rhs addEval :: (Id,StrictnessMark) -> (CoreExpr) -> (CoreExpr) addEval (arg_id,arg_cbv) (rhs) -- Argument representing strict field. | isMarkedStrict arg_cbv , shouldStrictifyIdForCbv arg_id -- Make sure to remove unfoldings here to avoid the simplifier dropping those for OtherCon[] unfoldings. = Case (Var $! zapIdUnfolding arg_id) arg_id case_ty ([Alt DEFAULT [] rhs]) -- Normal argument | otherwise = do rhs {- Note [Which Ids should be strictified] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For some arguments we would like to convince GHC to pass them call by value. One way to achieve this is described in see Note [Call-by-value for worker args]. We separate the concerns of "should we pass this argument using cbv" and "should we do so by making the rhs strict in this argument". This note deals with the second part. There are multiple reasons why we might not want to insert a seq in the rhs to strictify a functions argument: 1) The argument doesn't exist at runtime. 
For zero width types (like Types) there is no benefit as we don't operate on them at runtime at all. This includes things like void#, coercions and state tokens. 2) The argument is a unlifted type. If the argument is a unlifted type the calling convention already is explicitly cbv. This means inserting a seq on this argument wouldn't do anything as the seq would be a no-op *and* it wouldn't affect the calling convention. 3) The argument is absent. If the argument is absent in the body there is no advantage to it being passed as cbv to the function. The function won't ever look at it so we don't safe any work. This mostly happens for join point. For example we might have: data T = MkT ![Int] [Char] f t = case t of MkT xs{strict} ys-> snd (xs,ys) and abstract the case alternative to: f t = join j1 = \xs ys -> snd (xs,ys) in case t of MkT xs{strict} ys-> j1 xs xy While we "use" xs inside `j1` it's not used inside the function `snd` we pass it to. In short a absent demand means neither our RHS, nor any function we pass the argument to will inspect it. So there is no work to be saved by forcing `xs` early. NB: There is an edge case where if we rebox we *can* end up seqing an absent value. Note [Absent fillers] has an example of this. However this is so rare it's not worth caring about here. 4) The argument is already strict. Consider this code: data T = MkT ![Int] f t = case t of MkT xs{strict} -> reverse xs The `xs{strict}` indicates that `xs` is used strictly by the `reverse xs`. If we do a w/w split, and add the extra eval on `xs`, we'll get $wf xs = case xs of xs1 -> let t = MkT xs1 in case t of MkT xs2 -> reverse xs2 That's not wrong; but the w/w body will simplify to $wf xs = case xs of xs1 -> reverse xs1 and now we'll drop the `case xs` because `xs1` is used strictly in its scope. Adding that eval was a waste of time. So don't add it for strictly-demanded Ids. 5) Functions Functions are tricky (see Note [TagInfo of functions] in InferTags). 
But the gist of it even if we make a higher order function argument strict we can't avoid the tag check when it's used later in the body. So there is no benefit. -} -- | Do we expect there to be any benefit if we make this var strict -- in order for it to get treated as as cbv argument? -- See Note [Which Ids should be strictified] -- See Note [CBV Function Ids] for more background. shouldStrictifyIdForCbv :: Var -> Bool shouldStrictifyIdForCbv = wantCbvForId False -- Like shouldStrictifyIdForCbv but also wants to use cbv for strict args. shouldUseCbvForId :: Var -> Bool shouldUseCbvForId = wantCbvForId True -- When we strictify we want to skip strict args otherwise the logic is the same -- as for shouldUseCbvForId so we common up the logic here. -- Basically returns true if it would be beneficial for runtime to pass this argument -- as CBV independent of weither or not it's correct. E.g. it might return true for lazy args -- we are not allowed to force. wantCbvForId :: Bool -> Var -> Bool wantCbvForId cbv_for_strict v -- Must be a runtime var. -- See Note [Which Ids should be strictified] point 1) | isId v , not $ isZeroBitTy ty -- Unlifted things don't need special measures to be treated as cbv -- See Note [Which Ids should be strictified] point 2) , mightBeLiftedType ty -- Functions sometimes get a zero tag so we can't eliminate the tag check. -- See Note [TagInfo of functions] in InferTags. -- See Note [Which Ids should be strictified] point 5) , not $ isFunTy ty -- If the var is strict already a seq is redundant. -- See Note [Which Ids should be strictified] point 4) , not (isStrictDmd dmd) || cbv_for_strict -- If the var is absent a seq is almost always useless. 
-- See Note [Which Ids should be strictified] point 3) , not (isAbsDmd dmd) = True | otherwise = False where ty = idType v dmd = idDemandInfo v {- ********************************************************************* * * unsafeEqualityProof * * ********************************************************************* -} isUnsafeEqualityCase :: CoreExpr -> Id -> [CoreAlt] -> Maybe CoreExpr -- See (U3) and (U4) in -- Note [Implementing unsafeCoerce] in base:Unsafe.Coerce isUnsafeEqualityCase scrut bndr alts | [Alt ac _ rhs] <- alts , DataAlt dc <- ac , dc `hasKey` unsafeReflDataConKey , isDeadBinder bndr -- We can only discard the case if the case-binder is dead -- It usually is, but see #18227 , Var v `App` _ `App` _ `App` _ <- scrut , v `hasKey` unsafeEqualityProofIdKey -- Check that the scrutinee really is unsafeEqualityProof -- and not, say, error = Just rhs | otherwise = Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/CoreToIface.hs0000644000000000000000000007510307346545000020373 0ustar0000000000000000{-# LANGUAGE Strict #-} -- See Note [Avoiding space leaks in toIface*] -- | Functions for converting Core things to interface file things. 
module GHC.CoreToIface ( -- * Binders toIfaceTvBndr , toIfaceTvBndrs , toIfaceIdBndr , toIfaceBndr , toIfaceTopBndr , toIfaceForAllBndr , toIfaceForAllBndrs , toIfaceTyVar -- * Types , toIfaceType, toIfaceTypeX , toIfaceKind , toIfaceTcArgs , toIfaceTyCon , toIfaceTyCon_name , toIfaceTyLit -- * Tidying types , tidyToIfaceType , tidyToIfaceContext , tidyToIfaceTcArgs -- * Coercions , toIfaceCoercion, toIfaceCoercionX -- * Pattern synonyms , patSynToIfaceDecl -- * Expressions , toIfaceExpr , toIfaceBang , toIfaceSrcBang , toIfaceLetBndr , toIfaceIdDetails , toIfaceIdInfo , toIfUnfolding , toIfaceTickish , toIfaceBind , toIfaceTopBind , toIfaceAlt , toIfaceCon , toIfaceApp , toIfaceVar -- * Other stuff , toIfaceLFInfo -- * CgBreakInfo , dehydrateCgBreakInfo ) where import GHC.Prelude import GHC.StgToCmm.Types import GHC.ByteCode.Types import GHC.Core import GHC.Core.TyCon hiding ( pprPromotionQuote ) import GHC.Core.Coercion.Axiom import GHC.Core.DataCon import GHC.Core.Type import GHC.Core.Multiplicity import GHC.Core.PatSyn import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.TyCo.Tidy ( tidyCo ) import GHC.Builtin.Types.Prim ( eqPrimTyCon, eqReprPrimTyCon ) import GHC.Builtin.Types ( heqTyCon ) import GHC.Iface.Syntax import GHC.Data.FastString import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Id.Make ( noinlineIdName, noinlineConstraintIdName ) import GHC.Types.Literal import GHC.Types.Name import GHC.Types.Basic import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Tickish import GHC.Types.Demand ( isNopSig ) import GHC.Types.Cpr ( topCprSig ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import Data.Maybe ( isNothing, catMaybes ) {- Note [Avoiding space leaks in toIface*] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Building a interface file depends on the output of the simplifier. 
If we build these lazily this would mean keeping the Core AST alive
much longer than necessary causing a space "leak".

This happens for example when we only write the interface file to disk
after code gen has run, in which case we might carry megabytes of core
AST in the heap which is no longer needed.

We avoid this in two ways.
* First we use -XStrict in GHC.CoreToIface which avoids many thunks
  to begin with.
* Second we define NFData instance for Iface syntax and use them to
  force any remaining thunks.

-XStrict is not sufficient as patterns of the form `f (g x)` would still
result in a thunk being allocated for `g x`.

NFData is sufficient for the space leak, but using -XStrict
reduces allocation by ~0.1% when compiling with -O. (nofib/spectral/simple,
T10370).
It's essentially free performance hence we use -XStrict on top of NFData.

MR !1633 on gitlab, has more discussion on the topic.
-}

----------------
toIfaceTvBndr :: TyVar -> IfaceTvBndr
toIfaceTvBndr = toIfaceTvBndrX emptyVarSet

-- Like 'toIfaceTvBndr', but the kind is translated with the given set of
-- variables treated as free (see 'toIfaceTypeX').
toIfaceTvBndrX :: VarSet -> TyVar -> IfaceTvBndr
toIfaceTvBndrX fr tyvar = ( mkIfLclName (occNameFS (getOccName tyvar))
                          , toIfaceTypeX fr (tyVarKind tyvar)
                          )

toIfaceTvBndrs :: [TyVar] -> [IfaceTvBndr]
toIfaceTvBndrs = map toIfaceTvBndr

toIfaceIdBndr :: Id -> IfaceIdBndr
toIfaceIdBndr = toIfaceIdBndrX emptyVarSet

-- Triple of (multiplicity, name, type) for an Id binder.
toIfaceIdBndrX :: VarSet -> CoVar -> IfaceIdBndr
toIfaceIdBndrX fr covar = ( toIfaceType (idMult covar)
                          , mkIfLclName (occNameFS (getOccName covar))
                          , toIfaceTypeX fr (varType covar)
                          )

toIfaceBndr :: Var -> IfaceBndr
toIfaceBndr var
  | isId var  = IfaceIdBndr (toIfaceIdBndr var)
  | otherwise = IfaceTvBndr (toIfaceTvBndr var)

toIfaceBndrX :: VarSet -> Var -> IfaceBndr
toIfaceBndrX fr var
  | isId var  = IfaceIdBndr (toIfaceIdBndrX fr var)
  | otherwise = IfaceTvBndr (toIfaceTvBndrX fr var)

toIfaceForAllBndrs :: [VarBndr TyCoVar vis] -> [VarBndr IfaceBndr vis]
toIfaceForAllBndrs = map toIfaceForAllBndr

toIfaceForAllBndr :: VarBndr TyCoVar flag -> VarBndr IfaceBndr flag
toIfaceForAllBndr = toIfaceForAllBndrX emptyVarSet

toIfaceForAllBndrX :: VarSet -> (VarBndr TyCoVar flag) -> (VarBndr IfaceBndr flag)
toIfaceForAllBndrX fr (Bndr v vis) = Bndr (toIfaceBndrX fr v) vis

{-
************************************************************************
*                                                                      *
        Conversion from Type to IfaceType
*                                                                      *
************************************************************************
-}

toIfaceKind :: Type -> IfaceType
toIfaceKind = toIfaceType

---------------------
toIfaceType :: Type -> IfaceType
toIfaceType = toIfaceTypeX emptyVarSet

toIfaceTypeX :: VarSet -> Type -> IfaceType
-- (toIfaceTypeX free ty)
--    translates the tyvars in 'free' as IfaceFreeTyVars
--
-- Synonyms are retained in the interface type
toIfaceTypeX fr (TyVarTy tv)   -- See Note [Free TyVars and CoVars in IfaceType] in GHC.Iface.Type
  | tv `elemVarSet` fr         = IfaceFreeTyVar tv
  | otherwise                  = IfaceTyVar (toIfaceTyVar tv)
toIfaceTypeX fr ty@(AppTy {})  =
  -- Flatten as many argument AppTys as possible, then turn them into an
  -- IfaceAppArgs list.
  -- See Note [Suppressing invisible arguments] in GHC.Iface.Type.
  let (head, args) = splitAppTys ty
  in IfaceAppTy (toIfaceTypeX fr head) (toIfaceAppTyArgsX fr head args)
toIfaceTypeX _  (LitTy n)      = IfaceLitTy (toIfaceTyLit n)
toIfaceTypeX fr (ForAllTy b t)
  -- The bound variable is no longer free in the body, hence delVarSet
  = IfaceForAllTy (toIfaceForAllBndrX fr b)
                  (toIfaceTypeX (fr `delVarSet` binderVar b) t)
toIfaceTypeX fr (FunTy { ft_arg = t1, ft_mult = w, ft_res = t2, ft_af = af })
  = IfaceFunTy af (toIfaceTypeX fr w) (toIfaceTypeX fr t1) (toIfaceTypeX fr t2)
toIfaceTypeX fr (CastTy ty co)  = IfaceCastTy (toIfaceTypeX fr ty) (toIfaceCoercionX fr co)
toIfaceTypeX fr (CoercionTy co) = IfaceCoercionTy (toIfaceCoercionX fr co)

toIfaceTypeX fr (TyConApp tc tys)
    -- tuples
  | Just sort <- tyConTuple_maybe tc
  , n_tys == arity
  = IfaceTupleTy sort NotPromoted (toIfaceTcArgsX fr tc tys)

  | Just dc <- isPromotedDataCon_maybe tc
  , isBoxedTupleDataCon dc
  , n_tys == 2*arity
    -- Promoted tuple data cons carry a kind argument per component,
    -- which we drop before printing
  = IfaceTupleTy BoxedTuple IsPromoted (toIfaceTcArgsX fr tc (drop arity tys))

  | tc `elem` [ eqPrimTyCon, eqReprPrimTyCon, heqTyCon ]
  , (k1:k2:_) <- tys
  = let info = mkIfaceTyConInfo NotPromoted sort
        sort | k1 `eqType` k2 = IfaceEqualityTyCon
             | otherwise      = IfaceNormalTyCon
    in IfaceTyConApp (IfaceTyCon (tyConName tc) info) (toIfaceTcArgsX fr tc tys)

    -- other applications
  | otherwise
  = IfaceTyConApp (toIfaceTyCon tc) (toIfaceTcArgsX fr tc tys)
  where
    arity = tyConArity tc
    n_tys = length tys

toIfaceTyVar :: TyVar -> IfLclName
toIfaceTyVar = mkIfLclName . occNameFS . getOccName

toIfaceCoVar :: CoVar -> IfLclName
toIfaceCoVar = mkIfLclName . occNameFS . getOccName

----------------
toIfaceTyCon :: TyCon -> IfaceTyCon
toIfaceTyCon tc
  = IfaceTyCon tc_name info
  where
    tc_name = tyConName tc
    info    = mkIfaceTyConInfo promoted sort
    promoted | isDataKindsPromotedDataCon tc = IsPromoted
             | otherwise                     = NotPromoted

    tupleSort :: TyCon -> Maybe IfaceTyConSort
    tupleSort tc' = case tyConTuple_maybe tc' of
      Just UnboxedTuple ->
        -- Unboxed-tuple tycons have twice the arity (a RuntimeRep
        -- argument per component), hence the `div` 2
        let arity = tyConArity tc' `div` 2
        in Just $ IfaceTupleTyCon arity UnboxedTuple
      Just sort ->
        let arity = tyConArity tc'
        in Just $ IfaceTupleTyCon arity sort
      Nothing -> Nothing

    sort
      | Just tsort <- tupleSort tc = tsort

      | Just dcon <- isPromotedDataCon_maybe tc
      , let tc' = dataConTyCon dcon
      , Just tsort <- tupleSort tc' = tsort

      | isUnboxedSumTyCon tc
      , Just cons <- tyConDataCons_maybe tc = IfaceSumTyCon (length cons)

      | otherwise = IfaceNormalTyCon

toIfaceTyCon_name :: Name -> IfaceTyCon
toIfaceTyCon_name n = IfaceTyCon n info
  where info = mkIfaceTyConInfo NotPromoted IfaceNormalTyCon
  -- Used for the "rough-match" tycon stuff,
  -- where pretty-printing is not an issue

toIfaceTyLit :: TyLit -> IfaceTyLit
toIfaceTyLit (NumTyLit x)  = IfaceNumTyLit x
toIfaceTyLit (StrTyLit x)  = IfaceStrTyLit (LexicalFastString x)
toIfaceTyLit (CharTyLit x) = IfaceCharTyLit x

----------------
toIfaceCoercion :: Coercion -> IfaceCoercion
toIfaceCoercion = toIfaceCoercionX emptyVarSet

toIfaceCoercionX :: VarSet -> Coercion -> IfaceCoercion
-- (toIfaceCoercionX free ty)
--    translates the tyvars in 'free' as IfaceFreeTyVars
toIfaceCoercionX fr co
  = go co
  where
    go_mco MRefl    = IfaceMRefl
    go_mco (MCo co) = IfaceMCo $ go co

    go (Refl ty)        = IfaceReflCo (toIfaceTypeX fr ty)
    go (GRefl r ty mco) = IfaceGReflCo r (toIfaceTypeX fr ty) (go_mco mco)
    go (CoVarCo cv)
      -- See Note [Free TyVars and CoVars in IfaceType] in GHC.Iface.Type
      | cv `elemVarSet` fr = IfaceFreeCoVar cv
      | otherwise          = IfaceCoVarCo (toIfaceCoVar cv)
    go (HoleCo h)        = IfaceHoleCo (coHoleCoVar h)
    go (AppCo co1 co2)   = IfaceAppCo (go co1) (go co2)
    go (SymCo co)        = IfaceSymCo (go co)
    go (TransCo co1 co2) = IfaceTransCo (go co1) (go co2)
    go (SelCo d co)      = IfaceSelCo d (go co)
    go (LRCo lr co)      = IfaceLRCo lr (go co)
    go (InstCo co arg)   = IfaceInstCo (go co) (go arg)
    go (KindCo c)        = IfaceKindCo (go c)
    go (SubCo co)        = IfaceSubCo (go co)
    go (AxiomCo ax cs)   = IfaceAxiomCo (toIfaceAxiomRule ax) (map go cs)
    go (UnivCo { uco_prov = p, uco_role = r, uco_lty = t1, uco_rty = t2, uco_deps = deps })
      = IfaceUnivCo p r (toIfaceTypeX fr t1) (toIfaceTypeX fr t2) (map go deps)
    go co@(TyConAppCo r tc cos)
      = assertPpr (isNothing (tyConAppFunCo_maybe r tc cos)) (ppr co) $
        IfaceTyConAppCo r (toIfaceTyCon tc) (map go cos)
    go (FunCo { fco_role = r, fco_mult = w, fco_arg = co1, fco_res = co2 })
      = IfaceFunCo r (go w) (go co1) (go co2)

    go (ForAllCo tv visL visR k co)
      = IfaceForAllCo (toIfaceBndr tv) visL visR
                      (toIfaceCoercionX fr' k) (toIfaceCoercionX fr' co)
      where
        -- tv is bound here, so it is no longer free in the body
        fr' = fr `delVarSet` tv

toIfaceAxiomRule :: CoAxiomRule -> IfaceAxiomRule
toIfaceAxiomRule (BuiltInFamRew  bif)  = IfaceAR_X (mkIfLclName (bifrw_name bif))
toIfaceAxiomRule (BuiltInFamInj  bif)  = IfaceAR_X (mkIfLclName (bifinj_name bif))
toIfaceAxiomRule (BranchedAxiom ax i)  = IfaceAR_B (coAxiomName ax) i
toIfaceAxiomRule (UnbranchedAxiom ax)  = IfaceAR_U (coAxiomName ax)

toIfaceTcArgs :: TyCon -> [Type] -> IfaceAppArgs
toIfaceTcArgs = toIfaceTcArgsX emptyVarSet

toIfaceTcArgsX :: VarSet -> TyCon -> [Type] -> IfaceAppArgs
toIfaceTcArgsX fr tc ty_args = toIfaceAppArgsX fr (tyConKind tc) ty_args

toIfaceAppTyArgsX :: VarSet -> Type -> [Type] -> IfaceAppArgs
toIfaceAppTyArgsX fr ty ty_args = toIfaceAppArgsX fr (typeKind ty) ty_args

toIfaceAppArgsX :: VarSet -> Kind -> [Type] -> IfaceAppArgs
-- See Note [Suppressing invisible arguments] in GHC.Iface.Type
-- We produce a result list of args describing visibility
-- The awkward case is
--    T :: forall k. * -> k
-- And consider
--    T (forall j. blah) * blib
-- Is 'blib' visible?  It depends on the visibility flag on j,
-- so we have to substitute for k.  Annoying!
toIfaceAppArgsX fr kind ty_args
  | null ty_args = IA_Nil
  | otherwise    = go (mkEmptySubst in_scope) kind ty_args
  where
    in_scope = mkInScopeSet (tyCoVarsOfTypes ty_args)

    -- Walk the kind and the arguments in lockstep, recording for each
    -- argument the visibility taken from the kind's binder.
    go _ _ [] = IA_Nil
    go env ty ts
      | Just ty' <- coreView ty
      = go env ty' ts
    go env (ForAllTy (Bndr tv vis) res) (t:ts)
      = IA_Arg t' vis ts'
      where
        t'  = toIfaceTypeX fr t
        -- Substitute the argument for the binder so later binders'
        -- visibility flags are instantiated correctly
        ts' = go (extendTCvSubst env tv t) res ts
    go env (FunTy { ft_af = af, ft_res = res }) (t:ts)
      = assert (isVisibleFunArg af)
        IA_Arg (toIfaceTypeX fr t) Required (go env res ts)
    go env ty ts@(t1:ts1)
      | not (isEmptyTCvSubst env)
      = go (zapSubst env) (substTy env ty) ts
        -- See Note [Care with kind instantiation] in GHC.Core.Type
      | otherwise
      = -- There's a kind error in the type we are trying to print
        -- e.g. kind = k, ty_args = [Int]
        -- This is probably a compiler bug, so we print a trace and
        -- carry on as if it were FunTy.  Without the test for
        -- isEmptyTCvSubst we'd get an infinite loop (#15473)
        warnPprTrace True "toIfaceAppArgsX" (ppr kind $$ ppr ty_args) $
        IA_Arg (toIfaceTypeX fr t1) Required (go env ty ts1)

tidyToIfaceType :: TidyEnv -> Type -> IfaceType
tidyToIfaceType env ty = toIfaceType (tidyType env ty)

tidyToIfaceTcArgs :: TidyEnv -> TyCon -> [Type] -> IfaceAppArgs
tidyToIfaceTcArgs env tc tys = toIfaceTcArgs tc (tidyTypes env tys)

tidyToIfaceContext :: TidyEnv -> ThetaType -> IfaceContext
tidyToIfaceContext env theta = map (tidyToIfaceType env) theta

{-
************************************************************************
*                                                                      *
        Conversion of pattern synonyms
*                                                                      *
************************************************************************
-}

patSynToIfaceDecl :: PatSyn -> IfaceDecl
patSynToIfaceDecl ps
  = IfacePatSyn { ifName          = getName $ ps
                , ifPatMatcher    = to_if_pr (patSynMatcher ps)
                , ifPatBuilder    = fmap to_if_pr (patSynBuilder ps)
                , ifPatIsInfix    = patSynIsInfix ps
                , ifPatUnivBndrs  = map toIfaceForAllBndr univ_bndrs'
                , ifPatExBndrs    = map toIfaceForAllBndr ex_bndrs'
                , ifPatProvCtxt   = tidyToIfaceContext env2 prov_theta
                , ifPatReqCtxt    = tidyToIfaceContext env2 req_theta
                , ifPatArgs       = map (tidyToIfaceType env2 . scaledThing) args
                , ifPatTy         = tidyToIfaceType env2 rhs_ty
                , ifFieldLabels   = (patSynFieldLabels ps)
                }
  where
    (_univ_tvs, req_theta, _ex_tvs, prov_theta, args, rhs_ty) = patSynSig ps
    univ_bndrs = patSynUnivTyVarBinders ps
    ex_bndrs   = patSynExTyVarBinders ps
    -- Tidy universal binders first, then existentials in the resulting env
    (env1, univ_bndrs') = tidyForAllTyBinders emptyTidyEnv univ_bndrs
    (env2, ex_bndrs')   = tidyForAllTyBinders env1 ex_bndrs
    -- Drop the type: only the name and the dummy-argument flag are serialised
    to_if_pr (name, _type, needs_dummy) = (name, needs_dummy)

{-
************************************************************************
*                                                                      *
        Conversion of other things
*                                                                      *
************************************************************************
-}

toIfaceBang :: TidyEnv -> HsImplBang -> IfaceBang
toIfaceBang _   HsLazy               = IfNoBang
toIfaceBang _   (HsUnpack Nothing)   = IfUnpack
toIfaceBang env (HsUnpack (Just co)) = IfUnpackCo (toIfaceCoercion (tidyCo env co))
toIfaceBang _   (HsStrict _)         = IfStrict

toIfaceSrcBang :: HsSrcBang -> IfaceSrcBang
toIfaceSrcBang (HsSrcBang _ (HsBang unpk bang)) = IfSrcBang unpk bang

toIfaceLetBndr :: Id -> IfaceLetBndr
toIfaceLetBndr id  = IfLetBndr (mkIfLclName (occNameFS (getOccName id)))
                               (toIfaceType (idType id))
                               (toIfaceIdInfo (idInfo id))
                               (idJoinPointHood id)
  -- Put into the interface file any IdInfo that GHC.Core.Tidy.tidyLetBndr
  -- has left on the Id.
  -- See Note [IdInfo on nested let-bindings] in GHC.Iface.Syntax

toIfaceTopBndr :: Id -> IfaceTopBndrInfo
toIfaceTopBndr id
  = if isExternalName name
      then IfGblTopBndr name
      else IfLclTopBndr (mkIfLclName (occNameFS (getOccName id))) (toIfaceType (idType id))
                        (toIfaceIdInfo (idInfo id)) (toIfaceIdDetails (idDetails id))
  where
    name = getName id

toIfaceIdDetails :: IdDetails -> IfaceIdDetails
toIfaceIdDetails VanillaId           = IfVanillaId
toIfaceIdDetails (WorkerLikeId dmds) = IfWorkerLikeId dmds
toIfaceIdDetails (DFunId {})         = IfDFunId
toIfaceIdDetails (RecSelId { sel_naughty = n
                           , sel_tycon = tc
                           , sel_fieldLabel = fl })
  = let (iface, first_con) = case tc of
          RecSelData ty_con ->
            ( Left (toIfaceTyCon ty_con)
            , dataConName $ head $ tyConDataCons ty_con)
          RecSelPatSyn pat_syn ->
            ( Right (patSynToIfaceDecl pat_syn), patSynName pat_syn)
    in IfRecSelId iface first_con n fl

  -- The remaining cases are all "implicit Ids" which don't
  -- appear in interface files at all
toIfaceIdDetails other = pprTrace "toIfaceIdDetails" (ppr other)
                         IfVanillaId -- Unexpected; the other

toIfaceIdInfo :: IdInfo -> IfaceIdInfo
toIfaceIdInfo id_info
  = catMaybes [arity_hsinfo, caf_hsinfo, strict_hsinfo, cpr_hsinfo,
               inline_hsinfo, unfold_hsinfo]
    -- NB: strictness and arity must appear in the list before unfolding
    -- See GHC.IfaceToCore.tcUnfolding
  where
    ------------ Arity --------------
    arity_info = arityInfo id_info
    arity_hsinfo | arity_info == 0 = Nothing
                 | otherwise       = Just (HsArity arity_info)

    ------------ Caf Info --------------
    caf_info   = cafInfo id_info
    caf_hsinfo = case caf_info of
                   NoCafRefs -> Just HsNoCafRefs
                   _other    -> Nothing

    ------------ Strictness --------------
        -- No point in explicitly exporting TopSig
    sig_info = dmdSigInfo id_info
    strict_hsinfo | not (isNopSig sig_info) = Just (HsDmdSig sig_info)
                  | otherwise               = Nothing

    ------------ CPR --------------
    cpr_info = cprSigInfo id_info
    cpr_hsinfo | cpr_info /= topCprSig = Just (HsCprSig cpr_info)
               | otherwise             = Nothing

    ------------ Unfolding --------------
    unfold_hsinfo = toIfUnfolding loop_breaker (realUnfoldingInfo id_info)
    loop_breaker  = isStrongLoopBreaker (occInfo id_info)

    ------------ Inline prag --------------
    inline_prag = inlinePragInfo id_info
    inline_hsinfo | isDefaultInlinePragma inline_prag = Nothing
                  | otherwise = Just (HsInline inline_prag)

--------------------------
toIfUnfolding :: Bool -> Unfolding -> Maybe IfaceInfoItem
toIfUnfolding lb (CoreUnfolding { uf_tmpl = rhs
                                , uf_src = src
                                , uf_cache = cache
                                , uf_guidance = guidance })
  = Just $ HsUnfold lb $
    IfCoreUnfold src cache (toIfGuidance src guidance) (toIfaceExpr rhs)
        -- Yes, even if guidance is UnfNever, expose the unfolding
        -- If we didn't want to expose the unfolding, GHC.Iface.Tidy would
        -- have stuck in NoUnfolding.  For supercompilation we want
        -- to see that unfolding!

toIfUnfolding lb (DFunUnfolding { df_bndrs = bndrs, df_args = args })
  = Just (HsUnfold lb (IfDFunUnfold (map toIfaceBndr bndrs) (map toIfaceExpr args)))
      -- No need to serialise the data constructor;
      -- we can recover it from the type of the dfun

toIfUnfolding _ (OtherCon {}) = Nothing
  -- The binding site of an Id doesn't have OtherCon, except perhaps
  -- where we have called trimUnfolding; and that evald'ness info is
  -- not needed by importing modules

toIfUnfolding _ BootUnfolding = Nothing
  -- Can't happen; we only have BootUnfolding for imported binders

toIfUnfolding _ NoUnfolding = Nothing

toIfGuidance :: UnfoldingSource -> UnfoldingGuidance -> IfGuidance
toIfGuidance src guidance
  | UnfWhen arity unsat_ok boring_ok <- guidance
  , isStableSource src = IfWhen arity unsat_ok boring_ok
  | otherwise          = IfNoGuidance

{-
************************************************************************
*                                                                      *
        Conversion of expressions
*                                                                      *
************************************************************************
-}

toIfaceExpr :: CoreExpr -> IfaceExpr
toIfaceExpr (Var v)                 = toIfaceVar v
toIfaceExpr (Lit (LitRubbish tc r)) = IfaceLitRubbish tc (toIfaceType r)
toIfaceExpr (Lit l)                 = IfaceLit l
toIfaceExpr (Type ty)               = IfaceType (toIfaceType ty)
toIfaceExpr (Coercion co)           = IfaceCo (toIfaceCoercion co)
toIfaceExpr (Lam x b)               = IfaceLam (toIfaceBndr x, toIfaceOneShot x) (toIfaceExpr b)
toIfaceExpr (App f a)               = toIfaceApp f [a]
toIfaceExpr (Case s x ty as)
  -- An empty-alternative case becomes IfaceECase
  | null as                         = IfaceECase (toIfaceExpr s) (toIfaceType ty)
  | otherwise                       = IfaceCase (toIfaceExpr s) (mkIfLclName (getOccFS x)) (map toIfaceAlt as)
toIfaceExpr (Let b e)               = IfaceLet (toIfaceBind b) (toIfaceExpr e)
toIfaceExpr (Cast e co)             = IfaceCast (toIfaceExpr e) (toIfaceCoercion co)
toIfaceExpr (Tick t e)              = IfaceTick (toIfaceTickish t) (toIfaceExpr e)

toIfaceOneShot :: Id -> IfaceOneShot
toIfaceOneShot id | isId id
                  , OneShotLam <- oneShotInfo (idInfo id)
                  = IfaceOneShot
                  | otherwise
                  = IfaceNoOneShot

---------------------
toIfaceTickish :: CoreTickish -> IfaceTickish
toIfaceTickish (ProfNote cc tick push) = IfaceSCC cc tick push
toIfaceTickish (HpcTick modl ix)       = IfaceHpcTick modl ix
toIfaceTickish (SourceNote src (LexicalFastString names)) = IfaceSource src names
toIfaceTickish (Breakpoint _ ix fv m)  = IfaceBreakpoint ix (toIfaceVar <$> fv) m

---------------------
toIfaceBind :: Bind Id -> IfaceBinding IfaceLetBndr
toIfaceBind (NonRec b r) = IfaceNonRec (toIfaceLetBndr b) (toIfaceExpr r)
toIfaceBind (Rec prs)    = IfaceRec [(toIfaceLetBndr b, toIfaceExpr r) | (b,r) <- prs]

toIfaceTopBind :: Bind Id -> IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo
toIfaceTopBind b =
  case b of
    NonRec b r -> uncurry IfaceNonRec (do_one (b, r))
    Rec prs -> IfaceRec (map do_one prs)
  where
    do_one (b, rhs) =
      let top_bndr = toIfaceTopBndr b
          rhs' = case top_bndr of
                  -- Use the existing unfolding for a global binder if we store that anyway.
                  -- See Note [Interface File with Core: Sharing RHSs]
                  IfGblTopBndr {} -> if already_has_unfolding b then IfUseUnfoldingRhs else IfRhs (toIfaceExpr rhs)
                  -- Local binders will have had unfoldings trimmed so have
                  -- to serialise the whole RHS.
                  IfLclTopBndr {} -> IfRhs (toIfaceExpr rhs)
      in (top_bndr, rhs')

    -- The sharing behaviour is currently disabled due to #22807, and relies on
    -- finished #20056 to be re-enabled.
    disabledDueTo22807 = True

    already_has_unfolding b = not disabledDueTo22807
                  && -- The identifier has an unfolding, which we are going to serialise anyway
                     hasCoreUnfolding (realIdUnfolding b)
                  -- But not a stable unfolding, we want the optimised unfoldings.
                  && not (isStableUnfolding (realIdUnfolding b))

---------------------
toIfaceAlt :: CoreAlt -> IfaceAlt
toIfaceAlt (Alt c bs r) = IfaceAlt (toIfaceCon c) (map (mkIfLclName . getOccFS) bs) (toIfaceExpr r)

---------------------
toIfaceCon :: AltCon -> IfaceConAlt
toIfaceCon (DataAlt dc) = IfaceDataAlt (getName dc)
toIfaceCon (LitAlt l)   = assertPpr (not (isLitRubbish l)) (ppr l) $
                          -- assert: see Note [Rubbish literals] wrinkle (b)
                          IfaceLitAlt l
toIfaceCon DEFAULT      = IfaceDefaultAlt

---------------------
toIfaceApp :: Expr CoreBndr -> [Arg CoreBndr] -> IfaceExpr
toIfaceApp (App f a) as = toIfaceApp f (a:as)
toIfaceApp (Var v) as
  = case isDataConWorkId_maybe v of
      -- We convert the *worker* for tuples into IfaceTuples
      Just dc |  saturated
              ,  Just tup_sort <- tyConTuple_maybe tc
              -> IfaceTuple tup_sort tup_args
        where
          val_args  = dropWhile isTypeArg as
          saturated = val_args `lengthIs` idArity v
          tup_args  = map toIfaceExpr val_args
          tc        = dataConTyCon dc

      _ -> mkIfaceApps (toIfaceVar v) as

toIfaceApp e as = mkIfaceApps (toIfaceExpr e) as

mkIfaceApps :: IfaceExpr -> [CoreExpr] -> IfaceExpr
mkIfaceApps f as = foldl' (\f a -> IfaceApp f (toIfaceExpr a)) f as

---------------------
toIfaceVar :: Id -> IfaceExpr
toIfaceVar v
    | isBootUnfolding (idUnfolding v)
    = -- See Note [Inlining and hs-boot files]
      IfaceApp (IfaceApp (IfaceExt noinline_id)
                         (IfaceType (toIfaceType ty)))
               (IfaceExt name) -- don't use mkIfaceApps, or infinite loop

    | Just fcall <- isFCallId_maybe v = IfaceFCall fcall (toIfaceType (idType v))
                                          -- Foreign calls have special syntax

    | isExternalName name             = IfaceExt name
    | otherwise                       = IfaceLcl (mkIfLclName (occNameFS $ nameOccName name))
  where
    name = idName v
    ty   = idType v
    noinline_id | isConstraintKind (typeKind ty) = noinlineConstraintIdName
                | otherwise                      = noinlineIdName

---------------------
toIfaceLFInfo :: Name -> LambdaFormInfo -> IfaceLFInfo
toIfaceLFInfo nm lfi = case lfi of
    LFReEntrant top_lvl arity no_fvs _arg_descr ->
      -- Exported LFReEntrant closures are top level, and top-level closures
      -- don't have free variables
      assertPpr (isTopLevel top_lvl) (ppr nm) $
      assertPpr no_fvs (ppr nm) $
      IfLFReEntrant arity
    LFThunk top_lvl no_fvs updatable sfi mb_fun ->
      -- Exported LFThunk closures are top level (which don't have free
      -- variables) and non-standard (see cgTopRhsClosure)
      assertPpr (isTopLevel top_lvl) (ppr nm) $
      assertPpr no_fvs (ppr nm) $
      assertPpr (sfi == NonStandardThunk) (ppr nm) $
      IfLFThunk updatable mb_fun
    LFCon dc ->
      IfLFCon (dataConName dc)
    LFUnknown mb_fun ->
      IfLFUnknown mb_fun
    LFUnlifted ->
      IfLFUnlifted
    LFLetNoEscape ->
      panic "toIfaceLFInfo: LFLetNoEscape"

-- Dehydrating CgBreakInfo

dehydrateCgBreakInfo :: [TyVar] -> [Maybe (Id, Word)] -> Type -> CgBreakInfo
dehydrateCgBreakInfo ty_vars idOffSets tick_ty =
          CgBreakInfo
            { cgb_tyvars = map toIfaceTvBndr ty_vars
            , cgb_vars = map (fmap (\(i, offset) -> (toIfaceIdBndr i, offset))) idOffSets
            , cgb_resty = toIfaceType tick_ty
            }

{- Note [Inlining and hs-boot files]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this example (#10083, #12789):

    ---------- RSR.hs-boot ------------
    module RSR where
      data RSR
      eqRSR :: RSR -> RSR -> Bool

    ---------- SR.hs ------------
    module SR where
      import {-# SOURCE #-} RSR
      data SR = MkSR RSR
      eqSR (MkSR r1) (MkSR r2) = eqRSR r1 r2

    ---------- RSR.hs ------------
    module RSR where
      import SR
      data RSR = MkRSR SR -- deriving( Eq )
      eqRSR (MkRSR s1) (MkRSR s2) = (eqSR s1 s2)
      foo x y = not (eqRSR x y)

When compiling RSR we get this code

    RSR.eqRSR :: RSR -> RSR -> Bool
    RSR.eqRSR = \ (ds1 :: RSR.RSR) (ds2 :: RSR.RSR) ->
                case ds1 of _ { RSR.MkRSR
s1 ->
                case ds2 of _ { RSR.MkRSR s2 ->
                SR.eqSR s1 s2 }}

    RSR.foo :: RSR -> RSR -> Bool
    RSR.foo = \ (x :: RSR) (y :: RSR) -> not (RSR.eqRSR x y)

Now, when optimising foo:
    Inline eqRSR (small, non-rec)
    Inline eqSR  (small, non-rec)
but the result of inlining eqSR from SR is another call to eqRSR, so
everything repeats.  Neither eqSR nor eqRSR are (apparently) loop
breakers.

Solution: in the unfolding of eqSR in SR.hi, replace `eqRSR` in SR
with `noinline eqRSR`, so that eqRSR doesn't get inlined.  This means
that when GHC inlines `eqSR`, it will not also inline `eqRSR`, exactly
as would have been the case if `foo` had been defined in SR.hs (and
marked as a loop-breaker).

But how do we arrange for this to happen?  There are two ingredients:

    1. When we serialize out unfoldings to IfaceExprs (toIfaceVar),
    for every variable reference we see if we are referring to an
    'Id' that came from an hs-boot file.  If so, we add a `noinline`
    to the reference.
    See Note [noinlineId magic] in GHC.Types.Id.Make

    2. But how do we know if a reference came from an hs-boot file
    or not?  We could record this directly in the 'IdInfo', but
    actually we deduce this by looking at the unfolding: 'Id's
    that come from boot files are given a special unfolding
    (upon typechecking) 'BootUnfolding' which says that there is
    no unfolding, and the reason is because the 'Id' came from
    a boot file.

Here is a solution that doesn't work: when compiling RSR, add a
NOINLINE pragma to every function exported by the boot-file for RSR
(if it exists).  Doing so makes the bootstrapped GHC itself slower by
8% overall (on #9872a-d, and T1969): the reason is that these
NOINLINE'd functions now can't be profitably inlined outside of the
hs-boot loop.
In order to avoid duplicating definitions for bindings which already have
unfoldings, we do some minor headstands to avoid serialising the RHS of a
definition if it has *any* unfolding.

* Only global things have unfoldings, because local things have had their
  unfoldings stripped.
* For any global thing which has an unstable unfolding, we just use that.

In order to implement this sharing:

* When creating the interface, check the criteria above and don't serialise
  the RHS in such a case; see `toIfaceTopBind`.
* When reading an interface, look at the realIdUnfolding, and then the
  unfoldingTemplate.  See `tc_iface_binding` for where this happens.

There are two main reasons why the mi_extra_decls field exists rather than
shoe-horning all the core bindings into mi_decls:

1. mi_extra_decls retains the recursive group structure of the original program
   which is very convenient as otherwise we would have to do the analysis again
   when loading the program.
2. There are additional local top-level bindings which don't make it into
   mi_decls.  It's best to keep these separate from mi_decls as mi_decls is
   used to compute the ABI hash.
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/CoreToIface.hs-boot0000644000000000000000000000141107346545000021323 0ustar0000000000000000module GHC.CoreToIface where import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, TyLit, Coercion ) import {-# SOURCE #-} GHC.Iface.Type( IfaceType, IfaceTyCon, IfaceBndr , IfaceCoercion, IfaceTyLit, IfaceAppArgs ) import GHC.Types.Var ( VarBndr, TyCoVar ) import GHC.Types.Var.Env ( TidyEnv ) import GHC.Core.TyCon ( TyCon ) import GHC.Types.Var.Set( VarSet ) -- For GHC.Core.TyCo.Rep toIfaceTypeX :: VarSet -> Type -> IfaceType toIfaceTyLit :: TyLit -> IfaceTyLit toIfaceForAllBndrs :: [VarBndr TyCoVar flag] -> [VarBndr IfaceBndr flag] toIfaceTyCon :: TyCon -> IfaceTyCon toIfaceTcArgs :: TyCon -> [Type] -> IfaceAppArgs toIfaceCoercionX :: VarSet -> Coercion -> IfaceCoercion tidyToIfaceTcArgs :: TidyEnv -> TyCon -> [Type] -> IfaceAppArgs ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/0000755000000000000000000000000007346545000016557 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Bag.hs0000644000000000000000000003400307346545000017604 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Bag: an unordered collection with duplicates -} {-# LANGUAGE ScopedTypeVariables, DeriveTraversable, TypeFamilies #-} {-# OPTIONS_GHC -Wno-unrecognised-warning-flags -Wno-x-data-list-nonempty-unzip #-} module GHC.Data.Bag ( Bag, -- abstract type emptyBag, unitBag, unionBags, unionManyBags, mapBag, pprBag, elemBag, lengthBag, filterBag, partitionBag, partitionBagWith, concatBag, catBagMaybes, foldBag, isEmptyBag, isSingletonBag, consBag, snocBag, anyBag, allBag, listToBag, nonEmptyToBag, bagToList, headMaybe, mapAccumBagL, concatMapBag, concatMapBagPair, mapMaybeBag, mapMaybeBagM, unzipBag, mapBagM, mapBagM_, lookupBag, flatMapBagM, flatMapBagPairM, mapAndUnzipBagM, mapAccumBagLM, anyBagM, filterBagM ) where import GHC.Prelude import GHC.Exts ( IsList(..) 
                 )
import GHC.Utils.Outputable
import GHC.Utils.Misc
import GHC.Utils.Monad
import Control.Monad
import Data.Data
import Data.Maybe( mapMaybe )
import Data.List ( partition, mapAccumL )
import Data.List.NonEmpty ( NonEmpty(..) )
import qualified Data.List.NonEmpty as NE
import qualified Data.Semigroup ( (<>) )
import Control.Applicative( Alternative( (<|>) ) )
import Control.DeepSeq

infixr 3 `consBag`
infixl 3 `snocBag`

-- | An unordered collection that may contain duplicates.
-- Represented as a tree of unions so that 'unionBags' is O(1).
data Bag a
  = EmptyBag
  | UnitBag a
  | TwoBags (Bag a) (Bag a) -- INVARIANT: neither branch is empty
  | ListBag (NonEmpty a)
  deriving (Foldable, Functor, Traversable)

instance NFData a => NFData (Bag a) where
  rnf EmptyBag = ()
  rnf (UnitBag a) = rnf a
  rnf (TwoBags a b) = rnf a `seq` rnf b
  rnf (ListBag a) = rnf a

emptyBag :: Bag a
emptyBag = EmptyBag

unitBag :: a -> Bag a
unitBag = UnitBag

-- O(n) in the size of the bag
lengthBag :: Bag a -> Int
lengthBag EmptyBag        = 0
lengthBag (UnitBag {})    = 1
lengthBag (TwoBags b1 b2) = lengthBag b1 + lengthBag b2
lengthBag (ListBag xs)    = length xs

-- Linear-time membership test
elemBag :: Eq a => a -> Bag a -> Bool
elemBag _ EmptyBag        = False
elemBag x (UnitBag y)     = x == y
elemBag x (TwoBags b1 b2) = x `elemBag` b1 || x `elemBag` b2
elemBag x (ListBag ys)    = any (x ==) ys

unionManyBags :: [Bag a] -> Bag a
unionManyBags xs = foldr unionBags EmptyBag xs

-- This one is a bit stricter! The bag will get completely evaluated.
unionBags :: Bag a -> Bag a -> Bag a unionBags EmptyBag b = b unionBags b EmptyBag = b unionBags b1 b2 = TwoBags b1 b2 consBag :: a -> Bag a -> Bag a snocBag :: Bag a -> a -> Bag a consBag elt bag = (unitBag elt) `unionBags` bag snocBag bag elt = bag `unionBags` (unitBag elt) isEmptyBag :: Bag a -> Bool isEmptyBag EmptyBag = True isEmptyBag _ = False isSingletonBag :: Bag a -> Bool isSingletonBag EmptyBag = False isSingletonBag (UnitBag _) = True isSingletonBag (TwoBags _ _) = False -- Neither is empty isSingletonBag (ListBag (_:|xs)) = null xs filterBag :: (a -> Bool) -> Bag a -> Bag a filterBag _ EmptyBag = EmptyBag filterBag pred b@(UnitBag val) = if pred val then b else EmptyBag filterBag pred (TwoBags b1 b2) = sat1 `unionBags` sat2 where sat1 = filterBag pred b1 sat2 = filterBag pred b2 filterBag pred (ListBag vs) = listToBag (filter pred (toList vs)) filterBagM :: Monad m => (a -> m Bool) -> Bag a -> m (Bag a) filterBagM _ EmptyBag = return EmptyBag filterBagM pred b@(UnitBag val) = do flag <- pred val if flag then return b else return EmptyBag filterBagM pred (TwoBags b1 b2) = do sat1 <- filterBagM pred b1 sat2 <- filterBagM pred b2 return (sat1 `unionBags` sat2) filterBagM pred (ListBag vs) = do sat <- filterM pred (toList vs) return (listToBag sat) {-# INLINEABLE filterBagM #-} lookupBag :: Eq a => a -> Bag (a,b) -> Maybe b lookupBag _ EmptyBag = Nothing lookupBag k (UnitBag kv) = lookup_one k kv lookupBag k (TwoBags b1 b2) = lookupBag k b1 <|> lookupBag k b2 lookupBag k (ListBag xs) = foldr ((<|>) . 
lookup_one k) Nothing xs {-# INLINEABLE lookupBag #-} lookup_one :: Eq a => a -> (a,b) -> Maybe b lookup_one k (k',v) | k==k' = Just v | otherwise = Nothing allBag :: (a -> Bool) -> Bag a -> Bool allBag _ EmptyBag = True allBag p (UnitBag v) = p v allBag p (TwoBags b1 b2) = allBag p b1 && allBag p b2 allBag p (ListBag xs) = all p xs anyBag :: (a -> Bool) -> Bag a -> Bool anyBag _ EmptyBag = False anyBag p (UnitBag v) = p v anyBag p (TwoBags b1 b2) = anyBag p b1 || anyBag p b2 anyBag p (ListBag xs) = any p xs anyBagM :: Monad m => (a -> m Bool) -> Bag a -> m Bool anyBagM _ EmptyBag = return False anyBagM p (UnitBag v) = p v anyBagM p (TwoBags b1 b2) = do flag <- anyBagM p b1 if flag then return True else anyBagM p b2 anyBagM p (ListBag xs) = anyM p xs {-# INLINEABLE anyBagM #-} concatBag :: Bag (Bag a) -> Bag a concatBag = foldr unionBags emptyBag catBagMaybes :: Bag (Maybe a) -> Bag a catBagMaybes bs = foldr add emptyBag bs where add Nothing rs = rs add (Just x) rs = x `consBag` rs partitionBag :: (a -> Bool) -> Bag a -> (Bag a {- Satisfy predicate -}, Bag a {- Don't -}) partitionBag _ EmptyBag = (EmptyBag, EmptyBag) partitionBag pred b@(UnitBag val) = if pred val then (b, EmptyBag) else (EmptyBag, b) partitionBag pred (TwoBags b1 b2) = (sat1 `unionBags` sat2, fail1 `unionBags` fail2) where (sat1, fail1) = partitionBag pred b1 (sat2, fail2) = partitionBag pred b2 partitionBag pred (ListBag vs) = (listToBag sats, listToBag fails) where (sats, fails) = partition pred (toList vs) partitionBagWith :: (a -> Either b c) -> Bag a -> (Bag b {- Left -}, Bag c {- Right -}) partitionBagWith _ EmptyBag = (EmptyBag, EmptyBag) partitionBagWith pred (UnitBag val) = case pred val of Left a -> (UnitBag a, EmptyBag) Right b -> (EmptyBag, UnitBag b) partitionBagWith pred (TwoBags b1 b2) = (sat1 `unionBags` sat2, fail1 `unionBags` fail2) where (sat1, fail1) = partitionBagWith pred b1 (sat2, fail2) = partitionBagWith pred b2 partitionBagWith pred (ListBag vs) = (listToBag sats, 
listToBag fails) where (sats, fails) = partitionWith pred (toList vs) foldBag :: (r -> r -> r) -- Replace TwoBags with this; should be associative -> (a -> r) -- Replace UnitBag with this -> r -- Replace EmptyBag with this -> Bag a -> r {- Standard definition foldBag t u e EmptyBag = e foldBag t u e (UnitBag x) = u x foldBag t u e (TwoBags b1 b2) = (foldBag t u e b1) `t` (foldBag t u e b2) foldBag t u e (ListBag xs) = foldr (t.u) e xs -} -- More tail-recursive definition, exploiting associativity of "t" foldBag _ _ e EmptyBag = e foldBag t u e (UnitBag x) = u x `t` e foldBag t u e (TwoBags b1 b2) = foldBag t u (foldBag t u e b2) b1 foldBag t u e (ListBag xs) = foldr (t.u) e xs mapBag :: (a -> b) -> Bag a -> Bag b mapBag = fmap concatMapBag :: (a -> Bag b) -> Bag a -> Bag b concatMapBag _ EmptyBag = EmptyBag concatMapBag f (UnitBag x) = f x concatMapBag f (TwoBags b1 b2) = unionBags (concatMapBag f b1) (concatMapBag f b2) concatMapBag f (ListBag xs) = foldr (unionBags . f) emptyBag xs concatMapBagPair :: (a -> (Bag b, Bag c)) -> Bag a -> (Bag b, Bag c) concatMapBagPair _ EmptyBag = (EmptyBag, EmptyBag) concatMapBagPair f (UnitBag x) = f x concatMapBagPair f (TwoBags b1 b2) = (unionBags r1 r2, unionBags s1 s2) where (r1, s1) = concatMapBagPair f b1 (r2, s2) = concatMapBagPair f b2 concatMapBagPair f (ListBag xs) = foldr go (emptyBag, emptyBag) xs where go a (s1, s2) = (unionBags r1 s1, unionBags r2 s2) where (r1, r2) = f a mapMaybeBag :: (a -> Maybe b) -> Bag a -> Bag b mapMaybeBag _ EmptyBag = EmptyBag mapMaybeBag f (UnitBag x) = case f x of Nothing -> EmptyBag Just y -> UnitBag y mapMaybeBag f (TwoBags b1 b2) = unionBags (mapMaybeBag f b1) (mapMaybeBag f b2) mapMaybeBag f (ListBag xs) = listToBag $ mapMaybe f (toList xs) mapMaybeBagM :: Monad m => (a -> m (Maybe b)) -> Bag a -> m (Bag b) mapMaybeBagM _ EmptyBag = return EmptyBag mapMaybeBagM f (UnitBag x) = do r <- f x return $ case r of Nothing -> EmptyBag Just y -> UnitBag y mapMaybeBagM f (TwoBags b1 b2) = do r1 
<- mapMaybeBagM f b1 r2 <- mapMaybeBagM f b2 return $ unionBags r1 r2 mapMaybeBagM f (ListBag xs) = listToBag <$> mapMaybeM f (toList xs) mapBagM :: Monad m => (a -> m b) -> Bag a -> m (Bag b) mapBagM _ EmptyBag = return EmptyBag mapBagM f (UnitBag x) = do r <- f x return (UnitBag r) mapBagM f (TwoBags b1 b2) = do r1 <- mapBagM f b1 r2 <- mapBagM f b2 return (TwoBags r1 r2) mapBagM f (ListBag xs) = do rs <- mapM f xs return (ListBag rs) {-# INLINEABLE mapBagM #-} mapBagM_ :: Monad m => (a -> m b) -> Bag a -> m () mapBagM_ _ EmptyBag = return () mapBagM_ f (UnitBag x) = f x >> return () mapBagM_ f (TwoBags b1 b2) = mapBagM_ f b1 >> mapBagM_ f b2 mapBagM_ f (ListBag xs) = mapM_ f xs {-# INLINEABLE mapBagM_ #-} flatMapBagM :: Monad m => (a -> m (Bag b)) -> Bag a -> m (Bag b) flatMapBagM _ EmptyBag = return EmptyBag flatMapBagM f (UnitBag x) = f x flatMapBagM f (TwoBags b1 b2) = do r1 <- flatMapBagM f b1 r2 <- flatMapBagM f b2 return (r1 `unionBags` r2) flatMapBagM f (ListBag xs) = foldrM k EmptyBag xs where k x b2 = do { b1 <- f x; return (b1 `unionBags` b2) } {-# INLINEABLE flatMapBagM #-} flatMapBagPairM :: Monad m => (a -> m (Bag b, Bag c)) -> Bag a -> m (Bag b, Bag c) flatMapBagPairM _ EmptyBag = return (EmptyBag, EmptyBag) flatMapBagPairM f (UnitBag x) = f x flatMapBagPairM f (TwoBags b1 b2) = do (r1,s1) <- flatMapBagPairM f b1 (r2,s2) <- flatMapBagPairM f b2 return (r1 `unionBags` r2, s1 `unionBags` s2) flatMapBagPairM f (ListBag xs) = foldrM k (EmptyBag, EmptyBag) xs where k x (r2,s2) = do { (r1,s1) <- f x ; return (r1 `unionBags` r2, s1 `unionBags` s2) } {-# INLINEABLE flatMapBagPairM #-} mapAndUnzipBagM :: Monad m => (a -> m (b,c)) -> Bag a -> m (Bag b, Bag c) mapAndUnzipBagM _ EmptyBag = return (EmptyBag, EmptyBag) mapAndUnzipBagM f (UnitBag x) = do (r,s) <- f x return (UnitBag r, UnitBag s) mapAndUnzipBagM f (TwoBags b1 b2) = do (r1,s1) <- mapAndUnzipBagM f b1 (r2,s2) <- mapAndUnzipBagM f b2 return (TwoBags r1 r2, TwoBags s1 s2) mapAndUnzipBagM f (ListBag 
xs) = do ts <- mapM f xs let (rs,ss) = NE.unzip ts return (ListBag rs, ListBag ss) {-# INLINEABLE mapAndUnzipBagM #-} mapAccumBagL ::(acc -> x -> (acc, y)) -- ^ combining function -> acc -- ^ initial state -> Bag x -- ^ inputs -> (acc, Bag y) -- ^ final state, outputs mapAccumBagL _ s EmptyBag = (s, EmptyBag) mapAccumBagL f s (UnitBag x) = let (s1, x1) = f s x in (s1, UnitBag x1) mapAccumBagL f s (TwoBags b1 b2) = let (s1, b1') = mapAccumBagL f s b1 (s2, b2') = mapAccumBagL f s1 b2 in (s2, TwoBags b1' b2') mapAccumBagL f s (ListBag xs) = let (s', xs') = mapAccumL f s xs in (s', ListBag xs') mapAccumBagLM :: Monad m => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> Bag x -- ^ inputs -> m (acc, Bag y) -- ^ final state, outputs mapAccumBagLM _ s EmptyBag = return (s, EmptyBag) mapAccumBagLM f s (UnitBag x) = do { (s1, x1) <- f s x; return (s1, UnitBag x1) } mapAccumBagLM f s (TwoBags b1 b2) = do { (s1, b1') <- mapAccumBagLM f s b1 ; (s2, b2') <- mapAccumBagLM f s1 b2 ; return (s2, TwoBags b1' b2') } mapAccumBagLM f s (ListBag xs) = do { (s', xs') <- mapAccumLM f s xs ; return (s', ListBag xs') } {-# INLINEABLE mapAccumBagLM #-} listToBag :: [a] -> Bag a listToBag [] = EmptyBag listToBag [x] = UnitBag x listToBag (x:xs) = ListBag (x:|xs) nonEmptyToBag :: NonEmpty a -> Bag a nonEmptyToBag (x :| []) = UnitBag x nonEmptyToBag xs = ListBag xs bagToList :: Bag a -> [a] bagToList b = foldr (:) [] b unzipBag :: Bag (a, b) -> (Bag a, Bag b) unzipBag EmptyBag = (EmptyBag, EmptyBag) unzipBag (UnitBag (a, b)) = (UnitBag a, UnitBag b) unzipBag (TwoBags xs1 xs2) = (TwoBags as1 as2, TwoBags bs1 bs2) where (as1, bs1) = unzipBag xs1 (as2, bs2) = unzipBag xs2 unzipBag (ListBag xs) = (ListBag as, ListBag bs) where (as, bs) = NE.unzip xs headMaybe :: Bag a -> Maybe a headMaybe EmptyBag = Nothing headMaybe (UnitBag v) = Just v headMaybe (TwoBags b1 _) = headMaybe b1 headMaybe (ListBag (v:|_)) = Just v instance (Outputable a) => Outputable (Bag a) where ppr = 
pprBag pprBag :: Outputable a => Bag a -> SDoc pprBag bag = braces (pprWithCommas ppr (bagToList bag)) instance Data a => Data (Bag a) where gfoldl k z b = z listToBag `k` bagToList b -- traverse abstract type abstractly toConstr _ = abstractConstr $ "Bag("++show (typeOf (undefined::a))++")" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Bag" dataCast1 x = gcast1 x instance IsList (Bag a) where type Item (Bag a) = a fromList = listToBag toList = bagToList instance Semigroup (Bag a) where (<>) = unionBags instance Monoid (Bag a) where mempty = emptyBag ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Bool.hs0000644000000000000000000000072507346545000020012 0ustar0000000000000000module GHC.Data.Bool ( OverridingBool(..) , overrideWith ) where import GHC.Prelude.Basic data OverridingBool = Auto | Never | Always deriving ( Show , Read -- ^ @since 9.4.1 , Eq -- ^ @since 9.4.1 , Ord -- ^ @since 9.4.1 , Enum -- ^ @since 9.4.1 , Bounded -- ^ @since 9.4.1 ) overrideWith :: Bool -> OverridingBool -> Bool overrideWith b Auto = b overrideWith _ Never = False overrideWith _ Always = True ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/BooleanFormula.hs0000644000000000000000000002226507346545000022027 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} -------------------------------------------------------------------------------- -- | Boolean formulas without quantifiers and without negation. -- Such a formula consists of variables, conjunctions (and), and disjunctions (or). -- -- This module is used to represent minimal complete definitions for classes. 
-- module GHC.Data.BooleanFormula ( BooleanFormula(..), LBooleanFormula, mkFalse, mkTrue, mkAnd, mkOr, mkVar, isFalse, isTrue, eval, simplify, isUnsatisfied, implies, impliesAtom, pprBooleanFormula, pprBooleanFormulaNice ) where import GHC.Prelude hiding ( init, last ) import Data.List ( nub, intersperse ) import Data.List.NonEmpty ( NonEmpty (..), init, last ) import Data.Data import GHC.Utils.Monad import GHC.Utils.Outputable import GHC.Parser.Annotation ( LocatedL ) import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Types.Unique.Set ---------------------------------------------------------------------- -- Boolean formula type and smart constructors ---------------------------------------------------------------------- type LBooleanFormula a = LocatedL (BooleanFormula a) data BooleanFormula a = Var a | And [LBooleanFormula a] | Or [LBooleanFormula a] | Parens (LBooleanFormula a) deriving (Eq, Data, Functor, Foldable, Traversable) mkVar :: a -> BooleanFormula a mkVar = Var mkFalse, mkTrue :: BooleanFormula a mkFalse = Or [] mkTrue = And [] -- Convert a Bool to a BooleanFormula mkBool :: Bool -> BooleanFormula a mkBool False = mkFalse mkBool True = mkTrue -- Make a conjunction, and try to simplify mkAnd :: Eq a => [LBooleanFormula a] -> BooleanFormula a mkAnd = maybe mkFalse (mkAnd' . nub) . concatMapM fromAnd where -- See Note [Simplification of BooleanFormulas] fromAnd :: LBooleanFormula a -> Maybe [LBooleanFormula a] fromAnd (L _ (And xs)) = Just xs -- assume that xs are already simplified -- otherwise we would need: fromAnd (And xs) = concat <$> traverse fromAnd xs fromAnd (L _ (Or [])) = Nothing -- in case of False we bail out, And [..,mkFalse,..] == mkFalse fromAnd x = Just [x] mkAnd' [x] = unLoc x mkAnd' xs = And xs mkOr :: Eq a => [LBooleanFormula a] -> BooleanFormula a mkOr = maybe mkTrue (mkOr' . nub) . 
concatMapM fromOr where -- See Note [Simplification of BooleanFormulas] fromOr (L _ (Or xs)) = Just xs fromOr (L _ (And [])) = Nothing fromOr x = Just [x] mkOr' [x] = unLoc x mkOr' xs = Or xs {- Note [Simplification of BooleanFormulas] ~~~~~~~~~~~~~~~~~~~~~~ The smart constructors (`mkAnd` and `mkOr`) do some attempt to simplify expressions. In particular, 1. Collapsing nested ands and ors, so `(mkAnd [x, And [y,z]]` is represented as `And [x,y,z]` Implemented by `fromAnd`/`fromOr` 2. Collapsing trivial ands and ors, so `mkAnd [x]` becomes just `x`. Implemented by mkAnd' / mkOr' 3. Conjunction with false, disjunction with true is simplified, i.e. `mkAnd [mkFalse,x]` becomes `mkFalse`. 4. Common subexpression elimination: `mkAnd [x,x,y]` is reduced to just `mkAnd [x,y]`. This simplification is not exhaustive, in the sense that it will not produce the smallest possible equivalent expression. For example, `Or [And [x,y], And [x]]` could be simplified to `And [x]`, but it currently is not. A general simplifier would need to use something like BDDs. The reason behind the (crude) simplifier is to make for more user friendly error messages. E.g. for the code > class Foo a where > {-# MINIMAL bar, (foo, baq | foo, quux) #-} > instance Foo Int where > bar = ... > baz = ... > quux = ... We don't show a ridiculous error message like Implement () and (either (`foo' and ()) or (`foo' and ())) -} ---------------------------------------------------------------------- -- Evaluation and simplification ---------------------------------------------------------------------- isFalse :: BooleanFormula a -> Bool isFalse (Or []) = True isFalse _ = False isTrue :: BooleanFormula a -> Bool isTrue (And []) = True isTrue _ = False eval :: (a -> Bool) -> BooleanFormula a -> Bool eval f (Var x) = f x eval f (And xs) = all (eval f . unLoc) xs eval f (Or xs) = any (eval f . unLoc) xs eval f (Parens x) = eval f (unLoc x) -- Simplify a boolean formula. 
-- The argument function should give the truth of the atoms, or Nothing if undecided. simplify :: Eq a => (a -> Maybe Bool) -> BooleanFormula a -> BooleanFormula a simplify f (Var a) = case f a of Nothing -> Var a Just b -> mkBool b simplify f (And xs) = mkAnd (map (\(L l x) -> L l (simplify f x)) xs) simplify f (Or xs) = mkOr (map (\(L l x) -> L l (simplify f x)) xs) simplify f (Parens x) = simplify f (unLoc x) -- Test if a boolean formula is satisfied when the given values are assigned to the atoms -- if it is, returns Nothing -- if it is not, return (Just remainder) isUnsatisfied :: Eq a => (a -> Bool) -> BooleanFormula a -> Maybe (BooleanFormula a) isUnsatisfied f bf | isTrue bf' = Nothing | otherwise = Just bf' where f' x = if f x then Just True else Nothing bf' = simplify f' bf -- prop_simplify: -- eval f x == True <==> isTrue (simplify (Just . f) x) -- eval f x == False <==> isFalse (simplify (Just . f) x) -- If the boolean formula holds, does that mean that the given atom is always true? 
impliesAtom :: Eq a => BooleanFormula a -> a -> Bool Var x `impliesAtom` y = x == y And xs `impliesAtom` y = any (\x -> (unLoc x) `impliesAtom` y) xs -- we have all of xs, so one of them implying y is enough Or xs `impliesAtom` y = all (\x -> (unLoc x) `impliesAtom` y) xs Parens x `impliesAtom` y = (unLoc x) `impliesAtom` y implies :: Uniquable a => BooleanFormula a -> BooleanFormula a -> Bool implies e1 e2 = go (Clause emptyUniqSet [e1]) (Clause emptyUniqSet [e2]) where go :: Uniquable a => Clause a -> Clause a -> Bool go l@Clause{ clauseExprs = hyp:hyps } r = case hyp of Var x | memberClauseAtoms x r -> True | otherwise -> go (extendClauseAtoms l x) { clauseExprs = hyps } r Parens hyp' -> go l { clauseExprs = unLoc hyp':hyps } r And hyps' -> go l { clauseExprs = map unLoc hyps' ++ hyps } r Or hyps' -> all (\hyp' -> go l { clauseExprs = unLoc hyp':hyps } r) hyps' go l r@Clause{ clauseExprs = con:cons } = case con of Var x | memberClauseAtoms x l -> True | otherwise -> go l (extendClauseAtoms r x) { clauseExprs = cons } Parens con' -> go l r { clauseExprs = unLoc con':cons } And cons' -> all (\con' -> go l r { clauseExprs = unLoc con':cons }) cons' Or cons' -> go l r { clauseExprs = map unLoc cons' ++ cons } go _ _ = False -- A small sequent calculus proof engine. 
data Clause a = Clause { clauseAtoms :: UniqSet a, clauseExprs :: [BooleanFormula a] } extendClauseAtoms :: Uniquable a => Clause a -> a -> Clause a extendClauseAtoms c x = c { clauseAtoms = addOneToUniqSet (clauseAtoms c) x } memberClauseAtoms :: Uniquable a => a -> Clause a -> Bool memberClauseAtoms x c = x `elementOfUniqSet` clauseAtoms c ---------------------------------------------------------------------- -- Pretty printing ---------------------------------------------------------------------- -- Pretty print a BooleanFormula, -- using the arguments as pretty printers for Var, And and Or respectively pprBooleanFormula' :: (Rational -> a -> SDoc) -> (Rational -> [SDoc] -> SDoc) -> (Rational -> [SDoc] -> SDoc) -> Rational -> BooleanFormula a -> SDoc pprBooleanFormula' pprVar pprAnd pprOr = go where go p (Var x) = pprVar p x go p (And []) = cparen (p > 0) $ empty go p (And xs) = pprAnd p (map (go 3 . unLoc) xs) go _ (Or []) = keyword $ text "FALSE" go p (Or xs) = pprOr p (map (go 2 . unLoc) xs) go p (Parens x) = go p (unLoc x) -- Pretty print in source syntax, "a | b | c,d,e" pprBooleanFormula :: (Rational -> a -> SDoc) -> Rational -> BooleanFormula a -> SDoc pprBooleanFormula pprVar = pprBooleanFormula' pprVar pprAnd pprOr where pprAnd p = cparen (p > 3) . fsep . punctuate comma pprOr p = cparen (p > 2) . fsep . intersperse vbar -- Pretty print human in readable format, "either `a' or `b' or (`c', `d' and `e')"? pprBooleanFormulaNice :: Outputable a => BooleanFormula a -> SDoc pprBooleanFormulaNice = pprBooleanFormula' pprVar pprAnd pprOr 0 where pprVar _ = quotes . ppr pprAnd p = cparen (p > 1) . 
pprAnd' pprAnd' [] = empty pprAnd' [x,y] = x <+> text "and" <+> y pprAnd' (x:xs) = fsep (punctuate comma (init (x:|xs))) <> text ", and" <+> last (x:|xs) pprOr p xs = cparen (p > 1) $ text "either" <+> sep (intersperse (text "or") xs) instance (OutputableBndr a) => Outputable (BooleanFormula a) where ppr = pprBooleanFormulaNormal pprBooleanFormulaNormal :: (OutputableBndr a) => BooleanFormula a -> SDoc pprBooleanFormulaNormal = go where go (Var x) = pprPrefixOcc x go (And xs) = fsep $ punctuate comma (map (go . unLoc) xs) go (Or []) = keyword $ text "FALSE" go (Or xs) = fsep $ intersperse vbar (map (go . unLoc) xs) go (Parens x) = parens (go $ unLoc x) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/EnumSet.hs0000644000000000000000000000370307346545000020476 0ustar0000000000000000-- | A tiny wrapper around 'IntSet.IntSet' for representing sets of 'Enum' -- things. module GHC.Data.EnumSet ( EnumSet , member , insert , delete , toList , fromList , empty , difference ) where import GHC.Prelude import GHC.Utils.Binary import Control.DeepSeq import qualified Data.IntSet as IntSet newtype EnumSet a = EnumSet IntSet.IntSet deriving (Semigroup, Monoid, NFData) member :: Enum a => a -> EnumSet a -> Bool member x (EnumSet s) = IntSet.member (fromEnum x) s insert :: Enum a => a -> EnumSet a -> EnumSet a insert x (EnumSet s) = EnumSet $ IntSet.insert (fromEnum x) s delete :: Enum a => a -> EnumSet a -> EnumSet a delete x (EnumSet s) = EnumSet $ IntSet.delete (fromEnum x) s toList :: Enum a => EnumSet a -> [a] toList (EnumSet s) = map toEnum $ IntSet.toList s fromList :: Enum a => [a] -> EnumSet a fromList = EnumSet . IntSet.fromList . map fromEnum empty :: EnumSet a empty = EnumSet IntSet.empty difference :: EnumSet a -> EnumSet a -> EnumSet a difference (EnumSet a) (EnumSet b) = EnumSet (IntSet.difference a b) -- | Represents the 'EnumSet' as a bit set. -- -- Assumes that all elements are non-negative. 
-- -- This is only efficient for values that are sufficiently small, -- for example in the lower hundreds. instance Binary (EnumSet a) where put_ bh = put_ bh . enumSetToBitArray get bh = bitArrayToEnumSet <$> get bh -- TODO: Using 'Natural' instead of 'Integer' should be slightly more efficient -- but we don't currently have a 'Binary' instance for 'Natural'. type BitArray = Integer enumSetToBitArray :: EnumSet a -> BitArray enumSetToBitArray (EnumSet int_set) = IntSet.foldl' setBit 0 int_set bitArrayToEnumSet :: BitArray -> EnumSet a bitArrayToEnumSet ba = EnumSet (go (popCount ba) 0 IntSet.empty) where go 0 _ !int_set = int_set go n i !int_set = if ba `testBit` i then go (pred n) (succ i) (IntSet.insert i int_set) else go n (succ i) int_set ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FastMutInt.hs0000644000000000000000000000236407346545000021156 0ustar0000000000000000{-# LANGUAGE MagicHash, UnboxedTuples #-} {-# OPTIONS_GHC -O2 #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- -- (c) The University of Glasgow 2002-2006 -- -- Unboxed mutable Ints module GHC.Data.FastMutInt( FastMutInt, newFastMutInt, readFastMutInt, writeFastMutInt, atomicFetchAddFastMut ) where import GHC.Prelude.Basic import GHC.Base data FastMutInt = FastMutInt !(MutableByteArray# RealWorld) newFastMutInt :: Int -> IO FastMutInt newFastMutInt n = do x <- create writeFastMutInt x n return x where !(I# size) = finiteBitSize (0 :: Int) `unsafeShiftR` 3 create = IO $ \s -> case newByteArray# size s of (# s, arr #) -> (# s, FastMutInt arr #) readFastMutInt :: FastMutInt -> IO Int readFastMutInt (FastMutInt arr) = IO $ \s -> case readIntArray# arr 0# s of (# s, i #) -> (# s, I# i #) writeFastMutInt :: FastMutInt -> Int -> IO () writeFastMutInt (FastMutInt arr) (I# i) = IO $ \s -> case writeIntArray# arr 0# i s of s -> (# s, () #) atomicFetchAddFastMut :: FastMutInt -> Int -> IO Int atomicFetchAddFastMut (FastMutInt arr) (I# i) = IO $ \s 
-> case fetchAddIntArray# arr 0# i s of (# s, n #) -> (# s, I# n #) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FastString.hs0000644000000000000000000006214607346545000021210 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE UnliftedFFITypes #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- | -- There are two principal string types used internally by GHC: -- -- ['FastString'] -- -- * A compact, hash-consed, representation of character strings. -- * Generated by 'fsLit'. -- * You can get a 'GHC.Types.Unique.Unique' from them. -- * Equality test is O(1) (it uses the Unique). -- * Comparison is O(1) or O(n): -- * O(n) but deterministic with lexical comparison (`lexicalCompareFS`) -- * O(1) but non-deterministic with Unique comparison (`uniqCompareFS`) -- * Turn into 'GHC.Utils.Outputable.SDoc' with 'GHC.Utils.Outputable.ftext'. -- -- ['PtrString'] -- -- * Pointer and size of a Latin-1 encoded string. -- * Practically no operations. -- * Outputting them is fast. -- * Generated by 'mkPtrString#'. -- * Length of string literals (mkPtrString# "abc"#) is computed statically -- * Turn into 'GHC.Utils.Outputable.SDoc' with 'GHC.Utils.Outputable.ptext' -- * Requires manual memory management. -- Improper use may lead to memory leaks or dangling pointers. -- * It assumes Latin-1 as the encoding, therefore it cannot represent -- arbitrary Unicode strings. -- -- Use 'PtrString' unless you want the facilities of 'FastString'. 
module GHC.Data.FastString ( -- * ByteString bytesFS, fastStringToByteString, mkFastStringByteString, fastZStringToByteString, unsafeMkByteString, -- * ShortByteString fastStringToShortByteString, mkFastStringShortByteString, -- * ShortText fastStringToShortText, -- * FastZString FastZString, hPutFZS, zString, zStringTakeN, lengthFZS, -- * FastStrings FastString(..), -- not abstract, for now. NonDetFastString (..), LexicalFastString (..), -- ** Construction fsLit, mkFastString, mkFastStringBytes, mkFastStringByteList, mkFastString#, -- ** Deconstruction unpackFS, -- :: FastString -> String unconsFS, -- :: FastString -> Maybe (Char, FastString) -- ** Encoding zEncodeFS, -- ** Operations uniqueOfFS, lengthFS, nullFS, appendFS, concatFS, consFS, nilFS, lexicalCompareFS, uniqCompareFS, -- ** Outputting hPutFS, -- ** Internal getFastStringTable, getFastStringZEncCounter, -- * PtrStrings PtrString (..), -- ** Construction mkPtrString#, -- ** Deconstruction unpackPtrString, unpackPtrStringTakeN, -- ** Operations lengthPS ) where import GHC.Prelude.Basic as Prelude import GHC.Utils.Encoding import GHC.Utils.IO.Unsafe import GHC.Utils.Panic.Plain import GHC.Utils.Misc import GHC.Data.FastMutInt import Control.Concurrent.MVar import Control.DeepSeq import Control.Monad import Data.ByteString (ByteString) import Data.ByteString.Short (ShortByteString) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BSC import qualified Data.ByteString.Unsafe as BS import qualified Data.ByteString.Short as SBS #if !MIN_VERSION_bytestring(0,11,0) import qualified Data.ByteString.Short.Internal as SBS #endif import GHC.Data.ShortText (ShortText(..)) import Foreign.C import System.IO import Data.Data import Data.IORef import Data.Semigroup as Semi import Foreign #if MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) import GHC.Conc.Sync (sharedCAF) #endif import GHC.Exts import GHC.IO -- | Gives the Modified UTF-8 encoded bytes corresponding to a 'FastString' bytesFS, 
fastStringToByteString :: FastString -> ByteString {-# INLINE[1] bytesFS #-} bytesFS f = SBS.fromShort $ fs_sbs f {-# DEPRECATED fastStringToByteString "Use `bytesFS` instead" #-} fastStringToByteString = bytesFS fastStringToShortByteString :: FastString -> ShortByteString fastStringToShortByteString = fs_sbs fastStringToShortText :: FastString -> ShortText fastStringToShortText = ShortText . fs_sbs fastZStringToByteString :: FastZString -> ByteString fastZStringToByteString (FastZString bs) = bs -- This will drop information if any character > '\xFF' unsafeMkByteString :: String -> ByteString unsafeMkByteString = BSC.pack hashFastString :: FastString -> Int hashFastString fs = hashStr $ fs_sbs fs -- ----------------------------------------------------------------------------- newtype FastZString = FastZString ByteString deriving NFData hPutFZS :: Handle -> FastZString -> IO () hPutFZS handle (FastZString bs) = BS.hPut handle bs zString :: FastZString -> String zString (FastZString bs) = inlinePerformIO $ BS.unsafeUseAsCStringLen bs peekCAStringLen -- | @zStringTakeN n = 'take' n . 'zString'@ -- but is performed in \(O(\min(n,l))\) rather than \(O(l)\), -- where \(l\) is the length of the 'FastZString'. zStringTakeN :: Int -> FastZString -> String zStringTakeN n (FastZString bs) = inlinePerformIO $ BS.unsafeUseAsCStringLen bs $ \(cp, len) -> peekCAStringLen (cp, min n len) lengthFZS :: FastZString -> Int lengthFZS (FastZString bs) = BS.length bs mkFastZStringString :: String -> FastZString mkFastZStringString str = FastZString (BSC.pack str) -- ----------------------------------------------------------------------------- {-| A 'FastString' is a Modified UTF-8 encoded string together with a unique ID. All 'FastString's are stored in a global hashtable to support fast O(1) comparison. It is also associated with a lazy reference to the Z-encoding of this string which is used by the compiler internally. 
-} data FastString = FastString { uniq :: {-# UNPACK #-} !Int, -- unique id n_chars :: {-# UNPACK #-} !Int, -- number of chars fs_sbs :: {-# UNPACK #-} !ShortByteString, fs_zenc :: FastZString -- ^ Lazily computed Z-encoding of this string. See Note [Z-Encoding] in -- GHC.Utils.Encoding. -- -- Since 'FastString's are globally memoized this is computed at most -- once for any given string. } instance Eq FastString where f1 == f2 = uniq f1 == uniq f2 -- We don't provide any "Ord FastString" instance to force you to think about -- which ordering you want: -- * lexical: deterministic, O(n). Cf lexicalCompareFS and LexicalFastString. -- * by unique: non-deterministic, O(1). Cf uniqCompareFS and NonDetFastString. instance IsString FastString where fromString = fsLit instance Semi.Semigroup FastString where (<>) = appendFS instance Monoid FastString where mempty = nilFS mappend = (Semi.<>) mconcat = concatFS instance Show FastString where show fs = show (unpackFS fs) instance Data FastString where -- don't traverse? toConstr _ = abstractConstr "FastString" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "FastString" instance NFData FastString where rnf fs = seq fs () -- | Compare FastString lexically -- -- If you don't care about the lexical ordering, use `uniqCompareFS` instead. lexicalCompareFS :: FastString -> FastString -> Ordering lexicalCompareFS fs1 fs2 = if uniq fs1 == uniq fs2 then EQ else utf8CompareShortByteString (fs_sbs fs1) (fs_sbs fs2) -- perform a lexical comparison taking into account the Modified UTF-8 -- encoding we use (cf #18562) -- | Compare FastString by their Unique (not lexically). -- -- Much cheaper than `lexicalCompareFS` but non-deterministic! uniqCompareFS :: FastString -> FastString -> Ordering uniqCompareFS fs1 fs2 = compare (uniq fs1) (uniq fs2) -- | Non-deterministic FastString -- -- This is a simple FastString wrapper with an Ord instance using -- `uniqCompareFS` (i.e. which compares FastStrings on their Uniques). 
Hence it -- is not deterministic from one run to the other. newtype NonDetFastString = NonDetFastString FastString deriving newtype (Eq, Show) deriving stock Data instance Ord NonDetFastString where compare (NonDetFastString fs1) (NonDetFastString fs2) = uniqCompareFS fs1 fs2 -- | Lexical FastString -- -- This is a simple FastString wrapper with an Ord instance using -- `lexicalCompareFS` (i.e. which compares FastStrings on their String -- representation). Hence it is deterministic from one run to the other. newtype LexicalFastString = LexicalFastString { getLexicalFastString :: FastString } deriving newtype (Eq, Show) deriving stock Data instance Ord LexicalFastString where compare (LexicalFastString fs1) (LexicalFastString fs2) = lexicalCompareFS fs1 fs2 instance NFData LexicalFastString where rnf (LexicalFastString f) = rnf f -- ----------------------------------------------------------------------------- -- Construction {- Internally, the compiler will maintain a fast string symbol table, providing sharing and fast comparison. Creation of new @FastString@s then covertly does a lookup, re-using the @FastString@ if there was a hit. The design of the FastString hash table allows for lockless concurrent reads and updates to multiple buckets with low synchronization overhead. See Note [Updating the FastString table] on how it's updated. -} data FastStringTable = FastStringTable {-# UNPACK #-} !FastMutInt -- ^ The unique ID counter shared with all buckets -- -- We unpack the 'FastMutInt' counter as it is always consumed strictly. {-# NOUNPACK #-} !FastMutInt -- ^ Number of computed z-encodings for all buckets. -- -- We mark this as 'NOUNPACK' as this 'FastMutInt' is retained by a thunk -- in 'mkFastStringWith' and needs to be boxed any way. -- If this is unpacked, then we box this single 'FastMutInt' once for each -- allocated FastString. 
(Array# (IORef FastStringTableSegment)) -- ^ concurrent segments data FastStringTableSegment = FastStringTableSegment {-# UNPACK #-} !(MVar ()) -- the lock for write in each segment {-# UNPACK #-} !FastMutInt -- the number of elements (MutableArray# RealWorld [FastString]) -- buckets in this segment {- Following parameters are determined based on: * Benchmark based on testsuite/tests/utils/should_run/T14854.hs * Stats of @echo :browse | ghc --interactive -dfaststring-stats >/dev/null@: on 2018-10-24, we have 13920 entries. -} segmentBits, numSegments, segmentMask, initialNumBuckets :: Int segmentBits = 8 numSegments = 256 -- bit segmentBits segmentMask = 0xff -- bit segmentBits - 1 initialNumBuckets = 64 hashToSegment# :: Int# -> Int# hashToSegment# hash# = hash# `andI#` segmentMask# where !(I# segmentMask#) = segmentMask hashToIndex# :: MutableArray# RealWorld [FastString] -> Int# -> Int# hashToIndex# buckets# hash# = (hash# `uncheckedIShiftRL#` segmentBits#) `remInt#` size# where !(I# segmentBits#) = segmentBits size# = sizeofMutableArray# buckets# maybeResizeSegment :: IORef FastStringTableSegment -> IO FastStringTableSegment maybeResizeSegment segmentRef = do segment@(FastStringTableSegment lock counter old#) <- readIORef segmentRef let oldSize# = sizeofMutableArray# old# newSize# = oldSize# *# 2# (I# n#) <- readFastMutInt counter if isTrue# (n# <# newSize#) -- maximum load of 1 then return segment else do resizedSegment@(FastStringTableSegment _ _ new#) <- IO $ \s1# -> case newArray# newSize# [] s1# of (# s2#, arr# #) -> (# s2#, FastStringTableSegment lock counter arr# #) forM_ [0 .. (I# oldSize#) - 1] $ \(I# i#) -> do fsList <- IO $ readArray# old# i# forM_ fsList $ \fs -> do let -- Shall we store in hash value in FastString instead? 
!(I# hash#) = hashFastString fs idx# = hashToIndex# new# hash# IO $ \s1# -> case readArray# new# idx# s1# of (# s2#, bucket #) -> case writeArray# new# idx# (fs: bucket) s2# of s3# -> (# s3#, () #) writeIORef segmentRef resizedSegment return resizedSegment {-# NOINLINE stringTable #-} stringTable :: FastStringTable stringTable = unsafePerformIO $ do let !(I# numSegments#) = numSegments !(I# initialNumBuckets#) = initialNumBuckets loop a# i# s1# | isTrue# (i# ==# numSegments#) = s1# | otherwise = case newMVar () `unIO` s1# of (# s2#, lock #) -> case newFastMutInt 0 `unIO` s2# of (# s3#, counter #) -> case newArray# initialNumBuckets# [] s3# of (# s4#, buckets# #) -> case newIORef (FastStringTableSegment lock counter buckets#) `unIO` s4# of (# s5#, segment #) -> case writeArray# a# i# segment s5# of s6# -> loop a# (i# +# 1#) s6# uid <- newFastMutInt 603979776 -- ord '$' * 0x01000000 n_zencs <- newFastMutInt 0 tab <- IO $ \s1# -> case newArray# numSegments# (panic "string_table") s1# of (# s2#, arr# #) -> case loop arr# 0# s2# of s3# -> case unsafeFreezeArray# arr# s3# of (# s4#, segments# #) -> (# s4#, FastStringTable uid n_zencs segments# #) -- use the support wired into the RTS to share this CAF among all images of -- libHSghc #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) return tab #else sharedCAF tab getOrSetLibHSghcFastStringTable -- from the 9.3 RTS; the previous RTS before might not have this symbol. The -- right way to do this however would be to define some HAVE_FAST_STRING_TABLE -- or similar rather than use (odd parity) development versions. foreign import ccall unsafe "getOrSetLibHSghcFastStringTable" getOrSetLibHSghcFastStringTable :: Ptr a -> IO (Ptr a) #endif {- We include the FastString table in the `sharedCAF` mechanism because we'd like FastStrings created by a Core plugin to have the same uniques as corresponding strings created by the host compiler itself. 
For example, this allows plugins to lookup known names (eg `mkTcOcc "MySpecialType"`) in the GlobalRdrEnv or even re-invoke the parser. In particular, the following little sanity test was failing in a plugin prototyping safe newtype-coercions: GHC.NT.Type.NT was imported, but could not be looked up /by the plugin/. let rdrName = mkModuleName "GHC.NT.Type" `mkRdrQual` mkTcOcc "NT" putMsgS $ showSDoc dflags $ ppr $ lookupGRE (mg_rdr_env guts) (LookupRdrName rdrName AllRelevantGREs) `mkTcOcc` involves the lookup (or creation) of a FastString. Since the plugin's FastString.string_table is empty, constructing the RdrName also allocates new uniques for the FastStrings "GHC.NT.Type" and "NT". These uniques are almost certainly unequal to the ones that the host compiler originally assigned to those FastStrings. Thus the lookup fails since the domain of the GlobalRdrEnv is affected by the RdrName's OccName's FastString's unique. Maintaining synchronization of the two instances of this global is rather difficult because of the uses of `unsafePerformIO` in this module. Not synchronizing them risks breaking the rather major invariant that two FastStrings with the same unique have the same string. Thus we use the lower-level `sharedCAF` mechanism that relies on Globals.c. -} mkFastString# :: Addr# -> FastString {-# INLINE mkFastString# #-} mkFastString# a# = mkFastStringBytes ptr (ptrStrLength ptr) where ptr = Ptr a# {- Note [Updating the FastString table] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use a concurrent hashtable which contains multiple segments, each hash value always maps to the same segment. Read is lock-free, write to the a segment should acquire a lock for that segment to avoid race condition, writes to different segments are independent. The procedure goes like this: 1. Find out which segment to operate on based on the hash value 2. Read the relevant bucket and perform a look up of the string. 3. If it exists, return it. 4. 
Otherwise grab a unique ID, create a new FastString and atomically attempt to update the relevant segment with this FastString: * Resize the segment by doubling the number of buckets when the number of FastStrings in this segment grows beyond the threshold. * Double check that the string is not in the bucket. Another thread may have inserted it while we were creating our string. * Return the existing FastString if it exists. The one we preemptively created will get GCed. * Otherwise, insert and return the string we created. -} mkFastStringWith :: (Int -> FastMutInt-> IO FastString) -> ShortByteString -> IO FastString mkFastStringWith mk_fs sbs = do FastStringTableSegment lock _ buckets# <- readIORef segmentRef let idx# = hashToIndex# buckets# hash# bucket <- IO $ readArray# buckets# idx# case bucket_match bucket sbs of Just found -> return found Nothing -> do -- The withMVar below is not dupable. It can lead to deadlock if it is -- only run partially and putMVar is not called after takeMVar. noDuplicate n <- get_uid new_fs <- mk_fs n n_zencs withMVar lock $ \_ -> insert new_fs where !(FastStringTable uid n_zencs segments#) = stringTable get_uid = atomicFetchAddFastMut uid 1 !(I# hash#) = hashStr sbs (# segmentRef #) = indexArray# segments# (hashToSegment# hash#) insert fs = do FastStringTableSegment _ counter buckets# <- maybeResizeSegment segmentRef let idx# = hashToIndex# buckets# hash# bucket <- IO $ readArray# buckets# idx# case bucket_match bucket sbs of -- The FastString was added by another thread after previous read and -- before we acquired the write lock. 
Just found -> return found Nothing -> do IO $ \s1# -> case writeArray# buckets# idx# (fs : bucket) s1# of s2# -> (# s2#, () #) _ <- atomicFetchAddFastMut counter 1 return fs bucket_match :: [FastString] -> ShortByteString -> Maybe FastString bucket_match fs sbs = go fs where go [] = Nothing go (fs@(FastString {fs_sbs=fs_sbs}) : ls) | fs_sbs == sbs = Just fs | otherwise = go ls -- bucket_match used to inline before changes to instance Eq ShortByteString -- in bytestring-0.12, which made it slightly larger than inlining threshold. -- Non-inlining causes a small, but measurable performance regression, so let's force it. {-# INLINE bucket_match #-} mkFastStringBytes :: Ptr Word8 -> Int -> FastString mkFastStringBytes !ptr !len = -- NB: Might as well use unsafeDupablePerformIO, since mkFastStringWith is -- idempotent. unsafeDupablePerformIO $ do sbs <- newSBSFromPtr ptr len mkFastStringWith (mkNewFastStringShortByteString sbs) sbs newSBSFromPtr :: Ptr a -> Int -> IO ShortByteString newSBSFromPtr (Ptr src#) (I# len#) = IO $ \s -> case newByteArray# len# s of { (# s, dst# #) -> case copyAddrToByteArray# src# dst# 0# len# s of { s -> case unsafeFreezeByteArray# dst# s of { (# s, ba# #) -> (# s, SBS.SBS ba# #) }}} -- | Create a 'FastString' by copying an existing 'ByteString' mkFastStringByteString :: ByteString -> FastString mkFastStringByteString bs = let sbs = SBS.toShort bs in inlinePerformIO $ mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- | Create a 'FastString' from an existing 'ShortByteString' without -- copying. 
mkFastStringShortByteString :: ShortByteString -> FastString mkFastStringShortByteString sbs = inlinePerformIO $ mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- | Creates a UTF-8 encoded 'FastString' from a 'String' mkFastString :: String -> FastString {-# NOINLINE[1] mkFastString #-} mkFastString str = inlinePerformIO $ do let !sbs = utf8EncodeShortByteString str mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- The following rule is used to avoid polluting the non-reclaimable FastString -- table with transient strings when we only want their encoding. {-# RULES "bytesFS/mkFastString" forall x. bytesFS (mkFastString x) = utf8EncodeByteString x #-} -- | Creates a 'FastString' from a UTF-8 encoded @[Word8]@ mkFastStringByteList :: [Word8] -> FastString mkFastStringByteList str = mkFastStringShortByteString (SBS.pack str) -- | Creates a (lazy) Z-encoded 'FastString' from a 'ShortByteString' and -- account the number of forced z-strings into the passed 'FastMutInt'. mkZFastString :: FastMutInt -> ShortByteString -> FastZString mkZFastString n_zencs sbs = unsafePerformIO $ do _ <- atomicFetchAddFastMut n_zencs 1 return $ mkFastZStringString (zEncodeString (utf8DecodeShortByteString sbs)) mkNewFastStringShortByteString :: ShortByteString -> Int -> FastMutInt -> IO FastString mkNewFastStringShortByteString sbs uid n_zencs = do let zstr = mkZFastString n_zencs sbs chars = utf8CountCharsShortByteString sbs return (FastString uid chars sbs zstr) hashStr :: ShortByteString -> Int -- produce a hash value between 0 & m (inclusive) hashStr sbs@(SBS.SBS ba#) = loop 0# 0# where !(I# len#) = SBS.length sbs loop h n = if isTrue# (n ==# len#) then I# h else let -- DO NOT move this let binding! indexCharOffAddr# reads from the -- pointer so we need to evaluate this based on the length check -- above. Not doing this right caused #17909. 
!c = int8ToInt# (indexInt8Array# ba# n) !h2 = (h *# 16777619#) `xorI#` c in loop h2 (n +# 1#) -- ----------------------------------------------------------------------------- -- Operations -- | Returns the length of the 'FastString' in characters lengthFS :: FastString -> Int lengthFS fs = n_chars fs -- | Returns @True@ if the 'FastString' is empty nullFS :: FastString -> Bool nullFS fs = SBS.null $ fs_sbs fs -- | Lazily unpacks and decodes the FastString unpackFS :: FastString -> String unpackFS fs = utf8DecodeShortByteString $ fs_sbs fs -- | Returns a Z-encoded version of a 'FastString'. This might be the -- original, if it was already Z-encoded. The first time this -- function is applied to a particular 'FastString', the results are -- memoized. -- zEncodeFS :: FastString -> FastZString zEncodeFS fs = fs_zenc fs appendFS :: FastString -> FastString -> FastString appendFS fs1 fs2 = mkFastStringShortByteString $ (Semi.<>) (fs_sbs fs1) (fs_sbs fs2) concatFS :: [FastString] -> FastString concatFS = mkFastStringShortByteString . mconcat . map fs_sbs consFS :: Char -> FastString -> FastString consFS c fs = mkFastString (c : unpackFS fs) unconsFS :: FastString -> Maybe (Char, FastString) unconsFS fs = case unpackFS fs of [] -> Nothing (chr : str) -> Just (chr, mkFastString str) uniqueOfFS :: FastString -> Int uniqueOfFS fs = uniq fs nilFS :: FastString nilFS = mkFastString "" -- ----------------------------------------------------------------------------- -- Stats getFastStringTable :: IO [[[FastString]]] getFastStringTable = forM [0 .. numSegments - 1] $ \(I# i#) -> do let (# segmentRef #) = indexArray# segments# i# FastStringTableSegment _ _ buckets# <- readIORef segmentRef let bucketSize = I# (sizeofMutableArray# buckets#) forM [0 .. 
bucketSize - 1] $ \(I# j#) -> IO $ readArray# buckets# j# where !(FastStringTable _ _ segments#) = stringTable getFastStringZEncCounter :: IO Int getFastStringZEncCounter = readFastMutInt n_zencs where !(FastStringTable _ n_zencs _) = stringTable -- ----------------------------------------------------------------------------- -- Outputting 'FastString's -- |Outputs a 'FastString' with /no decoding at all/, that is, you -- get the actual bytes in the 'FastString' written to the 'Handle'. hPutFS :: Handle -> FastString -> IO () hPutFS handle fs = BS.hPut handle $ bytesFS fs -- ToDo: we'll probably want an hPutFSLocal, or something, to output -- in the current locale's encoding (for error messages and suchlike). -- ----------------------------------------------------------------------------- -- PtrStrings, here for convenience only. -- | A 'PtrString' is a pointer to some array of Latin-1 encoded chars. data PtrString = PtrString !(Ptr Word8) !Int -- | Wrap an unboxed address into a 'PtrString'. mkPtrString# :: Addr# -> PtrString {-# INLINE mkPtrString# #-} mkPtrString# a# = PtrString (Ptr a#) (ptrStrLength (Ptr a#)) -- | Decode a 'PtrString' back into a 'String' using Latin-1 encoding. -- This does not free the memory associated with 'PtrString'. unpackPtrString :: PtrString -> String unpackPtrString (PtrString (Ptr p#) (I# n#)) = unpackNBytes# p# n# -- | @unpackPtrStringTakeN n = 'take' n . 'unpackPtrString'@ -- but is performed in \(O(\min(n,l))\) rather than \(O(l)\), -- where \(l\) is the length of the 'PtrString'. 
unpackPtrStringTakeN :: Int -> PtrString -> String unpackPtrStringTakeN n (PtrString (Ptr p#) len) = case min n len of I# n# -> unpackNBytes# p# n# -- | Return the length of a 'PtrString' lengthPS :: PtrString -> Int lengthPS (PtrString _ n) = n -- ----------------------------------------------------------------------------- -- under the carpet #if !MIN_VERSION_GLASGOW_HASKELL(9,0,0,0) foreign import ccall unsafe "strlen" cstringLength# :: Addr# -> Int# #endif ptrStrLength :: Ptr Word8 -> Int {-# INLINE ptrStrLength #-} ptrStrLength (Ptr a) = I# (cstringLength# a) {-# NOINLINE fsLit #-} fsLit :: String -> FastString fsLit x = mkFastString x {-# RULES "fslit" forall x . fsLit (unpackCString# x) = mkFastString# x #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FastString/0000755000000000000000000000000007346545000020643 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FastString/Env.hs0000644000000000000000000001061007346545000021725 0ustar0000000000000000{- % % (c) The University of Glasgow 2006 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 % -} -- | FastStringEnv: FastString environments module GHC.Data.FastString.Env ( -- * FastString environments (maps) FastStringEnv, -- ** Manipulating these environments mkFsEnv, emptyFsEnv, unitFsEnv, extendFsEnv_C, extendFsEnv_Acc, extendFsEnv, extendFsEnvList, extendFsEnvList_C, filterFsEnv, plusFsEnv, plusFsEnv_C, alterFsEnv, lookupFsEnv, lookupFsEnv_NF, delFromFsEnv, delListFromFsEnv, elemFsEnv, mapFsEnv, strictMapFsEnv, mapMaybeFsEnv, nonDetFoldFsEnv, -- * Deterministic FastString environments (maps) DFastStringEnv, -- ** Manipulating these environments mkDFsEnv, emptyDFsEnv, dFsEnvElts, lookupDFsEnv ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Data.Maybe import GHC.Data.FastString -- | A non-deterministic set of FastStrings. 
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why it's not -- deterministic and why it matters. Use DFastStringEnv if the set eventually -- gets converted into a list or folded over in a way where the order -- changes the generated code. type FastStringEnv a = UniqFM FastString a -- Domain is FastString emptyFsEnv :: FastStringEnv a mkFsEnv :: [(FastString,a)] -> FastStringEnv a alterFsEnv :: (Maybe a-> Maybe a) -> FastStringEnv a -> FastString -> FastStringEnv a extendFsEnv_C :: (a->a->a) -> FastStringEnv a -> FastString -> a -> FastStringEnv a extendFsEnv_Acc :: (a->b->b) -> (a->b) -> FastStringEnv b -> FastString -> a -> FastStringEnv b extendFsEnv :: FastStringEnv a -> FastString -> a -> FastStringEnv a plusFsEnv :: FastStringEnv a -> FastStringEnv a -> FastStringEnv a plusFsEnv_C :: (a->a->a) -> FastStringEnv a -> FastStringEnv a -> FastStringEnv a extendFsEnvList :: FastStringEnv a -> [(FastString,a)] -> FastStringEnv a extendFsEnvList_C :: (a->a->a) -> FastStringEnv a -> [(FastString,a)] -> FastStringEnv a delFromFsEnv :: FastStringEnv a -> FastString -> FastStringEnv a delListFromFsEnv :: FastStringEnv a -> [FastString] -> FastStringEnv a elemFsEnv :: FastString -> FastStringEnv a -> Bool unitFsEnv :: FastString -> a -> FastStringEnv a lookupFsEnv :: FastStringEnv a -> FastString -> Maybe a lookupFsEnv_NF :: FastStringEnv a -> FastString -> a filterFsEnv :: (elt -> Bool) -> FastStringEnv elt -> FastStringEnv elt mapFsEnv :: (elt1 -> elt2) -> FastStringEnv elt1 -> FastStringEnv elt2 mapMaybeFsEnv :: (elt1 -> Maybe elt2) -> FastStringEnv elt1 -> FastStringEnv elt2 emptyFsEnv = emptyUFM unitFsEnv x y = unitUFM x y extendFsEnv x y z = addToUFM x y z extendFsEnvList x l = addListToUFM x l lookupFsEnv x y = lookupUFM x y alterFsEnv = alterUFM mkFsEnv l = listToUFM l elemFsEnv x y = elemUFM x y plusFsEnv x y = plusUFM x y plusFsEnv_C f x y = plusUFM_C f x y extendFsEnv_C f x y z = addToUFM_C f x y z mapFsEnv f x = mapUFM f x 
extendFsEnv_Acc x y z a b = addToUFM_Acc x y z a b extendFsEnvList_C x y z = addListToUFM_C x y z delFromFsEnv x y = delFromUFM x y delListFromFsEnv x y = delListFromUFM x y filterFsEnv x y = filterUFM x y mapMaybeFsEnv f x = mapMaybeUFM f x lookupFsEnv_NF env n = expectJust "lookupFsEnv_NF" (lookupFsEnv env n) strictMapFsEnv :: (a -> b) -> FastStringEnv a -> FastStringEnv b strictMapFsEnv = strictMapUFM -- | Fold over a 'FastStringEnv'. -- -- Non-deterministic, unless the folding function is commutative -- (i.e. @a1 `f` ( a2 `f` b ) == a2 `f` ( a1 `f` b )@ for all @a1@, @a2@, @b@). nonDetFoldFsEnv :: (a -> b -> b) -> b -> FastStringEnv a -> b nonDetFoldFsEnv = nonDetFoldUFM -- Deterministic FastStringEnv -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why we need -- DFastStringEnv. type DFastStringEnv a = UniqDFM FastString a -- Domain is FastString emptyDFsEnv :: DFastStringEnv a emptyDFsEnv = emptyUDFM dFsEnvElts :: DFastStringEnv a -> [a] dFsEnvElts = eltsUDFM mkDFsEnv :: [(FastString,a)] -> DFastStringEnv a mkDFsEnv l = listToUDFM l lookupDFsEnv :: DFastStringEnv a -> FastString -> Maybe a lookupDFsEnv = lookupUDFM ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FiniteMap.hs0000644000000000000000000000163607346545000020775 0ustar0000000000000000-- Some extra functions to extend Data.Map module GHC.Data.FiniteMap ( insertList, insertListWith, deleteList, foldRight, foldRightWithKey ) where import GHC.Prelude import Data.Map (Map) import qualified Data.Map as Map insertList :: Ord key => [(key,elt)] -> Map key elt -> Map key elt insertList xs m = foldl' (\m (k, v) -> Map.insert k v m) m xs insertListWith :: Ord key => (elt -> elt -> elt) -> [(key,elt)] -> Map key elt -> Map key elt insertListWith f xs m0 = foldl' (\m (k, v) -> Map.insertWith f k v m) m0 xs deleteList :: Ord key => [key] -> Map key elt -> Map key elt deleteList ks m = foldl' (flip Map.delete) m ks foldRight :: (elt -> a -> a) -> a -> Map key elt -> a foldRight = 
Map.foldr foldRightWithKey :: (key -> elt -> a -> a) -> a -> Map key elt -> a foldRightWithKey = Map.foldrWithKey ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/FlatBag.hs0000644000000000000000000001033607346545000020416 0ustar0000000000000000{-# LANGUAGE UnboxedTuples #-} module GHC.Data.FlatBag ( FlatBag(EmptyFlatBag, UnitFlatBag, TupleFlatBag) , emptyFlatBag , unitFlatBag , sizeFlatBag , elemsFlatBag , mappendFlatBag -- * Construction , fromList , fromSizedSeq ) where import GHC.Prelude import GHC.Data.SizedSeq (SizedSeq, ssElts, sizeSS) import Control.DeepSeq import GHC.Data.SmallArray -- | Store elements in a flattened representation. -- -- A 'FlatBag' is a data structure that stores an ordered list of elements -- in a flat structure, avoiding the overhead of a linked list. -- Use this data structure, if the code requires the following properties: -- -- * Elements are stored in a long-lived object, and benefit from a flattened -- representation. -- * The 'FlatBag' will be traversed but not extended or filtered. -- * The number of elements should be known. -- * Sharing of the empty case improves memory behaviour. -- -- A 'FlagBag' aims to have as little overhead as possible to store its elements. -- To achieve that, it distinguishes between the empty case, singleton, tuple -- and general case. -- Thus, we only pay for the additional three words of an 'Array' if we have at least -- three elements. data FlatBag a = EmptyFlatBag | UnitFlatBag !a | TupleFlatBag !a !a | FlatBag {-# UNPACK #-} !(SmallArray a) instance Functor FlatBag where fmap _ EmptyFlatBag = EmptyFlatBag fmap f (UnitFlatBag a) = UnitFlatBag $ f a fmap f (TupleFlatBag a b) = TupleFlatBag (f a) (f b) fmap f (FlatBag e) = FlatBag $ mapSmallArray f e instance Foldable FlatBag where foldMap _ EmptyFlatBag = mempty foldMap f (UnitFlatBag a) = f a foldMap f (TupleFlatBag a b) = f a `mappend` f b foldMap f (FlatBag arr) = foldMapSmallArray f arr length = fromIntegral . 
sizeFlatBag instance Traversable FlatBag where traverse _ EmptyFlatBag = pure EmptyFlatBag traverse f (UnitFlatBag a) = UnitFlatBag <$> f a traverse f (TupleFlatBag a b) = TupleFlatBag <$> f a <*> f b traverse f fl@(FlatBag arr) = fromList (fromIntegral $ sizeofSmallArray arr) <$> traverse f (elemsFlatBag fl) instance NFData a => NFData (FlatBag a) where rnf EmptyFlatBag = () rnf (UnitFlatBag a) = rnf a rnf (TupleFlatBag a b) = rnf a `seq` rnf b rnf (FlatBag arr) = rnfSmallArray arr -- | Create an empty 'FlatBag'. -- -- The empty 'FlatBag' is shared over all instances. emptyFlatBag :: FlatBag a emptyFlatBag = EmptyFlatBag -- | Create a singleton 'FlatBag'. unitFlatBag :: a -> FlatBag a unitFlatBag = UnitFlatBag -- | Calculate the size of sizeFlatBag :: FlatBag a -> Word sizeFlatBag EmptyFlatBag = 0 sizeFlatBag UnitFlatBag{} = 1 sizeFlatBag TupleFlatBag{} = 2 sizeFlatBag (FlatBag arr) = fromIntegral $ sizeofSmallArray arr -- | Get all elements that are stored in the 'FlatBag'. elemsFlatBag :: FlatBag a -> [a] elemsFlatBag EmptyFlatBag = [] elemsFlatBag (UnitFlatBag a) = [a] elemsFlatBag (TupleFlatBag a b) = [a, b] elemsFlatBag (FlatBag arr) = [indexSmallArray arr i | i <- [0 .. sizeofSmallArray arr - 1]] -- | Combine two 'FlatBag's. -- -- The new 'FlatBag' contains all elements from both 'FlatBag's. -- -- If one of the 'FlatBag's is empty, the old 'FlatBag' is reused. mappendFlatBag :: FlatBag a -> FlatBag a -> FlatBag a mappendFlatBag EmptyFlatBag b = b mappendFlatBag a EmptyFlatBag = a mappendFlatBag (UnitFlatBag a) (UnitFlatBag b) = TupleFlatBag a b mappendFlatBag a b = fromList (sizeFlatBag a + sizeFlatBag b) (elemsFlatBag a ++ elemsFlatBag b) -- | Store the list in a flattened memory representation, avoiding the memory overhead -- of a linked list. -- -- The size 'n' needs to be smaller or equal to the length of the list. -- If it is smaller than the length of the list, overflowing elements are -- discarded. 
It is undefined behaviour to set 'n' to be bigger than the -- length of the list. fromList :: Word -> [a] -> FlatBag a fromList n elts = case elts of [] -> EmptyFlatBag [a] -> UnitFlatBag a [a, b] -> TupleFlatBag a b xs -> FlatBag (listToArray (fromIntegral n) fst snd (zip [0..] xs)) -- | Convert a 'SizedSeq' into its flattened representation. -- A 'FlatBag a' is more memory efficient than '[a]', if no further modification -- is necessary. fromSizedSeq :: SizedSeq a -> FlatBag a fromSizedSeq s = fromList (sizeSS s) (ssElts s) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Graph/0000755000000000000000000000000007346545000017620 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Graph/Directed.hs0000644000000000000000000006056607346545000021714 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE DeriveFunctor #-} module GHC.Data.Graph.Directed ( Graph, graphFromEdgedVerticesOrd, graphFromEdgedVerticesUniq, graphFromVerticesAndAdjacency, SCC(..), Node(..), G.flattenSCC, G.flattenSCCs, stronglyConnCompG, topologicalSortG, verticesG, edgesG, hasVertexG, reachableG, reachablesG, transposeG, allReachable, allReachableCyclic, outgoingG, emptyG, findCycle, -- For backwards compatibility with the simpler version of Digraph stronglyConnCompFromEdgedVerticesOrd, stronglyConnCompFromEdgedVerticesOrdR, stronglyConnCompFromEdgedVerticesUniq, stronglyConnCompFromEdgedVerticesUniqR, -- Simple way to classify edges EdgeType(..), classifyEdges ) where ------------------------------------------------------------------------------ -- A version of the graph algorithms described in: -- -- ``Lazy Depth-First Search and Linear IntGraph Algorithms in Haskell'' -- by David King and John Launchbury -- -- Also included is some additional code for printing tree structures ... 
-- -- If you ever find yourself in need of algorithms for classifying edges, -- or finding connected/biconnected components, consult the history; Sigbjorn -- Finne contributed some implementations in 1997, although we've since -- removed them since they were not used anywhere in GHC. ------------------------------------------------------------------------------ import GHC.Prelude import GHC.Utils.Misc ( sortWith, count ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.Maybe ( expectJust ) -- std interfaces import Data.Maybe import Data.Array import Data.List ( sort ) import qualified Data.Map as Map import qualified Data.Set as Set import qualified Data.Graph as G import Data.Graph ( Vertex, Bounds, SCC(..) ) -- Used in the underlying representation import Data.Tree import GHC.Types.Unique import GHC.Types.Unique.FM import qualified Data.IntMap as IM import qualified Data.IntSet as IS import qualified Data.Map as M import qualified Data.Set as S {- ************************************************************************ * * * Graphs and Graph Construction * * ************************************************************************ Note [Nodes, keys, vertices] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * A 'node' is a big blob of client-stuff * Each 'node' has a unique (client) 'key', but the latter is in Ord and has fast comparison * Digraph then maps each 'key' to a Vertex (Int) which is arranged densely in 0.n -} data Graph node = Graph { gr_int_graph :: IntGraph, gr_vertex_to_node :: Vertex -> node, gr_node_to_vertex :: node -> Maybe Vertex } data Edge node = Edge node node {-| Representation for nodes of the Graph. * The @payload@ is user data, just carried around in this module * The @key@ is the node identifier. Key has an Ord instance for performance reasons. 
* The @[key]@ are the dependencies of the node; it's ok to have extra keys in the dependencies that are not the key of any Node in the graph -} data Node key payload = DigraphNode { node_payload :: payload, -- ^ User data node_key :: key, -- ^ User defined node id node_dependencies :: [key] -- ^ Dependencies/successors of the node } deriving Functor instance (Outputable a, Outputable b) => Outputable (Node a b) where ppr (DigraphNode a b c) = ppr (a, b, c) emptyGraph :: Graph a emptyGraph = Graph (array (1, 0) []) (error "emptyGraph") (const Nothing) -- See Note [Deterministic SCC] graphFromEdgedVertices :: ReduceFn key payload -> [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVertices _reduceFn [] = emptyGraph graphFromEdgedVertices reduceFn edged_vertices = Graph graph vertex_fn (key_vertex . key_extractor) where key_extractor = node_key (bounds, vertex_fn, key_vertex, numbered_nodes) = reduceFn edged_vertices key_extractor graph = array bounds [ (v, sort $ mapMaybe key_vertex ks) | (v, (node_dependencies -> ks)) <- numbered_nodes] -- We normalize outgoing edges by sorting on node order, so -- that the result doesn't depend on the order of the edges -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] graphFromEdgedVerticesOrd :: Ord key => [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVerticesOrd = graphFromEdgedVertices reduceNodesIntoVerticesOrd -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] graphFromEdgedVerticesUniq :: Uniquable key => [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVerticesUniq = graphFromEdgedVertices 
reduceNodesIntoVerticesUniq type ReduceFn key payload = [Node key payload] -> (Node key payload -> key) -> (Bounds, Vertex -> Node key payload , key -> Maybe Vertex, [(Vertex, Node key payload)]) {- Note [reduceNodesIntoVertices implementations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ reduceNodesIntoVertices is parameterized by the container type. This is to accommodate key types that don't have an Ord instance and hence preclude the use of Data.Map. An example of such type would be Unique, there's no way to implement Ord Unique deterministically. For such types, there's a version with a Uniquable constraint. This leaves us with two versions of every function that depends on reduceNodesIntoVertices, one with Ord constraint and the other with Uniquable constraint. For example: graphFromEdgedVerticesOrd and graphFromEdgedVerticesUniq. The Uniq version should be a tiny bit more efficient since it uses Data.IntMap internally. -} reduceNodesIntoVertices :: ([(key, Vertex)] -> m) -> (key -> m -> Maybe Vertex) -> ReduceFn key payload reduceNodesIntoVertices fromList lookup nodes key_extractor = (bounds, (!) vertex_map, key_vertex, numbered_nodes) where max_v = length nodes - 1 bounds = (0, max_v) :: (Vertex, Vertex) -- Keep the order intact to make the result depend on input order -- instead of key order numbered_nodes = zip [0..] 
nodes vertex_map = array bounds numbered_nodes key_map = fromList [ (key_extractor node, v) | (v, node) <- numbered_nodes ] key_vertex k = lookup k key_map -- See Note [reduceNodesIntoVertices implementations] reduceNodesIntoVerticesOrd :: Ord key => ReduceFn key payload reduceNodesIntoVerticesOrd = reduceNodesIntoVertices Map.fromList Map.lookup -- See Note [reduceNodesIntoVertices implementations] reduceNodesIntoVerticesUniq :: Uniquable key => ReduceFn key payload reduceNodesIntoVerticesUniq = reduceNodesIntoVertices listToUFM (flip lookupUFM) {- ************************************************************************ * * * SCC * * ************************************************************************ -} type WorkItem key payload = (Node key payload, -- Tip of the path [payload]) -- Rest of the path; -- [a,b,c] means c depends on b, b depends on a -- | Find a reasonably short cycle a->b->c->a, in a graph -- The graph might not necessarily be strongly connected. findCycle :: forall payload key. Ord key => [Node key payload] -- The nodes. 
The dependencies can -- contain extra keys, which are ignored -> Maybe [payload] -- A cycle, starting with node -- so each depends on the next findCycle graph = goRoots plausible_roots where env :: Map.Map key (Node key payload) env = Map.fromList [ (node_key node, node) | node <- graph ] goRoots [] = Nothing goRoots (root:xs) = case go Set.empty (new_work root_deps []) [] of Nothing -> goRoots xs Just res -> Just res where DigraphNode root_payload root_key root_deps = root -- 'go' implements Dijkstra's algorithm, more or less go :: Set.Set key -- Visited -> [WorkItem key payload] -- Work list, items length n -> [WorkItem key payload] -- Work list, items length n+1 -> Maybe [payload] -- Returned cycle -- Invariant: in a call (go visited ps qs), -- visited = union (map tail (ps ++ qs)) go _ [] [] = Nothing -- No cycles go visited [] qs = go visited qs [] go visited (((DigraphNode payload key deps), path) : ps) qs | key == root_key = Just (root_payload : reverse path) | key `Set.member` visited = go visited ps qs | key `Map.notMember` env = go visited ps qs | otherwise = go (Set.insert key visited) ps (new_qs ++ qs) where new_qs = new_work deps (payload : path) -- Find the nodes with fewest dependencies among the SCC modules -- This is just a heuristic to find some plausible root module plausible_roots :: [Node key payload] plausible_roots = map fst (sortWith snd [ (node, count (`Map.member` env) (node_dependencies node)) | node <- graph ]) new_work :: [key] -> [payload] -> [WorkItem key payload] new_work deps path = [ (n, path) | Just n <- map (`Map.lookup` env) deps ] {- ************************************************************************ * * * Strongly Connected Component wrappers for Graph * * ************************************************************************ Note: the components are returned topologically sorted: later components depend on earlier ones, but not vice versa i.e. later components only have edges going from them to earlier ones. 
-} {- Note [Deterministic SCC] ~~~~~~~~~~~~~~~~~~~~~~~~ stronglyConnCompFromEdgedVerticesUniq, stronglyConnCompFromEdgedVerticesUniqR, stronglyConnCompFromEdgedVerticesOrd and stronglyConnCompFromEdgedVerticesOrdR provide a following guarantee: Given a deterministically ordered list of nodes it returns a deterministically ordered list of strongly connected components, where the list of vertices in an SCC is also deterministically ordered. Note that the order of edges doesn't need to be deterministic for this to work. We use the order of nodes to normalize the order of edges. -} stronglyConnCompG :: Graph node -> [SCC node] stronglyConnCompG graph = decodeSccs graph $ scc (gr_int_graph graph) decodeSccs :: Graph node -> [SCC Vertex] -> [SCC node] decodeSccs Graph { gr_vertex_to_node = vertex_fn } = map (fmap vertex_fn) -- The following two versions are provided for backwards compatibility: -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesOrd :: Ord key => [Node key payload] -> [SCC payload] stronglyConnCompFromEdgedVerticesOrd = map (fmap node_payload) . stronglyConnCompFromEdgedVerticesOrdR -- The following two versions are provided for backwards compatibility: -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesUniq :: Uniquable key => [Node key payload] -> [SCC payload] stronglyConnCompFromEdgedVerticesUniq = map (fmap node_payload) . stronglyConnCompFromEdgedVerticesUniqR -- The "R" interface is used when you expect to apply SCC to -- (some of) the result of SCC, so you don't want to lose the dependency info -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesOrdR :: Ord key => [Node key payload] -> [SCC (Node key payload)] stronglyConnCompFromEdgedVerticesOrdR = stronglyConnCompG . 
graphFromEdgedVerticesOrd -- The "R" interface is used when you expect to apply SCC to -- (some of) the result of SCC, so you don't want to lose the dependency info -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesUniqR :: Uniquable key => [Node key payload] -> [SCC (Node key payload)] stronglyConnCompFromEdgedVerticesUniqR = stronglyConnCompG . graphFromEdgedVerticesUniq {- ************************************************************************ * * * Misc wrappers for Graph * * ************************************************************************ -} topologicalSortG :: Graph node -> [node] topologicalSortG graph = map (gr_vertex_to_node graph) result where result = {-# SCC "Digraph.topSort" #-} G.topSort (gr_int_graph graph) reachableG :: Graph node -> node -> [node] reachableG graph from = map (gr_vertex_to_node graph) result where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from) result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) [from_vertex] outgoingG :: Graph node -> node -> [node] outgoingG graph from = map (gr_vertex_to_node graph) result where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from) result = gr_int_graph graph ! from_vertex -- | Given a list of roots return all reachable nodes. reachablesG :: Graph node -> [node] -> [node] reachablesG graph froms = map (gr_vertex_to_node graph) result where result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) vs vs = [ v | Just v <- map (gr_node_to_vertex graph) froms ] -- | Efficiently construct a map which maps each key to it's set of transitive -- dependencies. Only works on acyclic input. allReachable :: Ord key => Graph node -> (node -> key) -> M.Map key (S.Set key) allReachable = all_reachable reachableGraph -- | Efficiently construct a map which maps each key to it's set of transitive -- dependencies. 
Less efficient than @allReachable@, but works on cyclic input as well. allReachableCyclic :: Ord key => Graph node -> (node -> key) -> M.Map key (S.Set key) allReachableCyclic = all_reachable reachableGraphCyclic all_reachable :: Ord key => (IntGraph -> IM.IntMap IS.IntSet) -> Graph node -> (node -> key) -> M.Map key (S.Set key) all_reachable int_reachables (Graph g from _) keyOf = M.fromList [(k, IS.foldr (\v' vs -> keyOf (from v') `S.insert` vs) S.empty vs) | (v, vs) <- IM.toList int_graph , let k = keyOf (from v)] where int_graph = int_reachables g hasVertexG :: Graph node -> node -> Bool hasVertexG graph node = isJust $ gr_node_to_vertex graph node verticesG :: Graph node -> [node] verticesG graph = map (gr_vertex_to_node graph) $ G.vertices (gr_int_graph graph) edgesG :: Graph node -> [Edge node] edgesG graph = map (\(v1, v2) -> Edge (v2n v1) (v2n v2)) $ G.edges (gr_int_graph graph) where v2n = gr_vertex_to_node graph transposeG :: Graph node -> Graph node transposeG graph = Graph (G.transposeG (gr_int_graph graph)) (gr_vertex_to_node graph) (gr_node_to_vertex graph) emptyG :: Graph node -> Bool emptyG g = graphEmpty (gr_int_graph g) {- ************************************************************************ * * * Showing Graphs * * ************************************************************************ -} instance Outputable node => Outputable (Graph node) where ppr graph = vcat [ hang (text "Vertices:") 2 (vcat (map ppr $ verticesG graph)), hang (text "Edges:") 2 (vcat (map ppr $ edgesG graph)) ] instance Outputable node => Outputable (Edge node) where ppr (Edge from to) = ppr from <+> text "->" <+> ppr to graphEmpty :: G.Graph -> Bool graphEmpty g = lo > hi where (lo, hi) = bounds g {- ************************************************************************ * * * IntGraphs * * ************************************************************************ -} type IntGraph = G.Graph {- ------------------------------------------------------------ -- Depth first 
search numbering ------------------------------------------------------------ -} -- Data.Tree has flatten for Tree, but nothing for Forest preorderF :: Forest a -> [a] preorderF ts = concatMap flatten ts {- ------------------------------------------------------------ -- Finding reachable vertices ------------------------------------------------------------ -} -- This generalizes reachable which was found in Data.Graph reachable :: IntGraph -> [Vertex] -> [Vertex] reachable g vs = preorderF (G.dfs g vs) reachableGraph :: IntGraph -> IM.IntMap IS.IntSet reachableGraph g = res where do_one v = IS.unions (IS.fromList (g ! v) : mapMaybe (flip IM.lookup res) (g ! v)) res = IM.fromList [(v, do_one v) | v <- G.vertices g] scc :: IntGraph -> [SCC Vertex] scc graph = map decode forest where forest = {-# SCC "Digraph.scc" #-} G.scc graph decode (Node v []) | mentions_itself v = CyclicSCC [v] | otherwise = AcyclicSCC v decode other = CyclicSCC (dec other []) where dec (Node v ts) vs = v : foldr dec vs ts mentions_itself v = v `elem` (graph ! v) reachableGraphCyclic :: IntGraph -> IM.IntMap IS.IntSet reachableGraphCyclic g = foldl' add_one_comp mempty comps where neighboursOf v = g!v comps = scc g -- To avoid divergence on cyclic input, we build the result -- strongly connected component by component, in topological -- order. For each SCC, we know that: -- -- * All vertices in the component can reach all other vertices -- in the component ("local" reachables) -- -- * Other reachable vertices ("remote" reachables) must come -- from earlier components, either via direct neighbourhood, or -- transitively from earlier reachability map -- -- This allows us to build the extension of the reachability map -- directly, without any self-reference, thereby avoiding a loop. 
add_one_comp :: IM.IntMap IS.IntSet -> SCC Vertex -> IM.IntMap IS.IntSet add_one_comp earlier (AcyclicSCC v) = IM.insert v all_remotes earlier where earlier_neighbours = neighboursOf v earlier_further = mapMaybe (flip IM.lookup earlier) earlier_neighbours all_remotes = IS.unions (IS.fromList earlier_neighbours : earlier_further) add_one_comp earlier (CyclicSCC vs) = IM.union (IM.fromList [(v, local v `IS.union` all_remotes) | v <- vs]) earlier where all_locals = IS.fromList vs local v = IS.delete v all_locals -- Arguably, for a cyclic SCC we should include each -- vertex in its own reachable set. However, this could -- lead to a lot of extra pain in client code to avoid -- looping when traversing the reachability map. all_neighbours = IS.fromList (concatMap neighboursOf vs) earlier_neighbours = all_neighbours IS.\\ all_locals earlier_further = mapMaybe (flip IM.lookup earlier) (IS.toList earlier_neighbours) all_remotes = IS.unions (earlier_neighbours : earlier_further) {- ************************************************************************ * * * Classify Edge Types * * ************************************************************************ -} -- Remark: While we could generalize this algorithm this comes at a runtime -- cost and with no advantages. If you find yourself using this with graphs -- not easily represented using Int nodes please consider rewriting this -- using the more general Graph type. -- | Edge direction based on DFS Classification data EdgeType = Forward | Cross | Backward -- ^ Loop back towards the root node. 
-- Eg backjumps in loops | SelfLoop -- ^ v -> v deriving (Eq,Ord) instance Outputable EdgeType where ppr Forward = text "Forward" ppr Cross = text "Cross" ppr Backward = text "Backward" ppr SelfLoop = text "SelfLoop" newtype Time = Time Int deriving (Eq,Ord,Num,Outputable) --Allow for specialization {-# INLINEABLE classifyEdges #-} -- | Given a start vertex, a way to get successors from a node -- and a list of (directed) edges classify the types of edges. classifyEdges :: forall key. Uniquable key => key -> (key -> [key]) -> [(key,key)] -> [((key, key), EdgeType)] classifyEdges root getSucc edges = --let uqe (from,to) = (getUnique from, getUnique to) --in pprTrace "Edges:" (ppr $ map uqe edges) $ zip edges $ map classify edges where (_time, starts, ends) = addTimes (0,emptyUFM,emptyUFM) root classify :: (key,key) -> EdgeType classify (from,to) | startFrom < startTo , endFrom > endTo = Forward | startFrom > startTo , endFrom < endTo = Backward | startFrom > startTo , endFrom > endTo = Cross | getUnique from == getUnique to = SelfLoop | otherwise = pprPanic "Failed to classify edge of Graph" (ppr (getUnique from, getUnique to)) where getTime event node | Just time <- lookupUFM event node = time | otherwise = pprPanic "Failed to classify edge of CFG - not not timed" (text "edges" <> ppr (getUnique from, getUnique to) <+> ppr starts <+> ppr ends ) startFrom = getTime starts from startTo = getTime starts to endFrom = getTime ends from endTo = getTime ends to addTimes :: (Time, UniqFM key Time, UniqFM key Time) -> key -> (Time, UniqFM key Time, UniqFM key Time) addTimes (time,starts,ends) n --Dont reenter nodes | elemUFM n starts = (time,starts,ends) | otherwise = let starts' = addToUFM starts n time time' = time + 1 succs = getSucc n :: [key] (time'',starts'',ends') = foldl' addTimes (time',starts',ends) succs ends'' = addToUFM ends' n time'' in (time'' + 1, starts'', ends'') graphFromVerticesAndAdjacency :: Ord key => [Node key payload] -> [(key, key)] -- First 
component is source vertex key, -- second is target vertex key (thing depended on) -- Unlike the other interface I insist they correspond to -- actual vertices because the alternative hides bugs. I can't -- do the same thing for the other one for backcompat reasons. -> Graph (Node key payload) graphFromVerticesAndAdjacency [] _ = emptyGraph graphFromVerticesAndAdjacency vertices edges = Graph graph vertex_node (key_vertex . key_extractor) where key_extractor = node_key (bounds, vertex_node, key_vertex, _) = reduceNodesIntoVerticesOrd vertices key_extractor key_vertex_pair (a, b) = (expectJust "graphFromVerticesAndAdjacency" $ key_vertex a, expectJust "graphFromVerticesAndAdjacency" $ key_vertex b) reduced_edges = map key_vertex_pair edges graph = G.buildG bounds reduced_edges ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Graph/UnVar.hs0000644000000000000000000001433407346545000021214 0ustar0000000000000000{- Copyright (c) 2014 Joachim Breitner A data structure for undirected graphs of variables (or in plain terms: Sets of unordered pairs of numbers) This is very specifically tailored for the use in CallArity. In particular it stores the graph as a union of complete and complete bipartite graph, which would be very expensive to store as sets of edges or as adjanceny lists. It does not normalize the graphs. This means that g `unionUnVarGraph` g is equal to g, but twice as expensive and large. 
-} module GHC.Data.Graph.UnVar ( UnVarSet , emptyUnVarSet, mkUnVarSet, unionUnVarSet, unionUnVarSets , extendUnVarSet, extendUnVarSetList, delUnVarSet, delUnVarSetList , elemUnVarSet, isEmptyUnVarSet , UnVarGraph , emptyUnVarGraph , unionUnVarGraph, unionUnVarGraphs , completeGraph, completeBipartiteGraph , neighbors , hasLoopAt , delNode , domUFMUnVarSet ) where import GHC.Prelude import GHC.Types.Unique.FM( UniqFM, ufmToSet_Directly ) import GHC.Types.Var import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Word import qualified GHC.Data.Word64Set as S -- We need a type for sets of variables (UnVarSet). -- We do not use VarSet, because for that we need to have the actual variable -- at hand, and we do not have that when we turn the domain of a VarEnv into a UnVarSet. -- Therefore, use a IntSet directly (which is likely also a bit more efficient). -- Set of uniques, i.e. for adjacent nodes newtype UnVarSet = UnVarSet S.Word64Set deriving Eq k :: Var -> Word64 k v = getKey (getUnique v) domUFMUnVarSet :: UniqFM key elt -> UnVarSet domUFMUnVarSet ae = UnVarSet $ ufmToSet_Directly ae emptyUnVarSet :: UnVarSet emptyUnVarSet = UnVarSet S.empty elemUnVarSet :: Var -> UnVarSet -> Bool elemUnVarSet v (UnVarSet s) = k v `S.member` s isEmptyUnVarSet :: UnVarSet -> Bool isEmptyUnVarSet (UnVarSet s) = S.null s delUnVarSet :: UnVarSet -> Var -> UnVarSet delUnVarSet (UnVarSet s) v = UnVarSet $ k v `S.delete` s delUnVarSetList :: UnVarSet -> [Var] -> UnVarSet delUnVarSetList s vs = s `minusUnVarSet` mkUnVarSet vs minusUnVarSet :: UnVarSet -> UnVarSet -> UnVarSet minusUnVarSet (UnVarSet s) (UnVarSet s') = UnVarSet $ s `S.difference` s' sizeUnVarSet :: UnVarSet -> Int sizeUnVarSet (UnVarSet s) = S.size s mkUnVarSet :: [Var] -> UnVarSet mkUnVarSet vs = UnVarSet $ S.fromList $ map k vs extendUnVarSet :: Var -> UnVarSet -> UnVarSet extendUnVarSet v (UnVarSet s) = UnVarSet $ S.insert (k v) s extendUnVarSetList :: [Var] -> UnVarSet -> UnVarSet extendUnVarSetList vs s = s 
`unionUnVarSet` mkUnVarSet vs unionUnVarSet :: UnVarSet -> UnVarSet -> UnVarSet unionUnVarSet (UnVarSet set1) (UnVarSet set2) = UnVarSet (set1 `S.union` set2) unionUnVarSets :: [UnVarSet] -> UnVarSet unionUnVarSets = foldl' (flip unionUnVarSet) emptyUnVarSet instance Outputable UnVarSet where ppr (UnVarSet s) = braces $ hcat $ punctuate comma [ ppr (mkUniqueGrimily i) | i <- S.toList s] data UnVarGraph = CBPG !UnVarSet !UnVarSet -- ^ complete bipartite graph | CG !UnVarSet -- ^ complete graph | Union UnVarGraph UnVarGraph | Del !UnVarSet UnVarGraph emptyUnVarGraph :: UnVarGraph emptyUnVarGraph = CG emptyUnVarSet unionUnVarGraph :: UnVarGraph -> UnVarGraph -> UnVarGraph {- Premature optimisation, it seems. unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4]) | s1 == s3 && s2 == s4 = pprTrace "unionUnVarGraph fired" empty $ completeGraph (s1 `unionUnVarSet` s2) unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4]) | s2 == s3 && s1 == s4 = pprTrace "unionUnVarGraph fired2" empty $ completeGraph (s1 `unionUnVarSet` s2) -} unionUnVarGraph a b | is_null a = b | is_null b = a | otherwise = Union a b unionUnVarGraphs :: [UnVarGraph] -> UnVarGraph unionUnVarGraphs = foldl' unionUnVarGraph emptyUnVarGraph -- completeBipartiteGraph A B = { {a,b} | a ∈ A, b ∈ B } completeBipartiteGraph :: UnVarSet -> UnVarSet -> UnVarGraph completeBipartiteGraph s1 s2 = prune $ CBPG s1 s2 completeGraph :: UnVarSet -> UnVarGraph completeGraph s = prune $ CG s -- (v' ∈ neighbors G v) <=> v--v' ∈ G neighbors :: UnVarGraph -> Var -> UnVarSet neighbors = go where go (Del d g) v | v `elemUnVarSet` d = emptyUnVarSet | otherwise = go g v `minusUnVarSet` d go (Union g1 g2) v = go g1 v `unionUnVarSet` go g2 v go (CG s) v = if v `elemUnVarSet` s then s else emptyUnVarSet go (CBPG s1 s2) v = (if v `elemUnVarSet` s1 then s2 else emptyUnVarSet) `unionUnVarSet` (if v `elemUnVarSet` s2 then s1 else emptyUnVarSet) -- hasLoopAt G v <=> v--v ∈ G hasLoopAt :: UnVarGraph -> Var -> 
Bool hasLoopAt = go where go (Del d g) v | v `elemUnVarSet` d = False | otherwise = go g v go (Union g1 g2) v = go g1 v || go g2 v go (CG s) v = v `elemUnVarSet` s go (CBPG s1 s2) v = v `elemUnVarSet` s1 && v `elemUnVarSet` s2 delNode :: UnVarGraph -> Var -> UnVarGraph delNode (Del d g) v = Del (extendUnVarSet v d) g delNode g v | is_null g = emptyUnVarGraph | otherwise = Del (mkUnVarSet [v]) g -- | Resolves all `Del`, by pushing them in, and simplifies `∅ ∪ … = …` prune :: UnVarGraph -> UnVarGraph prune = go emptyUnVarSet where go :: UnVarSet -> UnVarGraph -> UnVarGraph go dels (Del dels' g) = go (dels `unionUnVarSet` dels') g go dels (Union g1 g2) | is_null g1' = g2' | is_null g2' = g1' | otherwise = Union g1' g2' where g1' = go dels g1 g2' = go dels g2 go dels (CG s) = CG (s `minusUnVarSet` dels) go dels (CBPG s1 s2) = CBPG (s1 `minusUnVarSet` dels) (s2 `minusUnVarSet` dels) -- | Shallow empty check. is_null :: UnVarGraph -> Bool is_null (CBPG s1 s2) = isEmptyUnVarSet s1 || isEmptyUnVarSet s2 is_null (CG s) = isEmptyUnVarSet s is_null _ = False instance Outputable UnVarGraph where ppr (Del d g) = text "Del" <+> ppr (sizeUnVarSet d) <+> parens (ppr g) ppr (Union a b) = text "Union" <+> parens (ppr a) <+> parens (ppr b) ppr (CG s) = text "CG" <+> ppr (sizeUnVarSet s) ppr (CBPG a b) = text "CBPG" <+> ppr (sizeUnVarSet a) <+> ppr (sizeUnVarSet b) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/IOEnv.hs0000644000000000000000000002045407346545000020100 0ustar0000000000000000 {-# LANGUAGE DerivingVia #-} {-# LANGUAGE PatternSynonyms #-} -- -- (c) The University of Glasgow 2002-2006 -- -- | The IO Monad with an environment -- -- The environment is passed around as a Reader monad but -- as its in the IO monad, mutable references can be used -- for updating state. 
-- module GHC.Data.IOEnv ( IOEnv, -- Instance of Monad -- Monad utilities module GHC.Utils.Monad, -- Errors failM, failWithM, IOEnvFailure(..), -- Getting at the environment getEnv, setEnv, updEnv, runIOEnv, unsafeInterleaveM, uninterruptibleMaskM_, tryM, tryAllM, tryMostM, fixM, -- I/O operations IORef, newMutVar, readMutVar, writeMutVar, updMutVar, atomicUpdMutVar, atomicUpdMutVar' ) where import GHC.Prelude import GHC.Driver.DynFlags import {-# SOURCE #-} GHC.Driver.Hooks import GHC.IO (catchException) import GHC.Utils.Exception import GHC.Unit.Module import GHC.Utils.Panic import Data.IORef ( IORef, newIORef, readIORef, writeIORef, modifyIORef, atomicModifyIORef, atomicModifyIORef' ) import System.IO.Unsafe ( unsafeInterleaveIO ) import System.IO ( fixIO ) import Control.Monad import Control.Monad.Trans.Reader import Control.Monad.Catch (MonadCatch, MonadMask, MonadThrow) import GHC.Utils.Monad import GHC.Utils.Logger import Control.Applicative (Alternative(..)) import GHC.Exts( oneShot ) import Control.Concurrent.MVar (newEmptyMVar, readMVar, putMVar) import Control.Concurrent (forkIO, killThread) ---------------------------------------------------------------------- -- Defining the monad type ---------------------------------------------------------------------- newtype IOEnv env a = IOEnv' (env -> IO a) deriving (MonadThrow, MonadCatch, MonadMask, MonadFix) via (ReaderT env IO) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad instance Functor (IOEnv env) where fmap f (IOEnv g) = IOEnv $ \env -> fmap f (g env) a <$ IOEnv g = IOEnv $ \env -> g env >> pure a instance MonadIO (IOEnv env) where liftIO f = IOEnv (\_ -> f) pattern IOEnv :: forall env a. 
(env -> IO a) -> IOEnv env a pattern IOEnv m <- IOEnv' m where IOEnv m = IOEnv' (oneShot m) {-# COMPLETE IOEnv #-} unIOEnv :: IOEnv env a -> (env -> IO a) unIOEnv (IOEnv m) = m instance Monad (IOEnv m) where (>>=) = thenM (>>) = (*>) instance MonadFail (IOEnv m) where fail _ = failM -- Ignore the string instance Applicative (IOEnv m) where pure = returnM IOEnv f <*> IOEnv x = IOEnv (\ env -> f env <*> x env ) (*>) = thenM_ returnM :: a -> IOEnv env a returnM a = IOEnv (\ _ -> return a) thenM :: IOEnv env a -> (a -> IOEnv env b) -> IOEnv env b thenM (IOEnv m) f = IOEnv (\ env -> do { r <- m env ; unIOEnv (f r) env }) thenM_ :: IOEnv env a -> IOEnv env b -> IOEnv env b thenM_ (IOEnv m) f = IOEnv (\ env -> do { _ <- m env ; unIOEnv f env }) failM :: IOEnv env a failM = IOEnv (\ _ -> throwIO IOEnvFailure) failWithM :: String -> IOEnv env a failWithM s = IOEnv (\ _ -> ioError (userError s)) data IOEnvFailure = IOEnvFailure instance Show IOEnvFailure where show IOEnvFailure = "IOEnv failure" instance Exception IOEnvFailure instance ContainsDynFlags env => HasDynFlags (IOEnv env) where getDynFlags = do env <- getEnv return $! extractDynFlags env instance ContainsHooks env => HasHooks (IOEnv env) where getHooks = do env <- getEnv return $! extractHooks env instance ContainsLogger env => HasLogger (IOEnv env) where getLogger = do env <- getEnv return $! extractLogger env instance ContainsModule env => HasModule (IOEnv env) where getModule = do env <- getEnv return $ extractModule env ---------------------------------------------------------------------- -- Fundamental combinators specific to the monad ---------------------------------------------------------------------- --------------------------- runIOEnv :: env -> IOEnv env a -> IO a runIOEnv env (IOEnv m) = m env --------------------------- {-# NOINLINE fixM #-} -- Aargh! Not inlining fixM alleviates a space leak problem. 
-- Normally fixM is used with a lazy tuple match: if the optimiser is -- shown the definition of fixM, it occasionally transforms the code -- in such a way that the code generator doesn't spot the selector -- thunks. Sigh. fixM :: (a -> IOEnv env a) -> IOEnv env a fixM f = IOEnv (\ env -> fixIO (\ r -> unIOEnv (f r) env)) --------------------------- tryM :: IOEnv env r -> IOEnv env (Either IOEnvFailure r) -- Reflect UserError exceptions (only) into IOEnv monad -- Other exceptions are not caught; they are simply propagated as exns -- -- The idea is that errors in the program being compiled will give rise -- to UserErrors. But, say, pattern-match failures in GHC itself should -- not be caught here, else they'll be reported as errors in the program -- begin compiled! tryM (IOEnv thing) = IOEnv (\ env -> tryIOEnvFailure (thing env)) tryIOEnvFailure :: IO a -> IO (Either IOEnvFailure a) tryIOEnvFailure = try tryAllM :: IOEnv env r -> IOEnv env (Either SomeException r) -- Catch *all* synchronous exceptions -- This is used when running a Template-Haskell splice, when -- even a pattern-match failure is a programmer error tryAllM (IOEnv thing) = IOEnv (\ env -> safeTry (thing env)) -- | Like 'try', but doesn't catch asynchronous exceptions safeTry :: IO a -> IO (Either SomeException a) safeTry act = do var <- newEmptyMVar -- uninterruptible because we want to mask around 'killThread', which is interruptible. 
uninterruptibleMask $ \restore -> do -- Fork, so that 'act' is safe from all asynchronous exceptions other than the ones we send it t <- forkIO $ try (restore act) >>= putMVar var restore (readMVar var) `catchException` \(e :: SomeException) -> do -- Control reaches this point only if the parent thread was sent an async exception -- In that case, kill the 'act' thread and re-raise the exception killThread t throwIO e tryMostM :: IOEnv env r -> IOEnv env (Either SomeException r) tryMostM (IOEnv thing) = IOEnv (\ env -> tryMost (thing env)) --------------------------- unsafeInterleaveM :: IOEnv env a -> IOEnv env a unsafeInterleaveM (IOEnv m) = IOEnv (\ env -> unsafeInterleaveIO (m env)) uninterruptibleMaskM_ :: IOEnv env a -> IOEnv env a uninterruptibleMaskM_ (IOEnv m) = IOEnv (\ env -> uninterruptibleMask_ (m env)) ---------------------------------------------------------------------- -- Alternative/MonadPlus ---------------------------------------------------------------------- instance Alternative (IOEnv env) where empty = IOEnv (const empty) m <|> n = IOEnv (\env -> unIOEnv m env <|> unIOEnv n env) instance MonadPlus (IOEnv env) ---------------------------------------------------------------------- -- Accessing input/output ---------------------------------------------------------------------- newMutVar :: a -> IOEnv env (IORef a) newMutVar val = liftIO (newIORef val) writeMutVar :: IORef a -> a -> IOEnv env () writeMutVar var val = liftIO (writeIORef var val) readMutVar :: IORef a -> IOEnv env a readMutVar var = liftIO (readIORef var) updMutVar :: IORef a -> (a -> a) -> IOEnv env () updMutVar var upd = liftIO (modifyIORef var upd) -- | Atomically update the reference. Does not force the evaluation of the -- new variable contents. For strict update, use 'atomicUpdMutVar''. atomicUpdMutVar :: IORef a -> (a -> (a, b)) -> IOEnv env b atomicUpdMutVar var upd = liftIO (atomicModifyIORef var upd) -- | Strict variant of 'atomicUpdMutVar'. 
atomicUpdMutVar' :: IORef a -> (a -> (a, b)) -> IOEnv env b atomicUpdMutVar' var upd = liftIO (atomicModifyIORef' var upd) ---------------------------------------------------------------------- -- Accessing the environment ---------------------------------------------------------------------- getEnv :: IOEnv env env {-# INLINE getEnv #-} getEnv = IOEnv (\ env -> return env) -- | Perform a computation with a different environment setEnv :: env' -> IOEnv env' a -> IOEnv env a {-# INLINE setEnv #-} setEnv new_env (IOEnv m) = IOEnv (\ _ -> m new_env) -- | Perform a computation with an altered environment updEnv :: (env -> env') -> IOEnv env' a -> IOEnv env a {-# INLINE updEnv #-} updEnv upd (IOEnv m) = IOEnv (\ env -> m (upd env)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/List/0000755000000000000000000000000007346545000017472 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/List/Infinite.hs0000644000000000000000000001341007346545000021572 0ustar0000000000000000{-# LANGUAGE BlockArguments #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE RankNTypes #-} module GHC.Data.List.Infinite ( Infinite (..) , head, tail , filter , (++) , unfoldr , (!!) , groupBy , dropList , iterate , concatMap , allListsOf , toList , repeat ) where import Prelude ((-), Applicative (..), Bool (..), Foldable, Functor (..), Int, Maybe (..), Traversable (..), flip, otherwise) import Control.Category (Category (..)) import Control.Monad (guard) import qualified Data.Foldable as F import Data.List.NonEmpty (NonEmpty (..)) import qualified GHC.Base as List (build) data Infinite a = Inf a (Infinite a) deriving (Foldable, Functor, Traversable) head :: Infinite a -> a head (Inf a _) = a {-# NOINLINE [1] head #-} tail :: Infinite a -> Infinite a tail (Inf _ as) = as {-# NOINLINE [1] tail #-} {-# RULES "head/build" forall (g :: forall b . (a -> b -> b) -> b) . 
head (build g) = g \ x _ -> x #-} instance Applicative Infinite where pure = repeat Inf f fs <*> Inf a as = Inf (f a) (fs <*> as) mapMaybe :: (a -> Maybe b) -> Infinite a -> Infinite b mapMaybe f = go where go (Inf a as) = let bs = go as in case f a of Nothing -> bs Just b -> Inf b bs {-# NOINLINE [1] mapMaybe #-} {-# RULES "mapMaybe" [~1] forall f as . mapMaybe f as = build \ c -> foldr (mapMaybeFB c f) as "mapMaybeList" [1] forall f . foldr (mapMaybeFB Inf f) = mapMaybe f #-} {-# INLINE [0] mapMaybeFB #-} mapMaybeFB :: (b -> r -> r) -> (a -> Maybe b) -> a -> r -> r mapMaybeFB cons f a bs = case f a of Nothing -> bs Just r -> cons r bs filter :: (a -> Bool) -> Infinite a -> Infinite a filter f = mapMaybe (\ a -> a <$ guard (f a)) {-# INLINE filter #-} infixr 5 ++ (++) :: Foldable f => f a -> Infinite a -> Infinite a (++) = flip (F.foldr Inf) unfoldr :: (b -> (a, b)) -> b -> Infinite a unfoldr f b = build \ c -> let go b = case f b of (a, b') -> a `c` go b' in go b {-# INLINE unfoldr #-} (!!) :: Infinite a -> Int -> a Inf a _ !! 0 = a Inf _ as !! n = as !! (n-1) groupBy :: (a -> a -> Bool) -> Infinite a -> Infinite (NonEmpty a) groupBy eq = go where go (Inf a as) = Inf (a:|bs) (go cs) where (bs, cs) = span (eq a) as span :: (a -> Bool) -> Infinite a -> ([a], Infinite a) span p = spanJust (\ a -> a <$ guard (p a)) {-# INLINE span #-} spanJust :: (a -> Maybe b) -> Infinite a -> ([b], Infinite a) spanJust p = go where go as@(Inf a as') | Just b <- p a = let (bs, cs) = go as' in (b:bs, cs) | otherwise = ([], as) iterate :: (a -> a) -> a -> Infinite a iterate f = go where go a = Inf a (go (f a)) {-# NOINLINE [1] iterate #-} {-# RULES "iterate" [~1] forall f a . 
iterate f a = build (\ c -> iterateFB c f a) "iterateFB" [1] iterateFB Inf = iterate #-} iterateFB :: (a -> b -> b) -> (a -> a) -> a -> b iterateFB c f a = go a where go a = a `c` go (f a) {-# INLINE [0] iterateFB #-} concatMap :: Foldable f => (a -> f b) -> Infinite a -> Infinite b concatMap f = go where go (Inf a as) = f a ++ go as {-# NOINLINE [1] concatMap #-} {-# RULES "concatMap" forall f as . concatMap f as = build \ c -> foldr (\ x b -> F.foldr c b (f x)) as #-} {-# SPECIALIZE concatMap :: (a -> [b]) -> Infinite a -> Infinite b #-} foldr :: (a -> b -> b) -> Infinite a -> b foldr f = go where go (Inf a as) = f a (go as) {-# INLINE [0] foldr #-} build :: (forall b . (a -> b -> b) -> b) -> Infinite a build g = g Inf {-# INLINE [1] build #-} -- Analogous to 'foldr'/'build' fusion for '[]' {-# RULES "foldr/build" forall f (g :: forall b . (a -> b -> b) -> b) . foldr f (build g) = g f "foldr/id" foldr Inf = id "foldr/cons/build" forall f a (g :: forall b . (a -> b -> b) -> b) . foldr f (Inf a (build g)) = f a (g f) #-} {-# RULES "map" [~1] forall f (as :: Infinite a) . fmap f as = build \ c -> foldr (mapFB c f) as "mapFB" forall c f g . mapFB (mapFB c f) g = mapFB c (f . g) "mapFB/id" forall c . mapFB c (\ x -> x) = c #-} mapFB :: (b -> c -> c) -> (a -> b) -> a -> c -> c mapFB c f = \ x ys -> c (f x) ys {-# INLINE [0] mapFB #-} dropList :: [a] -> Infinite b -> Infinite b dropList [] bs = bs dropList (_:as) (Inf _ bs) = dropList as bs -- | Compute all lists of the given alphabet. -- For example: @'allListsOf' "ab" = ["a", "b", "aa", "ba", "ab", "bb", "aaa", "baa", "aba", ...]@ allListsOf :: [a] -> Infinite [a] allListsOf as = concatMap (\ bs -> [a:bs | a <- as]) ([] `Inf` allListsOf as) -- See Note [Fusion for `Infinite` lists]. 
toList :: Infinite a -> [a] toList = \ as -> List.build (\ c _ -> foldr c as) {-# INLINE toList #-} repeat :: a -> Infinite a repeat a = as where as = Inf a as {-# INLINE [0] repeat #-} repeatFB :: (a -> b -> b) -> a -> b repeatFB c x = xs where xs = c x xs {-# INLINE [0] repeatFB #-} {-# RULES "repeat" [~1] forall a . repeat a = build \ c -> repeatFB c a "repeatFB" [1] repeatFB Inf = repeat #-} {- Note [Fusion for `Infinite` lists] ~~~~~~~~~~~~~~~~~~~~ We use RULES to support foldr/build fusion for Infinite lists, analogously to the RULES in GHC.Base to support fusion for regular lists. In particular, we define the following: • `build :: (forall b . (a -> b -> b) -> b) -> Infinite a` • `foldr :: (a -> b -> b) -> Infinite a -> b` • A RULE `foldr f (build g) = g f` • `Infinite`-producing functions in terms of `build`, and `Infinite`-consuming functions in terms of `foldr` This can work across data types. For example, consider `toList :: Infinite a -> [a]`. We want 'toList' to be both a good consumer (of 'Infinite' lists) and a good producer (of '[]'). Ergo, we define it in terms of 'Infinite.foldr' and `List.build`. For a bigger example, consider `List.map f (toList (Infinite.map g as))` We want to fuse away the intermediate `Infinite` structure between `Infnite.map` and `toList`, and the list structure between `toList` and `List.map`. And indeed we do: see test "InfiniteListFusion". 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/List/SetOps.hs0000644000000000000000000002035607346545000021251 0ustar0000000000000000{-# LANGUAGE CPP #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Set-like operations on lists -- -- Avoid using them as much as possible module GHC.Data.List.SetOps ( unionLists, unionListsOrd, minusList, -- Association lists Assoc, assoc, assocMaybe, assocUsing, assocDefault, assocDefaultUsing, -- Duplicate handling hasNoDups, removeDups, removeDupsOn, nubOrdBy, findDupsEq, equivClasses, -- Indexing getNth, -- Membership isIn, isn'tIn, ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import qualified Data.List as L import qualified Data.List.NonEmpty as NE import Data.List.NonEmpty (NonEmpty(..)) import Data.Ord (comparing) import qualified Data.Set as S getNth :: Outputable a => [a] -> Int -> a getNth xs n = assertPpr (xs `lengthExceeds` n) (ppr n $$ ppr xs) $ xs !! n {- ************************************************************************ * * Treating lists as sets Assumes the lists contain no duplicates, but are unordered * * ************************************************************************ -} -- | Combines the two lists while keeping their order, placing the first argument -- first in the result. -- -- Uses a set internally to record duplicates. This makes it slightly slower for -- very small lists but avoids quadratic behaviour for large lists. unionListsOrd :: (HasDebugCallStack, Outputable a, Ord a) => [a] -> [a] -> [a] unionListsOrd xs ys -- Since both arguments don't have internal duplicates we can just take all of xs -- and every element of ys that's not already in xs. 
= let set_ys = S.fromList ys in (filter (\e -> not $ S.member e set_ys) xs) ++ ys -- | Assumes that the arguments contain no duplicates unionLists :: (HasDebugCallStack, Outputable a, Eq a) => [a] -> [a] -> [a] -- We special case some reasonable common patterns. unionLists xs [] = xs unionLists [] ys = ys unionLists [x] ys | isIn "unionLists" x ys = ys | otherwise = x:ys unionLists xs [y] | isIn "unionLists" y xs = xs | otherwise = y:xs unionLists xs ys = warnPprTrace (lengthExceeds xs 100 || lengthExceeds ys 100) "unionLists" (ppr xs $$ ppr ys) $ [x | x <- xs, isn'tIn "unionLists" x ys] ++ ys -- | Calculate the set difference of two lists. This is -- /O((m + n) log n)/, where we subtract a list of /n/ elements -- from a list of /m/ elements. -- -- Extremely short cases are handled specially: -- When /m/ or /n/ is 0, this takes /O(1)/ time. When /m/ is 1, -- it takes /O(n)/ time. minusList :: Ord a => [a] -> [a] -> [a] -- There's no point building a set to perform just one lookup, so we handle -- extremely short lists specially. It might actually be better to use -- an O(m*n) algorithm when m is a little longer (perhaps up to 4 or even 5). -- The tipping point will be somewhere in the area of where /m/ and /log n/ -- become comparable, but we probably don't want to work too hard on this. minusList [] _ = [] minusList xs@[x] ys | x `elem` ys = [] | otherwise = xs -- Using an empty set or a singleton would also be silly, so let's not. minusList xs [] = xs minusList xs [y] = filter (/= y) xs -- When each list has at least two elements, we build a set from the -- second argument, allowing us to filter the first argument fairly -- efficiently. 
minusList xs ys = filter (`S.notMember` yss) xs where yss = S.fromList ys {- ************************************************************************ * * \subsection[Utils-assoc]{Association lists} * * ************************************************************************ Inefficient finite maps based on association lists and equality. -} -- | A finite mapping based on equality and association lists. type Assoc a b = [(a,b)] assoc :: (Eq a) => String -> Assoc a b -> a -> b assocDefault :: (Eq a) => b -> Assoc a b -> a -> b assocUsing :: (a -> a -> Bool) -> String -> Assoc a b -> a -> b -- | Lookup key, fail gracefully using Nothing if not found. assocMaybe :: (Eq a) => Assoc a b -> a -> Maybe b assocDefaultUsing :: (a -> a -> Bool) -> b -> Assoc a b -> a -> b assocDefaultUsing _ deflt [] _ = deflt assocDefaultUsing eq deflt ((k,v) : rest) key | k `eq` key = v | otherwise = assocDefaultUsing eq deflt rest key assoc crash_msg list key = assocDefaultUsing (==) (panic ("Failed in assoc: " ++ crash_msg)) list key assocDefault deflt list key = assocDefaultUsing (==) deflt list key assocUsing eq crash_msg list key = assocDefaultUsing eq (panic ("Failed in assoc: " ++ crash_msg)) list key assocMaybe alist key = lookup alist where lookup [] = Nothing lookup ((tv,ty):rest) = if key == tv then Just ty else lookup rest {- ************************************************************************ * * \subsection[Utils-dups]{Duplicate-handling} * * ************************************************************************ -} hasNoDups :: (Eq a) => [a] -> Bool hasNoDups xs = f [] xs where f _ [] = True f seen_so_far (x:xs) = if x `is_elem` seen_so_far then False else f (x:seen_so_far) xs is_elem = isIn "hasNoDups" equivClasses :: (a -> a -> Ordering) -- Comparison -> [a] -> [NonEmpty a] equivClasses _ [] = [] equivClasses _ [stuff] = [stuff :| []] equivClasses cmp items = NE.groupBy eq (L.sortBy cmp items) where eq a b = case cmp a b of { EQ -> True; _ -> False } -- | Remove the 
duplicates from a list using the provided -- comparison function. Might change the order of elements. -- -- Returns the list without duplicates, and accumulates -- all the duplicates in the second component of its result. removeDups :: (a -> a -> Ordering) -- Comparison function -> [a] -> ([a], -- List with no duplicates [NonEmpty a]) -- List of duplicate groups. One representative -- from each group appears in the first result removeDups _ [] = ([], []) removeDups _ [x] = ([x],[]) removeDups cmp xs = case L.mapAccumR collect_dups [] (equivClasses cmp xs) of { (dups, xs') -> (xs', dups) } where collect_dups :: [NonEmpty a] -> NonEmpty a -> ([NonEmpty a], a) collect_dups dups_so_far (x :| []) = (dups_so_far, x) collect_dups dups_so_far dups@(x :| _) = (dups:dups_so_far, x) removeDupsOn :: Ord b => (a -> b) -> [a] -> ([a], [NonEmpty a]) removeDupsOn f x = removeDups (comparing f) x -- | Remove the duplicates from a list using the provided -- comparison function. nubOrdBy :: (a -> a -> Ordering) -> [a] -> [a] nubOrdBy cmp xs = fst (removeDups cmp xs) findDupsEq :: (a->a->Bool) -> [a] -> [NonEmpty a] findDupsEq _ [] = [] findDupsEq eq (x:xs) | L.null eq_xs = findDupsEq eq xs | otherwise = (x :| eq_xs) : findDupsEq eq neq_xs where (eq_xs, neq_xs) = L.partition (eq x) xs -- Debugging/specialising versions of \tr{elem} and \tr{notElem} # if !defined(DEBUG) isIn, isn'tIn :: Eq a => String -> a -> [a] -> Bool isIn _msg x ys = x `elem` ys isn'tIn _msg x ys = x `notElem` ys # else /* DEBUG */ isIn, isn'tIn :: (HasDebugCallStack, Eq a) => String -> a -> [a] -> Bool isIn msg x ys = elem100 0 x ys where elem100 :: Eq a => Int -> a -> [a] -> Bool elem100 _ _ [] = False elem100 i x (y:ys) | i > 100 = warnPprTrace True ("Over-long elem in " ++ msg) empty (x `elem` (y:ys)) | otherwise = x == y || elem100 (i + 1) x ys isn'tIn msg x ys = notElem100 0 x ys where notElem100 :: Eq a => Int -> a -> [a] -> Bool notElem100 _ _ [] = True notElem100 i x (y:ys) | i > 100 = warnPprTrace True 
("Over-long notElem in " ++ msg) empty (x `notElem` (y:ys)) | otherwise = x /= y && notElem100 (i + 1) x ys # endif /* DEBUG */ ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Maybe.hs0000644000000000000000000000757407346545000020165 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE FlexibleContexts #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Data.Maybe ( module Data.Maybe, MaybeErr(..), -- Instance of Monad failME, isSuccess, orElse, firstJust, firstJusts, firstJustsM, whenIsJust, expectJust, rightToMaybe, -- * MaybeT MaybeT(..), liftMaybeT, tryMaybeT ) where import GHC.Prelude import GHC.IO (catchException) import Control.Monad import Control.Monad.Trans.Maybe import Control.Exception (SomeException(..)) import Data.Maybe import Data.Foldable ( foldlM, for_ ) import GHC.Utils.Misc (HasDebugCallStack) import Data.List.NonEmpty ( NonEmpty ) import Control.Applicative( Alternative( (<|>) ) ) infixr 4 `orElse` {- ************************************************************************ * * \subsection[Maybe type]{The @Maybe@ type} * * ************************************************************************ -} firstJust :: Maybe a -> Maybe a -> Maybe a firstJust = (<|>) -- | Takes a list of @Maybes@ and returns the first @Just@ if there is one, or -- @Nothing@ otherwise. firstJusts :: Foldable f => f (Maybe a) -> Maybe a firstJusts = msum {-# SPECIALISE firstJusts :: [Maybe a] -> Maybe a #-} {-# SPECIALISE firstJusts :: NonEmpty (Maybe a) -> Maybe a #-} -- | Takes computations returnings @Maybes@; tries each one in order. -- The first one to return a @Just@ wins. Returns @Nothing@ if all computations -- return @Nothing@. 
firstJustsM :: (Monad m, Foldable f) => f (m (Maybe a)) -> m (Maybe a) firstJustsM = foldlM go Nothing where go :: Monad m => Maybe a -> m (Maybe a) -> m (Maybe a) go Nothing action = action go result@(Just _) _action = return result expectJust :: HasDebugCallStack => String -> Maybe a -> a {-# INLINE expectJust #-} expectJust _ (Just x) = x expectJust err Nothing = error ("expectJust " ++ err) whenIsJust :: Monad m => Maybe a -> (a -> m ()) -> m () whenIsJust = for_ -- | Flipped version of @fromMaybe@, useful for chaining. orElse :: Maybe a -> a -> a orElse = flip fromMaybe rightToMaybe :: Either a b -> Maybe b rightToMaybe (Left _) = Nothing rightToMaybe (Right x) = Just x {- ************************************************************************ * * \subsection[MaybeT type]{The @MaybeT@ monad transformer} * * ************************************************************************ -} -- We had our own MaybeT in the past. Now we reuse transformer's MaybeT liftMaybeT :: Monad m => m a -> MaybeT m a liftMaybeT act = MaybeT $ Just `liftM` act -- | Try performing an 'IO' action, failing on error. 
tryMaybeT :: IO a -> MaybeT IO a tryMaybeT action = MaybeT $ catchException (Just `fmap` action) handler where handler (SomeException _) = return Nothing {- ************************************************************************ * * \subsection[MaybeErr type]{The @MaybeErr@ type} * * ************************************************************************ -} data MaybeErr err val = Succeeded val | Failed err deriving (Functor) instance Applicative (MaybeErr err) where pure = Succeeded (<*>) = ap instance Monad (MaybeErr err) where Succeeded v >>= k = k v Failed e >>= _ = Failed e isSuccess :: MaybeErr err val -> Bool isSuccess (Succeeded {}) = True isSuccess (Failed {}) = False failME :: err -> MaybeErr err val failME e = Failed e ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/OrdList.hs0000644000000000000000000002143407346545000020477 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnboxedTuples #-} -- | Provide trees (of instructions), so that lists of instructions can be -- appended in linear time. 
module GHC.Data.OrdList ( OrdList, pattern NilOL, pattern ConsOL, pattern SnocOL, nilOL, isNilOL, unitOL, appOL, consOL, snocOL, concatOL, lastOL, headOL, mapOL, mapOL', fromOL, toOL, foldrOL, foldlOL, partitionOL, reverseOL, fromOLReverse, strictlyEqOL, strictlyOrdOL ) where import GHC.Prelude import Data.Foldable import GHC.Utils.Misc (strictMap) import GHC.Utils.Outputable import GHC.Utils.Panic import Data.List.NonEmpty (NonEmpty(..)) import qualified Data.List.NonEmpty as NE import qualified Data.Semigroup as Semigroup infixl 5 `appOL` infixl 5 `snocOL` infixr 5 `consOL` data OrdList a = None | One a | Many (NonEmpty a) | Cons a (OrdList a) | Snoc (OrdList a) a | Two (OrdList a) -- Invariant: non-empty (OrdList a) -- Invariant: non-empty deriving (Functor) instance Outputable a => Outputable (OrdList a) where ppr ol = ppr (fromOL ol) -- Convert to list and print that instance Semigroup (OrdList a) where (<>) = appOL instance Monoid (OrdList a) where mempty = nilOL mappend = (Semigroup.<>) mconcat = concatOL instance Foldable OrdList where foldr = foldrOL foldl' = foldlOL toList = fromOL null = isNilOL length = lengthOL instance Traversable OrdList where traverse f xs = toOL <$> traverse f (fromOL xs) nilOL :: OrdList a isNilOL :: OrdList a -> Bool unitOL :: a -> OrdList a snocOL :: OrdList a -> a -> OrdList a consOL :: a -> OrdList a -> OrdList a appOL :: OrdList a -> OrdList a -> OrdList a concatOL :: [OrdList a] -> OrdList a headOL :: OrdList a -> a lastOL :: OrdList a -> a lengthOL :: OrdList a -> Int nilOL = None unitOL as = One as snocOL as b = Snoc as b consOL a bs = Cons a bs concatOL aas = foldr appOL None aas pattern NilOL :: OrdList a pattern NilOL <- (isNilOL -> True) where NilOL = None -- | An unboxed 'Maybe' type with two unboxed fields in the 'Just' case. -- Useful for defining 'viewCons' and 'viewSnoc' without overhead. 
type VMaybe a b = (# (# a, b #) | (# #) #) pattern VJust :: a -> b -> VMaybe a b pattern VJust a b = (# (# a, b #) | #) pattern VNothing :: VMaybe a b pattern VNothing = (# | (# #) #) {-# COMPLETE VJust, VNothing #-} pattern ConsOL :: a -> OrdList a -> OrdList a pattern ConsOL x xs <- (viewCons -> VJust x xs) where ConsOL x xs = consOL x xs {-# COMPLETE NilOL, ConsOL #-} viewCons :: OrdList a -> VMaybe a (OrdList a) viewCons None = VNothing viewCons (One a) = VJust a NilOL viewCons (Many (a :| [])) = VJust a NilOL viewCons (Many (a :| b : bs)) = VJust a (Many (b :| bs)) viewCons (Cons a as) = VJust a as viewCons (Snoc as a) = case viewCons as of VJust a' as' -> VJust a' (Snoc as' a) VNothing -> VJust a NilOL viewCons (Two as1 as2) = case viewCons as1 of VJust a' as1' -> VJust a' (Two as1' as2) VNothing -> viewCons as2 pattern SnocOL :: OrdList a -> a -> OrdList a pattern SnocOL xs x <- (viewSnoc -> VJust xs x) where SnocOL xs x = snocOL xs x {-# COMPLETE NilOL, SnocOL #-} viewSnoc :: OrdList a -> VMaybe (OrdList a) a viewSnoc None = VNothing viewSnoc (One a) = VJust NilOL a viewSnoc (Many as) = (`VJust` NE.last as) $ case NE.init as of [] -> NilOL b : bs -> Many (b :| bs) viewSnoc (Snoc as a) = VJust as a viewSnoc (Cons a as) = case viewSnoc as of VJust as' a' -> VJust (Cons a as') a' VNothing -> VJust NilOL a viewSnoc (Two as1 as2) = case viewSnoc as2 of VJust as2' a' -> VJust (Two as1 as2') a' VNothing -> viewSnoc as1 headOL None = panic "headOL" headOL (One a) = a headOL (Many as) = NE.head as headOL (Cons a _) = a headOL (Snoc as _) = headOL as headOL (Two as _) = headOL as lastOL None = panic "lastOL" lastOL (One a) = a lastOL (Many as) = NE.last as lastOL (Cons _ as) = lastOL as lastOL (Snoc _ a) = a lastOL (Two _ as) = lastOL as lengthOL None = 0 lengthOL (One _) = 1 lengthOL (Many as) = length as lengthOL (Cons _ as) = 1 + length as lengthOL (Snoc as _) = 1 + length as lengthOL (Two as bs) = length as + length bs isNilOL None = True isNilOL _ = False None 
`appOL` b = b a `appOL` None = a One a `appOL` b = Cons a b a `appOL` One b = Snoc a b a `appOL` b = Two a b fromOL :: OrdList a -> [a] fromOL a = go a [] where go None acc = acc go (One a) acc = a : acc go (Cons a b) acc = a : go b acc go (Snoc a b) acc = go a (b:acc) go (Two a b) acc = go a (go b acc) go (Many xs) acc = NE.toList xs ++ acc fromOLReverse :: OrdList a -> [a] fromOLReverse a = go a [] -- acc is already in reverse order where go :: OrdList a -> [a] -> [a] go None acc = acc go (One a) acc = a : acc go (Cons a b) acc = go b (a : acc) go (Snoc a b) acc = b : go a acc go (Two a b) acc = go b (go a acc) go (Many xs) acc = reverse (NE.toList xs) ++ acc mapOL :: (a -> b) -> OrdList a -> OrdList b mapOL = fmap mapOL' :: (a->b) -> OrdList a -> OrdList b mapOL' _ None = None mapOL' f (One x) = One $! f x mapOL' f (Cons x xs) = let !x1 = f x !xs1 = mapOL' f xs in Cons x1 xs1 mapOL' f (Snoc xs x) = let !x1 = f x !xs1 = mapOL' f xs in Snoc xs1 x1 mapOL' f (Two b1 b2) = let !b1' = mapOL' f b1 !b2' = mapOL' f b2 in Two b1' b2' mapOL' f (Many (x :| xs)) = let !x1 = f x !xs1 = strictMap f xs in Many (x1 :| xs1) foldrOL :: (a->b->b) -> b -> OrdList a -> b foldrOL _ z None = z foldrOL k z (One x) = k x z foldrOL k z (Cons x xs) = k x (foldrOL k z xs) foldrOL k z (Snoc xs x) = foldrOL k (k x z) xs foldrOL k z (Two b1 b2) = foldrOL k (foldrOL k z b2) b1 foldrOL k z (Many xs) = foldr k z xs -- | Strict left fold. 
foldlOL :: (b->a->b) -> b -> OrdList a -> b foldlOL _ z None = z foldlOL k z (One x) = k z x foldlOL k z (Cons x xs) = let !z' = (k z x) in foldlOL k z' xs foldlOL k z (Snoc xs x) = let !z' = (foldlOL k z xs) in k z' x foldlOL k z (Two b1 b2) = let !z' = (foldlOL k z b1) in foldlOL k z' b2 foldlOL k z (Many xs) = foldl' k z xs partitionOL :: (a -> Bool) -> OrdList a -> (OrdList a, OrdList a) partitionOL _ None = (None,None) partitionOL f (One x) | f x = (One x, None) | otherwise = (None, One x) partitionOL f (Two xs ys) = (Two ls1 ls2, Two rs1 rs2) where !(!ls1,!rs1) = partitionOL f xs !(!ls2,!rs2) = partitionOL f ys partitionOL f (Cons x xs) | f x = (Cons x ls, rs) | otherwise = (ls, Cons x rs) where !(!ls,!rs) = partitionOL f xs partitionOL f (Snoc xs x) | f x = (Snoc ls x, rs) | otherwise = (ls, Snoc rs x) where !(!ls,!rs) = partitionOL f xs partitionOL f (Many xs) = (toOL ls, toOL rs) where !(!ls,!rs) = NE.partition f xs toOL :: [a] -> OrdList a toOL [] = None toOL [x] = One x toOL (x : xs) = Many (x :| xs) reverseOL :: OrdList a -> OrdList a reverseOL None = None reverseOL (One x) = One x reverseOL (Cons a b) = Snoc (reverseOL b) a reverseOL (Snoc a b) = Cons b (reverseOL a) reverseOL (Two a b) = Two (reverseOL b) (reverseOL a) reverseOL (Many xs) = Many (NE.reverse xs) -- | Compare not only the values but also the structure of two lists strictlyEqOL :: Eq a => OrdList a -> OrdList a -> Bool strictlyEqOL None None = True strictlyEqOL (One x) (One y) = x == y strictlyEqOL (Cons a as) (Cons b bs) = a == b && as `strictlyEqOL` bs strictlyEqOL (Snoc as a) (Snoc bs b) = a == b && as `strictlyEqOL` bs strictlyEqOL (Two a1 a2) (Two b1 b2) = a1 `strictlyEqOL` b1 && a2 `strictlyEqOL` b2 strictlyEqOL (Many as) (Many bs) = as == bs strictlyEqOL _ _ = False -- | Compare not only the values but also the structure of two lists strictlyOrdOL :: Ord a => OrdList a -> OrdList a -> Ordering strictlyOrdOL None None = EQ strictlyOrdOL None _ = LT strictlyOrdOL (One x) (One y) = 
compare x y strictlyOrdOL (One _) _ = LT strictlyOrdOL (Cons a as) (Cons b bs) = compare a b `mappend` strictlyOrdOL as bs strictlyOrdOL (Cons _ _) _ = LT strictlyOrdOL (Snoc as a) (Snoc bs b) = compare a b `mappend` strictlyOrdOL as bs strictlyOrdOL (Snoc _ _) _ = LT strictlyOrdOL (Two a1 a2) (Two b1 b2) = (strictlyOrdOL a1 b1) `mappend` (strictlyOrdOL a2 b2) strictlyOrdOL (Two _ _) _ = LT strictlyOrdOL (Many as) (Many bs) = compare as bs strictlyOrdOL (Many _ ) _ = GT ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/OsPath.hs0000644000000000000000000000127607346545000020317 0ustar0000000000000000module GHC.Data.OsPath ( -- * OsPath initialisation and transformation OsPath , OsString , encodeUtf , decodeUtf , unsafeDecodeUtf , unsafeEncodeUtf , os -- * Common utility functions , () , (<.>) ) where import GHC.Prelude import GHC.Utils.Misc (HasCallStack) import GHC.Utils.Panic (panic) import System.OsPath import System.Directory.Internal (os) -- | Decode an 'OsPath' to 'FilePath', throwing an 'error' if decoding failed. -- Prefer 'decodeUtf' and gracious error handling. unsafeDecodeUtf :: HasCallStack => OsPath -> FilePath unsafeDecodeUtf p = either (\err -> panic $ "Failed to decodeUtf \"" ++ show p ++ "\", because: " ++ show err) id (decodeUtf p) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Pair.hs0000644000000000000000000000321007346545000020002 0ustar0000000000000000{- A simple homogeneous pair type with useful Functor, Applicative, and Traversable instances. -} {-# LANGUAGE DeriveTraversable #-} module GHC.Data.Pair ( Pair(..) 
, unPair , toPair , swap , pLiftFst, pLiftSnd , unzipPairs ) where import GHC.Prelude import GHC.Utils.Outputable import qualified Data.Semigroup as Semi data Pair a = Pair { pFst :: a, pSnd :: a } deriving (Foldable, Functor, Traversable) -- Note that Pair is a *unary* type constructor -- whereas (,) is binary -- The important thing about Pair is that it has a *homogeneous* -- Functor instance, so you can easily apply the same function -- to both components instance Applicative Pair where pure x = Pair x x (Pair f g) <*> (Pair x y) = Pair (f x) (g y) instance Semi.Semigroup a => Semi.Semigroup (Pair a) where Pair a1 b1 <> Pair a2 b2 = Pair (a1 Semi.<> a2) (b1 Semi.<> b2) instance (Semi.Semigroup a, Monoid a) => Monoid (Pair a) where mempty = Pair mempty mempty mappend = (Semi.<>) instance Outputable a => Outputable (Pair a) where ppr (Pair a b) = ppr a <+> char '~' <+> ppr b unPair :: Pair a -> (a,a) unPair (Pair x y) = (x,y) toPair :: (a,a) -> Pair a toPair (x,y) = Pair x y swap :: Pair a -> Pair a swap (Pair x y) = Pair y x pLiftFst :: (a -> a) -> Pair a -> Pair a pLiftFst f (Pair a b) = Pair (f a) b pLiftSnd :: (a -> a) -> Pair a -> Pair a pLiftSnd f (Pair a b) = Pair a (f b) unzipPairs :: [Pair a] -> ([a], [a]) unzipPairs [] = ([], []) unzipPairs (Pair a b : prs) = (a:as, b:bs) where !(as,bs) = unzipPairs prs -- This makes the unzip work eagerly, building no thunks at -- the cost of doing all the work up-front. ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/SmallArray.hs0000644000000000000000000001006607346545000021165 0ustar0000000000000000{-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE BlockArguments #-} -- | Small-array module GHC.Data.SmallArray ( SmallMutableArray (..) , SmallArray (..) 
, newSmallArray , writeSmallArray , freezeSmallArray , unsafeFreezeSmallArray , indexSmallArray , sizeofSmallArray , listToArray , mapSmallArray , foldMapSmallArray , rnfSmallArray ) where import GHC.Exts import GHC.Prelude import GHC.ST import Control.DeepSeq data SmallArray a = SmallArray (SmallArray# a) data SmallMutableArray s a = SmallMutableArray (SmallMutableArray# s a) newSmallArray :: Int -- ^ size -> a -- ^ initial contents -> State# s -> (# State# s, SmallMutableArray s a #) {-# INLINE newSmallArray #-} newSmallArray (I# sz) x s = case newSmallArray# sz x s of (# s', a #) -> (# s', SmallMutableArray a #) writeSmallArray :: SmallMutableArray s a -- ^ array -> Int -- ^ index -> a -- ^ new element -> State# s -> State# s {-# INLINE writeSmallArray #-} writeSmallArray (SmallMutableArray a) (I# i) x = writeSmallArray# a i x -- | Copy and freeze a slice of a mutable array. freezeSmallArray :: SmallMutableArray s a -- ^ source -> Int -- ^ offset -> Int -- ^ length -> State# s -> (# State# s, SmallArray a #) {-# INLINE freezeSmallArray #-} freezeSmallArray (SmallMutableArray ma) (I# offset) (I# len) s = case freezeSmallArray# ma offset len s of (# s', a #) -> (# s', SmallArray a #) -- | Freeze a mutable array (no copy!) unsafeFreezeSmallArray :: SmallMutableArray s a -> State# s -> (# State# s, SmallArray a #) {-# INLINE unsafeFreezeSmallArray #-} unsafeFreezeSmallArray (SmallMutableArray ma) s = case unsafeFreezeSmallArray# ma s of (# s', a #) -> (# s', SmallArray a #) -- | Get the size of a 'SmallArray' sizeofSmallArray :: SmallArray a -> Int {-# INLINE sizeofSmallArray #-} sizeofSmallArray (SmallArray sa#) = case sizeofSmallArray# sa# of s -> I# s -- | Index a small-array (no bounds checking!) 
indexSmallArray :: SmallArray a -- ^ array -> Int -- ^ index -> a {-# INLINE indexSmallArray #-} indexSmallArray (SmallArray sa#) (I# i) = case indexSmallArray# sa# i of (# v #) -> v -- | Map a function over the elements of a 'SmallArray' -- mapSmallArray :: (a -> b) -> SmallArray a -> SmallArray b {-# INLINE mapSmallArray #-} mapSmallArray f sa = runST $ ST $ \s -> let n = sizeofSmallArray sa go !i saMut# state# | i < n = let a = indexSmallArray sa i newState# = writeSmallArray saMut# i (f a) state# in go (i + 1) saMut# newState# | otherwise = state# in case newSmallArray n (error "SmallArray: internal error, uninitialised elements") s of (# s', mutArr #) -> case go 0 mutArr s' of s'' -> unsafeFreezeSmallArray mutArr s'' -- | Fold the values of a 'SmallArray' into a 'Monoid m' of choice foldMapSmallArray :: Monoid m => (a -> m) -> SmallArray a -> m {-# INLINE foldMapSmallArray #-} foldMapSmallArray f sa = go 0 where n = sizeofSmallArray sa go i | i < n = f (indexSmallArray sa i) `mappend` go (i + 1) | otherwise = mempty -- | Force the elements of the given 'SmallArray' -- rnfSmallArray :: NFData a => SmallArray a -> () {-# INLINE rnfSmallArray #-} rnfSmallArray sa = go 0 where n = sizeofSmallArray sa go !i | i < n = rnf (indexSmallArray sa i) `seq` go (i + 1) | otherwise = () -- | Convert a list into an array. 
listToArray :: Int -> (e -> Int) -> (e -> a) -> [e] -> SmallArray a {-# INLINE listToArray #-} listToArray (I# size) index_of value_of xs = runST $ ST \s -> let index_of' e = case index_of e of I# i -> i write_elems ma es s = case es of [] -> s e:es' -> case writeSmallArray# ma (index_of' e) (value_of e) s of s' -> write_elems ma es' s' in case newSmallArray# size undefined s of (# s', ma #) -> case write_elems ma xs s' of s'' -> case unsafeFreezeSmallArray# ma s'' of (# s''', a #) -> (# s''', SmallArray a #) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Stream.hs0000644000000000000000000001407107346545000020351 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} -- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2012 -- -- ----------------------------------------------------------------------------- -- | Monadic streams module GHC.Data.Stream ( Stream(..), StreamS(..), runStream, yield, liftIO, liftEff, hoistEff, collect, consume, fromList, map, mapM, mapAccumL_ ) where import GHC.Prelude hiding (map,mapM) import Control.Monad hiding (mapM) import Control.Monad.IO.Class -- | -- @Stream m a b@ is a computation in some Monad @m@ that delivers a sequence -- of elements of type @a@ followed by a result of type @b@. -- -- More concretely, a value of type @Stream m a b@ can be run using @runStreamInternal@ -- in the Monad @m@, and it delivers either -- -- * the final result: @Done b@, or -- * @Yield a str@ where @a@ is the next element in the stream, and @str@ -- is the rest of the stream -- * @Effect mstr@ where @mstr@ is some action running in @m@ which -- generates the rest of the stream. -- -- Stream is itself a Monad, and provides an operation 'yield' that -- produces a new element of the stream. This makes it convenient to turn -- existing monadic computations into streams. 
-- -- The idea is that Stream is useful for making a monadic computation -- that produces values from time to time. This can be used for -- knitting together two complex monadic operations, so that the -- producer does not have to produce all its values before the -- consumer starts consuming them. We make the producer into a -- Stream, and the consumer pulls on the stream each time it wants a -- new value. -- -- 'Stream' is implemented in the "yoneda" style for efficiency. By -- representing a stream in this manner 'fmap' and '>>=' operations are -- accumulated in the function parameters before being applied once when -- the stream is destroyed. In the old implementation each usage of 'mapM' -- and '>>=' would traverse the entire stream in order to apply the -- substitution at the leaves. -- -- The >>= operation for 'Stream' was a hot-spot in the ticky profile for -- the "ManyConstructors" test which called the 'cg' function many times in -- @StgToCmm.hs@ -- newtype Stream m a b = Stream { runStreamInternal :: forall r' r . 
(a -> m r') -- For fusing calls to `map` and `mapM` -> (b -> StreamS m r' r) -- For fusing `>>=` -> StreamS m r' r } runStream :: Applicative m => Stream m r' r -> StreamS m r' r runStream st = runStreamInternal st pure Done data StreamS m a b = Yield a (StreamS m a b) | Done b | Effect (m (StreamS m a b)) deriving (Functor) instance Monad m => Applicative (StreamS m a) where pure = Done (<*>) = ap instance Monad m => Monad (StreamS m a) where a >>= k = case a of Done r -> k r Yield a s -> Yield a (s >>= k) Effect m -> Effect (fmap (>>= k) m) instance Functor (Stream f a) where fmap = liftM instance Applicative (Stream m a) where pure a = Stream $ \_f g -> g a (<*>) = ap instance Monad (Stream m a) where Stream m >>= k = Stream $ \f h -> m f (\a -> runStreamInternal (k a) f h) instance MonadIO m => MonadIO (Stream m b) where liftIO io = Stream $ \_f g -> Effect (g <$> liftIO io) yield :: Monad m => a -> Stream m a () yield a = Stream $ \f rest -> Effect (flip Yield (rest ()) <$> f a) -- | Turn a Stream into an ordinary list, by demanding all the elements. collect :: Monad m => Stream m a () -> m [a] collect str = go [] (runStream str) where go acc (Done ()) = return (reverse acc) go acc (Effect m) = m >>= go acc go acc (Yield a k) = go (a:acc) k consume :: (Monad m, Monad n) => Stream m a b -> (forall a . m a -> n a) -> (a -> n ()) -> n b consume str l f = go (runStream str) where go (Done r) = return r go (Yield a p) = f a >> go p go (Effect m) = l m >>= go -- | Turn a list into a 'Stream', by yielding each element in turn. fromList :: Monad m => [a] -> Stream m a () fromList = mapM_ yield -- | Apply a function to each element of a 'Stream', lazily map :: Monad m => (a -> b) -> Stream m a x -> Stream m b x map f str = Stream $ \g h -> runStreamInternal str (g . 
f) h -- | Apply a monadic operation to each element of a 'Stream', lazily mapM :: Monad m => (a -> m b) -> Stream m a x -> Stream m b x mapM f str = Stream $ \g h -> runStreamInternal str (g <=< f) h -- | Note this is not very efficient because it traverses the whole stream -- before rebuilding it, avoid using it if you can. mapAccumL used to -- implemented but it wasn't used anywhere in the compiler and has similar -- efficiency problems. mapAccumL_ :: forall m a b c r . Monad m => (c -> a -> m (c,b)) -> c -> Stream m a r -> Stream m b (c, r) mapAccumL_ f c str = Stream $ \f h -> go c f h (runStream str) where go :: c -> (b -> m r') -> ((c, r) -> StreamS m r' r1) -> StreamS m a r -> StreamS m r' r1 go c _f1 h1 (Done r) = h1 (c, r) go c f1 h1 (Yield a p) = Effect (f c a >>= (\(c', b) -> f1 b >>= \r' -> return $ Yield r' (go c' f1 h1 p))) go c f1 h1 (Effect m) = Effect (go c f1 h1 <$> m) -- | Lift an effect into the Stream liftEff :: Monad m => m b -> Stream m a b liftEff eff = Stream $ \_f g -> Effect (g <$> eff) -- | Hoist the underlying Stream effect -- Note this is not very efficience since, just like 'mapAccumL_', it also needs -- to traverse and rebuild the whole stream. hoistEff :: forall m n a b. (Applicative m, Monad n) => (forall x. m x -> n x) -> Stream m a b -> Stream n a b hoistEff h s = Stream $ \f g -> hs f g (runStream s :: StreamS m a b) where hs :: (a -> n r') -> (b -> StreamS n r' r) -> StreamS m a b -> StreamS n r' r hs f g x = case x of Done d -> g d Yield a r -> Effect (f a >>= \r' -> return $ Yield r' (hs f g r)) Effect e -> Effect (h (hs f g <$> e)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Strict.hs0000644000000000000000000000345507346545000020372 0ustar0000000000000000-- Strict counterparts to common data structures, -- e.g. tuples, lists, maybes, etc. -- -- Import this module qualified as Strict. 
{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Data.Strict ( Maybe(Nothing, Just), fromMaybe, GHC.Data.Strict.maybe, Pair(And), -- Not used at the moment: -- -- Either(Left, Right), -- List(Nil, Cons), ) where import GHC.Prelude hiding (Maybe(..), Either(..)) import Control.Applicative import Data.Semigroup import Data.Data data Maybe a = Nothing | Just !a deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) fromMaybe :: a -> Maybe a -> a fromMaybe d Nothing = d fromMaybe _ (Just x) = x maybe :: b -> (a -> b) -> Maybe a -> b maybe d _ Nothing = d maybe _ f (Just x) = f x apMaybe :: Maybe (a -> b) -> Maybe a -> Maybe b apMaybe (Just f) (Just x) = Just (f x) apMaybe _ _ = Nothing altMaybe :: Maybe a -> Maybe a -> Maybe a altMaybe Nothing r = r altMaybe l _ = l instance Semigroup a => Semigroup (Maybe a) where Nothing <> b = b a <> Nothing = a Just a <> Just b = Just (a <> b) instance Semigroup a => Monoid (Maybe a) where mempty = Nothing instance Applicative Maybe where pure = Just (<*>) = apMaybe instance Alternative Maybe where empty = Nothing (<|>) = altMaybe data Pair a b = !a `And` !b deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) -- The definitions below are commented out because they are -- not used anywhere in the compiler, but are useful to showcase -- the intent behind this module (i.e. how it may evolve). -- -- data Either a b = Left !a | Right !b -- deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) -- -- data List a = Nil | !a `Cons` !(List a) -- deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/StringBuffer.hs0000644000000000000000000004000207346545000021507 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The University of Glasgow, 1997-2006 Buffers for scanning string input stored in external arrays. 
-} {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -O2 #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected module GHC.Data.StringBuffer ( StringBuffer(..), -- non-abstract for vs\/HaskellService -- * Creation\/destruction hGetStringBuffer, hGetStringBufferBlock, hPutStringBuffer, appendStringBuffers, stringToStringBuffer, stringBufferFromByteString, -- * Inspection nextChar, currentChar, prevChar, atEnd, fingerprintStringBuffer, -- * Moving and comparison stepOn, offsetBytes, byteDiff, atLine, -- * Conversion lexemeToString, lexemeToFastString, decodePrevNChars, -- * Parsing integers parseUnsignedInteger, findHashOffset, -- * Checking for bi-directional format characters containsBidirectionalFormatChar, bidirectionalFormatChars ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Encoding import GHC.Utils.IO.Unsafe import GHC.Utils.Panic.Plain import GHC.Utils.Exception ( bracket_ ) import GHC.Fingerprint import Data.Maybe import System.IO import System.IO.Unsafe ( unsafePerformIO ) import GHC.IO.Encoding.UTF8 ( mkUTF8 ) import GHC.IO.Encoding.Failure ( CodingFailureMode(IgnoreCodingFailure) ) import qualified Data.ByteString.Internal as BS import qualified Data.ByteString as BS import Data.ByteString ( ByteString ) import GHC.Exts import Foreign #if MIN_VERSION_base(4,15,0) import GHC.ForeignPtr (unsafeWithForeignPtr) #else unsafeWithForeignPtr :: ForeignPtr a -> (Ptr a -> IO b) -> IO b unsafeWithForeignPtr = withForeignPtr #endif -- ----------------------------------------------------------------------------- -- The StringBuffer type -- |A StringBuffer is an internal pointer to a sized chunk of bytes. -- The bytes are intended to be *immutable*. There are pure -- operations to read the contents of a StringBuffer. -- -- A StringBuffer may have a finalizer, depending on how it was -- obtained. 
-- data StringBuffer = StringBuffer { buf :: {-# UNPACK #-} !(ForeignPtr Word8), len :: {-# UNPACK #-} !Int, -- length cur :: {-# UNPACK #-} !Int -- current pos } -- The buffer is assumed to be UTF-8 encoded, and furthermore -- we add three @\'\\0\'@ bytes to the end as sentinels so that the -- decoder doesn't have to check for overflow at every single byte -- of a multibyte sequence. instance Show StringBuffer where showsPrec _ s = showString "" -- ----------------------------------------------------------------------------- -- Creation / Destruction -- | Read a file into a 'StringBuffer'. The resulting buffer is automatically -- managed by the garbage collector. hGetStringBuffer :: FilePath -> IO StringBuffer hGetStringBuffer fname = do h <- openBinaryFile fname ReadMode size_i <- hFileSize h offset_i <- skipBOM h size_i 0 -- offset is 0 initially let size = fromIntegral $ size_i - offset_i buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do r <- if size == 0 then return 0 else hGetBuf h ptr size hClose h if (r /= size) then ioError (userError "short read of file") else newUTF8StringBuffer buf ptr size hGetStringBufferBlock :: Handle -> Int -> IO StringBuffer hGetStringBufferBlock handle wanted = do size_i <- hFileSize handle offset_i <- hTell handle >>= skipBOM handle size_i let size = min wanted (fromIntegral $ size_i-offset_i) buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do r <- if size == 0 then return 0 else hGetBuf handle ptr size if r /= size then ioError (userError $ "short read of file: "++show(r,size,size_i,handle)) else newUTF8StringBuffer buf ptr size hPutStringBuffer :: Handle -> StringBuffer -> IO () hPutStringBuffer hdl (StringBuffer buf len cur) = unsafeWithForeignPtr (plusForeignPtr buf cur) $ \ptr -> hPutBuf hdl ptr len -- | Skip the byte-order mark if there is one (see #1744 and #6016), -- and return the new position of the handle in bytes. 
-- -- This is better than treating #FEFF as whitespace, -- because that would mess up layout. We don't have a concept -- of zero-width whitespace in Haskell: all whitespace codepoints -- have a width of one column. skipBOM :: Handle -> Integer -> Integer -> IO Integer skipBOM h size offset = -- Only skip BOM at the beginning of a file. if size > 0 && offset == 0 then do -- Validate assumption that handle is in binary mode. assertM (hGetEncoding h >>= return . isNothing) -- Temporarily select utf8 encoding with error ignoring, -- to make `hLookAhead` and `hGetChar` return full Unicode characters. bracket_ (hSetEncoding h safeEncoding) (hSetBinaryMode h True) $ do c <- hLookAhead h if c == '\xfeff' then hGetChar h >> hTell h else return offset else return offset where safeEncoding = mkUTF8 IgnoreCodingFailure newUTF8StringBuffer :: ForeignPtr Word8 -> Ptr Word8 -> Int -> IO StringBuffer newUTF8StringBuffer buf ptr size = do pokeArray (ptr `plusPtr` size :: Ptr Word8) [0,0,0] -- sentinels for UTF-8 decoding return $ StringBuffer buf size 0 appendStringBuffers :: StringBuffer -> StringBuffer -> IO StringBuffer appendStringBuffers sb1 sb2 = do newBuf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr newBuf $ \ptr -> unsafeWithForeignPtr (buf sb1) $ \sb1Ptr -> unsafeWithForeignPtr (buf sb2) $ \sb2Ptr -> do copyArray ptr (sb1Ptr `advancePtr` cur sb1) sb1_len copyArray (ptr `advancePtr` sb1_len) (sb2Ptr `advancePtr` cur sb2) sb2_len pokeArray (ptr `advancePtr` size) [0,0,0] return (StringBuffer newBuf size 0) where sb1_len = calcLen sb1 sb2_len = calcLen sb2 calcLen sb = len sb - cur sb size = sb1_len + sb2_len -- | Encode a 'String' into a 'StringBuffer' as UTF-8. The resulting buffer -- is automatically managed by the garbage collector. 
stringToStringBuffer :: String -> StringBuffer stringToStringBuffer str = unsafePerformIO $ do let size = utf8EncodedLength str buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do utf8EncodePtr ptr str pokeArray (ptr `plusPtr` size :: Ptr Word8) [0,0,0] -- sentinels for UTF-8 decoding return (StringBuffer buf size 0) -- | Convert a UTF-8 encoded 'ByteString' into a 'StringBuffer. This really -- relies on the internals of both 'ByteString' and 'StringBuffer'. -- -- /O(n)/ (but optimized into a @memcpy@ by @bytestring@ under the hood) stringBufferFromByteString :: ByteString -> StringBuffer stringBufferFromByteString bs = let BS.PS fp off len = BS.append bs (BS.pack [0,0,0]) in StringBuffer { buf = fp, len = len - 3, cur = off } -- ----------------------------------------------------------------------------- -- Grab a character -- | Return the first UTF-8 character of a nonempty 'StringBuffer' and as well -- the remaining portion (analogous to 'Data.List.uncons'). __Warning:__ The -- behavior is undefined if the 'StringBuffer' is empty. The result shares -- the same buffer as the original. Similar to 'utf8DecodeChar', if the -- character cannot be decoded as UTF-8, @\'\\0\'@ is returned. 
{-# INLINE nextChar #-} nextChar :: StringBuffer -> (Char,StringBuffer) nextChar (StringBuffer buf len (I# cur#)) = -- Getting our fingers dirty a little here, but this is performance-critical inlinePerformIO $ unsafeWithForeignPtr buf $ \(Ptr a#) -> case utf8DecodeCharAddr# (a# `plusAddr#` cur#) 0# of (# c#, nBytes# #) -> let cur' = I# (cur# +# nBytes#) in return (C# c#, StringBuffer buf len cur') bidirectionalFormatChars :: [(Char,String)] bidirectionalFormatChars = [ ('\x202a' , "U+202A LEFT-TO-RIGHT EMBEDDING (LRE)") , ('\x202b' , "U+202B RIGHT-TO-LEFT EMBEDDING (RLE)") , ('\x202c' , "U+202C POP DIRECTIONAL FORMATTING (PDF)") , ('\x202d' , "U+202D LEFT-TO-RIGHT OVERRIDE (LRO)") , ('\x202e' , "U+202E RIGHT-TO-LEFT OVERRIDE (RLO)") , ('\x2066' , "U+2066 LEFT-TO-RIGHT ISOLATE (LRI)") , ('\x2067' , "U+2067 RIGHT-TO-LEFT ISOLATE (RLI)") , ('\x2068' , "U+2068 FIRST STRONG ISOLATE (FSI)") , ('\x2069' , "U+2069 POP DIRECTIONAL ISOLATE (PDI)") ] {-| Returns true if the buffer contains Unicode bi-directional formatting characters. 
https://www.unicode.org/reports/tr9/#Bidirectional_Character_Types Bidirectional format characters are one of '\x202a' : "U+202A LEFT-TO-RIGHT EMBEDDING (LRE)" '\x202b' : "U+202B RIGHT-TO-LEFT EMBEDDING (RLE)" '\x202c' : "U+202C POP DIRECTIONAL FORMATTING (PDF)" '\x202d' : "U+202D LEFT-TO-RIGHT OVERRIDE (LRO)" '\x202e' : "U+202E RIGHT-TO-LEFT OVERRIDE (RLO)" '\x2066' : "U+2066 LEFT-TO-RIGHT ISOLATE (LRI)" '\x2067' : "U+2067 RIGHT-TO-LEFT ISOLATE (RLI)" '\x2068' : "U+2068 FIRST STRONG ISOLATE (FSI)" '\x2069' : "U+2069 POP DIRECTIONAL ISOLATE (PDI)" This list is encoded in 'bidirectionalFormatChars' -} {-# INLINE containsBidirectionalFormatChar #-} containsBidirectionalFormatChar :: StringBuffer -> Bool containsBidirectionalFormatChar (StringBuffer buf (I# len#) (I# cur#)) = inlinePerformIO $ unsafeWithForeignPtr buf $ \(Ptr a#) -> do let go :: Int# -> Bool go i | isTrue# (i >=# len#) = False | otherwise = case utf8DecodeCharAddr# a# i of (# '\x202a'# , _ #) -> True (# '\x202b'# , _ #) -> True (# '\x202c'# , _ #) -> True (# '\x202d'# , _ #) -> True (# '\x202e'# , _ #) -> True (# '\x2066'# , _ #) -> True (# '\x2067'# , _ #) -> True (# '\x2068'# , _ #) -> True (# '\x2069'# , _ #) -> True (# _, bytes #) -> go (i +# bytes) pure $! go cur# -- | Return the first UTF-8 character of a nonempty 'StringBuffer' (analogous -- to 'Data.List.head'). __Warning:__ The behavior is undefined if the -- 'StringBuffer' is empty. Similar to 'utf8DecodeChar', if the character -- cannot be decoded as UTF-8, @\'\\0\'@ is returned. currentChar :: StringBuffer -> Char currentChar = fst . 
nextChar prevChar :: StringBuffer -> Char -> Char prevChar (StringBuffer _ _ 0) deflt = deflt prevChar (StringBuffer buf _ cur) _ = inlinePerformIO $ unsafeWithForeignPtr buf $ \p -> do p' <- utf8PrevChar (p `plusPtr` cur) return (fst (utf8DecodeCharPtr p')) -- ----------------------------------------------------------------------------- -- Moving -- | Return a 'StringBuffer' with the first UTF-8 character removed (analogous -- to 'Data.List.tail'). __Warning:__ The behavior is undefined if the -- 'StringBuffer' is empty. The result shares the same buffer as the -- original. stepOn :: StringBuffer -> StringBuffer stepOn s = snd (nextChar s) -- | Return a 'StringBuffer' with the first @n@ bytes removed. __Warning:__ -- If there aren't enough characters, the returned 'StringBuffer' will be -- invalid and any use of it may lead to undefined behavior. The result -- shares the same buffer as the original. offsetBytes :: Int -- ^ @n@, the number of bytes -> StringBuffer -> StringBuffer offsetBytes i s = s { cur = cur s + i } -- | Compute the difference in offset between two 'StringBuffer's that share -- the same buffer. __Warning:__ The behavior is undefined if the -- 'StringBuffer's use separate buffers. byteDiff :: StringBuffer -> StringBuffer -> Int byteDiff s1 s2 = cur s2 - cur s1 -- | Check whether a 'StringBuffer' is empty (analogous to 'Data.List.null'). atEnd :: StringBuffer -> Bool atEnd (StringBuffer _ l c) = l == c -- | Computes a hash of the contents of a 'StringBuffer'. fingerprintStringBuffer :: StringBuffer -> Fingerprint fingerprintStringBuffer (StringBuffer buf len cur) = unsafePerformIO $ withForeignPtr buf $ \ptr -> fingerprintData (ptr `plusPtr` cur) len -- | Computes a 'StringBuffer' which points to the first character of the -- wanted line. Lines begin at 1. 
atLine :: Int -> StringBuffer -> Maybe StringBuffer atLine line sb@(StringBuffer buf len _) = inlinePerformIO $ unsafeWithForeignPtr buf $ \p -> do p' <- skipToLine line len p if p' == nullPtr then return Nothing else let delta = p' `minusPtr` p in return $ Just (sb { cur = delta , len = len - delta }) skipToLine :: Int -> Int -> Ptr Word8 -> IO (Ptr Word8) skipToLine !line !len !op0 = go 1 op0 where !opend = op0 `plusPtr` len go !i_line !op | op >= opend = pure nullPtr | i_line == line = pure op | otherwise = do w <- peek op :: IO Word8 case w of 10 -> go (i_line + 1) (plusPtr op 1) 13 -> do -- this is safe because a 'StringBuffer' is -- guaranteed to have 3 bytes sentinel values. w' <- peek (plusPtr op 1) :: IO Word8 case w' of 10 -> go (i_line + 1) (plusPtr op 2) _ -> go (i_line + 1) (plusPtr op 1) _ -> go i_line (plusPtr op 1) -- ----------------------------------------------------------------------------- -- Conversion -- | Decode the first @n@ bytes of a 'StringBuffer' as UTF-8 into a 'String'. -- Similar to 'utf8DecodeChar', if the character cannot be decoded as UTF-8, -- they will be replaced with @\'\\0\'@. lexemeToString :: StringBuffer -> Int -- ^ @n@, the number of bytes -> String lexemeToString _ 0 = "" lexemeToString (StringBuffer buf _ cur) bytes = utf8DecodeForeignPtr buf cur bytes lexemeToFastString :: StringBuffer -> Int -- ^ @n@, the number of bytes -> FastString lexemeToFastString _ 0 = nilFS lexemeToFastString (StringBuffer buf _ cur) len = inlinePerformIO $ unsafeWithForeignPtr buf $ \ptr -> return $! mkFastStringBytes (ptr `plusPtr` cur) len -- | Return the previous @n@ characters (or fewer if we are less than @n@ -- characters into the buffer. 
decodePrevNChars :: Int -> StringBuffer -> String decodePrevNChars n (StringBuffer buf _ cur) = inlinePerformIO $ unsafeWithForeignPtr buf $ \p0 -> go p0 n "" (p0 `plusPtr` (cur - 1)) where go :: Ptr Word8 -> Int -> String -> Ptr Word8 -> IO String go buf0 n acc p | n == 0 || buf0 >= p = return acc go buf0 n acc p = do p' <- utf8PrevChar p let (c,_) = utf8DecodeCharPtr p' go buf0 (n - 1) (c:acc) p' -- ----------------------------------------------------------------------------- -- Parsing integer strings in various bases parseUnsignedInteger :: StringBuffer -> Int -> Integer -> (Char->Int) -> Integer parseUnsignedInteger (StringBuffer buf _ cur) len radix char_to_int = inlinePerformIO $ withForeignPtr buf $ \ptr -> return $! let go i x | i == len = x | otherwise = case fst (utf8DecodeCharPtr (ptr `plusPtr` (cur + i))) of '_' -> go (i + 1) x -- skip "_" (#14473) char -> go (i + 1) (x * radix + toInteger (char_to_int char)) in go 0 0 -- | Find the offset of the '#' character in the StringBuffer. -- -- Make sure that it contains one before calling this function! findHashOffset :: StringBuffer -> Int findHashOffset (StringBuffer buf _ cur) = inlinePerformIO $ withForeignPtr buf $ \ptr -> do let go p = peek p >>= \case (0x23 :: Word8) -> pure $! ((p `minusPtr` ptr) - cur) _ -> go (p `plusPtr` 1) go (ptr `plusPtr` cur) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/TrieMap.hs0000644000000000000000000003611607346545000020463 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Data.TrieMap( -- * Maps over 'Maybe' values MaybeMap, -- * Maps over 'List' values ListMap, -- * 'TrieMap' class TrieMap(..), insertTM, deleteTM, foldMapTM, isEmptyTM, -- * Things helpful for adding additional Instances. 
(>.>), (|>), (|>>), XT, foldMaybe, filterMaybe, -- * Map for leaf compression GenMap, lkG, xtG, mapG, fdG, xtList, lkList ) where import GHC.Prelude import GHC.Types.Unique.DFM import GHC.Types.Unique( Uniquable ) import qualified Data.Map as Map import qualified Data.IntMap as IntMap import GHC.Utils.Outputable import Control.Monad( (>=>) ) import Data.Kind( Type ) import qualified Data.Semigroup as S {- This module implements TrieMaps, which are finite mappings whose key is a structured value like a CoreExpr or Type. This file implements tries over general data structures. Implementation for tries over Core Expressions/Types are available in GHC.Core.Map.Expr. The regular pattern for handling TrieMaps on data structures was first described (to my knowledge) in Connelly and Morris's 1995 paper "A generalization of the Trie Data Structure"; there is also an accessible description of the idea in Okasaki's book "Purely Functional Data Structures", Section 10.3.2 ************************************************************************ * * The TrieMap class * * ************************************************************************ -} type XT a = Maybe a -> Maybe a -- How to alter a non-existent elt (Nothing) -- or an existing elt (Just) class Functor m => TrieMap m where type Key m :: Type emptyTM :: m a lookupTM :: forall b. Key m -> m b -> Maybe b alterTM :: forall b. Key m -> XT b -> m b -> m b filterTM :: (a -> Bool) -> m a -> m a foldTM :: (a -> b -> b) -> m a -> b -> b -- The unusual argument order here makes -- it easy to compose calls to foldTM; -- see for example fdE below insertTM :: TrieMap m => Key m -> a -> m a -> m a insertTM k v m = alterTM k (\_ -> Just v) m deleteTM :: TrieMap m => Key m -> m a -> m a deleteTM k m = alterTM k (\_ -> Nothing) m foldMapTM :: (TrieMap m, Monoid r) => (a -> r) -> m a -> r foldMapTM f m = foldTM (\ x r -> f x S.<> r) m mempty -- This looks inefficient. 
isEmptyTM :: TrieMap m => m a -> Bool isEmptyTM m = foldTM (\ _ _ -> False) m True ---------------------- -- Recall that -- Control.Monad.(>=>) :: (a -> Maybe b) -> (b -> Maybe c) -> a -> Maybe c (>.>) :: (a -> b) -> (b -> c) -> a -> c -- Reverse function composition (do f first, then g) infixr 1 >.> (f >.> g) x = g (f x) infixr 1 |>, |>> (|>) :: a -> (a->b) -> b -- Reverse application x |> f = f x ---------------------- (|>>) :: TrieMap m2 => (XT (m2 a) -> m1 (m2 a) -> m1 (m2 a)) -> (m2 a -> m2 a) -> m1 (m2 a) -> m1 (m2 a) (|>>) f g = f (Just . g . deMaybe) deMaybe :: TrieMap m => Maybe (m a) -> m a deMaybe Nothing = emptyTM deMaybe (Just m) = m {- Note [Every TrieMap is a Functor] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every TrieMap T admits fmap :: (a->b) -> T a -> T b where (fmap f t) applies `f` to every element of the range of `t`. Ergo, we make `Functor` a superclass of `TrieMap`. Moreover it is almost invariably possible to /derive/ Functor for each particular instance. E.g. in the list instance we have data ListMap m a = LM { lm_nil :: Maybe a , lm_cons :: m (ListMap m a) } deriving (Functor) instance TrieMap m => TrieMap (ListMap m) where { .. } Alas, we not yet derive `Functor` for reasons of performance; see #22292. 
-} {- ************************************************************************ * * IntMaps * * ************************************************************************ -} instance TrieMap IntMap.IntMap where type Key IntMap.IntMap = Int emptyTM = IntMap.empty lookupTM k m = IntMap.lookup k m alterTM = xtInt foldTM k m z = IntMap.foldr k z m filterTM f m = IntMap.filter f m xtInt :: Int -> XT a -> IntMap.IntMap a -> IntMap.IntMap a xtInt k f m = IntMap.alter f k m instance Ord k => TrieMap (Map.Map k) where type Key (Map.Map k) = k emptyTM = Map.empty lookupTM = Map.lookup alterTM k f m = Map.alter f k m foldTM k m z = Map.foldr k z m filterTM f m = Map.filter f m {- Note [foldTM determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~ We want foldTM to be deterministic, which is why we have an instance of TrieMap for UniqDFM, but not for UniqFM. Here's an example of some things that go wrong if foldTM is nondeterministic. Consider: f a b = return (a <> b) Depending on the order that the typechecker generates constraints you get either: f :: (Monad m, Monoid a) => a -> a -> m a or: f :: (Monoid a, Monad m) => a -> a -> m a The generated code will be different after desugaring as the dictionaries will be bound in different orders, leading to potential ABI incompatibility. One way to solve this would be to notice that the typeclasses could be sorted alphabetically. Unfortunately that doesn't quite work with this example: f a b = let x = a <> a; y = b <> b in x where you infer: f :: (Monoid m, Monoid m1) => m1 -> m -> m1 or: f :: (Monoid m1, Monoid m) => m1 -> m -> m1 Here you could decide to take the order of the type variables in the type according to depth first traversal and use it to order the constraints. The real trouble starts when the user enables incoherent instances and the compiler has to make an arbitrary choice. 
Consider: class T a b where go :: a -> b -> String instance (Show b) => T Int b where go a b = show a ++ show b instance (Show a) => T a Bool where go a b = show a ++ show b f = go 10 True GHC is free to choose either dictionary to implement f, but for the sake of determinism we'd like it to be consistent when compiling the same sources with the same flags. inert_dicts :: DictMap is implemented with a TrieMap. In getUnsolvedInerts it gets converted to a bag of (Wanted) Cts using a fold. Then in solve_simple_wanteds it's merged with other WantedConstraints. We want the conversion to a bag to be deterministic. For that purpose we use UniqDFM instead of UniqFM to implement the TrieMap. See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for more details on how it's made deterministic. -} instance forall key. Uniquable key => TrieMap (UniqDFM key) where type Key (UniqDFM key) = key emptyTM = emptyUDFM lookupTM k m = lookupUDFM m k alterTM k f m = alterUDFM f m k foldTM k m z = foldUDFM k z m filterTM f m = filterUDFM f m {- ************************************************************************ * * Maybes * * ************************************************************************ If m is a map from k -> val then (MaybeMap m) is a map from (Maybe k) -> val -} data MaybeMap m a = MM { mm_nothing :: Maybe a, mm_just :: m a } -- TODO(22292): derive instance Functor m => Functor (MaybeMap m) where fmap f MM { mm_nothing = mn, mm_just = mj } = MM { mm_nothing = fmap f mn, mm_just = fmap f mj } instance TrieMap m => TrieMap (MaybeMap m) where type Key (MaybeMap m) = Maybe (Key m) emptyTM = MM { mm_nothing = Nothing, mm_just = emptyTM } lookupTM = lkMaybe lookupTM alterTM = xtMaybe alterTM foldTM = fdMaybe filterTM = ftMaybe instance TrieMap m => Foldable (MaybeMap m) where foldMap = foldMapTM lkMaybe :: (forall b. k -> m b -> Maybe b) -> Maybe k -> MaybeMap m a -> Maybe a lkMaybe _ Nothing = mm_nothing lkMaybe lk (Just x) = mm_just >.> lk x xtMaybe :: (forall b. 
k -> XT b -> m b -> m b) -> Maybe k -> XT a -> MaybeMap m a -> MaybeMap m a xtMaybe _ Nothing f m = m { mm_nothing = f (mm_nothing m) } xtMaybe tr (Just x) f m = m { mm_just = mm_just m |> tr x f } fdMaybe :: TrieMap m => (a -> b -> b) -> MaybeMap m a -> b -> b fdMaybe k m = foldMaybe k (mm_nothing m) . foldTM k (mm_just m) ftMaybe :: TrieMap m => (a -> Bool) -> MaybeMap m a -> MaybeMap m a ftMaybe f (MM { mm_nothing = mn, mm_just = mj }) = MM { mm_nothing = filterMaybe f mn, mm_just = filterTM f mj } foldMaybe :: (a -> b -> b) -> Maybe a -> b -> b foldMaybe _ Nothing b = b foldMaybe k (Just a) b = k a b filterMaybe :: (a -> Bool) -> Maybe a -> Maybe a filterMaybe _ Nothing = Nothing filterMaybe f input@(Just x) | f x = input | otherwise = Nothing {- ************************************************************************ * * Lists * * ************************************************************************ -} data ListMap m a = LM { lm_nil :: Maybe a , lm_cons :: m (ListMap m a) } -- TODO(22292): derive instance Functor m => Functor (ListMap m) where fmap f LM { lm_nil = mnil, lm_cons = mcons } = LM { lm_nil = fmap f mnil, lm_cons = fmap (fmap f) mcons } instance TrieMap m => TrieMap (ListMap m) where type Key (ListMap m) = [Key m] emptyTM = LM { lm_nil = Nothing, lm_cons = emptyTM } lookupTM = lkList lookupTM alterTM = xtList alterTM foldTM = fdList filterTM = ftList instance TrieMap m => Foldable (ListMap m) where foldMap = foldMapTM instance (TrieMap m, Outputable a) => Outputable (ListMap m a) where ppr m = text "List elts" <+> ppr (foldTM (:) m []) lkList :: TrieMap m => (forall b. k -> m b -> Maybe b) -> [k] -> ListMap m a -> Maybe a lkList _ [] = lm_nil lkList lk (x:xs) = lm_cons >.> lk x >=> lkList lk xs xtList :: TrieMap m => (forall b. k -> XT b -> m b -> m b) -> [k] -> XT a -> ListMap m a -> ListMap m a xtList _ [] f m = m { lm_nil = f (lm_nil m) } xtList tr (x:xs) f m = m { lm_cons = lm_cons m |> tr x |>> xtList tr xs f } fdList :: forall m a b. 
TrieMap m => (a -> b -> b) -> ListMap m a -> b -> b fdList k m = foldMaybe k (lm_nil m) . foldTM (fdList k) (lm_cons m) ftList :: TrieMap m => (a -> Bool) -> ListMap m a -> ListMap m a ftList f (LM { lm_nil = mnil, lm_cons = mcons }) = LM { lm_nil = filterMaybe f mnil, lm_cons = fmap (filterTM f) mcons } {- ************************************************************************ * * GenMap * * ************************************************************************ Note [Compressed TrieMap] ~~~~~~~~~~~~~~~~~~~~~~~~~ The GenMap constructor augments TrieMaps with leaf compression. This helps solve the performance problem detailed in #9960: suppose we have a handful H of entries in a TrieMap, each with a very large key, size K. If you fold over such a TrieMap you'd expect time O(H). That would certainly be true of an association list! But with TrieMap we actually have to navigate down a long singleton structure to get to the elements, so it takes time O(K*H). This can really hurt on many type-level computation benchmarks: see for example T9872d. The point of a TrieMap is that you need to navigate to the point where only one key remains, and then things should be fast. So the point of a SingletonMap is that, once we are down to a single (key,value) pair, we stop and just use SingletonMap. 'EmptyMap' provides an even more basic (but essential) optimization: if there is nothing in the map, don't bother building out the (possibly infinite) recursive TrieMap structure! Compressed triemaps are heavily used by GHC.Core.Map.Expr. So we have to mark some things as INLINEABLE to permit specialization. 
-} data GenMap m a = EmptyMap | SingletonMap (Key m) a | MultiMap (m a) instance (Outputable a, Outputable (m a)) => Outputable (GenMap m a) where ppr EmptyMap = text "Empty map" ppr (SingletonMap _ v) = text "Singleton map" <+> ppr v ppr (MultiMap m) = ppr m -- TODO(22292): derive instance Functor m => Functor (GenMap m) where fmap = mapG {-# INLINE fmap #-} -- TODO undecidable instance instance (Eq (Key m), TrieMap m) => TrieMap (GenMap m) where type Key (GenMap m) = Key m emptyTM = EmptyMap lookupTM = lkG alterTM = xtG foldTM = fdG filterTM = ftG instance (Eq (Key m), TrieMap m) => Foldable (GenMap m) where foldMap = foldMapTM --We want to be able to specialize these functions when defining eg --tries over (GenMap CoreExpr) which requires INLINEABLE {-# INLINEABLE lkG #-} lkG :: (Eq (Key m), TrieMap m) => Key m -> GenMap m a -> Maybe a lkG _ EmptyMap = Nothing lkG k (SingletonMap k' v') | k == k' = Just v' | otherwise = Nothing lkG k (MultiMap m) = lookupTM k m {-# INLINEABLE xtG #-} xtG :: (Eq (Key m), TrieMap m) => Key m -> XT a -> GenMap m a -> GenMap m a xtG k f EmptyMap = case f Nothing of Just v -> SingletonMap k v Nothing -> EmptyMap xtG k f m@(SingletonMap k' v') | k' == k -- The new key matches the (single) key already in the tree. Hence, -- apply @f@ to @Just v'@ and build a singleton or empty map depending -- on the 'Just'/'Nothing' response respectively. = case f (Just v') of Just v'' -> SingletonMap k' v'' Nothing -> EmptyMap | otherwise -- We've hit a singleton tree for a different key than the one we are -- searching for. Hence apply @f@ to @Nothing@. If result is @Nothing@ then -- we can just return the old map. If not, we need a map with *two* -- entries. The easiest way to do that is to insert two items into an empty -- map of type @m a@. 
= case f Nothing of Nothing -> m Just v -> emptyTM |> alterTM k' (const (Just v')) >.> alterTM k (const (Just v)) >.> MultiMap xtG k f (MultiMap m) = MultiMap (alterTM k f m) {-# INLINEABLE mapG #-} mapG :: Functor m => (a -> b) -> GenMap m a -> GenMap m b mapG _ EmptyMap = EmptyMap mapG f (SingletonMap k v) = SingletonMap k (f v) mapG f (MultiMap m) = MultiMap (fmap f m) {-# INLINEABLE fdG #-} fdG :: TrieMap m => (a -> b -> b) -> GenMap m a -> b -> b fdG _ EmptyMap = \z -> z fdG k (SingletonMap _ v) = \z -> k v z fdG k (MultiMap m) = foldTM k m {-# INLINEABLE ftG #-} ftG :: TrieMap m => (a -> Bool) -> GenMap m a -> GenMap m a ftG _ EmptyMap = EmptyMap ftG f input@(SingletonMap _ v) | f v = input | otherwise = EmptyMap ftG f (MultiMap m) = MultiMap (filterTM f m) -- we don't have enough information to reconstruct the key to make -- a SingletonMap ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Unboxed.hs0000644000000000000000000000272407346545000020524 0ustar0000000000000000-- Unboxed counterparts to data structures {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnliftedNewtypes #-} module GHC.Data.Unboxed ( MaybeUB(JustUB, NothingUB), fmapMaybeUB, fromMaybeUB, apMaybeUB, maybeUB ) where import GHC.Prelude hiding (Maybe(..), Either(..)) -- | Like Maybe, but using unboxed sums. -- -- Use with care. Using a unboxed maybe is not always a win -- in execution *time* even when allocations go down. So make -- sure to benchmark for execution time as well. If the difference -- in *runtime* for the compiler is too small to measure it's likely -- better to use a regular Maybe instead. -- -- This is since it causes more function arguments to be passed, and -- potentially more variables to be captured by closures increasing -- closure size. 
newtype MaybeUB a = MaybeUB (# (# #) | a #) pattern JustUB :: a -> MaybeUB a pattern JustUB x = MaybeUB (# | x #) pattern NothingUB :: MaybeUB a pattern NothingUB = MaybeUB (# (# #) | #) {-# COMPLETE NothingUB, JustUB #-} fromMaybeUB :: a -> MaybeUB a -> a fromMaybeUB d NothingUB = d fromMaybeUB _ (JustUB x) = x apMaybeUB :: MaybeUB (a -> b) -> MaybeUB a -> MaybeUB b apMaybeUB (JustUB f) (JustUB x) = JustUB (f x) apMaybeUB _ _ = NothingUB fmapMaybeUB :: (a -> b) -> MaybeUB a -> MaybeUB b fmapMaybeUB _f NothingUB = NothingUB fmapMaybeUB f (JustUB x) = JustUB $ f x maybeUB :: b -> (a -> b) -> MaybeUB a -> b maybeUB _def f (JustUB x) = f x maybeUB def _f NothingUB = def ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map.hs0000644000000000000000000000440107346545000020635 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE MonoLocalBinds #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Map -- Copyright : (c) Daan Leijen 2002 -- (c) Andriy Palamarchuk 2008 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- An efficient implementation of maps from integer keys to values -- (dictionaries). -- -- This module re-exports the value lazy "Data.Word64Map.Lazy" API, plus -- several deprecated value strict functions. Please note that these functions -- have different strictness properties than those in "Data.Word64Map.Strict": -- they only evaluate the result of the combining function. For example, the -- default value to 'insertWith'' is only evaluated if the combining function -- is called and uses it. -- -- These modules are intended to be imported qualified, to avoid name -- clashes with Prelude functions, e.g. -- -- > import Data.Word64Map (Word64Map) -- > import qualified Data.Word64Map as Word64Map -- -- The implementation is based on /big-endian patricia trees/. 
This data -- structure performs especially well on binary operations like 'union' -- and 'intersection'. However, my benchmarks show that it is also -- (much) faster on insertions and deletions when compared to a generic -- size-balanced map implementation (see "Data.Map"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- -- Operation comments contain the operation time complexity in -- the Big-O notation . -- Many operations have a worst-case complexity of \(O(\min(n,64))\). -- This means that the operation can become linear in the number of -- elements with a maximum of \(64\) ----------------------------------------------------------------------------- module GHC.Data.Word64Map ( module GHC.Data.Word64Map.Lazy ) where import GHC.Data.Word64Map.Lazy ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/0000755000000000000000000000000007346545000020302 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/Internal.hs0000644000000000000000000037024307346545000022423 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_HADDOCK not-home #-} {-# OPTIONS_GHC -fno-warn-incomplete-uni-patterns #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Map.Internal -- Copyright : (c) Daan Leijen 2002 -- (c) Andriy Palamarchuk 2008 -- (c) wren romano 2016 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- = WARNING -- -- This module is considered __internal__. -- -- The Package Versioning Policy __does not apply__. 
-- -- The contents of this module may change __in any way whatsoever__ -- and __without any warning__ between minor versions of this package. -- -- Authors importing this module are expected to track development -- closely. -- -- = Description -- -- This defines the data structures and core (hidden) manipulations -- on representations. -- -- @since 0.5.9 ----------------------------------------------------------------------------- -- [Note: INLINE bit fiddling] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- It is essential that the bit fiddling functions like mask, zero, branchMask -- etc are inlined. If they do not, the memory allocation skyrockets. The GHC -- usually gets it right, but it is disastrous if it does not. Therefore we -- explicitly mark these functions INLINE. -- [Note: Local 'go' functions and capturing] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Care must be taken when using 'go' function which captures an argument. -- Sometimes (for example when the argument is passed to a data constructor, -- as in insert), GHC heap-allocates more than necessary. Therefore C-- code -- must be checked for increased allocation when creating and modifying such -- functions. -- [Note: Order of constructors] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The order of constructors of Word64Map matters when considering performance. -- Currently in GHC 7.0, when type has 3 constructors, they are matched from -- the first to the last -- the best performance is achieved when the -- constructors are ordered by frequency. -- On GHC 7.0, reordering constructors from Nil | Tip | Bin to Bin | Tip | Nil -- improves the benchmark by circa 10%. 
-- module GHC.Data.Word64Map.Internal ( -- * Map type Word64Map(..), Key -- instance Eq,Show -- * Operators , (!), (!?), (\\) -- * Query , null , size , member , notMember , lookup , findWithDefault , lookupLT , lookupGT , lookupLE , lookupGE , disjoint -- * Construction , empty , singleton -- ** Insertion , insert , insertWith , insertWithKey , insertLookupWithKey -- ** Delete\/Update , delete , adjust , adjustWithKey , update , updateWithKey , updateLookupWithKey , alter , alterF -- * Combine -- ** Union , union , unionWith , unionWithKey , unions , unionsWith -- ** Difference , difference , differenceWith , differenceWithKey -- ** Intersection , intersection , intersectionWith , intersectionWithKey -- ** Compose , compose -- ** General combining function , SimpleWhenMissing , SimpleWhenMatched , runWhenMatched , runWhenMissing , merge -- *** @WhenMatched@ tactics , zipWithMaybeMatched , zipWithMatched -- *** @WhenMissing@ tactics , mapMaybeMissing , dropMissing , preserveMissing , mapMissing , filterMissing -- ** Applicative general combining function , WhenMissing (..) , WhenMatched (..) , mergeA -- *** @WhenMatched@ tactics -- | The tactics described for 'merge' work for -- 'mergeA' as well. Furthermore, the following -- are available. , zipWithMaybeAMatched , zipWithAMatched -- *** @WhenMissing@ tactics -- | The tactics described for 'merge' work for -- 'mergeA' as well. Furthermore, the following -- are available. 
, traverseMaybeMissing , traverseMissing , filterAMissing -- ** Deprecated general combining function , mergeWithKey , mergeWithKey' -- * Traversal -- ** Map , map , mapWithKey , traverseWithKey , traverseMaybeWithKey , mapAccum , mapAccumWithKey , mapAccumRWithKey , mapKeys , mapKeysWith , mapKeysMonotonic -- * Folds , foldr , foldl , foldrWithKey , foldlWithKey , foldMapWithKey -- ** Strict folds , foldr' , foldl' , foldrWithKey' , foldlWithKey' -- * Conversion , elems , keys , assocs , keysSet , fromSet -- ** Lists , toList , fromList , fromListWith , fromListWithKey -- ** Ordered lists , toAscList , toDescList , fromAscList , fromAscListWith , fromAscListWithKey , fromDistinctAscList -- * Filter , filter , filterWithKey , restrictKeys , withoutKeys , partition , partitionWithKey , takeWhileAntitone , dropWhileAntitone , spanAntitone , mapMaybe , mapMaybeWithKey , mapEither , mapEitherWithKey , split , splitLookup , splitRoot -- * Submap , isSubmapOf, isSubmapOfBy , isProperSubmapOf, isProperSubmapOfBy -- * Min\/Max , lookupMin , lookupMax , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , updateMin , updateMax , updateMinWithKey , updateMaxWithKey , minView , maxView , minViewWithKey , maxViewWithKey -- * Debugging , showTree , showTreeWith -- * Internal types , Mask, Prefix, Nat -- * Utility , natFromInt , intFromNat , link , linkWithMask , bin , binCheckLeft , binCheckRight , zero , nomatch , match , mask , maskW , shorter , branchMask , highestBitMask -- * Used by "Word64Map.Merge.Lazy" and "Word64Map.Merge.Strict" , mapWhenMissing , mapWhenMatched , lmapWhenMissing , contramapFirstWhenMatched , contramapSecondWhenMatched , mapGentlyWhenMissing , mapGentlyWhenMatched ) where import GHC.Prelude.Basic hiding (lookup, filter, foldr, foldl, foldl', null, map) import Data.Functor.Identity (Identity (..)) import Data.Semigroup (Semigroup(stimes,(<>)),stimesIdempotentMonoid) import Data.Functor.Classes import Control.DeepSeq (NFData(rnf)) 
import qualified Data.Foldable as Foldable import Data.Maybe (fromMaybe) import GHC.Data.Word64Set.Internal (Key) import qualified GHC.Data.Word64Set.Internal as Word64Set import GHC.Utils.Containers.Internal.BitUtil import GHC.Utils.Containers.Internal.StrictPair #ifdef __GLASGOW_HASKELL__ import Data.Coerce import Data.Data (Data(..), Constr, mkConstr, constrIndex, Fixity(Prefix), DataType, mkDataType, gcast1) import GHC.Exts (build) import qualified GHC.Exts as GHCExts import Text.Read #endif import qualified Control.Category as Category import Data.Word -- A "Nat" is a 64 bit machine word (an unsigned Int64) type Nat = Word64 natFromInt :: Key -> Nat natFromInt = id {-# INLINE natFromInt #-} intFromNat :: Nat -> Key intFromNat = id {-# INLINE intFromNat #-} {-------------------------------------------------------------------- Types --------------------------------------------------------------------} -- | A map of integers to values @a@. -- See Note: Order of constructors data Word64Map a = Bin {-# UNPACK #-} !Prefix {-# UNPACK #-} !Mask !(Word64Map a) !(Word64Map a) -- Fields: -- prefix: The most significant bits shared by all keys in this Bin. -- mask: The switching bit to determine if a key should follow the left -- or right subtree of a 'Bin'. -- Invariant: Nil is never found as a child of Bin. -- Invariant: The Mask is a power of 2. It is the largest bit position at which -- two keys of the map differ. -- Invariant: Prefix is the common high-order bits that all elements share to -- the left of the Mask bit. -- Invariant: In (Bin prefix mask left right), left consists of the elements that -- don't have the mask bit set; right is all the elements that do. | Tip {-# UNPACK #-} !Key a | Nil type Prefix = Word64 type Mask = Word64 -- Some stuff from "Data.Word64Set.Internal", for 'restrictKeys' and -- 'withoutKeys' to use. 
type Word64SetPrefix = Word64 type Word64SetBitMap = Word64 bitmapOf :: Word64 -> Word64SetBitMap bitmapOf x = shiftLL 1 (fromIntegral (x .&. Word64Set.suffixBitMask)) {-# INLINE bitmapOf #-} {-------------------------------------------------------------------- Operators --------------------------------------------------------------------} -- | \(O(\min(n,W))\). Find the value at a key. -- Calls 'error' when the element can not be found. -- -- > fromList [(5,'a'), (3,'b')] ! 1 Error: element not in the map -- > fromList [(5,'a'), (3,'b')] ! 5 == 'a' (!) :: Word64Map a -> Key -> a (!) m k = find k m -- | \(O(\min(n,W))\). Find the value at a key. -- Returns 'Nothing' when the element can not be found. -- -- > fromList [(5,'a'), (3,'b')] !? 1 == Nothing -- > fromList [(5,'a'), (3,'b')] !? 5 == Just 'a' -- -- @since 0.5.11 (!?) :: Word64Map a -> Key -> Maybe a (!?) m k = lookup k m -- | Same as 'difference'. (\\) :: Word64Map a -> Word64Map b -> Word64Map a m1 \\ m2 = difference m1 m2 infixl 9 !?,\\{-This comment teaches CPP correct behaviour -} {-------------------------------------------------------------------- Types --------------------------------------------------------------------} instance Monoid (Word64Map a) where mempty = empty mconcat = unions mappend = (<>) -- | @since 0.5.7 instance Semigroup (Word64Map a) where (<>) = union stimes = stimesIdempotentMonoid -- | Folds in order of increasing key. 
instance Foldable.Foldable Word64Map where fold = go where go Nil = mempty go (Tip _ v) = v go (Bin _ m l r) | m < 0 = go r `mappend` go l | otherwise = go l `mappend` go r {-# INLINABLE fold #-} foldr = foldr {-# INLINE foldr #-} foldl = foldl {-# INLINE foldl #-} foldMap f t = go t where go Nil = mempty go (Tip _ v) = f v go (Bin _ m l r) | m < 0 = go r `mappend` go l | otherwise = go l `mappend` go r {-# INLINE foldMap #-} foldl' = foldl' {-# INLINE foldl' #-} foldr' = foldr' {-# INLINE foldr' #-} length = size {-# INLINE length #-} null = null {-# INLINE null #-} toList = elems -- NB: Foldable.toList /= Word64Map.toList {-# INLINE toList #-} elem = go where go !_ Nil = False go x (Tip _ y) = x == y go x (Bin _ _ l r) = go x l || go x r {-# INLINABLE elem #-} maximum = start where start Nil = error "Data.Foldable.maximum (for Data.Word64Map): empty map" start (Tip _ y) = y start (Bin _ m l r) | m < 0 = go (start r) l | otherwise = go (start l) r go !m Nil = m go m (Tip _ y) = max m y go m (Bin _ _ l r) = go (go m l) r {-# INLINABLE maximum #-} minimum = start where start Nil = error "Data.Foldable.minimum (for Data.Word64Map): empty map" start (Tip _ y) = y start (Bin _ m l r) | m < 0 = go (start r) l | otherwise = go (start l) r go !m Nil = m go m (Tip _ y) = min m y go m (Bin _ _ l r) = go (go m l) r {-# INLINABLE minimum #-} sum = foldl' (+) 0 {-# INLINABLE sum #-} product = foldl' (*) 1 {-# INLINABLE product #-} -- | Traverses in order of increasing key. instance Traversable Word64Map where traverse f = traverseWithKey (\_ -> f) {-# INLINE traverse #-} instance NFData a => NFData (Word64Map a) where rnf Nil = () rnf (Tip _ v) = rnf v rnf (Bin _ _ l r) = rnf l `seq` rnf r #if __GLASGOW_HASKELL__ {-------------------------------------------------------------------- A Data instance --------------------------------------------------------------------} -- This instance preserves data abstraction at the cost of inefficiency. 
-- We provide limited reflection services for the sake of data abstraction.

-- Generic-programming support: a 'Word64Map' is presented to "Data.Data"
-- clients as if it were built by a single @fromList@ constructor, hiding
-- the internal Bin\/Tip\/Nil representation.
instance Data a => Data (Word64Map a) where
  gfoldl f z im = z fromList `f` (toList im)
  toConstr _     = fromListConstr
  gunfold k z c  = case constrIndex c of
    1 -> k (z fromList)
    _ -> error "gunfold"
  dataTypeOf _   = intMapDataType
  dataCast1 f    = gcast1 f

-- The single pseudo-constructor exposed by the 'Data' instance above.
fromListConstr :: Constr
fromListConstr = mkConstr intMapDataType "fromList" [] Prefix

intMapDataType :: DataType
intMapDataType = mkDataType "Data.Word64Map.Internal.Word64Map" [fromListConstr]

#endif

{--------------------------------------------------------------------
  Query
--------------------------------------------------------------------}
-- | \(O(1)\). Is the map empty?
--
-- > Data.Word64Map.null (empty) == True
-- > Data.Word64Map.null (singleton 1 'a') == False
null :: Word64Map a -> Bool
null Nil = True
null _   = False
{-# INLINE null #-}

-- | \(O(n)\). Number of elements in the map.
--
-- > size empty == 0
-- > size (singleton 1 'a') == 1
-- > size (fromList([(1,'a'), (2,'c'), (3,'b')])) == 3
size :: Word64Map a -> Int
size = go 0
  where
    -- The bang on the accumulator keeps the counting fold strict, so the
    -- traversal runs in constant additional space.
    go !acc (Bin _ _ l r) = go (go acc l) r
    go acc (Tip _ _) = 1 + acc
    go acc Nil = acc

-- | \(O(\min(n,W))\). Is the key a member of the map?
--
-- > member 5 (fromList [(5,'a'), (3,'b')]) == True
-- > member 1 (fromList [(5,'a'), (3,'b')]) == False

-- See Note: Local 'go' functions and capturing
member :: Key -> Word64Map a -> Bool
member !k = go
  where
    -- 'nomatch' prunes a whole subtree whose prefix cannot contain k;
    -- 'zero' picks the left branch when k's mask bit is clear.
    go (Bin p m l r) | nomatch k p m = False
                     | zero k m  = go l
                     | otherwise = go r
    go (Tip kx _) = k == kx
    go Nil = False

-- | \(O(\min(n,W))\). Is the key not a member of the map?
--
-- > notMember 5 (fromList [(5,'a'), (3,'b')]) == False
-- > notMember 1 (fromList [(5,'a'), (3,'b')]) == True
notMember :: Key -> Word64Map a -> Bool
notMember k m = not $ member k m

-- | \(O(\min(n,W))\). Lookup the value at a key in the map. See also 'Data.Map.lookup'.
-- See Note: Local 'go' functions and capturing lookup :: Key -> Word64Map a -> Maybe a lookup !k = go where go (Bin _p m l r) | zero k m = go l | otherwise = go r go (Tip kx x) | k == kx = Just x | otherwise = Nothing go Nil = Nothing -- See Note: Local 'go' functions and capturing] find :: Key -> Word64Map a -> a find !k = go where go (Bin _p m l r) | zero k m = go l | otherwise = go r go (Tip kx x) | k == kx = x | otherwise = not_found go Nil = not_found not_found = error ("Word64Map.!: key " ++ show k ++ " is not an element of the map") -- | \(O(\min(n,W))\). The expression @('findWithDefault' def k map)@ -- returns the value at key @k@ or returns @def@ when the key is not an -- element of the map. -- -- > findWithDefault 'x' 1 (fromList [(5,'a'), (3,'b')]) == 'x' -- > findWithDefault 'x' 5 (fromList [(5,'a'), (3,'b')]) == 'a' -- See Note: Local 'go' functions and capturing] findWithDefault :: a -> Key -> Word64Map a -> a findWithDefault def !k = go where go (Bin p m l r) | nomatch k p m = def | zero k m = go l | otherwise = go r go (Tip kx x) | k == kx = x | otherwise = def go Nil = def -- | \(O(\min(n,W))\). Find largest key smaller than the given one and return the -- corresponding (key, value) pair. -- -- > lookupLT 3 (fromList [(3,'a'), (5,'b')]) == Nothing -- > lookupLT 4 (fromList [(3,'a'), (5,'b')]) == Just (3, 'a') -- See Note: Local 'go' functions and capturing. lookupLT :: Key -> Word64Map a -> Maybe (Key, a) lookupLT !k t = case t of Bin _ m l r | m < 0 -> if k >= 0 then go r l else go Nil r _ -> go Nil t where go def (Bin p m l r) | nomatch k p m = if k < p then unsafeFindMax def else unsafeFindMax r | zero k m = go def l | otherwise = go l r go def (Tip ky y) | k <= ky = unsafeFindMax def | otherwise = Just (ky, y) go def Nil = unsafeFindMax def -- | \(O(\min(n,W))\). Find smallest key greater than the given one and return the -- corresponding (key, value) pair. 
-- -- > lookupGT 4 (fromList [(3,'a'), (5,'b')]) == Just (5, 'b') -- > lookupGT 5 (fromList [(3,'a'), (5,'b')]) == Nothing -- See Note: Local 'go' functions and capturing. lookupGT :: Key -> Word64Map a -> Maybe (Key, a) lookupGT !k t = case t of Bin _ m l r | m < 0 -> if k >= 0 then go Nil l else go l r _ -> go Nil t where go def (Bin p m l r) | nomatch k p m = if k < p then unsafeFindMin l else unsafeFindMin def | zero k m = go r l | otherwise = go def r go def (Tip ky y) | k >= ky = unsafeFindMin def | otherwise = Just (ky, y) go def Nil = unsafeFindMin def -- | \(O(\min(n,W))\). Find largest key smaller or equal to the given one and return -- the corresponding (key, value) pair. -- -- > lookupLE 2 (fromList [(3,'a'), (5,'b')]) == Nothing -- > lookupLE 4 (fromList [(3,'a'), (5,'b')]) == Just (3, 'a') -- > lookupLE 5 (fromList [(3,'a'), (5,'b')]) == Just (5, 'b') -- See Note: Local 'go' functions and capturing. lookupLE :: Key -> Word64Map a -> Maybe (Key, a) lookupLE !k t = case t of Bin _ m l r | m < 0 -> if k >= 0 then go r l else go Nil r _ -> go Nil t where go def (Bin p m l r) | nomatch k p m = if k < p then unsafeFindMax def else unsafeFindMax r | zero k m = go def l | otherwise = go l r go def (Tip ky y) | k < ky = unsafeFindMax def | otherwise = Just (ky, y) go def Nil = unsafeFindMax def -- | \(O(\min(n,W))\). Find smallest key greater or equal to the given one and return -- the corresponding (key, value) pair. -- -- > lookupGE 3 (fromList [(3,'a'), (5,'b')]) == Just (3, 'a') -- > lookupGE 4 (fromList [(3,'a'), (5,'b')]) == Just (5, 'b') -- > lookupGE 6 (fromList [(3,'a'), (5,'b')]) == Nothing -- See Note: Local 'go' functions and capturing. 
lookupGE :: Key -> Word64Map a -> Maybe (Key, a)
lookupGE !k t = case t of
    -- NOTE(review): 'Key' and 'Mask' are Word64 in this port (see their
    -- definitions above), so the 'm < 0' and 'k >= 0' tests inherited from
    -- IntMap's signed-key handling are constant False/True here; this first
    -- arm appears to be dead code -- confirm before simplifying.
    Bin _ m l r | m < 0 -> if k >= 0
      then go Nil l
      else go l r
    _ -> go Nil t
  where
    -- 'def' is the best candidate subtree found so far: the subtree whose
    -- minimum becomes the answer if the search runs past the largest key
    -- that is < k.
    go def (Bin p m l r)
      | nomatch k p m = if k < p then unsafeFindMin l else unsafeFindMin def
      | zero k m  = go r l
      | otherwise = go def r
    go def (Tip ky y)
      | k > ky    = unsafeFindMin def
      | otherwise = Just (ky, y)
    go def Nil = unsafeFindMin def

-- Helper function for lookupGE and lookupGT. It assumes that if a Bin node is
-- given, it has m > 0.
unsafeFindMin :: Word64Map a -> Maybe (Key, a)
unsafeFindMin Nil = Nothing
unsafeFindMin (Tip ky y) = Just (ky, y)
unsafeFindMin (Bin _ _ l _) = unsafeFindMin l

-- Helper function for lookupLE and lookupLT. It assumes that if a Bin node is
-- given, it has m > 0.
unsafeFindMax :: Word64Map a -> Maybe (Key, a)
unsafeFindMax Nil = Nothing
unsafeFindMax (Tip ky y) = Just (ky, y)
unsafeFindMax (Bin _ _ _ r) = unsafeFindMax r

{--------------------------------------------------------------------
  Disjoint
--------------------------------------------------------------------}
-- | \(O(n+m)\). Check whether the key sets of two maps are disjoint
-- (i.e. their 'intersection' is empty).
-- -- > disjoint (fromList [(2,'a')]) (fromList [(1,()), (3,())]) == True -- > disjoint (fromList [(2,'a')]) (fromList [(1,'a'), (2,'b')]) == False -- > disjoint (fromList []) (fromList []) == True -- -- > disjoint a b == null (intersection a b) -- -- @since 0.6.2.1 disjoint :: Word64Map a -> Word64Map b -> Bool disjoint Nil _ = True disjoint _ Nil = True disjoint (Tip kx _) ys = notMember kx ys disjoint xs (Tip ky _) = notMember ky xs disjoint t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2) | shorter m1 m2 = disjoint1 | shorter m2 m1 = disjoint2 | p1 == p2 = disjoint l1 l2 && disjoint r1 r2 | otherwise = True where disjoint1 | nomatch p2 p1 m1 = True | zero p2 m1 = disjoint l1 t2 | otherwise = disjoint r1 t2 disjoint2 | nomatch p1 p2 m2 = True | zero p1 m2 = disjoint t1 l2 | otherwise = disjoint t1 r2 {-------------------------------------------------------------------- Compose --------------------------------------------------------------------} -- | Relate the keys of one map to the values of -- the other, by using the values of the former as keys for lookups -- in the latter. -- -- Complexity: \( O(n * \min(m,W)) \), where \(m\) is the size of the first argument -- -- > compose (fromList [('a', "A"), ('b', "B")]) (fromList [(1,'a'),(2,'b'),(3,'z')]) = fromList [(1,"A"),(2,"B")] -- -- @ -- ('compose' bc ab '!?') = (bc '!?') <=< (ab '!?') -- @ -- -- __Note:__ Prior to v0.6.4, "Data.Word64Map.Strict" exposed a version of -- 'compose' that forced the values of the output 'Word64Map'. This version does -- not force these values. -- -- @since 0.6.3.1 compose :: Word64Map c -> Word64Map Word64 -> Word64Map c compose bc !ab | null bc = empty | otherwise = mapMaybe (bc !?) ab {-------------------------------------------------------------------- Construction --------------------------------------------------------------------} -- | \(O(1)\). The empty map. -- -- > empty == fromList [] -- > size empty == 0 empty :: Word64Map a empty = Nil {-# INLINE empty #-} -- | \(O(1)\). 
A map of one element. -- -- > singleton 1 'a' == fromList [(1, 'a')] -- > size (singleton 1 'a') == 1 singleton :: Key -> a -> Word64Map a singleton k x = Tip k x {-# INLINE singleton #-} {-------------------------------------------------------------------- Insert --------------------------------------------------------------------} -- | \(O(\min(n,W))\). Insert a new key\/value pair in the map. -- If the key is already present in the map, the associated value is -- replaced with the supplied value, i.e. 'insert' is equivalent to -- @'insertWith' 'const'@. -- -- > insert 5 'x' (fromList [(5,'a'), (3,'b')]) == fromList [(3, 'b'), (5, 'x')] -- > insert 7 'x' (fromList [(5,'a'), (3,'b')]) == fromList [(3, 'b'), (5, 'a'), (7, 'x')] -- > insert 5 'x' empty == singleton 5 'x' insert :: Key -> a -> Word64Map a -> Word64Map a insert !k x t@(Bin p m l r) | nomatch k p m = link k (Tip k x) p t | zero k m = Bin p m (insert k x l) r | otherwise = Bin p m l (insert k x r) insert k x t@(Tip ky _) | k==ky = Tip k x | otherwise = link k (Tip k x) ky t insert k x Nil = Tip k x -- right-biased insertion, used by 'union' -- | \(O(\min(n,W))\). Insert with a combining function. -- @'insertWith' f key value mp@ -- will insert the pair (key, value) into @mp@ if key does -- not exist in the map. If the key does exist, the function will -- insert @f new_value old_value@. -- -- > insertWith (++) 5 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "xxxa")] -- > insertWith (++) 7 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a"), (7, "xxx")] -- > insertWith (++) 5 "xxx" empty == singleton 5 "xxx" insertWith :: (a -> a -> a) -> Key -> a -> Word64Map a -> Word64Map a insertWith f k x t = insertWithKey (\_ x' y' -> f x' y') k x t -- | \(O(\min(n,W))\). Insert with a combining function. -- @'insertWithKey' f key value mp@ -- will insert the pair (key, value) into @mp@ if key does -- not exist in the map. 
If the key does exist, the function will -- insert @f key new_value old_value@. -- -- > let f key new_value old_value = (show key) ++ ":" ++ new_value ++ "|" ++ old_value -- > insertWithKey f 5 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:xxx|a")] -- > insertWithKey f 7 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a"), (7, "xxx")] -- > insertWithKey f 5 "xxx" empty == singleton 5 "xxx" insertWithKey :: (Key -> a -> a -> a) -> Key -> a -> Word64Map a -> Word64Map a insertWithKey f !k x t@(Bin p m l r) | nomatch k p m = link k (Tip k x) p t | zero k m = Bin p m (insertWithKey f k x l) r | otherwise = Bin p m l (insertWithKey f k x r) insertWithKey f k x t@(Tip ky y) | k == ky = Tip k (f k x y) | otherwise = link k (Tip k x) ky t insertWithKey _ k x Nil = Tip k x -- | \(O(\min(n,W))\). The expression (@'insertLookupWithKey' f k x map@) -- is a pair where the first element is equal to (@'lookup' k map@) -- and the second element equal to (@'insertWithKey' f k x map@). 
-- -- > let f key new_value old_value = (show key) ++ ":" ++ new_value ++ "|" ++ old_value -- > insertLookupWithKey f 5 "xxx" (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "5:xxx|a")]) -- > insertLookupWithKey f 7 "xxx" (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a"), (7, "xxx")]) -- > insertLookupWithKey f 5 "xxx" empty == (Nothing, singleton 5 "xxx") -- -- This is how to define @insertLookup@ using @insertLookupWithKey@: -- -- > let insertLookup kx x t = insertLookupWithKey (\_ a _ -> a) kx x t -- > insertLookup 5 "x" (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "x")]) -- > insertLookup 7 "x" (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a"), (7, "x")]) insertLookupWithKey :: (Key -> a -> a -> a) -> Key -> a -> Word64Map a -> (Maybe a, Word64Map a) insertLookupWithKey f !k x t@(Bin p m l r) | nomatch k p m = (Nothing,link k (Tip k x) p t) | zero k m = let (found,l') = insertLookupWithKey f k x l in (found,Bin p m l' r) | otherwise = let (found,r') = insertLookupWithKey f k x r in (found,Bin p m l r') insertLookupWithKey f k x t@(Tip ky y) | k == ky = (Just y,Tip k (f k x y)) | otherwise = (Nothing,link k (Tip k x) ky t) insertLookupWithKey _ k x Nil = (Nothing,Tip k x) {-------------------------------------------------------------------- Deletion --------------------------------------------------------------------} -- | \(O(\min(n,W))\). Delete a key and its value from the map. When the key is not -- a member of the map, the original map is returned. 
--
-- > delete 5 (fromList [(5,"a"), (3,"b")]) == singleton 3 "b"
-- > delete 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > delete 5 empty == empty
delete :: Key -> Word64Map a -> Word64Map a
delete !k t@(Bin p m l r)
  -- 'nomatch' lets us return the original node unchanged on a miss;
  -- binCheckLeft/binCheckRight collapse a Bin whose child became Nil.
  | nomatch k p m = t
  | zero k m  = binCheckLeft p m (delete k l) r
  | otherwise = binCheckRight p m l (delete k r)
delete k t@(Tip ky _)
  | k == ky = Nil
  | otherwise = t
delete _k Nil = Nil

-- | \(O(\min(n,W))\). Adjust a value at a specific key. When the key is not
-- a member of the map, the original map is returned.
--
-- > adjust ("new " ++) 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "new a")]
-- > adjust ("new " ++) 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > adjust ("new " ++) 7 empty == empty
adjust ::  (a -> a) -> Key -> Word64Map a -> Word64Map a
adjust f k m = adjustWithKey (\_ x -> f x) k m

-- | \(O(\min(n,W))\). Adjust a value at a specific key. When the key is not
-- a member of the map, the original map is returned.
--
-- > let f key x = (show key) ++ ":new " ++ x
-- > adjustWithKey f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:new a")]
-- > adjustWithKey f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > adjustWithKey f 7 empty == empty
adjustWithKey ::  (Key -> a -> a) -> Key -> Word64Map a -> Word64Map a
-- NOTE(review): unlike 'delete', there is no 'nomatch' pruning here; on a
-- miss the search still descends to a Tip and rebuilds the spine with the
-- same subtrees. This mirrors upstream containers' IntMap -- not a bug.
adjustWithKey f !k (Bin p m l r)
  | zero k m  = Bin p m (adjustWithKey f k l) r
  | otherwise = Bin p m l (adjustWithKey f k r)
adjustWithKey f k t@(Tip ky y)
  | k == ky   = Tip ky (f k y)
  | otherwise = t
adjustWithKey _ _ Nil = Nil

-- | \(O(\min(n,W))\). The expression (@'update' f k map@) updates the value @x@
-- at @k@ (if it is in the map). If (@f x@) is 'Nothing', the element is
-- deleted. If it is (@'Just' y@), the key @k@ is bound to the new value @y@.
-- -- > let f x = if x == "a" then Just "new a" else Nothing -- > update f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "new a")] -- > update f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")] -- > update f 3 (fromList [(5,"a"), (3,"b")]) == singleton 5 "a" update :: (a -> Maybe a) -> Key -> Word64Map a -> Word64Map a update f = updateWithKey (\_ x -> f x) -- | \(O(\min(n,W))\). The expression (@'update' f k map@) updates the value @x@ -- at @k@ (if it is in the map). If (@f k x@) is 'Nothing', the element is -- deleted. If it is (@'Just' y@), the key @k@ is bound to the new value @y@. -- -- > let f k x = if x == "a" then Just ((show k) ++ ":new a") else Nothing -- > updateWithKey f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:new a")] -- > updateWithKey f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")] -- > updateWithKey f 3 (fromList [(5,"a"), (3,"b")]) == singleton 5 "a" updateWithKey :: (Key -> a -> Maybe a) -> Key -> Word64Map a -> Word64Map a updateWithKey f !k (Bin p m l r) | zero k m = binCheckLeft p m (updateWithKey f k l) r | otherwise = binCheckRight p m l (updateWithKey f k r) updateWithKey f k t@(Tip ky y) | k == ky = case (f k y) of Just y' -> Tip ky y' Nothing -> Nil | otherwise = t updateWithKey _ _ Nil = Nil -- | \(O(\min(n,W))\). Lookup and update. -- The function returns original value, if it is updated. -- This is different behavior than 'Data.Map.updateLookupWithKey'. -- Returns the original key value if the map entry is deleted. 
-- -- > let f k x = if x == "a" then Just ((show k) ++ ":new a") else Nothing -- > updateLookupWithKey f 5 (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "5:new a")]) -- > updateLookupWithKey f 7 (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a")]) -- > updateLookupWithKey f 3 (fromList [(5,"a"), (3,"b")]) == (Just "b", singleton 5 "a") updateLookupWithKey :: (Key -> a -> Maybe a) -> Key -> Word64Map a -> (Maybe a,Word64Map a) updateLookupWithKey f !k (Bin p m l r) | zero k m = let !(found,l') = updateLookupWithKey f k l in (found,binCheckLeft p m l' r) | otherwise = let !(found,r') = updateLookupWithKey f k r in (found,binCheckRight p m l r') updateLookupWithKey f k t@(Tip ky y) | k==ky = case (f k y) of Just y' -> (Just y,Tip ky y') Nothing -> (Just y,Nil) | otherwise = (Nothing,t) updateLookupWithKey _ _ Nil = (Nothing,Nil) -- | \(O(\min(n,W))\). The expression (@'alter' f k map@) alters the value @x@ at @k@, or absence thereof. -- 'alter' can be used to insert, delete, or update a value in an 'Word64Map'. -- In short : @'lookup' k ('alter' f k m) = f ('lookup' k m)@. alter :: (Maybe a -> Maybe a) -> Key -> Word64Map a -> Word64Map a alter f !k t@(Bin p m l r) | nomatch k p m = case f Nothing of Nothing -> t Just x -> link k (Tip k x) p t | zero k m = binCheckLeft p m (alter f k l) r | otherwise = binCheckRight p m l (alter f k r) alter f k t@(Tip ky y) | k==ky = case f (Just y) of Just x -> Tip ky x Nothing -> Nil | otherwise = case f Nothing of Just x -> link k (Tip k x) ky t Nothing -> Tip ky y alter f k Nil = case f Nothing of Just x -> Tip k x Nothing -> Nil -- | \(O(\min(n,W))\). The expression (@'alterF' f k map@) alters the value @x@ at -- @k@, or absence thereof. 'alterF' can be used to inspect, insert, delete, -- or update a value in an 'Word64Map'. In short : @'lookup' k <$> 'alterF' f k m = f -- ('lookup' k m)@. 
-- -- Example: -- -- @ -- interactiveAlter :: Int -> Word64Map String -> IO (Word64Map String) -- interactiveAlter k m = alterF f k m where -- f Nothing = do -- putStrLn $ show k ++ -- " was not found in the map. Would you like to add it?" -- getUserResponse1 :: IO (Maybe String) -- f (Just old) = do -- putStrLn $ "The key is currently bound to " ++ show old ++ -- ". Would you like to change or delete it?" -- getUserResponse2 :: IO (Maybe String) -- @ -- -- 'alterF' is the most general operation for working with an individual -- key that may or may not be in a given map. -- -- Note: 'alterF' is a flipped version of the @at@ combinator from -- @Control.Lens.At@. -- -- @since 0.5.8 alterF :: Functor f => (Maybe a -> f (Maybe a)) -> Key -> Word64Map a -> f (Word64Map a) -- This implementation was stolen from 'Control.Lens.At'. alterF f k m = (<$> f mv) $ \fres -> case fres of Nothing -> maybe m (const (delete k m)) mv Just v' -> insert k v' m where mv = lookup k m {-------------------------------------------------------------------- Union --------------------------------------------------------------------} -- | The union of a list of maps. -- -- > unions [(fromList [(5, "a"), (3, "b")]), (fromList [(5, "A"), (7, "C")]), (fromList [(5, "A3"), (3, "B3")])] -- > == fromList [(3, "b"), (5, "a"), (7, "C")] -- > unions [(fromList [(5, "A3"), (3, "B3")]), (fromList [(5, "A"), (7, "C")]), (fromList [(5, "a"), (3, "b")])] -- > == fromList [(3, "B3"), (5, "A3"), (7, "C")] unions :: Foldable f => f (Word64Map a) -> Word64Map a unions xs = Foldable.foldl' union empty xs -- | The union of a list of maps, with a combining operation. -- -- > unionsWith (++) [(fromList [(5, "a"), (3, "b")]), (fromList [(5, "A"), (7, "C")]), (fromList [(5, "A3"), (3, "B3")])] -- > == fromList [(3, "bB3"), (5, "aAA3"), (7, "C")] unionsWith :: Foldable f => (a->a->a) -> f (Word64Map a) -> Word64Map a unionsWith f ts = Foldable.foldl' (unionWith f) empty ts -- | \(O(n+m)\). 
-- The (left-biased) union of two maps.
-- It prefers the first map when duplicate keys are encountered,
-- i.e. (@'union' == 'unionWith' 'const'@).
--
-- > union (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == fromList [(3, "b"), (5, "a"), (7, "C")]
union :: Word64Map a -> Word64Map a -> Word64Map a
-- 'mergeWithKey'' with 'const' keeps the first map's Tip on key collisions;
-- 'Bin' (rather than 'bin') is safe because both merged subtrees are nonempty.
union m1 m2
  = mergeWithKey' Bin const id id m1 m2

-- | \(O(n+m)\). The union with a combining function.
--
-- > unionWith (++) (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == fromList [(3, "b"), (5, "aA"), (7, "C")]
unionWith :: (a -> a -> a) -> Word64Map a -> Word64Map a -> Word64Map a
unionWith f m1 m2
  = unionWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). The union with a combining function.
--
-- > let f key left_value right_value = (show key) ++ ":" ++ left_value ++ "|" ++ right_value
-- > unionWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == fromList [(3, "b"), (5, "5:a|A"), (7, "C")]
unionWithKey :: (Key -> a -> a -> a) -> Word64Map a -> Word64Map a -> Word64Map a
-- The Tip-only lambda is safe: mergeWithKey' applies the combining function
-- only to two Tips with equal keys (see the merge0 cases in mergeWithKey').
unionWithKey f m1 m2
  = mergeWithKey' Bin (\(Tip k1 x1) (Tip _k2 x2) -> Tip k1 (f k1 x1 x2)) id id m1 m2

{--------------------------------------------------------------------
  Difference
--------------------------------------------------------------------}
-- | \(O(n+m)\). Difference between two maps (based on keys).
--
-- > difference (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 3 "b"
difference :: Word64Map a -> Word64Map b -> Word64Map a
difference m1 m2
  = mergeWithKey (\_ _ _ -> Nothing) id (const Nil) m1 m2

-- | \(O(n+m)\). Difference with a combining function.
--
-- > let f al ar = if al == "b" then Just (al ++ ":" ++ ar) else Nothing
-- > differenceWith f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (3, "B"), (7, "C")])
-- >     == singleton 3 "b:B"
differenceWith :: (a -> b -> Maybe a) -> Word64Map a -> Word64Map b -> Word64Map a
differenceWith f m1 m2
  = differenceWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). Difference with a combining function. When two equal keys are
-- encountered, the combining function is applied to the key and both values.
-- If it returns 'Nothing', the element is discarded (proper set difference).
-- If it returns (@'Just' y@), the element is updated with a new value @y@.
--
-- > let f k al ar = if al == "b" then Just ((show k) ++ ":" ++ al ++ "|" ++ ar) else Nothing
-- > differenceWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (3, "B"), (10, "C")])
-- >     == singleton 3 "3:b|B"
differenceWithKey :: (Key -> a -> b -> Maybe a) -> Word64Map a -> Word64Map b -> Word64Map a
differenceWithKey f m1 m2
  = mergeWithKey f id (const Nil) m1 m2

-- TODO(wrengr): re-verify that asymptotic bound
-- | \(O(n+m)\). Remove all the keys in a given set from a map.
--
-- @
-- m \`withoutKeys\` s = 'filterWithKey' (\\k _ -> k ``Word64Set.notMember`` s) m
-- @
--
-- @since 0.5.8
withoutKeys :: Word64Map a -> Word64Set.Word64Set -> Word64Map a
withoutKeys t1@(Bin p1 m1 l1 r1) t2@(Word64Set.Bin p2 m2 l2 r2)
  -- Standard PATRICIA merge skeleton: descend into whichever side has the
  -- shorter (higher) mask; recurse on both halves when prefixes coincide.
  | shorter m1 m2 = difference1
  | shorter m2 m1 = difference2
  | p1 == p2      = bin p1 m1 (withoutKeys l1 l2) (withoutKeys r1 r2)
  | otherwise     = t1
  where
    difference1
      | nomatch p2 p1 m1 = t1
      | zero p2 m1       = binCheckLeft p1 m1 (withoutKeys l1 t2) r1
      | otherwise        = binCheckRight p1 m1 l1 (withoutKeys r1 t2)
    difference2
      | nomatch p1 p2 m2 = t1
      | zero p1 m2       = withoutKeys t1 l2
      | otherwise        = withoutKeys t1 r2
withoutKeys t1@(Bin p1 m1 _ _) (Word64Set.Tip p2 bm2) =
    let minbit = bitmapOf p1
        lt_minbit = minbit - 1
        maxbit = bitmapOf (p1 .|. (m1 .|. (m1 - 1)))
        -- NOTE(review): lt_minbit/gt_maxbit set every bitmap bit outside the
        -- key range representable under this Bin, presumably so 'withoutBM'
        -- need not range-check — confirm against the Word64Set bitmap layout.
        gt_maxbit = (-maxbit) `xor` maxbit
        -- TODO(wrengr): should we manually inline/unroll 'updatePrefix'
        -- and 'withoutBM' here, in order to avoid redundant case analyses?
    in updatePrefix p2 t1 $ withoutBM (bm2 .|. lt_minbit .|. gt_maxbit)
withoutKeys t1@(Bin _ _ _ _) Word64Set.Nil = t1
withoutKeys t1@(Tip k1 _) t2
  | k1 `Word64Set.member` t2 = Nil
  | otherwise = t1
withoutKeys Nil _ = Nil

-- Apply @f@ to the subtree of @t@ whose keys all share the set-prefix @kp@,
-- leaving the rest of the map untouched.
updatePrefix
    :: Word64SetPrefix -> Word64Map a -> (Word64Map a -> Word64Map a) -> Word64Map a
updatePrefix !kp t@(Bin p m l r) f
  -- Once the Bin's mask is within the suffix bits, the whole subtree shares
  -- one set-prefix: apply @f@ iff it is the prefix we are looking for.
  | m .&. Word64Set.suffixBitMask /= 0 =
      if p .&. Word64Set.prefixBitMask == kp then f t else t
  | nomatch kp p m = t
  | zero kp m      = binCheckLeft p m (updatePrefix kp l f) r
  | otherwise      = binCheckRight p m l (updatePrefix kp r f)
updatePrefix kp t@(Tip kx _) f
  | kx .&. Word64Set.prefixBitMask == kp = f t
  | otherwise = t
updatePrefix _ Nil _ = Nil

-- Delete from @t@ every key whose bit is set in the bitmap @bm@.
-- Precondition (per the caller): all keys in @t@ share one set-prefix.
withoutBM :: Word64SetBitMap -> Word64Map a -> Word64Map a
withoutBM 0 t = t
withoutBM bm (Bin p m l r) =
    -- Split the bitmap into the halves covered by the left/right subtrees.
    let leftBits = bitmapOf (p .|. m) - 1
        bmL = bm .&. leftBits
        bmR = bm `xor` bmL -- = (bm .&. complement leftBits)
    in  bin p m (withoutBM bmL l) (withoutBM bmR r)
withoutBM bm t@(Tip k _)
    -- TODO(wrengr): need we manually inline 'Word64Set.Member' here?
    | k `Word64Set.member` Word64Set.Tip (k .&. Word64Set.prefixBitMask) bm = Nil
    | otherwise = t
withoutBM _ Nil = Nil

{--------------------------------------------------------------------
  Intersection
--------------------------------------------------------------------}
-- | \(O(n+m)\). The (left-biased) intersection of two maps (based on keys).
--
-- > intersection (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 5 "a"
intersection :: Word64Map a -> Word64Map b -> Word64Map a
-- 'bin' (not 'Bin') because an intersected subtree may turn out empty.
intersection m1 m2
  = mergeWithKey' bin const (const Nil) (const Nil) m1 m2

-- TODO(wrengr): re-verify that asymptotic bound
-- | \(O(n+m)\). The restriction of a map to the keys in a set.
--
-- @
-- m \`restrictKeys\` s = 'filterWithKey' (\\k _ -> k ``Word64Set.member`` s) m
-- @
--
-- @since 0.5.8
restrictKeys :: Word64Map a -> Word64Set.Word64Set -> Word64Map a
restrictKeys t1@(Bin p1 m1 l1 r1) t2@(Word64Set.Bin p2 m2 l2 r2)
  -- Dual of 'withoutKeys': keys NOT found in the set are dropped, so the
  -- no-overlap cases produce 'Nil' instead of returning @t1@.
  | shorter m1 m2 = intersection1
  | shorter m2 m1 = intersection2
  | p1 == p2      = bin p1 m1 (restrictKeys l1 l2) (restrictKeys r1 r2)
  | otherwise     = Nil
  where
    intersection1
      | nomatch p2 p1 m1 = Nil
      | zero p2 m1       = restrictKeys l1 t2
      | otherwise        = restrictKeys r1 t2
    intersection2
      | nomatch p1 p2 m2 = Nil
      | zero p1 m2       = restrictKeys t1 l2
      | otherwise        = restrictKeys t1 r2
restrictKeys t1@(Bin p1 m1 _ _) (Word64Set.Tip p2 bm2) =
    let minbit = bitmapOf p1
        ge_minbit = complement (minbit - 1)
        maxbit = bitmapOf (p1 .|. (m1 .|. (m1 - 1)))
        -- NOTE(review): here the bitmap is clamped to the key range under
        -- this Bin (the mirror image of withoutKeys's widening) — confirm
        -- against the Word64Set bitmap layout.
        le_maxbit = maxbit .|. (maxbit - 1)
        -- TODO(wrengr): should we manually inline/unroll 'lookupPrefix'
        -- and 'restrictBM' here, in order to avoid redundant case analyses?
    in restrictBM (bm2 .&. ge_minbit .&. le_maxbit) (lookupPrefix p2 t1)
restrictKeys (Bin _ _ _ _) Word64Set.Nil = Nil
restrictKeys t1@(Tip k1 _) t2
  | k1 `Word64Set.member` t2 = t1
  | otherwise = Nil
restrictKeys Nil _ = Nil

-- | \(O(\min(n,W))\). Restrict to the sub-map with all keys matching
-- a key prefix.
lookupPrefix :: Word64SetPrefix -> Word64Map a -> Word64Map a
lookupPrefix !kp t@(Bin p m l r)
  -- Mask within the suffix bits => whole subtree shares one set-prefix.
  | m .&. Word64Set.suffixBitMask /= 0 =
      if p .&. Word64Set.prefixBitMask == kp then t else Nil
  | nomatch kp p m = Nil
  | zero kp m      = lookupPrefix kp l
  | otherwise      = lookupPrefix kp r
lookupPrefix kp t@(Tip kx _)
  | (kx .&. Word64Set.prefixBitMask) == kp = t
  | otherwise = Nil
lookupPrefix _ Nil = Nil

-- Keep only the keys of @t@ whose bit is set in the bitmap @bm@.
-- Precondition (per the caller): all keys in @t@ share one set-prefix.
restrictBM :: Word64SetBitMap -> Word64Map a -> Word64Map a
restrictBM 0 _ = Nil
restrictBM bm (Bin p m l r) =
    -- Split the bitmap into the halves covered by the left/right subtrees.
    let leftBits = bitmapOf (p .|. m) - 1
        bmL = bm .&. leftBits
        bmR = bm `xor` bmL -- = (bm .&. complement leftBits)
    in  bin p m (restrictBM bmL l) (restrictBM bmR r)
restrictBM bm t@(Tip k _)
    -- TODO(wrengr): need we manually inline 'Word64Set.Member' here?
    | k `Word64Set.member` Word64Set.Tip (k .&. Word64Set.prefixBitMask) bm = t
    | otherwise = Nil
restrictBM _ Nil = Nil

-- | \(O(n+m)\). The intersection with a combining function.
--
-- > intersectionWith (++) (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 5 "aA"
intersectionWith :: (a -> b -> c) -> Word64Map a -> Word64Map b -> Word64Map c
intersectionWith f m1 m2
  = intersectionWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). The intersection with a combining function.
--
-- > let f k al ar = (show k) ++ ":" ++ al ++ "|" ++ ar
-- > intersectionWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 5 "5:a|A"
intersectionWithKey :: (Key -> a -> b -> c) -> Word64Map a -> Word64Map b -> Word64Map c
-- As in 'unionWithKey', the Tip-only lambda is safe: mergeWithKey' applies it
-- only to two Tips with equal keys.
intersectionWithKey f m1 m2
  = mergeWithKey' bin (\(Tip k1 x1) (Tip _k2 x2) -> Tip k1 (f k1 x1 x2)) (const Nil) (const Nil) m1 m2

{--------------------------------------------------------------------
  MergeWithKey
--------------------------------------------------------------------}

-- | \(O(n+m)\). A high-performance universal combining function. Using
-- 'mergeWithKey', all combining functions can be defined without any loss of
-- efficiency (with exception of 'union', 'difference' and 'intersection',
-- where sharing of some nodes is lost with 'mergeWithKey').
--
-- Please make sure you know what is going on when using 'mergeWithKey',
-- otherwise you can be surprised by unexpected code growth or even
-- corruption of the data structure.
--
-- When 'mergeWithKey' is given three arguments, it is inlined to the call
-- site. You should therefore use 'mergeWithKey' only to define your custom
-- combining functions.
-- For example, you could define 'unionWithKey',
-- 'differenceWithKey' and 'intersectionWithKey' as
--
-- > myUnionWithKey f m1 m2 = mergeWithKey (\k x1 x2 -> Just (f k x1 x2)) id id m1 m2
-- > myDifferenceWithKey f m1 m2 = mergeWithKey f id (const empty) m1 m2
-- > myIntersectionWithKey f m1 m2 = mergeWithKey (\k x1 x2 -> Just (f k x1 x2)) (const empty) (const empty) m1 m2
--
-- When calling @'mergeWithKey' combine only1 only2@, a function combining two
-- 'Word64Map's is created, such that
--
-- * if a key is present in both maps, it is passed with both corresponding
--   values to the @combine@ function. Depending on the result, the key is either
--   present in the result with specified value, or is left out;
--
-- * a nonempty subtree present only in the first map is passed to @only1@ and
--   the output is added to the result;
--
-- * a nonempty subtree present only in the second map is passed to @only2@ and
--   the output is added to the result.
--
-- The @only1@ and @only2@ methods /must return a map with a subset (possibly empty) of the keys of the given map/.
-- The values can be modified arbitrarily. Most common variants of @only1@ and
-- @only2@ are 'id' and @'const' 'empty'@, but for example @'map' f@ or
-- @'filterWithKey' f@ could be used for any @f@.
mergeWithKey :: (Key -> a -> b -> Maybe c) -> (Word64Map a -> Word64Map c) -> (Word64Map b -> Word64Map c)
             -> Word64Map a -> Word64Map b -> Word64Map c
mergeWithKey f g1 g2 = mergeWithKey' bin combine g1 g2
  where -- We use the lambda form to avoid non-exhaustive pattern matches warning.
        -- (mergeWithKey' only ever calls this on two Tips with equal keys.)
        combine = \(Tip k1 x1) (Tip _k2 x2) ->
          case f k1 x1 x2 of
            Nothing -> Nil
            Just x  -> Tip k1 x
        {-# INLINE combine #-}
{-# INLINE mergeWithKey #-}

-- Slightly more general version of mergeWithKey. It differs in the following:
--
-- * the combining function operates on maps instead of keys and values. The
--   reason is to enable sharing in union, difference and intersection.
--
-- * mergeWithKey' is given an equivalent of bin. The reason is that in union*,
--   Bin constructor can be used, because we know both subtrees are nonempty.
mergeWithKey' :: (Prefix -> Mask -> Word64Map c -> Word64Map c -> Word64Map c)
              -> (Word64Map a -> Word64Map b -> Word64Map c)
              -> (Word64Map a -> Word64Map c)
              -> (Word64Map b -> Word64Map c)
              -> Word64Map a -> Word64Map b -> Word64Map c
mergeWithKey' bin' f g1 g2 = go
  where
    -- Bin/Bin: classic PATRICIA merge — descend the tree with the shorter
    -- (higher) mask; when prefixes coincide, merge both pairs of subtrees.
    go t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2)
      | shorter m1 m2 = merge1
      | shorter m2 m1 = merge2
      | p1 == p2      = bin' p1 m1 (go l1 l2) (go r1 r2)
      | otherwise     = maybe_link p1 (g1 t1) p2 (g2 t2)
      where
        merge1 | nomatch p2 p1 m1 = maybe_link p1 (g1 t1) p2 (g2 t2)
               | zero p2 m1       = bin' p1 m1 (go l1 t2) (g1 r1)
               | otherwise        = bin' p1 m1 (g1 l1) (go r1 t2)
        merge2 | nomatch p1 p2 m2 = maybe_link p1 (g1 t1) p2 (g2 t2)
               | zero p1 m2       = bin' p2 m2 (go t1 l2) (g2 r2)
               | otherwise        = bin' p2 m2 (g2 l2) (go t1 r2)

    -- Bin/Tip: thread the lone Tip down to its position in the Bin tree.
    go t1'@(Bin _ _ _ _) t2'@(Tip k2' _) = merge0 t2' k2' t1'
      where
        merge0 t2 k2 t1@(Bin p1 m1 l1 r1)
          | nomatch k2 p1 m1 = maybe_link p1 (g1 t1) k2 (g2 t2)
          | zero k2 m1       = bin' p1 m1 (merge0 t2 k2 l1) (g1 r1)
          | otherwise        = bin' p1 m1 (g1 l1) (merge0 t2 k2 r1)
        merge0 t2 k2 t1@(Tip k1 _)
          | k1 == k2  = f t1 t2 -- the only place @f@ is called: two equal-key Tips
          | otherwise = maybe_link k1 (g1 t1) k2 (g2 t2)
        merge0 t2 _ Nil = g2 t2

    go t1@(Bin _ _ _ _) Nil = g1 t1

    -- Tip/anything: symmetric to the Bin/Tip case above.
    go t1'@(Tip k1' _) t2' = merge0 t1' k1' t2'
      where
        merge0 t1 k1 t2@(Bin p2 m2 l2 r2)
          | nomatch k1 p2 m2 = maybe_link k1 (g1 t1) p2 (g2 t2)
          | zero k1 m2       = bin' p2 m2 (merge0 t1 k1 l2) (g2 r2)
          | otherwise        = bin' p2 m2 (g2 l2) (merge0 t1 k1 r2)
        merge0 t1 k1 t2@(Tip k2 _)
          | k1 == k2  = f t1 t2
          | otherwise = maybe_link k1 (g1 t1) k2 (g2 t2)
        merge0 t1 _ Nil = g1 t1

    go Nil t2 = g2 t2

    -- 'link' that tolerates an empty side (g1/g2 may shrink a subtree to Nil).
    maybe_link _ Nil _ t2 = t2
    maybe_link _ t1 _ Nil = t1
    maybe_link p1 t1 p2 t2 = link p1 t1 p2 t2
    {-# INLINE maybe_link #-}
{-# INLINE mergeWithKey' #-}

{--------------------------------------------------------------------
  mergeA
--------------------------------------------------------------------}

-- | A tactic for dealing with keys present
-- in one map but not the
-- other in 'merge' or 'mergeA'.
--
-- A tactic of type @WhenMissing f k x z@ is an abstract representation
-- of a function of type @Key -> x -> f (Maybe z)@.
--
-- @since 0.5.9
data WhenMissing f x y = WhenMissing
  { missingSubtree :: Word64Map x -> f (Word64Map y)
    -- ^ Process a whole subtree that is missing from the other map.
  , missingKey :: Key -> x -> f (Maybe y)
    -- ^ Process a single missing key; 'Nothing' drops the entry.
  }

-- | @since 0.5.9
instance (Applicative f, Monad f) => Functor (WhenMissing f x) where
  fmap = mapWhenMissing
  {-# INLINE fmap #-}

-- | @since 0.5.9
instance (Applicative f, Monad f) => Category.Category (WhenMissing f) where
  id = preserveMissing
  f . g = traverseMaybeMissing $ \ k x -> do
    y <- missingKey g k x
    case y of
      Nothing -> pure Nothing
      Just q  -> missingKey f k q
  {-# INLINE id #-}
  {-# INLINE (.) #-}

-- | Equivalent to @ReaderT k (ReaderT x (MaybeT f))@.
--
-- @since 0.5.9
instance (Applicative f, Monad f) => Applicative (WhenMissing f x) where
  pure x = mapMissing (\ _ _ -> x)
  f <*> g = traverseMaybeMissing $ \k x -> do
    res1 <- missingKey f k x
    case res1 of
      Nothing -> pure Nothing
      -- (pure $!) forces the Maybe result to avoid a thunk build-up.
      Just r  -> (pure $!) . fmap r =<< missingKey g k x
  {-# INLINE pure #-}
  {-# INLINE (<*>) #-}

-- | Equivalent to @ReaderT k (ReaderT x (MaybeT f))@.
--
-- @since 0.5.9
instance (Applicative f, Monad f) => Monad (WhenMissing f x) where
  m >>= f = traverseMaybeMissing $ \k x -> do
    res1 <- missingKey m k x
    case res1 of
      Nothing -> pure Nothing
      Just r  -> missingKey (f r) k x
  {-# INLINE (>>=) #-}

-- | Map covariantly over a @'WhenMissing' f x@.
--
-- @since 0.5.9
mapWhenMissing
  :: (Applicative f, Monad f)
  => (a -> b)
  -> WhenMissing f x a
  -> WhenMissing f x b
mapWhenMissing f t = WhenMissing
  { missingSubtree = \m -> missingSubtree t m >>= \m' -> pure $! fmap f m'
  , missingKey     = \k x -> missingKey t k x >>= \q -> (pure $! fmap f q) }
{-# INLINE mapWhenMissing #-}

-- | Map covariantly over a @'WhenMissing' f x@, using only a
-- 'Functor f' constraint.
mapGentlyWhenMissing
  :: Functor f
  => (a -> b)
  -> WhenMissing f x a
  -> WhenMissing f x b
mapGentlyWhenMissing f t = WhenMissing
  { missingSubtree = \m -> fmap f <$> missingSubtree t m
  , missingKey     = \k x -> fmap f <$> missingKey t k x }
{-# INLINE mapGentlyWhenMissing #-}

-- | Map covariantly over a @'WhenMatched' f k x@, using only a
-- 'Functor f' constraint.
mapGentlyWhenMatched
  :: Functor f
  => (a -> b)
  -> WhenMatched f x y a
  -> WhenMatched f x y b
mapGentlyWhenMatched f t =
  zipWithMaybeAMatched $ \k x y -> fmap f <$> runWhenMatched t k x y
{-# INLINE mapGentlyWhenMatched #-}

-- | Map contravariantly over a @'WhenMissing' f _ x@.
--
-- @since 0.5.9
lmapWhenMissing :: (b -> a) -> WhenMissing f a x -> WhenMissing f b x
lmapWhenMissing f t = WhenMissing
  { missingSubtree = \m -> missingSubtree t (fmap f m)
  , missingKey     = \k x -> missingKey t k (f x) }
{-# INLINE lmapWhenMissing #-}

-- | Map contravariantly over a @'WhenMatched' f _ y z@.
--
-- @since 0.5.9
contramapFirstWhenMatched
  :: (b -> a)
  -> WhenMatched f a y z
  -> WhenMatched f b y z
contramapFirstWhenMatched f t =
  WhenMatched $ \k x y -> runWhenMatched t k (f x) y
{-# INLINE contramapFirstWhenMatched #-}

-- | Map contravariantly over a @'WhenMatched' f x _ z@.
--
-- @since 0.5.9
contramapSecondWhenMatched
  :: (b -> a)
  -> WhenMatched f x a z
  -> WhenMatched f x b z
contramapSecondWhenMatched f t =
  WhenMatched $ \k x y -> runWhenMatched t k x (f y)
{-# INLINE contramapSecondWhenMatched #-}

-- | A tactic for dealing with keys present in one map but not the
-- other in 'merge'.
--
-- A tactic of type @SimpleWhenMissing x z@ is an abstract
-- representation of a function of type @Key -> x -> Maybe z@.
--
-- @since 0.5.9
type SimpleWhenMissing = WhenMissing Identity

-- | A tactic for dealing with keys present in both maps in 'merge'
-- or 'mergeA'.
--
-- A tactic of type @WhenMatched f x y z@ is an abstract representation
-- of a function of type @Key -> x -> y -> f (Maybe z)@.
--
-- @since 0.5.9
newtype WhenMatched f x y z = WhenMatched
  { matchedKey :: Key -> x -> y -> f (Maybe z) }

-- | Along with zipWithMaybeAMatched, witnesses the isomorphism
-- between @WhenMatched f x y z@ and @Key -> x -> y -> f (Maybe z)@.
--
-- @since 0.5.9
runWhenMatched :: WhenMatched f x y z -> Key -> x -> y -> f (Maybe z)
runWhenMatched = matchedKey
{-# INLINE runWhenMatched #-}

-- | Along with traverseMaybeMissing, witnesses the isomorphism
-- between @WhenMissing f x y@ and @Key -> x -> f (Maybe y)@.
--
-- @since 0.5.9
runWhenMissing :: WhenMissing f x y -> Key-> x -> f (Maybe y)
runWhenMissing = missingKey
{-# INLINE runWhenMissing #-}

-- | @since 0.5.9
instance Functor f => Functor (WhenMatched f x y) where
  fmap = mapWhenMatched
  {-# INLINE fmap #-}

-- | @since 0.5.9
instance (Monad f, Applicative f) => Category.Category (WhenMatched f x) where
  id = zipWithMatched (\_ _ y -> y)
  f . g = zipWithMaybeAMatched $ \k x y -> do
    res <- runWhenMatched g k x y
    case res of
      Nothing -> pure Nothing
      Just r  -> runWhenMatched f k x r
  {-# INLINE id #-}
  {-# INLINE (.) #-}

-- | Equivalent to @ReaderT Key (ReaderT x (ReaderT y (MaybeT f)))@
--
-- @since 0.5.9
instance (Monad f, Applicative f) => Applicative (WhenMatched f x y) where
  pure x = zipWithMatched (\_ _ _ -> x)
  fs <*> xs = zipWithMaybeAMatched $ \k x y -> do
    res <- runWhenMatched fs k x y
    case res of
      Nothing -> pure Nothing
      -- (pure $!) forces the Maybe result to avoid a thunk build-up.
      Just r  -> (pure $!) . fmap r =<< runWhenMatched xs k x y
  {-# INLINE pure #-}
  {-# INLINE (<*>) #-}

-- | Equivalent to @ReaderT Key (ReaderT x (ReaderT y (MaybeT f)))@
--
-- @since 0.5.9
instance (Monad f, Applicative f) => Monad (WhenMatched f x y) where
  m >>= f = zipWithMaybeAMatched $ \k x y -> do
    res <- runWhenMatched m k x y
    case res of
      Nothing -> pure Nothing
      Just r  -> runWhenMatched (f r) k x y
  {-# INLINE (>>=) #-}

-- | Map covariantly over a @'WhenMatched' f x y@.
--
-- @since 0.5.9
mapWhenMatched
  :: Functor f
  => (a -> b)
  -> WhenMatched f x y a
  -> WhenMatched f x y b
mapWhenMatched f (WhenMatched g) =
  WhenMatched $ \k x y -> fmap (fmap f) (g k x y)
{-# INLINE mapWhenMatched #-}

-- | A tactic for dealing with keys present in both maps in 'merge'.
--
-- A tactic of type @SimpleWhenMatched x y z@ is an abstract
-- representation of a function of type @Key -> x -> y -> Maybe z@.
--
-- @since 0.5.9
type SimpleWhenMatched = WhenMatched Identity

-- | When a key is found in both maps, apply a function to the key
-- and values and use the result in the merged map.
--
-- > zipWithMatched
-- >   :: (Key -> x -> y -> z)
-- >   -> SimpleWhenMatched x y z
--
-- @since 0.5.9
zipWithMatched
  :: Applicative f
  => (Key -> x -> y -> z)
  -> WhenMatched f x y z
zipWithMatched f = WhenMatched $ \ k x y -> pure . Just $ f k x y
{-# INLINE zipWithMatched #-}

-- | When a key is found in both maps, apply a function to the key
-- and values to produce an action and use its result in the merged
-- map.
--
-- @since 0.5.9
zipWithAMatched
  :: Applicative f
  => (Key -> x -> y -> f z)
  -> WhenMatched f x y z
zipWithAMatched f = WhenMatched $ \ k x y -> Just <$> f k x y
{-# INLINE zipWithAMatched #-}

-- | When a key is found in both maps, apply a function to the key
-- and values and maybe use the result in the merged map.
--
-- > zipWithMaybeMatched
-- >   :: (Key -> x -> y -> Maybe z)
-- >   -> SimpleWhenMatched x y z
--
-- @since 0.5.9
zipWithMaybeMatched
  :: Applicative f
  => (Key -> x -> y -> Maybe z)
  -> WhenMatched f x y z
zipWithMaybeMatched f = WhenMatched $ \ k x y -> pure $ f k x y
{-# INLINE zipWithMaybeMatched #-}

-- | When a key is found in both maps, apply a function to the key
-- and values, perform the resulting action, and maybe use the
-- result in the merged map.
--
-- This is the fundamental 'WhenMatched' tactic.
--
-- @since 0.5.9
zipWithMaybeAMatched
  :: (Key -> x -> y -> f (Maybe z))
  -> WhenMatched f x y z
zipWithMaybeAMatched f = WhenMatched $ \ k x y -> f k x y
{-# INLINE zipWithMaybeAMatched #-}

-- | Drop all the entries whose keys are missing from the other
-- map.
--
-- > dropMissing :: SimpleWhenMissing x y
--
-- prop> dropMissing = mapMaybeMissing (\_ _ -> Nothing)
--
-- but @dropMissing@ is much faster.
--
-- @since 0.5.9
dropMissing :: Applicative f => WhenMissing f x y
dropMissing = WhenMissing
  { missingSubtree = const (pure Nil)
  , missingKey     = \_ _ -> pure Nothing }
{-# INLINE dropMissing #-}

-- | Preserve, unchanged, the entries whose keys are missing from
-- the other map.
--
-- > preserveMissing :: SimpleWhenMissing x x
--
-- prop> preserveMissing = Merge.Lazy.mapMaybeMissing (\_ x -> Just x)
--
-- but @preserveMissing@ is much faster.
--
-- @since 0.5.9
preserveMissing :: Applicative f => WhenMissing f x x
preserveMissing = WhenMissing
  { missingSubtree = pure
  , missingKey     = \_ v -> pure (Just v) }
{-# INLINE preserveMissing #-}

-- | Map over the entries whose keys are missing from the other map.
--
-- > mapMissing :: (k -> x -> y) -> SimpleWhenMissing x y
--
-- prop> mapMissing f = mapMaybeMissing (\k x -> Just $ f k x)
--
-- but @mapMissing@ is somewhat faster.
--
-- @since 0.5.9
mapMissing :: Applicative f => (Key -> x -> y) -> WhenMissing f x y
mapMissing f = WhenMissing
  { missingSubtree = \m -> pure $! mapWithKey f m
  , missingKey     = \k x -> pure $ Just (f k x) }
{-# INLINE mapMissing #-}

-- | Map over the entries whose keys are missing from the other
-- map, optionally removing some. This is the most powerful
-- 'SimpleWhenMissing' tactic, but others are usually more efficient.
--
-- > mapMaybeMissing :: (Key -> x -> Maybe y) -> SimpleWhenMissing x y
--
-- prop> mapMaybeMissing f = traverseMaybeMissing (\k x -> pure (f k x))
--
-- but @mapMaybeMissing@ uses fewer unnecessary 'Applicative'
-- operations.
--
-- @since 0.5.9
mapMaybeMissing
  :: Applicative f => (Key -> x -> Maybe y) -> WhenMissing f x y
mapMaybeMissing f = WhenMissing
  { missingSubtree = \m -> pure $! mapMaybeWithKey f m
  , missingKey     = \k x -> pure $! f k x }
{-# INLINE mapMaybeMissing #-}

-- | Filter the entries whose keys are missing from the other map.
--
-- > filterMissing :: (k -> x -> Bool) -> SimpleWhenMissing x x
--
-- prop> filterMissing f = Merge.Lazy.mapMaybeMissing $ \k x -> guard (f k x) *> Just x
--
-- but this should be a little faster.
--
-- @since 0.5.9
filterMissing
  :: Applicative f => (Key -> x -> Bool) -> WhenMissing f x x
filterMissing f = WhenMissing
  { missingSubtree = \m -> pure $! filterWithKey f m
  , missingKey     = \k x -> pure $! if f k x then Just x else Nothing }
{-# INLINE filterMissing #-}

-- | Filter the entries whose keys are missing from the other map
-- using some 'Applicative' action.
--
-- > filterAMissing f = Merge.Lazy.traverseMaybeMissing $
-- >   \k x -> (\b -> guard b *> Just x) <$> f k x
--
-- but this should be a little faster.
--
-- @since 0.5.9
filterAMissing
  :: Applicative f => (Key -> x -> f Bool) -> WhenMissing f x x
filterAMissing f = WhenMissing
  { missingSubtree = \m -> filterWithKeyA f m
  , missingKey     = \k x -> bool Nothing (Just x) <$> f k x }
{-# INLINE filterAMissing #-}

-- | \(O(n)\). Filter keys and values using an 'Applicative' predicate.
filterWithKeyA
  :: Applicative f
  => (Key -> a -> f Bool) -> Word64Map a -> f (Word64Map a)
filterWithKeyA _ Nil           = pure Nil
filterWithKeyA f t@(Tip k x)   = (\b -> if b then t else Nil) <$> f k x
filterWithKeyA f (Bin p m l r)
  -- Mirrors 'binA' in 'mergeA': when m < 0 the right subtree's effects must
  -- run first so effects happen in key order.
  | m < 0     = liftA2 (flip (bin p m)) (filterWithKeyA f r) (filterWithKeyA f l)
  | otherwise = liftA2 (bin p m) (filterWithKeyA f l) (filterWithKeyA f r)

-- | This wasn't in Data.Bool until 4.7.0, so we define it here
bool :: a -> a -> Bool -> a
bool f _ False = f
bool _ t True  = t

-- | Traverse over the entries whose keys are missing from the other
-- map.
--
-- @since 0.5.9
traverseMissing
  :: Applicative f => (Key -> x -> f y) -> WhenMissing f x y
traverseMissing f = WhenMissing
  { missingSubtree = traverseWithKey f
  , missingKey = \k x -> Just <$> f k x }
{-# INLINE traverseMissing #-}

-- | Traverse over the entries whose keys are missing from the other
-- map, optionally producing values to put in the result. This is
-- the most powerful 'WhenMissing' tactic, but others are usually
-- more efficient.
--
-- @since 0.5.9
traverseMaybeMissing
  :: Applicative f => (Key -> x -> f (Maybe y)) -> WhenMissing f x y
traverseMaybeMissing f = WhenMissing
  { missingSubtree = traverseMaybeWithKey f
  , missingKey = f }
{-# INLINE traverseMaybeMissing #-}

-- | \(O(n)\). Traverse keys\/values and collect the 'Just' results.
--
-- @since 0.6.4
traverseMaybeWithKey
  :: Applicative f => (Key -> a -> f (Maybe b)) -> Word64Map a -> f (Word64Map b)
traverseMaybeWithKey f = go
  where
    go Nil         = pure Nil
    go (Tip k x)   = maybe Nil (Tip k) <$> f k x
    go (Bin p m l r)
      -- Mirrors 'binA' below: when m < 0 the right subtree's effects must
      -- run first so effects happen in key order.
      | m < 0     = liftA2 (flip (bin p m)) (go r) (go l)
      | otherwise = liftA2 (bin p m) (go l) (go r)

-- | Merge two maps.
--
-- 'merge' takes two 'WhenMissing' tactics, a 'WhenMatched' tactic
-- and two maps. It uses the tactics to merge the maps. Its behavior
-- is best understood via its fundamental tactics, 'mapMaybeMissing'
-- and 'zipWithMaybeMatched'.
--
-- Consider
--
-- @
-- merge (mapMaybeMissing g1)
--       (mapMaybeMissing g2)
--       (zipWithMaybeMatched f)
--       m1 m2
-- @
--
-- Take, for example,
--
-- @
-- m1 = [(0, \'a\'), (1, \'b\'), (3, \'c\'), (4, \'d\')]
-- m2 = [(1, "one"), (2, "two"), (4, "three")]
-- @
--
-- 'merge' will first \"align\" these maps by key:
--
-- @
-- m1 = [(0, \'a\'), (1, \'b\'), (3, \'c\'), (4, \'d\')]
-- m2 = [(1, "one"), (2, "two"), (4, "three")]
-- @
--
-- It will then pass the individual entries and pairs of entries
-- to @g1@, @g2@, or @f@ as appropriate:
--
-- @
-- maybes = [g1 0 \'a\', f 1 \'b\' "one", g2 2 "two", g1 3 \'c\', f 4 \'d\' "three"]
-- @
--
-- This produces a 'Maybe' for each key:
--
-- @
-- keys =    0        1          2           3        4
-- results = [Nothing, Just True, Just False, Nothing, Just True]
-- @
--
-- Finally, the @Just@ results are collected into a map:
--
-- @
-- return value = [(1, True), (2, False), (4, True)]
-- @
--
-- The other tactics below are optimizations or simplifications of
-- 'mapMaybeMissing' for special cases. Most importantly,
--
-- * 'dropMissing' drops all the keys.
-- * 'preserveMissing' leaves all the entries alone.
--
-- When 'merge' is given three arguments, it is inlined at the call
-- site. To prevent excessive inlining, you should typically use
-- 'merge' to define your custom combining functions.
--
--
-- Examples:
--
-- prop> unionWithKey f = merge preserveMissing preserveMissing (zipWithMatched f)
-- prop> intersectionWithKey f = merge dropMissing dropMissing (zipWithMatched f)
-- prop> differenceWith f = merge diffPreserve diffDrop f
-- prop> symmetricDifference = merge diffPreserve diffPreserve (\ _ _ _ -> Nothing)
-- prop> mapEachPiece f g h = merge (diffMapWithKey f) (diffMapWithKey g)
--
-- @since 0.5.9
merge
  :: SimpleWhenMissing a c -- ^ What to do with keys in @m1@ but not @m2@
  -> SimpleWhenMissing b c -- ^ What to do with keys in @m2@ but not @m1@
  -> SimpleWhenMatched a b c -- ^ What to do with keys in both @m1@ and @m2@
  -> Word64Map a -- ^ Map @m1@
  -> Word64Map b -- ^ Map @m2@
  -> Word64Map c
merge g1 g2 f m1 m2 =
  runIdentity $ mergeA g1 g2 f m1 m2
{-# INLINE merge #-}

-- | An applicative version of 'merge'.
--
-- 'mergeA' takes two 'WhenMissing' tactics, a 'WhenMatched'
-- tactic and two maps. It uses the tactics to merge the maps.
-- Its behavior is best understood via its fundamental tactics,
-- 'traverseMaybeMissing' and 'zipWithMaybeAMatched'.
--
-- Consider
--
-- @
-- mergeA (traverseMaybeMissing g1)
--        (traverseMaybeMissing g2)
--        (zipWithMaybeAMatched f)
--        m1 m2
-- @
--
-- Take, for example,
--
-- @
-- m1 = [(0, \'a\'), (1, \'b\'), (3,\'c\'), (4, \'d\')]
-- m2 = [(1, "one"), (2, "two"), (4, "three")]
-- @
--
-- 'mergeA' will first \"align\" these maps by key:
--
-- @
-- m1 = [(0, \'a\'), (1, \'b\'), (3, \'c\'), (4, \'d\')]
-- m2 = [(1, "one"), (2, "two"), (4, "three")]
-- @
--
-- It will then pass the individual entries and pairs of entries
-- to @g1@, @g2@, or @f@ as appropriate:
--
-- @
-- actions = [g1 0 \'a\', f 1 \'b\' "one", g2 2 "two", g1 3 \'c\', f 4 \'d\' "three"]
-- @
--
-- Next, it will perform the actions in the @actions@ list in order from
-- left to right.
--
-- @
-- keys =    0        1          2           3        4
-- results = [Nothing, Just True, Just False, Nothing, Just True]
-- @
--
-- Finally, the @Just@ results are collected into a map:
--
-- @
-- return value = [(1, True), (2, False), (4, True)]
-- @
--
-- The other tactics below are optimizations or simplifications of
-- 'traverseMaybeMissing' for special cases. Most importantly,
--
-- * 'dropMissing' drops all the keys.
-- * 'preserveMissing' leaves all the entries alone.
-- * 'mapMaybeMissing' does not use the 'Applicative' context.
--
-- When 'mergeA' is given three arguments, it is inlined at the call
-- site. To prevent excessive inlining, you should generally only use
-- 'mergeA' to define custom combining functions.
--
-- @since 0.5.9
mergeA
  :: (Applicative f)
  => WhenMissing f a c -- ^ What to do with keys in @m1@ but not @m2@
  -> WhenMissing f b c -- ^ What to do with keys in @m2@ but not @m1@
  -> WhenMatched f a b c -- ^ What to do with keys in both @m1@ and @m2@
  -> Word64Map a -- ^ Map @m1@
  -> Word64Map b -- ^ Map @m2@
  -> f (Word64Map c)
mergeA
    WhenMissing{missingSubtree = g1t, missingKey = g1k}
    WhenMissing{missingSubtree = g2t, missingKey = g2k}
    WhenMatched{matchedKey = f} =
    go
  where
    go t1 Nil = g1t t1
    go Nil t2 = g2t t2

    -- This case is already covered below.
    -- go (Tip k1 x1) (Tip k2 x2) = mergeTips k1 x1 k2 x2

    -- Tip/Bin: thread the lone Tip of the first map down the second map.
    go (Tip k1 x1) t2' = merge2 t2'
      where
        merge2 t2@(Bin p2 m2 l2 r2)
          | nomatch k1 p2 m2 = linkA k1 (subsingletonBy g1k k1 x1) p2 (g2t t2)
          | zero k1 m2       = binA p2 m2 (merge2 l2) (g2t r2)
          | otherwise        = binA p2 m2 (g2t l2) (merge2 r2)
        merge2 (Tip k2 x2)   = mergeTips k1 x1 k2 x2
        merge2 Nil           = subsingletonBy g1k k1 x1

    -- Bin/Tip: symmetric to the case above.
    go t1' (Tip k2 x2) = merge1 t1'
      where
        merge1 t1@(Bin p1 m1 l1 r1)
          | nomatch k2 p1 m1 = linkA p1 (g1t t1) k2 (subsingletonBy g2k k2 x2)
          | zero k2 m1       = binA p1 m1 (merge1 l1) (g1t r1)
          | otherwise        = binA p1 m1 (g1t l1) (merge1 r1)
        merge1 (Tip k1 x1)   = mergeTips k1 x1 k2 x2
        merge1 Nil           = subsingletonBy g2k k2 x2

    -- Bin/Bin: classic PATRICIA merge, with 'binA'/'linkA' keeping the
    -- Applicative effects in key order.
    go t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2)
      | shorter m1 m2 = merge1
      | shorter m2 m1 = merge2
      | p1 == p2      = binA p1 m1 (go l1 l2) (go r1 r2)
      | otherwise     = linkA p1 (g1t t1) p2 (g2t t2)
      where
        merge1
          | nomatch p2 p1 m1 = linkA p1 (g1t t1) p2 (g2t t2)
          | zero p2 m1       = binA p1 m1 (go l1 t2) (g1t r1)
          | otherwise        = binA p1 m1 (g1t l1) (go r1 t2)
        merge2
          | nomatch p1 p2 m2 = linkA p1 (g1t t1) p2 (g2t t2)
          | zero p1 m2       = binA p2 m2 (go t1 l2) (g2t r2)
          | otherwise        = binA p2 m2 (g2t l2) (go t1 r2)

    -- Run the missing-key tactic on a single entry, yielding Nil or a Tip.
    subsingletonBy gk k x = maybe Nil (Tip k) <$> gk k x
    {-# INLINE subsingletonBy #-}

    mergeTips k1 x1 k2 x2
      | k1 == k2  = maybe Nil (Tip k1) <$> f k1 x1 x2
      | k1 <  k2  = liftA2 (subdoubleton k1 k2) (g1k k1 x1) (g2k k2 x2)
        {- = link_ k1 k2 <$> subsingletonBy g1k k1 x1 <*> subsingletonBy g2k k2 x2 -}
      | otherwise = liftA2 (subdoubleton k2 k1) (g2k k2 x2) (g1k k1 x1)
    {-# INLINE mergeTips #-}

    -- Build a map from up to two surviving entries (keys already ordered).
    subdoubleton _ _   Nothing Nothing     = Nil
    subdoubleton _ k2  Nothing (Just y2)   = Tip k2 y2
    subdoubleton k1 _  (Just y1) Nothing   = Tip k1 y1
    subdoubleton k1 k2 (Just y1) (Just y2) = link k1 (Tip k1 y1) k2 (Tip k2 y2)
    {-# INLINE subdoubleton #-}

    -- A variant of 'link_' which makes sure to execute side-effects
    -- in the right order.
    linkA
        :: Applicative f
        => Prefix -> f (Word64Map a)
        -> Prefix -> f (Word64Map a)
        -> f (Word64Map a)
    linkA p1 t1 p2 t2
      | zero p1 m = binA p m t1 t2
      | otherwise = binA p m t2 t1
      where
        m = branchMask p1 p2
        p = mask p1 m
    {-# INLINE linkA #-}

    -- A variant of 'bin' that ensures that effects for negative keys are executed
    -- first.
    binA
        :: Applicative f
        => Prefix
        -> Mask
        -> f (Word64Map a)
        -> f (Word64Map a)
        -> f (Word64Map a)
    binA p m a b
      | m < 0     = liftA2 (flip (bin p m)) b a
      | otherwise = liftA2 (bin p m) a b
    {-# INLINE binA #-}
{-# INLINE mergeA #-}

{--------------------------------------------------------------------
  Min\/Max
--------------------------------------------------------------------}

-- | \(O(\min(n,W))\). Update the value at the minimal key.
--
-- > updateMinWithKey (\ k a -> Just ((show k) ++ ":" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3,"3:b"), (5,"a")]
-- > updateMinWithKey (\ _ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
updateMinWithKey :: (Key -> a -> Maybe a) -> Word64Map a -> Word64Map a
updateMinWithKey f t =
  -- At the root, a negative mask means the right subtree holds the smaller
  -- keys, so the minimum lives in @r@; otherwise descend leftwards.
  case t of Bin p m l r | m < 0 -> binCheckRight p m l (go f r)
            _ -> go f t
  where go f' (Bin p m l r) = binCheckLeft p m (go f' l) r
        go f' (Tip k y) = case f' k y of
                            Just y' -> Tip k y'
                            Nothing -> Nil
        go _ Nil = error "updateMinWithKey Nil"

-- | \(O(\min(n,W))\). Update the value at the maximal key.
-- -- > updateMaxWithKey (\ k a -> Just ((show k) ++ ":" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3,"b"), (5,"5:a")] -- > updateMaxWithKey (\ _ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 3 "b" updateMaxWithKey :: (Key -> a -> Maybe a) -> Word64Map a -> Word64Map a updateMaxWithKey f t = case t of Bin p m l r | m < 0 -> binCheckLeft p m (go f l) r _ -> go f t where go f' (Bin p m l r) = binCheckRight p m l (go f' r) go f' (Tip k y) = case f' k y of Just y' -> Tip k y' Nothing -> Nil go _ Nil = error "updateMaxWithKey Nil" data View a = View {-# UNPACK #-} !Key a !(Word64Map a) -- | \(O(\min(n,W))\). Retrieves the maximal (key,value) pair of the map, and -- the map stripped of that element, or 'Nothing' if passed an empty map. -- -- > maxViewWithKey (fromList [(5,"a"), (3,"b")]) == Just ((5,"a"), singleton 3 "b") -- > maxViewWithKey empty == Nothing maxViewWithKey :: Word64Map a -> Maybe ((Key, a), Word64Map a) maxViewWithKey t = case t of Nil -> Nothing _ -> Just $ case maxViewWithKeySure t of View k v t' -> ((k, v), t') {-# INLINE maxViewWithKey #-} maxViewWithKeySure :: Word64Map a -> View a maxViewWithKeySure t = case t of Nil -> error "maxViewWithKeySure Nil" Bin p m l r | m < 0 -> case go l of View k a l' -> View k a (binCheckLeft p m l' r) _ -> go t where go (Bin p m l r) = case go r of View k a r' -> View k a (binCheckRight p m l r') go (Tip k y) = View k y Nil go Nil = error "maxViewWithKey_go Nil" -- See note on NOINLINE at minViewWithKeySure {-# NOINLINE maxViewWithKeySure #-} -- | \(O(\min(n,W))\). Retrieves the minimal (key,value) pair of the map, and -- the map stripped of that element, or 'Nothing' if passed an empty map. 
-- -- > minViewWithKey (fromList [(5,"a"), (3,"b")]) == Just ((3,"b"), singleton 5 "a") -- > minViewWithKey empty == Nothing minViewWithKey :: Word64Map a -> Maybe ((Key, a), Word64Map a) minViewWithKey t = case t of Nil -> Nothing _ -> Just $ case minViewWithKeySure t of View k v t' -> ((k, v), t') -- We inline this to give GHC the best possible chance of -- getting rid of the Maybe, pair, and Int constructors, as -- well as a thunk under the Just. That is, we really want to -- be certain this inlines! {-# INLINE minViewWithKey #-} minViewWithKeySure :: Word64Map a -> View a minViewWithKeySure t = case t of Nil -> error "minViewWithKeySure Nil" Bin p m l r | m < 0 -> case go r of View k a r' -> View k a (binCheckRight p m l r') _ -> go t where go (Bin p m l r) = case go l of View k a l' -> View k a (binCheckLeft p m l' r) go (Tip k y) = View k y Nil go Nil = error "minViewWithKey_go Nil" -- There's never anything significant to be gained by inlining -- this. Sufficiently recent GHC versions will inline the wrapper -- anyway, which should be good enough. {-# NOINLINE minViewWithKeySure #-} -- | \(O(\min(n,W))\). Update the value at the maximal key. -- -- > updateMax (\ a -> Just ("X" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "Xa")] -- > updateMax (\ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 3 "b" updateMax :: (a -> Maybe a) -> Word64Map a -> Word64Map a updateMax f = updateMaxWithKey (const f) -- | \(O(\min(n,W))\). Update the value at the minimal key. -- -- > updateMin (\ a -> Just ("X" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3, "Xb"), (5, "a")] -- > updateMin (\ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 5 "a" updateMin :: (a -> Maybe a) -> Word64Map a -> Word64Map a updateMin f = updateMinWithKey (const f) -- | \(O(\min(n,W))\). Retrieves the maximal key of the map, and the map -- stripped of that element, or 'Nothing' if passed an empty map. 
maxView :: Word64Map a -> Maybe (a, Word64Map a)
maxView m = do
  ((_, v), rest) <- maxViewWithKey m
  Just (v, rest)

-- | \(O(\min(n,W))\). Retrieves the minimal key of the map, and the map
-- stripped of that element, or 'Nothing' if passed an empty map.
minView :: Word64Map a -> Maybe (a, Word64Map a)
minView m = do
  ((_, v), rest) <- minViewWithKey m
  Just (v, rest)

-- | \(O(\min(n,W))\). Delete and find the maximal element.
-- This function throws an error if the map is empty. Use 'maxViewWithKey'
-- if the map may be empty.
deleteFindMax :: Word64Map a -> ((Key, a), Word64Map a)
deleteFindMax m =
  case maxViewWithKey m of
    Just res -> res
    Nothing  -> error "deleteFindMax: empty map has no maximal element"

-- | \(O(\min(n,W))\). Delete and find the minimal element.
-- This function throws an error if the map is empty. Use 'minViewWithKey'
-- if the map may be empty.
deleteFindMin :: Word64Map a -> ((Key, a), Word64Map a)
deleteFindMin m =
  case minViewWithKey m of
    Just res -> res
    Nothing  -> error "deleteFindMin: empty map has no minimal element"

-- | \(O(\min(n,W))\). The minimal key of the map. Returns 'Nothing' if the map is empty.
lookupMin :: Word64Map a -> Maybe (Key, a)
lookupMin m =
  case m of
    Nil           -> Nothing
    Tip k v       -> Just (k, v)
    -- A root mask that reads negative means the subtrees are stored in
    -- swapped order, so the smallest keys live in the right child.
    Bin _ msk l r -> leftmost (if msk < 0 then r else l)
  where
    leftmost (Bin _ _ l' _) = leftmost l'
    leftmost (Tip k v)      = Just (k, v)
    leftmost Nil            = Nothing

-- | \(O(\min(n,W))\). The minimal key of the map. Calls 'error' if the map is empty.
-- Use 'minViewWithKey' if the map may be empty.
findMin :: Word64Map a -> (Key, a)
findMin = fromMaybe (error "findMin: empty map has no minimal element") . lookupMin

-- | \(O(\min(n,W))\). The maximal key of the map. Returns 'Nothing' if the map is empty.
lookupMax :: Word64Map a -> Maybe (Key, a)
lookupMax m =
  case m of
    Nil           -> Nothing
    Tip k v       -> Just (k, v)
    Bin _ msk l r -> rightmost (if msk < 0 then l else r)
  where
    rightmost (Bin _ _ _ r') = rightmost r'
    rightmost (Tip k v)      = Just (k, v)
    rightmost Nil            = Nothing

-- | \(O(\min(n,W))\). The maximal key of the map.
-- Calls 'error' if the map is empty.
-- Use 'maxViewWithKey' if the map may be empty.
findMax :: Word64Map a -> (Key, a)
findMax t
  | Just r <- lookupMax t = r
  | otherwise = error "findMax: empty map has no maximal element"

-- | \(O(\min(n,W))\). Delete the minimal key. Returns an empty map if the map is empty.
--
-- Note that this is a change of behaviour for consistency with 'Data.Map.Map' –
-- versions prior to 0.5 threw an error if the 'Word64Map' was already empty.
deleteMin :: Word64Map a -> Word64Map a
deleteMin = maybe Nil snd . minView

-- | \(O(\min(n,W))\). Delete the maximal key. Returns an empty map if the map is empty.
--
-- Note that this is a change of behaviour for consistency with 'Data.Map.Map' –
-- versions prior to 0.5 threw an error if the 'Word64Map' was already empty.
deleteMax :: Word64Map a -> Word64Map a
deleteMax = maybe Nil snd . maxView


{--------------------------------------------------------------------
  Submap
--------------------------------------------------------------------}
-- | \(O(n+m)\). Is this a proper submap? (ie. a submap but not equal).
-- Defined as (@'isProperSubmapOf' = 'isProperSubmapOfBy' (==)@).
isProperSubmapOf :: Eq a => Word64Map a -> Word64Map a -> Bool
isProperSubmapOf m1 m2
  = isProperSubmapOfBy (==) m1 m2

{- | \(O(n+m)\). Is this a proper submap? (ie. a submap but not equal).
 The expression (@'isProperSubmapOfBy' f m1 m2@) returns 'True' when
 @keys m1@ and @keys m2@ are not equal,
 all keys in @m1@ are in @m2@, and when @f@ returns 'True' when
 applied to their respective values.
 For example, the following
 expressions are all 'True':

 > isProperSubmapOfBy (==) (fromList [(1,1)]) (fromList [(1,1),(2,2)])
 > isProperSubmapOfBy (<=) (fromList [(1,1)]) (fromList [(1,1),(2,2)])

 But the following are all 'False':

 > isProperSubmapOfBy (==) (fromList [(1,1),(2,2)]) (fromList [(1,1),(2,2)])
 > isProperSubmapOfBy (==) (fromList [(1,1),(2,2)]) (fromList [(1,1)])
 > isProperSubmapOfBy (<) (fromList [(1,1)]) (fromList [(1,1),(2,2)])
-}
isProperSubmapOfBy :: (a -> b -> Bool) -> Word64Map a -> Word64Map b -> Bool
isProperSubmapOfBy predicate t1 t2
  = case submapCmp predicate t1 t2 of
      LT -> True
      _  -> False

-- Compare two maps under the submap relation:
--   LT: proper submap, EQ: same keys (values related by the predicate),
--   GT: not a submap.
submapCmp :: (a -> b -> Bool) -> Word64Map a -> Word64Map b -> Ordering
submapCmp predicate t1@(Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2)
  | shorter m1 m2 = GT
  | shorter m2 m1 = submapCmpLt
  | p1 == p2      = submapCmpEq
  | otherwise     = GT -- disjoint
  where
    -- t1 covers a narrower prefix than t2: descend into the matching half.
    submapCmpLt | nomatch p1 p2 m2 = GT
                | zero p1 m2       = submapCmp predicate t1 l2
                | otherwise        = submapCmp predicate t1 r2
    submapCmpEq = case (submapCmp predicate l1 l2, submapCmp predicate r1 r2) of
                    (GT,_ ) -> GT
                    (_ ,GT) -> GT
                    (EQ,EQ) -> EQ
                    _       -> LT
submapCmp _ (Bin _ _ _ _) _ = GT
submapCmp predicate (Tip kx x) (Tip ky y)
  | (kx == ky) && predicate x y = EQ
  | otherwise = GT -- disjoint
submapCmp predicate (Tip k x) t
  = case lookup k t of
     Just y | predicate x y -> LT
     _ -> GT -- disjoint
submapCmp _ Nil Nil = EQ
submapCmp _ Nil _   = LT

-- | \(O(n+m)\). Is this a submap?
-- Defined as (@'isSubmapOf' = 'isSubmapOfBy' (==)@).
isSubmapOf :: Eq a => Word64Map a -> Word64Map a -> Bool
isSubmapOf m1 m2
  = isSubmapOfBy (==) m1 m2

{- | \(O(n+m)\).
 The expression (@'isSubmapOfBy' f m1 m2@) returns 'True' if
 all keys in @m1@ are in @m2@, and when @f@ returns 'True' when
 applied to their respective values.
 For example, the following
 expressions are all 'True':
 > isSubmapOfBy (==) (fromList [(1,1)]) (fromList [(1,1),(2,2)])
 > isSubmapOfBy (<=) (fromList [(1,1)]) (fromList [(1,1),(2,2)])
 > isSubmapOfBy (==) (fromList [(1,1),(2,2)]) (fromList [(1,1),(2,2)])

 But the following are all 'False':
 > isSubmapOfBy (==) (fromList [(1,2)]) (fromList [(1,1),(2,2)])
 > isSubmapOfBy (<) (fromList [(1,1)]) (fromList [(1,1),(2,2)])
 > isSubmapOfBy (==) (fromList [(1,1),(2,2)]) (fromList [(1,1)])
-}
isSubmapOfBy :: (a -> b -> Bool) -> Word64Map a -> Word64Map b -> Bool
isSubmapOfBy predicate t1@(Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2)
  -- A map with a shorter (wider) mask spans more keys than the candidate
  -- supermap can cover, so it cannot be a submap.
  | shorter m1 m2 = False
  | shorter m2 m1 = match p1 p2 m2 &&
                      if zero p1 m2
                      then isSubmapOfBy predicate t1 l2
                      else isSubmapOfBy predicate t1 r2
  | otherwise = (p1==p2) && isSubmapOfBy predicate l1 l2 && isSubmapOfBy predicate r1 r2
isSubmapOfBy _ (Bin _ _ _ _) _ = False
isSubmapOfBy predicate (Tip k x) t = case lookup k t of
   Just y  -> predicate x y
   Nothing -> False
isSubmapOfBy _ Nil _ = True

{--------------------------------------------------------------------
  Mapping
--------------------------------------------------------------------}
-- | \(O(n)\). Map a function over all values in the map.
--
-- > map (++ "x") (fromList [(5,"a"), (3,"b")]) == fromList [(3, "bx"), (5, "ax")]
map :: (a -> b) -> Word64Map a -> Word64Map b
map f = go
  where
    go (Bin p m l r) = Bin p m (go l) (go r)
    go (Tip k x)     = Tip k (f x)
    go Nil           = Nil

#ifdef __GLASGOW_HASKELL__
-- NOINLINE until phase 1 so the rewrite rules below can fire first.
{-# NOINLINE [1] map #-}
{-# RULES
"map/map" forall f g xs . map f (map g xs) = map (f . g) xs
"map/coerce" map coerce = coerce
 #-}
#endif

-- | \(O(n)\). Map a function over all values in the map.
--
-- > let f key x = (show key) ++ ":" ++ x
-- > mapWithKey f (fromList [(5,"a"), (3,"b")]) == fromList [(3, "3:b"), (5, "5:a")]
mapWithKey :: (Key -> a -> b) -> Word64Map a -> Word64Map b
mapWithKey f t
  = case t of
      Bin p m l r -> Bin p m (mapWithKey f l) (mapWithKey f r)
      Tip k x     -> Tip k (f k x)
      Nil         -> Nil

#ifdef __GLASGOW_HASKELL__
-- NOINLINE until phase 1 so the fusion rules below can fire first.
{-# NOINLINE [1] mapWithKey #-}
{-# RULES
"mapWithKey/mapWithKey" forall f g xs . mapWithKey f (mapWithKey g xs) = mapWithKey (\k a -> f k (g k a)) xs
"mapWithKey/map" forall f g xs . mapWithKey f (map g xs) = mapWithKey (\k a -> f k (g a)) xs
"map/mapWithKey" forall f g xs . map f (mapWithKey g xs) = mapWithKey (\k a -> f (g k a)) xs
 #-}
#endif

-- | \(O(n)\).
-- @'traverseWithKey' f s == 'fromList' <$> 'traverse' (\(k, v) -> (,) k <$> f k v) ('toList' m)@
-- That is, behaves exactly like a regular 'traverse' except that the traversing
-- function also has access to the key associated with a value.
--
-- > traverseWithKey (\k v -> if odd k then Just (succ v) else Nothing) (fromList [(1, 'a'), (5, 'e')]) == Just (fromList [(1, 'b'), (5, 'f')])
-- > traverseWithKey (\k v -> if odd k then Just (succ v) else Nothing) (fromList [(2, 'c')]) == Nothing
traverseWithKey :: Applicative t => (Key -> a -> t b) -> Word64Map a -> t (Word64Map b)
traverseWithKey f = go
  where
    go Nil = pure Nil
    go (Tip k v) = Tip k <$> f k v
    go (Bin p m l r)
      -- NOTE(review): with a negative root mask the subtrees are visited
      -- right-then-left — presumably so effects run in ascending key order,
      -- mirroring the traversal order used by 'foldr' below.
      | m < 0     = liftA2 (flip (Bin p m)) (go r) (go l)
      | otherwise = liftA2 (Bin p m) (go l) (go r)
{-# INLINE traverseWithKey #-}

-- | \(O(n)\). The function @'mapAccum'@ threads an accumulating
-- argument through the map in ascending order of keys.
--
-- > let f a b = (a ++ b, b ++ "X")
-- > mapAccum f "Everything: " (fromList [(5,"a"), (3,"b")]) == ("Everything: ba", fromList [(3, "bX"), (5, "aX")])
mapAccum :: (a -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccum f = mapAccumWithKey (\a' _ x -> f a' x)

-- | \(O(n)\).
-- The function @'mapAccumWithKey'@ threads an accumulating
-- argument through the map in ascending order of keys.
--
-- > let f a k b = (a ++ " " ++ (show k) ++ "-" ++ b, b ++ "X")
-- > mapAccumWithKey f "Everything:" (fromList [(5,"a"), (3,"b")]) == ("Everything: 3-b 5-a", fromList [(3, "bX"), (5, "aX")])
mapAccumWithKey :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumWithKey f a t
  = mapAccumL f a t

-- | \(O(n)\). The function @'mapAccumL'@ threads an accumulating
-- argument through the map in ascending order of keys.
mapAccumL :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumL f a t
  = case t of
      Bin p m l r
        -- Negative root mask: subtrees are stored in swapped order, so
        -- visit the right child first to keep ascending key order.
        | m < 0 ->
            let (a1,r') = mapAccumL f a r
                (a2,l') = mapAccumL f a1 l
            in (a2,Bin p m l' r')
        | otherwise ->
            let (a1,l') = mapAccumL f a l
                (a2,r') = mapAccumL f a1 r
            in (a2,Bin p m l' r')
      Tip k x -> let (a',x') = f a k x in (a',Tip k x')
      Nil -> (a,Nil)

-- | \(O(n)\). The function @'mapAccumRWithKey'@ threads an accumulating
-- argument through the map in descending order of keys.
mapAccumRWithKey :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumRWithKey f a t
  = case t of
      Bin p m l r
        | m < 0 ->
            let (a1,l') = mapAccumRWithKey f a l
                (a2,r') = mapAccumRWithKey f a1 r
            in (a2,Bin p m l' r')
        | otherwise ->
            let (a1,r') = mapAccumRWithKey f a r
                (a2,l') = mapAccumRWithKey f a1 l
            in (a2,Bin p m l' r')
      Tip k x -> let (a',x') = f a k x in (a',Tip k x')
      Nil -> (a,Nil)

-- | \(O(n \min(n,W))\).
-- @'mapKeys' f s@ is the map obtained by applying @f@ to each key of @s@.
--
-- The size of the result may be smaller if @f@ maps two or more distinct
-- keys to the same new key. In this case the value at the greatest of the
-- original keys is retained.
--
-- > mapKeys (+ 1) (fromList [(5,"a"), (3,"b")]) == fromList [(4, "b"), (6, "a")]
-- > mapKeys (\ _ -> 1) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 1 "c"
-- > mapKeys (\ _ -> 3) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 3 "c"
mapKeys :: (Key->Key) -> Word64Map a -> Word64Map a
mapKeys f m = fromList (foldrWithKey collect [] m)
  where
    -- Rebuild the association list with transformed keys; 'fromList'
    -- resolves collisions (later inserts win, i.e. the greatest original
    -- key, because the fold lists keys in ascending order).
    collect k x acc = (f k, x) : acc

-- | \(O(n \min(n,W))\).
-- @'mapKeysWith' c f s@ is the map obtained by applying @f@ to each key of @s@.
--
-- The size of the result may be smaller if @f@ maps two or more distinct
-- keys to the same new key. In this case the associated values will be
-- combined using @c@.
--
-- > mapKeysWith (++) (\ _ -> 1) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 1 "cdab"
-- > mapKeysWith (++) (\ _ -> 3) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 3 "cdab"
mapKeysWith :: (a -> a -> a) -> (Key->Key) -> Word64Map a -> Word64Map a
mapKeysWith c f m = fromListWith c (foldrWithKey collect [] m)
  where
    collect k x acc = (f k, x) : acc

-- | \(O(n \min(n,W))\).
-- @'mapKeysMonotonic' f s == 'mapKeys' f s@, but works only when @f@
-- is strictly monotonic.
-- That is, for any values @x@ and @y@, if @x@ < @y@ then @f x@ < @f y@.
-- /The precondition is not checked./
-- Semi-formally, we have:
--
-- > and [x < y ==> f x < f y | x <- ls, y <- ls]
-- >                     ==> mapKeysMonotonic f s == mapKeys f s
-- >     where ls = keys s
--
-- This means that @f@ maps distinct original keys to distinct resulting keys.
-- This function has slightly better performance than 'mapKeys'.
--
-- > mapKeysMonotonic (\ k -> k * 2) (fromList [(5,"a"), (3,"b")]) == fromList [(6, "b"), (10, "a")]
mapKeysMonotonic :: (Key->Key) -> Word64Map a -> Word64Map a
mapKeysMonotonic f m = fromDistinctAscList (foldrWithKey collect [] m)
  where
    collect k x acc = (f k, x) : acc

{--------------------------------------------------------------------
  Filter
--------------------------------------------------------------------}
-- | \(O(n)\).
-- Filter all values that satisfy some predicate.
--
-- > filter (> "a") (fromList [(5,"a"), (3,"b")]) == singleton 3 "b"
-- > filter (> "x") (fromList [(5,"a"), (3,"b")]) == empty
-- > filter (< "a") (fromList [(5,"a"), (3,"b")]) == empty
filter :: (a -> Bool) -> Word64Map a -> Word64Map a
filter p m
  = filterWithKey (\_ x -> p x) m

-- | \(O(n)\). Filter all keys\/values that satisfy some predicate.
--
-- > filterWithKey (\k _ -> k > 4) (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
filterWithKey :: (Key -> a -> Bool) -> Word64Map a -> Word64Map a
filterWithKey predicate = go
  where
    go Nil = Nil
    go t@(Tip k x) = if predicate k x then t else Nil
    -- Smart constructor 'bin' (rather than 'Bin') collapses subtrees that
    -- became empty after filtering.
    go (Bin p m l r) = bin p m (go l) (go r)

-- | \(O(n)\). Partition the map according to some predicate. The first
-- map contains all elements that satisfy the predicate, the second all
-- elements that fail the predicate. See also 'split'.
--
-- > partition (> "a") (fromList [(5,"a"), (3,"b")]) == (singleton 3 "b", singleton 5 "a")
-- > partition (< "x") (fromList [(5,"a"), (3,"b")]) == (fromList [(3, "b"), (5, "a")], empty)
-- > partition (> "x") (fromList [(5,"a"), (3,"b")]) == (empty, fromList [(3, "b"), (5, "a")])
partition :: (a -> Bool) -> Word64Map a -> (Word64Map a,Word64Map a)
partition p m
  = partitionWithKey (\_ x -> p x) m

-- | \(O(n)\). Partition the map according to some predicate. The first
-- map contains all elements that satisfy the predicate, the second all
-- elements that fail the predicate. See also 'split'.
--
-- > partitionWithKey (\ k _ -> k > 3) (fromList [(5,"a"), (3,"b")]) == (singleton 5 "a", singleton 3 "b")
-- > partitionWithKey (\ k _ -> k < 7) (fromList [(5,"a"), (3,"b")]) == (fromList [(3, "b"), (5, "a")], empty)
-- > partitionWithKey (\ k _ -> k > 7) (fromList [(5,"a"), (3,"b")]) == (empty, fromList [(3, "b"), (5, "a")])
partitionWithKey :: (Key -> a -> Bool) -> Word64Map a -> (Word64Map a,Word64Map a)
partitionWithKey predicate0 t0 = toPair $ go predicate0 t0
  where
    -- The strict pair (:*:) avoids allocating lazy tuples at every node;
    -- 'toPair' converts to an ordinary pair at the very end.
    go predicate t =
      case t of
        Bin p m l r ->
          let (l1 :*: l2) = go predicate l
              (r1 :*: r2) = go predicate r
          in bin p m l1 r1 :*: bin p m l2 r2
        Tip k x
          | predicate k x -> (t :*: Nil)
          | otherwise     -> (Nil :*: t)
        Nil -> (Nil :*: Nil)

-- | \(O(\min(n,W))\). Take while a predicate on the keys holds.
-- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@.
-- See note at 'spanAntitone'.
--
-- @
-- takeWhileAntitone p = 'fromDistinctAscList' . 'Data.List.takeWhile' (p . fst) . 'toList'
-- takeWhileAntitone p = 'filterWithKey' (\\k _ -> p k)
-- @
--
-- @since 0.6.7
takeWhileAntitone :: (Key -> Bool) -> Word64Map a -> Word64Map a
takeWhileAntitone predicate t =
  case t of
    Bin p m l r
      -- NOTE(review): branch inherited from IntMap's sign-bit handling;
      -- confirm whether it is reachable when 'Mask' is an unsigned type.
      | m < 0 ->
        if predicate 0 -- handle negative numbers.
        then bin p m (go predicate l) r
        else go predicate r
    _ -> go predicate t
  where
    -- @p+m@ is the least key possible in the right subtree (presumably, by
    -- the Bin prefix invariant declared outside this chunk); if the antitone
    -- predicate holds there, the entire left subtree is kept.
    go predicate' (Bin p m l r)
      | predicate' $! p+m = bin p m l (go predicate' r)
      | otherwise = go predicate' l
    go predicate' t'@(Tip ky _)
      | predicate' ky = t'
      | otherwise = Nil
    go _ Nil = Nil

-- | \(O(\min(n,W))\). Drop while a predicate on the keys holds.
-- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@.
-- See note at 'spanAntitone'.
--
-- @
-- dropWhileAntitone p = 'fromDistinctAscList' . 'Data.List.dropWhile' (p . fst) .
-- 'toList'
-- dropWhileAntitone p = 'filterWithKey' (\\k _ -> not (p k))
-- @
--
-- @since 0.6.7
dropWhileAntitone :: (Key -> Bool) -> Word64Map a -> Word64Map a
dropWhileAntitone predicate t =
  case t of
    Bin p m l r
      | m < 0 ->
        if predicate 0 -- handle negative numbers.
        then go predicate l
        else bin p m l (go predicate r)
    _ -> go predicate t
  where
    -- Mirror image of 'takeWhileAntitone': drop the left subtree whenever
    -- the predicate still holds at the least key of the right subtree.
    go predicate' (Bin p m l r)
      | predicate' $! p+m = go predicate' r
      | otherwise = bin p m (go predicate' l) r
    go predicate' t'@(Tip ky _)
      | predicate' ky = Nil
      | otherwise = t'
    go _ Nil = Nil

-- | \(O(\min(n,W))\). Divide a map at the point where a predicate on the keys stops holding.
-- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@.
--
-- @
-- spanAntitone p xs = ('takeWhileAntitone' p xs, 'dropWhileAntitone' p xs)
-- spanAntitone p xs = 'partitionWithKey' (\\k _ -> p k) xs
-- @
--
-- Note: if @p@ is not actually antitone, then @spanAntitone@ will split the map
-- at some /unspecified/ point.
--
-- @since 0.6.7
spanAntitone :: (Key -> Bool) -> Word64Map a -> (Word64Map a, Word64Map a)
spanAntitone predicate t =
  case t of
    Bin p m l r
      | m < 0 ->
        if predicate 0 -- handle negative numbers.
        then
          case go predicate l of
            (lt :*: gt) ->
              let !lt' = bin p m lt r
              in (lt', gt)
        else
          case go predicate r of
            (lt :*: gt) ->
              let !gt' = bin p m l gt
              in (lt, gt')
    _ -> case go predicate t of
          (lt :*: gt) -> (lt, gt)
  where
    -- One pass computing both halves as a strict pair.
    go predicate' (Bin p m l r)
      | predicate' $! p+m = case go predicate' r of (lt :*: gt) -> bin p m l lt :*: gt
      | otherwise = case go predicate' l of (lt :*: gt) -> lt :*: bin p m gt r
    go predicate' t'@(Tip ky _)
      | predicate' ky = (t' :*: Nil)
      | otherwise = (Nil :*: t')
    go _ Nil = (Nil :*: Nil)

-- | \(O(n)\). Map values and collect the 'Just' results.
--
-- > let f x = if x == "a" then Just "new a" else Nothing
-- > mapMaybe f (fromList [(5,"a"), (3,"b")]) == singleton 5 "new a"
mapMaybe :: (a -> Maybe b) -> Word64Map a -> Word64Map b
mapMaybe f = mapMaybeWithKey (\_ x -> f x)

-- | \(O(n)\).
-- Map keys\/values and collect the 'Just' results.
--
-- > let f k _ = if k < 5 then Just ("key : " ++ (show k)) else Nothing
-- > mapMaybeWithKey f (fromList [(5,"a"), (3,"b")]) == singleton 3 "key : 3"
mapMaybeWithKey :: (Key -> a -> Maybe b) -> Word64Map a -> Word64Map b
-- 'bin' collapses subtrees emptied by the mapping function.
mapMaybeWithKey f (Bin p m l r)
  = bin p m (mapMaybeWithKey f l) (mapMaybeWithKey f r)
mapMaybeWithKey f (Tip k x) = case f k x of
  Just y  -> Tip k y
  Nothing -> Nil
mapMaybeWithKey _ Nil = Nil

-- | \(O(n)\). Map values and separate the 'Left' and 'Right' results.
--
-- > let f a = if a < "c" then Left a else Right a
-- > mapEither f (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (fromList [(3,"b"), (5,"a")], fromList [(1,"x"), (7,"z")])
-- >
-- > mapEither (\ a -> Right a) (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (empty, fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
mapEither :: (a -> Either b c) -> Word64Map a -> (Word64Map b, Word64Map c)
mapEither f m
  = mapEitherWithKey (\_ x -> f x) m

-- | \(O(n)\). Map keys\/values and separate the 'Left' and 'Right' results.
--
-- > let f k a = if k < 5 then Left (k * 2) else Right (a ++ a)
-- > mapEitherWithKey f (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (fromList [(1,2), (3,6)], fromList [(5,"aa"), (7,"zz")])
-- >
-- > mapEitherWithKey (\_ a -> Right a) (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (empty, fromList [(1,"x"), (3,"b"), (5,"a"), (7,"z")])
mapEitherWithKey :: (Key -> a -> Either b c) -> Word64Map a -> (Word64Map b, Word64Map c)
mapEitherWithKey f0 t0 = toPair $ go f0 t0
  where
    -- One pass building both result maps as a strict pair.
    go f (Bin p m l r)
      = bin p m l1 r1 :*: bin p m l2 r2
      where
        (l1 :*: l2) = go f l
        (r1 :*: r2) = go f r
    go f (Tip k x) = case f k x of
      Left y  -> (Tip k y :*: Nil)
      Right z -> (Nil :*: Tip k z)
    go _ Nil = (Nil :*: Nil)

-- | \(O(\min(n,W))\). The expression (@'split' k map@) is a pair @(map1,map2)@
-- where all keys in @map1@ are lower than @k@ and all keys in
-- @map2@ larger than @k@.
-- Any key equal to @k@ is found in neither @map1@ nor @map2@.
--
-- > split 2 (fromList [(5,"a"), (3,"b")]) == (empty, fromList [(3,"b"), (5,"a")])
-- > split 3 (fromList [(5,"a"), (3,"b")]) == (empty, singleton 5 "a")
-- > split 4 (fromList [(5,"a"), (3,"b")]) == (singleton 3 "b", singleton 5 "a")
-- > split 5 (fromList [(5,"a"), (3,"b")]) == (singleton 3 "b", empty)
-- > split 6 (fromList [(5,"a"), (3,"b")]) == (fromList [(3,"b"), (5,"a")], empty)
split :: Key -> Word64Map a -> (Word64Map a, Word64Map a)
split k t =
  case t of
    Bin p m l r
      -- NOTE(review): @k >= 0@ is trivially true if 'Key' is 'Word64';
      -- branch inherited from IntMap's sign-bit handling — confirm against
      -- the 'Key'/'Mask' declarations outside this chunk.
      | m < 0 ->
        if k >= 0 -- handle negative numbers.
        then
          case go k l of
            (lt :*: gt) ->
              let !lt' = bin p m lt r
              in (lt', gt)
        else
          case go k r of
            (lt :*: gt) ->
              let !gt' = bin p m l gt
              in (lt, gt')
    _ -> case go k t of
          (lt :*: gt) -> (lt, gt)
  where
    -- Strict-pair worker: everything below / above the pivot key.
    go k' t'@(Bin p m l r)
      | nomatch k' p m = if k' > p then t' :*: Nil else Nil :*: t'
      | zero k' m = case go k' l of (lt :*: gt) -> lt :*: bin p m gt r
      | otherwise = case go k' r of (lt :*: gt) -> bin p m l lt :*: gt
    go k' t'@(Tip ky _)
      | k' > ky = (t' :*: Nil)
      | k' < ky = (Nil :*: t')
      | otherwise = (Nil :*: Nil)
    go _ Nil = (Nil :*: Nil)

-- Result of 'splitLookup': strict triple of the submap of smaller keys,
-- the value at the pivot key (if present), and the submap of larger keys.
data SplitLookup a = SplitLookup !(Word64Map a) !(Maybe a) !(Word64Map a)

-- Apply a function to the "less than" component of a 'SplitLookup'.
mapLT :: (Word64Map a -> Word64Map a) -> SplitLookup a -> SplitLookup a
mapLT f (SplitLookup lt fnd gt) = SplitLookup (f lt) fnd gt
{-# INLINE mapLT #-}

-- Apply a function to the "greater than" component of a 'SplitLookup'.
mapGT :: (Word64Map a -> Word64Map a) -> SplitLookup a -> SplitLookup a
mapGT f (SplitLookup lt fnd gt) = SplitLookup lt fnd (f gt)
{-# INLINE mapGT #-}

-- | \(O(\min(n,W))\). Performs a 'split' but also returns whether the pivot
-- key was found in the original map.
--
-- > splitLookup 2 (fromList [(5,"a"), (3,"b")]) == (empty, Nothing, fromList [(3,"b"), (5,"a")])
-- > splitLookup 3 (fromList [(5,"a"), (3,"b")]) == (empty, Just "b", singleton 5 "a")
-- > splitLookup 4 (fromList [(5,"a"), (3,"b")]) == (singleton 3 "b", Nothing, singleton 5 "a")
-- > splitLookup 5 (fromList [(5,"a"), (3,"b")]) == (singleton 3 "b", Just "a", empty)
-- > splitLookup 6 (fromList [(5,"a"), (3,"b")]) == (fromList [(3,"b"), (5,"a")], Nothing, empty)
splitLookup :: Key -> Word64Map a -> (Word64Map a, Maybe a, Word64Map a)
splitLookup k t =
  -- The inner case handles the special root 'Bin' (negative mask, swapped
  -- subtree order); the outer case unpacks the strict 'SplitLookup' triple.
  case
    case t of
      Bin p m l r
        | m < 0 ->
          if k >= 0 -- handle negative numbers.
          then mapLT (flip (bin p m) r) (go k l)
          else mapGT (bin p m l) (go k r)
      _ -> go k t
  of SplitLookup lt fnd gt -> (lt, fnd, gt)
  where
    go k' t'@(Bin p m l r)
      | nomatch k' p m =
          if k' > p
          then SplitLookup t' Nothing Nil
          else SplitLookup Nil Nothing t'
      | zero k' m = mapGT (flip (bin p m) r) (go k' l)
      | otherwise = mapLT (bin p m l) (go k' r)
    go k' t'@(Tip ky y)
      | k' > ky = SplitLookup t' Nothing Nil
      | k' < ky = SplitLookup Nil Nothing t'
      | otherwise = SplitLookup Nil (Just y) Nil
    go _ Nil = SplitLookup Nil Nothing Nil

{--------------------------------------------------------------------
  Fold
--------------------------------------------------------------------}
-- | \(O(n)\). Fold the values in the map using the given right-associative
-- binary operator, such that @'foldr' f z == 'Prelude.foldr' f z . 'elems'@.
--
-- For example,
--
-- > elems map = foldr (:) [] map
--
-- > let f a len = len + (length a)
-- > foldr f 0 (fromList [(5,"a"), (3,"bbb")]) == 4
foldr :: (a -> b -> b) -> b -> Word64Map a -> b
foldr f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    go z' Nil           = z'
    go z' (Tip _ x)     = f x z'
    go z' (Bin _ _ l r) = go (go z' r) l
{-# INLINE foldr #-}

-- | \(O(n)\). A strict version of 'foldr'.
-- Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldr' :: (a -> b -> b) -> b -> Word64Map a -> b
foldr' f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    -- The bang pattern in the first clause forces the accumulator on
    -- every call, making the fold strict.
    go !z' Nil          = z'
    go z' (Tip _ x)     = f x z'
    go z' (Bin _ _ l r) = go (go z' r) l
{-# INLINE foldr' #-}

-- | \(O(n)\). Fold the values in the map using the given left-associative
-- binary operator, such that @'foldl' f z == 'Prelude.foldl' f z . 'elems'@.
--
-- For example,
--
-- > elems = reverse . foldl (flip (:)) []
--
-- > let f len a = len + (length a)
-- > foldl f 0 (fromList [(5,"a"), (3,"bbb")]) == 4
foldl :: (a -> b -> a) -> a -> Word64Map b -> a
foldl f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go z' Nil           = z'
    go z' (Tip _ x)     = f z' x
    go z' (Bin _ _ l r) = go (go z' l) r
{-# INLINE foldl #-}

-- | \(O(n)\). A strict version of 'foldl'. Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldl' :: (a -> b -> a) -> a -> Word64Map b -> a
foldl' f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go !z' Nil          = z'
    go z' (Tip _ x)     = f z' x
    go z' (Bin _ _ l r) = go (go z' l) r
{-# INLINE foldl' #-}

-- | \(O(n)\). Fold the keys and values in the map using the given right-associative
-- binary operator, such that
-- @'foldrWithKey' f z == 'Prelude.foldr' ('uncurry' f) z . 'toAscList'@.
--
-- For example,
--
-- > keys map = foldrWithKey (\k x ks -> k:ks) [] map
--
-- > let f k a result = result ++ "(" ++ (show k) ++ ":" ++ a ++ ")"
-- > foldrWithKey f "Map: " (fromList [(5,"a"), (3,"b")]) == "Map: (5:a)(3:b)"
foldrWithKey :: (Key -> a -> b -> b) -> b -> Word64Map a -> b
foldrWithKey f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    go z' Nil           = z'
    go z' (Tip kx x)    = f kx x z'
    go z' (Bin _ _ l r) = go (go z' r) l
{-# INLINE foldrWithKey #-}

-- | \(O(n)\). A strict version of 'foldrWithKey'. Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldrWithKey' :: (Key -> a -> b -> b) -> b -> Word64Map a -> b
foldrWithKey' f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    -- Bang pattern forces the accumulator at every step.
    go !z' Nil          = z'
    go z' (Tip kx x)    = f kx x z'
    go z' (Bin _ _ l r) = go (go z' r) l
{-# INLINE foldrWithKey' #-}

-- | \(O(n)\). Fold the keys and values in the map using the given left-associative
-- binary operator, such that
-- @'foldlWithKey' f z == 'Prelude.foldl' (\\z' (kx, x) -> f z' kx x) z . 'toAscList'@.
--
-- For example,
--
-- > keys = reverse . foldlWithKey (\ks k x -> k:ks) []
--
-- > let f result k a = result ++ "(" ++ (show k) ++ ":" ++ a ++ ")"
-- > foldlWithKey f "Map: " (fromList [(5,"a"), (3,"b")]) == "Map: (3:b)(5:a)"
foldlWithKey :: (a -> Key -> b -> a) -> a -> Word64Map b -> a
foldlWithKey f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go z' Nil           = z'
    go z' (Tip kx x)    = f z' kx x
    go z' (Bin _ _ l r) = go (go z' l) r
{-# INLINE foldlWithKey #-}

-- | \(O(n)\). A strict version of 'foldlWithKey'. Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldlWithKey' :: (a -> Key -> b -> a) -> a -> Word64Map b -> a
foldlWithKey' f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0 -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go !z' Nil          = z'
    go z' (Tip kx x)    = f z' kx x
    go z' (Bin _ _ l r) = go (go z' l) r
{-# INLINE foldlWithKey' #-}

-- | \(O(n)\). Fold the keys and values in the map using the given monoid, such that
--
-- @'foldMapWithKey' f = 'Prelude.fold' . 'mapWithKey' f@
--
-- This can be an asymptotically faster than 'foldrWithKey' or 'foldlWithKey' for some monoids.
--
-- @since 0.5.4
foldMapWithKey :: Monoid m => (Key -> a -> m) -> Word64Map a -> m
foldMapWithKey f = go
  where
    go Nil = mempty
    go (Tip kx x) = f kx x
    go (Bin _ m l r)
      | m < 0     = go r `mappend` go l
      | otherwise = go l `mappend` go r
{-# INLINE foldMapWithKey #-}

{--------------------------------------------------------------------
  List variations
--------------------------------------------------------------------}
-- | \(O(n)\).
-- Return all elements of the map in the ascending order of their keys.
-- Subject to list fusion.
--
-- > elems (fromList [(5,"a"), (3,"b")]) == ["b","a"]
-- > elems empty == []
elems :: Word64Map a -> [a]
elems = foldr (:) []

-- | \(O(n)\). Return all keys of the map in ascending order. Subject to list
-- fusion.
--
-- > keys (fromList [(5,"a"), (3,"b")]) == [3,5]
-- > keys empty == []
keys :: Word64Map a -> [Key]
keys = foldrWithKey (\k _ ks -> k : ks) []

-- | \(O(n)\). An alias for 'toAscList'.
-- Returns all key\/value pairs in the
-- map in ascending key order. Subject to list fusion.
--
-- > assocs (fromList [(5,"a"), (3,"b")]) == [(3,"b"), (5,"a")]
-- > assocs empty == []
assocs :: Word64Map a -> [(Key,a)]
assocs = toAscList

-- | \(O(n \min(n,W))\). The set of all keys of the map.
--
-- > keysSet (fromList [(5,"a"), (3,"b")]) == Data.Word64Set.fromList [3,5]
-- > keysSet empty == Data.Word64Set.empty
keysSet :: Word64Map a -> Word64Set.Word64Set
keysSet Nil = Word64Set.Nil
keysSet (Tip kx _) = Word64Set.singleton kx
keysSet (Bin p m l r)
  -- Once the mask falls within the set's suffix bits, the whole subtree's
  -- keys share a prefix and fit in a single dense Word64Set.Tip bitmap.
  | m .&. Word64Set.suffixBitMask == 0 = Word64Set.Bin p m (keysSet l) (keysSet r)
  | otherwise = Word64Set.Tip (p .&. Word64Set.prefixBitMask) (computeBm (computeBm 0 l) r)
  -- Strict accumulator: OR every key's bit into the bitmap.
  where computeBm !acc (Bin _ _ l' r') = computeBm (computeBm acc l') r'
        computeBm acc (Tip kx _) = acc .|. Word64Set.bitmapOf kx
        computeBm _ Nil = error "Data.Word64Set.keysSet: Nil"

-- | \(O(n)\). Build a map from a set of keys and a function which for each key
-- computes its value.
--
-- > fromSet (\k -> replicate k 'a') (Data.Word64Set.fromList [3, 5]) == fromList [(5,"aaaaa"), (3,"aaa")]
-- > fromSet undefined Data.Word64Set.empty == empty
fromSet :: (Key -> a) -> Word64Set.Word64Set -> Word64Map a
fromSet _ Word64Set.Nil = Nil
fromSet f (Word64Set.Bin p m l r) = Bin p m (fromSet f l) (fromSet f r)
fromSet f (Word64Set.Tip kx bm) = buildTree f kx bm (Word64Set.suffixBitMask + 1)
  where
    -- This is slightly complicated, as we to convert the dense
    -- representation of Word64Set into tree representation of Word64Map.
    --
    -- We are given a nonzero bit mask 'bmask' of 'bits' bits with
    -- prefix 'prefix'. We split bmask into halves corresponding
    -- to left and right subtree. If they are both nonempty, we
    -- create a Bin node, otherwise exactly one of them is nonempty
    -- and we construct the Word64Map from that half.
    buildTree g !prefix !bmask bits = case bits of
      0 -> Tip prefix (g prefix)
      _ -> case intFromNat ((natFromInt bits) `shiftRL` 1) of
        bits2
          -- Lower half of the bitmap empty: recurse into the upper half.
          | bmask .&. ((1 `shiftLL` fromIntegral bits2) - 1) == 0 ->
              buildTree g (prefix + bits2) (bmask `shiftRL` fromIntegral bits2) bits2
          -- Upper half empty: recurse into the lower half.
          | (bmask `shiftRL` fromIntegral bits2) .&. ((1 `shiftLL` fromIntegral bits2) - 1) == 0 ->
              buildTree g prefix bmask bits2
          -- Both halves populated: build a Bin node.
          | otherwise ->
              Bin prefix bits2 (buildTree g prefix bmask bits2) (buildTree g (prefix + bits2) (bmask `shiftRL` fromIntegral bits2) bits2)

{--------------------------------------------------------------------
  Lists
--------------------------------------------------------------------}
#ifdef __GLASGOW_HASKELL__
-- | @since 0.5.6.2
instance GHCExts.IsList (Word64Map a) where
  type Item (Word64Map a) = (Key,a)
  fromList = fromList
  toList   = toList
#endif

-- | \(O(n)\). Convert the map to a list of key\/value pairs. Subject to list
-- fusion.
--
-- > toList (fromList [(5,"a"), (3,"b")]) == [(3,"b"), (5,"a")]
-- > toList empty == []
toList :: Word64Map a -> [(Key,a)]
toList = toAscList

-- | \(O(n)\). Convert the map to a list of key\/value pairs where the
-- keys are in ascending order. Subject to list fusion.
--
-- > toAscList (fromList [(5,"a"), (3,"b")]) == [(3,"b"), (5,"a")]
toAscList :: Word64Map a -> [(Key,a)]
toAscList = foldrWithKey (\k x xs -> (k,x):xs) []

-- | \(O(n)\). Convert the map to a list of key\/value pairs where the keys
-- are in descending order. Subject to list fusion.
--
-- > toDescList (fromList [(5,"a"), (3,"b")]) == [(5,"a"), (3,"b")]
toDescList :: Word64Map a -> [(Key,a)]
toDescList = foldlWithKey (\xs k x -> (k,x):xs) []

-- List fusion for the list generating functions.
#if __GLASGOW_HASKELL__
-- The foldrFB and foldlFB are fold{r,l}WithKey equivalents, used for list fusion.
-- They are important to convert unfused methods back, see mapFB in prelude.
foldrFB :: (Key -> a -> b -> b) -> b -> Word64Map a -> b foldrFB = foldrWithKey {-# INLINE[0] foldrFB #-} foldlFB :: (a -> Key -> b -> a) -> a -> Word64Map b -> a foldlFB = foldlWithKey {-# INLINE[0] foldlFB #-} -- Inline assocs and toList, so that we need to fuse only toAscList. {-# INLINE assocs #-} {-# INLINE toList #-} -- The fusion is enabled up to phase 2 included. If it does not succeed, -- convert in phase 1 the expanded elems,keys,to{Asc,Desc}List calls back to -- elems,keys,to{Asc,Desc}List. In phase 0, we inline fold{lr}FB (which were -- used in a list fusion, otherwise it would go away in phase 1), and let compiler -- do whatever it wants with elems,keys,to{Asc,Desc}List -- it was forbidden to -- inline it before phase 0, otherwise the fusion rules would not fire at all. {-# NOINLINE[0] elems #-} {-# NOINLINE[0] keys #-} {-# NOINLINE[0] toAscList #-} {-# NOINLINE[0] toDescList #-} {-# RULES "Word64Map.elems" [~1] forall m . elems m = build (\c n -> foldrFB (\_ x xs -> c x xs) n m) #-} {-# RULES "Word64Map.elemsBack" [1] foldrFB (\_ x xs -> x : xs) [] = elems #-} {-# RULES "Word64Map.keys" [~1] forall m . keys m = build (\c n -> foldrFB (\k _ xs -> c k xs) n m) #-} {-# RULES "Word64Map.keysBack" [1] foldrFB (\k _ xs -> k : xs) [] = keys #-} {-# RULES "Word64Map.toAscList" [~1] forall m . toAscList m = build (\c n -> foldrFB (\k x xs -> c (k,x) xs) n m) #-} {-# RULES "Word64Map.toAscListBack" [1] foldrFB (\k x xs -> (k, x) : xs) [] = toAscList #-} {-# RULES "Word64Map.toDescList" [~1] forall m . toDescList m = build (\c n -> foldlFB (\xs k x -> c (k,x) xs) n m) #-} {-# RULES "Word64Map.toDescListBack" [1] foldlFB (\xs k x -> (k, x) : xs) [] = toDescList #-} #endif -- | \(O(n \min(n,W))\). Create a map from a list of key\/value pairs. 
-- -- > fromList [] == empty -- > fromList [(5,"a"), (3,"b"), (5, "c")] == fromList [(5,"c"), (3,"b")] -- > fromList [(5,"c"), (3,"b"), (5, "a")] == fromList [(5,"a"), (3,"b")] fromList :: [(Key,a)] -> Word64Map a fromList xs = Foldable.foldl' ins empty xs where ins t (k,x) = insert k x t -- | \(O(n \min(n,W))\). Create a map from a list of key\/value pairs with a combining function. See also 'fromAscListWith'. -- -- > fromListWith (++) [(5,"a"), (5,"b"), (3,"b"), (3,"a"), (5,"c")] == fromList [(3, "ab"), (5, "cba")] -- > fromListWith (++) [] == empty fromListWith :: (a -> a -> a) -> [(Key,a)] -> Word64Map a fromListWith f xs = fromListWithKey (\_ x y -> f x y) xs -- | \(O(n \min(n,W))\). Build a map from a list of key\/value pairs with a combining function. See also fromAscListWithKey'. -- -- > let f key new_value old_value = show key ++ ":" ++ new_value ++ "|" ++ old_value -- > fromListWithKey f [(5,"a"), (5,"b"), (3,"b"), (3,"a"), (5,"c")] == fromList [(3, "3:a|b"), (5, "5:c|5:b|a")] -- > fromListWithKey f [] == empty fromListWithKey :: (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a fromListWithKey f xs = Foldable.foldl' ins empty xs where ins t (k,x) = insertWithKey f k x t -- | \(O(n)\). Build a map from a list of key\/value pairs where -- the keys are in ascending order. -- -- > fromAscList [(3,"b"), (5,"a")] == fromList [(3, "b"), (5, "a")] -- > fromAscList [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "b")] fromAscList :: [(Key,a)] -> Word64Map a fromAscList = fromMonoListWithKey Nondistinct (\_ x _ -> x) {-# NOINLINE fromAscList #-} -- | \(O(n)\). Build a map from a list of key\/value pairs where -- the keys are in ascending order, with a combining function on equal keys. 
-- /The precondition (input list is ascending) is not checked./ -- -- > fromAscListWith (++) [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "ba")] fromAscListWith :: (a -> a -> a) -> [(Key,a)] -> Word64Map a fromAscListWith f = fromMonoListWithKey Nondistinct (\_ x y -> f x y) {-# NOINLINE fromAscListWith #-} -- | \(O(n)\). Build a map from a list of key\/value pairs where -- the keys are in ascending order, with a combining function on equal keys. -- /The precondition (input list is ascending) is not checked./ -- -- > let f key new_value old_value = (show key) ++ ":" ++ new_value ++ "|" ++ old_value -- > fromAscListWithKey f [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "5:b|a")] fromAscListWithKey :: (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a fromAscListWithKey f = fromMonoListWithKey Nondistinct f {-# NOINLINE fromAscListWithKey #-} -- | \(O(n)\). Build a map from a list of key\/value pairs where -- the keys are in ascending order and all distinct. -- /The precondition (input list is strictly ascending) is not checked./ -- -- > fromDistinctAscList [(3,"b"), (5,"a")] == fromList [(3, "b"), (5, "a")] fromDistinctAscList :: [(Key,a)] -> Word64Map a fromDistinctAscList = fromMonoListWithKey Distinct (\_ x _ -> x) {-# NOINLINE fromDistinctAscList #-} -- | \(O(n)\). Build a map from a list of key\/value pairs with monotonic keys -- and a combining function. -- -- The precise conditions under which this function works are subtle: -- For any branch mask, keys with the same prefix w.r.t. the branch -- mask must occur consecutively in the list. fromMonoListWithKey :: Distinct -> (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a fromMonoListWithKey distinct f = go where go [] = Nil go ((kx,vx) : zs1) = addAll' kx vx zs1 -- `addAll'` collects all keys equal to `kx` into a single value, -- and then proceeds with `addAll`. 
addAll' !kx vx [] = Tip kx vx addAll' !kx vx ((ky,vy) : zs) | Nondistinct <- distinct, kx == ky = let v = f kx vy vx in addAll' ky v zs -- inlined: | otherwise = addAll kx (Tip kx vx) (ky : zs) | m <- branchMask kx ky , Inserted ty zs' <- addMany' m ky vy zs = addAll kx (linkWithMask m ky ty {-kx-} (Tip kx vx)) zs' -- for `addAll` and `addMany`, kx is /a/ key inside the tree `tx` -- `addAll` consumes the rest of the list, adding to the tree `tx` addAll !_kx !tx [] = tx addAll !kx !tx ((ky,vy) : zs) | m <- branchMask kx ky , Inserted ty zs' <- addMany' m ky vy zs = addAll kx (linkWithMask m ky ty {-kx-} tx) zs' -- `addMany'` is similar to `addAll'`, but proceeds with `addMany'`. addMany' !_m !kx vx [] = Inserted (Tip kx vx) [] addMany' !m !kx vx zs0@((ky,vy) : zs) | Nondistinct <- distinct, kx == ky = let v = f kx vy vx in addMany' m ky v zs -- inlined: | otherwise = addMany m kx (Tip kx vx) (ky : zs) | mask kx m /= mask ky m = Inserted (Tip kx vx) zs0 | mxy <- branchMask kx ky , Inserted ty zs' <- addMany' mxy ky vy zs = addMany m kx (linkWithMask mxy ky ty {-kx-} (Tip kx vx)) zs' -- `addAll` adds to `tx` all keys whose prefix w.r.t. `m` agrees with `kx`. 
addMany !_m !_kx tx [] = Inserted tx [] addMany !m !kx tx zs0@((ky,vy) : zs) | mask kx m /= mask ky m = Inserted tx zs0 | mxy <- branchMask kx ky , Inserted ty zs' <- addMany' mxy ky vy zs = addMany m kx (linkWithMask mxy ky ty {-kx-} tx) zs' {-# INLINE fromMonoListWithKey #-} data Inserted a = Inserted !(Word64Map a) ![(Key,a)] data Distinct = Distinct | Nondistinct {-------------------------------------------------------------------- Eq --------------------------------------------------------------------} instance Eq a => Eq (Word64Map a) where t1 == t2 = equal t1 t2 t1 /= t2 = nequal t1 t2 equal :: Eq a => Word64Map a -> Word64Map a -> Bool equal (Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2) = (m1 == m2) && (p1 == p2) && (equal l1 l2) && (equal r1 r2) equal (Tip kx x) (Tip ky y) = (kx == ky) && (x==y) equal Nil Nil = True equal _ _ = False nequal :: Eq a => Word64Map a -> Word64Map a -> Bool nequal (Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2) = (m1 /= m2) || (p1 /= p2) || (nequal l1 l2) || (nequal r1 r2) nequal (Tip kx x) (Tip ky y) = (kx /= ky) || (x/=y) nequal Nil Nil = False nequal _ _ = True -- | @since 0.5.9 instance Eq1 Word64Map where liftEq eq (Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2) = (m1 == m2) && (p1 == p2) && (liftEq eq l1 l2) && (liftEq eq r1 r2) liftEq eq (Tip kx x) (Tip ky y) = (kx == ky) && (eq x y) liftEq _eq Nil Nil = True liftEq _eq _ _ = False {-------------------------------------------------------------------- Ord --------------------------------------------------------------------} instance Ord a => Ord (Word64Map a) where compare m1 m2 = compare (toList m1) (toList m2) -- | @since 0.5.9 instance Ord1 Word64Map where liftCompare cmp m n = liftCompare (liftCompare cmp) (toList m) (toList n) {-------------------------------------------------------------------- Functor --------------------------------------------------------------------} instance Functor Word64Map where fmap = map #ifdef __GLASGOW_HASKELL__ a <$ Bin p m l r = Bin p m (a <$ l) (a <$ r) a <$ Tip k _ = 
Tip k a _ <$ Nil = Nil #endif {-------------------------------------------------------------------- Show --------------------------------------------------------------------} instance Show a => Show (Word64Map a) where showsPrec d m = showParen (d > 10) $ showString "fromList " . shows (toList m) -- | @since 0.5.9 instance Show1 Word64Map where liftShowsPrec sp sl d m = showsUnaryWith (liftShowsPrec sp' sl') "fromList" d (toList m) where sp' = liftShowsPrec sp sl sl' = liftShowList sp sl {-------------------------------------------------------------------- Read --------------------------------------------------------------------} instance (Read e) => Read (Word64Map e) where #ifdef __GLASGOW_HASKELL__ readPrec = parens $ prec 10 $ do Ident "fromList" <- lexP xs <- readPrec return (fromList xs) readListPrec = readListPrecDefault #else readsPrec p = readParen (p > 10) $ \ r -> do ("fromList",s) <- lex r (xs,t) <- reads s return (fromList xs,t) #endif -- | @since 0.5.9 instance Read1 Word64Map where liftReadsPrec rp rl = readsData $ readsUnaryWith (liftReadsPrec rp' rl') "fromList" fromList where rp' = liftReadsPrec rp rl rl' = liftReadList rp rl {-------------------------------------------------------------------- Helpers --------------------------------------------------------------------} {-------------------------------------------------------------------- Link --------------------------------------------------------------------} link :: Prefix -> Word64Map a -> Prefix -> Word64Map a -> Word64Map a link p1 t1 p2 t2 = linkWithMask (branchMask p1 p2) p1 t1 {-p2-} t2 {-# INLINE link #-} -- `linkWithMask` is useful when the `branchMask` has already been computed linkWithMask :: Mask -> Prefix -> Word64Map a -> Word64Map a -> Word64Map a linkWithMask m p1 t1 {-p2-} t2 | zero p1 m = Bin p m t1 t2 | otherwise = Bin p m t2 t1 where p = mask p1 m {-# INLINE linkWithMask #-} {-------------------------------------------------------------------- @bin@ assures that we never 
have empty trees within a tree. --------------------------------------------------------------------} bin :: Prefix -> Mask -> Word64Map a -> Word64Map a -> Word64Map a bin _ _ l Nil = l bin _ _ Nil r = r bin p m l r = Bin p m l r {-# INLINE bin #-} -- binCheckLeft only checks that the left subtree is non-empty binCheckLeft :: Prefix -> Mask -> Word64Map a -> Word64Map a -> Word64Map a binCheckLeft _ _ Nil r = r binCheckLeft p m l r = Bin p m l r {-# INLINE binCheckLeft #-} -- binCheckRight only checks that the right subtree is non-empty binCheckRight :: Prefix -> Mask -> Word64Map a -> Word64Map a -> Word64Map a binCheckRight _ _ l Nil = l binCheckRight p m l r = Bin p m l r {-# INLINE binCheckRight #-} {-------------------------------------------------------------------- Endian independent bit twiddling --------------------------------------------------------------------} -- | Should this key follow the left subtree of a 'Bin' with switching -- bit @m@? N.B., the answer is only valid when @match i p m@ is true. zero :: Key -> Mask -> Bool zero i m = (natFromInt i) .&. (natFromInt m) == 0 {-# INLINE zero #-} nomatch,match :: Key -> Prefix -> Mask -> Bool -- | Does the key @i@ differ from the prefix @p@ before getting to -- the switching bit @m@? nomatch i p m = (mask i m) /= p {-# INLINE nomatch #-} -- | Does the key @i@ match the prefix @p@ (up to but not including -- bit @m@)? match i p m = (mask i m) == p {-# INLINE match #-} -- | The prefix of key @i@ up to (but not including) the switching -- bit @m@. mask :: Key -> Mask -> Prefix mask i m = maskW (natFromInt i) (natFromInt m) {-# INLINE mask #-} {-------------------------------------------------------------------- Big endian operations --------------------------------------------------------------------} -- | The prefix of key @i@ up to (but not including) the switching -- bit @m@. maskW :: Nat -> Nat -> Prefix maskW i m = intFromNat (i .&. 
((-m) `xor` m)) {-# INLINE maskW #-} -- | Does the left switching bit specify a shorter prefix? shorter :: Mask -> Mask -> Bool shorter m1 m2 = (natFromInt m1) > (natFromInt m2) {-# INLINE shorter #-} -- | The first switching bit where the two prefixes disagree. branchMask :: Prefix -> Prefix -> Mask branchMask p1 p2 = intFromNat (highestBitMask (natFromInt p1 `xor` natFromInt p2)) {-# INLINE branchMask #-} {-------------------------------------------------------------------- Utilities --------------------------------------------------------------------} -- | \(O(1)\). Decompose a map into pieces based on the structure -- of the underlying tree. This function is useful for consuming a -- map in parallel. -- -- No guarantee is made as to the sizes of the pieces; an internal, but -- deterministic process determines this. However, it is guaranteed that the -- pieces returned will be in ascending order (all elements in the first submap -- less than all elements in the second, and so on). -- -- Examples: -- -- > splitRoot (fromList (zip [1..6::Int] ['a'..])) == -- > [fromList [(1,'a'),(2,'b'),(3,'c')],fromList [(4,'d'),(5,'e'),(6,'f')]] -- -- > splitRoot empty == [] -- -- Note that the current implementation does not return more than two submaps, -- but you should not depend on this behaviour because it can change in the -- future without notice. splitRoot :: Word64Map a -> [Word64Map a] splitRoot orig = case orig of Nil -> [] x@(Tip _ _) -> [x] Bin _ m l r | m < 0 -> [r, l] | otherwise -> [l, r] {-# INLINE splitRoot #-} {-------------------------------------------------------------------- Debugging --------------------------------------------------------------------} -- | \(O(n \min(n,W))\). Show the tree that implements the map. The tree is shown -- in a compressed, hanging format. showTree :: Show a => Word64Map a -> String showTree s = showTreeWith True False s {- | \(O(n \min(n,W))\). 
The expression (@'showTreeWith' hang wide map@) shows the tree that implements the map. If @hang@ is 'True', a /hanging/ tree is shown otherwise a rotated tree is shown. If @wide@ is 'True', an extra wide version is shown. -} showTreeWith :: Show a => Bool -> Bool -> Word64Map a -> String showTreeWith hang wide t | hang = (showsTreeHang wide [] t) "" | otherwise = (showsTree wide [] [] t) "" showsTree :: Show a => Bool -> [String] -> [String] -> Word64Map a -> ShowS showsTree wide lbars rbars t = case t of Bin p m l r -> showsTree wide (withBar rbars) (withEmpty rbars) r . showWide wide rbars . showsBars lbars . showString (showBin p m) . showString "\n" . showWide wide lbars . showsTree wide (withEmpty lbars) (withBar lbars) l Tip k x -> showsBars lbars . showString " " . shows k . showString ":=" . shows x . showString "\n" Nil -> showsBars lbars . showString "|\n" showsTreeHang :: Show a => Bool -> [String] -> Word64Map a -> ShowS showsTreeHang wide bars t = case t of Bin p m l r -> showsBars bars . showString (showBin p m) . showString "\n" . showWide wide bars . showsTreeHang wide (withBar bars) l . showWide wide bars . showsTreeHang wide (withEmpty bars) r Tip k x -> showsBars bars . showString " " . shows k . showString ":=" . shows x . showString "\n" Nil -> showsBars bars . showString "|\n" showBin :: Prefix -> Mask -> String showBin _ _ = "*" -- ++ show (p,m) showWide :: Bool -> [String] -> String -> String showWide wide bars | wide = showString (concat (reverse bars)) . showString "|\n" | otherwise = id showsBars :: [String] -> ShowS showsBars bars = case bars of [] -> id _ : tl -> showString (concat (reverse tl)) . 
showString node node :: String node = "+--" withBar, withEmpty :: [String] -> [String] withBar bars = "| ":bars withEmpty bars = " ":bars ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/Lazy.hs0000644000000000000000000001222007346545000021552 0ustar0000000000000000{-# LANGUAGE CPP #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Map.Lazy -- Copyright : (c) Daan Leijen 2002 -- (c) Andriy Palamarchuk 2008 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- -- = Finite Word64 Maps (lazy interface) -- -- The @'Word64Map' v@ type represents a finite map (sometimes called a dictionary) -- from keys of type @Word64@ to values of type @v@. -- -- The functions in "Data.Word64Map.Strict" are careful to force values before -- installing them in an 'Word64Map'. This is usually more efficient in cases where -- laziness is not essential. The functions in this module do not do so. -- -- For a walkthrough of the most commonly used functions see the -- . -- -- This module is intended to be imported qualified, to avoid name clashes with -- Prelude functions: -- -- > import Data.Word64Map.Lazy (Word64Map) -- > import qualified Data.Word64Map.Lazy as Word64Map -- -- Note that the implementation is generally /left-biased/. Functions that take -- two maps as arguments and combine them, such as `union` and `intersection`, -- prefer the values in the first argument to those in the second. -- -- -- == Detailed performance information -- -- The amortized running time is given for each operation, with \(n\) referring to -- the number of entries in the map and \(W\) referring to the number of bits in -- an 'Word64' (64). -- -- Benchmarks comparing "Data.Word64Map.Lazy" with other dictionary -- implementations can be found at https://github.com/haskell-perf/dictionaries. -- -- -- == Implementation -- -- The implementation is based on /big-endian patricia trees/. 
This data -- structure performs especially well on binary operations like 'union' and -- 'intersection'. Additionally, benchmarks show that it is also (much) faster -- on insertions and deletions when compared to a generic size-balanced map -- implementation (see "Data.Map"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- ----------------------------------------------------------------------------- module GHC.Data.Word64Map.Lazy ( -- * Map type #if !defined(TESTING) Word64Map, Key -- instance Eq,Show #else Word64Map(..), Key -- instance Eq,Show #endif -- * Construction , empty , singleton , fromSet -- ** From Unordered Lists , fromList , fromListWith , fromListWithKey -- ** From Ascending Lists , fromAscList , fromAscListWith , fromAscListWithKey , fromDistinctAscList -- * Insertion , insert , insertWith , insertWithKey , insertLookupWithKey -- * Deletion\/Update , delete , adjust , adjustWithKey , update , updateWithKey , updateLookupWithKey , alter , alterF -- * Query -- ** Lookup , WM.lookup , (!?) , (!) 
, findWithDefault , member , notMember , lookupLT , lookupGT , lookupLE , lookupGE -- ** Size , WM.null , size -- * Combine -- ** Union , union , unionWith , unionWithKey , unions , unionsWith -- ** Difference , difference , (\\) , differenceWith , differenceWithKey -- ** Intersection , intersection , intersectionWith , intersectionWithKey -- ** Disjoint , disjoint -- ** Compose , compose -- ** Universal combining function , mergeWithKey -- * Traversal -- ** Map , WM.map , mapWithKey , traverseWithKey , traverseMaybeWithKey , mapAccum , mapAccumWithKey , mapAccumRWithKey , mapKeys , mapKeysWith , mapKeysMonotonic -- * Folds , WM.foldr , WM.foldl , foldrWithKey , foldlWithKey , foldMapWithKey -- ** Strict folds , foldr' , foldl' , foldrWithKey' , foldlWithKey' -- * Conversion , elems , keys , assocs , keysSet -- ** Lists , toList -- ** Ordered lists , toAscList , toDescList -- * Filter , WM.filter , filterWithKey , restrictKeys , withoutKeys , partition , partitionWithKey , takeWhileAntitone , dropWhileAntitone , spanAntitone , mapMaybe , mapMaybeWithKey , mapEither , mapEitherWithKey , split , splitLookup , splitRoot -- * Submap , isSubmapOf, isSubmapOfBy , isProperSubmapOf, isProperSubmapOfBy -- * Min\/Max , lookupMin , lookupMax , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , updateMin , updateMax , updateMinWithKey , updateMaxWithKey , minView , maxView , minViewWithKey , maxViewWithKey ) where import GHC.Data.Word64Map.Internal as WM ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/Strict.hs0000644000000000000000000001353307346545000022113 0ustar0000000000000000{-# LANGUAGE CPP #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Map.Strict -- Copyright : (c) Daan Leijen 2002 -- (c) Andriy Palamarchuk 2008 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- -- = Finite Word64 Maps (strict interface) -- -- The @'Word64Map' v@ 
type represents a finite map (sometimes called a dictionary) -- from key of type @Word64@ to values of type @v@. -- -- Each function in this module is careful to force values before installing -- them in an 'Word64Map'. This is usually more efficient when laziness is not -- necessary. When laziness /is/ required, use the functions in -- "Data.Word64Map.Lazy". -- -- In particular, the functions in this module obey the following law: -- -- - If all values stored in all maps in the arguments are in WHNF, then all -- values stored in all maps in the results will be in WHNF once those maps -- are evaluated. -- -- For a walkthrough of the most commonly used functions see the -- . -- -- This module is intended to be imported qualified, to avoid name clashes with -- Prelude functions: -- -- > import Data.Word64Map.Strict (Word64Map) -- > import qualified Data.Word64Map.Strict as Word64Map -- -- Note that the implementation is generally /left-biased/. Functions that take -- two maps as arguments and combine them, such as `union` and `intersection`, -- prefer the values in the first argument to those in the second. -- -- -- == Detailed performance information -- -- The amortized running time is given for each operation, with \(n\) referring to -- the number of entries in the map and \(W\) referring to the number of bits in -- an 'Word64' (64). -- -- Benchmarks comparing "Data.Word64Map.Strict" with other dictionary -- implementations can be found at https://github.com/haskell-perf/dictionaries. -- -- -- == Warning -- -- The 'Word64Map' type is shared between the lazy and strict modules, meaning that -- the same 'Word64Map' value can be passed to functions in both modules. This -- means that the 'Functor', 'Traversable' and 'Data.Data.Data' instances are -- the same as for the "Data.Word64Map.Lazy" module, so if they are used the -- resulting map may contain suspended values (thunks). -- -- -- == Implementation -- -- The implementation is based on /big-endian patricia trees/. 
This data -- structure performs especially well on binary operations like 'union' and -- 'intersection'. Additionally, benchmarks show that it is also (much) faster -- on insertions and deletions when compared to a generic size-balanced map -- implementation (see "Data.Map"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- ----------------------------------------------------------------------------- -- See the notes at the beginning of Data.Word64Map.Internal. module GHC.Data.Word64Map.Strict ( -- * Map type #if !defined(TESTING) Word64Map, Key -- instance Eq,Show #else Word64Map(..), Key -- instance Eq,Show #endif -- * Construction , empty , singleton , fromSet -- ** From Unordered Lists , fromList , fromListWith , fromListWithKey -- ** From Ascending Lists , fromAscList , fromAscListWith , fromAscListWithKey , fromDistinctAscList -- * Insertion , insert , insertWith , insertWithKey , insertLookupWithKey -- * Deletion\/Update , delete , adjust , adjustWithKey , update , updateWithKey , updateLookupWithKey , alter , alterF -- * Query -- ** Lookup , lookup , (!?) , (!) 
, findWithDefault , member , notMember , lookupLT , lookupGT , lookupLE , lookupGE -- ** Size , null , size -- * Combine -- ** Union , union , unionWith , unionWithKey , unions , unionsWith -- ** Difference , difference , (\\) , differenceWith , differenceWithKey -- ** Intersection , intersection , intersectionWith , intersectionWithKey -- ** Disjoint , disjoint -- ** Compose , compose -- ** Universal combining function , mergeWithKey -- * Traversal -- ** Map , map , mapWithKey , traverseWithKey , traverseMaybeWithKey , mapAccum , mapAccumWithKey , mapAccumRWithKey , mapKeys , mapKeysWith , mapKeysMonotonic -- * Folds , foldr , foldl , foldrWithKey , foldlWithKey , foldMapWithKey -- ** Strict folds , foldr' , foldl' , foldrWithKey' , foldlWithKey' -- * Conversion , elems , keys , assocs , keysSet -- ** Lists , toList -- ** Ordered lists , toAscList , toDescList -- * Filter , filter , filterWithKey , restrictKeys , withoutKeys , partition , partitionWithKey , takeWhileAntitone , dropWhileAntitone , spanAntitone , mapMaybe , mapMaybeWithKey , mapEither , mapEitherWithKey , split , splitLookup , splitRoot -- * Submap , isSubmapOf, isSubmapOfBy , isProperSubmapOf, isProperSubmapOfBy -- * Min\/Max , lookupMin , lookupMax , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , updateMin , updateMax , updateMinWithKey , updateMaxWithKey , minView , maxView , minViewWithKey , maxViewWithKey ) where import GHC.Data.Word64Map.Strict.Internal ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/Strict/0000755000000000000000000000000007346545000021552 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Map/Strict/Internal.hs0000644000000000000000000012754707346545000023702 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_GHC -fno-warn-incomplete-uni-patterns #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Map.Strict.Internal -- Copyright : (c) Daan Leijen 
2002 -- (c) Andriy Palamarchuk 2008 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- -- = Finite Int Maps (strict interface) -- -- The @'Word64Map' v@ type represents a finite map (sometimes called a dictionary) -- from key of type @Int@ to values of type @v@. -- -- Each function in this module is careful to force values before installing -- them in an 'Word64Map'. This is usually more efficient when laziness is not -- necessary. When laziness /is/ required, use the functions in -- "Data.Word64Map.Lazy". -- -- In particular, the functions in this module obey the following law: -- -- - If all values stored in all maps in the arguments are in WHNF, then all -- values stored in all maps in the results will be in WHNF once those maps -- are evaluated. -- -- For a walkthrough of the most commonly used functions see the -- . -- -- This module is intended to be imported qualified, to avoid name clashes with -- Prelude functions: -- -- > import Data.Word64Map.Strict (Word64Map) -- > import qualified Data.Word64Map.Strict as Word64Map -- -- Note that the implementation is generally /left-biased/. Functions that take -- two maps as arguments and combine them, such as `union` and `intersection`, -- prefer the values in the first argument to those in the second. -- -- -- == Detailed performance information -- -- The amortized running time is given for each operation, with \(n\) referring to -- the number of entries in the map and \(W\) referring to the number of bits in -- an 'Int' (32 or 64). -- -- Benchmarks comparing "Data.Word64Map.Strict" with other dictionary -- implementations can be found at https://github.com/haskell-perf/dictionaries. -- -- -- == Warning -- -- The 'Word64Map' type is shared between the lazy and strict modules, meaning that -- the same 'Word64Map' value can be passed to functions in both modules. 
This -- means that the 'Functor', 'Traversable' and 'Data.Data.Data' instances are -- the same as for the "Data.Word64Map.Lazy" module, so if they are used the -- resulting map may contain suspended values (thunks). -- -- -- == Implementation -- -- The implementation is based on /big-endian patricia trees/. This data -- structure performs especially well on binary operations like 'union' and -- 'intersection'. Additionally, benchmarks show that it is also (much) faster -- on insertions and deletions when compared to a generic size-balanced map -- implementation (see "Data.Map"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- ----------------------------------------------------------------------------- -- See the notes at the beginning of Data.Word64Map.Internal. module GHC.Data.Word64Map.Strict.Internal ( -- * Map type #if !defined(TESTING) Word64Map, Key -- instance Eq,Show #else Word64Map(..), Key -- instance Eq,Show #endif -- * Construction , empty , singleton , fromSet -- ** From Unordered Lists , fromList , fromListWith , fromListWithKey -- ** From Ascending Lists , fromAscList , fromAscListWith , fromAscListWithKey , fromDistinctAscList -- * Insertion , insert , insertWith , insertWithKey , insertLookupWithKey -- * Deletion\/Update , delete , adjust , adjustWithKey , update , updateWithKey , updateLookupWithKey , alter , alterF -- * Query -- ** Lookup , lookup , (!?) , (!) 
, findWithDefault , member , notMember , lookupLT , lookupGT , lookupLE , lookupGE -- ** Size , null , size -- * Combine -- ** Union , union , unionWith , unionWithKey , unions , unionsWith -- ** Difference , difference , (\\) , differenceWith , differenceWithKey -- ** Intersection , intersection , intersectionWith , intersectionWithKey -- ** Disjoint , disjoint -- ** Compose , compose -- ** Universal combining function , mergeWithKey -- * Traversal -- ** Map , map , mapWithKey , traverseWithKey , traverseMaybeWithKey , mapAccum , mapAccumWithKey , mapAccumRWithKey , mapKeys , mapKeysWith , mapKeysMonotonic -- * Folds , foldr , foldl , foldrWithKey , foldlWithKey , foldMapWithKey -- ** Strict folds , foldr' , foldl' , foldrWithKey' , foldlWithKey' -- * Conversion , elems , keys , assocs , keysSet -- ** Lists , toList -- ** Ordered lists , toAscList , toDescList -- * Filter , filter , filterWithKey , restrictKeys , withoutKeys , partition , partitionWithKey , takeWhileAntitone , dropWhileAntitone , spanAntitone , mapMaybe , mapMaybeWithKey , mapEither , mapEitherWithKey , split , splitLookup , splitRoot -- * Submap , isSubmapOf, isSubmapOfBy , isProperSubmapOf, isProperSubmapOfBy -- * Min\/Max , lookupMin , lookupMax , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , updateMin , updateMax , updateMinWithKey , updateMaxWithKey , minView , maxView , minViewWithKey , maxViewWithKey ) where import GHC.Prelude.Basic hiding (lookup, filter, foldr, foldl, foldl', null, map) import qualified GHC.Data.Word64Map.Internal as L import GHC.Data.Word64Map.Internal ( Word64Map (..) , Key , mask , branchMask , nomatch , zero , natFromInt , intFromNat , bin , binCheckLeft , binCheckRight , link , linkWithMask , (\\) , (!) , (!?) 
, empty , assocs , filter , filterWithKey , findMin , findMax , foldMapWithKey , foldr , foldl , foldr' , foldl' , foldlWithKey , foldrWithKey , foldlWithKey' , foldrWithKey' , keysSet , mergeWithKey' , compose , delete , deleteMin , deleteMax , deleteFindMax , deleteFindMin , difference , elems , intersection , disjoint , isProperSubmapOf , isProperSubmapOfBy , isSubmapOf , isSubmapOfBy , lookup , lookupLE , lookupGE , lookupLT , lookupGT , lookupMin , lookupMax , minView , maxView , minViewWithKey , maxViewWithKey , keys , mapKeys , mapKeysMonotonic , member , notMember , null , partition , partitionWithKey , takeWhileAntitone , dropWhileAntitone , spanAntitone , restrictKeys , size , split , splitLookup , splitRoot , toAscList , toDescList , toList , union , unions , withoutKeys ) import qualified GHC.Data.Word64Set.Internal as Word64Set import GHC.Utils.Containers.Internal.BitUtil import GHC.Utils.Containers.Internal.StrictPair import qualified Data.Foldable as Foldable {-------------------------------------------------------------------- Query --------------------------------------------------------------------} -- | \(O(\min(n,W))\). The expression @('findWithDefault' def k map)@ -- returns the value at key @k@ or returns @def@ when the key is not an -- element of the map. -- -- > findWithDefault 'x' 1 (fromList [(5,'a'), (3,'b')]) == 'x' -- > findWithDefault 'x' 5 (fromList [(5,'a'), (3,'b')]) == 'a' -- See Word64Map.Internal.Note: Local 'go' functions and capturing] findWithDefault :: a -> Key -> Word64Map a -> a findWithDefault def !k = go where go (Bin p m l r) | nomatch k p m = def | zero k m = go l | otherwise = go r go (Tip kx x) | k == kx = x | otherwise = def go Nil = def {-------------------------------------------------------------------- Construction --------------------------------------------------------------------} -- | \(O(1)\). A map of one element. 
--
-- > singleton 1 'a' == fromList [(1, 'a')]
-- > size (singleton 1 'a') == 1
singleton :: Key -> a -> Word64Map a
singleton k !x = Tip k x
{-# INLINE singleton #-}

{--------------------------------------------------------------------
  Insert
--------------------------------------------------------------------}
-- | \(O(\min(n,W))\). Insert a new key\/value pair in the map.
-- If the key is already present in the map, the associated value is
-- replaced with the supplied value, i.e. 'insert' is equivalent to
-- @'insertWith' 'const'@.
--
-- > insert 5 'x' (fromList [(5,'a'), (3,'b')]) == fromList [(3, 'b'), (5, 'x')]
-- > insert 7 'x' (fromList [(5,'a'), (3,'b')]) == fromList [(3, 'b'), (5, 'a'), (7, 'x')]
-- > insert 5 'x' empty == singleton 5 'x'
insert :: Key -> a -> Word64Map a -> Word64Map a
insert !k !x t0 = go t0
  where
    -- Descend along the bits of k, rebuilding the spine on the way up.
    go t@(Bin p m l r)
      | nomatch k p m = link k (Tip k x) p t   -- k lies outside this subtree
      | zero k m      = Bin p m (go l) r
      | otherwise     = Bin p m l (go r)
    go t@(Tip ky _)
      | k == ky       = Tip k x                -- overwrite the existing binding
      | otherwise     = link k (Tip k x) ky t
    go Nil            = Tip k x
-- right-biased insertion, used by 'union'

-- | \(O(\min(n,W))\). Insert with a combining function.
-- @'insertWith' f key value mp@
-- will insert the pair (key, value) into @mp@ if key does
-- not exist in the map. If the key does exist, the function will
-- insert @f new_value old_value@.
--
-- > insertWith (++) 5 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "xxxa")]
-- > insertWith (++) 7 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a"), (7, "xxx")]
-- > insertWith (++) 5 "xxx" empty == singleton 5 "xxx"
insertWith :: (a -> a -> a) -> Key -> a -> Word64Map a -> Word64Map a
insertWith f k x t = insertWithKey (\_ new old -> f new old) k x t

-- | \(O(\min(n,W))\). Insert with a combining function.
-- @'insertWithKey' f key value mp@
-- will insert the pair (key, value) into @mp@ if key does
-- not exist in the map. If the key does exist, the function will
-- insert @f key new_value old_value@.
--
-- > let f key new_value old_value = (show key) ++ ":" ++ new_value ++ "|" ++ old_value
-- > insertWithKey f 5 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:xxx|a")]
-- > insertWithKey f 7 "xxx" (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a"), (7, "xxx")]
-- > insertWithKey f 5 "xxx" empty == singleton 5 "xxx"
--
-- If the key exists in the map, this function is lazy in @value@ but strict
-- in the result of @f@.
insertWithKey :: (Key -> a -> a -> a) -> Key -> a -> Word64Map a -> Word64Map a
insertWithKey f !k x t =
  case t of
    Bin p m l r
      -- k does not share this subtree's prefix: graft a fresh Tip beside it.
      | nomatch k p m -> link k (singleton k x) p t
      | zero k m      -> Bin p m (insertWithKey f k x l) r
      | otherwise     -> Bin p m l (insertWithKey f k x r)
    Tip ky y
      -- Key collision: combine new and old values, forcing the result ($!).
      | k==ky         -> Tip k $! f k x y
      | otherwise     -> link k (singleton k x) ky t
    Nil -> singleton k x

-- | \(O(\min(n,W))\). The expression (@'insertLookupWithKey' f k x map@)
-- is a pair where the first element is equal to (@'lookup' k map@)
-- and the second element equal to (@'insertWithKey' f k x map@).
--
-- > let f key new_value old_value = (show key) ++ ":" ++ new_value ++ "|" ++ old_value
-- > insertLookupWithKey f 5 "xxx" (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "5:xxx|a")])
-- > insertLookupWithKey f 7 "xxx" (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a"), (7, "xxx")])
-- > insertLookupWithKey f 5 "xxx" empty == (Nothing, singleton 5 "xxx")
--
-- This is how to define @insertLookup@ using @insertLookupWithKey@:
--
-- > let insertLookup kx x t = insertLookupWithKey (\_ a _ -> a) kx x t
-- > insertLookup 5 "x" (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "x")])
-- > insertLookup 7 "x" (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a"), (7, "x")])
insertLookupWithKey :: (Key -> a -> a -> a) -> Key -> a -> Word64Map a -> (Maybe a, Word64Map a)
insertLookupWithKey f0 !k0 x0 t0 = toPair $ go f0 k0 x0 t0
  where
    -- The strict pair (:*:) avoids allocating lazy tuples on the way up;
    -- toPair converts to an ordinary pair only once, at the top.
    go f k x t =
      case t of
        Bin p m l r
          | nomatch k p m -> Nothing :*: link k (singleton k x) p t
          | zero k m      -> let (found :*: l') = go f k x l
                             in (found :*: Bin p m l' r)
          | otherwise     -> let (found :*: r') = go f k x r
                             in (found :*: Bin p m l r')
        Tip ky y
          | k==ky         -> (Just y :*: (Tip k $! f k x y))
          | otherwise     -> (Nothing :*: link k (singleton k x) ky t)
        Nil -> Nothing :*: (singleton k x)

{--------------------------------------------------------------------
  Deletion
--------------------------------------------------------------------}
-- | \(O(\min(n,W))\). Adjust a value at a specific key. When the key is not
-- a member of the map, the original map is returned.
--
-- > adjust ("new " ++) 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "new a")]
-- > adjust ("new " ++) 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > adjust ("new " ++) 7 empty == empty
adjust :: (a -> a) -> Key -> Word64Map a -> Word64Map a
adjust f k m
  = adjustWithKey (\_ x -> f x) k m

-- | \(O(\min(n,W))\). Adjust a value at a specific key.
When the key is not
-- a member of the map, the original map is returned.
--
-- > let f key x = (show key) ++ ":new " ++ x
-- > adjustWithKey f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:new a")]
-- > adjustWithKey f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > adjustWithKey f 7 empty == empty
adjustWithKey :: (Key -> a -> a) -> Key -> Word64Map a -> Word64Map a
adjustWithKey f !k t =
  case t of
    Bin p m l r
      | nomatch k p m -> t                 -- key not in this subtree: share the node
      | zero k m      -> Bin p m (adjustWithKey f k l) r
      | otherwise     -> Bin p m l (adjustWithKey f k r)
    Tip ky y
      | k==ky         -> Tip ky $! f k y   -- force the new value (strict map)
      | otherwise     -> t
    Nil -> Nil

-- | \(O(\min(n,W))\). The expression (@'update' f k map@) updates the value @x@
-- at @k@ (if it is in the map). If (@f x@) is 'Nothing', the element is
-- deleted. If it is (@'Just' y@), the key @k@ is bound to the new value @y@.
--
-- > let f x = if x == "a" then Just "new a" else Nothing
-- > update f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "new a")]
-- > update f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > update f 3 (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
update :: (a -> Maybe a) -> Key -> Word64Map a -> Word64Map a
update f
  = updateWithKey (\_ x -> f x)

-- | \(O(\min(n,W))\). The expression (@'updateWithKey' f k map@) updates the value @x@
-- at @k@ (if it is in the map). If (@f k x@) is 'Nothing', the element is
-- deleted. If it is (@'Just' y@), the key @k@ is bound to the new value @y@.
--
-- > let f k x = if x == "a" then Just ((show k) ++ ":new a") else Nothing
-- > updateWithKey f 5 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "5:new a")]
-- > updateWithKey f 7 (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "a")]
-- > updateWithKey f 3 (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
updateWithKey :: (Key -> a -> Maybe a) -> Key -> Word64Map a -> Word64Map a
updateWithKey f !k t =
  case t of
    Bin p m l r
      | nomatch k p m -> t
      -- binCheckLeft/binCheckRight collapse the Bin if the child became Nil.
      | zero k m      -> binCheckLeft p m (updateWithKey f k l) r
      | otherwise     -> binCheckRight p m l (updateWithKey f k r)
    Tip ky y
      | k==ky         -> case f k y of
                           Just !y' -> Tip ky y'
                           Nothing -> Nil
      | otherwise     -> t
    Nil -> Nil

-- | \(O(\min(n,W))\). Lookup and update.
-- The function returns original value, if it is updated.
-- This is different behavior than 'Data.Map.updateLookupWithKey'.
-- Returns the original key value if the map entry is deleted.
--
-- > let f k x = if x == "a" then Just ((show k) ++ ":new a") else Nothing
-- > updateLookupWithKey f 5 (fromList [(5,"a"), (3,"b")]) == (Just "a", fromList [(3, "b"), (5, "5:new a")])
-- > updateLookupWithKey f 7 (fromList [(5,"a"), (3,"b")]) == (Nothing, fromList [(3, "b"), (5, "a")])
-- > updateLookupWithKey f 3 (fromList [(5,"a"), (3,"b")]) == (Just "b", singleton 5 "a")
updateLookupWithKey :: (Key -> a -> Maybe a) -> Key -> Word64Map a -> (Maybe a,Word64Map a)
updateLookupWithKey f0 !k0 t0 = toPair $ go f0 k0 t0
  where
    go f k t =
      case t of
        Bin p m l r
          | nomatch k p m -> (Nothing :*: t)
          | zero k m      -> let (found :*: l') = go f k l
                             in (found :*: binCheckLeft p m l' r)
          | otherwise     -> let (found :*: r') = go f k r
                             in (found :*: binCheckRight p m l r')
        Tip ky y
          -- Note: the *old* value is returned in both branches (see haddock).
          | k==ky         -> case f k y of
                               Just !y' -> (Just y :*: Tip ky y')
                               Nothing -> (Just y :*: Nil)
          | otherwise     -> (Nothing :*: t)
        Nil -> (Nothing :*: Nil)

-- | \(O(\min(n,W))\). The expression (@'alter' f k map@) alters the value @x@ at @k@, or absence thereof.
-- 'alter' can be used to insert, delete, or update a value in an 'Word64Map'.
-- In short : @'lookup' k ('alter' f k m) = f ('lookup' k m)@.
alter :: (Maybe a -> Maybe a) -> Key -> Word64Map a -> Word64Map a
alter f !k t =
  case t of
    Bin p m l r
      -- k is not under this prefix: only an insertion (f Nothing = Just x)
      -- can change the map here.
      | nomatch k p m -> case f Nothing of
                           Nothing -> t
                           Just !x -> link k (Tip k x) p t
      | zero k m      -> binCheckLeft p m (alter f k l) r
      | otherwise     -> binCheckRight p m l (alter f k r)
    Tip ky y
      | k==ky         -> case f (Just y) of
                           Just !x -> Tip ky x
                           Nothing -> Nil
      | otherwise     -> case f Nothing of
                           Just !x -> link k (Tip k x) ky t
                           Nothing -> t
    Nil -> case f Nothing of
             Just !x -> Tip k x
             Nothing -> Nil

-- | \(O(\log n)\). The expression (@'alterF' f k map@) alters the value @x@ at
-- @k@, or absence thereof. 'alterF' can be used to inspect, insert, delete,
-- or update a value in an 'Word64Map'. In short : @'lookup' k <$> 'alterF' f k m = f
-- ('lookup' k m)@.
--
-- Example:
--
-- @
-- interactiveAlter :: Int -> Word64Map String -> IO (Word64Map String)
-- interactiveAlter k m = alterF f k m where
--   f Nothing = do
--      putStrLn $ show k ++
--          " was not found in the map. Would you like to add it?"
--      getUserResponse1 :: IO (Maybe String)
--   f (Just old) = do
--      putStrLn $ "The key is currently bound to " ++ show old ++
--          ". Would you like to change or delete it?"
--      getUserResponse2 :: IO (Maybe String)
-- @
--
-- 'alterF' is the most general operation for working with an individual
-- key that may or may not be in a given map.
-- Note: 'alterF' is a flipped version of the 'at' combinator from
-- 'Control.Lens.At'.
--
-- @since 0.5.8
alterF :: Functor f
       => (Maybe a -> f (Maybe a)) -> Key -> Word64Map a -> f (Word64Map a)
-- This implementation was modified from 'Control.Lens.At'.
alterF f k m = (<$> f mv) $ \fres ->
  case fres of
    Nothing  -> maybe m (const (delete k m)) mv  -- delete only if k was present
    Just !v' -> insert k v' m                    -- insert or replace, forcing v'
  where mv = lookup k m

{--------------------------------------------------------------------
  Union
--------------------------------------------------------------------}
-- | The union of a list of maps, with a combining operation.
--
-- > unionsWith (++) [(fromList [(5, "a"), (3, "b")]), (fromList [(5, "A"), (7, "C")]), (fromList [(5, "A3"), (3, "B3")])]
-- >     == fromList [(3, "bB3"), (5, "aAA3"), (7, "C")]
unionsWith :: Foldable f => (a->a->a) -> f (Word64Map a) -> Word64Map a
unionsWith f ts
  = Foldable.foldl' (unionWith f) empty ts

-- | \(O(n+m)\). The union with a combining function.
--
-- > unionWith (++) (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == fromList [(3, "b"), (5, "aA"), (7, "C")]
unionWith :: (a -> a -> a) -> Word64Map a -> Word64Map a -> Word64Map a
unionWith f m1 m2
  = unionWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). The union with a combining function.
--
-- > let f key left_value right_value = (show key) ++ ":" ++ left_value ++ "|" ++ right_value
-- > unionWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == fromList [(3, "b"), (5, "5:a|A"), (7, "C")]
unionWithKey :: (Key -> a -> a -> a) -> Word64Map a -> Word64Map a -> Word64Map a
unionWithKey f m1 m2
  -- Both Tips passed to the combining argument hold the same key, so _k2
  -- is ignored; the combined value is forced ($!) to keep the map strict.
  = mergeWithKey' Bin (\(Tip k1 x1) (Tip _k2 x2) -> Tip k1 $! f k1 x1 x2) id id m1 m2

{--------------------------------------------------------------------
  Difference
--------------------------------------------------------------------}
-- | \(O(n+m)\). Difference with a combining function.
--
-- > let f al ar = if al == "b" then Just (al ++ ":" ++ ar) else Nothing
-- > differenceWith f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (3, "B"), (7, "C")])
-- >     == singleton 3 "b:B"
differenceWith :: (a -> b -> Maybe a) -> Word64Map a -> Word64Map b -> Word64Map a
differenceWith f m1 m2
  = differenceWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). Difference with a combining function. When two equal keys are
-- encountered, the combining function is applied to the key and both values.
-- If it returns 'Nothing', the element is discarded (proper set difference).
-- If it returns (@'Just' y@), the element is updated with a new value @y@.
--
-- > let f k al ar = if al == "b" then Just ((show k) ++ ":" ++ al ++ "|" ++ ar) else Nothing
-- > differenceWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (3, "B"), (10, "C")])
-- >     == singleton 3 "3:b|B"
differenceWithKey :: (Key -> a -> b -> Maybe a) -> Word64Map a -> Word64Map b -> Word64Map a
differenceWithKey f m1 m2
  = mergeWithKey f id (const Nil) m1 m2

{--------------------------------------------------------------------
  Intersection
--------------------------------------------------------------------}
-- | \(O(n+m)\). The intersection with a combining function.
--
-- > intersectionWith (++) (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 5 "aA"
intersectionWith :: (a -> b -> c) -> Word64Map a -> Word64Map b -> Word64Map c
intersectionWith f m1 m2
  = intersectionWithKey (\_ x y -> f x y) m1 m2

-- | \(O(n+m)\). The intersection with a combining function.
--
-- > let f k al ar = (show k) ++ ":" ++ al ++ "|" ++ ar
-- > intersectionWithKey f (fromList [(5, "a"), (3, "b")]) (fromList [(5, "A"), (7, "C")]) == singleton 5 "5:a|A"
intersectionWithKey :: (Key -> a -> b -> c) -> Word64Map a -> Word64Map b -> Word64Map c
intersectionWithKey f m1 m2
  = mergeWithKey' bin (\(Tip k1 x1) (Tip _k2 x2) -> Tip k1 $! f k1 x1 x2) (const Nil) (const Nil) m1 m2

{--------------------------------------------------------------------
  MergeWithKey
--------------------------------------------------------------------}
-- | \(O(n+m)\). A high-performance universal combining function. Using
-- 'mergeWithKey', all combining functions can be defined without any loss of
-- efficiency (with exception of 'union', 'difference' and 'intersection',
-- where sharing of some nodes is lost with 'mergeWithKey').
--
-- Please make sure you know what is going on when using 'mergeWithKey',
-- otherwise you can be surprised by unexpected code growth or even
-- corruption of the data structure.
--
-- When 'mergeWithKey' is given three arguments, it is inlined to the call
-- site. You should therefore use 'mergeWithKey' only to define your custom
-- combining functions. For example, you could define 'unionWithKey',
-- 'differenceWithKey' and 'intersectionWithKey' as
--
-- > myUnionWithKey f m1 m2 = mergeWithKey (\k x1 x2 -> Just (f k x1 x2)) id id m1 m2
-- > myDifferenceWithKey f m1 m2 = mergeWithKey f id (const empty) m1 m2
-- > myIntersectionWithKey f m1 m2 = mergeWithKey (\k x1 x2 -> Just (f k x1 x2)) (const empty) (const empty) m1 m2
--
-- When calling @'mergeWithKey' combine only1 only2@, a function combining two
-- 'Word64Map's is created, such that
--
-- * if a key is present in both maps, it is passed with both corresponding
--   values to the @combine@ function. Depending on the result, the key is either
--   present in the result with specified value, or is left out;
--
-- * a nonempty subtree present only in the first map is passed to @only1@ and
--   the output is added to the result;
--
-- * a nonempty subtree present only in the second map is passed to @only2@ and
--   the output is added to the result.
--
-- The @only1@ and @only2@ methods /must return a map with a subset (possibly empty) of the keys of the given map/.
-- The values can be modified arbitrarily. Most common variants of @only1@ and
-- @only2@ are 'id' and @'const' 'empty'@, but for example @'map' f@ or
-- @'filterWithKey' f@ could be used for any @f@.
mergeWithKey :: (Key -> a -> b -> Maybe c)
             -> (Word64Map a -> Word64Map c) -> (Word64Map b -> Word64Map c)
             -> Word64Map a -> Word64Map b -> Word64Map c
mergeWithKey f g1 g2 = mergeWithKey' bin combine g1 g2
  where
    -- We use the lambda form to avoid non-exhaustive pattern matches warning.
    combine = \(Tip k1 x1) (Tip _k2 x2) ->
      case f k1 x1 x2 of
        Nothing -> Nil
        Just !x -> Tip k1 x
    {-# INLINE combine #-}
{-# INLINE mergeWithKey #-}

{--------------------------------------------------------------------
  Min\/Max
--------------------------------------------------------------------}
-- | \(O(\log n)\). Update the value at the minimal key.
--
-- > updateMinWithKey (\ k a -> Just ((show k) ++ ":" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3,"3:b"), (5,"a")]
-- > updateMinWithKey (\ _ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
updateMinWithKey :: (Key -> a -> Maybe a) -> Word64Map a -> Word64Map a
updateMinWithKey f t =
  case t of
    -- NOTE(review): the m < 0 case is inherited from IntMap, where a negative
    -- mask marks a sign-bit split; presumably never taken for Word64 keys --
    -- confirm before relying on it.
    Bin p m l r | m < 0 -> binCheckRight p m l (go f r)
    _ -> go f t
  where
    go f' (Bin p m l r) = binCheckLeft p m (go f' l) r
    go f' (Tip k y) = case f' k y of
                        Just !y' -> Tip k y'
                        Nothing -> Nil
    go _ Nil = error "updateMinWithKey Nil"

-- | \(O(\log n)\). Update the value at the maximal key.
--
-- > updateMaxWithKey (\ k a -> Just ((show k) ++ ":" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3,"b"), (5,"5:a")]
-- > updateMaxWithKey (\ _ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 3 "b"
updateMaxWithKey :: (Key -> a -> Maybe a) -> Word64Map a -> Word64Map a
updateMaxWithKey f t =
  case t of
    Bin p m l r | m < 0 -> binCheckLeft p m (go f l) r
    _ -> go f t
  where
    go f' (Bin p m l r) = binCheckRight p m l (go f' r)
    go f' (Tip k y) = case f' k y of
                        Just !y' -> Tip k y'
                        Nothing -> Nil
    go _ Nil = error "updateMaxWithKey Nil"

-- | \(O(\log n)\).
Update the value at the maximal key.
--
-- > updateMax (\ a -> Just ("X" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3, "b"), (5, "Xa")]
-- > updateMax (\ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 3 "b"
updateMax :: (a -> Maybe a) -> Word64Map a -> Word64Map a
updateMax f = updateMaxWithKey (const f)

-- | \(O(\log n)\). Update the value at the minimal key.
--
-- > updateMin (\ a -> Just ("X" ++ a)) (fromList [(5,"a"), (3,"b")]) == fromList [(3, "Xb"), (5, "a")]
-- > updateMin (\ _ -> Nothing) (fromList [(5,"a"), (3,"b")]) == singleton 5 "a"
updateMin :: (a -> Maybe a) -> Word64Map a -> Word64Map a
updateMin f = updateMinWithKey (const f)

{--------------------------------------------------------------------
  Mapping
--------------------------------------------------------------------}
-- | \(O(n)\). Map a function over all values in the map.
--
-- > map (++ "x") (fromList [(5,"a"), (3,"b")]) == fromList [(3, "bx"), (5, "ax")]
map :: (a -> b) -> Word64Map a -> Word64Map b
map f = go
  where
    go (Bin p m l r) = Bin p m (go l) (go r)
    go (Tip k x)     = Tip k $! f x   -- strict map: force each new value
    go Nil           = Nil

#ifdef __GLASGOW_HASKELL__
{-# NOINLINE [1] map #-}
{-# RULES
"map/map" forall f g xs . map f (map g xs) = map (\x -> f $! g x) xs
"map/mapL" forall f g xs . map f (L.map g xs) = map (\x -> f (g x)) xs
 #-}
#endif

-- | \(O(n)\). Map a function over all values in the map.
--
-- > let f key x = (show key) ++ ":" ++ x
-- > mapWithKey f (fromList [(5,"a"), (3,"b")]) == fromList [(3, "3:b"), (5, "5:a")]
mapWithKey :: (Key -> a -> b) -> Word64Map a -> Word64Map b
mapWithKey f t
  = case t of
      Bin p m l r -> Bin p m (mapWithKey f l) (mapWithKey f r)
      Tip k x     -> Tip k $! f k x
      Nil         -> Nil

#ifdef __GLASGOW_HASKELL__
-- Pay close attention to strictness here. We need to force the
-- intermediate result for map f . map g, and we need to refrain
-- from forcing it for map f . L.map g, etc.
--
-- TODO Consider moving map and mapWithKey to Word64Map.Internal so we can write
-- non-orphan RULES for things like L.map f (map g xs). We'd need a new function
-- for this, and we'd have to pay attention to simplifier phases. Something like
--
-- lsmap :: (b -> c) -> (a -> b) -> Word64Map a -> Word64Map c
-- lsmap _ _ Nil = Nil
-- lsmap f g (Tip k x) = let !gx = g x in Tip k (f gx)
-- lsmap f g (Bin p m l r) = Bin p m (lsmap f g l) (lsmap f g r)
{-# NOINLINE [1] mapWithKey #-}
{-# RULES
"mapWithKey/mapWithKey" forall f g xs . mapWithKey f (mapWithKey g xs) =
  mapWithKey (\k a -> f k $! g k a) xs
"mapWithKey/mapWithKeyL" forall f g xs . mapWithKey f (L.mapWithKey g xs) =
  mapWithKey (\k a -> f k (g k a)) xs
"mapWithKey/map" forall f g xs . mapWithKey f (map g xs) =
  mapWithKey (\k a -> f k $! g a) xs
"mapWithKey/mapL" forall f g xs . mapWithKey f (L.map g xs) =
  mapWithKey (\k a -> f k (g a)) xs
"map/mapWithKey" forall f g xs . map f (mapWithKey g xs) =
  mapWithKey (\k a -> f $! g k a) xs
"map/mapWithKeyL" forall f g xs . map f (L.mapWithKey g xs) =
  mapWithKey (\k a -> f (g k a)) xs
 #-}
#endif

-- | \(O(n)\).
-- @'traverseWithKey' f s == 'fromList' <$> 'traverse' (\(k, v) -> (,) k <$> f k v) ('toList' m)@
-- That is, behaves exactly like a regular 'traverse' except that the traversing
-- function also has access to the key associated with a value.
--
-- > traverseWithKey (\k v -> if odd k then Just (succ v) else Nothing) (fromList [(1, 'a'), (5, 'e')]) == Just (fromList [(1, 'b'), (5, 'f')])
-- > traverseWithKey (\k v -> if odd k then Just (succ v) else Nothing) (fromList [(2, 'c')]) == Nothing
traverseWithKey :: Applicative t => (Key -> a -> t b) -> Word64Map a -> t (Word64Map b)
traverseWithKey f = go
  where
    go Nil = pure Nil
    go (Tip k v) = (\ !v' -> Tip k v') <$> f k v
    -- NOTE(review): the m < 0 case (right subtree first) comes from IntMap's
    -- negative-key layout; presumably unreachable for Word64 keys -- confirm.
    go (Bin p m l r)
      | m < 0     = liftA2 (flip (Bin p m)) (go r) (go l)
      | otherwise = liftA2 (Bin p m) (go l) (go r)
{-# INLINE traverseWithKey #-}

-- | \(O(n)\).
Traverse keys\/values and collect the 'Just' results.
--
-- @since 0.6.4
traverseMaybeWithKey :: Applicative f => (Key -> a -> f (Maybe b)) -> Word64Map a -> f (Word64Map b)
traverseMaybeWithKey f = go
  where
    go Nil = pure Nil
    -- 'Nothing' drops the entry; 'Just' keeps it with the value forced ($!).
    go (Tip k x) = maybe Nil (Tip k $!) <$> f k x
    go (Bin p m l r)
      | m < 0     = liftA2 (flip (bin p m)) (go r) (go l)
      | otherwise = liftA2 (bin p m) (go l) (go r)

-- | \(O(n)\). The function @'mapAccum'@ threads an accumulating
-- argument through the map in ascending order of keys.
--
-- > let f a b = (a ++ b, b ++ "X")
-- > mapAccum f "Everything: " (fromList [(5,"a"), (3,"b")]) == ("Everything: ba", fromList [(3, "bX"), (5, "aX")])
mapAccum :: (a -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccum f = mapAccumWithKey (\a' _ x -> f a' x)

-- | \(O(n)\). The function @'mapAccumWithKey'@ threads an accumulating
-- argument through the map in ascending order of keys.
--
-- > let f a k b = (a ++ " " ++ (show k) ++ "-" ++ b, b ++ "X")
-- > mapAccumWithKey f "Everything:" (fromList [(5,"a"), (3,"b")]) == ("Everything: 3-b 5-a", fromList [(3, "bX"), (5, "aX")])
mapAccumWithKey :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumWithKey f a t
  = mapAccumL f a t

-- | \(O(n)\). The function @'mapAccumL'@ threads an accumulating
-- argument through the map in ascending order of keys. Strict in
-- the accumulating argument and the both elements of the
-- result of the function.
mapAccumL :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumL f0 a0 t0 = toPair $ go f0 a0 t0
  where
    go f a t =
      case t of
        Bin p m l r
          | m < 0 ->
              let (a1 :*: r') = go f a r
                  (a2 :*: l') = go f a1 l
              in (a2 :*: Bin p m l' r')
          | otherwise ->
              let (a1 :*: l') = go f a l
                  (a2 :*: r') = go f a1 r
              in (a2 :*: Bin p m l' r')
        Tip k x -> let !(a',!x') = f a k x in (a' :*: Tip k x')
        Nil -> (a :*: Nil)

-- | \(O(n)\). The function @'mapAccumRWithKey'@ threads an accumulating
-- argument through the map in descending order of keys.
mapAccumRWithKey :: (a -> Key -> b -> (a,c)) -> a -> Word64Map b -> (a,Word64Map c)
mapAccumRWithKey f0 a0 t0 = toPair $ go f0 a0 t0
  where
    go f a t =
      case t of
        Bin p m l r
          -- NOTE(review): m < 0 mirrors IntMap's negative-key layout;
          -- presumably unreachable for Word64 keys -- confirm.
          | m < 0 ->
              let (a1 :*: l') = go f a l
                  (a2 :*: r') = go f a1 r
              in (a2 :*: Bin p m l' r')
          | otherwise ->
              let (a1 :*: r') = go f a r
                  (a2 :*: l') = go f a1 l
              in (a2 :*: Bin p m l' r')
        Tip k x -> let !(a',!x') = f a k x in (a' :*: Tip k x')
        Nil -> (a :*: Nil)

-- | \(O(n \log n)\).
-- @'mapKeysWith' c f s@ is the map obtained by applying @f@ to each key of @s@.
--
-- The size of the result may be smaller if @f@ maps two or more distinct
-- keys to the same new key. In this case the associated values will be
-- combined using @c@.
--
-- > mapKeysWith (++) (\ _ -> 1) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 1 "cdab"
-- > mapKeysWith (++) (\ _ -> 3) (fromList [(1,"b"), (2,"a"), (3,"d"), (4,"c")]) == singleton 3 "cdab"
mapKeysWith :: (a -> a -> a) -> (Key->Key) -> Word64Map a -> Word64Map a
mapKeysWith c f
  = fromListWith c . foldrWithKey (\k x xs -> (f k, x) : xs) []

{--------------------------------------------------------------------
  Filter
--------------------------------------------------------------------}
-- | \(O(n)\). Map values and collect the 'Just' results.
--
-- > let f x = if x == "a" then Just "new a" else Nothing
-- > mapMaybe f (fromList [(5,"a"), (3,"b")]) == singleton 5 "new a"
mapMaybe :: (a -> Maybe b) -> Word64Map a -> Word64Map b
mapMaybe f = mapMaybeWithKey (\_ x -> f x)

-- | \(O(n)\). Map keys\/values and collect the 'Just' results.
--
-- > let f k _ = if k < 5 then Just ("key : " ++ (show k)) else Nothing
-- > mapMaybeWithKey f (fromList [(5,"a"), (3,"b")]) == singleton 3 "key : 3"
mapMaybeWithKey :: (Key -> a -> Maybe b) -> Word64Map a -> Word64Map b
-- 'bin' (rather than 'Bin') collapses nodes whose children were dropped.
mapMaybeWithKey f (Bin p m l r)
  = bin p m (mapMaybeWithKey f l) (mapMaybeWithKey f r)
mapMaybeWithKey f (Tip k x) = case f k x of
  Just !y -> Tip k y
  Nothing -> Nil
mapMaybeWithKey _ Nil = Nil

-- | \(O(n)\).
Map values and separate the 'Left' and 'Right' results.
--
-- > let f a = if a < "c" then Left a else Right a
-- > mapEither f (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (fromList [(3,"b"), (5,"a")], fromList [(1,"x"), (7,"z")])
-- >
-- > mapEither (\ a -> Right a) (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (empty, fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
mapEither :: (a -> Either b c) -> Word64Map a -> (Word64Map b, Word64Map c)
mapEither f m
  = mapEitherWithKey (\_ x -> f x) m

-- | \(O(n)\). Map keys\/values and separate the 'Left' and 'Right' results.
--
-- > let f k a = if k < 5 then Left (k * 2) else Right (a ++ a)
-- > mapEitherWithKey f (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (fromList [(1,2), (3,6)], fromList [(5,"aa"), (7,"zz")])
-- >
-- > mapEitherWithKey (\_ a -> Right a) (fromList [(5,"a"), (3,"b"), (1,"x"), (7,"z")])
-- >     == (empty, fromList [(1,"x"), (3,"b"), (5,"a"), (7,"z")])
mapEitherWithKey :: (Key -> a -> Either b c) -> Word64Map a -> (Word64Map b, Word64Map c)
mapEitherWithKey f0 t0 = toPair $ go f0 t0
  where
    -- One traversal builds both result maps, threaded as strict pairs.
    go f (Bin p m l r) =
      bin p m l1 r1 :*: bin p m l2 r2
      where
        (l1 :*: l2) = go f l
        (r1 :*: r2) = go f r
    go f (Tip k x) = case f k x of
      Left !y -> (Tip k y :*: Nil)
      Right !z -> (Nil :*: Tip k z)
    go _ Nil = (Nil :*: Nil)

{--------------------------------------------------------------------
  Conversions
--------------------------------------------------------------------}
-- | \(O(n)\). Build a map from a set of keys and a function which for each key
-- computes its value.
--
-- > fromSet (\k -> replicate k 'a') (Data.Word64Set.fromList [3, 5]) == fromList [(5,"aaaaa"), (3,"aaa")]
-- > fromSet undefined Data.Word64Set.empty == empty
fromSet :: (Key -> a) -> Word64Set.Word64Set -> Word64Map a
fromSet _ Word64Set.Nil = Nil
fromSet f (Word64Set.Bin p m l r) = Bin p m (fromSet f l) (fromSet f r)
fromSet f (Word64Set.Tip kx bm) = buildTree f kx bm (Word64Set.suffixBitMask + 1)
  where
    -- This is slightly complicated, as we have to convert the dense
    -- representation of Word64Set into tree representation of Word64Map.
    --
    -- We are given a nonzero bit mask 'bmask' of 'bits' bits with prefix 'prefix'.
    -- We split bmask into halves corresponding to left and right subtree.
    -- If they are both nonempty, we create a Bin node, otherwise exactly
    -- one of them is nonempty and we construct the Word64Map from that half.
    buildTree g !prefix !bmask bits = case bits of
      0 -> Tip prefix $! g prefix
      _ -> case intFromNat ((natFromInt bits) `shiftRL` 1) of
        bits2
          -- lower half of the bitmap is empty: only the right half remains
          | bmask .&. ((1 `shiftLL` fromIntegral bits2) - 1) == 0 ->
              buildTree g (prefix + bits2) (bmask `shiftRL` fromIntegral bits2) bits2
          -- upper half of the bitmap is empty: only the left half remains
          | (bmask `shiftRL` fromIntegral bits2) .&. ((1 `shiftLL` fromIntegral bits2) - 1) == 0 ->
              buildTree g prefix bmask bits2
          | otherwise ->
              Bin prefix bits2
                (buildTree g prefix bmask bits2)
                (buildTree g (prefix + bits2) (bmask `shiftRL` fromIntegral bits2) bits2)

{--------------------------------------------------------------------
  Lists
--------------------------------------------------------------------}
-- | \(O(n \min(n,W))\). Create a map from a list of key\/value pairs.
--
-- > fromList [] == empty
-- > fromList [(5,"a"), (3,"b"), (5, "c")] == fromList [(5,"c"), (3,"b")]
-- > fromList [(5,"c"), (3,"b"), (5, "a")] == fromList [(5,"a"), (3,"b")]
fromList :: [(Key,a)] -> Word64Map a
fromList xs
  = Foldable.foldl' ins empty xs
  where
    ins t (k,x) = insert k x t

-- | \(O(n \min(n,W))\). Create a map from a list of key\/value pairs with a combining function.
See also 'fromAscListWith'.
--
-- > fromListWith (++) [(5,"a"), (5,"b"), (3,"b"), (3,"a"), (5,"a")] == fromList [(3, "ab"), (5, "aba")]
-- > fromListWith (++) [] == empty
fromListWith :: (a -> a -> a) -> [(Key,a)] -> Word64Map a
fromListWith f xs
  = fromListWithKey (\_ x y -> f x y) xs

-- | \(O(n \min(n,W))\). Build a map from a list of key\/value pairs with a combining function. See also 'fromAscListWithKey'.
--
-- > let f key new_value old_value = show key ++ ":" ++ new_value ++ "|" ++ old_value
-- > fromListWithKey f [(5,"a"), (5,"b"), (3,"b"), (3,"a"), (5,"c")] == fromList [(3, "3:a|b"), (5, "5:c|5:b|a")]
-- > fromListWithKey f [] == empty
fromListWithKey :: (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a
fromListWithKey f xs
  = Foldable.foldl' ins empty xs
  where
    -- Later list entries are passed to f as the *new* value.
    ins t (k,x) = insertWithKey f k x t

-- | \(O(n)\). Build a map from a list of key\/value pairs where
-- the keys are in ascending order.
--
-- > fromAscList [(3,"b"), (5,"a")] == fromList [(3, "b"), (5, "a")]
-- > fromAscList [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "b")]
fromAscList :: [(Key,a)] -> Word64Map a
fromAscList = fromMonoListWithKey Nondistinct (\_ x _ -> x)
{-# NOINLINE fromAscList #-}

-- | \(O(n)\). Build a map from a list of key\/value pairs where
-- the keys are in ascending order, with a combining function on equal keys.
-- /The precondition (input list is ascending) is not checked./
--
-- > fromAscListWith (++) [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "ba")]
fromAscListWith :: (a -> a -> a) -> [(Key,a)] -> Word64Map a
fromAscListWith f = fromMonoListWithKey Nondistinct (\_ x y -> f x y)
{-# NOINLINE fromAscListWith #-}

-- | \(O(n)\). Build a map from a list of key\/value pairs where
-- the keys are in ascending order, with a combining function on equal keys.
-- /The precondition (input list is ascending) is not checked./ -- -- > fromAscListWith (++) [(3,"b"), (5,"a"), (5,"b")] == fromList [(3, "b"), (5, "ba")] fromAscListWithKey :: (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a fromAscListWithKey f = fromMonoListWithKey Nondistinct f {-# NOINLINE fromAscListWithKey #-} -- | \(O(n)\). Build a map from a list of key\/value pairs where -- the keys are in ascending order and all distinct. -- /The precondition (input list is strictly ascending) is not checked./ -- -- > fromDistinctAscList [(3,"b"), (5,"a")] == fromList [(3, "b"), (5, "a")] fromDistinctAscList :: [(Key,a)] -> Word64Map a fromDistinctAscList = fromMonoListWithKey Distinct (\_ x _ -> x) {-# NOINLINE fromDistinctAscList #-} -- | \(O(n)\). Build a map from a list of key\/value pairs with monotonic keys -- and a combining function. -- -- The precise conditions under which this function works are subtle: -- For any branch mask, keys with the same prefix w.r.t. the branch -- mask must occur consecutively in the list. fromMonoListWithKey :: Distinct -> (Key -> a -> a -> a) -> [(Key,a)] -> Word64Map a fromMonoListWithKey distinct f = go where go [] = Nil go ((kx,vx) : zs1) = addAll' kx vx zs1 -- `addAll'` collects all keys equal to `kx` into a single value, -- and then proceeds with `addAll`. addAll' !kx vx [] = Tip kx $! vx addAll' !kx vx ((ky,vy) : zs) | Nondistinct <- distinct, kx == ky = let !v = f kx vy vx in addAll' ky v zs -- inlined: | otherwise = addAll kx (Tip kx $! vx) (ky : zs) | m <- branchMask kx ky , Inserted ty zs' <- addMany' m ky vy zs = addAll kx (linkWithMask m ky ty {-kx-} (Tip kx $! 
vx)) zs' -- for `addAll` and `addMany`, kx is /a/ key inside the tree `tx` -- `addAll` consumes the rest of the list, adding to the tree `tx` addAll !_kx !tx [] = tx addAll !kx !tx ((ky,vy) : zs) | m <- branchMask kx ky , Inserted ty zs' <- addMany' m ky vy zs = addAll kx (linkWithMask m ky ty {-kx-} tx) zs' -- `addMany'` is similar to `addAll'`, but proceeds with `addMany'`. addMany' !_m !kx vx [] = Inserted (Tip kx $! vx) [] addMany' !m !kx vx zs0@((ky,vy) : zs) | Nondistinct <- distinct, kx == ky = let !v = f kx vy vx in addMany' m ky v zs -- inlined: | otherwise = addMany m kx (Tip kx $! vx) (ky : zs) | mask kx m /= mask ky m = Inserted (Tip kx $! vx) zs0 | mxy <- branchMask kx ky , Inserted ty zs' <- addMany' mxy ky vy zs = addMany m kx (linkWithMask mxy ky ty {-kx-} (Tip kx $! vx)) zs' -- `addAll` adds to `tx` all keys whose prefix w.r.t. `m` agrees with `kx`. addMany !_m !_kx tx [] = Inserted tx [] addMany !m !kx tx zs0@((ky,vy) : zs) | mask kx m /= mask ky m = Inserted tx zs0 | mxy <- branchMask kx ky , Inserted ty zs' <- addMany' mxy ky vy zs = addMany m kx (linkWithMask mxy ky ty {-kx-} tx) zs' {-# INLINE fromMonoListWithKey #-} data Inserted a = Inserted !(Word64Map a) ![(Key,a)] data Distinct = Distinct | Nondistinct ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Set.hs0000644000000000000000000001071407346545000020657 0ustar0000000000000000{-# LANGUAGE CPP #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Set -- Copyright : (c) Daan Leijen 2002 -- (c) Joachim Breitner 2011 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- -- = Finite Int Sets -- -- The @'Word64Set'@ type represents a set of elements of type @Int@. -- -- For a walkthrough of the most commonly used functions see their -- . -- -- These modules are intended to be imported qualified, to avoid name -- clashes with Prelude functions, e.g. 
-- -- > import Data.Word64Set (Word64Set) -- > import qualified Data.Word64Set as Word64Set -- -- -- == Performance information -- -- Many operations have a worst-case complexity of \(O(\min(n,W))\). -- This means that the operation can become linear in the number of -- elements with a maximum of \(W\) -- the number of bits in an 'Int' -- (32 or 64). -- -- -- == Implementation -- -- The implementation is based on /big-endian patricia trees/. This data -- structure performs especially well on binary operations like 'union' -- and 'intersection'. However, my benchmarks show that it is also -- (much) faster on insertions and deletions when compared to a generic -- size-balanced set implementation (see "Data.Set"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- -- Additionally, this implementation places bitmaps in the leaves of the tree. -- Their size is the natural size of a machine word (32 or 64 bits) and greatly -- reduces the memory footprint and execution times for dense sets, e.g. sets -- where it is likely that many values lie close to each other. The asymptotics -- are not affected by this optimization. -- ----------------------------------------------------------------------------- module GHC.Data.Word64Set ( -- * Strictness properties -- $strictness -- * Set type #if !defined(TESTING) Word64Set -- instance Eq,Show #else Word64Set(..) 
-- instance Eq,Show #endif , Key -- * Construction , empty , singleton , fromList , fromAscList , fromDistinctAscList -- * Insertion , insert -- * Deletion , delete -- * Generalized insertion/deletion , alterF -- * Query , member , notMember , lookupLT , lookupGT , lookupLE , lookupGE , WS.null , size , isSubsetOf , isProperSubsetOf , disjoint -- * Combine , union , unions , difference , (\\) , intersection -- * Filter , WS.filter , partition , takeWhileAntitone , dropWhileAntitone , spanAntitone , split , splitMember , splitRoot -- * Map , WS.map , mapMonotonic -- * Folds , WS.foldr , WS.foldl -- ** Strict folds , foldr' , foldl' -- ** Legacy folds , fold -- * Min\/Max , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , maxView , minView -- * Conversion -- ** List , elems , toList , toAscList , toDescList -- * Debugging , showTree , showTreeWith #if defined(TESTING) -- * Internals , match #endif ) where import GHC.Data.Word64Set.Internal as WS -- $strictness -- -- This module satisfies the following strictness property: -- -- * Key arguments are evaluated to WHNF -- -- Here are some examples that illustrate the property: -- -- > delete undefined s == undefined ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Set/0000755000000000000000000000000007346545000020320 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Data/Word64Set/Internal.hs0000644000000000000000000016742607346545000022450 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_HADDOCK not-home #-} ----------------------------------------------------------------------------- -- | -- Module : Data.Word64Set.Internal -- Copyright : (c) Daan Leijen 2002 -- (c) Joachim Breitner 2011 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable -- -- = WARNING -- -- This module is considered __internal__. -- -- The Package Versioning Policy __does not apply__. 
-- -- The contents of this module may change __in any way whatsoever__ -- and __without any warning__ between minor versions of this package. -- -- Authors importing this module are expected to track development -- closely. -- -- = Description -- -- An efficient implementation of integer sets. -- -- These modules are intended to be imported qualified, to avoid name -- clashes with Prelude functions, e.g. -- -- > import Data.Word64Set (Word64Set) -- > import qualified Data.Word64Set as Word64Set -- -- The implementation is based on /big-endian patricia trees/. This data -- structure performs especially well on binary operations like 'union' -- and 'intersection'. However, my benchmarks show that it is also -- (much) faster on insertions and deletions when compared to a generic -- size-balanced set implementation (see "Data.Set"). -- -- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\", -- Workshop on ML, September 1998, pages 77-86, -- -- -- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve Information Coded In Alphanumeric/\", -- Journal of the ACM, 15(4), October 1968, pages 514-534. -- -- Additionally, this implementation places bitmaps in the leaves of the tree. -- Their size is the natural size of a machine word (32 or 64 bits) and greatly -- reduce memory footprint and execution times for dense sets, e.g. sets where -- it is likely that many values lie close to each other. The asymptotics are -- not affected by this optimization. -- -- Many operations have a worst-case complexity of \(O(\min(n,W))\). -- This means that the operation can become linear in the number of -- elements with a maximum of \(W\) -- the number of bits in an 'Int' -- (32 or 64). -- -- @since 0.5.9 ----------------------------------------------------------------------------- -- [Note: INLINE bit fiddling] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- It is essential that the bit fiddling functions like mask, zero, branchMask -- etc are inlined. 
If they do not, the memory allocation skyrockets. The GHC -- usually gets it right, but it is disastrous if it does not. Therefore we -- explicitly mark these functions INLINE. -- [Note: Local 'go' functions and capturing] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Care must be taken when using 'go' function which captures an argument. -- Sometimes (for example when the argument is passed to a data constructor, -- as in insert), GHC heap-allocates more than necessary. Therefore C-- code -- must be checked for increased allocation when creating and modifying such -- functions. -- [Note: Order of constructors] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The order of constructors of Word64Set matters when considering performance. -- Currently in GHC 7.0, when type has 3 constructors, they are matched from -- the first to the last -- the best performance is achieved when the -- constructors are ordered by frequency. -- On GHC 7.0, reordering constructors from Nil | Tip | Bin to Bin | Tip | Nil -- improves the benchmark by circa 10%. 
module GHC.Data.Word64Set.Internal ( -- * Set type Word64Set(..), Key -- instance Eq,Show , Prefix, Mask, BitMap -- * Operators , (\\) -- * Query , null , size , member , notMember , lookupLT , lookupGT , lookupLE , lookupGE , isSubsetOf , isProperSubsetOf , disjoint -- * Construction , empty , singleton , insert , delete , alterF -- * Combine , union , unions , difference , intersection -- * Filter , filter , partition , takeWhileAntitone , dropWhileAntitone , spanAntitone , split , splitMember , splitRoot -- * Map , map , mapMonotonic -- * Folds , foldr , foldl -- ** Strict folds , foldr' , foldl' -- ** Legacy folds , fold -- * Min\/Max , findMin , findMax , deleteMin , deleteMax , deleteFindMin , deleteFindMax , maxView , minView -- * Conversion -- ** List , elems , toList , fromList -- ** Ordered list , toAscList , toDescList , fromAscList , fromDistinctAscList -- * Debugging , showTree , showTreeWith -- * Internals , match , suffixBitMask , prefixBitMask , bitmapOf , zero ) where import Control.Applicative (Const(..)) import Control.DeepSeq (NFData(rnf)) import Data.Bits import qualified Data.List as List import Data.Maybe (fromMaybe) import Data.Semigroup (Semigroup(stimes, (<>)), stimesIdempotentMonoid) import GHC.Prelude.Basic hiding (filter, foldr, foldl, foldl', null, map) import Data.Word ( Word64 ) import GHC.Utils.Containers.Internal.BitUtil import GHC.Utils.Containers.Internal.StrictPair #if __GLASGOW_HASKELL__ import Data.Data (Data(..), Constr, mkConstr, constrIndex, DataType, mkDataType) import qualified Data.Data import Text.Read #endif #if __GLASGOW_HASKELL__ import qualified GHC.Exts #endif import Data.Functor.Identity (Identity(..)) infixl 9 \\{-This comment teaches CPP correct behaviour -} -- A "Nat" is a 64 bit machine word type Nat = Word64 natFromInt :: Word64 -> Nat natFromInt = id {-# INLINE natFromInt #-} intFromNat :: Nat -> Word64 intFromNat = id {-# INLINE intFromNat #-} 
{-------------------------------------------------------------------- Operators --------------------------------------------------------------------} -- | \(O(n+m)\). See 'difference'. (\\) :: Word64Set -> Word64Set -> Word64Set m1 \\ m2 = difference m1 m2 {-------------------------------------------------------------------- Types --------------------------------------------------------------------} -- | A set of integers. -- See Note: Order of constructors data Word64Set = Bin {-# UNPACK #-} !Prefix {-# UNPACK #-} !Mask !Word64Set !Word64Set -- Invariant: Nil is never found as a child of Bin. -- Invariant: The Mask is a power of 2. It is the largest bit position at which -- two elements of the set differ. -- Invariant: Prefix is the common high-order bits that all elements share to -- the left of the Mask bit. -- Invariant: In Bin prefix mask left right, left consists of the elements that -- don't have the mask bit set; right is all the elements that do. | Tip {-# UNPACK #-} !Prefix {-# UNPACK #-} !BitMap -- Invariant: The Prefix is zero for the last 6 bits. The values of the set -- represented by a tip are the prefix plus the indices of the set -- bits in the bit map. | Nil -- A number stored in a set is stored as -- * Prefix (all but last 6 bits) and -- * BitMap (last 6 bits stored as a bitmask) -- Last 6 bits are called a Suffix. type Prefix = Word64 type Mask = Word64 type BitMap = Word64 type Key = Word64 instance Monoid Word64Set where mempty = empty mconcat = unions mappend = (<>) -- | @since 0.5.7 instance Semigroup Word64Set where (<>) = union stimes = stimesIdempotentMonoid #if __GLASGOW_HASKELL__ {-------------------------------------------------------------------- A Data instance --------------------------------------------------------------------} -- This instance preserves data abstraction at the cost of inefficiency. -- We provide limited reflection services for the sake of data abstraction. 
instance Data Word64Set where gfoldl f z is = z fromList `f` (toList is) toConstr _ = fromListConstr gunfold k z c = case constrIndex c of 1 -> k (z fromList) _ -> error "gunfold" dataTypeOf _ = intSetDataType fromListConstr :: Constr fromListConstr = mkConstr intSetDataType "fromList" [] Data.Data.Prefix intSetDataType :: DataType intSetDataType = mkDataType "Data.Word64Set.Internal.Word64Set" [fromListConstr] #endif {-------------------------------------------------------------------- Query --------------------------------------------------------------------} -- | \(O(1)\). Is the set empty? null :: Word64Set -> Bool null Nil = True null _ = False {-# INLINE null #-} -- | \(O(n)\). Cardinality of the set. size :: Word64Set -> Int size = go 0 where go !acc (Bin _ _ l r) = go (go acc l) r go acc (Tip _ bm) = acc + bitcount 0 bm go acc Nil = acc -- | \(O(\min(n,W))\). Is the value a member of the set? -- See Note: Local 'go' functions and capturing. member :: Key -> Word64Set -> Bool member !x = go where go (Bin p m l r) | nomatch x p m = False | zero x m = go l | otherwise = go r go (Tip y bm) = prefixOf x == y && bitmapOf x .&. bm /= 0 go Nil = False -- | \(O(\min(n,W))\). Is the element not in the set? notMember :: Key -> Word64Set -> Bool notMember k = not . member k -- | \(O(\min(n,W))\). Find largest element smaller than the given one. -- -- > lookupLT 3 (fromList [3, 5]) == Nothing -- > lookupLT 5 (fromList [3, 5]) == Just 3 -- See Note: Local 'go' functions and capturing. lookupLT :: Key -> Word64Set -> Maybe Key lookupLT !x t = case t of Bin _ m l r | m < 0 -> if x >= 0 then go r l else go Nil r _ -> go Nil t where go def (Bin p m l r) | nomatch x p m = if x < p then unsafeFindMax def else unsafeFindMax r | zero x m = go def l | otherwise = go l r go def (Tip kx bm) | prefixOf x > kx = Just $ kx + highestBitSet bm | prefixOf x == kx && maskLT /= 0 = Just $ kx + highestBitSet maskLT | otherwise = unsafeFindMax def where maskLT = (bitmapOf x - 1) .&. 
bm go def Nil = unsafeFindMax def -- | \(O(\min(n,W))\). Find smallest element greater than the given one. -- -- > lookupGT 4 (fromList [3, 5]) == Just 5 -- > lookupGT 5 (fromList [3, 5]) == Nothing -- See Note: Local 'go' functions and capturing. lookupGT :: Key -> Word64Set -> Maybe Key lookupGT !x t = case t of Bin _ m l r | m < 0 -> if x >= 0 then go Nil l else go l r _ -> go Nil t where go def (Bin p m l r) | nomatch x p m = if x < p then unsafeFindMin l else unsafeFindMin def | zero x m = go r l | otherwise = go def r go def (Tip kx bm) | prefixOf x < kx = Just $ kx + lowestBitSet bm | prefixOf x == kx && maskGT /= 0 = Just $ kx + lowestBitSet maskGT | otherwise = unsafeFindMin def where maskGT = (- ((bitmapOf x) `shiftLL` 1)) .&. bm go def Nil = unsafeFindMin def -- | \(O(\min(n,W))\). Find largest element smaller or equal to the given one. -- -- > lookupLE 2 (fromList [3, 5]) == Nothing -- > lookupLE 4 (fromList [3, 5]) == Just 3 -- > lookupLE 5 (fromList [3, 5]) == Just 5 -- See Note: Local 'go' functions and capturing. lookupLE :: Key -> Word64Set -> Maybe Key lookupLE !x t = case t of Bin _ m l r | m < 0 -> if x >= 0 then go r l else go Nil r _ -> go Nil t where go def (Bin p m l r) | nomatch x p m = if x < p then unsafeFindMax def else unsafeFindMax r | zero x m = go def l | otherwise = go l r go def (Tip kx bm) | prefixOf x > kx = Just $ kx + highestBitSet bm | prefixOf x == kx && maskLE /= 0 = Just $ kx + highestBitSet maskLE | otherwise = unsafeFindMax def where maskLE = (((bitmapOf x) `shiftLL` 1) - 1) .&. bm go def Nil = unsafeFindMax def -- | \(O(\min(n,W))\). Find smallest element greater or equal to the given one. -- -- > lookupGE 3 (fromList [3, 5]) == Just 3 -- > lookupGE 4 (fromList [3, 5]) == Just 5 -- > lookupGE 6 (fromList [3, 5]) == Nothing -- See Note: Local 'go' functions and capturing. 
lookupGE :: Key -> Word64Set -> Maybe Key lookupGE !x t = case t of Bin _ m l r | m < 0 -> if x >= 0 then go Nil l else go l r _ -> go Nil t where go def (Bin p m l r) | nomatch x p m = if x < p then unsafeFindMin l else unsafeFindMin def | zero x m = go r l | otherwise = go def r go def (Tip kx bm) | prefixOf x < kx = Just $ kx + lowestBitSet bm | prefixOf x == kx && maskGE /= 0 = Just $ kx + lowestBitSet maskGE | otherwise = unsafeFindMin def where maskGE = (- (bitmapOf x)) .&. bm go def Nil = unsafeFindMin def -- Helper function for lookupGE and lookupGT. It assumes that if a Bin node is -- given, it has m > 0. unsafeFindMin :: Word64Set -> Maybe Key unsafeFindMin Nil = Nothing unsafeFindMin (Tip kx bm) = Just $ kx + lowestBitSet bm unsafeFindMin (Bin _ _ l _) = unsafeFindMin l -- Helper function for lookupLE and lookupLT. It assumes that if a Bin node is -- given, it has m > 0. unsafeFindMax :: Word64Set -> Maybe Key unsafeFindMax Nil = Nothing unsafeFindMax (Tip kx bm) = Just $ kx + highestBitSet bm unsafeFindMax (Bin _ _ _ r) = unsafeFindMax r {-------------------------------------------------------------------- Construction --------------------------------------------------------------------} -- | \(O(1)\). The empty set. empty :: Word64Set empty = Nil {-# INLINE empty #-} -- | \(O(1)\). A set of one element. singleton :: Key -> Word64Set singleton x = Tip (prefixOf x) (bitmapOf x) {-# INLINE singleton #-} {-------------------------------------------------------------------- Insert --------------------------------------------------------------------} -- | \(O(\min(n,W))\). Add a value to the set. There is no left- or right bias for -- Word64Sets. insert :: Key -> Word64Set -> Word64Set insert !x = insertBM (prefixOf x) (bitmapOf x) -- Helper function for insert and union. 
insertBM :: Prefix -> BitMap -> Word64Set -> Word64Set insertBM !kx !bm t@(Bin p m l r) | nomatch kx p m = link kx (Tip kx bm) p t | zero kx m = Bin p m (insertBM kx bm l) r | otherwise = Bin p m l (insertBM kx bm r) insertBM kx bm t@(Tip kx' bm') | kx' == kx = Tip kx' (bm .|. bm') | otherwise = link kx (Tip kx bm) kx' t insertBM kx bm Nil = Tip kx bm -- | \(O(\min(n,W))\). Delete a value in the set. Returns the -- original set when the value was not present. delete :: Key -> Word64Set -> Word64Set delete !x = deleteBM (prefixOf x) (bitmapOf x) -- Deletes all values mentioned in the BitMap from the set. -- Helper function for delete and difference. deleteBM :: Prefix -> BitMap -> Word64Set -> Word64Set deleteBM !kx !bm t@(Bin p m l r) | nomatch kx p m = t | zero kx m = bin p m (deleteBM kx bm l) r | otherwise = bin p m l (deleteBM kx bm r) deleteBM kx bm t@(Tip kx' bm') | kx' == kx = tip kx (bm' .&. complement bm) | otherwise = t deleteBM _ _ Nil = Nil -- | \(O(\min(n,W))\). @('alterF' f x s)@ can delete or insert @x@ in @s@ depending -- on whether it is already present in @s@. -- -- In short: -- -- @ -- 'member' x \<$\> 'alterF' f x s = f ('member' x s) -- @ -- -- Note: 'alterF' is a variant of the @at@ combinator from "Control.Lens.At". -- -- @since 0.6.3.1 alterF :: Functor f => (Bool -> f Bool) -> Key -> Word64Set -> f Word64Set alterF f k s = fmap choose (f member_) where member_ = member k s (inserted, deleted) | member_ = (s , delete k s) | otherwise = (insert k s, s ) choose True = inserted choose False = deleted #ifndef __GLASGOW_HASKELL__ {-# INLINE alterF #-} #else {-# INLINABLE [2] alterF #-} {-# RULES "alterF/Const" forall k (f :: Bool -> Const a Bool) . alterF f k = \s -> Const . getConst . 
f $ member k s #-} #endif {-# SPECIALIZE alterF :: (Bool -> Identity Bool) -> Key -> Word64Set -> Identity Word64Set #-} {-------------------------------------------------------------------- Union --------------------------------------------------------------------} -- | The union of a list of sets. {-# INLINABLE unions #-} unions :: [Word64Set] -> Word64Set unions = List.foldl' union empty -- | \(O(n+m)\). The union of two sets. union :: Word64Set -> Word64Set -> Word64Set union t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2) | shorter m1 m2 = union1 | shorter m2 m1 = union2 | p1 == p2 = Bin p1 m1 (union l1 l2) (union r1 r2) | otherwise = link p1 t1 p2 t2 where union1 | nomatch p2 p1 m1 = link p1 t1 p2 t2 | zero p2 m1 = Bin p1 m1 (union l1 t2) r1 | otherwise = Bin p1 m1 l1 (union r1 t2) union2 | nomatch p1 p2 m2 = link p1 t1 p2 t2 | zero p1 m2 = Bin p2 m2 (union t1 l2) r2 | otherwise = Bin p2 m2 l2 (union t1 r2) union t@(Bin _ _ _ _) (Tip kx bm) = insertBM kx bm t union t@(Bin _ _ _ _) Nil = t union (Tip kx bm) t = insertBM kx bm t union Nil t = t {-------------------------------------------------------------------- Difference --------------------------------------------------------------------} -- | \(O(n+m)\). Difference between two sets. 
difference :: Word64Set -> Word64Set -> Word64Set difference t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2) | shorter m1 m2 = difference1 | shorter m2 m1 = difference2 | p1 == p2 = bin p1 m1 (difference l1 l2) (difference r1 r2) | otherwise = t1 where difference1 | nomatch p2 p1 m1 = t1 | zero p2 m1 = bin p1 m1 (difference l1 t2) r1 | otherwise = bin p1 m1 l1 (difference r1 t2) difference2 | nomatch p1 p2 m2 = t1 | zero p1 m2 = difference t1 l2 | otherwise = difference t1 r2 difference t@(Bin _ _ _ _) (Tip kx bm) = deleteBM kx bm t difference t@(Bin _ _ _ _) Nil = t difference t1@(Tip kx bm) t2 = differenceTip t2 where differenceTip (Bin p2 m2 l2 r2) | nomatch kx p2 m2 = t1 | zero kx m2 = differenceTip l2 | otherwise = differenceTip r2 differenceTip (Tip kx2 bm2) | kx == kx2 = tip kx (bm .&. complement bm2) | otherwise = t1 differenceTip Nil = t1 difference Nil _ = Nil {-------------------------------------------------------------------- Intersection --------------------------------------------------------------------} -- | \(O(n+m)\). The intersection of two sets. intersection :: Word64Set -> Word64Set -> Word64Set intersection t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2) | shorter m1 m2 = intersection1 | shorter m2 m1 = intersection2 | p1 == p2 = bin p1 m1 (intersection l1 l2) (intersection r1 r2) | otherwise = Nil where intersection1 | nomatch p2 p1 m1 = Nil | zero p2 m1 = intersection l1 t2 | otherwise = intersection r1 t2 intersection2 | nomatch p1 p2 m2 = Nil | zero p1 m2 = intersection t1 l2 | otherwise = intersection t1 r2 intersection t1@(Bin _ _ _ _) (Tip kx2 bm2) = intersectBM t1 where intersectBM (Bin p1 m1 l1 r1) | nomatch kx2 p1 m1 = Nil | zero kx2 m1 = intersectBM l1 | otherwise = intersectBM r1 intersectBM (Tip kx1 bm1) | kx1 == kx2 = tip kx1 (bm1 .&. 
bm2) | otherwise = Nil intersectBM Nil = Nil intersection (Bin _ _ _ _) Nil = Nil intersection (Tip kx1 bm1) t2 = intersectBM t2 where intersectBM (Bin p2 m2 l2 r2) | nomatch kx1 p2 m2 = Nil | zero kx1 m2 = intersectBM l2 | otherwise = intersectBM r2 intersectBM (Tip kx2 bm2) | kx1 == kx2 = tip kx1 (bm1 .&. bm2) | otherwise = Nil intersectBM Nil = Nil intersection Nil _ = Nil {-------------------------------------------------------------------- Subset --------------------------------------------------------------------} -- | \(O(n+m)\). Is this a proper subset? (ie. a subset but not equal). isProperSubsetOf :: Word64Set -> Word64Set -> Bool isProperSubsetOf t1 t2 = case subsetCmp t1 t2 of LT -> True _ -> False subsetCmp :: Word64Set -> Word64Set -> Ordering subsetCmp t1@(Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2) | shorter m1 m2 = GT | shorter m2 m1 = case subsetCmpLt of GT -> GT _ -> LT | p1 == p2 = subsetCmpEq | otherwise = GT -- disjoint where subsetCmpLt | nomatch p1 p2 m2 = GT | zero p1 m2 = subsetCmp t1 l2 | otherwise = subsetCmp t1 r2 subsetCmpEq = case (subsetCmp l1 l2, subsetCmp r1 r2) of (GT,_ ) -> GT (_ ,GT) -> GT (EQ,EQ) -> EQ _ -> LT subsetCmp (Bin _ _ _ _) _ = GT subsetCmp (Tip kx1 bm1) (Tip kx2 bm2) | kx1 /= kx2 = GT -- disjoint | bm1 == bm2 = EQ | bm1 .&. complement bm2 == 0 = LT | otherwise = GT subsetCmp t1@(Tip kx _) (Bin p m l r) | nomatch kx p m = GT | zero kx m = case subsetCmp t1 l of GT -> GT ; _ -> LT | otherwise = case subsetCmp t1 r of GT -> GT ; _ -> LT subsetCmp (Tip _ _) Nil = GT -- disjoint subsetCmp Nil Nil = EQ subsetCmp Nil _ = LT -- | \(O(n+m)\). Is this a subset? -- @(s1 \`isSubsetOf\` s2)@ tells whether @s1@ is a subset of @s2@. 
isSubsetOf :: Word64Set -> Word64Set -> Bool isSubsetOf t1@(Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2) | shorter m1 m2 = False | shorter m2 m1 = match p1 p2 m2 && (if zero p1 m2 then isSubsetOf t1 l2 else isSubsetOf t1 r2) | otherwise = (p1==p2) && isSubsetOf l1 l2 && isSubsetOf r1 r2 isSubsetOf (Bin _ _ _ _) _ = False isSubsetOf (Tip kx1 bm1) (Tip kx2 bm2) = kx1 == kx2 && bm1 .&. complement bm2 == 0 isSubsetOf t1@(Tip kx _) (Bin p m l r) | nomatch kx p m = False | zero kx m = isSubsetOf t1 l | otherwise = isSubsetOf t1 r isSubsetOf (Tip _ _) Nil = False isSubsetOf Nil _ = True {-------------------------------------------------------------------- Disjoint --------------------------------------------------------------------} -- | \(O(n+m)\). Check whether two sets are disjoint (i.e. their intersection -- is empty). -- -- > disjoint (fromList [2,4,6]) (fromList [1,3]) == True -- > disjoint (fromList [2,4,6,8]) (fromList [2,3,5,7]) == False -- > disjoint (fromList [1,2]) (fromList [1,2,3,4]) == False -- > disjoint (fromList []) (fromList []) == True -- -- @since 0.5.11 disjoint :: Word64Set -> Word64Set -> Bool disjoint t1@(Bin p1 m1 l1 r1) t2@(Bin p2 m2 l2 r2) | shorter m1 m2 = disjoint1 | shorter m2 m1 = disjoint2 | p1 == p2 = disjoint l1 l2 && disjoint r1 r2 | otherwise = True where disjoint1 | nomatch p2 p1 m1 = True | zero p2 m1 = disjoint l1 t2 | otherwise = disjoint r1 t2 disjoint2 | nomatch p1 p2 m2 = True | zero p1 m2 = disjoint t1 l2 | otherwise = disjoint t1 r2 disjoint t1@(Bin _ _ _ _) (Tip kx2 bm2) = disjointBM t1 where disjointBM (Bin p1 m1 l1 r1) | nomatch kx2 p1 m1 = True | zero kx2 m1 = disjointBM l1 | otherwise = disjointBM r1 disjointBM (Tip kx1 bm1) | kx1 == kx2 = (bm1 .&. 
bm2) == 0 | otherwise = True disjointBM Nil = True disjoint (Bin _ _ _ _) Nil = True disjoint (Tip kx1 bm1) t2 = disjointBM t2 where disjointBM (Bin p2 m2 l2 r2) | nomatch kx1 p2 m2 = True | zero kx1 m2 = disjointBM l2 | otherwise = disjointBM r2 disjointBM (Tip kx2 bm2) | kx1 == kx2 = (bm1 .&. bm2) == 0 | otherwise = True disjointBM Nil = True disjoint Nil _ = True {-------------------------------------------------------------------- Filter --------------------------------------------------------------------} -- | \(O(n)\). Filter all elements that satisfy some predicate. filter :: (Key -> Bool) -> Word64Set -> Word64Set filter predicate t = case t of Bin p m l r -> bin p m (filter predicate l) (filter predicate r) Tip kx bm -> tip kx (foldl'Bits 0 (bitPred kx) 0 bm) Nil -> Nil where bitPred kx bm bi | predicate (kx + bi) = bm .|. bitmapOfSuffix bi | otherwise = bm {-# INLINE bitPred #-} -- | \(O(n)\). partition the set according to some predicate. partition :: (Key -> Bool) -> Word64Set -> (Word64Set,Word64Set) partition predicate0 t0 = toPair $ go predicate0 t0 where go predicate t = case t of Bin p m l r -> let (l1 :*: l2) = go predicate l (r1 :*: r2) = go predicate r in bin p m l1 r1 :*: bin p m l2 r2 Tip kx bm -> let bm1 = foldl'Bits 0 (bitPred kx) 0 bm in tip kx bm1 :*: tip kx (bm `xor` bm1) Nil -> (Nil :*: Nil) where bitPred kx bm bi | predicate (kx + bi) = bm .|. bitmapOfSuffix bi | otherwise = bm {-# INLINE bitPred #-} -- | \(O(\min(n,W))\). Take while a predicate on the elements holds. -- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@. -- See note at 'spanAntitone'. -- -- @ -- takeWhileAntitone p = 'fromDistinctAscList' . 'Data.List.takeWhile' p . 'toList' -- takeWhileAntitone p = 'filter' p -- @ -- -- @since 0.6.7 takeWhileAntitone :: (Key -> Bool) -> Word64Set -> Word64Set takeWhileAntitone predicate t = case t of Bin p m l r | m < 0 -> if predicate 0 -- handle negative numbers. 
then bin p m (go predicate l) r else go predicate r _ -> go predicate t where go predicate' (Bin p m l r) | predicate' $! p+m = bin p m l (go predicate' r) | otherwise = go predicate' l go predicate' (Tip kx bm) = tip kx (takeWhileAntitoneBits kx predicate' bm) go _ Nil = Nil -- | \(O(\min(n,W))\). Drop while a predicate on the elements holds. -- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@. -- See note at 'spanAntitone'. -- -- @ -- dropWhileAntitone p = 'fromDistinctAscList' . 'Data.List.dropWhile' p . 'toList' -- dropWhileAntitone p = 'filter' (not . p) -- @ -- -- @since 0.6.7 dropWhileAntitone :: (Key -> Bool) -> Word64Set -> Word64Set dropWhileAntitone predicate t = case t of Bin p m l r | m < 0 -> if predicate 0 -- handle negative numbers. then go predicate l else bin p m l (go predicate r) _ -> go predicate t where go predicate' (Bin p m l r) | predicate' $! p+m = go predicate' r | otherwise = bin p m (go predicate' l) r go predicate' (Tip kx bm) = tip kx (bm `xor` takeWhileAntitoneBits kx predicate' bm) go _ Nil = Nil -- | \(O(\min(n,W))\). Divide a set at the point where a predicate on the elements stops holding. -- The user is responsible for ensuring that for all @Int@s, @j \< k ==\> p j \>= p k@. -- -- @ -- spanAntitone p xs = ('takeWhileAntitone' p xs, 'dropWhileAntitone' p xs) -- spanAntitone p xs = 'partition' p xs -- @ -- -- Note: if @p@ is not actually antitone, then @spanAntitone@ will split the set -- at some /unspecified/ point. -- -- @since 0.6.7 spanAntitone :: (Key -> Bool) -> Word64Set -> (Word64Set, Word64Set) spanAntitone predicate t = case t of Bin p m l r | m < 0 -> if predicate 0 -- handle negative numbers. then case go predicate l of (lt :*: gt) -> let !lt' = bin p m lt r in (lt', gt) else case go predicate r of (lt :*: gt) -> let !gt' = bin p m l gt in (lt, gt') _ -> case go predicate t of (lt :*: gt) -> (lt, gt) where go predicate' (Bin p m l r) | predicate' $! 
p+m = case go predicate' r of (lt :*: gt) -> bin p m l lt :*: gt | otherwise = case go predicate' l of (lt :*: gt) -> lt :*: bin p m gt r go predicate' (Tip kx bm) = let bm' = takeWhileAntitoneBits kx predicate' bm in (tip kx bm' :*: tip kx (bm `xor` bm')) go _ Nil = (Nil :*: Nil) -- | \(O(\min(n,W))\). The expression (@'split' x set@) is a pair @(set1,set2)@ -- where @set1@ comprises the elements of @set@ less than @x@ and @set2@ -- comprises the elements of @set@ greater than @x@. -- -- > split 3 (fromList [1..5]) == (fromList [1,2], fromList [4,5]) split :: Key -> Word64Set -> (Word64Set,Word64Set) split x t = case t of Bin p m l r | m < 0 -> if x >= 0 -- handle negative numbers. then case go x l of (lt :*: gt) -> let !lt' = bin p m lt r in (lt', gt) else case go x r of (lt :*: gt) -> let !gt' = bin p m l gt in (lt, gt') _ -> case go x t of (lt :*: gt) -> (lt, gt) where go !x' t'@(Bin p m l r) | nomatch x' p m = if x' < p then (Nil :*: t') else (t' :*: Nil) | zero x' m = case go x' l of (lt :*: gt) -> lt :*: bin p m gt r | otherwise = case go x' r of (lt :*: gt) -> bin p m l lt :*: gt go x' t'@(Tip kx' bm) | kx' > x' = (Nil :*: t') -- equivalent to kx' > prefixOf x' | kx' < prefixOf x' = (t' :*: Nil) | otherwise = tip kx' (bm .&. lowerBitmap) :*: tip kx' (bm .&. higherBitmap) where lowerBitmap = bitmapOf x' - 1 higherBitmap = complement (lowerBitmap + bitmapOf x') go _ Nil = (Nil :*: Nil) -- | \(O(\min(n,W))\). Performs a 'split' but also returns whether the pivot -- element was found in the original set. splitMember :: Key -> Word64Set -> (Word64Set,Bool,Word64Set) splitMember x t = case t of Bin p m l r | m < 0 -> if x >= 0 -- handle negative numbers. 
then case go x l of (lt, fnd, gt) -> let !lt' = bin p m lt r in (lt', fnd, gt) else case go x r of (lt, fnd, gt) -> let !gt' = bin p m l gt in (lt, fnd, gt') _ -> go x t where go x' t'@(Bin p m l r) | nomatch x' p m = if x' < p then (Nil, False, t') else (t', False, Nil) | zero x' m = case go x' l of (lt, fnd, gt) -> let !gt' = bin p m gt r in (lt, fnd, gt') | otherwise = case go x' r of (lt, fnd, gt) -> let !lt' = bin p m l lt in (lt', fnd, gt) go x' t'@(Tip kx' bm) | kx' > x' = (Nil, False, t') -- equivalent to kx' > prefixOf x' | kx' < prefixOf x' = (t', False, Nil) | otherwise = let !lt = tip kx' (bm .&. lowerBitmap) !found = (bm .&. bitmapOfx') /= 0 !gt = tip kx' (bm .&. higherBitmap) in (lt, found, gt) where bitmapOfx' = bitmapOf x' lowerBitmap = bitmapOfx' - 1 higherBitmap = complement (lowerBitmap + bitmapOfx') go _ Nil = (Nil, False, Nil) {---------------------------------------------------------------------- Min/Max ----------------------------------------------------------------------} -- | \(O(\min(n,W))\). Retrieves the maximal key of the set, and the set -- stripped of that element, or 'Nothing' if passed an empty set. maxView :: Word64Set -> Maybe (Key, Word64Set) maxView t = case t of Nil -> Nothing Bin p m l r | m < 0 -> case go l of (result, l') -> Just (result, bin p m l' r) _ -> Just (go t) where go (Bin p m l r) = case go r of (result, r') -> (result, bin p m l r') go (Tip kx bm) = case highestBitSet bm of bi -> (kx + bi, tip kx (bm .&. complement (bitmapOfSuffix bi))) go Nil = error "maxView Nil" -- | \(O(\min(n,W))\). Retrieves the minimal key of the set, and the set -- stripped of that element, or 'Nothing' if passed an empty set. 
minView :: Word64Set -> Maybe (Key, Word64Set)
minView t =
  case t of
    Nil -> Nothing
    -- negative mask at the root: the minimum (most negative key) is in the
    -- right subtree, not the left.
    Bin p m l r | m < 0 -> case go r of (result, r') -> Just (result, bin p m l r')
    _ -> Just (go t)
  where
    go (Bin p m l r) = case go l of (result, l') -> (result, bin p m l' r)
    go (Tip kx bm) = case lowestBitSet bm of
                       bi -> (kx + bi, tip kx (bm .&. complement (bitmapOfSuffix bi)))
    go Nil = error "minView Nil"

-- | \(O(\min(n,W))\). Delete and find the minimal element.
--
-- > deleteFindMin set = (findMin set, deleteMin set)
deleteFindMin :: Word64Set -> (Key, Word64Set)
deleteFindMin = fromMaybe (error "deleteFindMin: empty set has no minimal element") . minView

-- | \(O(\min(n,W))\). Delete and find the maximal element.
--
-- > deleteFindMax set = (findMax set, deleteMax set)
deleteFindMax :: Word64Set -> (Key, Word64Set)
deleteFindMax = fromMaybe (error "deleteFindMax: empty set has no maximal element") . maxView

-- | \(O(\min(n,W))\). The minimal element of the set.
findMin :: Word64Set -> Key
findMin Nil = error "findMin: empty set has no minimal element"
findMin (Tip kx bm) = kx + lowestBitSet bm
findMin (Bin _ m l r)
  -- negative mask: negatives (which sort first) are in the right subtree
  | m < 0     = find r
  | otherwise = find l
    where
      find (Tip kx bm)    = kx + lowestBitSet bm
      find (Bin _ _ l' _) = find l'
      find Nil            = error "findMin Nil"

-- | \(O(\min(n,W))\). The maximal element of a set.
findMax :: Word64Set -> Key
findMax Nil = error "findMax: empty set has no maximal element"
findMax (Tip kx bm) = kx + highestBitSet bm
findMax (Bin _ m l r)
  -- negative mask: the maximum is among the non-negatives in the left subtree
  | m < 0     = find l
  | otherwise = find r
    where
      find (Tip kx bm)    = kx + highestBitSet bm
      find (Bin _ _ _ r') = find r'
      find Nil            = error "findMax Nil"

-- | \(O(\min(n,W))\). Delete the minimal element. Returns an empty set if the set is empty.
--
-- Note that this is a change of behaviour for consistency with 'Data.Set.Set' –
-- versions prior to 0.5 threw an error if the 'Word64Set' was already empty.
deleteMin :: Word64Set -> Word64Set
deleteMin = maybe Nil snd . minView

-- | \(O(\min(n,W))\). Delete the maximal element.
Returns an empty set if the set is empty.
--
-- Note that this is a change of behaviour for consistency with 'Data.Set.Set' –
-- versions prior to 0.5 threw an error if the 'Word64Set' was already empty.
deleteMax :: Word64Set -> Word64Set
deleteMax = maybe Nil snd . maxView

{----------------------------------------------------------------------
  Map
----------------------------------------------------------------------}

-- | \(O(n \min(n,W))\).
-- @'map' f s@ is the set obtained by applying @f@ to each element of @s@.
--
-- It's worth noting that the size of the result may be smaller if,
-- for some @(x,y)@, @x \/= y && f x == f y@
map :: (Key -> Key) -> Word64Set -> Word64Set
map f = fromList . List.map f . toList

-- | \(O(n)\). The
--
-- @'mapMonotonic' f s == 'map' f s@, but works only when @f@ is strictly increasing.
-- /The precondition is not checked./
-- Semi-formally, we have:
--
-- > and [x < y ==> f x < f y | x <- ls, y <- ls]
-- >                     ==> mapMonotonic f s == map f s
-- >     where ls = toList s
--
-- @since 0.6.3.1
-- Note that for now the test is insufficient to support any fancier implementation.
mapMonotonic :: (Key -> Key) -> Word64Set -> Word64Set
mapMonotonic f = fromDistinctAscList . List.map f . toAscList

{--------------------------------------------------------------------
  Fold
--------------------------------------------------------------------}
-- | \(O(n)\). Fold the elements in the set using the given right-associative
-- binary operator. This function is an equivalent of 'foldr' and is present
-- for compatibility only.
--
-- /Please note that fold will be deprecated in the future and removed./
fold :: (Key -> b -> b) -> b -> Word64Set -> b
fold = foldr
{-# INLINE fold #-}

-- | \(O(n)\). Fold the elements in the set using the given right-associative
-- binary operator, such that @'foldr' f z == 'Prelude.foldr' f z . 'toAscList'@.
--
-- For example,
--
-- > toAscList set = foldr (:) [] set
foldr :: (Key -> b -> b) -> b -> Word64Set -> b
foldr f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0     -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    go acc Nil           = acc
    go acc (Tip kx bm)   = foldrBits kx f acc bm
    go acc (Bin _ _ l r) = go (go acc r) l
{-# INLINE foldr #-}

-- | \(O(n)\). A strict version of 'foldr'. Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldr' :: (Key -> b -> b) -> b -> Word64Set -> b
foldr' f z = \t ->     -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0     -> go (go z l) r -- put negative numbers before
      | otherwise -> go (go z r) l
    _ -> go z t
  where
    go !acc Nil          = acc
    go acc (Tip kx bm)   = foldr'Bits kx f acc bm
    go acc (Bin _ _ l r) = go (go acc r) l
{-# INLINE foldr' #-}

-- | \(O(n)\). Fold the elements in the set using the given left-associative
-- binary operator, such that @'foldl' f z == 'Prelude.foldl' f z . 'toAscList'@.
--
-- For example,
--
-- > toDescList set = foldl (flip (:)) [] set
foldl :: (a -> Key -> a) -> a -> Word64Set -> a
foldl f z = \t ->      -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0     -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go acc Nil           = acc
    go acc (Tip kx bm)   = foldlBits kx f acc bm
    go acc (Bin _ _ l r) = go (go acc l) r
{-# INLINE foldl #-}

-- | \(O(n)\). A strict version of 'foldl'. Each application of the operator is
-- evaluated before using the result in the next application. This
-- function is strict in the starting value.
foldl' :: (a -> Key -> a) -> a -> Word64Set -> a
foldl' f z = \t ->     -- Use lambda t to be inlinable with two arguments only.
  case t of
    Bin _ m l r
      | m < 0     -> go (go z r) l -- put negative numbers before
      | otherwise -> go (go z l) r
    _ -> go z t
  where
    go !acc Nil          = acc
    go acc (Tip kx bm)   = foldl'Bits kx f acc bm
    go acc (Bin _ _ l r) = go (go acc l) r
{-# INLINE foldl' #-}

{--------------------------------------------------------------------
  List variations
--------------------------------------------------------------------}
-- | \(O(n)\). An alias of 'toAscList'. The elements of a set in ascending order.
-- Subject to list fusion.
elems :: Word64Set -> [Key]
elems = toAscList

{--------------------------------------------------------------------
  Lists
--------------------------------------------------------------------}

#ifdef __GLASGOW_HASKELL__
-- | @since 0.5.6.2
instance GHC.Exts.IsList Word64Set where
  type Item Word64Set = Key
  fromList = fromList
  toList   = toList
#endif

-- | \(O(n)\). Convert the set to a list of elements. Subject to list fusion.
toList :: Word64Set -> [Key]
toList = toAscList

-- | \(O(n)\). Convert the set to an ascending list of elements. Subject to list
-- fusion.
toAscList :: Word64Set -> [Key]
toAscList = foldr (:) []

-- | \(O(n)\). Convert the set to a descending list of elements. Subject to list
-- fusion.
toDescList :: Word64Set -> [Key]
toDescList = foldl (flip (:)) []

-- List fusion for the list generating functions.
#if __GLASGOW_HASKELL__
-- The foldrFB and foldlFB are foldr and foldl equivalents, used for list fusion.
-- They are important to convert unfused to{Asc,Desc}List back, see mapFB in prelude.
foldrFB :: (Key -> b -> b) -> b -> Word64Set -> b
foldrFB = foldr
{-# INLINE[0] foldrFB #-}
foldlFB :: (a -> Key -> a) -> a -> Word64Set -> a
foldlFB = foldl
{-# INLINE[0] foldlFB #-}

-- Inline elems and toList, so that we need to fuse only toAscList.
{-# INLINE elems #-}
{-# INLINE toList #-}

-- The fusion is enabled up to phase 2 included. If it does not succeed,
-- convert in phase 1 the expanded to{Asc,Desc}List calls back to
-- to{Asc,Desc}List.
In phase 0, we inline fold{lr}FB (which were used in
-- a list fusion, otherwise it would go away in phase 1), and let compiler do
-- whatever it wants with to{Asc,Desc}List -- it was forbidden to inline it
-- before phase 0, otherwise the fusion rules would not fire at all.
{-# NOINLINE[0] toAscList #-}
{-# NOINLINE[0] toDescList #-}
{-# RULES "Word64Set.toAscList" [~1] forall s . toAscList s = GHC.Exts.build (\c n -> foldrFB c n s) #-}
{-# RULES "Word64Set.toAscListBack" [1] foldrFB (:) [] = toAscList #-}
{-# RULES "Word64Set.toDescList" [~1] forall s . toDescList s = GHC.Exts.build (\c n -> foldlFB (\xs x -> c x xs) n s) #-}
{-# RULES "Word64Set.toDescListBack" [1] foldlFB (\xs x -> x : xs) [] = toDescList #-}
#endif

-- | \(O(n \min(n,W))\). Create a set from a list of integers.
{-# INLINABLE fromList #-}
fromList :: [Key] -> Word64Set
fromList = List.foldl' ins empty
  where
    ins t x = insert x t

-- | \(O(n)\). Build a set from an ascending list of elements.
-- /The precondition (input list is ascending) is not checked./
fromAscList :: [Key] -> Word64Set
fromAscList = fromMonoList
{-# NOINLINE fromAscList #-}

-- | \(O(n)\). Build a set from an ascending list of distinct elements.
-- /The precondition (input list is strictly ascending) is not checked./
fromDistinctAscList :: [Key] -> Word64Set
fromDistinctAscList = fromAscList
{-# INLINE fromDistinctAscList #-}

-- | \(O(n)\). Build a set from a monotonic list of elements.
--
-- The precise conditions under which this function works are subtle:
-- For any branch mask, keys with the same prefix w.r.t. the branch
-- mask must occur consecutively in the list.
fromMonoList :: [Key] -> Word64Set
fromMonoList []         = Nil
fromMonoList (kx : zs1) = addAll' (prefixOf kx) (bitmapOf kx) zs1
  where
    -- `addAll'` collects all keys with the prefix `px` into a single
    -- bitmap, and then proceeds with `addAll`.
    addAll' !px !bm []
        = Tip px bm
    addAll' !px !bm (ky : zs)
        | px == prefixOf ky
        = addAll' px (bm .|. bitmapOf ky) zs
        -- inlined: | otherwise = addAll px (Tip px bm) (ky : zs)
        | py <- prefixOf ky
        , m <- branchMask px py
        , Inserted ty zs' <- addMany' m py (bitmapOf ky) zs
        = addAll px (linkWithMask m py ty {-px-} (Tip px bm)) zs'

    -- for `addAll` and `addMany`, px is /a/ prefix inside the tree `tx`
    -- `addAll` consumes the rest of the list, adding to the tree `tx`
    addAll !_px !tx []
        = tx
    addAll !px !tx (ky : zs)
        | py <- prefixOf ky
        , m <- branchMask px py
        , Inserted ty zs' <- addMany' m py (bitmapOf ky) zs
        = addAll px (linkWithMask m py ty {-px-} tx) zs'

    -- `addMany'` is similar to `addAll'`, but proceeds with `addMany'`.
    addMany' !_m !px !bm []
        = Inserted (Tip px bm) []
    addMany' !m !px !bm zs0@(ky : zs)
        | px == prefixOf ky
        = addMany' m px (bm .|. bitmapOf ky) zs
        -- inlined: | otherwise = addMany m px (Tip px bm) (ky : zs)
        | mask px m /= mask ky m
        = Inserted (Tip (prefixOf px) bm) zs0
        | py <- prefixOf ky
        , mxy <- branchMask px py
        , Inserted ty zs' <- addMany' mxy py (bitmapOf ky) zs
        = addMany m px (linkWithMask mxy py ty {-px-} (Tip px bm)) zs'

    -- `addAll` adds to `tx` all keys whose prefix w.r.t. `m` agrees with `px`.
    addMany !_m !_px tx []
        = Inserted tx []
    addMany !m !px tx zs0@(ky : zs)
        | mask px m /= mask ky m
        = Inserted tx zs0
        | py <- prefixOf ky
        , mxy <- branchMask px py
        , Inserted ty zs' <- addMany' mxy py (bitmapOf ky) zs
        = addMany m px (linkWithMask mxy py ty {-px-} tx) zs'
{-# INLINE fromMonoList #-}

-- Worker/result pair used by 'fromMonoList': the tree built so far and
-- the still-unconsumed suffix of the input list.
data Inserted = Inserted !Word64Set ![Key]

{--------------------------------------------------------------------
  Eq
--------------------------------------------------------------------}
instance Eq Word64Set where
  t1 == t2  = equal t1 t2
  t1 /= t2  = nequal t1 t2

equal :: Word64Set -> Word64Set -> Bool
equal (Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2)
  = (m1 == m2) && (p1 == p2) && (equal l1 l2) && (equal r1 r2)
equal (Tip kx1 bm1) (Tip kx2 bm2)
  = kx1 == kx2 && bm1 == bm2
equal Nil Nil = True
equal _   _   = False

nequal :: Word64Set -> Word64Set -> Bool
nequal (Bin p1 m1 l1 r1) (Bin p2 m2 l2 r2)
  = (m1 /= m2) || (p1 /= p2) || (nequal l1 l2) || (nequal r1 r2)
nequal (Tip kx1 bm1) (Tip kx2 bm2)
  = kx1 /= kx2 || bm1 /= bm2
nequal Nil Nil = False
nequal _   _   = True

{--------------------------------------------------------------------
  Ord
--------------------------------------------------------------------}

instance Ord Word64Set where
    compare s1 s2 = compare (toAscList s1) (toAscList s2)
    -- tentative implementation. See if more efficient exists.

{--------------------------------------------------------------------
  Show
--------------------------------------------------------------------}
instance Show Word64Set where
  showsPrec p xs = showParen (p > 10) $
    showString "fromList " . shows (toList xs)

{--------------------------------------------------------------------
  Read
--------------------------------------------------------------------}
instance Read Word64Set where
#ifdef __GLASGOW_HASKELL__
  readPrec = parens $ prec 10 $ do
    Ident "fromList" <- lexP
    xs <- readPrec
    return (fromList xs)

  readListPrec = readListPrecDefault
#else
  readsPrec p = readParen (p > 10) $ \ r -> do
    ("fromList",s) <- lex r
    (xs,t) <- reads s
    return (fromList xs,t)
#endif

{--------------------------------------------------------------------
  NFData
--------------------------------------------------------------------}

-- The Word64Set constructors consist only of strict fields of Ints and
-- Word64Sets, thus the default NFData instance which evaluates to whnf
-- should suffice
instance NFData Word64Set where rnf x = seq x ()

{--------------------------------------------------------------------
  Debugging
--------------------------------------------------------------------}
-- | \(O(n \min(n,W))\). Show the tree that implements the set. The tree is shown
-- in a compressed, hanging format.
showTree :: Word64Set -> String
showTree s
  = showTreeWith True False s

{- | \(O(n \min(n,W))\). The expression (@'showTreeWith' hang wide map@) shows
 the tree that implements the set. If @hang@ is
 'True', a /hanging/ tree is shown otherwise a rotated tree is shown. If
 @wide@ is 'True', an extra wide version is shown.
-}
showTreeWith :: Bool -> Bool -> Word64Set -> String
showTreeWith hang wide t
  | hang      = (showsTreeHang wide [] t) ""
  | otherwise = (showsTree wide [] [] t) ""

showsTree :: Bool -> [String] -> [String] -> Word64Set -> ShowS
showsTree wide lbars rbars t = case t of
  Bin p m l r ->
    showsTree wide (withBar rbars) (withEmpty rbars) r .
    showWide wide rbars .
    showsBars lbars . showString (showBin p m) . showString "\n" .
    showWide wide lbars .
    showsTree wide (withEmpty lbars) (withBar lbars) l
  Tip kx bm ->
    showsBars lbars . showString " " . shows kx . showString " + " .
    showsBitMap bm . showString "\n"
  Nil -> showsBars lbars . showString "|\n"

showsTreeHang :: Bool -> [String] -> Word64Set -> ShowS
showsTreeHang wide bars t = case t of
  Bin p m l r ->
    showsBars bars . showString (showBin p m) . showString "\n" .
    showWide wide bars .
    showsTreeHang wide (withBar bars) l .
    showWide wide bars .
    showsTreeHang wide (withEmpty bars) r
  Tip kx bm ->
    showsBars bars . showString " " . shows kx . showString " + " .
    showsBitMap bm . showString "\n"
  Nil -> showsBars bars . showString "|\n"

showBin :: Prefix -> Mask -> String
showBin _ _
  = "*" -- ++ show (p,m)

showWide :: Bool -> [String] -> String -> String
showWide wide bars
  | wide      = showString (concat (reverse bars)) . showString "|\n"
  | otherwise = id

showsBars :: [String] -> ShowS
showsBars []       = id
showsBars (_ : tl) = showString (concat (reverse tl)) . showString node

showsBitMap :: Word64 -> ShowS
showsBitMap = showString . showBitMap

showBitMap :: Word64 -> String
showBitMap w = show $ foldrBits 0 (:) [] w

node :: String
node = "+--"

withBar, withEmpty :: [String] -> [String]
withBar bars   = "|  ":bars
withEmpty bars = "   ":bars

{--------------------------------------------------------------------
  Helpers
--------------------------------------------------------------------}
{--------------------------------------------------------------------
  Link
--------------------------------------------------------------------}
link :: Prefix -> Word64Set -> Prefix -> Word64Set -> Word64Set
link p1 t1 p2 t2 = linkWithMask (branchMask p1 p2) p1 t1 {-p2-} t2
{-# INLINE link #-}

-- `linkWithMask` is useful when the `branchMask` has already been computed
linkWithMask :: Mask -> Prefix -> Word64Set -> Word64Set -> Word64Set
linkWithMask m p1 t1 {-p2-} t2
  | zero p1 m = Bin p m t1 t2
  | otherwise = Bin p m t2 t1
  where
    p = mask p1 m
{-# INLINE linkWithMask #-}

{--------------------------------------------------------------------
  @bin@ assures that we never have empty trees within a tree.
--------------------------------------------------------------------}
bin :: Prefix -> Mask -> Word64Set -> Word64Set -> Word64Set
bin _ _ l Nil = l
bin _ _ Nil r = r
bin p m l r   = Bin p m l r
{-# INLINE bin #-}

{--------------------------------------------------------------------
  @tip@ assures that we never have empty bitmaps within a tree.
--------------------------------------------------------------------}
tip :: Prefix -> BitMap -> Word64Set
tip _ 0    = Nil
tip kx bm  = Tip kx bm
{-# INLINE tip #-}

{----------------------------------------------------------------------
  Functions that generate Prefix and BitMap of a Key or a Suffix.
----------------------------------------------------------------------}

-- Mask selecting the low bits of a key that index into a Tip's bitmap
-- (i.e. log2 of the word size, as a bitmask: 63 for 64-bit words).
suffixBitMask :: Word64
suffixBitMask = fromIntegral (finiteBitSize (undefined::Word64)) - 1
{-# INLINE suffixBitMask #-}

prefixBitMask :: Word64
prefixBitMask = complement suffixBitMask
{-# INLINE prefixBitMask #-}

prefixOf :: Word64 -> Prefix
prefixOf x = x .&. prefixBitMask
{-# INLINE prefixOf #-}

suffixOf :: Word64 -> Word64
suffixOf x = x .&. suffixBitMask
{-# INLINE suffixOf #-}

bitmapOfSuffix :: Word64 -> BitMap
bitmapOfSuffix s = 1 `shiftLL` fromIntegral s
{-# INLINE bitmapOfSuffix #-}

bitmapOf :: Word64 -> BitMap
bitmapOf x = bitmapOfSuffix (suffixOf x)
{-# INLINE bitmapOf #-}

{--------------------------------------------------------------------
  Endian independent bit twiddling
--------------------------------------------------------------------}
-- Returns True iff the bits set in i and the Mask m are disjoint.
zero :: Word64 -> Mask -> Bool
zero i m
  = (natFromInt i) .&. (natFromInt m) == 0
{-# INLINE zero #-}

nomatch,match :: Word64 -> Prefix -> Mask -> Bool
nomatch i p m
  = (mask i m) /= p
{-# INLINE nomatch #-}

match i p m
  = (mask i m) == p
{-# INLINE match #-}

-- Suppose a is largest such that 2^a divides 2*m.
-- Then mask i m is i with the low a bits zeroed out.
mask :: Word64 -> Mask -> Prefix
mask i m
  = maskW (natFromInt i) (natFromInt m)
{-# INLINE mask #-}

{--------------------------------------------------------------------
  Big endian operations
--------------------------------------------------------------------}
-- Zero out all bits of i at and below the (single) bit set in m,
-- keeping the bits strictly above it: the prefix w.r.t. branch mask m.
maskW :: Nat -> Nat -> Prefix
maskW i m
  = intFromNat (i .&. (complement (m-1) `xor` m))
{-# INLINE maskW #-}

-- A numerically larger mask corresponds to a shorter (higher) prefix.
shorter :: Mask -> Mask -> Bool
shorter m1 m2
  = (natFromInt m1) > (natFromInt m2)
{-# INLINE shorter #-}

-- The highest bit on which the two prefixes disagree; this becomes the
-- branching bit of the Bin node that joins them.
branchMask :: Prefix -> Prefix -> Mask
branchMask p1 p2
  = intFromNat (highestBitMask (natFromInt p1 `xor` natFromInt p2))
{-# INLINE branchMask #-}

{----------------------------------------------------------------------
  To get best performance, we provide fast implementations of
  lowestBitSet, highestBitSet and fold[lr][l]Bits for GHC.
  If the intel bsf and bsr instructions ever become GHC primops,
  this code should be reimplemented using these.

  Performance of this code is crucial for folds, toList, filter, partition.

  The signatures of methods in question are placed after this comment.
----------------------------------------------------------------------}

lowestBitSet :: Nat -> Word64
highestBitSet :: Nat -> Word64
foldlBits :: Word64 -> (a -> Word64 -> a) -> a -> Nat -> a
foldl'Bits :: Word64 -> (a -> Word64 -> a) -> a -> Nat -> a
foldrBits :: Word64 -> (Word64 -> a -> a) -> a -> Nat -> a
foldr'Bits :: Word64 -> (Word64 -> a -> a) -> a -> Nat -> a
takeWhileAntitoneBits :: Word64 -> (Word64 -> Bool) -> Nat -> Nat

{-# INLINE lowestBitSet #-}
{-# INLINE highestBitSet #-}
{-# INLINE foldlBits #-}
{-# INLINE foldl'Bits #-}
{-# INLINE foldrBits #-}
{-# INLINE foldr'Bits #-}
{-# INLINE takeWhileAntitoneBits #-}

#if defined(__GLASGOW_HASKELL__)

-- Precondition: the bitmask has exactly one bit set.
indexOfTheOnlyBit :: Nat -> Word64
{-# INLINE indexOfTheOnlyBit #-}
indexOfTheOnlyBit bitmask = fromIntegral $ countTrailingZeros bitmask

lowestBitSet x = fromIntegral $ countTrailingZeros x

highestBitSet x = fromIntegral $ 63 - countLeadingZeros x

-- Isolate the lowest set bit (classic x .&. -x trick).
lowestBitMask :: Nat -> Nat
lowestBitMask x = x .&. negate x
{-# INLINE lowestBitMask #-}

-- Reverse the order of bits in the Nat.
revNat :: Nat -> Nat
revNat x1 = case ((x1 `shiftRL` 1) .&. 0x5555555555555555) .|. ((x1 .&. 0x5555555555555555) `shiftLL` 1) of
              x2 -> case ((x2 `shiftRL` 2) .&. 0x3333333333333333) .|. ((x2 .&. 0x3333333333333333) `shiftLL` 2) of
                 x3 -> case ((x3 `shiftRL` 4) .&. 0x0F0F0F0F0F0F0F0F) .|. ((x3 .&. 0x0F0F0F0F0F0F0F0F) `shiftLL` 4) of
                   x4 -> case ((x4 `shiftRL` 8) .&. 0x00FF00FF00FF00FF) .|. ((x4 .&. 0x00FF00FF00FF00FF) `shiftLL` 8) of
                     x5 -> case ((x5 `shiftRL` 16) .&. 0x0000FFFF0000FFFF) .|. ((x5 .&. 0x0000FFFF0000FFFF) `shiftLL` 16) of
                       x6 -> ( x6 `shiftRL` 32             ) .|. ( x6 `shiftLL` 32)

foldlBits prefix f z bitmap = go bitmap z
  where go 0 acc = acc
        go bm acc = go (bm `xor` bitmask) ((f acc) $! (prefix+bi))
          where
            !bitmask = lowestBitMask bm
            !bi = indexOfTheOnlyBit bitmask

foldl'Bits prefix f z bitmap = go bitmap z
  where go 0 acc = acc
        go bm !acc = go (bm `xor` bitmask) ((f acc) $! (prefix+bi))
          where
            !bitmask = lowestBitMask bm
            !bi = indexOfTheOnlyBit bitmask

-- Folding right is done over the bit-reversed word, so the cheap
-- lowest-bit extraction still yields keys in descending position order.
foldrBits prefix f z bitmap = go (revNat bitmap) z
  where go 0 acc = acc
        go bm acc = go (bm `xor` bitmask) ((f $! (prefix+63-bi)) acc)
          where
            !bitmask = lowestBitMask bm
            !bi = indexOfTheOnlyBit bitmask

foldr'Bits prefix f z bitmap = go (revNat bitmap) z
  where go 0 acc = acc
        go bm !acc = go (bm `xor` bitmask) ((f $! (prefix+63-bi)) acc)
          where
            !bitmask = lowestBitMask bm
            !bi = indexOfTheOnlyBit bitmask

takeWhileAntitoneBits prefix predicate bitmap =
  -- Binary search for the first index where the predicate returns false, but skip a predicate
  -- call if the high half of the current range is empty. This ensures
  -- min (log2 64 + 1 = 7) (popcount bitmap) predicate calls.
  let next d h (n',b') =
        if n' .&. h /= 0 && (predicate $! prefix + fromIntegral (b'+d))
        then (n' `shiftRL` d, b'+d)
        else (n',b')
      {-# INLINE next #-}
      (_,b) = next 1 0x2 $
              next 2 0xC $
              next 4 0xF0 $
              next 8 0xFF00 $
              next 16 0xFFFF0000 $
              next 32 0xFFFFFFFF00000000 $
              (bitmap,0)
      m = if b /= 0 || (bitmap .&. 0x1 /= 0 && predicate prefix)
          then ((2 `shiftLL` b) - 1)
          else ((1 `shiftLL` b) - 1)
  in bitmap .&. m

#else
{----------------------------------------------------------------------
  In general case we use logarithmic implementation of
  lowestBitSet and highestBitSet, which works up to bit sizes of 64.

  Folds are linear scans.
----------------------------------------------------------------------}

lowestBitSet n0 =
    let (n1,b1) = if n0 .&. 0xFFFFFFFF /= 0 then (n0,0)  else (n0 `shiftRL` 32, 32)
        (n2,b2) = if n1 .&. 0xFFFF /= 0     then (n1,b1) else (n1 `shiftRL` 16, 16+b1)
        (n3,b3) = if n2 .&. 0xFF /= 0       then (n2,b2) else (n2 `shiftRL` 8,  8+b2)
        (n4,b4) = if n3 .&. 0xF /= 0        then (n3,b3) else (n3 `shiftRL` 4,  4+b3)
        (n5,b5) = if n4 .&. 0x3 /= 0        then (n4,b4) else (n4 `shiftRL` 2,  2+b4)
        b6      = if n5 .&. 0x1 /= 0        then b5      else 1+b5
    in b6

highestBitSet n0 =
    let (n1,b1) = if n0 .&. 0xFFFFFFFF00000000 /= 0 then (n0 `shiftRL` 32, 32)    else (n0,0)
        (n2,b2) = if n1 .&. 0xFFFF0000 /= 0         then (n1 `shiftRL` 16, 16+b1) else (n1,b1)
        (n3,b3) = if n2 .&. 0xFF00 /= 0             then (n2 `shiftRL` 8,  8+b2)  else (n2,b2)
        (n4,b4) = if n3 .&. 0xF0 /= 0               then (n3 `shiftRL` 4,  4+b3)  else (n3,b3)
        (n5,b5) = if n4 .&. 0xC /= 0                then (n4 `shiftRL` 2,  2+b4)  else (n4,b4)
        b6      = if n5 .&. 0x2 /= 0                then 1+b5                     else b5
    in b6

foldlBits prefix f z bm = let lb = lowestBitSet bm
                          in  go (prefix+lb) z (bm `shiftRL` lb)
  where go !_ acc 0 = acc
        go bi acc n | n `testBit` 0 = go (bi + 1) (f acc bi) (n `shiftRL` 1)
                    | otherwise     = go (bi + 1)    acc     (n `shiftRL` 1)

foldl'Bits prefix f z bm = let lb = lowestBitSet bm
                           in  go (prefix+lb) z (bm `shiftRL` lb)
  where go !_ !acc 0 = acc
        go bi acc n | n `testBit` 0 = go (bi + 1) (f acc bi) (n `shiftRL` 1)
                    | otherwise     = go (bi + 1)    acc     (n `shiftRL` 1)

foldrBits prefix f z bm = let lb = lowestBitSet bm
                          in  go (prefix+lb) (bm `shiftRL` lb)
  where go !_ 0 = z
        go bi n | n `testBit` 0 = f bi (go (bi + 1) (n `shiftRL` 1))
                | otherwise     =       go (bi + 1) (n `shiftRL` 1)

foldr'Bits prefix f z bm = let lb = lowestBitSet bm
                           in  go (prefix+lb) (bm `shiftRL` lb)
  where go !_ 0 = z
        go bi n | n `testBit` 0 = f bi $! go (bi + 1) (n `shiftRL` 1)
                | otherwise     =         go (bi + 1) (n `shiftRL` 1)

takeWhileAntitoneBits prefix predicate = foldl'Bits prefix f 0 -- Does not use antitone property
  where
    f acc bi | predicate bi = acc .|. bitmapOf bi
             | otherwise    = acc

#endif

{--------------------------------------------------------------------
  Utilities
--------------------------------------------------------------------}

-- | \(O(1)\).  Decompose a set into pieces based on the structure of the underlying
-- tree.  This function is useful for consuming a set in parallel.
--
-- No guarantee is made as to the sizes of the pieces; an internal, but
-- deterministic process determines this.
However, it is guaranteed that the -- pieces returned will be in ascending order (all elements in the first submap -- less than all elements in the second, and so on). -- -- Examples: -- -- > splitRoot (fromList [1..120]) == [fromList [1..63],fromList [64..120]] -- > splitRoot empty == [] -- -- Note that the current implementation does not return more than two subsets, -- but you should not depend on this behaviour because it can change in the -- future without notice. Also, the current version does not continue -- splitting all the way to individual singleton sets -- it stops at some -- point. splitRoot :: Word64Set -> [Word64Set] splitRoot Nil = [] -- NOTE: we don't currently split below Tip, but we could. splitRoot x@(Tip _ _) = [x] splitRoot (Bin _ m l r) | m < 0 = [r, l] | otherwise = [l, r] {-# INLINE splitRoot #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/0000755000000000000000000000000007346545000017141 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Backend.hs0000644000000000000000000011025507346545000021030 0ustar0000000000000000{-# LANGUAGE MultiWayIf, LambdaCase #-} {-| Module : GHC.Driver.Backend Description : Back ends for code generation This module exports the `Backend` type and all the available values of that type. The type is abstract, and GHC assumes a "closed world": all the back ends are known and are known here. The compiler driver chooses a `Backend` value based on how it is asked to generate code. A `Backend` value encapsulates the knowledge needed to take Cmm, STG, or Core and write assembly language to a file. A back end also provides a function that enables the compiler driver to run an assembler on the code that is written, if any (the "post-backend pipeline"). Finally, a back end has myriad /properties/. Properties mediate interactions between a back end and the rest of the compiler, especially the driver. 
Examples include the following: * Property `backendValidityOfCImport` says whether the back end can import foreign C functions. * Property `backendForcesOptimization0` says whether the back end can be used with optimization levels higher than `-O0`. * Property `backendCDefs` tells the compiler driver, "if you're using this back end, then these are the command-line flags you should add to any invocation of the C compiler." These properties are used elsewhere in GHC, primarily in the driver, to fine-tune operations according to the capabilities of the chosen back end. You might use a property to make GHC aware of a potential limitation of certain back ends, or a special feature available only in certain back ends. If your client code needs to know a fact that is not exposed in an existing property, you would define and export a new property. Conditioning client code on the /identity/ or /name/ of a back end is Not Done. For full details, see the documentation of each property. -} module GHC.Driver.Backend ( -- * The @Backend@ type Backend -- note: type is abstract -- * Available back ends , ncgBackend , llvmBackend , jsBackend , viaCBackend , interpreterBackend , noBackend , allBackends -- * Types used to specify properties of back ends , PrimitiveImplementation(..) -- ** Properties that stand for functions -- *** Back-end function for code generation , DefunctionalizedCodeOutput(..) -- *** Back-end functions for assembly , DefunctionalizedPostHscPipeline(..) -- *** Other back-end functions , DefunctionalizedCDefs(..) 
-- ** Names of back ends (for API clients of version 9.4 or earlier) , BackendName -- * Properties of back ends , backendDescription , backendWritesFiles , backendPipelineOutput , backendCanReuseLoadedCode , backendGeneratesCode , backendGeneratesCodeForHsBoot , backendSupportsInterfaceWriting , backendRespectsSpecialise , backendWantsGlobalBindings , backendHasNativeSwitch , backendPrimitiveImplementation , backendSimdValidity , backendSupportsEmbeddedBlobs , backendNeedsPlatformNcgSupport , backendSupportsUnsplitProcPoints , backendSwappableWithViaC , backendUnregisterisedAbiOnly , backendGeneratesHc , backendSptIsDynamic , backendSupportsBreakpoints , backendForcesOptimization0 , backendNeedsFullWays , backendSpecialModuleSource , backendSupportsHpc , backendSupportsCImport , backendSupportsCExport , backendCDefs , backendCodeOutput , backendUseJSLinker , backendPostHscPipeline , backendNormalSuccessorPhase , backendName , backendValidityOfCImport , backendValidityOfCExport -- * Other functions of back ends , platformDefaultBackend , platformNcgSupported ) where import GHC.Prelude import GHC.Driver.Backend.Internal (BackendName(..)) import GHC.Driver.Phases import GHC.Utils.Error import GHC.Utils.Panic import GHC.Driver.Pipeline.Monad import GHC.Platform --------------------------------------------------------------------------------- -- -- DESIGN CONSIDERATIONS -- -- -- -- The `Backend` type is made abstract in order to make it possible to -- add new back ends without having to inspect or modify much code -- elsewhere in GHC. Adding a new back end would be /easiest/ if -- `Backend` were represented as a record type, but in peer review, -- the clear will of the majority was to use a sum type. As a result, -- when adding a new back end it is necessary to modify /every/ -- function in this module that expects `Backend` as its first argument. 
-- **By design, these functions have no default/wildcard cases.** This -- design forces the author of a new back end to consider the semantics -- in every case, rather than relying on a default that may be wrong. -- The names and documentation of the functions defined in the `Backend` -- record are sufficiently descriptive that the author of a new back -- end will be able to identify correct result values without having to go -- spelunking throughout the compiler. -- -- While the design localizes /most/ back-end logic in this module, -- the author of a new back end will still have to make changes -- elsewhere in the compiler: -- -- * For reasons described in Note [Backend Defunctionalization], -- code-generation and post-backend pipeline functions, among other -- functions, cannot be placed in the `Backend` record itself. -- Instead, the /names/ of those functions are placed. Each name is -- a value constructor in one of the algebraic data types defined in -- this module. The named function is then defined near its point -- of use. -- -- The author of a new back end will have to consider whether an -- existing function will do or whether a new function needs to be -- defined. When a new function needs to be defined, the author -- must take two steps: -- -- - Add a value constructor to the relevant data type here -- in the `Backend` module -- -- - Add a case to the location in the compiler (there should be -- exactly one) where the value constructors of the relevant -- data type are used -- -- * When a new back end is defined, it's quite possible that the -- compiler driver will have to be changed in some way. Just because -- the driver supports five back ends doesn't mean it will support a sixth -- without changes. -- -- The collection of functions exported from this module hasn't -- really been "designed"; it's what emerged from a refactoring of -- older code. The real design criterion was "make it crystal clear -- what has to be done to add a new back end." 
--
-- One issue remains unresolved: some of the error messages and
-- warning messages used in the driver assume a "closed world": they
-- think they know all the back ends that exist, and they are not shy
-- about enumerating them.  Just one set of error messages has been
-- ported to have an open-world assumption: these are the error
-- messages associated with type checking of foreign imports and
-- exports.  To allow other errors to be issued with an open-world
-- assumption, use functions `backendValidityOfCImport` and
-- `backendValidityOfCExport` as models, and have a look at how the
-- 'expected back ends' are used in modules "GHC.Tc.Gen.Foreign" and
-- "GHC.Tc.Errors.Ppr"
--
---------------------------------------------------------------------------------

-- | Pick the preferred back end for a platform: unregisterised
-- platforms must go via C; otherwise prefer the NCG where supported,
-- the JS back end on JavaScript platforms, and LLVM everywhere else.
platformDefaultBackend :: Platform -> Backend
platformDefaultBackend platform
  | platformUnregisterised platform = viaCBackend
  | platformNcgSupported platform   = ncgBackend
  | platformJSSupported platform    = jsBackend
  | otherwise                       = llvmBackend

-- | Is the platform supported by the Native Code Generator?
platformNcgSupported :: Platform -> Bool
platformNcgSupported platform =
    -- the NCG does not support the unregisterised ABI
    not (platformUnregisterised platform) && ncgValidArch
  where
    ncgValidArch =
      case platformArch platform of
        ArchX86       -> True
        ArchX86_64    -> True
        ArchPPC       -> True
        ArchPPC_64 {} -> True
        ArchAArch64   -> True
        ArchWasm32    -> True
        ArchRISCV64   -> True
        _             -> False

-- | Is the platform supported by the JS backend?
platformJSSupported :: Platform -> Bool
platformJSSupported platform = platformArch platform == ArchJavaScript


-- | A value of type @Backend@ represents one of GHC's back ends.
-- The set of back ends cannot be extended except by modifying the
-- definition of @Backend@ in this module.
--
-- The @Backend@ type is abstract; that is, its value constructors are
-- not exported.
-- It's crucial that they not be exported, because a
-- value of type @Backend@ carries only the back end's /name/, not its
-- behavior or properties.  If @Backend@ were not abstract, then code
-- elsewhere in the compiler could depend directly on the name, not on
-- the semantics, which would make it challenging to create a new back end.
-- Because @Backend@ /is/ abstract, all the obligations of a new back
-- end are enumerated in this module, in the form of functions that
-- take @Backend@ as an argument.
--
-- The issue of abstraction is discussed at great length in #20927 and !7442.
newtype Backend = Named BackendName
   -- Must be a newtype so that it has no `Eq` instance and
   -- a different `Show` instance.

-- | The Show instance is for messages /only/.  If code depends on
-- what's in the string, you deserve what happens to you.
instance Show Backend where
  show = backendDescription

ncgBackend, llvmBackend, viaCBackend, interpreterBackend, jsBackend, noBackend
    :: Backend

-- | The native code generator.
-- Compiles Cmm code into textual assembler, then relies on
-- an external assembler toolchain to produce machine code.
--
-- Supported only on platforms for which the NCG has support
-- (see `platformNcgSupported`).
--
-- See "GHC.CmmToAsm".
ncgBackend = Named NCG

-- | The LLVM backend.
--
-- Compiles Cmm code into LLVM textual IR, then relies on
-- LLVM toolchain to produce machine code.
--
-- It relies on LLVM support for the calling convention used
-- by the NCG backend to produce code objects ABI compatible
-- with it (see "cc 10" or "ghccc" calling convention in
-- https://llvm.org/docs/LangRef.html#calling-conventions).
--
-- Supports a few platforms (X86, AArch64, s390x, ARM).
--
-- See "GHC.CmmToLlvm"
llvmBackend = Named LLVM

-- | The JavaScript Backend
--
-- See documentation in GHC.StgToJS
jsBackend = Named JavaScript

-- | Via-C ("unregisterised") backend.
--
-- Compiles Cmm code into C code, then relies on a C compiler
-- to produce machine code.
--
-- It produces code objects that are /not/ ABI compatible
-- with those produced by NCG and LLVM backends.
--
-- Produced code is expected to be less efficient than the
-- one produced by NCG and LLVM backends because STG
-- registers are not pinned into real registers.  On the
-- other hand, it supports more target platforms (those
-- having a valid C toolchain).
--
-- See "GHC.CmmToC"
viaCBackend = Named ViaC

-- | The ByteCode interpreter.
--
-- Produce ByteCode objects (BCO, see "GHC.ByteCode") that
-- can be interpreted.  It is used by GHCi.
--
-- Currently some extensions are not supported
-- (foreign primops).
--
-- See "GHC.StgToByteCode"
interpreterBackend = Named Interpreter

-- | A dummy back end that generates no code.
--
-- Use this back end to disable code generation.  It is particularly
-- useful when GHC is used as a library for other purpose than
-- generating code (e.g. to generate documentation with Haddock) or
-- when the user requested it (via `-fno-code`) for some reason.
noBackend = Named NoBackend

---------------------------------------------------------------------------------

-- | This enumeration type specifies how the back end wishes GHC's
-- primitives to be implemented.  (Module "GHC.StgToCmm.Prim" provides
-- a generic implementation of every primitive, but some primitives,
-- like `IntQuotRemOp`, can be implemented more efficiently by
-- certain back ends on certain platforms.  For example, by using a
-- machine instruction that simultaneously computes quotient and remainder.)
--
-- For the meaning of each alternative, consult
-- "GHC.StgToCmm.Config".  (In a perfect world, type
-- `PrimitiveImplementation` would be defined there, in the module
-- that determines its meaning.  But I could not figure out how to do
-- it without mutual recursion across module boundaries.)
data PrimitiveImplementation
    = LlvmPrimitives    -- ^ Primitives supported by LLVM
    | NcgPrimitives     -- ^ Primitives supported by the native code generator
    | JSPrimitives      -- ^ Primitives supported by JS backend
    | GenericPrimitives -- ^ Primitives supported by all back ends
  deriving Show

-- | Names a function that generates code and writes the results to a
-- file, of this type:
--
-- >    Logger
-- > -> DynFlags
-- > -> Module -- ^ module being compiled
-- > -> ModLocation
-- > -> FilePath -- ^ Where to write output
-- > -> Set UnitId -- ^ dependencies
-- > -> Stream IO RawCmmGroup a -- results from `StgToCmm`
-- > -> IO a
--
-- The functions so named are defined in "GHC.Driver.CodeOutput".
--
-- We expect one function per back end—or more precisely, one function
-- for each back end that writes code to a file.  (The interpreter
-- does not write to files; its output lives only in memory.)
data DefunctionalizedCodeOutput
  = NcgCodeOutput
  | ViaCCodeOutput
  | LlvmCodeOutput
  | JSCodeOutput

-- | Names a function that tells the driver what should happen after
-- assembly code is written.  This might include running a C compiler,
-- running LLVM, running an assembler, or various similar activities.
-- The function named normally has this type:
--
-- >    TPipelineClass TPhase m
-- > => PipeEnv
-- > -> HscEnv
-- > -> Maybe ModLocation
-- > -> FilePath
-- > -> m (Maybe FilePath)
--
-- The functions so named are defined in "GHC.Driver.Pipeline".
data DefunctionalizedPostHscPipeline
  = NcgPostHscPipeline
  | ViaCPostHscPipeline
  | LlvmPostHscPipeline
  | JSPostHscPipeline
  | NoPostHscPipeline -- ^ After code generation, nothing else need happen.

-- | Names a function that tells the driver what command-line options
-- to include when invoking a C compiler.  It's meant for @-D@ options that
-- define symbols for the C preprocessor.  Because the exact symbols
-- defined might depend on versions of tools located in the file
-- system (/cough/ LLVM /cough/), the function requires an `IO` action.
-- The function named has this type:
--
-- > Logger -> DynFlags -> IO [String]
data DefunctionalizedCDefs
  = NoCDefs   -- ^ No additional command-line options are needed
  | LlvmCDefs -- ^ Return command-line options that tell GHC about the
              -- LLVM version.

---------------------------------------------------------------------------------

-- | An informal description of the back end, for use in
-- issuing warning messages /only/.  If code depends on
-- what's in the string, you deserve what happens to you.
backendDescription :: Backend -> String
backendDescription (Named b) = case b of
  NCG         -> "native code generator"
  LLVM        -> "LLVM"
  ViaC        -> "compiling via C"
  JavaScript  -> "compiling to JavaScript"
  Interpreter -> "byte-code interpreter"
  NoBackend   -> "no code generated"

-- | This flag tells the compiler driver whether the back
-- end will write files: interface files and object files.
-- It is typically true for "real" back ends that generate
-- code into the filesystem.  (That means, not the interpreter.)
backendWritesFiles :: Backend -> Bool
backendWritesFiles (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> False

-- | When the back end does write files, this value tells
-- the compiler in what manner of file the output should go:
-- temporary, persistent, or specific.
backendPipelineOutput :: Backend -> PipelineOutput
backendPipelineOutput (Named b) = case b of
  NCG         -> Persistent
  LLVM        -> Persistent
  ViaC        -> Persistent
  JavaScript  -> Persistent
  Interpreter -> NoOutputFile
  NoBackend   -> NoOutputFile

-- | This flag tells the driver whether the back end can
-- reuse code (bytecode or object code) that has been
-- loaded dynamically.  Likely true only of the interpreter.
backendCanReuseLoadedCode :: Backend -> Bool
backendCanReuseLoadedCode (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | It is true of every back end except @-fno-code@
-- that it "generates code."  Surprisingly, this property
-- influences the driver in a ton of ways.  Some examples:
--
--   * If the back end does not generate code, then the
--     driver needs to turn on code generation for
--     Template Haskell (because that code needs to be
--     generated and run at compile time).
--
--   * If the back end does not generate code, then the
--     driver does not need to deal with an output file.
--
--   * If the back end /does/ generate code, then the
--     driver supports `HscRecomp`.  If not, recompilation
--     does not need a linkable (and is automatically up
--     to date).
--
backendGeneratesCode :: Backend -> Bool
backendGeneratesCode (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> True
  NoBackend   -> False

-- | Like `backendGeneratesCode`, but restricted to @.hs-boot@
-- files: the interpreter generates no code for boot files.
backendGeneratesCodeForHsBoot :: Backend -> Bool
backendGeneratesCodeForHsBoot (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> False

-- | When set, this flag turns on interface writing for
-- Backpack.  It should probably be the same as
-- `backendGeneratesCode`, but it is kept distinct for
-- reasons described in Note [-fno-code mode].
backendSupportsInterfaceWriting :: Backend -> Bool
backendSupportsInterfaceWriting (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> True
  NoBackend   -> False

-- | When preparing code for this back end, the type
-- checker should pay attention to SPECIALISE pragmas.  If
-- this flag is `False`, then the type checker ignores
-- SPECIALISE pragmas (for imported things?).
backendRespectsSpecialise :: Backend -> Bool
backendRespectsSpecialise (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> False

-- | This back end wants the `mi_top_env` field of a
-- `ModIface` to be populated (with the top-level bindings
-- of the original source).  Only true for the interpreter.
backendWantsGlobalBindings :: Backend -> Bool
backendWantsGlobalBindings (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | The back end targets a technology that implements
-- `switch` natively.  (For example, LLVM or C.)  Therefore
-- it is not necessary for GHC to compile a Cmm `Switch`
-- form into a decision tree with jump tables at the
-- leaves.
backendHasNativeSwitch :: Backend -> Bool
backendHasNativeSwitch (Named b) = case b of
  NCG         -> False
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> False

-- | As noted in the documentation for
-- `PrimitiveImplementation`, certain primitives have
-- multiple implementations, depending on the capabilities
-- of the back end.  This field signals to module
-- "GHC.StgToCmm.Prim" what implementations to use with
-- this back end.
backendPrimitiveImplementation :: Backend -> PrimitiveImplementation
backendPrimitiveImplementation (Named b) = case b of
  NCG         -> NcgPrimitives
  LLVM        -> LlvmPrimitives
  JavaScript  -> JSPrimitives
  ViaC        -> GenericPrimitives
  Interpreter -> GenericPrimitives
  NoBackend   -> GenericPrimitives

-- | When this value is `IsValid`, the back end is
-- compatible with vector instructions.  When it is
-- `NotValid`, it carries a message that is shown to
-- users.
backendSimdValidity :: Backend -> Validity' String
backendSimdValidity (Named b) = case b of
    NCG         -> IsValid
    LLVM        -> IsValid
    ViaC        -> noSimd
    JavaScript  -> noSimd
    Interpreter -> noSimd
    NoBackend   -> noSimd
  where
    -- message shared by every back end without SIMD support
    noSimd = NotValid $ unlines
      ["SIMD vector instructions require using the NCG or the LLVM backend."]

-- | This flag says whether the back end supports large
-- binary blobs.  See Note [Embedding large binary blobs]
-- in "GHC.CmmToAsm.Ppr".
backendSupportsEmbeddedBlobs :: Backend -> Bool
backendSupportsEmbeddedBlobs (Named b) = case b of
  NCG         -> True
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag tells the compiler driver that the back end
-- does not support every target platform; it supports
-- only platforms that claim NCG support.  (It's set only
-- for the native code generator.)  Crufty.  If the driver
-- tries to use the native code generator /without/
-- platform support, the driver fails over to the LLVM
-- back end.
backendNeedsPlatformNcgSupport :: Backend -> Bool
backendNeedsPlatformNcgSupport (Named b) = case b of
  NCG         -> True
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag is set if the back end can generate code
-- for proc points.
-- If the flag is not set, then a Cmm
-- pass needs to split proc points (that is, turn each
-- proc point into a standalone procedure).
backendSupportsUnsplitProcPoints :: Backend -> Bool
backendSupportsUnsplitProcPoints (Named b) = case b of
  NCG         -> True
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag guides the driver in resolving issues about
-- API support on the target platform.  If the flag is set,
-- then these things are true:
--
--    * When the target platform supports /only/ an unregisterised API,
--      this backend can be replaced with compilation via C.
--
--    * When the target does /not/ support an unregisterised API,
--      this back end can replace compilation via C.
--
backendSwappableWithViaC :: Backend -> Bool
backendSwappableWithViaC (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag is true if the back end works *only* with
-- the unregisterised ABI.
backendUnregisterisedAbiOnly :: Backend -> Bool
backendUnregisterisedAbiOnly (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> True
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag is set if the back end generates C code in
-- a @.hc@ file.  The flag lets the compiler driver know
-- if the command-line flag @-C@ is meaningful.
backendGeneratesHc :: Backend -> Bool
backendGeneratesHc (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> True
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> False

-- | This flag says whether SPT (static pointer table)
-- entries will be inserted dynamically if needed.  If
-- this flag is `False`, then "GHC.Iface.Tidy" should emit C
-- stubs that initialize the SPT entries.
backendSptIsDynamic :: Backend -> Bool
backendSptIsDynamic (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | If this flag is unset, then the driver ignores the flag @-fbreak-points@,
-- since backends other than the interpreter tend to panic on breakpoints.
backendSupportsBreakpoints :: Backend -> Bool
backendSupportsBreakpoints (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | If this flag is set, then the driver forces the
-- optimization level to 0, issuing a warning message if
-- the command line requested a higher optimization level.
backendForcesOptimization0 :: Backend -> Bool
backendForcesOptimization0 (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | I don't understand exactly how this works.  But if
-- this flag is set *and* another condition is met, then
-- @ghc/Main.hs@ will alter the `DynFlags` so that all the
-- `hostFullWays` are asked for.  It is set only for the interpreter.
backendNeedsFullWays :: Backend -> Bool
backendNeedsFullWays (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> False
  Interpreter -> True
  NoBackend   -> False

-- | This flag is also special for the interpreter: if a
-- message about a module needs to be shown, do we know
-- anything special about where the module came from?  The
-- Boolean argument is a `recomp` flag.
backendSpecialModuleSource :: Backend -> Bool -> Maybe String
backendSpecialModuleSource (Named b) = case b of
  NCG         -> const Nothing
  LLVM        -> const Nothing
  ViaC        -> const Nothing
  JavaScript  -> const Nothing
  Interpreter -> \recomp -> if recomp then Just "interpreted" else Nothing
  NoBackend   -> const (Just "nothing")

-- | This flag says whether the back end supports Haskell
-- Program Coverage (HPC).  If not, the compiler driver
-- will ignore the `-fhpc` option (and will issue a
-- warning message if it is used).
backendSupportsHpc :: Backend -> Bool
backendSupportsHpc (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> False
  Interpreter -> False
  NoBackend   -> True

-- | This flag says whether the back end supports foreign
-- import of C functions.  ("Supports" means "does not
-- barf on," so @-fno-code@ supports foreign C imports.)
backendSupportsCImport :: Backend -> Bool
backendSupportsCImport (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> True
  NoBackend   -> True

-- | This flag says whether the back end supports foreign
-- export of Haskell functions to C.
backendSupportsCExport :: Backend -> Bool
backendSupportsCExport (Named b) = case b of
  NCG         -> True
  LLVM        -> True
  ViaC        -> True
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> True

-- | When using this back end, it may be necessary or
-- advisable to pass some `-D` options to a C compiler.
-- This (defunctionalized) function produces those
-- options, if any.  An IO action may be necessary in
-- order to interrogate external tools about what version
-- they are, for example.
--
-- The function's type is
-- @
-- Logger -> DynFlags -> IO [String]
-- @
--
-- This field is usually defaulted.
backendCDefs :: Backend -> DefunctionalizedCDefs
backendCDefs (Named b) = case b of
  NCG         -> NoCDefs
  LLVM        -> LlvmCDefs
  ViaC        -> NoCDefs
  JavaScript  -> NoCDefs
  Interpreter -> NoCDefs
  NoBackend   -> NoCDefs

-- | This (defunctionalized) function generates code and
-- writes it to a file.
-- The type of the function is
--
-- >    Logger
-- > -> DynFlags
-- > -> Module -- ^ module being compiled
-- > -> ModLocation
-- > -> FilePath -- ^ Where to write output
-- > -> Set UnitId -- ^ dependencies
-- > -> Stream IO RawCmmGroup a -- results from `StgToCmm`
-- > -> IO a
backendCodeOutput :: Backend -> DefunctionalizedCodeOutput
backendCodeOutput (Named b) = case b of
  NCG         -> NcgCodeOutput
  LLVM        -> LlvmCodeOutput
  ViaC        -> ViaCCodeOutput
  JavaScript  -> JSCodeOutput
  Interpreter -> panic "backendCodeOutput: interpreterBackend"
  NoBackend   -> panic "backendCodeOutput: noBackend"

-- | Is linking done with the JavaScript linker?  True only
-- for the JS back end.
backendUseJSLinker :: Backend -> Bool
backendUseJSLinker (Named b) = case b of
  NCG         -> False
  LLVM        -> False
  ViaC        -> False
  JavaScript  -> True
  Interpreter -> False
  NoBackend   -> False

-- | This (defunctionalized) function tells the compiler
-- driver what else has to be run after code output.
-- The type of the function is
--
-- >
-- >    TPipelineClass TPhase m
-- > => PipeEnv
-- > -> HscEnv
-- > -> Maybe ModLocation
-- > -> FilePath
-- > -> m (Maybe FilePath)
backendPostHscPipeline :: Backend -> DefunctionalizedPostHscPipeline
backendPostHscPipeline (Named b) = case b of
  NCG         -> NcgPostHscPipeline
  LLVM        -> LlvmPostHscPipeline
  ViaC        -> ViaCPostHscPipeline
  JavaScript  -> JSPostHscPipeline
  Interpreter -> NoPostHscPipeline
  NoBackend   -> NoPostHscPipeline

-- | Somewhere in the compiler driver, when compiling
-- Haskell source (as opposed to a boot file or a sig
-- file), it needs to know what to do with the code that
-- the `backendCodeOutput` writes to a file.
-- This `Phase`
-- value gives instructions like "run the C compiler",
-- "run the assembler," or "run the LLVM Optimizer."
backendNormalSuccessorPhase :: Backend -> Phase
backendNormalSuccessorPhase (Named b) = case b of
  NCG         -> As False
  LLVM        -> LlvmOpt
  ViaC        -> HCc
  JavaScript  -> StopLn
  Interpreter -> StopLn
  NoBackend   -> StopLn

-- | Name of the back end, if any.  Used to migrate legacy
-- clients of the GHC API.  Code within the GHC source
-- tree should not refer to a back end's name.
backendName :: Backend -> BackendName
backendName (Named b) = case b of
  NCG         -> NCG
  LLVM        -> LLVM
  ViaC        -> ViaC
  JavaScript  -> JavaScript
  Interpreter -> Interpreter
  NoBackend   -> NoBackend

-- | A list of all back ends.  They are ordered as we wish them to
-- appear when they are enumerated in error messages.
allBackends :: [Backend]
allBackends =
  [ ncgBackend
  , llvmBackend
  , viaCBackend
  , jsBackend
  , interpreterBackend
  , noBackend
  ]

-- | When foreign C import or export is invalid, the carried value
-- enumerates the /valid/ back ends.
backendValidityOfCImport, backendValidityOfCExport :: Backend -> Validity' [Backend]

backendValidityOfCImport backend
  | backendSupportsCImport backend = IsValid
  | otherwise = NotValid $ filter backendSupportsCImport allBackends

backendValidityOfCExport backend
  | backendSupportsCExport backend = IsValid
  | otherwise = NotValid $ filter backendSupportsCExport allBackends


{- Note [Backend Defunctionalization]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
I had hoped to include code-output and post-hsc-pipeline functions
directly in the `Backend` record itself.  But this agenda was derailed
by mutual recursion in the types:

  - A `DynFlags` record contains a back end of type `Backend`.
  - A `Backend` contains a code-output function.
  - A code-output function takes Cmm as input.
  - Cmm can include a `CLabel`.
  - A `CLabel` can have elements that are defined in
    `GHC.Driver.Session`, where `DynFlags` is defined.

There is also a nasty issue in the values: a typical post-backend
pipeline function both depends on and is depended upon by functions in
"GHC.Driver.Pipeline".

I cut the Gordian knot by removing the function types from the
`Backend` record.  Instead, a function is represented by its /name/.
This representation is an example of an old trick called
/defunctionalization/, which has been used in both compilers and
interpreters for languages with first-class, nested functions.  Here,
a function's name is a value of an algebraic data type.  For example,
a code-output function is represented by a value of this type:

    data DefunctionalizedCodeOutput
      = NcgCodeOutput
      | ViaCCodeOutput
      | LlvmCodeOutput

Such a function may be applied in one of two ways:

  - In this particular example, a `case` expression in module
    "GHC.Driver.CodeOutput" discriminates on the value and calls the
    designated function.

  - In another example, a function of type `DefunctionalizedCDefs` is
    applied by calling function `applyCDefs`, which has this type:

    @
    applyCDefs :: DefunctionalizedCDefs -> Logger -> DynFlags -> IO [String]
    @

    Function `applyCDefs` is defined in module "GHC.SysTools.Cpp".

I don't love this solution, but defunctionalization is a standard
thing, and it makes the meanings of the enumeration values clear.
Anyone defining a new back end will need to extend both the
`DefunctionalizedCodeOutput` type and the corresponding apply
function.
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Backend/0000755000000000000000000000000007346545000020470 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Backend/Internal.hs0000644000000000000000000000171407346545000022603 0ustar0000000000000000{-| Module : GHC.Driver.Backend.Internal Description : Interface for migrating legacy clients of the GHC API In versions of GHC up through 9.2, a `Backend` was represented only by its name. This module is meant to aid clients written against the GHC API, versions 9.2 and older. The module provides an alternative way to name any back end found in GHC 9.2. /Code within the GHC source tree should not import this module./ (#20927). Only back ends found in version 9.2 have names. -} module GHC.Driver.Backend.Internal ( -- * Name of a back end BackendName(..) ) where import GHC.Prelude data BackendName = NCG -- ^ Names the native code generator backend. | LLVM -- ^ Names the LLVM backend. | ViaC -- ^ Names the Via-C backend. | JavaScript -- ^ Names the JS backend. | Interpreter -- ^ Names the ByteCode interpreter. | NoBackend -- ^ Names the `-fno-code` backend. deriving (Eq, Show) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Backpack/0000755000000000000000000000000007346545000020640 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Backpack/Syntax.hs0000644000000000000000000000516507346545000022471 0ustar0000000000000000-- | This is the syntax for bkp files which are parsed in 'ghc --backpack' -- mode. This syntax is used purely for testing purposes. 
module GHC.Driver.Backpack.Syntax ( -- * Backpack abstract syntax HsUnitId(..), LHsUnitId, HsModuleSubst, LHsModuleSubst, HsModuleId(..), LHsModuleId, HsComponentId(..), LHsUnit, HsUnit(..), LHsUnitDecl, HsUnitDecl(..), IncludeDecl(..), LRenaming, Renaming(..), ) where import GHC.Prelude import GHC.Hs import GHC.Types.SrcLoc import GHC.Types.SourceFile import GHC.Unit.Types import GHC.Unit.Info import GHC.Utils.Outputable {- ************************************************************************ * * User syntax * * ************************************************************************ -} data HsComponentId = HsComponentId { hsPackageName :: PackageName, hsComponentId :: UnitId } instance Outputable HsComponentId where ppr (HsComponentId _pn cid) = ppr cid -- todo debug with pn data HsUnitId n = HsUnitId (Located n) [LHsModuleSubst n] type LHsUnitId n = Located (HsUnitId n) type HsModuleSubst n = (Located ModuleName, LHsModuleId n) type LHsModuleSubst n = Located (HsModuleSubst n) data HsModuleId n = HsModuleVar (Located ModuleName) | HsModuleId (LHsUnitId n) (Located ModuleName) type LHsModuleId n = Located (HsModuleId n) -- | Top level @unit@ declaration in a Backpack file. data HsUnit n = HsUnit { hsunitName :: Located n, hsunitBody :: [LHsUnitDecl n] } type LHsUnit n = Located (HsUnit n) -- | A declaration in a package, e.g. a module or signature definition, -- or an include. data HsUnitDecl n = DeclD HscSource (Located ModuleName) (Located (HsModule GhcPs)) | IncludeD (IncludeDecl n) type LHsUnitDecl n = Located (HsUnitDecl n) -- | An include of another unit data IncludeDecl n = IncludeDecl { idUnitId :: LHsUnitId n, idModRenaming :: Maybe [ LRenaming ], -- | Is this a @dependency signature@ include? If so, -- we don't compile this include when we instantiate this -- unit (as there should not be any modules brought into -- scope.) idSignatureInclude :: Bool } -- | Rename a module from one name to another. 
The identity renaming -- means that the module should be brought into scope. data Renaming = Renaming { renameFrom :: Located ModuleName , renameTo :: Maybe (Located ModuleName) } type LRenaming = Located Renaming ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/CmdLine.hs0000644000000000000000000003161707346545000021020 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE RankNTypes #-} ------------------------------------------------------------------------------- -- -- | Command-line parser -- -- This is an abstract command-line parser used by DynFlags. -- -- (c) The University of Glasgow 2005 -- ------------------------------------------------------------------------------- module GHC.Driver.CmdLine ( processArgs, parseResponseFile, OptKind(..), GhcFlagMode(..), Flag(..), defFlag, defGhcFlag, defGhciFlag, defHiddenFlag, hoistFlag, errorsToGhcException, Err(..), Warn, warnsToMessages, EwM, runEwM, addErr, addWarn, addFlagWarn, getArg, getCurLoc, liftEwM ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.Bag import GHC.Types.SrcLoc import GHC.Types.Error import GHC.Utils.Error import GHC.Driver.Errors.Types import GHC.Driver.Errors.Ppr () -- instance Diagnostic DriverMessage import GHC.Utils.Outputable (text) import Data.Function import Data.List (sortBy, intercalate, stripPrefix) import Data.Word import GHC.ResponseFile import Control.Exception (IOException, catch) import Control.Monad (ap) import Control.Monad.IO.Class -------------------------------------------------------- -- The Flag and OptKind types -------------------------------------------------------- data Flag m = Flag { flagName :: String, -- Flag, without the leading "-" flagOptKind :: OptKind m, -- What to do if we see it flagGhcMode :: GhcFlagMode -- Which modes this flag affects } defFlag :: String -> OptKind m -> Flag m defFlag name optKind = Flag name optKind AllModes defGhcFlag :: String -> OptKind m -> Flag m defGhcFlag name optKind = Flag 
name optKind OnlyGhc defGhciFlag :: String -> OptKind m -> Flag m defGhciFlag name optKind = Flag name optKind OnlyGhci defHiddenFlag :: String -> OptKind m -> Flag m defHiddenFlag name optKind = Flag name optKind HiddenFlag hoistFlag :: forall m n. (forall a. m a -> n a) -> Flag m -> Flag n hoistFlag f (Flag a b c) = Flag a (go b) c where go (NoArg k) = NoArg (go2 k) go (HasArg k) = HasArg (\s -> go2 (k s)) go (SepArg k) = SepArg (\s -> go2 (k s)) go (Prefix k) = Prefix (\s -> go2 (k s)) go (OptPrefix k) = OptPrefix (\s -> go2 (k s)) go (OptIntSuffix k) = OptIntSuffix (\n -> go2 (k n)) go (IntSuffix k) = IntSuffix (\n -> go2 (k n)) go (Word64Suffix k) = Word64Suffix (\s -> go2 (k s)) go (FloatSuffix k) = FloatSuffix (\s -> go2 (k s)) go (PassFlag k) = PassFlag (\s -> go2 (k s)) go (AnySuffix k) = AnySuffix (\s -> go2 (k s)) go2 :: EwM m a -> EwM n a go2 (EwM g) = EwM $ \loc es ws -> f (g loc es ws) -- | GHC flag modes describing when a flag has an effect. data GhcFlagMode = OnlyGhc -- ^ The flag only affects the non-interactive GHC | OnlyGhci -- ^ The flag only affects the interactive GHC | AllModes -- ^ The flag affects multiple ghc modes | HiddenFlag -- ^ This flag should not be seen in cli completion data OptKind m -- Suppose the flag is -f = NoArg (EwM m ()) -- -f all by itself | HasArg (String -> EwM m ()) -- -farg or -f arg | SepArg (String -> EwM m ()) -- -f arg | Prefix (String -> EwM m ()) -- -farg | OptPrefix (String -> EwM m ()) -- -f or -farg (i.e. 
the arg is optional) | OptIntSuffix (Maybe Int -> EwM m ()) -- -f or -f=n; pass n to fn | IntSuffix (Int -> EwM m ()) -- -f or -f=n; pass n to fn | Word64Suffix (Word64 -> EwM m ()) -- -f or -f=n; pass n to fn | FloatSuffix (Float -> EwM m ()) -- -f or -f=n; pass n to fn | PassFlag (String -> EwM m ()) -- -f; pass "-f" fn | AnySuffix (String -> EwM m ()) -- -f or -farg; pass entire "-farg" to fn -------------------------------------------------------- -- The EwM monad -------------------------------------------------------- -- | A command-line error message newtype Err = Err { errMsg :: Located String } -- | A command-line warning message and the reason it arose -- -- This used to be own type, but now it's just @'MsgEnvelope' 'DriverMessage'@. type Warn = Located DriverMessage type Errs = Bag Err type Warns = [Warn] -- EwM ("errors and warnings monad") is a monad -- transformer for m that adds an (err, warn) state newtype EwM m a = EwM { unEwM :: Located String -- Current parse arg -> Errs -> Warns -> m (Errs, Warns, a) } deriving (Functor) instance Monad m => Applicative (EwM m) where pure v = EwM (\_ e w -> return (e, w, v)) (<*>) = ap instance Monad m => Monad (EwM m) where (EwM f) >>= k = EwM (\l e w -> do (e', w', r) <- f l e w unEwM (k r) l e' w') instance MonadIO m => MonadIO (EwM m) where liftIO = liftEwM . 
liftIO runEwM :: EwM m a -> m (Errs, Warns, a) runEwM action = unEwM action (panic "processArgs: no arg yet") emptyBag mempty setArg :: Located String -> EwM m () -> EwM m () setArg l (EwM f) = EwM (\_ es ws -> f l es ws) addErr :: Monad m => String -> EwM m () addErr e = EwM (\(L loc _) es ws -> return (es `snocBag` Err (L loc e), ws, ())) addWarn :: Monad m => String -> EwM m () addWarn msg = addFlagWarn $ DriverUnknownMessage $ mkSimpleUnknownDiagnostic $ mkPlainDiagnostic WarningWithoutFlag noHints $ text msg addFlagWarn :: Monad m => DriverMessage -> EwM m () addFlagWarn msg = EwM (\(L loc _) es ws -> return (es, L loc msg : ws, ())) getArg :: Monad m => EwM m String getArg = EwM (\(L _ arg) es ws -> return (es, ws, arg)) getCurLoc :: Monad m => EwM m SrcSpan getCurLoc = EwM (\(L loc _) es ws -> return (es, ws, loc)) liftEwM :: Monad m => m a -> EwM m a liftEwM action = EwM (\_ es ws -> do { r <- action; return (es, ws, r) }) warnsToMessages :: DiagOpts -> [Warn] -> Messages DriverMessage warnsToMessages diag_opts = foldr (\(L loc w) ws -> addMessage (mkPlainMsgEnvelope diag_opts loc w) ws) emptyMessages -------------------------------------------------------- -- Processing arguments -------------------------------------------------------- processArgs :: Monad m => [Flag m] -- ^ cmdline parser spec -> [Located String] -- ^ args -> (FilePath -> EwM m [Located String]) -- ^ response file handler -> m ( [Located String], -- spare args [Err], -- errors Warns ) -- warnings processArgs spec args handleRespFile = do (errs, warns, spare) <- runEwM action return (spare, bagToList errs, warns) where action = process args [] -- process :: [Located String] -> [Located String] -> EwM m [Located String] process [] spare = return (reverse spare) process (L _ ('@' : resp_file) : args) spare = do resp_args <- handleRespFile resp_file process (resp_args ++ args) spare process (locArg@(L _ ('-' : arg)) : args) spare = case findArg spec arg of Just (rest, opt_kind) -> case 
processOneArg opt_kind rest arg args of Left err -> let b = process args spare in (setArg locArg $ addErr err) >> b Right (action,rest) -> let b = process rest spare in (setArg locArg $ action) >> b Nothing -> process args (locArg : spare) process (arg : args) spare = process args (arg : spare) processOneArg :: OptKind m -> String -> String -> [Located String] -> Either String (EwM m (), [Located String]) processOneArg opt_kind rest arg args = let dash_arg = '-' : arg rest_no_eq = dropEq rest in case opt_kind of NoArg a -> assert (null rest) Right (a, args) HasArg f | notNull rest_no_eq -> Right (f rest_no_eq, args) | otherwise -> case args of [] -> missingArgErr dash_arg (L _ arg1:args1) -> Right (f arg1, args1) -- See #9776 SepArg f -> case args of [] -> missingArgErr dash_arg (L _ arg1:args1) -> Right (f arg1, args1) -- See #12625 Prefix f | notNull rest_no_eq -> Right (f rest_no_eq, args) | otherwise -> missingArgErr dash_arg PassFlag f | notNull rest -> unknownFlagErr dash_arg | otherwise -> Right (f dash_arg, args) OptIntSuffix f | null rest -> Right (f Nothing, args) | Just n <- parseInt rest_no_eq -> Right (f (Just n), args) | otherwise -> Left ("malformed integer argument in " ++ dash_arg) IntSuffix f | Just n <- parseInt rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed integer argument in " ++ dash_arg) Word64Suffix f | Just n <- parseWord64 rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed natural argument in " ++ dash_arg) FloatSuffix f | Just n <- parseFloat rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed float argument in " ++ dash_arg) OptPrefix f -> Right (f rest_no_eq, args) AnySuffix f -> Right (f dash_arg, args) findArg :: [Flag m] -> String -> Maybe (String, OptKind m) findArg spec arg = case sortBy (compare `on` (length . 
fst)) -- prefer longest matching flag [ (removeSpaces rest, optKind) | flag <- spec, let optKind = flagOptKind flag, Just rest <- [stripPrefix (flagName flag) arg], arg_ok optKind rest arg ] of [] -> Nothing (one:_) -> Just one arg_ok :: OptKind t -> [Char] -> String -> Bool arg_ok (NoArg _) rest _ = null rest arg_ok (HasArg _) _ _ = True arg_ok (SepArg _) rest _ = null rest arg_ok (Prefix _) _ _ = True -- Missing argument checked for in processOneArg t -- to improve error message (#12625) arg_ok (OptIntSuffix _) _ _ = True arg_ok (IntSuffix _) _ _ = True arg_ok (Word64Suffix _) _ _ = True arg_ok (FloatSuffix _) _ _ = True arg_ok (OptPrefix _) _ _ = True arg_ok (PassFlag _) rest _ = null rest arg_ok (AnySuffix _) _ _ = True -- | Parse an Int -- -- Looks for "433" or "=342", with no trailing gubbins -- * n or =n => Just n -- * gibberish => Nothing parseInt :: String -> Maybe Int parseInt s = case reads s of ((n,""):_) -> Just n _ -> Nothing parseWord64 :: String -> Maybe Word64 parseWord64 s = case reads s of ((n,""):_) -> Just n _ -> Nothing parseFloat :: String -> Maybe Float parseFloat s = case reads s of ((n,""):_) -> Just n _ -> Nothing -- | Discards a leading equals sign dropEq :: String -> String dropEq ('=' : s) = s dropEq s = s unknownFlagErr :: String -> Either String a unknownFlagErr f = Left ("unrecognised flag: " ++ f) missingArgErr :: String -> Either String a missingArgErr f = Left ("missing argument for flag: " ++ f) -------------------------------------------------------- -- Utils -------------------------------------------------------- -- | Parse a response file into arguments. 
parseResponseFile :: MonadIO m => FilePath -> EwM m [Located String] parseResponseFile path = do res <- liftIO $ fmap Right (readFile path) `catch` \(e :: IOException) -> pure (Left e) case res of Left _err -> addErr "Could not open response file" >> return [] Right resp_file -> return $ map (mkGeneralLocated path) (unescapeArgs resp_file) -- See Note [Handling errors when parsing command-line flags] errorsToGhcException :: [(String, -- Location String)] -- Error -> GhcException errorsToGhcException errs = UsageError $ intercalate "\n" $ [ l ++ ": " ++ e | (l, e) <- errs ] {- Note [Handling errors when parsing command-line flags] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parsing of static and mode flags happens before any session is started, i.e., before the first call to 'GHC.withGhc'. Therefore, to report errors for invalid usage of these two types of flags, we can not call any function that needs DynFlags, as there are no DynFlags available yet (unsafeGlobalDynFlags is not set either). So we always print "on the commandline" as the location, which is true except for Api users, which is probably ok. When reporting errors for invalid usage of dynamic flags we /can/ make use of DynFlags, and we do so explicitly in DynFlags.parseDynamicFlagsFull. Before, we called unsafeGlobalDynFlags when an invalid (combination of) flag(s) was given on the commandline, resulting in panics (#9963). 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config.hs0000644000000000000000000000211207346545000020676 0ustar0000000000000000-- | Subsystem configuration module GHC.Driver.Config ( initOptCoercionOpts , initSimpleOpts , initEvalOpts ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Core.SimpleOpt import GHC.Core.Coercion.Opt import GHCi.Message (EvalOpts(..)) -- | Initialise coercion optimiser configuration from DynFlags initOptCoercionOpts :: DynFlags -> OptCoercionOpts initOptCoercionOpts dflags = OptCoercionOpts { optCoercionEnabled = not (hasNoOptCoercion dflags) } -- | Initialise Simple optimiser configuration from DynFlags initSimpleOpts :: DynFlags -> SimpleOpts initSimpleOpts dflags = SimpleOpts { so_uf_opts = unfoldingOpts dflags , so_co_opts = initOptCoercionOpts dflags , so_eta_red = gopt Opt_DoEtaReduction dflags } -- | Extract GHCi options from DynFlags and step initEvalOpts :: DynFlags -> Bool -> EvalOpts initEvalOpts dflags step = EvalOpts { useSandboxThread = gopt Opt_GhciSandbox dflags , singleStep = step , breakOnException = gopt Opt_BreakOnException dflags , breakOnError = gopt Opt_BreakOnError dflags } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/Core/0000755000000000000000000000000007346545000021236 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/Core/Lint.hs0000644000000000000000000001620307346545000022502 0ustar0000000000000000module GHC.Driver.Config.Core.Lint ( endPass , endPassHscEnvIO , lintCoreBindings , initEndPassConfig , initLintPassResultConfig , initLintConfig ) where import GHC.Prelude import qualified GHC.LanguageExtensions as LangExt import GHC.Driver.Env import GHC.Driver.DynFlags import GHC.Driver.Config.Diagnostic import GHC.Core import GHC.Core.Lint import GHC.Core.Lint.Interactive import GHC.Core.Opt.Pipeline.Types import GHC.Core.Opt.Simplify ( SimplifyOpts(..) ) import GHC.Core.Opt.Simplify.Env ( SimplMode(..) 
) import GHC.Core.Opt.Monad import GHC.Core.Coercion import GHC.Types.Basic ( CompilerPhase(..) ) import GHC.Utils.Outputable as Outputable {- These functions are not CoreM monad stuff, but they probably ought to be, and it makes a convenient place for them. They print out stuff before and after core passes, and do Core Lint when necessary. -} endPass :: CoreToDo -> CoreProgram -> [CoreRule] -> CoreM () endPass pass binds rules = do { hsc_env <- getHscEnv ; name_ppr_ctx <- getNamePprCtx ; liftIO $ endPassHscEnvIO hsc_env name_ppr_ctx pass binds rules } endPassHscEnvIO :: HscEnv -> NamePprCtx -> CoreToDo -> CoreProgram -> [CoreRule] -> IO () endPassHscEnvIO hsc_env name_ppr_ctx pass binds rules = do { let dflags = hsc_dflags hsc_env ; endPassIO (hsc_logger hsc_env) (initEndPassConfig dflags (interactiveInScope $ hsc_IC hsc_env) name_ppr_ctx pass) binds rules } -- | Type-check a 'CoreProgram'. See Note [Core Lint guarantee]. lintCoreBindings :: DynFlags -> CoreToDo -> [Var] -> CoreProgram -> WarnsAndErrs lintCoreBindings dflags coreToDo vars -- binds = lintCoreBindings' $ LintConfig { l_diagOpts = initDiagOpts dflags , l_platform = targetPlatform dflags , l_flags = perPassFlags dflags coreToDo , l_vars = vars } initEndPassConfig :: DynFlags -> [Var] -> NamePprCtx -> CoreToDo -> EndPassConfig initEndPassConfig dflags extra_vars name_ppr_ctx pass = EndPassConfig { ep_dumpCoreSizes = not (gopt Opt_SuppressCoreSizes dflags) , ep_lintPassResult = if gopt Opt_DoCoreLinting dflags then Just $ initLintPassResultConfig dflags extra_vars pass else Nothing , ep_namePprCtx = name_ppr_ctx , ep_dumpFlag = coreDumpFlag pass , ep_prettyPass = ppr pass , ep_passDetails = pprPassDetails pass } coreDumpFlag :: CoreToDo -> Maybe DumpFlag coreDumpFlag (CoreDoSimplify {}) = Just Opt_D_verbose_core2core coreDumpFlag (CoreDoPluginPass {}) = Just Opt_D_verbose_core2core coreDumpFlag CoreDoFloatInwards = Just Opt_D_dump_float_in coreDumpFlag (CoreDoFloatOutwards {}) = Just 
Opt_D_dump_float_out coreDumpFlag CoreLiberateCase = Just Opt_D_dump_liberate_case coreDumpFlag CoreDoStaticArgs = Just Opt_D_dump_static_argument_transformation coreDumpFlag CoreDoCallArity = Just Opt_D_dump_call_arity coreDumpFlag CoreDoExitify = Just Opt_D_dump_exitify coreDumpFlag (CoreDoDemand {}) = Just Opt_D_dump_dmdanal coreDumpFlag CoreDoCpr = Just Opt_D_dump_cpranal coreDumpFlag CoreDoWorkerWrapper = Just Opt_D_dump_worker_wrapper coreDumpFlag CoreDoSpecialising = Just Opt_D_dump_spec coreDumpFlag CoreDoSpecConstr = Just Opt_D_dump_spec_constr coreDumpFlag CoreCSE = Just Opt_D_dump_cse coreDumpFlag CoreDesugar = Just Opt_D_dump_ds_preopt coreDumpFlag CoreDesugarOpt = Just Opt_D_dump_ds coreDumpFlag CoreTidy = Just Opt_D_dump_simpl coreDumpFlag CorePrep = Just Opt_D_dump_prep coreDumpFlag CoreAddCallerCcs = Nothing coreDumpFlag CoreAddLateCcs = Nothing coreDumpFlag CoreDoPrintCore = Nothing coreDumpFlag (CoreDoRuleCheck {}) = Nothing coreDumpFlag CoreDoNothing = Nothing coreDumpFlag (CoreDoPasses {}) = Nothing initLintPassResultConfig :: DynFlags -> [Var] -> CoreToDo -> LintPassResultConfig initLintPassResultConfig dflags extra_vars pass = LintPassResultConfig { lpr_diagOpts = initDiagOpts dflags , lpr_platform = targetPlatform dflags , lpr_makeLintFlags = perPassFlags dflags pass , lpr_showLintWarnings = showLintWarnings pass , lpr_passPpr = ppr pass , lpr_localsInScope = extra_vars } showLintWarnings :: CoreToDo -> Bool -- Disable Lint warnings on the first simplifier pass, because -- there may be some INLINE knots still tied, which is tiresomely noisy showLintWarnings (CoreDoSimplify cfg) = case sm_phase (so_mode cfg) of InitialPhase -> False _ -> True showLintWarnings _ = True perPassFlags :: DynFlags -> CoreToDo -> LintFlags perPassFlags dflags pass = (defaultLintFlags dflags) { lf_check_global_ids = check_globals , lf_check_inline_loop_breakers = check_lbs , lf_check_static_ptrs = check_static_ptrs , lf_check_linearity = check_linearity , 
lf_check_fixed_rep = check_fixed_rep } where -- In the output of the desugarer, before optimisation, -- we have eta-expanded data constructors with representation-polymorphic -- bindings; so we switch off the representation-polymorphism checks. -- The very simple optimiser will beta-reduce them away. -- See Note [Representation-polymorphism checking built-ins] in GHC.Tc.Utils.Concrete check_fixed_rep = case pass of CoreDesugar -> False _ -> True -- See Note [Checking for global Ids] check_globals = case pass of CoreTidy -> False CorePrep -> False _ -> True -- See Note [Checking for INLINE loop breakers] check_lbs = case pass of CoreDesugar -> False CoreDesugarOpt -> False _ -> True -- See Note [Checking StaticPtrs] check_static_ptrs | not (xopt LangExt.StaticPointers dflags) = AllowAnywhere | otherwise = case pass of CoreDoFloatOutwards _ -> AllowAtTopLevel CoreTidy -> RejectEverywhere CorePrep -> AllowAtTopLevel _ -> AllowAnywhere -- See Note [Linting linearity] check_linearity = gopt Opt_DoLinearCoreLinting dflags || ( case pass of CoreDesugar -> True _ -> False) initLintConfig :: DynFlags -> [Var] -> LintConfig initLintConfig dflags vars =LintConfig { l_diagOpts = initDiagOpts dflags , l_platform = targetPlatform dflags , l_flags = defaultLintFlags dflags , l_vars = vars } defaultLintFlags :: DynFlags -> LintFlags defaultLintFlags dflags = LF { lf_check_global_ids = False , lf_check_inline_loop_breakers = True , lf_check_static_ptrs = AllowAnywhere , lf_check_linearity = gopt Opt_DoLinearCoreLinting dflags , lf_report_unsat_syns = True , lf_check_fixed_rep = True } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/0000755000000000000000000000000007346545000020346 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/Diagnostic.hs0000644000000000000000000000532607346545000022774 0ustar0000000000000000 -- | Functions for initialising error message printing configuration from the -- GHC session flags. 
module GHC.Driver.Config.Diagnostic ( initDiagOpts , initPrintConfig , initPsMessageOpts , initDsMessageOpts , initTcMessageOpts , initDriverMessageOpts , initIfaceMessageOpts ) where import GHC.Driver.Flags import GHC.Driver.DynFlags import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Error (DiagOpts (..)) import GHC.Driver.Errors.Types (GhcMessage, GhcMessageOpts (..), PsMessage, DriverMessage, DriverMessageOpts (..), checkBuildingCabalPackage) import GHC.Driver.Errors.Ppr () -- Diagnostic instances import GHC.Tc.Errors.Types import GHC.HsToCore.Errors.Types import GHC.Types.Error import GHC.Iface.Errors.Types -- | Initialise the general configuration for printing diagnostic messages -- For example, this configuration controls things like whether warnings are -- treated like errors. initDiagOpts :: DynFlags -> DiagOpts initDiagOpts dflags = DiagOpts { diag_warning_flags = warningFlags dflags , diag_fatal_warning_flags = fatalWarningFlags dflags , diag_custom_warning_categories = customWarningCategories dflags , diag_fatal_custom_warning_categories = fatalCustomWarningCategories dflags , diag_warn_is_error = gopt Opt_WarnIsError dflags , diag_reverse_errors = reverseErrors dflags , diag_max_errors = maxErrors dflags , diag_ppr_ctx = initSDocContext dflags defaultErrStyle } -- | Initialise the configuration for printing specific diagnostic messages initPrintConfig :: DynFlags -> DiagnosticOpts GhcMessage initPrintConfig dflags = GhcMessageOpts { psMessageOpts = initPsMessageOpts dflags , tcMessageOpts = initTcMessageOpts dflags , dsMessageOpts = initDsMessageOpts dflags , driverMessageOpts= initDriverMessageOpts dflags } initPsMessageOpts :: DynFlags -> DiagnosticOpts PsMessage initPsMessageOpts _ = NoDiagnosticOpts initTcMessageOpts :: DynFlags -> DiagnosticOpts TcRnMessage initTcMessageOpts dflags = TcRnMessageOpts { tcOptsShowContext = gopt Opt_ShowErrorContext dflags , tcOptsIfaceOpts = initIfaceMessageOpts dflags } initDsMessageOpts :: DynFlags -> 
DiagnosticOpts DsMessage initDsMessageOpts _ = NoDiagnosticOpts initIfaceMessageOpts :: DynFlags -> DiagnosticOpts IfaceMessage initIfaceMessageOpts dflags = IfaceMessageOpts { ifaceShowTriedFiles = verbosity dflags >= 3 , ifaceBuildingCabalPackage = checkBuildingCabalPackage dflags } initDriverMessageOpts :: DynFlags -> DiagnosticOpts DriverMessage initDriverMessageOpts dflags = DriverMessageOpts (initPsMessageOpts dflags) (initIfaceMessageOpts dflags) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/Logger.hs0000644000000000000000000000222407346545000022121 0ustar0000000000000000module GHC.Driver.Config.Logger ( initLogFlags ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Utils.Logger (LogFlags (..)) import GHC.Utils.Outputable -- | Initialize LogFlags from DynFlags initLogFlags :: DynFlags -> LogFlags initLogFlags dflags = LogFlags { log_default_user_context = initSDocContext dflags defaultUserStyle , log_default_dump_context = initSDocContext dflags defaultDumpStyle , log_dump_flags = dumpFlags dflags , log_show_caret = gopt Opt_DiagnosticsShowCaret dflags , log_diagnostics_as_json = gopt Opt_DiagnosticsAsJSON dflags , log_show_warn_groups = gopt Opt_ShowWarnGroups dflags , log_enable_timestamps = not (gopt Opt_SuppressTimestamps dflags) , log_dump_to_file = gopt Opt_DumpToFile dflags , log_dump_dir = dumpDir dflags , log_dump_prefix = dumpPrefix dflags , log_dump_prefix_override = dumpPrefixForce dflags , log_with_ways = gopt Opt_DumpWithWays dflags , log_enable_debug = not (hasNoDebugOutput dflags) , log_verbosity = verbosity dflags , log_ways = Just $ ways dflags } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Config/Parser.hs0000644000000000000000000000110407346545000022132 0ustar0000000000000000module GHC.Driver.Config.Parser ( initParserOpts ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Session import GHC.Driver.Config.Diagnostic import GHC.Parser.Lexer -- | Extracts the flags needed for parsing 
initParserOpts :: DynFlags -> ParserOpts initParserOpts = mkParserOpts <$> extensionFlags <*> initDiagOpts <*> (supportedLanguagesAndExtensions . platformArchOS . targetPlatform) <*> safeImportsOn <*> gopt Opt_Haddock <*> gopt Opt_KeepRawTokenStream <*> const True -- use LINE/COLUMN to update the internal location ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/DynFlags.hs0000644000000000000000000017315107346545000021214 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Driver.DynFlags ( -- * Dynamic flags and associated configuration types DumpFlag(..), GeneralFlag(..), WarningFlag(..), DiagnosticReason(..), Language(..), FatalMessager, FlushOut(..), ProfAuto(..), hasPprDebug, hasNoDebugOutput, hasNoStateHack, hasNoOptCoercion, dopt, dopt_set, dopt_unset, gopt, gopt_set, gopt_unset, wopt, wopt_set, wopt_unset, wopt_fatal, wopt_set_fatal, wopt_unset_fatal, wopt_set_all_custom, wopt_unset_all_custom, wopt_set_all_fatal_custom, wopt_unset_all_fatal_custom, wopt_set_custom, wopt_unset_custom, wopt_set_fatal_custom, wopt_unset_fatal_custom, wopt_any_custom, xopt, xopt_set, xopt_unset, xopt_set_unlessExplSpec, xopt_DuplicateRecordFields, xopt_FieldSelectors, lang_set, DynamicTooState(..), dynamicTooState, setDynamicNow, OnOff(..), DynFlags(..), ParMakeCount(..), ways, HasDynFlags(..), ContainsDynFlags(..), RtsOptsEnabled(..), GhcMode(..), isOneShot, GhcLink(..), isNoLink, PackageFlag(..), PackageArg(..), ModRenaming(..), packageFlagsChanged, IgnorePackageFlag(..), TrustFlag(..), PackageDBFlag(..), PkgDbRef(..), Option(..), showOpt, DynLibLoader(..), positionIndependent, optimisationFlags, targetProfile, ReexportedModule(..), -- ** Manipulating DynFlags defaultDynFlags, -- Settings -> DynFlags initDynFlags, -- DynFlags -> IO DynFlags defaultFatalMessager, defaultFlushOut, optLevelFlags, languageExtensions, TurnOnFlag, turnOn, turnOff, -- ** System tool settings and locations programName, projectVersion, ghcUsagePath, ghciUsagePath, topDir, toolDir, 
versionedAppDir, versionedFilePath, extraGccViaCFlags, globalPackageDatabasePath, -- * Include specifications IncludeSpecs(..), addGlobalInclude, addQuoteInclude, flattenIncludes, addImplicitQuoteInclude, -- * SDoc initSDocContext, initDefaultSDocContext, initPromotionTickContext, -- * Platform features isSse4_1Enabled, isSse4_2Enabled, isAvxEnabled, isAvx2Enabled, isAvx512cdEnabled, isAvx512erEnabled, isAvx512fEnabled, isAvx512pfEnabled, isFmaEnabled, isBmiEnabled, isBmi2Enabled ) where import GHC.Prelude import GHC.Platform import GHC.Platform.Ways import GHC.Platform.Profile import GHC.CmmToAsm.CFG.Weight import GHC.Core.Unfold import GHC.Data.Bool import GHC.Data.EnumSet (EnumSet) import GHC.Data.Maybe import GHC.Builtin.Names ( mAIN_NAME ) import GHC.Driver.Backend import GHC.Driver.Flags import GHC.Driver.Phases ( Phase(..), phaseInputExt ) import GHC.Driver.Plugins.External import GHC.Settings import GHC.Settings.Constants import GHC.Types.Basic ( IntWithInf, treatZeroAsInf ) import GHC.Types.Error (DiagnosticReason(..)) import GHC.Types.ProfAuto import GHC.Types.SafeHaskell import GHC.Types.SrcLoc import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Utils.CliOption import GHC.SysTools.Terminal ( stderrSupportsAnsiColors ) import GHC.UniqueSubdir (uniqueSubdir) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.TmpFs import qualified GHC.Types.FieldLabel as FieldLabel import qualified GHC.Utils.Ppr.Colour as Col import qualified GHC.Data.EnumSet as EnumSet import GHC.Core.Opt.CallerCC.Types import Control.Monad (msum, (<=<)) import Control.Monad.Trans.Class (lift) import Control.Monad.Trans.Except (ExceptT) import Control.Monad.Trans.Reader (ReaderT) import Control.Monad.Trans.Writer (WriterT) import Data.Word import System.IO import System.IO.Error (catchIOError) import System.Environment (lookupEnv) import System.FilePath (normalise, ()) import System.Directory import GHC.Foreign (withCString, peekCString) import qualified 
Data.Set as Set
import qualified GHC.LanguageExtensions as LangExt

-- -----------------------------------------------------------------------------
-- DynFlags

-- | Contains not only a collection of 'GeneralFlag's but also a plethora of
-- information relating to the compilation of a single file or GHC session
data DynFlags = DynFlags {
  ghcMode               :: GhcMode,
  ghcLink               :: GhcLink,
  backend               :: !Backend,
   -- ^ The backend to use (if any).
   --
   -- Whenever you change the backend, also make sure to set 'ghcLink' to
   -- something sensible.
   --
   -- 'NoBackend' can be used to avoid generating any output, however, note that:
   --
   --  * If a program uses Template Haskell the typechecker may need to run code
   --    from an imported module. To facilitate this, code generation is enabled
   --    for modules imported by modules that use template haskell, using the
   --    default backend for the platform.
   --    See Note [-fno-code mode].

  -- formerly Settings
  ghcNameVersion    :: {-# UNPACK #-} !GhcNameVersion,
  fileSettings      :: {-# UNPACK #-} !FileSettings,
  targetPlatform    :: Platform,       -- Filled in by SysTools
  toolSettings      :: {-# UNPACK #-} !ToolSettings,
  platformMisc      :: {-# UNPACK #-} !PlatformMisc,
  rawSettings       :: [(String, String)],
  tmpDir            :: TempDir,

  llvmOptLevel          :: Int,         -- ^ LLVM optimisation level
  verbosity             :: Int,         -- ^ Verbosity level: see Note [Verbosity levels]
  debugLevel            :: Int,         -- ^ How much debug information to produce
  simplPhases           :: Int,         -- ^ Number of simplifier phases
  maxSimplIterations    :: Int,         -- ^ Max simplifier iterations
  ruleCheck             :: Maybe String,
  strictnessBefore      :: [Int],       -- ^ Additional demand analysis

  parMakeCount          :: Maybe ParMakeCount,
    -- ^ The number of modules to compile in parallel
    --   If unspecified, compile with a single job.

  enableTimeStats       :: Bool,        -- ^ Enable RTS timing statistics?
  ghcHeapSize           :: Maybe Int,   -- ^ The heap size to set.

  maxRelevantBinds      :: Maybe Int,   -- ^ Maximum number of bindings from the type envt
                                        --   to show in type error messages
  maxValidHoleFits      :: Maybe Int,   -- ^ Maximum number of hole fits to show
                                        --   in typed hole error messages
  maxRefHoleFits        :: Maybe Int,   -- ^ Maximum number of refinement hole
                                        --   fits to show in typed hole error
                                        --   messages
  refLevelHoleFits      :: Maybe Int,   -- ^ Maximum level of refinement for
                                        --   refinement hole fits in typed hole
                                        --   error messages
  maxUncoveredPatterns  :: Int,         -- ^ Maximum number of unmatched patterns to show
                                        --   in non-exhaustiveness warnings
  maxPmCheckModels      :: Int,         -- ^ Soft limit on the number of models
                                        --   the pattern match checker checks
                                        --   a pattern against. A safe guard
                                        --   against exponential blow-up.
  simplTickFactor       :: Int,         -- ^ Multiplier for simplifier ticks
  dmdUnboxWidth         :: !Int,        -- ^ Whether DmdAnal should optimistically put an
                                        --   Unboxed demand on returned products with at most
                                        --   this number of fields
  ifCompression         :: Int,
  specConstrThreshold   :: Maybe Int,   -- ^ Threshold for SpecConstr
  specConstrCount       :: Maybe Int,   -- ^ Max number of specialisations for any one function
  specConstrRecursive   :: Int,         -- ^ Max number of specialisations for recursive types
                                        --   Not optional; otherwise ForceSpecConstr can diverge.
  binBlobThreshold      :: Maybe Word,  -- ^ Binary literals (e.g. strings) whose size is above
                                        --   this threshold will be dumped in a binary file
                                        --   by the assembler code generator. 0 and Nothing disables
                                        --   this feature. See 'GHC.StgToCmm.Config'.
  liberateCaseThreshold :: Maybe Int,   -- ^ Threshold for LiberateCase
  floatLamArgs          :: Maybe Int,   -- ^ Arg count for lambda floating
                                        --   See 'GHC.Core.Opt.Monad.FloatOutSwitches'

  liftLamsRecArgs       :: Maybe Int,   -- ^ Maximum number of arguments after lambda lifting a
                                        --   recursive function.
  liftLamsNonRecArgs    :: Maybe Int,   -- ^ Maximum number of arguments after lambda lifting a
                                        --   non-recursive function.
  liftLamsKnown         :: Bool,        -- ^ Lambda lift even when this turns a known call
                                        --   into an unknown call.

  cmmProcAlignment      :: Maybe Int,   -- ^ Align Cmm functions at this boundary or use default.

  historySize           :: Int,         -- ^ Simplification history size

  importPaths           :: [FilePath],
  mainModuleNameIs      :: ModuleName,
  mainFunIs             :: Maybe String,
  reductionDepth        :: IntWithInf,   -- ^ Typechecker maximum stack depth
  solverIterations      :: IntWithInf,   -- ^ Number of iterations in the constraints solver
                                         --   Typically only 1 is needed

  givensFuel            :: Int,          -- ^ Number of layers of superclass expansion for givens
                                         --   Should be < solverIterations
                                         --   See Note [Expanding Recursive Superclasses and ExpansionFuel]
  wantedsFuel           :: Int,          -- ^ Number of layers of superclass expansion for wanteds
                                         --   Should be < givensFuel
                                         --   See Note [Expanding Recursive Superclasses and ExpansionFuel]
  qcsFuel               :: Int,          -- ^ Number of layers of superclass expansion for quantified constraints
                                         --   Should be < givensFuel
                                         --   See Note [Expanding Recursive Superclasses and ExpansionFuel]

  homeUnitId_             :: UnitId,                 -- ^ Target home unit-id
  homeUnitInstanceOf_     :: Maybe UnitId,           -- ^ Id of the unit to instantiate
  homeUnitInstantiations_ :: [(ModuleName, Module)], -- ^ Module instantiations

  -- Note [Filepaths and Multiple Home Units]
  workingDirectory      :: Maybe FilePath,
  thisPackageName       :: Maybe String, -- ^ What the package is called, use with multiple home units
  hiddenModules         :: Set.Set ModuleName,
  reexportedModules     :: [ReexportedModule],

  -- ways
  targetWays_           :: Ways,         -- ^ Target way flags from the command line

  -- For object splitting
  splitInfo             :: Maybe (String,Int),

  -- paths etc.
  objectDir             :: Maybe String,
  dylibInstallName      :: Maybe String,
  hiDir                 :: Maybe String,
  hieDir                :: Maybe String,
  stubDir               :: Maybe String,
  dumpDir               :: Maybe String,

  objectSuf_            :: String,
  hcSuf                 :: String,
  hiSuf_                :: String,
  hieSuf                :: String,

  dynObjectSuf_         :: String,
  dynHiSuf_             :: String,

  outputFile_           :: Maybe String,
  dynOutputFile_        :: Maybe String,
  outputHi              :: Maybe String,
  dynOutputHi           :: Maybe String,
  dynLibLoader          :: DynLibLoader,

  dynamicNow            :: !Bool, -- ^ Indicate if we are now generating dynamic output
                                  --   because of -dynamic-too. This predicate is
                                  --   used to query the appropriate fields
                                  --   (outputFile/dynOutputFile, ways, etc.)

  -- | This defaults to 'non-module'. It can be set by
  -- 'GHC.Driver.Pipeline.setDumpPrefix' or 'ghc.GHCi.UI.runStmt' based on
  -- where its output is going.
  dumpPrefix            :: FilePath,

  -- | Override the 'dumpPrefix' set by 'GHC.Driver.Pipeline.setDumpPrefix'
  -- or 'ghc.GHCi.UI.runStmt'.
  -- Set by @-ddump-file-prefix@
  dumpPrefixForce       :: Maybe FilePath,

  ldInputs              :: [Option],

  includePaths          :: IncludeSpecs,
  libraryPaths          :: [String],
  frameworkPaths        :: [String],    -- used on darwin only
  cmdlineFrameworks     :: [String],    -- ditto

  rtsOpts               :: Maybe String,
  rtsOptsEnabled        :: RtsOptsEnabled,
  rtsOptsSuggestions    :: Bool,

  hpcDir                :: String,      -- ^ Path to store the .mix files

  -- Plugins
  pluginModNames        :: [ModuleName],
    -- ^ the @-fplugin@ flags given on the command line, in *reverse*
    -- order that they're specified on the command line.
  pluginModNameOpts     :: [(ModuleName,String)],
  frontendPluginOpts    :: [String],
    -- ^ the @-ffrontend-opt@ flags given on the command line, in *reverse*
    -- order that they're specified on the command line.
  externalPluginSpecs   :: [ExternalPluginSpec],
    -- ^ External plugins loaded from shared libraries

  -- For ghc -M
  depMakefile           :: FilePath,
  depIncludePkgDeps     :: Bool,
  depIncludeCppDeps     :: Bool,
  depExcludeMods        :: [ModuleName],
  depSuffixes           :: [String],

  --  Package flags
  packageDBFlags        :: [PackageDBFlag],
        -- ^ The @-package-db@ flags given on the command line, In
        -- *reverse* order that they're specified on the command line.
        -- This is intended to be applied with the list of "initial"
        -- package databases derived from @GHC_PACKAGE_PATH@; see
        -- 'getUnitDbRefs'.

  ignorePackageFlags    :: [IgnorePackageFlag],
        -- ^ The @-ignore-package@ flags from the command line.
        -- In *reverse* order that they're specified on the command line.
  packageFlags          :: [PackageFlag],
        -- ^ The @-package@ and @-hide-package@ flags from the command-line.
        -- In *reverse* order that they're specified on the command line.
  pluginPackageFlags    :: [PackageFlag],
        -- ^ The @-plugin-package-id@ flags from command line.
        -- In *reverse* order that they're specified on the command line.
  trustFlags            :: [TrustFlag],
        -- ^ The @-trust@ and @-distrust@ flags.
        -- In *reverse* order that they're specified on the command line.
  packageEnv            :: Maybe FilePath,
        -- ^ Filepath to the package environment file (if overriding default)

  -- hsc dynamic flags
  dumpFlags             :: EnumSet DumpFlag,
  generalFlags          :: EnumSet GeneralFlag,
  warningFlags          :: EnumSet WarningFlag,
  fatalWarningFlags     :: EnumSet WarningFlag,
  customWarningCategories      :: WarningCategorySet, -- See Note [Warning categories]
  fatalCustomWarningCategories :: WarningCategorySet, -- in GHC.Unit.Module.Warnings
  -- Don't change this without updating extensionFlags:
  language              :: Maybe Language,
  -- | Safe Haskell mode
  safeHaskell           :: SafeHaskellMode,
  safeInfer             :: Bool,
  safeInferred          :: Bool,
  -- We store the location of where some extension and flags were turned on so
  -- we can produce accurate error messages when Safe Haskell fails due to
  -- them.
  thOnLoc               :: SrcSpan,
  newDerivOnLoc         :: SrcSpan,
  deriveViaOnLoc        :: SrcSpan,
  overlapInstLoc        :: SrcSpan,
  incoherentOnLoc       :: SrcSpan,
  pkgTrustOnLoc         :: SrcSpan,
  warnSafeOnLoc         :: SrcSpan,
  warnUnsafeOnLoc       :: SrcSpan,
  trustworthyOnLoc      :: SrcSpan,
  -- Don't change this without updating extensionFlags:
  -- Here we collect the settings of the language extensions
  -- from the command line, the ghci config file and
  -- from interactive :set / :seti commands.
  extensions            :: [OnOff LangExt.Extension],
  -- extensionFlags should always be equal to
  --     flattenExtensionFlags language extensions
  -- LangExt.Extension is defined in libraries/ghc-boot so that it can be used
  -- by template-haskell
  extensionFlags        :: EnumSet LangExt.Extension,

  -- | Unfolding control
  -- See Note [Discounts and thresholds] in GHC.Core.Unfold
  unfoldingOpts         :: !UnfoldingOpts,

  maxWorkerArgs         :: Int,

  maxForcedSpecArgs     :: Int,

  ghciHistSize          :: Int,

  flushOut              :: FlushOut,

  ghcVersionFile        :: Maybe FilePath,
  haddockOptions        :: Maybe String,

  -- | GHCi scripts specified by -ghci-script, in reverse order
  ghciScripts           :: [String],

  -- Output style options
  pprUserLength         :: Int,
  pprCols               :: Int,

  useUnicode            :: Bool,
  useColor              :: OverridingBool,
  canUseColor           :: Bool,
  useErrorLinks         :: OverridingBool,
  canUseErrorLinks      :: Bool,
  colScheme             :: Col.Scheme,

  -- | what kind of {-# SCC #-} to add automatically
  profAuto              :: ProfAuto,
  callerCcFilters       :: [CallerCcFilter],

  interactivePrint      :: Maybe String,

  -- | Machine dependent flags (-m\<blah> stuff)
  sseVersion            :: Maybe SseVersion,
  bmiVersion            :: Maybe BmiVersion,
  avx                   :: Bool,
  avx2                  :: Bool,
  avx512cd              :: Bool, -- Enable AVX-512 Conflict Detection Instructions.
  avx512er              :: Bool, -- Enable AVX-512 Exponential and Reciprocal Instructions.
  avx512f               :: Bool, -- Enable AVX-512 instructions.
  avx512pf              :: Bool, -- Enable AVX-512 PreFetch Instructions.
  fma                   :: Bool, -- ^ Enable FMA instructions.

  -- Constants used to control the amount of optimization done.

  -- | Max size, in bytes, of inline array allocations.
  maxInlineAllocSize    :: Int,

  -- | Only inline memcpy if it generates no more than this many
  -- pseudo (roughly: Cmm) instructions.
  maxInlineMemcpyInsns  :: Int,

  -- | Only inline memset if it generates no more than this many
  -- pseudo (roughly: Cmm) instructions.
  maxInlineMemsetInsns  :: Int,

  -- | Reverse the order of error messages in GHC/GHCi
  reverseErrors         :: Bool,

  -- | Limit the maximum number of errors to show
  maxErrors             :: Maybe Int,

  -- | Unique supply configuration for testing build determinism
  initialUnique         :: Word64,
  uniqueIncrement       :: Int,
    -- 'Int' because it can be used to test uniques in decreasing order.

  -- | Temporary: CFG Edge weights for fast iterations
  cfgWeights            :: Weights
}

-- | Monads from which the current 'DynFlags' can be read.
class HasDynFlags m where
    getDynFlags :: m DynFlags

{- It would be desirable to have the more generalised

  instance (MonadTrans t, Monad m, HasDynFlags m) => HasDynFlags (t m) where
      getDynFlags = lift getDynFlags

instance definition. However, that definition would overlap with the
`HasDynFlags (GhcT m)` instance. Instead we define instances for a
couple of common Monad transformers explicitly. -}

instance (Monoid a, Monad m, HasDynFlags m) => HasDynFlags (WriterT a m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (ReaderT a m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (MaybeT m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (ExceptT e m) where
    getDynFlags = lift getDynFlags

-- | Types from which a 'DynFlags' can be projected out.
class ContainsDynFlags t where
    extractDynFlags :: t -> DynFlags

-----------------------------------------------------------------------------

-- | Used by 'GHC.runGhc' to partially initialize a new 'DynFlags' value
initDynFlags :: DynFlags -> IO DynFlags
initDynFlags dflags = do
 let
 -- This is not bulletproof: we test that 'localeEncoding' is Unicode-capable,
 -- but potentially 'hGetEncoding' 'stdout' might be different. Still good enough.
 canUseUnicode <- do let enc = localeEncoding
                         str = "‘’"
                     (withCString enc str $ \cstr ->
                          do str' <- peekCString enc cstr
                             return (str == str'))
                         `catchIOError` \_ -> return False
 ghcNoUnicodeEnv <- lookupEnv "GHC_NO_UNICODE"
 let useUnicode' = isNothing ghcNoUnicodeEnv && canUseUnicode
 -- Both spellings of the environment variable are honoured; the British
 -- spelling is applied second, so it wins if both are set.
 maybeGhcColorsEnv  <- lookupEnv "GHC_COLORS"
 maybeGhcColoursEnv <- lookupEnv "GHC_COLOURS"
 let adjustCols (Just env) = Col.parseScheme env
     adjustCols Nothing    = id
 let (useColor', colScheme') =
       (adjustCols maybeGhcColoursEnv . adjustCols maybeGhcColorsEnv)
       (useColor dflags, colScheme dflags)
 tmp_dir <- normalise <$> getTemporaryDirectory
 return dflags{
        useUnicode    = useUnicode',
        useColor      = useColor',
        canUseColor   = stderrSupportsAnsiColors,
        -- if the terminal supports color, we assume it supports links as well
        canUseErrorLinks = stderrSupportsAnsiColors,
        colScheme     = colScheme',
        tmpDir        = TempDir tmp_dir
        }

-- | The normal 'DynFlags'. Note that they are not suitable for use in this form
-- and must be fully initialized by 'GHC.runGhc' first.
defaultDynFlags :: Settings -> DynFlags
defaultDynFlags mySettings =
-- See Note [Updating flag description in the User's Guide]
     DynFlags {
        ghcMode                 = CompManager,
        ghcLink                 = LinkBinary,
        backend                 = platformDefaultBackend (sTargetPlatform mySettings),
        verbosity               = 0,
        debugLevel              = 0,
        simplPhases             = 2,
        maxSimplIterations      = 4,
        ruleCheck               = Nothing,
        binBlobThreshold        = Just 500000, -- 500K is a good default (see #16190)
        maxRelevantBinds        = Just 6,
        maxValidHoleFits        = Just 6,
        maxRefHoleFits          = Just 6,
        refLevelHoleFits        = Nothing,
        maxUncoveredPatterns    = 4,
        maxPmCheckModels        = 30,
        simplTickFactor         = 100,
        dmdUnboxWidth           = 3,      -- Default: Assume an unboxed demand on function bodies returning a triple
        ifCompression           = 2,      -- Default: Apply safe compressions
        specConstrThreshold     = Just 2000,
        specConstrCount         = Just 3,
        specConstrRecursive     = 3,
        liberateCaseThreshold   = Just 2000,
        floatLamArgs            = Just 0, -- Default: float only if no fvs
        liftLamsRecArgs         = Just 5, -- Default: the number of available argument hardware registers on x86_64
        liftLamsNonRecArgs      = Just 5, -- Default: the number of available argument hardware registers on x86_64
        liftLamsKnown           = False,  -- Default: don't turn known calls into unknown ones
        cmmProcAlignment        = Nothing,

        historySize             = 20,
        strictnessBefore        = [],

        parMakeCount            = Nothing,

        enableTimeStats         = False,
        ghcHeapSize             = Nothing,

        importPaths             = ["."],
        mainModuleNameIs        = mAIN_NAME,
        mainFunIs               = Nothing,
        reductionDepth          = treatZeroAsInf mAX_REDUCTION_DEPTH,
        solverIterations        = treatZeroAsInf mAX_SOLVER_ITERATIONS,
        givensFuel              = mAX_GIVENS_FUEL,
        wantedsFuel             = mAX_WANTEDS_FUEL,
        qcsFuel                 = mAX_QC_FUEL,

        homeUnitId_             = mainUnitId,
        homeUnitInstanceOf_     = Nothing,
        homeUnitInstantiations_ = [],

        workingDirectory        = Nothing,
        thisPackageName         = Nothing,
        hiddenModules           = Set.empty,
        reexportedModules       = [],

        objectDir               = Nothing,
        dylibInstallName        = Nothing,
        hiDir                   = Nothing,
        hieDir                  = Nothing,
        stubDir                 = Nothing,
        dumpDir                 = Nothing,

        objectSuf_              = phaseInputExt StopLn,
        hcSuf                   = phaseInputExt HCc,
        hiSuf_                  = "hi",
        hieSuf                  = "hie",

        dynObjectSuf_           = "dyn_" ++ phaseInputExt StopLn,
        dynHiSuf_               = "dyn_hi",
        dynamicNow              = False,

        pluginModNames          = [],
        pluginModNameOpts       = [],
        frontendPluginOpts      = [],
        externalPluginSpecs     = [],

        outputFile_             = Nothing,
        dynOutputFile_          = Nothing,
        outputHi                = Nothing,
        dynOutputHi             = Nothing,
        dynLibLoader            = SystemDependent,
        dumpPrefix              = "non-module.",
        dumpPrefixForce         = Nothing,
        ldInputs                = [],
        includePaths            = IncludeSpecs [] [] [],
        libraryPaths            = [],
        frameworkPaths          = [],
        cmdlineFrameworks       = [],
        rtsOpts                 = Nothing,
        rtsOptsEnabled          = RtsOptsSafeOnly,
        rtsOptsSuggestions      = True,

        hpcDir                  = ".hpc",

        packageDBFlags          = [],
        packageFlags            = [],
        pluginPackageFlags      = [],
        ignorePackageFlags      = [],
        trustFlags              = [],
        packageEnv              = Nothing,

        targetWays_             = Set.empty,
        splitInfo               = Nothing,

        ghcNameVersion          = sGhcNameVersion mySettings,
        fileSettings            = sFileSettings mySettings,
        toolSettings            = sToolSettings mySettings,
        targetPlatform          = sTargetPlatform mySettings,
        platformMisc            = sPlatformMisc mySettings,
        rawSettings             = sRawSettings mySettings,

        tmpDir                  = panic "defaultDynFlags: uninitialized tmpDir",

        llvmOptLevel            = 0,

        -- ghc -M values
        depMakefile             = "Makefile",
        depIncludePkgDeps       = False,
        depIncludeCppDeps       = False,
        depExcludeMods          = [],
        depSuffixes             = [],
        -- end of ghc -M values
        ghcVersionFile          = Nothing,
        haddockOptions          = Nothing,
        dumpFlags               = EnumSet.empty,
        generalFlags            = EnumSet.fromList (defaultFlags mySettings),
        warningFlags            = EnumSet.fromList standardWarnings,
        fatalWarningFlags       = EnumSet.empty,
        customWarningCategories      = completeWarningCategorySet,
        fatalCustomWarningCategories = emptyWarningCategorySet,
        ghciScripts             = [],
        language                = Nothing,
        safeHaskell             = Sf_None,
        safeInfer               = True,
        safeInferred            = True,
        thOnLoc                 = noSrcSpan,
        newDerivOnLoc           = noSrcSpan,
        deriveViaOnLoc          = noSrcSpan,
        overlapInstLoc          = noSrcSpan,
        incoherentOnLoc         = noSrcSpan,
        pkgTrustOnLoc           = noSrcSpan,
        warnSafeOnLoc           = noSrcSpan,
        warnUnsafeOnLoc         = noSrcSpan,
        trustworthyOnLoc        = noSrcSpan,
        extensions              = [],
        extensionFlags          = flattenExtensionFlags Nothing [],

        unfoldingOpts           = defaultUnfoldingOpts,
        maxWorkerArgs           = 10,
        maxForcedSpecArgs       = 333, -- 333 is fairly arbitrary, see Note [Forcing specialisation]:FS5

        ghciHistSize            = 50, -- keep a log of length 50 by default

        flushOut                = defaultFlushOut,
        pprUserLength           = 5,
        pprCols                 = 100,
        useUnicode              = False,
        useColor                = Auto,
        canUseColor             = False,
        useErrorLinks           = Auto,
        canUseErrorLinks        = False,
        colScheme               = Col.defaultScheme,
        profAuto                = NoProfAuto,
        callerCcFilters         = [],
        interactivePrint        = Nothing,
        sseVersion              = Nothing,
        bmiVersion              = Nothing,
        avx                     = False,
        avx2                    = False,
        avx512cd                = False,
        avx512er                = False,
        avx512f                 = False,
        avx512pf                = False,
        -- Use FMA by default on AArch64
        fma                     = (platformArch . sTargetPlatform $ mySettings) == ArchAArch64,

        maxInlineAllocSize      = 128,
        maxInlineMemcpyInsns    = 32,
        maxInlineMemsetInsns    = 32,

        initialUnique           = 0,
        uniqueIncrement         = 1,

        reverseErrors           = False,
        maxErrors               = Nothing,
        cfgWeights              = defaultWeights
      }

-- | Action used to report a fatal message (by default: print to stderr).
type FatalMessager = String -> IO ()

defaultFatalMessager :: FatalMessager
defaultFatalMessager = hPutStrLn stderr

-- | An action that flushes the compiler's output handle.
newtype FlushOut = FlushOut (IO ())

defaultFlushOut :: FlushOut
defaultFlushOut = FlushOut $ hFlush stdout

-- | A language-extension toggle as given on the command line / in GHCi.
data OnOff a = On a
             | Off a
  deriving (Eq, Show)

instance Outputable a => Outputable (OnOff a) where
  ppr (On x)  = text "On" <+> ppr x
  ppr (Off x) = text "Off" <+> ppr x

-- OnOffs accumulate in reverse order, so we use foldr in order to
-- process them in the right order
flattenExtensionFlags :: Maybe Language -> [OnOff LangExt.Extension] -> EnumSet LangExt.Extension
flattenExtensionFlags ml = foldr g defaultExtensionFlags
    where g (On f)  flags = EnumSet.insert f flags
          g (Off f) flags = EnumSet.delete f flags
          defaultExtensionFlags = EnumSet.fromList (languageExtensions ml)

-- -----------------------------------------------------------------------------
-- -jN

-- | The type for the -jN argument, specifying that -j on its own represents
-- using the number of machine processors.
data ParMakeCount
  -- | Use this many processors (@-j<n>@ flag).
  = ParMakeThisMany Int
  -- | Use parallelism with as many processors as possible (@-j@ flag without an argument).
  | ParMakeNumProcessors
  -- | Use the specific semaphore @<sem>@ to control parallelism (@-jsem <sem>@ flag).
  | ParMakeSemaphore FilePath

-- | The 'GhcMode' tells us whether we're doing multi-module
-- compilation (controlled via the "GHC" API) or one-shot
-- (single-module) compilation.  This makes a difference primarily to
-- the "GHC.Unit.Finder": in one-shot mode we look for interface files for
-- imported modules, but in multi-module mode we look for source files
-- in order to check whether they need to be recompiled.
data GhcMode
  = CompManager         -- ^ @\-\-make@, GHCi, etc.
  | OneShot             -- ^ @ghc -c Foo.hs@
  | MkDepend            -- ^ @ghc -M@, see "GHC.Unit.Finder" for why we need this
  deriving Eq

instance Outputable GhcMode where
  ppr CompManager = text "CompManager"
  ppr OneShot     = text "OneShot"
  ppr MkDepend    = text "MkDepend"

isOneShot :: GhcMode -> Bool
isOneShot OneShot = True
isOneShot _other  = False

-- | What to do in the link step, if there is one.
data GhcLink
  = NoLink              -- ^ Don't link at all
  | LinkBinary          -- ^ Link object code into a binary
  | LinkInMemory        -- ^ Use the in-memory dynamic linker (works for both
                        --   bytecode and object code).
  | LinkDynLib          -- ^ Link objects into a dynamic lib (DLL on Windows, DSO on ELF platforms)
  | LinkStaticLib       -- ^ Link objects into a static lib
  | LinkMergedObj       -- ^ Link objects into a merged "GHCi object"
  deriving (Eq, Show)

isNoLink :: GhcLink -> Bool
isNoLink NoLink = True
isNoLink _      = False

-- | We accept flags which make packages visible, but how they select
-- the package varies; this data type reflects what selection criterion
-- is used.
data PackageArg =
      PackageArg String    -- ^ @-package@, by 'PackageName'
    | UnitIdArg Unit       -- ^ @-package-id@, by 'Unit'
  deriving (Eq, Show)

instance Outputable PackageArg where
    ppr (PackageArg pn) = text "package" <+> text pn
    ppr (UnitIdArg uid) = text "unit" <+> ppr uid

-- | Represents the renaming that may be associated with an exposed
-- package, e.g. the @rns@ part of @-package "foo (rns)"@.
--
-- Here are some example parsings of the package flags (where
-- a string literal is punned to be a 'ModuleName':
--
--      * @-package foo@ is @ModRenaming True []@
--      * @-package foo ()@ is @ModRenaming False []@
--      * @-package foo (A)@ is @ModRenaming False [("A", "A")]@
--      * @-package foo (A as B)@ is @ModRenaming False [("A", "B")]@
--      * @-package foo with (A as B)@ is @ModRenaming True [("A", "B")]@
data ModRenaming = ModRenaming {
    modRenamingWithImplicit :: Bool, -- ^ Bring all exposed modules into scope?
    modRenamings :: [(ModuleName, ModuleName)] -- ^ Bring module @m@ into scope
                                               --   under name @n@.
  } deriving (Eq)

instance Outputable ModRenaming where
    ppr (ModRenaming b rns) = ppr b <+> parens (ppr rns)

-- | Flags for manipulating the set of non-broken packages.
newtype IgnorePackageFlag = IgnorePackage String -- ^ @-ignore-package@
  deriving (Eq)

-- | Flags for manipulating package trust.
data TrustFlag
  = TrustPackage    String -- ^ @-trust@
  | DistrustPackage String -- ^ @-distrust@
  deriving (Eq)

-- | Flags for manipulating packages visibility.
data PackageFlag
  = ExposePackage   String PackageArg ModRenaming -- ^ @-package@, @-package-id@
  | HidePackage     String -- ^ @-hide-package@
  deriving (Eq) -- NB: equality instance is used by packageFlagsChanged

data PackageDBFlag
  = PackageDB PkgDbRef
  | NoUserPackageDB
  | NoGlobalPackageDB
  | ClearPackageDBs
  deriving (Eq)

-- | Have the package-related flags changed between the two given 'DynFlags'?
-- Includes the relevant 'GeneralFlag's, so equality of the flag lists alone
-- is not sufficient.
packageFlagsChanged :: DynFlags -> DynFlags -> Bool
packageFlagsChanged idflags1 idflags0 =
  packageFlags idflags1 /= packageFlags idflags0 ||
  ignorePackageFlags idflags1 /= ignorePackageFlags idflags0 ||
  pluginPackageFlags idflags1 /= pluginPackageFlags idflags0 ||
  trustFlags idflags1 /= trustFlags idflags0 ||
  packageDBFlags idflags1 /= packageDBFlags idflags0 ||
  packageGFlags idflags1 /= packageGFlags idflags0
 where
   packageGFlags dflags = map (`gopt` dflags)
     [ Opt_HideAllPackages
     , Opt_HideAllPluginPackages
     , Opt_AutoLinkPackages ]

instance Outputable PackageFlag where
    ppr (ExposePackage n arg rn) = text n <> braces (ppr arg <+> ppr rn)
    ppr (HidePackage str) = text "-hide-package" <+> text str

data DynLibLoader
  = Deployable
  | SystemDependent
  deriving Eq

data RtsOptsEnabled
  = RtsOptsNone | RtsOptsIgnore | RtsOptsIgnoreAll | RtsOptsSafeOnly
  | RtsOptsAll
  deriving (Show)

-- | Are we building with @-fPIE@ or @-fPIC@ enabled?
positionIndependent :: DynFlags -> Bool
positionIndependent dflags = gopt Opt_PIC dflags || gopt Opt_PIE dflags

-- Note [-dynamic-too business]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- With -dynamic-too flag, we try to build both the non-dynamic and dynamic
-- objects in a single run of the compiler: the pipeline is the same down to
-- Core optimisation, then the backend (from Core to object code) is executed
-- twice.
--
-- The implementation is currently rather hacky, for example, we don't clearly separate non-dynamic
-- and dynamic loaded interfaces (#9176).
--
-- To make matters worse, we automatically enable -dynamic-too when some modules
-- need Template-Haskell and GHC is dynamically linked (cf
-- GHC.Driver.Pipeline.compileOne').
--
-- We used to try and fall back from a dynamic-too failure but this feature
-- didn't work as expected (#20446) so it was removed to simplify the
-- implementation and not obscure latent bugs.

data DynamicTooState
   = DT_Dont    -- ^ Don't try to build dynamic objects too
   | DT_OK      -- ^ Will still try to generate dynamic objects
   | DT_Dyn     -- ^ Currently generating dynamic objects (in the backend)
   deriving (Eq,Show,Ord)

dynamicTooState :: DynFlags -> DynamicTooState
dynamicTooState dflags
   | not (gopt Opt_BuildDynamicToo dflags) = DT_Dont
   | dynamicNow dflags = DT_Dyn
   | otherwise = DT_OK

setDynamicNow :: DynFlags -> DynFlags
setDynamicNow dflags0 =
   dflags0
      { dynamicNow = True
      }

data PkgDbRef
  = GlobalPkgDb
  | UserPkgDb
  | PkgDbPath FilePath
  deriving Eq

-- | Used to differentiate the scope an include needs to apply to.
-- We have to split the include paths to avoid accidentally forcing recursive
-- includes since -I overrides the system search paths. See #14312.
data IncludeSpecs
  = IncludeSpecs { includePathsQuote  :: [String]
                 , includePathsGlobal :: [String]
                 -- | See Note [Implicit include paths]
                 , includePathsQuoteImplicit :: [String]
                 }
  deriving Show

-- | Append to the list of includes a path that shall be included using `-I`
-- when the C compiler is called. These paths override system search paths.
addGlobalInclude :: IncludeSpecs -> [String] -> IncludeSpecs
addGlobalInclude spec paths  = let f = includePathsGlobal spec
                               in spec { includePathsGlobal = f ++ paths }

-- | Append to the list of includes a path that shall be included using
-- `-iquote` when the C compiler is called. These paths only apply when quoted
-- includes are used. e.g. #include "foo.h"
addQuoteInclude :: IncludeSpecs -> [String] -> IncludeSpecs
addQuoteInclude spec paths = let f = includePathsQuote spec
                             in spec { includePathsQuote = f ++ paths }

-- | These includes are not considered while fingerprinting the flags for iface
-- | See Note [Implicit include paths]
addImplicitQuoteInclude :: IncludeSpecs -> [String] -> IncludeSpecs
addImplicitQuoteInclude spec paths = let f = includePathsQuoteImplicit spec
                                     in spec { includePathsQuoteImplicit = f ++ paths }

-- | Concatenate and flatten the list of global and quoted includes returning
-- just a flat list of paths.
flattenIncludes :: IncludeSpecs -> [String]
flattenIncludes specs =
    includePathsQuote specs ++
    includePathsQuoteImplicit specs ++
    includePathsGlobal specs

-- An argument to --reexported-module which can optionally specify a module renaming.
data ReexportedModule = ReexportedModule { reexportFrom :: ModuleName
                                         , reexportTo   :: ModuleName }

instance Outputable ReexportedModule where
  ppr (ReexportedModule from to) =
    if from == to
      then ppr from
      else ppr from <+> text "as" <+> ppr to

{- Note [Implicit include paths]
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  The compile driver adds the path to the folder containing the source file being
  compiled to the 'IncludeSpecs', and this change gets recorded in the 'DynFlags'
  that are used later to compute the interface file. Because of this,
  the flags fingerprint derived from these 'DynFlags' and recorded in the
  interface file will end up containing the absolute path to the source folder.

  Build systems with a remote cache like Bazel or Buck (or Shake, see #16956)
  store the build artifacts produced by a build BA for reuse in subsequent builds.

  Embedding source paths in interface fingerprints will thwart these attempts and
  lead to unnecessary recompilations when the source paths in BA differ from the
  source paths in subsequent builds.
-}

hasPprDebug :: DynFlags -> Bool
hasPprDebug = dopt Opt_D_ppr_debug

hasNoDebugOutput :: DynFlags -> Bool
hasNoDebugOutput = dopt Opt_D_no_debug_output

hasNoStateHack :: DynFlags -> Bool
hasNoStateHack = gopt Opt_G_NoStateHack

hasNoOptCoercion :: DynFlags -> Bool
hasNoOptCoercion = gopt Opt_G_NoOptCoercion

-- | Test whether a 'DumpFlag' is set
dopt :: DumpFlag -> DynFlags -> Bool
dopt = getDumpFlagFrom verbosity dumpFlags

-- | Set a 'DumpFlag'
dopt_set :: DynFlags -> DumpFlag -> DynFlags
dopt_set dfs f = dfs{ dumpFlags = EnumSet.insert f (dumpFlags dfs) }

-- | Unset a 'DumpFlag'
dopt_unset :: DynFlags -> DumpFlag -> DynFlags
dopt_unset dfs f = dfs{ dumpFlags = EnumSet.delete f (dumpFlags dfs) }

-- | Test whether a 'GeneralFlag' is set
--
-- Note that `dynamicNow` (i.e., dynamic objects built with `-dynamic-too`)
-- always implicitly enables Opt_PIC, Opt_ExternalDynamicRefs, and disables
-- Opt_SplitSections.
--
gopt :: GeneralFlag -> DynFlags -> Bool
-- NB: the dynamicNow cases must come before the generic EnumSet lookup;
-- they override whatever is stored in generalFlags.
gopt Opt_PIC dflags
   | dynamicNow dflags = True
gopt Opt_ExternalDynamicRefs dflags
   | dynamicNow dflags = True
gopt Opt_SplitSections dflags
   | dynamicNow dflags = False
gopt f dflags = f `EnumSet.member` generalFlags dflags

-- | Set a 'GeneralFlag'
gopt_set :: DynFlags -> GeneralFlag -> DynFlags
gopt_set dfs f = dfs{ generalFlags = EnumSet.insert f (generalFlags dfs) }

-- | Unset a 'GeneralFlag'
gopt_unset :: DynFlags -> GeneralFlag -> DynFlags
gopt_unset dfs f = dfs{ generalFlags = EnumSet.delete f (generalFlags dfs) }

-- | Test whether a 'WarningFlag' is set
wopt :: WarningFlag -> DynFlags -> Bool
wopt f dflags  = f `EnumSet.member` warningFlags dflags

-- | Set a 'WarningFlag'
wopt_set :: DynFlags -> WarningFlag -> DynFlags
wopt_set dfs f = dfs{ warningFlags = EnumSet.insert f (warningFlags dfs) }

-- | Unset a 'WarningFlag'
wopt_unset :: DynFlags -> WarningFlag -> DynFlags
wopt_unset dfs f = dfs{ warningFlags = EnumSet.delete f (warningFlags dfs) }

-- | Test whether a 'WarningFlag' is set as fatal
wopt_fatal :: WarningFlag -> DynFlags -> Bool
wopt_fatal f dflags = f `EnumSet.member` fatalWarningFlags dflags

-- | Mark a 'WarningFlag' as fatal (do not set the flag)
wopt_set_fatal :: DynFlags -> WarningFlag -> DynFlags
wopt_set_fatal dfs f
    = dfs { fatalWarningFlags = EnumSet.insert f (fatalWarningFlags dfs) }

-- | Mark a 'WarningFlag' as not fatal
wopt_unset_fatal :: DynFlags -> WarningFlag -> DynFlags
wopt_unset_fatal dfs f
    = dfs { fatalWarningFlags = EnumSet.delete f (fatalWarningFlags dfs) }

-- | Enable all custom warning categories.
wopt_set_all_custom :: DynFlags -> DynFlags
wopt_set_all_custom dfs
    = dfs{ customWarningCategories = completeWarningCategorySet }

-- | Disable all custom warning categories.
wopt_unset_all_custom :: DynFlags -> DynFlags
wopt_unset_all_custom dfs
    = dfs{ customWarningCategories = emptyWarningCategorySet }

-- | Mark all custom warning categories as fatal (do not set the flags).
wopt_set_all_fatal_custom :: DynFlags -> DynFlags
wopt_set_all_fatal_custom dfs
    = dfs { fatalCustomWarningCategories = completeWarningCategorySet }

-- | Mark all custom warning categories as non-fatal.
wopt_unset_all_fatal_custom :: DynFlags -> DynFlags
wopt_unset_all_fatal_custom dfs
    = dfs { fatalCustomWarningCategories = emptyWarningCategorySet }

-- | Set a custom 'WarningCategory'
wopt_set_custom :: DynFlags -> WarningCategory -> DynFlags
wopt_set_custom dfs f = dfs{ customWarningCategories = insertWarningCategorySet f (customWarningCategories dfs) }

-- | Unset a custom 'WarningCategory'
wopt_unset_custom :: DynFlags -> WarningCategory -> DynFlags
wopt_unset_custom dfs f = dfs{ customWarningCategories = deleteWarningCategorySet f (customWarningCategories dfs) }

-- | Mark a custom 'WarningCategory' as fatal (do not set the flag)
wopt_set_fatal_custom :: DynFlags -> WarningCategory -> DynFlags
wopt_set_fatal_custom dfs f
    = dfs { fatalCustomWarningCategories = insertWarningCategorySet f (fatalCustomWarningCategories dfs) }

-- | Mark a custom 'WarningCategory' as not fatal
wopt_unset_fatal_custom :: DynFlags -> WarningCategory -> DynFlags
wopt_unset_fatal_custom dfs f
    = dfs { fatalCustomWarningCategories = deleteWarningCategorySet f (fatalCustomWarningCategories dfs) }

-- | Are there any custom warning categories enabled?
wopt_any_custom :: DynFlags -> Bool
wopt_any_custom dfs = not (nullWarningCategorySet (customWarningCategories dfs))

-- | Test whether a 'LangExt.Extension' is set
xopt :: LangExt.Extension -> DynFlags -> Bool
xopt f dflags = f `EnumSet.member` extensionFlags dflags

-- | Set a 'LangExt.Extension'
xopt_set :: DynFlags -> LangExt.Extension -> DynFlags
xopt_set dfs f
    = let onoffs = On f : extensions dfs
      in dfs { extensions = onoffs,
               extensionFlags = flattenExtensionFlags (language dfs) onoffs }

-- | Unset a 'LangExt.Extension'
xopt_unset :: DynFlags -> LangExt.Extension -> DynFlags
xopt_unset dfs f
    = let onoffs = Off f : extensions dfs
      in dfs { extensions = onoffs,
               extensionFlags = flattenExtensionFlags (language dfs) onoffs }

-- | Set or unset a 'LangExt.Extension', unless it has been explicitly
--   set or unset before.
xopt_set_unlessExplSpec
        :: LangExt.Extension
        -> (DynFlags -> LangExt.Extension -> DynFlags)
        -> DynFlags -> DynFlags
xopt_set_unlessExplSpec ext setUnset dflags
  -- A previous On/Off occurrence (from the command line, ghci config or
  -- :set/:seti) counts as an explicit specification either way.
  | any (mentions ext) (extensions dflags) = dflags
  | otherwise                              = setUnset dflags ext
  where
    mentions e (On x)  = e == x
    mentions e (Off x) = e == x

-- | Project the 'LangExt.DuplicateRecordFields' setting out of the flags.
xopt_DuplicateRecordFields :: DynFlags -> FieldLabel.DuplicateRecordFields
xopt_DuplicateRecordFields dfs =
  if xopt LangExt.DuplicateRecordFields dfs
    then FieldLabel.DuplicateRecordFields
    else FieldLabel.NoDuplicateRecordFields

-- | Project the 'LangExt.FieldSelectors' setting out of the flags.
xopt_FieldSelectors :: DynFlags -> FieldLabel.FieldSelectors
xopt_FieldSelectors dfs =
  if xopt LangExt.FieldSelectors dfs
    then FieldLabel.FieldSelectors
    else FieldLabel.NoFieldSelectors

-- | Install a 'Language', recomputing the extension set from the recorded
-- 'extensions' toggles so the invariant on 'extensionFlags' is preserved.
lang_set :: DynFlags -> Maybe Language -> DynFlags
lang_set dflags lang = dflags
  { language       = lang
  , extensionFlags = flattenExtensionFlags lang (extensions dflags)
  }

defaultFlags :: Settings -> [GeneralFlag]
defaultFlags settings
-- See Note [Updating flag description in the User's Guide]
  = unconditional_defaults
    ++ [ f | (ns, f) <- optLevelFlags, 0 `elem` ns ]
       -- The default -O0 options
    -- Default floating flags (see Note [RHS Floating])
    ++ [ Opt_LocalFloatOut, Opt_LocalFloatOutTopLevel ]
    ++ default_PIC platform
    ++ validHoleFitDefaults
  where
    platform = sTargetPlatform settings
    unconditional_defaults =
      [ Opt_AutoLinkPackages
      , Opt_DiagnosticsShowCaret
      , Opt_EmbedManifest
      , Opt_FamAppCache
      , Opt_GenManifest
      , Opt_GhciHistory
      , Opt_GhciSandbox
      , Opt_HelpfulErrors
      , Opt_KeepHiFiles
      , Opt_KeepOFiles
      , Opt_OmitYields
      , Opt_PrintBindContents
      , Opt_ProfCountEntries
      , Opt_SharedImplib
      , Opt_SimplPreInlining
      , Opt_VersionMacros
      , Opt_RPath
      , Opt_DumpWithWays
      , Opt_CompactUnwind
      , Opt_ShowErrorContext
      , Opt_SuppressStgReps
      , Opt_UnoptimizedCoreForInterpreter
      , Opt_SpecialiseIncoherents
      ]

-- | These are the default settings for the display and sorting of valid hole
--  fits in typed-hole error messages. See Note [Valid hole fits include ...]
--  in the "GHC.Tc.Errors.Hole" module.
validHoleFitDefaults :: [GeneralFlag]
validHoleFitDefaults =
  [ Opt_ShowTypeAppOfHoleFits
  , Opt_ShowTypeOfHoleFits
  , Opt_ShowProvOfHoleFits
  , Opt_ShowMatchesOfHoleFits
  , Opt_ShowValidHoleFits
  , Opt_SortValidHoleFits
  , Opt_SortBySizeHoleFits
  , Opt_ShowHoleConstraints
  ]

-- Note [When is StarIsType enabled]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- The StarIsType extension determines whether to treat '*' as a regular type
-- operator or as a synonym for 'Data.Kind.Type'. Many existing pre-TypeInType
-- programs expect '*' to be synonymous with 'Type', so by default StarIsType is
-- enabled.
--
-- Programs that use TypeOperators might expect to repurpose '*' for
-- multiplication or another binary operation, but making TypeOperators imply
-- NoStarIsType caused too much breakage on Hackage.
--

-- Note [Documenting optimisation flags]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- If you change the list of flags enabled for particular optimisation levels
-- please remember to update the User's Guide. The relevant file is:
--
--   docs/users_guide/using-optimisation.rst
--
-- Make sure to note whether a flag is implied by -O0, -O or -O2.
optLevelFlags :: [([Int], GeneralFlag)] -- Default settings of flags, before any command-line overrides optLevelFlags -- see Note [Documenting optimisation flags] = [ ([0,1,2], Opt_DoLambdaEtaExpansion) , ([1,2], Opt_DoCleverArgEtaExpansion) -- See Note [Eta expansion of arguments in CorePrep] , ([0,1,2], Opt_DoEtaReduction) -- See Note [Eta-reduction in -O0] , ([0,1,2], Opt_ProfManualCcs ) , ([2], Opt_DictsStrict) , ([0], Opt_IgnoreInterfacePragmas) , ([0], Opt_OmitInterfacePragmas) , ([1,2], Opt_CoreConstantFolding) , ([1,2], Opt_CallArity) , ([1,2], Opt_Exitification) , ([1,2], Opt_CaseMerge) , ([1,2], Opt_CaseFolding) , ([1,2], Opt_CmmElimCommonBlocks) , ([2], Opt_AsmShortcutting) , ([1,2], Opt_CmmSink) , ([1,2], Opt_CmmStaticPred) , ([1,2], Opt_CSE) , ([1,2], Opt_StgCSE) , ([2], Opt_StgLiftLams) , ([1,2], Opt_CmmControlFlow) , ([1,2], Opt_EnableRewriteRules) -- Off for -O0. Otherwise we desugar list literals -- to 'build' but don't run the simplifier passes that -- would rewrite them back to cons cells! This seems -- silly, and matters for the GHCi debugger. , ([1,2], Opt_FloatIn) , ([1,2], Opt_FullLaziness) , ([1,2], Opt_IgnoreAsserts) , ([1,2], Opt_Loopification) , ([1,2], Opt_CfgBlocklayout) -- Experimental , ([1,2], Opt_Specialise) , ([1,2], Opt_CrossModuleSpecialise) , ([1,2], Opt_InlineGenerics) , ([1,2], Opt_Strictness) , ([1,2], Opt_UnboxSmallStrictFields) , ([1,2], Opt_CprAnal) , ([1,2], Opt_WorkerWrapper) , ([1,2], Opt_SolveConstantDicts) , ([1,2], Opt_NumConstantFolding) , ([2], Opt_LiberateCase) , ([2], Opt_SpecConstr) , ([2], Opt_FastPAPCalls) -- , ([2], Opt_RegsGraph) -- RegsGraph suffers performance regression. See #7679 -- , ([2], Opt_StaticArgumentTransformation) -- Static Argument Transformation needs investigation. See #9374 , ([0,1,2], Opt_SpecEval) , ([0,1,2], Opt_SpecEvalDictFun) ] default_PIC :: Platform -> [GeneralFlag] default_PIC platform = case (platformOS platform, platformArch platform) of -- Darwin always requires PIC. 
Especially on more recent macOS releases -- there will be a 4GB __ZEROPAGE that prevents us from using 32bit addresses -- while we could work around this on x86_64 (like WINE does), we won't be -- able on aarch64, where this is enforced. (OSDarwin, ArchX86_64) -> [Opt_PIC] -- For AArch64, we need to always have PIC enabled. The relocation model -- on AArch64 does not permit arbitrary relocations. Under ASLR, we can't -- control much how far apart symbols are in memory for our in-memory static -- linker; and thus need to ensure we get sufficiently capable relocations. -- This requires PIC on AArch64, and ExternalDynamicRefs on Linux as on top -- of that. Subsequently we expect all code on aarch64/linux (and macOS) to -- be built with -fPIC. (OSDarwin, ArchAArch64) -> [Opt_PIC] (OSLinux, ArchAArch64) -> [Opt_PIC, Opt_ExternalDynamicRefs] (OSLinux, ArchARM {}) -> [Opt_PIC, Opt_ExternalDynamicRefs] (OSLinux, ArchRISCV64 {}) -> [Opt_PIC, Opt_ExternalDynamicRefs] (OSOpenBSD, ArchX86_64) -> [Opt_PIC] -- Due to PIE support in -- OpenBSD since 5.3 release -- (1 May 2013) we need to -- always generate PIC. See -- #10597 for more -- information. _ -> [] -- | The language extensions implied by the various language variants. -- When updating this be sure to update the flag documentation in -- @docs/users_guide/exts@. 
languageExtensions :: Maybe Language -> [LangExt.Extension] -- Nothing: the default case languageExtensions Nothing = languageExtensions (Just defaultLanguage) languageExtensions (Just Haskell98) = [LangExt.ImplicitPrelude, -- See Note [When is StarIsType enabled] LangExt.StarIsType, LangExt.CUSKs, LangExt.MonomorphismRestriction, LangExt.NPlusKPatterns, LangExt.DatatypeContexts, LangExt.TraditionalRecordSyntax, LangExt.FieldSelectors, LangExt.NondecreasingIndentation, -- strictly speaking non-standard, but we always had this -- on implicitly before the option was added in 7.1, and -- turning it off breaks code, so we're keeping it on for -- backwards compatibility. Cabal uses -XHaskell98 by -- default unless you specify another language. LangExt.DeepSubsumption, -- Non-standard but enabled for backwards compatability (see GHC proposal #511) LangExt.ListTuplePuns ] languageExtensions (Just Haskell2010) = [LangExt.ImplicitPrelude, -- See Note [When is StarIsType enabled] LangExt.StarIsType, LangExt.CUSKs, LangExt.MonomorphismRestriction, LangExt.DatatypeContexts, LangExt.TraditionalRecordSyntax, LangExt.EmptyDataDecls, LangExt.ForeignFunctionInterface, LangExt.PatternGuards, LangExt.DoAndIfThenElse, LangExt.FieldSelectors, LangExt.RelaxedPolyRec, LangExt.DeepSubsumption, LangExt.ListTuplePuns ] languageExtensions (Just GHC2021) = [LangExt.ImplicitPrelude, -- See Note [When is StarIsType enabled] LangExt.StarIsType, LangExt.MonomorphismRestriction, LangExt.TraditionalRecordSyntax, LangExt.EmptyDataDecls, LangExt.ForeignFunctionInterface, LangExt.PatternGuards, LangExt.DoAndIfThenElse, LangExt.FieldSelectors, LangExt.RelaxedPolyRec, LangExt.ListTuplePuns, -- Now the new extensions (not in Haskell2010) LangExt.BangPatterns, LangExt.BinaryLiterals, LangExt.ConstrainedClassMethods, LangExt.ConstraintKinds, LangExt.DeriveDataTypeable, LangExt.DeriveFoldable, LangExt.DeriveFunctor, LangExt.DeriveGeneric, LangExt.DeriveLift, LangExt.DeriveTraversable, LangExt.EmptyCase, 
LangExt.EmptyDataDeriving, LangExt.ExistentialQuantification, LangExt.ExplicitForAll, LangExt.FlexibleContexts, LangExt.FlexibleInstances, LangExt.GADTSyntax, LangExt.GeneralizedNewtypeDeriving, LangExt.HexFloatLiterals, LangExt.ImportQualifiedPost, LangExt.InstanceSigs, LangExt.KindSignatures, LangExt.MultiParamTypeClasses, LangExt.NamedFieldPuns, LangExt.NamedWildCards, LangExt.NumericUnderscores, LangExt.PolyKinds, LangExt.PostfixOperators, LangExt.RankNTypes, LangExt.ScopedTypeVariables, LangExt.StandaloneDeriving, LangExt.StandaloneKindSignatures, LangExt.TupleSections, LangExt.TypeApplications, LangExt.TypeOperators, LangExt.TypeSynonymInstances] languageExtensions (Just GHC2024) = languageExtensions (Just GHC2021) ++ [LangExt.DataKinds, LangExt.DerivingStrategies, LangExt.DisambiguateRecordFields, LangExt.ExplicitNamespaces, LangExt.GADTs, LangExt.MonoLocalBinds, LangExt.LambdaCase, LangExt.RoleAnnotations] ways :: DynFlags -> Ways ways dflags | dynamicNow dflags = addWay WayDyn (targetWays_ dflags) | otherwise = targetWays_ dflags -- | Get target profile targetProfile :: DynFlags -> Profile targetProfile dflags = Profile (targetPlatform dflags) (ways dflags) -- -- System tool settings and locations programName :: DynFlags -> String programName dflags = ghcNameVersion_programName $ ghcNameVersion dflags projectVersion :: DynFlags -> String projectVersion dflags = ghcNameVersion_projectVersion (ghcNameVersion dflags) ghcUsagePath :: DynFlags -> FilePath ghcUsagePath dflags = fileSettings_ghcUsagePath $ fileSettings dflags ghciUsagePath :: DynFlags -> FilePath ghciUsagePath dflags = fileSettings_ghciUsagePath $ fileSettings dflags topDir :: DynFlags -> FilePath topDir dflags = fileSettings_topDir $ fileSettings dflags toolDir :: DynFlags -> Maybe FilePath toolDir dflags = fileSettings_toolDir $ fileSettings dflags extraGccViaCFlags :: DynFlags -> [String] extraGccViaCFlags dflags = toolSettings_extraGccViaCFlags $ toolSettings dflags globalPackageDatabasePath 
:: DynFlags -> FilePath globalPackageDatabasePath dflags = fileSettings_globalPackageDatabase $ fileSettings dflags -- | The directory for this version of ghc in the user's app directory -- The appdir used to be in ~/.ghc but to respect the XDG specification -- we want to move it under $XDG_DATA_HOME/ -- However, old tooling (like cabal) might still write package environments -- to the old directory, so we prefer that if a subdirectory of ~/.ghc -- with the correct target and GHC version suffix exists. -- -- i.e. if ~/.ghc/$UNIQUE_SUBDIR exists we use that -- otherwise we use $XDG_DATA_HOME/$UNIQUE_SUBDIR -- -- UNIQUE_SUBDIR is typically a combination of the target platform and GHC version versionedAppDir :: String -> ArchOS -> MaybeT IO FilePath versionedAppDir appname platform = do -- Make sure we handle the case the HOME isn't set (see #11678) -- We need to fallback to the old scheme if the subdirectory exists. msum $ map (checkIfExists <=< fmap ( versionedFilePath platform)) [ tryMaybeT $ getAppUserDataDirectory appname -- this is ~/.ghc/ , tryMaybeT $ getXdgDirectory XdgData appname -- this is $XDG_DATA_HOME/ ] where checkIfExists dir = tryMaybeT (doesDirectoryExist dir) >>= \case True -> pure dir False -> MaybeT (pure Nothing) versionedFilePath :: ArchOS -> FilePath versionedFilePath platform = uniqueSubdir platform -- SDoc ------------------------------------------- -- | Initialize the pretty-printing options initSDocContext :: DynFlags -> PprStyle -> SDocContext initSDocContext dflags style = SDC { sdocStyle = style , sdocColScheme = colScheme dflags , sdocLastColour = Col.colReset , sdocShouldUseColor = overrideWith (canUseColor dflags) (useColor dflags) , sdocDefaultDepth = pprUserLength dflags , sdocLineLength = pprCols dflags , sdocCanUseUnicode = useUnicode dflags , sdocPrintErrIndexLinks = overrideWith (canUseErrorLinks dflags) (useErrorLinks dflags) , sdocHexWordLiterals = gopt Opt_HexWordLiterals dflags , sdocPprDebug = dopt Opt_D_ppr_debug dflags , 
sdocPrintUnicodeSyntax = gopt Opt_PrintUnicodeSyntax dflags , sdocPrintCaseAsLet = gopt Opt_PprCaseAsLet dflags , sdocPrintTypecheckerElaboration = gopt Opt_PrintTypecheckerElaboration dflags , sdocPrintAxiomIncomps = gopt Opt_PrintAxiomIncomps dflags , sdocPrintExplicitKinds = gopt Opt_PrintExplicitKinds dflags , sdocPrintExplicitCoercions = gopt Opt_PrintExplicitCoercions dflags , sdocPrintExplicitRuntimeReps = gopt Opt_PrintExplicitRuntimeReps dflags , sdocPrintExplicitForalls = gopt Opt_PrintExplicitForalls dflags , sdocPrintPotentialInstances = gopt Opt_PrintPotentialInstances dflags , sdocPrintEqualityRelations = gopt Opt_PrintEqualityRelations dflags , sdocSuppressTicks = gopt Opt_SuppressTicks dflags , sdocSuppressTypeSignatures = gopt Opt_SuppressTypeSignatures dflags , sdocSuppressTypeApplications = gopt Opt_SuppressTypeApplications dflags , sdocSuppressIdInfo = gopt Opt_SuppressIdInfo dflags , sdocSuppressCoercions = gopt Opt_SuppressCoercions dflags , sdocSuppressCoercionTypes = gopt Opt_SuppressCoercionTypes dflags , sdocSuppressUnfoldings = gopt Opt_SuppressUnfoldings dflags , sdocSuppressVarKinds = gopt Opt_SuppressVarKinds dflags , sdocSuppressUniques = gopt Opt_SuppressUniques dflags , sdocSuppressModulePrefixes = gopt Opt_SuppressModulePrefixes dflags , sdocSuppressStgExts = gopt Opt_SuppressStgExts dflags , sdocSuppressStgReps = gopt Opt_SuppressStgReps dflags , sdocErrorSpans = gopt Opt_ErrorSpans dflags , sdocStarIsType = xopt LangExt.StarIsType dflags , sdocLinearTypes = xopt LangExt.LinearTypes dflags , sdocListTuplePuns = xopt LangExt.ListTuplePuns dflags , sdocPrintTypeAbbreviations = True , sdocUnitIdForUser = ftext } -- | Initialize the pretty-printing options using the default user style initDefaultSDocContext :: DynFlags -> SDocContext initDefaultSDocContext dflags = initSDocContext dflags defaultUserStyle initPromotionTickContext :: DynFlags -> PromotionTickContext initPromotionTickContext dflags = PromTickCtx { ptcListTuplePuns = xopt 
LangExt.ListTuplePuns dflags, ptcPrintRedundantPromTicks = gopt Opt_PrintRedundantPromotionTicks dflags } -- ----------------------------------------------------------------------------- -- SSE, AVX, FMA isSse4_1Enabled :: DynFlags -> Bool isSse4_1Enabled dflags = sseVersion dflags >= Just SSE4 isSse4_2Enabled :: DynFlags -> Bool isSse4_2Enabled dflags = sseVersion dflags >= Just SSE42 isAvxEnabled :: DynFlags -> Bool isAvxEnabled dflags = avx dflags || avx2 dflags || avx512f dflags isAvx2Enabled :: DynFlags -> Bool isAvx2Enabled dflags = avx2 dflags || avx512f dflags isAvx512cdEnabled :: DynFlags -> Bool isAvx512cdEnabled dflags = avx512cd dflags isAvx512erEnabled :: DynFlags -> Bool isAvx512erEnabled dflags = avx512er dflags isAvx512fEnabled :: DynFlags -> Bool isAvx512fEnabled dflags = avx512f dflags isAvx512pfEnabled :: DynFlags -> Bool isAvx512pfEnabled dflags = avx512pf dflags isFmaEnabled :: DynFlags -> Bool isFmaEnabled dflags = fma dflags -- ----------------------------------------------------------------------------- -- BMI2 isBmiEnabled :: DynFlags -> Bool isBmiEnabled dflags = case platformArch (targetPlatform dflags) of ArchX86_64 -> bmiVersion dflags >= Just BMI1 ArchX86 -> bmiVersion dflags >= Just BMI1 _ -> False isBmi2Enabled :: DynFlags -> Bool isBmi2Enabled dflags = case platformArch (targetPlatform dflags) of ArchX86_64 -> bmiVersion dflags >= Just BMI2 ArchX86 -> bmiVersion dflags >= Just BMI2 _ -> False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Env.hs0000644000000000000000000004050607346545000020232 0ustar0000000000000000 module GHC.Driver.Env ( Hsc(..) , HscEnv (..) 
, hscUpdateFlags , hscSetFlags , hsc_home_unit , hsc_home_unit_maybe , hsc_units , hsc_HPT , hsc_HUE , hsc_HUG , hsc_all_home_unit_ids , hscUpdateLoggerFlags , hscUpdateHUG , hscUpdateHPT_lazy , hscUpdateHPT , hscSetActiveHomeUnit , hscSetActiveUnitId , hscActiveUnitId , runHsc , runHsc' , mkInteractiveHscEnv , runInteractiveHsc , hscEPS , hscInterp , hptCompleteSigs , hptAllInstances , hptInstancesBelow , hptAnns , hptAllThings , hptSomeThingsBelowUs , hptRules , prepareAnnotations , discardIC , lookupType , lookupIfaceByModule , mainModIs ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Driver.Errors ( printOrThrowDiagnostics ) import GHC.Driver.Errors.Types ( GhcMessage ) import GHC.Driver.Config.Logger (initLogFlags) import GHC.Driver.Config.Diagnostic (initDiagOpts, initPrintConfig) import GHC.Driver.Env.Types ( Hsc(..), HscEnv(..) ) import GHC.Runtime.Context import GHC.Runtime.Interpreter.Types (Interp) import GHC.Unit import GHC.Unit.Module.ModGuts import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModDetails import GHC.Unit.Home.ModInfo import GHC.Unit.Env import GHC.Unit.External import GHC.Core ( CoreRule ) import GHC.Core.FamInstEnv import GHC.Core.InstEnv import GHC.Types.Annotations ( Annotation, AnnEnv, mkAnnEnv, plusAnnEnv ) import GHC.Types.CompleteMatch import GHC.Types.Error ( emptyMessages, Messages ) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.TyThing import GHC.Builtin.Names ( gHC_PRIM ) import GHC.Data.Maybe import GHC.Utils.Exception as Ex import GHC.Utils.Outputable import GHC.Utils.Monad import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Logger import Data.IORef import qualified Data.Set as Set import GHC.Unit.Module.Graph runHsc :: HscEnv -> Hsc a -> IO a runHsc hsc_env hsc = do (a, w) <- runHsc' hsc_env hsc let dflags = hsc_dflags hsc_env let !diag_opts = initDiagOpts dflags !print_config = initPrintConfig dflags printOrThrowDiagnostics (hsc_logger hsc_env) print_config diag_opts w 
return a runHsc' :: HscEnv -> Hsc a -> IO (a, Messages GhcMessage) runHsc' hsc_env (Hsc hsc) = hsc hsc_env emptyMessages -- | Switches in the DynFlags and Plugins from the InteractiveContext mkInteractiveHscEnv :: HscEnv -> HscEnv mkInteractiveHscEnv hsc_env = let ic = hsc_IC hsc_env in hscSetFlags (ic_dflags ic) $ hsc_env { hsc_plugins = ic_plugins ic } -- | A variant of runHsc that switches in the DynFlags and Plugins from the -- InteractiveContext before running the Hsc computation. runInteractiveHsc :: HscEnv -> Hsc a -> IO a runInteractiveHsc hsc_env = runHsc (mkInteractiveHscEnv hsc_env) hsc_home_unit :: HscEnv -> HomeUnit hsc_home_unit = unsafeGetHomeUnit . hsc_unit_env hsc_home_unit_maybe :: HscEnv -> Maybe HomeUnit hsc_home_unit_maybe = ue_homeUnit . hsc_unit_env hsc_units :: HasDebugCallStack => HscEnv -> UnitState hsc_units = ue_units . hsc_unit_env hsc_HPT :: HscEnv -> HomePackageTable hsc_HPT = ue_hpt . hsc_unit_env hsc_HUE :: HscEnv -> HomeUnitEnv hsc_HUE = ue_currentHomeUnitEnv . hsc_unit_env hsc_HUG :: HscEnv -> HomeUnitGraph hsc_HUG = ue_home_unit_graph . hsc_unit_env hsc_all_home_unit_ids :: HscEnv -> Set.Set UnitId hsc_all_home_unit_ids = unitEnv_keys . hsc_HUG hscUpdateHPT_lazy :: (HomePackageTable -> HomePackageTable) -> HscEnv -> HscEnv hscUpdateHPT_lazy f hsc_env = let !res = updateHpt_lazy f (hsc_unit_env hsc_env) in hsc_env { hsc_unit_env = res } hscUpdateHPT :: (HomePackageTable -> HomePackageTable) -> HscEnv -> HscEnv hscUpdateHPT f hsc_env = let !res = updateHpt f (hsc_unit_env hsc_env) in hsc_env { hsc_unit_env = res } hscUpdateHUG :: (HomeUnitGraph -> HomeUnitGraph) -> HscEnv -> HscEnv hscUpdateHUG f hsc_env = hsc_env { hsc_unit_env = updateHug f (hsc_unit_env hsc_env) } {- Note [Target code interpreter] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Template Haskell and GHCi use an interpreter to execute code that is built for the compiler target platform (= code host platform) on the compiler host platform (= code build platform). 
The internal interpreter can be used when both platforms are the same and when the built code is compatible with the compiler itself (same way, etc.). This interpreter is not always available: for instance stage1 compiler doesn't have it because there might be an ABI mismatch between the code objects (built by stage1 compiler) and the stage1 compiler itself (built by stage0 compiler). In most cases, an external interpreter can be used instead: it runs in a separate process and it communicates with the compiler via a two-way message passing channel. The process is lazily spawned to avoid overhead when it is not used. The target code interpreter to use can be selected per session via the `hsc_interp` field of `HscEnv`. There may be no interpreter available at all, in which case Template Haskell and GHCi will fail to run. The interpreter to use is configured via command-line flags (in `GHC.setSessionDynFlags`). -} -- Note [hsc_type_env_var hack] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- hsc_type_env_var is used to initialize tcg_type_env_var, and -- eventually it is the mutable variable that is queried from -- if_rec_types to get a TypeEnv. So, clearly, it's something -- related to knot-tying (see Note [Tying the knot]). -- hsc_type_env_var is used in two places: initTcRn (where -- it initializes tcg_type_env_var) and initIfaceCheck -- (where it initializes if_rec_types). -- -- But why do we need a way to feed a mutable variable in? Why -- can't we just initialize tcg_type_env_var when we start -- typechecking? The problem is we need to knot-tie the -- EPS, and we may start adding things to the EPS before type -- checking starts. -- -- Here is a concrete example. Suppose we are running -- "ghc -c A.hs", and we have this file system state: -- -- A.hs-boot A.hi-boot **up to date** -- B.hs B.hi **up to date** -- A.hs A.hi **stale** -- -- The first thing we do is run checkOldIface on A.hi. 
-- checkOldIface will call loadInterface on B.hi so it can -- get its hands on the fingerprints, to find out if A.hi -- needs recompilation. But loadInterface also populates -- the EPS! And so if compilation turns out to be necessary, -- as it is in this case, the thunks we put into the EPS for -- B.hi need to have the correct if_rec_types mutable variable -- to query. -- -- If the mutable variable is only allocated WHEN we start -- typechecking, then that's too late: we can't get the -- information to the thunks. So we need to pre-commit -- to a type variable in 'hscIncrementalCompile' BEFORE we -- check the old interface. -- -- This is all a massive hack because arguably checkOldIface -- should not populate the EPS. But that's a refactor for -- another day. -- | Retrieve the ExternalPackageState cache. hscEPS :: HscEnv -> IO ExternalPackageState hscEPS hsc_env = readIORef (euc_eps (ue_eps (hsc_unit_env hsc_env))) hptCompleteSigs :: HscEnv -> CompleteMatches hptCompleteSigs = hptAllThings (md_complete_matches . hm_details) -- | Find all the instance declarations (of classes and families) from -- the Home Package Table filtered by the provided predicate function. -- Used in @tcRnImports@, to select the instances that are in the -- transitive closure of imports from the currently compiled module. 
hptAllInstances :: HscEnv -> (InstEnv, [FamInst]) hptAllInstances hsc_env = let (insts, famInsts) = unzip $ flip hptAllThings hsc_env $ \mod_info -> do let details = hm_details mod_info return (md_insts details, md_fam_insts details) in (foldl' unionInstEnv emptyInstEnv insts, concat famInsts) -- | Find instances visible from the given set of imports hptInstancesBelow :: HscEnv -> UnitId -> ModuleNameWithIsBoot -> (InstEnv, [FamInst]) hptInstancesBelow hsc_env uid mnwib = let mn = gwib_mod mnwib (insts, famInsts) = unzip $ hptSomeThingsBelowUs (\mod_info -> let details = hm_details mod_info -- Don't include instances for the current module in if moduleName (mi_module (hm_iface mod_info)) == mn then [] else [(md_insts details, md_fam_insts details)]) True -- Include -hi-boot hsc_env uid mnwib in (foldl' unionInstEnv emptyInstEnv insts, concat famInsts) -- | Get rules from modules "below" this one (in the dependency sense) hptRules :: HscEnv -> UnitId -> ModuleNameWithIsBoot -> [CoreRule] hptRules = hptSomeThingsBelowUs (md_rules . hm_details) False -- | Get annotations from modules "below" this one (in the dependency sense) hptAnns :: HscEnv -> Maybe (UnitId, ModuleNameWithIsBoot) -> [Annotation] hptAnns hsc_env (Just (uid, mn)) = hptSomeThingsBelowUs (md_anns . hm_details) False hsc_env uid mn hptAnns hsc_env Nothing = hptAllThings (md_anns . hm_details) hsc_env hptAllThings :: (HomeModInfo -> [a]) -> HscEnv -> [a] hptAllThings extract hsc_env = concatMap (concatHpt extract . homeUnitEnv_hpt . 
snd) (hugElts (hsc_HUG hsc_env)) -- | Get things from modules "below" this one (in the dependency sense) -- C.f Inst.hptInstances hptSomeThingsBelowUs :: (HomeModInfo -> [a]) -> Bool -> HscEnv -> UnitId -> ModuleNameWithIsBoot -> [a] hptSomeThingsBelowUs extract include_hi_boot hsc_env uid mn | isOneShot (ghcMode (hsc_dflags hsc_env)) = [] | otherwise = let hug = hsc_HUG hsc_env mg = hsc_mod_graph hsc_env in [ thing | -- Find each non-hi-boot module below me (ModNodeKeyWithUid (GWIB { gwib_mod = mod, gwib_isBoot = is_boot }) mod_uid) <- Set.toList (moduleGraphModulesBelow mg uid mn) , include_hi_boot || (is_boot == NotBoot) -- unsavoury: when compiling the base package with --make, we -- sometimes try to look up RULES etc for GHC.Prim. GHC.Prim won't -- be in the HPT, because we never compile it; it's in the EPT -- instead. ToDo: clean up, and remove this slightly bogus filter: , mod /= moduleName gHC_PRIM , not (mod == gwib_mod mn && uid == mod_uid) -- Look it up in the HPT , let things = case lookupHug hug mod_uid mod of Just info -> extract info Nothing -> pprTrace "WARNING in hptSomeThingsBelowUs" msg mempty msg = vcat [text "missing module" <+> ppr mod, text "When starting from" <+> ppr mn, text "below:" <+> ppr (moduleGraphModulesBelow mg uid mn), text "Probable cause: out-of-date interface files"] -- This really shouldn't happen, but see #962 , thing <- things ] -- | Deal with gathering annotations in from all possible places -- and combining them into a single 'AnnEnv' prepareAnnotations :: HscEnv -> Maybe ModGuts -> IO AnnEnv prepareAnnotations hsc_env mb_guts = do eps <- hscEPS hsc_env let -- Extract annotations from the module being compiled if supplied one mb_this_module_anns = fmap (mkAnnEnv . mg_anns) mb_guts -- Extract dependencies of the module if we are supplied one, -- otherwise load annotations from all home package table -- entries regardless of dependency ordering. 
get_mod mg = (moduleUnitId (mg_module mg), GWIB (moduleName (mg_module mg)) NotBoot) home_pkg_anns = (mkAnnEnv . hptAnns hsc_env) $ fmap get_mod mb_guts other_pkg_anns = eps_ann_env eps ann_env = foldl1' plusAnnEnv $ catMaybes [mb_this_module_anns, Just home_pkg_anns, Just other_pkg_anns] return ann_env -- | Find the 'TyThing' for the given 'Name' by using all the resources -- at our disposal: the compiled modules in the 'HomePackageTable' and the -- compiled modules in other packages that live in 'PackageTypeEnv'. Note -- that this does NOT look up the 'TyThing' in the module being compiled: you -- have to do that yourself, if desired lookupType :: HscEnv -> Name -> IO (Maybe TyThing) lookupType hsc_env name = do eps <- liftIO $ hscEPS hsc_env let pte = eps_PTE eps return $ lookupTypeInPTE hsc_env pte name lookupTypeInPTE :: HscEnv -> PackageTypeEnv -> Name -> Maybe TyThing lookupTypeInPTE hsc_env pte name = ty where hpt = hsc_HUG hsc_env mod = assertPpr (isExternalName name) (ppr name) $ if isHoleName name then mkHomeModule (hsc_home_unit hsc_env) (moduleName (nameModule name)) else nameModule name !ty = if isOneShot (ghcMode (hsc_dflags hsc_env)) -- in one-shot, we don't use the HPT then lookupNameEnv pte name else case lookupHugByModule mod hpt of Just hm -> lookupNameEnv (md_types (hm_details hm)) name Nothing -> lookupNameEnv pte name -- | Find the 'ModIface' for a 'Module', searching in both the loaded home -- and external package module information lookupIfaceByModule :: HomeUnitGraph -> PackageIfaceTable -> Module -> Maybe ModIface lookupIfaceByModule hug pit mod = case lookupHugByModule mod hug of Just hm -> Just (hm_iface hm) Nothing -> lookupModuleEnv pit mod -- If the module does come from the home package, why do we look in the PIT as well? 
-- (a) In OneShot mode, even home-package modules accumulate in the PIT -- (b) Even in Batch (--make) mode, there is *one* case where a home-package -- module is in the PIT, namely GHC.Prim when compiling the base package. -- We could eliminate (b) if we wanted, by making GHC.Prim belong to a package -- of its own, but it doesn't seem worth the bother. mainModIs :: HomeUnitEnv -> Module mainModIs hue = mkHomeModule (expectJust "mainModIs" $ homeUnitEnv_home_unit hue) (mainModuleNameIs (homeUnitEnv_dflags hue)) -- | Retrieve the target code interpreter -- -- Fails if no target code interpreter is available hscInterp :: HscEnv -> Interp hscInterp hsc_env = case hsc_interp hsc_env of Nothing -> throw (InstallationError "Couldn't find a target code interpreter. Try with -fexternal-interpreter") Just i -> i -- | Update the LogFlags of the Log in hsc_logger from the DynFlags in -- hsc_dflags. You need to call this when DynFlags are modified. hscUpdateLoggerFlags :: HscEnv -> HscEnv hscUpdateLoggerFlags h = h { hsc_logger = setLogFlags (hsc_logger h) (initLogFlags (hsc_dflags h)) } -- | Update Flags hscUpdateFlags :: (DynFlags -> DynFlags) -> HscEnv -> HscEnv hscUpdateFlags f h = hscSetFlags (f (hsc_dflags h)) h -- | Set Flags hscSetFlags :: HasDebugCallStack => DynFlags -> HscEnv -> HscEnv hscSetFlags dflags h = hscUpdateLoggerFlags $ h { hsc_dflags = dflags , hsc_unit_env = ue_setFlags dflags (hsc_unit_env h) } -- See Note [Multiple Home Units] hscSetActiveHomeUnit :: HasDebugCallStack => HomeUnit -> HscEnv -> HscEnv hscSetActiveHomeUnit home_unit = hscSetActiveUnitId (homeUnitId home_unit) hscSetActiveUnitId :: HasDebugCallStack => UnitId -> HscEnv -> HscEnv hscSetActiveUnitId uid e = e { hsc_unit_env = ue_setActiveUnit uid (hsc_unit_env e) , hsc_dflags = ue_unitFlags uid (hsc_unit_env e) } hscActiveUnitId :: HscEnv -> UnitId hscActiveUnitId e = ue_currentUnit (hsc_unit_env e) -- | Discard the contents of the InteractiveContext, but keep the DynFlags and -- the loaded 
plugins. It will also keep ic_int_print and ic_monad if their -- names are from external packages. discardIC :: HscEnv -> HscEnv discardIC hsc_env = hsc_env { hsc_IC = empty_ic { ic_int_print = new_ic_int_print , ic_monad = new_ic_monad , ic_plugins = old_plugins } } where -- Force the new values for ic_int_print and ic_monad to avoid leaking old_ic !new_ic_int_print = keep_external_name ic_int_print !new_ic_monad = keep_external_name ic_monad !old_plugins = ic_plugins old_ic dflags = ic_dflags old_ic old_ic = hsc_IC hsc_env empty_ic = emptyInteractiveContext dflags keep_external_name ic_name | nameIsFromExternalPackage home_unit old_name = old_name | otherwise = ic_name empty_ic where home_unit = hsc_home_unit hsc_env old_name = ic_name old_ic ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Env/0000755000000000000000000000000007346545000017671 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Env/KnotVars.hs0000644000000000000000000001057007346545000021777 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} -- | This data structure holds an updateable environment which is used -- when compiling module loops. module GHC.Driver.Env.KnotVars( KnotVars(..) , emptyKnotVars , knotVarsFromModuleEnv , knotVarElems , lookupKnotVars , knotVarsWithout ) where import GHC.Prelude import GHC.Unit.Types ( Module ) import GHC.Unit.Module.Env import Data.Maybe import GHC.Utils.Outputable -- See Note [Why is KnotVars not a ModuleEnv] -- See Note [KnotVars invariants] data KnotVars a = KnotVars { kv_domain :: [Module] -- Domain of the function , Note [KnotVars: Why store the domain?] 
-- Invariant: kv_lookup is surjective relative to kv_domain , kv_lookup :: Module -> Maybe a -- Lookup function } | NoKnotVars deriving Functor instance Outputable (KnotVars a) where ppr NoKnotVars = text "NoKnot" ppr (KnotVars dom _lookup) = text "Knotty:" <+> ppr dom emptyKnotVars :: KnotVars a emptyKnotVars = NoKnotVars knotVarsFromModuleEnv :: ModuleEnv a -> KnotVars a knotVarsFromModuleEnv me | isEmptyModuleEnv me = NoKnotVars knotVarsFromModuleEnv me = KnotVars (moduleEnvKeys me) (lookupModuleEnv me) knotVarElems :: KnotVars a -> [a] knotVarElems (KnotVars keys lookup) = mapMaybe lookup keys knotVarElems NoKnotVars = [] lookupKnotVars :: KnotVars a -> Module -> Maybe a lookupKnotVars (KnotVars _ lookup) x = lookup x lookupKnotVars NoKnotVars _ = Nothing knotVarsWithout :: Module -> KnotVars a -> KnotVars a knotVarsWithout this_mod (KnotVars loop_mods lkup) = KnotVars (filter (/= this_mod) loop_mods) (\that_mod -> if that_mod == this_mod then Nothing else lkup that_mod) knotVarsWithout _ NoKnotVars = NoKnotVars {- Note [Why is KnotVars not a ModuleEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Initially 'KnotVars' was just a 'ModuleEnv a' but there is one tricky use of the data structure in 'mkDsEnvs' which required this generalised structure. In interactive mode the TypeEnvs from all the previous statements are merged together into one big TypeEnv. 'dsLookupVar' relies on `tcIfaceVar'. The normal lookup functions either look in the HPT or EPS but there is no entry for the `Ghci` modules in either, so the whole merged TypeEnv for all previous Ghci* is stored in the `if_rec_types` variable and then lookup checks there in the case of any interactive module. This is a misuse of the `if_rec_types` variable which might be fixed in future if the Ghci modules are just placed into the HPT like normal modules with implicit imports between them. Note [KnotVars: Why store the domain?] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Normally there's a 'Module' at hand to tell us which 'TypeEnv' we want to interrogate at a particular time, apart from one case, when constructing the in-scope set when linting an unfolding. In this case the whole environment is needed to tell us everything that's in-scope at top-level in the loop because whilst we are linting unfoldings the top-level identifiers from modules in the cycle might not be globalised properly yet. This could be refactored so that the lint functions knew about 'KnotVars' and delayed this check until deciding whether a variable was local or not. Note [KnotVars invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~ There is a simple invariant which should hold for the KnotVars constructor: * At the end of upsweep, there should be no live KnotVars This invariant is difficult to test but easy to check using ghc-debug. The usage of NoKnotVars is intended to make this invariant easier to check. The most common situation where a KnotVars is retained accidentally is if a HscEnv which contains reference to a KnotVars is used during interface file loading. The thunks created during this process will retain a reference to the KnotVars. In theory, all these references should be removed by 'maybeRehydrateAfter' as that rehydrates all interface files in the loop without using KnotVars. At the time of writing (MP: Oct 21) the invariant doesn't actually hold but also doesn't seem to have too much of a negative consequence on compiler residency. In theory it could be quite bad as each KnotVars may retain a stale reference to an entire TypeEnv. See #20491 -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Env/Types.hs0000644000000000000000000000773507346545000021345 0ustar0000000000000000{-# LANGUAGE DerivingVia #-} module GHC.Driver.Env.Types ( Hsc(..) , HscEnv(..) 
) where import GHC.Driver.Errors.Types ( GhcMessage ) import {-# SOURCE #-} GHC.Driver.Hooks import GHC.Driver.DynFlags ( ContainsDynFlags(..), HasDynFlags(..), DynFlags ) import GHC.Driver.LlvmConfigCache (LlvmConfigCache) import GHC.Prelude import GHC.Runtime.Context import GHC.Runtime.Interpreter.Types ( Interp ) import GHC.Types.Error ( Messages ) import GHC.Types.Name.Cache import GHC.Types.Target import GHC.Types.TypeEnv import GHC.Unit.Finder.Types import GHC.Unit.Module.Graph import GHC.Unit.Env import GHC.Utils.Logger import GHC.Utils.TmpFs import {-# SOURCE #-} GHC.Driver.Plugins import Control.Monad.IO.Class import Control.Monad.Trans.Reader import Control.Monad.Trans.State import Data.IORef import GHC.Driver.Env.KnotVars -- | The Hsc monad: Passing an environment and diagnostic state newtype Hsc a = Hsc (HscEnv -> Messages GhcMessage -> IO (a, Messages GhcMessage)) deriving (Functor, Applicative, Monad, MonadIO) via ReaderT HscEnv (StateT (Messages GhcMessage) IO) instance HasDynFlags Hsc where getDynFlags = Hsc $ \e w -> return (hsc_dflags e, w) instance ContainsDynFlags HscEnv where extractDynFlags h = hsc_dflags h instance HasLogger Hsc where getLogger = Hsc $ \e w -> return (hsc_logger e, w) -- | HscEnv is like 'GHC.Driver.Monad.Session', except that some of the fields are immutable. -- An HscEnv is used to compile a single module from plain Haskell source -- code (after preprocessing) to either C, assembly or C--. It's also used -- to store the dynamic linker state to allow for multiple linkers in the -- same address space. -- Things like the module graph don't change during a single compilation. -- -- Historical note: \"hsc\" used to be the name of the compiler binary, -- when there was a separate driver and compiler. To compile a single -- module, the driver would invoke hsc on the source code... so nowadays -- we think of hsc as the layer of the compiler that deals with compiling -- a single module. 
data HscEnv = HscEnv { hsc_dflags :: DynFlags, -- ^ The dynamic flag settings hsc_targets :: [Target], -- ^ The targets (or roots) of the current session hsc_mod_graph :: ModuleGraph, -- ^ The module graph of the current session hsc_IC :: InteractiveContext, -- ^ The context for evaluating interactive statements hsc_NC :: {-# UNPACK #-} !NameCache, -- ^ Global Name cache so that each Name gets a single Unique. -- Also track the origin of the Names. hsc_FC :: {-# UNPACK #-} !FinderCache, -- ^ The cached result of performing finding in the file system hsc_type_env_vars :: KnotVars (IORef TypeEnv) -- ^ Used for one-shot compilation only, to initialise -- the 'IfGblEnv'. See 'GHC.Tc.Utils.tcg_type_env_var' for -- 'GHC.Tc.Utils.TcGblEnv'. See also Note [hsc_type_env_var hack] , hsc_interp :: Maybe Interp -- ^ target code interpreter (if any) to use for TH and GHCi. -- See Note [Target code interpreter] , hsc_plugins :: !Plugins -- ^ Plugins , hsc_unit_env :: UnitEnv -- ^ Unit environment (unit state, home unit, etc.). -- -- Initialized from the databases cached in 'hsc_unit_dbs' and -- from the DynFlags. , hsc_logger :: !Logger -- ^ Logger with its flags. -- -- Don't forget to update the logger flags if the logging -- related DynFlags change. Or better, use hscSetFlags setter -- which does it. , hsc_hooks :: !Hooks -- ^ Hooks , hsc_tmpfs :: !TmpFs -- ^ Temporary files , hsc_llvm_config :: !LlvmConfigCache -- ^ LLVM configuration cache. } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Errors.hs0000644000000000000000000000503607346545000020755 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} module GHC.Driver.Errors ( printOrThrowDiagnostics , printMessages , mkDriverPsHeaderMessage ) where import GHC.Driver.Errors.Types import GHC.Prelude import GHC.Types.SourceError import GHC.Types.Error import GHC.Utils.Error import GHC.Utils.Outputable (hang, ppr, ($$), text, mkErrStyle, sdocStyle, updSDocContext ) import GHC.Utils.Logger printMessages :: forall a . 
Diagnostic a => Logger -> DiagnosticOpts a -> DiagOpts -> Messages a -> IO () printMessages logger msg_opts opts msgs = sequence_ [ let style = mkErrStyle name_ppr_ctx ctx = (diag_ppr_ctx opts) { sdocStyle = style } in (if log_diags_as_json then logJsonMsg logger (MCDiagnostic sev reason (diagnosticCode dia)) msg else logMsg logger (MCDiagnostic sev reason (diagnosticCode dia)) s $ updSDocContext (\_ -> ctx) (messageWithHints dia)) | msg@MsgEnvelope { errMsgSpan = s, errMsgDiagnostic = dia, errMsgSeverity = sev, errMsgReason = reason, errMsgContext = name_ppr_ctx } <- sortMsgBag (Just opts) (getMessages msgs) ] where messageWithHints :: Diagnostic a => a -> SDoc messageWithHints e = let main_msg = formatBulleted $ diagnosticMessage msg_opts e in case diagnosticHints e of [] -> main_msg [h] -> main_msg $$ hang (text "Suggested fix:") 2 (ppr h) hs -> main_msg $$ hang (text "Suggested fixes:") 2 (formatBulleted $ mkDecorated . map ppr $ hs) log_diags_as_json = log_diagnostics_as_json (logFlags logger) -- | Given a bag of diagnostics, turn them into an exception if -- any has 'SevError', or print them out otherwise. printOrThrowDiagnostics :: Logger -> GhcMessageOpts -> DiagOpts -> Messages GhcMessage -> IO () printOrThrowDiagnostics logger print_config opts msgs | errorsOrFatalWarningsFound msgs = throwErrors msgs | otherwise = printMessages logger print_config opts msgs -- | Convert a 'PsError' into a wrapped 'DriverMessage'; use it -- for dealing with parse errors when the driver is doing dependency analysis. 
-- Defined here to avoid module loops between GHC.Driver.Error.Types and -- GHC.Driver.Error.Ppr mkDriverPsHeaderMessage :: MsgEnvelope PsMessage -> MsgEnvelope DriverMessage mkDriverPsHeaderMessage = fmap DriverPsHeaderMessage ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Errors/0000755000000000000000000000000007346545000020415 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Errors/Ppr.hs0000644000000000000000000004033107346545000021513 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic {DriverMessage, GhcMessage} module GHC.Driver.Errors.Ppr ( -- This module only exports Diagnostic instances. ) where import GHC.Prelude import GHC.Driver.Errors.Types import GHC.Driver.Flags import GHC.Driver.DynFlags import GHC.HsToCore.Errors.Ppr () -- instance Diagnostic DsMessage import GHC.Parser.Errors.Ppr () -- instance Diagnostic PsMessage import GHC.Types.Error import GHC.Types.Error.Codes import GHC.Unit.Types import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Unit.Module import GHC.Unit.Module.Graph import GHC.Unit.Module.ModSummary import GHC.Unit.State import GHC.Types.Hint import GHC.Types.SrcLoc import Data.Version import Language.Haskell.Syntax.Decls (RuleDecl(..)) import GHC.Tc.Errors.Types (TcRnMessage) import GHC.HsToCore.Errors.Types (DsMessage) import GHC.Iface.Errors.Types import GHC.Tc.Errors.Ppr () -- instance Diagnostic TcRnMessage import GHC.Iface.Errors.Ppr () -- instance Diagnostic IfaceMessage import GHC.CmmToLlvm.Version (llvmVersionStr, supportedLlvmVersionLowerBound, supportedLlvmVersionUpperBound) -- -- Suggestions -- -- | Suggests a list of 'InstantiationSuggestion' for the '.hsig' file to the user. 
suggestInstantiatedWith :: ModuleName -> GenInstantiations UnitId -> [InstantiationSuggestion] suggestInstantiatedWith pi_mod_name insts = [ InstantiationSuggestion k v | (k,v) <- ((pi_mod_name, mkHoleModule pi_mod_name) : insts) ] instance HasDefaultDiagnosticOpts GhcMessageOpts where defaultOpts = GhcMessageOpts (defaultDiagnosticOpts @PsMessage) (defaultDiagnosticOpts @TcRnMessage) (defaultDiagnosticOpts @DsMessage) (defaultDiagnosticOpts @DriverMessage) instance Diagnostic GhcMessage where type DiagnosticOpts GhcMessage = GhcMessageOpts diagnosticMessage opts = \case GhcPsMessage m -> diagnosticMessage (psMessageOpts opts) m GhcTcRnMessage m -> diagnosticMessage (tcMessageOpts opts) m GhcDsMessage m -> diagnosticMessage (dsMessageOpts opts) m GhcDriverMessage m -> diagnosticMessage (driverMessageOpts opts) m GhcUnknownMessage (UnknownDiagnostic f m) -> diagnosticMessage (f opts) m diagnosticReason = \case GhcPsMessage m -> diagnosticReason m GhcTcRnMessage m -> diagnosticReason m GhcDsMessage m -> diagnosticReason m GhcDriverMessage m -> diagnosticReason m GhcUnknownMessage m -> diagnosticReason m diagnosticHints = \case GhcPsMessage m -> diagnosticHints m GhcTcRnMessage m -> diagnosticHints m GhcDsMessage m -> diagnosticHints m GhcDriverMessage m -> diagnosticHints m GhcUnknownMessage m -> diagnosticHints m diagnosticCode = constructorCode instance HasDefaultDiagnosticOpts DriverMessageOpts where defaultOpts = DriverMessageOpts (defaultDiagnosticOpts @PsMessage) (defaultDiagnosticOpts @IfaceMessage) instance Diagnostic DriverMessage where type DiagnosticOpts DriverMessage = DriverMessageOpts diagnosticMessage opts = \case DriverUnknownMessage (UnknownDiagnostic f m) -> diagnosticMessage (f opts) m DriverPsHeaderMessage m -> diagnosticMessage (psDiagnosticOpts opts) m DriverMissingHomeModules uid missing buildingCabalPackage -> let msg | buildingCabalPackage == YesBuildingCabalPackage = hang (text "These modules are needed for compilation but not listed in your 
.cabal file's other-modules for" <+> quotes (ppr uid) <+> text ":") 4 (sep (map ppr missing)) | otherwise = hang (text "Modules are not listed in options for" <+> quotes (ppr uid) <+> text "but needed for compilation:") 4 (sep (map ppr missing)) in mkSimpleDecorated msg DriverUnknownHiddenModules uid missing -> let msg = hang (text "Modules are listed as hidden in options for" <+> quotes (ppr uid) <+> text "but not part of the unit:") 4 (sep (map ppr missing)) in mkSimpleDecorated msg DriverUnknownReexportedModules uid missing -> let msg = hang (text "Modules are listed as reexported in options for" <+> quotes (ppr uid) <+> text "but can't be found in any dependency:") 4 (sep (map ppr missing)) in mkSimpleDecorated msg DriverUnusedPackages unusedArgs -> let msg = vcat [ text "The following packages were specified" <+> text "via -package or -package-id flags," , text "but were not needed for compilation:" , nest 2 (vcat (map (withDash . displayOneUnused) unusedArgs)) ] in mkSimpleDecorated msg where withDash :: SDoc -> SDoc withDash = (<+>) (text "-") displayOneUnused (_uid, pn , v, f) = ppr pn <> text "-" <> text (showVersion v) <+> parens (suffix f) suffix f = text "exposed by flag" <+> pprUnusedArg f pprUnusedArg :: PackageArg -> SDoc pprUnusedArg (PackageArg str) = text "-package" <+> text str pprUnusedArg (UnitIdArg uid) = text "-package-id" <+> ppr uid DriverUnnecessarySourceImports mod -> mkSimpleDecorated (text "{-# SOURCE #-} unnecessary in import of " <+> quotes (ppr mod)) DriverDuplicatedModuleDeclaration mod files -> mkSimpleDecorated $ text "module" <+> quotes (ppr mod) <+> text "is defined in multiple files:" <+> sep (map text files) DriverModuleNotFound mod -> mkSimpleDecorated (text "module" <+> quotes (ppr mod) <+> text "cannot be found locally") DriverFileModuleNameMismatch actual expected -> mkSimpleDecorated $ text "File name does not match module name:" $$ text "Saw :" <+> quotes (ppr actual) $$ text "Expected:" <+> quotes (ppr expected) 
DriverUnexpectedSignature pi_mod_name _buildingCabalPackage _instantiations -> mkSimpleDecorated $ text "Unexpected signature:" <+> quotes (ppr pi_mod_name) DriverFileNotFound hsFilePath -> mkSimpleDecorated (text "Can't find" <+> text hsFilePath) DriverStaticPointersNotSupported -> mkSimpleDecorated (text "StaticPointers is not supported in GHCi interactive expressions.") DriverBackpackModuleNotFound modname -> mkSimpleDecorated (text "module" <+> ppr modname <+> text "was not found") DriverUserDefinedRuleIgnored (HsRule { rd_name = n }) -> mkSimpleDecorated $ text "Rule \"" <> ftext (unLoc n) <> text "\" ignored" $+$ text "Defining user rules is disabled under Safe Haskell" DriverMixedSafetyImport modName -> mkSimpleDecorated $ text "Module" <+> ppr modName <+> text ("is imported both as a safe and unsafe import!") DriverCannotLoadInterfaceFile m -> mkSimpleDecorated $ text "Can't load the interface file for" <+> ppr m <> text ", to check that it can be safely imported" DriverInferredSafeModule m -> mkSimpleDecorated $ quotes (ppr $ moduleName m) <+> text "has been inferred as safe!" DriverInferredSafeImport m -> mkSimpleDecorated $ sep [ text "Importing Safe-Inferred module " <> ppr (moduleName m) <> text " from explicitly Safe module" ] DriverMarkedTrustworthyButInferredSafe m -> mkSimpleDecorated $ quotes (ppr $ moduleName m) <+> text "is marked as Trustworthy but has been inferred as safe!" DriverCannotImportUnsafeModule m -> mkSimpleDecorated $ sep [ ppr (moduleName m) <> text ": Can't be safely imported!" , text "The module itself isn't safe." ] DriverMissingSafeHaskellMode modName -> mkSimpleDecorated $ ppr modName <+> text "is missing Safe Haskell mode" DriverPackageNotTrusted state pkg -> mkSimpleDecorated $ pprWithUnitState state $ text "The package (" <> ppr pkg <> text ") is required to be trusted but it isn't!" DriverCannotImportFromUntrustedPackage state m -> mkSimpleDecorated $ sep [ ppr (moduleName m) <> text ": Can't be safely imported!" 
, text "The package (" <> (pprWithUnitState state $ ppr (moduleUnit m)) <> text ") the module resides in isn't trusted." ] DriverRedirectedNoMain mod_name -> mkSimpleDecorated $ (text ("Output was redirected with -o, " ++ "but no output will be generated.") $$ (text "There is no module named" <+> quotes (ppr mod_name) <> text ".")) DriverHomePackagesNotClosed needed_unit_ids -> mkSimpleDecorated $ vcat ([text "Home units are not closed." , text "It is necessary to also load the following units:" ] ++ map (\uid -> text "-" <+> ppr uid) needed_unit_ids) DriverInterfaceError reason -> diagnosticMessage (ifaceDiagnosticOpts opts) reason DriverInconsistentDynFlags msg -> mkSimpleDecorated $ text msg DriverSafeHaskellIgnoredExtension ext -> let arg = text "-X" <> ppr ext in mkSimpleDecorated $ arg <+> text "is not allowed in Safe Haskell; ignoring" <+> arg DriverPackageTrustIgnored -> mkSimpleDecorated $ text "-fpackage-trust ignored; must be specified with a Safe Haskell flag" DriverUnrecognisedFlag arg -> mkSimpleDecorated $ text $ "unrecognised warning flag: -" ++ arg DriverDeprecatedFlag arg msg -> mkSimpleDecorated $ text $ arg ++ " is deprecated: " ++ msg DriverModuleGraphCycle path -> mkSimpleDecorated $ vcat [ text "Module graph contains a cycle:" , nest 2 (show_path path) ] where show_path :: [ModuleGraphNode] -> SDoc show_path [] = panic "show_path" show_path [m] = ppr_node m <+> text "imports itself" show_path (m1:m2:ms) = vcat ( nest 14 (ppr_node m1) : nest 6 (text "imports" <+> ppr_node m2) : go ms ) where go [] = [text "which imports" <+> ppr_node m1] go (m:ms) = (text "which imports" <+> ppr_node m) : go ms ppr_node :: ModuleGraphNode -> SDoc ppr_node (ModuleNode _deps m) = text "module" <+> ppr_ms m ppr_node (InstantiationNode _uid u) = text "instantiated unit" <+> ppr u ppr_node (LinkNode uid _) = pprPanic "LinkNode should not be in a cycle" (ppr uid) ppr_ms :: ModSummary -> SDoc ppr_ms ms = quotes (ppr (moduleName (ms_mod ms))) <+> (parens (text 
(msHsFilePath ms))) DriverInstantiationNodeInDependencyGeneration node -> mkSimpleDecorated $ vcat [ text "Unexpected backpack instantiation in dependency graph while constructing Makefile:" , nest 2 $ ppr node ] DriverNoConfiguredLLVMToolchain -> mkSimpleDecorated $ text "GHC was not configured with a supported LLVM toolchain" $$ text ("Make sure you have installed LLVM between [" ++ llvmVersionStr supportedLlvmVersionLowerBound ++ " and " ++ llvmVersionStr supportedLlvmVersionUpperBound ++ ") and reinstall GHC to make -fllvm work") diagnosticReason = \case DriverUnknownMessage m -> diagnosticReason m DriverPsHeaderMessage {} -> ErrorWithoutFlag DriverMissingHomeModules{} -> WarningWithFlag Opt_WarnMissingHomeModules DriverUnknownHiddenModules {} -> ErrorWithoutFlag DriverUnknownReexportedModules {} -> ErrorWithoutFlag DriverUnusedPackages{} -> WarningWithFlag Opt_WarnUnusedPackages DriverUnnecessarySourceImports{} -> WarningWithFlag Opt_WarnUnusedImports DriverDuplicatedModuleDeclaration{} -> ErrorWithoutFlag DriverModuleNotFound{} -> ErrorWithoutFlag DriverFileModuleNameMismatch{} -> ErrorWithoutFlag DriverUnexpectedSignature{} -> ErrorWithoutFlag DriverFileNotFound{} -> ErrorWithoutFlag DriverStaticPointersNotSupported -> WarningWithoutFlag DriverBackpackModuleNotFound{} -> ErrorWithoutFlag DriverUserDefinedRuleIgnored{} -> WarningWithoutFlag DriverMixedSafetyImport{} -> ErrorWithoutFlag DriverCannotLoadInterfaceFile{} -> ErrorWithoutFlag DriverInferredSafeModule{} -> WarningWithFlag Opt_WarnSafe DriverMarkedTrustworthyButInferredSafe{} ->WarningWithFlag Opt_WarnTrustworthySafe DriverInferredSafeImport{} -> WarningWithFlag Opt_WarnInferredSafeImports DriverCannotImportUnsafeModule{} -> ErrorWithoutFlag DriverMissingSafeHaskellMode{} -> WarningWithFlag Opt_WarnMissingSafeHaskellMode DriverPackageNotTrusted{} -> ErrorWithoutFlag DriverCannotImportFromUntrustedPackage{} -> ErrorWithoutFlag DriverRedirectedNoMain {} -> ErrorWithoutFlag DriverHomePackagesNotClosed 
{} -> ErrorWithoutFlag DriverInterfaceError reason -> diagnosticReason reason DriverInconsistentDynFlags {} -> WarningWithFlag Opt_WarnInconsistentFlags DriverSafeHaskellIgnoredExtension {} -> WarningWithoutFlag DriverPackageTrustIgnored {} -> WarningWithoutFlag DriverUnrecognisedFlag {} -> WarningWithFlag Opt_WarnUnrecognisedWarningFlags DriverDeprecatedFlag {} -> WarningWithFlag Opt_WarnDeprecatedFlags DriverModuleGraphCycle {} -> ErrorWithoutFlag DriverInstantiationNodeInDependencyGeneration {} -> ErrorWithoutFlag DriverNoConfiguredLLVMToolchain -> ErrorWithoutFlag diagnosticHints = \case DriverUnknownMessage m -> diagnosticHints m DriverPsHeaderMessage psMsg -> diagnosticHints psMsg DriverMissingHomeModules{} -> noHints DriverUnknownHiddenModules {} -> noHints DriverUnknownReexportedModules {} -> noHints DriverUnusedPackages{} -> noHints DriverUnnecessarySourceImports{} -> noHints DriverDuplicatedModuleDeclaration{} -> noHints DriverModuleNotFound{} -> noHints DriverFileModuleNameMismatch{} -> noHints DriverUnexpectedSignature pi_mod_name buildingCabalPackage instantiations -> if buildingCabalPackage == YesBuildingCabalPackage then [SuggestAddSignatureCabalFile pi_mod_name] else [SuggestSignatureInstantiations pi_mod_name (suggestInstantiatedWith pi_mod_name instantiations)] DriverFileNotFound{} -> noHints DriverStaticPointersNotSupported -> noHints DriverBackpackModuleNotFound{} -> noHints DriverUserDefinedRuleIgnored{} -> noHints DriverMixedSafetyImport{} -> noHints DriverCannotLoadInterfaceFile{} -> noHints DriverInferredSafeModule{} -> noHints DriverInferredSafeImport{} -> noHints DriverCannotImportUnsafeModule{} -> noHints DriverMissingSafeHaskellMode{} -> noHints DriverPackageNotTrusted{} -> noHints DriverMarkedTrustworthyButInferredSafe{} -> noHints DriverCannotImportFromUntrustedPackage{} -> noHints DriverRedirectedNoMain {} -> noHints DriverHomePackagesNotClosed {} -> noHints DriverInterfaceError reason -> diagnosticHints reason 
DriverInconsistentDynFlags {} -> noHints DriverSafeHaskellIgnoredExtension {} -> noHints DriverPackageTrustIgnored {} -> noHints DriverUnrecognisedFlag {} -> noHints DriverDeprecatedFlag {} -> noHints DriverModuleGraphCycle {} -> noHints DriverInstantiationNodeInDependencyGeneration {} -> noHints DriverNoConfiguredLLVMToolchain -> noHints diagnosticCode = constructorCode ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Errors/Types.hs0000644000000000000000000003704307346545000022064 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE StandaloneDeriving #-} module GHC.Driver.Errors.Types ( GhcMessage(..) , GhcMessageOpts(..) , DriverMessage(..) , DriverMessageOpts(..) , DriverMessages, PsMessage(PsHeaderMessage) , WarningMessages , ErrorMessages , WarnMsg -- * Constructors , ghcUnknownMessage -- * Utility functions , hoistTcRnMessage , hoistDsMessage , checkBuildingCabalPackage ) where import GHC.Prelude import Data.Bifunctor import Data.Typeable import GHC.Driver.DynFlags (DynFlags, PackageArg, gopt, ReexportedModule) import GHC.Driver.Flags (GeneralFlag (Opt_BuildingCabalPackage)) import GHC.Types.Error import GHC.Unit.Module import GHC.Unit.Module.Graph import GHC.Unit.State import GHC.Parser.Errors.Types ( PsMessage(PsHeaderMessage) ) import GHC.HsToCore.Errors.Types ( DsMessage ) import GHC.Hs.Extension (GhcTc) import Language.Haskell.Syntax.Decls (RuleDecl) import qualified GHC.LanguageExtensions as LangExt import GHC.Generics ( Generic ) import GHC.Tc.Errors.Types import GHC.Iface.Errors.Types -- | A collection of warning messages. -- /INVARIANT/: Each 'GhcMessage' in the collection should have 'SevWarning' severity. type WarningMessages = Messages GhcMessage -- | A collection of error messages. -- /INVARIANT/: Each 'GhcMessage' in the collection should have 'SevError' severity. type ErrorMessages = Messages GhcMessage -- | A single warning message. -- /INVARIANT/: It must have 'SevWarning' severity. 
type WarnMsg = MsgEnvelope GhcMessage {- Note [GhcMessage] ~~~~~~~~~~~~~~~~~~~~ We might need to report diagnostics (error and/or warnings) to the users. The 'GhcMessage' type is the root of the diagnostic hierarchy. It's useful to have a separate type constructor for the different stages of the compilation pipeline. This is not just helpful for tools, as it gives a clear indication on where the error occurred exactly. Furthermore it increases the modularity amongst the different components of GHC (i.e. to avoid having "everything depend on everything else") and allows us to write separate functions that renders the different kind of messages. -} -- | The umbrella type that encompasses all the different messages that GHC -- might output during the different compilation stages. See -- Note [GhcMessage]. data GhcMessage where -- | A message from the parsing phase. GhcPsMessage :: PsMessage -> GhcMessage -- | A message from typecheck/renaming phase. GhcTcRnMessage :: TcRnMessage -> GhcMessage -- | A message from the desugaring (HsToCore) phase. GhcDsMessage :: DsMessage -> GhcMessage -- | A message from the driver. GhcDriverMessage :: DriverMessage -> GhcMessage -- | An \"escape\" hatch which can be used when we don't know the source of -- the message or if the message is not one of the typed ones. The -- 'Diagnostic' and 'Typeable' constraints ensure that if we /know/, at -- pattern-matching time, the originating type, we can attempt a cast and -- access the fully-structured error. This would be the case for a GHC -- plugin that offers a domain-specific error type but that doesn't want to -- place the burden on IDEs/application code to \"know\" it. The -- 'Diagnostic' constraint ensures that worst case scenario we can still -- render this into something which can be eventually converted into a -- 'DecoratedSDoc'. 
GhcUnknownMessage :: (UnknownDiagnostic (DiagnosticOpts GhcMessage)) -> GhcMessage deriving Generic data GhcMessageOpts = GhcMessageOpts { psMessageOpts :: DiagnosticOpts PsMessage , tcMessageOpts :: DiagnosticOpts TcRnMessage , dsMessageOpts :: DiagnosticOpts DsMessage , driverMessageOpts :: DiagnosticOpts DriverMessage } -- | Creates a new 'GhcMessage' out of any diagnostic. This function is also -- provided to ease the integration of #18516 by allowing diagnostics to be -- wrapped into the general (but structured) 'GhcMessage' type, so that the -- conversion can happen gradually. This function should not be needed within -- GHC, as it would typically be used by plugin or library authors (see -- comment for the 'GhcUnknownMessage' type constructor) ghcUnknownMessage :: (DiagnosticOpts a ~ NoDiagnosticOpts, Diagnostic a, Typeable a) => a -> GhcMessage ghcUnknownMessage = GhcUnknownMessage . mkSimpleUnknownDiagnostic -- | Abstracts away the frequent pattern where we are calling 'ioMsgMaybe' on -- the result of 'IO (Messages TcRnMessage, a)'. hoistTcRnMessage :: Monad m => m (Messages TcRnMessage, a) -> m (Messages GhcMessage, a) hoistTcRnMessage = fmap (first (fmap GhcTcRnMessage)) -- | Abstracts away the frequent pattern where we are calling 'ioMsgMaybe' on -- the result of 'IO (Messages DsMessage, a)'. hoistDsMessage :: Monad m => m (Messages DsMessage, a) -> m (Messages GhcMessage, a) hoistDsMessage = fmap (first (fmap GhcDsMessage)) -- | A collection of driver messages type DriverMessages = Messages DriverMessage -- | A message from the driver. data DriverMessage where -- | Simply wraps a generic 'Diagnostic' message @a@. 
DriverUnknownMessage :: UnknownDiagnostic (DiagnosticOpts DriverMessage) -> DriverMessage -- | A parse error in parsing a Haskell file header during dependency -- analysis DriverPsHeaderMessage :: !PsMessage -> DriverMessage {-| DriverMissingHomeModules is a warning (controlled with -Wmissing-home-modules) that arises when running GHC in --make mode when some modules needed for compilation are not included on the command line. For example, if A imports B, `ghc --make A.hs` will cause this warning, while `ghc --make A.hs B.hs` will not. Useful for cabal to ensure GHC won't pick up modules listed neither in 'exposed-modules' nor in 'other-modules'. Test case: warnings/should_compile/MissingMod -} DriverMissingHomeModules :: UnitId -> [ModuleName] -> !BuildingCabalPackage -> DriverMessage {-| DriverUnknown is a warning that arises when a user tries to reexport a module which isn't part of that unit. -} DriverUnknownReexportedModules :: UnitId -> [ReexportedModule] -> DriverMessage {-| DriverUnknownHiddenModules is a warning that arises when a user tries to hide a module which isn't part of that unit. -} DriverUnknownHiddenModules :: UnitId -> [ModuleName] -> DriverMessage {-| DriverUnusedPackages occurs when when package is requested on command line, but was never needed during compilation. Activated by -Wunused-packages. Test cases: warnings/should_compile/UnusedPackages -} DriverUnusedPackages :: [(UnitId, PackageName, Version, PackageArg)] -> DriverMessage {-| DriverUnnecessarySourceImports (controlled with -Wunused-imports) occurs if there are {-# SOURCE #-} imports which are not necessary. See 'warnUnnecessarySourceImports' in 'GHC.Driver.Make'. Test cases: warnings/should_compile/T10637 -} DriverUnnecessarySourceImports :: !ModuleName -> DriverMessage {-| DriverDuplicatedModuleDeclaration occurs if a module 'A' is declared in multiple files. Test cases: None. 
-} DriverDuplicatedModuleDeclaration :: !Module -> [FilePath] -> DriverMessage {-| DriverModuleNotFound occurs if a module 'A' can't be found. Test cases: None. -} DriverModuleNotFound :: !ModuleName -> DriverMessage {-| DriverFileModuleNameMismatch occurs if a module 'A' is defined in a file with a different name. The first field is the name written in the source code; the second argument is the name extracted from the filename. Test cases: module/mod178, /driver/bug1677 -} DriverFileModuleNameMismatch :: !ModuleName -> !ModuleName -> DriverMessage {-| DriverUnexpectedSignature occurs when GHC encounters a module 'A' that imports a signature file which is neither in the 'signatures' section of a '.cabal' file nor in any package in the home modules. Example: -- MyStr.hsig is defined, but not added to 'signatures' in the '.cabal' file. signature MyStr where data Str -- A.hs, which tries to import the signature. module A where import MyStr Test cases: driver/T12955 -} DriverUnexpectedSignature :: !ModuleName -> !BuildingCabalPackage -> GenInstantiations UnitId -> DriverMessage {-| DriverFileNotFound occurs when the input file (e.g. given on the command line) can't be found. Test cases: None. -} DriverFileNotFound :: !FilePath -> DriverMessage {-| DriverStaticPointersNotSupported occurs when the 'StaticPointers' extension is used in an interactive GHCi context. Test cases: ghci/scripts/StaticPtr -} DriverStaticPointersNotSupported :: DriverMessage {-| DriverBackpackModuleNotFound occurs when Backpack can't find a particular module during its dependency analysis. Test cases: - -} DriverBackpackModuleNotFound :: !ModuleName -> DriverMessage {-| DriverUserDefinedRuleIgnored is a warning that occurs when user-defined rules are ignored. This typically happens when Safe Haskell. 
Test cases: tests/safeHaskell/safeInfered/UnsafeWarn05 tests/safeHaskell/safeInfered/UnsafeWarn06 tests/safeHaskell/safeInfered/UnsafeWarn07 tests/safeHaskell/safeInfered/UnsafeInfered11 tests/safeHaskell/safeLanguage/SafeLang03 -} DriverUserDefinedRuleIgnored :: !(RuleDecl GhcTc) -> DriverMessage {-| DriverMixedSafetyImport is an error that occurs when a module is imported both as safe and unsafe. Test cases: tests/safeHaskell/safeInfered/Mixed03 tests/safeHaskell/safeInfered/Mixed02 -} DriverMixedSafetyImport :: !ModuleName -> DriverMessage {-| DriverCannotLoadInterfaceFile is an error that occurs when we cannot load the interface file for a particular module. This can happen for example in the context of Safe Haskell, when we have to load a module to check if it can be safely imported. Test cases: None. -} DriverCannotLoadInterfaceFile :: !Module -> DriverMessage {-| DriverInferredSafeImport is a warning (controlled by the Opt_WarnSafe flag) that occurs when a module is inferred safe. Test cases: None. -} DriverInferredSafeModule :: !Module -> DriverMessage {-| DriverMarkedTrustworthyButInferredSafe is a warning (controlled by the Opt_WarnTrustworthySafe flag) that occurs when a module is marked trustworthy in SafeHaskell but it has been inferred safe. Test cases: tests/safeHaskell/safeInfered/TrustworthySafe02 tests/safeHaskell/safeInfered/TrustworthySafe03 -} DriverMarkedTrustworthyButInferredSafe :: !Module -> DriverMessage {-| DriverInferredSafeImport is a warning (controlled by the Opt_WarnInferredSafeImports flag) that occurs when a safe-inferred module is imported from a safe module. Test cases: None. -} DriverInferredSafeImport :: !Module -> DriverMessage {-| DriverCannotImportUnsafeModule is an error that occurs when an usafe module is being imported from a safe one. Test cases: None. 
-} DriverCannotImportUnsafeModule :: !Module -> DriverMessage {-| DriverMissingSafeHaskellMode is a warning (controlled by the Opt_WarnMissingSafeHaskellMode flag) that occurs when a module is using SafeHaskell features but SafeHaskell mode is not enabled. Test cases: None. -} DriverMissingSafeHaskellMode :: !Module -> DriverMessage {-| DriverPackageNotTrusted is an error that occurs when a package is required to be trusted but it isn't. Test cases: tests/safeHaskell/check/Check01 tests/safeHaskell/check/Check08 tests/safeHaskell/check/Check06 tests/safeHaskell/check/pkg01/ImpSafeOnly09 tests/safeHaskell/check/pkg01/ImpSafe03 tests/safeHaskell/check/pkg01/ImpSafeOnly07 tests/safeHaskell/check/pkg01/ImpSafeOnly08 -} DriverPackageNotTrusted :: !UnitState -> !UnitId -> DriverMessage {-| DriverCannotImportFromUntrustedPackage is an error that occurs in the context of Safe Haskell when trying to import a module coming from an untrusted package. Test cases: tests/safeHaskell/check/Check09 tests/safeHaskell/check/pkg01/ImpSafe01 tests/safeHaskell/check/pkg01/ImpSafe04 tests/safeHaskell/check/pkg01/ImpSafeOnly03 tests/safeHaskell/check/pkg01/ImpSafeOnly05 tests/safeHaskell/flags/SafeFlags17 tests/safeHaskell/flags/SafeFlags22 tests/safeHaskell/flags/SafeFlags23 tests/safeHaskell/ghci/p11 tests/safeHaskell/ghci/p12 tests/safeHaskell/ghci/p17 tests/safeHaskell/ghci/p3 tests/safeHaskell/safeInfered/UnsafeInfered01 tests/safeHaskell/safeInfered/UnsafeInfered02 tests/safeHaskell/safeInfered/UnsafeInfered02 tests/safeHaskell/safeInfered/UnsafeInfered03 tests/safeHaskell/safeInfered/UnsafeInfered05 tests/safeHaskell/safeInfered/UnsafeInfered06 tests/safeHaskell/safeInfered/UnsafeInfered09 tests/safeHaskell/safeInfered/UnsafeInfered10 tests/safeHaskell/safeInfered/UnsafeInfered11 tests/safeHaskell/safeInfered/UnsafeWarn01 tests/safeHaskell/safeInfered/UnsafeWarn03 tests/safeHaskell/safeInfered/UnsafeWarn04 tests/safeHaskell/safeInfered/UnsafeWarn05 
tests/safeHaskell/unsafeLibs/BadImport01 tests/safeHaskell/unsafeLibs/BadImport06 tests/safeHaskell/unsafeLibs/BadImport07 tests/safeHaskell/unsafeLibs/BadImport08 tests/safeHaskell/unsafeLibs/BadImport09 tests/safeHaskell/unsafeLibs/Dep05 tests/safeHaskell/unsafeLibs/Dep06 tests/safeHaskell/unsafeLibs/Dep07 tests/safeHaskell/unsafeLibs/Dep08 tests/safeHaskell/unsafeLibs/Dep09 tests/safeHaskell/unsafeLibs/Dep10 -} DriverCannotImportFromUntrustedPackage :: !UnitState -> !Module -> DriverMessage DriverRedirectedNoMain :: !ModuleName -> DriverMessage DriverHomePackagesNotClosed :: ![UnitId] -> DriverMessage DriverInterfaceError :: !IfaceMessage -> DriverMessage -- TODO: Add structure messages rather than a String DriverInconsistentDynFlags :: String -> DriverMessage DriverSafeHaskellIgnoredExtension :: !LangExt.Extension -> DriverMessage DriverPackageTrustIgnored :: DriverMessage DriverUnrecognisedFlag :: String -> DriverMessage DriverDeprecatedFlag :: String -> String -> DriverMessage {-| DriverModuleGraphCycle is an error that occurs if the module graph contains cyclic imports. Test cases: tests/backpack/should_fail/bkpfail51 tests/driver/T20459 tests/driver/T24196/T24196 tests/driver/T24275/T24275 -} DriverModuleGraphCycle :: [ModuleGraphNode] -> DriverMessage {- | DriverInstantiationNodeInDependencyGeneration is an error that occurs if the module graph used for dependency generation contains Backpack 'InstantiationNode's. -} DriverInstantiationNodeInDependencyGeneration :: InstantiatedUnit -> DriverMessage {-| DriverNoConfiguredLLVMToolchain is an error that occurs if there is no LLVM toolchain configured but -fllvm is passed as an option to the compiler. Test cases: None. 
-} DriverNoConfiguredLLVMToolchain :: DriverMessage deriving instance Generic DriverMessage data DriverMessageOpts = DriverMessageOpts { psDiagnosticOpts :: DiagnosticOpts PsMessage , ifaceDiagnosticOpts :: DiagnosticOpts IfaceMessage } -- | Checks if we are building a cabal package by consulting the 'DynFlags'. checkBuildingCabalPackage :: DynFlags -> BuildingCabalPackage checkBuildingCabalPackage dflags = if gopt Opt_BuildingCabalPackage dflags then YesBuildingCabalPackage else NoBuildingCabalPackage ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Flags.hs0000644000000000000000000016272607346545000020547 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Driver.Flags ( DumpFlag(..) , getDumpFlagFrom , enabledIfVerbose , GeneralFlag(..) , Language(..) , defaultLanguage , optimisationFlags , codeGenFlags -- * Warnings , WarningGroup(..) , warningGroupName , warningGroupFlags , warningGroupIncludesExtendedWarnings , WarningFlag(..) , warnFlagNames , warningGroups , warningHierarchies , smallestWarningGroups , smallestWarningGroupsForCategory , standardWarnings , minusWOpts , minusWallOpts , minusWeverythingOpts , minusWcompatOpts , unusedBindsFlags , TurnOnFlag , turnOn , turnOff , impliedXFlags , validHoleFitsImpliedGFlags , impliedGFlags , impliedOffGFlags , glasgowExtsFlags , ExtensionDeprecation(..) , Deprecation(..) , extensionDeprecation , deprecation , extensionNames , extensionName ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Data.EnumSet as EnumSet import Control.DeepSeq import Control.Monad (guard) import Data.List.NonEmpty (NonEmpty(..)) import Data.Maybe (fromMaybe,mapMaybe) import qualified GHC.LanguageExtensions as LangExt data Language = Haskell98 | Haskell2010 | GHC2021 | GHC2024 deriving (Eq, Enum, Show, Bounded) -- | The default Language is used if one is not specified explicitly, by both -- GHC and GHCi. 
defaultLanguage :: Language
defaultLanguage = GHC2021

instance Outputable Language where
  ppr = text . show

-- Serialised via 'fromEnum'/'toEnum', so the constructor order of
-- 'Language' must stay stable across releases that share interface files.
instance Binary Language where
  put_ bh = put_ bh . fromEnum
  get bh = toEnum <$> get bh

instance NFData Language where
  rnf x = x `seq` ()

type TurnOnFlag = Bool   -- True  <=> we are turning the flag on
                         -- False <=> we are turning the flag off

turnOn  :: TurnOnFlag; turnOn  = True
turnOff :: TurnOnFlag; turnOff = False

data Deprecation = NotDeprecated | Deprecated deriving (Eq, Ord)

-- | How (if at all) an extension flag is deprecated.
data ExtensionDeprecation
  = ExtensionNotDeprecated
  | ExtensionDeprecatedFor [LangExt.Extension]
      -- ^ Deprecated in favour of the given extensions
  | ExtensionFlagDeprecatedCond TurnOnFlag String
      -- ^ Deprecated only when toggled in the given direction,
      -- with an explanatory message
  | ExtensionFlagDeprecated String
      -- ^ Unconditionally deprecated, with an explanatory message
  deriving Eq

-- | Always returns 'Deprecated' even when the flag is
-- only conditionally deprecated.
deprecation :: ExtensionDeprecation -> Deprecation
deprecation ExtensionNotDeprecated = NotDeprecated
deprecation _ = Deprecated

-- | The deprecation status of each extension's own (canonical) name.
extensionDeprecation :: LangExt.Extension -> ExtensionDeprecation
extensionDeprecation = \case
  LangExt.TypeInType            -> ExtensionDeprecatedFor [LangExt.DataKinds, LangExt.PolyKinds]
  LangExt.NullaryTypeClasses    -> ExtensionDeprecatedFor [LangExt.MultiParamTypeClasses]
  LangExt.RelaxedPolyRec        -> ExtensionFlagDeprecatedCond turnOff
    "You can't turn off RelaxedPolyRec any more"
  LangExt.DatatypeContexts      -> ExtensionFlagDeprecatedCond turnOn
    "It was widely considered a misfeature, and has been removed from the Haskell language."
  LangExt.AutoDeriveTypeable    -> ExtensionFlagDeprecatedCond turnOn
    "Typeable instances are created automatically for all types since GHC 8.2."
  LangExt.OverlappingInstances  -> ExtensionFlagDeprecated
    "instead use per-instance pragmas OVERLAPPING/OVERLAPPABLE/OVERLAPS"
  _ -> ExtensionNotDeprecated

-- | The canonical user-visible name of each extension.
-- Exhaustive: adding a constructor to 'LangExt.Extension' requires a case here.
extensionName :: LangExt.Extension -> String
extensionName = \case
  LangExt.Cpp -> "CPP"
  LangExt.OverlappingInstances -> "OverlappingInstances"
  LangExt.UndecidableInstances -> "UndecidableInstances"
  LangExt.IncoherentInstances -> "IncoherentInstances"
  LangExt.UndecidableSuperClasses -> "UndecidableSuperClasses"
  LangExt.MonomorphismRestriction -> "MonomorphismRestriction"
  LangExt.MonoLocalBinds -> "MonoLocalBinds"
  LangExt.DeepSubsumption -> "DeepSubsumption"
  LangExt.RelaxedPolyRec -> "RelaxedPolyRec"            -- Deprecated
  LangExt.ExtendedDefaultRules -> "ExtendedDefaultRules" -- Use GHC's extended rules for defaulting
  LangExt.NamedDefaults -> "NamedDefaults"
  LangExt.ForeignFunctionInterface -> "ForeignFunctionInterface"
  LangExt.UnliftedFFITypes -> "UnliftedFFITypes"
  LangExt.InterruptibleFFI -> "InterruptibleFFI"
  LangExt.CApiFFI -> "CApiFFI"
  LangExt.GHCForeignImportPrim -> "GHCForeignImportPrim"
  LangExt.JavaScriptFFI -> "JavaScriptFFI"
  LangExt.ParallelArrays -> "ParallelArrays"  -- Syntactic support for parallel arrays
  LangExt.Arrows -> "Arrows"                  -- Arrow-notation syntax
  LangExt.TemplateHaskell -> "TemplateHaskell"
  LangExt.TemplateHaskellQuotes -> "TemplateHaskellQuotes" -- subset of TH supported by stage1, no splice
  LangExt.QualifiedDo -> "QualifiedDo"
  LangExt.QuasiQuotes -> "QuasiQuotes"
  LangExt.ImplicitParams -> "ImplicitParams"
  LangExt.ImplicitPrelude -> "ImplicitPrelude"
  LangExt.ScopedTypeVariables -> "ScopedTypeVariables"
  LangExt.AllowAmbiguousTypes -> "AllowAmbiguousTypes"
  LangExt.UnboxedTuples -> "UnboxedTuples"
  LangExt.UnboxedSums -> "UnboxedSums"
  LangExt.UnliftedNewtypes -> "UnliftedNewtypes"
  LangExt.UnliftedDatatypes -> "UnliftedDatatypes"
  LangExt.BangPatterns -> "BangPatterns"
  LangExt.TypeFamilies -> "TypeFamilies"
  LangExt.TypeFamilyDependencies -> "TypeFamilyDependencies"
  LangExt.TypeInType -> "TypeInType"          -- Deprecated
  LangExt.OverloadedStrings -> "OverloadedStrings"
  LangExt.OverloadedLists -> "OverloadedLists"
  LangExt.NumDecimals -> "NumDecimals"
  LangExt.OrPatterns -> "OrPatterns"
  LangExt.DisambiguateRecordFields -> "DisambiguateRecordFields"
  LangExt.RecordWildCards -> "RecordWildCards"
  LangExt.NamedFieldPuns -> "NamedFieldPuns"
  LangExt.ViewPatterns -> "ViewPatterns"
  LangExt.GADTs -> "GADTs"
  LangExt.GADTSyntax -> "GADTSyntax"
  LangExt.NPlusKPatterns -> "NPlusKPatterns"
  LangExt.DoAndIfThenElse -> "DoAndIfThenElse"
  LangExt.BlockArguments -> "BlockArguments"
  LangExt.RebindableSyntax -> "RebindableSyntax"
  LangExt.ConstraintKinds -> "ConstraintKinds"
  LangExt.PolyKinds -> "PolyKinds"            -- Kind polymorphism
  LangExt.DataKinds -> "DataKinds"            -- Datatype promotion
  LangExt.TypeData -> "TypeData"              -- allow @type data@ definitions
  LangExt.InstanceSigs -> "InstanceSigs"
  LangExt.ApplicativeDo -> "ApplicativeDo"
  LangExt.LinearTypes -> "LinearTypes"
  LangExt.RequiredTypeArguments -> "RequiredTypeArguments" -- Visible forall (VDQ) in types of terms
  LangExt.StandaloneDeriving -> "StandaloneDeriving"
  LangExt.DeriveDataTypeable -> "DeriveDataTypeable"
  LangExt.AutoDeriveTypeable -> "AutoDeriveTypeable"  -- Automatic derivation of Typeable
  LangExt.DeriveFunctor -> "DeriveFunctor"
  LangExt.DeriveTraversable -> "DeriveTraversable"
  LangExt.DeriveFoldable -> "DeriveFoldable"
  LangExt.DeriveGeneric -> "DeriveGeneric"            -- Allow deriving Generic/1
  LangExt.DefaultSignatures -> "DefaultSignatures"    -- Allow extra signatures for defmeths
  LangExt.DeriveAnyClass -> "DeriveAnyClass"          -- Allow deriving any class
  LangExt.DeriveLift -> "DeriveLift"                  -- Allow deriving Lift
  LangExt.DerivingStrategies -> "DerivingStrategies"
  LangExt.DerivingVia -> "DerivingVia"                -- Derive through equal representation
  LangExt.TypeSynonymInstances -> "TypeSynonymInstances"
  LangExt.FlexibleContexts -> "FlexibleContexts"
  LangExt.FlexibleInstances -> "FlexibleInstances"
  LangExt.ConstrainedClassMethods -> "ConstrainedClassMethods"
  LangExt.MultiParamTypeClasses -> "MultiParamTypeClasses"
  LangExt.NullaryTypeClasses -> "NullaryTypeClasses"
  LangExt.FunctionalDependencies -> "FunctionalDependencies"
  LangExt.UnicodeSyntax -> "UnicodeSyntax"
  LangExt.ExistentialQuantification -> "ExistentialQuantification"
  LangExt.MagicHash -> "MagicHash"
  LangExt.EmptyDataDecls -> "EmptyDataDecls"
  LangExt.KindSignatures -> "KindSignatures"
  LangExt.RoleAnnotations -> "RoleAnnotations"
  LangExt.ParallelListComp -> "ParallelListComp"
  LangExt.TransformListComp -> "TransformListComp"
  LangExt.MonadComprehensions -> "MonadComprehensions"
  LangExt.GeneralizedNewtypeDeriving -> "GeneralizedNewtypeDeriving"
  LangExt.RecursiveDo -> "RecursiveDo"
  LangExt.PostfixOperators -> "PostfixOperators"
  LangExt.TupleSections -> "TupleSections"
  LangExt.PatternGuards -> "PatternGuards"
  LangExt.LiberalTypeSynonyms -> "LiberalTypeSynonyms"
  LangExt.RankNTypes -> "RankNTypes"
  LangExt.ImpredicativeTypes -> "ImpredicativeTypes"
  LangExt.TypeOperators -> "TypeOperators"
  LangExt.ExplicitNamespaces -> "ExplicitNamespaces"
  LangExt.PackageImports -> "PackageImports"
  LangExt.ExplicitForAll -> "ExplicitForAll"
  LangExt.AlternativeLayoutRule -> "AlternativeLayoutRule"
  LangExt.AlternativeLayoutRuleTransitional -> "AlternativeLayoutRuleTransitional"
  LangExt.DatatypeContexts -> "DatatypeContexts"
  LangExt.NondecreasingIndentation -> "NondecreasingIndentation"
  LangExt.RelaxedLayout -> "RelaxedLayout"
  LangExt.TraditionalRecordSyntax -> "TraditionalRecordSyntax"
  LangExt.LambdaCase -> "LambdaCase"
  LangExt.MultiWayIf -> "MultiWayIf"
  LangExt.BinaryLiterals -> "BinaryLiterals"
  LangExt.NegativeLiterals -> "NegativeLiterals"
  LangExt.HexFloatLiterals -> "HexFloatLiterals"
  LangExt.DuplicateRecordFields -> "DuplicateRecordFields"
  LangExt.OverloadedLabels -> "OverloadedLabels"
  LangExt.EmptyCase -> "EmptyCase"
  LangExt.PatternSynonyms -> "PatternSynonyms"
  LangExt.PartialTypeSignatures -> "PartialTypeSignatures"
  LangExt.NamedWildCards -> "NamedWildCards"
  LangExt.StaticPointers -> "StaticPointers"
  LangExt.TypeApplications -> "TypeApplications"
  LangExt.Strict -> "Strict"
  LangExt.StrictData -> "StrictData"
  LangExt.EmptyDataDeriving -> "EmptyDataDeriving"
  LangExt.NumericUnderscores -> "NumericUnderscores"
  LangExt.QuantifiedConstraints -> "QuantifiedConstraints"
  LangExt.StarIsType -> "StarIsType"
  LangExt.ImportQualifiedPost -> "ImportQualifiedPost"
  LangExt.CUSKs -> "CUSKs"
  LangExt.StandaloneKindSignatures -> "StandaloneKindSignatures"
  LangExt.LexicalNegation -> "LexicalNegation"
  LangExt.FieldSelectors -> "FieldSelectors"
  LangExt.OverloadedRecordDot -> "OverloadedRecordDot"
  LangExt.OverloadedRecordUpdate -> "OverloadedRecordUpdate"
  LangExt.TypeAbstractions -> "TypeAbstractions"
  LangExt.ExtendedLiterals -> "ExtendedLiterals"
  LangExt.ListTuplePuns -> "ListTuplePuns"
  LangExt.MultilineStrings -> "MultilineStrings"

-- | Is this extension known by any other names? For example
-- -XGeneralizedNewtypeDeriving is accepted
extensionAlternateNames :: LangExt.Extension -> [String]
extensionAlternateNames = \case
  LangExt.GeneralizedNewtypeDeriving -> ["GeneralisedNewtypeDeriving"]
  LangExt.RankNTypes -> ["Rank2Types", "PolymorphicComponents"]
  _ -> []

-- | Old names that are still accepted for an extension, but reported as
-- deprecated spellings of it (see 'extensionNames').
extensionDeprecatedNames :: LangExt.Extension -> [String]
extensionDeprecatedNames = \case
  LangExt.RecursiveDo -> ["DoRec"]
  LangExt.NamedFieldPuns -> ["RecordPuns"]
  LangExt.ScopedTypeVariables -> ["PatternSignatures"]
  _ -> []

-- | All the names by which an extension is known.
extensionNames :: LangExt.Extension -> [ (ExtensionDeprecation, String) ]
extensionNames ext =
     -- Canonical + alternate names carry the extension's own deprecation status;
     -- the deprecated spellings are always flagged as deprecated for @ext@.
     mk (extensionDeprecation ext) (extensionName ext : extensionAlternateNames ext)
  ++ mk (ExtensionDeprecatedFor [ext]) (extensionDeprecatedNames ext)
  where
    mk depr = map (\name -> (depr, name))

-- | @(a, t, b)@ means: toggling extension @a@ in direction @t@ also toggles
-- extension @b@ on (or off, where noted).
impliedXFlags :: [(LangExt.Extension, TurnOnFlag, LangExt.Extension)]
impliedXFlags
-- See Note [Updating flag description in the User's Guide]
  = [ (LangExt.RankNTypes,                turnOn, LangExt.ExplicitForAll)
    , (LangExt.QuantifiedConstraints,     turnOn, LangExt.ExplicitForAll)
    , (LangExt.ScopedTypeVariables,       turnOn, LangExt.ExplicitForAll)
    , (LangExt.LiberalTypeSynonyms,       turnOn, LangExt.ExplicitForAll)
    , (LangExt.ExistentialQuantification, turnOn, LangExt.ExplicitForAll)
    , (LangExt.FlexibleInstances,         turnOn, LangExt.TypeSynonymInstances)
    , (LangExt.FunctionalDependencies,    turnOn, LangExt.MultiParamTypeClasses)
    , (LangExt.MultiParamTypeClasses,     turnOn, LangExt.ConstrainedClassMethods)  -- c.f. #7854
    , (LangExt.TypeFamilyDependencies,    turnOn, LangExt.TypeFamilies)

    , (LangExt.RebindableSyntax, turnOff, LangExt.ImplicitPrelude)      -- NB: turn off!

    , (LangExt.DerivingVia, turnOn, LangExt.DerivingStrategies)

    , (LangExt.GADTs,            turnOn, LangExt.GADTSyntax)
    , (LangExt.GADTs,            turnOn, LangExt.MonoLocalBinds)
    , (LangExt.TypeFamilies,     turnOn, LangExt.MonoLocalBinds)

    , (LangExt.TypeFamilies,     turnOn, LangExt.KindSignatures)  -- Type families use kind signatures
    , (LangExt.PolyKinds,        turnOn, LangExt.KindSignatures)  -- Ditto polymorphic kinds

    -- TypeInType is now just a synonym for a couple of other extensions.
    , (LangExt.TypeInType,       turnOn, LangExt.DataKinds)
    , (LangExt.TypeInType,       turnOn, LangExt.PolyKinds)
    , (LangExt.TypeInType,       turnOn, LangExt.KindSignatures)

    -- Standalone kind signatures are a replacement for CUSKs.
    , (LangExt.StandaloneKindSignatures, turnOff, LangExt.CUSKs)

    -- AutoDeriveTypeable is not very useful without DeriveDataTypeable
    , (LangExt.AutoDeriveTypeable, turnOn, LangExt.DeriveDataTypeable)

    -- We turn this on so that we can export associated type
    -- type synonyms in subordinates (e.g. MyClass(type AssocType))
    , (LangExt.TypeFamilies,     turnOn, LangExt.ExplicitNamespaces)
    , (LangExt.TypeOperators,    turnOn, LangExt.ExplicitNamespaces)

    , (LangExt.ImpredicativeTypes,  turnOn, LangExt.RankNTypes)

    -- Record wild-cards implies field disambiguation
    -- Otherwise if you write (C {..}) you may well get
    -- stuff like " 'a' not in scope ", which is a bit silly
    -- if the compiler has just filled in field 'a' of constructor 'C'
    , (LangExt.RecordWildCards,     turnOn, LangExt.DisambiguateRecordFields)

    , (LangExt.ParallelArrays, turnOn, LangExt.ParallelListComp)

    , (LangExt.JavaScriptFFI, turnOn, LangExt.InterruptibleFFI)

    , (LangExt.DeriveTraversable, turnOn, LangExt.DeriveFunctor)
    , (LangExt.DeriveTraversable, turnOn, LangExt.DeriveFoldable)

    -- Duplicate record fields require field disambiguation
    , (LangExt.DuplicateRecordFields, turnOn, LangExt.DisambiguateRecordFields)

    , (LangExt.TemplateHaskell, turnOn, LangExt.TemplateHaskellQuotes)
    , (LangExt.Strict, turnOn, LangExt.StrictData)

    -- Historically only UnboxedTuples was required for unboxed sums to work.
    -- To avoid breaking code, we make UnboxedTuples imply UnboxedSums.
    , (LangExt.UnboxedTuples, turnOn, LangExt.UnboxedSums)

    -- The extensions needed to declare an H98 unlifted data type
    , (LangExt.UnliftedDatatypes, turnOn, LangExt.DataKinds)
    , (LangExt.UnliftedDatatypes, turnOn, LangExt.StandaloneKindSignatures)

    -- See Note [Non-variable pattern bindings aren't linear] in GHC.Tc.Gen.Bind
    , (LangExt.LinearTypes, turnOn, LangExt.MonoLocalBinds)
    ]

-- | Implications among the valid-hole-fit display flags (see the
-- @Opt_*HoleFits@ constructors of 'GeneralFlag').
validHoleFitsImpliedGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
validHoleFitsImpliedGFlags
  = [ (Opt_UnclutterValidHoleFits,    turnOff, Opt_ShowTypeAppOfHoleFits)
    , (Opt_UnclutterValidHoleFits,    turnOff, Opt_ShowTypeAppVarsOfHoleFits)
    , (Opt_UnclutterValidHoleFits,    turnOff, Opt_ShowDocsOfHoleFits)
    , (Opt_ShowTypeAppVarsOfHoleFits, turnOff, Opt_ShowTypeAppOfHoleFits)
    , (Opt_UnclutterValidHoleFits,    turnOff, Opt_ShowProvOfHoleFits) ]

-- General flags that are switched on/off when other general flags are switched
-- on
impliedGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
impliedGFlags = [(Opt_DeferTypeErrors, turnOn, Opt_DeferTypedHoles)
                ,(Opt_DeferTypeErrors, turnOn, Opt_DeferOutOfScopeVariables)
                ,(Opt_DoLinearCoreLinting, turnOn, Opt_DoCoreLinting)
                ,(Opt_Strictness, turnOn, Opt_WorkerWrapper)
                ,(Opt_WriteIfSimplifiedCore, turnOn, Opt_WriteInterface)
                ,(Opt_ByteCodeAndObjectCode, turnOn, Opt_WriteIfSimplifiedCore)
                ,(Opt_InfoTableMap, turnOn, Opt_InfoTableMapWithStack)
                ,(Opt_InfoTableMap, turnOn, Opt_InfoTableMapWithFallback)
                ] ++ validHoleFitsImpliedGFlags

-- General flags that are switched on/off when other general flags are switched
-- off
impliedOffGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
impliedOffGFlags = [(Opt_Strictness, turnOff, Opt_WorkerWrapper)]

-- Please keep what_glasgow_exts_does.rst up to date with this list
glasgowExtsFlags :: [LangExt.Extension]
glasgowExtsFlags = [
             LangExt.ConstrainedClassMethods
           , LangExt.DeriveDataTypeable
           , LangExt.DeriveFoldable
           , LangExt.DeriveFunctor
           , LangExt.DeriveGeneric
           , LangExt.DeriveTraversable
           , LangExt.EmptyDataDecls
           , LangExt.ExistentialQuantification
           , LangExt.ExplicitNamespaces
           , LangExt.FlexibleContexts
           , LangExt.FlexibleInstances
           , LangExt.ForeignFunctionInterface
           , LangExt.FunctionalDependencies
           , LangExt.GeneralizedNewtypeDeriving
           , LangExt.ImplicitParams
           , LangExt.KindSignatures
           , LangExt.LiberalTypeSynonyms
           , LangExt.MagicHash
           , LangExt.MultiParamTypeClasses
           , LangExt.ParallelListComp
           , LangExt.PatternGuards
           , LangExt.PostfixOperators
           , LangExt.RankNTypes
           , LangExt.RecursiveDo
           , LangExt.ScopedTypeVariables
           , LangExt.StandaloneDeriving
           , LangExt.TypeOperators
           , LangExt.TypeSynonymInstances
           , LangExt.UnboxedTuples
           , LangExt.UnicodeSyntax
           , LangExt.UnliftedFFITypes ]

-- | Debugging flags
data DumpFlag
-- See Note [Updating flag description in the User's Guide] in GHC.Driver.Session
-- NB: derives 'Enum', so constructor order matters to anything that
-- serialises these by index.

   -- debugging flags
   = Opt_D_dump_cmm
   | Opt_D_dump_cmm_from_stg
   | Opt_D_dump_cmm_raw
   | Opt_D_dump_cmm_verbose_by_proc
   -- All of the cmm subflags (there are a lot!) automatically
   -- enabled if you run -ddump-cmm-verbose-by-proc
   -- Each flag corresponds to exact stage of Cmm pipeline.
   | Opt_D_dump_cmm_verbose
   -- same as -ddump-cmm-verbose-by-proc but writes each stage
   -- to a separate file (if used with -ddump-to-file)
   | Opt_D_dump_cmm_cfg
   | Opt_D_dump_cmm_cbe
   | Opt_D_dump_cmm_switch
   | Opt_D_dump_cmm_proc
   | Opt_D_dump_cmm_sp
   | Opt_D_dump_cmm_sink
   | Opt_D_dump_cmm_caf
   | Opt_D_dump_cmm_procmap
   | Opt_D_dump_cmm_split
   | Opt_D_dump_cmm_info
   | Opt_D_dump_cmm_cps
   | Opt_D_dump_cmm_thread_sanitizer
   -- end cmm subflags
   | Opt_D_dump_cfg_weights -- ^ Dump the cfg used for block layout.
   | Opt_D_dump_asm
   | Opt_D_dump_asm_native
   | Opt_D_dump_asm_liveness
   | Opt_D_dump_asm_regalloc
   | Opt_D_dump_asm_regalloc_stages
   | Opt_D_dump_asm_conflicts
   | Opt_D_dump_asm_stats
   | Opt_D_dump_c_backend
   | Opt_D_dump_llvm
   | Opt_D_dump_js
   | Opt_D_dump_core_stats
   | Opt_D_dump_deriv
   | Opt_D_dump_ds
   | Opt_D_dump_ds_preopt
   | Opt_D_dump_foreign
   | Opt_D_dump_inlinings
   | Opt_D_dump_verbose_inlinings
   | Opt_D_dump_rule_firings
   | Opt_D_dump_rule_rewrites
   | Opt_D_dump_simpl_trace
   | Opt_D_dump_occur_anal
   | Opt_D_dump_parsed
   | Opt_D_dump_parsed_ast
   | Opt_D_dump_rn
   | Opt_D_dump_rn_ast
   | Opt_D_dump_simpl
   | Opt_D_dump_simpl_iterations
   | Opt_D_dump_spec
   | Opt_D_dump_spec_constr
   | Opt_D_dump_prep
   | Opt_D_dump_late_cc
   | Opt_D_dump_stg_from_core -- ^ Initial STG (CoreToStg output)
   | Opt_D_dump_stg_unarised  -- ^ STG after unarise
   | Opt_D_dump_stg_cg        -- ^ STG (after stg2stg)
   | Opt_D_dump_stg_tags      -- ^ Result of tag inference analysis.
   | Opt_D_dump_stg_final     -- ^ Final STG (before cmm gen)
   | Opt_D_dump_call_arity
   | Opt_D_dump_exitify
   | Opt_D_dump_dmdanal
   | Opt_D_dump_dmd_signatures
   | Opt_D_dump_cpranal
   | Opt_D_dump_cpr_signatures
   | Opt_D_dump_tc
   | Opt_D_dump_tc_ast
   | Opt_D_dump_hie
   | Opt_D_dump_types
   | Opt_D_dump_rules
   | Opt_D_dump_cse
   | Opt_D_dump_float_out
   | Opt_D_dump_float_in
   | Opt_D_dump_liberate_case
   | Opt_D_dump_static_argument_transformation
   | Opt_D_dump_worker_wrapper
   | Opt_D_dump_rn_trace
   | Opt_D_dump_rn_stats
   | Opt_D_dump_opt_cmm
   | Opt_D_dump_simpl_stats
   | Opt_D_dump_cs_trace -- Constraint solver in type checker
   | Opt_D_dump_tc_trace
   | Opt_D_dump_ec_trace -- Pattern match exhaustiveness checker
   | Opt_D_dump_if_trace
   | Opt_D_dump_splices
   | Opt_D_th_dec_file
   | Opt_D_dump_BCOs
   | Opt_D_dump_ticked
   | Opt_D_dump_rtti
   | Opt_D_source_stats
   | Opt_D_verbose_stg2stg
   | Opt_D_dump_hi
   | Opt_D_dump_hi_diffs
   | Opt_D_dump_mod_cycles
   | Opt_D_dump_mod_map
   | Opt_D_dump_timings
   | Opt_D_dump_view_pattern_commoning
   | Opt_D_verbose_core2core
   | Opt_D_dump_debug
   | Opt_D_dump_json
   | Opt_D_ppr_debug
   | Opt_D_no_debug_output
   | Opt_D_dump_faststrings
   | Opt_D_faststring_stats
   | Opt_D_ipe_stats
   deriving (Eq, Show, Enum)

-- | Helper function to query whether a given `DumpFlag` is enabled or not.
getDumpFlagFrom
  :: (a -> Int)              -- ^ Getter for verbosity setting
  -> (a -> EnumSet DumpFlag) -- ^ Getter for the set of enabled dump flags
  -> DumpFlag -> a -> Bool
getDumpFlagFrom getVerbosity getFlags f x
  =  (f `EnumSet.member` getFlags x)
  || (getVerbosity x >= 4 && enabledIfVerbose f)

-- | Is the flag implicitly enabled when the verbosity is high enough?
enabledIfVerbose :: DumpFlag -> Bool
enabledIfVerbose Opt_D_dump_tc_trace               = False
enabledIfVerbose Opt_D_dump_rn_trace               = False
enabledIfVerbose Opt_D_dump_cs_trace               = False
enabledIfVerbose Opt_D_dump_if_trace               = False
enabledIfVerbose Opt_D_dump_tc                     = False
enabledIfVerbose Opt_D_dump_rn                     = False
enabledIfVerbose Opt_D_dump_rn_stats               = False
enabledIfVerbose Opt_D_dump_hi_diffs               = False
enabledIfVerbose Opt_D_verbose_core2core           = False
enabledIfVerbose Opt_D_verbose_stg2stg             = False
enabledIfVerbose Opt_D_dump_splices                = False
enabledIfVerbose Opt_D_th_dec_file                 = False
enabledIfVerbose Opt_D_dump_rule_firings           = False
enabledIfVerbose Opt_D_dump_rule_rewrites          = False
enabledIfVerbose Opt_D_dump_simpl_trace            = False
enabledIfVerbose Opt_D_dump_rtti                   = False
enabledIfVerbose Opt_D_dump_inlinings              = False
enabledIfVerbose Opt_D_dump_verbose_inlinings      = False
enabledIfVerbose Opt_D_dump_core_stats             = False
enabledIfVerbose Opt_D_dump_asm_stats              = False
enabledIfVerbose Opt_D_dump_types                  = False
enabledIfVerbose Opt_D_dump_simpl_iterations       = False
enabledIfVerbose Opt_D_dump_ticked                 = False
enabledIfVerbose Opt_D_dump_view_pattern_commoning = False
enabledIfVerbose Opt_D_dump_mod_cycles             = False
enabledIfVerbose Opt_D_dump_mod_map                = False
enabledIfVerbose Opt_D_dump_ec_trace               = False
enabledIfVerbose _                                 = True

-- | Enumerates the simple on-or-off dynamic flags
data GeneralFlag
-- See Note [Updating flag description in the User's Guide] in GHC.Driver.Session
-- NB: derives 'Enum', so constructor order matters to anything that
-- serialises these by index.

   = Opt_DumpToFile                     -- ^ Append dump output to files instead of stdout.
   | Opt_DumpWithWays                   -- ^ Use foo.ways. instead of foo.
   | Opt_D_dump_minimal_imports
   | Opt_DoCoreLinting
   | Opt_DoLinearCoreLinting
   | Opt_DoStgLinting
   | Opt_DoCmmLinting
   | Opt_DoAsmLinting
   | Opt_DoAnnotationLinting
   | Opt_DoBoundsChecking
   | Opt_NoLlvmMangler                  -- hidden flag
   | Opt_FastLlvm                       -- hidden flag
   | Opt_NoTypeableBinds

   | Opt_DistinctConstructorTables
   | Opt_InfoTableMap
   | Opt_InfoTableMapWithFallback
   | Opt_InfoTableMapWithStack

   | Opt_WarnIsError                    -- -Werror; makes warnings fatal
   | Opt_ShowWarnGroups                 -- Show the group a warning belongs to
   | Opt_HideSourcePaths                -- Hide module source/object paths

   | Opt_PrintExplicitForalls
   | Opt_PrintExplicitKinds
   | Opt_PrintExplicitCoercions
   | Opt_PrintExplicitRuntimeReps
   | Opt_PrintEqualityRelations
   | Opt_PrintAxiomIncomps
   | Opt_PrintUnicodeSyntax
   | Opt_PrintExpandedSynonyms
   | Opt_PrintPotentialInstances
   | Opt_PrintRedundantPromotionTicks
   | Opt_PrintTypecheckerElaboration

   -- optimisation opts
   | Opt_CallArity
   | Opt_Exitification
   | Opt_Strictness
   | Opt_LateDmdAnal                    -- #6087
   | Opt_KillAbsence
   | Opt_KillOneShot
   | Opt_FullLaziness
   | Opt_FloatIn
   | Opt_LocalFloatOut         -- ^ Enable floating out of let-bindings in the
                               -- simplifier
   | Opt_LocalFloatOutTopLevel -- ^ Enable floating out of let-bindings at the
                               -- top level in the simplifier
                               -- N.B. See Note [RHS Floating]
   | Opt_LateSpecialise
   | Opt_Specialise
   | Opt_SpecialiseAggressively
   | Opt_CrossModuleSpecialise
   | Opt_PolymorphicSpecialisation
   | Opt_InlineGenerics
   | Opt_InlineGenericsAggressively
   | Opt_StaticArgumentTransformation
   | Opt_CSE
   | Opt_StgCSE
   | Opt_StgLiftLams
   | Opt_LiberateCase
   | Opt_SpecConstr
   | Opt_SpecConstrKeen
   | Opt_SpecialiseIncoherents
   | Opt_DoLambdaEtaExpansion
   | Opt_DoCleverArgEtaExpansion -- See Note [Eta expansion of arguments in CorePrep]
   | Opt_IgnoreAsserts
   | Opt_DoEtaReduction
   | Opt_CaseMerge
   | Opt_CaseFolding                    -- Constant folding through case-expressions
   | Opt_UnboxStrictFields
   | Opt_UnboxSmallStrictFields
   | Opt_DictsCheap
   | Opt_EnableRewriteRules             -- Apply rewrite rules during simplification
   | Opt_EnableThSpliceWarnings         -- Enable warnings for TH splices
   | Opt_RegsGraph                      -- do graph coloring register allocation
   | Opt_RegsIterative                  -- do iterative coalescing graph coloring register allocation
   | Opt_PedanticBottoms                -- Be picky about how we treat bottom
   | Opt_LlvmFillUndefWithGarbage       -- Testing for undef bugs (hidden flag)
   | Opt_IrrefutableTuples
   | Opt_CmmSink
   | Opt_CmmStaticPred
   | Opt_CmmElimCommonBlocks
   | Opt_CmmControlFlow
   | Opt_AsmShortcutting
   | Opt_OmitYields
   | Opt_FunToThunk          -- deprecated
   | Opt_DictsStrict         -- be strict in argument dictionaries
   | Opt_DmdTxDictSel        -- ^ deprecated, no effect and behaviour is now default.
                             -- Allowed switching of a special demand transformer for dictionary selectors
   | Opt_Loopification       -- See Note [Self-recursive tail calls]
   | Opt_CfgBlocklayout      -- ^ Use the cfg based block layout algorithm.
   | Opt_WeightlessBlocklayout -- ^ Layout based on last instruction per block.
   | Opt_CprAnal
   | Opt_WorkerWrapper
   | Opt_WorkerWrapperUnlift -- ^ Do W/W split for unlifting even if we won't unbox anything.
   | Opt_SolveConstantDicts
   | Opt_AlignmentSanitisation
   | Opt_CatchNonexhaustiveCases
   | Opt_NumConstantFolding
   | Opt_CoreConstantFolding
   | Opt_FastPAPCalls                   -- #6084
   | Opt_SpecEval
   | Opt_SpecEvalDictFun -- See Note [Controlling Speculative Evaluation]

   -- Inference flags
   | Opt_DoTagInferenceChecks

   -- PreInlining is on by default. The option is there just to see how
   -- bad things get if you turn it off!
   | Opt_SimplPreInlining

   -- Interface files
   | Opt_IgnoreInterfacePragmas
   | Opt_OmitInterfacePragmas
   | Opt_ExposeAllUnfoldings
   | Opt_ExposeOverloadedUnfoldings
   | Opt_KeepAutoRules -- ^Keep auto-generated rules even if they seem to have become useless
   | Opt_WriteInterface -- forces .hi files to be written even with -fno-code
   | Opt_WriteHie -- generate .hie files

   -- JavaScript opts
   | Opt_DisableJsMinifier -- ^ render JavaScript pretty-printed instead of minified (compacted)
   | Opt_DisableJsCsources -- ^ don't link C sources (compiled to JS) with Haskell code (compiled to JS)

   -- profiling opts
   | Opt_AutoSccsOnIndividualCafs
   | Opt_ProfCountEntries
   | Opt_ProfLateInlineCcs
   | Opt_ProfLateCcs
   | Opt_ProfLateOverloadedCcs
   | Opt_ProfLateoverloadedCallsCCs
   | Opt_ProfManualCcs -- ^ Ignore manual SCC annotations

   -- misc opts
   | Opt_Pp
   | Opt_ForceRecomp
   | Opt_IgnoreOptimChanges
   | Opt_IgnoreHpcChanges
   | Opt_ExcessPrecision
   | Opt_EagerBlackHoling
   | Opt_OrigThunkInfo
   | Opt_NoHsMain
   | Opt_SplitSections
   | Opt_StgStats
   | Opt_HideAllPackages
   | Opt_HideAllPluginPackages
   | Opt_PrintBindResult
   | Opt_Haddock
   | Opt_HaddockOptions
   | Opt_BreakOnException
   | Opt_BreakOnError
   | Opt_PrintEvldWithShow
   | Opt_PrintBindContents
   | Opt_GenManifest
   | Opt_EmbedManifest
   | Opt_SharedImplib
   | Opt_BuildingCabalPackage
   | Opt_IgnoreDotGhci
   | Opt_GhciSandbox
   | Opt_InsertBreakpoints
   | Opt_GhciHistory
   | Opt_GhciLeakCheck
   | Opt_ValidateHie
   | Opt_LocalGhciHistory
   | Opt_NoIt
   | Opt_HelpfulErrors
   | Opt_DeferTypeErrors            -- Since 7.6
   | Opt_DeferTypedHoles            -- Since 7.10
   | Opt_DeferOutOfScopeVariables
   | Opt_PIC                        -- ^ @-fPIC@
   | Opt_PIE                        -- ^ @-fPIE@
   | Opt_PICExecutable              -- ^ @-pie@
   | Opt_ExternalDynamicRefs
   | Opt_Ticky
   | Opt_Ticky_Allocd
   | Opt_Ticky_LNE
   | Opt_Ticky_Dyn_Thunk
   | Opt_Ticky_Tag
   | Opt_Ticky_AP                   -- ^ Use regular thunks even when we could use std ap thunks in order to get entry counts
   | Opt_CmmThreadSanitizer
   | Opt_RPath
   | Opt_RelativeDynlibPaths
   | Opt_CompactUnwind              -- ^ @-fcompact-unwind@
   | Opt_Hpc
   | Opt_FamAppCache
   | Opt_ExternalInterpreter
   | Opt_OptimalApplicativeDo
   | Opt_VersionMacros
   | Opt_WholeArchiveHsLibs
   -- copy all libs into a single folder prior to linking binaries
   -- this should alleviate the excessive command line limit restrictions
   -- on windows, by only requiring a single -L argument instead of
   -- one for each dependency.  At the time of this writing, gcc
   -- forwards all -L flags to the collect2 command without using a
   -- response file and as such breaking apart.
   | Opt_SingleLibFolder
   | Opt_ExposeInternalSymbols
   | Opt_KeepCAFs
   | Opt_KeepGoing
   | Opt_ByteCode
   | Opt_ByteCodeAndObjectCode
   | Opt_UnoptimizedCoreForInterpreter
   | Opt_LinkRts

   -- output style opts
   | Opt_ErrorSpans -- Include full span info in error messages,
                    -- instead of just the start position.
   | Opt_DeferDiagnostics
   | Opt_DiagnosticsAsJSON  -- ^ Dump diagnostics as JSON
   | Opt_DiagnosticsShowCaret -- Show snippets of offending code
   | Opt_PprCaseAsLet
   | Opt_PprShowTicks
   | Opt_ShowHoleConstraints
    -- Options relating to the display of valid hole fits
    -- when generating an error message for a typed hole
    -- See Note [Valid hole fits include ...] in GHC.Tc.Errors.Hole
   | Opt_ShowValidHoleFits
   | Opt_SortValidHoleFits
   | Opt_SortBySizeHoleFits
   | Opt_SortBySubsumHoleFits
   | Opt_AbstractRefHoleFits
   | Opt_UnclutterValidHoleFits
   | Opt_ShowTypeAppOfHoleFits
   | Opt_ShowTypeAppVarsOfHoleFits
   | Opt_ShowDocsOfHoleFits
   | Opt_ShowTypeOfHoleFits
   | Opt_ShowProvOfHoleFits
   | Opt_ShowMatchesOfHoleFits

   | Opt_ShowLoadedModules
   | Opt_HexWordLiterals -- See Note [Print Hexadecimal Literals]

   -- Suppress a coercions inner structure, replacing it with '...'
   | Opt_SuppressCoercions
   -- Suppress the type of a coercion as well
   | Opt_SuppressCoercionTypes
   | Opt_SuppressVarKinds
   -- Suppress module id prefixes on variables.
   | Opt_SuppressModulePrefixes
   -- Suppress type applications.
   | Opt_SuppressTypeApplications
   -- Suppress info such as arity and unfoldings on identifiers.
   | Opt_SuppressIdInfo
   -- Suppress separate type signatures in core, but leave types on
   -- lambda bound vars
   | Opt_SuppressUnfoldings
   -- Suppress the details of even stable unfoldings
   | Opt_SuppressTypeSignatures
   -- Suppress unique ids on variables.
   -- Except for uniques, as some simplifier phases introduce new
   -- variables that have otherwise identical names.
   | Opt_SuppressUniques
   | Opt_SuppressStgExts
   | Opt_SuppressStgReps
   | Opt_SuppressTicks     -- Replaces Opt_PprShowTicks
   | Opt_SuppressTimestamps -- ^ Suppress timestamps in dumps
   | Opt_SuppressCoreSizes  -- ^ Suppress per binding Core size stats in dumps

   -- Error message suppression
   | Opt_ShowErrorContext

   -- Object code determinism
   | Opt_ObjectDeterminism

   -- temporary flags
   | Opt_AutoLinkPackages
   | Opt_ImplicitImportQualified

   -- keeping stuff
   | Opt_KeepHscppFiles
   | Opt_KeepHiDiffs
   | Opt_KeepHcFiles
   | Opt_KeepSFiles
   | Opt_KeepTmpFiles
   | Opt_KeepRawTokenStream
   | Opt_KeepLlvmFiles
   | Opt_KeepHiFiles
   | Opt_KeepOFiles

   | Opt_BuildDynamicToo
   | Opt_WriteIfSimplifiedCore
   | Opt_UseBytecodeRatherThanObjects

   -- safe haskell flags
   | Opt_DistrustAllPackages
   | Opt_PackageTrust
   | Opt_PluginTrustworthy

   | Opt_G_NoStateHack
   | Opt_G_NoOptCoercion
   deriving (Eq, Show, Enum)

-- | The set of flags which affect optimisation for the purposes of
-- recompilation avoidance. Specifically, these include flags which
-- affect code generation but not the semantics of the program.
-- -- See Note [Ignoring some flag changes] in GHC.Iface.Recomp.Flags) optimisationFlags :: EnumSet GeneralFlag optimisationFlags = EnumSet.fromList [ Opt_CallArity , Opt_Strictness , Opt_LateDmdAnal , Opt_KillAbsence , Opt_KillOneShot , Opt_FullLaziness , Opt_FloatIn , Opt_LateSpecialise , Opt_Specialise , Opt_SpecialiseAggressively , Opt_CrossModuleSpecialise , Opt_StaticArgumentTransformation , Opt_CSE , Opt_StgCSE , Opt_StgLiftLams , Opt_LiberateCase , Opt_SpecConstr , Opt_SpecConstrKeen , Opt_DoLambdaEtaExpansion , Opt_IgnoreAsserts , Opt_DoEtaReduction , Opt_CaseMerge , Opt_CaseFolding , Opt_UnboxStrictFields , Opt_UnboxSmallStrictFields , Opt_DictsCheap , Opt_EnableRewriteRules , Opt_RegsGraph , Opt_RegsIterative , Opt_IrrefutableTuples , Opt_CmmSink , Opt_CmmElimCommonBlocks , Opt_AsmShortcutting , Opt_FunToThunk , Opt_DmdTxDictSel , Opt_Loopification , Opt_CfgBlocklayout , Opt_WeightlessBlocklayout , Opt_CprAnal , Opt_WorkerWrapper , Opt_WorkerWrapperUnlift , Opt_SolveConstantDicts , Opt_SpecEval , Opt_SpecEvalDictFun ] -- | The set of flags which affect code generation and can change a program's -- runtime behavior (other than performance). These include flags which affect: -- -- * user visible debugging information (e.g. info table provenance) -- * the ability to catch runtime errors (e.g. -fignore-asserts) -- * the runtime result of the program (e.g. -fomit-yields) -- * which code or interface file declarations are emitted -- -- We also considered placing flags which affect asympototic space behavior -- (e.g. -ffull-laziness) however this would mean that changing optimisation -- levels would trigger recompilation even with -fignore-optim-changes, -- regressing #13604. -- -- Also, arguably Opt_IgnoreAsserts should be here as well; however, we place -- it instead in 'optimisationFlags' since it is implied by @-O[12]@ and -- therefore would also break #13604. -- -- See #23369. 
codeGenFlags :: EnumSet GeneralFlag codeGenFlags = EnumSet.fromList [ -- Flags that affect runtime result Opt_EagerBlackHoling , Opt_ExcessPrecision , Opt_DictsStrict , Opt_PedanticBottoms , Opt_OmitYields -- Flags that affect generated code , Opt_ExposeAllUnfoldings , Opt_ExposeOverloadedUnfoldings , Opt_NoTypeableBinds , Opt_ObjectDeterminism , Opt_Haddock -- Flags that affect catching of runtime errors , Opt_CatchNonexhaustiveCases , Opt_LlvmFillUndefWithGarbage , Opt_DoTagInferenceChecks -- Flags that affect debugging information , Opt_DistinctConstructorTables , Opt_InfoTableMap , Opt_InfoTableMapWithStack , Opt_InfoTableMapWithFallback , Opt_OrigThunkInfo ] data WarningFlag = -- See Note [Updating flag description in the User's Guide] in GHC.Driver.Session Opt_WarnDuplicateExports | Opt_WarnDuplicateConstraints | Opt_WarnRedundantConstraints | Opt_WarnHiShadows | Opt_WarnImplicitPrelude | Opt_WarnIncompletePatterns | Opt_WarnIncompleteUniPatterns | Opt_WarnIncompletePatternsRecUpd | Opt_WarnOverflowedLiterals | Opt_WarnEmptyEnumerations | Opt_WarnMissingFields | Opt_WarnMissingImportList | Opt_WarnMissingMethods | Opt_WarnMissingSignatures | Opt_WarnMissingLocalSignatures | Opt_WarnNameShadowing | Opt_WarnOverlappingPatterns | Opt_WarnTypeDefaults | Opt_WarnMonomorphism | Opt_WarnUnusedTopBinds | Opt_WarnUnusedLocalBinds | Opt_WarnUnusedPatternBinds | Opt_WarnUnusedImports | Opt_WarnUnusedMatches | Opt_WarnUnusedTypePatterns | Opt_WarnUnusedForalls | Opt_WarnUnusedRecordWildcards | Opt_WarnRedundantBangPatterns | Opt_WarnRedundantRecordWildcards | Opt_WarnDeprecatedFlags | Opt_WarnMissingMonadFailInstances -- since 8.0, has no effect since 8.8 | Opt_WarnSemigroup -- since 8.0, has no effect since 9.8 | Opt_WarnDodgyExports | Opt_WarnDodgyImports | Opt_WarnOrphans | Opt_WarnAutoOrphans | Opt_WarnIdentities | Opt_WarnTabs | Opt_WarnUnrecognisedPragmas | Opt_WarnMisplacedPragmas | Opt_WarnDodgyForeignImports | Opt_WarnUnusedDoBind | Opt_WarnWrongDoBind | 
Opt_WarnAlternativeLayoutRuleTransitional | Opt_WarnUnsafe | Opt_WarnSafe | Opt_WarnTrustworthySafe | Opt_WarnMissedSpecs | Opt_WarnAllMissedSpecs | Opt_WarnUnsupportedCallingConventions | Opt_WarnUnsupportedLlvmVersion | Opt_WarnMissedExtraSharedLib | Opt_WarnInlineRuleShadowing | Opt_WarnTypedHoles | Opt_WarnPartialTypeSignatures | Opt_WarnMissingExportedSignatures | Opt_WarnUntickedPromotedConstructors | Opt_WarnDerivingTypeable | Opt_WarnDeferredTypeErrors | Opt_WarnDeferredOutOfScopeVariables | Opt_WarnNonCanonicalMonadInstances -- since 8.0 | Opt_WarnNonCanonicalMonadFailInstances -- since 8.0, removed 8.8 | Opt_WarnNonCanonicalMonoidInstances -- since 8.0 | Opt_WarnMissingPatternSynonymSignatures -- since 8.0 | Opt_WarnUnrecognisedWarningFlags -- since 8.0 | Opt_WarnSimplifiableClassConstraints -- Since 8.2 | Opt_WarnCPPUndef -- Since 8.2 | Opt_WarnUnbangedStrictPatterns -- Since 8.2 | Opt_WarnMissingHomeModules -- Since 8.2 | Opt_WarnPartialFields -- Since 8.4 | Opt_WarnMissingExportList | Opt_WarnInaccessibleCode | Opt_WarnStarIsType -- Since 8.6 | Opt_WarnStarBinder -- Since 8.6 | Opt_WarnImplicitKindVars -- Since 8.6 | Opt_WarnSpaceAfterBang | Opt_WarnMissingDerivingStrategies -- Since 8.8 | Opt_WarnPrepositiveQualifiedModule -- Since 8.10 | Opt_WarnUnusedPackages -- Since 8.10 | Opt_WarnInferredSafeImports -- Since 8.10 | Opt_WarnMissingSafeHaskellMode -- Since 8.10 | Opt_WarnCompatUnqualifiedImports -- Since 8.10 | Opt_WarnDerivingDefaults | Opt_WarnInvalidHaddock -- Since 9.0 | Opt_WarnOperatorWhitespaceExtConflict -- Since 9.2 | Opt_WarnOperatorWhitespace -- Since 9.2 | Opt_WarnAmbiguousFields -- Since 9.2 | Opt_WarnImplicitLift -- Since 9.2 | Opt_WarnMissingKindSignatures -- Since 9.2 | Opt_WarnMissingPolyKindSignatures -- Since 9.8 | Opt_WarnMissingExportedPatternSynonymSignatures -- since 9.2 | Opt_WarnRedundantStrictnessFlags -- Since 9.4 | Opt_WarnForallIdentifier -- Since 9.4 | Opt_WarnUnicodeBidirectionalFormatCharacters -- Since 9.0.2 | 
Opt_WarnGADTMonoLocalBinds -- Since 9.4 | Opt_WarnTypeEqualityOutOfScope -- Since 9.4 | Opt_WarnTypeEqualityRequiresOperators -- Since 9.4 | Opt_WarnLoopySuperclassSolve -- Since 9.6, has no effect since 9.10 | Opt_WarnTermVariableCapture -- Since 9.8 | Opt_WarnMissingRoleAnnotations -- Since 9.8 | Opt_WarnImplicitRhsQuantification -- Since 9.8 | Opt_WarnIncompleteExportWarnings -- Since 9.8 | Opt_WarnIncompleteRecordSelectors -- Since 9.10 | Opt_WarnBadlyStagedTypes -- Since 9.10 | Opt_WarnInconsistentFlags -- Since 9.8 | Opt_WarnDataKindsTC -- Since 9.10 | Opt_WarnDeprecatedTypeAbstractions -- Since 9.10 | Opt_WarnDefaultedExceptionContext -- Since 9.10 | Opt_WarnViewPatternSignatures -- Since 9.12 deriving (Eq, Ord, Show, Enum, Bounded) -- | Return the names of a WarningFlag -- -- One flag may have several names because of US/UK spelling. The first one is -- the "preferred one" that will be displayed in warning messages. warnFlagNames :: WarningFlag -> NonEmpty String warnFlagNames wflag = case wflag of Opt_WarnAlternativeLayoutRuleTransitional -> "alternative-layout-rule-transitional" :| [] Opt_WarnAmbiguousFields -> "ambiguous-fields" :| [] Opt_WarnAutoOrphans -> "auto-orphans" :| [] Opt_WarnTermVariableCapture -> "term-variable-capture" :| [] Opt_WarnCPPUndef -> "cpp-undef" :| [] Opt_WarnUnbangedStrictPatterns -> "unbanged-strict-patterns" :| [] Opt_WarnDeferredTypeErrors -> "deferred-type-errors" :| [] Opt_WarnDeferredOutOfScopeVariables -> "deferred-out-of-scope-variables" :| [] Opt_WarnDeprecatedFlags -> "deprecated-flags" :| [] Opt_WarnDerivingDefaults -> "deriving-defaults" :| [] Opt_WarnDerivingTypeable -> "deriving-typeable" :| [] Opt_WarnDodgyExports -> "dodgy-exports" :| [] Opt_WarnDodgyForeignImports -> "dodgy-foreign-imports" :| [] Opt_WarnDodgyImports -> "dodgy-imports" :| [] Opt_WarnEmptyEnumerations -> "empty-enumerations" :| [] Opt_WarnDuplicateConstraints -> "duplicate-constraints" :| [] Opt_WarnRedundantConstraints -> "redundant-constraints" 
:| [] Opt_WarnDuplicateExports -> "duplicate-exports" :| [] Opt_WarnHiShadows -> "hi-shadowing" :| [] Opt_WarnInaccessibleCode -> "inaccessible-code" :| [] Opt_WarnImplicitPrelude -> "implicit-prelude" :| [] Opt_WarnImplicitKindVars -> "implicit-kind-vars" :| [] Opt_WarnIncompletePatterns -> "incomplete-patterns" :| [] Opt_WarnIncompletePatternsRecUpd -> "incomplete-record-updates" :| [] Opt_WarnIncompleteUniPatterns -> "incomplete-uni-patterns" :| [] Opt_WarnInlineRuleShadowing -> "inline-rule-shadowing" :| [] Opt_WarnIdentities -> "identities" :| [] Opt_WarnMissingFields -> "missing-fields" :| [] Opt_WarnMissingImportList -> "missing-import-lists" :| [] Opt_WarnMissingExportList -> "missing-export-lists" :| [] Opt_WarnMissingLocalSignatures -> "missing-local-signatures" :| [] Opt_WarnMissingMethods -> "missing-methods" :| [] Opt_WarnMissingMonadFailInstances -> "missing-monadfail-instances" :| [] Opt_WarnSemigroup -> "semigroup" :| [] Opt_WarnMissingSignatures -> "missing-signatures" :| [] Opt_WarnMissingKindSignatures -> "missing-kind-signatures" :| [] Opt_WarnMissingPolyKindSignatures -> "missing-poly-kind-signatures" :| [] Opt_WarnMissingExportedSignatures -> "missing-exported-signatures" :| [] Opt_WarnMonomorphism -> "monomorphism-restriction" :| [] Opt_WarnNameShadowing -> "name-shadowing" :| [] Opt_WarnNonCanonicalMonadInstances -> "noncanonical-monad-instances" :| [] Opt_WarnNonCanonicalMonadFailInstances -> "noncanonical-monadfail-instances" :| [] Opt_WarnNonCanonicalMonoidInstances -> "noncanonical-monoid-instances" :| [] Opt_WarnOrphans -> "orphans" :| [] Opt_WarnOverflowedLiterals -> "overflowed-literals" :| [] Opt_WarnOverlappingPatterns -> "overlapping-patterns" :| [] Opt_WarnMissedSpecs -> "missed-specialisations" :| ["missed-specializations"] Opt_WarnAllMissedSpecs -> "all-missed-specialisations" :| ["all-missed-specializations"] Opt_WarnSafe -> "safe" :| [] Opt_WarnTrustworthySafe -> "trustworthy-safe" :| [] Opt_WarnInferredSafeImports -> 
"inferred-safe-imports" :| [] Opt_WarnMissingSafeHaskellMode -> "missing-safe-haskell-mode" :| [] Opt_WarnTabs -> "tabs" :| [] Opt_WarnTypeDefaults -> "type-defaults" :| [] Opt_WarnTypedHoles -> "typed-holes" :| [] Opt_WarnPartialTypeSignatures -> "partial-type-signatures" :| [] Opt_WarnUnrecognisedPragmas -> "unrecognised-pragmas" :| [] Opt_WarnMisplacedPragmas -> "misplaced-pragmas" :| [] Opt_WarnUnsafe -> "unsafe" :| [] Opt_WarnUnsupportedCallingConventions -> "unsupported-calling-conventions" :| [] Opt_WarnUnsupportedLlvmVersion -> "unsupported-llvm-version" :| [] Opt_WarnMissedExtraSharedLib -> "missed-extra-shared-lib" :| [] Opt_WarnUntickedPromotedConstructors -> "unticked-promoted-constructors" :| [] Opt_WarnUnusedDoBind -> "unused-do-bind" :| [] Opt_WarnUnusedForalls -> "unused-foralls" :| [] Opt_WarnUnusedImports -> "unused-imports" :| [] Opt_WarnUnusedLocalBinds -> "unused-local-binds" :| [] Opt_WarnUnusedMatches -> "unused-matches" :| [] Opt_WarnUnusedPatternBinds -> "unused-pattern-binds" :| [] Opt_WarnUnusedTopBinds -> "unused-top-binds" :| [] Opt_WarnUnusedTypePatterns -> "unused-type-patterns" :| [] Opt_WarnUnusedRecordWildcards -> "unused-record-wildcards" :| [] Opt_WarnRedundantBangPatterns -> "redundant-bang-patterns" :| [] Opt_WarnRedundantRecordWildcards -> "redundant-record-wildcards" :| [] Opt_WarnRedundantStrictnessFlags -> "redundant-strictness-flags" :| [] Opt_WarnWrongDoBind -> "wrong-do-bind" :| [] Opt_WarnMissingPatternSynonymSignatures -> "missing-pattern-synonym-signatures" :| [] Opt_WarnMissingDerivingStrategies -> "missing-deriving-strategies" :| [] Opt_WarnSimplifiableClassConstraints -> "simplifiable-class-constraints" :| [] Opt_WarnMissingHomeModules -> "missing-home-modules" :| [] Opt_WarnUnrecognisedWarningFlags -> "unrecognised-warning-flags" :| [] Opt_WarnStarBinder -> "star-binder" :| [] Opt_WarnStarIsType -> "star-is-type" :| [] Opt_WarnSpaceAfterBang -> "missing-space-after-bang" :| [] Opt_WarnPartialFields -> 
"partial-fields" :| [] Opt_WarnPrepositiveQualifiedModule -> "prepositive-qualified-module" :| [] Opt_WarnUnusedPackages -> "unused-packages" :| [] Opt_WarnCompatUnqualifiedImports -> "compat-unqualified-imports" :| [] Opt_WarnInvalidHaddock -> "invalid-haddock" :| [] Opt_WarnOperatorWhitespaceExtConflict -> "operator-whitespace-ext-conflict" :| [] Opt_WarnOperatorWhitespace -> "operator-whitespace" :| [] Opt_WarnImplicitLift -> "implicit-lift" :| [] Opt_WarnMissingExportedPatternSynonymSignatures -> "missing-exported-pattern-synonym-signatures" :| [] Opt_WarnForallIdentifier -> "forall-identifier" :| [] Opt_WarnUnicodeBidirectionalFormatCharacters -> "unicode-bidirectional-format-characters" :| [] Opt_WarnGADTMonoLocalBinds -> "gadt-mono-local-binds" :| [] Opt_WarnTypeEqualityOutOfScope -> "type-equality-out-of-scope" :| [] Opt_WarnLoopySuperclassSolve -> "loopy-superclass-solve" :| [] Opt_WarnTypeEqualityRequiresOperators -> "type-equality-requires-operators" :| [] Opt_WarnMissingRoleAnnotations -> "missing-role-annotations" :| [] Opt_WarnImplicitRhsQuantification -> "implicit-rhs-quantification" :| [] Opt_WarnIncompleteExportWarnings -> "incomplete-export-warnings" :| [] Opt_WarnIncompleteRecordSelectors -> "incomplete-record-selectors" :| [] Opt_WarnBadlyStagedTypes -> "badly-staged-types" :| [] Opt_WarnInconsistentFlags -> "inconsistent-flags" :| [] Opt_WarnDataKindsTC -> "data-kinds-tc" :| [] Opt_WarnDeprecatedTypeAbstractions -> "deprecated-type-abstractions" :| [] Opt_WarnDefaultedExceptionContext -> "defaulted-exception-context" :| [] Opt_WarnViewPatternSignatures -> "view-pattern-signatures" :| [] -- ----------------------------------------------------------------------------- -- Standard sets of warning options -- Note [Documenting warning flags] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- If you change the list of warnings enabled by default -- please remember to update the User's Guide. 
The relevant file is: -- -- docs/users_guide/using-warnings.rst -- | A group of warning flags that can be enabled or disabled collectively, -- e.g. using @-Wcompat@ to enable all warnings in the 'W_compat' group. data WarningGroup = W_compat | W_unused_binds | W_extended_warnings | W_default | W_extra | W_all | W_everything deriving (Bounded, Enum, Eq) warningGroupName :: WarningGroup -> String warningGroupName W_compat = "compat" warningGroupName W_unused_binds = "unused-binds" warningGroupName W_extended_warnings = "extended-warnings" warningGroupName W_default = "default" warningGroupName W_extra = "extra" warningGroupName W_all = "all" warningGroupName W_everything = "everything" warningGroupFlags :: WarningGroup -> [WarningFlag] warningGroupFlags W_compat = minusWcompatOpts warningGroupFlags W_unused_binds = unusedBindsFlags warningGroupFlags W_extended_warnings = [] warningGroupFlags W_default = standardWarnings warningGroupFlags W_extra = minusWOpts warningGroupFlags W_all = minusWallOpts warningGroupFlags W_everything = minusWeverythingOpts -- | Does this warning group contain (all) extended warning categories? See -- Note [Warning categories] in GHC.Unit.Module.Warnings. -- -- The 'W_extended_warnings' group contains extended warnings but no -- 'WarningFlag's, but extended warnings are also treated as part of 'W_default' -- and every warning group that includes it. warningGroupIncludesExtendedWarnings :: WarningGroup -> Bool warningGroupIncludesExtendedWarnings W_compat = False warningGroupIncludesExtendedWarnings W_unused_binds = False warningGroupIncludesExtendedWarnings W_extended_warnings = True warningGroupIncludesExtendedWarnings W_default = True warningGroupIncludesExtendedWarnings W_extra = True warningGroupIncludesExtendedWarnings W_all = True warningGroupIncludesExtendedWarnings W_everything = True -- | Warning groups. -- -- As all warnings are in the Weverything set, it is ignored when -- displaying to the user which group a warning is in. 
warningGroups :: [WarningGroup] warningGroups = [minBound..maxBound] -- | Warning group hierarchies, where there is an explicit inclusion -- relation. -- -- Each inner list is a hierarchy of warning groups, ordered from -- smallest to largest, where each group is a superset of the one -- before it. -- -- Separating this from 'warningGroups' allows for multiple -- hierarchies with no inherent relation to be defined. -- -- The special-case Weverything group is not included. warningHierarchies :: [[WarningGroup]] warningHierarchies = hierarchies ++ map (:[]) rest where hierarchies = [[W_default, W_extra, W_all]] rest = filter (`notElem` W_everything : concat hierarchies) warningGroups -- | Find the smallest group in every hierarchy which a warning -- belongs to, excluding Weverything. smallestWarningGroups :: WarningFlag -> [WarningGroup] smallestWarningGroups flag = mapMaybe go warningHierarchies where -- Because each hierarchy is arranged from smallest to largest, -- the first group we find in a hierarchy which contains the flag -- is the smallest. go (group:rest) = fromMaybe (go rest) $ do guard (flag `elem` warningGroupFlags group) pure (Just group) go [] = Nothing -- | The smallest group in every hierarchy to which a custom warning -- category belongs is currently always @-Wextended-warnings@. -- See Note [Warning categories] in "GHC.Unit.Module.Warnings". 
smallestWarningGroupsForCategory :: [WarningGroup] smallestWarningGroupsForCategory = [W_extended_warnings] -- | Warnings enabled unless specified otherwise standardWarnings :: [WarningFlag] standardWarnings -- see Note [Documenting warning flags] = [ Opt_WarnOverlappingPatterns, Opt_WarnDeprecatedFlags, Opt_WarnDeferredTypeErrors, Opt_WarnTypedHoles, Opt_WarnDeferredOutOfScopeVariables, Opt_WarnPartialTypeSignatures, Opt_WarnUnrecognisedPragmas, Opt_WarnMisplacedPragmas, Opt_WarnDuplicateExports, Opt_WarnDerivingDefaults, Opt_WarnOverflowedLiterals, Opt_WarnEmptyEnumerations, Opt_WarnAmbiguousFields, Opt_WarnMissingFields, Opt_WarnMissingMethods, Opt_WarnWrongDoBind, Opt_WarnUnsupportedCallingConventions, Opt_WarnDodgyForeignImports, Opt_WarnInlineRuleShadowing, Opt_WarnAlternativeLayoutRuleTransitional, Opt_WarnUnsupportedLlvmVersion, Opt_WarnMissedExtraSharedLib, Opt_WarnTabs, Opt_WarnUnrecognisedWarningFlags, Opt_WarnSimplifiableClassConstraints, Opt_WarnStarBinder, Opt_WarnStarIsType, Opt_WarnInaccessibleCode, Opt_WarnSpaceAfterBang, Opt_WarnNonCanonicalMonadInstances, Opt_WarnNonCanonicalMonoidInstances, Opt_WarnOperatorWhitespaceExtConflict, Opt_WarnUnicodeBidirectionalFormatCharacters, Opt_WarnGADTMonoLocalBinds, Opt_WarnBadlyStagedTypes, Opt_WarnTypeEqualityRequiresOperators, Opt_WarnInconsistentFlags, Opt_WarnDataKindsTC, Opt_WarnTypeEqualityOutOfScope, Opt_WarnViewPatternSignatures ] -- | Things you get with -W minusWOpts :: [WarningFlag] minusWOpts = standardWarnings ++ [ Opt_WarnUnusedTopBinds, Opt_WarnUnusedLocalBinds, Opt_WarnUnusedPatternBinds, Opt_WarnUnusedMatches, Opt_WarnUnusedForalls, Opt_WarnUnusedImports, Opt_WarnIncompletePatterns, Opt_WarnDodgyExports, Opt_WarnDodgyImports, Opt_WarnUnbangedStrictPatterns ] -- | Things you get with -Wall minusWallOpts :: [WarningFlag] minusWallOpts = minusWOpts ++ [ Opt_WarnTypeDefaults, Opt_WarnNameShadowing, Opt_WarnMissingSignatures, Opt_WarnHiShadows, Opt_WarnOrphans, Opt_WarnUnusedDoBind, 
Opt_WarnTrustworthySafe, Opt_WarnMissingPatternSynonymSignatures, Opt_WarnUnusedRecordWildcards, Opt_WarnRedundantRecordWildcards, Opt_WarnIncompleteUniPatterns, Opt_WarnIncompletePatternsRecUpd, Opt_WarnIncompleteExportWarnings, Opt_WarnDerivingTypeable ] -- | Things you get with -Weverything, i.e. *all* known warnings flags minusWeverythingOpts :: [WarningFlag] minusWeverythingOpts = [ toEnum 0 .. ] -- | Things you get with -Wcompat. -- -- This is intended to group together warnings that will be enabled by default -- at some point in the future, so that library authors eager to make their -- code future compatible to fix issues before they even generate warnings. minusWcompatOpts :: [WarningFlag] minusWcompatOpts = [ Opt_WarnImplicitRhsQuantification , Opt_WarnDeprecatedTypeAbstractions ] -- | Things you get with -Wunused-binds unusedBindsFlags :: [WarningFlag] unusedBindsFlags = [ Opt_WarnUnusedTopBinds , Opt_WarnUnusedLocalBinds , Opt_WarnUnusedPatternBinds ] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Hooks.hs0000644000000000000000000001242007346545000020557 0ustar0000000000000000-- \section[Hooks]{Low level API hooks} -- NB: this module is SOURCE-imported by DynFlags, and should primarily -- refer to *types*, rather than *code* {-# LANGUAGE RankNTypes, TypeFamilies #-} module GHC.Driver.Hooks ( Hooks , HasHooks (..) , ContainsHooks (..) 
, emptyHooks -- the hooks: , DsForeignsHook , dsForeignsHook , tcForeignImportsHook , tcForeignExportsHook , hscFrontendHook , hscCompileCoreExprHook , ghcPrimIfaceHook , runPhaseHook , runMetaHook , linkHook , runRnSpliceHook , getValueSafelyHook , createIservProcessHook , stgToCmmHook , cmmToRawCmmHook ) where import GHC.Prelude import GHC.Driver.Env import GHC.Driver.DynFlags import GHC.Driver.Pipeline.Phases import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.Extension import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Types.Id import GHC.Types.SrcLoc import GHC.Types.Basic import GHC.Types.CostCentre import GHC.Types.IPE import GHC.Types.Meta import GHC.Unit.Module import GHC.Unit.Module.ModSummary import GHC.Unit.Module.ModIface import GHC.Unit.Home.ModInfo import GHC.Core import GHC.Core.TyCon import GHC.Core.Type import GHC.Tc.Types import GHC.Stg.Syntax import GHC.StgToCmm.CgUtils (CgStream) import GHC.StgToCmm.Types (ModuleLFInfos) import GHC.StgToCmm.Config import GHC.Cmm import GHCi.RemoteTypes import GHC.Data.Bag import qualified Data.Kind import System.Process import GHC.Linker.Types {- ************************************************************************ * * \subsection{Hooks} * * ************************************************************************ -} -- | Hooks can be used by GHC API clients to replace parts of -- the compiler pipeline. 
If a hook is not installed, GHC -- uses the default built-in behaviour emptyHooks :: Hooks emptyHooks = Hooks { dsForeignsHook = Nothing , tcForeignImportsHook = Nothing , tcForeignExportsHook = Nothing , hscFrontendHook = Nothing , hscCompileCoreExprHook = Nothing , ghcPrimIfaceHook = Nothing , runPhaseHook = Nothing , runMetaHook = Nothing , linkHook = Nothing , runRnSpliceHook = Nothing , getValueSafelyHook = Nothing , createIservProcessHook = Nothing , stgToCmmHook = Nothing , cmmToRawCmmHook = Nothing } {- Note [The Decoupling Abstract Data Hack] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The "Abstract Data" idea is due to Richard Eisenberg in https://gitlab.haskell.org/ghc/ghc/-/merge_requests/1957, where the pattern is described in more detail. Here we use it as a temporary measure to break the dependency from the Parser on the Desugarer until the parser is free of DynFlags. We introduced a nullary type family @DsForeignsook@, whose single definition is in GHC.HsToCore.Types, where we instantiate it to [LForeignDecl GhcTc] -> DsM (ForeignStubs, OrdList (Id, CoreExpr)) In doing so, the Hooks module (which is an hs-boot dependency of DynFlags) can be decoupled from its use of the DsM definition in GHC.HsToCore.Types. Since both DsM and the definition of @ForeignsHook@ live in the same module, there is virtually no difference for plugin authors that want to write a foreign hook. An awkward consequences is that the `type instance DsForeignsHook`, in GHC.HsToCore.Types is an orphan instance. 
-} -- See Note [The Decoupling Abstract Data Hack] type family DsForeignsHook :: Data.Kind.Type data Hooks = Hooks { dsForeignsHook :: !(Maybe DsForeignsHook) -- ^ Actual type: -- @Maybe ([LForeignDecl GhcTc] -> DsM (ForeignStubs, OrdList (Id, CoreExpr)))@ , tcForeignImportsHook :: !(Maybe ([LForeignDecl GhcRn] -> TcM ([Id], [LForeignDecl GhcTc], Bag GlobalRdrElt))) , tcForeignExportsHook :: !(Maybe ([LForeignDecl GhcRn] -> TcM (LHsBinds GhcTc, [LForeignDecl GhcTc], Bag GlobalRdrElt))) , hscFrontendHook :: !(Maybe (ModSummary -> Hsc FrontendResult)) , hscCompileCoreExprHook :: !(Maybe (HscEnv -> SrcSpan -> CoreExpr -> IO (ForeignHValue, [Linkable], PkgsLoaded))) , ghcPrimIfaceHook :: !(Maybe ModIface) , runPhaseHook :: !(Maybe PhaseHook) , runMetaHook :: !(Maybe (MetaHook TcM)) , linkHook :: !(Maybe (GhcLink -> DynFlags -> Bool -> HomePackageTable -> IO SuccessFlag)) , runRnSpliceHook :: !(Maybe (HsUntypedSplice GhcRn -> RnM (HsUntypedSplice GhcRn))) , getValueSafelyHook :: !(Maybe (HscEnv -> Name -> Type -> IO (Either Type (HValue, [Linkable], PkgsLoaded)))) , createIservProcessHook :: !(Maybe (CreateProcess -> IO ProcessHandle)) , stgToCmmHook :: !(Maybe (StgToCmmConfig -> InfoTableProvMap -> [TyCon] -> CollectedCCs -> [CgStgTopBinding] -> CgStream CmmGroup ModuleLFInfos)) , cmmToRawCmmHook :: !(forall a . 
Maybe (DynFlags -> Maybe Module -> CgStream CmmGroupSRTs a -> IO (CgStream RawCmmGroup a))) } class HasHooks m where getHooks :: m Hooks class ContainsHooks a where extractHooks :: a -> Hooks ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Hooks.hs-boot0000644000000000000000000000030207346545000021514 0ustar0000000000000000module GHC.Driver.Hooks where import GHC.Prelude () data Hooks emptyHooks :: Hooks class HasHooks m where getHooks :: m Hooks class ContainsHooks a where extractHooks :: a -> Hooks ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/LlvmConfigCache.hs0000644000000000000000000000140507346545000022461 0ustar0000000000000000-- | LLVM config cache module GHC.Driver.LlvmConfigCache ( LlvmConfigCache , initLlvmConfigCache , readLlvmConfigCache ) where import GHC.Prelude import GHC.CmmToLlvm.Config import System.IO.Unsafe -- | Cache LLVM configuration read from files in top_dir -- -- See Note [LLVM configuration] in GHC.CmmToLlvm.Config -- -- Currently implemented with unsafe lazy IO. But it could be implemented with -- an IORef as the exposed interface is in IO. 
data LlvmConfigCache = LlvmConfigCache LlvmConfig initLlvmConfigCache :: FilePath -> IO LlvmConfigCache initLlvmConfigCache top_dir = pure $ LlvmConfigCache (unsafePerformIO $ initLlvmConfig top_dir) readLlvmConfigCache :: LlvmConfigCache -> IO LlvmConfig readLlvmConfigCache (LlvmConfigCache !config) = pure config ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Monad.hs0000644000000000000000000001765707346545000020553 0ustar0000000000000000{-# LANGUAGE DerivingVia, NoPolyKinds #-} {-# OPTIONS_GHC -funbox-strict-fields #-} -- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow, 2010 -- -- The Session type and related functionality -- -- ----------------------------------------------------------------------------- module GHC.Driver.Monad ( -- * 'Ghc' monad stuff GhcMonad(..), Ghc(..), GhcT(..), liftGhcT, reflectGhc, reifyGhc, getSessionDynFlags, liftIO, Session(..), withSession, modifySession, modifySessionM, withTempSession, -- * Logger modifyLogger, pushLogHookM, popLogHookM, pushJsonLogHookM, popJsonLogHookM, putLogMsgM, putMsgM, withTimingM, -- ** Diagnostics logDiagnostics, printException, WarnErrLogger, defaultWarnErrLogger ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Driver.Env import GHC.Driver.Errors ( printOrThrowDiagnostics, printMessages ) import GHC.Driver.Errors.Types import GHC.Driver.Config.Diagnostic import GHC.Utils.Monad import GHC.Utils.Exception import GHC.Utils.Error import GHC.Utils.Logger import GHC.Types.SrcLoc import GHC.Types.SourceError import Control.Monad import Control.Monad.Catch as MC import Control.Monad.Trans.Reader import Data.IORef -- ----------------------------------------------------------------------------- -- | A monad that has all the features needed by GHC API calls. 
-- -- In short, a GHC monad -- -- - allows embedding of IO actions, -- -- - can log warnings, -- -- - allows handling of (extensible) exceptions, and -- -- - maintains a current session. -- -- If you do not use 'Ghc' or 'GhcT', make sure to call 'GHC.initGhcMonad' -- before any call to the GHC API functions can occur. -- class (Functor m, ExceptionMonad m, HasDynFlags m, HasLogger m ) => GhcMonad m where getSession :: m HscEnv setSession :: HscEnv -> m () -- | Call the argument with the current session. withSession :: GhcMonad m => (HscEnv -> m a) -> m a withSession f = getSession >>= f -- | Grabs the DynFlags from the Session getSessionDynFlags :: GhcMonad m => m DynFlags getSessionDynFlags = withSession (return . hsc_dflags) -- | Set the current session to the result of applying the current session to -- the argument. modifySession :: GhcMonad m => (HscEnv -> HscEnv) -> m () modifySession f = do h <- getSession setSession $! f h -- | Set the current session to the result of applying the current session to -- the argument. modifySessionM :: GhcMonad m => (HscEnv -> m HscEnv) -> m () modifySessionM f = do h <- getSession h' <- f h setSession $! h' withSavedSession :: GhcMonad m => m a -> m a withSavedSession m = do saved_session <- getSession m `MC.finally` setSession saved_session -- | Call an action with a temporarily modified Session. withTempSession :: GhcMonad m => (HscEnv -> HscEnv) -> m a -> m a withTempSession f m = withSavedSession $ modifySession f >> m ---------------------------------------- -- Logging ---------------------------------------- -- | Modify the logger modifyLogger :: GhcMonad m => (Logger -> Logger) -> m () modifyLogger f = modifySession $ \hsc_env -> hsc_env { hsc_logger = f (hsc_logger hsc_env) } -- | Push a log hook on the stack pushLogHookM :: GhcMonad m => (LogAction -> LogAction) -> m () pushLogHookM = modifyLogger . 
pushLogHook -- | Pop a log hook from the stack popLogHookM :: GhcMonad m => m () popLogHookM = modifyLogger popLogHook pushJsonLogHookM :: GhcMonad m => (LogJsonAction -> LogJsonAction) -> m () pushJsonLogHookM = modifyLogger . pushJsonLogHook popJsonLogHookM :: GhcMonad m => m () popJsonLogHookM = modifyLogger popJsonLogHook -- | Put a log message putMsgM :: GhcMonad m => SDoc -> m () putMsgM doc = do logger <- getLogger liftIO $ putMsg logger doc -- | Put a log message putLogMsgM :: GhcMonad m => MessageClass -> SrcSpan -> SDoc -> m () putLogMsgM msg_class loc doc = do logger <- getLogger liftIO $ logMsg logger msg_class loc doc -- | Time an action withTimingM :: GhcMonad m => SDoc -> (b -> ()) -> m b -> m b withTimingM doc force action = do logger <- getLogger withTiming logger doc force action -- ----------------------------------------------------------------------------- -- | A monad that allows logging of diagnostics. logDiagnostics :: GhcMonad m => Messages GhcMessage -> m () logDiagnostics warns = do dflags <- getSessionDynFlags logger <- getLogger let !diag_opts = initDiagOpts dflags !print_config = initPrintConfig dflags liftIO $ printOrThrowDiagnostics logger print_config diag_opts warns -- ----------------------------------------------------------------------------- -- | A minimal implementation of a 'GhcMonad'. If you need a custom monad, -- e.g., to maintain additional state consider wrapping this monad or using -- 'GhcT'. newtype Ghc a = Ghc { unGhc :: Session -> IO a } deriving stock (Functor) deriving (Applicative, Monad, MonadFail, MonadFix, MonadThrow, MonadCatch, MonadMask, MonadIO) via (ReaderT Session IO) -- | The Session is a handle to the complete state of a compilation -- session. A compilation session consists of a set of modules -- constituting the current program or library, the context for -- interactive evaluation, and various caches. 
data Session = Session !(IORef HscEnv) instance HasDynFlags Ghc where getDynFlags = getSessionDynFlags instance HasLogger Ghc where getLogger = hsc_logger <$> getSession instance GhcMonad Ghc where getSession = Ghc $ \(Session r) -> readIORef r setSession s' = Ghc $ \(Session r) -> writeIORef r s' -- | Reflect a computation in the 'Ghc' monad into the 'IO' monad. -- -- You can use this to call functions returning an action in the 'Ghc' monad -- inside an 'IO' action. This is needed for some (too restrictive) callback -- arguments of some library functions: -- -- > libFunc :: String -> (Int -> IO a) -> IO a -- > ghcFunc :: Int -> Ghc a -- > -- > ghcFuncUsingLibFunc :: String -> Ghc a -> Ghc a -- > ghcFuncUsingLibFunc str = -- > reifyGhc $ \s -> -- > libFunc $ \i -> do -- > reflectGhc (ghcFunc i) s -- reflectGhc :: Ghc a -> Session -> IO a reflectGhc m = unGhc m -- > Dual to 'reflectGhc'. See its documentation. reifyGhc :: (Session -> IO a) -> Ghc a reifyGhc act = Ghc $ act -- ----------------------------------------------------------------------------- -- | A monad transformer to add GHC specific features to another monad. -- -- Note that the wrapped monad must support IO and handling of exceptions. newtype GhcT m a = GhcT { unGhcT :: Session -> m a } deriving stock (Functor) deriving (Applicative, Monad, MonadFail, MonadFix, MonadThrow, MonadCatch, MonadMask, MonadIO) via (ReaderT Session m) liftGhcT :: m a -> GhcT m a liftGhcT m = GhcT $ \_ -> m instance MonadIO m => HasDynFlags (GhcT m) where getDynFlags = GhcT $ \(Session r) -> liftM hsc_dflags (liftIO $ readIORef r) instance MonadIO m => HasLogger (GhcT m) where getLogger = GhcT $ \(Session r) -> liftM hsc_logger (liftIO $ readIORef r) instance ExceptionMonad m => GhcMonad (GhcT m) where getSession = GhcT $ \(Session r) -> liftIO $ readIORef r setSession s' = GhcT $ \(Session r) -> liftIO $ writeIORef r s' -- | Print the all diagnostics in a 'SourceError'. Useful inside exception -- handlers. 
printException :: (HasLogger m, MonadIO m, HasDynFlags m) => SourceError -> m () printException err = do dflags <- getDynFlags logger <- getLogger let !diag_opts = initDiagOpts dflags !print_config = initPrintConfig dflags liftIO $ printMessages logger print_config diag_opts (srcErrorMessages err) -- | A function called to log warnings and errors. type WarnErrLogger = forall m. (HasDynFlags m , MonadIO m, HasLogger m) => Maybe SourceError -> m () defaultWarnErrLogger :: WarnErrLogger defaultWarnErrLogger Nothing = return () defaultWarnErrLogger (Just e) = printException e ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Phases.hs0000644000000000000000000003006107346545000020720 0ustar0000000000000000----------------------------------------------------------------------------- -- -- GHC Driver -- -- (c) The University of Glasgow 2002 -- ----------------------------------------------------------------------------- module GHC.Driver.Phases ( Phase(..), happensBefore, eqPhase, isStopLn, startPhase, phaseInputExt, StopPhase(..), stopPhaseToPhase, isHaskellishSuffix, isHaskellSrcSuffix, isBackpackishSuffix, isObjectSuffix, isCishSuffix, isDynLibSuffix, isHaskellUserSrcSuffix, isHaskellSigSuffix, isSourceSuffix, isHaskellishTarget, isHaskellishFilename, isHaskellSrcFilename, isHaskellSigFilename, isObjectFilename, isCishFilename, isDynLibFilename, isHaskellUserSrcFilename, isSourceFilename, phaseForeignLanguage ) where import GHC.Prelude import GHC.Platform import GHC.ForeignSrcLang import GHC.Types.SourceFile import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import System.FilePath ----------------------------------------------------------------------------- -- Phases {- Phase of the | Suffix saying | Flag saying | (suffix of) compilation system | ``start here''| ``stop after''| output file literate pre-processor | .lhs | - | - C pre-processor (opt.) | - | -E | - Haskell compiler | .hs | -C, -S | .hc, .s C compiler (opt.) 
| .hc or .c | -S | .s assembler | .s or .S | -c | .o linker | other | - | a.out linker (merge objects) | other | - | .o -} -- Phases we can actually stop after data StopPhase = StopPreprocess -- ^ @-E@ | StopC -- ^ @-C@ | StopAs -- ^ @-S@ | NoStop -- ^ @-c@ stopPhaseToPhase :: StopPhase -> Phase stopPhaseToPhase StopPreprocess = anyHsc stopPhaseToPhase StopC = HCc stopPhaseToPhase StopAs = As False stopPhaseToPhase NoStop = StopLn -- | Untyped Phase description data Phase = Unlit HscSource | Cpp HscSource | HsPp HscSource | Hsc HscSource | Ccxx -- Compile C++ | Cc -- Compile C | Cobjc -- Compile Objective-C | Cobjcxx -- Compile Objective-C++ | HCc -- Haskellised C (as opposed to vanilla C) compilation | As Bool -- Assembler for regular assembly files (Bool: with-cpp) | LlvmOpt -- Run LLVM opt tool over llvm assembly | LlvmLlc -- LLVM bitcode to native assembly | LlvmMangle -- Fix up TNTC by processing assembly produced by LLVM | CmmCpp -- pre-process Cmm source | Cmm -- parse & compile Cmm code | MergeForeign -- merge in the foreign object files | Js -- pre-process Js source -- The final phase is a pseudo-phase that tells the pipeline to stop. 
| StopLn -- Stop, but linking will follow, so generate .o file deriving (Eq, Show) instance Outputable Phase where ppr p = text (show p) anyHsc :: Phase anyHsc = Hsc (panic "anyHsc") isStopLn :: Phase -> Bool isStopLn StopLn = True isStopLn _ = False eqPhase :: Phase -> Phase -> Bool -- Equality of constructors, ignoring the HscSource field -- NB: the HscSource field can be 'bot'; see anyHsc above eqPhase (Unlit _) (Unlit _) = True eqPhase (Cpp _) (Cpp _) = True eqPhase (HsPp _) (HsPp _) = True eqPhase (Hsc _) (Hsc _) = True eqPhase Cc Cc = True eqPhase Cobjc Cobjc = True eqPhase HCc HCc = True eqPhase (As x) (As y) = x == y eqPhase LlvmOpt LlvmOpt = True eqPhase LlvmLlc LlvmLlc = True eqPhase LlvmMangle LlvmMangle = True eqPhase CmmCpp CmmCpp = True eqPhase Cmm Cmm = True eqPhase MergeForeign MergeForeign = True eqPhase StopLn StopLn = True eqPhase Ccxx Ccxx = True eqPhase Cobjcxx Cobjcxx = True eqPhase Js Js = True eqPhase _ _ = False -- MP: happensBefore is only used in preprocessPipeline, that usage should -- be refactored and this usage removed. 
happensBefore :: Platform -> Phase -> Phase -> Bool happensBefore platform p1 p2 = p1 `happensBefore'` p2 where StopLn `happensBefore'` _ = False x `happensBefore'` y = after_x `eqPhase` y || after_x `happensBefore'` y where after_x = nextPhase platform x nextPhase :: Platform -> Phase -> Phase nextPhase platform p -- A conservative approximation to the next phase, used in happensBefore = case p of Unlit sf -> Cpp sf Cpp sf -> HsPp sf HsPp sf -> Hsc sf Hsc _ -> maybeHCc LlvmOpt -> LlvmLlc LlvmLlc -> LlvmMangle LlvmMangle -> As False As _ -> MergeForeign Ccxx -> MergeForeign Cc -> MergeForeign Cobjc -> MergeForeign Cobjcxx -> MergeForeign CmmCpp -> Cmm Cmm -> maybeHCc HCc -> MergeForeign MergeForeign -> StopLn Js -> StopLn StopLn -> panic "nextPhase: nothing after StopLn" where maybeHCc = if platformUnregisterised platform then HCc else As False -- the first compilation phase for a given file is determined -- by its suffix. startPhase :: String -> Phase startPhase "lhs" = Unlit HsSrcFile startPhase "lhs-boot" = Unlit HsBootFile startPhase "lhsig" = Unlit HsigFile startPhase "hs" = Cpp HsSrcFile startPhase "hs-boot" = Cpp HsBootFile startPhase "hsig" = Cpp HsigFile startPhase "hscpp" = HsPp HsSrcFile startPhase "hspp" = Hsc HsSrcFile startPhase "hc" = HCc startPhase "c" = Cc startPhase "cpp" = Ccxx startPhase "C" = Cc startPhase "m" = Cobjc startPhase "M" = Cobjcxx startPhase "mm" = Cobjcxx startPhase "cc" = Ccxx startPhase "cxx" = Ccxx startPhase "s" = As False startPhase "S" = As True startPhase "ll" = LlvmOpt startPhase "bc" = LlvmLlc startPhase "lm_s" = LlvmMangle startPhase "o" = StopLn startPhase "cmm" = CmmCpp startPhase "cmmcpp" = Cmm startPhase "js" = Js startPhase _ = StopLn -- all unknown file types -- This is used to determine the extension for the output from the -- current phase (if it generates a new file). The extension depends -- on the next phase in the pipeline. 
phaseInputExt :: Phase -> String phaseInputExt (Unlit HsSrcFile) = "lhs" phaseInputExt (Unlit HsBootFile) = "lhs-boot" phaseInputExt (Unlit HsigFile) = "lhsig" phaseInputExt (Cpp _) = "lpp" -- intermediate only phaseInputExt (HsPp _) = "hscpp" -- intermediate only phaseInputExt (Hsc _) = "hspp" -- intermediate only -- NB: as things stand, phaseInputExt (Hsc x) must not evaluate x -- because runPhase uses the StopBefore phase to pick the -- output filename. That could be fixed, but watch out. phaseInputExt HCc = "hc" phaseInputExt Ccxx = "cpp" phaseInputExt Cobjc = "m" phaseInputExt Cobjcxx = "mm" phaseInputExt Cc = "c" phaseInputExt (As True) = "S" phaseInputExt (As False) = "s" phaseInputExt LlvmOpt = "ll" phaseInputExt LlvmLlc = "bc" phaseInputExt LlvmMangle = "lm_s" phaseInputExt CmmCpp = "cmmcpp" phaseInputExt Cmm = "cmm" phaseInputExt MergeForeign = "o" phaseInputExt Js = "js" phaseInputExt StopLn = "o" haskellish_src_suffixes, backpackish_suffixes, haskellish_suffixes, cish_suffixes, js_suffixes, haskellish_user_src_suffixes, haskellish_sig_suffixes :: [String] -- When a file with an extension in the haskellish_src_suffixes group is -- loaded in --make mode, its imports will be loaded too. 
haskellish_src_suffixes = haskellish_user_src_suffixes ++ [ "hspp", "hscpp" ] haskellish_suffixes = haskellish_src_suffixes ++ [ "hc", "cmm", "cmmcpp" ] cish_suffixes = [ "c", "cpp", "C", "cc", "cxx", "s", "S", "ll", "bc", "lm_s", "m", "M", "mm" ] js_suffixes = [ "js" ] -- Will not be deleted as temp files: haskellish_user_src_suffixes = haskellish_sig_suffixes ++ [ "hs", "lhs", "hs-boot", "lhs-boot" ] haskellish_sig_suffixes = [ "hsig", "lhsig" ] backpackish_suffixes = [ "bkp" ] objish_suffixes :: Platform -> [String] -- Use the appropriate suffix for the system on which -- the GHC-compiled code will run objish_suffixes platform = case platformOS platform of OSMinGW32 -> [ "o", "O", "obj", "OBJ" ] _ -> [ "o" ] dynlib_suffixes :: Platform -> [String] dynlib_suffixes platform = case platformOS platform of OSMinGW32 -> ["dll", "DLL"] OSDarwin -> ["dylib", "so"] _ -> ["so"] isHaskellishSuffix, isBackpackishSuffix, isHaskellSrcSuffix, isCishSuffix, isHaskellUserSrcSuffix, isJsSuffix, isHaskellSigSuffix :: String -> Bool isHaskellishSuffix s = s `elem` haskellish_suffixes isBackpackishSuffix s = s `elem` backpackish_suffixes isHaskellSigSuffix s = s `elem` haskellish_sig_suffixes isHaskellSrcSuffix s = s `elem` haskellish_src_suffixes isCishSuffix s = s `elem` cish_suffixes isJsSuffix s = s `elem` js_suffixes isHaskellUserSrcSuffix s = s `elem` haskellish_user_src_suffixes isObjectSuffix, isDynLibSuffix :: Platform -> String -> Bool isObjectSuffix platform s = s `elem` objish_suffixes platform isDynLibSuffix platform s = s `elem` dynlib_suffixes platform isSourceSuffix :: String -> Bool isSourceSuffix suff = isHaskellishSuffix suff || isCishSuffix suff || isJsSuffix suff || isBackpackishSuffix suff -- | When we are given files (modified by -x arguments) we need -- to determine if they are Haskellish or not to figure out -- how we should try to compile it. The rules are: -- -- 1. 
If no -x flag was specified, we check to see if -- the file looks like a module name, has no extension, -- or has a Haskell source extension. -- -- 2. If an -x flag was specified, we just make sure the -- specified suffix is a Haskell one. isHaskellishTarget :: (String, Maybe Phase) -> Bool isHaskellishTarget (f,Nothing) = looksLikeModuleName f || isHaskellSrcFilename f || not (hasExtension f) isHaskellishTarget (_,Just phase) = phase `notElem` [ As True, As False, Cc, Cobjc, Cobjcxx, CmmCpp, Cmm, Js , StopLn] isHaskellishFilename, isHaskellSrcFilename, isCishFilename, isHaskellUserSrcFilename, isSourceFilename, isHaskellSigFilename :: FilePath -> Bool -- takeExtension return .foo, so we drop 1 to get rid of the . isHaskellishFilename f = isHaskellishSuffix (drop 1 $ takeExtension f) isHaskellSrcFilename f = isHaskellSrcSuffix (drop 1 $ takeExtension f) isCishFilename f = isCishSuffix (drop 1 $ takeExtension f) isHaskellUserSrcFilename f = isHaskellUserSrcSuffix (drop 1 $ takeExtension f) isSourceFilename f = isSourceSuffix (drop 1 $ takeExtension f) isHaskellSigFilename f = isHaskellSigSuffix (drop 1 $ takeExtension f) isObjectFilename, isDynLibFilename :: Platform -> FilePath -> Bool isObjectFilename platform f = isObjectSuffix platform (drop 1 $ takeExtension f) isDynLibFilename platform f = isDynLibSuffix platform (drop 1 $ takeExtension f) -- | Foreign language of the phase if the phase deals with a foreign code phaseForeignLanguage :: Phase -> Maybe ForeignSrcLang phaseForeignLanguage phase = case phase of Cc -> Just LangC Ccxx -> Just LangCxx Cobjc -> Just LangObjc Cobjcxx -> Just LangObjcxx HCc -> Just LangC As _ -> Just LangAsm MergeForeign -> Just RawObject Js -> Just LangJs _ -> Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Pipeline/0000755000000000000000000000000007346545000020706 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Pipeline/Monad.hs0000644000000000000000000000356207346545000022306 
0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE MultiParamTypeClasses #-} -- | The 'TPipelineClass' and 'MonadUse' classes and associated types module GHC.Driver.Pipeline.Monad ( TPipelineClass, MonadUse(..) , PipeEnv(..) , PipelineOutput(..) ) where import GHC.Prelude import Control.Monad.IO.Class import qualified Data.Kind as K import GHC.Driver.Phases import GHC.Utils.TmpFs -- The interface that the pipeline monad must implement. type TPipelineClass (f :: K.Type -> K.Type) (m :: K.Type -> K.Type) = (Functor m, MonadIO m, Applicative m, Monad m, MonadUse f m) -- | Lift a `f` action into an `m` action. class MonadUse f m where use :: f a -> m a -- PipeEnv: invariant information passed down through the pipeline data PipeEnv = PipeEnv { stop_phase :: StopPhase, -- ^ Stop just after this phase src_filename :: String, -- ^ basename of original input source src_basename :: String, -- ^ basename of original input source src_suffix :: String, -- ^ its extension start_phase :: Phase, output_spec :: PipelineOutput -- ^ says where to put the pipeline output } data PipelineOutput = Temporary TempFileLifetime -- ^ Output should be to a temporary file: we're going to -- run more compilation steps on this output later. | Persistent -- ^ We want a persistent file, i.e. a file in the current directory -- derived from the input filename, but with the appropriate extension. -- eg. in "ghc -c Foo.hs" the output goes into ./Foo.o. | SpecificFile -- ^ The output must go into the specific outputFile in DynFlags. -- We don't store the filename in the constructor as it changes -- when doing -dynamic-too. | NoOutputFile -- ^ No output should be created, like in Interpreter or NoBackend. 
deriving Show ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Pipeline/Phases.hs0000644000000000000000000000534507346545000022474 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE RankNTypes #-} module GHC.Driver.Pipeline.Phases (TPhase(..), PhaseHook(..)) where import GHC.Prelude import GHC.Driver.Pipeline.Monad import GHC.Driver.Env.Types import GHC.Driver.DynFlags import GHC.Types.SourceFile import GHC.Unit.Module.ModSummary import GHC.Unit.Module.Status import GHC.Tc.Types ( FrontendResult ) import GHC.Types.Error import GHC.Driver.Errors.Types import GHC.Fingerprint.Type import GHC.Unit.Module.Location ( ModLocation ) import GHC.Unit.Module.ModIface import GHC.Driver.Phases import Language.Haskell.Syntax.Module.Name ( ModuleName ) import GHC.Unit.Home.ModInfo -- Typed Pipeline Phases -- MP: TODO: We need to refine the arguments to each of these phases so recompilation -- can be smarter. For example, rather than passing a whole HscEnv, just pass the options -- which each phase depends on, then recompilation checking can decide to only rerun each -- phase if the inputs have been modified. 
data TPhase res where T_Unlit :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_FileArgs :: HscEnv -> FilePath -> TPhase (DynFlags, Messages PsMessage, Messages DriverMessage) T_Cpp :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_HsPp :: PipeEnv -> HscEnv -> FilePath -> FilePath -> TPhase FilePath T_HscRecomp :: PipeEnv -> HscEnv -> FilePath -> HscSource -> TPhase (HscEnv, ModSummary, HscRecompStatus) T_Hsc :: HscEnv -> ModSummary -> TPhase (FrontendResult, Messages GhcMessage) T_HscPostTc :: HscEnv -> ModSummary -> FrontendResult -> Messages GhcMessage -> Maybe Fingerprint -> TPhase HscBackendAction T_HscBackend :: PipeEnv -> HscEnv -> ModuleName -> HscSource -> ModLocation -> HscBackendAction -> TPhase ([FilePath], ModIface, HomeModLinkable, FilePath) T_CmmCpp :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_Cmm :: PipeEnv -> HscEnv -> FilePath -> TPhase ([FilePath], FilePath) T_Cc :: Phase -> PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_As :: Bool -> PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_Js :: PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_ForeignJs :: PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_LlvmOpt :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_LlvmLlc :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_LlvmAs :: Bool -> PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_LlvmMangle :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_MergeForeign :: PipeEnv -> HscEnv -> FilePath -> [FilePath] -> TPhase FilePath -- | A wrapper around the interpretation function for phases. data PhaseHook = PhaseHook (forall a . 
TPhase a -> IO a) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Plugins.hs0000644000000000000000000004072107346545000021122 0ustar0000000000000000{-# LANGUAGE RankNTypes #-} {-# LANGUAGE CPP #-} #if defined(HAVE_INTERNAL_INTERPRETER) && defined(CAN_LOAD_DLL) {-# LANGUAGE MagicHash #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE UnboxedTuples #-} #endif -- | Definitions for writing /plugins/ for GHC. Plugins can hook into -- several areas of the compiler. See the 'Plugin' type. These plugins -- include type-checker plugins, source plugins, and core-to-core plugins. module GHC.Driver.Plugins ( -- * Plugins Plugins (..) , emptyPlugins , Plugin(..) , defaultPlugin , CommandLineOption , PsMessages(..) , ParsedResult(..) -- * External plugins , loadExternalPlugins -- ** Recompilation checking , purePlugin, impurePlugin, flagRecompile , PluginRecompile(..) -- * Plugin types -- ** Frontend plugins , FrontendPlugin(..), defaultFrontendPlugin, FrontendPluginAction -- ** Core plugins -- | Core plugins allow plugins to register as a Core-to-Core pass. , CorePlugin -- ** Typechecker plugins -- | Typechecker plugins allow plugins to provide evidence to the -- typechecker. , TcPlugin -- ** Source plugins -- | GHC offers a number of points where plugins can access and modify its -- front-end (\"source\") representation. These include: -- -- - access to the parser result with 'parsedResultAction' -- - access to the renamed AST with 'renamedResultAction' -- - access to the typechecked AST with 'typeCheckResultAction' -- - access to the Template Haskell splices with 'spliceRunAction' -- - access to loaded interface files with 'interfaceLoadAction' -- , keepRenamedSource -- ** Defaulting plugins -- | Defaulting plugins can add candidate types to the defaulting -- mechanism. 
, DefaultingPlugin -- ** Hole fit plugins -- | hole fit plugins allow plugins to change the behavior of valid hole -- fit suggestions , HoleFitPluginR -- ** Late plugins -- | Late plugins can access and modify the core of a module after -- optimizations have been applied and after interface creation. , LatePlugin -- * Internal , PluginWithArgs(..), pluginsWithArgs, pluginRecompile' , LoadedPlugin(..), lpModuleName , StaticPlugin(..) , ExternalPlugin(..) , mapPlugins, withPlugins, withPlugins_ ) where import GHC.Prelude import GHC.Driver.Env import GHC.Driver.Monad import GHC.Driver.Phases import GHC.Driver.Plugins.External import GHC.Unit.Module import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModSummary import GHC.Parser.Errors.Types (PsWarning, PsError) import qualified GHC.Tc.Types import GHC.Tc.Types ( TcGblEnv, IfM, TcM, tcg_rn_decls, tcg_rn_exports ) import GHC.Tc.Errors.Hole.Plugin ( HoleFitPluginR ) import GHC.Core.Opt.Monad ( CoreM ) import GHC.Core.Opt.Pipeline.Types ( CoreToDo ) import GHC.Hs import GHC.Types.Error (Messages) import GHC.Linker.Types import GHC.Types.CostCentre.State import GHC.Types.Unique.DFM import GHC.Unit.Module.ModGuts (CgGuts) import GHC.Utils.Fingerprint import GHC.Utils.Outputable import GHC.Utils.Panic import Data.List (sort) --Qualified import so we can define a Semigroup instance -- but it doesn't clash with Outputable.<> import qualified Data.Semigroup import Control.Monad #if defined(HAVE_INTERNAL_INTERPRETER) && defined(CAN_LOAD_DLL) import GHCi.ObjLink import GHC.Exts (addrToAny#, Ptr(..)) import GHC.Utils.Encoding #endif -- | Command line options gathered from the -PModule.Name:stuff syntax -- are given to you as this type type CommandLineOption = String -- | Errors and warnings produced by the parser data PsMessages = PsMessages { psWarnings :: Messages PsWarning , psErrors :: Messages PsError } -- | Result of running the parser and the parser plugin data ParsedResult = ParsedResult { -- | Parsed module, 
potentially modified by a plugin parsedResultModule :: HsParsedModule , -- | Warnings and errors from parser, potentially modified by a plugin parsedResultMessages :: PsMessages } -- | 'Plugin' is the compiler plugin data type. Try to avoid -- constructing one of these directly, and just modify some fields of -- 'defaultPlugin' instead: this is to try and preserve source-code -- compatibility when we add fields to this. -- -- Nonetheless, this API is preliminary and highly likely to change in -- the future. data Plugin = Plugin { installCoreToDos :: CorePlugin -- ^ Modify the Core pipeline that will be used for compilation. -- This is called as the Core pipeline is built for every module -- being compiled, and plugins get the opportunity to modify the -- pipeline in a nondeterministic order. , tcPlugin :: TcPlugin -- ^ An optional typechecker plugin, which may modify the -- behaviour of the constraint solver. , defaultingPlugin :: DefaultingPlugin -- ^ An optional defaulting plugin, which may specify the -- additional type-defaulting rules. , holeFitPlugin :: HoleFitPlugin -- ^ An optional plugin to handle hole fits, which may re-order -- or change the list of valid hole fits and refinement hole fits. , driverPlugin :: [CommandLineOption] -> HscEnv -> IO HscEnv -- ^ An optional plugin to update 'HscEnv', right after plugin loading. This -- can be used to register hooks or tweak any field of 'DynFlags' before -- doing actual work on a module. -- -- @since 8.10.1 , latePlugin :: LatePlugin -- ^ A plugin that runs after interface creation and after late cost centre -- insertion. Useful for transformations that should not impact interfaces -- or optimization at all. -- -- @since 9.10.1 , pluginRecompile :: [CommandLineOption] -> IO PluginRecompile -- ^ Specify how the plugin should affect recompilation. , parsedResultAction :: [CommandLineOption] -> ModSummary -> ParsedResult -> Hsc ParsedResult -- ^ Modify the module when it is parsed. 
This is called by -- "GHC.Driver.Main" when the parser has produced no or only non-fatal -- errors. -- Compilation will fail if the messages produced by this function contain -- any errors. , renamedResultAction :: [CommandLineOption] -> TcGblEnv -> HsGroup GhcRn -> TcM (TcGblEnv, HsGroup GhcRn) -- ^ Modify each group after it is renamed. This is called after each -- `HsGroup` has been renamed. , typeCheckResultAction :: [CommandLineOption] -> ModSummary -> TcGblEnv -> TcM TcGblEnv -- ^ Modify the module when it is type checked. This is called at the -- very end of typechecking. , spliceRunAction :: [CommandLineOption] -> LHsExpr GhcTc -> TcM (LHsExpr GhcTc) -- ^ Modify the TH splice or quasiqoute before it is run. , interfaceLoadAction :: forall lcl . [CommandLineOption] -> ModIface -> IfM lcl ModIface -- ^ Modify an interface that have been loaded. This is called by -- "GHC.Iface.Load" when an interface is successfully loaded. Not applied to -- the loading of the plugin interface. Tools that rely on information from -- modules other than the currently compiled one should implement this -- function. } -- Note [Source plugins] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The `Plugin` datatype have been extended by fields that allow access to the -- different inner representations that are generated during the compilation -- process. These fields are `parsedResultAction`, `renamedResultAction`, -- `typeCheckResultAction`, `spliceRunAction` and `interfaceLoadAction`. -- -- The main purpose of these plugins is to help tool developers. They allow -- development tools to extract the information about the source code of a big -- Haskell project during the normal build procedure. In this case the plugin -- acts as the tools access point to the compiler that can be controlled by -- compiler flags. This is important because the manipulation of compiler flags -- is supported by most build environment. 
-- -- For the full discussion, check the full proposal at: -- https://gitlab.haskell.org/ghc/ghc/wikis/extended-plugins-proposal data PluginWithArgs = PluginWithArgs { paPlugin :: Plugin -- ^ the actual callable plugin , paArguments :: [CommandLineOption] -- ^ command line arguments for the plugin } -- | A plugin with its arguments. The result of loading the plugin. data LoadedPlugin = LoadedPlugin { lpPlugin :: PluginWithArgs -- ^ the actual plugin together with its commandline arguments , lpModule :: ModIface -- ^ the module containing the plugin } -- | External plugin loaded directly from a library without loading module -- interfaces data ExternalPlugin = ExternalPlugin { epPlugin :: PluginWithArgs -- ^ Plugin with its arguments , epUnit :: String -- ^ UnitId , epModule :: String -- ^ Module name } -- | A static plugin with its arguments. For registering compiled-in plugins -- through the GHC API. data StaticPlugin = StaticPlugin { spPlugin :: PluginWithArgs -- ^ the actual plugin together with its commandline arguments , spInitialised :: Bool -- ^ has this plugin been initialised (i.e. driverPlugin has been run) } lpModuleName :: LoadedPlugin -> ModuleName lpModuleName = moduleName . mi_module . 
lpModule pluginRecompile' :: PluginWithArgs -> IO PluginRecompile pluginRecompile' (PluginWithArgs plugin args) = pluginRecompile plugin args data PluginRecompile = ForceRecompile | NoForceRecompile | MaybeRecompile Fingerprint instance Outputable PluginRecompile where ppr ForceRecompile = text "ForceRecompile" ppr NoForceRecompile = text "NoForceRecompile" ppr (MaybeRecompile fp) = text "MaybeRecompile" <+> ppr fp instance Semigroup PluginRecompile where ForceRecompile <> _ = ForceRecompile NoForceRecompile <> r = r MaybeRecompile fp <> NoForceRecompile = MaybeRecompile fp MaybeRecompile fp <> MaybeRecompile fp' = MaybeRecompile (fingerprintFingerprints [fp, fp']) MaybeRecompile _fp <> ForceRecompile = ForceRecompile instance Monoid PluginRecompile where mempty = NoForceRecompile type CorePlugin = [CommandLineOption] -> [CoreToDo] -> CoreM [CoreToDo] type TcPlugin = [CommandLineOption] -> Maybe GHC.Tc.Types.TcPlugin type DefaultingPlugin = [CommandLineOption] -> Maybe GHC.Tc.Types.DefaultingPlugin type HoleFitPlugin = [CommandLineOption] -> Maybe HoleFitPluginR type LatePlugin = HscEnv -> [CommandLineOption] -> (CgGuts, CostCentreState) -> IO (CgGuts, CostCentreState) purePlugin, impurePlugin, flagRecompile :: [CommandLineOption] -> IO PluginRecompile purePlugin _args = return NoForceRecompile impurePlugin _args = return ForceRecompile flagRecompile = return . MaybeRecompile . fingerprintFingerprints . map fingerprintString . sort -- | Default plugin: does nothing at all, except for marking that safe -- inference has failed unless @-fplugin-trustworthy@ is passed. For -- compatibility reason you should base all your plugin definitions on this -- default value. 
defaultPlugin :: Plugin defaultPlugin = Plugin { installCoreToDos = const return , tcPlugin = const Nothing , defaultingPlugin = const Nothing , holeFitPlugin = const Nothing , driverPlugin = const return , latePlugin = \_ -> const return , pluginRecompile = impurePlugin , renamedResultAction = \_ env grp -> return (env, grp) , parsedResultAction = \_ _ -> return , typeCheckResultAction = \_ _ -> return , spliceRunAction = \_ -> return , interfaceLoadAction = \_ -> return } -- | A renamer plugin which mades the renamed source available in -- a typechecker plugin. keepRenamedSource :: [CommandLineOption] -> TcGblEnv -> HsGroup GhcRn -> TcM (TcGblEnv, HsGroup GhcRn) keepRenamedSource _ gbl_env group = return (gbl_env { tcg_rn_decls = update (tcg_rn_decls gbl_env) , tcg_rn_exports = update_exports (tcg_rn_exports gbl_env) }, group) where update_exports Nothing = Just [] update_exports m = m update Nothing = Just emptyRnGroup update m = m type PluginOperation m a = Plugin -> [CommandLineOption] -> a -> m a type ConstPluginOperation m a = Plugin -> [CommandLineOption] -> a -> m () data Plugins = Plugins { staticPlugins :: ![StaticPlugin] -- ^ Static plugins which do not need dynamic loading. These plugins are -- intended to be added by GHC API users directly to this list. -- -- To add dynamically loaded plugins through the GHC API see -- 'addPluginModuleName' instead. , externalPlugins :: ![ExternalPlugin] -- ^ External plugins loaded directly from libraries without loading -- module interfaces. , loadedPlugins :: ![LoadedPlugin] -- ^ Plugins dynamically loaded after processing arguments. What -- will be loaded here is directed by DynFlags.pluginModNames. -- Arguments are loaded from DynFlags.pluginModNameOpts. -- -- The purpose of this field is to cache the plugins so they -- don't have to be loaded each time they are needed. See -- 'GHC.Runtime.Loader.initializePlugins'. 
, loadedPluginDeps :: !([Linkable], PkgsLoaded) -- ^ The object files required by the loaded plugins -- See Note [Plugin dependencies] } emptyPlugins :: Plugins emptyPlugins = Plugins { staticPlugins = [] , externalPlugins = [] , loadedPlugins = [] , loadedPluginDeps = ([], emptyUDFM) } pluginsWithArgs :: Plugins -> [PluginWithArgs] pluginsWithArgs plugins = map lpPlugin (loadedPlugins plugins) ++ map epPlugin (externalPlugins plugins) ++ map spPlugin (staticPlugins plugins) -- | Perform an operation by using all of the plugins in turn. withPlugins :: Monad m => Plugins -> PluginOperation m a -> a -> m a withPlugins plugins transformation input = foldM go input (pluginsWithArgs plugins) where go arg (PluginWithArgs p opts) = transformation p opts arg mapPlugins :: Plugins -> (Plugin -> [CommandLineOption] -> a) -> [a] mapPlugins plugins f = map (\(PluginWithArgs p opts) -> f p opts) (pluginsWithArgs plugins) -- | Perform a constant operation by using all of the plugins in turn. withPlugins_ :: Monad m => Plugins -> ConstPluginOperation m a -> a -> m () withPlugins_ plugins transformation input = mapM_ (\(PluginWithArgs p opts) -> transformation p opts input) (pluginsWithArgs plugins) type FrontendPluginAction = [String] -> [(String, Maybe Phase)] -> Ghc () data FrontendPlugin = FrontendPlugin { frontend :: FrontendPluginAction } defaultFrontendPlugin :: FrontendPlugin defaultFrontendPlugin = FrontendPlugin { frontend = \_ _ -> return () } -- | Load external plugins loadExternalPlugins :: [ExternalPluginSpec] -> IO [ExternalPlugin] loadExternalPlugins [] = return [] #if !defined(HAVE_INTERNAL_INTERPRETER) loadExternalPlugins _ = do panic "loadExternalPlugins: can't load external plugins with GHC built without internal interpreter" #elif !defined(CAN_LOAD_DLL) loadExternalPlugins _ = do panic "loadExternalPlugins: loading shared libraries isn't supported by this compiler" #else loadExternalPlugins ps = do -- initialize the linker initObjLinker RetainCAFs -- load 
plugins forM ps $ \(ExternalPluginSpec path unit mod_name opts) -> do loadExternalPluginLib path -- lookup symbol let ztmp = zEncodeString mod_name ++ "_plugin_closure" symbol | null unit = ztmp | otherwise = zEncodeString unit ++ "_" ++ ztmp plugin <- lookupSymbol symbol >>= \case Nothing -> pprPanic "loadExternalPlugins" (vcat [ text "Symbol not found" , text " Library path: " <> text path , text " Symbol : " <> text symbol ]) Just (Ptr addr) -> case addrToAny# addr of (# a #) -> pure a pure $ ExternalPlugin (PluginWithArgs plugin opts) unit mod_name loadExternalPluginLib :: FilePath -> IO () loadExternalPluginLib path = do -- load library loadDLL path >>= \case Left errmsg -> pprPanic "loadExternalPluginLib" (vcat [ text "Can't load plugin library" , text " Library path: " <> text path , text " Error : " <> text errmsg ]) Right _ -> do -- resolve objects resolveObjs >>= \case True -> return () False -> pprPanic "loadExternalPluginLib" (text "Unable to resolve objects for library: " <> text path) #endif ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Plugins.hs-boot0000644000000000000000000000041007346545000022052 0ustar0000000000000000-- The plugins datatype is stored in DynFlags, so it needs to be -- exposed without importing all of its implementation. module GHC.Driver.Plugins where import GHC.Prelude () data Plugin data Plugins emptyPlugins :: Plugins data LoadedPlugin data StaticPlugin ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Plugins/0000755000000000000000000000000007346545000020562 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Plugins/External.hs0000644000000000000000000000620307346545000022701 0ustar0000000000000000-- | External plugins -- -- GHC supports two kinds of "static" plugins: -- 1. internal: setup with GHC-API -- 2. 
external: setup as explained below and loaded from shared libraries -- -- The intended use case for external static plugins is with cross compilers: at -- the time of writing, GHC is mono-target and a GHC cross-compiler (i.e. when -- host /= target) can't build nor load plugins for the host using the -- "non-static" plugin approach. Fixing this is tracked in #14335. If you're not -- using a cross-compiler, you'd better use non-static plugins which are easier -- to build and and safer to use (see below). -- -- External static plugins can be configured via the command-line with -- the -fplugin-library flag. Syntax is: -- -- -fplugin-library=⟨file-path⟩;⟨unit-id⟩;⟨module⟩;⟨args⟩ -- -- Example: -- -fplugin-library=path/to/plugin;package-123;Plugin.Module;["Argument","List"] -- -- Building the plugin library: -- 1. link with the libraries used to build the compiler you target. If you -- target a cross-compiler (stage2), you can't directly use it to build the -- plugin library. Use the stage1 compiler instead. -- -- 2. if you use cabal to build the library, its unit-id will be set by cabal -- and will contain a hash (e.g. "my-plugin-unit-1345656546ABCDEF"). To force -- the unit id, use GHC's `-this-unit-id` command line flag: -- e.g. -this-unit-id my-plugin-unit -- You can set this in the .cabal file of your library with the following -- stanza: `ghc-options: -this-unit-id my-plugin-unit` -- -- 3. To make your plugin easier to distribute, you may want to link it -- statically with all its dependencies. You would need to use `-shared` -- without `-dynamic` when building your library. -- -- However, all the static dependencies have to be built with `-fPIC` and it's -- not done by default. See -- https://www.hobson.space/posts/haskell-foreign-library/ for a way to modify -- the compiler to do it. -- -- In any case, don't link your plugin library statically with the RTS (e.g. 
-- use `-fno-link-rts`) as there are some global variables in the RTS that must -- be shared between the plugin and the compiler. -- -- With external static plugins we don't check the type of the `plugin` closure -- we look up. If it's not a valid `Plugin` value, it will probably crash badly. -- module GHC.Driver.Plugins.External ( ExternalPluginSpec (..) , parseExternalPluginSpec ) where import GHC.Prelude import Text.Read -- | External plugin spec data ExternalPluginSpec = ExternalPluginSpec { esp_lib :: !FilePath , esp_unit_id :: !String , esp_module :: !String , esp_args :: ![String] } -- | Parser external static plugin specification from command-line flag parseExternalPluginSpec :: String -> Maybe ExternalPluginSpec parseExternalPluginSpec optflag = case break (== ';') optflag of (libPath, _:rest) -> case break (== ';') rest of (libName, _:pack) -> case break (== ';') pack of (modName, _:args) -> case readMaybe args of Just as -> Just (ExternalPluginSpec libPath libName modName as) Nothing -> Nothing _ -> Nothing _ -> Nothing _ -> Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Ppr.hs0000644000000000000000000000233507346545000020241 0ustar0000000000000000-- | Printing related functions that depend on session state (DynFlags) module GHC.Driver.Ppr ( showSDoc , showSDocUnsafe , showSDocForUser , showPpr , showPprUnsafe , printForUser ) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Unit.State import GHC.Utils.Outputable import GHC.Utils.Ppr ( Mode(..) 
) import System.IO ( Handle ) -- | Show a SDoc as a String with the default user style showSDoc :: DynFlags -> SDoc -> String showSDoc dflags sdoc = renderWithContext (initSDocContext dflags defaultUserStyle) sdoc showPpr :: Outputable a => DynFlags -> a -> String showPpr dflags thing = showSDoc dflags (ppr thing) -- | Allows caller to specify the NamePprCtx to use showSDocForUser :: DynFlags -> UnitState -> NamePprCtx -> SDoc -> String showSDocForUser dflags unit_state name_ppr_ctx doc = renderWithContext (initSDocContext dflags sty) doc' where sty = mkUserStyle name_ppr_ctx AllTheWay doc' = pprWithUnitState unit_state doc printForUser :: DynFlags -> Handle -> NamePprCtx -> Depth -> SDoc -> IO () printForUser dflags handle name_ppr_ctx depth doc = printSDocLn ctx (PageMode False) handle doc where ctx = initSDocContext dflags (mkUserStyle name_ppr_ctx depth) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Driver/Session.hs0000644000000000000000000050557207346545000021136 0ustar0000000000000000{-# OPTIONS_GHC -O0 #-} {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE LambdaCase #-} ------------------------------------------------------------------------------- -- -- | Dynamic flags -- -- Most flags are dynamic flags, which means they can change from compilation -- to compilation using @OPTIONS_GHC@ pragmas, and in a multi-session GHC each -- session can be using different dynamic flags. Dynamic flags can also be set -- at the prompt in GHCi. 
-- -- (c) The University of Glasgow 2005 -- ------------------------------------------------------------------------------- {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Driver.Session ( -- * Dynamic flags and associated configuration types DumpFlag(..), GeneralFlag(..), WarningFlag(..), DiagnosticReason(..), Language(..), FatalMessager, FlushOut(..), ProfAuto(..), glasgowExtsFlags, hasPprDebug, hasNoDebugOutput, hasNoStateHack, hasNoOptCoercion, dopt, dopt_set, dopt_unset, gopt, gopt_set, gopt_unset, setGeneralFlag', unSetGeneralFlag', wopt, wopt_set, wopt_unset, wopt_fatal, wopt_set_fatal, wopt_unset_fatal, wopt_set_all_custom, wopt_unset_all_custom, wopt_set_all_fatal_custom, wopt_unset_all_fatal_custom, wopt_set_custom, wopt_unset_custom, wopt_set_fatal_custom, wopt_unset_fatal_custom, wopt_any_custom, xopt, xopt_set, xopt_unset, xopt_set_unlessExplSpec, xopt_DuplicateRecordFields, xopt_FieldSelectors, lang_set, DynamicTooState(..), dynamicTooState, setDynamicNow, sccProfilingEnabled, needSourceNotes, OnOff(..), DynFlags(..), ParMakeCount(..), outputFile, objectSuf, ways, FlagSpec(..), HasDynFlags(..), ContainsDynFlags(..), RtsOptsEnabled(..), GhcMode(..), isOneShot, GhcLink(..), isNoLink, PackageFlag(..), PackageArg(..), ModRenaming(..), packageFlagsChanged, IgnorePackageFlag(..), TrustFlag(..), PackageDBFlag(..), PkgDbRef(..), Option(..), showOpt, DynLibLoader(..), fFlags, fLangFlags, xFlags, wWarningFlags, makeDynFlagsConsistent, positionIndependent, optimisationFlags, codeGenFlags, setFlagsFromEnvFile, pprDynFlagsDiff, flagSpecOf, targetProfile, -- ** Safe Haskell safeHaskellOn, safeHaskellModeEnabled, safeImportsOn, safeLanguageOn, safeInferOn, packageTrustOn, safeDirectImpsReq, safeImplicitImpsReq, unsafeFlags, unsafeFlagsForInfer, -- ** System tool settings and locations Settings(..), sProgramName, sProjectVersion, sGhcUsagePath, sGhciUsagePath, sToolDir, sTopDir, sGlobalPackageDatabasePath, sLdSupportsCompactUnwind, sLdSupportsFilelist, 
sLdIsGnuLd, sGccSupportsNoPie, sPgm_L, sPgm_P, sPgm_F, sPgm_c, sPgm_cxx, sPgm_cpp, sPgm_a, sPgm_l, sPgm_lm, sPgm_windres, sPgm_ar, sPgm_ranlib, sPgm_lo, sPgm_lc, sPgm_las, sPgm_i, sOpt_L, sOpt_P, sOpt_P_fingerprint, sOpt_JSP, sOpt_JSP_fingerprint, sOpt_CmmP, sOpt_CmmP_fingerprint, sOpt_F, sOpt_c, sOpt_cxx, sOpt_a, sOpt_l, sOpt_lm, sOpt_windres, sOpt_lo, sOpt_lc, sOpt_i, sExtraGccViaCFlags, sTargetPlatformString, sGhcWithInterpreter, sLibFFI, sTargetRTSLinkerOnlySupportsSharedLibs, GhcNameVersion(..), FileSettings(..), PlatformMisc(..), settings, programName, projectVersion, ghcUsagePath, ghciUsagePath, topDir, versionedAppDir, versionedFilePath, extraGccViaCFlags, globalPackageDatabasePath, pgm_L, pgm_P, pgm_JSP, pgm_CmmP, pgm_F, pgm_c, pgm_cxx, pgm_cpp, pgm_a, pgm_l, pgm_lm, pgm_windres, pgm_ar, pgm_ranlib, pgm_lo, pgm_lc, pgm_las, pgm_i, opt_L, opt_P, opt_JSP, opt_CmmP, opt_F, opt_c, opt_cxx, opt_a, opt_l, opt_lm, opt_i, opt_P_signature, opt_JSP_signature, opt_CmmP_signature, opt_windres, opt_lo, opt_lc, opt_las, updatePlatformConstants, -- ** Manipulating DynFlags addPluginModuleName, defaultDynFlags, -- Settings -> DynFlags initDynFlags, -- DynFlags -> IO DynFlags defaultFatalMessager, defaultFlushOut, setOutputFile, setDynOutputFile, setOutputHi, setDynOutputHi, augmentByWorkingDirectory, getOpts, -- DynFlags -> (DynFlags -> [a]) -> [a] getVerbFlags, updOptLevel, setTmpDir, setUnitId, TurnOnFlag, turnOn, turnOff, impliedGFlags, impliedOffGFlags, impliedXFlags, -- ** State CmdLineP(..), runCmdLineP, getCmdLineState, putCmdLineState, processCmdLineP, -- ** Parsing DynFlags parseDynamicFlagsCmdLine, parseDynamicFilePragma, parseDynamicFlagsFull, flagSuggestions, -- ** Available DynFlags allNonDeprecatedFlags, flagsAll, flagsDynamic, flagsPackage, flagsForCompletion, supportedLanguagesAndExtensions, languageExtensions, -- ** DynFlags C compiler options picCCOpts, picPOpts, -- ** DynFlags C linker options pieCCLDOpts, -- * Compiler configuration suitable for 
display to the user compilerInfo, wordAlignment, setUnsafeGlobalDynFlags, -- * SSE and AVX isSse4_2Enabled, isBmiEnabled, isBmi2Enabled, isAvxEnabled, isAvx2Enabled, isAvx512cdEnabled, isAvx512erEnabled, isAvx512fEnabled, isAvx512pfEnabled, isFmaEnabled, -- * Linker/compiler information useXLinkerRPath, -- * Include specifications IncludeSpecs(..), addGlobalInclude, addQuoteInclude, flattenIncludes, addImplicitQuoteInclude, -- * SDoc initSDocContext, initDefaultSDocContext, initPromotionTickContext, ) where import GHC.Prelude import GHC.Platform import GHC.Platform.Ways import GHC.Platform.Profile import GHC.Unit.Types import GHC.Unit.Parser import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Driver.DynFlags import GHC.Driver.Config.Diagnostic import GHC.Driver.Flags import GHC.Driver.Backend import GHC.Driver.Errors.Types import GHC.Driver.Plugins.External import GHC.Settings.Config import GHC.Core.Unfold import GHC.Driver.CmdLine import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Constants (debugIsOn) import GHC.Utils.GlobalVars import GHC.Data.Maybe import GHC.Data.Bool import GHC.Data.StringBuffer (stringToStringBuffer) import GHC.Types.Error import GHC.Types.Name.Reader (RdrName(..)) import GHC.Types.Name.Occurrence (isVarOcc, occNameString) import GHC.Utils.Monad import GHC.Types.SrcLoc import GHC.Types.SafeHaskell import GHC.Types.Basic ( treatZeroAsInf ) import GHC.Data.FastString import GHC.Utils.TmpFs import GHC.Utils.Fingerprint import GHC.Utils.Outputable import GHC.Utils.Error (emptyDiagOpts) import GHC.Settings import GHC.CmmToAsm.CFG.Weight import GHC.Core.Opt.CallerCC import GHC.Parser (parseIdentifier) import GHC.Parser.Lexer (mkParserOpts, initParserState, P(..), ParseResult(..)) import GHC.SysTools.BaseDir ( expandToolDir, expandTopDir ) import Data.IORef import Control.Arrow ((&&&)) import Control.Monad import Control.Monad.Trans.State as State import Data.Functor.Identity import Data.Ord import Data.Char import 
Data.List (intercalate, sortBy, partition) import qualified Data.List.NonEmpty as NE import qualified Data.Map as Map import qualified Data.Set as Set import Data.Word import System.FilePath import Text.ParserCombinators.ReadP hiding (char) import Text.ParserCombinators.ReadP as R import qualified GHC.Data.EnumSet as EnumSet import qualified GHC.LanguageExtensions as LangExt -- Note [Updating flag description in the User's Guide] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- If you modify anything in this file please make sure that your changes are -- described in the User's Guide. Please update the flag description in the -- users guide (docs/users_guide) whenever you add or change a flag. -- Please make sure you add ":since:" information to new flags. -- Note [Supporting CLI completion] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- The command line interface completion (in for example bash) is an easy way -- for the developer to learn what flags are available from GHC. -- GHC helps by separating which flags are available when compiling with GHC, -- and which flags are available when using GHCi. -- A flag is assumed to either work in both these modes, or only in one of them. -- When adding or changing a flag, please consider for which mode the flag will -- have effect, and annotate it accordingly. For Flags use defFlag, defGhcFlag, -- defGhciFlag, and for FlagSpec use flagSpec or flagGhciSpec. -- Note [Adding a language extension] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- There are a few steps to adding (or removing) a language extension, -- -- * Adding the extension to GHC.LanguageExtensions -- -- The Extension type in libraries/ghc-boot-th/GHC/LanguageExtensions/Type.hs -- is the canonical list of language extensions known by GHC. -- -- * Adding a flag to DynFlags.xFlags -- -- This is fairly self-explanatory. The name should be concise, memorable, -- and consistent with any previous implementations of the similar idea in -- other Haskell compilers. 
-- -- * Adding the flag to the documentation -- -- This is the same as any other flag. See -- Note [Updating flag description in the User's Guide] -- -- * Adding the flag to Cabal -- -- The Cabal library has its own list of all language extensions supported -- by all major compilers. This is the list that user code being uploaded -- to Hackage is checked against to ensure language extension validity. -- Consequently, it is very important that this list remains up-to-date. -- -- To this end, there is a testsuite test (testsuite/tests/driver/T4437.hs) -- whose job it is to ensure these GHC's extensions are consistent with -- Cabal. -- -- The recommended workflow is, -- -- 1. Temporarily add your new language extension to the -- expectedGhcOnlyExtensions list in T4437 to ensure the test doesn't -- break while Cabal is updated. -- -- 2. After your GHC change is accepted, submit a Cabal pull request adding -- your new extension to Cabal's list (found in -- Cabal/Language/Haskell/Extension.hs). -- -- 3. After your Cabal change is accepted, let the GHC developers know so -- they can update the Cabal submodule and remove the extensions from -- expectedGhcOnlyExtensions. -- -- * Adding the flag to the GHC Wiki -- -- There is a change log tracking language extension additions and removals -- on the GHC wiki: https://gitlab.haskell.org/ghc/ghc/wikis/language-pragma-history -- -- See #4437 and #8176. -- ----------------------------------------------------------------------------- -- DynFlags {- Note [RHS Floating] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We provide both 'Opt_LocalFloatOut' and 'Opt_LocalFloatOutTopLevel' to correspond to 'doFloatFromRhs'; with this we can control floating out with GHC flags. This addresses https://gitlab.haskell.org/ghc/ghc/-/issues/13663 and allows for experimentation. -} ----------------------------------------------------------------------------- -- Accessors from 'DynFlags' -- | "unbuild" a 'Settings' from a 'DynFlags'. 
This shouldn't be needed in the -- vast majority of code. But GHCi questionably uses this to produce a default -- 'DynFlags' from which to compute a flags diff for printing. settings :: DynFlags -> Settings settings dflags = Settings { sGhcNameVersion = ghcNameVersion dflags , sFileSettings = fileSettings dflags , sTargetPlatform = targetPlatform dflags , sToolSettings = toolSettings dflags , sPlatformMisc = platformMisc dflags , sRawSettings = rawSettings dflags } pgm_L :: DynFlags -> String pgm_L dflags = toolSettings_pgm_L $ toolSettings dflags pgm_P :: DynFlags -> (String,[Option]) pgm_P dflags = toolSettings_pgm_P $ toolSettings dflags pgm_JSP :: DynFlags -> (String,[Option]) pgm_JSP dflags = toolSettings_pgm_JSP $ toolSettings dflags pgm_CmmP :: DynFlags -> (String,[Option]) pgm_CmmP dflags = toolSettings_pgm_CmmP $ toolSettings dflags pgm_F :: DynFlags -> String pgm_F dflags = toolSettings_pgm_F $ toolSettings dflags pgm_c :: DynFlags -> String pgm_c dflags = toolSettings_pgm_c $ toolSettings dflags pgm_cxx :: DynFlags -> String pgm_cxx dflags = toolSettings_pgm_cxx $ toolSettings dflags pgm_cpp :: DynFlags -> (String,[Option]) pgm_cpp dflags = toolSettings_pgm_cpp $ toolSettings dflags pgm_a :: DynFlags -> (String,[Option]) pgm_a dflags = toolSettings_pgm_a $ toolSettings dflags pgm_l :: DynFlags -> (String,[Option]) pgm_l dflags = toolSettings_pgm_l $ toolSettings dflags pgm_lm :: DynFlags -> Maybe (String,[Option]) pgm_lm dflags = toolSettings_pgm_lm $ toolSettings dflags pgm_windres :: DynFlags -> String pgm_windres dflags = toolSettings_pgm_windres $ toolSettings dflags pgm_ar :: DynFlags -> String pgm_ar dflags = toolSettings_pgm_ar $ toolSettings dflags pgm_ranlib :: DynFlags -> String pgm_ranlib dflags = toolSettings_pgm_ranlib $ toolSettings dflags pgm_lo :: DynFlags -> (String,[Option]) pgm_lo dflags = toolSettings_pgm_lo $ toolSettings dflags pgm_lc :: DynFlags -> (String,[Option]) pgm_lc dflags = toolSettings_pgm_lc $ toolSettings dflags pgm_las 
:: DynFlags -> (String,[Option]) pgm_las dflags = toolSettings_pgm_las $ toolSettings dflags pgm_i :: DynFlags -> String pgm_i dflags = toolSettings_pgm_i $ toolSettings dflags opt_L :: DynFlags -> [String] opt_L dflags = toolSettings_opt_L $ toolSettings dflags opt_P :: DynFlags -> [String] opt_P dflags = concatMap (wayOptP (targetPlatform dflags)) (ways dflags) ++ toolSettings_opt_P (toolSettings dflags) opt_JSP :: DynFlags -> [String] opt_JSP dflags = concatMap (wayOptP (targetPlatform dflags)) (ways dflags) ++ toolSettings_opt_JSP (toolSettings dflags) opt_CmmP :: DynFlags -> [String] opt_CmmP dflags = toolSettings_opt_CmmP $ toolSettings dflags -- This function packages everything that's needed to fingerprint opt_P -- flags. See Note [Repeated -optP hashing]. opt_P_signature :: DynFlags -> ([String], Fingerprint) opt_P_signature dflags = ( concatMap (wayOptP (targetPlatform dflags)) (ways dflags) , toolSettings_opt_P_fingerprint $ toolSettings dflags ) -- This function packages everything that's needed to fingerprint opt_P -- flags. See Note [Repeated -optP hashing]. opt_JSP_signature :: DynFlags -> ([String], Fingerprint) opt_JSP_signature dflags = ( concatMap (wayOptP (targetPlatform dflags)) (ways dflags) , toolSettings_opt_JSP_fingerprint $ toolSettings dflags ) -- This function packages everything that's needed to fingerprint opt_CmmP -- flags. See Note [Repeated -optP hashing]. opt_CmmP_signature :: DynFlags -> Fingerprint opt_CmmP_signature = toolSettings_opt_CmmP_fingerprint . 
toolSettings opt_F :: DynFlags -> [String] opt_F dflags= toolSettings_opt_F $ toolSettings dflags opt_c :: DynFlags -> [String] opt_c dflags = concatMap (wayOptc (targetPlatform dflags)) (ways dflags) ++ toolSettings_opt_c (toolSettings dflags) opt_cxx :: DynFlags -> [String] opt_cxx dflags = concatMap (wayOptcxx (targetPlatform dflags)) (ways dflags) ++ toolSettings_opt_cxx (toolSettings dflags) opt_a :: DynFlags -> [String] opt_a dflags= toolSettings_opt_a $ toolSettings dflags opt_l :: DynFlags -> [String] opt_l dflags = concatMap (wayOptl (targetPlatform dflags)) (ways dflags) ++ toolSettings_opt_l (toolSettings dflags) opt_lm :: DynFlags -> [String] opt_lm dflags= toolSettings_opt_lm $ toolSettings dflags opt_windres :: DynFlags -> [String] opt_windres dflags= toolSettings_opt_windres $ toolSettings dflags opt_lo :: DynFlags -> [String] opt_lo dflags= toolSettings_opt_lo $ toolSettings dflags opt_lc :: DynFlags -> [String] opt_lc dflags= toolSettings_opt_lc $ toolSettings dflags opt_las :: DynFlags -> [String] opt_las dflags = toolSettings_opt_las $ toolSettings dflags opt_i :: DynFlags -> [String] opt_i dflags= toolSettings_opt_i $ toolSettings dflags ----------------------------------------------------------------------------- {- Note [Verbosity levels] ~~~~~~~~~~~~~~~~~~~~~~~ 0 | print errors & warnings only 1 | minimal verbosity: print "compiling M ... done." for each module. 
2 | equivalent to -dshow-passes 3 | equivalent to existing "ghc -v" 4 | "ghc -v -ddump-most" 5 | "ghc -v -ddump-all" -} -- | Set the Haskell language standard to use setLanguage :: Language -> DynP () setLanguage l = upd (`lang_set` Just l) -- | Is the -fpackage-trust mode on packageTrustOn :: DynFlags -> Bool packageTrustOn = gopt Opt_PackageTrust -- | Is Safe Haskell on in some way (including inference mode) safeHaskellOn :: DynFlags -> Bool safeHaskellOn dflags = safeHaskellModeEnabled dflags || safeInferOn dflags safeHaskellModeEnabled :: DynFlags -> Bool safeHaskellModeEnabled dflags = safeHaskell dflags `elem` [Sf_Unsafe, Sf_Trustworthy , Sf_Safe ] -- | Is the Safe Haskell safe language in use safeLanguageOn :: DynFlags -> Bool safeLanguageOn dflags = safeHaskell dflags == Sf_Safe -- | Is the Safe Haskell safe inference mode active safeInferOn :: DynFlags -> Bool safeInferOn = safeInfer -- | Test if Safe Imports are on in some form safeImportsOn :: DynFlags -> Bool safeImportsOn dflags = safeHaskell dflags == Sf_Unsafe || safeHaskell dflags == Sf_Trustworthy || safeHaskell dflags == Sf_Safe -- | Set a 'Safe Haskell' flag setSafeHaskell :: SafeHaskellMode -> DynP () setSafeHaskell s = updM f where f dfs = do let sf = safeHaskell dfs safeM <- combineSafeFlags sf s case s of Sf_Safe -> return $ dfs { safeHaskell = safeM, safeInfer = False } -- leave safe inference on in Trustworthy mode so we can warn -- if it could have been inferred safe. Sf_Trustworthy -> do l <- getCurLoc return $ dfs { safeHaskell = safeM, trustworthyOnLoc = l } -- leave safe inference on in Unsafe mode as well. _ -> return $ dfs { safeHaskell = safeM } -- | Are all direct imports required to be safe for this Safe Haskell mode? -- Direct imports are when the code explicitly imports a module safeDirectImpsReq :: DynFlags -> Bool safeDirectImpsReq d = safeLanguageOn d -- | Are all implicit imports required to be safe for this Safe Haskell mode? -- Implicit imports are things in the prelude. 
e.g System.IO when print is used. safeImplicitImpsReq :: DynFlags -> Bool safeImplicitImpsReq d = safeLanguageOn d -- | Combine two Safe Haskell modes correctly. Used for dealing with multiple flags. -- This makes Safe Haskell very much a monoid but for now I prefer this as I don't -- want to export this functionality from the module but do want to export the -- type constructors. combineSafeFlags :: SafeHaskellMode -> SafeHaskellMode -> DynP SafeHaskellMode combineSafeFlags a b | a == Sf_None = return b | b == Sf_None = return a | a == Sf_Ignore || b == Sf_Ignore = return Sf_Ignore | a == b = return a | otherwise = addErr errm >> pure a where errm = "Incompatible Safe Haskell flags! (" ++ show a ++ ", " ++ show b ++ ")" -- | A list of unsafe flags under Safe Haskell. Tuple elements are: -- * name of the flag -- * function to get srcspan that enabled the flag -- * function to test if the flag is on -- * function to turn the flag off unsafeFlags, unsafeFlagsForInfer :: [(LangExt.Extension, DynFlags -> SrcSpan, DynFlags -> Bool, DynFlags -> DynFlags)] unsafeFlags = [ (LangExt.GeneralizedNewtypeDeriving, newDerivOnLoc, xopt LangExt.GeneralizedNewtypeDeriving, flip xopt_unset LangExt.GeneralizedNewtypeDeriving) , (LangExt.DerivingVia, deriveViaOnLoc, xopt LangExt.DerivingVia, flip xopt_unset LangExt.DerivingVia) , (LangExt.TemplateHaskell, thOnLoc, xopt LangExt.TemplateHaskell, flip xopt_unset LangExt.TemplateHaskell) ] unsafeFlagsForInfer = unsafeFlags -- | Retrieve the options corresponding to a particular @opt_*@ field in the correct order getOpts :: DynFlags -- ^ 'DynFlags' to retrieve the options from -> (DynFlags -> [a]) -- ^ Relevant record accessor: one of the @opt_*@ accessors -> [a] -- ^ Correctly ordered extracted options getOpts dflags opts = reverse (opts dflags) -- We add to the options from the front, so we need to reverse the list -- | Gets the verbosity flag for the current verbosity level. 
This is fed to -- other tools, so GHC-specific verbosity flags like @-ddump-most@ are not included getVerbFlags :: DynFlags -> [String] getVerbFlags dflags | verbosity dflags >= 4 = ["-v"] | otherwise = [] setObjectDir, setHiDir, setHieDir, setStubDir, setDumpDir, setOutputDir, setDynObjectSuf, setDynHiSuf, setDylibInstallName, setObjectSuf, setHiSuf, setHieSuf, setHcSuf, parseDynLibLoaderMode, setPgmP, setPgmJSP, setPgmCmmP, addOptl, addOptc, addOptcxx, addOptP, addOptJSP, addOptCmmP, addCmdlineFramework, addHaddockOpts, addGhciScript, setInteractivePrint :: String -> DynFlags -> DynFlags setOutputFile, setDynOutputFile, setOutputHi, setDynOutputHi, setDumpPrefixForce :: Maybe String -> DynFlags -> DynFlags setObjectDir f d = d { objectDir = Just f} setHiDir f d = d { hiDir = Just f} setHieDir f d = d { hieDir = Just f} setStubDir f d = d { stubDir = Just f , includePaths = addGlobalInclude (includePaths d) [f] } -- -stubdir D adds an implicit -I D, so that gcc can find the _stub.h file -- \#included from the .hc file when compiling via C (i.e. unregisterised -- builds). setDumpDir f d = d { dumpDir = Just f} setOutputDir f = setObjectDir f . setHieDir f . setHiDir f . setStubDir f . 
setDumpDir f setDylibInstallName f d = d { dylibInstallName = Just f} setObjectSuf f d = d { objectSuf_ = f} setDynObjectSuf f d = d { dynObjectSuf_ = f} setHiSuf f d = d { hiSuf_ = f} setHieSuf f d = d { hieSuf = f} setDynHiSuf f d = d { dynHiSuf_ = f} setHcSuf f d = d { hcSuf = f} setOutputFile f d = d { outputFile_ = f} setDynOutputFile f d = d { dynOutputFile_ = f} setOutputHi f d = d { outputHi = f} setDynOutputHi f d = d { dynOutputHi = f} parseUnitInsts :: String -> Instantiations parseUnitInsts str = case filter ((=="").snd) (readP_to_S parse str) of [(r, "")] -> r _ -> throwGhcException $ CmdLineError ("Can't parse -instantiated-with: " ++ str) where parse = sepBy parseEntry (R.char ',') parseEntry = do n <- parseModuleName _ <- R.char '=' m <- parseHoleyModule return (n, m) setUnitInstantiations :: String -> DynFlags -> DynFlags setUnitInstantiations s d = d { homeUnitInstantiations_ = parseUnitInsts s } setUnitInstanceOf :: String -> DynFlags -> DynFlags setUnitInstanceOf s d = d { homeUnitInstanceOf_ = Just (UnitId (fsLit s)) } addPluginModuleName :: String -> DynFlags -> DynFlags addPluginModuleName name d = d { pluginModNames = (mkModuleName name) : (pluginModNames d) } clearPluginModuleNames :: DynFlags -> DynFlags clearPluginModuleNames d = d { pluginModNames = [] , pluginModNameOpts = [] } addPluginModuleNameOption :: String -> DynFlags -> DynFlags addPluginModuleNameOption optflag d = d { pluginModNameOpts = (mkModuleName m, option) : (pluginModNameOpts d) } where (m, rest) = break (== ':') optflag option = case rest of [] -> "" -- should probably signal an error (_:plug_opt) -> plug_opt -- ignore the ':' from break addExternalPlugin :: String -> DynFlags -> DynFlags addExternalPlugin optflag d = case parseExternalPluginSpec optflag of Just r -> d { externalPluginSpecs = r : externalPluginSpecs d } Nothing -> cmdLineError $ "Couldn't parse external plugin specification: " ++ optflag addFrontendPluginOption :: String -> DynFlags -> DynFlags 
addFrontendPluginOption s d = d { frontendPluginOpts = s : frontendPluginOpts d } parseDynLibLoaderMode f d = case splitAt 8 f of ("deploy", "") -> d { dynLibLoader = Deployable } ("sysdep", "") -> d { dynLibLoader = SystemDependent } _ -> throwGhcException (CmdLineError ("Unknown dynlib loader: " ++ f)) setDumpPrefixForce f d = d { dumpPrefixForce = f} -- XXX HACK: Prelude> words "'does not' work" ===> ["'does","not'","work"] -- Config.hs should really use Option. setPgmP f = alterToolSettings (\s -> s { toolSettings_pgm_P = (pgm, map Option args)}) where (pgm:args) = words f -- XXX HACK: Prelude> words "'does not' work" ===> ["'does","not'","work"] -- Config.hs should really use Option. setPgmJSP f = alterToolSettings (\s -> s { toolSettings_pgm_JSP = (pgm, map Option args)}) where (pgm:args) = words f -- XXX HACK: Prelude> words "'does not' work" ===> ["'does","not'","work"] -- Config.hs should really use Option. setPgmCmmP f = alterToolSettings (\s -> s { toolSettings_pgm_CmmP = (pgm, map Option args)}) where (pgm:args) = words f addOptl f = alterToolSettings (\s -> s { toolSettings_opt_l = f : toolSettings_opt_l s}) addOptc f = alterToolSettings (\s -> s { toolSettings_opt_c = f : toolSettings_opt_c s}) addOptcxx f = alterToolSettings (\s -> s { toolSettings_opt_cxx = f : toolSettings_opt_cxx s}) addOptP f = alterToolSettings $ \s -> s { toolSettings_opt_P = f : toolSettings_opt_P s , toolSettings_opt_P_fingerprint = fingerprintStrings (f : toolSettings_opt_P s) } -- See Note [Repeated -optP hashing] addOptJSP f = alterToolSettings $ \s -> s { toolSettings_opt_JSP = f : toolSettings_opt_JSP s , toolSettings_opt_JSP_fingerprint = fingerprintStrings (f : toolSettings_opt_JSP s) } -- See Note [Repeated -optP hashing] addOptCmmP f = alterToolSettings $ \s -> s { toolSettings_opt_CmmP = f : toolSettings_opt_CmmP s , toolSettings_opt_CmmP_fingerprint = fingerprintStrings (f : toolSettings_opt_CmmP s) } setDepMakefile :: FilePath -> DynFlags -> DynFlags 
setDepMakefile f d = d { depMakefile = f } setDepIncludeCppDeps :: Bool -> DynFlags -> DynFlags setDepIncludeCppDeps b d = d { depIncludeCppDeps = b } setDepIncludePkgDeps :: Bool -> DynFlags -> DynFlags setDepIncludePkgDeps b d = d { depIncludePkgDeps = b } addDepExcludeMod :: String -> DynFlags -> DynFlags addDepExcludeMod m d = d { depExcludeMods = mkModuleName m : depExcludeMods d } addDepSuffix :: FilePath -> DynFlags -> DynFlags addDepSuffix s d = d { depSuffixes = s : depSuffixes d } addCmdlineFramework f d = d { cmdlineFrameworks = f : cmdlineFrameworks d} addGhcVersionFile :: FilePath -> DynFlags -> DynFlags addGhcVersionFile f d = d { ghcVersionFile = Just f } addHaddockOpts f d = d { haddockOptions = Just f} addGhciScript f d = d { ghciScripts = f : ghciScripts d} setInteractivePrint f d = d { interactivePrint = Just f} ----------------------------------------------------------------------------- -- Setting the optimisation level updOptLevelChanged :: Int -> DynFlags -> (DynFlags, Bool) -- ^ Sets the 'DynFlags' to be appropriate to the optimisation level and signals if any changes took place updOptLevelChanged n dfs = (dfs3, changed1 || changed2 || changed3) where final_n = max 0 (min 2 n) -- Clamp to 0 <= n <= 2 (dfs1, changed1) = foldr unset (dfs , False) remove_gopts (dfs2, changed2) = foldr set (dfs1, False) extra_gopts (dfs3, changed3) = setLlvmOptLevel dfs2 extra_gopts = [ f | (ns,f) <- optLevelFlags, final_n `elem` ns ] remove_gopts = [ f | (ns,f) <- optLevelFlags, final_n `notElem` ns ] set f (dfs, changed) | gopt f dfs = (dfs, changed) | otherwise = (gopt_set dfs f, True) unset f (dfs, changed) | not (gopt f dfs) = (dfs, changed) | otherwise = (gopt_unset dfs f, True) setLlvmOptLevel dfs | llvmOptLevel dfs /= final_n = (dfs{ llvmOptLevel = final_n }, True) | otherwise = (dfs, False) updOptLevel :: Int -> DynFlags -> DynFlags -- ^ Sets the 'DynFlags' to be appropriate to the optimisation level updOptLevel n = fst . 
updOptLevelChanged n {- ********************************************************************** %* * DynFlags parser %* * %********************************************************************* -} -- ----------------------------------------------------------------------------- -- Parsing the dynamic flags. -- | Parse dynamic flags from a list of command line arguments. Returns -- the parsed 'DynFlags', the left-over arguments, and a list of warnings. -- Throws a 'UsageError' if errors occurred during parsing (such as unknown -- flags or missing arguments). parseDynamicFlagsCmdLine :: MonadIO m => DynFlags -> [Located String] -> m (DynFlags, [Located String], Messages DriverMessage) -- ^ Updated 'DynFlags', left-over arguments, and -- list of warnings. parseDynamicFlagsCmdLine = parseDynamicFlagsFull flagsAll True -- | Like 'parseDynamicFlagsCmdLine' but does not allow the package flags -- (-package, -hide-package, -ignore-package, -hide-all-packages, -package-db). -- Used to parse flags set in a modules pragma. parseDynamicFilePragma :: MonadIO m => DynFlags -> [Located String] -> m (DynFlags, [Located String], Messages DriverMessage) -- ^ Updated 'DynFlags', left-over arguments, and -- list of warnings. parseDynamicFilePragma = parseDynamicFlagsFull flagsDynamic False newtype CmdLineP s a = CmdLineP (forall m. (Monad m) => StateT s m a) deriving (Functor) instance Monad (CmdLineP s) where CmdLineP k >>= f = CmdLineP (k >>= \x -> case f x of CmdLineP g -> g) return = pure instance Applicative (CmdLineP s) where pure x = CmdLineP (pure x) (<*>) = ap getCmdLineState :: CmdLineP s s getCmdLineState = CmdLineP State.get putCmdLineState :: s -> CmdLineP s () putCmdLineState x = CmdLineP (State.put x) runCmdLineP :: CmdLineP s a -> s -> (a, s) runCmdLineP (CmdLineP k) s0 = runIdentity $ runStateT k s0 -- | A helper to parse a set of flags from a list of command-line arguments, handling -- response files. processCmdLineP :: forall s m. 
    MonadIO m
    => [Flag (CmdLineP s)] -- ^ valid flags to match against
    -> s                   -- ^ current state
    -> [Located String]    -- ^ arguments to parse
    -> m (([Located String], [Err], [Warn]), s)
                           -- ^ (leftovers, errors, warnings)
processCmdLineP activeFlags s0 args =
    runStateT (processArgs (map (hoistFlag getCmdLineP) activeFlags) args parseResponseFile) s0
  where
    -- Interpret the polymorphic 'CmdLineP' action in this monad's StateT.
    getCmdLineP :: CmdLineP s a -> StateT s m a
    getCmdLineP (CmdLineP k) = k

-- | Parses the dynamically set flags for GHC. This is the most general form of
-- the dynamic flag parser that the other methods simply wrap. It allows
-- saying which flags are valid flags and indicating if we are parsing
-- arguments from the command line or from a file pragma.
parseDynamicFlagsFull
    :: forall m. MonadIO m
    => [Flag (CmdLineP DynFlags)] -- ^ valid flags to match against
    -> Bool                       -- ^ are the arguments from the command line?
    -> DynFlags                   -- ^ current dynamic flags
    -> [Located String]           -- ^ arguments to parse
    -> m (DynFlags, [Located String], Messages DriverMessage)
parseDynamicFlagsFull activeFlags cmdline dflags0 args = do
  ((leftover, errs, cli_warns), dflags1) <- processCmdLineP activeFlags dflags0 args

  -- See Note [Handling errors when parsing command-line flags]
  -- Any parse errors are fatal: render them and throw a GhcException.
  let rdr = renderWithContext (initSDocContext dflags0 defaultUserStyle)
  unless (null errs) $ liftIO $ throwGhcExceptionIO $ errorsToGhcException $
    map ((rdr . ppr . getLoc &&& unLoc) .
         errMsg) $ errs

  -- check for disabled flags in safe haskell
  let (dflags2, sh_warns) = safeFlagCheck cmdline dflags1
      theWays = ways dflags2

  -- Reject way combinations (e.g. of profiling/threading/dynamic) that
  -- are not supported together.
  unless (allowed_combination theWays) $ liftIO $
      throwGhcExceptionIO (CmdLineError ("combination not supported: " ++
                               intercalate "/" (map wayDesc (Set.toAscList theWays))))

  let (dflags3, consistency_warnings) = makeDynFlagsConsistent dflags2

  -- Set timer stats & heap size
  when (enableTimeStats dflags3) $ liftIO enableTimingStats
  case (ghcHeapSize dflags3) of
    Just x -> liftIO (setHeapSize x)
    _      -> return ()

  liftIO $ setUnsafeGlobalDynFlags dflags3

  -- create message envelopes using final DynFlags: #23402
  let diag_opts = initDiagOpts dflags3
      warns = warnsToMessages diag_opts $
                mconcat [consistency_warnings, sh_warns, cli_warns]

  return (dflags3, leftover, warns)

-- | Check (and potentially disable) any extensions that aren't allowed
-- in safe mode.
--
-- The bool is to indicate if we are parsing command line flags (false means
-- file pragma). This allows us to generate better warnings.
safeFlagCheck :: Bool -> DynFlags -> (DynFlags, [Warn])
-- First equation: Safe language is explicitly on, so forcibly unset any
-- unsafe extensions and warn about each one that was ignored.
safeFlagCheck _ dflags | safeLanguageOn dflags = (dflagsUnset, warns)
  where
    -- Handle illegal flags under safe language.
    (dflagsUnset, warns) = foldl' check_method (dflags, mempty) unsafeFlags

    check_method (df, warns) (ext,loc,test,fix)
        | test df   = (fix df, safeFailure (loc df) ext : warns)
        | otherwise = (df, warns)

    safeFailure loc ext = L loc $ DriverSafeHaskellIgnoredExtension ext

-- Second equation: Safe language not explicitly on; possibly record an
-- inferred safety level instead.
safeFlagCheck cmdl dflags =
  case safeInferOn dflags of
    True  -> (dflags' { safeInferred = safeFlags }, warn)
    False -> (dflags', warn)

  where
    -- dynflags and warn for when -fpackage-trust by itself with no safe
    -- haskell flag
    (dflags', warn)
      | not (safeHaskellModeEnabled dflags) && not cmdl && packageTrustOn dflags
      = (gopt_unset dflags Opt_PackageTrust, pkgWarnMsg)
      | otherwise = (dflags, mempty)

    pkgWarnMsg :: [Warn]
    pkgWarnMsg = [ L (pkgTrustOnLoc dflags') DriverPackageTrustIgnored ]

    -- Have we inferred Unsafe?
    -- See Note [Safe Haskell Inference] in GHC.Driver.Main

    -- Force this to avoid retaining reference to old DynFlags value
    !safeFlags = all (\(_,_,t,_) -> not $ t dflags) unsafeFlagsForInfer

-- | Produce a list of suggestions for a user provided flag that is invalid.
flagSuggestions
  :: [String] -- valid flags to match against
  -> String
  -> [String]
flagSuggestions flags userInput
  -- fixes #11789
  -- If the flag contains '=',
  -- this uses both the whole and the left side of '=' for comparing.
  | elem '=' userInput =
      let (flagsWithEq, flagsWithoutEq) = partition (elem '=') flags
          fName = takeWhile (/= '=') userInput
      in (fuzzyMatch userInput flagsWithEq) ++ (fuzzyMatch fName flagsWithoutEq)
  | otherwise = fuzzyMatch userInput flags

{- **********************************************************************
%*                                                                      *
                DynFlags specifications
%*                                                                      *
%********************************************************************* -}

-- | All dynamic flags option strings without the deprecated ones.
-- These are the user facing strings for enabling and disabling options.
allNonDeprecatedFlags :: [String]
allNonDeprecatedFlags = allFlagsDeps False

-- | All flags with possibility to filter deprecated ones
allFlagsDeps :: Bool -> [String]
allFlagsDeps keepDeprecated =
    [ '-':flagName flag
    | (deprecated, flag) <- flagsAllDeps
    , keepDeprecated || not (isDeprecated deprecated)]
  where
    isDeprecated Deprecated = True
    isDeprecated _ = False

{-
 - Below we export user facing symbols for GHC dynamic flags for use with the
 - GHC API.
 -}

-- All dynamic flags present in GHC.
flagsAll :: [Flag (CmdLineP DynFlags)]
flagsAll = map snd flagsAllDeps

-- All dynamic flags present in GHC with deprecation information.
flagsAllDeps :: [(Deprecation, Flag (CmdLineP DynFlags))]
flagsAllDeps = package_flags_deps ++ dynamic_flags_deps

-- All dynamic flags, minus package flags, present in GHC.
flagsDynamic :: [Flag (CmdLineP DynFlags)]
flagsDynamic = map snd dynamic_flags_deps

-- All package flags present in GHC.
flagsPackage :: [Flag (CmdLineP DynFlags)]
flagsPackage = map snd package_flags_deps

----------------Helpers to make flags and keep deprecation information----------

-- | The shape of the flag-construction functions ('defFlag', 'defGhcFlag', ...).
type FlagMaker m = String -> OptKind m -> Flag m
type DynFlagMaker = FlagMaker (CmdLineP DynFlags)

-- Make a non-deprecated flag
make_ord_flag :: DynFlagMaker -> String -> OptKind (CmdLineP DynFlags)
              -> (Deprecation, Flag (CmdLineP DynFlags))
make_ord_flag fm name kind = (NotDeprecated, fm name kind)

-- Make a deprecated flag
make_dep_flag :: DynFlagMaker -> String -> OptKind (CmdLineP DynFlags) -> String
              -> (Deprecation, Flag (CmdLineP DynFlags))
make_dep_flag fm name kind message
  = (Deprecated, fm name $ add_dep_message kind message)

-- | Wrap an 'OptKind' so that, in addition to its normal action, it emits
-- the given deprecation message whenever the flag is used.  One case per
-- 'OptKind' constructor.
add_dep_message :: OptKind (CmdLineP DynFlags) -> String
                -> OptKind (CmdLineP DynFlags)
add_dep_message (NoArg f) message = NoArg $ f >> deprecate message
add_dep_message (HasArg f) message = HasArg $ \s -> f s >> deprecate message
add_dep_message (SepArg f) message = SepArg $ \s -> f s >> deprecate message
add_dep_message (Prefix f) message = Prefix $ \s -> f s >> deprecate message
add_dep_message (OptPrefix f) message
                                  = OptPrefix $ \s -> f s >> deprecate message
add_dep_message (OptIntSuffix f) message
                               = OptIntSuffix $ \oi -> f oi >> deprecate message
add_dep_message (IntSuffix f) message
                                  = IntSuffix $ \i -> f i >> deprecate message
add_dep_message (Word64Suffix f) message
                               = Word64Suffix $ \i -> f i >> deprecate message
add_dep_message (FloatSuffix f) message
                               = FloatSuffix $ \fl -> f fl >> deprecate message
add_dep_message (PassFlag f) message
                                  = PassFlag $ \s -> f s >> deprecate message
add_dep_message (AnySuffix f) message
                                  = AnySuffix $ \s -> f s >> deprecate message

----------------------- The main flags themselves ------------------------------
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
dynamic_flags_deps :: [(Deprecation, Flag (CmdLineP DynFlags))]
dynamic_flags_deps = [
    make_dep_flag defFlag "n" (NoArg $
return ()) "The -n flag is deprecated and no longer has any effect" , make_ord_flag defFlag "cpp" (NoArg (setExtensionFlag LangExt.Cpp)) , make_ord_flag defFlag "F" (NoArg (setGeneralFlag Opt_Pp)) , (Deprecated, defFlag "#include" (HasArg (\_s -> deprecate ("-#include and INCLUDE pragmas are " ++ "deprecated: They no longer have any effect")))) , make_ord_flag defFlag "v" (OptIntSuffix setVerbosity) , make_ord_flag defGhcFlag "j" (OptIntSuffix (\n -> case n of Just n | n > 0 -> upd (\d -> d { parMakeCount = Just (ParMakeThisMany n) }) | otherwise -> addErr "Syntax: -j[n] where n > 0" Nothing -> upd (\d -> d { parMakeCount = Just ParMakeNumProcessors }))) -- When the number of parallel builds -- is omitted, it is the same -- as specifying that the number of -- parallel builds is equal to the -- result of getNumProcessors , make_ord_flag defGhcFlag "jsem" $ hasArg $ \f d -> d { parMakeCount = Just (ParMakeSemaphore f) } , make_ord_flag defFlag "instantiated-with" (sepArg setUnitInstantiations) , make_ord_flag defFlag "this-component-id" (sepArg setUnitInstanceOf) -- RTS options ------------------------------------------------------------- , make_ord_flag defFlag "H" (HasArg (\s -> upd (\d -> d { ghcHeapSize = Just $ fromIntegral (decodeSize s)}))) , make_ord_flag defFlag "Rghc-timing" (NoArg (upd (\d -> d { enableTimeStats = True }))) ------- ways --------------------------------------------------------------- , make_ord_flag defGhcFlag "prof" (NoArg (addWayDynP WayProf)) , (Deprecated, defFlag "eventlog" $ noArgM $ \d -> do deprecate "the eventlog is now enabled in all runtime system ways" return d) , make_ord_flag defGhcFlag "debug" (NoArg (addWayDynP WayDebug)) , make_ord_flag defGhcFlag "threaded" (NoArg (addWayDynP WayThreaded)) , make_ord_flag defGhcFlag "single-threaded" (NoArg (removeWayDynP WayThreaded)) , make_ord_flag defGhcFlag "ticky" (NoArg (setGeneralFlag Opt_Ticky >> addWayDynP WayDebug)) -- -ticky enables ticky-ticky code generation, and also implies 
-debug which -- is required to get the RTS ticky support. ----- Linker -------------------------------------------------------- , make_ord_flag defGhcFlag "static" (NoArg (removeWayDynP WayDyn)) , make_ord_flag defGhcFlag "dynamic" (NoArg (addWayDynP WayDyn)) , make_ord_flag defGhcFlag "rdynamic" $ noArg $ #if defined(linux_HOST_OS) addOptl "-rdynamic" #elif defined(mingw32_HOST_OS) addOptl "-Wl,--export-all-symbols" #else -- ignored for compat w/ gcc: id #endif , make_ord_flag defGhcFlag "relative-dynlib-paths" (NoArg (setGeneralFlag Opt_RelativeDynlibPaths)) , make_ord_flag defGhcFlag "copy-libs-when-linking" (NoArg (setGeneralFlag Opt_SingleLibFolder)) , make_ord_flag defGhcFlag "pie" (NoArg (setGeneralFlag Opt_PICExecutable)) , make_ord_flag defGhcFlag "no-pie" (NoArg (unSetGeneralFlag Opt_PICExecutable)) ------- Specific phases -------------------------------------------- -- need to appear before -pgmL to be parsed as LLVM flags. , make_ord_flag defFlag "pgmlo" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lo = (f,[]) } , make_ord_flag defFlag "pgmlc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lc = (f,[]) } , make_ord_flag defFlag "pgmlas" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_las = (f,[]) } , make_ord_flag defFlag "pgmlm" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lm = if null f then Nothing else Just (f,[]) } , make_ord_flag defFlag "pgmi" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_i = f } , make_ord_flag defFlag "pgmL" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_L = f } , make_ord_flag defFlag "pgmP" (hasArg setPgmP) , make_ord_flag defFlag "pgmJSP" (hasArg setPgmJSP) , make_ord_flag defFlag "pgmCmmP" (hasArg setPgmCmmP) , make_ord_flag defFlag "pgmF" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_F = f } , make_ord_flag defFlag "pgmc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_c = f } , 
make_ord_flag defFlag "pgmcxx" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_cxx = f } , (Deprecated, defFlag "pgmc-supports-no-pie" $ noArgM $ \d -> do deprecate $ "use -pgml-supports-no-pie instead" pure $ alterToolSettings (\s -> s { toolSettings_ccSupportsNoPie = True }) d) , make_ord_flag defFlag "pgms" (HasArg (\_ -> addWarn "Object splitting was removed in GHC 8.8")) , make_ord_flag defFlag "pgma" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_a = (f,[]) } , make_ord_flag defFlag "pgml" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_l = (f,[]) , -- Don't pass -no-pie with custom -pgml (see #15319). Note -- that this could break when -no-pie is actually needed. -- But the CC_SUPPORTS_NO_PIE check only happens at -- buildtime, and -pgml is a runtime option. A better -- solution would be running this check for each custom -- -pgml. toolSettings_ccSupportsNoPie = False } , make_ord_flag defFlag "pgml-supports-no-pie" $ noArg $ alterToolSettings $ \s -> s { toolSettings_ccSupportsNoPie = True } , make_ord_flag defFlag "pgmwindres" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_windres = f } , make_ord_flag defFlag "pgmar" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_ar = f } , make_ord_flag defFlag "pgmotool" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_otool = f} , make_ord_flag defFlag "pgminstall_name_tool" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_install_name_tool = f} , make_ord_flag defFlag "pgmranlib" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_ranlib = f } -- need to appear before -optl/-opta to be parsed as LLVM flags. 
, make_ord_flag defFlag "optlm" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lm = f : toolSettings_opt_lm s } , make_ord_flag defFlag "optlo" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lo = f : toolSettings_opt_lo s } , make_ord_flag defFlag "optlc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lc = f : toolSettings_opt_lc s } , make_ord_flag defFlag "optlas" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_las = f : toolSettings_opt_las s } , make_ord_flag defFlag "opti" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_i = f : toolSettings_opt_i s } , make_ord_flag defFlag "optL" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_L = f : toolSettings_opt_L s } , make_ord_flag defFlag "optP" (hasArg addOptP) , make_ord_flag defFlag "optJSP" (hasArg addOptJSP) , make_ord_flag defFlag "optCmmP" (hasArg addOptCmmP) , make_ord_flag defFlag "optF" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_F = f : toolSettings_opt_F s } , make_ord_flag defFlag "optc" (hasArg addOptc) , make_ord_flag defFlag "optcxx" (hasArg addOptcxx) , make_ord_flag defFlag "opta" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_a = f : toolSettings_opt_a s } , make_ord_flag defFlag "optl" (hasArg addOptl) , make_ord_flag defFlag "optwindres" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_windres = f : toolSettings_opt_windres s } -- N.B. We may someday deprecate this in favor of -fsplit-sections, -- which has the benefit of also having a negating -fno-split-sections. 
, make_ord_flag defGhcFlag "split-sections" (NoArg $ setGeneralFlag Opt_SplitSections) -------- ghc -M ----------------------------------------------------- , make_ord_flag defGhcFlag "dep-suffix" (hasArg addDepSuffix) , make_ord_flag defGhcFlag "dep-makefile" (hasArg setDepMakefile) , make_ord_flag defGhcFlag "include-cpp-deps" (noArg (setDepIncludeCppDeps True)) , make_ord_flag defGhcFlag "include-pkg-deps" (noArg (setDepIncludePkgDeps True)) , make_ord_flag defGhcFlag "exclude-module" (hasArg addDepExcludeMod) -------- Linking ---------------------------------------------------- , make_ord_flag defGhcFlag "no-link" (noArg (\d -> d { ghcLink=NoLink })) , make_ord_flag defGhcFlag "shared" (noArg (\d -> d { ghcLink=LinkDynLib })) , make_ord_flag defGhcFlag "staticlib" (noArg (\d -> setGeneralFlag' Opt_LinkRts (d { ghcLink=LinkStaticLib }))) , make_ord_flag defGhcFlag "-merge-objs" (noArg (\d -> d { ghcLink=LinkMergedObj })) , make_ord_flag defGhcFlag "dynload" (hasArg parseDynLibLoaderMode) , make_ord_flag defGhcFlag "dylib-install-name" (hasArg setDylibInstallName) ------- Libraries --------------------------------------------------- , make_ord_flag defFlag "L" (Prefix addLibraryPath) , make_ord_flag defFlag "l" (hasArg (addLdInputs . Option . ("-l" ++))) ------- Frameworks -------------------------------------------------- -- -framework-path should really be -F ... , make_ord_flag defFlag "framework-path" (HasArg addFrameworkPath) , make_ord_flag defFlag "framework" (hasArg addCmdlineFramework) ------- Output Redirection ------------------------------------------ , make_ord_flag defGhcFlag "odir" (hasArg setObjectDir) , make_ord_flag defGhcFlag "o" (sepArg (setOutputFile . Just)) , make_ord_flag defGhcFlag "dyno" (sepArg (setDynOutputFile . Just)) , make_ord_flag defGhcFlag "ohi" (hasArg (setOutputHi . Just )) , make_ord_flag defGhcFlag "dynohi" (hasArg (setDynOutputHi . 
Just )) , make_ord_flag defGhcFlag "osuf" (hasArg setObjectSuf) , make_ord_flag defGhcFlag "dynosuf" (hasArg setDynObjectSuf) , make_ord_flag defGhcFlag "hcsuf" (hasArg setHcSuf) , make_ord_flag defGhcFlag "hisuf" (hasArg setHiSuf) , make_ord_flag defGhcFlag "hiesuf" (hasArg setHieSuf) , make_ord_flag defGhcFlag "dynhisuf" (hasArg setDynHiSuf) , make_ord_flag defGhcFlag "hidir" (hasArg setHiDir) , make_ord_flag defGhcFlag "hiedir" (hasArg setHieDir) , make_ord_flag defGhcFlag "tmpdir" (hasArg setTmpDir) , make_ord_flag defGhcFlag "stubdir" (hasArg setStubDir) , make_ord_flag defGhcFlag "dumpdir" (hasArg setDumpDir) , make_ord_flag defGhcFlag "outputdir" (hasArg setOutputDir) , make_ord_flag defGhcFlag "ddump-file-prefix" (hasArg (setDumpPrefixForce . Just . flip (++) ".")) , make_ord_flag defGhcFlag "dynamic-too" (NoArg (setGeneralFlag Opt_BuildDynamicToo)) ------- Keeping temporary files ------------------------------------- -- These can be singular (think ghc -c) or plural (think ghc --make) , make_ord_flag defGhcFlag "keep-hc-file" (NoArg (setGeneralFlag Opt_KeepHcFiles)) , make_ord_flag defGhcFlag "keep-hc-files" (NoArg (setGeneralFlag Opt_KeepHcFiles)) , make_ord_flag defGhcFlag "keep-hscpp-file" (NoArg (setGeneralFlag Opt_KeepHscppFiles)) , make_ord_flag defGhcFlag "keep-hscpp-files" (NoArg (setGeneralFlag Opt_KeepHscppFiles)) , make_ord_flag defGhcFlag "keep-s-file" (NoArg (setGeneralFlag Opt_KeepSFiles)) , make_ord_flag defGhcFlag "keep-s-files" (NoArg (setGeneralFlag Opt_KeepSFiles)) , make_ord_flag defGhcFlag "keep-llvm-file" (NoArg $ setObjBackend llvmBackend >> setGeneralFlag Opt_KeepLlvmFiles) , make_ord_flag defGhcFlag "keep-llvm-files" (NoArg $ setObjBackend llvmBackend >> setGeneralFlag Opt_KeepLlvmFiles) -- This only makes sense as plural , make_ord_flag defGhcFlag "keep-tmp-files" (NoArg (setGeneralFlag Opt_KeepTmpFiles)) , make_ord_flag defGhcFlag "keep-hi-file" (NoArg (setGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "no-keep-hi-file" 
(NoArg (unSetGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "keep-hi-files" (NoArg (setGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "no-keep-hi-files" (NoArg (unSetGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "keep-o-file" (NoArg (setGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "no-keep-o-file" (NoArg (unSetGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "keep-o-files" (NoArg (setGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "no-keep-o-files" (NoArg (unSetGeneralFlag Opt_KeepOFiles)) ------- Miscellaneous ---------------------------------------------- , make_ord_flag defGhcFlag "no-auto-link-packages" (NoArg (unSetGeneralFlag Opt_AutoLinkPackages)) , make_ord_flag defGhcFlag "no-hs-main" (NoArg (setGeneralFlag Opt_NoHsMain)) , make_ord_flag defGhcFlag "fno-state-hack" (NoArg (setGeneralFlag Opt_G_NoStateHack)) , make_ord_flag defGhcFlag "fno-opt-coercion" (NoArg (setGeneralFlag Opt_G_NoOptCoercion)) , make_ord_flag defGhcFlag "with-rtsopts" (HasArg setRtsOpts) , make_ord_flag defGhcFlag "rtsopts" (NoArg (setRtsOptsEnabled RtsOptsAll)) , make_ord_flag defGhcFlag "rtsopts=all" (NoArg (setRtsOptsEnabled RtsOptsAll)) , make_ord_flag defGhcFlag "rtsopts=some" (NoArg (setRtsOptsEnabled RtsOptsSafeOnly)) , make_ord_flag defGhcFlag "rtsopts=none" (NoArg (setRtsOptsEnabled RtsOptsNone)) , make_ord_flag defGhcFlag "rtsopts=ignore" (NoArg (setRtsOptsEnabled RtsOptsIgnore)) , make_ord_flag defGhcFlag "rtsopts=ignoreAll" (NoArg (setRtsOptsEnabled RtsOptsIgnoreAll)) , make_ord_flag defGhcFlag "no-rtsopts" (NoArg (setRtsOptsEnabled RtsOptsNone)) , make_ord_flag defGhcFlag "no-rtsopts-suggestions" (noArg (\d -> d {rtsOptsSuggestions = False})) , make_ord_flag defGhcFlag "dhex-word-literals" (NoArg (setGeneralFlag Opt_HexWordLiterals)) , make_ord_flag defGhcFlag "ghcversion-file" (hasArg addGhcVersionFile) , make_ord_flag defGhcFlag "main-is" (SepArg setMainIs) , make_ord_flag defGhcFlag "haddock" (NoArg (setGeneralFlag 
Opt_Haddock)) , make_ord_flag defGhcFlag "no-haddock" (NoArg (unSetGeneralFlag Opt_Haddock)) , make_ord_flag defGhcFlag "haddock-opts" (hasArg addHaddockOpts) , make_ord_flag defGhcFlag "hpcdir" (SepArg setOptHpcDir) , make_ord_flag defGhciFlag "ghci-script" (hasArg addGhciScript) , make_ord_flag defGhciFlag "interactive-print" (hasArg setInteractivePrint) , make_ord_flag defGhcFlag "ticky-allocd" (NoArg (setGeneralFlag Opt_Ticky_Allocd)) , make_ord_flag defGhcFlag "ticky-LNE" (NoArg (setGeneralFlag Opt_Ticky_LNE)) , make_ord_flag defGhcFlag "ticky-ap-thunk" (NoArg (setGeneralFlag Opt_Ticky_AP)) , make_ord_flag defGhcFlag "ticky-dyn-thunk" (NoArg (setGeneralFlag Opt_Ticky_Dyn_Thunk)) , make_ord_flag defGhcFlag "ticky-tag-checks" (NoArg (setGeneralFlag Opt_Ticky_Tag)) ------- recompilation checker -------------------------------------- , make_dep_flag defGhcFlag "recomp" (NoArg $ unSetGeneralFlag Opt_ForceRecomp) "Use -fno-force-recomp instead" , make_dep_flag defGhcFlag "no-recomp" (NoArg $ setGeneralFlag Opt_ForceRecomp) "Use -fforce-recomp instead" , make_ord_flag defFlag "fmax-errors" (intSuffix (\n d -> d { maxErrors = Just (max 1 n) })) , make_ord_flag defFlag "fno-max-errors" (noArg (\d -> d { maxErrors = Nothing })) , make_ord_flag defFlag "freverse-errors" (noArg (\d -> d {reverseErrors = True} )) , make_ord_flag defFlag "fno-reverse-errors" (noArg (\d -> d {reverseErrors = False} )) ------ HsCpp opts --------------------------------------------------- , make_ord_flag defFlag "D" (AnySuffix (upd . addOptP)) , make_ord_flag defFlag "U" (AnySuffix (upd . 
addOptP)) ------- Include/Import Paths ---------------------------------------- , make_ord_flag defFlag "I" (Prefix addIncludePath) , make_ord_flag defFlag "i" (OptPrefix addImportPath) ------ Output style options ----------------------------------------- , make_ord_flag defFlag "dppr-user-length" (intSuffix (\n d -> d { pprUserLength = n })) , make_ord_flag defFlag "dppr-cols" (intSuffix (\n d -> d { pprCols = n })) , make_ord_flag defFlag "fdiagnostics-color=auto" (NoArg (upd (\d -> d { useColor = Auto }))) , make_ord_flag defFlag "fdiagnostics-color=always" (NoArg (upd (\d -> d { useColor = Always }))) , make_ord_flag defFlag "fdiagnostics-color=never" (NoArg (upd (\d -> d { useColor = Never }))) , make_ord_flag defFlag "fprint-error-index-links=auto" (NoArg (upd (\d -> d { useErrorLinks = Auto }))) , make_ord_flag defFlag "fprint-error-index-links=always" (NoArg (upd (\d -> d { useErrorLinks = Always }))) , make_ord_flag defFlag "fprint-error-index-links=never" (NoArg (upd (\d -> d { useErrorLinks = Never }))) -- Suppress all that is suppressible in core dumps. -- Except for uniques, as some simplifier phases introduce new variables that -- have otherwise identical names. 
, make_ord_flag defGhcFlag "dsuppress-all" (NoArg $ do setGeneralFlag Opt_SuppressCoercions setGeneralFlag Opt_SuppressCoercionTypes setGeneralFlag Opt_SuppressVarKinds setGeneralFlag Opt_SuppressModulePrefixes setGeneralFlag Opt_SuppressTypeApplications setGeneralFlag Opt_SuppressIdInfo setGeneralFlag Opt_SuppressTicks setGeneralFlag Opt_SuppressStgExts setGeneralFlag Opt_SuppressStgReps setGeneralFlag Opt_SuppressTypeSignatures setGeneralFlag Opt_SuppressCoreSizes setGeneralFlag Opt_SuppressTimestamps) ------ Debugging ---------------------------------------------------- , make_ord_flag defGhcFlag "dstg-stats" (NoArg (setGeneralFlag Opt_StgStats)) , make_ord_flag defGhcFlag "ddump-cmm" (setDumpFlag Opt_D_dump_cmm) , make_ord_flag defGhcFlag "ddump-cmm-from-stg" (setDumpFlag Opt_D_dump_cmm_from_stg) , make_ord_flag defGhcFlag "ddump-cmm-raw" (setDumpFlag Opt_D_dump_cmm_raw) , make_ord_flag defGhcFlag "ddump-cmm-verbose" (setDumpFlag Opt_D_dump_cmm_verbose) , make_ord_flag defGhcFlag "ddump-cmm-verbose-by-proc" (setDumpFlag Opt_D_dump_cmm_verbose_by_proc) , make_ord_flag defGhcFlag "ddump-cmm-cfg" (setDumpFlag Opt_D_dump_cmm_cfg) , make_ord_flag defGhcFlag "ddump-cmm-cbe" (setDumpFlag Opt_D_dump_cmm_cbe) , make_ord_flag defGhcFlag "ddump-cmm-switch" (setDumpFlag Opt_D_dump_cmm_switch) , make_ord_flag defGhcFlag "ddump-cmm-proc" (setDumpFlag Opt_D_dump_cmm_proc) , make_ord_flag defGhcFlag "ddump-cmm-sp" (setDumpFlag Opt_D_dump_cmm_sp) , make_ord_flag defGhcFlag "ddump-cmm-sink" (setDumpFlag Opt_D_dump_cmm_sink) , make_ord_flag defGhcFlag "ddump-cmm-caf" (setDumpFlag Opt_D_dump_cmm_caf) , make_ord_flag defGhcFlag "ddump-cmm-procmap" (setDumpFlag Opt_D_dump_cmm_procmap) , make_ord_flag defGhcFlag "ddump-cmm-split" (setDumpFlag Opt_D_dump_cmm_split) , make_ord_flag defGhcFlag "ddump-cmm-info" (setDumpFlag Opt_D_dump_cmm_info) , make_ord_flag defGhcFlag "ddump-cmm-cps" (setDumpFlag Opt_D_dump_cmm_cps) , make_ord_flag defGhcFlag "ddump-cmm-opt" (setDumpFlag 
Opt_D_dump_opt_cmm) , make_ord_flag defGhcFlag "ddump-cmm-thread-sanitizer" (setDumpFlag Opt_D_dump_cmm_thread_sanitizer) , make_ord_flag defGhcFlag "ddump-cfg-weights" (setDumpFlag Opt_D_dump_cfg_weights) , make_ord_flag defGhcFlag "ddump-core-stats" (setDumpFlag Opt_D_dump_core_stats) , make_ord_flag defGhcFlag "ddump-asm" (setDumpFlag Opt_D_dump_asm) , make_ord_flag defGhcFlag "ddump-js" (setDumpFlag Opt_D_dump_js) , make_ord_flag defGhcFlag "ddump-asm-native" (setDumpFlag Opt_D_dump_asm_native) , make_ord_flag defGhcFlag "ddump-asm-liveness" (setDumpFlag Opt_D_dump_asm_liveness) , make_ord_flag defGhcFlag "ddump-asm-regalloc" (setDumpFlag Opt_D_dump_asm_regalloc) , make_ord_flag defGhcFlag "ddump-asm-conflicts" (setDumpFlag Opt_D_dump_asm_conflicts) , make_ord_flag defGhcFlag "ddump-asm-regalloc-stages" (setDumpFlag Opt_D_dump_asm_regalloc_stages) , make_ord_flag defGhcFlag "ddump-asm-stats" (setDumpFlag Opt_D_dump_asm_stats) , make_ord_flag defGhcFlag "ddump-llvm" (NoArg $ setDumpFlag' Opt_D_dump_llvm) , make_ord_flag defGhcFlag "ddump-c-backend" (NoArg $ setDumpFlag' Opt_D_dump_c_backend) , make_ord_flag defGhcFlag "ddump-deriv" (setDumpFlag Opt_D_dump_deriv) , make_ord_flag defGhcFlag "ddump-ds" (setDumpFlag Opt_D_dump_ds) , make_ord_flag defGhcFlag "ddump-ds-preopt" (setDumpFlag Opt_D_dump_ds_preopt) , make_ord_flag defGhcFlag "ddump-foreign" (setDumpFlag Opt_D_dump_foreign) , make_ord_flag defGhcFlag "ddump-inlinings" (setDumpFlag Opt_D_dump_inlinings) , make_ord_flag defGhcFlag "ddump-verbose-inlinings" (setDumpFlag Opt_D_dump_verbose_inlinings) , make_ord_flag defGhcFlag "ddump-rule-firings" (setDumpFlag Opt_D_dump_rule_firings) , make_ord_flag defGhcFlag "ddump-rule-rewrites" (setDumpFlag Opt_D_dump_rule_rewrites) , make_ord_flag defGhcFlag "ddump-simpl-trace" (setDumpFlag Opt_D_dump_simpl_trace) , make_ord_flag defGhcFlag "ddump-occur-anal" (setDumpFlag Opt_D_dump_occur_anal) , make_ord_flag defGhcFlag "ddump-parsed" (setDumpFlag Opt_D_dump_parsed) , 
make_ord_flag defGhcFlag "ddump-parsed-ast" (setDumpFlag Opt_D_dump_parsed_ast) , make_ord_flag defGhcFlag "dkeep-comments" (NoArg (setGeneralFlag Opt_KeepRawTokenStream)) , make_ord_flag defGhcFlag "ddump-rn" (setDumpFlag Opt_D_dump_rn) , make_ord_flag defGhcFlag "ddump-rn-ast" (setDumpFlag Opt_D_dump_rn_ast) , make_ord_flag defGhcFlag "ddump-simpl" (setDumpFlag Opt_D_dump_simpl) , make_ord_flag defGhcFlag "ddump-simpl-iterations" (setDumpFlag Opt_D_dump_simpl_iterations) , make_ord_flag defGhcFlag "ddump-spec" (setDumpFlag Opt_D_dump_spec) , make_ord_flag defGhcFlag "ddump-spec-constr" (setDumpFlag Opt_D_dump_spec_constr) , make_ord_flag defGhcFlag "ddump-prep" (setDumpFlag Opt_D_dump_prep) , make_ord_flag defGhcFlag "ddump-late-cc" (setDumpFlag Opt_D_dump_late_cc) , make_ord_flag defGhcFlag "ddump-stg-from-core" (setDumpFlag Opt_D_dump_stg_from_core) , make_ord_flag defGhcFlag "ddump-stg-unarised" (setDumpFlag Opt_D_dump_stg_unarised) , make_ord_flag defGhcFlag "ddump-stg-final" (setDumpFlag Opt_D_dump_stg_final) , make_ord_flag defGhcFlag "ddump-stg-cg" (setDumpFlag Opt_D_dump_stg_cg) , make_dep_flag defGhcFlag "ddump-stg" (setDumpFlag Opt_D_dump_stg_from_core) "Use `-ddump-stg-from-core` or `-ddump-stg-final` instead" , make_ord_flag defGhcFlag "ddump-stg-tags" (setDumpFlag Opt_D_dump_stg_tags) , make_ord_flag defGhcFlag "ddump-call-arity" (setDumpFlag Opt_D_dump_call_arity) , make_ord_flag defGhcFlag "ddump-exitify" (setDumpFlag Opt_D_dump_exitify) , make_dep_flag defGhcFlag "ddump-stranal" (setDumpFlag Opt_D_dump_dmdanal) "Use `-ddump-dmdanal` instead" , make_dep_flag defGhcFlag "ddump-str-signatures" (setDumpFlag Opt_D_dump_dmd_signatures) "Use `-ddump-dmd-signatures` instead" , make_ord_flag defGhcFlag "ddump-dmdanal" (setDumpFlag Opt_D_dump_dmdanal) , make_ord_flag defGhcFlag "ddump-dmd-signatures" (setDumpFlag Opt_D_dump_dmd_signatures) , make_ord_flag defGhcFlag "ddump-cpranal" (setDumpFlag Opt_D_dump_cpranal) , make_ord_flag defGhcFlag 
"ddump-cpr-signatures" (setDumpFlag Opt_D_dump_cpr_signatures) , make_ord_flag defGhcFlag "ddump-tc" (setDumpFlag Opt_D_dump_tc) , make_ord_flag defGhcFlag "ddump-tc-ast" (setDumpFlag Opt_D_dump_tc_ast) , make_ord_flag defGhcFlag "ddump-hie" (setDumpFlag Opt_D_dump_hie) , make_ord_flag defGhcFlag "ddump-types" (setDumpFlag Opt_D_dump_types) , make_ord_flag defGhcFlag "ddump-rules" (setDumpFlag Opt_D_dump_rules) , make_ord_flag defGhcFlag "ddump-cse" (setDumpFlag Opt_D_dump_cse) , make_ord_flag defGhcFlag "ddump-float-out" (setDumpFlag Opt_D_dump_float_out) , make_ord_flag defGhcFlag "ddump-full-laziness" (setDumpFlag Opt_D_dump_float_out) , make_ord_flag defGhcFlag "ddump-float-in" (setDumpFlag Opt_D_dump_float_in) , make_ord_flag defGhcFlag "ddump-liberate-case" (setDumpFlag Opt_D_dump_liberate_case) , make_ord_flag defGhcFlag "ddump-static-argument-transformation" (setDumpFlag Opt_D_dump_static_argument_transformation) , make_ord_flag defGhcFlag "ddump-worker-wrapper" (setDumpFlag Opt_D_dump_worker_wrapper) , make_ord_flag defGhcFlag "ddump-rn-trace" (setDumpFlag Opt_D_dump_rn_trace) , make_ord_flag defGhcFlag "ddump-if-trace" (setDumpFlag Opt_D_dump_if_trace) , make_ord_flag defGhcFlag "ddump-cs-trace" (setDumpFlag Opt_D_dump_cs_trace) , make_ord_flag defGhcFlag "ddump-tc-trace" (NoArg (do setDumpFlag' Opt_D_dump_tc_trace setDumpFlag' Opt_D_dump_cs_trace)) , make_ord_flag defGhcFlag "ddump-ec-trace" (setDumpFlag Opt_D_dump_ec_trace) , make_ord_flag defGhcFlag "ddump-splices" (setDumpFlag Opt_D_dump_splices) , make_ord_flag defGhcFlag "dth-dec-file" (setDumpFlag Opt_D_th_dec_file) , make_ord_flag defGhcFlag "ddump-rn-stats" (setDumpFlag Opt_D_dump_rn_stats) , make_ord_flag defGhcFlag "ddump-opt-cmm" --old alias for cmm-opt (setDumpFlag Opt_D_dump_opt_cmm) , make_ord_flag defGhcFlag "ddump-simpl-stats" (setDumpFlag Opt_D_dump_simpl_stats) , make_ord_flag defGhcFlag "ddump-bcos" (setDumpFlag Opt_D_dump_BCOs) , make_ord_flag defGhcFlag "dsource-stats" (setDumpFlag 
Opt_D_source_stats) , make_ord_flag defGhcFlag "dverbose-core2core" (NoArg $ setVerbosity (Just 2) >> setDumpFlag' Opt_D_verbose_core2core) , make_ord_flag defGhcFlag "dverbose-stg2stg" (setDumpFlag Opt_D_verbose_stg2stg) , make_ord_flag defGhcFlag "ddump-hi" (setDumpFlag Opt_D_dump_hi) , make_ord_flag defGhcFlag "ddump-minimal-imports" (NoArg (setGeneralFlag Opt_D_dump_minimal_imports)) , make_ord_flag defGhcFlag "ddump-hpc" (setDumpFlag Opt_D_dump_ticked) -- back compat , make_ord_flag defGhcFlag "ddump-ticked" (setDumpFlag Opt_D_dump_ticked) , make_ord_flag defGhcFlag "ddump-mod-cycles" (setDumpFlag Opt_D_dump_mod_cycles) , make_ord_flag defGhcFlag "ddump-mod-map" (setDumpFlag Opt_D_dump_mod_map) , make_ord_flag defGhcFlag "ddump-timings" (setDumpFlag Opt_D_dump_timings) , make_ord_flag defGhcFlag "ddump-view-pattern-commoning" (setDumpFlag Opt_D_dump_view_pattern_commoning) , make_ord_flag defGhcFlag "ddump-to-file" (NoArg (setGeneralFlag Opt_DumpToFile)) , make_ord_flag defGhcFlag "ddump-hi-diffs" (setDumpFlag Opt_D_dump_hi_diffs) , make_ord_flag defGhcFlag "ddump-rtti" (setDumpFlag Opt_D_dump_rtti) , make_ord_flag defGhcFlag "dlint" (NoArg enableDLint) , make_ord_flag defGhcFlag "dcore-lint" (NoArg (setGeneralFlag Opt_DoCoreLinting)) , make_ord_flag defGhcFlag "dlinear-core-lint" (NoArg (setGeneralFlag Opt_DoLinearCoreLinting)) , make_ord_flag defGhcFlag "dstg-lint" (NoArg (setGeneralFlag Opt_DoStgLinting)) , make_ord_flag defGhcFlag "dcmm-lint" (NoArg (setGeneralFlag Opt_DoCmmLinting)) , make_ord_flag defGhcFlag "dasm-lint" (NoArg (setGeneralFlag Opt_DoAsmLinting)) , make_ord_flag defGhcFlag "dannot-lint" (NoArg (setGeneralFlag Opt_DoAnnotationLinting)) , make_ord_flag defGhcFlag "dtag-inference-checks" (NoArg (setGeneralFlag Opt_DoTagInferenceChecks)) , make_ord_flag defGhcFlag "dshow-passes" (NoArg $ forceRecompile >> (setVerbosity $ Just 2)) , make_ord_flag defGhcFlag "dipe-stats" (setDumpFlag Opt_D_ipe_stats) , make_ord_flag defGhcFlag 
"dfaststring-stats" (setDumpFlag Opt_D_faststring_stats) , make_ord_flag defGhcFlag "dno-llvm-mangler" (NoArg (setGeneralFlag Opt_NoLlvmMangler)) -- hidden flag , make_ord_flag defGhcFlag "dno-typeable-binds" (NoArg (setGeneralFlag Opt_NoTypeableBinds)) , make_ord_flag defGhcFlag "ddump-debug" (setDumpFlag Opt_D_dump_debug) , make_dep_flag defGhcFlag "ddump-json" (setDumpFlag Opt_D_dump_json) "Use `-fdiagnostics-as-json` instead" , make_ord_flag defGhcFlag "dppr-debug" (setDumpFlag Opt_D_ppr_debug) , make_ord_flag defGhcFlag "ddebug-output" (noArg (flip dopt_unset Opt_D_no_debug_output)) , make_ord_flag defGhcFlag "dno-debug-output" (setDumpFlag Opt_D_no_debug_output) , make_ord_flag defGhcFlag "ddump-faststrings" (setDumpFlag Opt_D_dump_faststrings) ------ Machine dependent (-m) stuff --------------------------- , make_ord_flag defGhcFlag "msse" (noArg (\d -> d { sseVersion = Just SSE1 })) , make_ord_flag defGhcFlag "msse2" (noArg (\d -> d { sseVersion = Just SSE2 })) , make_ord_flag defGhcFlag "msse3" (noArg (\d -> d { sseVersion = Just SSE3 })) , make_ord_flag defGhcFlag "msse4" (noArg (\d -> d { sseVersion = Just SSE4 })) , make_ord_flag defGhcFlag "msse4.2" (noArg (\d -> d { sseVersion = Just SSE42 })) , make_ord_flag defGhcFlag "mbmi" (noArg (\d -> d { bmiVersion = Just BMI1 })) , make_ord_flag defGhcFlag "mbmi2" (noArg (\d -> d { bmiVersion = Just BMI2 })) , make_ord_flag defGhcFlag "mavx" (noArg (\d -> d { avx = True })) , make_ord_flag defGhcFlag "mavx2" (noArg (\d -> d { avx2 = True })) , make_ord_flag defGhcFlag "mavx512cd" (noArg (\d -> d { avx512cd = True })) , make_ord_flag defGhcFlag "mavx512er" (noArg (\d -> d { avx512er = True })) , make_ord_flag defGhcFlag "mavx512f" (noArg (\d -> d { avx512f = True })) , make_ord_flag defGhcFlag "mavx512pf" (noArg (\d -> d { avx512pf = True })) , make_ord_flag defGhcFlag "mfma" (noArg (\d -> d { fma = True })) ------ Plugin flags ------------------------------------------------ , make_ord_flag defGhcFlag 
"fplugin-opt" (hasArg addPluginModuleNameOption) , make_ord_flag defGhcFlag "fplugin-trustworthy" (NoArg (setGeneralFlag Opt_PluginTrustworthy)) , make_ord_flag defGhcFlag "fplugin" (hasArg addPluginModuleName) , make_ord_flag defGhcFlag "fclear-plugins" (noArg clearPluginModuleNames) , make_ord_flag defGhcFlag "ffrontend-opt" (hasArg addFrontendPluginOption) , make_ord_flag defGhcFlag "fplugin-library" (hasArg addExternalPlugin) ------ Optimisation flags ------------------------------------------ , make_dep_flag defGhcFlag "Onot" (noArgM $ setOptLevel 0 ) "Use -O0 instead" , make_ord_flag defGhcFlag "O" (optIntSuffixM (\mb_n -> setOptLevel (mb_n `orElse` 1))) -- If the number is missing, use 1 , make_ord_flag defFlag "fbinary-blob-threshold" (intSuffix (\n d -> d { binBlobThreshold = case fromIntegral n of 0 -> Nothing x -> Just x})) , make_ord_flag defFlag "fmax-relevant-binds" (intSuffix (\n d -> d { maxRelevantBinds = Just n })) , make_ord_flag defFlag "fno-max-relevant-binds" (noArg (\d -> d { maxRelevantBinds = Nothing })) , make_ord_flag defFlag "fmax-valid-hole-fits" (intSuffix (\n d -> d { maxValidHoleFits = Just n })) , make_ord_flag defFlag "fno-max-valid-hole-fits" (noArg (\d -> d { maxValidHoleFits = Nothing })) , make_ord_flag defFlag "fmax-refinement-hole-fits" (intSuffix (\n d -> d { maxRefHoleFits = Just n })) , make_ord_flag defFlag "fno-max-refinement-hole-fits" (noArg (\d -> d { maxRefHoleFits = Nothing })) , make_ord_flag defFlag "frefinement-level-hole-fits" (intSuffix (\n d -> d { refLevelHoleFits = Just n })) , make_ord_flag defFlag "fno-refinement-level-hole-fits" (noArg (\d -> d { refLevelHoleFits = Nothing })) , make_ord_flag defFlag "fwrite-if-compression" (intSuffix (\n d -> d { ifCompression = n })) , make_dep_flag defGhcFlag "fllvm-pass-vectors-in-regs" (noArg id) "vectors registers are now passed in registers by default." 
, make_ord_flag defFlag "fmax-uncovered-patterns" (intSuffix (\n d -> d { maxUncoveredPatterns = n })) , make_ord_flag defFlag "fmax-pmcheck-models" (intSuffix (\n d -> d { maxPmCheckModels = n })) , make_ord_flag defFlag "fsimplifier-phases" (intSuffix (\n d -> d { simplPhases = n })) , make_ord_flag defFlag "fmax-simplifier-iterations" (intSuffix (\n d -> d { maxSimplIterations = n })) , (Deprecated, defFlag "fmax-pmcheck-iterations" (intSuffixM (\_ d -> do { deprecate $ "use -fmax-pmcheck-models instead" ; return d }))) , make_ord_flag defFlag "fsimpl-tick-factor" (intSuffix (\n d -> d { simplTickFactor = n })) , make_ord_flag defFlag "fdmd-unbox-width" (intSuffix (\n d -> d { dmdUnboxWidth = n })) , make_ord_flag defFlag "fspec-constr-threshold" (intSuffix (\n d -> d { specConstrThreshold = Just n })) , make_ord_flag defFlag "fno-spec-constr-threshold" (noArg (\d -> d { specConstrThreshold = Nothing })) , make_ord_flag defFlag "fspec-constr-count" (intSuffix (\n d -> d { specConstrCount = Just n })) , make_ord_flag defFlag "fno-spec-constr-count" (noArg (\d -> d { specConstrCount = Nothing })) , make_ord_flag defFlag "fspec-constr-recursive" (intSuffix (\n d -> d { specConstrRecursive = n })) , make_ord_flag defFlag "fliberate-case-threshold" (intSuffix (\n d -> d { liberateCaseThreshold = Just n })) , make_ord_flag defFlag "fno-liberate-case-threshold" (noArg (\d -> d { liberateCaseThreshold = Nothing })) , make_ord_flag defFlag "drule-check" (sepArg (\s d -> d { ruleCheck = Just s })) , make_ord_flag defFlag "dinline-check" (sepArg (\s d -> d { unfoldingOpts = updateReportPrefix (Just s) (unfoldingOpts d)})) , make_ord_flag defFlag "freduction-depth" (intSuffix (\n d -> d { reductionDepth = treatZeroAsInf n })) , make_ord_flag defFlag "fconstraint-solver-iterations" (intSuffix (\n d -> d { solverIterations = treatZeroAsInf n })) , make_ord_flag defFlag "fgivens-expansion-fuel" (intSuffix (\n d -> d { givensFuel = n })) , make_ord_flag defFlag 
"fwanteds-expansion-fuel" (intSuffix (\n d -> d { wantedsFuel = n })) , make_ord_flag defFlag "fqcs-expansion-fuel" (intSuffix (\n d -> d { qcsFuel = n })) , (Deprecated, defFlag "fcontext-stack" (intSuffixM (\n d -> do { deprecate $ "use -freduction-depth=" ++ show n ++ " instead" ; return $ d { reductionDepth = treatZeroAsInf n } }))) , (Deprecated, defFlag "ftype-function-depth" (intSuffixM (\n d -> do { deprecate $ "use -freduction-depth=" ++ show n ++ " instead" ; return $ d { reductionDepth = treatZeroAsInf n } }))) , make_ord_flag defFlag "fstrictness-before" (intSuffix (\n d -> d { strictnessBefore = n : strictnessBefore d })) , make_ord_flag defFlag "ffloat-lam-args" (intSuffix (\n d -> d { floatLamArgs = Just n })) , make_ord_flag defFlag "ffloat-all-lams" (noArg (\d -> d { floatLamArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-rec-args" (intSuffix (\n d -> d { liftLamsRecArgs = Just n })) , make_ord_flag defFlag "fstg-lift-lams-rec-args-any" (noArg (\d -> d { liftLamsRecArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-non-rec-args" (intSuffix (\n d -> d { liftLamsNonRecArgs = Just n })) , make_ord_flag defFlag "fstg-lift-lams-non-rec-args-any" (noArg (\d -> d { liftLamsNonRecArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-known" (noArg (\d -> d { liftLamsKnown = True })) , make_ord_flag defFlag "fno-stg-lift-lams-known" (noArg (\d -> d { liftLamsKnown = False })) , make_ord_flag defFlag "fproc-alignment" (intSuffix (\n d -> d { cmmProcAlignment = Just n })) , make_ord_flag defFlag "fblock-layout-weights" (HasArg (\s -> upd (\d -> d { cfgWeights = parseWeights s (cfgWeights d)}))) , make_ord_flag defFlag "fhistory-size" (intSuffix (\n d -> d { historySize = n })) , make_ord_flag defFlag "funfolding-creation-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateCreationThreshold n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-use-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateUseThreshold n 
(unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-fun-discount" (intSuffix (\n d -> d { unfoldingOpts = updateFunAppDiscount n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-dict-discount" (intSuffix (\n d -> d { unfoldingOpts = updateDictDiscount n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-case-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateCaseThreshold n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-case-scaling" (intSuffix (\n d -> d { unfoldingOpts = updateCaseScaling n (unfoldingOpts d)})) , make_dep_flag defFlag "funfolding-keeness-factor" (floatSuffix (\_ d -> d)) "-funfolding-keeness-factor is no longer respected as of GHC 9.0" , make_ord_flag defFlag "fmax-worker-args" (intSuffix (\n d -> d {maxWorkerArgs = n})) , make_ord_flag defFlag "fmax-forced-spec-args" (intSuffix (\n d -> d {maxForcedSpecArgs = n})) , make_ord_flag defGhciFlag "fghci-hist-size" (intSuffix (\n d -> d {ghciHistSize = n})) , make_ord_flag defGhcFlag "fmax-inline-alloc-size" (intSuffix (\n d -> d { maxInlineAllocSize = n })) , make_ord_flag defGhcFlag "fmax-inline-memcpy-insns" (intSuffix (\n d -> d { maxInlineMemcpyInsns = n })) , make_ord_flag defGhcFlag "fmax-inline-memset-insns" (intSuffix (\n d -> d { maxInlineMemsetInsns = n })) , make_ord_flag defGhcFlag "dinitial-unique" (word64Suffix (\n d -> d { initialUnique = n })) , make_ord_flag defGhcFlag "dunique-increment" (intSuffix (\n d -> d { uniqueIncrement = n })) ------ Profiling ---------------------------------------------------- -- OLD profiling flags , make_dep_flag defGhcFlag "auto-all" (noArg (\d -> d { profAuto = ProfAutoAll } )) "Use -fprof-auto instead" , make_dep_flag defGhcFlag "no-auto-all" (noArg (\d -> d { profAuto = NoProfAuto } )) "Use -fno-prof-auto instead" , make_dep_flag defGhcFlag "auto" (noArg (\d -> d { profAuto = ProfAutoExports } )) "Use -fprof-auto-exported instead" , make_dep_flag defGhcFlag "no-auto" (noArg (\d -> d { profAuto = NoProfAuto } )) "Use 
-fno-prof-auto instead" , make_dep_flag defGhcFlag "caf-all" (NoArg (setGeneralFlag Opt_AutoSccsOnIndividualCafs)) "Use -fprof-cafs instead" , make_dep_flag defGhcFlag "no-caf-all" (NoArg (unSetGeneralFlag Opt_AutoSccsOnIndividualCafs)) "Use -fno-prof-cafs instead" -- NEW profiling flags , make_ord_flag defGhcFlag "fprof-auto" (noArg (\d -> d { profAuto = ProfAutoAll } )) , make_ord_flag defGhcFlag "fprof-auto-top" (noArg (\d -> d { profAuto = ProfAutoTop } )) , make_ord_flag defGhcFlag "fprof-auto-exported" (noArg (\d -> d { profAuto = ProfAutoExports } )) , make_ord_flag defGhcFlag "fprof-auto-calls" (noArg (\d -> d { profAuto = ProfAutoCalls } )) , make_ord_flag defGhcFlag "fno-prof-auto" (noArg (\d -> d { profAuto = NoProfAuto } )) -- Caller-CC , make_ord_flag defGhcFlag "fprof-callers" (HasArg setCallerCcFilters) ------ Compiler flags ----------------------------------------------- , make_ord_flag defGhcFlag "fasm" (NoArg (setObjBackend ncgBackend)) , make_ord_flag defGhcFlag "fvia-c" (NoArg (deprecate $ "The -fvia-c flag does nothing; " ++ "it will be removed in a future GHC release")) , make_ord_flag defGhcFlag "fvia-C" (NoArg (deprecate $ "The -fvia-C flag does nothing; " ++ "it will be removed in a future GHC release")) , make_ord_flag defGhcFlag "fllvm" (NoArg (setObjBackend llvmBackend)) , make_ord_flag defFlag "fno-code" (NoArg ((upd $ \d -> d { ghcLink=NoLink }) >> setBackend noBackend)) , make_ord_flag defFlag "fbyte-code" (noArgM $ \dflags -> do setBackend interpreterBackend pure $ flip gopt_unset Opt_ByteCodeAndObjectCode (gopt_set dflags Opt_ByteCode)) , make_ord_flag defFlag "fobject-code" $ noArgM $ \dflags -> do setBackend $ platformDefaultBackend (targetPlatform dflags) dflags' <- liftEwM getCmdLineState pure $ gopt_unset dflags' Opt_ByteCodeAndObjectCode , make_dep_flag defFlag "fglasgow-exts" (NoArg enableGlasgowExts) "Use individual extensions instead" , make_dep_flag defFlag "fno-glasgow-exts" (NoArg disableGlasgowExts) "Use individual 
extensions instead" ------ Safe Haskell flags ------------------------------------------- , make_ord_flag defFlag "fpackage-trust" (NoArg setPackageTrust) , make_ord_flag defFlag "fno-safe-infer" (noArg (\d -> d { safeInfer = False })) , make_ord_flag defFlag "fno-safe-haskell" (NoArg (setSafeHaskell Sf_Ignore)) ------ position independent flags ---------------------------------- , make_ord_flag defGhcFlag "fPIC" (NoArg (setGeneralFlag Opt_PIC)) , make_ord_flag defGhcFlag "fno-PIC" (NoArg (unSetGeneralFlag Opt_PIC)) , make_ord_flag defGhcFlag "fPIE" (NoArg (setGeneralFlag Opt_PIE)) , make_ord_flag defGhcFlag "fno-PIE" (NoArg (unSetGeneralFlag Opt_PIE)) ------ Debugging flags ---------------------------------------------- , make_ord_flag defGhcFlag "g" (OptIntSuffix setDebugLevel) ] ++ map (mkFlag turnOn "" setGeneralFlag ) negatableFlagsDeps ++ map (mkFlag turnOff "no-" unSetGeneralFlag ) negatableFlagsDeps ++ map (mkFlag turnOn "d" setGeneralFlag ) dFlagsDeps ++ map (mkFlag turnOff "dno-" unSetGeneralFlag ) dFlagsDeps ++ map (mkFlag turnOn "f" setGeneralFlag ) fFlagsDeps ++ map (mkFlag turnOff "fno-" unSetGeneralFlag ) fFlagsDeps ++ ------ Warning flags ------------------------------------------------- [ make_ord_flag defFlag "W" (NoArg (setWarningGroup W_extra)) , make_ord_flag defFlag "Werror" (NoArg (do { setGeneralFlag Opt_WarnIsError ; setFatalWarningGroup W_everything })) , make_ord_flag defFlag "Wwarn" (NoArg (do { unSetGeneralFlag Opt_WarnIsError ; unSetFatalWarningGroup W_everything })) -- Opt_WarnIsError is still needed to pass -Werror -- to CPP; see runCpp in SysTools , make_dep_flag defFlag "Wnot" (NoArg (unSetWarningGroup W_everything)) "Use -w or -Wno-everything instead" , make_ord_flag defFlag "w" (NoArg (unSetWarningGroup W_everything)) ] -- New-style uniform warning sets -- -- Note that -Weverything > -Wall > -Wextra > -Wdefault > -Wno-everything ++ warningControls setWarningGroup unSetWarningGroup setWErrorWarningGroup unSetFatalWarningGroup 
warningGroupsDeps ++ warningControls setWarningFlag unSetWarningFlag setWErrorFlag unSetFatalWarningFlag wWarningFlagsDeps ++ warningControls setCustomWarningFlag unSetCustomWarningFlag setCustomWErrorFlag unSetCustomFatalWarningFlag [(NotDeprecated, FlagSpec "warnings-deprecations" defaultWarningCategory nop AllModes)] -- See Note [Warning categories] in GHC.Unit.Module.Warnings. ++ [ (NotDeprecated, customOrUnrecognisedWarning "Wno-" unSetCustomWarningFlag) , (NotDeprecated, customOrUnrecognisedWarning "Werror=" setCustomWErrorFlag) , (NotDeprecated, customOrUnrecognisedWarning "Wwarn=" unSetCustomFatalWarningFlag) , (NotDeprecated, customOrUnrecognisedWarning "Wno-error=" unSetCustomFatalWarningFlag) , (NotDeprecated, customOrUnrecognisedWarning "W" setCustomWarningFlag) , (Deprecated, customOrUnrecognisedWarning "fwarn-" setCustomWarningFlag) , (Deprecated, customOrUnrecognisedWarning "fno-warn-" unSetCustomWarningFlag) ] ------ JavaScript flags ----------------------------------------------- ++ [ make_ord_flag defFlag "ddisable-js-minifier" (NoArg (setGeneralFlag Opt_DisableJsMinifier)) , make_ord_flag defFlag "ddisable-js-c-sources" (NoArg (setGeneralFlag Opt_DisableJsCsources)) ] ------ Language flags ------------------------------------------------- ++ map (mkFlag turnOn "f" setExtensionFlag ) fLangFlagsDeps ++ map (mkFlag turnOff "fno-" unSetExtensionFlag) fLangFlagsDeps ++ map (mkFlag turnOn "X" setExtensionFlag ) xFlagsDeps ++ map (mkFlag turnOff "XNo" unSetExtensionFlag) xFlagsDeps ++ map (mkFlag turnOn "X" setLanguage ) languageFlagsDeps ++ map (mkFlag turnOn "X" setSafeHaskell ) safeHaskellFlagsDeps -- | Warnings have both new-style flags to control their state (@-W@, @-Wno-@, -- @-Werror=@, @-Wwarn=@) and old-style flags (@-fwarn-@, @-fno-warn-@). We -- define these uniformly for individual warning flags and groups of warnings. 
warningControls :: (warn_flag -> DynP ()) -- ^ Set the warning
                -> (warn_flag -> DynP ()) -- ^ Unset the warning
                -> (warn_flag -> DynP ()) -- ^ Make the warning an error
                -> (warn_flag -> DynP ()) -- ^ Clear the error status
                -> [(Deprecation, FlagSpec warn_flag)]
                -> [(Deprecation, Flag (CmdLineP DynFlags))]
-- Note: both -Wwarn=<flag> and -Wno-error=<flag> map to the same
-- "clear the error status" action; the old-style -fwarn-/-fno-warn- forms are
-- generated hidden (via 'hideFlag') so they do not show up in --show-options.
warningControls set unset set_werror unset_fatal xs =
     map (mkFlag turnOn  "W"          set        ) xs
  ++ map (mkFlag turnOff "Wno-"       unset      ) xs
  ++ map (mkFlag turnOn  "Werror="    set_werror ) xs
  ++ map (mkFlag turnOn  "Wwarn="     unset_fatal) xs
  ++ map (mkFlag turnOn  "Wno-error=" unset_fatal) xs
  ++ map (mkFlag turnOn  "fwarn-"     set   . hideFlag) xs
  ++ map (mkFlag turnOff "fno-warn-"  unset . hideFlag) xs

-- | This is where we handle unrecognised warning flags. If the flag is valid as
-- an extended warning category, we call the supplied action. Otherwise, issue a
-- warning if -Wunrecognised-warning-flags is set. See #11429 for context.
-- See Note [Warning categories] in GHC.Unit.Module.Warnings.
customOrUnrecognisedWarning :: String -> (WarningCategory -> DynP ()) -> Flag (CmdLineP DynFlags)
customOrUnrecognisedWarning prefix custom = defHiddenFlag prefix (Prefix action)
  where
    action :: String -> DynP ()
    action flag
      | validWarningCategory cat = custom cat
      | otherwise                = unrecognised flag
      where
        cat = mkWarningCategory (mkFastString flag)

    unrecognised flag = do
      -- #23402 and #12056
      -- for unrecognised flags we consider current dynflags, not the final one.
      -- But if final state says to not report unrecognised flags, they won't anyway.
      f <- wopt Opt_WarnUnrecognisedWarningFlags <$> liftEwM getCmdLineState
      when f $ addFlagWarn (DriverUnrecognisedFlag (prefix ++ flag))

-- | Flags that configure package databases and package visibility/trust
-- (@-package-db@, @-package@, @-hide-package@, @-trust@, ...), together with
-- their deprecated spellings.
-- See Note [Supporting CLI completion]
package_flags_deps :: [(Deprecation, Flag (CmdLineP DynFlags))]
package_flags_deps = [
        ------- Packages ----------------------------------------------------
    make_ord_flag defFlag "package-db"
      (HasArg (addPkgDbRef . PkgDbPath))
  , make_ord_flag defFlag "clear-package-db"      (NoArg clearPkgDb)
  , make_ord_flag defFlag "no-global-package-db"  (NoArg removeGlobalPkgDb)
  , make_ord_flag defFlag "no-user-package-db"    (NoArg removeUserPkgDb)
  , make_ord_flag defFlag "global-package-db"
      (NoArg (addPkgDbRef GlobalPkgDb))
  , make_ord_flag defFlag "user-package-db"
      (NoArg (addPkgDbRef UserPkgDb))
    -- backwards compat with GHC<=7.4 :
  , make_dep_flag defFlag "package-conf"
      (HasArg $ addPkgDbRef . PkgDbPath) "Use -package-db instead"
  , make_dep_flag defFlag "no-user-package-conf"
      (NoArg removeUserPkgDb) "Use -no-user-package-db instead"
  , make_ord_flag defGhcFlag "package-name"
      (HasArg $ \name -> upd (setUnitId name))
  , make_ord_flag defGhcFlag "this-unit-id"       (hasArg setUnitId)
  , make_ord_flag defGhcFlag "working-dir"        (hasArg setWorkingDirectory)
  , make_ord_flag defGhcFlag "this-package-name"  (hasArg setPackageName)
  , make_ord_flag defGhcFlag "hidden-module"      (HasArg addHiddenModule)
  , make_ord_flag defGhcFlag "reexported-module"  (HasArg addReexportedModule)
  , make_ord_flag defFlag "package"               (HasArg exposePackage)
  , make_ord_flag defFlag "plugin-package-id"     (HasArg exposePluginPackageId)
  , make_ord_flag defFlag "plugin-package"        (HasArg exposePluginPackage)
  , make_ord_flag defFlag "package-id"            (HasArg exposePackageId)
  , make_ord_flag defFlag "hide-package"          (HasArg hidePackage)
  , make_ord_flag defFlag "hide-all-packages"
      (NoArg (setGeneralFlag Opt_HideAllPackages))
  , make_ord_flag defFlag "hide-all-plugin-packages"
      (NoArg (setGeneralFlag Opt_HideAllPluginPackages))
  , make_ord_flag defFlag "package-env"           (HasArg setPackageEnv)
  , make_ord_flag defFlag "ignore-package"        (HasArg ignorePackage)
  , make_dep_flag defFlag "syslib"
      (HasArg exposePackage) "Use -package instead"
  , make_ord_flag defFlag "distrust-all-packages"
      (NoArg (setGeneralFlag Opt_DistrustAllPackages))
  , make_ord_flag defFlag "trust"                 (HasArg trustPackage)
  , make_ord_flag defFlag "distrust"              (HasArg distrustPackage)
  ]
  where
    -- Record the requested package environment file in the DynFlags.
    setPackageEnv env = upd $ \s -> s { packageEnv = Just env }

-- | Make a list of flags for shell completion.
-- Filter all available flags into two groups, for interactive GHC vs all other.
flagsForCompletion :: Bool -> [String]
flagsForCompletion isInteractive
    = [ '-':flagName flag
      | flag <- flagsAll
      , modeFilter (flagGhcMode flag)
      ]
    where
      modeFilter AllModes    = True
      modeFilter OnlyGhci    = isInteractive
      modeFilter OnlyGhc     = not isInteractive
      modeFilter HiddenFlag  = False

-- | The specification of a single command-line flag: its textual name, its
-- internal representation, an optional extra parser action, and the GHC mode
-- in which it is active.
data FlagSpec flag
  = FlagSpec
      { flagSpecName :: String             -- ^ Flag in string form
      , flagSpecFlag :: flag               -- ^ Flag in internal form
      , flagSpecAction :: (TurnOnFlag -> DynP ())
        -- ^ Extra action to run when the flag is found
        -- Typically, emit a warning or error
      , flagSpecGhcMode :: GhcFlagMode
        -- ^ In which ghc mode the flag has effect
      }

-- | Define a new flag.
flagSpec :: String -> flag -> (Deprecation, FlagSpec flag)
flagSpec name flag = flagSpec' name flag nop

-- | Define a new flag with an effect.
flagSpec' :: String -> flag -> (TurnOnFlag -> DynP ())
          -> (Deprecation, FlagSpec flag)
flagSpec' name flag act = (NotDeprecated, FlagSpec name flag act AllModes)

-- | Define a warning flag.
-- One spec is produced per name of the flag (see 'warnFlagNames'), so a
-- warning with alternative spellings yields several entries.
warnSpec :: WarningFlag -> [(Deprecation, FlagSpec WarningFlag)]
warnSpec flag = warnSpec' flag nop

-- | Define a warning flag with an effect.
warnSpec' :: WarningFlag -> (TurnOnFlag -> DynP ())
          -> [(Deprecation, FlagSpec WarningFlag)]
warnSpec' flag act = [ (NotDeprecated, FlagSpec name flag act AllModes)
                     | name <- NE.toList (warnFlagNames flag)
                     ]

-- | Define a new deprecated flag with an effect.
-- The deprecation message is emitted (via 'deprecate') after the effect runs.
depFlagSpecOp :: String -> flag -> (TurnOnFlag -> DynP ()) -> String
              -> (Deprecation, FlagSpec flag)
depFlagSpecOp name flag act dep =
    (Deprecated, snd (flagSpec' name flag (\f -> act f >> deprecate dep)))

-- | Define a new deprecated flag.
depFlagSpec :: String -> flag -> String
            -> (Deprecation, FlagSpec flag)
depFlagSpec name flag dep = depFlagSpecOp name flag nop dep

-- | Define a deprecated warning flag.
-- One deprecated spec is produced per name of the warning flag.
depWarnSpec :: WarningFlag -> String -> [(Deprecation, FlagSpec WarningFlag)]
depWarnSpec flag dep = [ depFlagSpecOp name flag nop dep
                       | name <- NE.toList (warnFlagNames flag)
                       ]

-- | Define a deprecated warning name substituted by another.
-- Only the given old name is registered; the replacement is named in @dep@.
subWarnSpec :: String -> WarningFlag -> String
            -> [(Deprecation, FlagSpec WarningFlag)]
subWarnSpec oldname flag dep = [ depFlagSpecOp oldname flag nop dep ]

-- | Define a new deprecated flag with an effect where the deprecation message
-- depends on the flag value
depFlagSpecOp' :: String -> flag -> (TurnOnFlag -> DynP ())
               -> (TurnOnFlag -> String)
               -> (Deprecation, FlagSpec flag)
depFlagSpecOp' name flag act dep =
    (Deprecated, FlagSpec name flag
                          (\f -> act f >> (deprecate $ dep f))
                          AllModes)

-- | Define a new deprecated flag where the deprecation message
-- depends on the flag value
depFlagSpec' :: String -> flag -> (TurnOnFlag -> String)
             -> (Deprecation, FlagSpec flag)
depFlagSpec' name flag dep = depFlagSpecOp' name flag nop dep

-- | Define a new flag for GHCi.
flagGhciSpec :: String -> flag -> (Deprecation, FlagSpec flag)
flagGhciSpec name flag = flagGhciSpec' name flag nop

-- | Define a new flag for GHCi with an effect.
flagGhciSpec' :: String -> flag -> (TurnOnFlag -> DynP ())
              -> (Deprecation, FlagSpec flag)
flagGhciSpec' name flag act = (NotDeprecated, FlagSpec name flag act OnlyGhci)

-- | Define a new flag invisible to CLI completion.
flagHiddenSpec :: String -> flag -> (Deprecation, FlagSpec flag)
flagHiddenSpec name flag = flagHiddenSpec' name flag nop

-- | Define a new flag invisible to CLI completion with an effect.
flagHiddenSpec' :: String -> flag -> (TurnOnFlag -> DynP ())
                -> (Deprecation, FlagSpec flag)
flagHiddenSpec' name flag act = (NotDeprecated, FlagSpec name flag act HiddenFlag)

-- | Hide a 'FlagSpec' from being displayed in @--show-options@.
--
-- This is for example useful for flags that are obsolete, but should not
-- (yet) be deprecated for compatibility reasons.
hideFlag :: (Deprecation, FlagSpec a) -> (Deprecation, FlagSpec a)
hideFlag (dep, fs) = (dep, fs { flagSpecGhcMode = HiddenFlag })

-- | Turn a 'FlagSpec' into a concrete command-line 'Flag': prepend the given
-- prefix to the name and, on parsing, apply @f@ to the internal flag and run
-- the spec's extra action with the given polarity.
mkFlag :: TurnOnFlag            -- ^ True <=> it should be turned on
       -> String                -- ^ The flag prefix
       -> (flag -> DynP ())     -- ^ What to do when the flag is found
       -> (Deprecation, FlagSpec flag)  -- ^ Specification of
                                        -- this particular flag
       -> (Deprecation, Flag (CmdLineP DynFlags))
mkFlag turn_on flagPrefix f (dep, (FlagSpec name flag extra_action mode))
    = (dep,
       Flag (flagPrefix ++ name) (NoArg (f flag >> extra_action turn_on)) mode)

-- | Emit a deprecation warning for the command-line argument currently being
-- parsed (obtained via 'getArg'), with @s@ as the explanatory message.
deprecate :: String -> DynP ()
deprecate s = do
    arg <- getArg
    addFlagWarn (DriverDeprecatedFlag arg s)

-- | Build the standard deprecation message pointing users at the @-X@ flag
-- (or its @No@-prefixed negation) for a language extension.
deprecatedForExtension :: String -> TurnOnFlag -> String
deprecatedForExtension lang turn_on
    = "use -X" ++ flag ++ " or pragma {-# LANGUAGE " ++ flag ++ " #-} instead"
  where
    flag | turn_on   = lang
         | otherwise = "No" ++ lang

-- | As 'deprecatedForExtension', but for a flag replaced by several
-- extensions at once.
deprecatedForExtensions :: [String] -> TurnOnFlag -> String
deprecatedForExtensions [] _ = panic "new extension has not been specified"
deprecatedForExtensions [lang] turn_on = deprecatedForExtension lang turn_on
deprecatedForExtensions langExts turn_on = "use " ++ xExt flags ++ " instead"
  where
    flags | turn_on   = langExts
          | otherwise = ("No" ++) <$> langExts
    xExt fls = intercalate " and " $ (\flag -> "-X" ++ flag) <$> fls

-- | Build a \"Use <prefix><flag> instead\" message, inserting @no-@ when the
-- deprecated flag was used in its negated form.
useInstead :: String -> String -> TurnOnFlag -> String
useInstead prefix flag turn_on
  = "Use " ++ prefix ++ no ++ flag ++ " instead"
  where
    no = if turn_on then "" else "no-"

-- | The trivial extra action for a 'FlagSpec': do nothing.
nop :: TurnOnFlag -> DynP ()
nop _ = return ()

-- | Find the 'FlagSpec' for a 'WarningFlag'.
flagSpecOf :: WarningFlag -> Maybe (FlagSpec WarningFlag) flagSpecOf = flip Map.lookup wWarningFlagMap wWarningFlagMap :: Map.Map WarningFlag (FlagSpec WarningFlag) wWarningFlagMap = Map.fromListWith (\_ x -> x) $ map (flagSpecFlag &&& id) wWarningFlags -- | These @-W\@ flags can all be reversed with @-Wno-\@ wWarningFlags :: [FlagSpec WarningFlag] wWarningFlags = map snd (sortBy (comparing fst) wWarningFlagsDeps) wWarningFlagsDeps :: [(Deprecation, FlagSpec WarningFlag)] wWarningFlagsDeps = [minBound..maxBound] >>= \x -> case x of -- See Note [Updating flag description in the User's Guide] -- See Note [Supporting CLI completion] Opt_WarnAlternativeLayoutRuleTransitional -> warnSpec x Opt_WarnAmbiguousFields -> warnSpec x Opt_WarnAutoOrphans -> depWarnSpec x "it has no effect" Opt_WarnCPPUndef -> warnSpec x Opt_WarnBadlyStagedTypes -> warnSpec x Opt_WarnUnbangedStrictPatterns -> warnSpec x Opt_WarnDeferredTypeErrors -> warnSpec x Opt_WarnDeferredOutOfScopeVariables -> warnSpec x Opt_WarnDeprecatedFlags -> warnSpec x Opt_WarnDerivingDefaults -> warnSpec x Opt_WarnDerivingTypeable -> warnSpec x Opt_WarnDodgyExports -> warnSpec x Opt_WarnDodgyForeignImports -> warnSpec x Opt_WarnDodgyImports -> warnSpec x Opt_WarnEmptyEnumerations -> warnSpec x Opt_WarnDuplicateConstraints -> subWarnSpec "duplicate-constraints" x "it is subsumed by -Wredundant-constraints" Opt_WarnRedundantConstraints -> warnSpec x Opt_WarnDuplicateExports -> warnSpec x Opt_WarnHiShadows -> depWarnSpec x "it is not used, and was never implemented" Opt_WarnInaccessibleCode -> warnSpec x Opt_WarnImplicitPrelude -> warnSpec x Opt_WarnImplicitKindVars -> depWarnSpec x "it is now an error" Opt_WarnIncompletePatterns -> warnSpec x Opt_WarnIncompletePatternsRecUpd -> warnSpec x Opt_WarnIncompleteUniPatterns -> warnSpec x Opt_WarnInconsistentFlags -> warnSpec x Opt_WarnInlineRuleShadowing -> warnSpec x Opt_WarnIdentities -> warnSpec x Opt_WarnLoopySuperclassSolve -> depWarnSpec x "it is now an error" 
Opt_WarnMissingFields -> warnSpec x Opt_WarnMissingImportList -> warnSpec x Opt_WarnMissingExportList -> warnSpec x Opt_WarnMissingLocalSignatures -> subWarnSpec "missing-local-sigs" x "it is replaced by -Wmissing-local-signatures" ++ warnSpec x Opt_WarnMissingMethods -> warnSpec x Opt_WarnMissingMonadFailInstances -> depWarnSpec x "fail is no longer a method of Monad" Opt_WarnSemigroup -> depWarnSpec x "Semigroup is now a superclass of Monoid" Opt_WarnMissingSignatures -> warnSpec x Opt_WarnMissingKindSignatures -> warnSpec x Opt_WarnMissingPolyKindSignatures -> warnSpec x Opt_WarnMissingExportedSignatures -> subWarnSpec "missing-exported-sigs" x "it is replaced by -Wmissing-exported-signatures" ++ warnSpec x Opt_WarnMonomorphism -> warnSpec x Opt_WarnNameShadowing -> warnSpec x Opt_WarnNonCanonicalMonadInstances -> warnSpec x Opt_WarnNonCanonicalMonadFailInstances -> depWarnSpec x "fail is no longer a method of Monad" Opt_WarnNonCanonicalMonoidInstances -> warnSpec x Opt_WarnOrphans -> warnSpec x Opt_WarnOverflowedLiterals -> warnSpec x Opt_WarnOverlappingPatterns -> warnSpec x Opt_WarnMissedSpecs -> warnSpec x Opt_WarnAllMissedSpecs -> warnSpec x Opt_WarnSafe -> warnSpec' x setWarnSafe Opt_WarnTrustworthySafe -> warnSpec x Opt_WarnInferredSafeImports -> warnSpec x Opt_WarnMissingSafeHaskellMode -> warnSpec x Opt_WarnTabs -> warnSpec x Opt_WarnTypeDefaults -> warnSpec x Opt_WarnTypedHoles -> warnSpec x Opt_WarnPartialTypeSignatures -> warnSpec x Opt_WarnUnrecognisedPragmas -> warnSpec x Opt_WarnMisplacedPragmas -> warnSpec x Opt_WarnUnsafe -> warnSpec' x setWarnUnsafe Opt_WarnUnsupportedCallingConventions -> warnSpec x Opt_WarnUnsupportedLlvmVersion -> warnSpec x Opt_WarnMissedExtraSharedLib -> warnSpec x Opt_WarnUntickedPromotedConstructors -> warnSpec x Opt_WarnUnusedDoBind -> warnSpec x Opt_WarnUnusedForalls -> warnSpec x Opt_WarnUnusedImports -> warnSpec x Opt_WarnUnusedLocalBinds -> warnSpec x Opt_WarnUnusedMatches -> warnSpec x Opt_WarnUnusedPatternBinds -> 
warnSpec x Opt_WarnUnusedTopBinds -> warnSpec x Opt_WarnUnusedTypePatterns -> warnSpec x Opt_WarnUnusedRecordWildcards -> warnSpec x Opt_WarnRedundantBangPatterns -> warnSpec x Opt_WarnRedundantRecordWildcards -> warnSpec x Opt_WarnRedundantStrictnessFlags -> warnSpec x Opt_WarnWrongDoBind -> warnSpec x Opt_WarnMissingPatternSynonymSignatures -> warnSpec x Opt_WarnMissingDerivingStrategies -> warnSpec x Opt_WarnSimplifiableClassConstraints -> warnSpec x Opt_WarnMissingHomeModules -> warnSpec x Opt_WarnUnrecognisedWarningFlags -> warnSpec x Opt_WarnStarBinder -> warnSpec x Opt_WarnStarIsType -> warnSpec x Opt_WarnSpaceAfterBang -> depWarnSpec x "bang patterns can no longer be written with a space" Opt_WarnPartialFields -> warnSpec x Opt_WarnPrepositiveQualifiedModule -> warnSpec x Opt_WarnUnusedPackages -> warnSpec x Opt_WarnCompatUnqualifiedImports -> depWarnSpec x "This warning no longer does anything; see GHC #24904" Opt_WarnInvalidHaddock -> warnSpec x Opt_WarnOperatorWhitespaceExtConflict -> warnSpec x Opt_WarnOperatorWhitespace -> warnSpec x Opt_WarnImplicitLift -> warnSpec x Opt_WarnMissingExportedPatternSynonymSignatures -> warnSpec x Opt_WarnForallIdentifier -> depWarnSpec x "forall is no longer a valid identifier" Opt_WarnUnicodeBidirectionalFormatCharacters -> warnSpec x Opt_WarnGADTMonoLocalBinds -> warnSpec x Opt_WarnTypeEqualityOutOfScope -> warnSpec x Opt_WarnTypeEqualityRequiresOperators -> warnSpec x Opt_WarnTermVariableCapture -> warnSpec x Opt_WarnMissingRoleAnnotations -> warnSpec x Opt_WarnImplicitRhsQuantification -> warnSpec x Opt_WarnIncompleteExportWarnings -> warnSpec x Opt_WarnIncompleteRecordSelectors -> warnSpec x Opt_WarnDataKindsTC -> warnSpec x Opt_WarnDeprecatedTypeAbstractions -> warnSpec x Opt_WarnDefaultedExceptionContext -> warnSpec x Opt_WarnViewPatternSignatures -> warnSpec x warningGroupsDeps :: [(Deprecation, FlagSpec WarningGroup)] warningGroupsDeps = map mk warningGroups where mk g = (NotDeprecated, FlagSpec 
(warningGroupName g) g nop AllModes) -- | These @-\@ flags can all be reversed with @-no-\@ negatableFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)] negatableFlagsDeps = [ flagGhciSpec "ignore-dot-ghci" Opt_IgnoreDotGhci ] -- | These @-d\@ flags can all be reversed with @-dno-\@ dFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)] dFlagsDeps = [ -- See Note [Updating flag description in the User's Guide] -- See Note [Supporting CLI completion] -- Please keep the list of flags below sorted alphabetically flagSpec "ppr-case-as-let" Opt_PprCaseAsLet, depFlagSpec' "ppr-ticks" Opt_PprShowTicks (\turn_on -> useInstead "-d" "suppress-ticks" (not turn_on)), flagSpec "suppress-ticks" Opt_SuppressTicks, depFlagSpec' "suppress-stg-free-vars" Opt_SuppressStgExts (useInstead "-d" "suppress-stg-exts"), flagSpec "suppress-stg-exts" Opt_SuppressStgExts, flagSpec "suppress-stg-reps" Opt_SuppressStgReps, flagSpec "suppress-coercions" Opt_SuppressCoercions, flagSpec "suppress-coercion-types" Opt_SuppressCoercionTypes, flagSpec "suppress-idinfo" Opt_SuppressIdInfo, flagSpec "suppress-unfoldings" Opt_SuppressUnfoldings, flagSpec "suppress-module-prefixes" Opt_SuppressModulePrefixes, flagSpec "suppress-timestamps" Opt_SuppressTimestamps, flagSpec "suppress-type-applications" Opt_SuppressTypeApplications, flagSpec "suppress-type-signatures" Opt_SuppressTypeSignatures, flagSpec "suppress-uniques" Opt_SuppressUniques, flagSpec "suppress-var-kinds" Opt_SuppressVarKinds, flagSpec "suppress-core-sizes" Opt_SuppressCoreSizes ] -- | These @-f\@ flags can all be reversed with @-fno-\@ fFlags :: [FlagSpec GeneralFlag] fFlags = map snd fFlagsDeps fFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)] fFlagsDeps = [ -- See Note [Updating flag description in the User's Guide] -- See Note [Supporting CLI completion] -- Please keep the list of flags below sorted alphabetically flagSpec "asm-shortcutting" Opt_AsmShortcutting, flagGhciSpec "break-on-error" Opt_BreakOnError, flagGhciSpec 
"break-on-exception" Opt_BreakOnException, flagSpec "building-cabal-package" Opt_BuildingCabalPackage, flagSpec "call-arity" Opt_CallArity, flagSpec "exitification" Opt_Exitification, flagSpec "case-merge" Opt_CaseMerge, flagSpec "case-folding" Opt_CaseFolding, flagSpec "cmm-elim-common-blocks" Opt_CmmElimCommonBlocks, flagSpec "cmm-sink" Opt_CmmSink, flagSpec "cmm-static-pred" Opt_CmmStaticPred, flagSpec "cse" Opt_CSE, flagSpec "stg-cse" Opt_StgCSE, flagSpec "stg-lift-lams" Opt_StgLiftLams, flagSpec "cpr-anal" Opt_CprAnal, flagSpec "defer-diagnostics" Opt_DeferDiagnostics, flagSpec "defer-type-errors" Opt_DeferTypeErrors, flagSpec "defer-typed-holes" Opt_DeferTypedHoles, flagSpec "defer-out-of-scope-variables" Opt_DeferOutOfScopeVariables, flagSpec "diagnostics-show-caret" Opt_DiagnosticsShowCaret, flagSpec "diagnostics-as-json" Opt_DiagnosticsAsJSON, -- With-ways needs to be reversible hence why its made via flagSpec unlike -- other debugging flags. flagSpec "dump-with-ways" Opt_DumpWithWays, flagSpec "dicts-cheap" Opt_DictsCheap, flagSpec "dicts-strict" Opt_DictsStrict, depFlagSpec "dmd-tx-dict-sel" Opt_DmdTxDictSel "effect is now unconditionally enabled", flagSpec "do-eta-reduction" Opt_DoEtaReduction, flagSpec "do-lambda-eta-expansion" Opt_DoLambdaEtaExpansion, flagSpec "do-clever-arg-eta-expansion" Opt_DoCleverArgEtaExpansion, -- See Note [Eta expansion of arguments in CorePrep] flagSpec "eager-blackholing" Opt_EagerBlackHoling, flagSpec "orig-thunk-info" Opt_OrigThunkInfo, flagSpec "embed-manifest" Opt_EmbedManifest, flagSpec "enable-rewrite-rules" Opt_EnableRewriteRules, flagSpec "enable-th-splice-warnings" Opt_EnableThSpliceWarnings, flagSpec "error-spans" Opt_ErrorSpans, flagSpec "excess-precision" Opt_ExcessPrecision, flagSpec "expose-all-unfoldings" Opt_ExposeAllUnfoldings, flagSpec "expose-overloaded-unfoldings" Opt_ExposeOverloadedUnfoldings, flagSpec "keep-auto-rules" Opt_KeepAutoRules, flagSpec "expose-internal-symbols" Opt_ExposeInternalSymbols, 
flagSpec "external-dynamic-refs" Opt_ExternalDynamicRefs, flagSpec "external-interpreter" Opt_ExternalInterpreter, flagSpec "family-application-cache" Opt_FamAppCache, flagSpec "float-in" Opt_FloatIn, flagSpec "force-recomp" Opt_ForceRecomp, flagSpec "ignore-optim-changes" Opt_IgnoreOptimChanges, flagSpec "ignore-hpc-changes" Opt_IgnoreHpcChanges, flagSpec "full-laziness" Opt_FullLaziness, depFlagSpec' "fun-to-thunk" Opt_FunToThunk (useInstead "-f" "full-laziness"), flagSpec "local-float-out" Opt_LocalFloatOut, flagSpec "local-float-out-top-level" Opt_LocalFloatOutTopLevel, flagSpec "gen-manifest" Opt_GenManifest, flagSpec "ghci-history" Opt_GhciHistory, flagSpec "ghci-leak-check" Opt_GhciLeakCheck, flagSpec "validate-ide-info" Opt_ValidateHie, flagGhciSpec "local-ghci-history" Opt_LocalGhciHistory, flagGhciSpec "no-it" Opt_NoIt, flagSpec "ghci-sandbox" Opt_GhciSandbox, flagSpec "helpful-errors" Opt_HelpfulErrors, flagSpec "hpc" Opt_Hpc, flagSpec "ignore-asserts" Opt_IgnoreAsserts, flagSpec "ignore-interface-pragmas" Opt_IgnoreInterfacePragmas, flagGhciSpec "implicit-import-qualified" Opt_ImplicitImportQualified, flagSpec "irrefutable-tuples" Opt_IrrefutableTuples, flagSpec "keep-going" Opt_KeepGoing, flagSpec "late-dmd-anal" Opt_LateDmdAnal, flagSpec "late-specialise" Opt_LateSpecialise, flagSpec "liberate-case" Opt_LiberateCase, flagHiddenSpec "llvm-fill-undef-with-garbage" Opt_LlvmFillUndefWithGarbage, flagSpec "loopification" Opt_Loopification, flagSpec "block-layout-cfg" Opt_CfgBlocklayout, flagSpec "block-layout-weightless" Opt_WeightlessBlocklayout, flagSpec "omit-interface-pragmas" Opt_OmitInterfacePragmas, flagSpec "omit-yields" Opt_OmitYields, flagSpec "optimal-applicative-do" Opt_OptimalApplicativeDo, flagSpec "pedantic-bottoms" Opt_PedanticBottoms, flagSpec "pre-inlining" Opt_SimplPreInlining, flagGhciSpec "print-bind-contents" Opt_PrintBindContents, flagGhciSpec "print-bind-result" Opt_PrintBindResult, flagGhciSpec "print-evld-with-show" 
Opt_PrintEvldWithShow, flagSpec "print-explicit-foralls" Opt_PrintExplicitForalls, flagSpec "print-explicit-kinds" Opt_PrintExplicitKinds, flagSpec "print-explicit-coercions" Opt_PrintExplicitCoercions, flagSpec "print-explicit-runtime-reps" Opt_PrintExplicitRuntimeReps, flagSpec "print-equality-relations" Opt_PrintEqualityRelations, flagSpec "print-axiom-incomps" Opt_PrintAxiomIncomps, flagSpec "print-unicode-syntax" Opt_PrintUnicodeSyntax, flagSpec "print-expanded-synonyms" Opt_PrintExpandedSynonyms, flagSpec "print-potential-instances" Opt_PrintPotentialInstances, flagSpec "print-redundant-promotion-ticks" Opt_PrintRedundantPromotionTicks, flagSpec "print-typechecker-elaboration" Opt_PrintTypecheckerElaboration, flagSpec "prof-cafs" Opt_AutoSccsOnIndividualCafs, flagSpec "prof-count-entries" Opt_ProfCountEntries, flagSpec "prof-late" Opt_ProfLateCcs, flagSpec "prof-late-overloaded" Opt_ProfLateOverloadedCcs, flagSpec "prof-late-overloaded-calls" Opt_ProfLateoverloadedCallsCCs, flagSpec "prof-manual" Opt_ProfManualCcs, flagSpec "prof-late-inline" Opt_ProfLateInlineCcs, flagSpec "regs-graph" Opt_RegsGraph, flagSpec "regs-iterative" Opt_RegsIterative, depFlagSpec' "rewrite-rules" Opt_EnableRewriteRules (useInstead "-f" "enable-rewrite-rules"), flagSpec "shared-implib" Opt_SharedImplib, flagSpec "spec-constr" Opt_SpecConstr, flagSpec "spec-constr-keen" Opt_SpecConstrKeen, flagSpec "specialise" Opt_Specialise, flagSpec "specialize" Opt_Specialise, flagSpec "specialise-aggressively" Opt_SpecialiseAggressively, flagSpec "specialize-aggressively" Opt_SpecialiseAggressively, flagSpec "cross-module-specialise" Opt_CrossModuleSpecialise, flagSpec "cross-module-specialize" Opt_CrossModuleSpecialise, flagSpec "polymorphic-specialisation" Opt_PolymorphicSpecialisation, flagSpec "specialise-incoherents" Opt_SpecialiseIncoherents, flagSpec "inline-generics" Opt_InlineGenerics, flagSpec "inline-generics-aggressively" Opt_InlineGenericsAggressively, flagSpec 
"static-argument-transformation" Opt_StaticArgumentTransformation, flagSpec "strictness" Opt_Strictness, flagSpec "use-rpaths" Opt_RPath, flagSpec "write-interface" Opt_WriteInterface, flagSpec "write-if-simplified-core" Opt_WriteIfSimplifiedCore, flagSpec "write-ide-info" Opt_WriteHie, flagSpec "unbox-small-strict-fields" Opt_UnboxSmallStrictFields, flagSpec "unbox-strict-fields" Opt_UnboxStrictFields, flagSpec "unoptimized-core-for-interpreter" Opt_UnoptimizedCoreForInterpreter, flagSpec "version-macros" Opt_VersionMacros, flagSpec "worker-wrapper" Opt_WorkerWrapper, flagSpec "worker-wrapper-cbv" Opt_WorkerWrapperUnlift, -- See Note [Worker/wrapper for strict arguments] flagSpec "solve-constant-dicts" Opt_SolveConstantDicts, flagSpec "catch-nonexhaustive-cases" Opt_CatchNonexhaustiveCases, flagSpec "alignment-sanitisation" Opt_AlignmentSanitisation, flagSpec "check-prim-bounds" Opt_DoBoundsChecking, flagSpec "num-constant-folding" Opt_NumConstantFolding, flagSpec "core-constant-folding" Opt_CoreConstantFolding, flagSpec "fast-pap-calls" Opt_FastPAPCalls, flagSpec "spec-eval" Opt_SpecEval, flagSpec "spec-eval-dictfun" Opt_SpecEvalDictFun, flagSpec "cmm-control-flow" Opt_CmmControlFlow, flagSpec "show-warning-groups" Opt_ShowWarnGroups, flagSpec "hide-source-paths" Opt_HideSourcePaths, flagSpec "show-loaded-modules" Opt_ShowLoadedModules, flagSpec "whole-archive-hs-libs" Opt_WholeArchiveHsLibs, flagSpec "keep-cafs" Opt_KeepCAFs, flagSpec "link-rts" Opt_LinkRts, flagSpec "byte-code-and-object-code" Opt_ByteCodeAndObjectCode, flagSpec "prefer-byte-code" Opt_UseBytecodeRatherThanObjects, flagSpec "object-determinism" Opt_ObjectDeterminism, flagSpec' "compact-unwind" Opt_CompactUnwind (\turn_on -> updM (\dflags -> do unless (platformOS (targetPlatform dflags) == OSDarwin && turn_on) (addWarn "-compact-unwind is only implemented by the darwin platform. 
Ignoring.") return dflags)), flagSpec "show-error-context" Opt_ShowErrorContext, flagSpec "cmm-thread-sanitizer" Opt_CmmThreadSanitizer, flagSpec "split-sections" Opt_SplitSections, flagSpec "break-points" Opt_InsertBreakpoints, flagSpec "distinct-constructor-tables" Opt_DistinctConstructorTables, flagSpec "info-table-map" Opt_InfoTableMap, flagSpec "info-table-map-with-stack" Opt_InfoTableMapWithStack, flagSpec "info-table-map-with-fallback" Opt_InfoTableMapWithFallback ] ++ fHoleFlags -- | These @-f\@ flags have to do with the typed-hole error message or -- the valid hole fits in that message. See Note [Valid hole fits include ...] -- in the "GHC.Tc.Errors.Hole" module. These flags can all be reversed with -- @-fno-\@ fHoleFlags :: [(Deprecation, FlagSpec GeneralFlag)] fHoleFlags = [ flagSpec "show-hole-constraints" Opt_ShowHoleConstraints, depFlagSpec' "show-valid-substitutions" Opt_ShowValidHoleFits (useInstead "-f" "show-valid-hole-fits"), flagSpec "show-valid-hole-fits" Opt_ShowValidHoleFits, -- Sorting settings flagSpec "sort-valid-hole-fits" Opt_SortValidHoleFits, flagSpec "sort-by-size-hole-fits" Opt_SortBySizeHoleFits, flagSpec "sort-by-subsumption-hole-fits" Opt_SortBySubsumHoleFits, flagSpec "abstract-refinement-hole-fits" Opt_AbstractRefHoleFits, -- Output format settings flagSpec "show-hole-matches-of-hole-fits" Opt_ShowMatchesOfHoleFits, flagSpec "show-provenance-of-hole-fits" Opt_ShowProvOfHoleFits, flagSpec "show-type-of-hole-fits" Opt_ShowTypeOfHoleFits, flagSpec "show-type-app-of-hole-fits" Opt_ShowTypeAppOfHoleFits, flagSpec "show-type-app-vars-of-hole-fits" Opt_ShowTypeAppVarsOfHoleFits, flagSpec "show-docs-of-hole-fits" Opt_ShowDocsOfHoleFits, flagSpec "unclutter-valid-hole-fits" Opt_UnclutterValidHoleFits ] -- | These @-f\@ flags can all be reversed with @-fno-\@ fLangFlags :: [FlagSpec LangExt.Extension] fLangFlags = map snd fLangFlagsDeps fLangFlagsDeps :: [(Deprecation, FlagSpec LangExt.Extension)] fLangFlagsDeps = [ -- See Note [Updating 
flag description in the User's Guide] -- See Note [Supporting CLI completion] depFlagSpecOp' "th" LangExt.TemplateHaskell checkTemplateHaskellOk (deprecatedForExtension "TemplateHaskell"), depFlagSpec' "fi" LangExt.ForeignFunctionInterface (deprecatedForExtension "ForeignFunctionInterface"), depFlagSpec' "ffi" LangExt.ForeignFunctionInterface (deprecatedForExtension "ForeignFunctionInterface"), depFlagSpec' "arrows" LangExt.Arrows (deprecatedForExtension "Arrows"), depFlagSpec' "implicit-prelude" LangExt.ImplicitPrelude (deprecatedForExtension "ImplicitPrelude"), depFlagSpec' "bang-patterns" LangExt.BangPatterns (deprecatedForExtension "BangPatterns"), depFlagSpec' "monomorphism-restriction" LangExt.MonomorphismRestriction (deprecatedForExtension "MonomorphismRestriction"), depFlagSpec' "extended-default-rules" LangExt.ExtendedDefaultRules (deprecatedForExtension "ExtendedDefaultRules"), depFlagSpec' "implicit-params" LangExt.ImplicitParams (deprecatedForExtension "ImplicitParams"), depFlagSpec' "scoped-type-variables" LangExt.ScopedTypeVariables (deprecatedForExtension "ScopedTypeVariables"), depFlagSpec' "allow-overlapping-instances" LangExt.OverlappingInstances (deprecatedForExtension "OverlappingInstances"), depFlagSpec' "allow-undecidable-instances" LangExt.UndecidableInstances (deprecatedForExtension "UndecidableInstances"), depFlagSpec' "allow-incoherent-instances" LangExt.IncoherentInstances (deprecatedForExtension "IncoherentInstances") ] supportedLanguages :: [String] supportedLanguages = map (flagSpecName . snd) languageFlagsDeps supportedLanguageOverlays :: [String] supportedLanguageOverlays = map (flagSpecName . snd) safeHaskellFlagsDeps supportedExtensions :: ArchOS -> [String] supportedExtensions (ArchOS arch os) = concatMap toFlagSpecNamePair xFlags where toFlagSpecNamePair flg -- IMPORTANT! Make sure that `ghc --supported-extensions` omits -- "TemplateHaskell"/"QuasiQuotes" when it's known not to work out of the -- box. 
See also GHC #11102 and #16331 for more details about -- the rationale | isAIX, flagSpecFlag flg == LangExt.TemplateHaskell = [noName] | isAIX, flagSpecFlag flg == LangExt.QuasiQuotes = [noName] -- "JavaScriptFFI" is only supported on the JavaScript/Wasm backend | notJSOrWasm, flagSpecFlag flg == LangExt.JavaScriptFFI = [noName] | otherwise = [name, noName] where isAIX = os == OSAIX notJSOrWasm = not $ arch `elem` [ ArchJavaScript, ArchWasm32 ] noName = "No" ++ name name = flagSpecName flg supportedLanguagesAndExtensions :: ArchOS -> [String] supportedLanguagesAndExtensions arch_os = supportedLanguages ++ supportedLanguageOverlays ++ supportedExtensions arch_os -- | These -X flags cannot be reversed with -XNo languageFlagsDeps :: [(Deprecation, FlagSpec Language)] languageFlagsDeps = [ flagSpec "Haskell98" Haskell98, flagSpec "Haskell2010" Haskell2010, flagSpec "GHC2021" GHC2021, flagSpec "GHC2024" GHC2024 ] -- | These -X flags cannot be reversed with -XNo -- They are used to place hard requirements on what GHC Haskell language -- features can be used. safeHaskellFlagsDeps :: [(Deprecation, FlagSpec SafeHaskellMode)] safeHaskellFlagsDeps = [mkF Sf_Unsafe, mkF Sf_Trustworthy, mkF Sf_Safe] where mkF flag = flagSpec (show flag) flag -- | These -X flags can all be reversed with -XNo xFlags :: [FlagSpec LangExt.Extension] xFlags = map snd xFlagsDeps makeExtensionFlags :: LangExt.Extension -> [(Deprecation, FlagSpec LangExt.Extension)] makeExtensionFlags ext = [ makeExtensionFlag name depr ext | (depr, name) <- extensionNames ext ] xFlagsDeps :: [(Deprecation, FlagSpec LangExt.Extension)] xFlagsDeps = concatMap makeExtensionFlags [minBound .. 
maxBound] makeExtensionFlag :: String -> ExtensionDeprecation -> LangExt.Extension -> (Deprecation, FlagSpec LangExt.Extension) makeExtensionFlag name depr ext = (deprecation depr, spec) where effect = extensionEffect ext spec = FlagSpec name ext (\f -> effect f >> act f) AllModes act = case depr of ExtensionNotDeprecated -> nop ExtensionDeprecatedFor xs -> deprecate . deprecatedForExtensions (map extensionName xs) ExtensionFlagDeprecatedCond cond str -> \f -> when (f == cond) (deprecate str) ExtensionFlagDeprecated str -> const (deprecate str) extensionEffect :: LangExt.Extension -> (TurnOnFlag -> DynP ()) extensionEffect = \case LangExt.TemplateHaskell -> checkTemplateHaskellOk LangExt.OverlappingInstances -> setOverlappingInsts LangExt.GeneralizedNewtypeDeriving -> setGenDeriving LangExt.IncoherentInstances -> setIncoherentInsts LangExt.DerivingVia -> setDeriveVia _ -> nop -- | Things you get with `-dlint`. enableDLint :: DynP () enableDLint = do mapM_ setGeneralFlag dLintFlags addWayDynP WayDebug where dLintFlags :: [GeneralFlag] dLintFlags = [ Opt_DoCoreLinting , Opt_DoStgLinting , Opt_DoCmmLinting , Opt_DoAsmLinting , Opt_CatchNonexhaustiveCases , Opt_LlvmFillUndefWithGarbage ] enableGlasgowExts :: DynP () enableGlasgowExts = do setGeneralFlag Opt_PrintExplicitForalls mapM_ setExtensionFlag glasgowExtsFlags disableGlasgowExts :: DynP () disableGlasgowExts = do unSetGeneralFlag Opt_PrintExplicitForalls mapM_ unSetExtensionFlag glasgowExtsFlags setWarnSafe :: Bool -> DynP () setWarnSafe True = getCurLoc >>= \l -> upd (\d -> d { warnSafeOnLoc = l }) setWarnSafe False = return () setWarnUnsafe :: Bool -> DynP () setWarnUnsafe True = getCurLoc >>= \l -> upd (\d -> d { warnUnsafeOnLoc = l }) setWarnUnsafe False = return () setPackageTrust :: DynP () setPackageTrust = do setGeneralFlag Opt_PackageTrust l <- getCurLoc upd $ \d -> d { pkgTrustOnLoc = l } setGenDeriving :: TurnOnFlag -> DynP () setGenDeriving True = getCurLoc >>= \l -> upd (\d -> d { newDerivOnLoc = l 
}) setGenDeriving False = return () setDeriveVia :: TurnOnFlag -> DynP () setDeriveVia True = getCurLoc >>= \l -> upd (\d -> d { deriveViaOnLoc = l }) setDeriveVia False = return () setOverlappingInsts :: TurnOnFlag -> DynP () setOverlappingInsts False = return () setOverlappingInsts True = do l <- getCurLoc upd (\d -> d { overlapInstLoc = l }) setIncoherentInsts :: TurnOnFlag -> DynP () setIncoherentInsts False = return () setIncoherentInsts True = do l <- getCurLoc upd (\d -> d { incoherentOnLoc = l }) checkTemplateHaskellOk :: TurnOnFlag -> DynP () checkTemplateHaskellOk _turn_on = getCurLoc >>= \l -> upd (\d -> d { thOnLoc = l }) {- ********************************************************************** %* * DynFlags constructors %* * %********************************************************************* -} type DynP = EwM (CmdLineP DynFlags) upd :: (DynFlags -> DynFlags) -> DynP () upd f = liftEwM (do dflags <- getCmdLineState putCmdLineState $! f dflags) updM :: (DynFlags -> DynP DynFlags) -> DynP () updM f = do dflags <- liftEwM getCmdLineState dflags' <- f dflags liftEwM $ putCmdLineState $! dflags' --------------- Constructor functions for OptKind ----------------- noArg :: (DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) noArg fn = NoArg (upd fn) noArgM :: (DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags) noArgM fn = NoArg (updM fn) hasArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) hasArg fn = HasArg (upd . fn) sepArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) sepArg fn = SepArg (upd . 
fn) intSuffix :: (Int -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) intSuffix fn = IntSuffix (\n -> upd (fn n)) intSuffixM :: (Int -> DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags) intSuffixM fn = IntSuffix (\n -> updM (fn n)) word64Suffix :: (Word64 -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) word64Suffix fn = Word64Suffix (\n -> upd (fn n)) floatSuffix :: (Float -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags) floatSuffix fn = FloatSuffix (\n -> upd (fn n)) optIntSuffixM :: (Maybe Int -> DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags) optIntSuffixM fn = OptIntSuffix (\mi -> updM (fn mi)) setDumpFlag :: DumpFlag -> OptKind (CmdLineP DynFlags) setDumpFlag dump_flag = NoArg (setDumpFlag' dump_flag) -------------------------- addWayDynP :: Way -> DynP () addWayDynP = upd . addWay' addWay' :: Way -> DynFlags -> DynFlags addWay' w dflags0 = let platform = targetPlatform dflags0 dflags1 = dflags0 { targetWays_ = addWay w (targetWays_ dflags0) } dflags2 = foldr setGeneralFlag' dflags1 (wayGeneralFlags platform w) dflags3 = foldr unSetGeneralFlag' dflags2 (wayUnsetGeneralFlags platform w) in dflags3 removeWayDynP :: Way -> DynP () removeWayDynP w = upd (\dfs -> dfs { targetWays_ = removeWay w (targetWays_ dfs) }) -------------------------- setGeneralFlag, unSetGeneralFlag :: GeneralFlag -> DynP () setGeneralFlag f = upd (setGeneralFlag' f) unSetGeneralFlag f = upd (unSetGeneralFlag' f) setGeneralFlag' :: GeneralFlag -> DynFlags -> DynFlags setGeneralFlag' f dflags = foldr ($) (gopt_set dflags f) deps where deps = [ if turn_on then setGeneralFlag' d else unSetGeneralFlag' d | (f', turn_on, d) <- impliedGFlags, f' == f ] -- When you set f, set the ones it implies -- NB: use setGeneralFlag recursively, in case the implied flags -- implies further flags unSetGeneralFlag' :: GeneralFlag -> DynFlags -> DynFlags unSetGeneralFlag' f dflags = foldr ($) (gopt_unset dflags f) deps where deps = [ if turn_on then setGeneralFlag' d else 
unSetGeneralFlag' d | (f', turn_on, d) <- impliedOffGFlags, f' == f ] -- In general, when you un-set f, we don't un-set the things it implies. -- There are however some exceptions, e.g., -fno-strictness implies -- -fno-worker-wrapper. -- -- NB: use unSetGeneralFlag' recursively, in case the implied off flags -- imply further flags. -------------------------- setWarningGroup :: WarningGroup -> DynP () setWarningGroup g = do mapM_ setWarningFlag (warningGroupFlags g) when (warningGroupIncludesExtendedWarnings g) $ upd wopt_set_all_custom unSetWarningGroup :: WarningGroup -> DynP () unSetWarningGroup g = do mapM_ unSetWarningFlag (warningGroupFlags g) when (warningGroupIncludesExtendedWarnings g) $ upd wopt_unset_all_custom setWErrorWarningGroup :: WarningGroup -> DynP () setWErrorWarningGroup g = do { setWarningGroup g ; setFatalWarningGroup g } setFatalWarningGroup :: WarningGroup -> DynP () setFatalWarningGroup g = do mapM_ setFatalWarningFlag (warningGroupFlags g) when (warningGroupIncludesExtendedWarnings g) $ upd wopt_set_all_fatal_custom unSetFatalWarningGroup :: WarningGroup -> DynP () unSetFatalWarningGroup g = do mapM_ unSetFatalWarningFlag (warningGroupFlags g) when (warningGroupIncludesExtendedWarnings g) $ upd wopt_unset_all_fatal_custom setWarningFlag, unSetWarningFlag :: WarningFlag -> DynP () setWarningFlag f = upd (\dfs -> wopt_set dfs f) unSetWarningFlag f = upd (\dfs -> wopt_unset dfs f) setFatalWarningFlag, unSetFatalWarningFlag :: WarningFlag -> DynP () setFatalWarningFlag f = upd (\dfs -> wopt_set_fatal dfs f) unSetFatalWarningFlag f = upd (\dfs -> wopt_unset_fatal dfs f) setWErrorFlag :: WarningFlag -> DynP () setWErrorFlag flag = do { setWarningFlag flag ; setFatalWarningFlag flag } setCustomWarningFlag, unSetCustomWarningFlag :: WarningCategory -> DynP () setCustomWarningFlag f = upd (\dfs -> wopt_set_custom dfs f) unSetCustomWarningFlag f = upd (\dfs -> wopt_unset_custom dfs f) setCustomFatalWarningFlag, unSetCustomFatalWarningFlag :: 
WarningCategory -> DynP () setCustomFatalWarningFlag f = upd (\dfs -> wopt_set_fatal_custom dfs f) unSetCustomFatalWarningFlag f = upd (\dfs -> wopt_unset_fatal_custom dfs f) setCustomWErrorFlag :: WarningCategory -> DynP () setCustomWErrorFlag flag = do { setCustomWarningFlag flag ; setCustomFatalWarningFlag flag } -------------------------- setExtensionFlag, unSetExtensionFlag :: LangExt.Extension -> DynP () setExtensionFlag f = upd (setExtensionFlag' f) unSetExtensionFlag f = upd (unSetExtensionFlag' f) setExtensionFlag', unSetExtensionFlag' :: LangExt.Extension -> DynFlags -> DynFlags setExtensionFlag' f dflags = foldr ($) (xopt_set dflags f) deps where deps = [ if turn_on then setExtensionFlag' d else unSetExtensionFlag' d | (f', turn_on, d) <- impliedXFlags, f' == f ] -- When you set f, set the ones it implies -- NB: use setExtensionFlag recursively, in case the implied flags -- implies further flags unSetExtensionFlag' f dflags = xopt_unset dflags f -- When you un-set f, however, we don't un-set the things it implies -- (except for -fno-glasgow-exts, which is treated specially) -------------------------- alterToolSettings :: (ToolSettings -> ToolSettings) -> DynFlags -> DynFlags alterToolSettings f dynFlags = dynFlags { toolSettings = f (toolSettings dynFlags) } -------------------------- setDumpFlag' :: DumpFlag -> DynP () setDumpFlag' dump_flag = do upd (\dfs -> dopt_set dfs dump_flag) when want_recomp forceRecompile where -- Certain dumpy-things are really interested in what's going -- on during recompilation checking, so in those cases we -- don't want to turn it off. want_recomp = dump_flag `notElem` [Opt_D_dump_if_trace, Opt_D_dump_hi_diffs, Opt_D_no_debug_output] forceRecompile :: DynP () -- Whenever we -ddump, force recompilation (by switching off the -- recompilation checker), else you don't see the dump! 
However, -- don't switch it off in --make mode, else *everything* gets -- recompiled which probably isn't what you want forceRecompile = do dfs <- liftEwM getCmdLineState when (force_recomp dfs) (setGeneralFlag Opt_ForceRecomp) where force_recomp dfs = isOneShot (ghcMode dfs) setVerbosity :: Maybe Int -> DynP () setVerbosity mb_n = upd (\dfs -> dfs{ verbosity = mb_n `orElse` 3 }) setDebugLevel :: Maybe Int -> DynP () setDebugLevel mb_n = upd (\dfs -> exposeSyms $ dfs{ debugLevel = n }) where n = mb_n `orElse` 2 exposeSyms | n > 2 = setGeneralFlag' Opt_ExposeInternalSymbols | otherwise = id addPkgDbRef :: PkgDbRef -> DynP () addPkgDbRef p = upd $ \s -> s { packageDBFlags = PackageDB p : packageDBFlags s } removeUserPkgDb :: DynP () removeUserPkgDb = upd $ \s -> s { packageDBFlags = NoUserPackageDB : packageDBFlags s } removeGlobalPkgDb :: DynP () removeGlobalPkgDb = upd $ \s -> s { packageDBFlags = NoGlobalPackageDB : packageDBFlags s } clearPkgDb :: DynP () clearPkgDb = upd $ \s -> s { packageDBFlags = ClearPackageDBs : packageDBFlags s } parsePackageFlag :: String -- the flag -> ReadP PackageArg -- type of argument -> String -- string to parse -> PackageFlag parsePackageFlag flag arg_parse str = case filter ((=="").snd) (readP_to_S parse str) of [(r, "")] -> r _ -> throwGhcException $ CmdLineError ("Can't parse package flag: " ++ str) where doc = flag ++ " " ++ str parse = do pkg_arg <- tok arg_parse let mk_expose = ExposePackage doc pkg_arg ( do _ <- tok $ string "with" fmap (mk_expose . ModRenaming True) parseRns <++ fmap (mk_expose . 
ModRenaming False) parseRns <++ return (mk_expose (ModRenaming True []))) parseRns = do _ <- tok $ R.char '(' rns <- tok $ sepBy parseItem (tok $ R.char ',') _ <- tok $ R.char ')' return rns parseItem = do orig <- tok $ parseModuleName (do _ <- tok $ string "as" new <- tok $ parseModuleName return (orig, new) +++ return (orig, orig)) tok m = m >>= \x -> skipSpaces >> return x exposePackage, exposePackageId, hidePackage, exposePluginPackage, exposePluginPackageId, ignorePackage, trustPackage, distrustPackage :: String -> DynP () exposePackage p = upd (exposePackage' p) exposePackageId p = upd (\s -> s{ packageFlags = parsePackageFlag "-package-id" parseUnitArg p : packageFlags s }) exposePluginPackage p = upd (\s -> s{ pluginPackageFlags = parsePackageFlag "-plugin-package" parsePackageArg p : pluginPackageFlags s }) exposePluginPackageId p = upd (\s -> s{ pluginPackageFlags = parsePackageFlag "-plugin-package-id" parseUnitArg p : pluginPackageFlags s }) hidePackage p = upd (\s -> s{ packageFlags = HidePackage p : packageFlags s }) ignorePackage p = upd (\s -> s{ ignorePackageFlags = IgnorePackage p : ignorePackageFlags s }) trustPackage p = exposePackage p >> -- both trust and distrust also expose a package upd (\s -> s{ trustFlags = TrustPackage p : trustFlags s }) distrustPackage p = exposePackage p >> upd (\s -> s{ trustFlags = DistrustPackage p : trustFlags s }) exposePackage' :: String -> DynFlags -> DynFlags exposePackage' p dflags = dflags { packageFlags = parsePackageFlag "-package" parsePackageArg p : packageFlags dflags } parsePackageArg :: ReadP PackageArg parsePackageArg = fmap PackageArg (munch1 (\c -> isAlphaNum c || c `elem` ":-_.")) parseUnitArg :: ReadP PackageArg parseUnitArg = fmap UnitIdArg parseUnit setUnitId :: String -> DynFlags -> DynFlags setUnitId p d = d { homeUnitId_ = stringToUnitId p } setWorkingDirectory :: String -> DynFlags -> DynFlags setWorkingDirectory p d = d { workingDirectory = Just p } {- Note [Filepaths and Multiple Home 
Units] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is common to assume that a package is compiled in the directory where its cabal file resides. Thus, all paths used in the compiler are assumed to be relative to this directory. When there are multiple home units the compiler is often not operating in the standard directory and instead where the cabal.project file is located. In this case the `-working-dir` option can be passed which specifies the path from the current directory to the directory the unit assumes to be it's root, normally the directory which contains the cabal file. When the flag is passed, any relative paths used by the compiler are offset by the working directory. Notably this includes `-i`, `-I⟨dir⟩`, `-hidir`, `-odir` etc and the location of input files. -} augmentByWorkingDirectory :: DynFlags -> FilePath -> FilePath augmentByWorkingDirectory dflags fp | isRelative fp, Just offset <- workingDirectory dflags = offset fp augmentByWorkingDirectory _ fp = fp setPackageName :: String -> DynFlags -> DynFlags setPackageName p d = d { thisPackageName = Just p } addHiddenModule :: String -> DynP () addHiddenModule p = upd (\s -> s{ hiddenModules = Set.insert (mkModuleName p) (hiddenModules s) }) addReexportedModule :: String -> DynP () addReexportedModule p = upd (\s -> s{ reexportedModules = (parseReexportedModule p) : (reexportedModules s) }) parseReexportedModule :: String -- string to parse -> ReexportedModule parseReexportedModule str = case filter ((=="").snd) (readP_to_S parseItem str) of [(r, "")] -> r _ -> throwGhcException $ CmdLineError ("Can't parse reexported module flag: " ++ str) where parseItem = do orig <- tok $ parseModuleName (do _ <- tok $ string "as" new <- tok $ parseModuleName return (ReexportedModule orig new)) +++ return (ReexportedModule orig orig) tok m = m >>= \x -> skipSpaces >> return x -- If we're linking a binary, then only backends that produce object -- code are allowed (requests for other target types are ignored). 
setBackend :: Backend -> DynP () setBackend l = upd $ \ dfs -> if ghcLink dfs /= LinkBinary || backendWritesFiles l then dfs{ backend = l } else dfs -- Changes the target only if we're compiling object code. This is -- used by -fasm and -fllvm, which switch from one to the other, but -- not from bytecode to object-code. The idea is that -fasm/-fllvm -- can be safely used in an OPTIONS_GHC pragma. setObjBackend :: Backend -> DynP () setObjBackend l = updM set where set dflags | backendWritesFiles (backend dflags) = return $ dflags { backend = l } | otherwise = return dflags setOptLevel :: Int -> DynFlags -> DynP DynFlags setOptLevel n dflags = return (updOptLevel n dflags) setCallerCcFilters :: String -> DynP () setCallerCcFilters arg = case parseCallerCcFilter arg of Right filt -> upd $ \d -> d { callerCcFilters = filt : callerCcFilters d } Left err -> addErr err setMainIs :: String -> DynP () setMainIs arg = parse parse_main_f arg where parse callback str = case unP parseIdentifier (p_state str) of PFailed _ -> addErr $ "Can't parse -main-is \"" ++ arg ++ "\" as an identifier or module." POk _ (L _ re) -> callback re -- dummy parser state. p_state str = initParserState (mkParserOpts mempty emptyDiagOpts [] False False False True) (stringToStringBuffer str) (mkRealSrcLoc (mkFastString []) 1 1) parse_main_f (Unqual occ) | isVarOcc occ = upd $ \d -> d { mainFunIs = main_f occ } parse_main_f (Qual (ModuleName mod) occ) | isVarOcc occ = upd $ \d -> d { mainModuleNameIs = mkModuleNameFS mod , mainFunIs = main_f occ } -- append dummy "function" to parse A.B as the module A.B -- and not the Data constructor B from the module A parse_main_f _ = parse parse_mod (arg ++ ".main") main_f = Just . occNameString parse_mod (Qual (ModuleName mod) _) = upd $ \d -> d { mainModuleNameIs = mkModuleNameFS mod } -- we appended ".m" and any parse error was caught. 
We are Qual or something went very wrong parse_mod _ = error "unreachable" addLdInputs :: Option -> DynFlags -> DynFlags addLdInputs p dflags = dflags{ldInputs = ldInputs dflags ++ [p]} -- ----------------------------------------------------------------------------- -- Load dynflags from environment files. setFlagsFromEnvFile :: FilePath -> String -> DynP () setFlagsFromEnvFile envfile content = do setGeneralFlag Opt_HideAllPackages parseEnvFile envfile content parseEnvFile :: FilePath -> String -> DynP () parseEnvFile envfile = mapM_ parseEntry . lines where parseEntry str = case words str of ("package-db": _) -> addPkgDbRef (PkgDbPath (envdir db)) -- relative package dbs are interpreted relative to the env file where envdir = takeDirectory envfile db = drop 11 str ["clear-package-db"] -> clearPkgDb ["hide-package", pkg] -> hidePackage pkg ["global-package-db"] -> addPkgDbRef GlobalPkgDb ["user-package-db"] -> addPkgDbRef UserPkgDb ["package-id", pkgid] -> exposePackageId pkgid (('-':'-':_):_) -> return () -- comments -- and the original syntax introduced in 7.10: [pkgid] -> exposePackageId pkgid [] -> return () _ -> throwGhcException $ CmdLineError $ "Can't parse environment file entry: " ++ envfile ++ ": " ++ str ----------------------------------------------------------------------------- -- Paths & Libraries addImportPath, addLibraryPath, addIncludePath, addFrameworkPath :: FilePath -> DynP () -- -i on its own deletes the import paths addImportPath "" = upd (\s -> s{importPaths = []}) addImportPath p = upd (\s -> s{importPaths = importPaths s ++ splitPathList p}) addLibraryPath p = upd (\s -> s{libraryPaths = libraryPaths s ++ splitPathList p}) addIncludePath p = upd (\s -> s{includePaths = addGlobalInclude (includePaths s) (splitPathList p)}) addFrameworkPath p = upd (\s -> s{frameworkPaths = frameworkPaths s ++ splitPathList p}) #if !defined(mingw32_HOST_OS) split_marker :: Char split_marker = ':' -- not configurable (ToDo) #endif splitPathList :: String -> 
[String] splitPathList s = filter notNull (splitUp s) -- empty paths are ignored: there might be a trailing -- ':' in the initial list, for example. Empty paths can -- cause confusion when they are translated into -I options -- for passing to gcc. where #if !defined(mingw32_HOST_OS) splitUp xs = split split_marker xs #else -- Windows: 'hybrid' support for DOS-style paths in directory lists. -- -- That is, if "foo:bar:baz" is used, this interpreted as -- consisting of three entries, 'foo', 'bar', 'baz'. -- However, with "c:/foo:c:\\foo;x:/bar", this is interpreted -- as 3 elts, "c:/foo", "c:\\foo", "x:/bar" -- -- Notice that no attempt is made to fully replace the 'standard' -- split marker ':' with the Windows / DOS one, ';'. The reason being -- that this will cause too much breakage for users & ':' will -- work fine even with DOS paths, if you're not insisting on being silly. -- So, use either. splitUp [] = [] splitUp (x:':':div:xs) | div `elem` dir_markers = ((x:':':div:p): splitUp rs) where (p,rs) = findNextPath xs -- we used to check for existence of the path here, but that -- required the IO monad to be threaded through the command-line -- parser which is quite inconvenient. The splitUp xs = cons p (splitUp rs) where (p,rs) = findNextPath xs cons "" xs = xs cons x xs = x:xs -- will be called either when we've consumed nought or the -- ":/" part of a DOS path, so splitting is just a Q of -- finding the next split marker. findNextPath xs = case break (`elem` split_markers) xs of (p, _:ds) -> (p, ds) (p, xs) -> (p, xs) split_markers :: [Char] split_markers = [':', ';'] dir_markers :: [Char] dir_markers = ['/', '\\'] #endif -- ----------------------------------------------------------------------------- -- tmpDir, where we store temporary files. setTmpDir :: FilePath -> DynFlags -> DynFlags setTmpDir dir d = d { tmpDir = TempDir (normalise dir) } -- we used to fix /cygdrive/c/.. 
on Windows, but this doesn't -- seem necessary now --SDM 7/2/2008 ----------------------------------------------------------------------------- -- RTS opts setRtsOpts :: String -> DynP () setRtsOpts arg = upd $ \ d -> d {rtsOpts = Just arg} setRtsOptsEnabled :: RtsOptsEnabled -> DynP () setRtsOptsEnabled arg = upd $ \ d -> d {rtsOptsEnabled = arg} ----------------------------------------------------------------------------- -- Hpc stuff setOptHpcDir :: String -> DynP () setOptHpcDir arg = upd $ \ d -> d {hpcDir = arg} ----------------------------------------------------------------------------- -- Via-C compilation stuff -- There are some options that we need to pass to gcc when compiling -- Haskell code via C, but are only supported by recent versions of -- gcc. The configure script decides which of these options we need, -- and puts them in the "settings" file in $topdir. The advantage of -- having these in a separate file is that the file can be created at -- install-time depending on the available gcc version, and even -- re-generated later if gcc is upgraded. -- -- The options below are not dependent on the version of gcc, only the -- platform. picCCOpts :: DynFlags -> [String] picCCOpts dflags = case platformOS (targetPlatform dflags) of OSDarwin -- Apple prefers to do things the other way round. -- PIC is on by default. -- -mdynamic-no-pic: -- Turn off PIC code generation. -- -fno-common: -- Don't generate "common" symbols - these are unwanted -- in dynamic libraries. | gopt Opt_PIC dflags -> ["-fno-common", "-U__PIC__", "-D__PIC__"] | otherwise -> ["-mdynamic-no-pic"] OSMinGW32 -- no -fPIC for Windows | gopt Opt_PIC dflags -> ["-U__PIC__", "-D__PIC__"] | otherwise -> [] _ -- we need -fPIC for C files when we are compiling with -dynamic, -- otherwise things like stub.c files don't get compiled -- correctly. They need to reference data in the Haskell -- objects, but can't without -fPIC. 
See -- https://gitlab.haskell.org/ghc/ghc/wikis/commentary/position-independent-code | gopt Opt_PIC dflags || ways dflags `hasWay` WayDyn -> ["-fPIC", "-U__PIC__", "-D__PIC__"] ++ -- Clang defaults to -fvisibility=hidden for wasm targets, -- but we need these compile-time flags to generate PIC -- objects that can be properly linked by wasm-ld using -- --export-dynamic; without these flags we would need -- -Wl,--export-all at .so link-time which will export -- internal symbols as well, and that severely pollutes the -- global symbol namespace. (if platformArch (targetPlatform dflags) == ArchWasm32 then [ "-fvisibility=default", "-fvisibility-inlines-hidden" ] else []) -- gcc may be configured to have PIC on by default, let's be -- explicit here, see #15847 | otherwise -> ["-fno-PIC"] pieCCLDOpts :: DynFlags -> [String] pieCCLDOpts dflags | gopt Opt_PICExecutable dflags = ["-pie"] -- See Note [No PIE when linking] | toolSettings_ccSupportsNoPie (toolSettings dflags) = ["-no-pie"] | otherwise = [] {- Note [No PIE when linking] ~~~~~~~~~~~~~~~~~~~~~~~~~~ As of 2016 some Linux distributions (e.g. Debian) have started enabling -pie by default in their gcc builds. This is incompatible with -r as it implies that we are producing an executable. Consequently, we must manually pass -no-pie to gcc when joining object files or linking dynamic libraries. Unless, of course, the user has explicitly requested a PIE executable with -pie. See #12759. -} picPOpts :: DynFlags -> [String] picPOpts dflags | gopt Opt_PIC dflags = ["-U__PIC__", "-D__PIC__"] | otherwise = [] -- ----------------------------------------------------------------------------- -- Compiler Info compilerInfo :: DynFlags -> [(String, String)] compilerInfo dflags = -- We always make "Project name" be first to keep parsing in -- other languages simple, i.e. 
when looking for other fields, -- you don't have to worry whether there is a leading '[' or not ("Project name", cProjectName) -- Next come the settings, so anything else can be overridden -- in the settings file (as "lookup" uses the first match for the -- key) : map (fmap $ expandDirectories (topDir dflags) (toolDir dflags)) (rawSettings dflags) ++ [("Project version", projectVersion dflags), ("Project Git commit id", cProjectGitCommitId), ("Project Version Int", cProjectVersionInt), ("Project Patch Level", cProjectPatchLevel), ("Project Patch Level1", cProjectPatchLevel1), ("Project Patch Level2", cProjectPatchLevel2), ("Project Unit Id", cProjectUnitId), ("Booter version", cBooterVersion), ("Stage", cStage), ("Build platform", cBuildPlatformString), ("Host platform", cHostPlatformString), ("Target platform", platformMisc_targetPlatformString $ platformMisc dflags), ("Have interpreter", showBool $ platformMisc_ghcWithInterpreter $ platformMisc dflags), ("Object splitting supported", showBool False), ("Have native code generator", showBool $ platformNcgSupported platform), ("target has RTS linker", showBool $ platformHasRTSLinker platform), ("Target default backend", show $ platformDefaultBackend platform), -- Whether or not we support @-dynamic-too@ ("Support dynamic-too", showBool $ not isWindows), -- Whether or not we support the @-j@ flag with @--make@. ("Support parallel --make", "YES"), -- Whether or not we support "Foo from foo-0.1-XXX:Foo" syntax in -- installed package info. ("Support reexported-modules", "YES"), -- Whether or not we support extended @-package foo (Foo)@ syntax. ("Support thinning and renaming package flags", "YES"), -- Whether or not we support Backpack. ("Support Backpack", "YES"), -- If true, we require that the 'id' field in installed package info -- match what is passed to the @-this-unit-id@ flag for modules -- built in it ("Requires unified installed package IDs", "YES"), -- Whether or not we support the @-this-package-key@ flag. 
Prefer -- "Uses unit IDs" over it. We still say yes even if @-this-package-key@ -- flag has been removed, otherwise it breaks Cabal... ("Uses package keys", "YES"), -- Whether or not we support the @-this-unit-id@ flag ("Uses unit IDs", "YES"), -- Whether or not GHC was compiled using -dynamic ("GHC Dynamic", showBool hostIsDynamic), -- Whether or not GHC was compiled using -prof ("GHC Profiled", showBool hostIsProfiled), ("Debug on", showBool debugIsOn), ("LibDir", topDir dflags), -- This is always an absolute path, unlike "Relative Global Package DB" which is -- in the settings file. ("Global Package DB", globalPackageDatabasePath dflags) ] where showBool True = "YES" showBool False = "NO" platform = targetPlatform dflags isWindows = platformOS platform == OSMinGW32 useInplaceMinGW = toolSettings_useInplaceMinGW $ toolSettings dflags expandDirectories :: FilePath -> Maybe FilePath -> String -> String expandDirectories topd mtoold = expandToolDir useInplaceMinGW mtoold . expandTopDir topd {- ----------------------------------------------------------------------------- Note [DynFlags consistency] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a number of number of DynFlags configurations which either do not make sense or lead to unimplemented or buggy codepaths in the compiler. makeDynFlagsConsistent is responsible for verifying the validity of a set of DynFlags, fixing any issues, and reporting them back to the caller. GHCi and -O --------------- When using optimization, the compiler can introduce several things (such as unboxed tuples) into the intermediate code, which GHCi later chokes on since the bytecode interpreter can't handle this (and while this is arguably a bug these aren't handled, there are no plans to fix it.) 
While the driver pipeline always checks for this particular erroneous combination when parsing flags, we also need to check when we update the flags; this is because API clients may parse flags but update the DynFlags afterwords, before finally running code inside a session (see T10052 and #10052). -} -- | Resolve any internal inconsistencies in a set of 'DynFlags'. -- Returns the consistent 'DynFlags' as well as a list of warnings -- to report to the user. makeDynFlagsConsistent :: DynFlags -> (DynFlags, [Warn]) -- Whenever makeDynFlagsConsistent does anything, it starts over, to -- ensure that a later change doesn't invalidate an earlier check. -- Be careful not to introduce potential loops! makeDynFlagsConsistent dflags -- Disable -dynamic-too on Windows (#8228, #7134, #5987) | os == OSMinGW32 && gopt Opt_BuildDynamicToo dflags = let dflags' = gopt_unset dflags Opt_BuildDynamicToo warn = "-dynamic-too is not supported on Windows" in loop dflags' warn -- Disable -dynamic-too if we are are compiling with -dynamic already, otherwise -- you get two dynamic object files (.o and .dyn_o). (#20436) | ways dflags `hasWay` WayDyn && gopt Opt_BuildDynamicToo dflags = let dflags' = gopt_unset dflags Opt_BuildDynamicToo warn = "-dynamic-too is ignored when using -dynamic" in loop dflags' warn | gopt Opt_SplitSections dflags , platformHasSubsectionsViaSymbols (targetPlatform dflags) = let dflags' = gopt_unset dflags Opt_SplitSections warn = "-fsplit-sections is not useful on this platform " ++ "since it uses subsections-via-symbols. Ignoring." in loop dflags' warn -- Via-C backend only supports unregisterised ABI. Switch to a backend -- supporting it if possible. 
| backendUnregisterisedAbiOnly (backend dflags) && not (platformUnregisterised (targetPlatform dflags)) = let b = platformDefaultBackend (targetPlatform dflags) in if backendSwappableWithViaC b then let dflags' = dflags { backend = b } warn = "Target platform doesn't use unregisterised ABI, so using " ++ backendDescription b ++ " rather than " ++ backendDescription (backend dflags) in loop dflags' warn else pgmError (backendDescription (backend dflags) ++ " supports only unregisterised ABI but target platform doesn't use it.") | gopt Opt_Hpc dflags && not (backendSupportsHpc (backend dflags)) = let dflags' = gopt_unset dflags Opt_Hpc warn = "Hpc can't be used with " ++ backendDescription (backend dflags) ++ ". Ignoring -fhpc." in loop dflags' warn | backendSwappableWithViaC (backend dflags) && platformUnregisterised (targetPlatform dflags) = loop (dflags { backend = viaCBackend }) "Target platform uses unregisterised ABI, so compiling via C" | backendNeedsPlatformNcgSupport (backend dflags) && not (platformNcgSupported $ targetPlatform dflags) = let dflags' = dflags { backend = llvmBackend } warn = "Native code generator doesn't support target platform, so using LLVM" in loop dflags' warn | not (osElfTarget os) && gopt Opt_PIE dflags = loop (gopt_unset dflags Opt_PIE) "Position-independent only supported on ELF platforms" | os == OSDarwin && arch == ArchX86_64 && not (gopt Opt_PIC dflags) = loop (gopt_set dflags Opt_PIC) "Enabling -fPIC as it is always on for this platform" | backendForcesOptimization0 (backend dflags) , gopt Opt_UnoptimizedCoreForInterpreter dflags , let (dflags', changed) = updOptLevelChanged 0 dflags , changed = loop dflags' $ "Ignoring optimization flags since they are experimental for the " ++ backendDescription (backend dflags) ++ ". Pass -fno-unoptimized-core-for-interpreter to enable this feature." 
| LinkInMemory <- ghcLink dflags , not (gopt Opt_ExternalInterpreter dflags) , hostIsProfiled , backendWritesFiles (backend dflags) , ways dflags `hasNotWay` WayProf = loop dflags{targetWays_ = addWay WayProf (targetWays_ dflags)} "Enabling -prof, because -fobject-code is enabled and GHCi is profiled" | gopt Opt_ByteCode dflags || gopt Opt_ByteCodeAndObjectCode dflags , not (gopt Opt_ExternalInterpreter dflags) , hostIsProfiled , ways dflags `hasNotWay` WayProf = loop (gopt_set dflags Opt_ExternalInterpreter) "Enabling external interpreter, because GHC is profiled and bytecode is being used for TH" | LinkMergedObj <- ghcLink dflags , Nothing <- outputFile dflags = pgmError "--output must be specified when using --merge-objs" | otherwise = (dflags, mempty) where loc = mkGeneralSrcSpan (fsLit "when making flags consistent") loop updated_dflags warning = case makeDynFlagsConsistent updated_dflags of (dflags', ws) -> (dflags', L loc (DriverInconsistentDynFlags warning) : ws) platform = targetPlatform dflags arch = platformArch platform os = platformOS platform setUnsafeGlobalDynFlags :: DynFlags -> IO () setUnsafeGlobalDynFlags dflags = do writeIORef v_unsafeHasPprDebug (hasPprDebug dflags) writeIORef v_unsafeHasNoDebugOutput (hasNoDebugOutput dflags) writeIORef v_unsafeHasNoStateHack (hasNoStateHack dflags) -- ----------------------------------------------------------------------------- -- | Indicate if cost-centre profiling is enabled sccProfilingEnabled :: DynFlags -> Bool sccProfilingEnabled dflags = profileIsProfiling (targetProfile dflags) -- | Indicate whether we need to generate source notes needSourceNotes :: DynFlags -> Bool needSourceNotes dflags = debugLevel dflags > 0 || gopt Opt_InfoTableMap dflags -- Source ticks are used to approximate the location of -- overloaded call cost centers || gopt Opt_ProfLateoverloadedCallsCCs dflags -- ----------------------------------------------------------------------------- -- Linker/compiler information -- | Should we 
use `-XLinker -rpath` when linking or not? -- See Note [-fno-use-rpaths] useXLinkerRPath :: DynFlags -> OS -> Bool -- wasm shared libs don't have RPATH at all and wasm-ld doesn't accept -- any RPATH-related flags useXLinkerRPath dflags _ | ArchWasm32 <- platformArch $ targetPlatform dflags = False useXLinkerRPath _ OSDarwin = False -- See Note [Dynamic linking on macOS] useXLinkerRPath dflags _ = gopt Opt_RPath dflags {- Note [-fno-use-rpaths] ~~~~~~~~~~~~~~~~~~~~~~ First read, Note [Dynamic linking on macOS] to understand why on darwin we never use `-XLinker -rpath`. The specification of `Opt_RPath` is as follows: The default case `-fuse-rpaths`: * On darwin, never use `-Xlinker -rpath -Xlinker`, always inject the rpath afterwards, see `runInjectRPaths`. There is no way to use `-Xlinker` on darwin as things stand but it wasn't documented in the user guide before this patch how `-fuse-rpaths` should behave and the fact it was always disabled on darwin. * Otherwise, use `-Xlinker -rpath -Xlinker` to set the rpath of the executable, this is the normal way you should set the rpath. The case of `-fno-use-rpaths` * Never inject anything into the rpath. When this was first implemented, `Opt_RPath` was disabled on darwin, but the rpath was still always augmented by `runInjectRPaths`, and there was no way to stop this. This was problematic because you couldn't build an executable in CI with a clean rpath. -} -- ----------------------------------------------------------------------------- -- RTS hooks -- Convert sizes like "3.5M" into integers decodeSize :: String -> Integer decodeSize str | c == "" = truncate n | c == "K" || c == "k" = truncate (n * 1000) | c == "M" || c == "m" = truncate (n * 1000 * 1000) | c == "G" || c == "g" = truncate (n * 1000 * 1000 * 1000) | otherwise = throwGhcException (CmdLineError ("can't decode size: " ++ str)) where (m, c) = span pred str n = readRational m pred c = isDigit c || c == '.' 
foreign import ccall unsafe "ghc_lib_parser_setHeapSize" setHeapSize :: Int -> IO () foreign import ccall unsafe "ghc_lib_parser_enableTimingStats" enableTimingStats :: IO () outputFile :: DynFlags -> Maybe String outputFile dflags | dynamicNow dflags = dynOutputFile_ dflags | otherwise = outputFile_ dflags objectSuf :: DynFlags -> String objectSuf dflags | dynamicNow dflags = dynObjectSuf_ dflags | otherwise = objectSuf_ dflags -- | Pretty-print the difference between 2 DynFlags. -- -- For now only their general flags but it could be extended. -- Useful mostly for debugging. pprDynFlagsDiff :: DynFlags -> DynFlags -> SDoc pprDynFlagsDiff d1 d2 = let gf_removed = EnumSet.difference (generalFlags d1) (generalFlags d2) gf_added = EnumSet.difference (generalFlags d2) (generalFlags d1) ext_removed = EnumSet.difference (extensionFlags d1) (extensionFlags d2) ext_added = EnumSet.difference (extensionFlags d2) (extensionFlags d1) in vcat [ text "Added general flags:" , text $ show $ EnumSet.toList $ gf_added , text "Removed general flags:" , text $ show $ EnumSet.toList $ gf_removed , text "Added extension flags:" , text $ show $ EnumSet.toList $ ext_added , text "Removed extension flags:" , text $ show $ EnumSet.toList $ ext_removed ] updatePlatformConstants :: DynFlags -> Maybe PlatformConstants -> IO DynFlags updatePlatformConstants dflags mconstants = do let platform1 = (targetPlatform dflags) { platform_constants = mconstants } let dflags1 = dflags { targetPlatform = platform1 } return dflags1 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs.hs0000644000000000000000000001162707346545000016623 0ustar0000000000000000{-# OPTIONS_GHC -O0 #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section{Haskell abstract syntax definition} This module glues together the pieces of the Haskell abstract syntax, which is declared in the various \tr{Hs*} modules. This module, therefore, is almost nothing but re-exporting. 
-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE FlexibleInstances #-} -- For deriving instance Data {-# LANGUAGE DataKinds #-} module GHC.Hs ( module Language.Haskell.Syntax, module GHC.Hs.Binds, module GHC.Hs.Decls, module GHC.Hs.Expr, module GHC.Hs.ImpExp, module GHC.Hs.Lit, module GHC.Hs.Pat, module GHC.Hs.Type, module GHC.Hs.Utils, module GHC.Hs.Doc, module GHC.Hs.Extension, module GHC.Parser.Annotation, Fixity, HsModule(..), AnnsModule(..), HsParsedModule(..), XModulePs(..) ) where -- friends: import GHC.Prelude import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.ImpExp import GHC.Hs.Lit import Language.Haskell.Syntax import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Hs.Pat import GHC.Hs.Type import GHC.Hs.Utils import GHC.Hs.Doc import GHC.Hs.Instances () -- For Data instances -- others: import GHC.Utils.Outputable import GHC.Types.Fixity ( Fixity ) import GHC.Types.SrcLoc import GHC.Unit.Module.Warnings -- libraries: import Data.Data hiding ( Fixity ) -- | Haskell Module extension point: GHC specific data XModulePs = XModulePs { hsmodAnn :: EpAnn AnnsModule, hsmodLayout :: EpLayout, -- ^ Layout info for the module. -- For incomplete modules (e.g. the output of parseHeader), it is EpNoLayout. 
hsmodDeprecMessage :: Maybe (LWarningTxt GhcPs), -- ^ reason\/explanation for warning/deprecation of this module hsmodHaddockModHeader :: Maybe (LHsDoc GhcPs) -- ^ Haddock module info and description, unparsed } deriving Data type instance XCModule GhcPs = XModulePs type instance XCModule GhcRn = DataConCantHappen type instance XCModule GhcTc = DataConCantHappen type instance XXModule p = DataConCantHappen deriving instance Data (HsModule GhcPs) data AnnsModule = AnnsModule { am_sig :: EpToken "signature", am_mod :: EpToken "module", am_where :: EpToken "where", am_decls :: [TrailingAnn], -- ^ Semis before the start of top decls am_cs :: [LEpaComment], -- ^ Comments before start of top decl, -- used in exact printing only am_eof :: Maybe (RealSrcSpan, RealSrcSpan) -- ^ End of file and end of prior token } deriving (Data, Eq) instance NoAnn AnnsModule where noAnn = AnnsModule NoEpTok NoEpTok NoEpTok [] [] Nothing instance Outputable (HsModule GhcPs) where ppr (HsModule { hsmodExt = XModulePs { hsmodHaddockModHeader = mbDoc } , hsmodName = Nothing , hsmodImports = imports , hsmodDecls = decls }) = pprMaybeWithDoc mbDoc $ pp_nonnull imports $$ pp_nonnull decls ppr (HsModule { hsmodExt = XModulePs { hsmodDeprecMessage = deprec , hsmodHaddockModHeader = mbDoc } , hsmodName = (Just name) , hsmodExports = exports , hsmodImports = imports , hsmodDecls = decls }) = pprMaybeWithDoc mbDoc $ vcat [ case exports of Nothing -> pp_header (text "where") Just es -> vcat [ pp_header lparen, nest 8 (pprWithCommas ppr (unLoc es)), nest 4 (text ") where") ], pp_nonnull imports, pp_nonnull decls ] where pp_header rest = case deprec of Nothing -> pp_modname <+> rest Just d -> vcat [ pp_modname, ppr d, rest ] pp_modname = text "module" <+> ppr name pp_nonnull :: Outputable t => [t] -> SDoc pp_nonnull [] = empty pp_nonnull xs = vcat (map ppr xs) data HsParsedModule = HsParsedModule { hpm_module :: Located (HsModule GhcPs), hpm_src_files :: [FilePath] -- ^ extra source files (e.g. 
from #includes). The lexer collects -- these from '# ' pragmas, which the C preprocessor -- leaves behind. These files and their timestamps are stored in -- the .hi file, so that we can force recompilation if any of -- them change (#3589) } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/0000755000000000000000000000000007346545000016260 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Basic.hs0000644000000000000000000000237007346545000017637 0ustar0000000000000000{-# OPTIONS_GHC -Wno-orphans #-} -- Outputable, Binary {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE DeriveDataTypeable #-} -- | Fixity module GHC.Hs.Basic ( module Language.Haskell.Syntax.Basic ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Binary import Data.Data () import Language.Haskell.Syntax.Basic instance Outputable LexicalFixity where ppr Prefix = text "Prefix" ppr Infix = text "Infix" instance Outputable FixityDirection where ppr InfixL = text "infixl" ppr InfixR = text "infixr" ppr InfixN = text "infix" instance Outputable Fixity where ppr (Fixity prec dir) = hcat [ppr dir, space, int prec] instance Binary Fixity where put_ bh (Fixity aa ab) = do put_ bh aa put_ bh ab get bh = do aa <- get bh ab <- get bh return (Fixity aa ab) ------------------------ instance Binary FixityDirection where put_ bh InfixL = putByte bh 0 put_ bh InfixR = putByte bh 1 put_ bh InfixN = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return InfixL 1 -> return InfixR _ -> return InfixN ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Binds.hs0000644000000000000000000011207107346545000017655 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} -- used to pass the phase to ppr_mult_ann since MultAnn is a type family {-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# 
LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[HsBinds]{Abstract syntax: top-level bindings and signatures} Datatype for: @BindGroup@, @Bind@, @Sig@, @Bind@. -} module GHC.Hs.Binds ( module Language.Haskell.Syntax.Binds , module GHC.Hs.Binds ) where import GHC.Prelude import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Binds import {-# SOURCE #-} GHC.Hs.Expr ( pprExpr, pprFunBind, pprPatBind ) import {-# SOURCE #-} GHC.Hs.Pat (pprLPat ) import GHC.Types.Tickish import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Hs.Type import GHC.Tc.Types.Evidence import GHC.Core.Type import GHC.Types.Name.Set import GHC.Types.Basic import GHC.Types.SourceText import GHC.Types.SrcLoc as SrcLoc import GHC.Types.Var import GHC.Data.BooleanFormula (LBooleanFormula) import GHC.Types.Name import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc ((<||>)) import Data.Function import Data.List (sortBy) import Data.Data (Data) {- ************************************************************************ * * \subsection{Bindings: @BindGroup@} * * ************************************************************************ Global bindings (where clauses) -} -- the ...LR datatypes are parameterized by two id types, -- one for the left and one for the right. 
type instance XHsValBinds (GhcPass pL) (GhcPass pR) = EpAnn (AnnList (EpToken "where")) type instance XHsIPBinds (GhcPass pL) (GhcPass pR) = EpAnn (AnnList (EpToken "where")) type instance XEmptyLocalBinds (GhcPass pL) (GhcPass pR) = NoExtField type instance XXHsLocalBindsLR (GhcPass pL) (GhcPass pR) = DataConCantHappen -- --------------------------------------------------------------------- -- Deal with ValBindsOut -- TODO: make this the only type for ValBinds data NHsValBindsLR idL = NValBinds [(RecFlag, LHsBinds idL)] [LSig GhcRn] type instance XValBinds (GhcPass pL) (GhcPass pR) = AnnSortKey BindTag type instance XXValBindsLR (GhcPass pL) pR = NHsValBindsLR (GhcPass pL) -- --------------------------------------------------------------------- type instance XFunBind (GhcPass pL) GhcPs = NoExtField type instance XFunBind (GhcPass pL) GhcRn = NameSet -- ^ After the renamer (but before the type-checker), the FunBind -- extension field contains the locally-bound free variables of this -- defn. See Note [Bind free vars] type instance XFunBind (GhcPass pL) GhcTc = (HsWrapper, [CoreTickish]) -- ^ After the type-checker, the FunBind extension field contains -- the ticks to put on the rhs, if any, and a coercion from the -- type of the MatchGroup to the type of the Id. -- Example: -- -- @ -- f :: Int -> forall a. a -> a -- f x y = y -- @ -- -- Then the MatchGroup will have type (Int -> a' -> a') -- (with a free type variable a'). The coercion will take -- a CoreExpr of this type and convert it to a CoreExpr of -- type Int -> forall a'. a' -> a' -- Notice that the coercion captures the free a'. type instance XPatBind GhcPs (GhcPass pR) = NoExtField type instance XPatBind GhcRn (GhcPass pR) = NameSet -- See Note [Bind free vars] type instance XPatBind GhcTc (GhcPass pR) = ( Type -- Type of the GRHSs , ( [CoreTickish] -- Ticks to put on the rhs, if any , [[CoreTickish]] ) ) -- and ticks to put on the bound variables. 
type instance XVarBind (GhcPass pL) (GhcPass pR) = XVarBindGhc pL pR type family XVarBindGhc pL pR where XVarBindGhc 'Typechecked 'Typechecked = NoExtField XVarBindGhc _ _ = DataConCantHappen type instance XPatSynBind (GhcPass pL) (GhcPass pR) = NoExtField type instance XXHsBindsLR GhcPs pR = DataConCantHappen type instance XXHsBindsLR GhcRn pR = DataConCantHappen type instance XXHsBindsLR GhcTc pR = AbsBinds type instance XPSB (GhcPass idL) GhcPs = AnnPSB type instance XPSB (GhcPass idL) GhcRn = NameSet -- Post renaming, FVs. See Note [Bind free vars] type instance XPSB (GhcPass idL) GhcTc = NameSet type instance XXPatSynBind (GhcPass idL) (GhcPass idR) = DataConCantHappen type instance XNoMultAnn GhcPs = NoExtField type instance XNoMultAnn GhcRn = NoExtField type instance XNoMultAnn GhcTc = Mult type instance XPct1Ann GhcPs = EpToken "%1" type instance XPct1Ann GhcRn = NoExtField type instance XPct1Ann GhcTc = Mult type instance XMultAnn GhcPs = EpToken "%" type instance XMultAnn GhcRn = NoExtField type instance XMultAnn GhcTc = Mult type instance XXMultAnn (GhcPass _) = DataConCantHappen data AnnPSB = AnnPSB { ap_pattern :: EpToken "pattern", ap_openc :: Maybe (EpToken "{"), ap_closec :: Maybe (EpToken "}"), ap_larrow :: Maybe (EpUniToken "<-" "←"), ap_equal :: Maybe (EpToken "=") } deriving Data instance NoAnn AnnPSB where noAnn = AnnPSB noAnn noAnn noAnn noAnn noAnn setTcMultAnn :: Mult -> HsMultAnn GhcRn -> HsMultAnn GhcTc setTcMultAnn mult (HsPct1Ann _) = HsPct1Ann mult setTcMultAnn mult (HsMultAnn _ p) = HsMultAnn mult p setTcMultAnn mult (HsNoMultAnn _) = HsNoMultAnn mult getTcMultAnn :: HsMultAnn GhcTc -> Mult getTcMultAnn (HsPct1Ann mult) = mult getTcMultAnn (HsMultAnn mult _) = mult getTcMultAnn (HsNoMultAnn mult) = mult -- --------------------------------------------------------------------- -- | Typechecked, generalised bindings, used in the output to the type checker. -- See Note [AbsBinds]. 
data AbsBinds = AbsBinds { abs_tvs :: [TyVar], abs_ev_vars :: [EvVar], -- ^ Includes equality constraints -- | AbsBinds only gets used when idL = idR after renaming, -- but these need to be idL's for the collect... code in HsUtil -- to have the right type abs_exports :: [ABExport], -- | Evidence bindings -- Why a list? See "GHC.Tc.TyCl.Instance" -- Note [Typechecking plan for instance declarations] abs_ev_binds :: [TcEvBinds], -- | Typechecked user bindings abs_binds :: LHsBinds GhcTc, abs_sig :: Bool -- See Note [The abs_sig field of AbsBinds] } -- Consider (AbsBinds tvs ds [(ftvs, poly_f, mono_f) binds] -- -- Creates bindings for (polymorphic, overloaded) poly_f -- in terms of monomorphic, non-overloaded mono_f -- -- Invariants: -- 1. 'binds' binds mono_f -- 2. ftvs is a subset of tvs -- 3. ftvs includes all tyvars free in ds -- -- See Note [AbsBinds] -- | Abstraction Bindings Export data ABExport = ABE { abe_poly :: Id -- ^ Any INLINE pragma is attached to this Id , abe_mono :: Id , abe_wrap :: HsWrapper -- ^ See Note [ABExport wrapper] -- Shape: (forall abs_tvs. abs_ev_vars => abe_mono) ~ abe_poly , abe_prags :: TcSpecPrags -- ^ SPECIALISE pragmas } {- Note [AbsBinds] ~~~~~~~~~~~~~~~ The AbsBinds constructor is used in the output of the type checker, to record *typechecked* and *generalised* bindings. Specifically AbsBinds { abs_tvs = tvs , abs_ev_vars = [d1,d2] , abs_exports = [ABE { abe_poly = fp, abe_mono = fm , abe_wrap = fwrap } ABE { slly for g } ] , abs_ev_binds = DBINDS , abs_binds = BIND[fm,gm] } where 'BIND' binds the monomorphic Ids 'fm' and 'gm', means fp = fwrap [/\ tvs. \d1 d2. letrec { DBINDS ] [ ; BIND[fm,gm] } ] [ in fm ] gp = ...same again, with gm instead of fm The 'fwrap' is an impedance-matcher that typically does nothing; see Note [ABExport wrapper]. This is a pretty bad translation, because it duplicates all the bindings. 
So the desugarer tries to do a better job: fp = /\ [a,b] -> \ [d1,d2] -> case tp [a,b] [d1,d2] of (fm,gm) -> fm ..ditto for gp.. tp = /\ [a,b] -> \ [d1,d2] -> letrec { DBINDS; BIND } in (fm,gm) In general: * abs_tvs are the type variables over which the binding group is generalised * abs_ev_var are the evidence variables (usually dictionaries) over which the binding group is generalised * abs_binds are the monomorphic bindings * abs_ex_binds are the evidence bindings that wrap the abs_binds * abs_exports connects the monomorphic Ids bound by abs_binds with the polymorphic Ids bound by the AbsBinds itself. For example, consider a module M, with this top-level binding, where there is no type signature for M.reverse, M.reverse [] = [] M.reverse (x:xs) = M.reverse xs ++ [x] In Hindley-Milner, a recursive binding is typechecked with the *recursive* uses being *monomorphic*. So after typechecking *and* desugaring we will get something like this M.reverse :: forall a. [a] -> [a] = /\a. letrec reverse :: [a] -> [a] = \xs -> case xs of [] -> [] (x:xs) -> reverse xs ++ [x] in reverse Notice that 'M.reverse' is polymorphic as expected, but there is a local definition for plain 'reverse' which is *monomorphic*. The type variable 'a' scopes over the entire letrec. That's after desugaring. What about after type checking but before desugaring? That's where AbsBinds comes in. It looks like this: AbsBinds { abs_tvs = [a] , abs_ev_vars = [] , abs_exports = [ABE { abe_poly = M.reverse :: forall a. [a] -> [a], , abe_mono = reverse :: [a] -> [a]}] , abs_ev_binds = {} , abs_binds = { reverse :: [a] -> [a] = \xs -> case xs of [] -> [] (x:xs) -> reverse xs ++ [x] } } Here, * abs_tvs says what type variables are abstracted over the binding group, just 'a' in this case. 
* abs_binds is the *monomorphic* bindings of the group * abs_exports describes how to get the polymorphic Id 'M.reverse' from the monomorphic one 'reverse' Notice that the *original* function (the polymorphic one you thought you were defining) appears in the abe_poly field of the abs_exports. The bindings in abs_binds are for fresh, local, Ids with a *monomorphic* Id. If there is a group of mutually recursive (see Note [Polymorphic recursion]) functions without type signatures, we get one AbsBinds with the monomorphic versions of the bindings in abs_binds, and one element of abe_exports for each variable bound in the mutually recursive group. This is true even for pattern bindings. Example: (f,g) = (\x -> x, f) After type checking we get AbsBinds { abs_tvs = [a] , abs_exports = [ ABE { abe_poly = M.f :: forall a. a -> a , abe_mono = f :: a -> a } , ABE { abe_poly = M.g :: forall a. a -> a , abe_mono = g :: a -> a }] , abs_binds = { (f,g) = (\x -> x, f) } Note [Polymorphic recursion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider Rec { f x = ...(g ef)... ; g :: forall a. [a] -> [a] ; g y = ...(f eg)... } These bindings /are/ mutually recursive (f calls g, and g calls f). But we can use the type signature for g to break the recursion, like this: 1. Add g :: forall a. [a] -> [a] to the type environment 2. Typecheck the definition of f, all by itself, including generalising it to find its most general type, say f :: forall b. b -> b -> [b] 3. Extend the type environment with that type for f 4. Typecheck the definition of g, all by itself, checking that it has the type claimed by its signature Steps 2 and 4 each generate a separate AbsBinds, so we end up with Rec { AbsBinds { ...for f ... } ; AbsBinds { ...for g ... } } This approach allows both f and to call each other polymorphically, even though only g has a signature. 
We get an AbsBinds that encompasses multiple source-program bindings only when * Each binding in the group has at least one binder that lacks a user type signature * The group forms a strongly connected component Note [The abs_sig field of AbsBinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The abs_sig field supports a couple of special cases for bindings. Consider x :: Num a => (# a, a #) x = (# 3, 4 #) The general desugaring for AbsBinds would give x = /\a. \ ($dNum :: Num a) -> letrec xm = (# fromInteger $dNum 3, fromInteger $dNum 4 #) in xm But that has an illegal let-binding for an unboxed tuple. In this case we'd prefer to generate the (more direct) x = /\ a. \ ($dNum :: Num a) -> (# fromInteger $dNum 3, fromInteger $dNum 4 #) A similar thing happens with representation-polymorphic defns (#11405): undef :: forall (r :: RuntimeRep) (a :: TYPE r). HasCallStack => a undef = error "undef" Again, the vanilla desugaring gives a local let-binding for a representation-polymorphic (undefm :: a), which is illegal. But again we can desugar without a let: undef = /\ a. \ (d:HasCallStack) -> error a d "undef" The abs_sig field supports this direct desugaring, with no local let-binding. When abs_sig = True * the abs_binds is single FunBind * the abs_exports is a singleton * we have a complete type sig for binder and hence the abs_binds is non-recursive (it binds the mono_id but refers to the poly_id These properties are exploited in GHC.HsToCore.Binds.dsAbsBinds to generate code without a let-binding. Note [ABExport wrapper] ~~~~~~~~~~~~~~~~~~~~~~~ Consider (f,g) = (\x.x, \y.y) This ultimately desugars to something like this: tup :: forall a b. (a->a, b->b) tup = /\a b. (\x:a.x, \y:b.y) f :: forall a. a -> a f = /\a. case tup a Any of (fm::a->a,gm:Any->Any) -> fm ...similarly for g... The abe_wrap field deals with impedance-matching between (/\a b. case tup a b of { (f,g) -> f }) and the thing we really want, which may have fewer type variables. 
The action happens in GHC.Tc.Gen.Bind.mkExport. Note [Bind free vars] ~~~~~~~~~~~~~~~~~~~~~ The extension fields of FunBind, PatBind and PatSynBind at GhcRn records the free variables of the definition. It is used for the following purposes: a) Dependency analysis prior to type checking (see GHC.Tc.Gen.Bind.tc_group) b) Deciding whether we can do generalisation of the binding (see GHC.Tc.Gen.Bind.decideGeneralisationPlan) c) Deciding whether the binding can be used in static forms (see GHC.Tc.Gen.Expr.checkClosedInStaticForm for the HsStatic case and GHC.Tc.Gen.Bind.isClosedBndrGroup). Specifically, * it includes all free vars that are defined in this module (including top-level things and lexically scoped type variables) * it excludes imported vars; this is just to keep the set smaller * Before renaming, and after typechecking, the field is unused; it's just an error thunk -} instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsLocalBindsLR (GhcPass pl) (GhcPass pr)) where ppr (HsValBinds _ bs) = ppr bs ppr (HsIPBinds _ bs) = ppr bs ppr (EmptyLocalBinds _) = empty instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsValBindsLR (GhcPass pl) (GhcPass pr)) where ppr (ValBinds _ binds sigs) = pprDeclList (pprLHsBindsForUser binds sigs) ppr (XValBindsLR (NValBinds sccs sigs)) = getPprDebug $ \case -- Print with sccs showing True -> vcat (map ppr sigs) $$ vcat (map ppr_scc sccs) False -> pprDeclList (pprLHsBindsForUser (concat (map snd sccs)) sigs) where ppr_scc (rec_flag, binds) = pp_rec rec_flag <+> pprLHsBinds binds pp_rec Recursive = text "rec" pp_rec NonRecursive = text "nonrec" pprLHsBinds :: (OutputableBndrId idL, OutputableBndrId idR) => LHsBindsLR (GhcPass idL) (GhcPass idR) -> SDoc pprLHsBinds binds | isEmptyLHsBinds binds = empty | otherwise = pprDeclList (map ppr binds) pprLHsBindsForUser :: (OutputableBndrId idL, OutputableBndrId idR, OutputableBndrId id2) => LHsBindsLR (GhcPass idL) (GhcPass idR) -> [LSig (GhcPass id2)] -> 
[SDoc] -- pprLHsBindsForUser is different to pprLHsBinds because -- a) No braces: 'let' and 'where' include a list of HsBindGroups -- and we don't want several groups of bindings each -- with braces around -- b) Sort by location before printing -- c) Include signatures pprLHsBindsForUser binds sigs = map snd (sort_by_loc decls) where decls :: [(SrcSpan, SDoc)] decls = [(locA loc, ppr sig) | L loc sig <- sigs] ++ [(locA loc, ppr bind) | L loc bind <- binds] sort_by_loc decls = sortBy (SrcLoc.leftmost_smallest `on` fst) decls pprDeclList :: [SDoc] -> SDoc -- Braces with a space -- Print a bunch of declarations -- One could choose { d1; d2; ... }, using 'sep' -- or d1 -- d2 -- .. -- using vcat -- At the moment we chose the latter -- Also we do the 'pprDeeperList' thing. pprDeclList ds = pprDeeperList vcat ds ------------ emptyLocalBinds :: HsLocalBindsLR (GhcPass a) (GhcPass b) emptyLocalBinds = EmptyLocalBinds noExtField eqEmptyLocalBinds :: HsLocalBindsLR a b -> Bool eqEmptyLocalBinds (EmptyLocalBinds _) = True eqEmptyLocalBinds _ = False isEmptyValBinds :: HsValBindsLR (GhcPass a) (GhcPass b) -> Bool isEmptyValBinds (ValBinds _ ds sigs) = isEmptyLHsBinds ds && null sigs isEmptyValBinds (XValBindsLR (NValBinds ds sigs)) = null ds && null sigs emptyValBindsIn, emptyValBindsOut :: HsValBindsLR (GhcPass a) (GhcPass b) emptyValBindsIn = ValBinds NoAnnSortKey [] [] emptyValBindsOut = XValBindsLR (NValBinds [] []) emptyLHsBinds :: LHsBindsLR (GhcPass idL) idR emptyLHsBinds = [] isEmptyLHsBinds :: LHsBindsLR (GhcPass idL) idR -> Bool isEmptyLHsBinds = null ------------ plusHsValBinds :: HsValBinds (GhcPass a) -> HsValBinds (GhcPass a) -> HsValBinds(GhcPass a) plusHsValBinds (ValBinds _ ds1 sigs1) (ValBinds _ ds2 sigs2) = ValBinds NoAnnSortKey (ds1 ++ ds2) (sigs1 ++ sigs2) plusHsValBinds (XValBindsLR (NValBinds ds1 sigs1)) (XValBindsLR (NValBinds ds2 sigs2)) = XValBindsLR (NValBinds (ds1 ++ ds2) (sigs1 ++ sigs2)) plusHsValBinds _ _ = panic "HsBinds.plusHsValBinds" -- Used 
to print, for instance, let bindings: -- let %1 x = … pprHsMultAnn :: forall id. OutputableBndrId id => HsMultAnn (GhcPass id) -> SDoc pprHsMultAnn (HsNoMultAnn _) = empty pprHsMultAnn (HsPct1Ann _) = text "%1" pprHsMultAnn (HsMultAnn _ p) = text "%" <> ppr p instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsBindLR (GhcPass pl) (GhcPass pr)) where ppr mbind = ppr_monobind mbind ppr_monobind :: forall idL idR. (OutputableBndrId idL, OutputableBndrId idR) => HsBindLR (GhcPass idL) (GhcPass idR) -> SDoc ppr_monobind (PatBind { pat_lhs = pat, pat_mult = mult_ann, pat_rhs = grhss }) = pprHsMultAnn @idL mult_ann <+> pprPatBind pat grhss ppr_monobind (VarBind { var_id = var, var_rhs = rhs }) = sep [pprBndr CasePatBind var, nest 2 $ equals <+> pprExpr (unLoc rhs)] ppr_monobind (FunBind { fun_id = fun, fun_matches = matches, fun_ext = ext }) = pprTicks empty ticksDoc $$ whenPprDebug (pprBndr LetBind (unLoc fun)) $$ pprFunBind matches $$ whenPprDebug (pprIfTc @idR $ wrapDoc) where ticksDoc :: SDoc ticksDoc = case ghcPass @idR of GhcPs -> empty GhcRn -> empty GhcTc | (_, ticks) <- ext -> if null ticks then empty else text "-- ticks = " <> ppr ticks wrapDoc :: SDoc wrapDoc = case ghcPass @idR of GhcPs -> empty GhcRn -> empty GhcTc | (wrap, _) <- ext -> ppr wrap ppr_monobind (PatSynBind _ psb) = ppr psb ppr_monobind (XHsBindsLR b) = case ghcPass @idL of GhcTc -> ppr_absbinds b where ppr_absbinds (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dictvars , abs_exports = exports, abs_binds = val_binds , abs_ev_binds = ev_binds }) = sdocOption sdocPrintTypecheckerElaboration $ \case False -> pprLHsBinds val_binds True -> -- Show extra information (bug number: #10662) hang (text "AbsBinds" <+> sep [ brackets (interpp'SP tyvars) , brackets (interpp'SP dictvars) ]) 2 $ braces $ vcat [ text "Exports:" <+> brackets (sep (punctuate comma (map ppr exports))) , text "Exported types:" <+> vcat [pprBndr LetBind (abe_poly ex) | ex <- exports] , text "Binds:" <+> pprLHsBinds 
val_binds , pprIfTc @idR (text "Evidence:" <+> ppr ev_binds) ] instance Outputable ABExport where ppr (ABE { abe_wrap = wrap, abe_poly = gbl, abe_mono = lcl, abe_prags = prags }) = vcat [ sep [ ppr gbl, nest 2 (text "<=" <+> ppr lcl) ] , nest 2 (pprTcSpecPrags prags) , ppr $ nest 2 (text "wrap:" <+> ppr wrap) ] instance (OutputableBndrId l, OutputableBndrId r) => Outputable (PatSynBind (GhcPass l) (GhcPass r)) where ppr (PSB{ psb_id = (L _ psyn), psb_args = details, psb_def = pat, psb_dir = dir }) = ppr_lhs <+> ppr_rhs where ppr_lhs = text "pattern" <+> ppr_details ppr_simple syntax = syntax <+> pprLPat pat ppr_details = case details of InfixCon v1 v2 -> hsep [ppr_v v1, pprInfixOcc psyn, ppr_v v2] where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v PrefixCon _ vs -> hsep (pprPrefixOcc psyn : map ppr_v vs) where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v RecCon vs -> pprPrefixOcc psyn <> braces (sep (punctuate comma (map ppr_v vs))) where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v ppr_rhs = case dir of Unidirectional -> ppr_simple (text "<-") ImplicitBidirectional -> ppr_simple equals ExplicitBidirectional mg -> ppr_simple (text "<-") <+> text "where" $$ (nest 2 $ pprFunBind mg) pprTicks :: SDoc -> SDoc -> SDoc -- Print stuff about ticks only when -dppr-debug is on, to avoid -- them appearing in error messages (from the desugarer); see # 3263 -- Also print ticks in dumpStyle, so that -ddump-hpc actually does -- something useful. 
pprTicks pp_no_debug pp_when_debug = getPprStyle $ \sty -> getPprDebug $ \debug -> if debug || dumpStyle sty then pp_when_debug else pp_no_debug instance Outputable (XRec pass (IdP pass)) => Outputable (RecordPatSynField pass) where ppr (RecordPatSynField { recordPatSynField = v }) = ppr v {- ************************************************************************ * * Implicit parameter bindings * * ************************************************************************ -} type instance XIPBinds GhcPs = NoExtField type instance XIPBinds GhcRn = NoExtField type instance XIPBinds GhcTc = TcEvBinds -- binds uses of the -- implicit parameters type instance XXHsIPBinds (GhcPass p) = DataConCantHappen isEmptyIPBindsPR :: HsIPBinds (GhcPass p) -> Bool isEmptyIPBindsPR (IPBinds _ is) = null is isEmptyIPBindsTc :: HsIPBinds GhcTc -> Bool isEmptyIPBindsTc (IPBinds ds is) = null is && isEmptyTcEvBinds ds -- EPA annotations in GhcPs, dictionary Id in GhcTc type instance XCIPBind GhcPs = EpToken "=" type instance XCIPBind GhcRn = NoExtField type instance XCIPBind GhcTc = Id type instance XXIPBind (GhcPass p) = DataConCantHappen instance OutputableBndrId p => Outputable (HsIPBinds (GhcPass p)) where ppr (IPBinds ds bs) = pprDeeperList vcat (map ppr bs) $$ whenPprDebug (pprIfTc @p $ ppr ds) instance OutputableBndrId p => Outputable (IPBind (GhcPass p)) where ppr (IPBind x (L _ ip) rhs) = name <+> equals <+> pprExpr (unLoc rhs) where name = case ghcPass @p of GhcPs -> pprBndr LetBind ip GhcRn -> pprBndr LetBind ip GhcTc -> pprBndr LetBind x {- ************************************************************************ * * \subsection{@Sig@: type signatures and value-modifying user pragmas} * * ************************************************************************ -} type instance XTypeSig (GhcPass p) = AnnSig type instance XPatSynSig (GhcPass p) = AnnSig type instance XClassOpSig (GhcPass p) = AnnSig type instance XFixSig (GhcPass p) = ((EpaLocation, Maybe EpaLocation), 
SourceText) type instance XInlineSig (GhcPass p) = (EpaLocation, EpToken "#-}", ActivationAnn) type instance XSpecSig (GhcPass p) = AnnSpecSig type instance XSpecInstSig (GhcPass p) = ((EpaLocation, EpToken "instance", EpToken "#-}"), SourceText) type instance XMinimalSig (GhcPass p) = ((EpaLocation, EpToken "#-}"), SourceText) type instance XSCCFunSig (GhcPass p) = ((EpaLocation, EpToken "#-}"), SourceText) type instance XCompleteMatchSig (GhcPass p) = ((EpaLocation, Maybe TokDcolon, EpToken "#-}"), SourceText) -- SourceText: See Note [Pragma source text] in "GHC.Types.SourceText" type instance XXSig GhcPs = DataConCantHappen type instance XXSig GhcRn = IdSig type instance XXSig GhcTc = IdSig type instance XFixitySig GhcPs = NamespaceSpecifier type instance XFixitySig GhcRn = NamespaceSpecifier type instance XFixitySig GhcTc = NoExtField type instance XXFixitySig (GhcPass p) = DataConCantHappen data AnnSpecSig = AnnSpecSig { ass_open :: EpaLocation, ass_close :: EpToken "#-}", ass_dcolon :: TokDcolon, ass_act :: ActivationAnn } deriving Data instance NoAnn AnnSpecSig where noAnn = AnnSpecSig noAnn noAnn noAnn noAnn data ActivationAnn = ActivationAnn { aa_openc :: EpToken "[", aa_closec :: EpToken "]", aa_tilde :: Maybe (EpToken "~"), aa_val :: Maybe EpaLocation } deriving (Data, Eq) instance NoAnn ActivationAnn where noAnn = ActivationAnn noAnn noAnn noAnn noAnn -- | Optional namespace specifier for fixity signatures, -- WARNINIG and DEPRECATED pragmas. -- -- Examples: -- -- {-# WARNING in "x-partial" data Head "don't use this pattern synonym" #-} -- -- ↑ DataNamespaceSpecifier -- -- {-# DEPRECATED type D "This type was deprecated" #-} -- -- ↑ TypeNamespaceSpecifier -- -- infixr 6 data $ -- -- ↑ DataNamespaceSpecifier data NamespaceSpecifier = NoNamespaceSpecifier | TypeNamespaceSpecifier (EpToken "type") | DataNamespaceSpecifier (EpToken "data") deriving (Eq, Data) -- | Check if namespace specifiers overlap, i.e. 
if they are equal or -- if at least one of them doesn't specify a namespace overlappingNamespaceSpecifiers :: NamespaceSpecifier -> NamespaceSpecifier -> Bool overlappingNamespaceSpecifiers NoNamespaceSpecifier _ = True overlappingNamespaceSpecifiers _ NoNamespaceSpecifier = True overlappingNamespaceSpecifiers TypeNamespaceSpecifier{} TypeNamespaceSpecifier{} = True overlappingNamespaceSpecifiers DataNamespaceSpecifier{} DataNamespaceSpecifier{} = True overlappingNamespaceSpecifiers _ _ = False -- | Check if namespace is covered by a namespace specifier: -- * NoNamespaceSpecifier covers both namespaces -- * TypeNamespaceSpecifier covers the type namespace only -- * DataNamespaceSpecifier covers the data namespace only coveredByNamespaceSpecifier :: NamespaceSpecifier -> NameSpace -> Bool coveredByNamespaceSpecifier NoNamespaceSpecifier = const True coveredByNamespaceSpecifier TypeNamespaceSpecifier{} = isTcClsNameSpace <||> isTvNameSpace coveredByNamespaceSpecifier DataNamespaceSpecifier{} = isValNameSpace instance Outputable NamespaceSpecifier where ppr NoNamespaceSpecifier = empty ppr TypeNamespaceSpecifier{} = text "type" ppr DataNamespaceSpecifier{} = text "data" -- | A type signature in generated code, notably the code -- generated for record selectors. We simply record the desired Id -- itself, replete with its name, type and IdDetails. 
Otherwise it's -- just like a type signature: there should be an accompanying binding newtype IdSig = IdSig { unIdSig :: Id } deriving Data data AnnSig = AnnSig { asDcolon :: EpUniToken "::" "∷", asPattern :: Maybe (EpToken "pattern"), asDefault :: Maybe (EpToken "default") } deriving Data instance NoAnn AnnSig where noAnn = AnnSig noAnn noAnn noAnn -- | Type checker Specialisation Pragmas -- -- 'TcSpecPrags' conveys @SPECIALISE@ pragmas from the type checker to the desugarer data TcSpecPrags = IsDefaultMethod -- ^ Super-specialised: a default method should -- be macro-expanded at every call site | SpecPrags [LTcSpecPrag] deriving Data -- | Located Type checker Specification Pragmas type LTcSpecPrag = Located TcSpecPrag -- | Type checker Specification Pragma data TcSpecPrag = SpecPrag Id HsWrapper InlinePragma -- ^ The Id to be specialised, a wrapper that specialises the -- polymorphic function, and inlining spec for the specialised function deriving Data noSpecPrags :: TcSpecPrags noSpecPrags = SpecPrags [] hasSpecPrags :: TcSpecPrags -> Bool hasSpecPrags (SpecPrags ps) = not (null ps) hasSpecPrags IsDefaultMethod = False isDefaultMethod :: TcSpecPrags -> Bool isDefaultMethod IsDefaultMethod = True isDefaultMethod (SpecPrags {}) = False instance OutputableBndrId p => Outputable (Sig (GhcPass p)) where ppr sig = ppr_sig sig ppr_sig :: forall p. 
OutputableBndrId p => Sig (GhcPass p) -> SDoc ppr_sig (TypeSig _ vars ty) = pprVarSig (map unLoc vars) (ppr ty) ppr_sig (ClassOpSig _ is_deflt vars ty) | is_deflt = text "default" <+> pprVarSig (map unLoc vars) (ppr ty) | otherwise = pprVarSig (map unLoc vars) (ppr ty) ppr_sig (FixSig _ fix_sig) = ppr fix_sig ppr_sig (SpecSig _ var ty inl@(InlinePragma { inl_inline = spec })) = pragSrcBrackets (inlinePragmaSource inl) pragmaSrc (pprSpec (unLoc var) (interpp'SP ty) inl) where pragmaSrc = case spec of NoUserInlinePrag -> "{-# " ++ extractSpecPragName (inl_src inl) _ -> "{-# " ++ extractSpecPragName (inl_src inl) ++ "_INLINE" ppr_sig (InlineSig _ var inl) = ppr_pfx <+> pprInline inl <+> pprPrefixOcc (unLoc var) <+> text "#-}" where ppr_pfx = case inlinePragmaSource inl of SourceText src -> ftext src NoSourceText -> text "{-#" <+> inlinePragmaName (inl_inline inl) ppr_sig (SpecInstSig (_, src) ty) = pragSrcBrackets src "{-# pragma" (text "instance" <+> ppr ty) ppr_sig (MinimalSig (_, src) bf) = pragSrcBrackets src "{-# MINIMAL" (pprMinimalSig bf) ppr_sig (PatSynSig _ names sig_ty) = text "pattern" <+> pprVarSig (map unLoc names) (ppr sig_ty) ppr_sig (SCCFunSig (_, src) fn mlabel) = pragSrcBrackets src "{-# SCC" (ppr_fn <+> maybe empty ppr mlabel ) where ppr_fn = case ghcPass @p of GhcPs -> ppr fn GhcRn -> ppr fn GhcTc -> ppr fn ppr_sig (CompleteMatchSig (_, src) cs mty) = pragSrcBrackets src "{-# COMPLETE" ((hsep (punctuate comma (map ppr_n cs))) <+> opt_sig) where opt_sig = maybe empty ((\t -> dcolon <+> ppr t) . unLoc) mty ppr_n n = case ghcPass @p of GhcPs -> ppr n GhcRn -> ppr n GhcTc -> ppr n ppr_sig (XSig x) = case ghcPass @p of GhcRn | IdSig id <- x -> pprVarSig [id] (ppr (varType id)) GhcTc | IdSig id <- x -> pprVarSig [id] (ppr (varType id)) hsSigDoc :: forall p. 
IsPass p => Sig (GhcPass p) -> SDoc hsSigDoc (TypeSig {}) = text "type signature" hsSigDoc (PatSynSig {}) = text "pattern synonym signature" hsSigDoc (ClassOpSig _ is_deflt _ _) | is_deflt = text "default type signature" | otherwise = text "class method signature" hsSigDoc (SpecSig _ _ _ inl) = (inlinePragmaName . inl_inline $ inl) <+> text "pragma" hsSigDoc (InlineSig _ _ prag) = (inlinePragmaName . inl_inline $ prag) <+> text "pragma" -- Using the 'inlinePragmaName' function ensures that the pragma name for any -- one of the INLINE/INLINABLE/NOINLINE pragmas are printed after being extracted -- from the InlineSpec field of the pragma. hsSigDoc (SpecInstSig (_, src) _) = text (extractSpecPragName src) <+> text "instance pragma" hsSigDoc (FixSig {}) = text "fixity declaration" hsSigDoc (MinimalSig {}) = text "MINIMAL pragma" hsSigDoc (SCCFunSig {}) = text "SCC pragma" hsSigDoc (CompleteMatchSig {}) = text "COMPLETE pragma" hsSigDoc (XSig _) = case ghcPass @p of GhcRn -> text "id signature" GhcTc -> text "id signature" -- | Extracts the name for a SPECIALIZE instance pragma. In 'hsSigDoc', the src -- field of 'SpecInstSig' signature contains the SourceText for a SPECIALIZE -- instance pragma of the form: "SourceText {-# SPECIALIZE" -- -- Extraction ensures that all variants of the pragma name (with a 'Z' or an -- 'S') are output exactly as used in the pragma. extractSpecPragName :: SourceText -> String extractSpecPragName srcTxt = case (words $ show srcTxt) of (_:_:pragName:_) -> filter (/= '\"') pragName _ -> pprPanic "hsSigDoc: Misformed SPECIALISE instance pragma:" (ppr srcTxt) instance OutputableBndrId p => Outputable (FixitySig (GhcPass p)) where ppr (FixitySig ns_spec names fixity) = sep [ppr fixity, ppr_ns_spec, pprops] where ppr_ns_spec = case ghcPass @p of GhcPs -> ppr ns_spec GhcRn -> ppr ns_spec GhcTc -> empty pprops = hsep $ punctuate comma (map (pprInfixOcc . 
unLoc) names) pragBrackets :: SDoc -> SDoc pragBrackets doc = text "{-#" <+> doc <+> text "#-}" -- | Using SourceText in case the pragma was spelled differently or used mixed -- case pragSrcBrackets :: SourceText -> String -> SDoc -> SDoc pragSrcBrackets (SourceText src) _ doc = ftext src <+> doc <+> text "#-}" pragSrcBrackets NoSourceText alt doc = text alt <+> doc <+> text "#-}" pprVarSig :: (OutputableBndr id) => [id] -> SDoc -> SDoc pprVarSig vars pp_ty = sep [pprvars <+> dcolon, nest 2 pp_ty] where pprvars = hsep $ punctuate comma (map pprPrefixOcc vars) pprSpec :: (OutputableBndr id) => id -> SDoc -> InlinePragma -> SDoc pprSpec var pp_ty inl = pp_inl <+> pprVarSig [var] pp_ty where pp_inl | isDefaultInlinePragma inl = empty | otherwise = pprInline inl pprTcSpecPrags :: TcSpecPrags -> SDoc pprTcSpecPrags IsDefaultMethod = text "" pprTcSpecPrags (SpecPrags ps) = vcat (map (ppr . unLoc) ps) instance Outputable TcSpecPrag where ppr (SpecPrag var _ inl) = text (extractSpecPragName $ inl_src inl) <+> pprSpec var (text "") inl pprMinimalSig :: (OutputableBndr name) => LBooleanFormula (GenLocated l name) -> SDoc pprMinimalSig (L _ bf) = ppr (fmap unLoc bf) {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsBindLR (GhcPass idL) (GhcPass idR)) = SrcSpanAnnA type instance Anno (IPBind (GhcPass p)) = SrcSpanAnnA type instance Anno (Sig (GhcPass p)) = SrcSpanAnnA type instance Anno (FixitySig (GhcPass p)) = SrcSpanAnnA type instance Anno StringLiteral = EpAnnCO ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Decls.hs0000644000000000000000000016375107346545000017663 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE 
TypeFamilies #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {-# LANGUAGE InstanceSigs #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Abstract syntax of global declarations. -- -- Definitions for: @SynDecl@ and @ConDecl@, @ClassDecl@, -- @InstDecl@, @DefaultDecl@ and @ForeignDecl@. module GHC.Hs.Decls ( -- * Toplevel declarations HsDecl(..), LHsDecl, HsDataDefn(..), HsDeriving, LHsFunDep, HsDerivingClause(..), LHsDerivingClause, DerivClauseTys(..), LDerivClauseTys, NewOrData, newOrDataToFlavour, anyLConIsGadt, StandaloneKindSig(..), LStandaloneKindSig, standaloneKindSigName, -- ** Class or type declarations TyClDecl(..), LTyClDecl, DataDeclRn(..), AnnDataDefn(..), AnnClassDecl(..), AnnSynDecl(..), AnnFamilyDecl(..), AnnClsInstDecl(..), TyClGroup(..), tyClGroupTyClDecls, tyClGroupInstDecls, tyClGroupRoleDecls, tyClGroupKindSigs, isClassDecl, isDataDecl, isSynDecl, tcdName, isFamilyDecl, isTypeFamilyDecl, isDataFamilyDecl, isOpenTypeFamilyInfo, isClosedTypeFamilyInfo, tyFamInstDeclName, tyFamInstDeclLName, countTyClDecls, pprTyClDeclFlavour, tyClDeclLName, tyClDeclTyVars, hsDeclHasCusk, famResultKindSignature, FamilyDecl(..), LFamilyDecl, FunDep(..), ppDataDefnHeader, pp_vanilla_decl_head, -- ** Instance declarations InstDecl(..), LInstDecl, FamilyInfo(..), TyFamInstDecl(..), LTyFamInstDecl, instDeclDataFamInsts, TyFamDefltDecl, LTyFamDefltDecl, DataFamInstDecl(..), LDataFamInstDecl, pprDataFamInstFlavour, pprTyFamInstDecl, pprHsFamInstLHS, FamEqn(..), TyFamInstEqn, LTyFamInstEqn, HsFamEqnPats, LClsInstDecl, ClsInstDecl(..), -- ** Standalone deriving declarations DerivDecl(..), LDerivDecl, AnnDerivDecl, -- ** Deriving strategies DerivStrategy(..), LDerivStrategy, derivStrategyName, foldDerivStrategy, mapDerivStrategy, XViaStrategyPs(..), -- ** @RULE@ 
declarations LRuleDecls,RuleDecls(..),RuleDecl(..),LRuleDecl,HsRuleRn(..), HsRuleAnn(..), ActivationAnn(..), RuleBndr(..),LRuleBndr, collectRuleBndrSigTys, flattenRuleDecls, pprFullRuleName, -- ** @default@ declarations DefaultDecl(..), LDefaultDecl, -- ** Template haskell declaration splice SpliceDecoration(..), SpliceDecl(..), LSpliceDecl, -- ** Foreign function interface declarations ForeignDecl(..), LForeignDecl, ForeignImport(..), ForeignExport(..), CImportSpec(..), -- ** Data-constructor declarations ConDecl(..), LConDecl, HsConDeclH98Details, HsConDeclGADTDetails(..), AnnConDeclH98(..), AnnConDeclGADT(..), hsConDeclTheta, getConNames, getRecConArgs_maybe, -- ** Document comments DocDecl(..), LDocDecl, docDeclDoc, -- ** Deprecations WarnDecl(..), LWarnDecl, WarnDecls(..), LWarnDecls, -- ** Annotations AnnDecl(..), LAnnDecl, AnnProvenance(..), annProvenanceName_maybe, -- ** Role annotations RoleAnnotDecl(..), LRoleAnnotDecl, roleAnnotDeclName, -- ** Injective type families FamilyResultSig(..), LFamilyResultSig, InjectivityAnn(..), LInjectivityAnn, resultVariableName, familyDeclLName, familyDeclName, -- * Grouping HsGroup(..), emptyRdrGroup, emptyRnGroup, appendGroups, hsGroupInstDecls, hsGroupTopLevelFixitySigs, partitionBindsAndSigs, ) where -- friends: import GHC.Prelude import Language.Haskell.Syntax.Decls import {-# SOURCE #-} GHC.Hs.Expr ( pprExpr, pprUntypedSplice ) -- Because Expr imports Decls via HsBracket import GHC.Hs.Binds import GHC.Hs.Type import GHC.Hs.Doc import GHC.Types.Basic import GHC.Core.Coercion import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Fixity -- others: import GHC.Utils.Misc (count) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Core.Type import GHC.Types.ForeignCall import GHC.Unit.Module.Warnings import GHC.Data.Maybe import Data.Data (Data) import 
Data.Foldable (toList)

{-
************************************************************************
*                                                                      *
\subsection[HsDecl]{Declarations}
*                                                                      *
************************************************************************
-}

-- Extension points for the 'HsDecl' constructors (Trees That Grow):
-- no constructor carries extra information in any pass, and the
-- extension constructor itself is impossible.
type instance XTyClD      (GhcPass _) = NoExtField
type instance XInstD      (GhcPass _) = NoExtField
type instance XDerivD     (GhcPass _) = NoExtField
type instance XValD       (GhcPass _) = NoExtField
type instance XSigD       (GhcPass _) = NoExtField
type instance XKindSigD   (GhcPass _) = NoExtField
type instance XDefD       (GhcPass _) = NoExtField
type instance XForD       (GhcPass _) = NoExtField
type instance XWarningD   (GhcPass _) = NoExtField
type instance XAnnD       (GhcPass _) = NoExtField
type instance XRuleD      (GhcPass _) = NoExtField
type instance XSpliceD    (GhcPass _) = NoExtField
type instance XDocD       (GhcPass _) = NoExtField
type instance XRoleAnnotD (GhcPass _) = NoExtField
type instance XXHsDecl    (GhcPass _) = DataConCantHappen

-- | Partition a list of HsDecls into function/pattern bindings, signatures,
-- type family declarations, type family instances, and documentation comments.
--
-- Panics when given a declaration that cannot be put into any of the output
-- groups.
--
-- The primary use of this function is to implement
-- 'GHC.Parser.PostProcess.cvBindsAndSigs'.
partitionBindsAndSigs
  :: [LHsDecl GhcPs]
  -> (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs],
      [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs])
partitionBindsAndSigs = go
  where
    go [] = ([], [], [], [], [], [])
    -- Recurse first, then cons onto the sub-results, so each output
    -- list preserves the original declaration order.
    go ((L l decl) : ds) =
      let (bs, ss, ts, tfis, dfis, docs) = go ds in
      case decl of
        ValD _ b
          -> (L l b : bs, ss, ts, tfis, dfis, docs)
        SigD _ s
          -> (bs, L l s : ss, ts, tfis, dfis, docs)
        TyClD _ (FamDecl _ t)
          -> (bs, ss, L l t : ts, tfis, dfis, docs)
        InstD _ (TyFamInstD { tfid_inst = tfi })
          -> (bs, ss, ts, L l tfi : tfis, dfis, docs)
        InstD _ (DataFamInstD { dfid_inst = dfi })
          -> (bs, ss, ts, tfis, L l dfi : dfis, docs)
        DocD _ d
          -> (bs, ss, ts, tfis, dfis, L l d : docs)
        -- Any other declaration kind is a caller error here.
        _ -> pprPanic "partitionBindsAndSigs" (ppr decl)

-- Okay, I need to reconstruct the document comments, but for now:
-- (placeholder instance: doc comments print as the empty document)
instance Outputable (DocDecl name) where
  ppr _ = text ""

type instance XCHsGroup (GhcPass _) = NoExtField
type instance XXHsGroup (GhcPass _) = DataConCantHappen

-- | Empty 'HsGroup's for the parser ('emptyRdrGroup') and renamer
-- ('emptyRnGroup'); they differ only in which empty value-binding
-- representation fills 'hs_valds'.
emptyGroup, emptyRdrGroup, emptyRnGroup :: HsGroup (GhcPass p)
emptyRdrGroup = emptyGroup { hs_valds = emptyValBindsIn }
emptyRnGroup  = emptyGroup { hs_valds = emptyValBindsOut }

emptyGroup = HsGroup { hs_ext = noExtField,
                       hs_tyclds = [],
                       hs_derivds = [],
                       hs_fixds = [], hs_defds = [], hs_annds = [],
                       hs_fords = [], hs_warnds = [], hs_ruleds = [],
                       -- hs_valds must be overridden by the callers above
                       hs_valds = error "emptyGroup hs_valds: Can't happen",
                       hs_splcds = [],
                       hs_docs = [] }

-- | The fixity signatures for each top-level declaration and class method
-- See Note [Top-level fixity signatures in an HsGroup]
hsGroupTopLevelFixitySigs :: HsGroup (GhcPass p) -> [LFixitySig (GhcPass p)]
hsGroupTopLevelFixitySigs (HsGroup{ hs_fixds = fixds, hs_tyclds = tyclds }) =
    fixds ++ cls_fixds
  where
    -- Fixity signatures hidden inside class bodies also count as top-level.
    cls_fixds = [ L loc sig
                | L _ ClassDecl{tcdSigs = sigs} <- tyClGroupTyClDecls tyclds
                , L loc (FixSig _ sig) <- sigs
                ]

-- | Combine two 'HsGroup's field-by-field (lists are appended,
-- value bindings are merged with 'plusHsValBinds').
appendGroups :: HsGroup (GhcPass p) -> HsGroup (GhcPass p)
             -> HsGroup (GhcPass p)
appendGroups
    HsGroup { hs_valds  = val_groups1,
              hs_splcds = spliceds1,
              hs_tyclds = tyclds1,
              hs_derivds = derivds1,
              hs_fixds  = fixds1,
              hs_defds  = defds1,
              hs_annds  = annds1,
              hs_fords  = fords1,
              hs_warnds = warnds1,
              hs_ruleds = rulds1,
              hs_docs   = docs1 }
    HsGroup { hs_valds  = val_groups2,
              hs_splcds = spliceds2,
              hs_tyclds = tyclds2,
              hs_derivds = derivds2,
              hs_fixds  = fixds2,
              hs_defds  = defds2,
              hs_annds  = annds2,
              hs_fords  = fords2,
              hs_warnds = warnds2,
              hs_ruleds = rulds2,
              hs_docs   = docs2 }
  = HsGroup { hs_ext    = noExtField,
              hs_valds  = val_groups1 `plusHsValBinds` val_groups2,
              hs_splcds = spliceds1 ++ spliceds2,
              hs_tyclds = tyclds1 ++ tyclds2,
              hs_derivds = derivds1 ++ derivds2,
              hs_fixds  = fixds1 ++ fixds2,
              hs_annds  = annds1 ++ annds2,
              hs_defds  = defds1 ++ defds2,
              hs_fords  = fords1 ++ fords2,
              hs_warnds = warnds1 ++ warnds2,
              hs_ruleds = rulds1 ++ rulds2,
              hs_docs   = docs1 ++ docs2 }

instance (OutputableBndrId p) => Outputable (HsDecl (GhcPass p)) where
    ppr (TyClD _ dcl)     = ppr dcl
    ppr (ValD _ binds)    = ppr binds
    ppr (DefD _ def)      = ppr def
    ppr (InstD _ inst)    = ppr inst
    ppr (DerivD _ deriv)  = ppr deriv
    ppr (ForD _ fd)       = ppr fd
    ppr (SigD _ sd)       = ppr sd
    ppr (KindSigD _ ksd)  = ppr ksd
    ppr (RuleD _ rd)      = ppr rd
    ppr (WarningD _ wd)   = ppr wd
    ppr (AnnD _ ad)       = ppr ad
    ppr (SpliceD _ dd)    = ppr dd
    ppr (DocD _ doc)      = ppr doc
    ppr (RoleAnnotD _ ra) = ppr ra

instance (OutputableBndrId p) => Outputable (HsGroup (GhcPass p)) where
    ppr (HsGroup { hs_valds  = val_decls,
                   hs_tyclds = tycl_decls,
                   hs_derivds = deriv_decls,
                   hs_fixds  = fix_decls,
                   hs_warnds = deprec_decls,
                   hs_annds  = ann_decls,
                   hs_fords  = foreign_decls,
                   hs_defds  = default_decls,
                   hs_ruleds = rule_decls })
        = vcat_mb empty
            [ppr_ds fix_decls, ppr_ds default_decls,
             ppr_ds deprec_decls, ppr_ds ann_decls,
             ppr_ds rule_decls,
             if isEmptyValBinds val_decls
                then Nothing
                else Just (ppr val_decls),
             ppr_ds (tyClGroupRoleDecls tycl_decls),
             ppr_ds (tyClGroupKindSigs tycl_decls),
             ppr_ds (tyClGroupTyClDecls tycl_decls),
             ppr_ds (tyClGroupInstDecls tycl_decls),
             ppr_ds deriv_decls,
             ppr_ds foreign_decls]
        where
          ppr_ds :: Outputable a => [a] -> Maybe SDoc
          ppr_ds [] = Nothing
          ppr_ds ds = Just (vcat (map ppr ds))

          vcat_mb :: SDoc -> [Maybe SDoc] -> SDoc
          -- Concatenate vertically with white-space between non-blanks
          vcat_mb _   []             = empty
          vcat_mb gap (Nothing : ds) = vcat_mb gap ds
          vcat_mb gap (Just d : ds)  = gap $$ d $$ vcat_mb blankLine ds

type instance XSpliceDecl  (GhcPass _) = NoExtField
type instance XXSpliceDecl (GhcPass _) = DataConCantHappen

instance OutputableBndrId p => Outputable (SpliceDecl (GhcPass p)) where
  ppr (SpliceDecl _ (L _ e) DollarSplice) = pprUntypedSplice True Nothing e
  ppr (SpliceDecl _ (L _ e) BareSplice)   = pprUntypedSplice False Nothing e

instance Outputable SpliceDecoration where
  ppr x = text $ show x

{-
************************************************************************
*                                                                      *
                Type and class declarations
*                                                                      *
************************************************************************
-}

type instance XFamDecl (GhcPass _) = NoExtField

type instance XSynDecl GhcPs = AnnSynDecl
type instance XSynDecl GhcRn = NameSet -- FVs
type instance XSynDecl GhcTc = NameSet -- FVs

type instance XDataDecl GhcPs = NoExtField
type instance XDataDecl GhcRn = DataDeclRn
type instance XDataDecl GhcTc = DataDeclRn

-- | Renamer-added information for a @data@ declaration.
data DataDeclRn = DataDeclRn
  { tcdDataCusk :: Bool -- ^ does this have a CUSK?
      -- See Note [CUSKs: complete user-supplied kind signatures]
  , tcdFVs :: NameSet }
  deriving Data

type instance XClassDecl GhcPs =
  ( AnnClassDecl
  , EpLayout           -- See Note [Class EpLayout]
  , AnnSortKey DeclTag ) -- TODO:AZ:tidy up AnnSortKey

type instance XClassDecl GhcRn = NameSet -- FVs
type instance XClassDecl GhcTc = NameSet -- FVs

type instance XXTyClDecl (GhcPass _) = DataConCantHappen

type instance XCTyFamInstDecl (GhcPass _) = (EpToken "type", EpToken "instance")
type instance XXTyFamInstDecl (GhcPass _) = DataConCantHappen

-- | Exact-print annotations (keyword/punctuation tokens) for a data
-- definition.
data AnnDataDefn = AnnDataDefn {
    andd_openp    :: [EpToken "("],
    andd_closep   :: [EpToken ")"],
    andd_type     :: EpToken "type",
    andd_newtype  :: EpToken "newtype",
    andd_data     :: EpToken "data",
    andd_instance :: EpToken "instance",
    andd_dcolon   :: TokDcolon,
    andd_where    :: EpToken "where",
    andd_openc    :: EpToken "{",
    andd_closec   :: EpToken "}",
    andd_equal    :: EpToken "="
  } deriving Data

instance NoAnn AnnDataDefn where
  noAnn = AnnDataDefn noAnn noAnn noAnn noAnn noAnn noAnn
                      noAnn noAnn noAnn noAnn noAnn

-- | Exact-print annotations for a class declaration.
data AnnClassDecl = AnnClassDecl {
    acd_class  :: EpToken "class",
    acd_openp  :: [EpToken "("],
    acd_closep :: [EpToken ")"],
    acd_vbar   :: EpToken "|",
    acd_where  :: EpToken "where",
    acd_openc  :: EpToken "{",
    acd_closec :: EpToken "}",
    acd_semis  :: [EpToken ";"]
  } deriving Data

instance NoAnn AnnClassDecl where
  noAnn = AnnClassDecl noAnn noAnn noAnn noAnn noAnn noAnn noAnn noAnn

-- | Exact-print annotations for a type synonym declaration.
data AnnSynDecl = AnnSynDecl {
    asd_opens  :: [EpToken "("],
    asd_closes :: [EpToken ")"],
    asd_type   :: EpToken "type",
    asd_equal  :: EpToken "="
  } deriving Data

instance NoAnn AnnSynDecl where
  noAnn = AnnSynDecl noAnn noAnn noAnn noAnn

------------- Pretty printing FamilyDecls -----------

-- | The leading keyword for a type/data family flavour.
pprFlavour :: FamilyInfo pass -> SDoc
pprFlavour DataFamily            = text "data"
pprFlavour OpenTypeFamily        = text "type"
pprFlavour (ClosedTypeFamily {}) = text "type"

instance Outputable (FamilyInfo pass) where
  ppr info = pprFlavour info <+> text "family"

-- Dealing with names

tyFamInstDeclName :: Anno (IdGhcP p) ~ SrcSpanAnnN
                  => TyFamInstDecl (GhcPass p) -> IdP (GhcPass p)
tyFamInstDeclName = unLoc . tyFamInstDeclLName

tyFamInstDeclLName :: Anno (IdGhcP p) ~ SrcSpanAnnN
                   => TyFamInstDecl (GhcPass p)
                   -> LocatedN (IdP (GhcPass p))
tyFamInstDeclLName (TyFamInstDecl { tfid_eqn = FamEqn { feqn_tycon = ln }})
  = ln

tyClDeclLName :: Anno (IdGhcP p) ~ SrcSpanAnnN
              => TyClDecl (GhcPass p) -> LocatedN (IdP (GhcPass p))
tyClDeclLName (FamDecl { tcdFam = fd })     = familyDeclLName fd
tyClDeclLName (SynDecl { tcdLName = ln })   = ln
tyClDeclLName (DataDecl { tcdLName = ln })  = ln
tyClDeclLName (ClassDecl { tcdLName = ln }) = ln

countTyClDecls :: [TyClDecl pass] -> (Int, Int, Int, Int, Int)
        -- class, synonym decls, data, newtype, family decls
countTyClDecls decls
 = (count isClassDecl decls,
    count isSynDecl decls,     -- excluding...
    count isDataTy decls,      -- ...family...
    count isNewTy decls,       -- ...instances
    count isFamilyDecl decls)
 where
   isDataTy DataDecl{ tcdDataDefn = HsDataDefn { dd_cons = DataTypeCons _ _ } } = True
   isDataTy _                                                                   = False

   isNewTy DataDecl{ tcdDataDefn = HsDataDefn { dd_cons = NewTypeCon _ } } = True
   isNewTy _                                                               = False

-- FIXME: tcdName is commonly used by both GHC and third-party tools, so it
-- needs to be polymorphic in the pass
tcdName :: Anno (IdGhcP p) ~ SrcSpanAnnN
        => TyClDecl (GhcPass p) -> IdP (GhcPass p)
tcdName = unLoc . tyClDeclLName

-- | Does this declaration have a complete, user-supplied kind signature?
-- See Note [CUSKs: complete user-supplied kind signatures]
hsDeclHasCusk :: TyClDecl GhcRn -> Bool
hsDeclHasCusk (FamDecl { tcdFam =
  FamilyDecl { fdInfo      = fam_info
             , fdTyVars    = tyvars
             , fdResultSig = L _ resultSig } }) =
    case fam_info of
      -- Closed families need all binders kinded and a result kind signature.
      ClosedTypeFamily {} -> hsTvbAllKinded tyvars
                          && isJust (famResultKindSignature resultSig)
      _ -> True -- Un-associated open type/data families have CUSKs
hsDeclHasCusk (SynDecl { tcdTyVars = tyvars, tcdRhs = rhs })
  = hsTvbAllKinded tyvars && isJust (hsTyKindSig rhs)
hsDeclHasCusk (DataDecl { tcdDExt = DataDeclRn { tcdDataCusk = cusk }}) = cusk
hsDeclHasCusk (ClassDecl { tcdTyVars = tyvars }) = hsTvbAllKinded tyvars

-- Pretty-printing TyClDecl
-- ~~~~~~~~~~~~~~~~~~~~~~~~

instance (OutputableBndrId p) => Outputable (TyClDecl (GhcPass p)) where

    ppr (FamDecl { tcdFam = decl }) = ppr decl
    ppr (SynDecl { tcdLName = ltycon, tcdTyVars = tyvars, tcdFixity = fixity
                 , tcdRhs = rhs })
      = hang (text "type" <+>
              pp_vanilla_decl_head ltycon tyvars fixity Nothing <+> equals)
          4 (ppr rhs)

    ppr (DataDecl { tcdLName = ltycon, tcdTyVars = tyvars, tcdFixity = fixity
                  , tcdDataDefn = defn })
      = pp_data_defn (pp_vanilla_decl_head ltycon tyvars fixity) defn

    ppr (ClassDecl {tcdCtxt = context, tcdLName = lclas, tcdTyVars = tyvars,
                    tcdFixity = fixity,
                    tcdFDs = fds,
                    tcdSigs = sigs, tcdMeths = methods,
                    tcdATs = ats, tcdATDefs = at_defs})
      | null sigs && null methods && null ats && null at_defs -- No "where" part
      = top_matter

      | otherwise       -- Laid out
      = vcat [ top_matter <+> text "where"
             , nest 2 $ pprDeclList (map (ppr . unLoc) ats ++
                                     map (pprTyFamDefltDecl . unLoc) at_defs ++
                                     pprLHsBindsForUser methods sigs) ]
      where
        top_matter = text "class"
                     <+> pp_vanilla_decl_head lclas tyvars fixity context
                     <+> pprFundeps (map unLoc fds)

instance OutputableBndrId p => Outputable (TyClGroup (GhcPass p)) where
  ppr (TyClGroup { group_tyclds = tyclds
                 , group_roles = roles
                 , group_kisigs = kisigs
                 , group_instds = instds
                 }
      )
    = hang (text "TyClGroup") 2 $
      ppr kisigs $$
      ppr tyclds $$
      ppr roles $$
      ppr instds

-- | Print the head of a vanilla (H98-style) declaration:
-- context, type constructor, and its binders, honouring lexical fixity.
pp_vanilla_decl_head :: (OutputableBndrId p)
   => XRec (GhcPass p) (IdP (GhcPass p))
   -> LHsQTyVars (GhcPass p)
   -> LexicalFixity
   -> Maybe (LHsContext (GhcPass p))
   -> SDoc
pp_vanilla_decl_head thing (HsQTvs { hsq_explicit = tyvars }) fixity context
 = hsep [pprLHsContext context, pp_tyvars tyvars]
  where
    pp_tyvars (varl:varsr)
      | fixity == Infix, varr:varsr'@(_:_) <- varsr
      -- If varsr has at least 2 elements, parenthesize.
      = hsep [char '(',ppr (unLoc varl), pprInfixOcc (unLoc thing)
             , (ppr.unLoc) varr, char ')'
             , hsep (map (ppr.unLoc) varsr')]
      | fixity == Infix
      = hsep [ppr (unLoc varl), pprInfixOcc (unLoc thing)
             , hsep (map (ppr.unLoc) varsr)]
      | otherwise = hsep [ pprPrefixOcc (unLoc thing)
                         , hsep (map (ppr.unLoc) (varl:varsr))]
    pp_tyvars [] = pprPrefixOcc (unLoc thing)

-- | The keyword(s) introducing a 'TyClDecl'.
pprTyClDeclFlavour :: TyClDecl (GhcPass p) -> SDoc
pprTyClDeclFlavour (ClassDecl {}) = text "class"
pprTyClDeclFlavour (SynDecl {})   = text "type"
pprTyClDeclFlavour (FamDecl { tcdFam = FamilyDecl { fdInfo = info }})
  = pprFlavour info <+> text "family"
pprTyClDeclFlavour (DataDecl { tcdDataDefn = HsDataDefn { dd_cons = nd } })
  = ppr (dataDefnConsNewOrData nd)

instance OutputableBndrId p => Outputable (FunDep (GhcPass p)) where
  ppr = pprFunDep

type instance XCFunDep (GhcPass _) = TokRarrow
type instance XXFunDep (GhcPass _) = DataConCantHappen

pprFundeps :: OutputableBndrId p => [FunDep (GhcPass p)] -> SDoc
pprFundeps []  = empty
pprFundeps fds = hsep (vbar : punctuate comma (map pprFunDep fds))

pprFunDep :: OutputableBndrId p => FunDep (GhcPass p) -> SDoc
pprFunDep (FunDep _ us vs) = hsep [interppSP us, arrow, interppSP vs]

{-
*********************************************************************
*                                                                   *
                               TyClGroup
        Strongly connected components of
      type, class, instance, and role declarations
*                                                                   *
*********************************************************************
-}

type instance XCTyClGroup (GhcPass _) = NoExtField
type instance XXTyClGroup (GhcPass _) = DataConCantHappen

{-
*********************************************************************
*                                                                   *
               Data and type family declarations
*                                                                   *
*********************************************************************
-}

type instance XNoSig            (GhcPass _) = NoExtField
type instance XCKindSig         (GhcPass _) = NoExtField
type instance XTyVarSig         (GhcPass _) = NoExtField
type instance XXFamilyResultSig (GhcPass _) = DataConCantHappen

type instance XCFamilyDecl (GhcPass _) = AnnFamilyDecl
type instance XXFamilyDecl (GhcPass _) = DataConCantHappen

-- | Exact-print annotations for a family declaration.
data AnnFamilyDecl = AnnFamilyDecl {
    afd_openp  :: [EpToken "("],
    afd_closep :: [EpToken ")"],
    afd_type   :: EpToken "type",
    afd_data   :: EpToken "data",
    afd_family :: EpToken "family",
    afd_dcolon :: TokDcolon,
    afd_equal  :: EpToken "=",
    afd_vbar   :: EpToken "|",
    afd_where  :: EpToken "where",
    afd_openc  :: EpToken "{",
    afd_dotdot :: EpToken "..",
    afd_closec :: EpToken "}"
  } deriving Data

instance NoAnn AnnFamilyDecl where
  noAnn = AnnFamilyDecl noAnn noAnn noAnn noAnn noAnn noAnn
                        noAnn noAnn noAnn noAnn noAnn noAnn

------------- Functions over FamilyDecls -----------

familyDeclLName :: FamilyDecl (GhcPass p)
                -> XRec (GhcPass p) (IdP (GhcPass p))
familyDeclLName (FamilyDecl { fdLName = n }) = n

familyDeclName :: FamilyDecl (GhcPass p) -> IdP (GhcPass p)
familyDeclName = unLoc . familyDeclLName

-- | The result kind signature of a family, if one was supplied.
famResultKindSignature :: FamilyResultSig (GhcPass p)
                       -> Maybe (LHsKind (GhcPass p))
famResultKindSignature (NoSig _)        = Nothing
famResultKindSignature (KindSig _ ki)   = Just ki
famResultKindSignature (TyVarSig _ bndr) =
  case hsBndrKind (unLoc bndr) of
    HsBndrNoKind _   -> Nothing
    HsBndrKind _ ki  -> Just ki

-- | Maybe return name of the result type variable
resultVariableName :: FamilyResultSig (GhcPass a) -> Maybe (IdP (GhcPass a))
resultVariableName (TyVarSig _ sig) = hsLTyVarName sig
resultVariableName _                = Nothing

------------- Pretty printing FamilyDecls -----------

type instance XCInjectivityAnn (GhcPass _) = TokRarrow
type instance XXInjectivityAnn (GhcPass _) = DataConCantHappen

instance OutputableBndrId p => Outputable (FamilyDecl (GhcPass p)) where
  ppr (FamilyDecl { fdInfo = info, fdLName = ltycon
                  , fdTopLevel = top_level
                  , fdTyVars = tyvars
                  , fdFixity = fixity
                  , fdResultSig = L _ result
                  , fdInjectivityAnn = mb_inj })
    = vcat [ pprFlavour info <+> pp_top_level <+>
             pp_vanilla_decl_head ltycon tyvars fixity Nothing <+>
             pp_kind <+> pp_inj <+> pp_where
           , nest 2 $ pp_eqns ]
    where
      -- The "family" keyword appears only at top level.
      pp_top_level = case top_level of
                       TopLevel    -> text "family"
                       NotTopLevel -> empty

      pp_kind = case result of
                  NoSig    _         -> empty
                  KindSig  _ kind    -> dcolon <+> ppr kind
                  TyVarSig _ tv_bndr -> text "=" <+> ppr tv_bndr
      pp_inj = case mb_inj of
                 Just (L _ (InjectivityAnn _ lhs rhs)) ->
                   hsep [ vbar, ppr lhs, text "->", hsep (map ppr rhs) ]
                 Nothing -> empty
      (pp_where, pp_eqns) = case info of
        ClosedTypeFamily mb_eqns ->
          ( text "where"
          , case mb_eqns of
              Nothing   -> text ".."
              Just eqns -> vcat $ map (ppr_fam_inst_eqn . unLoc) eqns )
        _ -> (empty, empty)

{-
*********************************************************************
*                                                                   *
               Data types and data constructors
*                                                                   *
*********************************************************************
-}

type instance XCHsDataDefn (GhcPass _) = AnnDataDefn
type instance XXHsDataDefn (GhcPass _) = DataConCantHappen

type instance XCHsDerivingClause (GhcPass _) = EpToken "deriving"
type instance XXHsDerivingClause (GhcPass _) = DataConCantHappen

instance OutputableBndrId p
       => Outputable (HsDerivingClause (GhcPass p)) where
  ppr (HsDerivingClause { deriv_clause_strategy = dcs
                        , deriv_clause_tys = L _ dct })
    = hsep [ text "deriving"
           , pp_strat_before
           , ppr dct
           , pp_strat_after ]
      where
        -- @via@ is unique in that in comes /after/ the class being derived,
        -- so we must special-case it.
        (pp_strat_before, pp_strat_after) =
          case dcs of
            Just (L _ via@ViaStrategy{}) -> (empty, ppr via)
            _                            -> (ppDerivStrategy dcs, empty)

-- | A short description of a @DerivStrategy'@.
derivStrategyName :: DerivStrategy a -> SDoc
derivStrategyName = text . go
  where
    go StockStrategy    {} = "stock"
    go AnyclassStrategy {} = "anyclass"
    go NewtypeStrategy  {} = "newtype"
    go ViaStrategy      {} = "via"

type instance XDctSingle (GhcPass _) = NoExtField
type instance XDctMulti  (GhcPass _) = NoExtField
type instance XXDerivClauseTys (GhcPass _) = DataConCantHappen

instance OutputableBndrId p => Outputable (DerivClauseTys (GhcPass p)) where
  ppr (DctSingle _ ty) = ppr ty
  ppr (DctMulti _ tys) = parens (interpp'SP tys)

type instance XStandaloneKindSig GhcPs = (EpToken "type", TokDcolon)
type instance XStandaloneKindSig GhcRn = NoExtField
type instance XStandaloneKindSig GhcTc = NoExtField

type instance XXStandaloneKindSig (GhcPass p) = DataConCantHappen

standaloneKindSigName :: StandaloneKindSig (GhcPass p) -> IdP (GhcPass p)
standaloneKindSigName (StandaloneKindSig _ lname _) = unLoc lname

type instance XConDeclGADT GhcPs = AnnConDeclGADT
type instance XConDeclGADT GhcRn = NoExtField
type instance XConDeclGADT GhcTc = NoExtField

type instance XConDeclH98 GhcPs = AnnConDeclH98
type instance XConDeclH98 GhcRn = NoExtField
type instance XConDeclH98 GhcTc = NoExtField

type instance XXConDecl (GhcPass _) = DataConCantHappen

type instance XPrefixConGADT (GhcPass _) = NoExtField

type instance XRecConGADT GhcPs = TokRarrow
type instance XRecConGADT GhcRn = NoExtField
type instance XRecConGADT GhcTc = NoExtField

type instance XXConDeclGADTDetails (GhcPass _) = DataConCantHappen

-- | Exact-print annotations for an H98-style constructor declaration.
data AnnConDeclH98 = AnnConDeclH98 {
    acdh_forall :: TokForall,
    acdh_dot    :: EpToken ".",
    acdh_darrow :: TokDarrow
  } deriving Data

instance NoAnn AnnConDeclH98 where
  noAnn = AnnConDeclH98 noAnn noAnn noAnn

-- | Exact-print annotations for a GADT-style constructor declaration.
data AnnConDeclGADT = AnnConDeclGADT {
    acdg_openp  :: [EpToken "("],
    acdg_closep :: [EpToken ")"],
    acdg_dcolon :: TokDcolon
  } deriving Data

instance NoAnn AnnConDeclGADT where
  noAnn = AnnConDeclGADT noAnn noAnn noAnn

-- Codomain could be 'NonEmpty', but at the moment all users need a list.
getConNames :: ConDecl GhcRn -> [LocatedN Name]
getConNames ConDeclH98  {con_name  = name}  = [name]
getConNames ConDeclGADT {con_names = names} = toList names

-- | Return @'Just' fields@ if a data constructor declaration uses record
-- syntax (i.e., 'RecCon'), where @fields@ are the field selectors.
-- Otherwise, return 'Nothing'.
getRecConArgs_maybe :: ConDecl GhcRn -> Maybe (LocatedL [LConDeclField GhcRn])
getRecConArgs_maybe (ConDeclH98{con_args = args}) = case args of
  PrefixCon{} -> Nothing
  RecCon flds -> Just flds
  InfixCon{}  -> Nothing
getRecConArgs_maybe (ConDeclGADT{con_g_args = args}) = case args of
  PrefixConGADT{}   -> Nothing
  RecConGADT _ flds -> Just flds

-- | The context of a constructor, or the empty list when there is none.
hsConDeclTheta :: Maybe (LHsContext (GhcPass p)) -> [LHsType (GhcPass p)]
hsConDeclTheta Nothing            = []
hsConDeclTheta (Just (L _ theta)) = theta

-- | Print the header of a data definition: optional "type" (for
-- @type data@), new/data keyword, C type, header, and kind signature.
ppDataDefnHeader :: (OutputableBndrId p)
   => (Maybe (LHsContext (GhcPass p)) -> SDoc) -- Printing the header
   -> HsDataDefn (GhcPass p)
   -> SDoc
ppDataDefnHeader pp_hdr HsDataDefn
  { dd_ctxt = context
  , dd_cType = mb_ct
  , dd_kindSig = mb_sig
  , dd_cons = condecls }
  = pp_type <+> ppr (dataDefnConsNewOrData condecls) <+> pp_ct
      <+> pp_hdr context <+> pp_sig
  where
    pp_type
      | isTypeDataDefnCons condecls = text "type"
      | otherwise                   = empty
    pp_ct = case mb_ct of
              Nothing -> empty
              Just ct -> ppr ct
    pp_sig = case mb_sig of
               Nothing   -> empty
               Just kind -> dcolon <+> ppr kind

pp_data_defn :: (OutputableBndrId p)
   => (Maybe (LHsContext (GhcPass p)) -> SDoc) -- Printing the header
   -> HsDataDefn (GhcPass p)
   -> SDoc
pp_data_defn pp_hdr defn@HsDataDefn { dd_cons = condecls
                                    , dd_derivs = derivings }
  | null condecls
  = ppDataDefnHeader pp_hdr defn <+> pp_derivings derivings
  | otherwise
  = hang (ppDataDefnHeader pp_hdr defn) 2
         (pp_condecls (toList condecls) $$ pp_derivings derivings)
  where
    pp_derivings ds = vcat (map ppr ds)

instance OutputableBndrId p => Outputable (HsDataDefn (GhcPass p)) where
   ppr d = pp_data_defn (\_ -> text "Naked HsDataDefn") d

instance OutputableBndrId p
       => Outputable (StandaloneKindSig (GhcPass p)) where
  ppr (StandaloneKindSig _ v ki)
    = text "type" <+> pprPrefixOcc (unLoc v) <+> text "::" <+> ppr ki

pp_condecls :: forall p. OutputableBndrId p => [LConDecl (GhcPass p)] -> SDoc
pp_condecls cs
  | anyLConIsGadt cs            -- In GADT syntax
  = hang (text "where") 2 (vcat (map ppr cs))
  | otherwise                   -- In H98 syntax
  = equals <+> sep (punctuate (text " |") (map ppr cs))

instance (OutputableBndrId p) => Outputable (ConDecl (GhcPass p)) where
    ppr = pprConDecl

pprConDecl :: forall p. OutputableBndrId p => ConDecl (GhcPass p) -> SDoc
pprConDecl (ConDeclH98 { con_name = L _ con
                       , con_ex_tvs = ex_tvs
                       , con_mb_cxt = mcxt
                       , con_args = args
                       , con_doc = doc })
  = pprMaybeWithDoc doc $
    sep [ pprHsForAll (mkHsForAllInvisTele noAnn ex_tvs) mcxt
        , ppr_details args ]
  where
    -- In ppr_details: let's not print the multiplicities (they are always 1, by
    -- definition) as they do not appear in an actual declaration.
    ppr_details (InfixCon t1 t2)
      = hsep [ppr (hsScaledThing t1), pprInfixOcc con, ppr (hsScaledThing t2)]
    ppr_details (PrefixCon _ tys)
      = hsep (pprPrefixOcc con : map (pprHsType . unLoc . hsScaledThing) tys)
    ppr_details (RecCon fields)
      = pprPrefixOcc con <+> pprConDeclFields (unLoc fields)

pprConDecl (ConDeclGADT { con_names = cons, con_bndrs = L _ outer_bndrs
                        , con_mb_cxt = mcxt, con_g_args = args
                        , con_res_ty = res_ty, con_doc = doc })
  = pprMaybeWithDoc doc $
    ppr_con_names (toList cons) <+> dcolon
    <+> (sep [pprHsOuterSigTyVarBndrs outer_bndrs <+> pprLHsContext mcxt,
              sep (ppr_args args ++ [ppr res_ty]) ])
  where
    ppr_args (PrefixConGADT _ args)
      = map (\(HsScaled arr t) -> ppr t <+> ppr_arr arr) args
    ppr_args (RecConGADT _ fields)
      = [pprConDeclFields (unLoc fields) <+> arrow]

    -- Display linear arrows as unrestricted with -XNoLinearTypes
    -- (cf. dataConDisplayType in Note [Displaying linear fields]
    -- in GHC.Core.DataCon)
    ppr_arr (HsLinearArrow _) = sdocOption sdocLinearTypes $ \show_linear_types ->
                                if show_linear_types
                                then lollipop
                                else arrow
    ppr_arr arr = pprHsArrow arr

ppr_con_names :: (OutputableBndr a) => [GenLocated l a] -> SDoc
ppr_con_names = pprWithCommas (pprPrefixOcc . unLoc)

{-
************************************************************************
*                                                                      *
                Instance declarations
*                                                                      *
************************************************************************
-}

type instance XCFamEqn (GhcPass _) r
  = ([EpToken "("], [EpToken ")"], EpToken "=")
type instance XXFamEqn (GhcPass _) r = DataConCantHappen

----------------- Class instances -------------

type instance XCClsInstDecl GhcPs
  = ( Maybe (LWarningTxt GhcPs)
          -- The warning of the deprecated instance
          -- See Note [Implementation of deprecated instances]
          -- in GHC.Tc.Solver.Dict
    , AnnClsInstDecl
    , AnnSortKey DeclTag) -- For sorting the additional annotations
                          -- TODO:AZ:tidy up
type instance XCClsInstDecl GhcRn
  = Maybe (LWarningTxt GhcRn)
      -- The warning of the deprecated instance
      -- See Note [Implementation of deprecated instances]
      -- in GHC.Tc.Solver.Dict
type instance XCClsInstDecl GhcTc = NoExtField

type instance XXClsInstDecl (GhcPass _) = DataConCantHappen

----------------- Instances of all kinds -------------

type instance XClsInstD     (GhcPass _) = NoExtField

type instance XDataFamInstD (GhcPass _) = NoExtField

type instance XTyFamInstD   GhcPs = NoExtField
type instance XTyFamInstD   GhcRn = NoExtField
type instance XTyFamInstD   GhcTc = NoExtField

type instance XXInstDecl    (GhcPass _) = DataConCantHappen

-- | Exact-print annotations for a class instance declaration.
data AnnClsInstDecl = AnnClsInstDecl {
    acid_instance :: EpToken "instance",
    acid_where    :: EpToken "where",
    acid_openc    :: EpToken "{",
    acid_semis    :: [EpToken ";"],
    acid_closec   :: EpToken "}"
  } deriving Data

instance NoAnn AnnClsInstDecl where
  noAnn = AnnClsInstDecl noAnn noAnn noAnn noAnn noAnn

-- | The deprecation warning attached to a class instance, if any.
-- Only the parser and renamer passes carry one in the extension field.
cidDeprecation :: forall p. IsPass p
               => ClsInstDecl (GhcPass p)
               -> Maybe (WarningTxt (GhcPass p))
cidDeprecation = fmap unLoc . decl_deprecation (ghcPass @p)
  where
    decl_deprecation :: GhcPass p -> ClsInstDecl (GhcPass p)
                     -> Maybe (LocatedP (WarningTxt (GhcPass p)))
    decl_deprecation GhcPs (ClsInstDecl{ cid_ext = (depr, _, _) } )
      = depr
    decl_deprecation GhcRn (ClsInstDecl{ cid_ext = depr })
      = depr
    decl_deprecation _ _ = Nothing

instance OutputableBndrId p => Outputable (TyFamInstDecl (GhcPass p)) where
  ppr = pprTyFamInstDecl TopLevel

pprTyFamInstDecl :: (OutputableBndrId p)
                 => TopLevelFlag -> TyFamInstDecl (GhcPass p) -> SDoc
pprTyFamInstDecl top_lvl (TyFamInstDecl { tfid_eqn = eqn })
   = text "type" <+> ppr_instance_keyword top_lvl <+> ppr_fam_inst_eqn eqn

ppr_instance_keyword :: TopLevelFlag -> SDoc
ppr_instance_keyword TopLevel    = text "instance"
ppr_instance_keyword NotTopLevel = empty

pprTyFamDefltDecl :: (OutputableBndrId p)
                  => TyFamDefltDecl (GhcPass p) -> SDoc
pprTyFamDefltDecl = pprTyFamInstDecl NotTopLevel

ppr_fam_inst_eqn :: (OutputableBndrId p) => TyFamInstEqn (GhcPass p) -> SDoc
ppr_fam_inst_eqn (FamEqn { feqn_tycon  = L _ tycon
                         , feqn_bndrs  = bndrs
                         , feqn_pats   = pats
                         , feqn_fixity = fixity
                         , feqn_rhs    = rhs })
    = pprHsFamInstLHS tycon bndrs pats fixity Nothing <+> equals <+> ppr rhs

instance OutputableBndrId p => Outputable (DataFamInstDecl (GhcPass p)) where
  ppr = pprDataFamInstDecl TopLevel

pprDataFamInstDecl :: (OutputableBndrId p)
                   => TopLevelFlag -> DataFamInstDecl (GhcPass p) -> SDoc
pprDataFamInstDecl top_lvl (DataFamInstDecl { dfid_eqn =
                            (FamEqn { feqn_tycon  = L _ tycon
                                    , feqn_bndrs  = bndrs
                                    , feqn_pats   = pats
                                    , feqn_fixity = fixity
                                    , feqn_rhs    = defn })})
  = pp_data_defn pp_hdr defn
  where
    pp_hdr mctxt = ppr_instance_keyword top_lvl
              <+> pprHsFamInstLHS tycon bndrs pats fixity mctxt
                  -- pp_data_defn pretty-prints the kind sig. See #14817.
-- | "newtype" or "data", for a data family instance.
pprDataFamInstFlavour :: DataFamInstDecl (GhcPass p) -> SDoc
pprDataFamInstFlavour DataFamInstDecl
  { dfid_eqn = FamEqn { feqn_rhs = HsDataDefn { dd_cons = cons }}}
  = ppr (dataDefnConsNewOrData cons)

pprHsFamInstLHS :: (OutputableBndrId p)
   => IdP (GhcPass p)
   -> HsOuterFamEqnTyVarBndrs (GhcPass p)
   -> HsFamEqnPats (GhcPass p)
   -> LexicalFixity
   -> Maybe (LHsContext (GhcPass p))
   -> SDoc
pprHsFamInstLHS thing bndrs typats fixity mb_ctxt
   = hsep [ pprHsOuterFamEqnTyVarBndrs bndrs
          , pprLHsContext mb_ctxt
          , pprHsArgsApp thing fixity typats ]

instance OutputableBndrId p => Outputable (ClsInstDecl (GhcPass p)) where
    ppr (cid@ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = binds
                         , cid_sigs = sigs, cid_tyfam_insts = ats
                         , cid_overlap_mode = mbOverlap
                         , cid_datafam_insts = adts })
      | null sigs, null ats, null adts, null binds  -- No "where" part
      = top_matter

      | otherwise       -- Laid out
      = vcat [ top_matter <+> text "where"
             , nest 2 $ pprDeclList $
               map (pprTyFamInstDecl NotTopLevel . unLoc)   ats ++
               map (pprDataFamInstDecl NotTopLevel . unLoc) adts ++
               pprLHsBindsForUser binds sigs ]
      where
        top_matter = text "instance" <+> maybe empty ppr (cidDeprecation cid)
                 <+> ppOverlapPragma mbOverlap
                 <+> ppr inst_ty

ppDerivStrategy :: OutputableBndrId p
                => Maybe (LDerivStrategy (GhcPass p)) -> SDoc
ppDerivStrategy mb =
  case mb of
    Nothing       -> empty
    Just (L _ ds) -> ppr ds

ppOverlapPragma :: Maybe (LocatedP OverlapMode) -> SDoc
ppOverlapPragma mb =
  case mb of
    Nothing                     -> empty
    Just (L _ (NoOverlap s))    -> maybe_stext s "{-# NO_OVERLAP #-}"
    Just (L _ (Overlappable s)) -> maybe_stext s "{-# OVERLAPPABLE #-}"
    Just (L _ (Overlapping s))  -> maybe_stext s "{-# OVERLAPPING #-}"
    Just (L _ (Overlaps s))     -> maybe_stext s "{-# OVERLAPS #-}"
    Just (L _ (Incoherent s))   -> maybe_stext s "{-# INCOHERENT #-}"
    Just (L _ (NonCanonical s)) -> maybe_stext s "{-# INCOHERENT #-}"
      -- No surface syntax for NONCANONICAL yet
  where
    -- With SourceText available, re-emit the original pragma text.
    maybe_stext NoSourceText     alt = text alt
    maybe_stext (SourceText src) _   = ftext src <+> text "#-}"

instance (OutputableBndrId p) => Outputable (InstDecl (GhcPass p)) where
    ppr (ClsInstD     { cid_inst  = decl }) = ppr decl
    ppr (TyFamInstD   { tfid_inst = decl }) = ppr decl
    ppr (DataFamInstD { dfid_inst = decl }) = ppr decl

-- Extract the declarations of associated data types from an instance
instDeclDataFamInsts :: [LInstDecl (GhcPass p)]
                     -> [DataFamInstDecl (GhcPass p)]
instDeclDataFamInsts inst_decls
  = concatMap do_one inst_decls
  where
    do_one :: LInstDecl (GhcPass p) -> [DataFamInstDecl (GhcPass p)]
    do_one (L _ (ClsInstD { cid_inst = ClsInstDecl { cid_datafam_insts = fam_insts } }))
      = map unLoc fam_insts
    do_one (L _ (DataFamInstD { dfid_inst = fam_inst })) = [fam_inst]
    do_one (L _ (TyFamInstD {}))                         = []

-- | Convert a 'NewOrData' to a 'TyConFlavour'
newOrDataToFlavour :: NewOrData -> TyConFlavour tc
newOrDataToFlavour NewType  = NewtypeFlavour
newOrDataToFlavour DataType = DataTypeFlavour

instance Outputable NewOrData where
  ppr NewType  = text "newtype"
  ppr DataType = text "data"

-- At the moment we only call this with @f =
-- '[]'@ and @f = 'DataDefnCons'@.
-- | True when the first constructor (if any) is written in GADT syntax.
anyLConIsGadt :: Foldable f => f (GenLocated l (ConDecl pass)) -> Bool
anyLConIsGadt xs = case toList xs of
  L _ ConDeclGADT {} : _ -> True
  _                      -> False
{-# SPECIALIZE anyLConIsGadt :: [GenLocated l (ConDecl pass)] -> Bool #-}
{-# SPECIALIZE anyLConIsGadt :: DataDefnCons (GenLocated l (ConDecl pass)) -> Bool #-}

{-
************************************************************************
*                                                                      *
\subsection[DerivDecl]{A stand-alone instance deriving declaration}
*                                                                      *
************************************************************************
-}

type instance XCDerivDecl GhcPs = ( Maybe (LWarningTxt GhcPs)
                                      -- The warning of the deprecated derivation
                                      -- See Note [Implementation of deprecated instances]
                                      -- in GHC.Tc.Solver.Dict
                                  , AnnDerivDecl )
type instance XCDerivDecl GhcRn = ( Maybe (LWarningTxt GhcRn)
                                      -- The warning of the deprecated derivation
                                      -- See Note [Implementation of deprecated instances]
                                      -- in GHC.Tc.Solver.Dict
                                  , AnnDerivDecl )
type instance XCDerivDecl GhcTc = AnnDerivDecl
type instance XXDerivDecl (GhcPass _) = DataConCantHappen

type AnnDerivDecl = (EpToken "deriving", EpToken "instance")

-- | The deprecation warning attached to a standalone deriving
-- declaration, if any (parser and renamer passes only).
derivDeprecation :: forall p. IsPass p
                 => DerivDecl (GhcPass p)
                 -> Maybe (WarningTxt (GhcPass p))
derivDeprecation = fmap unLoc . decl_deprecation (ghcPass @p)
  where
    decl_deprecation :: GhcPass p -> DerivDecl (GhcPass p)
                     -> Maybe (LocatedP (WarningTxt (GhcPass p)))
    decl_deprecation GhcPs (DerivDecl{ deriv_ext = (depr, _) })
      = depr
    decl_deprecation GhcRn (DerivDecl{ deriv_ext = (depr, _) })
      = depr
    decl_deprecation _ _ = Nothing

instance OutputableBndrId p => Outputable (DerivDecl (GhcPass p)) where
    ppr (deriv@DerivDecl { deriv_type = ty
                         , deriv_strategy = ds
                         , deriv_overlap_mode = o })
        = hsep [ text "deriving"
               , ppDerivStrategy ds
               , text "instance"
               , maybe empty ppr (derivDeprecation deriv)
               , ppOverlapPragma o
               , ppr ty ]

{-
************************************************************************
*                                                                      *
                Deriving strategies
*                                                                      *
************************************************************************
-}

type instance XStockStrategy    GhcPs = EpToken "stock"
type instance XStockStrategy    GhcRn = NoExtField
type instance XStockStrategy    GhcTc = NoExtField

type instance XAnyClassStrategy GhcPs = EpToken "anyclass"
type instance XAnyClassStrategy GhcRn = NoExtField
type instance XAnyClassStrategy GhcTc = NoExtField

type instance XNewtypeStrategy  GhcPs = EpToken "newtype"
type instance XNewtypeStrategy  GhcRn = NoExtField
type instance XNewtypeStrategy  GhcTc = NoExtField

type instance XViaStrategy GhcPs = XViaStrategyPs
type instance XViaStrategy GhcRn = LHsSigType GhcRn
type instance XViaStrategy GhcTc = Type

-- | Parser-pass payload for @deriving via@: the "via" token and the
-- @via@ type itself.
data XViaStrategyPs = XViaStrategyPs (EpToken "via") (LHsSigType GhcPs)

instance OutputableBndrId p => Outputable (DerivStrategy (GhcPass p)) where
    ppr (StockStrategy    _) = text "stock"
    ppr (AnyclassStrategy _) = text "anyclass"
    ppr (NewtypeStrategy  _) = text "newtype"
    ppr (ViaStrategy ty)     = text "via" <+> case ghcPass @p of
                                                GhcPs -> ppr ty
                                                GhcRn -> ppr ty
                                                GhcTc -> ppr ty

instance Outputable XViaStrategyPs where
    ppr (XViaStrategyPs _ t) = ppr t

-- | Eliminate a 'DerivStrategy'.
foldDerivStrategy :: (p ~ GhcPass pass) => r -> (XViaStrategy p -> r) -> DerivStrategy p -> r foldDerivStrategy other _ (StockStrategy _) = other foldDerivStrategy other _ (AnyclassStrategy _) = other foldDerivStrategy other _ (NewtypeStrategy _) = other foldDerivStrategy _ via (ViaStrategy t) = via t -- | Map over the @via@ type if dealing with 'ViaStrategy'. Otherwise, -- return the 'DerivStrategy' unchanged. mapDerivStrategy :: (p ~ GhcPass pass) => (XViaStrategy p -> XViaStrategy p) -> DerivStrategy p -> DerivStrategy p mapDerivStrategy f ds = foldDerivStrategy ds (ViaStrategy . f) ds {- ************************************************************************ * * \subsection[DefaultDecl]{A @default@ declaration} * * ************************************************************************ -} type instance XCDefaultDecl GhcPs = (EpToken "default", EpToken "(", EpToken ")") type instance XCDefaultDecl GhcRn = NoExtField type instance XCDefaultDecl GhcTc = NoExtField type instance XXDefaultDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (DefaultDecl (GhcPass p)) where ppr (DefaultDecl _ cl tys) = text "default" <+> maybe id ((<+>) . 
ppr) cl (parens (interpp'SP tys)) {- ************************************************************************ * * \subsection{Foreign function interface declaration} * * ************************************************************************ -} type instance XForeignImport GhcPs = (EpToken "foreign", EpToken "import", TokDcolon) type instance XForeignImport GhcRn = NoExtField type instance XForeignImport GhcTc = Coercion type instance XForeignExport GhcPs = (EpToken "foreign", EpToken "export", TokDcolon) type instance XForeignExport GhcRn = NoExtField type instance XForeignExport GhcTc = Coercion type instance XXForeignDecl (GhcPass _) = DataConCantHappen type instance XCImport (GhcPass _) = LocatedE SourceText -- original source text for the C entity type instance XXForeignImport (GhcPass _) = DataConCantHappen type instance XCExport (GhcPass _) = LocatedE SourceText -- original source text for the C entity type instance XXForeignExport (GhcPass _) = DataConCantHappen -- pretty printing of foreign declarations instance OutputableBndrId p => Outputable (ForeignDecl (GhcPass p)) where ppr (ForeignImport { fd_name = n, fd_sig_ty = ty, fd_fi = fimport }) = hang (text "foreign import" <+> ppr fimport <+> ppr n) 2 (dcolon <+> ppr ty) ppr (ForeignExport { fd_name = n, fd_sig_ty = ty, fd_fe = fexport }) = hang (text "foreign export" <+> ppr fexport <+> ppr n) 2 (dcolon <+> ppr ty) instance OutputableBndrId p => Outputable (ForeignImport (GhcPass p)) where ppr (CImport (L _ srcText) cconv safety mHeader spec) = ppr cconv <+> ppr safety <+> pprWithSourceText srcText (pprCEntity spec "") where pp_hdr = case mHeader of Nothing -> empty Just (Header _ header) -> ftext header pprCEntity (CLabel lbl) _ = doubleQuotes $ text "static" <+> pp_hdr <+> char '&' <> ppr lbl pprCEntity (CFunction (StaticTarget st _lbl _ isFun)) src = if dqNeeded then doubleQuotes ce else empty where dqNeeded = (take 6 src == "static") || isJust mHeader || not isFun || st /= NoSourceText ce = -- We may 
need to drop leading spaces first (if take 6 src == "static" then text "static" else empty) <+> pp_hdr <+> (if isFun then empty else text "value") <+> (pprWithSourceText st empty) pprCEntity (CFunction DynamicTarget) _ = doubleQuotes $ text "dynamic" pprCEntity CWrapper _ = doubleQuotes $ text "wrapper" instance OutputableBndrId p => Outputable (ForeignExport (GhcPass p)) where ppr (CExport _ (L _ (CExportStatic _ lbl cconv))) = ppr cconv <+> char '"' <> ppr lbl <> char '"' {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ -} type instance XCRuleDecls GhcPs = ((EpaLocation, EpToken "#-}"), SourceText) type instance XCRuleDecls GhcRn = SourceText type instance XCRuleDecls GhcTc = SourceText type instance XXRuleDecls (GhcPass _) = DataConCantHappen type instance XHsRule GhcPs = (HsRuleAnn, SourceText) type instance XHsRule GhcRn = (HsRuleRn, SourceText) type instance XHsRule GhcTc = (HsRuleRn, SourceText) data HsRuleRn = HsRuleRn NameSet NameSet -- Free-vars from the LHS and RHS deriving Data type instance XXRuleDecl (GhcPass _) = DataConCantHappen data HsRuleAnn = HsRuleAnn { ra_tyanns :: Maybe (TokForall, EpToken ".") , ra_tmanns :: Maybe (TokForall, EpToken ".") , ra_equal :: EpToken "=" , ra_rest :: ActivationAnn } deriving (Data, Eq) instance NoAnn HsRuleAnn where noAnn = HsRuleAnn Nothing Nothing noAnn noAnn flattenRuleDecls :: [LRuleDecls (GhcPass p)] -> [LRuleDecl (GhcPass p)] flattenRuleDecls decls = concatMap (rds_rules . 
unLoc) decls type instance XCRuleBndr (GhcPass _) = AnnTyVarBndr type instance XRuleBndrSig (GhcPass _) = AnnTyVarBndr type instance XXRuleBndr (GhcPass _) = DataConCantHappen instance (OutputableBndrId p) => Outputable (RuleDecls (GhcPass p)) where ppr (HsRules { rds_ext = ext , rds_rules = rules }) = pprWithSourceText st (text "{-# RULES") <+> vcat (punctuate semi (map ppr rules)) <+> text "#-}" where st = case ghcPass @p of GhcPs | (_, st) <- ext -> st GhcRn -> ext GhcTc -> ext instance (OutputableBndrId p) => Outputable (RuleDecl (GhcPass p)) where ppr (HsRule { rd_ext = ext , rd_name = name , rd_act = act , rd_tyvs = tys , rd_tmvs = tms , rd_lhs = lhs , rd_rhs = rhs }) = sep [pprFullRuleName st name <+> ppr act, nest 4 (pp_forall_ty tys <+> pp_forall_tm tys <+> pprExpr (unLoc lhs)), nest 6 (equals <+> pprExpr (unLoc rhs)) ] where pp_forall_ty Nothing = empty pp_forall_ty (Just qtvs) = forAllLit <+> fsep (map ppr qtvs) <> dot pp_forall_tm Nothing | null tms = empty pp_forall_tm _ = forAllLit <+> fsep (map ppr tms) <> dot st = case ghcPass @p of GhcPs | (_, st) <- ext -> st GhcRn | (_, st) <- ext -> st GhcTc | (_, st) <- ext -> st instance (OutputableBndrId p) => Outputable (RuleBndr (GhcPass p)) where ppr (RuleBndr _ name) = ppr name ppr (RuleBndrSig _ name ty) = parens (ppr name <> dcolon <> ppr ty) pprFullRuleName :: SourceText -> GenLocated a (RuleName) -> SDoc pprFullRuleName st (L _ n) = pprWithSourceText st (doubleQuotes $ ftext n) {- ************************************************************************ * * \subsection[DeprecDecl]{Deprecations} * * ************************************************************************ -} type instance XWarnings GhcPs = ((EpaLocation, EpToken "#-}"), SourceText) type instance XWarnings GhcRn = SourceText type instance XWarnings GhcTc = SourceText type instance XXWarnDecls (GhcPass _) = DataConCantHappen type instance XWarning (GhcPass _) = (NamespaceSpecifier, (EpToken "[", EpToken "]")) type instance XXWarnDecl 
(GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (WarnDecls (GhcPass p)) where ppr (Warnings ext decls) = ftext src <+> vcat (punctuate semi (map ppr decls)) <+> text "#-}" where src = case ghcPass @p of GhcPs | (_, SourceText src) <- ext -> src GhcRn | SourceText src <- ext -> src GhcTc | SourceText src <- ext -> src _ -> panic "WarnDecls" instance OutputableBndrId p => Outputable (WarnDecl (GhcPass p)) where ppr (Warning (ns_spec, _) thing txt) = ppr_category <+> ppr ns_spec <+> hsep (punctuate comma (map ppr thing)) <+> ppr txt where ppr_category = case txt of WarningTxt (Just cat) _ _ -> ppr cat _ -> empty {- ************************************************************************ * * \subsection[AnnDecl]{Annotations} * * ************************************************************************ -} type instance XHsAnnotation (GhcPass _) = (AnnPragma, SourceText) type instance XXAnnDecl (GhcPass _) = DataConCantHappen instance (OutputableBndrId p) => Outputable (AnnDecl (GhcPass p)) where ppr (HsAnnotation _ provenance expr) = hsep [text "{-#", pprAnnProvenance provenance, pprExpr (unLoc expr), text "#-}"] pprAnnProvenance :: OutputableBndrId p => AnnProvenance (GhcPass p) -> SDoc pprAnnProvenance ModuleAnnProvenance = text "ANN module" pprAnnProvenance (ValueAnnProvenance (L _ name)) = text "ANN" <+> ppr name pprAnnProvenance (TypeAnnProvenance (L _ name)) = text "ANN type" <+> ppr name {- ************************************************************************ * * \subsection[RoleAnnot]{Role annotations} * * ************************************************************************ -} type instance XCRoleAnnotDecl GhcPs = (EpToken "type", EpToken "role") type instance XCRoleAnnotDecl GhcRn = NoExtField type instance XCRoleAnnotDecl GhcTc = NoExtField type instance XXRoleAnnotDecl (GhcPass _) = DataConCantHappen instance OutputableBndr (IdP (GhcPass p)) => Outputable (RoleAnnotDecl (GhcPass p)) where ppr (RoleAnnotDecl _ ltycon roles) = 
text "type role" <+> pprPrefixOcc (unLoc ltycon) <+> hsep (map (pp_role . unLoc) roles) where pp_role Nothing = underscore pp_role (Just r) = ppr r roleAnnotDeclName :: RoleAnnotDecl (GhcPass p) -> IdP (GhcPass p) roleAnnotDeclName (RoleAnnotDecl _ (L _ name) _) = name {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsDecl (GhcPass _)) = SrcSpanAnnA type instance Anno (SpliceDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (TyClDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (FunDep (GhcPass p)) = SrcSpanAnnA type instance Anno (FamilyResultSig (GhcPass p)) = EpAnnCO type instance Anno (FamilyDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (InjectivityAnn (GhcPass p)) = EpAnnCO type instance Anno CType = SrcSpanAnnP type instance Anno (HsDerivingClause (GhcPass p)) = EpAnnCO type instance Anno (DerivClauseTys (GhcPass _)) = SrcSpanAnnC type instance Anno (StandaloneKindSig (GhcPass p)) = SrcSpanAnnA type instance Anno (ConDecl (GhcPass p)) = SrcSpanAnnA type instance Anno Bool = EpAnnCO type instance Anno [LocatedA (ConDeclField (GhcPass _))] = SrcSpanAnnL type instance Anno (FamEqn p (LocatedA (HsType p))) = SrcSpanAnnA type instance Anno (TyFamInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DataFamInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (FamEqn (GhcPass p) _) = SrcSpanAnnA type instance Anno (ClsInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (InstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DocDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DerivDecl (GhcPass p)) = SrcSpanAnnA type instance Anno OverlapMode = SrcSpanAnnP type instance Anno (DerivStrategy (GhcPass p)) = EpAnnCO type instance Anno (DefaultDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (ForeignDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (RuleDecls (GhcPass p)) = SrcSpanAnnA type 
instance Anno (RuleDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (SourceText, RuleName) = EpAnnCO type instance Anno (RuleBndr (GhcPass p)) = EpAnnCO type instance Anno (WarnDecls (GhcPass p)) = SrcSpanAnnA type instance Anno (WarnDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (AnnDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (RoleAnnotDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (Maybe Role) = EpAnnCO type instance Anno CCallConv = EpaLocation type instance Anno Safety = EpaLocation type instance Anno CExportSpec = EpaLocation ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Doc.hs0000644000000000000000000002636007346545000017330 0ustar0000000000000000-- | Types and functions for raw and lexed docstrings. {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE StandaloneDeriving #-} module GHC.Hs.Doc ( HsDoc , WithHsDocIdentifiers(..) , hsDocIds , LHsDoc , pprHsDocDebug , pprWithDoc , pprMaybeWithDoc , module GHC.Hs.DocString , ExtractedTHDocs(..) , DocStructureItem(..) , DocStructure , Docs(..) 
, emptyDocs ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Types.Name import GHC.Utils.Outputable as Outputable hiding ((<>)) import GHC.Types.SrcLoc import qualified GHC.Data.EnumSet as EnumSet import GHC.Data.EnumSet (EnumSet) import GHC.Types.Avail import GHC.Types.Name.Set import GHC.Driver.Flags import Control.DeepSeq import Data.Data import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap import Data.Map (Map) import qualified Data.Map as Map import Data.List.NonEmpty (NonEmpty(..)) import GHC.LanguageExtensions.Type import qualified GHC.Utils.Outputable as O import GHC.Hs.Extension import GHC.Types.Unique.Map import Data.List (sortBy) import GHC.Hs.DocString import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Module.Name -- | A docstring with the (probable) identifiers found in it. type HsDoc = WithHsDocIdentifiers HsDocString -- | Annotate a value with the probable identifiers found in it -- These will be used by haddock to generate links. -- -- The identifiers are bundled along with their location in the source file. -- This is useful for tooling to know exactly where they originate. -- -- This type is currently used in two places - for regular documentation comments, -- with 'a' set to 'HsDocString', and for adding identifier information to -- warnings, where 'a' is 'StringLiteral' data WithHsDocIdentifiers a pass = WithHsDocIdentifiers { hsDocString :: !a , hsDocIdentifiers :: ![Located (IdP pass)] } deriving instance (Data pass, Data (IdP pass), Data a) => Data (WithHsDocIdentifiers a pass) deriving instance (Eq (IdP pass), Eq a) => Eq (WithHsDocIdentifiers a pass) instance (NFData (IdP pass), NFData a) => NFData (WithHsDocIdentifiers a pass) where rnf (WithHsDocIdentifiers d i) = rnf d `seq` rnf i -- | For compatibility with the existing @-ddump-parsed' output, we only show -- the docstring. -- -- Use 'pprHsDoc' to show `HsDoc`'s internals. 
instance Outputable a => Outputable (WithHsDocIdentifiers a pass) where ppr (WithHsDocIdentifiers s _ids) = ppr s instance Binary a => Binary (WithHsDocIdentifiers a GhcRn) where put_ bh (WithHsDocIdentifiers s ids) = do put_ bh s put_ bh $ BinLocated <$> ids get bh = liftA2 WithHsDocIdentifiers (get bh) (fmap unBinLocated <$> get bh) -- | Extract a mapping from the lexed identifiers to the names they may -- correspond to. hsDocIds :: WithHsDocIdentifiers a GhcRn -> NameSet hsDocIds (WithHsDocIdentifiers _ ids) = mkNameSet $ map unLoc ids -- | Pretty print a thing with its doc -- The docstring will include the comment decorators '-- |', '{-|' etc -- and will come either before or after depending on how it was written -- i.e it will come after the thing if it is a '-- ^' or '{-^' and before -- otherwise. pprWithDoc :: LHsDoc name -> SDoc -> SDoc pprWithDoc doc = pprWithDocString (hsDocString $ unLoc doc) -- | See 'pprWithHsDoc' pprMaybeWithDoc :: Maybe (LHsDoc name) -> SDoc -> SDoc pprMaybeWithDoc Nothing = id pprMaybeWithDoc (Just doc) = pprWithDoc doc -- | Print a doc with its identifiers, useful for debugging pprHsDocDebug :: (Outputable (IdP name)) => HsDoc name -> SDoc pprHsDocDebug (WithHsDocIdentifiers s ids) = vcat [ text "text:" $$ nest 2 (pprHsDocString s) , text "identifiers:" $$ nest 2 (vcat (map pprLocatedAlways ids)) ] type LHsDoc pass = Located (HsDoc pass) -- | A simplified version of 'HsImpExp.IE'. data DocStructureItem = DsiSectionHeading !Int !(HsDoc GhcRn) | DsiDocChunk !(HsDoc GhcRn) | DsiNamedChunkRef !String | DsiExports !DetOrdAvails | DsiModExport !(NonEmpty ModuleName) -- ^ We might re-export avails from multiple -- modules with a single export declaration. E.g. -- when we have -- -- > module M (module X) where -- > import R0 as X -- > import R1 as X -- -- Invariant: This list of ModuleNames must be -- sorted to guarantee interface file determinism. 
!DetOrdAvails -- ^ Invariant: This list of Avails must be sorted -- to guarantee interface file determinism. instance Binary DocStructureItem where put_ bh = \case DsiSectionHeading level doc -> do putByte bh 0 put_ bh level put_ bh doc DsiDocChunk doc -> do putByte bh 1 put_ bh doc DsiNamedChunkRef name -> do putByte bh 2 put_ bh name DsiExports avails -> do putByte bh 3 put_ bh avails DsiModExport mod_names avails -> do putByte bh 4 put_ bh mod_names put_ bh avails get bh = do tag <- getByte bh case tag of 0 -> DsiSectionHeading <$> get bh <*> get bh 1 -> DsiDocChunk <$> get bh 2 -> DsiNamedChunkRef <$> get bh 3 -> DsiExports <$> get bh 4 -> DsiModExport <$> get bh <*> get bh _ -> fail "instance Binary DocStructureItem: Invalid tag" instance Outputable DocStructureItem where ppr = \case DsiSectionHeading level doc -> vcat [ text "section heading, level" <+> ppr level O.<> colon , nest 2 (pprHsDocDebug doc) ] DsiDocChunk doc -> vcat [ text "documentation chunk:" , nest 2 (pprHsDocDebug doc) ] DsiNamedChunkRef name -> text "reference to named chunk:" <+> text name DsiExports avails -> text "avails:" $$ nest 2 (ppr avails) DsiModExport mod_names avails -> text "re-exported module(s):" <+> ppr mod_names $$ nest 2 (ppr avails) instance NFData DocStructureItem where rnf = \case DsiSectionHeading level doc -> rnf level `seq` rnf doc DsiDocChunk doc -> rnf doc DsiNamedChunkRef name -> rnf name DsiExports avails -> rnf avails DsiModExport mod_names avails -> rnf mod_names `seq` rnf avails type DocStructure = [DocStructureItem] data Docs = Docs { docs_mod_hdr :: Maybe (HsDoc GhcRn) -- ^ Module header. , docs_exports :: UniqMap Name (HsDoc GhcRn) -- ^ Docs attached to module exports. , docs_decls :: UniqMap Name [HsDoc GhcRn] -- ^ Docs for declarations: functions, data types, instances, methods etc. -- A list because sometimes subsequent haddock comments can be combined into one , docs_args :: UniqMap Name (IntMap (HsDoc GhcRn)) -- ^ Docs for arguments. E.g. 
function arguments, method arguments. , docs_structure :: DocStructure , docs_named_chunks :: Map String (HsDoc GhcRn) -- ^ Map from chunk name to content. -- -- This map will be empty unless we have an explicit export list from which -- we can reference the chunks. , docs_haddock_opts :: Maybe String -- ^ Haddock options from @OPTIONS_HADDOCK@ or from @-haddock-opts@. , docs_language :: Maybe Language -- ^ The 'Language' used in the module, for example 'Haskell2010'. , docs_extensions :: EnumSet Extension -- ^ The full set of language extensions used in the module. } instance NFData Docs where rnf (Docs mod_hdr exps decls args structure named_chunks haddock_opts language extentions) = rnf mod_hdr `seq` rnf exps `seq` rnf decls `seq` rnf args `seq` rnf structure `seq` rnf named_chunks `seq` rnf haddock_opts `seq` rnf language `seq` rnf extentions `seq` () instance Binary Docs where put_ bh docs = do put_ bh (docs_mod_hdr docs) put_ bh (sortBy (\a b -> (fst a) `stableNameCmp` fst b) $ nonDetUniqMapToList $ docs_exports docs) put_ bh (sortBy (\a b -> (fst a) `stableNameCmp` fst b) $ nonDetUniqMapToList $ docs_decls docs) put_ bh (sortBy (\a b -> (fst a) `stableNameCmp` fst b) $ nonDetUniqMapToList $ docs_args docs) put_ bh (docs_structure docs) put_ bh (Map.toList $ docs_named_chunks docs) put_ bh (docs_haddock_opts docs) put_ bh (docs_language docs) put_ bh (docs_extensions docs) get bh = do mod_hdr <- get bh exports <- listToUniqMap <$> get bh decls <- listToUniqMap <$> get bh args <- listToUniqMap <$> get bh structure <- get bh named_chunks <- Map.fromList <$> get bh haddock_opts <- get bh language <- get bh exts <- get bh pure Docs { docs_mod_hdr = mod_hdr , docs_exports = exports , docs_decls = decls , docs_args = args , docs_structure = structure , docs_named_chunks = named_chunks , docs_haddock_opts = haddock_opts , docs_language = language , docs_extensions = exts } instance Outputable Docs where ppr docs = vcat [ pprField (pprMaybe pprHsDocDebug) "module 
header" docs_mod_hdr , pprField (ppr . fmap pprHsDocDebug) "export docs" docs_exports , pprField (ppr . fmap (ppr . map pprHsDocDebug)) "declaration docs" docs_decls , pprField (ppr . fmap (pprIntMap ppr pprHsDocDebug)) "arg docs" docs_args , pprField (vcat . map ppr) "documentation structure" docs_structure , pprField (pprMap (doubleQuotes . text) pprHsDocDebug) "named chunks" docs_named_chunks , pprField pprMbString "haddock options" docs_haddock_opts , pprField ppr "language" docs_language , pprField (vcat . map ppr . EnumSet.toList) "language extensions" docs_extensions ] where pprField :: (a -> SDoc) -> String -> (Docs -> a) -> SDoc pprField ppr' heading lbl = text heading O.<> colon $$ nest 2 (ppr' (lbl docs)) pprMap pprKey pprVal m = vcat $ flip map (Map.toList m) $ \(k, v) -> pprKey k O.<> colon $$ nest 2 (pprVal v) pprIntMap pprKey pprVal m = vcat $ flip map (IntMap.toList m) $ \(k, v) -> pprKey k O.<> colon $$ nest 2 (pprVal v) pprMbString Nothing = empty pprMbString (Just s) = text s pprMaybe ppr' = \case Nothing -> text "Nothing" Just x -> text "Just" <+> ppr' x emptyDocs :: Docs emptyDocs = Docs { docs_mod_hdr = Nothing , docs_exports = emptyUniqMap , docs_decls = emptyUniqMap , docs_args = emptyUniqMap , docs_structure = [] , docs_named_chunks = Map.empty , docs_haddock_opts = Nothing , docs_language = Nothing , docs_extensions = EnumSet.empty } -- | Maps of docs that were added via Template Haskell's @putDoc@. data ExtractedTHDocs = ExtractedTHDocs { ethd_mod_header :: Maybe (HsDoc GhcRn) -- ^ The added module header documentation, if it exists. , ethd_decl_docs :: UniqMap Name (HsDoc GhcRn) -- ^ The documentation added to declarations. , ethd_arg_docs :: UniqMap Name (IntMap (HsDoc GhcRn)) -- ^ The documentation added to function arguments. , ethd_inst_docs :: UniqMap Name (HsDoc GhcRn) -- ^ The documentation added to class and family instances. 
} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/DocString.hs0000644000000000000000000001715307346545000020517 0ustar0000000000000000-- | An exactprintable structure for docstrings {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Hs.DocString ( LHsDocString , HsDocString(..) , HsDocStringDecorator(..) , HsDocStringChunk(..) , LHsDocStringChunk , isEmptyDocString , unpackHDSC , mkHsDocStringChunk , mkHsDocStringChunkUtf8ByteString , pprHsDocString , pprHsDocStrings , mkGeneratedHsDocString , docStringChunks , renderHsDocString , renderHsDocStrings , exactPrintHsDocString , pprWithDocString , printDecorator ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Utils.Encoding import GHC.Utils.Outputable as Outputable hiding ((<>)) import GHC.Types.SrcLoc import Control.DeepSeq import Data.ByteString (ByteString) import qualified Data.ByteString as BS import Data.Data import Data.List.NonEmpty (NonEmpty(..)) import Data.List (intercalate) type LHsDocString = Located HsDocString -- | Haskell Documentation String -- -- Rich structure to support exact printing -- The location around each chunk doesn't include the decorators data HsDocString = MultiLineDocString !HsDocStringDecorator !(NonEmpty LHsDocStringChunk) -- ^ The first chunk is preceded by "-- " and each following chunk is preceded by "--" -- Example: -- | This is a docstring for 'foo'. 
It is the line with the decorator '|' and is always included -- -- This continues that docstring and is the second element in the NonEmpty list -- foo :: a -> a | NestedDocString !HsDocStringDecorator LHsDocStringChunk -- ^ The docstring is preceded by "{-" and followed by "-}" -- The chunk contains balanced pairs of '{-' and '-}' | GeneratedDocString HsDocStringChunk -- ^ A docstring generated either internally or via TH -- Pretty printed with the '-- |' decorator -- This is because it may contain unbalanced pairs of '{-' and '-}' and -- not form a valid 'NestedDocString' deriving (Eq, Data, Show) instance Outputable HsDocString where ppr = text . renderHsDocString instance NFData HsDocString where rnf (MultiLineDocString a b) = rnf a `seq` rnf b rnf (NestedDocString a b) = rnf a `seq` rnf b rnf (GeneratedDocString a) = rnf a -- | Annotate a pretty printed thing with its doc -- The docstring comes after if is 'HsDocStringPrevious' -- Otherwise it comes before. -- Note - we convert MultiLineDocString HsDocStringPrevious to HsDocStringNext -- because we can't control if something else will be pretty printed on the same line pprWithDocString :: HsDocString -> SDoc -> SDoc pprWithDocString (MultiLineDocString HsDocStringPrevious ds) sd = pprWithDocString (MultiLineDocString HsDocStringNext ds) sd pprWithDocString doc@(NestedDocString HsDocStringPrevious _) sd = sd <+> pprHsDocString doc pprWithDocString doc sd = pprHsDocString doc $+$ sd instance Binary HsDocString where put_ bh x = case x of MultiLineDocString dec xs -> do putByte bh 0 put_ bh dec put_ bh $ BinLocated <$> xs NestedDocString dec x -> do putByte bh 1 put_ bh dec put_ bh $ BinLocated x GeneratedDocString x -> do putByte bh 2 put_ bh x get bh = do tag <- getByte bh case tag of 0 -> MultiLineDocString <$> get bh <*> (fmap unBinLocated <$> get bh) 1 -> NestedDocString <$> get bh <*> (unBinLocated <$> get bh) 2 -> GeneratedDocString <$> get bh t -> fail $ "HsDocString: invalid tag " ++ show t data 
HsDocStringDecorator = HsDocStringNext -- ^ '|' is the decorator | HsDocStringPrevious -- ^ '^' is the decorator | HsDocStringNamed !String -- ^ '$' is the decorator | HsDocStringGroup !Int -- ^ The decorator is the given number of '*'s deriving (Eq, Ord, Show, Data) instance Outputable HsDocStringDecorator where ppr = text . printDecorator instance NFData HsDocStringDecorator where rnf HsDocStringNext = () rnf HsDocStringPrevious = () rnf (HsDocStringNamed x) = rnf x rnf (HsDocStringGroup x) = rnf x printDecorator :: HsDocStringDecorator -> String printDecorator HsDocStringNext = "|" printDecorator HsDocStringPrevious = "^" printDecorator (HsDocStringNamed n) = '$':n printDecorator (HsDocStringGroup n) = replicate n '*' instance Binary HsDocStringDecorator where put_ bh x = case x of HsDocStringNext -> putByte bh 0 HsDocStringPrevious -> putByte bh 1 HsDocStringNamed n -> putByte bh 2 >> put_ bh n HsDocStringGroup n -> putByte bh 3 >> put_ bh n get bh = do tag <- getByte bh case tag of 0 -> pure HsDocStringNext 1 -> pure HsDocStringPrevious 2 -> HsDocStringNamed <$> get bh 3 -> HsDocStringGroup <$> get bh t -> fail $ "HsDocStringDecorator: invalid tag " ++ show t type LHsDocStringChunk = Located HsDocStringChunk -- | A contiguous chunk of documentation newtype HsDocStringChunk = HsDocStringChunk ByteString deriving stock (Eq,Ord,Data, Show) deriving newtype (NFData) instance Binary HsDocStringChunk where put_ bh (HsDocStringChunk bs) = put_ bh bs get bh = HsDocStringChunk <$> get bh instance Outputable HsDocStringChunk where ppr = text . unpackHDSC mkHsDocStringChunk :: String -> HsDocStringChunk mkHsDocStringChunk s = HsDocStringChunk (utf8EncodeByteString s) -- | Create a 'HsDocString' from a UTF8-encoded 'ByteString'. 
mkHsDocStringChunkUtf8ByteString :: ByteString -> HsDocStringChunk mkHsDocStringChunkUtf8ByteString = HsDocStringChunk unpackHDSC :: HsDocStringChunk -> String unpackHDSC (HsDocStringChunk bs) = utf8DecodeByteString bs nullHDSC :: HsDocStringChunk -> Bool nullHDSC (HsDocStringChunk bs) = BS.null bs mkGeneratedHsDocString :: String -> HsDocString mkGeneratedHsDocString = GeneratedDocString . mkHsDocStringChunk isEmptyDocString :: HsDocString -> Bool isEmptyDocString (MultiLineDocString _ xs) = all (nullHDSC . unLoc) xs isEmptyDocString (NestedDocString _ s) = nullHDSC $ unLoc s isEmptyDocString (GeneratedDocString x) = nullHDSC x docStringChunks :: HsDocString -> [LHsDocStringChunk] docStringChunks (MultiLineDocString _ (x:|xs)) = x:xs docStringChunks (NestedDocString _ x) = [x] docStringChunks (GeneratedDocString x) = [L (UnhelpfulSpan UnhelpfulGenerated) x] -- | Pretty print with decorators, exactly as the user wrote it pprHsDocString :: HsDocString -> SDoc pprHsDocString = text . exactPrintHsDocString pprHsDocStrings :: [HsDocString] -> SDoc pprHsDocStrings = text . intercalate "\n\n" . map exactPrintHsDocString -- | Pretty print with decorators, exactly as the user wrote it exactPrintHsDocString :: HsDocString -> String exactPrintHsDocString (MultiLineDocString dec (x :| xs)) = unlines' $ ("-- " ++ printDecorator dec ++ unpackHDSC (unLoc x)) : map (\x -> "--" ++ unpackHDSC (unLoc x)) xs exactPrintHsDocString (NestedDocString dec (L _ s)) = "{-" ++ printDecorator dec ++ unpackHDSC s ++ "-}" exactPrintHsDocString (GeneratedDocString x) = case lines (unpackHDSC x) of [] -> "" (x:xs) -> unlines' $ ( "-- |" ++ x) : map (\y -> "--"++y) xs -- | Just get the docstring, without any decorators renderHsDocString :: HsDocString -> String renderHsDocString (MultiLineDocString _ (x :| xs)) = unlines' $ map (unpackHDSC . 
unLoc) (x:xs) renderHsDocString (NestedDocString _ ds) = unpackHDSC $ unLoc ds renderHsDocString (GeneratedDocString x) = unpackHDSC x -- | Don't add a newline to a single string unlines' :: [String] -> String unlines' = intercalate "\n" -- | Just get the docstring, without any decorators -- Separates docstrings using "\n\n", which is how haddock likes to render them renderHsDocStrings :: [HsDocString] -> String renderHsDocStrings = intercalate "\n\n" . map renderHsDocString ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Dump.hs0000644000000000000000000004677607346545000017545 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE DataKinds #-} -- | Contains a debug function to dump parts of the GHC.Hs AST. It uses a syb -- traversal which falls back to displaying based on the constructor name, so -- can be used to dump anything having a @Data.Data@ instance. module GHC.Hs.Dump ( -- * Dumping ASTs showAstData, showAstDataFull, BlankSrcSpan(..), BlankEpAnnotations(..), ) where import GHC.Prelude import GHC.Hs import GHC.Core.DataCon import GHC.Data.Bag import GHC.Data.FastString import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Types.Var import GHC.Types.SourceText import GHC.Utils.Outputable import Data.Data hiding (Fixity) import qualified Data.ByteString as B import GHC.TypeLits -- | Should source spans be removed from output. data BlankSrcSpan = BlankSrcSpan | BlankSrcSpanFile | NoBlankSrcSpan deriving (Eq,Show) -- | Should EpAnnotations be removed from output. data BlankEpAnnotations = BlankEpAnnotations | NoBlankEpAnnotations deriving (Eq,Show) -- | Show the full AST as the compiler sees it. showAstDataFull :: Data a => a -> SDoc showAstDataFull = showAstData NoBlankSrcSpan NoBlankEpAnnotations -- | Show a GHC syntax tree. 
This parameterised because it is also used for -- comparing ASTs in ppr roundtripping tests, where the SrcSpan's are blanked -- out, to avoid comparing locations, only structure showAstData :: Data a => BlankSrcSpan -> BlankEpAnnotations -> a -> SDoc showAstData bs ba a0 = blankLine $$ showAstData' a0 where showAstData' :: Data a => a -> SDoc showAstData' = generic `ext1Q` list `extQ` list_epaLocation `extQ` list_epTokenOpenP `extQ` list_epTokenCloseP `extQ` string `extQ` fastString `extQ` srcSpan `extQ` realSrcSpan `extQ` annotationModule `extQ` annotationGrhsAnn `extQ` annotationAnnList `extQ` annotationAnnListWhere `extQ` annotationAnnListCommas `extQ` annotationAnnListIE `extQ` annotationEpAnnImportDecl `extQ` annotationNoEpAnns `extQ` annotationExprBracket `extQ` annotationTypedBracket `extQ` epTokenOC `extQ` epTokenCC `extQ` epTokenInstance `extQ` epTokenForall `extQ` annParen `extQ` annClassDecl `extQ` annSynDecl `extQ` annDataDefn `extQ` annFamilyDecl `extQ` annClsInstDecl `extQ` lit `extQ` litr `extQ` litt `extQ` sourceText `extQ` deltaPos `extQ` epaLocation `extQ` maybe_epaLocation `extQ` bytestring `extQ` name `extQ` occName `extQ` moduleName `extQ` var `extQ` dataCon `extQ` bagName `extQ` bagRdrName `extQ` bagVar `extQ` nameSet `ext2Q` located `extQ` srcSpanAnnA `extQ` srcSpanAnnL `extQ` srcSpanAnnP `extQ` srcSpanAnnC `extQ` srcSpanAnnN where generic :: Data a => a -> SDoc generic t = parens $ text (showConstr (toConstr t)) $$ vcat (gmapQ showAstData' t) string :: String -> SDoc string = text . normalize_newlines . show fastString :: FastString -> SDoc fastString s = braces $ text "FastString:" <+> text (normalize_newlines . show $ s) bytestring :: B.ByteString -> SDoc bytestring = text . normalize_newlines . 
show list_epaLocation :: [EpaLocation] -> SDoc list_epaLocation ls = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "[EpaLocation]" NoBlankEpAnnotations -> list ls list_epTokenOpenP :: [EpToken "("] -> SDoc list_epTokenOpenP ls = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "[EpToken \"(\"]" NoBlankEpAnnotations -> list ls list_epTokenCloseP :: [EpToken ")"] -> SDoc list_epTokenCloseP ls = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "[EpToken \"(\"]" NoBlankEpAnnotations -> list ls list [] = brackets empty list [x] = brackets (showAstData' x) list (x1 : x2 : xs) = (text "[" <> showAstData' x1) $$ go x2 xs where go y [] = text "," <> showAstData' y <> text "]" go y1 (y2 : ys) = (text "," <> showAstData' y1) $$ go y2 ys -- Eliminate word-size dependence lit :: HsLit GhcPs -> SDoc lit (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s lit (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s lit (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s lit (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s lit l = generic l litr :: HsLit GhcRn -> SDoc litr (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s litr (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s litr (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s litr (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s litr l = generic l litt :: HsLit GhcTc -> SDoc litt (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s litt (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s litt (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s litt (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s litt l = generic l numericLit :: String -> Integer -> SourceText -> SDoc numericLit tag x s = braces $ hsep [ text tag , generic x , generic s ] sourceText :: SourceText -> SDoc sourceText NoSourceText = case bs of BlankSrcSpan -> parens $ text "SourceText" <+> text "blanked" _ -> parens $ text "NoSourceText" sourceText (SourceText src) = case bs of BlankSrcSpan -> parens $ text 
"SourceText" <+> text "blanked" _ -> parens $ text "SourceText" <+> ftext src epaLocation :: EpaLocation -> SDoc epaLocation (EpaSpan s) = parens $ text "EpaSpan" <+> srcSpan s epaLocation (EpaDelta s d cs) = case ba of NoBlankEpAnnotations -> parens $ text "EpaDelta" <+> srcSpan s <+> deltaPos d <+> showAstData' cs BlankEpAnnotations -> parens $ text "EpaDelta" <+> srcSpan s <+> deltaPos d <+> text "blanked" maybe_epaLocation :: Maybe EpaLocation -> SDoc maybe_epaLocation ml = case ba of NoBlankEpAnnotations -> case ml of Nothing -> parens $ text "Nothing" Just l -> parens (text "Just" $$ showAstData' l) BlankEpAnnotations -> parens $ text "Maybe EpaLocation:" <+> text "blanked" deltaPos :: DeltaPos -> SDoc deltaPos (SameLine c) = parens $ text "SameLine" <+> ppr c deltaPos (DifferentLine l c) = parens $ text "DifferentLine" <+> ppr l <+> ppr c name :: Name -> SDoc name nm = braces $ text "Name:" <+> ppr nm occName n = braces $ text "OccName:" <+> ftext (occNameFS n) moduleName :: ModuleName -> SDoc moduleName m = braces $ text "ModuleName:" <+> ppr m srcSpan :: SrcSpan -> SDoc srcSpan ss = case bs of BlankSrcSpan -> text "{ ss }" NoBlankSrcSpan -> braces $ char ' ' <> (ppr ss) <> char ' ' BlankSrcSpanFile -> braces $ char ' ' <> (pprUserSpan False ss) <> char ' ' realSrcSpan :: RealSrcSpan -> SDoc realSrcSpan ss = case bs of BlankSrcSpan -> text "{ ss }" NoBlankSrcSpan -> braces $ char ' ' <> (ppr ss) <> char ' ' BlankSrcSpanFile -> braces $ char ' ' <> (pprUserRealSpan False ss) <> char ' ' annParen :: AnnParen -> SDoc annParen ap = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnParen" NoBlankEpAnnotations -> parens (case ap of (AnnParens o c) -> text "AnnParens" $$ vcat [showAstData' o, showAstData' c] (AnnParensHash o c) -> text "AnnParensHash" $$ vcat [showAstData' o, showAstData' c] (AnnParensSquare o c) -> text "AnnParensSquare" $$ vcat [showAstData' o, showAstData' c] ) annClassDecl :: AnnClassDecl -> SDoc annClassDecl (AnnClassDecl 
c ops cps v w oc cc s) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnClassDecl" NoBlankEpAnnotations -> parens $ text "AnnClassDecl" $$ vcat [showAstData' c, showAstData' ops, showAstData' cps, showAstData' v, showAstData' w, showAstData' oc, showAstData' cc, showAstData' s] annSynDecl :: AnnSynDecl -> SDoc annSynDecl (AnnSynDecl ops cps t e) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnSynDecl" NoBlankEpAnnotations -> parens $ text "AnnSynDecl" $$ vcat [showAstData' ops, showAstData' cps, showAstData' t, showAstData' e] annDataDefn :: AnnDataDefn -> SDoc annDataDefn (AnnDataDefn a b c d e f g h i j k) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnDataDefn" NoBlankEpAnnotations -> parens $ text "AnnDataDefn" $$ vcat [showAstData' a, showAstData' b, showAstData' c, showAstData' d, showAstData' e, showAstData' f, showAstData' g, showAstData' h, showAstData' i, showAstData' j, showAstData' k] annFamilyDecl :: AnnFamilyDecl -> SDoc annFamilyDecl (AnnFamilyDecl a b c d e f g h i j k l) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnFamilyDecl" NoBlankEpAnnotations -> parens $ text "AnnFamilyDecl" $$ vcat [showAstData' a, showAstData' b, showAstData' c, showAstData' d, showAstData' e, showAstData' f, showAstData' g, showAstData' h, showAstData' i, showAstData' j, showAstData' k, showAstData' l] annClsInstDecl :: AnnClsInstDecl -> SDoc annClsInstDecl (AnnClsInstDecl a b c d e) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AnnFamilyDecl" NoBlankEpAnnotations -> parens $ text "AnnClsInstDecl" $$ vcat [showAstData' a, showAstData' b, showAstData' c, showAstData' d, showAstData' e] annotationExprBracket :: BracketAnn (EpUniToken "[|" "⟦") (EpToken "[e|") -> SDoc annotationExprBracket = annotationBracket annotationTypedBracket :: BracketAnn (EpToken "[||") (EpToken "[e||") -> SDoc annotationTypedBracket = annotationBracket annotationBracket 
::forall n h .(Data n, Data h, Typeable n, Typeable h) => BracketAnn n h -> SDoc annotationBracket a = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "BracketAnn" NoBlankEpAnnotations -> parens $ case a of BracketNoE t -> text "BracketNoE" <+> showAstData' t BracketHasE t -> text "BracketHasE" <+> showAstData' t epTokenOC :: EpToken "{" -> SDoc epTokenOC = epToken' epTokenCC :: EpToken "}" -> SDoc epTokenCC = epToken' epTokenInstance :: EpToken "instance" -> SDoc epTokenInstance = epToken' epTokenForall :: TokForall -> SDoc epTokenForall = epUniToken' epToken' :: KnownSymbol sym => EpToken sym -> SDoc epToken' (EpTok s) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "EpToken" NoBlankEpAnnotations -> parens $ text "EpTok" <+> epaLocation s epToken' NoEpTok = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "EpToken" NoBlankEpAnnotations -> parens $ text "NoEpTok" epUniToken' :: EpUniToken sym1 sym2 -> SDoc epUniToken' (EpUniTok s f) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "EpUniToken" NoBlankEpAnnotations -> parens $ text "EpUniTok" <+> epaLocation s <+> ppr f epUniToken' NoEpUniTok = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "EpUniToken" NoBlankEpAnnotations -> parens $ text "NoEpUniTok" var :: Var -> SDoc var v = braces $ text "Var:" <+> ppr v dataCon :: DataCon -> SDoc dataCon c = braces $ text "DataCon:" <+> ppr c bagRdrName:: Bag (LocatedA (HsBind GhcPs)) -> SDoc bagRdrName bg = braces $ text "Bag(LocatedA (HsBind GhcPs)):" $$ (list . bagToList $ bg) bagName :: Bag (LocatedA (HsBind GhcRn)) -> SDoc bagName bg = braces $ text "Bag(LocatedA (HsBind Name)):" $$ (list . bagToList $ bg) bagVar :: Bag (LocatedA (HsBind GhcTc)) -> SDoc bagVar bg = braces $ text "Bag(LocatedA (HsBind Var)):" $$ (list . bagToList $ bg) nameSet ns = braces $ text "NameSet:" $$ (list . 
nameSetElemsStable $ ns) located :: (Data a, Data b) => GenLocated a b -> SDoc located (L ss a) = parens (text "L" $$ vcat [showAstData' ss, showAstData' a]) -- ------------------------- annotationModule :: EpAnn AnnsModule -> SDoc annotationModule = annotation' (text "EpAnn AnnsModule") annotationGrhsAnn :: EpAnn GrhsAnn -> SDoc annotationGrhsAnn = annotation' (text "EpAnn GrhsAnn") annotationAnnList :: EpAnn (AnnList ()) -> SDoc annotationAnnList = annotation' (text "EpAnn (AnnList ())") annotationAnnListWhere :: EpAnn (AnnList (EpToken "where")) -> SDoc annotationAnnListWhere = annotation' (text "EpAnn (AnnList (EpToken \"where\"))") annotationAnnListCommas :: EpAnn (AnnList [EpToken ","]) -> SDoc annotationAnnListCommas = annotation' (text "EpAnn (AnnList [EpToken \",\"])") annotationAnnListIE :: EpAnn (AnnList (EpToken "hiding", [EpToken ","])) -> SDoc annotationAnnListIE = annotation' (text "EpAnn (AnnList (EpToken \"hiding\", [EpToken \",\"]))") annotationEpAnnImportDecl :: EpAnn EpAnnImportDecl -> SDoc annotationEpAnnImportDecl = annotation' (text "EpAnn EpAnnImportDecl") annotationNoEpAnns :: EpAnn NoEpAnns -> SDoc annotationNoEpAnns = annotation' (text "EpAnn NoEpAnns") annotation' :: forall a .(Data a, Typeable a) => SDoc -> EpAnn a -> SDoc annotation' tag anns = case ba of BlankEpAnnotations -> parens (text "blanked:" <+> tag) NoBlankEpAnnotations -> parens $ text (showConstr (toConstr anns)) $$ vcat (gmapQ showAstData' anns) -- ------------------------- srcSpanAnnA :: EpAnn AnnListItem -> SDoc srcSpanAnnA = locatedAnn'' (text "SrcSpanAnnA") srcSpanAnnL :: EpAnn (AnnList ()) -> SDoc srcSpanAnnL = locatedAnn'' (text "SrcSpanAnnL") srcSpanAnnP :: EpAnn AnnPragma -> SDoc srcSpanAnnP = locatedAnn'' (text "SrcSpanAnnP") srcSpanAnnC :: EpAnn AnnContext -> SDoc srcSpanAnnC = locatedAnn'' (text "SrcSpanAnnC") srcSpanAnnN :: EpAnn NameAnn -> SDoc srcSpanAnnN = locatedAnn'' (text "SrcSpanAnnN") locatedAnn'' :: forall a. 
(Typeable a, Data a) => SDoc -> EpAnn a -> SDoc locatedAnn'' tag ss = parens $ case cast ss of Just (ann :: EpAnn a) -> case ba of BlankEpAnnotations -> parens (text "blanked:" <+> tag) NoBlankEpAnnotations -> text (showConstr (toConstr ann)) $$ vcat (gmapQ showAstData' ann) Nothing -> text "locatedAnn:unmatched" <+> tag <+> (parens $ text (showConstr (toConstr ss))) normalize_newlines :: String -> String normalize_newlines ('\\':'r':'\\':'n':xs) = '\\':'n':normalize_newlines xs normalize_newlines (x:xs) = x:normalize_newlines xs normalize_newlines [] = [] {- ************************************************************************ * * * Copied from syb * * ************************************************************************ -} -- | The type constructor for queries newtype Q q x = Q { unQ :: x -> q } -- | Extend a generic query by a type-specific case extQ :: ( Typeable a , Typeable b ) => (a -> q) -> (b -> q) -> a -> q extQ f g a = maybe (f a) g (cast a) -- | Type extension of queries for type constructors ext1Q :: (Data d, Typeable t) => (d -> q) -> (forall e. Data e => t e -> q) -> d -> q ext1Q def ext = unQ ((Q def) `ext1` (Q ext)) -- | Type extension of queries for type constructors ext2Q :: (Data d, Typeable t) => (d -> q) -> (forall d1 d2. (Data d1, Data d2) => t d1 d2 -> q) -> d -> q ext2Q def ext = unQ ((Q def) `ext2` (Q ext)) -- | Flexible type extension ext1 :: (Data a, Typeable t) => c a -> (forall d. Data d => c (t d)) -> c a ext1 def ext = maybe def id (dataCast1 ext) -- | Flexible type extension ext2 :: (Data a, Typeable t) => c a -> (forall d1 d2. 
(Data d1, Data d2) => c (t d1 d2)) -> c a ext2 def ext = maybe def id (dataCast2 ext) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Expr.hs0000644000000000000000000031221707346545000017540 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Abstract Haskell syntax for expressions. module GHC.Hs.Expr ( module Language.Haskell.Syntax.Expr , module GHC.Hs.Expr ) where import Language.Haskell.Syntax.Expr -- friends: import GHC.Prelude import GHC.Hs.Basic() -- import instances import GHC.Hs.Decls() -- import instances import GHC.Hs.Pat import GHC.Hs.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import GHC.Hs.Extension import GHC.Hs.Type import GHC.Hs.Binds import GHC.Parser.Annotation -- others: import GHC.Tc.Types.Evidence import GHC.Types.Id.Info ( RecSelParent ) import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Name.Set import GHC.Types.Basic import GHC.Types.Fixity import GHC.Types.SourceText import GHC.Types.SrcLoc import GHC.Types.Tickish (CoreTickish) import GHC.Types.Unique.Set (UniqSet) import GHC.Core.ConLike ( conLikeName, ConLike ) import GHC.Unit.Module (ModuleName) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Core.Type import 
GHC.Builtin.Types (mkTupleStr) import GHC.Tc.Utils.TcType (TcType, TcTyVar) import {-# SOURCE #-} GHC.Tc.Types.LclEnv (TcLclEnv) import GHCi.RemoteTypes ( ForeignRef ) import qualified GHC.Internal.TH.Syntax as TH (Q) -- libraries: import Data.Data hiding (Fixity(..)) import qualified Data.Data as Data (Fixity(..)) import qualified Data.Kind import Data.Maybe (isJust) import Data.Foldable ( toList ) import Data.List.NonEmpty (NonEmpty) import Data.Void (Void) {- ********************************************************************* * * Expressions proper * * ********************************************************************* -} -- | Post-Type checking Expression -- -- PostTcExpr is an evidence expression attached to the syntax tree by the -- type checker (c.f. postTcType). type PostTcExpr = HsExpr GhcTc -- | Post-Type checking Table -- -- We use a PostTcTable where there are a bunch of pieces of evidence, more -- than is convenient to keep individually. type PostTcTable = [(Name, PostTcExpr)] ------------------------- -- Defining SyntaxExpr in two stages allows for better type inference, because -- we can declare SyntaxExprGhc to be injective (and closed). Without injectivity, -- noSyntaxExpr would be ambiguous. type instance SyntaxExpr (GhcPass p) = SyntaxExprGhc p type family SyntaxExprGhc (p :: Pass) = (r :: Data.Kind.Type) | r -> p where SyntaxExprGhc 'Parsed = NoExtField SyntaxExprGhc 'Renamed = SyntaxExprRn SyntaxExprGhc 'Typechecked = SyntaxExprTc -- | The function to use in rebindable syntax. See Note [NoSyntaxExpr]. data SyntaxExprRn = SyntaxExprRn (HsExpr GhcRn) -- Why is the payload not just a Name? -- See Note [Monad fail : Rebindable syntax, overloaded strings] in "GHC.Rename.Expr" | NoSyntaxExprRn -- | An expression with wrappers, used for rebindable syntax -- -- This should desugar to -- -- > syn_res_wrap $ syn_expr (syn_arg_wraps[0] arg0) -- > (syn_arg_wraps[1] arg1) ... -- -- where the actual arguments come from elsewhere in the AST. 
data SyntaxExprTc = SyntaxExprTc { syn_expr :: HsExpr GhcTc , syn_arg_wraps :: [HsWrapper] , syn_res_wrap :: HsWrapper } | NoSyntaxExprTc -- See Note [NoSyntaxExpr] -- | This is used for rebindable-syntax pieces that are too polymorphic -- for tcSyntaxOp (trS_fmap and the mzip in ParStmt) noExpr :: HsExpr (GhcPass p) noExpr = HsLit noExtField (HsString (SourceText $ fsLit "noExpr") (fsLit "noExpr")) noSyntaxExpr :: forall p. IsPass p => SyntaxExpr (GhcPass p) -- Before renaming, and sometimes after -- See Note [NoSyntaxExpr] noSyntaxExpr = case ghcPass @p of GhcPs -> noExtField GhcRn -> NoSyntaxExprRn GhcTc -> NoSyntaxExprTc -- | Make a 'SyntaxExpr GhcRn' from an expression -- Used only in getMonadFailOp. -- See Note [Monad fail : Rebindable syntax, overloaded strings] in "GHC.Rename.Expr" mkSyntaxExpr :: HsExpr GhcRn -> SyntaxExprRn mkSyntaxExpr = SyntaxExprRn -- | Make a 'SyntaxExpr' from a 'Name' (the "rn" is because this is used in the -- renamer). mkRnSyntaxExpr :: Name -> SyntaxExprRn mkRnSyntaxExpr name = SyntaxExprRn $ HsVar noExtField $ noLocA name instance Outputable SyntaxExprRn where ppr (SyntaxExprRn expr) = ppr expr ppr NoSyntaxExprRn = text "" instance Outputable SyntaxExprTc where ppr (SyntaxExprTc { syn_expr = expr , syn_arg_wraps = arg_wraps , syn_res_wrap = res_wrap }) = sdocOption sdocPrintExplicitCoercions $ \print_co -> getPprDebug $ \debug -> if debug || print_co then ppr expr <> braces (pprWithCommas ppr arg_wraps) <> braces (ppr res_wrap) else ppr expr ppr NoSyntaxExprTc = text "" -- | HsWrap appears only in typechecker output data HsWrap hs_syn = HsWrap HsWrapper -- the wrapper (hs_syn GhcTc) -- the thing that is wrapped deriving instance (Data (hs_syn GhcTc), Typeable hs_syn) => Data (HsWrap hs_syn) -- --------------------------------------------------------------------- data HsBracketTc = HsBracketTc { hsb_quote :: HsQuote GhcRn -- See Note [The life cycle of a TH quotation] , hsb_ty :: Type , hsb_wrap :: Maybe QuoteWrapper -- The 
wrapper to apply type and dictionary argument to the quote. , hsb_splices :: [PendingTcSplice] -- Output of the type checker is the *original* -- renamed expression, plus -- _typechecked_ splices to be -- pasted back in by the desugarer } type instance XTypedBracket GhcPs = (BracketAnn (EpToken "[||") (EpToken "[e||"), EpToken "||]") type instance XTypedBracket GhcRn = NoExtField type instance XTypedBracket GhcTc = HsBracketTc type instance XUntypedBracket GhcPs = NoExtField type instance XUntypedBracket GhcRn = [PendingRnSplice] -- See Note [Pending Splices] -- Output of the renamer is the *original* renamed expression, -- plus _renamed_ splices to be type checked type instance XUntypedBracket GhcTc = HsBracketTc data BracketAnn noE hasE = BracketNoE noE | BracketHasE hasE deriving Data instance (NoAnn n, NoAnn h) => NoAnn (BracketAnn n h) where noAnn = BracketNoE noAnn -- --------------------------------------------------------------------- -- API Annotations types data EpAnnHsCase = EpAnnHsCase { hsCaseAnnCase :: EpToken "case" , hsCaseAnnOf :: EpToken "of" } deriving Data instance NoAnn EpAnnHsCase where noAnn = EpAnnHsCase noAnn noAnn data EpAnnLam = EpAnnLam { epl_lambda :: EpToken "\\" -- ^ Location of '\' keyword , epl_case :: Maybe EpaLocation -- ^ Location of 'case' or -- 'cases' keyword, depending -- on related 'HsLamVariant'. } deriving Data instance NoAnn EpAnnLam where noAnn = EpAnnLam noAnn noAnn data EpAnnUnboundVar = EpAnnUnboundVar { hsUnboundBackquotes :: (EpToken "`", EpToken "`") , hsUnboundHole :: EpToken "_" } deriving Data -- Record selectors at parse time are HsVar; they convert to HsRecSel -- on renaming. 
type instance XRecSel GhcPs = DataConCantHappen type instance XRecSel GhcRn = NoExtField type instance XRecSel GhcTc = NoExtField -- OverLabel not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XOverLabel GhcPs = SourceText type instance XOverLabel GhcRn = SourceText type instance XOverLabel GhcTc = DataConCantHappen -- --------------------------------------------------------------------- type instance XVar (GhcPass _) = NoExtField type instance XUnboundVar GhcPs = Maybe EpAnnUnboundVar type instance XUnboundVar GhcRn = NoExtField type instance XUnboundVar GhcTc = HoleExprRef -- We really don't need the whole HoleExprRef; just the IORef EvTerm -- would be enough. But then deriving a Data instance becomes impossible. -- Much, much easier just to define HoleExprRef with a Data instance and -- store the whole structure. type instance XIPVar GhcPs = NoExtField type instance XIPVar GhcRn = NoExtField type instance XIPVar GhcTc = DataConCantHappen type instance XOverLitE (GhcPass _) = NoExtField type instance XLitE (GhcPass _) = NoExtField type instance XLam (GhcPass _) = EpAnnLam type instance XApp (GhcPass _) = NoExtField type instance XAppTypeE GhcPs = EpToken "@" type instance XAppTypeE GhcRn = NoExtField type instance XAppTypeE GhcTc = Type -- OpApp not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XOpApp GhcPs = NoExtField type instance XOpApp GhcRn = Fixity type instance XOpApp GhcTc = DataConCantHappen -- SectionL, SectionR not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XSectionL GhcPs = NoExtField type instance XSectionR GhcPs = NoExtField type instance XSectionL GhcRn = NoExtField type instance XSectionR GhcRn = NoExtField type instance XSectionL GhcTc = DataConCantHappen type instance XSectionR GhcTc = DataConCantHappen type instance XNegApp GhcPs = EpToken "-" 
type instance XNegApp GhcRn = NoExtField type instance XNegApp GhcTc = NoExtField type instance XPar GhcPs = (EpToken "(", EpToken ")") type instance XPar GhcRn = NoExtField type instance XPar GhcTc = NoExtField type instance XExplicitTuple GhcPs = (EpaLocation, EpaLocation) type instance XExplicitTuple GhcRn = NoExtField type instance XExplicitTuple GhcTc = NoExtField type instance XExplicitSum GhcPs = AnnExplicitSum type instance XExplicitSum GhcRn = NoExtField type instance XExplicitSum GhcTc = [Type] type instance XCase GhcPs = EpAnnHsCase type instance XCase GhcRn = HsMatchContextRn type instance XCase GhcTc = HsMatchContextRn type instance XIf GhcPs = AnnsIf type instance XIf GhcRn = NoExtField type instance XIf GhcTc = NoExtField type instance XMultiIf GhcPs = (EpToken "if", EpToken "{", EpToken "}") type instance XMultiIf GhcRn = NoExtField type instance XMultiIf GhcTc = Type type instance XLet GhcPs = (EpToken "let", EpToken "in") type instance XLet GhcRn = NoExtField type instance XLet GhcTc = NoExtField type instance XDo GhcPs = AnnList EpaLocation type instance XDo GhcRn = NoExtField type instance XDo GhcTc = Type type instance XExplicitList GhcPs = AnnList () type instance XExplicitList GhcRn = NoExtField type instance XExplicitList GhcTc = Type -- GhcPs: ExplicitList includes all source-level -- list literals, including overloaded ones -- GhcRn and GhcTc: ExplicitList used only for list literals -- that denote Haskell's built-in lists. 
Overloaded lists -- have been expanded away in the renamer -- See Note [Handling overloaded and rebindable constructs] -- in GHC.Rename.Expr type instance XRecordCon GhcPs = (Maybe (EpToken "{"), Maybe (EpToken "}")) type instance XRecordCon GhcRn = NoExtField type instance XRecordCon GhcTc = PostTcExpr -- Instantiated constructor function type instance XRecordUpd GhcPs = (Maybe (EpToken "{"), Maybe (EpToken "}")) type instance XRecordUpd GhcRn = NoExtField type instance XRecordUpd GhcTc = DataConCantHappen -- We desugar record updates in the typechecker. -- See [Handling overloaded and rebindable constructs], -- and [Record Updates] in GHC.Tc.Gen.Expr. -- | Information about the parent of a record update: -- -- - the parent type constructor or pattern synonym, -- - the relevant con-likes, -- - the field labels. data family HsRecUpdParent x data instance HsRecUpdParent GhcPs data instance HsRecUpdParent GhcRn = RnRecUpdParent { rnRecUpdLabels :: NonEmpty FieldGlobalRdrElt , rnRecUpdCons :: UniqSet ConLikeName } data instance HsRecUpdParent GhcTc = TcRecUpdParent { tcRecUpdParent :: RecSelParent , tcRecUpdLabels :: NonEmpty FieldGlobalRdrElt , tcRecUpdCons :: UniqSet ConLike } type instance XLHsRecUpdLabels GhcPs = NoExtField type instance XLHsRecUpdLabels GhcRn = NonEmpty (HsRecUpdParent GhcRn) -- Possible parents for the record update. type instance XLHsRecUpdLabels GhcTc = DataConCantHappen type instance XLHsOLRecUpdLabels p = NoExtField type instance XGetField GhcPs = NoExtField type instance XGetField GhcRn = NoExtField type instance XGetField GhcTc = DataConCantHappen -- HsGetField is eliminated by the renamer. See [Handling overloaded -- and rebindable constructs]. type instance XProjection GhcPs = AnnProjection type instance XProjection GhcRn = NoExtField type instance XProjection GhcTc = DataConCantHappen -- HsProjection is eliminated by the renamer. See [Handling overloaded -- and rebindable constructs]. 
type instance XExprWithTySig GhcPs = TokDcolon type instance XExprWithTySig GhcRn = NoExtField type instance XExprWithTySig GhcTc = NoExtField type instance XArithSeq GhcPs = AnnArithSeq type instance XArithSeq GhcRn = NoExtField type instance XArithSeq GhcTc = PostTcExpr type instance XProc (GhcPass _) = (EpToken "proc", TokRarrow) type instance XStatic GhcPs = EpToken "static" type instance XStatic GhcRn = NameSet type instance XStatic GhcTc = (NameSet, Type) -- Free variables and type of expression, this is stored for convenience as wiring in -- StaticPtr is a bit tricky (see #20150) type instance XEmbTy GhcPs = EpToken "type" type instance XEmbTy GhcRn = NoExtField type instance XEmbTy GhcTc = DataConCantHappen -- A free-standing HsEmbTy is an error. -- Valid usages are immediately desugared into Type. type instance XForAll GhcPs = NoExtField type instance XForAll GhcRn = NoExtField type instance XForAll GhcTc = DataConCantHappen type instance XQual GhcPs = NoExtField type instance XQual GhcRn = NoExtField type instance XQual GhcTc = DataConCantHappen type instance XFunArr GhcPs = NoExtField type instance XFunArr GhcRn = NoExtField type instance XFunArr GhcTc = DataConCantHappen type instance XPragE (GhcPass _) = NoExtField type instance XFunRhs = AnnFunRhs type instance Anno [LocatedA ((StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (body (GhcPass pr)))))] = SrcSpanAnnLW type instance Anno (StmtLR GhcRn GhcRn (LocatedA (body GhcRn))) = SrcSpanAnnA arrowToHsExpr :: HsArrowOf (LocatedA (HsExpr GhcRn)) GhcRn -> LocatedA (HsExpr GhcRn) arrowToHsExpr = expandHsArrow (HsVar noExtField) data AnnExplicitSum = AnnExplicitSum { aesOpen :: EpaLocation, aesBarsBefore :: [EpToken "|"], aesBarsAfter :: [EpToken "|"], aesClose :: EpaLocation } deriving Data instance NoAnn AnnExplicitSum where noAnn = AnnExplicitSum noAnn noAnn noAnn noAnn data AnnFieldLabel = AnnFieldLabel { afDot :: Maybe (EpToken ".") } deriving Data instance NoAnn AnnFieldLabel where noAnn = AnnFieldLabel 
Nothing data AnnProjection = AnnProjection { apOpen :: EpToken "(", apClose :: EpToken ")" } deriving Data instance NoAnn AnnProjection where noAnn = AnnProjection noAnn noAnn data AnnArithSeq = AnnArithSeq { aas_open :: EpToken "[", aas_comma :: Maybe (EpToken ","), aas_dotdot :: EpToken "..", aas_close :: EpToken "]" } deriving Data instance NoAnn AnnArithSeq where noAnn = AnnArithSeq noAnn noAnn noAnn noAnn data AnnsIf = AnnsIf { aiIf :: EpToken "if", aiThen :: EpToken "then", aiElse :: EpToken "else", aiThenSemi :: Maybe (EpToken ";"), aiElseSemi :: Maybe (EpToken ";") } deriving Data instance NoAnn AnnsIf where noAnn = AnnsIf noAnn noAnn noAnn Nothing Nothing data AnnFunRhs = AnnFunRhs { afr_strict :: EpToken "!", afr_opens :: [EpToken "("], afr_closes :: [EpToken ")"] } deriving Data instance NoAnn AnnFunRhs where noAnn = AnnFunRhs noAnn noAnn noAnn -- --------------------------------------------------------------------- type instance XSCC (GhcPass _) = (AnnPragma, SourceText) type instance XXPragE (GhcPass _) = DataConCantHappen type instance XCDotFieldOcc (GhcPass _) = AnnFieldLabel type instance XXDotFieldOcc (GhcPass _) = DataConCantHappen type instance XPresent (GhcPass _) = NoExtField type instance XMissing GhcPs = EpAnn Bool -- True for empty last comma type instance XMissing GhcRn = NoExtField type instance XMissing GhcTc = Scaled Type type instance XXTupArg (GhcPass _) = DataConCantHappen tupArgPresent :: HsTupArg (GhcPass p) -> Bool tupArgPresent (Present {}) = True tupArgPresent (Missing {}) = False tupArgPresent_maybe :: HsTupArg (GhcPass p) -> Maybe (LHsExpr (GhcPass p)) tupArgPresent_maybe (Present _ e) = Just e tupArgPresent_maybe (Missing {}) = Nothing tupArgsPresent_maybe :: [HsTupArg (GhcPass p)] -> Maybe [LHsExpr (GhcPass p)] tupArgsPresent_maybe = traverse tupArgPresent_maybe {- ********************************************************************* * * XXExpr: the extension constructor of HsExpr * * 
********************************************************************* -} type instance XXExpr GhcPs = DataConCantHappen type instance XXExpr GhcRn = XXExprGhcRn type instance XXExpr GhcTc = XXExprGhcTc -- XXExprGhcRn: see Note [Rebindable syntax and XXExprGhcRn] below {- ********************************************************************* * * Generating code for ExpandedThingRn See Note [Handling overloaded and rebindable constructs] * * ********************************************************************* -} -- | The different source constructs that we use to instantiate the "original" field -- in an `XXExprGhcRn original expansion` data HsThingRn = OrigExpr (HsExpr GhcRn) | OrigStmt (ExprLStmt GhcRn) | OrigPat (LPat GhcRn) isHsThingRnExpr, isHsThingRnStmt, isHsThingRnPat :: HsThingRn -> Bool isHsThingRnExpr (OrigExpr{}) = True isHsThingRnExpr _ = False isHsThingRnStmt (OrigStmt{}) = True isHsThingRnStmt _ = False isHsThingRnPat (OrigPat{}) = True isHsThingRnPat _ = False data XXExprGhcRn = ExpandedThingRn { xrn_orig :: HsThingRn -- The original source thing , xrn_expanded :: HsExpr GhcRn } -- The compiler generated expanded thing | PopErrCtxt -- A hint for typechecker to pop {-# UNPACK #-} !(LHsExpr GhcRn) -- the top of the error context stack -- Does not presist post renaming phase -- See Part 3. 
-- of Note [Expanding HsDo with XXExprGhcRn] -- in `GHC.Tc.Gen.Do`

  | HsRecSelRn  (FieldOcc GhcRn)   -- ^ Variable pointing to record selector
     -- See Note [Non-overloaded record field selectors] and
     -- Note [Record selectors in the AST]

-- | Wrap a located expression with a `PopErrCtxt`
mkPopErrCtxtExpr :: LHsExpr GhcRn -> HsExpr GhcRn
mkPopErrCtxtExpr a = XExpr (PopErrCtxt a)

-- | Wrap a located expression with a PopSrcExpr with an appropriate location
mkPopErrCtxtExprAt :: SrcSpanAnnA -> LHsExpr GhcRn -> LHsExpr GhcRn
mkPopErrCtxtExprAt loc a = L loc $ mkPopErrCtxtExpr a

-- | Build an expression using the extension constructor `XExpr`,
--   and the two components of the expansion: original expression and
--   expanded expressions.
mkExpandedExpr
  :: HsExpr GhcRn        -- ^ source expression
  -> HsExpr GhcRn        -- ^ expanded expression
  -> HsExpr GhcRn        -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedExpr oExpr eExpr = XExpr (ExpandedThingRn (OrigExpr oExpr) eExpr)

-- | Build an expression using the extension constructor `XExpr`,
--   and the two components of the expansion: original do stmt and
--   expanded expression
mkExpandedStmt
  :: ExprLStmt GhcRn     -- ^ source statement
  -> HsExpr GhcRn        -- ^ expanded expression
  -> HsExpr GhcRn        -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedStmt oStmt eExpr = XExpr (ExpandedThingRn (OrigStmt oStmt) eExpr)

-- | Wrap an expanded pattern expression together with its original pattern,
--   analogous to 'mkExpandedExpr' / 'mkExpandedStmt'.
mkExpandedPatRn
  :: LPat GhcRn          -- ^ source pattern
  -> HsExpr GhcRn        -- ^ expanded expression
  -> HsExpr GhcRn        -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedPatRn oPat eExpr = XExpr (ExpandedThingRn (OrigPat oPat) eExpr)

-- | Build an expression using the extension constructor `XExpr`,
--   and the two components of the expansion: original do stmt and
--   expanded expression, associated with a provided location
mkExpandedStmtAt
  :: SrcSpanAnnA         -- ^ Location for the expansion expression
  -> ExprLStmt GhcRn     -- ^ source statement
  -> HsExpr GhcRn        -- ^ expanded expression
  -> LHsExpr GhcRn       -- ^ suitably wrapped located 'XXExprGhcRn'
mkExpandedStmtAt loc oStmt eExpr = L loc $ mkExpandedStmt oStmt eExpr

-- | Wrap the expanded version of the expression with a pop.
mkExpandedStmtPopAt
  :: SrcSpanAnnA         -- ^ Location for the expansion statement
  -> ExprLStmt GhcRn     -- ^ source statement
  -> HsExpr GhcRn        -- ^ expanded expression
  -> LHsExpr GhcRn       -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedStmtPopAt loc oStmt eExpr = mkPopErrCtxtExprAt loc $ mkExpandedStmtAt loc oStmt eExpr

-- | Extension constructors for the typechecked ('GhcTc') expression AST.
data XXExprGhcTc
  = WrapExpr        -- Type and evidence application and abstractions
      HsWrapper (HsExpr GhcTc)

  | ExpandedThingTc                        -- See Note [Rebindable syntax and XXExprGhcRn]
                                           -- See Note [Expanding HsDo with XXExprGhcRn] in `GHC.Tc.Gen.Do`
      { xtc_orig     :: HsThingRn          -- The original user written thing
      , xtc_expanded :: HsExpr GhcTc }     -- The expanded typechecked expression

  | ConLikeTc      -- Result of typechecking a data-con
                   -- See Note [Typechecking data constructors] in
                   --     GHC.Tc.Gen.Head
                   -- The two arguments describe how to eta-expand
                   -- the data constructor when desugaring
      ConLike [TcTyVar] [Scaled TcType]

  ---------------------------------------
  -- Haskell program coverage (Hpc) Support

  | HsTick
      CoreTickish
      (LHsExpr GhcTc)                      -- sub-expression

  | HsBinTick
      Int                                  -- module-local tick number for True
      Int                                  -- module-local tick number for False
      (LHsExpr GhcTc)                      -- sub-expression

  | HsRecSelTc (FieldOcc GhcTc)  -- ^ Variable pointing to record selector
     -- See Note [Non-overloaded record field selectors] and
     -- Note [Record selectors in the AST]

-- | Build a 'XXExprGhcRn' out of an extension constructor,
--   and the two components of the expansion: original and
--   expanded typechecked expressions.
mkExpandedExprTc
  :: HsExpr GhcRn        -- ^ source expression
  -> HsExpr GhcTc        -- ^ expanded typechecked expression
  -> HsExpr GhcTc        -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedExprTc oExpr eExpr = XExpr (ExpandedThingTc (OrigExpr oExpr) eExpr)

-- | Build a 'XXExprGhcRn' out of an extension constructor.
--   The two components of the expansion are: original statement and
--   expanded typechecked expression.
mkExpandedStmtTc
  :: ExprLStmt GhcRn     -- ^ source do statement
  -> HsExpr GhcTc        -- ^ expanded typechecked expression
  -> HsExpr GhcTc        -- ^ suitably wrapped 'XXExprGhcRn'
mkExpandedStmtTc oStmt eExpr = XExpr (ExpandedThingTc (OrigStmt oStmt) eExpr)

{- *********************************************************************
*                                                                      *
            Pretty-printing expressions
*                                                                      *
********************************************************************* -}

instance (OutputableBndrId p) => Outputable (HsExpr (GhcPass p)) where
    ppr expr = pprExpr expr

-----------------------
-- pprExpr, pprLExpr, pprBinds call pprDeeper;
-- the underscore versions do not

pprLExpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc
pprLExpr (L _ e) = pprExpr e

pprExpr :: (OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc
pprExpr e | isAtomicHsExpr e || isQuietHsExpr e = ppr_expr e
          | otherwise                           = pprDeeper (ppr_expr e)

isQuietHsExpr :: HsExpr id -> Bool
-- Parentheses do display something, but it gives little info and
-- if we go deeper when we go inside them then we get ugly things
-- like (...)
isQuietHsExpr (HsPar {})     = True
-- applications don't display anything themselves
isQuietHsExpr (HsApp {})     = True
isQuietHsExpr (HsAppType {}) = True
isQuietHsExpr (OpApp {})     = True
isQuietHsExpr _              = False

pprBinds :: (OutputableBndrId idL, OutputableBndrId idR)
         => HsLocalBindsLR (GhcPass idL) (GhcPass idR) -> SDoc
pprBinds b = pprDeeper (ppr b)

-----------------------
ppr_lexpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc
ppr_lexpr e = ppr_expr (unLoc e)

ppr_expr :: forall p.
            (OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc
ppr_expr (HsVar _ (L _ v))   = pprPrefixOcc v
ppr_expr (HsUnboundVar _ uv) = pprPrefixOcc uv
ppr_expr (HsIPVar _ v)       = ppr v
ppr_expr (HsOverLabel s l) = case ghcPass @p of
               GhcPs -> helper s
               GhcRn -> helper s
               GhcTc -> dataConCantHappen s
  where
    -- Print the leading '#' and then the label, preferring the
    -- exact source text when it is available.
    helper s = char '#' <> case s of
                   NoSourceText   -> ppr l
                   SourceText src -> ftext src
ppr_expr (HsLit _ lit)     = ppr lit
ppr_expr (HsOverLit _ lit) = ppr lit
ppr_expr (HsPar _ e)       = parens (ppr_lexpr e)

ppr_expr (HsPragE _ prag e) = sep [ppr prag, ppr_lexpr e]

ppr_expr e@(HsApp {})     = ppr_apps e []
ppr_expr e@(HsAppType {}) = ppr_apps e []

ppr_expr (OpApp _ e1 op e2)
  | Just pp_op <- ppr_infix_expr (unLoc op)
  = pp_infixly pp_op
  | otherwise
  = pp_prefixly
  where
    pp_e1 = pprDebugParendExpr opPrec e1   -- In debug mode, add parens
    pp_e2 = pprDebugParendExpr opPrec e2   -- to make precedence clear

    pp_prefixly = hang (ppr op) 2 (sep [pp_e1, pp_e2])

    pp_infixly pp_op = hang pp_e1 2 (sep [pp_op, nest 2 pp_e2])

ppr_expr (NegApp _ e _) = char '-' <+> pprDebugParendExpr appPrec e

ppr_expr (SectionL _ expr op)
  | Just pp_op <- ppr_infix_expr (unLoc op)
  = pp_infixly pp_op
  | otherwise
  = pp_prefixly
  where
    pp_expr = pprDebugParendExpr opPrec expr

    pp_prefixly = hang (hsep [text " \\ x_ ->", ppr op])
                       4 (hsep [pp_expr, text "x_ )"])

    pp_infixly v = (sep [pp_expr, v])

ppr_expr (SectionR _ op expr)
  | Just pp_op <- ppr_infix_expr (unLoc op)
  = pp_infixly pp_op
  | otherwise
  = pp_prefixly
  where
    pp_expr = pprDebugParendExpr opPrec expr

    pp_prefixly = hang (hsep [text "( \\ x_ ->", ppr op, text "x_"])
                       4 (pp_expr <> rparen)

    pp_infixly v = sep [v, pp_expr]

ppr_expr (ExplicitTuple _ exprs boxity)
  -- Special-case unary boxed tuples so that they are pretty-printed as
  -- `MkSolo x`, not `(x)`
  | [Present _ expr] <- exprs
  , Boxed <- boxity
  = hsep [text (mkTupleStr Boxed dataName 1), ppr expr]
  | otherwise
  = tupleParens (boxityTupleSort boxity) (fcat (ppr_tup_args exprs))
  where
    ppr_tup_args []                 = []
    ppr_tup_args (Present _ e : es) = (ppr_lexpr e <> punc es) : ppr_tup_args es
    ppr_tup_args (Missing _   : es) = punc es : ppr_tup_args es

    punc (Present {} : _) = comma <> space
    punc (Missing {} : _) = comma
    punc (XTupArg {} : _) = comma <> space
    punc []               = empty

ppr_expr (ExplicitSum _ alt arity expr)
  = text "(#" <+> ppr_bars (alt - 1) <+> ppr expr <+> ppr_bars (arity - alt) <+> text "#)"
  where
    ppr_bars n = hsep (replicate n (char '|'))

ppr_expr (HsLam _ lam_variant matches)
  = case lam_variant of
      LamSingle -> pprMatches matches
      _         -> sep [ sep [lamCaseKeyword lam_variant]
                       , nest 2 (pprMatches matches) ]

ppr_expr (HsCase _ expr matches@(MG { mg_alts = L _ alts }))
  = sep [ sep [text "case", nest 4 (ppr expr), text "of"],
          pp_alts ]
  where
    pp_alts | null alts = text "{}"
            | otherwise = nest 2 (pprMatches matches)

ppr_expr (HsIf _ e1 e2 e3)
  = sep [hsep [text "if", nest 2 (ppr e1), text "then"],
         nest 4 (ppr e2),
         text "else",
         nest 4 (ppr e3)]

ppr_expr (HsMultiIf _ alts)
  = hang (text "if") 3 (vcat (map ppr_alt alts))
  where ppr_alt (L _ (GRHS _ guards expr)) =
          hang vbar 2 (ppr_one one_alt)
          where
            ppr_one []    = panic "ppr_exp HsMultiIf"
            ppr_one (h:t) = hang h 2 (sep t)
            one_alt = [ interpp'SP guards
                      , text "->" <+> pprDeeper (ppr expr) ]
        ppr_alt (L _ (XGRHS x)) = ppr x

-- special case: let ... in let ...
ppr_expr (HsLet _ binds expr@(L _ (HsLet _ _ _)))
  = sep [hang (text "let") 2 (hsep [pprBinds binds, text "in"]),
         ppr_lexpr expr]

ppr_expr (HsLet _ binds expr)
  = sep [hang (text "let") 2 (pprBinds binds),
         hang (text "in") 2 (ppr expr)]

ppr_expr (HsDo _ do_or_list_comp (L _ stmts)) = pprDo do_or_list_comp stmts

ppr_expr (ExplicitList _ exprs)
  = brackets (pprDeeperList fsep (punctuate comma (map ppr_lexpr exprs)))

ppr_expr (RecordCon { rcon_con = con, rcon_flds = rbinds })
  = hang pp_con 2 (ppr rbinds)
  where
    -- con :: ConLikeP (GhcPass p)
    -- so we need case analysis to know to print it
    pp_con = case ghcPass @p of
               GhcPs -> ppr con
               GhcRn -> ppr con
               GhcTc -> ppr con

ppr_expr (RecordUpd { rupd_expr = L _ aexp, rupd_flds = flds })
  = case flds of
      RegularRecUpdFields { recUpdFields = rbinds } ->
        hang (ppr aexp) 2 (braces (fsep (punctuate comma (map ppr rbinds))))
      OverloadedRecUpdFields { olRecUpdFields = pbinds } ->
        hang (ppr aexp) 2 (braces (fsep (punctuate comma (map ppr pbinds))))

ppr_expr (HsGetField { gf_expr = L _ fexp, gf_field = field })
  = ppr fexp <> dot <> ppr field

ppr_expr (HsProjection { proj_flds = flds })
  = parens (hcat (dot : (punctuate dot (map ppr $ toList flds))))

ppr_expr (ExprWithTySig _ expr sig)
  = hang (nest 2 (ppr_lexpr expr) <+> dcolon)
       4 (ppr sig)

ppr_expr (ArithSeq _ _ info) = brackets (ppr info)

ppr_expr (HsTypedSplice ext e) = case ghcPass @p of
      GhcPs -> pprTypedSplice Nothing e
      GhcRn -> pprTypedSplice (Just ext) e
      GhcTc -> pprTypedSplice Nothing e

ppr_expr (HsUntypedSplice ext s) = case ghcPass @p of
      GhcPs -> pprUntypedSplice True Nothing s
      GhcRn | HsUntypedSpliceNested n <- ext -> pprUntypedSplice True (Just n) s
      GhcRn | HsUntypedSpliceTop _ e <- ext  -> ppr e
      GhcTc -> dataConCantHappen ext

ppr_expr (HsTypedBracket b e)
  = case ghcPass @p of
      GhcPs -> thTyBrackets (ppr e)
      GhcRn -> thTyBrackets (ppr e)
      GhcTc | HsBracketTc _ _ty _wrap ps <- b ->
        thTyBrackets (ppr e) `ppr_with_pending_tc_splices` ps

ppr_expr (HsUntypedBracket b q)
  = case ghcPass @p of
      GhcPs -> ppr q
      GhcRn -> case b of
        [] -> ppr q
        ps -> ppr q $$ text "pending(rn)" <+> ppr ps
      GhcTc | HsBracketTc rnq _ty _wrap ps <- b ->
        ppr rnq `ppr_with_pending_tc_splices` ps

ppr_expr (HsProc _ pat (L _ (HsCmdTop _ cmd)))
  = hsep [text "proc", ppr pat, text "->", ppr cmd]

ppr_expr (HsStatic _ e)
  = hsep [text "static", ppr e]

ppr_expr (HsEmbTy _ ty)
  = hsep [text "type", ppr ty]

ppr_expr (HsQual _ ctxt ty)
  = sep [ppr_context ctxt, ppr_lexpr ty]
  where
    ppr_context (L _ ctxt) =
      case ctxt of
        []       -> parens empty <+> darrow
        [L _ ty] -> ppr_expr ty <+> darrow
        _        -> parens (interpp'SP ctxt) <+> darrow

ppr_expr (HsForAll _ tele ty)
  = sep [pprHsForAll tele Nothing, ppr_lexpr ty]

ppr_expr (HsFunArr _ arr arg res)
  = sep [ppr_lexpr arg, pprHsArrow arr <+> ppr_lexpr res]

ppr_expr (XExpr x) = case ghcPass @p of
  GhcRn -> ppr x
  GhcTc -> ppr x

instance Outputable HsThingRn where
  ppr thing =
    case thing of
      OrigExpr x -> ppr_builder ":" x
      OrigStmt x -> ppr_builder ":" x
      OrigPat x  -> ppr_builder ":" x
    where ppr_builder prefix x = ifPprDebug (braces (text prefix <+> parens (ppr x))) (ppr x)

instance Outputable XXExprGhcRn where
  ppr (ExpandedThingRn o e) = ifPprDebug (braces $ vcat [ppr o, ppr e]) (ppr o)
  ppr (PopErrCtxt e)        = ifPprDebug (braces (text "" <+> ppr e)) (ppr e)
  ppr (HsRecSelRn f)        = pprPrefixOcc f

instance Outputable XXExprGhcTc where
  ppr (WrapExpr co_fn e)
    = pprHsWrapper co_fn (\_parens -> pprExpr e)

  ppr (ExpandedThingTc o e)
    = ifPprDebug (braces $ vcat [ppr o, ppr e]) (ppr o)
      -- e is the expanded expression, we print the original
      -- expression (HsExpr GhcRn), not the
      -- expanded typechecked one (HsExpr GhcTc),
      -- unless we are in ppr's debug mode, in which case both are printed

  ppr (ConLikeTc con _ _) = pprPrefixOcc con
    -- Used in error messages generated by
    -- the pattern match overlap checker

  ppr (HsTick tickish exp) =
    pprTicks (ppr exp) $
    ppr tickish <+> ppr_lexpr exp

  ppr (HsBinTick tickIdTrue tickIdFalse exp) =
    pprTicks (ppr exp) $
    hcat [text "bintick<", ppr tickIdTrue, text ",", ppr
          tickIdFalse, text ">(", ppr exp, text ")"]

  ppr (HsRecSelTc f) = pprPrefixOcc f

-- | Print an expression in infix form (e.g. a bare operator name),
-- or 'Nothing' if it has no sensible infix rendering.
ppr_infix_expr :: forall p. (OutputableBndrId p) => HsExpr (GhcPass p) -> Maybe SDoc
ppr_infix_expr (HsVar _ (L _ v))    = Just (pprInfixOcc v)
ppr_infix_expr (HsUnboundVar _ occ) = Just (pprInfixOcc occ)
ppr_infix_expr (XExpr x)            = case ghcPass @p of
                                        GhcRn -> ppr_infix_expr_rn x
                                        GhcTc -> ppr_infix_expr_tc x
ppr_infix_expr _ = Nothing

ppr_infix_expr_rn :: XXExprGhcRn -> Maybe SDoc
ppr_infix_expr_rn (ExpandedThingRn thing _) = ppr_infix_hs_expansion thing
ppr_infix_expr_rn (PopErrCtxt (L _ a))      = ppr_infix_expr a
ppr_infix_expr_rn (HsRecSelRn f)            = Just (pprInfixOcc f)

ppr_infix_expr_tc :: XXExprGhcTc -> Maybe SDoc
ppr_infix_expr_tc (WrapExpr _ e)            = ppr_infix_expr e
ppr_infix_expr_tc (ExpandedThingTc thing _) = ppr_infix_hs_expansion thing
ppr_infix_expr_tc (ConLikeTc {})            = Nothing
ppr_infix_expr_tc (HsTick {})               = Nothing
ppr_infix_expr_tc (HsBinTick {})            = Nothing
ppr_infix_expr_tc (HsRecSelTc f)            = Just (pprInfixOcc f)

-- | For an expansion, only the original expression (not the generated
-- one) may be printed infix.
ppr_infix_hs_expansion :: HsThingRn -> Maybe SDoc
ppr_infix_hs_expansion (OrigExpr e) = ppr_infix_expr e
ppr_infix_hs_expansion _            = Nothing

-- | Flatten nested (type) applications and print the head hanging
-- over its collected arguments.
ppr_apps :: (OutputableBndrId p)
         => HsExpr (GhcPass p)
         -> [Either (LHsExpr (GhcPass p)) (LHsWcType (NoGhcTc (GhcPass p)))]
         -> SDoc
ppr_apps (HsApp _ (L _ fun) arg) args
  = ppr_apps fun (Left arg : args)
ppr_apps (HsAppType _ (L _ fun) arg) args
  = ppr_apps fun (Right arg : args)
ppr_apps fun args = hang (ppr_expr fun) 2 (fsep (map pp args))
  where
    pp (Left arg) = ppr arg
    -- pp (Right (LHsWcTypeX (HsWC { hswc_body = L _ arg })))
    --   = char '@' <> pprHsType arg
    pp (Right arg)
      = text "@" <> ppr arg

-- | Add precedence parentheses only when -dppr-debug is on.
pprDebugParendExpr :: (OutputableBndrId p)
                   => PprPrec -> LHsExpr (GhcPass p) -> SDoc
pprDebugParendExpr p expr
  = getPprDebug $ \case
      True  -> pprParendLExpr p expr
      False -> pprLExpr expr

pprParendLExpr :: (OutputableBndrId p)
               => PprPrec -> LHsExpr (GhcPass p) -> SDoc
pprParendLExpr p (L _ e) = pprParendExpr p e

pprParendExpr :: (OutputableBndrId p)
              => PprPrec -> HsExpr (GhcPass p) -> SDoc
pprParendExpr p expr
  | hsExprNeedsParens p expr = parens (pprExpr expr)
  | otherwise                = pprExpr expr
        -- Using pprLExpr makes sure that we go 'deeper'
        -- I think that is usually (always?) right

-- | @'hsExprNeedsParens' p e@ returns 'True' if the expression @e@ needs
-- parentheses under precedence @p@.
hsExprNeedsParens :: forall p. IsPass p => PprPrec -> HsExpr (GhcPass p) -> Bool
hsExprNeedsParens prec = go
  where
    go :: HsExpr (GhcPass p) -> Bool
    go (HsVar{})           = False
    go (HsUnboundVar{})    = False
    go (HsIPVar{})         = False
    go (HsOverLabel{})     = False
    go (HsLit _ l)         = hsLitNeedsParens prec l
    go (HsOverLit _ ol)    = hsOverLitNeedsParens prec ol
    go (HsPar{})           = False
    go (HsApp{})           = prec >= appPrec
    go (HsAppType {})      = prec >= appPrec
    go (OpApp{})           = prec >= opPrec
    go (NegApp{})          = prec > topPrec
    go (SectionL{})        = True
    go (SectionR{})        = True
    -- Special-case unary boxed tuple applications so that they are
    -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612)
    -- See Note [One-tuples] in GHC.Builtin.Types
    go (ExplicitTuple _ [Present{}] Boxed)
                           = prec >= appPrec
    go (ExplicitTuple{})   = False
    go (ExplicitSum{})     = False
    go (HsLam{})           = prec > topPrec
    go (HsCase{})          = prec > topPrec
    go (HsIf{})            = prec > topPrec
    go (HsMultiIf{})       = prec > topPrec
    go (HsLet{})           = prec > topPrec
    go (HsDo _ sc _)
      | isDoComprehensionContext sc = False
      | otherwise                   = prec > topPrec
    go (ExplicitList{})    = False
    go (RecordUpd{})       = False
    go (ExprWithTySig{})   = prec >= sigPrec
    go (ArithSeq{})        = False
    go (HsPragE{})         = prec >= appPrec
    go (HsTypedSplice{})   = False
    go (HsUntypedSplice{}) = False
    go (HsTypedBracket{})  = False
    go (HsUntypedBracket{}) = False
    go (HsProc{})          = prec > topPrec
    go (HsStatic{})        = prec >= appPrec
    go (RecordCon{})       = False
    go (HsProjection{})    = True
    go (HsGetField{})      = False
    go (HsEmbTy{})         = prec > topPrec
    go (HsForAll{})        = prec >= funPrec
    go (HsQual{})          = prec >= funPrec
    go (HsFunArr{})        = prec >= funPrec
    go (XExpr x) = case ghcPass @p of
                     GhcTc -> go_x_tc x
                     GhcRn -> go_x_rn x

    go_x_tc :: XXExprGhcTc -> Bool
    go_x_tc (WrapExpr _ e)            = hsExprNeedsParens prec e
    go_x_tc (ExpandedThingTc thing _) = hsExpandedNeedsParens thing
    go_x_tc (ConLikeTc {})            = False
    go_x_tc (HsTick _ (L _ e))        = hsExprNeedsParens prec e
    go_x_tc (HsBinTick _ _ (L _ e))   = hsExprNeedsParens prec e
    go_x_tc (HsRecSelTc{})            = False

    go_x_rn :: XXExprGhcRn -> Bool
    go_x_rn (ExpandedThingRn thing _) = hsExpandedNeedsParens thing
    go_x_rn (PopErrCtxt (L _ a))      = hsExprNeedsParens prec a
    go_x_rn (HsRecSelRn{})            = False

    -- Only the original expression of an expansion determines
    -- parenthesization; the generated code is never shown.
    hsExpandedNeedsParens :: HsThingRn -> Bool
    hsExpandedNeedsParens (OrigExpr e) = hsExprNeedsParens prec e
    hsExpandedNeedsParens _            = False

-- | Parenthesize an expression without token information
gHsPar :: forall p. IsPass p => LHsExpr (GhcPass p) -> HsExpr (GhcPass p)
gHsPar e = HsPar x e
  where
    x = case ghcPass @p of
          GhcPs -> noAnn
          GhcRn -> noExtField
          GhcTc -> noExtField

-- | @'parenthesizeHsExpr' p e@ checks if @'hsExprNeedsParens' p e@ is true,
-- and if so, surrounds @e@ with an 'HsPar'. Otherwise, it simply returns @e@.
parenthesizeHsExpr :: IsPass p => PprPrec -> LHsExpr (GhcPass p) -> LHsExpr (GhcPass p)
parenthesizeHsExpr p le@(L loc e)
  | hsExprNeedsParens p e = L loc (gHsPar le)
  | otherwise             = le

-- | Strip all outer parentheses from a located expression.
stripParensLHsExpr :: LHsExpr (GhcPass p) -> LHsExpr (GhcPass p)
stripParensLHsExpr (L _ (HsPar _ e)) = stripParensLHsExpr e
stripParensLHsExpr e = e

-- | Strip all outer parentheses from an expression.
stripParensHsExpr :: HsExpr (GhcPass p) -> HsExpr (GhcPass p)
stripParensHsExpr (HsPar _ (L _ e)) = stripParensHsExpr e
stripParensHsExpr e = e

isAtomicHsExpr :: forall p.
                  IsPass p => HsExpr (GhcPass p) -> Bool
-- True of a single token
isAtomicHsExpr (HsVar {})        = True
isAtomicHsExpr (HsLit {})        = True
isAtomicHsExpr (HsOverLit {})    = True
isAtomicHsExpr (HsIPVar {})      = True
isAtomicHsExpr (HsOverLabel {})  = True
isAtomicHsExpr (HsUnboundVar {}) = True
isAtomicHsExpr (XExpr x)
  | GhcTc <- ghcPass @p = go_x_tc x
  | GhcRn <- ghcPass @p = go_x_rn x
  where
    go_x_tc :: XXExprGhcTc -> Bool
    go_x_tc (WrapExpr _ e)            = isAtomicHsExpr e
    go_x_tc (ExpandedThingTc thing _) = isAtomicExpandedThingRn thing
    go_x_tc (ConLikeTc {})            = True
    go_x_tc (HsTick {})               = False
    go_x_tc (HsBinTick {})            = False
    go_x_tc (HsRecSelTc{})            = True

    go_x_rn :: XXExprGhcRn -> Bool
    go_x_rn (ExpandedThingRn thing _) = isAtomicExpandedThingRn thing
    go_x_rn (PopErrCtxt (L _ a))      = isAtomicHsExpr a
    go_x_rn (HsRecSelRn{})            = True

    -- Atomicity is judged on the original thing, since that is what
    -- gets printed.
    isAtomicExpandedThingRn :: HsThingRn -> Bool
    isAtomicExpandedThingRn (OrigExpr e) = isAtomicHsExpr e
    isAtomicExpandedThingRn _            = False

isAtomicHsExpr _ = False

instance Outputable (HsPragE (GhcPass p)) where
  ppr (HsPragSCC (_, st) (StringLiteral stl lbl _)) =
    pprWithSourceText st (text "{-# SCC")
     -- no doublequotes if stl empty, for the case where the SCC was written
     -- without quotes.
          <+> pprWithSourceText stl (ftext lbl) <+> text "#-}"

{- *********************************************************************
*                                                                      *
             XXExprGhcRn and rebindable syntax
*                                                                      *
********************************************************************* -}

{- Note [Rebindable syntax and XXExprGhcRn]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We implement rebindable syntax (RS) support by performing a desugaring
in the renamer.

We transform GhcPs expressions and patterns affected by RS into the
appropriate desugared form, but **annotated with the original expression/pattern**.
Let us consider a piece of code like: {-# LANGUAGE RebindableSyntax #-} ifThenElse :: Char -> () -> () -> () ifThenElse _ _ _ = () x = if 'a' then () else True The parsed AST for the RHS of x would look something like (slightly simplified): L locif (HsIf (L loca 'a') (L loctrue ()) (L locfalse True)) Upon seeing such an AST with RS on, we could transform it into a mere function call, as per the RS rules, equivalent to the following function application: ifThenElse 'a' () True which doesn't typecheck. But GHC would report an error about not being able to match the third argument's type (Bool) with the expected type: (), in the expression _as desugared_, i.e in the aforementioned function application. But the user never wrote a function application! This would be pretty bad. To remedy this, instead of transforming the original HsIf node into mere applications of 'ifThenElse', we keep the original 'if' expression around too, using the TTG XExpr extension point to allow GHC to construct an 'XXExprGhcRn' value that will keep track of the original expression in its first field, and the desugared one in the second field. The resulting renamed AST would look like: L locif (XExpr (ExpandedThingRn (HsIf (L loca 'a') (L loctrue ()) (L locfalse True) ) (App (L generatedSrcSpan (App (L generatedSrcSpan (App (L generatedSrcSpan (Var ifThenElse)) (L loca 'a') ) ) (L loctrue ()) ) ) (L locfalse True) ) ) ) When comes the time to typecheck the program, we end up calling tcMonoExpr on the AST above. If this expression gives rise to a type error, then it will appear in a context line and GHC will pretty-print it using the 'Outputable (XXExprGhcRn a b)' instance defined below, which *only prints the original expression*. This is the gist of the idea, but is not quite enough to recover the error messages that we had with the SyntaxExpr-based, typechecking/desugaring-to-core time implementation of rebindable syntax. 
The key idea is to decorate some elements of the desugared expression so as to be able to give them a special treatment when typechecking the desugared expression, to print a different context line or skip one altogether. Whenever we 'setSrcSpan' a 'generatedSrcSpan', we update a field in TcLclEnv called 'tcl_in_gen_code', setting it to True, which indicates that we entered generated code, i.e code fabricated by the compiler when rebinding some syntax. If someone tries to push some error context line while that field is set to True, the pushing won't actually happen and the context line is just dropped. Once we 'setSrcSpan' a real span (for an expression that was in the original source code), we set 'tcl_in_gen_code' back to False, indicating that we "emerged from the generated code tunnel", and that the expressions we will be processing are relevant to report in context lines again. You might wonder why TcLclEnv has both tcl_loc :: RealSrcSpan tcl_in_gen_code :: Bool Could we not store a Maybe RealSrcSpan? The problem is that we still generate constraints when processing generated code, and a CtLoc must contain a RealSrcSpan -- otherwise, error messages might appear without source locations. So tcl_loc keeps the RealSrcSpan of the last location spotted that wasn't generated; it's as good as we're going to get in generated code. Once we get to sub-trees that are not generated, then we update the RealSrcSpan appropriately, and set the tcl_in_gen_code Bool to False. --- An overview of the constructs that are desugared in this way is laid out in Note [Handling overloaded and rebindable constructs] in GHC.Rename.Expr. A general recipe to follow this approach for new constructs could go as follows: - Remove any GhcRn-time SyntaxExpr extensions to the relevant constructor for your construct, in HsExpr or related syntax data types. 
- At renaming-time: - take your original node of interest (HsIf above) - rename its subexpressions/subpatterns (condition and true/false branches above) - construct the suitable "rebound"-and-renamed result (ifThenElse call above), where the 'SrcSpan' attached to any _fabricated node_ (the HsVar/HsApp nodes, above) is set to 'generatedSrcSpan' - take both the original node and that rebound-and-renamed result and wrap them into an expansion construct: for expressions, XExpr (ExpandedThingRn <original node> <expansion>) for patterns, XPat (HsPatExpanded <original node> <expansion>) - At typechecking-time: - remove any logic that was previously dealing with your rebindable construct, typically involving [tc]SyntaxOp, SyntaxExpr and friends. - the XExpr (ExpandedThingRn ... ...) case in tcExpr already makes sure that we typecheck the desugared expression while reporting the original one in errors -} {- Note [Overview of record dot syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is the note that explains all the moving parts for record dot syntax. The language extensions @OverloadedRecordDot@ and @OverloadedRecordUpdate@ (providing "record dot syntax") are implemented using the techniques of Note [Rebindable syntax and XXExprGhcRn]. When OverloadedRecordDot is enabled: - Field selection expressions - e.g. foo.bar.baz - Have abstract syntax HsGetField - After renaming are XExpr (ExpandedThingRn (HsGetField ...) (getField @"..."...)) expressions - Field selector expressions e.g. (.x.y) - Have abstract syntax HsProjection - After renaming are XExpr (ExpandedThingRn (HsProjection ...) ((getField @"...") . (getField @"...") . ...) expressions When OverloadedRecordUpdate is enabled: - Record update expressions - e.g. a{foo.bar=1, quux="corge", baz} - Have abstract syntax RecordUpd - With rupd_flds containing a Right - See Note [RecordDotSyntax field updates] (in Language.Haskell.Syntax.Expr) - After renaming are XExpr (ExpandedThingRn (RecordUpd ...) (setField@"..." ...)
expressions
    - Note that this is true for all record updates even for those
      that do not involve '.'

When OverloadedRecordDot is enabled and RebindableSyntax is not enabled the name
'getField' is resolved to GHC.Records.getField. When OverloadedRecordDot is
enabled and RebindableSyntax is enabled the name 'getField' is whatever in-scope
name that is.

When OverloadedRecordUpd is enabled and RebindableSyntax is not enabled it is an
error for now (temporary while we wait on native setField support; see
https://gitlab.haskell.org/ghc/ghc/-/issues/16232). When OverloadedRecordUpd is
enabled and RebindableSyntax is enabled the names 'getField' and 'setField' are
whatever in-scope names they are.
-}

{-
************************************************************************
*                                                                      *
\subsection{Commands (in arrow abstractions)}
*                                                                      *
************************************************************************
-}

type instance XCmdArrApp  GhcPs = (IsUnicodeSyntax, EpaLocation)
type instance XCmdArrApp  GhcRn = NoExtField
type instance XCmdArrApp  GhcTc = Type

type instance XCmdArrForm GhcPs = AnnList ()
-- | fixity (filled in by the renamer), for forms that were converted from
-- OpApp's by the renamer
type instance XCmdArrForm GhcRn = Maybe Fixity
type instance XCmdArrForm GhcTc = Maybe Fixity

type instance XCmdApp     (GhcPass _) = NoExtField
type instance XCmdLam     (GhcPass _) = NoExtField

type instance XCmdPar     GhcPs = (EpToken "(", EpToken ")")
type instance XCmdPar     GhcRn = NoExtField
type instance XCmdPar     GhcTc = NoExtField

type instance XCmdCase    GhcPs = EpAnnHsCase
type instance XCmdCase    GhcRn = NoExtField
type instance XCmdCase    GhcTc = NoExtField

type instance XCmdLamCase (GhcPass _) = EpAnnLam

type instance XCmdIf      GhcPs = AnnsIf
type instance XCmdIf      GhcRn = NoExtField
type instance XCmdIf      GhcTc = NoExtField

type instance XCmdLet     GhcPs = (EpToken "let", EpToken "in")
type instance XCmdLet     GhcRn = NoExtField
type instance XCmdLet     GhcTc = NoExtField

type instance XCmdDo      GhcPs = AnnList EpaLocation
type instance XCmdDo      GhcRn = NoExtField
type instance XCmdDo      GhcTc = Type

type instance XCmdWrap    (GhcPass _) = NoExtField

type instance XXCmd       GhcPs = DataConCantHappen
type instance XXCmd       GhcRn = DataConCantHappen
type instance XXCmd       GhcTc = HsWrap HsCmd
    -- If   cmd :: arg1 --> res
    --      wrap :: arg1 "->" arg2
    -- Then (XCmd (HsWrap wrap cmd)) :: arg2 --> res

-- | Command Syntax Table (for Arrow syntax)
type CmdSyntaxTable p = [(Name, HsExpr p)]
-- See Note [CmdSyntaxTable]

{- Note [CmdSyntaxTable]
~~~~~~~~~~~~~~~~~~~~~
Used only for arrow-syntax stuff (HsCmdTop), the CmdSyntaxTable keeps
track of the methods needed for a Cmd.

* Before the renamer, this list is an empty list

* After the renamer, it takes the form @[(std_name, HsVar actual_name)]@
  For example, for the 'arr' method
   * normal case:            (GHC.Control.Arrow.arr, HsVar GHC.Control.Arrow.arr)
   * with rebindable syntax: (GHC.Control.Arrow.arr, arr_22)
             where @arr_22@ is whatever 'arr' is in scope

* After the type checker, it takes the form [(std_name, <expression>)]
  where <expression> is the evidence for the method.  This evidence is
  instantiated with the class, but is still polymorphic in everything
  else.  For example, in the case of 'arr', the evidence has type
         forall b c. (b->c) -> a b c
  where 'a' is the ambient type of the arrow.  This polymorphism is
  important because the desugarer uses the same evidence at multiple
  different types.

This is Less Cool than what we normally do for rebindable syntax, which is to
make fully-instantiated piece of evidence at every use site.  The Cmd way
is Less Cool because
  * The renamer has to predict which methods are needed.
    See the tedious GHC.Rename.Expr.methodNamesCmd.

  * The desugarer has to know the polymorphic type of the instantiated
    method. This is checked by Inst.tcSyntaxName, but is less flexible
    than the rest of rebindable syntax, where the type is less
    pre-ordained.  (And this flexibility is useful; for example we can
    typecheck do-notation with (>>=) :: m1 a -> (a -> m2 b) -> m2 b.)
-}

-- | Typechecker-phase extension data for 'HsCmdTop'.
data CmdTopTc
  = CmdTopTc Type    -- Nested tuple of inputs on the command's stack
             Type    -- return type of the command
             (CmdSyntaxTable GhcTc) -- See Note [CmdSyntaxTable]

type instance XCmdTop  GhcPs = NoExtField
type instance XCmdTop  GhcRn = CmdSyntaxTable GhcRn -- See Note [CmdSyntaxTable]
type instance XCmdTop  GhcTc = CmdTopTc

type instance XXCmdTop (GhcPass _) = DataConCantHappen

instance (OutputableBndrId p) => Outputable (HsCmd (GhcPass p)) where
    ppr cmd = pprCmd cmd

-----------------------
-- pprCmd and pprLCmd call pprDeeper;
-- the underscore versions do not

pprLCmd :: (OutputableBndrId p) => LHsCmd (GhcPass p) -> SDoc
pprLCmd (L _ c) = pprCmd c

pprCmd :: (OutputableBndrId p) => HsCmd (GhcPass p) -> SDoc
pprCmd c | isQuietHsCmd c = ppr_cmd c
         | otherwise      = pprDeeper (ppr_cmd c)

isQuietHsCmd :: HsCmd id -> Bool
-- Parentheses do display something, but it gives little info and
-- if we go deeper when we go inside them then we get ugly things
-- like (...)
isQuietHsCmd (HsCmdPar {}) = True
-- applications don't display anything themselves
isQuietHsCmd (HsCmdApp {}) = True
isQuietHsCmd _             = False

-----------------------
ppr_lcmd :: (OutputableBndrId p) => LHsCmd (GhcPass p) -> SDoc
ppr_lcmd c = ppr_cmd (unLoc c)

ppr_cmd :: forall p. (OutputableBndrId p ) => HsCmd (GhcPass p) -> SDoc
ppr_cmd (HsCmdPar _ c) = parens (ppr_lcmd c)

ppr_cmd (HsCmdApp _ c e)
  = let (fun, args) = collect_args c [e] in
    hang (ppr_lcmd fun) 2 (sep (map ppr args))
  where
    -- Flatten left-nested command application into head + argument list
    collect_args (L _ (HsCmdApp _ fun arg)) args = collect_args fun (arg:args)
    collect_args fun args = (fun, args)

ppr_cmd (HsCmdLam _ LamSingle matches) = pprMatches matches
ppr_cmd (HsCmdLam _ lam_variant matches)
  = sep [ lamCaseKeyword lam_variant, nest 2 (pprMatches matches) ]

ppr_cmd (HsCmdCase _ expr matches)
  = sep [ sep [text "case", nest 4 (ppr expr), text "of"],
          nest 2 (pprMatches matches) ]

ppr_cmd (HsCmdIf _ _ e ct ce)
  = sep [hsep [text "if", nest 2 (ppr e), text "then"],
         nest 4 (ppr ct),
         text "else",
         nest 4 (ppr ce)]

-- special case: let ... in let ...
ppr_cmd (HsCmdLet _ binds cmd@(L _ (HsCmdLet {})))
  = sep [hang (text "let") 2 (hsep [pprBinds binds, text "in"]),
         ppr_lcmd cmd]

ppr_cmd (HsCmdLet _ binds cmd)
  = sep [hang (text "let") 2 (pprBinds binds),
         hang (text "in") 2 (ppr cmd)]

ppr_cmd (HsCmdDo _ (L _ stmts)) = pprArrowExpr stmts

ppr_cmd (HsCmdArrApp _ arrow arg HsFirstOrderApp True)
  = hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp _ arrow arg HsFirstOrderApp False)
  = hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow]
ppr_cmd (HsCmdArrApp _ arrow arg HsHigherOrderApp True)
  = hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp _ arrow arg HsHigherOrderApp False)
  = hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow]

ppr_cmd (HsCmdArrForm rn_fix (L _ op) ps_fix args)
  | HsVar _ (L _ v) <- op
  = ppr_cmd_infix v
  | GhcTc <- ghcPass @p
  , XExpr (ConLikeTc c _ _) <- op
  = ppr_cmd_infix (conLikeName c)
  | otherwise
  = fall_through
  where
    -- Banana-bracket rendering, used when we cannot print infix
    fall_through = hang (text "(|" <+> ppr_expr op)
                      4 (sep (map (pprCmdArg.unLoc) args) <+> text "|)")

    ppr_cmd_infix :: OutputableBndr v => v -> SDoc
    ppr_cmd_infix v
      | [arg1, arg2] <- args
      , case ghcPass @p of
          GhcPs -> ps_fix == Infix
          GhcRn -> isJust rn_fix || ps_fix == Infix
          GhcTc -> isJust rn_fix ||
                   ps_fix == Infix
      = hang (pprCmdArg (unLoc arg1))
           4 (sep [ pprInfixOcc v, pprCmdArg (unLoc arg2)])
      | otherwise
      = fall_through

ppr_cmd (XCmd x) = case ghcPass @p of
  GhcTc -> case x of
    HsWrap w cmd -> pprHsWrapper w (\_ -> parens (ppr_cmd cmd))

pprCmdArg :: (OutputableBndrId p) => HsCmdTop (GhcPass p) -> SDoc
pprCmdArg (HsCmdTop _ cmd)
  = ppr_lcmd cmd

instance (OutputableBndrId p) => Outputable (HsCmdTop (GhcPass p)) where
    ppr = pprCmdArg

{-
************************************************************************
*                                                                      *
\subsection{@Match@, @GRHSs@, and @GRHS@ datatypes}
*                                                                      *
************************************************************************
-}

type instance XMG GhcPs b = Origin
type instance XMG GhcRn b = Origin -- See Note [Generated code and pattern-match checking]
type instance XMG GhcTc b = MatchGroupTc

-- | Typechecker-phase extension data for a 'MatchGroup'.
data MatchGroupTc
  = MatchGroupTc
       { mg_arg_tys :: [Scaled Type]  -- Types of the arguments, t1..tn
       , mg_res_ty  :: Type           -- Type of the result, tr
       , mg_origin  :: Origin         -- Origin (Generated vs FromSource)
       } deriving Data

type instance XXMatchGroup (GhcPass _) b = DataConCantHappen

type instance XCMatch (GhcPass _) b = NoExtField
type instance XXMatch (GhcPass _) b = DataConCantHappen

instance (OutputableBndrId pr, Outputable body)
            => Outputable (Match (GhcPass pr) body) where
  ppr = pprMatch

isEmptyMatchGroup :: MatchGroup (GhcPass p) body -> Bool
isEmptyMatchGroup (MG { mg_alts = ms }) = null $ unLoc ms

-- | Is there only one RHS in this list of matches?
isSingletonMatchGroup :: [LMatch (GhcPass p) body] -> Bool
isSingletonMatchGroup matches
  | [L _ match] <- matches
  , Match { m_grhss = GRHSs { grhssGRHSs = [_] } } <- match
  = True
  | otherwise
  = False

matchGroupArity :: MatchGroup (GhcPass id) body -> Arity
-- Precondition: MatchGroup is non-empty
-- This is called before type checking, when mg_arg_tys is not set
matchGroupArity (MG { mg_alts = alts })
  | L _ (alt1:_) <- alts = count (isVisArgPat . unLoc) (hsLMatchPats alt1)
  | otherwise            = panic "matchGroupArity"

hsLMatchPats :: LMatch (GhcPass id) body -> [LPat (GhcPass id)]
hsLMatchPats (L _ (Match { m_pats = L _ pats })) = pats

-- We keep the type checker happy by providing EpAnnComments.  They
-- can only be used if they follow a `where` keyword with no binds,
-- but in that case the comment is attached to the following parsed
-- item. So this can never be used in practice.
type instance XCGRHSs (GhcPass _) _ = EpAnnComments

type instance XXGRHSs (GhcPass _) _ = DataConCantHappen

-- | Exact-print annotations for a guarded RHS.
data GrhsAnn
  = GrhsAnn {
      ga_vbar :: Maybe (EpToken "|"),
      ga_sep  :: Either (EpToken "=") TokRarrow -- ^ Match separator location, `=` or `->`
      } deriving (Data)

instance NoAnn GrhsAnn where
  noAnn = GrhsAnn Nothing noAnn

type instance XCGRHS (GhcPass _) _ = EpAnn GrhsAnn
                                     -- Location of matchSeparator
                                     -- TODO:AZ does this belong on the GRHS, or GRHSs?

type instance XXGRHS (GhcPass _) b = DataConCantHappen

pprMatches :: (OutputableBndrId idR, Outputable body)
           => MatchGroup (GhcPass idR) body -> SDoc
pprMatches MG { mg_alts = matches }
    = vcat (map pprMatch (map unLoc (unLoc matches)))
      -- Don't print the type; it's only a place-holder before typechecking

-- Exported to GHC.Hs.Binds, which can't see the defn of HsMatchContext
pprFunBind :: (OutputableBndrId idR)
           => MatchGroup (GhcPass idR) (LHsExpr (GhcPass idR)) -> SDoc
pprFunBind matches = pprMatches matches

-- Exported to GHC.Hs.Binds, which can't see the defn of HsMatchContext
pprPatBind :: forall bndr p . (OutputableBndrId bndr,
                               OutputableBndrId p)
           => LPat (GhcPass bndr) -> GRHSs (GhcPass p) (LHsExpr (GhcPass p)) -> SDoc
pprPatBind pat grhss
 = sep [ppr pat,
        nest 2 (pprGRHSs (PatBindRhs :: HsMatchContext Void) grhss)]

pprMatch :: (OutputableBndrId idR, Outputable body)
         => Match (GhcPass idR) body -> SDoc
pprMatch (Match { m_pats = L _ pats, m_ctxt = ctxt, m_grhss = grhss })
  = sep [ sep (herald : map (nest 2 . pprParendLPat appPrec) other_pats)
        , nest 2 (pprGRHSs ctxt grhss) ]
  where
    -- lam_cases_result: we don't simply return (empty, pats) to avoid
    -- introducing an additional `nest 2` via the empty herald
    lam_cases_result = case pats of
                         []     -> (empty, [])
                         (p:ps) -> (pprParendLPat appPrec p, ps)

    (herald, other_pats)
        = case ctxt of
            FunRhs {mc_fun=L _ fun, mc_fixity=fixity, mc_strictness=strictness}
                | SrcStrict <- strictness
                -> assert (null pats)     -- A strict variable binding
                   (char '!'<>pprPrefixOcc fun, pats)

                | Prefix <- fixity
                -> (pprPrefixOcc fun, pats) -- f x y z = e
                                            -- Not pprBndr; the AbsBinds will
                                            -- have printed the signature
                | otherwise
                -> case pats of
                     (p1:p2:rest)
                        | null rest -> (pp_infix, [])           -- x &&& y = e
                        | otherwise -> (parens pp_infix, rest)  -- (x &&& y) z = e
                        where
                          pp_infix = pprParendLPat opPrec p1
                                     <+> pprInfixOcc fun
                                     <+> pprParendLPat opPrec p2
                     _ -> pprPanic "pprMatch" (ppr ctxt $$ ppr pats)

            LamAlt LamSingle -> (char '\\', pats)
            ArrowMatchCtxt (ArrowLamAlt LamSingle) -> (char '\\', pats)
            LamAlt LamCases -> lam_cases_result
            ArrowMatchCtxt (ArrowLamAlt LamCases) -> lam_cases_result
            ArrowMatchCtxt ProcExpr -> (text "proc", pats)

            _ -> case pats of
                   []    -> (empty, [])
                   [pat] -> (ppr pat, [])   -- No parens around the single pat in a case
                   _     -> pprPanic "pprMatch" (ppr ctxt $$ ppr pats)

pprGRHSs :: (OutputableBndrId idR, Outputable body)
         => HsMatchContext fn -> GRHSs (GhcPass idR) body -> SDoc
pprGRHSs ctxt (GRHSs _ grhss binds)
  = vcat (map (pprGRHS ctxt . unLoc) grhss)
  -- Print the "where" even if the contents of the binds is empty.
Only -- EmptyLocalBinds means no "where" keyword $$ ppUnless (eqEmptyLocalBinds binds) (text "where" $$ nest 4 (pprBinds binds)) pprGRHS :: (OutputableBndrId idR, Outputable body) => HsMatchContext fn -> GRHS (GhcPass idR) body -> SDoc pprGRHS ctxt (GRHS _ [] body) = pp_rhs ctxt body pprGRHS ctxt (GRHS _ guards body) = sep [vbar <+> interpp'SP guards, pp_rhs ctxt body] pp_rhs :: Outputable body => HsMatchContext fn -> body -> SDoc pp_rhs ctxt rhs = matchSeparator ctxt <+> pprDeeper (ppr rhs) matchSeparator :: HsMatchContext fn -> SDoc matchSeparator FunRhs{} = text "=" matchSeparator CaseAlt = text "->" matchSeparator LamAlt{} = text "->" matchSeparator IfAlt = text "->" matchSeparator ArrowMatchCtxt{} = text "->" matchSeparator PatBindRhs = text "=" matchSeparator PatBindGuards = text "=" matchSeparator StmtCtxt{} = text "<-" matchSeparator RecUpd = text "=" -- This can be printed by the pattern matchSeparator PatSyn = text "<-" -- match checker trace matchSeparator LazyPatCtx = panic "unused" matchSeparator ThPatSplice = panic "unused" matchSeparator ThPatQuote = panic "unused" instance Outputable GrhsAnn where ppr (GrhsAnn v s) = text "GrhsAnn" <+> ppr v <+> ppr s {- ************************************************************************ * * \subsection{Do stmts and list comprehensions} * * ************************************************************************ -} -- Extra fields available post typechecking for RecStmt. data RecStmtTc = RecStmtTc { recS_bind_ty :: Type -- S in (>>=) :: Q -> (R -> S) -> T , recS_later_rets :: [PostTcExpr] -- (only used in the arrow version) , recS_rec_rets :: [PostTcExpr] -- These expressions correspond 1-to-1 -- with recS_later_ids and recS_rec_ids, -- and are the expressions that should be -- returned by the recursion. 
-- They may not quite be the Ids themselves, -- because the Id may be *polymorphic*, but -- the returned thing has to be *monomorphic*, -- so they may be type applications , recS_ret_ty :: Type -- The type of -- do { stmts; return (a,b,c) } -- With rebindable syntax the type might not -- be quite as simple as (m (tya, tyb, tyc)). } type instance XLastStmt (GhcPass _) (GhcPass _) b = NoExtField type instance XBindStmt (GhcPass _) GhcPs b = EpUniToken "<-" "←" type instance XBindStmt (GhcPass _) GhcRn b = XBindStmtRn type instance XBindStmt (GhcPass _) GhcTc b = XBindStmtTc data XBindStmtRn = XBindStmtRn { xbsrn_bindOp :: SyntaxExpr GhcRn , xbsrn_failOp :: FailOperator GhcRn } data XBindStmtTc = XBindStmtTc { xbstc_bindOp :: SyntaxExpr GhcTc , xbstc_boundResultType :: Type -- If (>>=) :: Q -> (R -> S) -> T, this is S , xbstc_boundResultMult :: Mult -- If (>>=) :: Q -> (R -> S) -> T, this is S , xbstc_failOp :: FailOperator GhcTc } type instance XApplicativeStmt (GhcPass _) GhcPs = NoExtField type instance XApplicativeStmt (GhcPass _) GhcRn = NoExtField type instance XApplicativeStmt (GhcPass _) GhcTc = Type type instance XBodyStmt (GhcPass _) GhcPs b = NoExtField type instance XBodyStmt (GhcPass _) GhcRn b = NoExtField type instance XBodyStmt (GhcPass _) GhcTc b = Type type instance XLetStmt (GhcPass _) (GhcPass _) b = EpToken "let" type instance XParStmt (GhcPass _) GhcPs b = NoExtField type instance XParStmt (GhcPass _) GhcRn b = NoExtField type instance XParStmt (GhcPass _) GhcTc b = Type type instance XTransStmt (GhcPass _) GhcPs b = AnnTransStmt type instance XTransStmt (GhcPass _) GhcRn b = NoExtField type instance XTransStmt (GhcPass _) GhcTc b = Type type instance XRecStmt (GhcPass _) GhcPs b = AnnList (EpToken "rec") type instance XRecStmt (GhcPass _) GhcRn b = NoExtField type instance XRecStmt (GhcPass _) GhcTc b = RecStmtTc type instance XXStmtLR (GhcPass _) GhcPs b = DataConCantHappen type instance XXStmtLR (GhcPass x) GhcRn b = ApplicativeStmt (GhcPass 
x) GhcRn type instance XXStmtLR (GhcPass x) GhcTc b = ApplicativeStmt (GhcPass x) GhcTc data AnnTransStmt = AnnTransStmt { ats_then :: EpToken "then", ats_group :: Maybe (EpToken "group"), ats_by :: Maybe (EpToken "by"), ats_using :: Maybe (EpToken "using") } deriving Data instance NoAnn AnnTransStmt where noAnn = AnnTransStmt noAnn noAnn noAnn noAnn -- | 'ApplicativeStmt' represents an applicative expression built with -- '<$>' and '<*>'. It is generated by the renamer, and is desugared into the -- appropriate applicative expression by the desugarer, but it is intended -- to be invisible in error messages. -- -- For full details, see Note [ApplicativeDo] in "GHC.Rename.Expr" -- data ApplicativeStmt idL idR = ApplicativeStmt (XApplicativeStmt idL idR) -- Post typecheck, Type of the body [ ( SyntaxExpr idR , ApplicativeArg idL) ] -- [(<$>, e1), (<*>, e2), ..., (<*>, en)] (Maybe (SyntaxExpr idR)) -- 'join', if necessary -- | Applicative Argument data ApplicativeArg idL = ApplicativeArgOne -- A single statement (BindStmt or BodyStmt) { xarg_app_arg_one :: XApplicativeArgOne idL -- ^ The fail operator, after renaming -- -- The fail operator is needed if this is a BindStmt -- where the pattern can fail. E.g.: -- (Just a) <- stmt -- The fail operator will be invoked if the pattern -- match fails. -- It is also used for guards in MonadComprehensions. -- The fail operator is Nothing -- if the pattern match can't fail , app_arg_pattern :: LPat idL -- WildPat if it was a BodyStmt (see below) , arg_expr :: LHsExpr idL , is_body_stmt :: Bool -- ^ True <=> was a BodyStmt, -- False <=> was a BindStmt. 
-- See Note [Applicative BodyStmt] } | ApplicativeArgMany -- do { stmts; return vars } { xarg_app_arg_many :: XApplicativeArgMany idL , app_stmts :: [ExprLStmt idL] -- stmts , final_expr :: HsExpr idL -- return (v1,..,vn), or just (v1,..,vn) , bv_pattern :: LPat idL -- (v1,...,vn) , stmt_context :: HsDoFlavour -- ^ context of the do expression, used in pprArg } | XApplicativeArg !(XXApplicativeArg idL) type family XApplicativeStmt x x' -- ApplicativeArg type families type family XApplicativeArgOne x type family XApplicativeArgMany x type family XXApplicativeArg x type instance XParStmtBlock (GhcPass pL) (GhcPass pR) = NoExtField type instance XXParStmtBlock (GhcPass pL) (GhcPass pR) = DataConCantHappen type instance XApplicativeArgOne GhcPs = NoExtField type instance XApplicativeArgOne GhcRn = FailOperator GhcRn type instance XApplicativeArgOne GhcTc = FailOperator GhcTc type instance XApplicativeArgMany (GhcPass _) = NoExtField type instance XXApplicativeArg (GhcPass _) = DataConCantHappen instance (Outputable (StmtLR (GhcPass idL) (GhcPass idL) (LHsExpr (GhcPass idL))), Outputable (XXParStmtBlock (GhcPass idL) (GhcPass idR))) => Outputable (ParStmtBlock (GhcPass idL) (GhcPass idR)) where ppr (ParStmtBlock _ stmts _ _) = interpp'SP stmts instance (OutputableBndrId pl, OutputableBndrId pr, Anno (StmtLR (GhcPass pl) (GhcPass pr) body) ~ SrcSpanAnnA, Outputable body) => Outputable (StmtLR (GhcPass pl) (GhcPass pr) body) where ppr stmt = pprStmt stmt pprStmt :: forall idL idR body . 
(OutputableBndrId idL, OutputableBndrId idR, Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA, Outputable body) => (StmtLR (GhcPass idL) (GhcPass idR) body) -> SDoc pprStmt (LastStmt _ expr m_dollar_stripped _) = whenPprDebug (text "[last]") <+> (case m_dollar_stripped of Just True -> text "return $" Just False -> text "return" Nothing -> empty) <+> ppr expr pprStmt (BindStmt _ pat expr) = pprBindStmt pat expr pprStmt (LetStmt _ binds) = hsep [text "let", pprBinds binds] pprStmt (BodyStmt _ expr _ _) = ppr expr pprStmt (ParStmt _ stmtss _ _) = sep (punctuate (text " | ") (map ppr stmtss)) pprStmt (TransStmt { trS_stmts = stmts, trS_by = by , trS_using = using, trS_form = form }) = sep $ punctuate comma (map ppr stmts ++ [pprTransStmt by using form]) pprStmt (RecStmt { recS_stmts = segment, recS_rec_ids = rec_ids , recS_later_ids = later_ids }) = text "rec" <+> vcat [ ppr_do_stmts (unLoc segment) , whenPprDebug (vcat [ text "rec_ids=" <> ppr rec_ids , text "later_ids=" <> ppr later_ids])] pprStmt (XStmtLR x) = case ghcPass :: GhcPass idR of GhcRn -> pprApplicativeStmt x GhcTc -> pprApplicativeStmt x where pprApplicativeStmt :: (OutputableBndrId idL, OutputableBndrId idR) => ApplicativeStmt (GhcPass idL) (GhcPass idR) -> SDoc pprApplicativeStmt (ApplicativeStmt _ args mb_join) = getPprStyle $ \style -> if userStyle style then pp_for_user else pp_debug where -- make all the Applicative stuff invisible in error messages by -- flattening the whole ApplicativeStmt nest back to a sequence -- of statements. pp_for_user = vcat $ concatMap flattenArg args -- ppr directly rather than transforming here, because we need to -- inject a "return" which is hard when we're polymorphic in the id -- type. 
flattenStmt :: ExprLStmt (GhcPass idL) -> [SDoc] flattenStmt (L _ (XStmtLR x)) = case ghcPass :: GhcPass idL of GhcRn | (ApplicativeStmt _ args _) <- x -> concatMap flattenArg args GhcTc | (ApplicativeStmt _ args _) <- x -> concatMap flattenArg args flattenStmt stmt = [ppr stmt] flattenArg :: (a, ApplicativeArg (GhcPass idL)) -> [SDoc] flattenArg (_, ApplicativeArgOne _ pat expr isBody) | isBody = [ppr expr] -- See Note [Applicative BodyStmt] | otherwise = [pprBindStmt pat expr] flattenArg (_, ApplicativeArgMany _ stmts _ _ _) = concatMap flattenStmt stmts pp_debug = let ap_expr = sep (punctuate (text " |") (map pp_arg args)) in whenPprDebug (if isJust mb_join then text "[join]" else empty) <+> (if lengthAtLeast args 2 then parens else id) ap_expr pp_arg :: (a, ApplicativeArg (GhcPass idL)) -> SDoc pp_arg (_, applicativeArg) = ppr applicativeArg pprBindStmt :: (Outputable pat, Outputable expr) => pat -> expr -> SDoc pprBindStmt pat expr = hsep [ppr pat, larrow, ppr expr] instance (OutputableBndrId idL) => Outputable (ApplicativeArg (GhcPass idL)) where ppr = pprArg pprArg :: forall idL . 
(OutputableBndrId idL) => ApplicativeArg (GhcPass idL) -> SDoc pprArg (ApplicativeArgOne _ pat expr isBody) | isBody = ppr expr -- See Note [Applicative BodyStmt] | otherwise = pprBindStmt pat expr pprArg (ApplicativeArgMany _ stmts return pat ctxt) = ppr pat <+> text "<-" <+> pprDo ctxt (stmts ++ [noLocA (LastStmt noExtField (noLocA return) Nothing noSyntaxExpr)]) pprTransformStmt :: (OutputableBndrId p) => [IdP (GhcPass p)] -> LHsExpr (GhcPass p) -> Maybe (LHsExpr (GhcPass p)) -> SDoc pprTransformStmt bndrs using by = sep [ text "then" <+> whenPprDebug (braces (ppr bndrs)) , nest 2 (ppr using) , nest 2 (pprBy by)] pprTransStmt :: Outputable body => Maybe body -> body -> TransForm -> SDoc pprTransStmt by using ThenForm = sep [ text "then", nest 2 (ppr using), nest 2 (pprBy by)] pprTransStmt by using GroupForm = sep [ text "then group", nest 2 (pprBy by), nest 2 (text "using" <+> ppr using)] pprBy :: Outputable body => Maybe body -> SDoc pprBy Nothing = empty pprBy (Just e) = text "by" <+> ppr e pprDo :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA ) => HsDoFlavour -> [LStmt (GhcPass p) body] -> SDoc pprDo (DoExpr m) stmts = ppr_module_name_prefix m <> text "do" <+> ppr_do_stmts stmts pprDo GhciStmtCtxt stmts = text "do" <+> ppr_do_stmts stmts pprDo (MDoExpr m) stmts = ppr_module_name_prefix m <> text "mdo" <+> ppr_do_stmts stmts pprDo ListComp stmts = brackets $ pprComp stmts pprDo MonadComp stmts = brackets $ pprComp stmts pprArrowExpr :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA ) => [LStmt (GhcPass p) body] -> SDoc pprArrowExpr stmts = text "do" <+> ppr_do_stmts stmts ppr_module_name_prefix :: Maybe ModuleName -> SDoc ppr_module_name_prefix = \case Nothing -> empty Just module_name -> ppr module_name <> char '.' 
-- | Print a list of do-statements vertically, one per line, using
-- 'pprDeeperList' so that very long statement lists are elided at depth.
ppr_do_stmts :: (OutputableBndrId idL, OutputableBndrId idR,
                 Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA,
                 Outputable body)
             => [LStmtLR (GhcPass idL) (GhcPass idR) body] -> SDoc
-- Print a bunch of do stmts
ppr_do_stmts stmts = pprDeeperList vcat (map ppr stmts)

-- | Print the body of a list/monad comprehension.
-- The last statement must be a 'LastStmt' (the comprehension body);
-- anything else is a panic.
pprComp :: (OutputableBndrId p, Outputable body,
            Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA)
        => [LStmt (GhcPass p) body] -> SDoc
pprComp quals     -- Prints:  body | qual1, ..., qualn
  | Just (initStmts, L _ (LastStmt _ body _ _)) <- snocView quals
  = if null initStmts
       -- If there are no statements in a list comprehension besides the last
       -- one, we simply treat it like a normal list. This does arise
       -- occasionally in code that GHC generates, e.g., in implementations of
       -- 'range' for derived 'Ix' instances for product datatypes with exactly
       -- one constructor (e.g., see #12583).
       then ppr body
       else hang (ppr body <+> vbar) 2 (pprQuals initStmts)
  | otherwise
  = pprPanic "pprComp" (pprQuals quals)

-- | Print the qualifiers of a comprehension, comma-separated.
pprQuals :: (OutputableBndrId p, Outputable body,
             Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA)
         => [LStmt (GhcPass p) body] -> SDoc
-- Show list comprehension qualifiers separated by commas
pprQuals quals = interpp'SP quals

{-
************************************************************************
*                                                                      *
                Template Haskell quotation brackets
*                                                                      *
************************************************************************
-}

-- | Finalizers produced by a splice with
-- 'Language.Haskell.TH.Syntax.addModFinalizer'
--
-- See Note [Delaying modFinalizers in untyped splices] in GHC.Rename.Splice. For how
-- this is used.
--
newtype ThModFinalizers = ThModFinalizers [ForeignRef (TH.Q ())]

-- A Data instance which ignores the argument of 'ThModFinalizers'.
-- Manual 'Data' instance: the 'ForeignRef' payload cannot be traversed
-- generically, so gunfold always rebuilds an empty finalizer list.
instance Data ThModFinalizers where
  gunfold _ z _ = z $ ThModFinalizers []
  toConstr  a   = mkConstr (dataTypeOf a) "ThModFinalizers" [] Data.Prefix
  dataTypeOf a  = mkDataType "HsExpr.ThModFinalizers" [toConstr a]

-- See Note [Delaying modFinalizers in untyped splices] in GHC.Rename.Splice.
-- This is the result of splicing a splice. It is produced by
-- the renamer and consumed by the typechecker. It lives only between the two.
data HsUntypedSpliceResult thing  -- 'thing' can be HsExpr or HsType
  = HsUntypedSpliceTop
      { utsplice_result_finalizers :: ThModFinalizers -- ^ TH finalizers produced by the splice.
      , utsplice_result            :: thing           -- ^ The result of splicing;
                                                      --   See Note [Lifecycle of a splice]
      }
  | HsUntypedSpliceNested SplicePointName -- A unique name to identify this splice point

type instance XTypedSplice   GhcPs = EpToken "$$"
type instance XTypedSplice   GhcRn = SplicePointName
type instance XTypedSplice   GhcTc = DelayedSplice

type instance XUntypedSplice GhcPs = NoExtField
type instance XUntypedSplice GhcRn = HsUntypedSpliceResult (HsExpr GhcRn)
type instance XUntypedSplice GhcTc = DataConCantHappen

-- HsUntypedSplice
type instance XUntypedSpliceExpr GhcPs = EpToken "$"
type instance XUntypedSpliceExpr GhcRn = EpToken "$"
type instance XUntypedSpliceExpr GhcTc = DataConCantHappen

type instance XQuasiQuote      p = NoExtField
type instance XXUntypedSplice  p = DataConCantHappen

-- See Note [Running typed splices in the zonker]
-- These are the arguments that are passed to `GHC.Tc.Gen.Splice.runTopSplice`
data DelayedSplice =
  DelayedSplice
   TcLclEnv          -- The local environment to run the splice in
   (LHsExpr GhcRn)   -- The original renamed expression
   TcType            -- The result type of running the splice, unzonked
   (LHsExpr GhcTc)   -- The typechecked expression to run and splice in the result

-- A Data instance which ignores the argument of 'DelayedSplice'.
instance Data DelayedSplice where
  gunfold _ _ _ = panic "DelayedSplice"
  toConstr  a   = mkConstr (dataTypeOf a) "DelayedSplice" [] Data.Prefix
  dataTypeOf a  = mkDataType "HsExpr.DelayedSplice" [toConstr a]

-- See Note [Pending Splices]
type SplicePointName = Name

-- | The syntactic flavour of an untyped splice point.
data UntypedSpliceFlavour
  = UntypedExpSplice
  | UntypedPatSplice
  | UntypedTypeSplice
  | UntypedDeclSplice
  deriving Data

-- | Pending Renamer Splice
data PendingRnSplice
  = PendingRnSplice UntypedSpliceFlavour SplicePointName (LHsExpr GhcRn)

-- | Pending Type-checker Splice
data PendingTcSplice
  = PendingTcSplice SplicePointName (LHsExpr GhcTc)

-- | Print a pending splice as @<name, expr>@ (outer parens stripped
-- from the expression).
pprPendingSplice :: (OutputableBndrId p)
                 => SplicePointName -> LHsExpr (GhcPass p) -> SDoc
pprPendingSplice n e = angleBrackets (ppr n <> comma <+> ppr (stripParensLHsExpr e))

-- | Print a typed splice, preceded by the @$$@ herald.
pprTypedSplice :: (OutputableBndrId p) => Maybe SplicePointName -> LHsExpr (GhcPass p) -> SDoc
pprTypedSplice n e = ppr_splice (text "$$") n e

-- | Print an untyped splice; the 'Bool' selects whether the @$@ herald
-- is shown. Quasi-quotes print in their bracket form instead.
pprUntypedSplice :: forall p. (OutputableBndrId p)
                 => Bool -- Whether to precede the splice with "$"
                 -> Maybe SplicePointName -- Used for pretty printing when exists
                 -> HsUntypedSplice (GhcPass p)
                 -> SDoc
pprUntypedSplice True  n (HsUntypedSpliceExpr _ e) = ppr_splice (text "$") n e
pprUntypedSplice False n (HsUntypedSpliceExpr _ e) = ppr_splice empty n e
pprUntypedSplice _     _ (HsQuasiQuote _ q s)      = ppr_quasi q (unLoc s)

-- Print a quasi-quote as [quoter|contents|]
ppr_quasi :: OutputableBndr p => p -> FastString -> SDoc
ppr_quasi quoter quote = char '[' <> ppr quoter <> vbar <>
                         ppr quote <> text "|]"

-- Shared worker for splice printing: herald, then (in -dppr-debug only)
-- the splice-point name in brackets, then the spliced expression.
ppr_splice :: (OutputableBndrId p)
           => SDoc -> Maybe SplicePointName -> LHsExpr (GhcPass p) -> SDoc
ppr_splice herald mn e
    = herald
      <> (case mn of
            Nothing          -> empty
            Just splice_name -> whenPprDebug (brackets (ppr splice_name)))
      <> ppr e

type instance XExpBr  GhcPs = (BracketAnn (EpUniToken "[|" "⟦") (EpToken "[e|"), EpUniToken "|]" "⟧")
type instance XPatBr  GhcPs = (EpToken "[p|", EpUniToken "|]" "⟧")
type instance XDecBrL GhcPs = (EpToken "[d|", EpUniToken "|]" "⟧", (EpToken "{", EpToken "}"))
type instance XDecBrG GhcPs = NoExtField
type instance XTypBr  GhcPs = (EpToken "[t|", EpUniToken "|]" "⟧")
type instance XVarBr  GhcPs = EpaLocation
type instance XXQuote GhcPs = DataConCantHappen

type instance XExpBr  GhcRn = NoExtField
type instance XPatBr  GhcRn = NoExtField
type instance XDecBrL GhcRn = NoExtField
type instance XDecBrG GhcRn = NoExtField
type instance XTypBr  GhcRn = NoExtField
type instance XVarBr  GhcRn = NoExtField
type instance XXQuote GhcRn = DataConCantHappen

-- See Note [The life cycle of a TH quotation]
type instance XExpBr  GhcTc = DataConCantHappen
type instance XPatBr  GhcTc = DataConCantHappen
type instance XDecBrL GhcTc = DataConCantHappen
type instance XDecBrG GhcTc = DataConCantHappen
type instance XTypBr  GhcTc = DataConCantHappen
type instance XVarBr  GhcTc = DataConCantHappen
type instance XXQuote GhcTc = NoExtField

instance OutputableBndrId p => Outputable (HsQuote (GhcPass p)) where
  ppr = pprHsQuote
    where
      pprHsQuote :: forall p. (OutputableBndrId p)
                 => HsQuote (GhcPass p) -> SDoc
      pprHsQuote (ExpBr _ e)       = thBrackets empty (ppr e)
      pprHsQuote (PatBr _ p)       = thBrackets (char 'p') (ppr p)
      pprHsQuote (DecBrG _ gp)     = thBrackets (char 'd') (ppr gp)
      pprHsQuote (DecBrL _ ds)     = thBrackets (char 'd') (vcat (map ppr ds))
      pprHsQuote (TypBr _ t)       = thBrackets (char 't') (ppr t)
      pprHsQuote (VarBr _ True n)  = char '\'' <> pprPrefixOcc (unLoc n)
      pprHsQuote (VarBr _ False n) = text "''" <> pprPrefixOcc (unLoc n)
      pprHsQuote (XQuote b)        = case ghcPass @p of
        GhcTc -> pprPanic "pprHsQuote: `HsQuote GhcTc` shouldn't exist" (ppr b)
                 -- See Note [The life cycle of a TH quotation]

-- Render a TH bracket: [<kind>| body |]
thBrackets :: SDoc -> SDoc -> SDoc
thBrackets pp_kind pp_body = char '[' <> pp_kind <> vbar <+>
                             pp_body <+> text "|]"

-- Render a typed TH bracket: [|| body ||]
thTyBrackets :: SDoc -> SDoc
thTyBrackets pp_body = text "[||" <+> pp_body <+> text "||]"

instance Outputable PendingRnSplice where
  ppr (PendingRnSplice _ n e) = pprPendingSplice n e

instance Outputable PendingTcSplice where
  ppr (PendingTcSplice n e) = pprPendingSplice n e
-- | Append a "pending(tc)" trailer listing the pending typechecker
-- splices, if there are any; otherwise return the document unchanged.
ppr_with_pending_tc_splices :: SDoc -> [PendingTcSplice] -> SDoc
ppr_with_pending_tc_splices x [] = x
ppr_with_pending_tc_splices x ps = x $$ text "pending(tc)" <+> ppr ps

{-
************************************************************************
*                                                                      *
\subsection{Enumerations and list comprehensions}
*                                                                      *
************************************************************************
-}

instance OutputableBndrId p
         => Outputable (ArithSeqInfo (GhcPass p)) where
    ppr (From e1)             = hcat [ppr e1, pp_dotdot]
    ppr (FromThen e1 e2)      = hcat [ppr e1, comma, space, ppr e2, pp_dotdot]
    ppr (FromTo e1 e3)        = hcat [ppr e1, pp_dotdot, ppr e3]
    ppr (FromThenTo e1 e2 e3)
      = hcat [ppr e1, comma, space, ppr e2, pp_dotdot, ppr e3]

-- The ".." separator used when printing arithmetic sequences
pp_dotdot :: SDoc
pp_dotdot = text " .. "

{-
************************************************************************
*                                                                      *
\subsection{HsMatchCtxt}
*                                                                      *
************************************************************************
-}

type HsMatchContextPs = HsMatchContext (LIdP GhcPs)
type HsMatchContextRn = HsMatchContext (LIdP GhcRn)

type HsStmtContextRn = HsStmtContext (LIdP GhcRn)

-- Debug-style printing of match contexts (constructor names, not user-facing
-- prose; see pprMatchContext and friends below for the user-facing forms).
instance Outputable fn => Outputable (HsMatchContext fn) where
  ppr m@(FunRhs{})         = text "FunRhs" <+> ppr (mc_fun m) <+> ppr (mc_fixity m)
  ppr CaseAlt              = text "CaseAlt"
  ppr (LamAlt lam_variant) = text "LamAlt" <+> ppr lam_variant
  ppr IfAlt                = text "IfAlt"
  ppr (ArrowMatchCtxt c)   = text "ArrowMatchCtxt" <+> ppr c
  ppr PatBindRhs           = text "PatBindRhs"
  ppr PatBindGuards        = text "PatBindGuards"
  ppr RecUpd               = text "RecUpd"
  ppr (StmtCtxt _)         = text "StmtCtxt _"
  ppr ThPatSplice          = text "ThPatSplice"
  ppr ThPatQuote           = text "ThPatQuote"
  ppr PatSyn               = text "PatSyn"
  ppr LazyPatCtx           = text "LazyPatCtx"

instance Outputable HsLamVariant where
  ppr = text . \case
    LamSingle -> "LamSingle"
    LamCase   -> "LamCase"
    LamCases  -> "LamCases"

-- | User-facing keyword for a lambda variant, used in error messages.
lamCaseKeyword :: HsLamVariant -> SDoc
lamCaseKeyword LamSingle = text "lambda"
lamCaseKeyword LamCase   = text "\\case"
lamCaseKeyword LamCases  = text "\\cases"

-- | Print an external source location triple: (file, start, end),
-- where start/end are (line, column) pairs.
pprExternalSrcLoc :: (StringLiteral,(Int,Int),(Int,Int)) -> SDoc
pprExternalSrcLoc (StringLiteral _ src _,(n1,n2),(n3,n4))
  = ppr (src,(n1,n2),(n3,n4))

instance Outputable HsArrowMatchContext where
  ppr ProcExpr                  = text "ProcExpr"
  ppr ArrowCaseAlt              = text "ArrowCaseAlt"
  ppr (ArrowLamAlt lam_variant) = parens $ text "ArrowLamCaseAlt" <+> ppr lam_variant

pprHsArrType :: HsArrAppType -> SDoc
pprHsArrType HsHigherOrderApp = text "higher order arrow application"
pprHsArrType HsFirstOrderApp  = text "first order arrow application"

-----------------

instance Outputable fn => Outputable (HsStmtContext fn) where
    ppr = pprStmtContext

-- Used to generate the string for a *runtime* error message
matchContextErrString :: Outputable fn => HsMatchContext fn -> SDoc
matchContextErrString (FunRhs{mc_fun=fun})         = text "function" <+> ppr fun
matchContextErrString CaseAlt                      = text "case"
matchContextErrString (LamAlt lam_variant)         = lamCaseKeyword lam_variant
matchContextErrString IfAlt                        = text "multi-way if"
matchContextErrString PatBindRhs                   = text "pattern binding"
matchContextErrString PatBindGuards                = text "pattern binding guards"
matchContextErrString RecUpd                       = text "record update"
matchContextErrString (ArrowMatchCtxt c)           = matchArrowContextErrString c
matchContextErrString ThPatSplice                  = panic "matchContextErrString"  -- Not used at runtime
matchContextErrString ThPatQuote                   = panic "matchContextErrString"  -- Not used at runtime
matchContextErrString PatSyn                       = text "pattern synonym"
-- Parallel/transformed contexts report the context they wrap
matchContextErrString (StmtCtxt (ParStmtCtxt c))   = matchContextErrString (StmtCtxt c)
matchContextErrString (StmtCtxt (TransStmtCtxt c)) = matchContextErrString (StmtCtxt c)
matchContextErrString (StmtCtxt (PatGuard _))      = text "pattern guard"
matchContextErrString (StmtCtxt (ArrowExpr))       = text "'do' block"
matchContextErrString (StmtCtxt (HsDoStmt flavour)) = matchDoContextErrString flavour
matchContextErrString LazyPatCtx                   = text "irrefutable pattern"

-- Runtime error-message string for arrow match contexts
matchArrowContextErrString :: HsArrowMatchContext -> SDoc
matchArrowContextErrString ProcExpr                 = text "proc"
matchArrowContextErrString ArrowCaseAlt             = text "case"
matchArrowContextErrString (ArrowLamAlt LamSingle)  = text "kappa"
matchArrowContextErrString (ArrowLamAlt lam_variant) = lamCaseKeyword lam_variant

-- Runtime error-message string for do-block flavours
matchDoContextErrString :: HsDoFlavour -> SDoc
matchDoContextErrString GhciStmtCtxt = text "interactive GHCi command"
matchDoContextErrString (DoExpr m)   = prependQualified m (text "'do' block")
matchDoContextErrString (MDoExpr m)  = prependQualified m (text "'mdo' block")
matchDoContextErrString ListComp     = text "list comprehension"
matchDoContextErrString MonadComp    = text "monad comprehension"

-- | Print a match together with an "In <context>:" header, for error
-- messages.
pprMatchInCtxt :: (OutputableBndrId idR, Outputable body)
               => Match (GhcPass idR) body -> SDoc
pprMatchInCtxt match  = hang (text "In" <+> pprMatchContext (m_ctxt match)
                                        <> colon)
                             4 (pprMatch match)

-- | Print a statement together with its context, for error messages.
pprStmtInCtxt :: (OutputableBndrId idL,
                  OutputableBndrId idR,
                  Outputable fn,
                  Outputable body,
                  Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA)
              => HsStmtContext fn
              -> StmtLR (GhcPass idL) (GhcPass idR) body
              -> SDoc
pprStmtInCtxt ctxt (LastStmt _ e _ _)
  | isComprehensionContext ctxt      -- For [ e | .. ], do not mutter about "stmts"
  = hang (text "In the expression:") 2 (ppr e)

pprStmtInCtxt ctxt stmt
  = hang (text "In a stmt of" <+> pprAStmtContext ctxt <> colon)
       2 (ppr_stmt stmt)
  where
    -- For Group and Transform Stmts, don't print the nested stmts!
    ppr_stmt (TransStmt { trS_by = by, trS_using = using
                        , trS_form = form }) = pprTransStmt by using form
    ppr_stmt stmt = pprStmt stmt

-- | Print a match context with an "a"/"an" article, e.g.
-- "an equation for ...", "a case alternative".
pprMatchContext :: Outputable fn => HsMatchContext fn -> SDoc
pprMatchContext ctxt
  | want_an ctxt = text "an" <+> pprMatchContextNoun ctxt
  | otherwise    = text "a"  <+> pprMatchContextNoun ctxt
  where
    want_an (FunRhs {})                              = True -- Use "an" in front
    want_an (ArrowMatchCtxt ProcExpr)                = True
    want_an (ArrowMatchCtxt (ArrowLamAlt LamSingle)) = True
    want_an LazyPatCtx                               = True
    want_an _                                        = False

-- | Singular noun phrase describing a match context.
pprMatchContextNoun :: Outputable fn => HsMatchContext fn -> SDoc
pprMatchContextNoun (FunRhs {mc_fun=fun})  = text "equation for"
                                             <+> quotes (ppr fun)
pprMatchContextNoun CaseAlt                = text "case alternative"
pprMatchContextNoun (LamAlt LamSingle)     = text "lambda abstraction"
pprMatchContextNoun (LamAlt lam_variant)   = lamCaseKeyword lam_variant
                                             <+> text "alternative"
pprMatchContextNoun IfAlt                  = text "multi-way if alternative"
pprMatchContextNoun RecUpd                 = text "record update"
pprMatchContextNoun ThPatSplice            = text "Template Haskell pattern splice"
pprMatchContextNoun ThPatQuote             = text "Template Haskell pattern quotation"
pprMatchContextNoun PatBindRhs             = text "pattern binding"
pprMatchContextNoun PatBindGuards          = text "pattern binding guards"
pprMatchContextNoun (ArrowMatchCtxt c)     = pprArrowMatchContextNoun c
pprMatchContextNoun (StmtCtxt ctxt)        = text "pattern binding in"
                                             $$ pprAStmtContext ctxt
pprMatchContextNoun PatSyn                 = text "pattern synonym declaration"
pprMatchContextNoun LazyPatCtx             = text "irrefutable pattern"

-- | Plural noun phrase describing a match context; falls back to the
-- singular noun with an 's' appended.
pprMatchContextNouns :: Outputable fn => HsMatchContext fn -> SDoc
pprMatchContextNouns (FunRhs {mc_fun=fun}) = text "equations for"
                                             <+> quotes (ppr fun)
pprMatchContextNouns PatBindGuards         = text "pattern binding guards"
pprMatchContextNouns (ArrowMatchCtxt c)    = pprArrowMatchContextNouns c
pprMatchContextNouns (StmtCtxt ctxt)       = text "pattern bindings in"
                                             $$ pprAStmtContext ctxt
pprMatchContextNouns ctxt                  = pprMatchContextNoun ctxt <> char 's'
pprArrowMatchContextNoun :: HsArrowMatchContext -> SDoc pprArrowMatchContextNoun ProcExpr = text "arrow proc pattern" pprArrowMatchContextNoun ArrowCaseAlt = text "case alternative within arrow notation" pprArrowMatchContextNoun (ArrowLamAlt LamSingle) = text "arrow kappa abstraction" pprArrowMatchContextNoun (ArrowLamAlt lam_variant) = lamCaseKeyword lam_variant <+> text "alternative within arrow notation" pprArrowMatchContextNouns :: HsArrowMatchContext -> SDoc pprArrowMatchContextNouns ArrowCaseAlt = text "case alternatives within arrow notation" pprArrowMatchContextNouns (ArrowLamAlt LamSingle) = text "arrow kappa abstractions" pprArrowMatchContextNouns (ArrowLamAlt lam_variant) = lamCaseKeyword lam_variant <+> text "alternatives within arrow notation" pprArrowMatchContextNouns ctxt = pprArrowMatchContextNoun ctxt <> char 's' ----------------- pprAStmtContext, pprStmtContext :: Outputable fn => HsStmtContext fn -> SDoc pprAStmtContext (HsDoStmt flavour) = pprAHsDoFlavour flavour pprAStmtContext ctxt = text "a" <+> pprStmtContext ctxt ----------------- pprStmtContext (HsDoStmt flavour) = pprHsDoFlavour flavour pprStmtContext (PatGuard ctxt) = text "pattern guard for" $$ pprMatchContext ctxt pprStmtContext ArrowExpr = text "'do' block in an arrow command" -- Drop the inner contexts when reporting errors, else we get -- Unexpected transform statement -- in a transformed branch of -- transformed branch of -- transformed branch of monad comprehension pprStmtContext (ParStmtCtxt c) = ifPprDebug (sep [text "parallel branch of", pprAStmtContext c]) (pprStmtContext c) pprStmtContext (TransStmtCtxt c) = ifPprDebug (sep [text "transformed branch of", pprAStmtContext c]) (pprStmtContext c) pprStmtCat :: forall p body . 
IsPass p => Stmt (GhcPass p) body -> SDoc pprStmtCat (TransStmt {}) = text "transform" pprStmtCat (LastStmt {}) = text "return expression" pprStmtCat (BodyStmt {}) = text "body" pprStmtCat (BindStmt {}) = text "binding" pprStmtCat (LetStmt {}) = text "let" pprStmtCat (RecStmt {}) = text "rec" pprStmtCat (ParStmt {}) = text "parallel" pprStmtCat (XStmtLR _) = text "applicative" pprAHsDoFlavour, pprHsDoFlavour :: HsDoFlavour -> SDoc pprAHsDoFlavour flavour = article <+> pprHsDoFlavour flavour where pp_an = text "an" pp_a = text "a" article = case flavour of MDoExpr Nothing -> pp_an GhciStmtCtxt -> pp_an _ -> pp_a pprHsDoFlavour (DoExpr m) = prependQualified m (text "'do' block") pprHsDoFlavour (MDoExpr m) = prependQualified m (text "'mdo' block") pprHsDoFlavour ListComp = text "list comprehension" pprHsDoFlavour MonadComp = text "monad comprehension" pprHsDoFlavour GhciStmtCtxt = text "interactive GHCi command" prependQualified :: Maybe ModuleName -> SDoc -> SDoc prependQualified Nothing t = t prependQualified (Just _) t = text "qualified" <+> t {- ************************************************************************ * * FieldLabelStrings * * ************************************************************************ -} instance (UnXRec p, Outputable (XRec p FieldLabelString)) => Outputable (FieldLabelStrings p) where ppr (FieldLabelStrings flds) = hcat (punctuate dot (map (ppr . unXRec @p) flds)) instance (UnXRec p, Outputable (XRec p FieldLabelString)) => OutputableBndr (FieldLabelStrings p) where pprInfixOcc = pprFieldLabelStrings pprPrefixOcc = pprFieldLabelStrings instance (UnXRec p, Outputable (XRec p FieldLabelString)) => OutputableBndr (Located (FieldLabelStrings p)) where pprInfixOcc = pprInfixOcc . unLoc pprPrefixOcc = pprInfixOcc . unLoc pprFieldLabelStrings :: forall p. (UnXRec p, Outputable (XRec p FieldLabelString)) => FieldLabelStrings p -> SDoc pprFieldLabelStrings (FieldLabelStrings flds) = hcat (punctuate dot (map (ppr . 
unXRec @p) flds)) pprPrefixFastString :: FastString -> SDoc pprPrefixFastString fs = pprPrefixOcc (mkVarUnqual fs) instance UnXRec p => Outputable (DotFieldOcc p) where ppr (DotFieldOcc _ s) = (pprPrefixFastString . field_label . unXRec @p) s ppr XDotFieldOcc{} = text "XDotFieldOcc" {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsExpr (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (HsExpr (GhcPass p))] = SrcSpanAnnC type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsExpr (GhcPass pr))))] = SrcSpanAnnLW type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr))))] = SrcSpanAnnLW type instance Anno (HsCmd (GhcPass p)) = SrcSpanAnnA type instance Anno (HsCmdTop (GhcPass p)) = EpAnnCO type instance Anno [LocatedA (Match (GhcPass p) (LocatedA (HsExpr (GhcPass p))))] = SrcSpanAnnLW type instance Anno [LocatedA (Match (GhcPass p) (LocatedA (HsCmd (GhcPass p))))] = SrcSpanAnnLW type instance Anno (Match (GhcPass p) (LocatedA (HsExpr (GhcPass p)))) = SrcSpanAnnA type instance Anno (Match (GhcPass p) (LocatedA (HsCmd (GhcPass p)))) = SrcSpanAnnA type instance Anno [LocatedA (Pat (GhcPass p))] = EpaLocation type instance Anno (GRHS (GhcPass p) (LocatedA (HsExpr (GhcPass p)))) = EpAnnCO type instance Anno (GRHS (GhcPass p) (LocatedA (HsCmd (GhcPass p)))) = EpAnnCO type instance Anno (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (body (GhcPass pr)))) = SrcSpanAnnA type instance Anno (HsUntypedSplice (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (body (GhcPass pr))))] = SrcSpanAnnLW type instance Anno (FieldLabelStrings (GhcPass p)) = EpAnnCO type instance Anno FieldLabelString = SrcSpanAnnN type instance Anno FastString = EpAnnCO -- Used in HsQuasiQuote and perhaps elsewhere type instance Anno 
(DotFieldOcc (GhcPass p)) = EpAnnCO instance (HasAnnotation (Anno a)) => WrapXRec (GhcPass p) a where wrapXRec = noLocA ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Expr.hs-boot0000644000000000000000000000346607346545000020504 0ustar0000000000000000{-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable module GHC.Hs.Expr where import GHC.Utils.Outputable ( SDoc, Outputable ) import Language.Haskell.Syntax.Pat ( LPat ) import {-# SOURCE #-} GHC.Hs.Pat () -- for Outputable import Language.Haskell.Syntax.Expr ( HsExpr, LHsExpr , HsCmd , MatchGroup , GRHSs , HsUntypedSplice ) import GHC.Hs.Extension ( OutputableBndrId, GhcPass ) import GHC.Types.Name ( Name ) import Data.Bool ( Bool ) import Data.Maybe ( Maybe ) type SplicePointName = Name instance (OutputableBndrId p) => Outputable (HsExpr (GhcPass p)) instance (OutputableBndrId p) => Outputable (HsCmd (GhcPass p)) pprLExpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc pprExpr :: (OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc pprTypedSplice :: (OutputableBndrId p) => Maybe SplicePointName -> LHsExpr (GhcPass p) -> SDoc pprUntypedSplice :: (OutputableBndrId p) => Bool -> Maybe SplicePointName -> HsUntypedSplice (GhcPass p) -> SDoc pprPatBind :: forall bndr p . 
(OutputableBndrId bndr, OutputableBndrId p) => LPat (GhcPass bndr) -> GRHSs (GhcPass p) (LHsExpr (GhcPass p)) -> SDoc pprFunBind :: (OutputableBndrId idR) => MatchGroup (GhcPass idR) (LHsExpr (GhcPass idR)) -> SDoc data ThModFinalizers type role HsUntypedSpliceResult representational data HsUntypedSpliceResult thing = HsUntypedSpliceTop { utsplice_result_finalizers :: ThModFinalizers , utsplice_result :: thing } | HsUntypedSpliceNested SplicePointName ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Extension.hs0000644000000000000000000002220407346545000020570 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} -- for pprIfTc, etc. {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE EmptyDataDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE UndecidableSuperClasses #-} -- for IsPass; see Note [NoGhcTc] {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable module GHC.Hs.Extension where -- This module captures the type families to precisely identify the extension -- points for GHC.Hs syntax import GHC.Prelude import Data.Data hiding ( Fixity ) import Language.Haskell.Syntax.Extension import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Var import GHC.Utils.Outputable hiding ((<>)) import GHC.Types.SrcLoc (GenLocated(..), unLoc) import GHC.Utils.Panic import GHC.Parser.Annotation {- Note [IsPass] ~~~~~~~~~~~~~ One challenge with the Trees That Grow approach is that we sometimes have different information in different passes. 
For example, we have type instance XViaStrategy GhcPs = LHsSigType GhcPs type instance XViaStrategy GhcRn = LHsSigType GhcRn type instance XViaStrategy GhcTc = Type This means that printing a DerivStrategy (which contains an XViaStrategy) might need to print a LHsSigType, or it might need to print a type. Yet we want one Outputable instance for a DerivStrategy, instead of one per pass. We could have a large constraint, including e.g. (Outputable (XViaStrategy p), Outputable (XViaStrategy GhcTc)), and pass that around in every context where we might output a DerivStrategy. But a simpler alternative is to pass a witness to whichever pass we're in. When we pattern-match on that (GADT) witness, we learn the pass identity and can then print away. To wit, we get the definition of GhcPass and the functions isPass. These allow us to do away with big constraints, passing around all manner of dictionaries we might or might not use. It does mean that we have to manually use isPass when printing, but these places are few. See Note [NoGhcTc] about the superclass constraint to IsPass. Note [NoGhcTc] ~~~~~~~~~~~~~~ An expression is parsed into HsExpr GhcPs, renamed into HsExpr GhcRn, and then type-checked into HsExpr GhcTc. Not so for types! These get parsed into HsType GhcPs, renamed into HsType GhcRn, and then type-checked into Type. We never build an HsType GhcTc. Why do this? Because we need to be able to compare type-checked types for equality, and we don't want to do this with HsType. This causes wrinkles within the AST, where we normally think that the whole AST travels through the GhcPs --> GhcRn --> GhcTc pipeline as one. So we have the NoGhcTc type family, which just replaces GhcTc with GhcRn, so that user-written types can be preserved (as HsType GhcRn) even in e.g. HsExpr GhcTc. 
For example, this is used in ExprWithTySig: | ExprWithTySig (XExprWithTySig p) (LHsExpr p) (LHsSigWcType (NoGhcTc p)) If we have (e :: ty), we still want to be able to print that (with the :: ty) after type-checking. So we retain the LHsSigWcType GhcRn, even in an HsExpr GhcTc. That's what NoGhcTc does. When we're printing the type annotation, we need to know (Outputable (LHsSigWcType GhcRn)), even though we've assumed only that (OutputableBndrId GhcTc). We thus must be able to prove OutputableBndrId (NoGhcTc p) from OutputableBndrId p. The extra constraints in OutputableBndrId and the superclass constraints of IsPass allow this. Note that the superclass constraint of IsPass is *recursive*: it asserts that IsPass (NoGhcTcPass p) holds. For this to make sense, we need -XUndecidableSuperClasses and the other constraint, saying that NoGhcTcPass is idempotent. -} -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation type instance XRec (GhcPass p) a = GenLocated (Anno a) a type instance Anno RdrName = SrcSpanAnnN type instance Anno Name = SrcSpanAnnN type instance Anno Id = SrcSpanAnnN type IsSrcSpanAnn p a = ( Anno (IdGhcP p) ~ EpAnn a, NoAnn a, IsPass p) instance UnXRec (GhcPass p) where unXRec = unLoc instance MapXRec (GhcPass p) where mapXRec = fmap -- instance WrapXRec (GhcPass p) a where -- wrapXRec = noLocA {- Note [DataConCantHappen and strict fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, any unused TTG extension constructor will generally look like the following: type instance XXHsDecl (GhcPass _) = DataConCantHappen data HsDecl p = ... | XHsDecl !(XXHsDecl p) The field of type `XXHsDecl p` is strict for a good reason: it allows the pattern-match coverage checker to conclude that any matches against XHsDecl are unreachable whenever `p ~ GhcPass _`. To see why this is the case, consider the following function which consumes an HsDecl: ex :: HsDecl GhcPs -> HsDecl GhcRn ... 
ex (XHsDecl nec) = dataConCantHappen nec Because `p` equals GhcPs (i.e., GhcPass 'Parsed), XHsDecl's field has the type DataConCantHappen. But since (1) the field is strict and (2) DataConCantHappen is an empty data type, there is no possible way to reach the right-hand side of the XHsDecl case. As a result, the coverage checker concludes that the XHsDecl case is inaccessible, so it can be removed. (See Note [Strict argument type constraints] in GHC.HsToCore.Pmc.Solver for more on how this works.) Bottom line: if you add a TTG extension constructor that uses DataConCantHappen, make sure that any uses of it as a field are strict. -} -- | Used as a data type index for the hsSyn AST; also serves -- as a singleton type for Pass data GhcPass (c :: Pass) where GhcPs :: GhcPass 'Parsed GhcRn :: GhcPass 'Renamed GhcTc :: GhcPass 'Typechecked -- This really should never be entered, but the data-deriving machinery -- needs the instance to exist. instance Typeable p => Data (GhcPass p) where gunfold _ _ _ = panic "instance Data GhcPass" toConstr _ = panic "instance Data GhcPass" dataTypeOf _ = panic "instance Data GhcPass" data Pass = Parsed | Renamed | Typechecked deriving (Data) -- Type synonyms as a shorthand for tagging type GhcPs = GhcPass 'Parsed -- Output of parser type GhcRn = GhcPass 'Renamed -- Output of renamer type GhcTc = GhcPass 'Typechecked -- Output of typechecker -- | Allows us to check what phase we're in at GHC's runtime. -- For example, this class allows us to write -- -- @ -- f :: forall p. IsPass p => HsExpr (GhcPass p) -> blah -- f e = case ghcPass @p of -- GhcPs -> ... in this RHS we have HsExpr GhcPs... -- GhcRn -> ... in this RHS we have HsExpr GhcRn... -- GhcTc -> ... in this RHS we have HsExpr GhcTc... -- @ -- -- which is very useful, for example, when pretty-printing. -- See Note [IsPass]. 
class ( NoGhcTcPass (NoGhcTcPass p) ~ NoGhcTcPass p , IsPass (NoGhcTcPass p) ) => IsPass p where ghcPass :: GhcPass p instance IsPass 'Parsed where ghcPass = GhcPs instance IsPass 'Renamed where ghcPass = GhcRn instance IsPass 'Typechecked where ghcPass = GhcTc type instance IdP (GhcPass p) = IdGhcP p -- | Maps the "normal" id type for a given GHC pass type family IdGhcP pass where IdGhcP 'Parsed = RdrName IdGhcP 'Renamed = Name IdGhcP 'Typechecked = Id -- | Marks that a field uses the GhcRn variant even when the pass -- parameter is GhcTc. Useful for storing HsTypes in GHC.Hs.Exprs, say, because -- HsType GhcTc should never occur. -- See Note [NoGhcTc] -- Breaking it up this way, GHC can figure out that the result is a GhcPass type instance NoGhcTc (GhcPass pass) = GhcPass (NoGhcTcPass pass) type family NoGhcTcPass (p :: Pass) :: Pass where NoGhcTcPass 'Typechecked = 'Renamed NoGhcTcPass other = other -- |Constraint type to bundle up the requirement for 'OutputableBndr' on both -- the @id@ and the 'NoGhcTc' of it. See Note [NoGhcTc]. type OutputableBndrId pass = ( OutputableBndr (IdGhcP pass) , OutputableBndr (IdGhcP (NoGhcTcPass pass)) , Outputable (GenLocated (Anno (IdGhcP pass)) (IdGhcP pass)) , Outputable (GenLocated (Anno (IdGhcP (NoGhcTcPass pass))) (IdGhcP (NoGhcTcPass pass))) , IsPass pass ) -- useful helper functions: pprIfPs :: forall p. IsPass p => (p ~ 'Parsed => SDoc) -> SDoc pprIfPs pp = case ghcPass @p of GhcPs -> pp _ -> empty pprIfRn :: forall p. IsPass p => (p ~ 'Renamed => SDoc) -> SDoc pprIfRn pp = case ghcPass @p of GhcRn -> pp _ -> empty pprIfTc :: forall p. 
IsPass p => (p ~ 'Typechecked => SDoc) -> SDoc pprIfTc pp = case ghcPass @p of GhcTc -> pp _ -> empty --- Outputable instance Outputable NoExtField where ppr _ = text "NoExtField" instance Outputable DataConCantHappen where ppr = dataConCantHappen ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/ImpExp.hs0000644000000000000000000003753307346545000020031 0ustar0000000000000000{-# OPTIONS_GHC -Wno-orphans #-} -- Outputable and IEWrappedName {-# LANGUAGE DataKinds #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.ImpExp: Abstract syntax: imports, exports, interfaces -} module GHC.Hs.ImpExp ( module Language.Haskell.Syntax.ImpExp , module GHC.Hs.ImpExp ) where import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Module.Name import Language.Haskell.Syntax.ImpExp import GHC.Prelude import GHC.Types.SourceText ( SourceText(..) ) import GHC.Types.SrcLoc import GHC.Types.Name import GHC.Types.PkgQual import GHC.Parser.Annotation import GHC.Hs.Extension import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Unit.Module.Warnings import Data.Data import Data.Maybe import GHC.Hs.Doc (LHsDoc) {- ************************************************************************ * * Import and export declaration lists * * ************************************************************************ One per import declaration in a module. -} type instance Anno (ImportDecl (GhcPass p)) = SrcSpanAnnA -- | Given two possible located 'qualified' tokens, compute a style -- (in a conforming Haskell program only one of the two can be not -- 'Nothing'). This is called from "GHC.Parser". 
importDeclQualifiedStyle :: Maybe (EpToken "qualified") -> Maybe (EpToken "qualified") -> (Maybe (EpToken "qualified"), ImportDeclQualifiedStyle) importDeclQualifiedStyle mPre mPost = if isJust mPre then (mPre, QualifiedPre) else if isJust mPost then (mPost,QualifiedPost) else (Nothing, NotQualified) -- | Convenience function to answer the question if an import decl. is -- qualified. isImportDeclQualified :: ImportDeclQualifiedStyle -> Bool isImportDeclQualified NotQualified = False isImportDeclQualified _ = True type instance ImportDeclPkgQual GhcPs = RawPkgQual type instance ImportDeclPkgQual GhcRn = PkgQual type instance ImportDeclPkgQual GhcTc = PkgQual type instance XCImportDecl GhcPs = XImportDeclPass type instance XCImportDecl GhcRn = XImportDeclPass type instance XCImportDecl GhcTc = DataConCantHappen data XImportDeclPass = XImportDeclPass { ideclAnn :: EpAnn EpAnnImportDecl , ideclSourceText :: SourceText -- Note [Pragma source text] in "GHC.Types.SourceText" , ideclImplicit :: Bool -- ^ GHC generates an `ImportDecl` to represent the invisible `import Prelude` -- that appears in any file that omits `import Prelude`, setting -- this field to indicate that the import doesn't appear in the -- original source. 
True => implicit import (of Prelude) } deriving (Data) type instance XXImportDecl (GhcPass _) = DataConCantHappen type instance Anno ModuleName = SrcSpanAnnA type instance Anno [LocatedA (IE (GhcPass p))] = SrcSpanAnnLI deriving instance Data (IEWrappedName GhcPs) deriving instance Data (IEWrappedName GhcRn) deriving instance Data (IEWrappedName GhcTc) deriving instance Eq (IEWrappedName GhcPs) deriving instance Eq (IEWrappedName GhcRn) deriving instance Eq (IEWrappedName GhcTc) -- --------------------------------------------------------------------- -- API Annotations types data EpAnnImportDecl = EpAnnImportDecl { importDeclAnnImport :: EpToken "import" -- ^ The location of the @import@ keyword , importDeclAnnPragma :: Maybe (EpaLocation, EpToken "#-}") -- ^ The locations of @{-# SOURCE@ and @#-}@ respectively , importDeclAnnSafe :: Maybe (EpToken "safe") -- ^ The location of the @safe@ keyword , importDeclAnnQualified :: Maybe (EpToken "qualified") -- ^ The location of the @qualified@ keyword , importDeclAnnPackage :: Maybe EpaLocation -- ^ The location of the package name (when using @-XPackageImports@) , importDeclAnnAs :: Maybe (EpToken "as") -- ^ The location of the @as@ keyword } deriving (Data) instance NoAnn EpAnnImportDecl where noAnn = EpAnnImportDecl noAnn Nothing Nothing Nothing Nothing Nothing -- --------------------------------------------------------------------- simpleImportDecl :: ModuleName -> ImportDecl GhcPs simpleImportDecl mn = ImportDecl { ideclExt = XImportDeclPass noAnn NoSourceText False, ideclName = noLocA mn, ideclPkgQual = NoRawPkgQual, ideclSource = NotBoot, ideclSafe = False, ideclQualified = NotQualified, ideclAs = Nothing, ideclImportList = Nothing } instance (OutputableBndrId p , Outputable (Anno (IE (GhcPass p))) , Outputable (ImportDeclPkgQual (GhcPass p))) => Outputable (ImportDecl (GhcPass p)) where ppr (ImportDecl { ideclExt = impExt, ideclName = mod' , ideclPkgQual = pkg , ideclSource = from, ideclSafe = safe , 
ideclQualified = qual , ideclAs = as, ideclImportList = spec }) = hang (hsep [text "import", ppr_imp impExt from, pp_implicit impExt, pp_safe safe, pp_qual qual False, ppr pkg, ppr mod', pp_qual qual True, pp_as as]) 4 (pp_spec spec) where pp_implicit ext = let implicit = case ghcPass @p of GhcPs | XImportDeclPass { ideclImplicit = implicit } <- ext -> implicit GhcRn | XImportDeclPass { ideclImplicit = implicit } <- ext -> implicit GhcTc -> dataConCantHappen ext in if implicit then text "(implicit)" else empty pp_qual QualifiedPre False = text "qualified" -- Prepositive qualifier/prepositive position. pp_qual QualifiedPost True = text "qualified" -- Postpositive qualifier/postpositive position. pp_qual QualifiedPre True = empty -- Prepositive qualifier/postpositive position. pp_qual QualifiedPost False = empty -- Postpositive qualifier/prepositive position. pp_qual NotQualified _ = empty pp_safe False = empty pp_safe True = text "safe" pp_as Nothing = empty pp_as (Just a) = text "as" <+> ppr a ppr_imp ext IsBoot = let mSrcText = case ghcPass @p of GhcPs | XImportDeclPass { ideclSourceText = mst } <- ext -> mst GhcRn | XImportDeclPass { ideclSourceText = mst } <- ext -> mst GhcTc -> dataConCantHappen ext in case mSrcText of NoSourceText -> text "{-# SOURCE #-}" SourceText src -> ftext src <+> text "#-}" ppr_imp _ NotBoot = empty pp_spec Nothing = empty pp_spec (Just (Exactly, (L _ ies))) = ppr_ies ies pp_spec (Just (EverythingBut, (L _ ies))) = text "hiding" <+> ppr_ies ies ppr_ies [] = text "()" ppr_ies ies = char '(' <+> interpp'SP ies <+> char ')' {- ************************************************************************ * * \subsection{Imported and exported entities} * * ************************************************************************ -} type instance XIEName (GhcPass _) = NoExtField type instance XIEDefault (GhcPass _) = EpToken "default" type instance XIEPattern (GhcPass _) = EpToken "pattern" type instance XIEType (GhcPass _) = EpToken "type" type 
instance XXIEWrappedName (GhcPass _) = DataConCantHappen type instance Anno (IEWrappedName (GhcPass _)) = SrcSpanAnnA type instance Anno (IE (GhcPass p)) = SrcSpanAnnA -- The additional field of type 'Maybe (WarningTxt pass)' holds information -- about export deprecation annotations and is thus set to Nothing when `IE` -- is used in an import list (since export deprecation can only be used in exports) type instance XIEVar GhcPs = Maybe (LWarningTxt GhcPs) type instance XIEVar GhcRn = Maybe (LWarningTxt GhcRn) type instance XIEVar GhcTc = NoExtField -- The additional field of type 'Maybe (WarningTxt pass)' holds information -- about export deprecation annotations and is thus set to Nothing when `IE` -- is used in an import list (since export deprecation can only be used in exports) type instance XIEThingAbs GhcPs = Maybe (LWarningTxt GhcPs) type instance XIEThingAbs GhcRn = Maybe (LWarningTxt GhcRn) type instance XIEThingAbs GhcTc = () -- The additional field of type 'Maybe (WarningTxt pass)' holds information -- about export deprecation annotations and is thus set to Nothing when `IE` -- is used in an import list (since export deprecation can only be used in exports) type instance XIEThingAll GhcPs = (Maybe (LWarningTxt GhcPs), (EpToken "(", EpToken "..", EpToken ")")) type instance XIEThingAll GhcRn = (Maybe (LWarningTxt GhcRn), (EpToken "(", EpToken "..", EpToken ")")) type instance XIEThingAll GhcTc = (EpToken "(", EpToken "..", EpToken ")") -- The additional field of type 'Maybe (WarningTxt pass)' holds information -- about export deprecation annotations and is thus set to Nothing when `IE` -- is used in an import list (since export deprecation can only be used in exports) type instance XIEThingWith GhcPs = (Maybe (LWarningTxt GhcPs), IEThingWithAnns) type instance XIEThingWith GhcRn = (Maybe (LWarningTxt GhcRn), IEThingWithAnns) type instance XIEThingWith GhcTc = IEThingWithAnns type IEThingWithAnns = (EpToken "(", EpToken "..", EpToken ",", EpToken ")") -- 
The additional field of type 'Maybe (WarningTxt pass)' holds information -- about export deprecation annotations and is thus set to Nothing when `IE` -- is used in an import list (since export deprecation can only be used in exports) type instance XIEModuleContents GhcPs = (Maybe (LWarningTxt GhcPs), EpToken "module") type instance XIEModuleContents GhcRn = Maybe (LWarningTxt GhcRn) type instance XIEModuleContents GhcTc = NoExtField type instance XIEGroup (GhcPass _) = NoExtField type instance XIEDoc (GhcPass _) = NoExtField type instance XIEDocNamed (GhcPass _) = NoExtField type instance XXIE (GhcPass _) = DataConCantHappen type instance Anno (LocatedA (IE (GhcPass p))) = SrcSpanAnnA ieName :: IE (GhcPass p) -> IdP (GhcPass p) ieName (IEVar _ (L _ n) _) = ieWrappedName n ieName (IEThingAbs _ (L _ n) _) = ieWrappedName n ieName (IEThingWith _ (L _ n) _ _ _) = ieWrappedName n ieName (IEThingAll _ (L _ n) _) = ieWrappedName n ieName _ = panic "ieName failed pattern match!" ieNames :: IE (GhcPass p) -> [IdP (GhcPass p)] ieNames (IEVar _ (L _ n) _) = [ieWrappedName n] ieNames (IEThingAbs _ (L _ n) _) = [ieWrappedName n] ieNames (IEThingAll _ (L _ n) _) = [ieWrappedName n] ieNames (IEThingWith _ (L _ n) _ ns _) = ieWrappedName n : map (ieWrappedName . unLoc) ns -- NB the above case does not include names of field selectors ieNames (IEModuleContents {}) = [] ieNames (IEGroup {}) = [] ieNames (IEDoc {}) = [] ieNames (IEDocNamed {}) = [] ieDeprecation :: forall p. IsPass p => IE (GhcPass p) -> Maybe (WarningTxt (GhcPass p)) ieDeprecation = fmap unLoc . 
ie_deprecation (ghcPass @p) where ie_deprecation :: GhcPass p -> IE (GhcPass p) -> Maybe (LWarningTxt (GhcPass p)) ie_deprecation GhcPs (IEVar xie _ _) = xie ie_deprecation GhcPs (IEThingAbs xie _ _) = xie ie_deprecation GhcPs (IEThingAll (xie, _) _ _) = xie ie_deprecation GhcPs (IEThingWith (xie, _) _ _ _ _) = xie ie_deprecation GhcPs (IEModuleContents (xie, _) _) = xie ie_deprecation GhcRn (IEVar xie _ _) = xie ie_deprecation GhcRn (IEThingAbs xie _ _) = xie ie_deprecation GhcRn (IEThingAll (xie, _) _ _) = xie ie_deprecation GhcRn (IEThingWith (xie, _) _ _ _ _) = xie ie_deprecation GhcRn (IEModuleContents xie _) = xie ie_deprecation _ _ = Nothing ieWrappedLName :: IEWrappedName (GhcPass p) -> LIdP (GhcPass p) ieWrappedLName (IEDefault _ (L l n)) = L l n ieWrappedLName (IEName _ (L l n)) = L l n ieWrappedLName (IEPattern _ (L l n)) = L l n ieWrappedLName (IEType _ (L l n)) = L l n ieWrappedName :: IEWrappedName (GhcPass p) -> IdP (GhcPass p) ieWrappedName = unLoc . ieWrappedLName lieWrappedName :: LIEWrappedName (GhcPass p) -> IdP (GhcPass p) lieWrappedName (L _ n) = ieWrappedName n ieLWrappedName :: LIEWrappedName (GhcPass p) -> LIdP (GhcPass p) ieLWrappedName (L _ n) = ieWrappedLName n replaceWrappedName :: IEWrappedName GhcPs -> IdP GhcRn -> IEWrappedName GhcRn replaceWrappedName (IEDefault r (L l _)) n = IEDefault r (L l n) replaceWrappedName (IEName x (L l _)) n = IEName x (L l n) replaceWrappedName (IEPattern r (L l _)) n = IEPattern r (L l n) replaceWrappedName (IEType r (L l _)) n = IEType r (L l n) replaceLWrappedName :: LIEWrappedName GhcPs -> IdP GhcRn -> LIEWrappedName GhcRn replaceLWrappedName (L l n) n' = L l (replaceWrappedName n n') exportDocstring :: LHsDoc pass -> SDoc exportDocstring doc = braces (text "docstring: " <> ppr doc) instance OutputableBndrId p => Outputable (IE (GhcPass p)) where ppr ie@(IEVar _ var doc) = sep $ catMaybes [ ppr <$> ieDeprecation ie , Just $ ppr (unLoc var) , exportDocstring <$> doc ] ppr ie@(IEThingAbs _ thing doc) = 
sep $ catMaybes [ ppr <$> ieDeprecation ie , Just $ ppr (unLoc thing) , exportDocstring <$> doc ] ppr ie@(IEThingAll _ thing doc) = sep $ catMaybes [ ppr <$> ieDeprecation ie , Just $ hcat [ppr (unLoc thing) , text "(..)"] , exportDocstring <$> doc ] ppr ie@(IEThingWith _ thing wc withs doc) = sep $ catMaybes [ ppr <$> ieDeprecation ie , Just $ ppr (unLoc thing) <> parens (fsep (punctuate comma ppWiths)) , exportDocstring <$> doc ] where ppWiths = case wc of NoIEWildcard -> map (ppr . unLoc) withs IEWildcard pos -> let (bs, as) = splitAt pos (map (ppr . unLoc) withs) in bs ++ [text ".."] ++ as ppr ie@(IEModuleContents _ mod') = sep $ catMaybes [ppr <$> ieDeprecation ie, Just $ text "module" <+> ppr mod'] ppr (IEGroup _ n _) = text ("") ppr (IEDoc _ doc) = ppr doc ppr (IEDocNamed _ string) = text ("") instance (HasOccName (IdP (GhcPass p)), OutputableBndrId p) => HasOccName (IEWrappedName (GhcPass p)) where occName w = occName (ieWrappedName w) instance OutputableBndrId p => OutputableBndr (IEWrappedName (GhcPass p)) where pprBndr bs w = pprBndr bs (ieWrappedName w) pprPrefixOcc w = pprPrefixOcc (ieWrappedName w) pprInfixOcc w = pprInfixOcc (ieWrappedName w) instance OutputableBndrId p => Outputable (IEWrappedName (GhcPass p)) where ppr (IEDefault _ (L _ n)) = text "default" <+> pprPrefixOcc n ppr (IEName _ (L _ n)) = pprPrefixOcc n ppr (IEPattern _ (L _ n)) = text "pattern" <+> pprPrefixOcc n ppr (IEType _ (L _ n)) = text "type" <+> pprPrefixOcc n pprImpExp :: (HasOccName name, OutputableBndr name) => name -> SDoc pprImpExp name = type_pref <+> pprPrefixOcc name where occ = occName name type_pref | isTcOcc occ && isSymOcc occ = text "type" | otherwise = empty ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Instances.hs0000644000000000000000000005524307346545000020554 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# 
LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- This module contains exclusively Data instances, which are going to be slow -- no matter what we do. Furthermore, they are incredibly slow to compile with -- optimisation (see #9557). Consequently we compile this with -O0. -- See #18254. {-# OPTIONS_GHC -O0 #-} module GHC.Hs.Instances where -- This module defines the Data instances for the hsSyn AST. -- It happens here to avoid massive constraint types on the AST with concomitant -- slow GHC bootstrap times. -- UndecidableInstances ? import Data.Data hiding ( Fixity ) import GHC.Prelude import GHC.Hs.Extension import GHC.Hs.Binds import GHC.Hs.Decls import GHC.Hs.Expr import GHC.Hs.Lit import GHC.Hs.Type import GHC.Hs.Pat import GHC.Hs.ImpExp import GHC.Parser.Annotation -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs----------------------------------------- -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Binds ---------------------------------- -- deriving instance (DataIdLR pL pR) => Data (HsLocalBindsLR pL pR) deriving instance Data (HsLocalBindsLR GhcPs GhcPs) deriving instance Data (HsLocalBindsLR GhcPs GhcRn) deriving instance Data (HsLocalBindsLR GhcRn GhcRn) deriving instance Data (HsLocalBindsLR GhcTc GhcTc) -- deriving instance (DataIdLR pL pR) => Data (HsValBindsLR pL pR) deriving instance Data (HsValBindsLR GhcPs GhcPs) deriving instance Data (HsValBindsLR GhcPs GhcRn) deriving instance Data (HsValBindsLR GhcRn GhcRn) deriving instance Data (HsValBindsLR GhcTc GhcTc) -- deriving instance (DataIdLR pL pL) => Data (NHsValBindsLR pL) deriving instance Data (NHsValBindsLR GhcPs) deriving instance Data (NHsValBindsLR GhcRn) deriving instance Data (NHsValBindsLR GhcTc) -- deriving instance (DataIdLR pL pR) => Data (HsBindLR pL pR) deriving instance Data (HsBindLR GhcPs GhcPs) deriving instance Data (HsBindLR GhcPs GhcRn) 
deriving instance Data (HsBindLR GhcRn GhcRn) deriving instance Data (HsBindLR GhcTc GhcTc) deriving instance Data AbsBinds deriving instance Data ABExport -- deriving instance DataId p => Data (RecordPatSynField p) deriving instance Data (RecordPatSynField GhcPs) deriving instance Data (RecordPatSynField GhcRn) deriving instance Data (RecordPatSynField GhcTc) -- deriving instance (DataIdLR pL pR) => Data (PatSynBind pL pR) deriving instance Data (PatSynBind GhcPs GhcPs) deriving instance Data (PatSynBind GhcPs GhcRn) deriving instance Data (PatSynBind GhcRn GhcRn) deriving instance Data (PatSynBind GhcTc GhcTc) -- deriving instance (DataIdLR p p) => Data (HsIPBinds p) deriving instance Data (HsIPBinds GhcPs) deriving instance Data (HsIPBinds GhcRn) deriving instance Data (HsIPBinds GhcTc) -- deriving instance (DataIdLR p p) => Data (IPBind p) deriving instance Data (IPBind GhcPs) deriving instance Data (IPBind GhcRn) deriving instance Data (IPBind GhcTc) -- deriving instance (DataIdLR p p) => Data (Sig p) deriving instance Data (Sig GhcPs) deriving instance Data (Sig GhcRn) deriving instance Data (Sig GhcTc) -- deriving instance (DataId p) => Data (FixitySig p) deriving instance Data (FixitySig GhcPs) deriving instance Data (FixitySig GhcRn) deriving instance Data (FixitySig GhcTc) -- deriving instance (DataId p) => Data (StandaloneKindSig p) deriving instance Data (StandaloneKindSig GhcPs) deriving instance Data (StandaloneKindSig GhcRn) deriving instance Data (StandaloneKindSig GhcTc) -- deriving instance (DataIdLR p p) => Data (HsPatSynDir p) deriving instance Data (HsPatSynDir GhcPs) deriving instance Data (HsPatSynDir GhcRn) deriving instance Data (HsPatSynDir GhcTc) deriving instance Data (HsMultAnn GhcPs) deriving instance Data (HsMultAnn GhcRn) deriving instance Data (HsMultAnn GhcTc) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Decls ---------------------------------- -- deriving instance 
(DataIdLR p p) => Data (HsDecl p) deriving instance Data (HsDecl GhcPs) deriving instance Data (HsDecl GhcRn) deriving instance Data (HsDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (HsGroup p) deriving instance Data (HsGroup GhcPs) deriving instance Data (HsGroup GhcRn) deriving instance Data (HsGroup GhcTc) -- deriving instance (DataIdLR p p) => Data (SpliceDecl p) deriving instance Data (SpliceDecl GhcPs) deriving instance Data (SpliceDecl GhcRn) deriving instance Data (SpliceDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (TyClDecl p) deriving instance Data (TyClDecl GhcPs) deriving instance Data (TyClDecl GhcRn) deriving instance Data (TyClDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (FunDep p) deriving instance Data (FunDep GhcPs) deriving instance Data (FunDep GhcRn) deriving instance Data (FunDep GhcTc) -- deriving instance (DataIdLR p p) => Data (TyClGroup p) deriving instance Data (TyClGroup GhcPs) deriving instance Data (TyClGroup GhcRn) deriving instance Data (TyClGroup GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyResultSig p) deriving instance Data (FamilyResultSig GhcPs) deriving instance Data (FamilyResultSig GhcRn) deriving instance Data (FamilyResultSig GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyDecl p) deriving instance Data (FamilyDecl GhcPs) deriving instance Data (FamilyDecl GhcRn) deriving instance Data (FamilyDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (InjectivityAnn p) deriving instance Data (InjectivityAnn GhcPs) deriving instance Data (InjectivityAnn GhcRn) deriving instance Data (InjectivityAnn GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyInfo p) deriving instance Data (FamilyInfo GhcPs) deriving instance Data (FamilyInfo GhcRn) deriving instance Data (FamilyInfo GhcTc) -- deriving instance (DataIdLR p p) => Data (HsDataDefn p) deriving instance Data (HsDataDefn GhcPs) deriving instance Data (HsDataDefn GhcRn) deriving instance Data (HsDataDefn GhcTc) -- 
deriving instance (DataIdLR p p) => Data (HsDerivingClause p) deriving instance Data (HsDerivingClause GhcPs) deriving instance Data (HsDerivingClause GhcRn) deriving instance Data (HsDerivingClause GhcTc) -- deriving instance DataIdLR p p => Data (DerivClauseTys p) deriving instance Data (DerivClauseTys GhcPs) deriving instance Data (DerivClauseTys GhcRn) deriving instance Data (DerivClauseTys GhcTc) -- deriving instance (DataIdLR p p) => Data (ConDecl p) deriving instance Data (ConDecl GhcPs) deriving instance Data (ConDecl GhcRn) deriving instance Data (ConDecl GhcTc) -- deriving instance DataIdLR p p => Data (HsConDeclGADTDetails p) deriving instance Data (HsConDeclGADTDetails GhcPs) deriving instance Data (HsConDeclGADTDetails GhcRn) deriving instance Data (HsConDeclGADTDetails GhcTc) -- deriving instance DataIdLR p p => Data (TyFamInstDecl p) deriving instance Data (TyFamInstDecl GhcPs) deriving instance Data (TyFamInstDecl GhcRn) deriving instance Data (TyFamInstDecl GhcTc) -- deriving instance DataIdLR p p => Data (DataFamInstDecl p) deriving instance Data (DataFamInstDecl GhcPs) deriving instance Data (DataFamInstDecl GhcRn) deriving instance Data (DataFamInstDecl GhcTc) -- deriving instance (DataIdLR p p,Data rhs)=>Data (FamEqn p rhs) deriving instance Data rhs => Data (FamEqn GhcPs rhs) deriving instance Data rhs => Data (FamEqn GhcRn rhs) deriving instance Data rhs => Data (FamEqn GhcTc rhs) -- deriving instance (DataIdLR p p) => Data (ClsInstDecl p) deriving instance Data (ClsInstDecl GhcPs) deriving instance Data (ClsInstDecl GhcRn) deriving instance Data (ClsInstDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (InstDecl p) deriving instance Data (InstDecl GhcPs) deriving instance Data (InstDecl GhcRn) deriving instance Data (InstDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (DerivDecl p) deriving instance Data (DerivDecl GhcPs) deriving instance Data (DerivDecl GhcRn) deriving instance Data (DerivDecl GhcTc) -- deriving instance 
(DataIdLR p p) => Data (DerivStrategy p) deriving instance Data (DerivStrategy GhcPs) deriving instance Data (DerivStrategy GhcRn) deriving instance Data (DerivStrategy GhcTc) -- deriving instance (DataIdLR p p) => Data (DefaultDecl p) deriving instance Data (DefaultDecl GhcPs) deriving instance Data (DefaultDecl GhcRn) deriving instance Data (DefaultDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (ForeignDecl p) deriving instance Data (ForeignDecl GhcPs) deriving instance Data (ForeignDecl GhcRn) deriving instance Data (ForeignDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (ForeignImport p) deriving instance Data (ForeignImport GhcPs) deriving instance Data (ForeignImport GhcRn) deriving instance Data (ForeignImport GhcTc) -- deriving instance (DataIdLR p p) => Data (ForeignExport p) deriving instance Data (ForeignExport GhcPs) deriving instance Data (ForeignExport GhcRn) deriving instance Data (ForeignExport GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleDecls p) deriving instance Data (RuleDecls GhcPs) deriving instance Data (RuleDecls GhcRn) deriving instance Data (RuleDecls GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleDecl p) deriving instance Data (RuleDecl GhcPs) deriving instance Data (RuleDecl GhcRn) deriving instance Data (RuleDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleBndr p) deriving instance Data (RuleBndr GhcPs) deriving instance Data (RuleBndr GhcRn) deriving instance Data (RuleBndr GhcTc) -- deriving instance (DataId p) => Data (WarnDecls p) deriving instance Data (WarnDecls GhcPs) deriving instance Data (WarnDecls GhcRn) deriving instance Data (WarnDecls GhcTc) -- deriving instance (DataId p) => Data (WarnDecl p) deriving instance Data (WarnDecl GhcPs) deriving instance Data (WarnDecl GhcRn) deriving instance Data (WarnDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (AnnDecl p) deriving instance Data (AnnProvenance GhcPs) deriving instance Data (AnnProvenance GhcRn) deriving instance 
Data (AnnProvenance GhcTc) deriving instance Data (AnnDecl GhcPs) deriving instance Data (AnnDecl GhcRn) deriving instance Data (AnnDecl GhcTc) -- deriving instance (DataId p) => Data (RoleAnnotDecl p) deriving instance Data (RoleAnnotDecl GhcPs) deriving instance Data (RoleAnnotDecl GhcRn) deriving instance Data (RoleAnnotDecl GhcTc) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Expr ----------------------------------- deriving instance Data (FieldLabelStrings GhcPs) deriving instance Data (FieldLabelStrings GhcRn) deriving instance Data (FieldLabelStrings GhcTc) deriving instance Data (HsRecUpdParent GhcPs) deriving instance Data (HsRecUpdParent GhcRn) deriving instance Data (HsRecUpdParent GhcTc) deriving instance Data (LHsRecUpdFields GhcPs) deriving instance Data (LHsRecUpdFields GhcRn) deriving instance Data (LHsRecUpdFields GhcTc) deriving instance Data (DotFieldOcc GhcPs) deriving instance Data (DotFieldOcc GhcRn) deriving instance Data (DotFieldOcc GhcTc) -- deriving instance (DataIdLR p p) => Data (HsPragE p) deriving instance Data (HsPragE GhcPs) deriving instance Data (HsPragE GhcRn) deriving instance Data (HsPragE GhcTc) -- deriving instance (DataIdLR p p) => Data (HsExpr p) deriving instance Data (HsExpr GhcPs) deriving instance Data (HsExpr GhcRn) deriving instance Data (HsExpr GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTupArg p) deriving instance Data (HsTupArg GhcPs) deriving instance Data (HsTupArg GhcRn) deriving instance Data (HsTupArg GhcTc) -- deriving instance (DataIdLR p p) => Data (HsCmd p) deriving instance Data (HsCmd GhcPs) deriving instance Data (HsCmd GhcRn) deriving instance Data (HsCmd GhcTc) -- deriving instance (DataIdLR p p) => Data (HsCmdTop p) deriving instance Data (HsCmdTop GhcPs) deriving instance Data (HsCmdTop GhcRn) deriving instance Data (HsCmdTop GhcTc) -- deriving instance (DataIdLR p p,Data body) => Data (MatchGroup p body) deriving instance Data 
(MatchGroup GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (MatchGroup GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (MatchGroup GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (MatchGroup GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (MatchGroup GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (MatchGroup GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (Match p body) deriving instance Data (Match GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (Match GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (Match GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (Match GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (Match GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (Match GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (GRHSs p body) deriving instance Data (GRHSs GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (GRHSs GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (GRHSs GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (GRHSs GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (GRHSs GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (GRHSs GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (GRHS p body) deriving instance Data (GRHS GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (GRHS GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (GRHS GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (GRHS GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (GRHS GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (GRHS GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (StmtLR p p body) deriving instance Data (StmtLR GhcPs GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (StmtLR GhcPs GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (StmtLR GhcRn GhcRn (LocatedA (HsExpr GhcRn))) deriving 
instance Data (StmtLR GhcTc GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (StmtLR GhcPs GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (StmtLR GhcPs GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (StmtLR GhcRn GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (StmtLR GhcTc GhcTc (LocatedA (HsCmd GhcTc))) deriving instance Data RecStmtTc -- deriving instance (DataIdLR p p) => Data (ParStmtBlock p p) deriving instance Data (ParStmtBlock GhcPs GhcPs) deriving instance Data (ParStmtBlock GhcPs GhcRn) deriving instance Data (ParStmtBlock GhcRn GhcRn) deriving instance Data (ParStmtBlock GhcTc GhcTc) -- deriving instance (DataIdLR p p) => Data (ApplicativeStmt p p) deriving instance Data (ApplicativeStmt GhcPs GhcPs) deriving instance Data (ApplicativeStmt GhcPs GhcRn) deriving instance Data (ApplicativeStmt GhcPs GhcTc) deriving instance Data (ApplicativeStmt GhcRn GhcPs) deriving instance Data (ApplicativeStmt GhcRn GhcRn) deriving instance Data (ApplicativeStmt GhcRn GhcTc) deriving instance Data (ApplicativeStmt GhcTc GhcPs) deriving instance Data (ApplicativeStmt GhcTc GhcRn) deriving instance Data (ApplicativeStmt GhcTc GhcTc) -- deriving instance (DataIdLR p p) => Data (ApplicativeArg p) deriving instance Data (ApplicativeArg GhcPs) deriving instance Data (ApplicativeArg GhcRn) deriving instance Data (ApplicativeArg GhcTc) deriving instance Data HsArrowMatchContext deriving instance Data fn => Data (HsStmtContext fn) deriving instance Data fn => Data (HsMatchContext fn) -- deriving instance (DataIdLR p p) => Data (HsUntypedSplice p) deriving instance Data (HsUntypedSplice GhcPs) deriving instance Data (HsUntypedSplice GhcRn) deriving instance Data (HsUntypedSplice GhcTc) deriving instance Data a => Data (HsUntypedSpliceResult a) -- deriving instance (DataIdLR p p) => Data (HsQuote p) deriving instance Data (HsQuote GhcPs) deriving instance Data (HsQuote GhcRn) deriving instance Data (HsQuote GhcTc) deriving instance Data HsBracketTc 
-- deriving instance (DataIdLR p p) => Data (ArithSeqInfo p) deriving instance Data (ArithSeqInfo GhcPs) deriving instance Data (ArithSeqInfo GhcRn) deriving instance Data (ArithSeqInfo GhcTc) deriving instance Data CmdTopTc deriving instance Data PendingRnSplice deriving instance Data PendingTcSplice deriving instance Data SyntaxExprRn deriving instance Data SyntaxExprTc deriving instance Data XBindStmtRn deriving instance Data XBindStmtTc -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Lit ------------------------------------ -- deriving instance (DataId p) => Data (HsLit p) deriving instance Data (HsLit GhcPs) deriving instance Data (HsLit GhcRn) deriving instance Data (HsLit GhcTc) -- deriving instance (DataIdLR p p) => Data (HsOverLit p) deriving instance Data (HsOverLit GhcPs) deriving instance Data (HsOverLit GhcRn) deriving instance Data (HsOverLit GhcTc) deriving instance Data OverLitRn deriving instance Data OverLitTc -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Pat ------------------------------------ -- deriving instance (DataIdLR p p) => Data (Pat p) deriving instance Data (Pat GhcPs) deriving instance Data (Pat GhcRn) deriving instance Data (Pat GhcTc) deriving instance Data ConPatTc deriving instance Data (HsConPatTyArg GhcPs) deriving instance Data (HsConPatTyArg GhcRn) deriving instance Data (HsConPatTyArg GhcTc) deriving instance (Data a, Data b) => Data (HsFieldBind a b) deriving instance (Data body) => Data (HsRecFields GhcPs body) deriving instance (Data body) => Data (HsRecFields GhcRn body) deriving instance (Data body) => Data (HsRecFields GhcTc body) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Type ---------------------------------- -- deriving instance Data (HsBndrVis p) deriving instance Data (HsBndrVis GhcPs) deriving instance Data (HsBndrVis GhcRn) deriving 
instance Data (HsBndrVis GhcTc) -- deriving instance (DataIdLR p p) => Data (LHsQTyVars p) deriving instance Data (LHsQTyVars GhcPs) deriving instance Data (LHsQTyVars GhcRn) deriving instance Data (LHsQTyVars GhcTc) -- deriving instance (Data flag, DataIdLR p p) => Data (HsOuterTyVarBndrs p) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcPs) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcRn) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcTc) -- deriving instance (DataIdLR p p) => Data (HsSigType p) deriving instance Data (HsSigType GhcPs) deriving instance Data (HsSigType GhcRn) deriving instance Data (HsSigType GhcTc) -- deriving instance (DataIdLR p p, Data thing) =>Data (HsWildCardBndrs p thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcPs thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcRn thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcTc thing) -- deriving instance (DataIdLR p p) => Data (HsPatSigType p) deriving instance Data (HsPatSigType GhcPs) deriving instance Data (HsPatSigType GhcRn) deriving instance Data (HsPatSigType GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTyPat p) deriving instance Data (HsTyPat GhcPs) deriving instance Data (HsTyPat GhcRn) deriving instance Data (HsTyPat GhcTc) -- deriving instance (DataIdLR p p) => Data (HsForAllTelescope p) deriving instance Data (HsForAllTelescope GhcPs) deriving instance Data (HsForAllTelescope GhcRn) deriving instance Data (HsForAllTelescope GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTyVarBndr p) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcPs) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcRn) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcTc) -- deriving instance Data (HsBndrVar p) deriving instance Data (HsBndrVar GhcPs) deriving instance Data (HsBndrVar GhcRn) deriving instance Data (HsBndrVar GhcTc) -- deriving instance (DataIdLR p p) => Data 
(HsBndrKind p) deriving instance Data (HsBndrKind GhcPs) deriving instance Data (HsBndrKind GhcRn) deriving instance Data (HsBndrKind GhcTc) -- deriving instance (DataIdLR p p) => Data (HsType p) deriving instance Data (HsType GhcPs) deriving instance Data (HsType GhcRn) deriving instance Data (HsType GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTyLit p) deriving instance Data (HsTyLit GhcPs) deriving instance Data (HsTyLit GhcRn) deriving instance Data (HsTyLit GhcTc) -- deriving instance (Data mult, DataIdLR p p) => Data (HsArrowOf mult p) deriving instance Data (HsArrowOf (LocatedA (HsType GhcPs)) GhcPs) deriving instance Data (HsArrowOf (LocatedA (HsType GhcRn)) GhcRn) deriving instance Data (HsArrowOf (LocatedA (HsType GhcTc)) GhcTc) deriving instance Data (HsArrowOf (LocatedA (HsExpr GhcPs)) GhcPs) deriving instance Data (HsArrowOf (LocatedA (HsExpr GhcRn)) GhcRn) deriving instance Data (HsArrowOf (LocatedA (HsExpr GhcTc)) GhcTc) -- deriving instance (DataIdLR p p) => Data (HsScaled p a) deriving instance Data thing => Data (HsScaled GhcPs thing) deriving instance Data thing => Data (HsScaled GhcRn thing) deriving instance Data thing => Data (HsScaled GhcTc thing) -- deriving instance (Data a, Data b) => Data (HsArg p a b) deriving instance (Data a, Data b) => Data (HsArg GhcPs a b) deriving instance (Data a, Data b) => Data (HsArg GhcRn a b) deriving instance (Data a, Data b) => Data (HsArg GhcTc a b) -- deriving instance (DataIdLR p p) => Data (ConDeclField p) deriving instance Data (ConDeclField GhcPs) deriving instance Data (ConDeclField GhcRn) deriving instance Data (ConDeclField GhcTc) -- deriving instance (DataId p) => Data (FieldOcc p) deriving instance Data (FieldOcc GhcPs) deriving instance Data (FieldOcc GhcRn) deriving instance Data (FieldOcc GhcTc) -- deriving instance (DataId name) => Data (ImportDecl name) deriving instance Data (ImportDecl GhcPs) deriving instance Data (ImportDecl GhcRn) deriving instance Data (ImportDecl GhcTc) -- 
deriving instance (DataId name) => Data (IE name) deriving instance Data (IE GhcPs) deriving instance Data (IE GhcRn) deriving instance Data (IE GhcTc) -- deriving instance (Eq name, Eq (IdP name)) => Eq (IE name) deriving instance Eq (IE GhcPs) deriving instance Eq (IE GhcRn) deriving instance Eq (IE GhcTc) -- --------------------------------------------------------------------- deriving instance Data HsThingRn deriving instance Data XXExprGhcRn -- --------------------------------------------------------------------- deriving instance Data XXExprGhcTc deriving instance Data XXPatGhcTc -- --------------------------------------------------------------------- deriving instance Data XViaStrategyPs -- --------------------------------------------------------------------- ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Lit.hs0000644000000000000000000002621507346545000017352 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable, OutputableBndrId {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Source-language literals module GHC.Hs.Lit ( module Language.Haskell.Syntax.Lit , module GHC.Hs.Lit ) where import GHC.Prelude import {-# SOURCE #-} GHC.Hs.Expr( pprExpr ) import GHC.Data.FastString (unpackFS) import GHC.Types.Basic (PprPrec(..), topPrec ) import GHC.Core.Ppr ( {- instance OutputableBndr TyVar -} ) import GHC.Types.SourceText import GHC.Core.Type import GHC.Utils.Misc (split) import GHC.Utils.Outputable import GHC.Utils.Panic (panic) import GHC.Hs.Extension import Language.Haskell.Syntax.Expr ( HsExpr ) import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Lit 
{- ************************************************************************ * * \subsection[HsLit]{Literals} * * ************************************************************************ -} type instance XHsChar (GhcPass _) = SourceText type instance XHsCharPrim (GhcPass _) = SourceText type instance XHsString (GhcPass _) = SourceText type instance XHsMultilineString (GhcPass _) = SourceText type instance XHsStringPrim (GhcPass _) = SourceText type instance XHsInt (GhcPass _) = NoExtField type instance XHsIntPrim (GhcPass _) = SourceText type instance XHsWordPrim (GhcPass _) = SourceText type instance XHsInt8Prim (GhcPass _) = SourceText type instance XHsInt16Prim (GhcPass _) = SourceText type instance XHsInt32Prim (GhcPass _) = SourceText type instance XHsInt64Prim (GhcPass _) = SourceText type instance XHsWord8Prim (GhcPass _) = SourceText type instance XHsWord16Prim (GhcPass _) = SourceText type instance XHsWord32Prim (GhcPass _) = SourceText type instance XHsWord64Prim (GhcPass _) = SourceText type instance XHsInteger (GhcPass _) = SourceText type instance XHsRat (GhcPass _) = NoExtField type instance XHsFloatPrim (GhcPass _) = NoExtField type instance XHsDoublePrim (GhcPass _) = NoExtField type instance XXLit (GhcPass _) = DataConCantHappen data OverLitRn = OverLitRn { ol_rebindable :: Bool, -- Note [ol_rebindable] ol_from_fun :: LIdP GhcRn -- Note [Overloaded literal witnesses] } data OverLitTc = OverLitTc { ol_rebindable :: Bool, -- Note [ol_rebindable] ol_witness :: HsExpr GhcTc, -- Note [Overloaded literal witnesses] ol_type :: Type } {- Note [Overloaded literal witnesses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During renaming, the coercion function needed for a given HsOverLit is resolved according to the current scope and RebindableSyntax (see Note [ol_rebindable]). The result of this resolution *before* type checking is the coercion function such as 'fromInteger' or 'fromRational', stored in the ol_from_fun field of OverLitRn. 
*After* type checking, the ol_witness field of the OverLitTc contains the witness of the literal as HsExpr, such as (fromInteger 3) or lit_78. This witness should replace the literal. Reason: it allows commoning up of the fromInteger calls, which wouldn't be possible if the desugarer made the application. The ol_type in OverLitTc records the type the overloaded literal is found to have. -} type instance XOverLit GhcPs = NoExtField type instance XOverLit GhcRn = OverLitRn type instance XOverLit GhcTc = OverLitTc pprXOverLit :: GhcPass p -> XOverLit (GhcPass p) -> SDoc pprXOverLit GhcPs noExt = ppr noExt pprXOverLit GhcRn OverLitRn{ ol_from_fun = from_fun } = ppr from_fun pprXOverLit GhcTc OverLitTc{ ol_witness = witness } = pprExpr witness type instance XXOverLit (GhcPass _) = DataConCantHappen overLitType :: HsOverLit GhcTc -> Type overLitType (OverLit OverLitTc{ ol_type = ty } _) = ty -- | @'hsOverLitNeedsParens' p ol@ returns 'True' if an overloaded literal -- @ol@ needs to be parenthesized under precedence @p@. hsOverLitNeedsParens :: PprPrec -> HsOverLit x -> Bool hsOverLitNeedsParens p (OverLit { ol_val = olv }) = go olv where go :: OverLitVal -> Bool go (HsIntegral x) = p > topPrec && il_neg x go (HsFractional x) = p > topPrec && fl_neg x go (HsIsString {}) = False hsOverLitNeedsParens _ (XOverLit { }) = False -- | @'hsLitNeedsParens' p l@ returns 'True' if a literal @l@ needs -- to be parenthesized under precedence @p@. -- -- See Note [Printing of literals in Core] in GHC.Types.Literal -- for the reasoning. 
hsLitNeedsParens :: PprPrec -> HsLit x -> Bool hsLitNeedsParens p = go where go (HsChar {}) = False go (HsCharPrim {}) = False go (HsString {}) = False go (HsMultilineString {}) = False go (HsStringPrim {}) = False go (HsInt _ x) = p > topPrec && il_neg x go (HsInteger _ x _) = p > topPrec && x < 0 go (HsRat _ x _) = p > topPrec && fl_neg x go (HsFloatPrim {}) = False go (HsDoublePrim {}) = False go (HsIntPrim {}) = False go (HsInt8Prim {}) = False go (HsInt16Prim {}) = False go (HsInt32Prim {}) = False go (HsInt64Prim {}) = False go (HsWordPrim {}) = False go (HsWord8Prim {}) = False go (HsWord16Prim {}) = False go (HsWord64Prim {}) = False go (HsWord32Prim {}) = False go (XLit _) = False -- | Convert a literal from one index type to another convertLit :: HsLit (GhcPass p1) -> HsLit (GhcPass p2) convertLit (HsChar a x) = HsChar a x convertLit (HsCharPrim a x) = HsCharPrim a x convertLit (HsString a x) = HsString a x convertLit (HsMultilineString a x) = HsMultilineString a x convertLit (HsStringPrim a x) = HsStringPrim a x convertLit (HsInt a x) = HsInt a x convertLit (HsIntPrim a x) = HsIntPrim a x convertLit (HsWordPrim a x) = HsWordPrim a x convertLit (HsInt8Prim a x) = HsInt8Prim a x convertLit (HsInt16Prim a x) = HsInt16Prim a x convertLit (HsInt32Prim a x) = HsInt32Prim a x convertLit (HsInt64Prim a x) = HsInt64Prim a x convertLit (HsWord8Prim a x) = HsWord8Prim a x convertLit (HsWord16Prim a x) = HsWord16Prim a x convertLit (HsWord32Prim a x) = HsWord32Prim a x convertLit (HsWord64Prim a x) = HsWord64Prim a x convertLit (HsInteger a x b) = HsInteger a x b convertLit (HsRat a x b) = HsRat a x b convertLit (HsFloatPrim a x) = HsFloatPrim a x convertLit (HsDoublePrim a x) = HsDoublePrim a x {- Note [ol_rebindable] ~~~~~~~~~~~~~~~~~~~~ The ol_rebindable field is True if this literal is actually using rebindable syntax. 
Specifically: False iff ol_from_fun / ol_witness is the standard one True iff ol_from_fun / ol_witness is non-standard Equivalently it's True if a) RebindableSyntax is on b) the witness for fromInteger/fromRational/fromString that happens to be in scope isn't the standard one -} -- Instance specific to GhcPs, need the SourceText instance Outputable (HsLit (GhcPass p)) where ppr (HsChar st c) = pprWithSourceText st (pprHsChar c) ppr (HsCharPrim st c) = pprWithSourceText st (pprPrimChar c) ppr (HsString st s) = pprWithSourceText st (pprHsString s) ppr (HsMultilineString st s) = case st of NoSourceText -> pprHsString s SourceText src -> vcat $ map text $ split '\n' (unpackFS src) ppr (HsStringPrim st s) = pprWithSourceText st (pprHsBytes s) ppr (HsInt _ i) = pprWithSourceText (il_text i) (integer (il_value i)) ppr (HsInteger st i _) = pprWithSourceText st (integer i) ppr (HsRat _ f _) = ppr f ppr (HsFloatPrim _ f) = ppr f <> primFloatSuffix ppr (HsDoublePrim _ d) = ppr d <> primDoubleSuffix ppr (HsIntPrim st i) = pprWithSourceText st (pprPrimInt i) ppr (HsInt8Prim st i) = pprWithSourceText st (pprPrimInt8 i) ppr (HsInt16Prim st i) = pprWithSourceText st (pprPrimInt16 i) ppr (HsInt32Prim st i) = pprWithSourceText st (pprPrimInt32 i) ppr (HsInt64Prim st i) = pprWithSourceText st (pprPrimInt64 i) ppr (HsWordPrim st w) = pprWithSourceText st (pprPrimWord w) ppr (HsWord8Prim st w) = pprWithSourceText st (pprPrimWord8 w) ppr (HsWord16Prim st w) = pprWithSourceText st (pprPrimWord16 w) ppr (HsWord32Prim st w) = pprWithSourceText st (pprPrimWord32 w) ppr (HsWord64Prim st w) = pprWithSourceText st (pprPrimWord64 w) -- in debug mode, print the expression that it's resolved to, too instance OutputableBndrId p => Outputable (HsOverLit (GhcPass p)) where ppr (OverLit {ol_val=val, ol_ext=ext}) = ppr val <+> (whenPprDebug (parens (pprXOverLit (ghcPass @p) ext))) instance Outputable OverLitVal where ppr (HsIntegral i) = pprWithSourceText (il_text i) (integer (il_value i)) ppr 
(HsFractional f) = ppr f ppr (HsIsString st s) = pprWithSourceText st (pprHsString s) -- | pmPprHsLit pretty prints literals and is used when pretty printing pattern -- match warnings. All are printed the same (i.e., without hashes if they are -- primitive and not wrapped in constructors if they are boxed). This happens -- mainly for too reasons: -- * We do not want to expose their internal representation -- * The warnings become too messy pmPprHsLit :: HsLit (GhcPass x) -> SDoc pmPprHsLit (HsChar _ c) = pprHsChar c pmPprHsLit (HsCharPrim _ c) = pprHsChar c pmPprHsLit (HsString st s) = pprWithSourceText st (pprHsString s) pmPprHsLit (HsMultilineString st s) = pprWithSourceText st (pprHsString s) pmPprHsLit (HsStringPrim _ s) = pprHsBytes s pmPprHsLit (HsInt _ i) = integer (il_value i) pmPprHsLit (HsIntPrim _ i) = integer i pmPprHsLit (HsWordPrim _ w) = integer w pmPprHsLit (HsInt8Prim _ i) = integer i pmPprHsLit (HsInt16Prim _ i) = integer i pmPprHsLit (HsInt32Prim _ i) = integer i pmPprHsLit (HsInt64Prim _ i) = integer i pmPprHsLit (HsWord8Prim _ w) = integer w pmPprHsLit (HsWord16Prim _ w) = integer w pmPprHsLit (HsWord32Prim _ w) = integer w pmPprHsLit (HsWord64Prim _ w) = integer w pmPprHsLit (HsInteger _ i _) = integer i pmPprHsLit (HsRat _ f _) = ppr f pmPprHsLit (HsFloatPrim _ f) = ppr f pmPprHsLit (HsDoublePrim _ d) = ppr d negateOverLitVal :: OverLitVal -> OverLitVal negateOverLitVal (HsIntegral i) = HsIntegral (negateIntegralLit i) negateOverLitVal (HsFractional f) = HsFractional (negateFractionalLit f) negateOverLitVal _ = panic "negateOverLitVal: argument is not a number" instance (Ord (XXOverLit p)) => Ord (HsOverLit p) where compare (OverLit _ val1) (OverLit _ val2) = val1 `compare` val2 compare (XOverLit val1) (XOverLit val2) = val1 `compare` val2 compare _ _ = panic "Ord HsOverLit" -- Comparison operations are needed when grouping literals -- for compiling pattern-matching (module GHC.HsToCore.Match.Literal) instance (Eq (XXOverLit p)) => Eq 
(HsOverLit p) where (OverLit _ val1) == (OverLit _ val2) = val1 == val2 (XOverLit val1) == (XOverLit val2) = val1 == val2 _ == _ = panic "Eq HsOverLit" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Pat.hs0000644000000000000000000012671607346545000017355 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PatSyntax]{Abstract Haskell syntax---patterns} -} module GHC.Hs.Pat ( Pat(..), LPat, isInvisArgPat, isVisArgPat, EpAnnSumPat(..), ConPatTc (..), ConLikeP, HsPatExpansion(..), XXPatGhcTc(..), HsConPatDetails, hsConPatArgs, hsConPatTyArgs, HsConPatTyArg(..), HsRecFields(..), HsFieldBind(..), LHsFieldBind, HsRecField, LHsRecField, HsRecUpdField, LHsRecUpdField, RecFieldsDotDot(..), hsRecFields, hsRecFieldSel, hsRecFieldId, hsRecFieldsArgs, mkPrefixConPat, mkCharLitPat, mkNilPat, isSimplePat, isPatSyn, looksLazyPatBind, isBangedLPat, gParPat, patNeedsParens, parenthesizePat, isIrrefutableHsPat, isBoringHsPat, collectEvVarsPat, collectEvVarsPats, pprParendLPat, pprConArgs, pprLPat ) where import GHC.Prelude import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Expr ( HsExpr ) import {-# SOURCE #-} GHC.Hs.Expr (pprLExpr, pprUntypedSplice, HsUntypedSpliceResult(..)) -- friends: import GHC.Hs.Binds import GHC.Hs.Lit import Language.Haskell.Syntax.Extension import GHC.Parser.Annotation import GHC.Hs.Extension import GHC.Hs.Type import GHC.Tc.Types.Evidence 
import GHC.Types.Basic import GHC.Types.SourceText -- others: import GHC.Core.Ppr ( {- instance OutputableBndr TyVar -} ) import GHC.Builtin.Types import GHC.Types.Var import GHC.Types.Name.Reader import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Utils.Outputable import GHC.Core.Type import GHC.Types.SrcLoc import GHC.Data.Bag -- collect ev vars from pats import GHC.Types.Name import Data.Data import qualified Data.List.NonEmpty as NE type instance XWildPat GhcPs = NoExtField type instance XWildPat GhcRn = NoExtField type instance XWildPat GhcTc = Type type instance XVarPat (GhcPass _) = NoExtField type instance XLazyPat GhcPs = EpToken "~" type instance XLazyPat GhcRn = NoExtField type instance XLazyPat GhcTc = NoExtField type instance XAsPat GhcPs = EpToken "@" type instance XAsPat GhcRn = NoExtField type instance XAsPat GhcTc = NoExtField type instance XParPat GhcPs = (EpToken "(", EpToken ")") type instance XParPat GhcRn = NoExtField type instance XParPat GhcTc = NoExtField type instance XBangPat GhcPs = EpToken "!" type instance XBangPat GhcRn = NoExtField type instance XBangPat GhcTc = NoExtField type instance XListPat GhcPs = AnnList () -- After parsing, ListPat can refer to a built-in Haskell list pattern -- or an overloaded list pattern. type instance XListPat GhcRn = NoExtField -- Built-in list patterns only. -- After renaming, overloaded list patterns are expanded to view patterns. -- See Note [Desugaring overloaded list patterns] type instance XListPat GhcTc = Type -- List element type, for use in hsPatType. 
type instance XTuplePat GhcPs = (EpaLocation, EpaLocation) type instance XTuplePat GhcRn = NoExtField type instance XTuplePat GhcTc = [Type] type instance XOrPat GhcPs = NoExtField type instance XOrPat GhcRn = NoExtField type instance XOrPat GhcTc = Type type instance XSumPat GhcPs = EpAnnSumPat type instance XSumPat GhcRn = NoExtField type instance XSumPat GhcTc = [Type] type instance XConPat GhcPs = (Maybe (EpToken "{"), Maybe (EpToken "}")) type instance XConPat GhcRn = NoExtField type instance XConPat GhcTc = ConPatTc type instance XViewPat GhcPs = TokRarrow type instance XViewPat GhcRn = Maybe (HsExpr GhcRn) -- The @HsExpr GhcRn@ gives an inverse to the view function. -- This is used for overloaded lists in particular. -- See Note [Invertible view patterns] in GHC.Tc.TyCl.PatSyn. type instance XViewPat GhcTc = Type -- Overall type of the pattern -- (= the argument type of the view function), for hsPatType. type instance XSplicePat GhcPs = NoExtField type instance XSplicePat GhcRn = HsUntypedSpliceResult (Pat GhcRn) -- See Note [Lifecycle of a splice] in GHC.Hs.Expr type instance XSplicePat GhcTc = DataConCantHappen type instance XLitPat (GhcPass _) = NoExtField type instance XNPat GhcPs = EpToken "-" type instance XNPat GhcRn = EpToken "-" type instance XNPat GhcTc = Type type instance XNPlusKPat GhcPs = EpToken "+" type instance XNPlusKPat GhcRn = NoExtField type instance XNPlusKPat GhcTc = Type type instance XSigPat GhcPs = TokDcolon type instance XSigPat GhcRn = NoExtField type instance XSigPat GhcTc = Type type instance XEmbTyPat GhcPs = EpToken "type" type instance XEmbTyPat GhcRn = NoExtField type instance XEmbTyPat GhcTc = Type type instance XXPat GhcPs = DataConCantHappen type instance XXPat GhcRn = HsPatExpansion (Pat GhcRn) (Pat GhcRn) -- Original pattern and its desugaring/expansion. -- See Note [Rebindable syntax and XXExprGhcRn]. type instance XXPat GhcTc = XXPatGhcTc -- After typechecking, we add extra constructors: CoPat and XXExprGhcRn. 
-- XXExprGhcRn allows us to handle RebindableSyntax in pattern position: -- see "XXExpr GhcTc" for the counterpart in expressions. type instance ConLikeP GhcPs = RdrName -- IdP GhcPs type instance ConLikeP GhcRn = Name -- IdP GhcRn type instance ConLikeP GhcTc = ConLike type instance XConPatTyArg GhcPs = EpToken "@" type instance XConPatTyArg GhcRn = NoExtField type instance XConPatTyArg GhcTc = NoExtField type instance XHsRecFields GhcPs = NoExtField type instance XHsRecFields GhcRn = NoExtField type instance XHsRecFields GhcTc = MultiplicityCheckCoercions type instance XHsFieldBind _ = Maybe (EpToken "=") -- The specificity of an invisible pattern from the parser is always -- SpecifiedSpec. The specificity field supports code generated when deriving -- newtype or via; see Note [Inferred invisible patterns]. type instance XInvisPat GhcPs = (EpToken "@", Specificity) type instance XInvisPat GhcRn = Specificity type instance XInvisPat GhcTc = Type {- Note [Invisible binders in functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC Proposal #448 (section 1.5 Type arguments in lambda patterns) introduces binders for invisible type arguments (@a-binders) in function equations and lambdas, e.g. 1. {-# LANGUAGE TypeAbstractions #-} id1 :: a -> a id1 @t x = x :: t -- @t-binder on the LHS of a function equation 2. {-# LANGUAGE TypeAbstractions #-} ex :: (Int8, Int16) ex = higherRank (\ @a x -> maxBound @a - x ) -- @a-binder in a lambda pattern in an argument -- to a higher-order function higherRank :: (forall a. (Num a, Bounded a) => a -> a) -> (Int8, Int16) higherRank f = (f 42, f 42) In the AST, invisible patterns are represented as InvisPat constructor inside of Pat: data Pat p = ... | InvisPat (LHsType p) ... Just like `BangPat`, the `Pat` data type allows `InvisPat` to appear in nested positions. But this is often not allowed; e.g. 
f @a x = rhs -- YES f (@a,x) = rhs -- NO g = do { @a <- e1; e2 } -- NO h x = case x of { @a -> rhs } -- NO Rather than excluding these things syntactically, we reject them in the renamer (see `rn_pats_general`). This actually gives a better error message than we would get if they were rejected in the parser. Each pattern is either visible (not prefixed with @) or invisible (prefixed with @): f :: forall a. forall b -> forall c. Int -> ... f @a b @c x = ... In this example, the arg-patterns are 1. InvisPat @a -- in the type sig: forall a. 2. VarPat b -- in the type sig: forall b -> 3. InvisPat @c -- in the type sig: forall c. 4. VarPat x -- in the type sig: Int -> Invisible patterns are always type patterns, i.e. they are matched with forall-bound type variables in the signature. Consequently, those variables (and their binders) are erased during compilation, having no effect on program execution at runtime. Visible patterns, on the other hand, may be matched with ordinary function arguments (Int ->) as well as required type arguments (forall b ->). This means that a visible pattern may either be erased or retained, and we only find out in the type checker, namely in tcMatchPats, where we match up all arg-patterns with quantifiers from the type signature. In other words, invisible patterns are always /erased/, while visible patterns are sometimes /erased/ and sometimes /retained/. The desugarer has no use for erased patterns, as the type checker generates HsWrappers to bind the corresponding type variables. Erased patterns are simply discarded inside tcMatchPats, where we know if visible pattern retained or erased. 
-} -- --------------------------------------------------------------------- -- API Annotations types data EpAnnSumPat = EpAnnSumPat { sumPatParens :: (EpaLocation, EpaLocation) , sumPatVbarsBefore :: [EpToken "|"] , sumPatVbarsAfter :: [EpToken "|"] } deriving Data instance NoAnn EpAnnSumPat where noAnn = EpAnnSumPat (noAnn, noAnn) [] [] -- --------------------------------------------------------------------- -- | Extension constructor for Pat, added after typechecking. data XXPatGhcTc = -- | Coercion Pattern (translation only) -- -- During desugaring a (CoPat co pat) turns into a cast with 'co' on the -- scrutinee, followed by a match on 'pat'. CoPat { -- | Coercion Pattern -- If co :: t1 ~ t2, p :: t2, -- then (CoPat co p) :: t1 co_cpt_wrap :: HsWrapper , -- | Why not LPat? Ans: existing locn will do co_pat_inner :: Pat GhcTc , -- | Type of whole pattern, t1 co_pat_ty :: Type } -- | Pattern expansion: original pattern, and desugared pattern, -- for RebindableSyntax and other overloaded syntax such as OverloadedLists. -- See Note [Rebindable syntax and XXExprGhcRn]. | ExpansionPat (Pat GhcRn) (Pat GhcTc) -- See Note [Rebindable syntax and XXExprGhcRn]. data HsPatExpansion a b = HsPatExpanded a b deriving Data -- | This is the extension field for ConPat, added after typechecking -- It adds quite a few extra fields, to support elaboration of pattern matching. data ConPatTc = ConPatTc { -- | The universal arg types 1-1 with the universal -- tyvars of the constructor/pattern synonym -- Use (conLikeResTy pat_con cpt_arg_tys) to get -- the type of the pattern cpt_arg_tys :: [Type] , -- | Existentially bound type variables -- in correctly-scoped order e.g. 
[k:* x:k] cpt_tvs :: [TyVar] , -- | Ditto *coercion variables* and *dictionaries* -- One reason for putting coercion variable here I think -- is to ensure their kinds are zonked cpt_dicts :: [EvVar] , -- | Bindings involving those dictionaries cpt_binds :: TcEvBinds , -- | Extra wrapper to pass to the matcher -- Only relevant for pattern-synonyms; -- ignored for data cons cpt_wrap :: HsWrapper } hsRecFieldId :: HsRecField GhcTc arg -> Id hsRecFieldId = hsRecFieldSel {- ************************************************************************ * * * Printing patterns * * ************************************************************************ -} instance Outputable (HsTyPat p) => Outputable (HsConPatTyArg p) where ppr (HsConPatTyArg _ ty) = char '@' <> ppr ty instance (Outputable arg, Outputable (XRec p (HsRecField p arg)), XRec p RecFieldsDotDot ~ LocatedE RecFieldsDotDot) => Outputable (HsRecFields p arg) where ppr (HsRecFields { rec_flds = flds, rec_dotdot = Nothing }) = braces (fsep (punctuate comma (map ppr flds))) ppr (HsRecFields { rec_flds = flds, rec_dotdot = Just (unLoc -> RecFieldsDotDot n) }) = braces (fsep (punctuate comma (map ppr (take n flds) ++ [dotdot]))) where dotdot = text ".." <+> whenPprDebug (ppr (drop n flds)) instance (Outputable p, OutputableBndr p, Outputable arg) => Outputable (HsFieldBind p arg) where ppr (HsFieldBind { hfbLHS = f, hfbRHS = arg, hfbPun = pun }) = pprPrefixOcc f <+> (ppUnless pun $ equals <+> ppr arg) instance OutputableBndrId p => Outputable (Pat (GhcPass p)) where ppr = pprPat -- See Note [Rebindable syntax and XXExprGhcRn]. 
instance (Outputable a, Outputable b) => Outputable (HsPatExpansion a b) where ppr (HsPatExpanded a b) = ifPprDebug (vcat [ppr a, ppr b]) (ppr a) pprLPat :: (OutputableBndrId p) => LPat (GhcPass p) -> SDoc pprLPat (L _ e) = pprPat e -- | Print with type info if -dppr-debug is on pprPatBndr :: OutputableBndr name => name -> SDoc pprPatBndr var = getPprDebug $ \case True -> parens (pprBndr LambdaBind var) -- Could pass the site to pprPat -- but is it worth it? False -> pprPrefixOcc var pprParendLPat :: (OutputableBndrId p) => PprPrec -> LPat (GhcPass p) -> SDoc pprParendLPat p = pprParendPat p . unLoc pprParendPat :: forall p. OutputableBndrId p => PprPrec -> Pat (GhcPass p) -> SDoc pprParendPat p pat = sdocOption sdocPrintTypecheckerElaboration $ \ print_tc_elab -> if need_parens print_tc_elab pat then parens (pprPat pat) else pprPat pat where need_parens print_tc_elab pat | GhcTc <- ghcPass @p , XPat (CoPat {}) <- pat = print_tc_elab | otherwise = patNeedsParens p pat -- For a CoPat we need parens if we are going to show it, which -- we do if -fprint-typechecker-elaboration is on (c.f. pprHsWrapper) -- But otherwise the CoPat is discarded, so it -- is the pattern inside that matters. Sigh. pprPat :: forall p. (OutputableBndrId p) => Pat (GhcPass p) -> SDoc pprPat (VarPat _ lvar) = pprPatBndr (unLoc lvar) pprPat (WildPat _) = char '_' pprPat (LazyPat _ pat) = char '~' <> pprParendLPat appPrec pat pprPat (BangPat _ pat) = char '!' 
<> pprParendLPat appPrec pat pprPat (AsPat _ name pat) = hcat [pprPrefixOcc (unLoc name), char '@', pprParendLPat appPrec pat] pprPat (ViewPat _ expr pat) = hcat [pprLExpr expr, text " -> ", ppr pat] pprPat (ParPat _ pat) = parens (ppr pat) pprPat (LitPat _ s) = ppr s pprPat (NPat _ l Nothing _) = ppr l pprPat (NPat _ l (Just _) _) = char '-' <> ppr l pprPat (NPlusKPat _ n k _ _ _) = hcat [ppr_n, char '+', ppr k] where ppr_n = case ghcPass @p of GhcPs -> ppr n GhcRn -> ppr n GhcTc -> ppr n pprPat (SplicePat ext splice) = case ghcPass @p of GhcPs -> pprUntypedSplice True Nothing splice GhcRn | HsUntypedSpliceNested n <- ext -> pprUntypedSplice True (Just n) splice GhcRn | HsUntypedSpliceTop _ p <- ext -> ppr p GhcTc -> dataConCantHappen ext pprPat (SigPat _ pat ty) = ppr pat <+> dcolon <+> ppr ty pprPat (ListPat _ pats) = brackets (interpp'SP pats) pprPat (OrPat _ pats) = pprWithSemis ppr (NE.toList pats) pprPat (TuplePat _ pats bx) -- Special-case unary boxed tuples so that they are pretty-printed as -- `MkSolo x`, not `(x)` | [pat] <- pats , Boxed <- bx = hcat [text (mkTupleStr Boxed dataName 1), pprParendLPat appPrec pat] | otherwise = tupleParens (boxityTupleSort bx) (pprWithCommas ppr pats) pprPat (SumPat _ pat alt arity) = sumParens (pprAlternative ppr pat alt arity) pprPat (ConPat { pat_con = con , pat_args = details , pat_con_ext = ext } ) = case ghcPass @p of GhcPs -> pprUserCon (unLoc con) details GhcRn -> pprUserCon (unLoc con) details GhcTc -> sdocOption sdocPrintTypecheckerElaboration $ \case False -> pprUserCon (unLoc con) details True -> -- Tiresome; in 'GHC.Tc.Gen.Bind.tcRhs' we print out a typechecked Pat in an -- error message, and we want to make sure it prints nicely ppr con <> braces (sep [ hsep (map pprPatBndr (tvs ++ dicts)) , ppr binds ]) <+> pprConArgs details where ConPatTc { cpt_tvs = tvs , cpt_dicts = dicts , cpt_binds = binds } = ext pprPat (EmbTyPat _ tp) = text "type" <+> ppr tp pprPat (InvisPat x tp) = char '@' <> delimit (ppr tp) 
where delimit | inferred = braces | needs_parens = parens | otherwise = id inferred = case ghcPass @p of GhcPs -> snd x == InferredSpec GhcRn -> x == InferredSpec GhcTc -> False needs_parens = hsTypeNeedsParens appPrec $ unLoc $ hstp_body tp pprPat (XPat ext) = case ghcPass @p of GhcRn -> case ext of HsPatExpanded orig _ -> pprPat orig GhcTc -> case ext of CoPat co pat _ -> pprHsWrapper co $ \parens -> if parens then pprParendPat appPrec pat else pprPat pat ExpansionPat orig _ -> pprPat orig pprUserCon :: (OutputableBndr con, OutputableBndrId p, Outputable (Anno (IdGhcP p))) => con -> HsConPatDetails (GhcPass p) -> SDoc pprUserCon c (InfixCon p1 p2) = ppr p1 <+> pprInfixOcc c <+> ppr p2 pprUserCon c details = pprPrefixOcc c <+> pprConArgs details pprConArgs :: (OutputableBndrId p, Outputable (Anno (IdGhcP p))) => HsConPatDetails (GhcPass p) -> SDoc pprConArgs (PrefixCon ts pats) = fsep (pprTyArgs ts : map (pprParendLPat appPrec) pats) where pprTyArgs tyargs = fsep (map ppr tyargs) pprConArgs (InfixCon p1 p2) = sep [ pprParendLPat appPrec p1 , pprParendLPat appPrec p2 ] pprConArgs (RecCon rpats) = ppr rpats {- ************************************************************************ * * * Building patterns * * ************************************************************************ -} mkPrefixConPat :: DataCon -> [LPat GhcTc] -> [Type] -> LPat GhcTc -- Make a vanilla Prefix constructor pattern mkPrefixConPat dc pats tys = noLocA $ ConPat { pat_con = noLocA (RealDataCon dc) , pat_args = PrefixCon [] pats , pat_con_ext = ConPatTc { cpt_tvs = [] , cpt_dicts = [] , cpt_binds = emptyTcEvBinds , cpt_arg_tys = tys , cpt_wrap = idHsWrapper } } mkNilPat :: Type -> LPat GhcTc mkNilPat ty = mkPrefixConPat nilDataCon [] [ty] mkCharLitPat :: SourceText -> Char -> LPat GhcTc mkCharLitPat src c = mkPrefixConPat charDataCon [noLocA $ LitPat noExtField (HsCharPrim src c)] [] {- ************************************************************************ * * * Predicates for checking 
things about pattern-lists in EquationInfo * * * ************************************************************************ \subsection[Pat-list-predicates]{Look for interesting things in patterns} Unlike in the Wadler chapter, where patterns are either ``variables'' or ``constructors,'' here we distinguish between: \begin{description} \item[unfailable:] Patterns that cannot fail to match: variables, wildcards, and lazy patterns. These are the irrefutable patterns; the two other categories are refutable patterns. \item[constructor:] A non-literal constructor pattern (see next category). \item[literal patterns:] At least the numeric ones may be overloaded. \end{description} A pattern is in {\em exactly one} of the above three categories; `as' patterns are treated specially, of course. The 1.3 report defines what ``irrefutable'' and ``failure-free'' patterns are. -} isBangedLPat :: LPat (GhcPass p) -> Bool isBangedLPat = isBangedPat . unLoc isBangedPat :: Pat (GhcPass p) -> Bool isBangedPat (ParPat _ p) = isBangedLPat p isBangedPat (BangPat {}) = True isBangedPat _ = False looksLazyPatBind :: HsBind GhcTc -> Bool -- Returns True of anything *except* -- a StrictHsBind (as above) or -- a VarPat -- In particular, returns True of a pattern binding with a compound pattern, like (I# x) -- Looks through AbsBinds looksLazyPatBind (PatBind { pat_lhs = p }) = looksLazyLPat p looksLazyPatBind (XHsBindsLR (AbsBinds { abs_binds = binds })) = any (looksLazyPatBind . unLoc) binds looksLazyPatBind _ = False looksLazyLPat :: LPat (GhcPass p) -> Bool looksLazyLPat = looksLazyPat . unLoc looksLazyPat :: Pat (GhcPass p) -> Bool looksLazyPat (ParPat _ p) = looksLazyLPat p looksLazyPat (AsPat _ _ p) = looksLazyLPat p looksLazyPat (BangPat {}) = False looksLazyPat (VarPat {}) = False looksLazyPat (WildPat {}) = False looksLazyPat _ = True {- Note [-XStrict and irrefutability] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When -XStrict is enabled the rules for irrefutability are slightly modified. 
Specifically, the pattern in a program like do ~(Just hi) <- expr cannot be considered irrefutable. The ~ here merely disables the bang that -XStrict would usually apply, rendering the program equivalent to the following without -XStrict do Just hi <- expr To achieve make this pattern irrefutable with -XStrict the user would rather need to write do ~(~(Just hi)) <- expr Failing to account for this resulted in #19027. To fix this isIrrefutableHsPat takes care to check for two the irrefutability of the inner pattern when it encounters a LazyPat and -XStrict is enabled. See also Note [decideBangHood] in GHC.HsToCore.Utils. -} -- | @isIrrefutableHsPat p@ is true if matching against @p@ cannot fail -- in the sense of falling through to the next pattern. -- (NB: this is not quite the same as the (silly) defn -- in 3.17.2 of the Haskell 98 report.) -- -- If isIrrefutableHsPat returns 'True', the pattern is definitely irrefutable. -- -- However, isIrrefutableHsPat returns 'False' if it's in doubt. It's a -- best effort guess with the information we have available: -- -- - we sometimes call 'isIrrefutableHsPat' from the renamer, in which case -- we don't have type information to hand. This means we can't properly -- handle GADTs, nor the result TyCon of COMPLETE pragmas. -- - even when calling 'isIrrefutableHsPat' in the typechecker, we don't keep -- track of any long distance information like the pattern-match checker does. isIrrefutableHsPat :: forall p . IsPass p => Bool -- ^ Are we in a @-XStrict@ context? -- See Note [-XStrict and irrefutability] -> (ConLikeP (GhcPass p) -> Bool) -- ^ How to check whether the 'ConLike' in a -- 'ConPat' pattern is irrefutable -> LPat (GhcPass p) -- ^ The (located) pattern to check -> Bool -- Is it irrefutable? 
isIrrefutableHsPat is_strict irref_conLike pat = go (unLoc pat) where goL (L _ p) = go p go :: Pat (GhcPass p) -> Bool go (WildPat {}) = True go (VarPat {}) = True go (LazyPat _ p') | is_strict = isIrrefutableHsPat False irref_conLike p' | otherwise = True go (BangPat _ pat) = goL pat go (ParPat _ pat) = goL pat go (AsPat _ _ pat) = goL pat go (ViewPat _ _ pat) = goL pat go (SigPat _ pat _) = goL pat go (TuplePat _ pats _) = all goL pats go (OrPat _ pats) = any goL pats -- This is simplistic; see Note [Irrefutable or-patterns] go (SumPat {}) = False -- See Note [Unboxed sum patterns aren't irrefutable] go (ListPat {}) = False -- See Note [Irrefutability of ConPat] go (ConPat { pat_con = L _ con, pat_args = details }) = irref_conLike con && all goL (hsConPatArgs details) go (LitPat {}) = False go (NPat {}) = False go (NPlusKPat {}) = False -- We conservatively assume that no TH splices are irrefutable -- since we cannot know until the splice is evaluated. go (SplicePat {}) = False -- The behavior of this case is unimportant, as GHC will throw an error shortly -- after reaching this case for other reasons (see TcRnIllegalTypePattern). go (EmbTyPat {}) = True go (InvisPat {}) = True go (XPat ext) = case ghcPass @p of GhcRn -> case ext of HsPatExpanded _ pat -> go pat GhcTc -> case ext of CoPat _ pat _ -> go pat ExpansionPat _ pat -> go pat -- | Is the pattern any of combination of: -- -- - (pat) -- - pat :: Type -- - ~pat -- - !pat -- - x (variable) isSimplePat :: LPat (GhcPass x) -> Maybe (IdP (GhcPass x)) isSimplePat p = case unLoc p of ParPat _ x -> isSimplePat x SigPat _ x _ -> isSimplePat x LazyPat _ x -> isSimplePat x BangPat _ x -> isSimplePat x VarPat _ x -> Just (unLoc x) _ -> Nothing -- | Is this pattern boring from the perspective of pattern-match checking, -- i.e. introduces no new pieces of long-distance information -- which could influence pattern-match checking? -- -- See Note [Boring patterns]. isBoringHsPat :: forall p. 
OutputableBndrId p => LPat (GhcPass p) -> Bool -- NB: it's always safe to return 'False' in this function; that just means -- performing potentially-redundant pattern-match checking. isBoringHsPat = goL where goL :: forall p. OutputableBndrId p => LPat (GhcPass p) -> Bool goL = go . unLoc go :: forall p. OutputableBndrId p => Pat (GhcPass p) -> Bool go = \case WildPat {} -> True VarPat {} -> True LazyPat {} -> True BangPat _ pat -> goL pat ParPat _ pat -> goL pat AsPat {} -> False -- the pattern x@y links x and y together, -- which is a nontrivial piece of information ViewPat _ _ pat -> goL pat SigPat _ pat _ -> goL pat TuplePat _ pats _ -> all goL pats SumPat _ pat _ _ -> goL pat ListPat _ pats -> all goL pats ConPat { pat_con = con, pat_args = details } -> case ghcPass @p of GhcPs -> False -- conservative GhcRn -> False -- conservative GhcTc | isVanillaConLike (unLoc con) -> all goL (hsConPatArgs details) | otherwise -- A pattern match on a GADT constructor can introduce -- type-level information (for example, T18572). -> False OrPat _ pats -> all goL pats LitPat {} -> True NPat {} -> True NPlusKPat {} -> True SplicePat {} -> False EmbTyPat {} -> True InvisPat {} -> True XPat ext -> case ghcPass @p of GhcRn -> case ext of HsPatExpanded _ pat -> go pat GhcTc -> case ext of CoPat _ pat _ -> go pat ExpansionPat _ pat -> go pat isPatSyn :: LPat GhcTc -> Bool isPatSyn (L _ (ConPat {pat_con = L _ (PatSynCon{})})) = True isPatSyn _ = False {- Note [Irrefutability of ConPat] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A constructor pattern `ConPat { pat_con, pat_args }` is irrefutable under two conditions: Irref-ConLike: the constructor, pat_con, is itself irrefutable. Irref-args : all of the argument patterns, pat_args, are irrefutable. The (Irref-ConLike) condition can be stated as follows: Irref-DataCon: a DataCon is irrefutable iff it is the only constructor of its parent type constructor. 
Irref-PatSyn: a PatSyn is irrefutable iff there is a COMPLETE pragma containing this PatSyn as its sole member. To understand this, let's consider some simple examples: data A = MkA Int Bool data BC = B Int | C pattern P :: Maybe Int -> BC pattern P mb_i <- ( ( \ case { B i -> Just i; C -> Nothing } ) -> mb_i ) {-# COMPLETE P #-} In this case: - the pattern 'A p1 p2' (for patterns 'p1 :: Int', 'p2 :: Bool') is irrefutable precisely when both 'p1' and 'p2' are irrefutable (this is the same as irrefutability of tuple patterns); - neither of the patterns 'B p' (for any pattern 'p :: Int') or 'C' are irrefutable, because the parent type constructor 'BC' contains more than one data constructor, - the pattern 'P q', for a pattern 'q :: Maybe Int', is irrefutable precisely when 'q' is irrefutable, due to the COMPLETE pragma on 'P'. Wrinkle [Irrefutability and COMPLETE pragma result TyCons] There is one subtlety in the Irref-PatSyn condition: COMPLETE pragmas may optionally specify a result TyCon, as explained in Note [Implementation of COMPLETE pragmas] in GHC.HsToCore.Pmc.Solver. So, for a COMPLETE pragma with a result TyCon, we would need to compute 'completeMatchAppliesAtType' to ensure that the COMPLETE pragma is indeed applicable. Doing so is not so straightforward in 'isIrrefutableHsPat', for a couple of reasons: 1. 'isIrrefutableHsPat' is called from within the renamer, which means we don't have the appropriate 'Type' to hand, 2. Even when 'isIrrefutableHsPat' is called from within the typechecker, computing 'completeMatchAppliesAtType' for a 'ConPat' which might be nested deep inside the top-level call, such as ( ( _ , P (x :: Int) ) :: ( Int, Int ) would require keeping track of types as we recur in 'isIrrefutableHsPat', which would be much more involved and require duplicating code from the pattern match checker (it performs this check using the notion of "match variables", which we don't have in the typechecker). 
Note [Irrefutable or-patterns] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When is an or-pattern ( p_1 ; ... ; p_n ) irrefutable? It certainly suffices that individual pattern p_i is irrefutable, but it isn't necessary. For example, with the datatype definition data ABC = A | B | C the or-pattern ( B ; C ; A ) is irrefutable. Similarly, one can take into account COMPLETE pragmas, e.g. (P ; R ; Q) is irrefutable in the presence of {-# COMPLETE P, Q, R #-}. This would extend Note [Irrefutability of ConPat] to the case of disjunctions of constructor patterns. For now, the function 'isIrrefutableHsPat' does not take into account these additional complications, and considers an or-pattern irrefutable precisely when any of the summands are irrefutable. This pessimistic behaviour is OK: the contract of 'isIrrefutableHsPat' is that it can only return 'True' for definitely irrefutable patterns, but may conservatively return 'False' in other cases. The justification for this design choice is as follows: 1. Producing the correct answer in all cases would be rather difficult, for example for a complex pattern such as ( P ; !( R ; S ; ( Q :: Ty ) ) ). 2. Irrefutable or-patterns aren't particularly common or useful, given that (currently) or-patterns aren't allowed to bind variables. Note [Unboxed sum patterns aren't irrefutable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unlike unboxed tuples, unboxed sums are *not* irrefutable when used as patterns. A simple example that demonstrates this is from #14228: pattern Just' x = (# x | #) pattern Nothing' = (# | () #) foo x = case x of Nothing' -> putStrLn "nothing" Just' -> putStrLn "just" In foo, the pattern Nothing' (that is, (# x | #)) is certainly not irrefutable, as does not match an unboxed sum value of the same arity—namely, (# | y #) (covered by Just'). In fact, no unboxed sum pattern is irrefutable, since the minimum unboxed sum arity is 2. 
Failing to mark unboxed sum patterns as non-irrefutable would cause the Just' case in foo to be unreachable, as GHC would mistakenly believe that Nothing' is the only thing that could possibly be matched! Note [Boring patterns] ~~~~~~~~~~~~~~~~~~~~~~ A pattern is called boring when no new information is gained upon successfully matching on the pattern. Some examples of boring patterns: - x, for a variable x. We learn nothing about x upon matching this pattern. - Just y. This pattern can fail, but if it matches, we don't learn anything about y. Some examples of non-boring patterns: - x@(Just y). A match on this pattern introduces the fact that x is headed by the constructor Just, which means that a subsequent pattern match such as case x of { Just z -> ... } should not be marked as incomplete. - a@b. Matching on this pattern introduces a relation between 'a' and 'b', which means that we shouldn't emit any warnings in code of the form case a of True -> case b of { True -> .. } -- no warning here! False -> ... - GADT patterns. For example, with the GADT data G i where { MkGInt :: G Int } a match on the pattern 'MkGInt' introduces type-level information: foo :: G i -> i foo MkGInt = 3 Here we learn that i ~ Int after matching on 'MkGInt', so this pattern is not boring. When a pattern is boring, and we are only interested in additional long-distance information (not whether the pattern itself is fallible), we can skip pattern-match checking entirely. Doing this saves about 10% allocations in test T11195. This happens when we are checking pattern-matches in do-notation, for example: do { x@(Just y) <- z ; ... ; return $ case x of { Just w -> ... } } Here we *do not* want to emit a pattern-match warning on the first line for the incomplete pattern-match, as incompleteness inside do-notation is handled using MonadFail. However, we still want to propagate the fact that x is headed by the 'Just' constructor, to avoid a pattern-match warning on the last line. 
Note [Implementation of OrPatterns] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This Note describes the implementation of the extension -XOrPatterns. * Proposal: https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0522-or-patterns.rst * Discussion: https://github.com/ghc-proposals/ghc-proposals/pull/522 and others Parser ------ We parse an or-pattern `pat_1; ...; pat_k` into `OrPat [pat_1, ..., pat_k]`, where `OrPat` is a constructor of `Pat` in Language.Haskell.Syntax.Pat. We occasionally refer to any of the `pat_k` as "pattern alternatives" below. The changes to the parser are as outlined in Section 8.1 of the proposal. The main productions are orpats -> exp | exp ';' orpats aexp2 -> '(' orpats ')' pat -> orpats Renamer and typechecker ----------------------- The typing rule for or-patterns in terms of pattern types is Γ0, Σ0 ⊢ pat_i : τ ⤳ Γ0,Σi,Ψi -------------------------------------------- Γ0, Σ0 ⊢ ( pat_1; ...; pat_n ) : τ ⤳ Γ0,Σ0,∅ (See the proposal for what a pattern type `Γ, Σ ⊢ pat : τ ⤳ Γ,Σ,Ψ` is.) The main points * None of the patterns may bind any variables, hence the same Γ0 in both input and output. * Any Given constraints bound by the pattern are discarded: the rule discards the Σi returned by each pattern. * Similarly any existentials Ψi bound by the pattern are discarded. In GHC.Rename.Pat.rnPatAndThen, we reject visible term and type binders (i.e. concerning Γ0). Regarding the Givens Σi and existenials Ψi (i.e. invisible type binders) introduced by the pattern alternatives `pat_i`, we discard them in GHC.Tc.Gen.Pats.tc_pat in a manner similar to LazyPats; see Note [Hopping the LIE in lazy patterns]. Why is it useful to allow Σi and Ψi only to discard them immediately after? Consider data T a where MkT :: forall a x. Num a => x -> T a foo :: T a -> a foo (MkT{}; MkT{}) = 3 We do want to allow matching on MkT{} in or-patterns, despite them invisibly binding an existential type variable `x` and a new Given constraint `Num a`. 
Clearly, `x` must be dead in the RHS of foo, because there is no field binder that brings it to life, so no harm done. But we must be careful not to solve the `Num a` Wanted constraint in the RHS of foo with the Given constraint from the pattern alternatives, hence we are Hopping the LIE. Desugarer --------- The desugaring of or-patterns is complicated by the fact that we have to avoid exponential code blowup. Consider f (LT; GT) (EQ; GT) = rhs1 f _ _ = rhs2 The naïve desugaring of or-patterns would explode every or-pattern, thus f LT EQ = rhs1 f LT GT = rhs1 f GT EQ = rhs1 f GT GT = rhs1 f _ _ = rhs2 which leads to an exponential number of copies of `rhs1`. Our current strategy, implemented in GHC.HsToCore.Match.tidy1, is to desugar to LambdaCase and ViewPatterns, f ((\case LT -> True; GT -> True; _ -> False) -> True) ((\case EQ -> True; GT -> True; _ -> False) -> True) = rhs1 f _ _ = rhs2 The existing code for ViewPatterns makes sure that we do not duplicate `rhs1` and the Simplifier will take care to turn this into efficient code. Pattern-match checker --------------------- The changes to the pattern-match checker are described in detail in Section 4.9 of the 2024 revision of the "Lower Your Guards" paper. What follows is a brief summary of that change. The pattern-match checker desugars patterns as well, into syntactic variants of *guard trees* such as `PmMatch`, describing a single Match `f ps | grhss`. It used to be that each such guard trees nicely captured the effects of pattern matching `ps` in a conjunctive list of `PmGrd`s, each of which refines the set of Nablas that reach the RHS of the clause. `PmGrd` is the heart of the Lower Your Guards approach: it is compositional, simple, and *non-recursive*, unlike or-patterns! Conjunction is implemented with the `...Pmc.Check.leftToRight` combinator. But to desugar or-patterns, we need to compose with `Pmc.Check.topToBottom` to model first match semantics! 
This was previously impossible in the pattern fragment, and indeed is incompatible with the simple "list of `PmGrd`s" desugaring of patterns. So our solution is to generalise "sequence of `PmGrd`" into a series-parallel graph `GrdDag`, a special kind of DAG, where "series" corresponds to left-to-right sequence and "parallel" corresponds to top-to-bottom or-pattern alternatives. Example f (LT; GT) True (EQ; GT) = rhs desugars to /- LT <- x -\ /- EQ <- z -\ . . True <- y . .-> rhs \- GT <- x ./ \- GT <- z -/ Branching is GdAlt and models first-match semantics of or-patterns, and sequencing is GdSeq. We must take care of exponential explosion of Covered sets for long matches like g (LT; GT) (LT; GT) ... True = 1 Fortunately, we can build on our existing throttling mechanism; see Note [Countering exponential blowup] in GHC.HsToCore.Pmc.Check. -} -- | @'patNeedsParens' p pat@ returns 'True' if the pattern @pat@ needs -- parentheses under precedence @p@. patNeedsParens :: forall p. IsPass p => PprPrec -> Pat (GhcPass p) -> Bool patNeedsParens p = go @p where -- Remark: go needs to be polymorphic, as we call it recursively -- at a different GhcPass (see the case for GhcTc XPat below). go :: forall q. IsPass q => Pat (GhcPass q) -> Bool go (NPlusKPat {}) = p > opPrec go (OrPat {}) = p > topPrec go (SplicePat {}) = False go (ConPat { pat_args = ds }) = conPatNeedsParens p ds go (SigPat {}) = p >= sigPrec go (ViewPat {}) = True go (EmbTyPat {}) = True go (InvisPat{}) = False go (XPat ext) = case ghcPass @q of GhcRn -> case ext of HsPatExpanded orig _ -> go orig GhcTc -> case ext of CoPat _ inner _ -> go inner ExpansionPat orig _ -> go orig -- ^^^^^^^ -- NB: recursive call of go at a different GhcPass. 
go (WildPat {}) = False go (VarPat {}) = False go (LazyPat {}) = False go (BangPat {}) = False go (ParPat {}) = False go (AsPat {}) = False -- Special-case unary boxed tuple applications so that they are -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612) -- See Note [One-tuples] in GHC.Builtin.Types go (TuplePat _ [_] Boxed) = p >= appPrec go (TuplePat{}) = False go (SumPat {}) = False go (ListPat {}) = False go (LitPat _ l) = hsLitNeedsParens p l go (NPat _ lol _ _) = hsOverLitNeedsParens p (unLoc lol) -- | @'conPatNeedsParens' p cp@ returns 'True' if the constructor patterns @cp@ -- needs parentheses under precedence @p@. conPatNeedsParens :: PprPrec -> HsConDetails t a b -> Bool conPatNeedsParens p = go where go (PrefixCon ts args) = p >= appPrec && (not (null args) || not (null ts)) go (InfixCon {}) = p >= opPrec -- type args should be empty in this case go (RecCon {}) = False -- | Parenthesize a pattern without token information gParPat :: forall p. IsPass p => LPat (GhcPass p) -> Pat (GhcPass p) gParPat pat = ParPat x pat where x = case ghcPass @p of GhcPs -> noAnn GhcRn -> noExtField GhcTc -> noExtField -- | @'parenthesizePat' p pat@ checks if @'patNeedsParens' p pat@ is true, and -- if so, surrounds @pat@ with a 'ParPat'. Otherwise, it simply returns @pat@. parenthesizePat :: IsPass p => PprPrec -> LPat (GhcPass p) -> LPat (GhcPass p) parenthesizePat p lpat@(L loc pat) | patNeedsParens p pat = L loc (gParPat lpat) | otherwise = lpat {- % Collect all EvVars from all constructor patterns -} -- May need to add more cases collectEvVarsPats :: [Pat GhcTc] -> Bag EvVar collectEvVarsPats = unionManyBags . map collectEvVarsPat collectEvVarsLPat :: LPat GhcTc -> Bag EvVar collectEvVarsLPat = collectEvVarsPat . 
unLoc collectEvVarsPat :: Pat GhcTc -> Bag EvVar collectEvVarsPat pat = case pat of LazyPat _ p -> collectEvVarsLPat p AsPat _ _ p -> collectEvVarsLPat p ParPat _ p -> collectEvVarsLPat p BangPat _ p -> collectEvVarsLPat p ListPat _ ps -> unionManyBags $ map collectEvVarsLPat ps TuplePat _ ps _ -> unionManyBags $ map collectEvVarsLPat ps OrPat _ ps -> unionManyBags $ map collectEvVarsLPat (NE.toList ps) SumPat _ p _ _ -> collectEvVarsLPat p ConPat { pat_args = args , pat_con_ext = ConPatTc { cpt_dicts = dicts } } -> unionBags (listToBag dicts) $ unionManyBags $ map collectEvVarsLPat $ hsConPatArgs args SigPat _ p _ -> collectEvVarsLPat p XPat ext -> case ext of CoPat _ p _ -> collectEvVarsPat p ExpansionPat _ p -> collectEvVarsPat p _other_pat -> emptyBag {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (Pat (GhcPass p)) = SrcSpanAnnA type instance Anno (HsOverLit (GhcPass p)) = EpAnnCO type instance Anno ConLike = SrcSpanAnnN type instance Anno (HsFieldBind lhs rhs) = SrcSpanAnnA type instance Anno RecFieldsDotDot = EpaLocation ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Pat.hs-boot0000644000000000000000000000104107346545000020275 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable module GHC.Hs.Pat where import GHC.Utils.Outputable import GHC.Hs.Extension ( OutputableBndrId, GhcPass ) import Language.Haskell.Syntax.Pat instance (OutputableBndrId p) => Outputable (Pat (GhcPass p)) pprLPat :: (OutputableBndrId p) => LPat (GhcPass p) -> SDoc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Specificity.hs0000644000000000000000000000250407346545000021070 0ustar0000000000000000{-# OPTIONS_GHC -Wno-orphans #-} 
module GHC.Hs.Specificity where import Prelude import Control.DeepSeq (NFData(..)) import GHC.Utils.Outputable import GHC.Utils.Binary import Language.Haskell.Syntax.Specificity {- ********************************************************************* * * * ForAllTyFlag * * ********************************************************************* -} instance Outputable ForAllTyFlag where ppr Required = text "[req]" ppr Specified = text "[spec]" ppr Inferred = text "[infrd]" instance Binary Specificity where put_ bh SpecifiedSpec = putByte bh 0 put_ bh InferredSpec = putByte bh 1 get bh = do h <- getByte bh case h of 0 -> return SpecifiedSpec _ -> return InferredSpec instance Binary ForAllTyFlag where put_ bh Required = putByte bh 0 put_ bh Specified = putByte bh 1 put_ bh Inferred = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return Required 1 -> return Specified _ -> return Inferred instance NFData Specificity where rnf SpecifiedSpec = () rnf InferredSpec = () instance NFData ForAllTyFlag where rnf (Invisible spec) = rnf spec rnf Required = () ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Type.hs0000644000000000000000000017714307346545000017552 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- NamedThing, Outputable, OutputableBndrId {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.Type: Abstract syntax: user-defined types -} module GHC.Hs.Type ( Mult, HsScaled(..), hsMult, hsScaledThing, HsArrow, HsArrowOf(..), arrowToHsType, expandHsArrow, 
EpLinearArrow(..), hsLinear, hsUnrestricted, isUnrestricted, pprHsArrow, HsType(..), HsCoreTy, LHsType, HsKind, LHsKind, HsForAllTelescope(..), EpAnnForallVis, EpAnnForallInvis, HsTyVarBndr(..), LHsTyVarBndr, AnnTyVarBndr(..), HsBndrKind(..), HsBndrVar(..), HsBndrVis(..), isHsBndrInvisible, LHsQTyVars(..), HsOuterTyVarBndrs(..), HsOuterFamEqnTyVarBndrs, HsOuterSigTyVarBndrs, HsWildCardBndrs(..), HsPatSigType(..), HsPSRn(..), HsTyPat(..), HsTyPatRn(..), HsTyPatRnBuilder(..), tpBuilderExplicitTV, tpBuilderPatSig, buildHsTyPatRn, builderFromHsTyPatRn, HsSigType(..), LHsSigType, LHsSigWcType, LHsWcType, HsTupleSort(..), HsContext, LHsContext, fromMaybeContext, HsTyLit(..), HsIPName(..), hsIPNameFS, HsArg(..), numVisibleArgs, pprHsArgsApp, LHsTypeArg, lhsTypeArgSrcSpan, OutputableBndrFlag, LBangType, BangType, HsSrcBang(..), HsImplBang(..), SrcStrictness(..), SrcUnpackedness(..), getBangType, getBangStrictness, ConDeclField(..), LConDeclField, pprConDeclFields, HsConDetails(..), noTypeArgs, FieldOcc(..), LFieldOcc, mkFieldOcc, fieldOccRdrName, fieldOccLRdrName, OpName(..), mkAnonWildCardTy, pprAnonWildCard, hsOuterTyVarNames, hsOuterExplicitBndrs, mapHsOuterImplicit, mkHsOuterImplicit, mkHsOuterExplicit, mkHsImplicitSigType, mkHsExplicitSigType, mkHsWildCardBndrs, mkHsPatSigType, mkHsTyPat, mkEmptyWildCardBndrs, mkHsForAllVisTele, mkHsForAllInvisTele, mkHsQTvs, hsQTvExplicit, emptyLHsQTvs, isHsKindedTyVar, hsBndrVar, hsBndrKind, hsTvbAllKinded, hsScopedTvs, hsScopedKvs, hsWcScopedTvs, dropWildCards, hsTyVarLName, hsTyVarName, hsAllLTyVarNames, hsLTyVarLocNames, hsLTyVarName, hsLTyVarNames, hsForAllTelescopeNames, hsLTyVarLocName, hsExplicitLTyVarNames, splitLHsInstDeclTy, getLHsInstDeclHead, getLHsInstDeclClass_maybe, splitLHsPatSynTy, splitLHsForAllTyInvis, splitLHsForAllTyInvis_KP, splitLHsQualTy, splitLHsSigmaTyInvis, splitLHsGadtTy, splitHsFunType, hsTyGetAppHead_maybe, mkHsOpTy, mkHsAppTy, mkHsAppTys, mkHsAppKindTy, ignoreParens, hsSigWcType, hsPatSigType, 
hsTyKindSig, setHsTyVarBndrFlag, hsTyVarBndrFlag, updateHsTyVarBndrFlag, -- Printing pprHsType, pprHsForAll, pprHsOuterFamEqnTyVarBndrs, pprHsOuterSigTyVarBndrs, pprLHsContext, hsTypeNeedsParens, parenthesizeHsType, parenthesizeHsContext ) where import GHC.Prelude import Language.Haskell.Syntax.Type import {-# SOURCE #-} GHC.Hs.Expr ( pprUntypedSplice, HsUntypedSpliceResult(..) ) import Language.Haskell.Syntax.Extension import GHC.Core.DataCon ( SrcStrictness(..), SrcUnpackedness(..) , HsSrcBang(..), HsImplBang(..) , mkHsSrcBang ) import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Types.Fixity ( LexicalFixity(..) ) import GHC.Types.SourceText import GHC.Types.Name import GHC.Types.Name.Reader ( RdrName ) import GHC.Types.Var ( VarBndr, visArgTypeLike ) import GHC.Core.TyCo.Rep ( Type(..) ) import GHC.Builtin.Names ( negateName ) import GHC.Builtin.Types( manyDataConName, oneDataConName, mkTupleStr ) import GHC.Core.Ppr ( pprOccWithTick) import GHC.Core.Type import GHC.Core.Multiplicity( pprArrowWithMultiplicity ) import GHC.Hs.Doc import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Utils.Outputable import GHC.Utils.Misc (count) import Data.Maybe import Data.Data (Data) import qualified Data.Semigroup as S import GHC.Data.Bag {- ************************************************************************ * * \subsection{Bang annotations} * * ************************************************************************ -} getBangType :: LHsType (GhcPass p) -> LHsType (GhcPass p) getBangType (L _ (HsBangTy _ _ lty)) = lty getBangType (L _ (HsDocTy x (L _ (HsBangTy _ _ lty)) lds)) = addCLocA lty lds (HsDocTy x lty lds) getBangType lty = lty getBangStrictness :: LHsType (GhcPass p) -> HsSrcBang getBangStrictness (L _ (HsBangTy (_, s) b _)) = HsSrcBang s b getBangStrictness (L _ (HsDocTy _ (L _ (HsBangTy (_, s) b _)) _)) = HsSrcBang s b getBangStrictness _ = (mkHsSrcBang NoSourceText NoSrcUnpack NoSrcStrict) {- 
************************************************************************ * * \subsection{Data types} * * ************************************************************************ -} fromMaybeContext :: Maybe (LHsContext (GhcPass p)) -> HsContext (GhcPass p) fromMaybeContext mctxt = unLoc $ fromMaybe (noLocA []) mctxt type instance XHsForAllVis (GhcPass _) = EpAnn (TokForall, TokRarrow) -- Location of 'forall' and '->' type instance XHsForAllInvis (GhcPass _) = EpAnn (TokForall, EpToken ".") -- Location of 'forall' and '.' type instance XXHsForAllTelescope (GhcPass _) = DataConCantHappen type EpAnnForallVis = EpAnn (TokForall, TokRarrow) type EpAnnForallInvis = EpAnn (TokForall, EpToken ".") type HsQTvsRn = [Name] -- Implicit variables -- For example, in data T (a :: k1 -> k2) = ... -- the 'a' is explicit while 'k1', 'k2' are implicit type instance XHsQTvs GhcPs = NoExtField type instance XHsQTvs GhcRn = HsQTvsRn type instance XHsQTvs GhcTc = HsQTvsRn type instance XXLHsQTyVars (GhcPass _) = DataConCantHappen mkHsForAllVisTele ::EpAnnForallVis -> [LHsTyVarBndr () (GhcPass p)] -> HsForAllTelescope (GhcPass p) mkHsForAllVisTele an vis_bndrs = HsForAllVis { hsf_xvis = an, hsf_vis_bndrs = vis_bndrs } mkHsForAllInvisTele :: EpAnnForallInvis -> [LHsTyVarBndr Specificity (GhcPass p)] -> HsForAllTelescope (GhcPass p) mkHsForAllInvisTele an invis_bndrs = HsForAllInvis { hsf_xinvis = an, hsf_invis_bndrs = invis_bndrs } mkHsQTvs :: [LHsTyVarBndr (HsBndrVis GhcPs) GhcPs] -> LHsQTyVars GhcPs mkHsQTvs tvs = HsQTvs { hsq_ext = noExtField, hsq_explicit = tvs } emptyLHsQTvs :: LHsQTyVars GhcRn emptyLHsQTvs = HsQTvs { hsq_ext = [], hsq_explicit = [] } ------------------------------------------------ -- HsOuterTyVarBndrs type instance XHsOuterImplicit GhcPs = NoExtField type instance XHsOuterImplicit GhcRn = [Name] type instance XHsOuterImplicit GhcTc = [TyVar] type instance XHsOuterExplicit GhcPs _ = EpAnnForallInvis type instance XHsOuterExplicit GhcRn _ = NoExtField type instance 
XHsOuterExplicit GhcTc flag = [VarBndr TyVar flag] type instance XXHsOuterTyVarBndrs (GhcPass _) = DataConCantHappen type instance XHsWC GhcPs b = NoExtField type instance XHsWC GhcRn b = [Name] type instance XHsWC GhcTc b = [Name] type instance XXHsWildCardBndrs (GhcPass _) _ = DataConCantHappen type instance XHsPS GhcPs = EpAnnCO type instance XHsPS GhcRn = HsPSRn type instance XHsPS GhcTc = HsPSRn type instance XHsTP GhcPs = NoExtField type instance XHsTP GhcRn = HsTyPatRn type instance XHsTP GhcTc = DataConCantHappen -- | The extension field for 'HsPatSigType', which is only used in the -- renamer onwards. See @Note [Pattern signature binders and scoping]@. data HsPSRn = HsPSRn { hsps_nwcs :: [Name] -- ^ Wildcard names , hsps_imp_tvs :: [Name] -- ^ Implicitly bound variable names } deriving Data -- HsTyPatRn is the extension field for `HsTyPat`, after renaming -- E.g. pattern K @(Maybe (_x, a, b::Proxy k) -- In the type pattern @(Maybe ...): -- '_x' is a named wildcard -- 'a' is explicitly bound -- 'k' is implicitly bound -- See Note [Implicit and explicit type variable binders] in GHC.Rename.Pat data HsTyPatRn = HsTPRn { hstp_nwcs :: [Name] -- ^ Wildcard names , hstp_imp_tvs :: [Name] -- ^ Implicitly bound variable names , hstp_exp_tvs :: [Name] -- ^ Explicitly bound variable names } deriving Data -- | A variant of HsTyPatRn that uses Bags for efficient concatenation. 
-- See Note [Implicit and explicit type variable binders] in GHC.Rename.Pat data HsTyPatRnBuilder = HsTPRnB { hstpb_nwcs :: Bag Name, hstpb_imp_tvs :: Bag Name, hstpb_exp_tvs :: Bag Name } tpBuilderExplicitTV :: Name -> HsTyPatRnBuilder tpBuilderExplicitTV name = mempty {hstpb_exp_tvs = unitBag name} tpBuilderPatSig :: HsPSRn -> HsTyPatRnBuilder tpBuilderPatSig HsPSRn {hsps_nwcs, hsps_imp_tvs} = mempty { hstpb_nwcs = listToBag hsps_nwcs, hstpb_imp_tvs = listToBag hsps_imp_tvs } instance Semigroup HsTyPatRnBuilder where HsTPRnB nwcs1 imp_tvs1 exptvs1 <> HsTPRnB nwcs2 imp_tvs2 exptvs2 = HsTPRnB (nwcs1 `unionBags` nwcs2) (imp_tvs1 `unionBags` imp_tvs2) (exptvs1 `unionBags` exptvs2) instance Monoid HsTyPatRnBuilder where mempty = HsTPRnB emptyBag emptyBag emptyBag buildHsTyPatRn :: HsTyPatRnBuilder -> HsTyPatRn buildHsTyPatRn HsTPRnB {hstpb_nwcs, hstpb_imp_tvs, hstpb_exp_tvs} = HsTPRn { hstp_nwcs = bagToList hstpb_nwcs, hstp_imp_tvs = bagToList hstpb_imp_tvs, hstp_exp_tvs = bagToList hstpb_exp_tvs } builderFromHsTyPatRn :: HsTyPatRn -> HsTyPatRnBuilder builderFromHsTyPatRn HsTPRn{hstp_nwcs, hstp_imp_tvs, hstp_exp_tvs} = HsTPRnB { hstpb_nwcs = listToBag hstp_nwcs, hstpb_imp_tvs = listToBag hstp_imp_tvs, hstpb_exp_tvs = listToBag hstp_exp_tvs } type instance XXHsPatSigType (GhcPass _) = DataConCantHappen type instance XXHsTyPat (GhcPass _) = DataConCantHappen type instance XHsSig (GhcPass _) = NoExtField type instance XXHsSigType (GhcPass _) = DataConCantHappen hsSigWcType :: forall p. UnXRec p => LHsSigWcType p -> LHsType p hsSigWcType = sig_body . unXRec @p . 
hswc_body dropWildCards :: LHsSigWcType pass -> LHsSigType pass -- Drop the wildcard part of a LHsSigWcType dropWildCards sig_ty = hswc_body sig_ty hsOuterTyVarNames :: HsOuterTyVarBndrs flag GhcRn -> [Name] hsOuterTyVarNames (HsOuterImplicit{hso_ximplicit = imp_tvs}) = imp_tvs hsOuterTyVarNames (HsOuterExplicit{hso_bndrs = bndrs}) = hsLTyVarNames bndrs hsOuterExplicitBndrs :: HsOuterTyVarBndrs flag (GhcPass p) -> [LHsTyVarBndr flag (NoGhcTc (GhcPass p))] hsOuterExplicitBndrs (HsOuterExplicit{hso_bndrs = bndrs}) = bndrs hsOuterExplicitBndrs (HsOuterImplicit{}) = [] mkHsOuterImplicit :: HsOuterTyVarBndrs flag GhcPs mkHsOuterImplicit = HsOuterImplicit{hso_ximplicit = noExtField} mkHsOuterExplicit :: EpAnnForallInvis -> [LHsTyVarBndr flag GhcPs] -> HsOuterTyVarBndrs flag GhcPs mkHsOuterExplicit an bndrs = HsOuterExplicit { hso_xexplicit = an , hso_bndrs = bndrs } mkHsImplicitSigType :: LHsType GhcPs -> HsSigType GhcPs mkHsImplicitSigType body = HsSig { sig_ext = noExtField , sig_bndrs = mkHsOuterImplicit, sig_body = body } mkHsExplicitSigType :: EpAnnForallInvis -> [LHsTyVarBndr Specificity GhcPs] -> LHsType GhcPs -> HsSigType GhcPs mkHsExplicitSigType an bndrs body = HsSig { sig_ext = noExtField , sig_bndrs = mkHsOuterExplicit an bndrs, sig_body = body } mkHsWildCardBndrs :: thing -> HsWildCardBndrs GhcPs thing mkHsWildCardBndrs x = HsWC { hswc_body = x , hswc_ext = noExtField } mkHsPatSigType :: EpAnnCO -> LHsType GhcPs -> HsPatSigType GhcPs mkHsPatSigType ann x = HsPS { hsps_ext = ann , hsps_body = x } mkHsTyPat :: LHsType GhcPs -> HsTyPat GhcPs mkHsTyPat x = HsTP { hstp_ext = noExtField , hstp_body = x } mkEmptyWildCardBndrs :: thing -> HsWildCardBndrs GhcRn thing mkEmptyWildCardBndrs x = HsWC { hswc_body = x , hswc_ext = [] } -------------------------------------------------- type instance XTyVarBndr (GhcPass _) = AnnTyVarBndr type instance XXTyVarBndr (GhcPass _) = DataConCantHappen type instance XBndrKind (GhcPass p) = NoExtField type instance XBndrNoKind 
(GhcPass p) = NoExtField type instance XXBndrKind (GhcPass p) = DataConCantHappen type instance XBndrVar (GhcPass p) = NoExtField type instance XBndrWildCard GhcPs = EpToken "_" type instance XBndrWildCard GhcRn = NoExtField type instance XBndrWildCard GhcTc = NoExtField type instance XXBndrVar (GhcPass p) = DataConCantHappen data AnnTyVarBndr = AnnTyVarBndr { atv_opens :: [EpaLocation], -- all "(" or all "{" atv_closes :: [EpaLocation], -- all ")" or all "}" atv_tv :: EpToken "'", atv_dcolon :: TokDcolon } deriving Data instance NoAnn AnnTyVarBndr where noAnn = AnnTyVarBndr noAnn noAnn noAnn noAnn -- | Return the attached flag hsTyVarBndrFlag :: HsTyVarBndr flag (GhcPass pass) -> flag hsTyVarBndrFlag = tvb_flag -- By specialising to (GhcPass p) we know that XXTyVarBndr is DataConCantHappen -- so the equation is exhaustive: extension construction can't happen -- | Set the attached flag setHsTyVarBndrFlag :: flag -> HsTyVarBndr flag' (GhcPass pass) -> HsTyVarBndr flag (GhcPass pass) setHsTyVarBndrFlag fl tvb = tvb { tvb_flag = fl } -- | Update the attached flag updateHsTyVarBndrFlag :: (flag -> flag') -> HsTyVarBndr flag (GhcPass pass) -> HsTyVarBndr flag' (GhcPass pass) updateHsTyVarBndrFlag f tvb = tvb { tvb_flag = f (tvb_flag tvb) } -- | Get the variable of the type variable binder hsBndrVar :: HsTyVarBndr flag (GhcPass pass) -> HsBndrVar (GhcPass pass) hsBndrVar = tvb_var -- | Get the kind of the type variable binder hsBndrKind :: HsTyVarBndr flag (GhcPass pass) -> HsBndrKind (GhcPass pass) hsBndrKind = tvb_kind -- | Do all type variables in this 'LHsQTyVars' come with kind annotations? hsTvbAllKinded :: LHsQTyVars (GhcPass p) -> Bool hsTvbAllKinded = all (isHsKindedTyVar . unLoc) . 
hsQTvExplicit type instance XBndrRequired (GhcPass _) = NoExtField type instance XBndrInvisible GhcPs = EpToken "@" type instance XBndrInvisible GhcRn = NoExtField type instance XBndrInvisible GhcTc = NoExtField type instance XXBndrVis (GhcPass _) = DataConCantHappen {- Note [Wildcard binders in disallowed contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In contexts where a type variable binder is expected (HsTyVarBndr), we usually allow both named binders and wildcards, e.g. type Const1 a b = a -- ok type Const2 a _ = a -- ok, too This applies to LHSs of data, newtype, type, class, type family and data family declarations. However, we choose to reject wildcards in forall telescopes and type family result variables (the latter being part of TypeFamilyDependencies): type family Fd a = _ -- disallowed (WildcardBndrInTyFamResultVar) fn :: forall _. Int -- disallowed (WildcardBndrInForallTelescope) This restriction is placed solely because such binders have not been proposed and there is no known use case for them. If we see user demand for wildcard binders in these contexts, adding support for them would be as easy as dropping the checks that reject them. The rest of the compiler can handle all wildcard binders regardless of context by generating a fresh name (see `tcHsBndrVarName` in GHC.Tc.Gen.HsType and `repHsBndrVar` in GHC.HsToCore.Quote). That is, in type declarations we have: type F _ = ... -- equivalent to ... type F _a = ... -- where _a is fresh and the same principle could be applied to foralls: fn :: forall _. Int -- equivalent to ... fn :: forall _a. Int -- where _a is fresh except the `forall _.` example is rejected by checkForAllTelescopeWildcardBndrs. 
-} type instance XForAllTy (GhcPass _) = NoExtField type instance XQualTy (GhcPass _) = NoExtField type instance XTyVar (GhcPass _) = EpToken "'" type instance XAppTy (GhcPass _) = NoExtField type instance XFunTy (GhcPass _) = NoExtField type instance XListTy (GhcPass _) = AnnParen type instance XTupleTy (GhcPass _) = AnnParen type instance XSumTy (GhcPass _) = AnnParen type instance XOpTy (GhcPass _) = NoExtField type instance XParTy (GhcPass _) = (EpToken "(", EpToken ")") type instance XIParamTy (GhcPass _) = TokDcolon type instance XStarTy (GhcPass _) = NoExtField type instance XKindSig (GhcPass _) = TokDcolon type instance XAppKindTy GhcPs = EpToken "@" type instance XAppKindTy GhcRn = NoExtField type instance XAppKindTy GhcTc = NoExtField type instance XSpliceTy GhcPs = NoExtField type instance XSpliceTy GhcRn = HsUntypedSpliceResult (LHsType GhcRn) type instance XSpliceTy GhcTc = Kind type instance XDocTy (GhcPass _) = NoExtField type instance XBangTy (GhcPass _) = ((EpaLocation, EpToken "#-}", EpaLocation), SourceText) type instance XRecTy GhcPs = AnnList () type instance XRecTy GhcRn = NoExtField type instance XRecTy GhcTc = NoExtField type instance XExplicitListTy GhcPs = (EpToken "'", EpToken "[", EpToken "]") type instance XExplicitListTy GhcRn = NoExtField type instance XExplicitListTy GhcTc = Kind type instance XExplicitTupleTy GhcPs = (EpToken "'", EpToken "(", EpToken ")") type instance XExplicitTupleTy GhcRn = NoExtField type instance XExplicitTupleTy GhcTc = [Kind] type instance XTyLit (GhcPass _) = NoExtField type instance XWildCardTy GhcPs = EpToken "_" type instance XWildCardTy GhcRn = NoExtField type instance XWildCardTy GhcTc = NoExtField type instance XXType (GhcPass _) = HsCoreTy -- An escape hatch for tunnelling a Core 'Type' through 'HsType'. 
-- For more details on how this works, see: -- -- * @Note [Renaming HsCoreTys]@ in "GHC.Rename.HsType" -- -- * @Note [Typechecking HsCoreTys]@ in "GHC.Tc.Gen.HsType" type HsCoreTy = Type type instance XNumTy (GhcPass _) = SourceText type instance XStrTy (GhcPass _) = SourceText type instance XCharTy (GhcPass _) = SourceText type instance XXTyLit (GhcPass _) = DataConCantHappen data EpLinearArrow = EpPct1 !(EpToken "%1") !(TokRarrow) | EpLolly !(EpToken "⊸") deriving Data instance NoAnn EpLinearArrow where noAnn = EpPct1 noAnn noAnn type instance XUnrestrictedArrow _ GhcPs = TokRarrow type instance XUnrestrictedArrow _ GhcRn = NoExtField type instance XUnrestrictedArrow _ GhcTc = NoExtField type instance XLinearArrow _ GhcPs = EpLinearArrow type instance XLinearArrow _ GhcRn = NoExtField type instance XLinearArrow _ GhcTc = NoExtField type instance XExplicitMult _ GhcPs = (EpToken "%", TokRarrow) type instance XExplicitMult _ GhcRn = NoExtField type instance XExplicitMult _ GhcTc = NoExtField type instance XXArrow _ (GhcPass _) = DataConCantHappen hsLinear :: forall p a. IsPass p => a -> HsScaled (GhcPass p) a hsLinear = HsScaled (HsLinearArrow x) where x = case ghcPass @p of GhcPs -> noAnn GhcRn -> noExtField GhcTc -> noExtField hsUnrestricted :: forall p a. IsPass p => a -> HsScaled (GhcPass p) a hsUnrestricted = HsScaled (HsUnrestrictedArrow x) where x = case ghcPass @p of GhcPs -> noAnn GhcRn -> noExtField GhcTc -> noExtField isUnrestricted :: HsArrow GhcRn -> Bool isUnrestricted (arrowToHsType -> L _ (HsTyVar _ _ (L _ n))) = n == manyDataConName isUnrestricted _ = False arrowToHsType :: HsArrow GhcRn -> LHsType GhcRn arrowToHsType = expandHsArrow (HsTyVar noAnn NotPromoted) -- | Convert an arrow into its corresponding multiplicity. In essence this -- erases the information of whether the programmer wrote an explicit -- multiplicity or a shorthand. 
expandHsArrow :: (LocatedN Name -> t GhcRn) -> HsArrowOf (LocatedA (t GhcRn)) GhcRn -> LocatedA (t GhcRn) expandHsArrow mk_var (HsUnrestrictedArrow _) = noLocA (mk_var (noLocA manyDataConName)) expandHsArrow mk_var (HsLinearArrow _) = noLocA (mk_var (noLocA oneDataConName)) expandHsArrow _mk_var (HsExplicitMult _ p) = p instance (Outputable mult, OutputableBndrId pass) => Outputable (HsArrowOf mult (GhcPass pass)) where ppr arr = parens (pprHsArrow arr) -- See #18846 pprHsArrow :: (Outputable mult, OutputableBndrId pass) => HsArrowOf mult (GhcPass pass) -> SDoc pprHsArrow (HsUnrestrictedArrow _) = pprArrowWithMultiplicity visArgTypeLike (Left False) pprHsArrow (HsLinearArrow _) = pprArrowWithMultiplicity visArgTypeLike (Left True) pprHsArrow (HsExplicitMult _ p) = pprArrowWithMultiplicity visArgTypeLike (Right (ppr p)) type instance XConDeclField (GhcPass _) = TokDcolon type instance XXConDeclField (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (ConDeclField (GhcPass p)) where ppr (ConDeclField _ fld_n fld_ty _) = ppr fld_n <+> dcolon <+> ppr fld_ty --------------------- hsWcScopedTvs :: LHsSigWcType GhcRn -> [Name] -- Get the lexically-scoped type variables of an LHsSigWcType: -- - the explicitly-given forall'd type variables; -- see Note [Lexically scoped type variables] -- - the named wildcards; see Note [Scoping of named wildcards] -- because they scope in the same way hsWcScopedTvs sig_wc_ty | HsWC { hswc_ext = nwcs, hswc_body = sig_ty } <- sig_wc_ty , L _ (HsSig{sig_bndrs = outer_bndrs}) <- sig_ty = nwcs ++ hsLTyVarNames (hsOuterExplicitBndrs outer_bndrs) -- See Note [hsScopedTvs and visible foralls] hsScopedTvs :: LHsSigType GhcRn -> [Name] -- Same as hsWcScopedTvs, but for a LHsSigType hsScopedTvs (L _ (HsSig{sig_bndrs = outer_bndrs})) = hsLTyVarNames (hsOuterExplicitBndrs outer_bndrs) -- See Note [hsScopedTvs and visible foralls] hsScopedKvs :: LHsKind GhcRn -> [Name] -- Same as hsScopedTvs, but for a LHsKind hsScopedKvs (L _ 
HsForAllTy { hst_tele = HsForAllInvis { hsf_invis_bndrs = bndrs }}) = hsLTyVarNames bndrs -- See Note [hsScopedTvs and visible foralls] hsScopedKvs _ = [] --------------------- hsTyVarLName :: HsTyVarBndr flag (GhcPass p) -> Maybe (LIdP (GhcPass p)) hsTyVarLName tvb = case hsBndrVar tvb of HsBndrVar _ n -> Just n HsBndrWildCard _ -> Nothing hsTyVarName :: HsTyVarBndr flag (GhcPass p) -> Maybe (IdP (GhcPass p)) hsTyVarName = fmap unLoc . hsTyVarLName hsLTyVarName :: LHsTyVarBndr flag (GhcPass p) -> Maybe (IdP (GhcPass p)) hsLTyVarName = hsTyVarName . unLoc hsLTyVarNames :: [LHsTyVarBndr flag (GhcPass p)] -> [IdP (GhcPass p)] hsLTyVarNames = mapMaybe hsLTyVarName hsForAllTelescopeNames :: HsForAllTelescope (GhcPass p) -> [IdP (GhcPass p)] hsForAllTelescopeNames (HsForAllVis _ bndrs) = hsLTyVarNames bndrs hsForAllTelescopeNames (HsForAllInvis _ bndrs) = hsLTyVarNames bndrs hsExplicitLTyVarNames :: LHsQTyVars (GhcPass p) -> [IdP (GhcPass p)] -- Explicit variables only hsExplicitLTyVarNames qtvs = hsLTyVarNames (hsQTvExplicit qtvs) hsAllLTyVarNames :: LHsQTyVars GhcRn -> [Name] -- All variables hsAllLTyVarNames (HsQTvs { hsq_ext = kvs , hsq_explicit = tvs }) = kvs ++ hsLTyVarNames tvs hsLTyVarLocName :: Anno (IdGhcP p) ~ SrcSpanAnnN => LHsTyVarBndr flag (GhcPass p) -> Maybe (LocatedN (IdP (GhcPass p))) hsLTyVarLocName (L _ a) = hsTyVarLName a hsLTyVarLocNames :: Anno (IdGhcP p) ~ SrcSpanAnnN => LHsQTyVars (GhcPass p) -> [LocatedN (IdP (GhcPass p))] hsLTyVarLocNames qtvs = mapMaybe hsLTyVarLocName (hsQTvExplicit qtvs) -- | Get the kind signature of a type, ignoring parentheses: -- -- hsTyKindSig `Maybe ` = Nothing -- hsTyKindSig `Maybe :: Type -> Type ` = Just `Type -> Type` -- hsTyKindSig `Maybe :: ((Type -> Type))` = Just `Type -> Type` -- -- This is used to extract the result kind of type synonyms with a CUSK: -- -- type S = (F :: res_kind) -- ^^^^^^^^ -- hsTyKindSig :: LHsType (GhcPass p) -> Maybe (LHsKind (GhcPass p)) hsTyKindSig lty = case unLoc lty of HsParTy _ 
lty' -> hsTyKindSig lty' HsKindSig _ _ k -> Just k _ -> Nothing --------------------- ignoreParens :: LHsType (GhcPass p) -> LHsType (GhcPass p) ignoreParens (L _ (HsParTy _ ty)) = ignoreParens ty ignoreParens ty = ty {- ************************************************************************ * * Building types * * ************************************************************************ -} mkAnonWildCardTy :: EpToken "_" -> HsType GhcPs mkAnonWildCardTy tok = HsWildCardTy tok mkHsOpTy :: (Anno (IdGhcP p) ~ SrcSpanAnnN) => PromotionFlag -> LHsType (GhcPass p) -> LocatedN (IdP (GhcPass p)) -> LHsType (GhcPass p) -> HsType (GhcPass p) mkHsOpTy prom ty1 op ty2 = HsOpTy noExtField prom ty1 op ty2 mkHsAppTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) mkHsAppTy t1 t2 = addCLocA t1 t2 (HsAppTy noExtField t1 t2) mkHsAppTys :: LHsType (GhcPass p) -> [LHsType (GhcPass p)] -> LHsType (GhcPass p) mkHsAppTys = foldl' mkHsAppTy mkHsAppKindTy :: XAppKindTy (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) mkHsAppKindTy at ty k = addCLocA ty k (HsAppKindTy at ty k) {- ************************************************************************ * * Decomposing HsTypes * * ************************************************************************ -} --------------------------------- -- splitHsFunType decomposes a type (t1 -> t2 ... 
-> tn) -- Breaks up any parens in the result type: -- splitHsFunType (a -> (b -> c)) = ([a,b], c) -- It returns API Annotations for any parens removed splitHsFunType :: LHsType (GhcPass p) -> ( ([EpToken "("], [EpToken ")"]) , EpAnnComments -- The locations of any parens and -- comments discarded , [HsScaled (GhcPass p) (LHsType (GhcPass p))], LHsType (GhcPass p)) splitHsFunType ty = go ty where go (L l (HsParTy (op,cp) ty)) = let ((ops, cps), cs, args, res) = splitHsFunType ty cs' = cs S.<> epAnnComments l in ((ops++[op], cps ++ [cp]), cs', args, res) go (L ll (HsFunTy _ mult x y)) | (anns, csy, args, res) <- splitHsFunType y = (anns, csy S.<> epAnnComments ll, HsScaled mult x:args, res) go other = (noAnn, emptyComments, [], other) -- | Retrieve the name of the \"head\" of a nested type application. -- This is somewhat like @GHC.Tc.Gen.HsType.splitHsAppTys@, but a little more -- thorough. The purpose of this function is to examine instance heads, so it -- doesn't handle *all* cases (like lists, tuples, @(~)@, etc.). hsTyGetAppHead_maybe :: (Anno (IdGhcP p) ~ SrcSpanAnnN) => LHsType (GhcPass p) -> Maybe (LocatedN (IdP (GhcPass p))) hsTyGetAppHead_maybe = go where go (L _ (HsTyVar _ _ ln)) = Just ln go (L _ (HsAppTy _ l _)) = go l go (L _ (HsAppKindTy _ t _)) = go t go (L _ (HsOpTy _ _ _ ln _)) = Just ln go (L _ (HsParTy _ t)) = go t go (L _ (HsKindSig _ t _)) = go t go _ = Nothing ------------------------------------------------------------ type instance XValArg (GhcPass _) = NoExtField type instance XTypeArg GhcPs = EpToken "@" type instance XTypeArg GhcRn = NoExtField type instance XTypeArg GhcTc = NoExtField type instance XArgPar (GhcPass _) = SrcSpan type instance XXArg (GhcPass _) = DataConCantHappen -- | Compute the 'SrcSpan' associated with an 'LHsTypeArg'. 
lhsTypeArgSrcSpan :: LHsTypeArg GhcPs -> SrcSpan lhsTypeArgSrcSpan arg = case arg of HsValArg _ tm -> getLocA tm HsTypeArg at ty -> getEpTokenSrcSpan at `combineSrcSpans` getLocA ty HsArgPar sp -> sp -------------------------------- numVisibleArgs :: [HsArg p tm ty] -> Arity numVisibleArgs = count is_vis where is_vis (HsValArg _ _) = True is_vis _ = False -------------------------------- -- | @'pprHsArgsApp' id fixity args@ pretty-prints an application of @id@ -- to @args@, using the @fixity@ to tell whether @id@ should be printed prefix -- or infix. Examples: -- -- @ -- pprHsArgsApp T Prefix [HsTypeArg Bool, HsValArg Int] = T \@Bool Int -- pprHsArgsApp T Prefix [HsTypeArg Bool, HsArgPar, HsValArg Int] = (T \@Bool) Int -- pprHsArgsApp (++) Infix [HsValArg Char, HsValArg Double] = Char ++ Double -- pprHsArgsApp (++) Infix [HsValArg Char, HsValArg Double, HsVarArg Ordering] = (Char ++ Double) Ordering -- @ pprHsArgsApp :: (OutputableBndr id, Outputable tm, Outputable ty) => id -> LexicalFixity -> [HsArg (GhcPass p) tm ty] -> SDoc pprHsArgsApp thing fixity (argl:argr:args) | Infix <- fixity = let pp_op_app = hsep [ ppr_single_hs_arg argl , pprInfixOcc thing , ppr_single_hs_arg argr ] in case args of [] -> pp_op_app _ -> ppr_hs_args_prefix_app (parens pp_op_app) args pprHsArgsApp thing _fixity args = ppr_hs_args_prefix_app (pprPrefixOcc thing) args -- | Pretty-print a prefix identifier to a list of 'HsArg's. ppr_hs_args_prefix_app :: (Outputable tm, Outputable ty) => SDoc -> [HsArg (GhcPass p) tm ty] -> SDoc ppr_hs_args_prefix_app acc [] = acc ppr_hs_args_prefix_app acc (arg:args) = case arg of HsValArg{} -> ppr_hs_args_prefix_app (acc <+> ppr_single_hs_arg arg) args HsTypeArg{} -> ppr_hs_args_prefix_app (acc <+> ppr_single_hs_arg arg) args HsArgPar{} -> ppr_hs_args_prefix_app (parens acc) args -- | Pretty-print an 'HsArg' in isolation. 
ppr_single_hs_arg :: (Outputable tm, Outputable ty) => HsArg (GhcPass p) tm ty -> SDoc ppr_single_hs_arg (HsValArg _ tm) = ppr tm ppr_single_hs_arg (HsTypeArg _ ty) = char '@' <> ppr ty -- GHC shouldn't be constructing ASTs such that this case is ever reached. -- Still, it's possible some wily user might construct their own AST that -- allows this to be reachable, so don't fail here. ppr_single_hs_arg (HsArgPar{}) = empty -- | This instance is meant for debug-printing purposes. If you wish to -- pretty-print an application of 'HsArg's, use 'pprHsArgsApp' instead. instance (Outputable tm, Outputable ty) => Outputable (HsArg (GhcPass p) tm ty) where ppr (HsValArg _ tm) = text "HsValArg" <+> ppr tm ppr (HsTypeArg _ ty) = text "HsTypeArg" <+> ppr ty ppr (HsArgPar sp) = text "HsArgPar" <+> ppr sp -------------------------------- -- | Decompose a pattern synonym type signature into its constituent parts. -- -- Note that this function looks through parentheses, so it will work on types -- such as @(forall a. <...>)@. The downside to this is that it is not -- generally possible to take the returned types and reconstruct the original -- type (parentheses and all) from them. splitLHsPatSynTy :: LHsSigType (GhcPass p) -> ( [LHsTyVarBndr Specificity (GhcPass (NoGhcTcPass p))] -- universals , Maybe (LHsContext (GhcPass p)) -- required constraints , [LHsTyVarBndr Specificity (GhcPass p)] -- existentials , Maybe (LHsContext (GhcPass p)) -- provided constraints , LHsType (GhcPass p)) -- body type splitLHsPatSynTy ty = (univs, reqs, exis, provs, ty4) where -- split_sig_ty :: -- LHsSigType (GhcPass p) -- -> ([LHsTyVarBndr Specificity (GhcPass (NoGhcTcPass p))], LHsType (GhcPass p)) split_sig_ty (L _ HsSig{sig_bndrs = outer_bndrs, sig_body = body}) = case outer_bndrs of -- NB: Use ignoreParens here in order to be consistent with the use of -- splitLHsForAllTyInvis below, which also looks through parentheses. 
HsOuterImplicit{} -> ([], ignoreParens body) HsOuterExplicit{hso_bndrs = exp_bndrs} -> (exp_bndrs, body) (univs, ty1) = split_sig_ty ty (reqs, ty2) = splitLHsQualTy ty1 (exis, ty3) = splitLHsForAllTyInvis ty2 (provs, ty4) = splitLHsQualTy ty3 -- | Decompose a sigma type (of the form @forall . context => body@) -- into its constituent parts. -- Only splits type variable binders that were -- quantified invisibly (e.g., @forall a.@, with a dot). -- -- This function is used to split apart certain types, such as instance -- declaration types, which disallow visible @forall@s. For instance, if GHC -- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that -- declaration would mistakenly be accepted! -- -- Note that this function looks through parentheses, so it will work on types -- such as @(forall a. <...>)@. The downside to this is that it is not -- generally possible to take the returned types and reconstruct the original -- type (parentheses and all) from them. splitLHsSigmaTyInvis :: LHsType (GhcPass p) -> ([LHsTyVarBndr Specificity (GhcPass p)] , Maybe (LHsContext (GhcPass p)), LHsType (GhcPass p)) splitLHsSigmaTyInvis ty | (tvs, ty1) <- splitLHsForAllTyInvis ty , (ctxt, ty2) <- splitLHsQualTy ty1 = (tvs, ctxt, ty2) -- | Decompose a GADT type into its constituent parts. -- Returns @(outer_bndrs, mb_ctxt, body)@, where: -- -- * @outer_bndrs@ are 'HsOuterExplicit' if the type has explicit, outermost -- type variable binders. Otherwise, they are 'HsOuterImplicit'. -- -- * @mb_ctxt@ is @Just@ the context, if it is provided. -- Otherwise, it is @Nothing@. -- -- * @body@ is the body of the type after the optional @forall@s and context. -- -- This function is careful not to look through parentheses. -- See @Note [GADT abstract syntax] (Wrinkle: No nested foralls or contexts)@ -- "GHC.Hs.Decls" for why this is important. 
splitLHsGadtTy :: LHsSigType GhcPs -> (HsOuterSigTyVarBndrs GhcPs, Maybe (LHsContext GhcPs), LHsType GhcPs) splitLHsGadtTy (L _ sig_ty) | (outer_bndrs, rho_ty) <- split_bndrs sig_ty , (mb_ctxt, tau_ty) <- splitLHsQualTy_KP rho_ty = (outer_bndrs, mb_ctxt, tau_ty) where split_bndrs :: HsSigType GhcPs -> (HsOuterSigTyVarBndrs GhcPs, LHsType GhcPs) split_bndrs (HsSig{sig_bndrs = outer_bndrs, sig_body = body_ty}) = (outer_bndrs, body_ty) -- | Decompose a type of the form @forall . body@ into its constituent -- parts. Only splits type variable binders that -- were quantified invisibly (e.g., @forall a.@, with a dot). -- -- This function is used to split apart certain types, such as instance -- declaration types, which disallow visible @forall@s. For instance, if GHC -- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that -- declaration would mistakenly be accepted! -- -- Note that this function looks through parentheses, so it will work on types -- such as @(forall a. <...>)@. The downside to this is that it is not -- generally possible to take the returned types and reconstruct the original -- type (parentheses and all) from them. -- Unlike 'splitLHsSigmaTyInvis', this function does not look through -- parentheses, hence the suffix @_KP@ (short for \"Keep Parentheses\"). splitLHsForAllTyInvis :: LHsType (GhcPass pass) -> ( [LHsTyVarBndr Specificity (GhcPass pass)] , LHsType (GhcPass pass)) splitLHsForAllTyInvis ty | ((mb_tvbs), body) <- splitLHsForAllTyInvis_KP (ignoreParens ty) = (fromMaybe [] mb_tvbs, body) -- | Decompose a type of the form @forall . body@ into its constituent -- parts. Only splits type variable binders that -- were quantified invisibly (e.g., @forall a.@, with a dot). -- -- This function is used to split apart certain types, such as instance -- declaration types, which disallow visible @forall@s. 
For instance, if GHC -- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that -- declaration would mistakenly be accepted! -- -- Unlike 'splitLHsForAllTyInvis', this function does not look through -- parentheses, hence the suffix @_KP@ (short for \"Keep Parentheses\"). splitLHsForAllTyInvis_KP :: LHsType (GhcPass pass) -> (Maybe ([LHsTyVarBndr Specificity (GhcPass pass)]) , LHsType (GhcPass pass)) splitLHsForAllTyInvis_KP lty@(L _ ty) = case ty of HsForAllTy { hst_tele = HsForAllInvis {hsf_invis_bndrs = tvs } , hst_body = body } -> (Just tvs, body) _ -> (Nothing, lty) -- | Decompose a type of the form @context => body@ into its constituent parts. -- -- Note that this function looks through parentheses, so it will work on types -- such as @(context => <...>)@. The downside to this is that it is not -- generally possible to take the returned types and reconstruct the original -- type (parentheses and all) from them. splitLHsQualTy :: LHsType (GhcPass pass) -> (Maybe (LHsContext (GhcPass pass)), LHsType (GhcPass pass)) splitLHsQualTy ty | (mb_ctxt, body) <- splitLHsQualTy_KP (ignoreParens ty) = (mb_ctxt, body) -- | Decompose a type of the form @context => body@ into its constituent parts. -- -- Unlike 'splitLHsQualTy', this function does not look through -- parentheses, hence the suffix @_KP@ (short for \"Keep Parentheses\"). splitLHsQualTy_KP :: LHsType (GhcPass pass) -> (Maybe (LHsContext (GhcPass pass)), LHsType (GhcPass pass)) splitLHsQualTy_KP (L _ (HsQualTy { hst_ctxt = ctxt, hst_body = body })) = (Just ctxt, body) splitLHsQualTy_KP body = (Nothing, body) -- | Decompose a type class instance type (of the form -- @forall . context => instance_head@) into its constituent parts. -- Note that the @[Name]@s returned correspond to either: -- -- * The implicitly bound type variables (if the type lacks an outermost -- @forall@), or -- -- * The explicitly bound type variables (if the type has an outermost -- @forall@). 
-- -- This function is careful not to look through parentheses. -- See @Note [No nested foralls or contexts in instance types]@ -- for why this is important. splitLHsInstDeclTy :: LHsSigType GhcRn -> ([Name], Maybe (LHsContext GhcRn), LHsType GhcRn) splitLHsInstDeclTy (L _ (HsSig{sig_bndrs = outer_bndrs, sig_body = inst_ty})) = (hsOuterTyVarNames outer_bndrs, mb_cxt, body_ty) where (mb_cxt, body_ty) = splitLHsQualTy_KP inst_ty -- | Decompose a type class instance type (of the form -- @forall . context => instance_head@) into the @instance_head@. getLHsInstDeclHead :: LHsSigType (GhcPass p) -> LHsType (GhcPass p) getLHsInstDeclHead (L _ (HsSig{sig_body = qual_ty})) | (_mb_cxt, body_ty) <- splitLHsQualTy_KP qual_ty = body_ty -- | Decompose a type class instance type (of the form -- @forall . context => instance_head@) into the @instance_head@ and -- retrieve the underlying class type constructor (if it exists). getLHsInstDeclClass_maybe :: (Anno (IdGhcP p) ~ SrcSpanAnnN) => LHsSigType (GhcPass p) -> Maybe (LocatedN (IdP (GhcPass p))) -- Works on (LHsSigType GhcPs) getLHsInstDeclClass_maybe inst_ty = do { let head_ty = getLHsInstDeclHead inst_ty ; hsTyGetAppHead_maybe head_ty } {- Note [No nested foralls or contexts in instance types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The type at the top of an instance declaration is one of the few places in GHC where nested `forall`s or contexts are not permitted, even with RankNTypes enabled. For example, the following will be rejected: instance forall a. forall b. Show (Either a b) where ... instance Eq a => Eq b => Show (Either a b) where ... instance (forall a. Show (Maybe a)) where ... instance (Eq a => Show (Maybe a)) where ... This restriction is partly motivated by an unusual quirk of instance declarations. 
Namely, if ScopedTypeVariables is enabled, then the type variables from the top of an instance will scope over the bodies of the instance methods, /even if the type variables are implicitly quantified/. For example, GHC will accept the following: instance Monoid a => Monoid (Identity a) where mempty = Identity (mempty @a) Moreover, the type in the top of an instance declaration must obey the forall-or-nothing rule (see Note [forall-or-nothing rule]). If instance types allowed nested `forall`s, this could result in some strange interactions. For example, consider the following: class C a where m :: Proxy a instance (forall a. C (Either a b)) where m = Proxy @(Either a b) Somewhat surprisingly, old versions of GHC would accept the instance above. Even though the `forall` only quantifies `a`, the outermost parentheses mean that the `forall` is nested, and per the forall-or-nothing rule, this means that implicit quantification would occur. Therefore, the `a` is explicitly bound and the `b` is implicitly bound. Moreover, ScopedTypeVariables would bring /both/ sorts of type variables into scope over the body of `m`. How utterly confusing! To avoid this sort of confusion, we simply disallow nested `forall`s in instance types, which makes things like the instance above become illegal. For the sake of consistency, we also disallow nested contexts, even though they don't have the same strange interaction with ScopedTypeVariables. Just as we forbid nested `forall`s and contexts in normal instance declarations, we also forbid them in SPECIALISE instance pragmas (#18455). Unlike normal instance declarations, ScopedTypeVariables don't have any impact on SPECIALISE instance pragmas, but we use the same validity checks for SPECIALISE instance pragmas anyway to be consistent. ----- -- Wrinkle: Derived instances ----- `deriving` clauses and standalone `deriving` declarations also permit bringing type variables into scope, either through explicit or implicit quantification. 
Unlike in the tops of instance declarations, however, one does not need to enable ScopedTypeVariables for this to take effect. Just as GHC forbids nested `forall`s in the top of instance declarations, it also forbids them in types involved with `deriving`: 1. In the `via` types in DerivingVia. For example, this is rejected: deriving via (forall x. V x) instance C (S x) Just like the types in instance declarations, `via` types can also bring both implicitly and explicitly bound type variables into scope. As a result, we adopt the same no-nested-`forall`s rule in `via` types to avoid confusing behavior like in the example below: deriving via (forall x. T x y) instance W x y (Foo a b) -- Both x and y are brought into scope??? 2. In the classes in `deriving` clauses. For example, this is rejected: data T = MkT deriving (C1, (forall x. C2 x y)) This is because the generated instance would look like: instance forall x y. C2 x y T where ... So really, the same concerns as instance declarations apply here as well. -} {- ************************************************************************ * * FieldOcc * * ************************************************************************ Note [Ambiguous FieldOcc in record updates] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When renaming a "record field update" (`some_record{ field = expr }`), the field occurrence may be ambiguous if there are multiple record types with that same field label in scope. Instead of failing, we may attempt to do type-directed disambiguation: if we typecheck the record field update, we can disambiguate the `field` based on the record and field type. In practice, this means an identifier of a field occurrence (`FieldOcc`) may have to go straight from `RdrName` to `Id`, since field ambiguity makes it impossible to construct a `Name` for the field. 
Since type-directed disambiguation is a GHC property rather than a property of the GHC-Haskell AST, we still parameterise a `FieldOcc` occurrence by `IdP p`, but in the case of the ambiguity we do the unthinkable and insert a mkUnboundName in the name. Very bad, yes, but since type-directed disambiguation is on the way out (see proposal https://github.com/ghc-proposals/ghc-proposals/pull/366), we consider this acceptable for now. see also Wrinkle [Disambiguating fields] and note [Type-directed record disambiguation] NB: FieldOcc preserves the RdrName throughout its lifecycle for exact printing purposes. -} type instance XCFieldOcc GhcPs = NoExtField -- RdrName is stored in the proper IdP field type instance XCFieldOcc GhcRn = RdrName type instance XCFieldOcc GhcTc = RdrName type instance XXFieldOcc GhcPs = DataConCantHappen type instance XXFieldOcc GhcRn = DataConCantHappen type instance XXFieldOcc GhcTc = DataConCantHappen -------------------------------------------------------------------------------- mkFieldOcc :: LocatedN RdrName -> FieldOcc GhcPs mkFieldOcc rdr = FieldOcc noExtField rdr fieldOccRdrName :: forall p. IsPass p => FieldOcc (GhcPass p) -> RdrName fieldOccRdrName fo = case ghcPass @p of GhcPs -> unLoc $ foLabel fo GhcRn -> foExt fo GhcTc -> foExt fo fieldOccLRdrName :: forall p. 
IsPass p => FieldOcc (GhcPass p) -> LocatedN RdrName fieldOccLRdrName fo = case ghcPass @p of GhcPs -> foLabel fo GhcRn -> case fo of FieldOcc rdr sel -> let (L l _) = sel in L l rdr GhcTc -> let (L l _) = foLabel fo in L l (foExt fo) {- ************************************************************************ * * OpName * * ************************************************************************ -} -- | Name of an operator in an operator application or section data OpName = NormalOp Name -- ^ A normal identifier | NegateOp -- ^ Prefix negation | UnboundOp RdrName -- ^ An unbound identifier | RecFldOp (FieldOcc GhcRn) -- ^ A record field occurrence instance Outputable OpName where ppr (NormalOp n) = ppr n ppr NegateOp = ppr negateName ppr (UnboundOp uv) = ppr uv ppr (RecFldOp fld) = ppr fld {- ************************************************************************ * * \subsection{Pretty printing} * * ************************************************************************ -} instance OutputableBndrId p => Outputable (HsBndrVar (GhcPass p)) where ppr (HsBndrVar _ name) = ppr name ppr (HsBndrWildCard _) = char '_' class OutputableBndrFlag flag p where pprTyVarBndr :: OutputableBndrId p => HsTyVarBndr flag (GhcPass p) -> SDoc instance OutputableBndrFlag () p where pprTyVarBndr (HsTvb _ _ bvar bkind) = decorate (ppr_hs_tvb bvar bkind) where decorate :: SDoc -> SDoc decorate d = parens_if_kind bkind d instance OutputableBndrFlag Specificity p where pprTyVarBndr (HsTvb _ spec bvar bkind) = decorate (ppr_hs_tvb bvar bkind) where decorate :: SDoc -> SDoc decorate d = case spec of InferredSpec -> braces d SpecifiedSpec -> parens_if_kind bkind d instance OutputableBndrFlag (HsBndrVis (GhcPass p')) p where pprTyVarBndr (HsTvb _ bvis bvar bkind) = decorate (ppr_hs_tvb bvar bkind) where decorate :: SDoc -> SDoc decorate d = case bvis of HsBndrRequired _ -> parens_if_kind bkind d HsBndrInvisible _ -> char '@' <> parens_if_kind bkind d ppr_hs_tvb :: OutputableBndrId p => 
HsBndrVar (GhcPass p) -> HsBndrKind (GhcPass p) -> SDoc ppr_hs_tvb bvar (HsBndrNoKind _) = ppr bvar ppr_hs_tvb bvar (HsBndrKind _ k) = hsep [ppr bvar, dcolon, ppr k] parens_if_kind :: HsBndrKind (GhcPass p) -> SDoc -> SDoc parens_if_kind (HsBndrNoKind _) d = d parens_if_kind (HsBndrKind _ _) d = parens d instance OutputableBndrId p => Outputable (HsSigType (GhcPass p)) where ppr (HsSig { sig_bndrs = outer_bndrs, sig_body = body }) = pprHsOuterSigTyVarBndrs outer_bndrs <+> ppr body instance OutputableBndrId p => Outputable (HsType (GhcPass p)) where ppr ty = pprHsType ty instance OutputableBndrId p => Outputable (LHsQTyVars (GhcPass p)) where ppr (HsQTvs { hsq_explicit = tvs }) = interppSP tvs instance (OutputableBndrFlag flag p, OutputableBndrFlag flag (NoGhcTcPass p), OutputableBndrId p) => Outputable (HsOuterTyVarBndrs flag (GhcPass p)) where ppr (HsOuterImplicit{hso_ximplicit = imp_tvs}) = text "HsOuterImplicit:" <+> case ghcPass @p of GhcPs -> ppr imp_tvs GhcRn -> ppr imp_tvs GhcTc -> ppr imp_tvs ppr (HsOuterExplicit{hso_bndrs = exp_tvs}) = text "HsOuterExplicit:" <+> ppr exp_tvs instance OutputableBndrId p => Outputable (HsForAllTelescope (GhcPass p)) where ppr (HsForAllVis { hsf_vis_bndrs = bndrs }) = text "HsForAllVis:" <+> ppr bndrs ppr (HsForAllInvis { hsf_invis_bndrs = bndrs }) = text "HsForAllInvis:" <+> ppr bndrs instance (OutputableBndrId p, OutputableBndrFlag flag p) => Outputable (HsTyVarBndr flag (GhcPass p)) where ppr = pprTyVarBndr instance Outputable thing => Outputable (HsWildCardBndrs (GhcPass p) thing) where ppr (HsWC { hswc_body = ty }) = ppr ty instance (OutputableBndrId p) => Outputable (HsPatSigType (GhcPass p)) where ppr (HsPS { hsps_body = ty }) = ppr ty instance (OutputableBndrId p) => Outputable (HsTyPat (GhcPass p)) where ppr (HsTP { hstp_body = ty }) = ppr ty instance (OutputableBndrId p) => Outputable (HsTyLit (GhcPass p)) where ppr = ppr_tylit instance Outputable HsIPName where ppr (HsIPName n) = char '?' 
<> ftext n -- Ordinary implicit parameters instance OutputableBndr HsIPName where pprBndr _ n = ppr n -- Simple for now pprInfixOcc n = ppr n pprPrefixOcc n = ppr n instance (Outputable tyarg, Outputable arg, Outputable rec) => Outputable (HsConDetails tyarg arg rec) where ppr (PrefixCon tyargs args) = text "PrefixCon:" <+> hsep (map (\t -> text "@" <> ppr t) tyargs) <+> ppr args ppr (RecCon rec) = text "RecCon:" <+> ppr rec ppr (InfixCon l r) = text "InfixCon:" <+> ppr [l, r] instance Outputable (XRec pass (IdP pass)) => Outputable (FieldOcc pass) where ppr = ppr . foLabel instance (OutputableBndrId pass) => OutputableBndr (FieldOcc (GhcPass pass)) where pprInfixOcc = pprInfixOcc . unXRec @(GhcPass pass) . foLabel pprPrefixOcc = pprPrefixOcc . unXRec @(GhcPass pass) . foLabel instance (OutputableBndrId pass) => OutputableBndr (GenLocated SrcSpan (FieldOcc (GhcPass pass))) where pprInfixOcc = pprInfixOcc . unLoc pprPrefixOcc = pprPrefixOcc . unLoc ppr_tylit :: (HsTyLit (GhcPass p)) -> SDoc ppr_tylit (HsNumTy source i) = pprWithSourceText source (integer i) ppr_tylit (HsStrTy source s) = pprWithSourceText source (text (show s)) ppr_tylit (HsCharTy source c) = pprWithSourceText source (text (show c)) pprAnonWildCard :: SDoc pprAnonWildCard = char '_' -- | Prints the explicit @forall@ in a type family equation if one is written. -- If there is no explicit @forall@, nothing is printed. pprHsOuterFamEqnTyVarBndrs :: OutputableBndrId p => HsOuterFamEqnTyVarBndrs (GhcPass p) -> SDoc pprHsOuterFamEqnTyVarBndrs (HsOuterImplicit{}) = empty pprHsOuterFamEqnTyVarBndrs (HsOuterExplicit{hso_bndrs = qtvs}) = forAllLit <+> interppSP qtvs <> dot -- | Prints the outermost @forall@ in a type signature if one is written. -- If there is no outermost @forall@, nothing is printed. 
pprHsOuterSigTyVarBndrs :: OutputableBndrId p => HsOuterSigTyVarBndrs (GhcPass p) -> SDoc pprHsOuterSigTyVarBndrs (HsOuterImplicit{}) = empty pprHsOuterSigTyVarBndrs (HsOuterExplicit{hso_bndrs = bndrs}) = pprHsForAll (mkHsForAllInvisTele noAnn bndrs) Nothing -- | Prints a forall; When passed an empty list, prints @forall .@/@forall ->@ -- only when @-dppr-debug@ is enabled. pprHsForAll :: forall p. OutputableBndrId p => HsForAllTelescope (GhcPass p) -> Maybe (LHsContext (GhcPass p)) -> SDoc pprHsForAll tele cxt = pp_tele tele <+> pprLHsContext cxt where pp_tele :: HsForAllTelescope (GhcPass p) -> SDoc pp_tele tele = case tele of HsForAllVis { hsf_vis_bndrs = qtvs } -> pp_forall (space <> arrow) qtvs HsForAllInvis { hsf_invis_bndrs = qtvs } -> pp_forall dot qtvs pp_forall :: forall flag p. (OutputableBndrId p, OutputableBndrFlag flag p) => SDoc -> [LHsTyVarBndr flag (GhcPass p)] -> SDoc pp_forall separator qtvs | null qtvs = whenPprDebug (forAllLit <> separator) -- Note: to fix the PprRecordDotSyntax1 ppr roundtrip test, the <> -- below needs to be <+>. But it means 94 other test results need to -- be updated to match. | otherwise = forAllLit <+> interppSP qtvs <> separator pprLHsContext :: (OutputableBndrId p) => Maybe (LHsContext (GhcPass p)) -> SDoc pprLHsContext Nothing = empty pprLHsContext (Just lctxt) = pprLHsContextAlways lctxt -- For use in a HsQualTy, which always gets printed if it exists. pprLHsContextAlways :: (OutputableBndrId p) => LHsContext (GhcPass p) -> SDoc pprLHsContextAlways (L _ ctxt) = case ctxt of [] -> parens empty <+> darrow [L _ ty] -> ppr_mono_ty ty <+> darrow _ -> parens (interpp'SP ctxt) <+> darrow pprConDeclFields :: forall p. OutputableBndrId p => [LConDeclField (GhcPass p)] -> SDoc pprConDeclFields fields = braces (sep (punctuate comma (map ppr_fld fields))) where ppr_fld (L _ (ConDeclField { cd_fld_names = ns, cd_fld_type = ty, cd_fld_doc = doc })) = pprMaybeWithDoc doc (ppr_names ns <+> dcolon <+> ppr ty) ppr_names :: forall p. 
OutputableBndrId p => [LFieldOcc (GhcPass p)] -> SDoc ppr_names [n] = pprPrefixOcc n ppr_names ns = sep (punctuate comma (map pprPrefixOcc ns)) -- Printing works more-or-less as for Types pprHsType :: (OutputableBndrId p) => HsType (GhcPass p) -> SDoc pprHsType ty = ppr_mono_ty ty ppr_mono_lty :: OutputableBndrId p => LHsType (GhcPass p) -> SDoc ppr_mono_lty ty = ppr_mono_ty (unLoc ty) ppr_mono_ty :: forall p. (OutputableBndrId p) => HsType (GhcPass p) -> SDoc ppr_mono_ty (HsForAllTy { hst_tele = tele, hst_body = ty }) = sep [pprHsForAll tele Nothing, ppr_mono_lty ty] ppr_mono_ty (HsQualTy { hst_ctxt = ctxt, hst_body = ty }) = sep [pprLHsContextAlways ctxt, ppr_mono_lty ty] ppr_mono_ty (HsBangTy _ b ty) = ppr b <> ppr_mono_lty ty ppr_mono_ty (HsRecTy _ flds) = pprConDeclFields flds ppr_mono_ty (HsTyVar _ prom (L _ name)) = pprOccWithTick Prefix prom name ppr_mono_ty (HsFunTy _ mult ty1 ty2) = ppr_fun_ty mult ty1 ty2 ppr_mono_ty (HsTupleTy _ con tys) -- Special-case unary boxed tuples so that they are pretty-printed as -- `Solo x`, not `(x)` | [ty] <- tys , BoxedTuple <- std_con = sep [text (mkTupleStr Boxed tcName 1), ppr_mono_lty ty] | otherwise = tupleParens std_con (pprWithCommas ppr tys) where std_con = case con of HsUnboxedTuple -> UnboxedTuple _ -> BoxedTuple ppr_mono_ty (HsSumTy _ tys) = tupleParens UnboxedTuple (pprWithBars ppr tys) ppr_mono_ty (HsKindSig _ ty kind) = ppr_mono_lty ty <+> dcolon <+> ppr kind ppr_mono_ty (HsListTy _ ty) = brackets (ppr_mono_lty ty) ppr_mono_ty (HsIParamTy _ n ty) = (ppr n <+> dcolon <+> ppr_mono_lty ty) ppr_mono_ty (HsSpliceTy ext s) = case ghcPass @p of GhcPs -> pprUntypedSplice True Nothing s GhcRn | HsUntypedSpliceNested n <- ext -> pprUntypedSplice True (Just n) s GhcRn | HsUntypedSpliceTop _ t <- ext -> ppr t GhcTc -> pprUntypedSplice True Nothing s ppr_mono_ty (HsExplicitListTy _ prom tys) | isPromoted prom = quote $ brackets (maybeAddSpace tys $ interpp'SP tys) | otherwise = brackets (interpp'SP tys) ppr_mono_ty 
(HsExplicitTupleTy _ prom tys) -- Special-case unary boxed tuples so that they are pretty-printed as -- `'MkSolo x`, not `'(x)` | [ty] <- tys = quote_tuple prom $ sep [text (mkTupleStr Boxed dataName 1), ppr_mono_lty ty] | otherwise = quote_tuple prom $ parens (maybeAddSpace tys $ interpp'SP tys) ppr_mono_ty (HsTyLit _ t) = ppr t ppr_mono_ty (HsWildCardTy {}) = char '_' ppr_mono_ty (HsStarTy _ isUni) = char (if isUni then '★' else '*') ppr_mono_ty (HsAppTy _ fun_ty arg_ty) = hsep [ppr_mono_lty fun_ty, ppr_mono_lty arg_ty] ppr_mono_ty (HsAppKindTy _ ty k) = ppr_mono_lty ty <+> char '@' <> ppr_mono_lty k ppr_mono_ty (HsOpTy _ prom ty1 (L _ op) ty2) = sep [ ppr_mono_lty ty1 , sep [pprOccWithTick Infix prom op, ppr_mono_lty ty2 ] ] ppr_mono_ty (HsParTy _ ty) = parens (ppr_mono_lty ty) -- Put the parens in where the user did -- But we still use the precedence stuff to add parens because -- toHsType doesn't put in any HsParTys, so we may still need them ppr_mono_ty (HsDocTy _ ty doc) = pprWithDoc doc $ ppr_mono_lty ty ppr_mono_ty (XHsType t) = ppr t -------------------------- ppr_fun_ty :: (OutputableBndrId p) => HsArrow (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) -> SDoc ppr_fun_ty mult ty1 ty2 = let p1 = ppr_mono_lty ty1 p2 = ppr_mono_lty ty2 arr = pprHsArrow mult in sep [p1, arr <+> p2] quote_tuple :: PromotionFlag -> SDoc -> SDoc quote_tuple IsPromoted doc = quote doc quote_tuple NotPromoted doc = doc -------------------------- -- | @'hsTypeNeedsParens' p t@ returns 'True' if the type @t@ needs parentheses -- under precedence @p@. 
hsTypeNeedsParens :: PprPrec -> HsType (GhcPass p) -> Bool hsTypeNeedsParens p = go_hs_ty where go_hs_ty (HsForAllTy{}) = p >= funPrec go_hs_ty (HsQualTy{}) = p >= funPrec go_hs_ty (HsBangTy{}) = p > topPrec go_hs_ty (HsRecTy{}) = False go_hs_ty (HsTyVar{}) = False go_hs_ty (HsFunTy{}) = p >= funPrec -- Special-case unary boxed tuple applications so that they are -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612) -- See Note [One-tuples] in GHC.Builtin.Types go_hs_ty (HsTupleTy _ con [_]) = case con of HsBoxedOrConstraintTuple -> p >= appPrec HsUnboxedTuple -> False go_hs_ty (HsTupleTy{}) = False go_hs_ty (HsSumTy{}) = False go_hs_ty (HsKindSig{}) = p >= sigPrec go_hs_ty (HsListTy{}) = False go_hs_ty (HsIParamTy{}) = p > topPrec go_hs_ty (HsSpliceTy{}) = False go_hs_ty (HsExplicitListTy{}) = False -- Special-case unary boxed tuple applications so that they are -- parenthesized as `Proxy ('MkSolo x)`, not `Proxy 'MkSolo x` (#18612) -- See Note [One-tuples] in GHC.Builtin.Types go_hs_ty (HsExplicitTupleTy _ _ [_]) = p >= appPrec go_hs_ty (HsExplicitTupleTy{}) = False go_hs_ty (HsTyLit{}) = False go_hs_ty (HsWildCardTy{}) = False go_hs_ty (HsStarTy{}) = p >= starPrec go_hs_ty (HsAppTy{}) = p >= appPrec go_hs_ty (HsAppKindTy{}) = p >= appPrec go_hs_ty (HsOpTy{}) = p >= opPrec go_hs_ty (HsParTy{}) = False go_hs_ty (HsDocTy _ (L _ t) _) = go_hs_ty t go_hs_ty (XHsType ty) = go_core_ty ty go_core_ty (TyVarTy{}) = False go_core_ty (AppTy{}) = p >= appPrec go_core_ty (TyConApp _ args) | null args = False | otherwise = p >= appPrec go_core_ty (ForAllTy{}) = p >= funPrec go_core_ty (FunTy{}) = p >= funPrec go_core_ty (LitTy{}) = False go_core_ty (CastTy t _) = go_core_ty t go_core_ty (CoercionTy{}) = False maybeAddSpace :: [LHsType (GhcPass p)] -> SDoc -> SDoc -- See Note [Printing promoted type constructors] -- in GHC.Iface.Type. 
This code implements the same -- logic for printing HsType maybeAddSpace tys doc | (ty : _) <- tys , lhsTypeHasLeadingPromotionQuote ty = space <> doc | otherwise = doc lhsTypeHasLeadingPromotionQuote :: LHsType (GhcPass p) -> Bool lhsTypeHasLeadingPromotionQuote ty = goL ty where goL (L _ ty) = go ty go (HsForAllTy{}) = False go (HsQualTy{ hst_ctxt = ctxt, hst_body = body}) | (L _ (c:_)) <- ctxt = goL c | otherwise = goL body go (HsBangTy{}) = False go (HsRecTy{}) = False go (HsTyVar _ p _) = isPromoted p go (HsFunTy _ _ arg _) = goL arg go (HsListTy{}) = False go (HsTupleTy{}) = False go (HsSumTy{}) = False go (HsOpTy _ _ t1 _ _) = goL t1 go (HsKindSig _ t _) = goL t go (HsIParamTy{}) = False go (HsSpliceTy{}) = False go (HsExplicitListTy _ p _) = isPromoted p go (HsExplicitTupleTy{}) = True go (HsTyLit{}) = False go (HsWildCardTy{}) = False go (HsStarTy{}) = False go (HsAppTy _ t _) = goL t go (HsAppKindTy _ t _) = goL t go (HsParTy{}) = False go (HsDocTy _ t _) = goL t go (XHsType{}) = False -- | @'parenthesizeHsType' p ty@ checks if @'hsTypeNeedsParens' p ty@ is -- true, and if so, surrounds @ty@ with an 'HsParTy'. Otherwise, it simply -- returns @ty@. parenthesizeHsType :: PprPrec -> LHsType (GhcPass p) -> LHsType (GhcPass p) parenthesizeHsType p lty@(L loc ty) | hsTypeNeedsParens p ty = L loc (HsParTy noAnn lty) | otherwise = lty -- | @'parenthesizeHsContext' p ctxt@ checks if @ctxt@ is a single constraint -- @c@ such that @'hsTypeNeedsParens' p c@ is true, and if so, surrounds @c@ -- with an 'HsParTy' to form a parenthesized @ctxt@. Otherwise, it simply -- returns @ctxt@ unchanged. parenthesizeHsContext :: PprPrec -> LHsContext (GhcPass p) -> LHsContext (GhcPass p) parenthesizeHsContext p lctxt@(L loc ctxt) = case ctxt of [c] -> L loc [parenthesizeHsType p c] _ -> lctxt -- Other contexts are already "parenthesized" by virtue of -- being tuples. 
{- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (BangType (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (HsType (GhcPass p))] = SrcSpanAnnC type instance Anno (HsType (GhcPass p)) = SrcSpanAnnA type instance Anno (HsSigType (GhcPass p)) = SrcSpanAnnA type instance Anno (HsKind (GhcPass p)) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag (GhcPass _)) = SrcSpanAnnA -- Explicit pass Anno instances needed because of the NoGhcTc field type instance Anno (HsTyVarBndr _flag GhcPs) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag GhcRn) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag GhcTc) = SrcSpanAnnA type instance Anno (HsOuterTyVarBndrs _ (GhcPass _)) = SrcSpanAnnA type instance Anno HsIPName = EpAnnCO type instance Anno (ConDeclField (GhcPass p)) = SrcSpanAnnA type instance Anno (FieldOcc (GhcPass p)) = SrcSpanAnnA ghc-lib-parser-9.12.2.20250421/compiler/GHC/Hs/Utils.hs0000644000000000000000000022541707346545000017727 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE TupleSections #-} {-| Module : GHC.Hs.Utils Description : Generic helpers for the HsSyn type. Copyright : (c) The University of Glasgow, 1992-2023 Here we collect a variety of helper functions that construct or analyse HsSyn. All these functions deal with generic HsSyn; functions which deal with the instantiated versions are located elsewhere: Parameterised by Module ---------------- ------------- GhcPs/RdrName GHC.Parser.PostProcess GhcRn/Name GHC.Rename.* GhcTc/Id GHC.Tc.Zonk.Type The @mk*@ functions attempt to construct a not-completely-useless SrcSpan from their components, compared with the @nl*@ functions which just attach noSrcSpan to everything. 
-} {-# LANGUAGE CPP #-} {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE NamedFieldPuns #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {-# LANGUAGE RecordWildCards #-} module GHC.Hs.Utils( -- * Terms mkHsPar, mkHsApp, mkHsAppWith, mkHsApps, mkHsAppsWith, mkHsSyntaxApps, mkHsAppType, mkHsAppTypes, mkHsCaseAlt, mkSimpleMatch, unguardedGRHSs, unguardedRHS, mkMatchGroup, mkLamCaseMatchGroup, mkMatch, mkPrefixFunRhs, mkHsLam, mkHsIf, mkHsWrap, mkLHsWrap, mkHsWrapCo, mkHsWrapCoR, mkLHsWrapCo, mkHsDictLet, mkHsLams, mkHsOpApp, mkHsDo, mkHsDoAnns, mkHsComp, mkHsCompAnns, mkHsWrapPat, mkLHsWrapPat, mkHsWrapPatCo, mkLHsPar, mkHsCmdWrap, mkLHsCmdWrap, mkHsCmdIf, mkConLikeTc, nlHsTyApp, nlHsTyApps, nlHsVar, nl_HsVar, nlHsDataCon, nlHsLit, nlHsApp, nlHsApps, nlHsSyntaxApps, nlHsIntLit, nlHsVarApps, nlHsDo, nlHsOpApp, nlHsLam, nlHsPar, nlHsIf, nlHsCase, nlList, mkLHsTupleExpr, mkLHsVarTuple, missingTupArg, mkLocatedList, nlAscribe, -- * Bindings mkFunBind, mkVarBind, mkHsVarBind, mkSimpleGeneratedFunBind, mkTopFunBind, mkPatSynBind, isInfixFunBind, spanHsLocaLBinds, -- * Literals mkHsIntegral, mkHsFractional, mkHsIsString, mkHsString, mkHsStringFS, mkHsStringPrimLit, mkHsCharPrimLit, -- * Patterns mkNPat, mkNPlusKPat, nlVarPat, nlLitPat, nlConVarPat, nlConVarPatName, nlConPat, nlConPatName, nlInfixConPat, nlNullaryConPat, nlWildConPat, nlWildPat, nlWildPatName, nlTuplePat, mkParPat, nlParPat, mkBigLHsVarTup, mkBigLHsTup, mkBigLHsVarPatTup, mkBigLHsPatTup, -- * Types mkHsAppTy, mkHsAppKindTy, hsTypeToHsSigType, hsTypeToHsSigWcType, mkClassOpSigs, mkHsSigEnv, nlHsAppTy, nlHsAppKindTy, nlHsTyVar, nlHsFunTy, nlHsParTy, 
nlHsTyConApp, -- * Stmts mkTransformStmt, mkTransformByStmt, mkBodyStmt, mkPsBindStmt, mkRnBindStmt, mkTcBindStmt, mkLastStmt, emptyTransStmt, mkGroupUsingStmt, mkGroupByUsingStmt, emptyRecStmt, emptyRecStmtName, emptyRecStmtId, mkRecStmt, unitRecStmtTc, mkLetStmt, -- * Collecting binders isUnliftedHsBind, isUnliftedHsBinds, isBangedHsBind, collectLocalBinders, collectHsValBinders, collectHsBindListBinders, collectHsIdBinders, collectHsBindsBinders, collectHsBindBinders, collectMethodBinders, collectPatBinders, collectPatsBinders, collectLStmtsBinders, collectStmtsBinders, collectLStmtBinders, collectStmtBinders, CollectPass(..), CollectFlag(..), TyDeclBinders(..), LConsWithFields(..), hsLTyClDeclBinders, hsTyClForeignBinders, hsPatSynSelectors, getPatSynBinds, hsForeignDeclsBinders, hsGroupBinders, hsDataFamInstBinders, -- * Collecting implicit binders ImplicitFieldBinders(..), lStmtsImplicits, hsValBindsImplicits, lPatImplicits, lHsRecFieldsImplicits ) where import GHC.Prelude hiding (head, init, last, tail) import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.Pat import GHC.Hs.Type import GHC.Hs.Lit import Language.Haskell.Syntax.Decls import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Tc.Types.Evidence import GHC.Core.Coercion( isReflCo ) import GHC.Core.Multiplicity ( pattern ManyTy ) import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.Make ( mkChunkified ) import GHC.Core.Type ( Type, isUnliftedType ) import GHC.Builtin.Types ( unitTy ) import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Set hiding ( unitFV ) import GHC.Types.Name.Env import GHC.Types.Name.Reader import GHC.Types.Var import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Types.Fixity import GHC.Types.SourceText import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Arrow ( first ) import Data.Foldable ( toList ) import Data.List ( 
partition ) import Data.List.NonEmpty ( nonEmpty ) import qualified Data.List.NonEmpty as NE import Data.IntMap ( IntMap ) import qualified Data.IntMap.Strict as IntMap import Data.Map ( Map ) import qualified Data.Map.Strict as Map {- ************************************************************************ * * Some useful helpers for constructing syntax * * ************************************************************************ These functions attempt to construct a not-completely-useless 'SrcSpan' from their components, compared with the @nl*@ functions below which just attach 'noSrcSpan' to everything. -} -- | @e => (e)@ mkHsPar :: IsPass p => LHsExpr (GhcPass p) -> LHsExpr (GhcPass p) mkHsPar e = L (getLoc e) (gHsPar e) mkSimpleMatch :: (Anno (Match (GhcPass p) (LocatedA (body (GhcPass p)))) ~ SrcSpanAnnA, Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p)))) ~ EpAnn NoEpAnns) => HsMatchContext (LIdP (NoGhcTc (GhcPass p))) -> LocatedE [LPat (GhcPass p)] -> LocatedA (body (GhcPass p)) -> LMatch (GhcPass p) (LocatedA (body (GhcPass p))) mkSimpleMatch ctxt (L l pats) rhs = L loc $ Match { m_ext = noExtField, m_ctxt = ctxt, m_pats = L l pats , m_grhss = unguardedGRHSs (locA loc) rhs noAnn } where loc = case pats of [] -> getLoc rhs (pat:_) -> combineSrcSpansA (getLoc pat) (getLoc rhs) unguardedGRHSs :: Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p)))) ~ EpAnn NoEpAnns => SrcSpan -> LocatedA (body (GhcPass p)) -> EpAnn GrhsAnn -> GRHSs (GhcPass p) (LocatedA (body (GhcPass p))) unguardedGRHSs loc rhs an = GRHSs emptyComments (unguardedRHS an loc rhs) emptyLocalBinds unguardedRHS :: Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p)))) ~ EpAnn NoEpAnns => EpAnn GrhsAnn -> SrcSpan -> LocatedA (body (GhcPass p)) -> [LGRHS (GhcPass p) (LocatedA (body (GhcPass p)))] unguardedRHS an loc rhs = [L (noAnnSrcSpan loc) (GRHS an [] rhs)] type AnnoBody p body = ( XMG (GhcPass p) (LocatedA (body (GhcPass p))) ~ Origin , Anno [LocatedA (Match (GhcPass p) (LocatedA (body 
(GhcPass p))))] ~ SrcSpanAnnLW , Anno (Match (GhcPass p) (LocatedA (body (GhcPass p)))) ~ SrcSpanAnnA ) mkMatchGroup :: AnnoBody p body => Origin -> LocatedLW [LocatedA (Match (GhcPass p) (LocatedA (body (GhcPass p))))] -> MatchGroup (GhcPass p) (LocatedA (body (GhcPass p))) mkMatchGroup origin matches = MG { mg_ext = origin , mg_alts = matches } mkLamCaseMatchGroup :: AnnoBody p body => Origin -> HsLamVariant -> LocatedLW [LocatedA (Match (GhcPass p) (LocatedA (body (GhcPass p))))] -> MatchGroup (GhcPass p) (LocatedA (body (GhcPass p))) mkLamCaseMatchGroup origin lam_variant (L l matches) = mkMatchGroup origin (L l $ map fixCtxt matches) where fixCtxt (L a match) = L a match{m_ctxt = LamAlt lam_variant} mkLocatedList :: (Semigroup a, NoAnn an) => [GenLocated (EpAnn a) e2] -> LocatedAn an [GenLocated (EpAnn a) e2] mkLocatedList ms = case nonEmpty ms of Nothing -> noLocA [] Just ms1 -> L (noAnnSrcSpan $ locA $ combineLocsA (NE.head ms1) (NE.last ms1)) ms mkHsApp :: LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) mkHsApp e1 e2 = addCLocA e1 e2 (HsApp noExtField e1 e2) mkHsAppWith :: (LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> HsExpr (GhcPass id) -> LHsExpr (GhcPass id)) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) mkHsAppWith mkLocated e1 e2 = mkLocated e1 e2 (HsApp noExtField e1 e2) mkHsApps :: LHsExpr (GhcPass id) -> [LHsExpr (GhcPass id)] -> LHsExpr (GhcPass id) mkHsApps = mkHsAppsWith addCLocA mkHsAppsWith :: (LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> HsExpr (GhcPass id) -> LHsExpr (GhcPass id)) -> LHsExpr (GhcPass id) -> [LHsExpr (GhcPass id)] -> LHsExpr (GhcPass id) mkHsAppsWith mkLocated = foldl' (mkHsAppWith mkLocated) mkHsAppType :: LHsExpr GhcRn -> LHsWcType GhcRn -> LHsExpr GhcRn mkHsAppType e t = addCLocA t_body e (HsAppType noExtField e paren_wct) where t_body = hswc_body t paren_wct = t { hswc_body = t_body } mkHsAppTypes :: LHsExpr GhcRn -> [LHsWcType GhcRn] -> LHsExpr GhcRn mkHsAppTypes = 
foldl' mkHsAppType mkHsLam :: (IsPass p, XMG (GhcPass p) (LHsExpr (GhcPass p)) ~ Origin) => LocatedE [LPat (GhcPass p)] -> LHsExpr (GhcPass p) -> LHsExpr (GhcPass p) mkHsLam (L l pats) body = mkHsPar (L (getLoc body) (HsLam noAnn LamSingle matches)) where matches = mkMatchGroup (Generated OtherExpansion SkipPmc) (noLocA [mkSimpleMatch (LamAlt LamSingle) (L l pats') body]) pats' = map (parenthesizePat appPrec) pats mkHsLams :: [TyVar] -> [EvVar] -> LHsExpr GhcTc -> LHsExpr GhcTc mkHsLams tyvars dicts expr = mkLHsWrap (mkWpTyLams tyvars <.> mkWpEvLams dicts) expr mkHsSyntaxApps :: SrcSpanAnnA -> SyntaxExprTc -> [LHsExpr GhcTc] -> LHsExpr GhcTc mkHsSyntaxApps ann (SyntaxExprTc { syn_expr = fun , syn_arg_wraps = arg_wraps , syn_res_wrap = res_wrap }) args = mkLHsWrap res_wrap (foldl' mkHsApp (L ann fun) (zipWithEqual "mkHsSyntaxApps" mkLHsWrap arg_wraps args)) mkHsSyntaxApps _ NoSyntaxExprTc args = pprPanic "mkHsSyntaxApps" (ppr args) -- this function should never be called in scenarios where there is no -- syntax expr -- |A simple case alternative with a single pattern, no binds, no guards; -- pre-typechecking mkHsCaseAlt :: (Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p)))) ~ EpAnn NoEpAnns, Anno (Match (GhcPass p) (LocatedA (body (GhcPass p)))) ~ SrcSpanAnnA) => LPat (GhcPass p) -> (LocatedA (body (GhcPass p))) -> LMatch (GhcPass p) (LocatedA (body (GhcPass p))) mkHsCaseAlt (L l pat) expr = mkSimpleMatch CaseAlt (L (l2l l) [L l pat]) expr nlHsTyApp :: Id -> [Type] -> LHsExpr GhcTc nlHsTyApp fun_id tys = noLocA (mkHsWrap (mkWpTyApps tys) (HsVar noExtField (noLocA fun_id))) nlHsTyApps :: Id -> [Type] -> [LHsExpr GhcTc] -> LHsExpr GhcTc nlHsTyApps fun_id tys xs = foldl' nlHsApp (nlHsTyApp fun_id tys) xs --------- Adding parens --------- -- | Wrap in parens if @'hsExprNeedsParens' appPrec@ says it needs them -- So @f x@ becomes @(f x)@, but @3@ stays as @3@. 
mkLHsPar :: IsPass id => LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) mkLHsPar = parenthesizeHsExpr appPrec mkParPat :: IsPass p => LPat (GhcPass p) -> LPat (GhcPass p) mkParPat = parenthesizePat appPrec nlParPat :: IsPass p => LPat (GhcPass p) -> LPat (GhcPass p) nlParPat p = noLocA (gParPat p) ------------------------------- -- These are the bits of syntax that contain rebindable names -- See GHC.Rename.Env.lookupSyntax mkHsIntegral :: IntegralLit -> HsOverLit GhcPs mkHsFractional :: FractionalLit -> HsOverLit GhcPs mkHsIsString :: SourceText -> FastString -> HsOverLit GhcPs mkHsDo :: HsDoFlavour -> LocatedLW [ExprLStmt GhcPs] -> HsExpr GhcPs mkHsDoAnns :: HsDoFlavour -> LocatedLW [ExprLStmt GhcPs] -> AnnList EpaLocation -> HsExpr GhcPs mkHsComp :: HsDoFlavour -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> HsExpr GhcPs mkHsCompAnns :: HsDoFlavour -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> AnnList EpaLocation -> HsExpr GhcPs mkNPat :: LocatedAn NoEpAnns (HsOverLit GhcPs) -> Maybe (SyntaxExpr GhcPs) -> EpToken "-" -> Pat GhcPs mkNPlusKPat :: LocatedN RdrName -> LocatedAn NoEpAnns (HsOverLit GhcPs) -> EpToken "+" -> Pat GhcPs -- NB: The following functions all use noSyntaxExpr: the generated expressions -- will not work with rebindable syntax if used after the renamer mkLastStmt :: IsPass idR => LocatedA (bodyR (GhcPass idR)) -> StmtLR (GhcPass idL) (GhcPass idR) (LocatedA (bodyR (GhcPass idR))) mkBodyStmt :: LocatedA (bodyR GhcPs) -> StmtLR (GhcPass idL) GhcPs (LocatedA (bodyR GhcPs)) mkPsBindStmt :: EpUniToken "<-" "←" -> LPat GhcPs -> LocatedA (bodyR GhcPs) -> StmtLR GhcPs GhcPs (LocatedA (bodyR GhcPs)) mkRnBindStmt :: LPat GhcRn -> LocatedA (bodyR GhcRn) -> StmtLR GhcRn GhcRn (LocatedA (bodyR GhcRn)) mkTcBindStmt :: LPat GhcTc -> LocatedA (bodyR GhcTc) -> StmtLR GhcTc GhcTc (LocatedA (bodyR GhcTc)) emptyRecStmt :: (Anno [GenLocated (Anno (StmtLR (GhcPass idL) GhcPs bodyR)) (StmtLR (GhcPass idL) GhcPs bodyR)] ~ SrcSpanAnnLW) => StmtLR (GhcPass idL) GhcPs bodyR 
emptyRecStmtName :: (Anno [GenLocated (Anno (StmtLR GhcRn GhcRn bodyR)) (StmtLR GhcRn GhcRn bodyR)] ~ SrcSpanAnnLW) => StmtLR GhcRn GhcRn bodyR emptyRecStmtId :: Stmt GhcTc (LocatedA (HsCmd GhcTc)) mkRecStmt :: forall (idL :: Pass) bodyR. (Anno [GenLocated (Anno (StmtLR (GhcPass idL) GhcPs bodyR)) (StmtLR (GhcPass idL) GhcPs bodyR)] ~ SrcSpanAnnLW) => AnnList (EpToken "rec") -> LocatedLW [LStmtLR (GhcPass idL) GhcPs bodyR] -> StmtLR (GhcPass idL) GhcPs bodyR mkRecStmt anns stmts = (emptyRecStmt' anns :: StmtLR (GhcPass idL) GhcPs bodyR) { recS_stmts = stmts } mkHsIntegral i = OverLit noExtField (HsIntegral i) mkHsFractional f = OverLit noExtField (HsFractional f) mkHsIsString src s = OverLit noExtField (HsIsString src s) mkHsDo ctxt stmts = HsDo noAnn ctxt stmts mkHsDoAnns ctxt stmts anns = HsDo anns ctxt stmts mkHsComp ctxt stmts expr = mkHsCompAnns ctxt stmts expr noAnn mkHsCompAnns ctxt stmts expr@(L l e) anns = mkHsDoAnns ctxt (L loc (stmts ++ [last_stmt])) anns where -- Move the annotations to the top of the last_stmt last = mkLastStmt (L (noAnnSrcSpan $ getLocA expr) e) last_stmt = L l last -- last_stmt actually comes first in a list comprehension, consider all spans loc = noAnnSrcSpan $ getHasLocList (last_stmt:stmts) -- restricted to GhcPs because other phases might need a SyntaxExpr mkHsIf :: LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs -> AnnsIf -> HsExpr GhcPs mkHsIf c a b anns = HsIf anns c a b -- restricted to GhcPs because other phases might need a SyntaxExpr mkHsCmdIf :: LHsExpr GhcPs -> LHsCmd GhcPs -> LHsCmd GhcPs -> AnnsIf -> HsCmd GhcPs mkHsCmdIf c a b anns = HsCmdIf anns noSyntaxExpr c a b mkNPat lit neg anns = NPat anns lit neg noSyntaxExpr mkNPlusKPat id lit anns = NPlusKPat anns id lit (unLoc lit) noSyntaxExpr noSyntaxExpr mkTransformStmt :: AnnTransStmt -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> StmtLR GhcPs GhcPs (LHsExpr GhcPs) mkTransformByStmt :: AnnTransStmt -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> LHsExpr GhcPs -> StmtLR GhcPs GhcPs 
(LHsExpr GhcPs) mkGroupUsingStmt :: AnnTransStmt -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> StmtLR GhcPs GhcPs (LHsExpr GhcPs) mkGroupByUsingStmt :: AnnTransStmt -> [ExprLStmt GhcPs] -> LHsExpr GhcPs -> LHsExpr GhcPs -> StmtLR GhcPs GhcPs (LHsExpr GhcPs) emptyTransStmt :: AnnTransStmt -> StmtLR GhcPs GhcPs (LHsExpr GhcPs) emptyTransStmt anns = TransStmt { trS_ext = anns , trS_form = panic "emptyTransStmt: form" , trS_stmts = [], trS_bndrs = [] , trS_by = Nothing, trS_using = noLocA noExpr , trS_ret = noSyntaxExpr, trS_bind = noSyntaxExpr , trS_fmap = noExpr } mkTransformStmt a ss u = (emptyTransStmt a) { trS_form = ThenForm, trS_stmts = ss, trS_using = u } mkTransformByStmt a ss u b = (emptyTransStmt a) { trS_form = ThenForm, trS_stmts = ss, trS_using = u, trS_by = Just b } mkGroupUsingStmt a ss u = (emptyTransStmt a) { trS_form = GroupForm, trS_stmts = ss, trS_using = u } mkGroupByUsingStmt a ss b u = (emptyTransStmt a) { trS_form = GroupForm, trS_stmts = ss, trS_using = u, trS_by = Just b } mkLastStmt body = LastStmt noExtField body Nothing noSyntaxExpr mkBodyStmt body = BodyStmt noExtField body noSyntaxExpr noSyntaxExpr mkPsBindStmt ann pat body = BindStmt ann pat body mkRnBindStmt pat body = BindStmt (XBindStmtRn { xbsrn_bindOp = noSyntaxExpr, xbsrn_failOp = Nothing }) pat body mkTcBindStmt pat body = BindStmt (XBindStmtTc { xbstc_bindOp = noSyntaxExpr, xbstc_boundResultType = unitTy, -- unitTy is a dummy value -- can't panic here: it's forced during zonking xbstc_boundResultMult = ManyTy, xbstc_failOp = Nothing }) pat body emptyRecStmt' :: forall idL idR body . 
(WrapXRec (GhcPass idR) [LStmtLR (GhcPass idL) (GhcPass idR) body], IsPass idR) => XRecStmt (GhcPass idL) (GhcPass idR) body -> StmtLR (GhcPass idL) (GhcPass idR) body emptyRecStmt' tyVal = RecStmt { recS_stmts = wrapXRec @(GhcPass idR) [] , recS_later_ids = [] , recS_rec_ids = [] , recS_ret_fn = noSyntaxExpr , recS_mfix_fn = noSyntaxExpr , recS_bind_fn = noSyntaxExpr , recS_ext = tyVal } unitRecStmtTc :: RecStmtTc unitRecStmtTc = RecStmtTc { recS_bind_ty = unitTy , recS_later_rets = [] , recS_rec_rets = [] , recS_ret_ty = unitTy } emptyRecStmt = emptyRecStmt' noAnn emptyRecStmtName = emptyRecStmt' noExtField emptyRecStmtId = emptyRecStmt' unitRecStmtTc -- a panic might trigger during zonking mkLetStmt :: EpToken "let" -> HsLocalBinds GhcPs -> StmtLR GhcPs GhcPs (LocatedA b) mkLetStmt anns binds = LetStmt anns binds ------------------------------- -- | A useful function for building @OpApps@. The operator is always a -- variable, and we don't know the fixity yet. mkHsOpApp :: LHsExpr GhcPs -> IdP GhcPs -> LHsExpr GhcPs -> HsExpr GhcPs mkHsOpApp e1 op e2 = OpApp noExtField e1 (noLocA (HsVar noExtField (noLocA op))) e2 mkHsString :: String -> HsLit (GhcPass p) mkHsString s = HsString NoSourceText (mkFastString s) mkHsStringFS :: FastString -> HsLit (GhcPass p) mkHsStringFS s = HsString NoSourceText s mkHsStringPrimLit :: FastString -> HsLit (GhcPass p) mkHsStringPrimLit fs = HsStringPrim NoSourceText (bytesFS fs) mkHsCharPrimLit :: Char -> HsLit (GhcPass p) mkHsCharPrimLit c = HsChar NoSourceText c mkConLikeTc :: ConLike -> HsExpr GhcTc mkConLikeTc con = XExpr (ConLikeTc con [] []) {- ************************************************************************ * * Constructing syntax with no location info * * ************************************************************************ -} nlHsVar :: IsSrcSpanAnn p a => IdP (GhcPass p) -> LHsExpr (GhcPass p) nlHsVar n = noLocA (HsVar noExtField (noLocA n)) nl_HsVar :: IsSrcSpanAnn p a => IdP (GhcPass p) -> HsExpr (GhcPass p) 
nl_HsVar n = HsVar noExtField (noLocA n) -- | NB: Only for 'LHsExpr' 'Id'. nlHsDataCon :: DataCon -> LHsExpr GhcTc nlHsDataCon con = noLocA (mkConLikeTc (RealDataCon con)) nlHsLit :: HsLit (GhcPass p) -> LHsExpr (GhcPass p) nlHsLit n = noLocA (HsLit noExtField n) nlHsIntLit :: Integer -> LHsExpr (GhcPass p) nlHsIntLit n = noLocA (HsLit noExtField (HsInt noExtField (mkIntegralLit n))) nlVarPat :: IsSrcSpanAnn p a => IdP (GhcPass p) -> LPat (GhcPass p) nlVarPat n = noLocA (VarPat noExtField (noLocA n)) nlLitPat :: HsLit GhcPs -> LPat GhcPs nlLitPat l = noLocA (LitPat noExtField l) nlHsApp :: IsPass id => LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) nlHsApp f x = noLocA (HsApp noExtField f (mkLHsPar x)) nlHsSyntaxApps :: SyntaxExprTc -> [LHsExpr GhcTc] -> LHsExpr GhcTc nlHsSyntaxApps = mkHsSyntaxApps noSrcSpanA nlHsApps :: IsSrcSpanAnn p a => IdP (GhcPass p) -> [LHsExpr (GhcPass p)] -> LHsExpr (GhcPass p) nlHsApps f xs = foldl' nlHsApp (nlHsVar f) xs nlHsVarApps :: IsSrcSpanAnn p a => IdP (GhcPass p) -> [IdP (GhcPass p)] -> LHsExpr (GhcPass p) nlHsVarApps f xs = noLocA (foldl' mk (HsVar noExtField (noLocA f)) (map ((HsVar noExtField) . 
noLocA) xs)) where mk f a = HsApp noExtField (noLocA f) (noLocA a) nlConVarPat :: RdrName -> [RdrName] -> LPat GhcPs nlConVarPat con vars = nlConPat con (map nlVarPat vars) nlConVarPatName :: Name -> [Name] -> LPat GhcRn nlConVarPatName con vars = nlConPatName con (map nlVarPat vars) nlInfixConPat :: RdrName -> LPat GhcPs -> LPat GhcPs -> LPat GhcPs nlInfixConPat con l r = noLocA $ ConPat { pat_con = noLocA con , pat_args = InfixCon (parenthesizePat opPrec l) (parenthesizePat opPrec r) , pat_con_ext = noAnn } nlConPat :: RdrName -> [LPat GhcPs] -> LPat GhcPs nlConPat con pats = noLocA $ ConPat { pat_con_ext = noAnn , pat_con = noLocA con , pat_args = PrefixCon [] (map (parenthesizePat appPrec) pats) } nlConPatName :: Name -> [LPat GhcRn] -> LPat GhcRn nlConPatName con pats = noLocA $ ConPat { pat_con_ext = noExtField , pat_con = noLocA con , pat_args = PrefixCon [] (map (parenthesizePat appPrec) pats) } nlNullaryConPat :: RdrName -> LPat GhcPs nlNullaryConPat con = noLocA $ ConPat { pat_con_ext = noAnn , pat_con = noLocA con , pat_args = PrefixCon [] [] } nlWildConPat :: DataCon -> LPat GhcPs nlWildConPat con = noLocA $ ConPat { pat_con_ext = noAnn , pat_con = noLocA $ getRdrName con , pat_args = PrefixCon [] $ replicate (dataConSourceArity con) nlWildPat } -- | Wildcard pattern - after parsing nlWildPat :: LPat GhcPs nlWildPat = noLocA (WildPat noExtField ) -- | Wildcard pattern - after renaming nlWildPatName :: LPat GhcRn nlWildPatName = noLocA (WildPat noExtField ) nlHsDo :: HsDoFlavour -> [LStmt GhcPs (LHsExpr GhcPs)] -> LHsExpr GhcPs nlHsDo ctxt stmts = noLocA (mkHsDo ctxt (noLocA stmts)) nlHsOpApp :: LHsExpr GhcPs -> IdP GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs nlHsOpApp e1 op e2 = noLocA (mkHsOpApp e1 op e2) nlHsLam :: LMatch GhcPs (LHsExpr GhcPs) -> LHsExpr GhcPs nlHsPar :: IsPass p => LHsExpr (GhcPass p) -> LHsExpr (GhcPass p) nlHsCase :: LHsExpr GhcPs -> [LMatch GhcPs (LHsExpr GhcPs)] -> LHsExpr GhcPs nlList :: [LHsExpr GhcPs] -> LHsExpr GhcPs -- AZ:Is 
this used? nlHsLam match = noLocA $ HsLam noAnn LamSingle $ mkMatchGroup (Generated OtherExpansion SkipPmc) (noLocA [match]) nlHsPar e = noLocA (gHsPar e) -- nlHsIf should generate if-expressions which are NOT subject to -- RebindableSyntax, so the first field of HsIf is False. (#12080) nlHsIf :: LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs nlHsIf cond true false = noLocA (HsIf noAnn cond true false) nlHsCase expr matches = noLocA (HsCase noAnn expr (mkMatchGroup (Generated OtherExpansion SkipPmc) (noLocA matches))) nlList exprs = noLocA (ExplicitList noAnn exprs) nlHsAppTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsTyVar :: IsSrcSpanAnn p a => PromotionFlag -> IdP (GhcPass p) -> LHsType (GhcPass p) nlHsFunTy :: forall p. IsPass p => LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsParTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsAppTy f t = noLocA (HsAppTy noExtField f t) nlHsTyVar p x = noLocA (HsTyVar noAnn p (noLocA x)) nlHsFunTy a b = noLocA (HsFunTy noExtField (HsUnrestrictedArrow x) a b) where x = case ghcPass @p of GhcPs -> noAnn GhcRn -> noExtField GhcTc -> noExtField nlHsParTy t = noLocA (HsParTy noAnn t) nlHsTyConApp :: forall p a. IsSrcSpanAnn p a => PromotionFlag -> LexicalFixity -> IdP (GhcPass p) -> [LHsTypeArg (GhcPass p)] -> LHsType (GhcPass p) nlHsTyConApp prom fixity tycon tys | Infix <- fixity , HsValArg _ ty1 : HsValArg _ ty2 : rest <- tys = foldl' mk_app (noLocA $ HsOpTy noExtField prom ty1 (noLocA tycon) ty2) rest | otherwise = foldl' mk_app (nlHsTyVar prom tycon) tys where mk_app :: LHsType (GhcPass p) -> LHsTypeArg (GhcPass p) -> LHsType (GhcPass p) mk_app fun@(L _ (HsOpTy {})) arg = mk_app (nlHsParTy fun) arg -- parenthesize things like `(A + B) C` mk_app fun (HsValArg _ ty) = nlHsAppTy fun ty mk_app fun (HsTypeArg _ ki) = nlHsAppKindTy fun ki mk_app fun (HsArgPar _) = nlHsParTy fun nlHsAppKindTy :: forall p. 
IsPass p => LHsType (GhcPass p) -> LHsKind (GhcPass p) -> LHsType (GhcPass p) nlHsAppKindTy f k = noLocA (HsAppKindTy x f k) where x = case ghcPass @p of GhcPs -> noAnn GhcRn -> noExtField GhcTc -> noExtField {- Tuples. All these functions are *pre-typechecker* because they lack types on the tuple. -} mkLHsTupleExpr :: [LHsExpr (GhcPass p)] -> XExplicitTuple (GhcPass p) -> LHsExpr (GhcPass p) -- Makes a pre-typechecker boxed tuple, deals with 1 case mkLHsTupleExpr [e] _ = e mkLHsTupleExpr es ext = noLocA $ ExplicitTuple ext (map (Present noExtField) es) Boxed mkLHsVarTuple :: IsSrcSpanAnn p a => [IdP (GhcPass p)] -> XExplicitTuple (GhcPass p) -> LHsExpr (GhcPass p) mkLHsVarTuple ids ext = mkLHsTupleExpr (map nlHsVar ids) ext nlTuplePat :: [LPat GhcPs] -> Boxity -> LPat GhcPs nlTuplePat pats box = noLocA (TuplePat noAnn pats box) missingTupArg :: EpAnn Bool -> HsTupArg GhcPs missingTupArg ann = Missing ann mkLHsPatTup :: [LPat GhcRn] -> LPat GhcRn mkLHsPatTup [] = noLocA $ TuplePat noExtField [] Boxed mkLHsPatTup [lpat] = lpat mkLHsPatTup lpats@(lpat:_) = L (getLoc lpat) $ TuplePat noExtField lpats Boxed -- | The Big equivalents for the source tuple expressions mkBigLHsVarTup :: IsSrcSpanAnn p a => [IdP (GhcPass p)] -> XExplicitTuple (GhcPass p) -> LHsExpr (GhcPass p) mkBigLHsVarTup ids anns = mkBigLHsTup (map nlHsVar ids) anns mkBigLHsTup :: [LHsExpr (GhcPass id)] -> XExplicitTuple (GhcPass id) -> LHsExpr (GhcPass id) mkBigLHsTup es anns = mkChunkified (\e -> mkLHsTupleExpr e anns) es -- | The Big equivalents for the source tuple patterns mkBigLHsVarPatTup :: [IdP GhcRn] -> LPat GhcRn mkBigLHsVarPatTup bs = mkBigLHsPatTup (map nlVarPat bs) mkBigLHsPatTup :: [LPat GhcRn] -> LPat GhcRn mkBigLHsPatTup = mkChunkified mkLHsPatTup {- ************************************************************************ * * LHsSigType and LHsSigWcType * * ********************************************************************* -} -- | Convert an 'LHsType' to an 'LHsSigType'. 
hsTypeToHsSigType :: LHsType GhcPs -> LHsSigType GhcPs hsTypeToHsSigType lty@(L loc ty) = case ty of HsForAllTy { hst_tele = HsForAllInvis { hsf_xinvis = an , hsf_invis_bndrs = bndrs } , hst_body = body } -> L loc $ mkHsExplicitSigType an bndrs body _ -> L (l2l loc) $ mkHsImplicitSigType lty -- The annotations are in lty, erase them from loc -- | Convert an 'LHsType' to an 'LHsSigWcType'. hsTypeToHsSigWcType :: LHsType GhcPs -> LHsSigWcType GhcPs hsTypeToHsSigWcType = mkHsWildCardBndrs . hsTypeToHsSigType mkHsSigEnv :: forall a. (LSig GhcRn -> Maybe ([LocatedN Name], a)) -> [LSig GhcRn] -> NameEnv a mkHsSigEnv get_info sigs = mkNameEnv (mk_pairs ordinary_sigs) `extendNameEnvList` (mk_pairs gen_dm_sigs) -- The subtlety is this: in a class decl with a -- default-method signature as well as a method signature -- we want the latter to win (#12533) -- class C x where -- op :: forall a . x a -> x a -- default op :: forall b . x b -> x b -- op x = ...(e :: b -> b)... -- The scoped type variables of the 'default op', namely 'b', -- scope over the code for op. The 'forall a' does not! -- This applies both in the renamer and typechecker, both -- of which use this function where (gen_dm_sigs, ordinary_sigs) = partition is_gen_dm_sig sigs is_gen_dm_sig (L _ (ClassOpSig _ True _ _)) = True is_gen_dm_sig _ = False mk_pairs :: [LSig GhcRn] -> [(Name, a)] mk_pairs sigs = [ (n,a) | Just (ns,a) <- map get_info sigs , L _ n <- ns ] mkClassOpSigs :: [LSig GhcPs] -> [LSig GhcPs] -- ^ Convert 'TypeSig' to 'ClassOpSig'. 
-- The former is what is parsed, but the latter is -- what we need in class/instance declarations mkClassOpSigs sigs = map fiddle sigs where fiddle (L loc (TypeSig anns nms ty)) = L loc (ClassOpSig anns False nms (dropWildCards ty)) fiddle sig = sig -- | Type ascription: (e :: ty) nlAscribe :: RdrName -> LHsExpr GhcPs -> LHsExpr GhcPs nlAscribe ty e = noLocA $ ExprWithTySig noAnn e $ mkHsWildCardBndrs $ noLocA $ mkHsImplicitSigType $ nlHsTyVar NotPromoted ty {- ********************************************************************* * * --------- HsWrappers: type args, dict args, casts --------- * * ********************************************************************* -} mkLHsWrap :: HsWrapper -> LHsExpr GhcTc -> LHsExpr GhcTc mkLHsWrap co_fn (L loc e) = L loc (mkHsWrap co_fn e) mkHsWrap :: HsWrapper -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrap co_fn e | isIdHsWrapper co_fn = e mkHsWrap co_fn e = XExpr (WrapExpr co_fn e) mkHsWrapCo :: TcCoercionN -- A Nominal coercion a ~N b -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrapCo co e = mkHsWrap (mkWpCastN co) e mkHsWrapCoR :: TcCoercionR -- A Representational coercion a ~R b -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrapCoR co e = mkHsWrap (mkWpCastR co) e mkLHsWrapCo :: TcCoercionN -> LHsExpr GhcTc -> LHsExpr GhcTc mkLHsWrapCo co (L loc e) = L loc (mkHsWrapCo co e) mkHsCmdWrap :: HsWrapper -> HsCmd GhcTc -> HsCmd GhcTc mkHsCmdWrap w cmd | isIdHsWrapper w = cmd | otherwise = XCmd (HsWrap w cmd) mkLHsCmdWrap :: HsWrapper -> LHsCmd GhcTc -> LHsCmd GhcTc mkLHsCmdWrap w (L loc c) = L loc (mkHsCmdWrap w c) mkHsWrapPat :: HsWrapper -> Pat GhcTc -> Type -> Pat GhcTc mkHsWrapPat co_fn p ty | isIdHsWrapper co_fn = p | otherwise = XPat $ CoPat co_fn p ty mkLHsWrapPat :: HsWrapper -> LPat GhcTc -> Type -> LPat GhcTc mkLHsWrapPat co_fn (L loc p) ty = L loc (mkHsWrapPat co_fn p ty) mkHsWrapPatCo :: TcCoercionN -> Pat GhcTc -> Type -> Pat GhcTc mkHsWrapPatCo co pat ty | isReflCo co = pat | otherwise = XPat $ CoPat (mkWpCastN co) pat ty mkHsDictLet :: 
TcEvBinds -> LHsExpr GhcTc -> LHsExpr GhcTc mkHsDictLet ev_binds expr = mkLHsWrap (mkWpLet ev_binds) expr {- l ************************************************************************ * * Bindings; with a location at the top * * ************************************************************************ -} mkFunBind :: Origin -> LocatedN RdrName -> [LMatch GhcPs (LHsExpr GhcPs)] -> HsBind GhcPs -- ^ Not infix, with place holders for coercion and free vars mkFunBind origin fn ms = FunBind { fun_id = fn , fun_matches = mkMatchGroup origin (noLocA ms) , fun_ext = noExtField } mkTopFunBind :: Origin -> LocatedN Name -> [LMatch GhcRn (LHsExpr GhcRn)] -> HsBind GhcRn -- ^ In Name-land, with empty bind_fvs mkTopFunBind origin fn ms = FunBind { fun_id = fn , fun_matches = mkMatchGroup origin (noLocA ms) , fun_ext = emptyNameSet -- NB: closed -- binding } mkHsVarBind :: SrcSpan -> RdrName -> LHsExpr GhcPs -> LHsBind GhcPs mkHsVarBind loc var rhs = mkSimpleGeneratedFunBind loc var (noLocA []) rhs mkVarBind :: IdP GhcTc -> LHsExpr GhcTc -> LHsBind GhcTc mkVarBind var rhs = L (getLoc rhs) $ VarBind { var_ext = noExtField, var_id = var, var_rhs = rhs } mkPatSynBind :: LocatedN RdrName -> HsPatSynDetails GhcPs -> LPat GhcPs -> HsPatSynDir GhcPs -> AnnPSB -> HsBind GhcPs mkPatSynBind name details lpat dir anns = PatSynBind noExtField psb where psb = PSB{ psb_ext = anns , psb_id = name , psb_args = details , psb_def = lpat , psb_dir = dir } -- |If any of the matches in the 'FunBind' are infix, the 'FunBind' is -- considered infix. isInfixFunBind :: forall id1 id2. UnXRec id2 => HsBindLR id1 id2 -> Bool isInfixFunBind (FunBind { fun_matches = MG _ matches }) = any (isInfixMatch . 
unXRec @id2) (unXRec @id2 matches) isInfixFunBind _ = False -- |Return the 'SrcSpan' encompassing the contents of any enclosed binds spanHsLocaLBinds :: HsLocalBinds (GhcPass p) -> SrcSpan spanHsLocaLBinds (EmptyLocalBinds _) = noSrcSpan spanHsLocaLBinds (HsValBinds _ (ValBinds _ bs sigs)) = foldr combineSrcSpans noSrcSpan (bsSpans ++ sigsSpans) where bsSpans :: [SrcSpan] bsSpans = map getLocA bs sigsSpans :: [SrcSpan] sigsSpans = map getLocA sigs spanHsLocaLBinds (HsValBinds _ (XValBindsLR (NValBinds bs sigs))) = foldr combineSrcSpans noSrcSpan (bsSpans ++ sigsSpans) where bsSpans :: [SrcSpan] bsSpans = map getLocA $ concatMap snd bs sigsSpans :: [SrcSpan] sigsSpans = map getLocA sigs spanHsLocaLBinds (HsIPBinds _ (IPBinds _ bs)) = foldr combineSrcSpans noSrcSpan (map getLocA bs) ------------ -- | Convenience function using 'mkFunBind'. -- This is for generated bindings only, do not use for user-written code. mkSimpleGeneratedFunBind :: SrcSpan -> RdrName -> LocatedE [LPat GhcPs] -> LHsExpr GhcPs -> LHsBind GhcPs mkSimpleGeneratedFunBind loc fun pats expr = L (noAnnSrcSpan loc) $ mkFunBind (Generated OtherExpansion SkipPmc) (L (noAnnSrcSpan loc) fun) [mkMatch ctxt pats expr emptyLocalBinds] where ctxt :: HsMatchContextPs ctxt = mkPrefixFunRhs (L (noAnnSrcSpan loc) fun) noAnn -- | Make a prefix, non-strict function 'HsMatchContext' mkPrefixFunRhs :: fn -> AnnFunRhs -> HsMatchContext fn mkPrefixFunRhs n an = FunRhs { mc_fun = n , mc_fixity = Prefix , mc_strictness = NoSrcStrict , mc_an = an } ------------ mkMatch :: forall p. 
IsPass p => HsMatchContext (LIdP (NoGhcTc (GhcPass p))) -> LocatedE [LPat (GhcPass p)] -> LHsExpr (GhcPass p) -> HsLocalBinds (GhcPass p) -> LMatch (GhcPass p) (LHsExpr (GhcPass p)) mkMatch ctxt pats expr binds = noLocA (Match { m_ext = noExtField , m_ctxt = ctxt , m_pats = pats , m_grhss = GRHSs emptyComments (unguardedRHS noAnn noSrcSpan expr) binds }) {- ************************************************************************ * * Collecting binders * * ************************************************************************ Get all the binders in some HsBindGroups, IN THE ORDER OF APPEARANCE. eg. ... where (x, y) = ... f i j = ... [a, b] = ... it should return [x, y, f, a, b] (remember, order important). Note [Collect binders only after renaming] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These functions should only be used on HsSyn *after* the renamer, to return a [Name] or [Id]. Before renaming the record punning and wild-card mechanism makes it hard to know what is bound. So these functions should not be applied to (HsSyn RdrName) Note [isUnliftedHsBind] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function isUnliftedHsBind tells if the binding binds a variable of unlifted type. e.g. - I# x = blah - Just (I# x) = blah isUnliftedHsBind is used in two ways: * To complain if we make a top-level binding for a variable of unlifted type. E.g. any of the above bindings are illegal at top level * To generate a case expression for a non-recursive local let. E.g. let Just (I# x) = blah in body ==> case blah of Just (I# x) -> body See GHC.HsToCore.Expr.dsUnliftedBind. Wrinkles: (W1) For AbsBinds we must check if the local letrec generated by desugaring AbsBinds would be unlifted; so we just recurse into the abs_binds. E.g. f :: Num a => (# a, a #) g :: Num a => a -> a f = ...g... g = ...g... The top-level bindings for f,g are not unlifted (because of the Num a =>), but the local, recursive, monomorphic bindings are: t = /\a \(d:Num a). 
letrec fm :: (# a, a #) = ...g... gm :: a -> a = ...f... in (fm, gm) Here the binding for 'fm' is illegal. So we recurse into the abs_binds (W2) BUT we have a special case when abs_sig is true; see Note [The abs_sig field of AbsBinds] in GHC.Hs.Binds (W3) isUnliftedHsBind returns False even if the binding itself is unlifted, provided it binds only lifted variables. E.g. - (# a,b #) = (# reverse xs, xs #) - x = sqrt# y# :: Float# - type Unl :: UnliftedType data Unl = MkUnl Int MkUnl z = blah In each case the RHS of the "=" has unlifted type, but isUnliftedHsBind returns False. Reason: see GHC Proposal #35 https://github.com/ghc-proposals/ghc-proposals/blob/master/ proposals/0035-unbanged-strict-patterns.rst (W4) In particular, (W3) applies to a pattern that binds no variables at all. So { _ = sqrt# y :: Float# } returns False from isUnliftedHsBind, but { x = sqrt# y :: Float# } returns True. This is arguably a bit confusing (see #22719) -} ----------------- Bindings -------------------------- -- | Should we treat this as an unlifted bind? This will be true for any -- bind that binds an unlifted variable, but we must be careful around -- AbsBinds. See Note [isUnliftedHsBind]. For usage -- information, see Note [Strict binds checks] is GHC.HsToCore.Binds. isUnliftedHsBind :: HsBind GhcTc -> Bool -- works only over typechecked binds isUnliftedHsBind (XHsBindsLR (AbsBinds { abs_exports = exports , abs_sig = has_sig , abs_binds = binds })) | has_sig = any (is_unlifted_id . abe_poly) exports | otherwise = isUnliftedHsBinds binds -- See wrinkle (W1) and (W2) in Note [isUnliftedHsBind] -- If has_sig is True we will never generate a binding for abe_mono, -- so we don't need to worry about it being unlifted. The abe_poly -- binding might not be: e.g. forall a. 
Num a => (# a, a #) -- If has_sig is False, just recurse isUnliftedHsBind (FunBind { fun_id = L _ fun }) = is_unlifted_id fun isUnliftedHsBind (VarBind { var_id = var }) = is_unlifted_id var isUnliftedHsBind (PatBind { pat_lhs = pat }) = any is_unlifted_id (collectPatBinders CollNoDictBinders pat) -- If we changed our view on (W3) you could add -- || isUnliftedType pat_ty -- to this check isUnliftedHsBind (PatSynBind {}) = panic "isUnliftedBind: PatSynBind" isUnliftedHsBinds :: LHsBinds GhcTc -> Bool isUnliftedHsBinds = any (isUnliftedHsBind . unLoc) is_unlifted_id :: Id -> Bool is_unlifted_id id = isUnliftedType (idType id) -- Bindings always have a fixed RuntimeRep, so it's OK -- to call isUnliftedType here -- | Is a binding a strict variable or pattern bind (e.g. @!x = ...@)? isBangedHsBind :: HsBind GhcTc -> Bool isBangedHsBind (XHsBindsLR (AbsBinds { abs_binds = binds })) = any (isBangedHsBind . unLoc) binds isBangedHsBind (FunBind {fun_matches = matches}) | [L _ match] <- unLoc $ mg_alts matches , FunRhs{mc_strictness = SrcStrict} <- m_ctxt match = True isBangedHsBind (PatBind {pat_lhs = pat}) = isBangedLPat pat isBangedHsBind _ = False collectLocalBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsLocalBindsLR (GhcPass idL) (GhcPass idR) -> [IdP (GhcPass idL)] collectLocalBinders flag = \case HsValBinds _ binds -> collectHsIdBinders flag binds -- No pattern synonyms here HsIPBinds {} -> [] EmptyLocalBinds _ -> [] collectHsIdBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) (GhcPass idR) -> [IdP (GhcPass idL)] -- ^ Collect 'Id' binders only, or 'Id's + pattern synonyms, respectively collectHsIdBinders flag = collect_hs_val_binders True flag collectHsValBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) idR -> [IdP (GhcPass idL)] collectHsValBinders flag = collect_hs_val_binders False flag collectHsBindBinders :: CollectPass p => CollectFlag p -> 
HsBindLR p idR -> [IdP p] -- ^ Collect both 'Id's and pattern-synonym binders collectHsBindBinders flag b = collect_bind False flag b [] collectHsBindsBinders :: CollectPass p => CollectFlag p -> LHsBindsLR p idR -> [IdP p] collectHsBindsBinders flag binds = collect_binds False flag binds [] collectHsBindListBinders :: forall p idR. CollectPass p => CollectFlag p -> [LHsBindLR p idR] -> [IdP p] -- ^ Same as 'collectHsBindsBinders', but works over a list of bindings collectHsBindListBinders flag = foldr (collect_bind False flag . unXRec @p) [] collect_hs_val_binders :: CollectPass (GhcPass idL) => Bool -> CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) idR -> [IdP (GhcPass idL)] collect_hs_val_binders ps flag = \case ValBinds _ binds _ -> collect_binds ps flag binds [] XValBindsLR (NValBinds binds _) -> collect_out_binds ps flag binds collect_out_binds :: forall p. CollectPass p => Bool -> CollectFlag p -> [(RecFlag, LHsBinds p)] -> [IdP p] collect_out_binds ps flag = foldr (collect_binds ps flag . snd) [] collect_binds :: forall p idR. CollectPass p => Bool -> CollectFlag p -> LHsBindsLR p idR -> [IdP p] -> [IdP p] -- ^ Collect 'Id's, or 'Id's + pattern synonyms, depending on boolean flag collect_binds ps flag binds acc = foldr (collect_bind ps flag . unXRec @p) acc binds collect_bind :: forall p idR. CollectPass p => Bool -> CollectFlag p -> HsBindLR p idR -> [IdP p] -> [IdP p] collect_bind _ _ (FunBind { fun_id = f }) acc = unXRec @p f : acc collect_bind _ flag (PatBind { pat_lhs = p }) acc = collect_lpat flag p acc collect_bind _ _ (VarBind { var_id = f }) acc = f : acc collect_bind omitPatSyn _ (PatSynBind _ (PSB { psb_id = ps })) acc | omitPatSyn = acc | otherwise = unXRec @p ps : acc collect_bind _ _ (PatSynBind _ (XPatSynBind _)) acc = acc collect_bind _ _ (XHsBindsLR b) acc = collectXXHsBindsLR @p @idR b acc collectMethodBinders :: forall idL idR. 
UnXRec idL => LHsBindsLR idL idR -> [LIdP idL] -- ^ Used exclusively for the bindings of an instance decl which are all -- 'FunBinds' collectMethodBinders binds = foldr (get . unXRec @idL) [] binds where get (FunBind { fun_id = f }) fs = f : fs get _ fs = fs -- Someone else complains about non-FunBinds ----------------- Statements -------------------------- -- collectLStmtsBinders :: (IsPass idL, IsPass idR, CollectPass (GhcPass idL)) => CollectFlag (GhcPass idL) -> [LStmtLR (GhcPass idL) (GhcPass idR) body] -> [IdP (GhcPass idL)] collectLStmtsBinders flag = concatMap (collectLStmtBinders flag) collectStmtsBinders :: (IsPass idL, IsPass idR, CollectPass (GhcPass idL)) => CollectFlag (GhcPass idL) -> [StmtLR (GhcPass idL) (GhcPass idR) body] -> [IdP (GhcPass idL)] collectStmtsBinders flag = concatMap (collectStmtBinders flag) collectLStmtBinders :: (IsPass idL, IsPass idR, CollectPass (GhcPass idL)) => CollectFlag (GhcPass idL) -> LStmtLR (GhcPass idL) (GhcPass idR) body -> [IdP (GhcPass idL)] collectLStmtBinders flag = collectStmtBinders flag . unLoc collectStmtBinders :: forall idL idR body . (IsPass idL, IsPass idR, CollectPass (GhcPass idL)) => CollectFlag (GhcPass idL) -> StmtLR (GhcPass idL) (GhcPass idR) body -> [IdP (GhcPass idL)] -- Id Binders for a Stmt... [but what about pattern-sig type vars]? 
collectStmtBinders flag = \case BindStmt _ pat _ -> collectPatBinders flag pat LetStmt _ binds -> collectLocalBinders flag binds BodyStmt {} -> [] LastStmt {} -> [] ParStmt _ xs _ _ -> collectLStmtsBinders flag [s | ParStmtBlock _ ss _ _ <- xs, s <- ss] TransStmt { trS_stmts = stmts } -> collectLStmtsBinders flag stmts RecStmt { recS_stmts = L _ ss } -> collectLStmtsBinders flag ss XStmtLR x -> case ghcPass :: GhcPass idR of GhcRn -> collectApplicativeStmtBndrs x GhcTc -> collectApplicativeStmtBndrs x where collectApplicativeStmtBndrs :: ApplicativeStmt (GhcPass idL) a -> [IdP (GhcPass idL)] collectApplicativeStmtBndrs (ApplicativeStmt _ args _) = concatMap (collectArgBinders . snd) args collectArgBinders = \case ApplicativeArgOne { app_arg_pattern = pat } -> collectPatBinders flag pat ApplicativeArgMany { bv_pattern = pat } -> collectPatBinders flag pat ----------------- Patterns -------------------------- collectPatBinders :: CollectPass p => CollectFlag p -> LPat p -> [IdP p] collectPatBinders flag pat = collect_lpat flag pat [] collectPatsBinders :: CollectPass p => CollectFlag p -> [LPat p] -> [IdP p] collectPatsBinders flag pats = foldr (collect_lpat flag) [] pats ------------- -- | Indicate if evidence binders and type variable binders have -- to be collected. -- -- This type enumerates the modes of collecting bound variables -- | evidence | type | term | ghc | -- | binders | variables | variables | pass | -- -------------------------------------------- -- CollNoDictBinders | no | no | yes | any | -- CollWithDictBinders | yes | no | yes | GhcTc | -- CollVarTyVarBinders | no | yes | yes | GhcRn | -- -- See Note [Dictionary binders in ConPatOut] data CollectFlag p where -- | Don't collect evidence binders CollNoDictBinders :: CollectFlag p -- | Collect evidence binders CollWithDictBinders :: CollectFlag GhcTc -- | Collect variable and type variable binders, but no evidence binders CollVarTyVarBinders :: CollectFlag GhcRn collect_lpat :: forall p. 
CollectPass p => CollectFlag p -> LPat p -> [IdP p] -> [IdP p] collect_lpat flag pat bndrs = collect_pat flag (unXRec @p pat) bndrs collect_pat :: forall p. CollectPass p => CollectFlag p -> Pat p -> [IdP p] -> [IdP p] collect_pat flag pat bndrs = case pat of VarPat _ var -> unXRec @p var : bndrs WildPat _ -> bndrs LazyPat _ pat -> collect_lpat flag pat bndrs BangPat _ pat -> collect_lpat flag pat bndrs AsPat _ a pat -> unXRec @p a : collect_lpat flag pat bndrs ViewPat _ _ pat -> collect_lpat flag pat bndrs ParPat _ pat -> collect_lpat flag pat bndrs ListPat _ pats -> foldr (collect_lpat flag) bndrs pats TuplePat _ pats _ -> foldr (collect_lpat flag) bndrs pats OrPat _ _ -> [] -- See Note [Implementation of OrPatterns], Renamer: -- evidence binders in an OrPat currently aren't visible outside their -- binding pattern, so we return []. SumPat _ pat _ _ -> collect_lpat flag pat bndrs LitPat _ _ -> bndrs NPat {} -> bndrs NPlusKPat _ n _ _ _ _ -> unXRec @p n : bndrs SigPat _ pat sig -> case flag of CollNoDictBinders -> collect_lpat flag pat bndrs CollWithDictBinders -> collect_lpat flag pat bndrs CollVarTyVarBinders -> collect_lpat flag pat bndrs ++ collectPatSigBndrs sig XPat ext -> collectXXPat @p flag ext bndrs SplicePat ext _ -> collectXSplicePat @p flag ext bndrs EmbTyPat _ tp -> collect_ty_pat_bndrs flag tp bndrs InvisPat _ tp -> collect_ty_pat_bndrs flag tp bndrs -- See Note [Dictionary binders in ConPatOut] ConPat {pat_args=ps} -> case flag of CollNoDictBinders -> foldr (collect_lpat flag) bndrs (hsConPatArgs ps) CollWithDictBinders -> foldr (collect_lpat flag) bndrs (hsConPatArgs ps) ++ collectEvBinders (cpt_binds (pat_con_ext pat)) CollVarTyVarBinders -> foldr (collect_lpat flag) bndrs (hsConPatArgs ps) ++ concatMap collectConPatTyArgBndrs (hsConPatTyArgs ps) collectEvBinders :: TcEvBinds -> [Id] collectEvBinders (EvBinds bs) = foldr add_ev_bndr [] bs collectEvBinders (TcEvBinds {}) = panic "ToDo: collectEvBinders" collectConPatTyArgBndrs :: HsConPatTyArg 
GhcRn -> [Name] collectConPatTyArgBndrs (HsConPatTyArg _ tp) = collectTyPatBndrs tp collect_ty_pat_bndrs :: CollectFlag p -> HsTyPat (NoGhcTc p) -> [IdP p] -> [IdP p] collect_ty_pat_bndrs CollNoDictBinders _ bndrs = bndrs collect_ty_pat_bndrs CollWithDictBinders _ bndrs = bndrs collect_ty_pat_bndrs CollVarTyVarBinders tp bndrs = collectTyPatBndrs tp ++ bndrs collectTyPatBndrs :: HsTyPat GhcRn -> [Name] collectTyPatBndrs (HsTP (HsTPRn nwcs imp_tvs exp_tvs) _) = nwcs ++ imp_tvs ++ exp_tvs collectPatSigBndrs :: HsPatSigType GhcRn -> [Name] collectPatSigBndrs (HsPS (HsPSRn nwcs imp_tvs) _) = nwcs ++ imp_tvs add_ev_bndr :: EvBind -> [Id] -> [Id] add_ev_bndr (EvBind { eb_lhs = b }) bs | isId b = b:bs | otherwise = bs -- A worry: what about coercion variable binders?? -- | This class specifies how to collect variable identifiers from extension patterns in the given pass. -- Consumers of the GHC API that define their own passes should feel free to implement instances in order -- to make use of functions which depend on it. -- -- In particular, Haddock already makes use of this, with an instance for its 'DocNameI' pass so that -- it can reuse the code in GHC for collecting binders. class UnXRec p => CollectPass p where collectXXPat :: CollectFlag p -> XXPat p -> [IdP p] -> [IdP p] collectXXHsBindsLR :: forall pR. 
XXHsBindsLR p pR -> [IdP p] -> [IdP p] collectXSplicePat :: CollectFlag p -> XSplicePat p -> [IdP p] -> [IdP p] instance IsPass p => CollectPass (GhcPass p) where collectXXPat flag ext = case ghcPass @p of GhcPs -> dataConCantHappen ext GhcRn | HsPatExpanded _ pat <- ext -> collect_pat flag pat GhcTc -> case ext of CoPat _ pat _ -> collect_pat flag pat ExpansionPat _ pat -> collect_pat flag pat collectXXHsBindsLR ext = case ghcPass @p of GhcPs -> dataConCantHappen ext GhcRn -> dataConCantHappen ext GhcTc -> case ext of AbsBinds { abs_exports = dbinds } -> (map abe_poly dbinds ++) -- I don't think we want the binders from the abe_binds -- binding (hence see AbsBinds) is in zonking in GHC.Tc.Zonk.Type collectXSplicePat flag ext = case ghcPass @p of GhcPs -> id GhcRn | (HsUntypedSpliceTop _ pat) <- ext -> collect_pat flag pat GhcRn | (HsUntypedSpliceNested _) <- ext -> id GhcTc -> dataConCantHappen ext {- Note [Dictionary binders in ConPatOut] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Should we collect dictionary binders in ConPatOut? It depends! Use CollectFlag to choose. 1. Pre-typechecker there are no ConPatOuts. Use CollNoDictBinders flag. 2. In the desugarer, most of the time we don't want to collect evidence binders, so we also use CollNoDictBinders flag. Example of why it matters: In a lazy pattern, for example f ~(C x y) = ..., we want to generate bindings for x,y but not for dictionaries bound by C. (The type checker ensures they would not be used.) Here's the problem. Consider data T a where C :: Num a => a -> Int -> T a f ~(C (n+1) m) = (n,m) Here, the pattern (C (n+1)) binds a hidden dictionary (d::Num a), and *also* uses that dictionary to match the (n+1) pattern. Yet, the variables bound by the lazy pattern are n,m, *not* the dictionary d. So in mkSelectorBinds in GHC.HsToCore.Utils, we want just m,n as the variables bound. So in this case, we do *not* gather (a) dictionary and (b) dictionary bindings as binders of a ConPatOut pattern. 3. 
On the other hand, desugaring of arrows needs evidence bindings and uses CollWithDictBinders flag. Consider h :: (ArrowChoice a, Arrow a) => Int -> a (Int,Int) Int h x = proc (y,z) -> case compare x y of GT -> returnA -< z+x The type checker turns the case into case compare x y of GT { $dNum_123 = $dNum_Int } -> returnA -< (+) $dNum_123 z x That is, it attaches the $dNum_123 binding to a ConPatOut in scope. During desugaring, evidence binders must be collected because their sets are intersected with free variable sets of subsequent commands to create (minimal) command environments. Failing to do it properly leads to bugs (e.g., #18950). Note: attaching evidence binders to existing ConPatOut may be suboptimal for arrows. In the example above we would prefer to generate: case compare x y of GT -> returnA -< let $dNum_123 = $dNum_Int in (+) $dNum_123 z x So that the evidence isn't passed into the command environment. This issue doesn't arise with desugaring of non-arrow code because the simplifier can freely float and inline let-expressions created for evidence binders. But with arrow desugaring, the simplifier would have to see through the command environment tuple which is more complicated. -} hsGroupBinders :: HsGroup GhcRn -> [Name] hsGroupBinders (HsGroup { hs_valds = val_decls, hs_tyclds = tycl_decls, hs_fords = foreign_decls }) = collectHsValBinders CollNoDictBinders val_decls ++ hsTyClForeignBinders tycl_decls foreign_decls hsTyClForeignBinders :: [TyClGroup GhcRn] -> [LForeignDecl GhcRn] -> [Name] -- We need to look at instance declarations too, -- because their associated types may bind data constructors hsTyClForeignBinders tycl_decls foreign_decls = map unLoc (hsForeignDeclsBinders foreign_decls) ++ getSelectorNames (foldMap (foldMap (tyDeclBinders . hsLTyClDeclBinders) . group_tyclds) tycl_decls `mappend` (foldMap (foldMap hsLInstDeclBinders . 
group_instds) tycl_decls)) where getSelectorNames :: ([LocatedA Name], [LFieldOcc GhcRn]) -> [Name] getSelectorNames (ns, fs) = map unLoc ns ++ map (unLoc . foLabel . unLoc) fs ------------------- data TyDeclBinders p = TyDeclBinders { tyDeclMainBinder :: !(LocatedA (IdP (GhcPass p)), TyConFlavour ()) , tyDeclATs :: ![(LocatedA (IdP (GhcPass p)), TyConFlavour ())] , tyDeclOpSigs :: ![LocatedA (IdP (GhcPass p))] , tyDeclConsWithFields :: !(LConsWithFields p) } tyDeclBinders :: TyDeclBinders p -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) tyDeclBinders (TyDeclBinders main ats sigs consWithFields) = (fst main : (fmap fst ats ++ sigs ++ cons), flds) where (cons, flds) = lconsWithFieldsBinders consWithFields hsLTyClDeclBinders :: (IsPass p, OutputableBndrId p) => LocatedA (TyClDecl (GhcPass p)) -> TyDeclBinders p -- ^ Returns all the /binding/ names of the decl. The first one is -- guaranteed to be the name of the decl. The first component -- represents all binding names except record fields; the second -- represents field occurrences. For record fields mentioned in -- multiple constructors, the SrcLoc will be from the first occurrence. -- -- Each returned (Located name) has a SrcSpan for the /whole/ declaration. 
-- See Note [SrcSpan for binders] hsLTyClDeclBinders (L loc (FamDecl { tcdFam = FamilyDecl { fdLName = (L _ name) , fdInfo = fd_info } })) = TyDeclBinders { tyDeclMainBinder = (L loc name, familyInfoTyConFlavour Nothing fd_info) , tyDeclATs = [], tyDeclOpSigs = [] , tyDeclConsWithFields = emptyLConsWithFields } hsLTyClDeclBinders (L loc (SynDecl { tcdLName = (L _ name) })) = TyDeclBinders { tyDeclMainBinder = (L loc name, TypeSynonymFlavour) , tyDeclATs = [], tyDeclOpSigs = [] , tyDeclConsWithFields = emptyLConsWithFields } hsLTyClDeclBinders (L loc (ClassDecl { tcdLName = (L _ cls_name) , tcdSigs = sigs , tcdATs = ats })) = TyDeclBinders { tyDeclMainBinder = (L loc cls_name, ClassFlavour) , tyDeclATs = [ (L fam_loc fam_name, familyInfoTyConFlavour (Just ()) fd_info) | (L fam_loc (FamilyDecl { fdLName = L _ fam_name , fdInfo = fd_info })) <- ats ] , tyDeclOpSigs = [ L mem_loc mem_name | (L mem_loc (ClassOpSig _ False ns _)) <- sigs , (L _ mem_name) <- ns ] , tyDeclConsWithFields = emptyLConsWithFields } hsLTyClDeclBinders (L loc (DataDecl { tcdLName = (L _ name) , tcdDataDefn = defn })) = TyDeclBinders { tyDeclMainBinder = (L loc name, flav ) , tyDeclATs = [] , tyDeclOpSigs = [] , tyDeclConsWithFields = hsDataDefnBinders defn } where flav = newOrDataToFlavour $ dataDefnConsNewOrData $ dd_cons defn ------------------- hsForeignDeclsBinders :: forall p a. (UnXRec (GhcPass p), IsSrcSpanAnn p a) => [LForeignDecl (GhcPass p)] -> [LIdP (GhcPass p)] -- ^ See Note [SrcSpan for binders] hsForeignDeclsBinders foreign_decls = [ L (noAnnSrcSpan (locA decl_loc)) n | L decl_loc (ForeignImport { fd_name = L _ n }) <- foreign_decls] ------------------- hsPatSynSelectors :: IsPass p => HsValBinds (GhcPass p) -> [FieldOcc (GhcPass p)] -- ^ Collects record pattern-synonym selectors only; the pattern synonym -- names are collected by 'collectHsValBinders'. 
hsPatSynSelectors (ValBinds _ _ _) = panic "hsPatSynSelectors" hsPatSynSelectors (XValBindsLR (NValBinds binds _)) = foldr addPatSynSelector [] . concat $ map snd binds addPatSynSelector :: forall p. UnXRec p => LHsBind p -> [FieldOcc p] -> [FieldOcc p] addPatSynSelector bind sels | PatSynBind _ (PSB { psb_args = RecCon as }) <- unXRec @p bind = map recordPatSynField as ++ sels | otherwise = sels getPatSynBinds :: forall id. UnXRec id => [(RecFlag, LHsBinds id)] -> [PatSynBind id id] getPatSynBinds binds = [ psb | (_, lbinds) <- binds , (unXRec @id -> (PatSynBind _ psb)) <- lbinds ] ------------------- hsLInstDeclBinders :: (IsPass p, OutputableBndrId p) => LInstDecl (GhcPass p) -> ([(LocatedA (IdP (GhcPass p)))], [LFieldOcc (GhcPass p)]) hsLInstDeclBinders (L _ (ClsInstD { cid_inst = ClsInstDecl { cid_datafam_insts = dfis }})) = foldMap (lconsWithFieldsBinders . hsDataFamInstBinders . unLoc) dfis hsLInstDeclBinders (L _ (DataFamInstD { dfid_inst = fi })) = lconsWithFieldsBinders $ hsDataFamInstBinders fi hsLInstDeclBinders (L _ (TyFamInstD {})) = mempty ------------------- -- | the 'SrcLoc' returned are for the whole declarations, not just the names hsDataFamInstBinders :: (IsPass p, OutputableBndrId p) => DataFamInstDecl (GhcPass p) -> LConsWithFields p hsDataFamInstBinders (DataFamInstDecl { dfid_eqn = FamEqn { feqn_rhs = defn }}) = hsDataDefnBinders defn -- There can't be repeated symbols because only data instances have binders ------------------- -- | the 'SrcLoc' returned are for the whole declarations, not just the names hsDataDefnBinders :: (IsPass p, OutputableBndrId p) => HsDataDefn (GhcPass p) -> LConsWithFields p hsDataDefnBinders (HsDataDefn { dd_cons = cons }) = hsConDeclsBinders (toList cons) -- See Note [Binders in family instances] ------------------- {- Note [Collecting record fields in data declarations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When renaming a data declaration that includes record constructors, we are, in the end, 
going to to create a mapping from constructor to its field labels, to store in 'GREInfo' (see 'IAmConLike'). This allows us to know, in the renamer, which constructor has what fields. In order to achieve this, we return the constructor and field information from hsConDeclsBinders in the following format: - [(ConRdrName, [Located Int])], a list of the constructors, each associated with its record fields, in the form of a list of Int indices into... - IntMap FieldOcc, an IntMap of record fields. (In actual fact, we use [(ConRdrName, Maybe [Located Int])], with Nothing indicating that the constructor has unlabelled fields: see Note [Local constructor info in the renamer] in GHC.Types.GREInfo.) This allows us to do the following (see GHC.Rename.Names.getLocalNonValBinders.new_tc): - create 'Name's for each of the record fields, to get IntMap FieldLabel, - create 'Name's for each of the constructors, to get [(ConName, [Int])], - look up the FieldLabels of each constructor, to get [(ConName, [FieldLabel])]. NB: This can be a bit tricky to get right in the presence of data types with duplicate constructors or fields. Storing locations allows us to report an error for duplicate field declarations, see test cases T9156 T9156_DF. Other relevant test cases: rnfail015. -} -- | A mapping from constructors to all of their fields. -- -- See Note [Collecting record fields in data declarations]. data LConsWithFields p = LConsWithFields { consWithFieldIndices :: [(LocatedA (IdP (GhcPass p)), Maybe [Located Int])] , consFields :: IntMap (LFieldOcc (GhcPass p)) } lconsWithFieldsBinders :: LConsWithFields p -> ([(LocatedA (IdP (GhcPass p)))], [LFieldOcc (GhcPass p)]) lconsWithFieldsBinders (LConsWithFields cons fields) = (map fst cons, IntMap.elems fields) emptyLConsWithFields :: LConsWithFields p emptyLConsWithFields = LConsWithFields [] IntMap.empty hsConDeclsBinders :: forall p. 
(IsPass p, OutputableBndrId p) => [LConDecl (GhcPass p)] -> LConsWithFields p -- The function is boringly complicated because of the records -- And since we only have equality, we have to be a little careful hsConDeclsBinders cons = go emptyFieldIndices cons where go :: FieldIndices p -> [LConDecl (GhcPass p)] -> LConsWithFields p go seen [] = LConsWithFields [] (fields seen) go seen (r:rs) -- Don't re-mangle the location of field names, because we don't -- have a record of the full location of the field declaration anyway = let loc = getLoc r in case unLoc r of ConDeclGADT { con_names = names, con_g_args = args } -> LConsWithFields (cons ++ ns) fs where cons = map ( , con_flds ) $ toList (L loc . unLoc <$> names) (con_flds, seen') = get_flds_gadt seen args LConsWithFields ns fs = go seen' rs ConDeclH98 { con_name = name, con_args = args } -> LConsWithFields ([(L loc (unLoc name), con_flds)] ++ ns) fs where (con_flds, seen') = get_flds_h98 seen args LConsWithFields ns fs = go seen' rs get_flds_h98 :: FieldIndices p -> HsConDeclH98Details (GhcPass p) -> (Maybe [Located Int], FieldIndices p) get_flds_h98 seen (RecCon flds) = first Just $ get_flds seen flds get_flds_h98 seen (PrefixCon _ []) = (Just [], seen) get_flds_h98 seen _ = (Nothing, seen) get_flds_gadt :: FieldIndices p -> HsConDeclGADTDetails (GhcPass p) -> (Maybe [Located Int], FieldIndices p) get_flds_gadt seen (RecConGADT _ flds) = first Just $ get_flds seen flds get_flds_gadt seen (PrefixConGADT _ []) = (Just [], seen) get_flds_gadt seen _ = (Nothing, seen) get_flds :: FieldIndices p -> LocatedL [LConDeclField (GhcPass p)] -> ([Located Int], FieldIndices p) get_flds seen flds = foldr add_fld ([], seen) fld_names where add_fld fld (is, ixs) = let (i, ixs') = insertField fld ixs in (i:is, ixs') fld_names = concatMap (cd_fld_names . unLoc) (unLoc flds) -- | A bijection between record fields of a datatype and integers, -- used to implement Note [Collecting record fields in data declarations]. 
data FieldIndices p = FieldIndices { fields :: IntMap (LFieldOcc (GhcPass p)) -- ^ Look up a field from its index. , fieldIndices :: Map RdrName Int -- ^ Look up the index of a field label in the previous 'IntMap'. , newInt :: !Int -- ^ An integer @i@ such that no integer @i' >= i@ appears in the 'IntMap'. } emptyFieldIndices :: FieldIndices p emptyFieldIndices = FieldIndices { fields = IntMap.empty , fieldIndices = Map.empty , newInt = 0 } insertField :: IsPass p => LFieldOcc (GhcPass p) -> FieldIndices p -> (Located Int, FieldIndices p) insertField new_fld fi@(FieldIndices flds idxs new_idx) | Just i <- Map.lookup rdr idxs = (L loc i, fi) | otherwise = (L loc new_idx, FieldIndices (IntMap.insert new_idx new_fld flds) (Map.insert rdr new_idx idxs) (new_idx + 1)) where loc = getLocA new_fld rdr = fieldOccRdrName . unLoc $ new_fld {- Note [SrcSpan for binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~ When extracting the (Located RdrName) for a binder, at least for the main name (the TyCon of a type declaration etc), we want to give it the @SrcSpan@ of the whole /declaration/, not just the name itself (which is how it appears in the syntax tree). This SrcSpan (for the entire declaration) is used as the SrcSpan for the Name that is finally produced, and hence for error messages. (See #8607.) Note [Binders in family instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a type or data family instance declaration, the type constructor is an *occurrence* not a binding site type instance T Int = Int -> Int -- No binders data instance S Bool = S1 | S2 -- Binders are S1,S2 ************************************************************************ * * Collecting binders the user did not write * * ************************************************************************ The job of the following family of functions is to run through binding sites and find the set of all Names that were defined "implicitly", without being explicitly written by the user. 
Note [Collecting implicit binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We collect all the RHS Names that are implicitly introduced by record wildcards, so that we can: - avoid warning the user when they don't use those names (#4404), - report deprecation warnings for deprecated fields that are used (#23382). The functions that collect implicit binders return a collection of 'ImplicitFieldBinders', which associates each implicitly-introduced record field with the bound variables in the RHS of the record field pattern, e.g. in data R = MkR { fld :: Int } foo (MkR { .. }) = fld the renamer will elaborate this to foo (MkR { fld = fld_var }) = fld_var and the implicit binders function will return [ ImplicitFieldBinders { implFlBndr_field = fld , implFlBndr_binders = [fld_var] } ] This information is then used: - in the calls to GHC.Rename.Utils.checkUnusedRecordWildcard, to emit a warning when a record wildcard binds no new variables (redundant record wildcard) or none of the bound variables are used (unused record wildcard). - in GHC.Rename.Utils.deprecateUsedRecordWildcard, to emit a warning when the field is deprecated and any of the binders are used. NOTE: the implFlBndr_binders field should always be a singleton (since the RHS of an implicit binding should always be a VarPat, created in rnHsRecPatsAndThen.mkVarPat) -} -- | All binders corresponding to a single implicit record field pattern. -- -- See Note [Collecting implicit binders]. data ImplicitFieldBinders = ImplicitFieldBinders { implFlBndr_field :: Name -- ^ The 'Name' of the record field , implFlBndr_binders :: [Name] -- ^ The binders of the RHS of the record field pattern -- (in practice, always a singleton: see Note [Collecting implicit binders]) } lStmtsImplicits :: forall idR body . IsPass idR => [LStmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR)))] -> [(SrcSpan, [ImplicitFieldBinders])] lStmtsImplicits = hs_lstmts where hs_lstmts :: forall idR body . 
IsPass idR => [LStmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR)))] -> [(SrcSpan, [ImplicitFieldBinders])] hs_lstmts = concatMap (hs_stmt . unLoc) hs_stmt :: forall idR body . IsPass idR => StmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR))) -> [(SrcSpan, [ImplicitFieldBinders])] hs_stmt (BindStmt _ pat _) = lPatImplicits pat hs_stmt (XStmtLR x) = case ghcPass :: GhcPass idR of GhcRn -> hs_applicative_stmt x GhcTc -> hs_applicative_stmt x hs_stmt (LetStmt _ binds) = hs_local_binds binds hs_stmt (BodyStmt {}) = [] hs_stmt (LastStmt {}) = [] hs_stmt (ParStmt _ xs _ _) = hs_lstmts [s | ParStmtBlock _ ss _ _ <- xs , s <- ss] hs_stmt (TransStmt { trS_stmts = stmts }) = hs_lstmts stmts hs_stmt (RecStmt { recS_stmts = L _ ss }) = hs_lstmts ss hs_local_binds (HsValBinds _ val_binds) = hsValBindsImplicits val_binds hs_local_binds (HsIPBinds {}) = [] hs_local_binds (EmptyLocalBinds _) = [] hs_applicative_stmt (ApplicativeStmt _ args _) = concatMap do_arg args where do_arg (_, ApplicativeArgOne { app_arg_pattern = pat }) = lPatImplicits pat do_arg (_, ApplicativeArgMany { app_stmts = stmts }) = hs_lstmts stmts hsValBindsImplicits :: HsValBindsLR GhcRn (GhcPass idR) -> [(SrcSpan, [ImplicitFieldBinders])] hsValBindsImplicits (XValBindsLR (NValBinds binds _)) = concatMap (lhsBindsImplicits . snd) binds hsValBindsImplicits (ValBinds _ binds _) = lhsBindsImplicits binds lhsBindsImplicits :: LHsBindsLR GhcRn idR -> [(SrcSpan, [ImplicitFieldBinders])] lhsBindsImplicits = concatMap (lhs_bind . unLoc) where lhs_bind (PatBind { pat_lhs = lpat }) = lPatImplicits lpat lhs_bind _ = [] -- | Collect all record wild card binders in the given pattern. -- -- These are all the variables bound in all (possibly nested) record wildcard patterns -- appearing inside the pattern. -- -- See Note [Collecting implicit binders]. 
lPatImplicits :: LPat GhcRn -> [(SrcSpan, [ImplicitFieldBinders])] lPatImplicits = hs_lpat where hs_lpat lpat = hs_pat (unLoc lpat) hs_lpats = foldr (\pat rest -> hs_lpat pat ++ rest) [] hs_pat (LazyPat _ pat) = hs_lpat pat hs_pat (BangPat _ pat) = hs_lpat pat hs_pat (AsPat _ _ pat) = hs_lpat pat hs_pat (ViewPat _ _ pat) = hs_lpat pat hs_pat (ParPat _ pat) = hs_lpat pat hs_pat (ListPat _ pats) = hs_lpats pats hs_pat (TuplePat _ pats _) = hs_lpats pats hs_pat (SigPat _ pat _) = hs_lpat pat hs_pat (ConPat {pat_args=ps}) = details ps hs_pat _ = [] details :: HsConPatDetails GhcRn -> [(SrcSpan, [ImplicitFieldBinders])] details (PrefixCon _ ps) = hs_lpats ps details (RecCon (HsRecFields { rec_dotdot = Nothing, rec_flds })) = hs_lpats $ map (hfbRHS . unLoc) rec_flds details (RecCon (HsRecFields { rec_dotdot = Just (L err_loc rec_dotdot), rec_flds })) = [(l2l err_loc, implicit_field_binders)] ++ hs_lpats explicit_pats where (explicit_pats, implicit_field_binders) = rec_field_expl_impl rec_flds rec_dotdot details (InfixCon p1 p2) = hs_lpat p1 ++ hs_lpat p2 lHsRecFieldsImplicits :: [LHsRecField GhcRn (LPat GhcRn)] -> RecFieldsDotDot -> [ImplicitFieldBinders] lHsRecFieldsImplicits rec_flds rec_dotdot = snd $ rec_field_expl_impl rec_flds rec_dotdot rec_field_expl_impl :: [LHsRecField GhcRn (LPat GhcRn)] -> RecFieldsDotDot -> ([LPat GhcRn], [ImplicitFieldBinders]) rec_field_expl_impl rec_flds (RecFieldsDotDot { .. }) = ( map (hfbRHS . 
unLoc) explicit_binds , map implicit_field_binders implicit_binds ) where (explicit_binds, implicit_binds) = splitAt unRecFieldsDotDot rec_flds implicit_field_binders (L _ (HsFieldBind { hfbLHS = L _ fld, hfbRHS = rhs })) = ImplicitFieldBinders { implFlBndr_field = unLoc $ foLabel fld , implFlBndr_binders = collectPatBinders CollNoDictBinders rhs } ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Errors/0000755000000000000000000000000007346545000020650 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Errors/Ppr.hs0000644000000000000000000003672407346545000021761 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic DsMessage module GHC.HsToCore.Errors.Ppr where import GHC.Core.Predicate (isEvVar) import GHC.Core.Type import GHC.Driver.Flags import GHC.Hs import GHC.HsToCore.Errors.Types import GHC.Prelude import GHC.Types.Basic (pprRuleName) import GHC.Types.Error import GHC.Types.Error.Codes import GHC.Types.Id (idType) import GHC.Types.SrcLoc import GHC.Utils.Misc import GHC.Utils.Outputable import qualified GHC.LanguageExtensions as LangExt import GHC.HsToCore.Pmc.Ppr instance Diagnostic DsMessage where type DiagnosticOpts DsMessage = NoDiagnosticOpts diagnosticMessage opts = \case DsUnknownMessage (UnknownDiagnostic f m) -> diagnosticMessage (f opts) m DsEmptyEnumeration -> mkSimpleDecorated $ text "Enumeration is empty" DsIdentitiesFound conv_fn type_of_conv -> mkSimpleDecorated $ vcat [ text "Call of" <+> ppr conv_fn <+> dcolon <+> ppr type_of_conv , nest 2 $ text "can probably be omitted" ] DsOverflowedLiterals i tc bounds _possiblyUsingNegativeLiterals -> let msg = case bounds of Nothing -> vcat [ text "Literal" <+> integer i <+> text "is negative but" <+> ppr tc <+> text "only supports positive numbers" ] Just (MinBound minB, MaxBound maxB) -> vcat [ text 
"Literal" <+> integer i <+> text "is out of the" <+> ppr tc <+> text "range" <+> integer minB <> text ".." <> integer maxB ] in mkSimpleDecorated msg DsRedundantBangPatterns ctx q -> mkSimpleDecorated $ pprEqn ctx q "has redundant bang" DsOverlappingPatterns ctx q -> mkSimpleDecorated $ pprEqn ctx q "is redundant" DsInaccessibleRhs ctx q -> mkSimpleDecorated $ pprEqn ctx q "has inaccessible right hand side" DsMaxPmCheckModelsReached limit -> mkSimpleDecorated $ vcat [ hang (text "Pattern match checker ran into -fmax-pmcheck-models=" <> int limit <> text " limit, so") 2 ( bullet <+> text "Redundant clauses might not be reported at all" $$ bullet <+> text "Redundant clauses might be reported as inaccessible" $$ bullet <+> text "Patterns reported as unmatched might actually be matched") ] DsNonExhaustivePatterns kind _flag maxPatterns vars nablas -> mkSimpleDecorated $ pprContext False kind (text "are non-exhaustive") $ \_ -> case vars of -- See #11245 [] -> text "Guards do not cover entire pattern space" _ -> let us = map (\nabla -> pprUncovered nabla vars) nablas pp_tys = pprQuotedList $ map idType vars in hang (text "Patterns of type" <+> pp_tys <+> text "not matched:") 4 (vcat (take maxPatterns us) $$ dots maxPatterns us) DsTopLevelBindsNotAllowed bindsType bind -> let desc = case bindsType of UnliftedTypeBinds -> "bindings for unlifted types" StrictBinds -> "strict bindings" in mkSimpleDecorated $ hang (text "Top-level" <+> text desc <+> text "aren't allowed:") 2 (ppr bind) DsUselessSpecialiseForClassMethodSelector poly_id -> mkSimpleDecorated $ text "Ignoring useless SPECIALISE pragma for class selector:" <+> quotes (ppr poly_id) DsUselessSpecialiseForNoInlineFunction poly_id -> mkSimpleDecorated $ text "Ignoring useless SPECIALISE pragma for NOINLINE function:" <+> quotes (ppr poly_id) DsMultiplicityCoercionsNotSupported -> mkSimpleDecorated $ text "GHC bug #19517: GHC currently does not support programs using GADTs or type families to witness equality of 
multiplicities" DsOrphanRule rule -> mkSimpleDecorated $ text "Orphan rule:" <+> ppr rule DsRuleLhsTooComplicated orig_lhs lhs2 -> mkSimpleDecorated $ hang (text "RULE left-hand side too complicated to desugar") 2 (vcat [ text "Optimised lhs:" <+> ppr lhs2 , text "Orig lhs:" <+> ppr orig_lhs]) DsRuleIgnoredDueToConstructor con -> mkSimpleDecorated $ vcat [ text "A constructor," <+> ppr con <> text ", appears as outermost match in RULE lhs." , text "This rule will be ignored." ] DsRuleBindersNotBound unbound orig_bndrs orig_lhs lhs2 -> mkSimpleDecorated $ vcat (map pp_dead unbound) where pp_dead bndr = hang (sep [ text "Forall'd" <+> pp_bndr bndr , text "is not bound in RULE lhs"]) 2 (vcat [ text "Orig bndrs:" <+> ppr orig_bndrs , text "Orig lhs:" <+> ppr orig_lhs , text "optimised lhs:" <+> ppr lhs2 ]) pp_bndr b | isTyVar b = text "type variable" <+> quotes (ppr b) | isEvVar b = text "constraint" <+> quotes (ppr (varType b)) | otherwise = text "variable" <+> quotes (ppr b) DsLazyPatCantBindVarsOfUnliftedType unlifted_bndrs -> mkSimpleDecorated $ hang (text "A lazy (~) pattern cannot bind variables of unlifted type." 
$$ text "Unlifted variables:") 2 (vcat (map (\id -> ppr id <+> dcolon <+> ppr (idType id)) unlifted_bndrs)) DsNotYetHandledByTH reason -> case reason of ThAmbiguousRecordUpdates fld -> mkMsg "Ambiguous record updates" (ppr fld) ThAbstractClosedTypeFamily decl -> mkMsg "abstract closed type family" (ppr decl) ThForeignLabel cls -> mkMsg "Foreign label" (doubleQuotes (ppr cls)) ThForeignExport decl -> mkMsg "Foreign export" (ppr decl) ThMinimalPragmas -> mkMsg "MINIMAL pragmas" empty ThSCCPragmas -> mkMsg "SCC pragmas" empty ThNoUserInline -> mkMsg "NOUSERINLINE" empty ThExoticFormOfType ty -> mkMsg "Exotic form of type" (ppr ty) ThAmbiguousRecordSelectors e -> mkMsg "Ambiguous record selectors" (ppr e) ThMonadComprehensionSyntax e -> mkMsg "monad comprehension and [: :]" (ppr e) ThCostCentres e -> mkMsg "Cost centres" (ppr e) ThExpressionForm e -> mkMsg "Expression form" (ppr e) ThExoticStatement other -> mkMsg "Exotic statement" (ppr other) ThExoticLiteral lit -> mkMsg "Exotic literal" (ppr lit) ThExoticPattern pat -> mkMsg "Exotic pattern" (ppr pat) ThGuardedLambdas m -> mkMsg "Guarded lambdas" (pprMatch m) ThNegativeOverloadedPatterns pat -> mkMsg "Negative overloaded patterns" (ppr pat) ThHaddockDocumentation -> mkMsg "Haddock documentation" empty ThWarningAndDeprecationPragmas decl -> mkMsg "WARNING and DEPRECATION pragmas" $ text "Pragma for declaration of" <+> ppr decl ThSplicesWithinDeclBrackets -> mkMsg "Splices within declaration brackets" empty ThNonLinearDataCon -> mkMsg "Non-linear fields in data constructors" empty where mkMsg what doc = mkSimpleDecorated $ hang (text what <+> text "not (yet) handled by Template Haskell") 2 doc DsAggregatedViewExpressions views -> mkSimpleDecorated (vcat msgs) where msgs = map (\g -> text "Putting these view expressions into the same case:" <+> (ppr g)) views DsUnbangedStrictPatterns bind -> mkSimpleDecorated $ hang (text "Pattern bindings containing unlifted types should use" $$ text "an outermost bang pattern:") 2 
(ppr bind) DsCannotMixPolyAndUnliftedBindings bind -> mkSimpleDecorated $ hang (text "You can't mix polymorphic and unlifted bindings:") 2 (ppr bind) DsWrongDoBind _rhs elt_ty -> mkSimpleDecorated $ badMonadBind elt_ty DsUnusedDoBind _rhs elt_ty -> mkSimpleDecorated $ badMonadBind elt_ty DsRecBindsNotAllowedForUnliftedTys binds -> mkSimpleDecorated $ hang (text "Recursive bindings for unlifted types aren't allowed:") 2 (vcat (map ppr binds)) DsRuleMightInlineFirst rule_name lhs_id _ -> mkSimpleDecorated $ vcat [ hang (text "Rule" <+> pprRuleName rule_name <+> text "may never fire") 2 (text "because" <+> quotes (ppr lhs_id) <+> text "might inline first") ] DsAnotherRuleMightFireFirst rule_name bad_rule lhs_id -> mkSimpleDecorated $ vcat [ hang (text "Rule" <+> pprRuleName rule_name <+> text "may never fire") 2 (text "because rule" <+> pprRuleName bad_rule <+> text "for"<+> quotes (ppr lhs_id) <+> text "might fire first") ] DsIncompleteRecordSelector name cons_wo_field not_full_examples -> mkSimpleDecorated $ text "The application of the record field" <+> quotes (ppr name) <+> text "may fail for the following constructors:" <+> vcat (map ppr cons_wo_field ++ [text "..." 
| not_full_examples]) diagnosticReason = \case DsUnknownMessage m -> diagnosticReason m DsEmptyEnumeration -> WarningWithFlag Opt_WarnEmptyEnumerations DsIdentitiesFound{} -> WarningWithFlag Opt_WarnIdentities DsOverflowedLiterals{} -> WarningWithFlag Opt_WarnOverflowedLiterals DsRedundantBangPatterns{} -> WarningWithFlag Opt_WarnRedundantBangPatterns DsOverlappingPatterns{} -> WarningWithFlag Opt_WarnOverlappingPatterns DsInaccessibleRhs{} -> WarningWithFlag Opt_WarnOverlappingPatterns DsMaxPmCheckModelsReached{} -> WarningWithoutFlag DsNonExhaustivePatterns _ (ExhaustivityCheckType mb_flag) _ _ _ -> maybe WarningWithoutFlag WarningWithFlag mb_flag DsTopLevelBindsNotAllowed{} -> ErrorWithoutFlag DsUselessSpecialiseForClassMethodSelector{} -> WarningWithoutFlag DsUselessSpecialiseForNoInlineFunction{} -> WarningWithoutFlag DsMultiplicityCoercionsNotSupported{} -> ErrorWithoutFlag DsOrphanRule{} -> WarningWithFlag Opt_WarnOrphans DsRuleLhsTooComplicated{} -> WarningWithoutFlag DsRuleIgnoredDueToConstructor{} -> WarningWithoutFlag DsRuleBindersNotBound{} -> WarningWithoutFlag DsLazyPatCantBindVarsOfUnliftedType{} -> ErrorWithoutFlag DsNotYetHandledByTH{} -> ErrorWithoutFlag DsAggregatedViewExpressions{} -> WarningWithoutFlag DsUnbangedStrictPatterns{} -> WarningWithFlag Opt_WarnUnbangedStrictPatterns DsCannotMixPolyAndUnliftedBindings{} -> ErrorWithoutFlag DsWrongDoBind{} -> WarningWithFlag Opt_WarnWrongDoBind DsUnusedDoBind{} -> WarningWithFlag Opt_WarnUnusedDoBind DsRecBindsNotAllowedForUnliftedTys{} -> ErrorWithoutFlag DsRuleMightInlineFirst{} -> WarningWithFlag Opt_WarnInlineRuleShadowing DsAnotherRuleMightFireFirst{} -> WarningWithFlag Opt_WarnInlineRuleShadowing DsIncompleteRecordSelector{} -> WarningWithFlag Opt_WarnIncompleteRecordSelectors diagnosticHints = \case DsUnknownMessage m -> diagnosticHints m DsEmptyEnumeration -> noHints DsIdentitiesFound{} -> noHints DsOverflowedLiterals i _tc bounds usingNegLiterals -> case (bounds, usingNegLiterals) of (Just 
(MinBound minB, MaxBound _), NotUsingNegLiterals) | minB == -i -- Note [Suggest NegativeLiterals] , i > 0 -> [ suggestExtensionWithInfo (text "If you are trying to write a large negative literal") LangExt.NegativeLiterals ] _ -> noHints DsRedundantBangPatterns{} -> noHints DsOverlappingPatterns{} -> noHints DsInaccessibleRhs{} -> noHints DsMaxPmCheckModelsReached{} -> [SuggestIncreaseMaxPmCheckModels] DsNonExhaustivePatterns{} -> noHints DsTopLevelBindsNotAllowed{} -> noHints DsUselessSpecialiseForClassMethodSelector{} -> noHints DsUselessSpecialiseForNoInlineFunction{} -> noHints DsMultiplicityCoercionsNotSupported -> noHints DsOrphanRule{} -> noHints DsRuleLhsTooComplicated{} -> noHints DsRuleIgnoredDueToConstructor{} -> noHints DsRuleBindersNotBound{} -> noHints DsLazyPatCantBindVarsOfUnliftedType{} -> noHints DsNotYetHandledByTH{} -> noHints DsAggregatedViewExpressions{} -> noHints DsUnbangedStrictPatterns{} -> noHints DsCannotMixPolyAndUnliftedBindings{} -> [SuggestAddTypeSignatures UnnamedBinding] DsWrongDoBind rhs _ -> [SuggestBindToWildcard rhs] DsUnusedDoBind rhs _ -> [SuggestBindToWildcard rhs] DsRecBindsNotAllowedForUnliftedTys{} -> noHints DsRuleMightInlineFirst _ lhs_id rule_act -> [SuggestAddInlineOrNoInlinePragma lhs_id rule_act] DsAnotherRuleMightFireFirst _ bad_rule _ -> [SuggestAddPhaseToCompetingRule bad_rule] DsIncompleteRecordSelector{} -> noHints diagnosticCode = constructorCode {- Note [Suggest NegativeLiterals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you write x :: Int8 x = -128 it'll parse as (negate 128), and overflow. In this case, suggest NegativeLiterals. We get an erroneous suggestion for x = 128 but perhaps that does not matter too much. 
-} -- -- Helper functions -- badMonadBind :: Type -> SDoc badMonadBind elt_ty = hang (text "A do-notation statement discarded a result of type") 2 (quotes (ppr elt_ty)) -- Print a single clause (for redundant/with-inaccessible-rhs) pprEqn :: HsMatchContextRn -> SDoc -> String -> SDoc pprEqn ctx q txt = pprContext True ctx (text txt) $ \f -> f (q <+> matchSeparator ctx <+> text "...") pprContext :: Bool -> HsMatchContextRn -> SDoc -> ((SDoc -> SDoc) -> SDoc) -> SDoc pprContext singular kind msg rest_of_msg_fun = vcat [text txt <+> msg, sep [ text "In" <+> ppr_match <> char ':' , nest 4 (rest_of_msg_fun pref)]] where txt | singular = "Pattern match" | otherwise = "Pattern match(es)" (ppr_match, pref) = case kind of FunRhs { mc_fun = L _ fun } -> (pprMatchContext kind, \ pp -> ppr fun <+> pp) _ -> (pprMatchContext kind, \ pp -> pp) dots :: Int -> [a] -> SDoc dots maxPatterns qs | qs `lengthExceeds` maxPatterns = text "..." | otherwise = empty ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Errors/Types.hs0000644000000000000000000001446407346545000022321 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE TypeFamilies #-} module GHC.HsToCore.Errors.Types where import GHC.Prelude import GHC.Core (CoreRule, CoreExpr, RuleName) import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.Type import GHC.Driver.DynFlags (DynFlags, xopt) import GHC.Driver.Flags (WarningFlag) import GHC.Hs import GHC.HsToCore.Pmc.Solver.Types import GHC.Types.Basic (Activation) import GHC.Types.Error import GHC.Types.ForeignCall import GHC.Types.Id import GHC.Types.Name (Name) import qualified GHC.LanguageExtensions as LangExt import GHC.Generics (Generic) newtype MinBound = MinBound Integer newtype MaxBound = MaxBound Integer type MaxUncoveredPatterns = Int type MaxPmCheckModels = Int -- | Diagnostics messages emitted during desugaring. data DsMessage -- | Simply wraps a generic 'Diagnostic' message. 
= DsUnknownMessage (UnknownDiagnostic (DiagnosticOpts DsMessage)) {-| DsEmptyEnumeration is a warning (controlled by the -Wempty-enumerations flag) that is emitted if an enumeration is empty. Example(s): main :: IO () main = do let enum = [5 .. 3] print enum Here 'enum' would yield an empty list, because 5 is greater than 3. Test case(s): warnings/should_compile/T10930 warnings/should_compile/T18402 warnings/should_compile/T10930b numeric/should_compile/T10929 numeric/should_compile/T7881 deSugar/should_run/T18172 -} | DsEmptyEnumeration {-| DsIdentitiesFound is a warning (controlled by the -Widentities flag) that is emitted on uses of Prelude numeric conversions that are probably the identity (and hence could be omitted). Example(s): main :: IO () main = do let x = 10 print $ conv 10 where conv :: Int -> Int conv x = fromIntegral x Here calling 'conv' is essentially the identity function, and therefore can be omitted. Test case(s): deSugar/should_compile/T4488 -} | DsIdentitiesFound !Id -- The conversion function !Type -- The type of conversion | DsOverflowedLiterals !Integer !Name !(Maybe (MinBound, MaxBound)) !NegLiteralExtEnabled -- FIXME(adn) Use a proper type instead of 'SDoc', but unfortunately -- 'SrcInfo' gives us an 'SDoc' to begin with. | DsRedundantBangPatterns !HsMatchContextRn !SDoc -- FIXME(adn) Use a proper type instead of 'SDoc', but unfortunately -- 'SrcInfo' gives us an 'SDoc' to begin with. 
| DsOverlappingPatterns !HsMatchContextRn !SDoc -- FIXME(adn) Use a proper type instead of 'SDoc' | DsInaccessibleRhs !HsMatchContextRn !SDoc | DsMaxPmCheckModelsReached !MaxPmCheckModels | DsNonExhaustivePatterns !HsMatchContextRn !ExhaustivityCheckType !MaxUncoveredPatterns [Id] [Nabla] | DsTopLevelBindsNotAllowed !BindsType !(HsBindLR GhcTc GhcTc) | DsUselessSpecialiseForClassMethodSelector !Id | DsUselessSpecialiseForNoInlineFunction !Id | DsMultiplicityCoercionsNotSupported | DsOrphanRule !CoreRule | DsRuleLhsTooComplicated !CoreExpr !CoreExpr | DsRuleIgnoredDueToConstructor !DataCon | DsRuleBindersNotBound ![Var] -- ^ The list of unbound binders ![Var] -- ^ The original binders !CoreExpr -- ^ The original LHS !CoreExpr -- ^ The optimised LHS | DsLazyPatCantBindVarsOfUnliftedType [Var] | DsNotYetHandledByTH !ThRejectionReason | DsAggregatedViewExpressions [[LHsExpr GhcTc]] | DsUnbangedStrictPatterns !(HsBindLR GhcTc GhcTc) | DsCannotMixPolyAndUnliftedBindings !(HsBindLR GhcTc GhcTc) | DsWrongDoBind !(LHsExpr GhcTc) !Type | DsUnusedDoBind !(LHsExpr GhcTc) !Type | DsRecBindsNotAllowedForUnliftedTys ![LHsBindLR GhcTc GhcTc] | DsRuleMightInlineFirst !RuleName !Var !Activation | DsAnotherRuleMightFireFirst !RuleName !RuleName -- the \"bad\" rule !Var {-| DsIncompleteRecordSelector is a warning triggered when we are not certain whether a record selector application will be successful. Currently, this means that the warning is triggered when there is a record selector of a data type that does not have that field in all its constructors. Example(s): data T = T1 | T2 {x :: Bool} f :: T -> Bool f a = x a Test cases: DsIncompleteRecSel1 DsIncompleteRecSel2 DsIncompleteRecSel3 -} | DsIncompleteRecordSelector !Name ![ConLike] !Bool deriving Generic -- The positional number of the argument for an expression (first, second, third, etc) newtype DsArgNum = DsArgNum Int -- | Why TemplateHaskell rejected the splice. 
Used in the 'DsNotYetHandledByTH' -- constructor of a 'DsMessage'. data ThRejectionReason = ThAmbiguousRecordUpdates !(HsRecUpdField GhcRn GhcRn) | ThAbstractClosedTypeFamily !(LFamilyDecl GhcRn) | ThForeignLabel !CLabelString | ThForeignExport !(LForeignDecl GhcRn) | ThMinimalPragmas | ThSCCPragmas | ThNoUserInline | ThExoticFormOfType !(HsType GhcRn) | ThAmbiguousRecordSelectors !(HsExpr GhcRn) | ThMonadComprehensionSyntax !(HsExpr GhcRn) | ThCostCentres !(HsExpr GhcRn) | ThExpressionForm !(HsExpr GhcRn) | ThExoticStatement [Stmt GhcRn (LHsExpr GhcRn)] | ThExoticLiteral !(HsLit GhcRn) | ThExoticPattern !(Pat GhcRn) | ThGuardedLambdas !(Match GhcRn (LHsExpr GhcRn)) | ThNegativeOverloadedPatterns !(Pat GhcRn) | ThHaddockDocumentation | ThWarningAndDeprecationPragmas [LIdP GhcRn] | ThSplicesWithinDeclBrackets | ThNonLinearDataCon data NegLiteralExtEnabled = YesUsingNegLiterals | NotUsingNegLiterals negLiteralExtEnabled :: DynFlags -> NegLiteralExtEnabled negLiteralExtEnabled dflags = if (xopt LangExt.NegativeLiterals dflags) then YesUsingNegLiterals else NotUsingNegLiterals newtype ExhaustivityCheckType = ExhaustivityCheckType (Maybe WarningFlag) data BindsType = UnliftedTypeBinds | StrictBinds ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Pmc/0000755000000000000000000000000007346545000020113 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Pmc/Ppr.hs0000644000000000000000000001665007346545000021220 0ustar0000000000000000 -- | Provides facilities for pretty-printing 'Nabla's in a way appropriate for -- user facing pattern match warnings. 
module GHC.HsToCore.Pmc.Ppr ( pprUncovered ) where import GHC.Prelude import GHC.Data.List.Infinite (Infinite (..)) import qualified GHC.Data.List.Infinite as Inf import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Var.Env import GHC.Types.Unique.DFM import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Builtin.Types import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Monad.Trans.RWS.CPS import GHC.Data.Maybe import Data.List.NonEmpty (NonEmpty, nonEmpty, toList) import GHC.HsToCore.Pmc.Types -- | Pretty-print the guts of an uncovered value vector abstraction, i.e., its -- components and refutable shapes associated to any mentioned variables. -- -- Example for @([Just p, q], [p :-> [3,4], q :-> [0,5]])@: -- -- @ -- (Just p) q -- where p is not one of {3, 4} -- q is not one of {0, 5} -- @ -- -- When the set of refutable shapes contains more than 3 elements, the -- additional elements are indicated by "...". pprUncovered :: Nabla -> [Id] -> SDoc pprUncovered nabla vas | isNullUDFM refuts = fsep vec -- there are no refutations | otherwise = hang (fsep vec) 4 $ text "where" <+> vcat (map (pprRefutableShapes . snd) (udfmToList refuts)) where init_prec -- No outer parentheses when it's a unary pattern by assuming lowest -- precedence | [_] <- vas = topPrec | otherwise = appPrec ppr_action = mapM (pprPmVar init_prec) vas (vec, renamings) = runPmPpr nabla ppr_action refuts = prettifyRefuts nabla renamings -- | Output refutable shapes of a variable in the form of @var is not one of {2, -- Nothing, 3}@. Will never print more than 3 refutable shapes, the tail is -- indicated by an ellipsis. pprRefutableShapes :: (SDoc,[PmAltCon]) -> SDoc pprRefutableShapes (var, alts) = var <+> text "is not one of" <+> format_alts alts where format_alts = braces . fsep . punctuate comma . shorten . map ppr_alt shorten (a:b:c:_:_) = a:b:c:[text "..."] shorten xs = xs ppr_alt (PmAltConLike cl) = ppr cl ppr_alt (PmAltLit lit) = ppr lit {- 1. 
Literals ~~~~~~~~~~~~~~ Starting with a function definition like: f :: Int -> Bool f 5 = True f 6 = True The uncovered set looks like: { var |> var /= 5, var /= 6 } Yet, we would like to print this nicely as follows: x , where x not one of {5,6} Since these variables will be shown to the programmer, we give them better names (t1, t2, ..) in 'prettifyRefuts', hence the SDoc in 'PrettyPmRefutEnv'. 2. Residual Constraints ~~~~~~~~~~~~~~~~~~~~~~~ Unhandled constraints that refer to HsExpr are typically ignored by the solver (it does not even substitute in HsExpr so they are even printed as wildcards). Additionally, the oracle returns a substitution if it succeeds so we apply this substitution to the vectors before printing them out (see function `pprOne' in "GHC.HsToCore.Pmc") to be more precise. -} -- | Extract and assigns pretty names to constraint variables with refutable -- shapes. prettifyRefuts :: Nabla -> DIdEnv (Id, SDoc) -> DIdEnv (SDoc, [PmAltCon]) prettifyRefuts nabla = listToUDFM_Directly . map attach_refuts . udfmToList where attach_refuts (u, (x, sdoc)) = (u, (sdoc, lookupRefuts nabla x)) type PmPprM a = RWS Nabla () (DIdEnv (Id, SDoc), Infinite SDoc) a -- Try nice names p,q,r,s,t before using the (ugly) t_i nameList :: Infinite SDoc nameList = map text ["p","q","r","s","t"] Inf.++ flip Inf.unfoldr (0 :: Int) (\ u -> (text ('t':show u), u+1)) runPmPpr :: Nabla -> PmPprM a -> (a, DIdEnv (Id, SDoc)) runPmPpr nabla m = case runRWS m nabla (emptyDVarEnv, nameList) of (a, (renamings, _), _) -> (a, renamings) -- | Allocates a new, clean name for the given 'Id' if it doesn't already have -- one. 
getCleanName :: Id -> PmPprM SDoc getCleanName x = do (renamings, name_supply) <- get let Inf clean_name name_supply' = name_supply case lookupDVarEnv renamings x of Just (_, nm) -> pure nm Nothing -> do put (extendDVarEnv renamings x (x, clean_name), name_supply') pure clean_name checkRefuts :: Id -> PmPprM (Maybe SDoc) -- the clean name if it has negative info attached checkRefuts x = do nabla <- ask case lookupRefuts nabla x of [] -> pure Nothing -- Will just be a wildcard later on _ -> Just <$> getCleanName x -- | Pretty print a variable, but remember to prettify the names of the variables -- that refer to neg-literals. The ones that cannot be shown are printed as -- underscores. pprPmVar :: PprPrec -> Id -> PmPprM SDoc pprPmVar prec x = do nabla <- ask case lookupSolution nabla x of Just (PACA alt _tvs args) -> pprPmAltCon prec alt args Nothing -> fromMaybe underscore <$> checkRefuts x pprPmAltCon :: PprPrec -> PmAltCon -> [Id] -> PmPprM SDoc pprPmAltCon _prec (PmAltLit l) _ = pure (ppr l) pprPmAltCon prec (PmAltConLike cl) args = do nabla <- ask pprConLike nabla prec cl args pprConLike :: Nabla -> PprPrec -> ConLike -> [Id] -> PmPprM SDoc pprConLike nabla _prec cl args | Just pm_expr_list <- pmExprAsList nabla (PmAltConLike cl) args = case pm_expr_list of NilTerminated list -> brackets . fsep . punctuate comma <$> mapM (pprPmVar appPrec) list WcVarTerminated pref x -> parens . fcat . punctuate colon <$> mapM (pprPmVar appPrec) (toList pref ++ [x]) pprConLike _nabla _prec (RealDataCon con) args | isUnboxedTupleDataCon con , let hash_parens doc = text "(#" <+> doc <+> text "#)" = hash_parens . fsep . punctuate comma <$> mapM (pprPmVar appPrec) args | isTupleDataCon con = parens . fsep . 
punctuate comma <$> mapM (pprPmVar appPrec) args pprConLike _nabla prec cl args | conLikeIsInfix cl = case args of [x, y] -> do x' <- pprPmVar funPrec x y' <- pprPmVar funPrec y return (cparen (prec > opPrec) (x' <+> ppr cl <+> y')) -- can it be infix but have more than two arguments? list -> pprPanic "pprConLike:" (ppr list) | null args = return (ppr cl) | otherwise = do args' <- mapM (pprPmVar appPrec) args return (cparen (prec > funPrec) (fsep (ppr cl : args'))) -- | The result of 'pmExprAsList'. data PmExprList = NilTerminated [Id] | WcVarTerminated (NonEmpty Id) Id -- | Extract a list of 'Id's out of a sequence of cons cells, optionally -- terminated by a wildcard variable instead of @[]@. Some examples: -- -- * @pmExprAsList (1:2:[]) == Just ('NilTerminated' [1,2])@, a regular, -- @[]@-terminated list. Should be pretty-printed as @[1,2]@. -- * @pmExprAsList (1:2:x) == Just ('WcVarTerminated' [1,2] x)@, a list prefix -- ending in a wildcard variable x (of list type). Should be pretty-printed as -- (1:2:_). -- * @pmExprAsList [] == Just ('NilTerminated' [])@ pmExprAsList :: Nabla -> PmAltCon -> [Id] -> Maybe PmExprList pmExprAsList nabla = go_con [] where go_var rev_pref x | Just (PACA alt _tvs args) <- lookupSolution nabla x = go_con rev_pref alt args go_var rev_pref x | Just pref <- nonEmpty (reverse rev_pref) = Just (WcVarTerminated pref x) go_var _ _ = Nothing go_con rev_pref (PmAltConLike (RealDataCon c)) es | c == nilDataCon = assert (null es) $ Just (NilTerminated (reverse rev_pref)) | c == consDataCon = assert (length es == 2) $ go_var (es !! 0 : rev_pref) (es !! 
1) go_con _ _ _ = Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Pmc/Solver/0000755000000000000000000000000007346545000021365 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Pmc/Solver/Types.hs0000644000000000000000000007507307346545000023041 0ustar0000000000000000{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE MultiWayIf #-} -- | Domain types used in "GHC.HsToCore.Pmc.Solver". -- The ultimate goal is to define 'Nabla', which models normalised refinement -- types from the paper -- [Lower Your Guards: A Compositional Pattern-Match Coverage Checker"](https://dl.acm.org/doi/abs/10.1145/3408989). module GHC.HsToCore.Pmc.Solver.Types ( -- * Normalised refinement types BotInfo(..), PmAltConApp(..), VarInfo(..), TmState(..), TyState(..), Nabla(..), Nablas(..), initNablas, lookupRefuts, lookupSolution, -- ** Looking up 'VarInfo' lookupVarInfo, lookupVarInfoNT, trvVarInfo, -- ** Caching residual COMPLETE sets CompleteMatch, ResidualCompleteMatches(..), getRcm, isRcmInitialised, -- ** Representations for Literals and AltCons PmLit(..), PmLitValue(..), PmAltCon(..), pmLitType, pmAltConType, isPmAltConMatchStrict, pmAltConImplBangs, -- *** PmAltConSet PmAltConSet, emptyPmAltConSet, isEmptyPmAltConSet, elemPmAltConSet, extendPmAltConSet, pmAltConSetElems, -- *** Equality on 'PmAltCon's PmEquality(..), eqPmAltCon, -- *** Operations on 'PmLit' literalToPmLit, negatePmLit, overloadPmLit, pmLitAsStringLit, coreExprAsPmLit ) where import GHC.Prelude import GHC.Data.Bag import GHC.Data.FastString import GHC.Types.Id import GHC.Types.Var.Set import GHC.Types.Unique.DSet import GHC.Types.Unique.SDFM import GHC.Types.Name import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import GHC.Utils.Misc (lastMaybe) import GHC.Data.List.SetOps (unionLists) import GHC.Data.Maybe import GHC.Core.Type import GHC.Core.TyCon import 
GHC.Types.Literal import GHC.Core import GHC.Core.TyCo.Compare( eqType ) import GHC.Core.Map.Expr import GHC.Core.Utils (exprType) import GHC.Builtin.Names import GHC.Builtin.Types import GHC.Builtin.Types.Prim import GHC.Tc.Solver.InertSet (InertSet, emptyInert) import GHC.Tc.Utils.TcType (isStringTy) import GHC.Types.CompleteMatch import GHC.Types.SourceText (SourceText(..), mkFractionalLit, FractionalLit , fractionalLitFromRational , FractionalExponentBase(..)) import Numeric (fromRat) import Data.Foldable (find) import Data.Ratio import GHC.Real (Ratio(..)) import qualified Data.Semigroup as Semi -- import GHC.Driver.Ppr -- -- * Normalised refinement types -- -- | A normalised refinement type ∇ (\"nabla\"), comprised of an inert set of -- canonical (i.e. mutually compatible) term and type constraints that form the -- refinement type's predicate. data Nabla = MkNabla { nabla_ty_st :: !TyState -- ^ Type oracle; things like a~Int , nabla_tm_st :: !TmState -- ^ Term oracle; things like x~Nothing } -- | An initial nabla that is always satisfiable initNabla :: Nabla initNabla = MkNabla initTyState initTmState instance Outputable Nabla where ppr nabla = hang (text "Nabla") 2 $ vcat [ -- intentionally formatted this way enable the dev to comment in only -- the info they need ppr (nabla_tm_st nabla), ppr (nabla_ty_st nabla) ] -- | A disjunctive bag of 'Nabla's, representing a refinement type. newtype Nablas = MkNablas (Bag Nabla) initNablas :: Nablas initNablas = MkNablas (unitBag initNabla) instance Outputable Nablas where ppr (MkNablas nablas) = ppr nablas instance Semigroup Nablas where MkNablas l <> MkNablas r = MkNablas (l `unionBags` r) instance Monoid Nablas where mempty = MkNablas emptyBag -- | The type oracle state. An 'GHC.Tc.Solver.Monad.InertSet' that we -- incrementally add local type constraints to, together with a sequence -- number that counts the number of times we extended it with new facts. 
data TyState = TySt { ty_st_n :: !Int, ty_st_inert :: !InertSet } -- | Not user-facing. instance Outputable TyState where ppr (TySt n inert) = ppr n <+> ppr inert initTyState :: TyState initTyState = TySt 0 emptyInert -- | The term oracle state. Stores 'VarInfo' for encountered 'Id's. These -- entries are possibly shared when we figure out that two variables must be -- equal, thus represent the same set of values. -- -- See Note [TmState invariants] in "GHC.HsToCore.Pmc.Solver". data TmState = TmSt { ts_facts :: !(UniqSDFM Id VarInfo) -- ^ Facts about term variables. Deterministic env, so that we generate -- deterministic error messages. , ts_reps :: !(CoreMap Id) -- ^ An environment for looking up whether we already encountered semantically -- equivalent expressions that we want to represent by the same 'Id' -- representative. , ts_dirty :: !DIdSet -- ^ Which 'VarInfo' needs to be checked for inhabitants because of new -- negative constraints (e.g. @x ≁ ⊥@ or @x ≁ K@). } -- | Information about an 'Id'. Stores positive ('vi_pos') facts, like @x ~ Just 42@, -- and negative ('vi_neg') facts, like "x is not (:)". -- Also caches the type ('vi_ty'), the 'ResidualCompleteMatches' of a COMPLETE set -- ('vi_rcm'). -- -- Subject to Note [The Pos/Neg invariant] in "GHC.HsToCore.Pmc.Solver". data VarInfo = VI { vi_id :: !Id -- ^ The 'Id' in question. Important for adding new constraints relative to -- this 'VarInfo' when we don't easily have the 'Id' available. , vi_pos :: ![PmAltConApp] -- ^ Positive info: 'PmAltCon' apps it is (i.e. @x ~ [Just y, PatSyn z]@), all -- at the same time (i.e. conjunctive). We need a list because of nested -- pattern matches involving pattern synonym -- case x of { Just y -> case x of PatSyn z -> ... } -- However, no more than one RealDataCon in the list, otherwise contradiction -- because of generativity. , vi_neg :: !PmAltConSet -- ^ Negative info: A list of 'PmAltCon's that it cannot match. 
-- Example, assuming -- -- @ -- data T = Leaf Int | Branch T T | Node Int T -- @ -- -- then @x ≁ [Leaf, Node]@ means that @x@ cannot match a @Leaf@ or @Node@, -- and hence can only match @Branch@. Is orthogonal to anything from 'vi_pos', -- in the sense that 'eqPmAltCon' returns @PossiblyOverlap@ for any pairing -- between 'vi_pos' and 'vi_neg'. -- See Note [Why record both positive and negative info?] -- It's worth having an actual set rather than a simple association list, -- because files like Cabal's `LicenseId` define relatively huge enums -- that lead to quadratic or worse behavior. , vi_bot :: BotInfo -- ^ Can this variable be ⊥? Models (mutually contradicting) @x ~ ⊥@ and -- @x ≁ ⊥@ constraints. E.g. -- * 'MaybeBot': Don't know; Neither @x ~ ⊥@ nor @x ≁ ⊥@. -- * 'IsBot': @x ~ ⊥@ -- * 'IsNotBot': @x ≁ ⊥@ , vi_rcm :: !ResidualCompleteMatches -- ^ A cache of the associated COMPLETE sets. At any time a superset of -- possible constructors of each COMPLETE set. So, if it's not in here, we -- can't possibly match on it. Complementary to 'vi_neg'. We still need it -- to recognise completion of a COMPLETE set efficiently for large enums. } data PmAltConApp = PACA { paca_con :: !PmAltCon , paca_tvs :: ![TyVar] , paca_ids :: ![Id] } -- | See 'vi_bot'. data BotInfo = IsBot | IsNotBot | MaybeBot deriving Eq instance Outputable PmAltConApp where ppr PACA{paca_con = con, paca_tvs = tvs, paca_ids = ids} = hsep (ppr con : map ((char '@' <>) . ppr) tvs ++ map ppr ids) instance Outputable BotInfo where ppr MaybeBot = underscore ppr IsBot = text "~⊥" ppr IsNotBot = text "≁⊥" -- | Not user-facing. instance Outputable TmState where ppr (TmSt state reps dirty) = ppr state $$ ppr reps $$ ppr dirty -- | Not user-facing. 
instance Outputable VarInfo where ppr (VI x pos neg bot cache) = braces (hcat (punctuate comma [pp_x, pp_pos, pp_neg, ppr bot, pp_cache])) where pp_x = ppr x <> dcolon <> ppr (idType x) pp_pos | [] <- pos = underscore | [p] <- pos = char '~' <> ppr p -- suppress outer [_] if singleton | otherwise = char '~' <> ppr pos pp_neg | isEmptyPmAltConSet neg = underscore | otherwise = char '≁' <> ppr neg pp_cache | RCM Nothing Nothing <- cache = underscore | otherwise = ppr cache -- | Initial state of the term oracle. initTmState :: TmState initTmState = TmSt emptyUSDFM emptyCoreMap emptyDVarSet -- | A data type that caches for the 'VarInfo' of @x@ the results of querying -- 'dsGetCompleteMatches' and then striking out all occurrences of @K@ for -- which we already know @x ≁ K@ from these sets. -- -- For motivation, see Section 5.3 in Lower Your Guards. -- See also Note [Implementation of COMPLETE pragmas] data ResidualCompleteMatches = RCM { rcm_vanilla :: !(Maybe DsCompleteMatch) -- ^ The residual set for the vanilla COMPLETE set from the data defn. -- Tracked separately from 'rcm_pragmas', because it might only be -- known much later (when we have enough type information to see the 'TyCon' -- of the match), or not at all even. Until that happens, it is 'Nothing'. , rcm_pragmas :: !(Maybe DsCompleteMatches) -- ^ The residual sets for /all/ COMPLETE sets from pragmas that are -- visible when compiling this module. Querying that set with -- 'dsGetCompleteMatches' requires 'DsM', so we initialise it with 'Nothing' -- until first needed in a 'DsM' context. 
} getRcm :: ResidualCompleteMatches -> DsCompleteMatches getRcm (RCM vanilla pragmas) = maybeToList vanilla ++ fromMaybe [] pragmas isRcmInitialised :: ResidualCompleteMatches -> Bool isRcmInitialised (RCM vanilla pragmas) = isJust vanilla && isJust pragmas instance Outputable ResidualCompleteMatches where -- formats as "[{Nothing,Just},{P,Q}]" ppr rcm = ppr (getRcm rcm) ----------------------- -- * Looking up VarInfo emptyRCM :: ResidualCompleteMatches emptyRCM = RCM Nothing Nothing emptyVarInfo :: Id -> VarInfo emptyVarInfo x = VI { vi_id = x , vi_pos = [] , vi_neg = emptyPmAltConSet -- Why not set IsNotBot for unlifted type here? -- Because we'd have to trigger an inhabitation test, which we can't. -- See case (4) in Note [Strict fields and variables of unlifted type] -- in GHC.HsToCore.Pmc.Solver , vi_bot = MaybeBot , vi_rcm = emptyRCM } lookupVarInfo :: TmState -> Id -> VarInfo -- (lookupVarInfo tms x) tells what we know about 'x' lookupVarInfo (TmSt env _ _) x = fromMaybe (emptyVarInfo x) (lookupUSDFM env x) -- | Like @lookupVarInfo ts x@, but @lookupVarInfo ts x = (y, vi)@ also looks -- through newtype constructors. We have @x ~ N1 (... (Nk y))@ such that the -- returned @y@ doesn't have a positive newtype constructor constraint -- associated with it (yet). The 'VarInfo' returned is that of @y@'s -- representative. -- -- Careful, this means that @idType x@ might be different to @idType y@, even -- modulo type normalisation! -- -- See also Note [Coverage checking Newtype matches] in GHC.HsToCore.Pmc.Solver. lookupVarInfoNT :: TmState -> Id -> (Id, VarInfo) lookupVarInfoNT ts x = case lookupVarInfo ts x of VI{ vi_pos = as_newtype -> Just y } -> lookupVarInfoNT ts y res -> (x, res) where as_newtype = listToMaybe . 
mapMaybe go go PACA{paca_con = PmAltConLike (RealDataCon dc), paca_ids = [y]} | isNewDataCon dc = Just y go _ = Nothing trvVarInfo :: Functor f => (VarInfo -> f (a, VarInfo)) -> Nabla -> Id -> f (a, Nabla) {-# INLINE trvVarInfo #-} -- This function is called a lot and we want to specilise it, not only -- for the type class, but also for its 'f' function argument. -- Before the INLINE pragma it sometimes inlined and sometimes didn't, -- depending delicately on GHC's optimisations. Better to use a pragma. trvVarInfo f nabla@MkNabla{ nabla_tm_st = ts@TmSt{ts_facts = env} } x = set_vi <$> f (lookupVarInfo ts x) where set_vi (a, vi') = (a, nabla{ nabla_tm_st = ts{ ts_facts = addToUSDFM env (vi_id vi') vi' } }) ------------------------------------------------ -- * Exported utility functions querying 'Nabla' lookupRefuts :: Nabla -> Id -> [PmAltCon] -- Unfortunately we need the extra bit of polymorphism and the unfortunate -- duplication of lookupVarInfo here. lookupRefuts MkNabla{ nabla_tm_st = ts } x = pmAltConSetElems $ vi_neg $ lookupVarInfo ts x isDataConSolution :: PmAltConApp -> Bool isDataConSolution PACA{paca_con = PmAltConLike (RealDataCon _)} = True isDataConSolution _ = False -- @lookupSolution nabla x@ picks a single solution ('vi_pos') of @x@ from -- possibly many, preferring 'RealDataCon' solutions whenever possible. lookupSolution :: Nabla -> Id -> Maybe PmAltConApp lookupSolution nabla x = case vi_pos (lookupVarInfo (nabla_tm_st nabla) x) of [] -> Nothing pos@(x:_) | Just sol <- find isDataConSolution pos -> Just sol | otherwise -> Just x -------------------------------------------------------------------------------- -- The rest is just providing an IR for (overloaded!) literals and AltCons that -- sits between Hs and Core. We need a reliable way to detect and determine -- equality between them, which is impossible with Hs (too expressive) and with -- Core (no notion of overloaded literals, and even plain 'Int' literals are -- actually constructor apps). 
Also String literals are troublesome. -- | Literals (simple and overloaded ones) for pattern match checking. -- -- See Note [Undecidable Equality for PmAltCons] data PmLit = PmLit { pm_lit_ty :: Type , pm_lit_val :: PmLitValue } data PmLitValue = PmLitInt Integer | PmLitRat Rational | PmLitChar Char -- We won't actually see PmLitString in the oracle since we desugar strings to -- lists | PmLitString FastString | PmLitOverInt Int {- How often Negated? -} Integer | PmLitOverRat Int {- How often Negated? -} FractionalLit | PmLitOverString FastString -- | Undecidable semantic equality result. -- See Note [Undecidable Equality for PmAltCons] data PmEquality = Equal | Disjoint | PossiblyOverlap deriving (Eq, Show) -- | When 'PmEquality' can be decided. @True <=> Equal@, @False <=> Disjoint@. decEquality :: Bool -> PmEquality decEquality True = Equal decEquality False = Disjoint -- | Undecidable equality for values represented by 'PmLit's. -- See Note [Undecidable Equality for PmAltCons] -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) -- * @Nothing@ ==> Equality relation undecidable eqPmLit :: PmLit -> PmLit -> PmEquality eqPmLit (PmLit t1 v1) (PmLit t2 v2) -- no haddock | pprTrace "eqPmLit" (ppr t1 <+> ppr v1 $$ ppr t2 <+> ppr v2) False = undefined | not (t1 `eqType` t2) = Disjoint | otherwise = go v1 v2 where go (PmLitInt i1) (PmLitInt i2) = decEquality (i1 == i2) go (PmLitRat r1) (PmLitRat r2) = decEquality (r1 == r2) go (PmLitChar c1) (PmLitChar c2) = decEquality (c1 == c2) go (PmLitString s1) (PmLitString s2) = decEquality (s1 == s2) go (PmLitOverInt n1 i1) (PmLitOverInt n2 i2) | n1 == n2 && i1 == i2 = Equal go (PmLitOverRat n1 r1) (PmLitOverRat n2 r2) | n1 == n2 && r1 == r2 = Equal go (PmLitOverString s1) (PmLitOverString s2) | s1 == s2 = Equal go _ _ = PossiblyOverlap -- | Syntactic equality. 
instance Eq PmLit where a == b = eqPmLit a b == Equal -- | Type of a 'PmLit' pmLitType :: PmLit -> Type pmLitType (PmLit ty _) = ty -- | Undecidable equality for values represented by 'ConLike's. -- See Note [Undecidable Equality for PmAltCons]. -- 'PatSynCon's aren't enforced to be generative, so two syntactically different -- 'PatSynCon's might match the exact same values. Without looking into and -- reasoning about the pattern synonym's definition, we can't decide if their -- sets of matched values is different. -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) -- * @Nothing@ ==> Equality relation undecidable eqConLike :: ConLike -> ConLike -> PmEquality eqConLike (RealDataCon dc1) (RealDataCon dc2) = decEquality (dc1 == dc2) eqConLike (PatSynCon psc1) (PatSynCon psc2) | psc1 == psc2 = Equal eqConLike _ _ = PossiblyOverlap -- | Represents the head of a match against a 'ConLike' or literal. -- Really similar to 'GHC.Core.AltCon'. data PmAltCon = PmAltConLike ConLike | PmAltLit PmLit data PmAltConSet = PACS !(UniqDSet ConLike) ![PmLit] emptyPmAltConSet :: PmAltConSet emptyPmAltConSet = PACS emptyUniqDSet [] isEmptyPmAltConSet :: PmAltConSet -> Bool isEmptyPmAltConSet (PACS cls lits) = isEmptyUniqDSet cls && null lits -- | Whether there is a 'PmAltCon' in the 'PmAltConSet' that compares 'Equal' to -- the given 'PmAltCon' according to 'eqPmAltCon'. 
elemPmAltConSet :: PmAltCon -> PmAltConSet -> Bool elemPmAltConSet (PmAltConLike cl) (PACS cls _ ) = elementOfUniqDSet cl cls elemPmAltConSet (PmAltLit lit) (PACS _ lits) = elem lit lits extendPmAltConSet :: PmAltConSet -> PmAltCon -> PmAltConSet extendPmAltConSet (PACS cls lits) (PmAltConLike cl) = PACS (addOneToUniqDSet cls cl) lits extendPmAltConSet (PACS cls lits) (PmAltLit lit) = PACS cls (unionLists lits [lit]) pmAltConSetElems :: PmAltConSet -> [PmAltCon] pmAltConSetElems (PACS cls lits) = map PmAltConLike (uniqDSetToList cls) ++ map PmAltLit lits instance Outputable PmAltConSet where ppr = ppr . pmAltConSetElems -- | We can't in general decide whether two 'PmAltCon's match the same set of -- values. In addition to the reasons in 'eqPmLit' and 'eqConLike', a -- 'PmAltConLike' might or might not represent the same value as a 'PmAltLit'. -- See Note [Undecidable Equality for PmAltCons]. -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) 
-- * @Nothing@ ==> Equality relation undecidable -- -- Examples (omitting some constructor wrapping): -- -- * @eqPmAltCon (LitInt 42) (LitInt 1) == Just False@: Lit equality is -- decidable -- * @eqPmAltCon (DataCon A) (DataCon B) == Just False@: DataCon equality is -- decidable -- * @eqPmAltCon (LitOverInt 42) (LitOverInt 1) == Nothing@: OverLit equality -- is undecidable -- * @eqPmAltCon (PatSyn PA) (PatSyn PB) == Nothing@: PatSyn equality is -- undecidable -- * @eqPmAltCon (DataCon I#) (LitInt 1) == Nothing@: DataCon to Lit -- comparisons are undecidable without reasoning about the wrapped @Int#@ -- * @eqPmAltCon (LitOverInt 1) (LitOverInt 1) == Just True@: We assume -- reflexivity for overloaded literals -- * @eqPmAltCon (PatSyn PA) (PatSyn PA) == Just True@: We assume reflexivity -- for Pattern Synonyms eqPmAltCon :: PmAltCon -> PmAltCon -> PmEquality eqPmAltCon (PmAltConLike cl1) (PmAltConLike cl2) = eqConLike cl1 cl2 eqPmAltCon (PmAltLit l1) (PmAltLit l2) = eqPmLit l1 l2 eqPmAltCon _ _ = PossiblyOverlap -- | Syntactic equality. instance Eq PmAltCon where a == b = eqPmAltCon a b == Equal -- | Type of a 'PmAltCon' pmAltConType :: PmAltCon -> [Type] -> Type pmAltConType (PmAltLit lit) _arg_tys = assert (null _arg_tys ) $ pmLitType lit pmAltConType (PmAltConLike con) arg_tys = conLikeResTy con arg_tys -- | Is a match on this constructor forcing the match variable? -- True of data constructors, literals and pattern synonyms (#17357), but not of -- newtypes. -- See Note [Coverage checking Newtype matches] in GHC.HsToCore.Pmc.Solver. 
isPmAltConMatchStrict :: PmAltCon -> Bool isPmAltConMatchStrict PmAltLit{} = True isPmAltConMatchStrict (PmAltConLike PatSynCon{}) = True -- #17357 isPmAltConMatchStrict (PmAltConLike (RealDataCon dc)) = not (isNewDataCon dc) pmAltConImplBangs :: PmAltCon -> [HsImplBang] pmAltConImplBangs PmAltLit{} = [] pmAltConImplBangs (PmAltConLike con) = conLikeImplBangs con {- Note [Undecidable Equality for PmAltCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Equality on overloaded literals is undecidable in the general case. Consider the following example: instance Num Bool where ... fromInteger 0 = False -- C-like representation of booleans fromInteger _ = True f :: Bool -> () f 1 = () -- Clause A f 2 = () -- Clause B Clause B is redundant but to detect this, we must decide the constraint: @fromInteger 2 ~ fromInteger 1@ which means that we have to look through function @fromInteger@, whose implementation could be anything. This poses difficulties for: 1. The expressive power of the check. We cannot expect a reasonable implementation of pattern matching to detect that @fromInteger 2 ~ fromInteger 1@ is True, unless we unfold function fromInteger. This puts termination at risk and is undecidable in the general case. 2. Error messages/Warnings. What should our message for @f@ above be? A reasonable approach would be to issue: Pattern matches are (potentially) redundant: f 2 = ... under the assumption that 1 == 2 but seems to complex and confusing for the user. We choose to equate only obviously equal overloaded literals, in all other cases we signal undecidability by returning Nothing from 'eqPmAltCons'. We do better for non-overloaded literals, because we know their fromInteger/fromString implementation is actually injective, allowing us to simplify the constraint @fromInteger 1 ~ fromInteger 2@ to @1 ~ 2@, which is trivially unsatisfiable. 
The impact of this treatment of overloaded literals is the following: * Redundancy checking is rather conservative, since it cannot see that clause B above is redundant. * We have instant equality check for overloaded literals (we do not rely on the term oracle which is rather expensive, both in terms of performance and memory). This significantly improves the performance of functions `covered` `uncovered` and `divergent` in "GHC.HsToCore.Pmc" and effectively addresses #11161. * The warnings issued are simpler. Similar reasoning applies to pattern synonyms: In contrast to data constructors, which are generative, constraints like F a ~ G b for two different pattern synonyms F and G aren't immediately unsatisfiable. We assume F a ~ F a, though. -} literalToPmLit :: Type -> Literal -> Maybe PmLit literalToPmLit ty l = PmLit ty <$> go l where go (LitChar c) = Just (PmLitChar c) go (LitFloat r) = Just (PmLitRat r) go (LitDouble r) = Just (PmLitRat r) go (LitString s) = Just (PmLitString (mkFastStringByteString s)) go (LitNumber _ i) = Just (PmLitInt i) go _ = Nothing negatePmLit :: PmLit -> Maybe PmLit negatePmLit (PmLit ty v) = PmLit ty <$> go v where go (PmLitInt i) = Just (PmLitInt (-i)) go (PmLitRat r) = Just (PmLitRat (-r)) go (PmLitOverInt n i) = Just (PmLitOverInt (n+1) i) go (PmLitOverRat n r) = Just (PmLitOverRat (n+1) r) go _ = Nothing overloadPmLit :: Type -> PmLit -> Maybe PmLit overloadPmLit ty (PmLit _ v) = PmLit ty <$> go v where go (PmLitInt i) = Just (PmLitOverInt 0 i) go (PmLitRat r) = Just $! PmLitOverRat 0 $! 
fractionalLitFromRational r go (PmLitString s) | ty `eqType` stringTy = Just v | otherwise = Just (PmLitOverString s) go ovRat@PmLitOverRat{} = Just ovRat go _ = Nothing pmLitAsStringLit :: PmLit -> Maybe FastString pmLitAsStringLit (PmLit _ (PmLitString s)) = Just s pmLitAsStringLit _ = Nothing coreExprAsPmLit :: CoreExpr -> Maybe PmLit -- coreExprAsPmLit e | pprTrace "coreExprAsPmLit" (ppr e) False = undefined coreExprAsPmLit (Tick _t e) = coreExprAsPmLit e coreExprAsPmLit (Lit l) = literalToPmLit (literalType l) l coreExprAsPmLit e = case collectArgs e of (Var x, [Lit l]) | Just dc <- isDataConWorkId_maybe x , dc `elem` [intDataCon, wordDataCon, charDataCon, floatDataCon, doubleDataCon] -> literalToPmLit (exprType e) l (Var x, [Lit (LitNumber _ l)]) | Just (ty,l) <- bignum_lit_maybe x l -> Just (PmLit ty (PmLitInt l)) (Var x, [_ty, n_arg, d_arg]) | Just dc <- isDataConWorkId_maybe x , dataConName dc == ratioDataConName , Just (PmLit _ (PmLitInt n)) <- coreExprAsPmLit n_arg , Just (PmLit _ (PmLitInt d)) <- coreExprAsPmLit d_arg -- HACK: just assume we have a literal double. This case only occurs for -- overloaded lits anyway, so we immediately override type information -> literalToPmLit (exprType e) (mkLitDouble (n % d)) (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] | is_rebound_name x fromIntegerName , Just arg <- lastMaybe args , Just (_ty,l) <- bignum_conapp_maybe arg -> Just (PmLit integerTy (PmLitInt l)) >>= overloadPmLit (exprType e) (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] -- fromRational | is_rebound_name x fromRationalName , [r] <- dropWhile (not . is_ratio) args -> coreExprAsPmLit r >>= overloadPmLit (exprType e) --Rationals with large exponents (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] -- See Note [Dealing with rationals with large exponents] -- mkRationalBase* | Just exp_base <- is_larg_exp_ratio x , [r, exp] <- dropWhile (not . 
is_ratio) args , (Var x, [_ty, n_arg, d_arg]) <- collectArgs r , Just dc <- isDataConWorkId_maybe x , dataConName dc == ratioDataConName , Just (PmLit _ (PmLitInt n)) <- coreExprAsPmLit n_arg , Just (PmLit _ (PmLitInt d)) <- coreExprAsPmLit d_arg , Just (_exp_ty,exp') <- bignum_conapp_maybe exp -> do let rational = (abs n) :% d let neg = if n < 0 then 1 else 0 let frac = mkFractionalLit NoSourceText False rational exp' exp_base Just $ PmLit (exprType e) (PmLitOverRat neg frac) (Var x, args) | is_rebound_name x fromStringName -- See Note [Detecting overloaded literals with -XRebindableSyntax] , s:_ <- filter (isStringTy . exprType) $ filter isValArg args -- NB: Calls coreExprAsPmLit and then overloadPmLit, so that we return PmLitOverStrings -> coreExprAsPmLit s >>= overloadPmLit (exprType e) -- These last two cases handle proper String literals (Var x, [Type ty]) | Just dc <- isDataConWorkId_maybe x , dc == nilDataCon , ty `eqType` charTy -> literalToPmLit stringTy (mkLitString "") (Var x, [Lit l]) | idName x `elem` [unpackCStringName, unpackCStringUtf8Name] -> literalToPmLit stringTy l _ -> Nothing where bignum_conapp_maybe (App (Var x) (Lit (LitNumber _ l))) = bignum_lit_maybe x l bignum_conapp_maybe _ = Nothing bignum_lit_maybe x l | Just dc <- isDataConWorkId_maybe x = if | dc == integerISDataCon -> Just (integerTy,l) | dc == integerIPDataCon -> Just (integerTy,l) | dc == integerINDataCon -> Just (integerTy,negate l) | dc == naturalNSDataCon -> Just (naturalTy,l) | dc == naturalNBDataCon -> Just (naturalTy,l) | otherwise -> Nothing bignum_lit_maybe _ _ = Nothing is_ratio (Type _) = False is_ratio r | Just (tc, _) <- splitTyConApp_maybe (exprType r) = tyConName tc == ratioTyConName | otherwise = False is_larg_exp_ratio x | is_rebound_name x mkRationalBase10Name = Just Base10 | is_rebound_name x mkRationalBase2Name = Just Base2 | otherwise = Nothing -- See Note [Detecting overloaded literals with -XRebindableSyntax] is_rebound_name :: Id -> Name -> Bool 
is_rebound_name x n = getOccFS (idName x) == getOccFS n {- Note [Detecting overloaded literals with -XRebindableSyntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Normally, we'd find e.g. overloaded string literals by comparing the application head of an expression to `fromStringName`. But that doesn't work with -XRebindableSyntax: The `Name` of a user-provided `fromString` function is different to `fromStringName`, which lives in a certain module, etc. There really is no other way than to compare `OccName`s and guess which argument is the actual literal string (we assume it's the first argument of type `String`). The same applies to other overloaded literals, such as overloaded rationals (`fromRational`)and overloaded integer literals (`fromInteger`). Note [Dealing with rationals with large exponents] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rationals with large exponents are *not* desugared to a simple rational. As that would require us to compute their value which can be expensive. Rather they desugar to an expression. For example 1e1000 will desugar to an expression of the form: `mkRationalWithExponentBase10 (1 :% 1) 1000` Only overloaded literals desugar to this form however, so we we can just return a overloaded rational literal. The most complex case is if we have RebindableSyntax enabled. By example if we have a pattern like this: `f 3.3 = True` It will desugar to: fromRational [TYPE: Rational, mkRationalBase10 (:% @Integer 10 1) (-1)] The fromRational is properly detected as an overloaded Rational by coreExprAsPmLit and it's general code for detecting overloaded rationals. See Note [Detecting overloaded literals with -XRebindableSyntax]. This case then recurses into coreExprAsPmLit passing only the expression `mkRationalBase10 (:% @Integer 10 1) (-1)`. Which is caught by rationals with large exponents case. This will return a `PmLitOverRat` literal. 
Which is then passed to overloadPmLit which simply returns it as-is since it's already overloaded. -} instance Outputable PmLitValue where ppr (PmLitInt i) = ppr i ppr (PmLitRat r) = double (fromRat r) -- good enough ppr (PmLitChar c) = pprHsChar c ppr (PmLitString s) = pprHsString s ppr (PmLitOverInt n i) = minuses n (ppr i) ppr (PmLitOverRat n r) = minuses n (ppr r) ppr (PmLitOverString s) = pprHsString s -- Take care of negated literals minuses :: Int -> SDoc -> SDoc minuses n sdoc = iterate (\sdoc -> parens (char '-' <> sdoc)) sdoc !! n instance Outputable PmLit where ppr (PmLit ty v) = ppr v <> suffix where -- Some ad-hoc hackery for displaying proper lit suffixes based on type tbl = [ (intPrimTy, primIntSuffix) , (int64PrimTy, primInt64Suffix) , (wordPrimTy, primWordSuffix) , (word64PrimTy, primWord64Suffix) , (charPrimTy, primCharSuffix) , (floatPrimTy, primFloatSuffix) , (doublePrimTy, primDoubleSuffix) ] suffix = fromMaybe empty (snd <$> find (eqType ty . fst) tbl) instance Outputable PmAltCon where ppr (PmAltConLike cl) = ppr cl ppr (PmAltLit l) = ppr l instance Outputable PmEquality where ppr = text . show ghc-lib-parser-9.12.2.20250421/compiler/GHC/HsToCore/Pmc/Types.hs0000644000000000000000000002366207346545000021564 0ustar0000000000000000 {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {- Author: George Karachalias Sebastian Graf -} -- | Types used through-out pattern match checking. This module is mostly there -- to be imported from "GHC.HsToCore.Types". The exposed API is that of -- "GHC.HsToCore.Pmc". -- -- These types model the paper -- [Lower Your Guards: A Compositional Pattern-Match Coverage Checker"](https://dl.acm.org/doi/abs/10.1145/3408989). 
module GHC.HsToCore.Pmc.Types ( -- * LYG syntax -- ** Guard language SrcInfo(..), PmGrd(..), GrdDag(..), consGrdDag, gdSeq, sequencePmGrds, sequenceGrdDags, alternativesGrdDags, -- ** Guard tree language PmMatchGroup(..), PmMatch(..), PmGRHSs(..), PmGRHS(..), PmPatBind(..), PmEmptyCase(..), PmRecSel(..), -- * Coverage Checking types RedSets (..), Precision (..), CheckResult (..), -- * Pre and post coverage checking synonyms Pre, Post, -- * Normalised refinement types module GHC.HsToCore.Pmc.Solver.Types ) where import GHC.Prelude import GHC.HsToCore.Pmc.Solver.Types import GHC.Data.OrdList import GHC.Types.Id import GHC.Types.Var (EvVar) import GHC.Types.SrcLoc import GHC.Utils.Outputable import GHC.Core.ConLike import GHC.Core.Type import GHC.Core import Data.List.NonEmpty ( NonEmpty(..) ) import qualified Data.List.NonEmpty as NE import qualified Data.Semigroup as Semi -- -- * Guard language -- -- | A very simple language for pattern guards. Let bindings, bang patterns, -- and matching variables against flat constructor patterns. -- The LYG guard language. data PmGrd = -- | @PmCon x K dicts args@ corresponds to a @K dicts args <- x@ guard. -- The @args@ are bound in this construct, the @x@ is just a use. -- For the arguments' meaning see 'GHC.Hs.Pat.ConPatOut'. PmCon { pm_id :: !Id, pm_con_con :: !PmAltCon, pm_con_tvs :: ![TyVar], pm_con_dicts :: ![EvVar], pm_con_args :: ![Id] } -- | @PmBang x@ corresponds to a @seq x True@ guard. -- If the extra 'SrcInfo' is present, the bang guard came from a source -- bang pattern, in which case we might want to report it as redundant. -- See Note [Dead bang patterns] in GHC.HsToCore.Pmc.Check. | PmBang { pm_id :: !Id, _pm_loc :: !(Maybe SrcInfo) } -- | @PmLet x expr@ corresponds to a @let x = expr@ guard. This actually -- /binds/ @x@. | PmLet { pm_id :: !Id, _pm_let_expr :: !CoreExpr } -- | Should not be user-facing. 
instance Outputable PmGrd where ppr (PmCon x alt _tvs _con_dicts con_args) = hsep [ppr alt, hsep (map ppr con_args), text "<-", ppr x] ppr (PmBang x _loc) = char '!' <> ppr x ppr (PmLet x expr) = hsep [text "let", ppr x, text "=", ppr expr] -- -- * Guard tree language -- -- | Means by which we identify a source construct for later pretty-printing in -- a warning message. 'SDoc' for the equation to show, 'Located' for the -- location. newtype SrcInfo = SrcInfo (Located SDoc) -- | A series-parallel graph of 'PmGrd's, so very nearly a guard tree, if -- it weren't for or-patterns/'GdAlt'! -- The implicit "source" corresponds to "before the match" and the implicit -- "sink" corresponds to "after a successful match". -- -- * 'GdEnd' is a 'GrdDag' that always matches. -- * 'GdOne' is a 'GrdDag' that matches iff its 'PmGrd' matches. -- * @'GdSeq' g1 g2@ corresponds to matching guards @g1@ and then @g2@ -- if matching @g1@ succeeded. -- Example: The Haskell guard @| x > 1, x < 10 = ...@ will test @x > 1@ -- before @x < 10@, failing if either test fails. -- * @'GdAlt' g1 g2@ is far less common than 'GdSeq' and corresponds to -- matching an or-pattern @(LT; EQ)@, succeeding if the -- match variable matches /either/ 'LT' or 'EQ'. -- See Note [Implementation of OrPatterns] for a larger example. -- data GrdDag = GdEnd | GdOne !PmGrd | GdSeq !GrdDag !GrdDag | GdAlt !GrdDag !GrdDag -- | Sequentially compose a list of 'PmGrd's into a 'GrdDag'. sequencePmGrds :: [PmGrd] -> GrdDag sequencePmGrds = sequenceGrdDags . map GdOne -- | Sequentially compose a list of 'GrdDag's. sequenceGrdDags :: [GrdDag] -> GrdDag sequenceGrdDags xs = foldr gdSeq GdEnd xs -- | Sequentially compose a 'PmGrd' in front of a 'GrdDag'. consGrdDag :: PmGrd -> GrdDag -> GrdDag consGrdDag g d = gdSeq (GdOne g) d -- | Sequentially compose two 'GrdDag's. A smart constructor for `GdSeq` that -- eliminates `GdEnd`s. 
gdSeq :: GrdDag -> GrdDag -> GrdDag gdSeq g1 GdEnd = g1 gdSeq GdEnd g2 = g2 gdSeq g1 g2 = g1 `GdSeq` g2 -- | Parallel composition of a list of 'GrdDag's. -- Needs a non-empty list as 'GdAlt' does not have a neutral element. alternativesGrdDags :: NonEmpty GrdDag -> GrdDag alternativesGrdDags xs = foldr1 GdAlt xs -- | A guard tree denoting 'MatchGroup'. newtype PmMatchGroup p = PmMatchGroup (NonEmpty (PmMatch p)) -- | A guard tree denoting 'Match': A payload describing the pats and a bunch of -- GRHS. data PmMatch p = PmMatch { pm_pats :: !p, pm_grhss :: !(PmGRHSs p) } -- | A guard tree denoting 'GRHSs': A bunch of 'PmLet' guards for local -- bindings from the 'GRHSs's @where@ clauses and the actual list of 'GRHS'. -- See Note [Long-distance information for HsLocalBinds] in -- "GHC.HsToCore.Pmc.Desugar". data PmGRHSs p = PmGRHSs { pgs_lcls :: !p, pgs_grhss :: !(NonEmpty (PmGRHS p))} -- | A guard tree denoting 'GRHS': A payload describing the grds and a 'SrcInfo' -- useful for printing out in warnings messages. data PmGRHS p = PmGRHS { pg_grds :: !p, pg_rhs :: !SrcInfo } -- | A guard tree denoting an -XEmptyCase. newtype PmEmptyCase = PmEmptyCase { pe_var :: Id } -- | A guard tree denoting a pattern binding. newtype PmPatBind p = -- just reuse GrdGRHS and pretend its @SrcInfo@ is info on the /pattern/, -- rather than on the pattern bindings. PmPatBind (PmGRHS p) -- A guard tree denoting a record selector application data PmRecSel v = PmRecSel { pr_arg_var :: v, pr_arg :: CoreExpr, pr_cons :: [ConLike] } instance Outputable SrcInfo where ppr (SrcInfo (L (RealSrcSpan rss _) _)) = ppr (srcSpanStartLine rss) ppr (SrcInfo (L s _)) = ppr s -- | Format LYG guards as @| True <- x, let x = 42, !z@ instance Outputable GrdDag where ppr GdEnd = empty ppr (GdOne g) = ppr g ppr (GdSeq d1 d2) = ppr d1 <> comma <+> ppr d2 ppr d0@GdAlt{} = parens $ fsep (ppr d : map ((semi <+>) . 
ppr) ds) where d NE.:| ds = collect d0 collect (GdAlt d1 d2) = collect d1 Semi.<> collect d2 collect d = NE.singleton d -- | Format a LYG sequence (e.g. 'Match'es of a 'MatchGroup' or 'GRHSs') as -- @{ ; ...; }@ pprLygSequence :: Outputable a => NonEmpty a -> SDoc pprLygSequence (NE.toList -> as) = braces (space <> fsep (punctuate semi (map ppr as)) <> space) instance Outputable p => Outputable (PmMatchGroup p) where ppr (PmMatchGroup matches) = pprLygSequence matches instance Outputable p => Outputable (PmMatch p) where ppr (PmMatch { pm_pats = grds, pm_grhss = grhss }) = ppr grds <+> ppr grhss instance Outputable p => Outputable (PmGRHSs p) where ppr (PmGRHSs { pgs_lcls = _lcls, pgs_grhss = grhss }) = ppr grhss instance Outputable p => Outputable (PmGRHS p) where ppr (PmGRHS { pg_grds = grds, pg_rhs = rhs }) = ppr grds <+> text "->" <+> ppr rhs instance Outputable p => Outputable (PmPatBind p) where ppr (PmPatBind PmGRHS { pg_grds = grds, pg_rhs = bind }) = ppr bind <+> ppr grds <+> text "=" <+> text "..." instance Outputable PmEmptyCase where ppr (PmEmptyCase { pe_var = var }) = text " ppr var <> text ">" data Precision = Approximate | Precise deriving (Eq, Show) instance Outputable Precision where ppr = text . show instance Semi.Semigroup Precision where Precise <> Precise = Precise _ <> _ = Approximate instance Monoid Precision where mempty = Precise mappend = (Semi.<>) -- | Redundancy sets, used to determine redundancy of RHSs and bang patterns -- (later digested into a 'CIRB'). data RedSets = RedSets { rs_cov :: !Nablas -- ^ The /Covered/ set; the set of values reaching a particular program -- point. , rs_div :: !Nablas -- ^ The /Diverging/ set; empty if no match can lead to divergence. -- If it wasn't empty, we have to turn redundancy warnings into -- inaccessibility warnings for any subclauses. 
, rs_bangs :: !(OrdList (Nablas, SrcInfo)) -- ^ If any of the 'Nablas' is empty, the corresponding 'SrcInfo' pin-points -- a bang pattern in source that is redundant. See Note [Dead bang patterns]. } instance Outputable RedSets where ppr RedSets { rs_cov = _cov, rs_div = _div, rs_bangs = _bangs } -- It's useful to change this definition for different verbosity levels in -- printf-debugging = empty -- | Pattern-match coverage check result data CheckResult a = CheckResult { cr_ret :: !a -- ^ A hole for redundancy info and covered sets. , cr_uncov :: !Nablas -- ^ The set of uncovered values falling out at the bottom. -- (for -Wincomplete-patterns, but also important state for the algorithm) , cr_approx :: !Precision -- ^ A flag saying whether we ran into the 'maxPmCheckModels' limit for the -- purpose of suggesting to crank it up in the warning message. Writer state. } deriving Functor instance Outputable a => Outputable (CheckResult a) where ppr (CheckResult c unc pc) = text "CheckResult" <+> ppr_precision pc <+> braces (fsep [ field "ret" c <> comma , field "uncov" unc]) where ppr_precision Precise = empty ppr_precision Approximate = text "(Approximate)" field name value = text name <+> equals <+> ppr value -- -- * Pre and post coverage checking synonyms -- -- | Used as tree payload pre-checking. The LYG guards to check. type Pre = GrdDag -- | Used as tree payload post-checking. The redundancy info we elaborated. type Post = RedSets ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/0000755000000000000000000000000007346545000016715 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Decl.hs0000644000000000000000000003524707346545000020133 0ustar0000000000000000 {-# LANGUAGE NondecreasingIndentation #-} {-# LANGUAGE LambdaCase #-} {- (c) The University of Glasgow 2006-2008 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 -} -- | Module for constructing interface declaration values -- from the corresponding 'TyThing's. 
module GHC.Iface.Decl ( coAxiomToIfaceDecl , tyThingToIfaceDecl -- Converting things to their Iface equivalents , toIfaceBooleanFormula ) where import GHC.Prelude import GHC.Tc.Utils.TcType import GHC.Iface.Syntax import GHC.CoreToIface import GHC.Core.Class import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Core.Type import GHC.Core.Multiplicity import GHC.Types.Id import GHC.Types.Var.Env import GHC.Types.Var import GHC.Types.Name import GHC.Types.Basic import GHC.Types.TyThing import GHC.Types.SrcLoc import GHC.Utils.Panic.Plain import GHC.Utils.Misc import GHC.Data.Maybe import GHC.Data.BooleanFormula import Data.List ( findIndex, mapAccumL ) {- ************************************************************************ * * Converting things to their Iface equivalents * * ************************************************************************ -} tyThingToIfaceDecl :: Bool -> TyThing -> IfaceDecl tyThingToIfaceDecl _ (AnId id) = idToIfaceDecl id tyThingToIfaceDecl _ (ATyCon tycon) = snd (tyConToIfaceDecl emptyTidyEnv tycon) tyThingToIfaceDecl _ (ACoAxiom ax) = coAxiomToIfaceDecl ax tyThingToIfaceDecl show_linear_types (AConLike cl) = case cl of RealDataCon dc -> dataConToIfaceDecl show_linear_types dc -- for ppr purposes only PatSynCon ps -> patSynToIfaceDecl ps -------------------------- idToIfaceDecl :: Id -> IfaceDecl -- The Id is already tidied, so that locally-bound names -- (lambdas, for-alls) already have non-clashing OccNames -- We can't tidy it here, locally, because it may have -- free variables in its type or IdInfo idToIfaceDecl id = IfaceId { ifName = getName id, ifType = toIfaceType (idType id), ifIdDetails = toIfaceIdDetails (idDetails id), ifIdInfo = toIfaceIdInfo (idInfo id) } -------------------------- dataConToIfaceDecl :: Bool -> DataCon -> IfaceDecl dataConToIfaceDecl show_linear_types dataCon = IfaceId { ifName = getName dataCon, ifType = toIfaceType (dataConDisplayType 
show_linear_types dataCon), ifIdDetails = IfVanillaId, ifIdInfo = [] } -------------------------- coAxiomToIfaceDecl :: CoAxiom br -> IfaceDecl -- We *do* tidy Axioms, because they are not (and cannot -- conveniently be) built in tidy form coAxiomToIfaceDecl ax@(CoAxiom { co_ax_tc = tycon, co_ax_branches = branches , co_ax_role = role }) = IfaceAxiom { ifName = getName ax , ifTyCon = toIfaceTyCon tycon , ifRole = role , ifAxBranches = map (coAxBranchToIfaceBranch tycon (map coAxBranchLHS branch_list)) branch_list } where branch_list = fromBranches branches -- 2nd parameter is the list of branch LHSs, in case of a closed type family, -- for conversion from incompatible branches to incompatible indices. -- For an open type family the list should be empty. -- See Note [Storing compatibility] in GHC.Core.Coercion.Axiom coAxBranchToIfaceBranch :: TyCon -> [[Type]] -> CoAxBranch -> IfaceAxBranch coAxBranchToIfaceBranch tc lhs_s (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_eta_tvs = eta_tvs , cab_lhs = lhs, cab_roles = roles , cab_rhs = rhs, cab_incomps = incomps }) = IfaceAxBranch { ifaxbTyVars = toIfaceTvBndrs tvs , ifaxbCoVars = map toIfaceIdBndr cvs , ifaxbEtaTyVars = toIfaceTvBndrs eta_tvs , ifaxbLHS = toIfaceTcArgs tc lhs , ifaxbRoles = roles , ifaxbRHS = toIfaceType rhs , ifaxbIncomps = iface_incomps } where iface_incomps = map (expectJust "iface_incomps" . flip findIndex lhs_s . eqTypes . 
coAxBranchLHS) incomps ----------------- tyConToIfaceDecl :: TidyEnv -> TyCon -> (TidyEnv, IfaceDecl) -- We *do* tidy TyCons, because they are not (and cannot -- conveniently be) built in tidy form -- The returned TidyEnv is the one after tidying the tyConTyVars tyConToIfaceDecl env tycon | Just clas <- tyConClass_maybe tycon = classToIfaceDecl env clas | Just syn_rhs <- synTyConRhs_maybe tycon = ( tc_env1 , IfaceSynonym { ifName = getName tycon, ifRoles = tyConRoles tycon, ifSynRhs = if_syn_type syn_rhs, ifBinders = if_binders, ifResKind = if_res_kind }) | Just fam_flav <- famTyConFlav_maybe tycon = ( tc_env1 , IfaceFamily { ifName = getName tycon, ifResVar = mkIfLclName <$> if_res_var, ifFamFlav = to_if_fam_flav fam_flav, ifBinders = if_binders, ifResKind = if_res_kind, ifFamInj = tyConInjectivityInfo tycon }) | isAlgTyCon tycon = ( tc_env1 , IfaceData { ifName = getName tycon, ifBinders = if_binders, ifResKind = if_res_kind, ifCType = tyConCType_maybe tycon, ifRoles = tyConRoles tycon, ifCtxt = tidyToIfaceContext tc_env1 (tyConStupidTheta tycon), ifCons = ifaceConDecls (algTyConRhs tycon), ifGadtSyntax = isGadtSyntaxTyCon tycon, ifParent = parent }) | otherwise -- FunTyCon, PrimTyCon, promoted TyCon/DataCon -- We only convert these TyCons to IfaceTyCons when we are -- just about to pretty-print them, not because we are going -- to put them into interface files = ( env , IfaceData { ifName = getName tycon, ifBinders = if_binders, ifResKind = if_res_kind, ifCType = Nothing, ifRoles = tyConRoles tycon, ifCtxt = [], ifCons = IfDataTyCon False [], ifGadtSyntax = False, ifParent = IfNoParent }) where -- NOTE: Not all TyCons have `tyConTyVars` field. Forcing this when `tycon` -- is one of these TyCons (FunTyCon, PrimTyCon, PromotedDataCon) will cause -- an error. 
(tc_env1, tc_binders) = tidyTyConBinders env (tyConBinders tycon) tc_tyvars = binderVars tc_binders if_binders = toIfaceForAllBndrs tc_binders -- No tidying of the binders; they are already tidy if_res_kind = tidyToIfaceType tc_env1 (tyConResKind tycon) if_syn_type ty = tidyToIfaceType tc_env1 ty if_res_var = getOccFS `fmap` tyConFamilyResVar_maybe tycon parent = case tyConFamInstSig_maybe tycon of Just (tc, ty, ax) -> IfDataInstance (coAxiomName ax) (toIfaceTyCon tc) (tidyToIfaceTcArgs tc_env1 tc ty) Nothing -> IfNoParent to_if_fam_flav OpenSynFamilyTyCon = IfaceOpenSynFamilyTyCon to_if_fam_flav AbstractClosedSynFamilyTyCon = IfaceAbstractClosedSynFamilyTyCon to_if_fam_flav (DataFamilyTyCon {}) = IfaceDataFamilyTyCon to_if_fam_flav (BuiltInSynFamTyCon {}) = IfaceBuiltInSynFamTyCon to_if_fam_flav (ClosedSynFamilyTyCon Nothing) = IfaceClosedSynFamilyTyCon Nothing to_if_fam_flav (ClosedSynFamilyTyCon (Just ax)) = IfaceClosedSynFamilyTyCon (Just (axn, ibr)) where defs = fromBranches $ coAxiomBranches ax lhss = map coAxBranchLHS defs ibr = map (coAxBranchToIfaceBranch tycon lhss) defs axn = coAxiomName ax ifaceConDecls (NewTyCon { data_con = con }) = IfNewTyCon (ifaceConDecl con) ifaceConDecls (DataTyCon { data_cons = cons, is_type_data = type_data }) = IfDataTyCon type_data (map ifaceConDecl cons) ifaceConDecls (TupleTyCon { data_con = con }) = IfDataTyCon False [ifaceConDecl con] ifaceConDecls (SumTyCon { data_cons = cons }) = IfDataTyCon False (map ifaceConDecl cons) ifaceConDecls AbstractTyCon = IfAbstractTyCon -- The AbstractTyCon case happens when a TyCon has been trimmed -- during tidying. -- Furthermore, tyThingToIfaceDecl is also used in GHC.Tc.Module -- for GHCi, when browsing a module, in which case the -- AbstractTyCon and TupleTyCon cases are perfectly sensible. -- (Tuple declarations are not serialised into interface files.) 
ifaceConDecl data_con = IfCon { ifConName = dataConName data_con, ifConInfix = dataConIsInfix data_con, ifConWrapper = isJust (dataConWrapId_maybe data_con), ifConExTCvs = map toIfaceBndr ex_tvs', ifConUserTvBinders = toIfaceForAllBndrs user_bndrs', ifConEqSpec = map (to_eq_spec . eqSpecPair) eq_spec, ifConCtxt = tidyToIfaceContext con_env2 theta, ifConArgTys = map (\(Scaled w t) -> (tidyToIfaceType con_env2 w , (tidyToIfaceType con_env2 t))) arg_tys, ifConFields = dataConFieldLabels data_con, ifConStricts = map (toIfaceBang con_env2) (dataConImplBangs data_con), ifConSrcStricts = map toIfaceSrcBang (dataConSrcBangs data_con)} where (univ_tvs, ex_tvs, eq_spec, theta, arg_tys, _) = dataConFullSig data_con user_bndrs = dataConUserTyVarBinders data_con -- Tidy the univ_tvs of the data constructor to be identical -- to the tyConTyVars of the type constructor. This means -- (a) we don't need to redundantly put them into the interface file -- (b) when pretty-printing an Iface data declaration in H98-style syntax, -- we know that the type variables will line up -- The latter (b) is important because we pretty-print type constructors -- by converting to Iface syntax and pretty-printing that con_env1 = (fst tc_env1, mkVarEnv (zipEqual "ifaceConDecl" univ_tvs tc_tyvars)) -- A bit grimy, perhaps, but it's simple! (con_env2, ex_tvs') = tidyVarBndrs con_env1 ex_tvs user_bndrs' = map (tidyUserForAllTyBinder con_env2) user_bndrs to_eq_spec (tv,ty) = (tidyTyVar con_env2 tv, tidyToIfaceType con_env2 ty) -- By this point, we have tidied every universal and existential -- tyvar. Because of the dcUserForAllTyBinders invariant -- (see Note [DataCon user type variable binders]), *every* -- user-written tyvar must be contained in the substitution that -- tidying produced. Therefore, tidying the user-written tyvars is a -- simple matter of looking up each variable in the substitution, -- which tidyTyCoVarOcc accomplishes. 
tidyUserForAllTyBinder :: TidyEnv -> InvisTVBinder -> InvisTVBinder tidyUserForAllTyBinder env (Bndr tv vis) = Bndr (tidyTyCoVarOcc env tv) vis classToIfaceDecl :: TidyEnv -> Class -> (TidyEnv, IfaceDecl) classToIfaceDecl env clas = ( env1 , IfaceClass { ifName = getName tycon, ifRoles = tyConRoles (classTyCon clas), ifBinders = toIfaceForAllBndrs tc_binders, ifBody = body, ifFDs = map toIfaceFD clas_fds }) where (_, clas_fds, sc_theta, _, clas_ats, op_stuff) = classExtraBigSig clas tycon = classTyCon clas body | isAbstractTyCon tycon = IfAbstractClass | otherwise = IfConcreteClass { ifClassCtxt = tidyToIfaceContext env1 sc_theta, ifATs = map toIfaceAT clas_ats, ifSigs = map toIfaceClassOp op_stuff, ifMinDef = toIfaceBooleanFormula $ fmap (mkIfLclName . getOccFS) (classMinimalDef clas) } (env1, tc_binders) = tidyTyConBinders env (tyConBinders tycon) toIfaceAT :: ClassATItem -> IfaceAT toIfaceAT (ATI tc def) = IfaceAT if_decl (fmap (tidyToIfaceType env2 . fst) def) where (env2, if_decl) = tyConToIfaceDecl env1 tc toIfaceClassOp (sel_id, def_meth) = assert (sel_tyvars == binderVars tc_binders) $ IfaceClassOp (getName sel_id) (tidyToIfaceType env1 op_ty) (fmap toDmSpec def_meth) where -- Be careful when splitting the type, because of things -- like class Foo a where -- op :: (?x :: String) => a -> a -- and class Baz a where -- op :: (Ord a) => a -> a (sel_tyvars, rho_ty) = splitForAllTyCoVars (idType sel_id) op_ty = funResultTy rho_ty toDmSpec :: (Name, DefMethSpec Type) -> DefMethSpec IfaceType toDmSpec (_, VanillaDM) = VanillaDM toDmSpec (_, GenericDM dm_ty) = GenericDM (tidyToIfaceType env1 dm_ty) toIfaceFD (tvs1, tvs2) = (map (tidyTyVar env1) tvs1 ,map (tidyTyVar env1) tvs2) -------------------------- tidyTyConBinder :: TidyEnv -> TyConBinder -> (TidyEnv, TyConBinder) -- If the type variable "binder" is in scope, don't re-bind it -- In a class decl, for example, the ATD binders mention -- (amd must mention) the class tyvars tidyTyConBinder env@(_, subst) tvb@(Bndr 
tv vis) = case lookupVarEnv subst tv of Just tv' -> (env, Bndr tv' vis) Nothing -> tidyForAllTyBinder env tvb tidyTyConBinders :: TidyEnv -> [TyConBinder] -> (TidyEnv, [TyConBinder]) tidyTyConBinders = mapAccumL tidyTyConBinder tidyTyVar :: TidyEnv -> TyVar -> IfLclName tidyTyVar (_, subst) tv = toIfaceTyVar (lookupVarEnv subst tv `orElse` tv) toIfaceBooleanFormula :: BooleanFormula IfLclName -> IfaceBooleanFormula toIfaceBooleanFormula = \case Var nm -> IfVar nm And bfs -> IfAnd (map (toIfaceBooleanFormula . unLoc) bfs) Or bfs -> IfOr (map (toIfaceBooleanFormula . unLoc) bfs) Parens bf -> IfParens (toIfaceBooleanFormula . unLoc $ bf) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Errors/0000755000000000000000000000000007346545000020171 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Errors/Ppr.hs0000644000000000000000000003564207346545000021300 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic IfaceMessage {-# LANGUAGE InstanceSigs #-} module GHC.Iface.Errors.Ppr ( IfaceMessageOpts(..) 
, interfaceErrorHints , interfaceErrorReason , interfaceErrorDiagnostic , missingInterfaceErrorHints , missingInterfaceErrorReason , missingInterfaceErrorDiagnostic , readInterfaceErrorDiagnostic , lookingForHerald , cantFindErrorX , mayShowLocations , pkgHiddenHint ) where import GHC.Prelude import GHC.Types.Error import GHC.Types.Hint.Ppr () -- Outputable GhcHint import GHC.Types.Error.Codes import GHC.Types.Name import GHC.Types.TyThing import GHC.Unit.State import GHC.Unit.Module import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Iface.Errors.Types defaultIfaceMessageOpts :: IfaceMessageOpts defaultIfaceMessageOpts = IfaceMessageOpts { ifaceShowTriedFiles = False , ifaceBuildingCabalPackage = NoBuildingCabalPackage } instance HasDefaultDiagnosticOpts IfaceMessageOpts where defaultOpts = defaultIfaceMessageOpts instance Diagnostic IfaceMessage where type DiagnosticOpts IfaceMessage = IfaceMessageOpts diagnosticMessage opts reason = mkSimpleDecorated $ interfaceErrorDiagnostic opts reason diagnosticReason = interfaceErrorReason diagnosticHints = interfaceErrorHints diagnosticCode = constructorCode interfaceErrorHints :: IfaceMessage -> [GhcHint] interfaceErrorHints = \ case Can'tFindInterface err _looking_for -> missingInterfaceErrorHints err Can'tFindNameInInterface {} -> noHints CircularImport {} -> noHints missingInterfaceErrorHints :: MissingInterfaceError -> [GhcHint] missingInterfaceErrorHints = \case BadSourceImport {} -> noHints HomeModError {} -> noHints DynamicHashMismatchError {} -> noHints CantFindErr {} -> noHints BadIfaceFile {} -> noHints FailedToLoadDynamicInterface {} -> noHints interfaceErrorReason :: IfaceMessage -> DiagnosticReason interfaceErrorReason (Can'tFindInterface err _) = missingInterfaceErrorReason err interfaceErrorReason (Can'tFindNameInInterface {}) = ErrorWithoutFlag interfaceErrorReason (CircularImport {}) = ErrorWithoutFlag missingInterfaceErrorReason :: MissingInterfaceError -> DiagnosticReason 
missingInterfaceErrorReason = \ case BadSourceImport {} -> ErrorWithoutFlag HomeModError {} -> ErrorWithoutFlag DynamicHashMismatchError {} -> ErrorWithoutFlag CantFindErr {} -> ErrorWithoutFlag BadIfaceFile {} -> ErrorWithoutFlag FailedToLoadDynamicInterface {} -> ErrorWithoutFlag prettyCantFindWhat :: FindOrLoad -> FindingModuleOrInterface -> AmbiguousOrMissing -> SDoc prettyCantFindWhat Find FindingModule AoM_Missing = text "Could not find module" prettyCantFindWhat Load FindingModule AoM_Missing = text "Could not load module" prettyCantFindWhat _ FindingInterface AoM_Missing = text "Failed to load interface for" prettyCantFindWhat _ FindingModule AoM_Ambiguous = text "Ambiguous module name" prettyCantFindWhat _ FindingInterface AoM_Ambiguous = text "Ambiguous interface for" isAmbiguousInstalledReason :: CantFindInstalledReason -> AmbiguousOrMissing isAmbiguousInstalledReason (MultiplePackages {}) = AoM_Ambiguous isAmbiguousInstalledReason _ = AoM_Missing isLoadOrFindReason :: CantFindInstalledReason -> FindOrLoad isLoadOrFindReason NotAModule {} = Find isLoadOrFindReason (GenericMissing a b c _) | null a && null b && null c = Find isLoadOrFindReason (ModuleSuggestion {}) = Find isLoadOrFindReason _ = Load data FindOrLoad = Find | Load data AmbiguousOrMissing = AoM_Ambiguous | AoM_Missing cantFindError :: IfaceMessageOpts -> FindingModuleOrInterface -> CantFindInstalled -> SDoc cantFindError opts = cantFindErrorX (pkgHiddenHint (const empty) (ifaceBuildingCabalPackage opts)) (mayShowLocations "-v" (ifaceShowTriedFiles opts)) pkgHiddenHint :: (UnitInfo -> SDoc) -> BuildingCabalPackage -> UnitInfo -> SDoc pkgHiddenHint _hint YesBuildingCabalPackage pkg = text "Perhaps you need to add" <+> quotes (ppr (unitPackageName pkg)) <+> text "to the build-depends in your .cabal file." 
pkgHiddenHint hint _not_cabal pkg = hint pkg mayShowLocations :: String -> Bool -> [FilePath] -> SDoc mayShowLocations option verbose files | null files = empty | not verbose = text "Use" <+> text option <+> text "to see a list of the files searched for." | otherwise = hang (text "Locations searched:") 2 $ vcat (map text files) -- | General version of cantFindError which has some holes which allow GHC/GHCi to display slightly different -- error messages. cantFindErrorX :: (UnitInfo -> SDoc) -> ([FilePath] -> SDoc) -> FindingModuleOrInterface -> CantFindInstalled -> SDoc cantFindErrorX pkg_hidden_hint may_show_locations mod_or_interface (CantFindInstalled mod_name cfir) = let ambig = isAmbiguousInstalledReason cfir find_or_load = isLoadOrFindReason cfir ppr_what = prettyCantFindWhat find_or_load mod_or_interface ambig in (ppr_what <+> quotes (ppr mod_name) <> dot) $$ case cfir of NoUnitIdMatching pkg cands -> let looks_like_srcpkgid :: SDoc looks_like_srcpkgid = -- Unsafely coerce a unit id (i.e. an installed package component -- identifier) into a PackageId and see if it means anything. case cands of (pkg:pkgs) -> parens (text "This unit ID looks like the source package ID;" $$ text "the real unit ID is" <+> quotes (ftext (unitIdFS (unitId pkg))) $$ (if null pkgs then empty else text "and" <+> int (length pkgs) <+> text "other candidate" <> plural pkgs)) -- Todo: also check if it looks like a package name! [] -> empty in hsep [ text "no unit id matching" <+> quotes (ppr pkg) , text "was found"] $$ looks_like_srcpkgid MissingPackageFiles pkg files -> text "There are files missing in the " <> quotes (ppr pkg) <+> text "package," $$ text "try running 'ghc-pkg check'." $$ may_show_locations files MissingPackageWayFiles build pkg files -> text "Perhaps you haven't installed the " <> text build <+> text "libraries for package " <> quotes (ppr pkg) <> char '?' 
$$ may_show_locations files ModuleSuggestion ms fps -> let pp_suggestions :: [ModuleSuggestion] -> SDoc pp_suggestions sugs | null sugs = empty | otherwise = hang (text "Perhaps you meant") 2 (vcat (map pp_sugg sugs)) -- NB: Prefer the *original* location, and then reexports, and then -- package flags when making suggestions. ToDo: if the original package -- also has a reexport, prefer that one pp_sugg (SuggestVisible m mod o) = ppr m <+> provenance o where provenance ModHidden = empty provenance (ModUnusable _) = empty provenance (ModOrigin{ fromOrigUnit = e, fromExposedReexport = res, fromPackageFlag = f }) | Just True <- e = parens (text "from" <+> ppr (moduleUnit mod)) | f && moduleName mod == m = parens (text "from" <+> ppr (moduleUnit mod)) | (pkg:_) <- res = parens (text "from" <+> ppr (mkUnit pkg) <> comma <+> text "reexporting" <+> ppr mod) | f = parens (text "defined via package flags to be" <+> ppr mod) | otherwise = empty pp_sugg (SuggestHidden m mod o) = ppr m <+> provenance o where provenance ModHidden = empty provenance (ModUnusable _) = empty provenance (ModOrigin{ fromOrigUnit = e, fromHiddenReexport = rhs }) | Just False <- e = parens (text "needs flag -package-id" <+> ppr (moduleUnit mod)) | (pkg:_) <- rhs = parens (text "needs flag -package-id" <+> ppr (mkUnit pkg)) | otherwise = empty in pp_suggestions ms $$ may_show_locations fps NotAModule -> text "It is not a module in the current program, or in any known package." 
CouldntFindInFiles fps -> vcat (map text fps) MultiplePackages mods | Just pkgs <- unambiguousPackages -> sep [text "it was found in multiple packages:", hsep (map ppr pkgs)] | otherwise -> vcat (map pprMod mods) where unambiguousPackages = foldl' unambiguousPackage (Just []) mods unambiguousPackage (Just xs) (m, ModOrigin (Just _) _ _ _) = Just (moduleUnit m : xs) unambiguousPackage _ _ = Nothing GenericMissing pkg_hiddens mod_hiddens unusables files -> vcat (map pkg_hidden pkg_hiddens) $$ vcat (map mod_hidden mod_hiddens) $$ vcat (map unusable unusables) $$ may_show_locations files where pprMod (m, o) = text "it is bound as" <+> ppr m <+> text "by" <+> pprOrigin m o pprOrigin _ ModHidden = panic "cantFindErr: bound by mod hidden" pprOrigin _ (ModUnusable _) = panic "cantFindErr: bound by mod unusable" pprOrigin m (ModOrigin e res _ f) = sep $ punctuate comma ( if e == Just True then [text "package" <+> ppr (moduleUnit m)] else [] ++ map ((text "a reexport in package" <+>) .ppr.mkUnit) res ++ if f then [text "a package flag"] else [] ) pkg_hidden :: (Unit, Maybe UnitInfo) -> SDoc pkg_hidden (uid, uif) = text "It is a member of the hidden package" <+> quotes (ppr uid) --FIXME: we don't really want to show the unit id here we should -- show the source package id or installed package id if it's ambiguous <> dot $$ maybe empty pkg_hidden_hint uif mod_hidden pkg = text "it is a hidden module in the package" <+> quotes (ppr pkg) unusable (UnusableUnit unit reason reexport) = text "It is " <> (if reexport then text "reexported from the package" else text "a member of the package") <+> quotes (ppr unit) $$ pprReason (text "which is") reason interfaceErrorDiagnostic :: IfaceMessageOpts -> IfaceMessage -> SDoc interfaceErrorDiagnostic opts = \ case Can'tFindNameInInterface name relevant_tyThings -> missingDeclInInterface name relevant_tyThings Can'tFindInterface err looking_for -> hangNotEmpty (lookingForHerald looking_for) 2 (missingInterfaceErrorDiagnostic opts err) 
CircularImport mod -> text "Circular imports: module" <+> quotes (ppr mod) <+> text "depends on itself" lookingForHerald :: InterfaceLookingFor -> SDoc lookingForHerald looking_for = case looking_for of LookingForName {} -> empty LookingForModule {} -> empty LookingForHiBoot mod -> text "Could not find hi-boot interface for" <+> quotes (ppr mod) <> colon LookingForSig sig -> text "Could not find interface file for signature" <+> quotes (ppr sig) <> colon readInterfaceErrorDiagnostic :: ReadInterfaceError -> SDoc readInterfaceErrorDiagnostic = \ case ExceptionOccurred fp ex -> hang (text "Exception when reading interface file " <+> text fp) 2 (text (showException ex)) HiModuleNameMismatchWarn _ m1 m2 -> hiModuleNameMismatchWarn m1 m2 missingInterfaceErrorDiagnostic :: IfaceMessageOpts -> MissingInterfaceError -> SDoc missingInterfaceErrorDiagnostic opts reason = case reason of BadSourceImport m -> badSourceImport m HomeModError im ml -> homeModError im ml DynamicHashMismatchError m ml -> dynamicHashMismatchError m ml CantFindErr us module_or_interface cfi -> pprWithUnitState us $ cantFindError opts module_or_interface cfi BadIfaceFile rie -> readInterfaceErrorDiagnostic rie FailedToLoadDynamicInterface wanted_mod err -> hang (text "Failed to load dynamic interface file for" <+> ppr wanted_mod <> colon) 2 (readInterfaceErrorDiagnostic err) hiModuleNameMismatchWarn :: Module -> Module -> SDoc hiModuleNameMismatchWarn requested_mod read_mod | moduleUnit requested_mod == moduleUnit read_mod = sep [text "Interface file contains module" <+> quotes (ppr read_mod) <> comma, text "but we were expecting module" <+> quotes (ppr requested_mod), sep [text "Probable cause: the source code which generated interface file", text "has an incompatible module name" ] ] | otherwise = -- ToDo: This will fail to have enough qualification when the package IDs -- are the same withPprStyle (mkUserStyle alwaysQualify AllTheWay) $ -- we want the Modules below to be qualified with package 
names, -- so reset the NamePprCtx setting. hsep [ text "Something is amiss; requested module " , ppr requested_mod , text "differs from name found in the interface file" , ppr read_mod , parens (text "if these names look the same, try again with -dppr-debug") ] dynamicHashMismatchError :: Module -> ModLocation -> SDoc dynamicHashMismatchError wanted_mod loc = vcat [ text "Dynamic hash doesn't match for" <+> quotes (ppr wanted_mod) , text "Normal interface file from" <+> text (ml_hi_file loc) , text "Dynamic interface file from" <+> text (ml_dyn_hi_file loc) , text "You probably need to recompile" <+> quotes (ppr wanted_mod) ] homeModError :: InstalledModule -> ModLocation -> SDoc -- See Note [Home module load error] homeModError mod location = text "attempting to use module " <> quotes (ppr mod) <> (case ml_hs_file location of Just file -> space <> parens (text file) Nothing -> empty) <+> text "which is not loaded" missingDeclInInterface :: Name -> [TyThing] -> SDoc missingDeclInInterface name things = whenPprDebug (found_things $$ empty) $$ hang (text "Can't find interface-file declaration for" <+> pprNameSpace (nameNameSpace name) <+> ppr name) 2 (vcat [text "Probable cause: bug in .hi-boot file, or inconsistent .hi file", text "Use -ddump-if-trace to get an idea of which file caused the error"]) where found_things = hang (text "Found the following declarations in" <+> ppr (nameModule name) <> colon) 2 (vcat (map ppr things)) badSourceImport :: Module -> SDoc badSourceImport mod = hang (text "You cannot {-# SOURCE #-} import a module from another package") 2 (text "but" <+> quotes (ppr mod) <+> text "is from package" <+> quotes (ppr (moduleUnit mod))) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Errors/Types.hs0000644000000000000000000000545607346545000021643 0ustar0000000000000000 {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} module GHC.Iface.Errors.Types ( MissingInterfaceError(..) 
, InterfaceLookingFor(..) , IfaceMessage(..) , ReadInterfaceError(..) , CantFindInstalled(..) , CantFindInstalledReason(..) , FindingModuleOrInterface(..) , BuildingCabalPackage(..) , IfaceMessageOpts(..) ) where import GHC.Prelude import GHC.Types.Name (Name) import GHC.Types.TyThing (TyThing) import GHC.Unit.Types (Module, InstalledModule, UnitId, Unit) import GHC.Unit.State (UnitState, ModuleSuggestion, ModuleOrigin, UnusableUnit, UnitInfo) import GHC.Exception.Type (SomeException) import GHC.Unit.Types ( IsBootInterface ) import Language.Haskell.Syntax.Module.Name ( ModuleName ) import GHC.Generics ( Generic ) import GHC.Unit.Module.Location data IfaceMessageOpts = IfaceMessageOpts { ifaceShowTriedFiles :: !Bool -- ^ Whether to show files we tried to look for or not when printing loader errors , ifaceBuildingCabalPackage :: !BuildingCabalPackage } data InterfaceLookingFor = LookingForName !Name | LookingForHiBoot !Module | LookingForModule !ModuleName !IsBootInterface | LookingForSig !InstalledModule data IfaceMessage = Can'tFindInterface MissingInterfaceError InterfaceLookingFor | Can'tFindNameInInterface Name [TyThing] -- possibly relevant TyThings | CircularImport !Module deriving Generic data MissingInterfaceError = BadSourceImport !Module | HomeModError !InstalledModule !ModLocation | DynamicHashMismatchError !Module !ModLocation | CantFindErr !UnitState FindingModuleOrInterface CantFindInstalled | BadIfaceFile ReadInterfaceError | FailedToLoadDynamicInterface Module ReadInterfaceError deriving Generic data ReadInterfaceError = ExceptionOccurred FilePath SomeException | HiModuleNameMismatchWarn FilePath Module Module deriving Generic data CantFindInstalledReason = NoUnitIdMatching UnitId [UnitInfo] | MissingPackageFiles UnitId [FilePath] | MissingPackageWayFiles String UnitId [FilePath] | ModuleSuggestion [ModuleSuggestion] [FilePath] | NotAModule | CouldntFindInFiles [FilePath] | GenericMissing [(Unit, Maybe UnitInfo)] [Unit] [UnusableUnit] [FilePath] | 
MultiplePackages [(Module, ModuleOrigin)] deriving Generic data CantFindInstalled = CantFindInstalled ModuleName CantFindInstalledReason deriving Generic data FindingModuleOrInterface = FindingModule | FindingInterface -- | Pass to a 'DriverMessage' the information whether or not the -- '-fbuilding-cabal-package' flag is set. data BuildingCabalPackage = YesBuildingCabalPackage | NoBuildingCabalPackage deriving Eq ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Ext/0000755000000000000000000000000007346545000017455 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Ext/Fields.hs0000644000000000000000000000541407346545000021223 0ustar0000000000000000module GHC.Iface.Ext.Fields ( ExtensibleFields (..) , FieldName , emptyExtensibleFields -- * Reading , readField , readFieldWith -- * Writing , writeField , writeFieldWith -- * Deletion , deleteField ) where import GHC.Prelude import GHC.Utils.Binary import Control.Monad import Data.Map ( Map ) import qualified Data.Map as Map import Control.DeepSeq type FieldName = String newtype ExtensibleFields = ExtensibleFields { getExtensibleFields :: (Map FieldName BinData) } instance Binary ExtensibleFields where put_ bh (ExtensibleFields fs) = do put_ bh (Map.size fs :: Int) -- Put the names of each field, and reserve a space -- for a payload pointer after each name: header_entries <- forM (Map.toList fs) $ \(name, dat) -> do put_ bh name field_p_p <- tellBinWriter bh put_ bh field_p_p return (field_p_p, dat) -- Now put the payloads and use the reserved space -- to point to the start of each payload: forM_ header_entries $ \(field_p_p, dat) -> do field_p <- tellBinWriter bh putAtRel bh field_p_p field_p seekBinWriter bh field_p put_ bh dat get bh = do n <- get bh :: IO Int -- Get the names and field pointers: header_entries <- replicateM n $ (,) <$> get bh <*> getRelBin bh -- Seek to and get each field's payload: fields <- forM header_entries $ \(name, field_p) -> do seekBinReaderRel bh field_p dat <- get bh 
return (name, dat) return . ExtensibleFields . Map.fromList $ fields instance NFData ExtensibleFields where rnf (ExtensibleFields fs) = rnf fs emptyExtensibleFields :: ExtensibleFields emptyExtensibleFields = ExtensibleFields Map.empty -------------------------------------------------------------------------------- -- | Reading readField :: Binary a => FieldName -> ExtensibleFields -> IO (Maybe a) readField name = readFieldWith name get readFieldWith :: FieldName -> (ReadBinHandle -> IO a) -> ExtensibleFields -> IO (Maybe a) readFieldWith name read fields = sequence $ ((read =<<) . dataHandle) <$> Map.lookup name (getExtensibleFields fields) -------------------------------------------------------------------------------- -- | Writing writeField :: Binary a => FieldName -> a -> ExtensibleFields -> IO ExtensibleFields writeField name x = writeFieldWith name (`put_` x) writeFieldWith :: FieldName -> (WriteBinHandle -> IO ()) -> ExtensibleFields -> IO ExtensibleFields writeFieldWith name write fields = do bh <- openBinMem (1024 * 1024) write bh -- bd <- handleData bh return $ ExtensibleFields (Map.insert name bd $ getExtensibleFields fields) deleteField :: FieldName -> ExtensibleFields -> ExtensibleFields deleteField name (ExtensibleFields fs) = ExtensibleFields $ Map.delete name fs ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Recomp/0000755000000000000000000000000007346545000020142 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Recomp/Binary.hs0000644000000000000000000000316507346545000021727 0ustar0000000000000000 -- | Computing fingerprints of values serializable with GHC's \"Binary\" module. 
module GHC.Iface.Recomp.Binary ( -- * Computing fingerprints fingerprintBinMem , computeFingerprint , putNameLiterally ) where import GHC.Prelude import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Types.Name import GHC.Utils.Panic.Plain import GHC.Iface.Type (putIfaceType) fingerprintBinMem :: WriteBinHandle -> IO Fingerprint fingerprintBinMem bh = withBinBuffer bh f where f bs = -- we need to take care that we force the result here -- lest a reference to the ByteString may leak out of -- withBinBuffer. let fp = fingerprintByteString bs in fp `seq` return fp computeFingerprint :: (Binary a) => (WriteBinHandle -> Name -> IO ()) -> a -> IO Fingerprint computeFingerprint put_nonbinding_name a = do bh <- fmap set_user_data $ openBinMem (3*1024) -- just less than a block put_ bh a fingerprintBinMem bh where set_user_data bh = setWriterUserData bh $ mkWriterUserData [ mkSomeBinaryWriter $ mkWriter putIfaceType , mkSomeBinaryWriter $ mkWriter put_nonbinding_name , mkSomeBinaryWriter $ simpleBindingNameWriter $ mkWriter putNameLiterally , mkSomeBinaryWriter $ mkWriter putFS ] -- | Used when we want to fingerprint a structure without depending on the -- fingerprints of external Names that it refers to. putNameLiterally :: WriteBinHandle -> Name -> IO () putNameLiterally bh name = assert (isExternalName name) $ do put_ bh $! nameModule name put_ bh $! 
nameOccName name ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Syntax.hs0000644000000000000000000033471607346545000020555 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Iface.Syntax ( module GHC.Iface.Type, IfaceDecl(..), IfaceFamTyConFlav(..), IfaceClassOp(..), IfaceAT(..), IfaceConDecl(..), IfaceConDecls(..), IfaceEqSpec, IfaceExpr(..), IfaceAlt(..), IfaceLetBndr(..), IfaceBinding, IfaceBindingX(..), IfaceMaybeRhs(..), IfaceConAlt(..), IfaceIdInfo, IfaceIdDetails(..), IfaceUnfolding(..), IfGuidance(..), IfaceInfoItem(..), IfaceRule(..), IfaceAnnotation(..), IfaceAnnTarget, IfaceWarnings(..), IfaceWarningTxt(..), IfaceStringLiteral(..), IfaceDefault(..), IfaceClsInst(..), IfaceFamInst(..), IfaceTickish(..), IfaceClassBody(..), IfaceBooleanFormula(..), IfaceBang(..), IfaceSrcBang(..), SrcUnpackedness(..), SrcStrictness(..), IfaceAxBranch(..), IfaceTyConParent(..), IfaceCompleteMatch(..), IfaceLFInfo(..), IfaceTopBndrInfo(..), IfaceImport(..), ImpIfaceList(..), -- * Binding names IfaceTopBndr, putIfaceTopBndr, getIfaceTopBndr, -- Misc ifaceDeclImplicitBndrs, visibleIfConDecls, ifaceDeclFingerprints, fromIfaceBooleanFormula, fromIfaceWarnings, fromIfaceWarningTxt, -- Free Names freeNamesIfDecl, freeNamesIfRule, freeNamesIfFamInst, freeNamesIfConDecls, -- Pretty printing pprIfaceExpr, pprIfaceDecl, AltPpr(..), ShowSub(..), ShowHowMuch(..), showToIface, showToHeader ) where import GHC.Prelude import GHC.Data.FastString import GHC.Builtin.Names ( unrestrictedFunTyConKey, liftedTypeKindTyConKey, constraintKindTyConKey ) import GHC.Types.Unique ( hasKey ) import GHC.Iface.Type import GHC.Iface.Recomp.Binary import GHC.Core( IsOrphan, isOrphan, UnfoldingCache(..) 
) import GHC.Types.Demand import GHC.Types.Cpr import GHC.Core.Class import GHC.Types.FieldLabel import GHC.Types.Name.Set import GHC.Core.Coercion.Axiom ( BranchIndex ) import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.CostCentre import GHC.Types.Literal import GHC.Types.ForeignCall import GHC.Types.Annotations( AnnPayload, AnnTarget ) import GHC.Types.Basic import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Data.BooleanFormula ( BooleanFormula(..), pprBooleanFormula, isTrue ) import GHC.Types.Var( VarBndr(..), binderVar, tyVarSpecToBinders, visArgTypeLike ) import GHC.Core.TyCon ( Role (..), Injectivity(..), tyConBndrVisForAllTyFlag ) import GHC.Core.DataCon (SrcStrictness(..), SrcUnpackedness(..)) import GHC.Builtin.Types ( constraintKindTyConName ) import GHC.Stg.InferTags.TagSig import GHC.Parser.Annotation (noLocA) import GHC.Hs.Extension ( GhcRn ) import GHC.Hs.Doc ( WithHsDocIdentifiers(..) ) import GHC.Utils.Lexeme (isLexSym) import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Utils.Binary.Typeable () -- instance Binary AnnPayload import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Misc( dropList, filterByList, notNull, unzipWith, seqList, zipWithEqual ) import Control.Monad import System.IO.Unsafe import Control.DeepSeq import Data.Proxy infixl 3 &&& {- ************************************************************************ * * Declarations * * ************************************************************************ -} data IfaceImport = IfaceImport ImpDeclSpec ImpIfaceList data ImpIfaceList = ImpIfaceAll -- ^ no user import list | ImpIfaceExplicit !IfGlobalRdrEnv | ImpIfaceEverythingBut !NameSet -- | A binding top-level 'Name' in an interface file (e.g. the name of an -- 'IfaceDecl'). type IfaceTopBndr = Name -- It's convenient to have a Name in the Iface syntax, although in each -- case the namespace is implied by the context. 
However, having a -- Name makes things like ifaceDeclImplicitBndrs and ifaceDeclFingerprints -- very convenient. Moreover, having the key of the binder means that -- we can encode known-key things cleverly in the symbol table. See Note -- [Symbol table representation of Names] -- -- We don't serialise the namespace onto the disk though; rather we -- drop it when serialising and add it back in when deserialising. getIfaceTopBndr :: ReadBinHandle -> IO IfaceTopBndr getIfaceTopBndr bh = get bh putIfaceTopBndr :: WriteBinHandle -> IfaceTopBndr -> IO () putIfaceTopBndr bh name = case findUserDataWriter (Proxy @BindingName) bh of tbl -> --pprTrace "putIfaceTopBndr" (ppr name) $ putEntry tbl bh (BindingName name) data IfaceDecl = IfaceId { ifName :: IfaceTopBndr, ifType :: IfaceType, ifIdDetails :: IfaceIdDetails, ifIdInfo :: IfaceIdInfo } | IfaceData { ifName :: IfaceTopBndr, -- Type constructor ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceType, -- Result kind of type constructor ifCType :: Maybe CType, -- C type for CAPI FFI ifRoles :: [Role], -- Roles ifCtxt :: IfaceContext, -- The "stupid theta" ifCons :: IfaceConDecls, -- Includes new/data/data family info ifGadtSyntax :: Bool, -- True <=> declared using -- GADT syntax ifParent :: IfaceTyConParent -- The axiom, for a newtype, -- or data/newtype family instance } | IfaceSynonym { ifName :: IfaceTopBndr, -- Type constructor ifRoles :: [Role], -- Roles ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceKind, -- Kind of the *result* ifSynRhs :: IfaceType } | IfaceFamily { ifName :: IfaceTopBndr, -- Type constructor ifResVar :: Maybe IfLclName, -- Result variable name, used -- only for pretty-printing -- with --show-iface ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceKind, -- Kind of the *tycon* ifFamFlav :: IfaceFamTyConFlav, ifFamInj :: Injectivity } -- injectivity information | IfaceClass { ifName :: IfaceTopBndr, -- Name of the class TyCon ifRoles :: [Role], -- Roles ifBinders :: [IfaceTyConBinder], ifFDs 
:: [FunDep IfLclName], -- Functional dependencies ifBody :: IfaceClassBody -- Methods, superclasses, ATs } | IfaceAxiom { ifName :: IfaceTopBndr, -- Axiom name ifTyCon :: IfaceTyCon, -- LHS TyCon ifRole :: Role, -- Role of axiom ifAxBranches :: [IfaceAxBranch] -- Branches } | IfacePatSyn { ifName :: IfaceTopBndr, -- Name of the pattern synonym ifPatIsInfix :: Bool, ifPatMatcher :: (IfExtName, Bool), ifPatBuilder :: Maybe (IfExtName, Bool), -- Everything below is redundant, -- but needed to implement pprIfaceDecl ifPatUnivBndrs :: [IfaceForAllSpecBndr], ifPatExBndrs :: [IfaceForAllSpecBndr], ifPatProvCtxt :: IfaceContext, ifPatReqCtxt :: IfaceContext, ifPatArgs :: [IfaceType], ifPatTy :: IfaceType, ifFieldLabels :: [FieldLabel] } -- See also 'ClassBody' data IfaceClassBody -- Abstract classes don't specify their body; they only occur in @hs-boot@ and -- @hsig@ files. = IfAbstractClass | IfConcreteClass { ifClassCtxt :: IfaceContext, -- Super classes ifATs :: [IfaceAT], -- Associated type families ifSigs :: [IfaceClassOp], -- Method signatures ifMinDef :: IfaceBooleanFormula -- Minimal complete definition } data IfaceBooleanFormula = IfVar IfLclName | IfAnd [IfaceBooleanFormula] | IfOr [IfaceBooleanFormula] | IfParens IfaceBooleanFormula fromIfaceBooleanFormula :: IfaceBooleanFormula -> BooleanFormula IfLclName fromIfaceBooleanFormula = \case IfVar nm -> Var nm IfAnd ibfs -> And (map (noLocA . fromIfaceBooleanFormula) ibfs) IfOr ibfs -> Or (map (noLocA . fromIfaceBooleanFormula) ibfs) IfParens ibf -> Parens (noLocA . 
fromIfaceBooleanFormula $ ibf) data IfaceTyConParent = IfNoParent | IfDataInstance IfExtName -- Axiom name IfaceTyCon -- Family TyCon (pretty-printing only, not used in GHC.IfaceToCore) -- see Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr IfaceAppArgs -- Arguments of the family TyCon data IfaceFamTyConFlav = IfaceDataFamilyTyCon -- Data family | IfaceOpenSynFamilyTyCon | IfaceClosedSynFamilyTyCon (Maybe (IfExtName, [IfaceAxBranch])) -- ^ Name of associated axiom and branches for pretty printing purposes, -- or 'Nothing' for an empty closed family without an axiom -- See Note [Pretty printing via Iface syntax] in "GHC.Types.TyThing.Ppr" | IfaceAbstractClosedSynFamilyTyCon | IfaceBuiltInSynFamTyCon -- for pretty printing purposes only data IfaceClassOp = IfaceClassOp IfaceTopBndr IfaceType -- Class op type (Maybe (DefMethSpec IfaceType)) -- Default method -- The types of both the class op itself, -- and the default method, are *not* quantified -- over the class variables data IfaceAT = IfaceAT -- See GHC.Core.Class.ClassATItem IfaceDecl -- The associated type declaration (Maybe IfaceType) -- Default associated type instance, if any -- This is just like CoAxBranch data IfaceAxBranch = IfaceAxBranch { ifaxbTyVars :: [IfaceTvBndr] , ifaxbEtaTyVars :: [IfaceTvBndr] , ifaxbCoVars :: [IfaceIdBndr] , ifaxbLHS :: IfaceAppArgs , ifaxbRoles :: [Role] , ifaxbRHS :: IfaceType , ifaxbIncomps :: [BranchIndex] } -- See Note [Storing compatibility] in GHC.Core.Coercion.Axiom data IfaceConDecls = IfAbstractTyCon -- c.f TyCon.AbstractTyCon | IfDataTyCon !Bool [IfaceConDecl] -- Data type decls -- The Bool is True for "type data" declarations. 
-- see Note [Type data declarations] in GHC.Rename.Module | IfNewTyCon IfaceConDecl -- Newtype decls -- For IfDataTyCon and IfNewTyCon we store: -- * the data constructor(s); -- The field labels are stored individually in the IfaceConDecl -- (there is some redundancy here, because a field label may occur -- in multiple IfaceConDecls and represent the same field label) data IfaceConDecl = IfCon { ifConName :: IfaceTopBndr, -- Constructor name ifConWrapper :: Bool, -- True <=> has a wrapper ifConInfix :: Bool, -- True <=> declared infix -- The universal type variables are precisely those -- of the type constructor of this data constructor -- This is *easy* to guarantee when creating the IfCon -- but it's not so easy for the original TyCon/DataCon -- So this guarantee holds for IfaceConDecl, but *not* for DataCon ifConExTCvs :: [IfaceBndr], -- Existential ty/covars ifConUserTvBinders :: [IfaceForAllSpecBndr], -- The tyvars, in the order the user wrote them -- INVARIANT: the set of tyvars in ifConUserTvBinders is exactly the -- set of tyvars (*not* covars) of ifConExTCvs, unioned -- with the set of ifBinders (from the parent IfaceDecl) -- whose tyvars do not appear in ifConEqSpec -- See Note [DataCon user type variable binders] in GHC.Core.DataCon ifConEqSpec :: IfaceEqSpec, -- Equality constraints ifConCtxt :: IfaceContext, -- Non-stupid context ifConArgTys :: [(IfaceMult, IfaceType)],-- Arg types ifConFields :: [FieldLabel], -- ...ditto... 
(field labels) ifConStricts :: [IfaceBang], -- Empty (meaning all lazy), -- or 1-1 corresp with arg tys -- See Note [Bangs on imported data constructors] in GHC.Types.Id.Make ifConSrcStricts :: [IfaceSrcBang] } -- empty meaning no src stricts type IfaceEqSpec = [(IfLclName,IfaceType)] -- | This corresponds to an HsImplBang; that is, the final -- implementation decision about the data constructor arg data IfaceBang = IfNoBang | IfStrict | IfUnpack | IfUnpackCo IfaceCoercion -- | This corresponds to HsSrcBang data IfaceSrcBang = IfSrcBang SrcUnpackedness SrcStrictness -- See Note [Named default declarations] in GHC.Tc.Gen.Default -- | Exported named defaults data IfaceDefault = IfaceDefault { ifDefaultCls :: IfaceTyCon, -- Defaulted class ifDefaultTys :: [IfaceType], -- List of defaults ifDefaultWarn :: Maybe IfaceWarningTxt } data IfaceClsInst = IfaceClsInst { ifInstCls :: IfExtName, -- See comments with ifInstTys :: [Maybe IfaceTyCon], -- the defn of ClsInst ifDFun :: IfExtName, -- The dfun ifOFlag :: OverlapFlag, -- Overlap flag ifInstOrph :: IsOrphan, -- See Note [Orphans] in GHC.Core.InstEnv ifInstWarn :: Maybe IfaceWarningTxt } -- Warning emitted when the instance is used -- See Note [Implementation of deprecated instances] -- in GHC.Tc.Solver.Dict -- There's always a separate IfaceDecl for the DFun, which gives -- its IdInfo with its full type and version number. 
-- The instance declarations taken together have a version number, -- and we don't want that to wobble gratuitously -- If this instance decl is *used*, we'll record a usage on the dfun; -- and if the head does not change it won't be used if it wasn't before -- The ifFamInstTys field of IfaceFamInst contains a list of the rough -- match types data IfaceFamInst = IfaceFamInst { ifFamInstFam :: IfExtName -- Family name , ifFamInstTys :: [Maybe IfaceTyCon] -- See above , ifFamInstAxiom :: IfExtName -- The axiom , ifFamInstOrph :: IsOrphan -- Just like IfaceClsInst } data IfaceRule = IfaceRule { ifRuleName :: RuleName, ifActivation :: Activation, ifRuleBndrs :: [IfaceBndr], -- Tyvars and term vars ifRuleHead :: IfExtName, -- Head of lhs ifRuleArgs :: [IfaceExpr], -- Args of LHS ifRuleRhs :: IfaceExpr, ifRuleAuto :: Bool, ifRuleOrph :: IsOrphan -- Just like IfaceClsInst } data IfaceWarnings = IfWarnAll IfaceWarningTxt | IfWarnSome [(OccName, IfaceWarningTxt)] [(IfExtName, IfaceWarningTxt)] data IfaceWarningTxt = IfWarningTxt (Maybe WarningCategory) SourceText [(IfaceStringLiteral, [IfExtName])] | IfDeprecatedTxt SourceText [(IfaceStringLiteral, [IfExtName])] data IfaceStringLiteral = IfStringLiteral SourceText FastString data IfaceAnnotation = IfaceAnnotation { ifAnnotatedTarget :: IfaceAnnTarget, ifAnnotatedValue :: AnnPayload } type IfaceAnnTarget = AnnTarget OccName data IfaceCompleteMatch = IfaceCompleteMatch [IfExtName] (Maybe IfExtName) instance Outputable IfaceCompleteMatch where ppr (IfaceCompleteMatch cls mtc) = text "COMPLETE" <> colon <+> ppr cls <+> case mtc of Nothing -> empty Just tc -> dcolon <+> ppr tc -- Here's a tricky case: -- * Compile with -O module A, and B which imports A.f -- * Change function f in A, and recompile without -O -- * When we read in old A.hi we read in its IdInfo (as a thunk) -- (In earlier GHCs we used to drop IdInfo immediately on reading, -- but we do not do that now. 
Instead it's discarded when the -- ModIface is read into the various decl pools.) -- * The version comparison sees that new (=NoInfo) differs from old (=HasInfo *) -- and so gives a new version. type IfaceIdInfo = [IfaceInfoItem] data IfaceInfoItem = HsArity Arity | HsDmdSig DmdSig | HsCprSig CprSig | HsInline InlinePragma | HsUnfold Bool -- True <=> isStrongLoopBreaker is true IfaceUnfolding -- See Note [Expose recursive functions] | HsNoCafRefs | HsLFInfo IfaceLFInfo | HsTagSig TagSig -- NB: Specialisations and rules come in separately and are -- only later attached to the Id. Partial reason: some are orphans. data IfaceUnfolding = IfCoreUnfold UnfoldingSource IfUnfoldingCache -- See Note [Tying the 'CoreUnfolding' knot] IfGuidance IfaceExpr | IfDFunUnfold [IfaceBndr] [IfaceExpr] type IfUnfoldingCache = UnfoldingCache data IfGuidance = IfNoGuidance -- Compute it from the IfaceExpr | IfWhen Arity Bool Bool -- Just like UnfWhen in Core.UnfoldingGuidance -- We only serialise the IdDetails of top-level Ids, and even then -- we only need a very limited selection. Notably, none of the -- implicit ones are needed here, because they are not put in -- interface files data IfaceIdDetails = IfVanillaId | IfWorkerLikeId [CbvMark] | IfRecSelId { ifRecSelIdParent :: Either IfaceTyCon IfaceDecl , ifRecSelFirstCon :: IfaceTopBndr , ifRecSelIdIsNaughty :: Bool , ifRecSelIdFieldLabel :: FieldLabel } | IfDFunId -- | Iface type for LambdaFormInfo. Fields not relevant for imported Ids are -- omitted in this type. 
data IfaceLFInfo = IfLFReEntrant !RepArity | IfLFThunk !Bool -- True <=> updatable !Bool -- True <=> might be a function type | IfLFCon !Name | IfLFUnknown !Bool | IfLFUnlifted instance Outputable IfaceLFInfo where ppr (IfLFReEntrant arity) = text "LFReEntrant" <+> ppr arity ppr (IfLFThunk updatable mb_fun) = text "LFThunk" <+> parens (text "updatable=" <> ppr updatable <+> text "might_be_function=" <+> ppr mb_fun) ppr (IfLFCon con) = text "LFCon" <> brackets (ppr con) ppr IfLFUnlifted = text "LFUnlifted" ppr (IfLFUnknown fun_flag) = text "LFUnknown" <+> ppr fun_flag instance Binary IfaceLFInfo where put_ bh (IfLFReEntrant arity) = do putByte bh 0 put_ bh arity put_ bh (IfLFThunk updatable mb_fun) = do putByte bh 1 put_ bh updatable put_ bh mb_fun put_ bh (IfLFCon con_name) = do putByte bh 2 put_ bh con_name put_ bh (IfLFUnknown fun_flag) = do putByte bh 3 put_ bh fun_flag put_ bh IfLFUnlifted = putByte bh 4 get bh = do tag <- getByte bh case tag of 0 -> IfLFReEntrant <$> get bh 1 -> IfLFThunk <$> get bh <*> get bh 2 -> IfLFCon <$> get bh 3 -> IfLFUnknown <$> get bh 4 -> pure IfLFUnlifted _ -> panic "Invalid byte" {- Note [Versioning of instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See [https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/recompilation-avoidance#instances] ************************************************************************ * * Functions over declarations * * ************************************************************************ -} visibleIfConDecls :: IfaceConDecls -> [IfaceConDecl] visibleIfConDecls (IfAbstractTyCon {}) = [] visibleIfConDecls (IfDataTyCon _ cs) = cs visibleIfConDecls (IfNewTyCon c) = [c] ifaceDeclImplicitBndrs :: IfaceDecl -> [OccName] -- *Excludes* the 'main' name, but *includes* the implicitly-bound names -- Deeply revolting, because it has to predict what gets bound, -- especially the question of whether there's a wrapper for a datacon -- See Note [Implicit TyThings] in GHC.Driver.Env -- N.B. 
the set of names returned here *must* match the set of TyThings -- returned by GHC.Types.TyThing.implicitTyThings, in the sense that -- TyThing.getOccName should define a bijection between the two lists. -- This invariant is used in GHC.IfaceToCore.tc_iface_decl_fingerprint -- (see Note [Tricky iface loop] in GHC.Types.TyThing.) -- The order of the list does not matter. ifaceDeclImplicitBndrs (IfaceData {ifName = tc_name, ifCons = cons }) = case cons of IfAbstractTyCon {} -> [] IfNewTyCon cd -> mkNewTyCoOcc (occName tc_name) : ifaceConDeclImplicitBndrs cd IfDataTyCon type_data cds | type_data -> -- Constructors in "type data" declarations have no implicits. -- see Note [Type data declarations] in GHC.Rename.Module [occName con_name | IfCon { ifConName = con_name } <- cds] | otherwise -> concatMap ifaceConDeclImplicitBndrs cds ifaceDeclImplicitBndrs (IfaceClass { ifBody = IfAbstractClass }) = [] ifaceDeclImplicitBndrs (IfaceClass { ifName = cls_tc_name , ifBody = IfConcreteClass { ifClassCtxt = sc_ctxt, ifSigs = sigs, ifATs = ats }}) = -- (possibly) newtype coercion co_occs ++ -- data constructor (DataCon namespace) -- data worker (Id namespace) -- no wrapper (class dictionaries never have a wrapper) [dc_occ, dcww_occ] ++ -- associated types [occName (ifName at) | IfaceAT at _ <- ats ] ++ -- superclass selectors [mkSuperDictSelOcc n cls_tc_occ | n <- [1..n_ctxt]] ++ -- operation selectors [occName op | IfaceClassOp op _ _ <- sigs] where cls_tc_occ = occName cls_tc_name n_ctxt = length sc_ctxt n_sigs = length sigs co_occs | is_newtype = [mkNewTyCoOcc cls_tc_occ] | otherwise = [] dcww_occ = mkDataConWorkerOcc dc_occ dc_occ = mkClassDataConOcc cls_tc_occ is_newtype = n_sigs + n_ctxt == 1 -- Sigh (keep this synced with buildClass) ifaceDeclImplicitBndrs _ = [] ifaceConDeclImplicitBndrs :: IfaceConDecl -> [OccName] ifaceConDeclImplicitBndrs (IfCon { ifConWrapper = has_wrapper, ifConName = con_name }) = [occName con_name, work_occ] ++ wrap_occs where con_occ = occName 
con_name work_occ = mkDataConWorkerOcc con_occ -- Id namespace wrap_occs | has_wrapper = [mkDataConWrapperOcc con_occ] -- Id namespace | otherwise = [] -- ----------------------------------------------------------------------------- -- The fingerprints of an IfaceDecl -- We better give each name bound by the declaration a -- different fingerprint! So we calculate the fingerprint of -- each binder by combining the fingerprint of the whole -- declaration with the name of the binder. (#5614, #7215) ifaceDeclFingerprints :: Fingerprint -> IfaceDecl -> [(OccName,Fingerprint)] ifaceDeclFingerprints hash decl = (getOccName decl, hash) : [ (occ, computeFingerprint' (hash,occ)) | occ <- ifaceDeclImplicitBndrs decl ] where computeFingerprint' = unsafeDupablePerformIO . computeFingerprint (panic "ifaceDeclFingerprints") fromIfaceWarnings :: IfaceWarnings -> Warnings GhcRn fromIfaceWarnings = \case IfWarnAll txt -> WarnAll (fromIfaceWarningTxt txt) IfWarnSome vs ds -> WarnSome [(occ, fromIfaceWarningTxt txt) | (occ, txt) <- vs] [(occ, fromIfaceWarningTxt txt) | (occ, txt) <- ds] fromIfaceWarningTxt :: IfaceWarningTxt -> WarningTxt GhcRn fromIfaceWarningTxt = \case IfWarningTxt mb_cat src strs -> WarningTxt (noLocA . 
fromWarningCategory <$> mb_cat) src (noLocA <$> map fromIfaceStringLiteralWithNames strs) IfDeprecatedTxt src strs -> DeprecatedTxt src (noLocA <$> map fromIfaceStringLiteralWithNames strs) fromIfaceStringLiteralWithNames :: (IfaceStringLiteral, [IfExtName]) -> WithHsDocIdentifiers StringLiteral GhcRn fromIfaceStringLiteralWithNames (str, names) = WithHsDocIdentifiers (fromIfaceStringLiteral str) (map noLoc names) fromIfaceStringLiteral :: IfaceStringLiteral -> StringLiteral fromIfaceStringLiteral (IfStringLiteral st fs) = StringLiteral st fs Nothing {- ************************************************************************ * * Expressions * * ************************************************************************ -} data IfaceExpr = IfaceLcl IfLclName | IfaceExt IfExtName | IfaceType IfaceType | IfaceCo IfaceCoercion | IfaceTuple TupleSort [IfaceExpr] -- Saturated; type arguments omitted | IfaceLam IfaceLamBndr IfaceExpr | IfaceApp IfaceExpr IfaceExpr | IfaceCase IfaceExpr IfLclName [IfaceAlt] | IfaceECase IfaceExpr IfaceType -- See Note [Empty case alternatives] | IfaceLet (IfaceBinding IfaceLetBndr) IfaceExpr | IfaceCast IfaceExpr IfaceCoercion | IfaceLit Literal | IfaceLitRubbish TypeOrConstraint IfaceType -- See GHC.Types.Literal Note [Rubbish literals] item (6) | IfaceFCall ForeignCall IfaceType | IfaceTick IfaceTickish IfaceExpr -- from Tick tickish E data IfaceTickish = IfaceHpcTick Module Int -- from HpcTick x | IfaceSCC CostCentre Bool Bool -- from ProfNote | IfaceSource RealSrcSpan FastString -- from SourceNote | IfaceBreakpoint Int [IfaceExpr] Module -- from Breakpoint data IfaceAlt = IfaceAlt IfaceConAlt [IfLclName] IfaceExpr -- Note: IfLclName, not IfaceBndr (and same with the case binder) -- We reconstruct the kind/type of the thing from the context -- thus saving bulk in interface files data IfaceConAlt = IfaceDefaultAlt | IfaceDataAlt IfExtName | IfaceLitAlt Literal type IfaceBinding b = IfaceBindingX IfaceExpr b data IfaceBindingX r b = 
IfaceNonRec b r | IfaceRec [(b, r)] deriving (Functor, Foldable, Traversable, Ord, Eq) -- IfaceLetBndr is like IfaceIdBndr, but has IdInfo too -- It's used for *non-top-level* let/rec binders -- See Note [IdInfo on nested let-bindings] data IfaceLetBndr = IfLetBndr IfLclName IfaceType IfaceIdInfo JoinPointHood data IfaceTopBndrInfo = IfLclTopBndr IfLclName IfaceType IfaceIdInfo IfaceIdDetails | IfGblTopBndr IfaceTopBndr -- See Note [Interface File with Core: Sharing RHSs] data IfaceMaybeRhs = IfUseUnfoldingRhs | IfRhs IfaceExpr {- Note [Empty case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Iface syntax an IfaceCase does not record the types of the alternatives, unlike Core syntax Case. But we need this type if the alternatives are empty. Hence IfaceECase. See Note [Empty case alternatives] in GHC.Core. Note [Expose recursive functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For supercompilation we want to put *all* unfoldings in the interface file, even for functions that are recursive (or big). So we need to know when an unfolding belongs to a loop-breaker so that we can refrain from inlining it (except during supercompilation). Note [IdInfo on nested let-bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Occasionally we want to preserve IdInfo on nested let bindings. The one that came up was a NOINLINE pragma on a let-binding inside an INLINE function. The user (Duncan Coutts) really wanted the NOINLINE control to cross the separate compilation boundary. In general we retain all info that is left by GHC.Core.Tidy.tidyLetBndr, since that is what is seen by importing module with --make Note [Displaying axiom incompatibilities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ With -fprint-axiom-incomps we display which closed type family equations are incompatible with which. This information is sometimes necessary because GHC doesn't try equations in order: any equation can be used when all preceding equations that are incompatible with it do not apply. 
For example, the last "a && a = a" equation in Data.Type.Bool.&& is actually compatible with all previous equations, and can reduce at any time. This is displayed as: Prelude> :i Data.Type.Equality.== type family (==) (a :: k) (b :: k) :: Bool where {- #0 -} (==) (f a) (g b) = (f == g) && (a == b) {- #1 -} (==) a a = 'True -- incompatible with: #0 {- #2 -} (==) _1 _2 = 'False -- incompatible with: #1, #0 The comment after an equation refers to all previous equations (0-indexed) that are incompatible with it. ************************************************************************ * * Printing IfaceDecl * * ************************************************************************ -} pprAxBranch :: SDoc -> BranchIndex -> IfaceAxBranch -> SDoc -- The TyCon might be local (just an OccName), or this might -- be a branch for an imported TyCon, so it would be an ExtName -- So it's easier to take an SDoc here -- -- This function is used -- to print interface files, -- in debug messages -- in :info F for GHCi, which goes via toConToIfaceDecl on the family tycon -- For user error messages we use Coercion.pprCoAxiom and friends pprAxBranch pp_tc idx (IfaceAxBranch { ifaxbTyVars = tvs , ifaxbCoVars = _cvs , ifaxbLHS = pat_tys , ifaxbRHS = rhs , ifaxbIncomps = incomps }) = assertPpr (null _cvs) (pp_tc $$ ppr _cvs) $ hang ppr_binders 2 (hang pp_lhs 2 (equals <+> ppr rhs)) $+$ nest 4 maybe_incomps where -- See Note [Printing foralls in type family instances] in GHC.Iface.Type ppr_binders = maybe_index <+> pprUserIfaceForAll (map (mkIfaceForAllTvBndr Specified) tvs) pp_lhs = hang pp_tc 2 (pprParendIfaceAppArgs pat_tys) -- See Note [Displaying axiom incompatibilities] maybe_index = ppWhenOption sdocPrintAxiomIncomps $ text "{-" <+> (text "#" <> ppr idx) <+> text "-}" maybe_incomps = ppWhenOption sdocPrintAxiomIncomps $ ppWhen (notNull incomps) $ text "--" <+> text "incompatible with:" <+> pprWithCommas (\incomp -> text "#" <> ppr incomp) incomps instance Outputable IfaceWarnings 
where ppr = \case IfWarnAll txt -> text "Warn all" <+> ppr txt IfWarnSome vs ds -> hang (text "Warnings:") 2 $ text "Deprecated names:" <+> vcat [ppr name <+> ppr txt | (name, txt) <- vs] $$ text "Deprecated exports:" <+> vcat [ppr name <+> ppr txt | (name, txt) <- ds] instance Outputable IfaceWarningTxt where ppr = \case IfWarningTxt _ _ ws -> pp_ws ws IfDeprecatedTxt _ ds -> pp_ws ds where pp_ws [msg] = pp_with_name msg pp_ws msgs = brackets $ vcat . punctuate comma . map pp_with_name $ msgs pp_with_name = ppr . fst instance Outputable IfaceStringLiteral where ppr (IfStringLiteral st fs) = pprWithSourceText st (ftext fs) instance Outputable IfaceAnnotation where ppr (IfaceAnnotation target value) = ppr target <+> colon <+> ppr value instance NamedThing IfaceClassOp where getName (IfaceClassOp n _ _) = n instance HasOccName IfaceClassOp where occName = getOccName instance NamedThing IfaceConDecl where getName = ifConName instance HasOccName IfaceConDecl where occName = getOccName instance NamedThing IfaceDecl where getName = ifName instance HasOccName IfaceDecl where occName = getOccName instance Outputable IfaceDecl where ppr = pprIfaceDecl showToIface instance (Outputable r, Outputable b) => Outputable (IfaceBindingX r b) where ppr b = case b of (IfaceNonRec b r) -> ppr_bind (b, r) (IfaceRec pairs) -> sep [text "rec {", nest 2 (sep (map ppr_bind pairs)),text "}"] where ppr_bind (b, r) = ppr b <+> equals <+> ppr r instance Outputable IfaceTopBndrInfo where ppr (IfLclTopBndr lcl_name _ _ _) = ppr lcl_name ppr (IfGblTopBndr gbl) = ppr gbl instance Outputable IfaceMaybeRhs where ppr IfUseUnfoldingRhs = text "" ppr (IfRhs ie) = ppr ie {- Note [Minimal complete definition] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The minimal complete definition should only be included if a complete class definition is shown. Since the minimal complete definition is anonymous we can't reuse the same mechanism that is used for the filtering of method signatures. 
Instead we just check if anything at all is filtered and hide it in that case. -} {- Note [Printing IfaceDecl binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The binders in an IfaceDecl are just OccNames, so we don't know what module they come from. But when we pretty-print a TyThing by converting to an IfaceDecl (see GHC.Types.TyThing.Ppr), the TyThing may come from some other module so we really need the module qualifier. We solve this by passing in a pretty-printer for the binders. When printing an interface file (--show-iface), we want to print everything unqualified, so we can just print the OccName directly. -} -- | Show a declaration but not its RHS. showToHeader :: ShowSub showToHeader = ShowSub { ss_how_much = ShowHeader $ AltPpr Nothing , ss_forall = ShowForAllWhen } -- | Show declaration and its RHS, including GHc-internal information (e.g. -- for @--show-iface@). showToIface :: ShowSub showToIface = ShowSub { ss_how_much = ShowIface , ss_forall = ShowForAllWhen } ppShowIface :: ShowSub -> SDoc -> SDoc ppShowIface (ShowSub { ss_how_much = ShowIface }) doc = doc ppShowIface _ _ = Outputable.empty -- show if all sub-components or the complete interface is shown ppShowAllSubs :: ShowSub -> SDoc -> SDoc -- See Note [Minimal complete definition] ppShowAllSubs (ShowSub { ss_how_much = ShowSome Nothing _ }) doc = doc ppShowAllSubs (ShowSub { ss_how_much = ShowIface }) doc = doc ppShowAllSubs _ _ = Outputable.empty ppShowRhs :: ShowSub -> SDoc -> SDoc ppShowRhs (ShowSub { ss_how_much = ShowHeader _ }) _ = Outputable.empty ppShowRhs _ doc = doc showSub :: HasOccName n => ShowSub -> n -> Bool showSub (ShowSub { ss_how_much = ShowHeader _ }) _ = False showSub (ShowSub { ss_how_much = ShowSome (Just f) _ }) thing = f (occName thing) showSub (ShowSub { ss_how_much = _ }) _ = True ppr_trim :: [Maybe SDoc] -> [SDoc] -- Collapse a group of Nothings to a single "..." 
-- Collapse each run of suppressed ('Nothing') entries into a single "..."
-- document, keeping the visible entries in their original order.
ppr_trim xs = snd (foldr go (False, []) xs)
  where
    go (Just doc) (_, so_far)     = (False, doc : so_far)
    go Nothing    (True, so_far)  = (True, so_far)
    go Nothing    (False, so_far) = (True, text "..." : so_far)

-- | True iff the tycon is a data family instance (i.e. has a parent).
isIfaceDataInstance :: IfaceTyConParent -> Bool
isIfaceDataInstance IfNoParent = False
isIfaceDataInstance _          = True

-- | Print a @type role@ declaration for a class, suppressing the roles
-- that are 'Nominal' (the default for classes).
pprClassRoles :: ShowSub -> IfaceTopBndr -> [IfaceTyConBinder] -> [Role] -> SDoc
pprClassRoles ss clas binders roles =
  pprRoles (== Nominal)
           (pprPrefixIfDeclBndr (ss_how_much ss) (occName clas))
           binders
           roles

-- | Print a standalone kind signature for a class.
pprClassStandaloneKindSig :: ShowSub -> IfaceTopBndr -> IfaceKind -> SDoc
pprClassStandaloneKindSig ss clas =
  pprStandaloneKindSig (pprPrefixIfDeclBndr (ss_how_much ss) (occName clas))

-- | The kind @Constraint@, as an 'IfaceKind'.
constraintIfaceKind :: IfaceKind
constraintIfaceKind =
  IfaceTyConApp (IfaceTyCon constraintKindTyConName (mkIfaceTyConInfo NotPromoted IfaceNormalTyCon))
                IA_Nil

pprIfaceDecl :: ShowSub -> IfaceDecl -> SDoc
-- NB: pprIfaceDecl is also used for pretty-printing TyThings in GHCi
--     See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr
pprIfaceDecl ss (IfaceData { ifName = tycon, ifCType = ctype,
                             ifCtxt = context, ifResKind = kind,
                             ifRoles = roles, ifCons = condecls,
                             ifParent = parent,
                             ifGadtSyntax = gadt,
                             ifBinders = binders })
  -- GADT syntax: constructors go in a @where@ block, one per line
  | gadt      = vcat [ pp_roles
                     , pp_ki_sig
                     , pp_nd <+> pp_lhs <+> pp_kind <+> pp_where
                     , nest 2 (vcat pp_cons)
                     , nest 2 $ ppShowIface ss pp_extra ]
  -- Haskell-98 syntax: constructors separated by @=@ and @|@
  | otherwise = vcat [ pp_roles
                     , pp_ki_sig
                     , hang (pp_nd <+> pp_lhs) 2 (add_bars pp_cons)
                     , nest 2 $ ppShowIface ss pp_extra ]
  where
    is_data_instance = isIfaceDataInstance parent
    -- See Note [Printing foralls in type family instances] in GHC.Iface.Type
    pp_data_inst_forall :: SDoc
    pp_data_inst_forall = pprUserIfaceForAll forall_bndrs

    forall_bndrs :: [IfaceForAllBndr]
    forall_bndrs = [Bndr (binderVar tc_bndr) Specified | tc_bndr <- binders]

    cons     = visibleIfConDecls condecls
    pp_where = ppWhen (gadt && not (null cons)) $ text "where"
    pp_cons  = ppr_trim (map show_con cons) :: [SDoc]
    -- Only show an inline result kind when no standalone kind signature is
    -- printed and the kind is not simply Type
    pp_kind  = ppUnless (ki_sig_printable || isIfaceLiftedTypeKind kind)
                        (dcolon <+> ppr kind)

    pp_lhs = case parent of
               IfNoParent -> pprIfaceDeclHead suppress_bndr_sig context ss tycon binders
               IfDataInstance{}
                          -> text "instance" <+> pp_data_inst_forall <+>
                             pprIfaceTyConParent parent

    pp_roles
      | is_data_instance = empty
      | otherwise        = pprRoles (== Representational) name_doc binders roles
            -- Don't display roles for data family instances (yet)
            -- See discussion on #8672.

    ki_sig_printable =
      -- If we print a standalone kind signature for a data instance, we leak
      -- the internal constructor name:
      --
      --   type T15827.R:Dka :: forall k. k -> *
      --   data instance forall k (a :: k). D a = MkD (Proxy a)
      --
      -- This T15827.R:Dka is a compiler-generated type constructor for the
      -- data instance.
      not is_data_instance

    pp_ki_sig = ppWhen ki_sig_printable $
                pprStandaloneKindSig name_doc (mkIfaceTyConKind binders kind)

    -- See Note [Suppressing binder signatures] in GHC.Iface.Type
    suppress_bndr_sig = SuppressBndrSig ki_sig_printable

    name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tycon)

    -- Lay out H98-style constructors as @= C1 | C2 | ...@
    add_bars []     = Outputable.empty
    add_bars (c:cs) = sep ((equals <+> c) : map (vbar <+>) cs)

    -- A constructor is shown if it, or any of its field selectors, passes
    -- the ShowSub filter
    ok_con dc = showSub ss dc || any (showSub ss . flSelector) (ifConFields dc)

    show_con dc
      | ok_con dc = Just $ pprIfaceConDecl ss gadt tycon binders parent dc
      | otherwise = Nothing

    pp_nd = case condecls of
              IfAbstractTyCon{}  -> text "data"
              IfDataTyCon True _ -> text "type data"
              IfDataTyCon{}      -> text "data"
              IfNewTyCon{}       -> text "newtype"

    pp_extra = vcat [pprCType ctype]

-- Abstract class: only roles, kind signature, and the header are shown
pprIfaceDecl ss (IfaceClass { ifName    = clas
                            , ifRoles   = roles
                            , ifFDs     = fds
                            , ifBinders = binders
                            , ifBody    = IfAbstractClass })
  = vcat [ pprClassRoles ss clas binders roles
         , pprClassStandaloneKindSig ss clas (mkIfaceTyConKind binders constraintIfaceKind)
         , text "class" <+> pprIfaceDeclHead suppress_bndr_sig [] ss clas binders <+> pprFundeps fds ]
  where
    -- See Note [Suppressing binder signatures] in GHC.Iface.Type
    suppress_bndr_sig = SuppressBndrSig True

-- Concrete class: also show associated types, method signatures and MINIMAL
pprIfaceDecl ss (IfaceClass { ifName    = clas
                            , ifRoles   = roles
                            , ifFDs     = fds
                            , ifBinders = binders
                            , ifBody    = IfConcreteClass {
                                            ifATs = ats, ifSigs = sigs,
                                            ifClassCtxt = context,
                                            ifMinDef = minDef }})
  = vcat [ pprClassRoles ss clas binders roles
         , pprClassStandaloneKindSig ss clas (mkIfaceTyConKind binders constraintIfaceKind)
         , text "class" <+> pprIfaceDeclHead suppress_bndr_sig context ss clas binders
                        <+> pprFundeps fds <+> pp_where
         , nest 2 (vcat [ vcat asocs, vcat dsigs
                        , ppShowAllSubs ss (pprMinDef $ fromIfaceBooleanFormula minDef)])]
  where
    pp_where = ppShowRhs ss $ ppUnless (null sigs && null ats) (text "where")

    asocs = ppr_trim $ map maybeShowAssoc ats
    dsigs = ppr_trim $ map maybeShowSig sigs

    maybeShowAssoc :: IfaceAT -> Maybe SDoc
    maybeShowAssoc asc@(IfaceAT d _)
      | showSub ss d = Just $ pprIfaceAT ss asc
      | otherwise    = Nothing

    maybeShowSig :: IfaceClassOp -> Maybe SDoc
    maybeShowSig sg
      | showSub ss sg = Just $ pprIfaceClassOp ss sg
      | otherwise     = Nothing

    pprMinDef :: BooleanFormula IfLclName -> SDoc
    pprMinDef minDef = ppUnless (isTrue minDef) $ -- hide empty definitions
      text "{-# MINIMAL" <+>
      pprBooleanFormula
        (\_ def -> cparen (isLexSym def) (ppr def)) 0 (fmap ifLclNameFS minDef) <+>
      text "#-}"

    -- See Note [Suppressing binder signatures] in GHC.Iface.Type
    suppress_bndr_sig = SuppressBndrSig True

pprIfaceDecl ss (IfaceSynonym { ifName    = tc
                              , ifBinders = binders
                              , ifSynRhs  = mono_ty
                              , ifResKind = res_kind})
  = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind)
         , hang (text "type" <+> pprIfaceDeclHead suppress_bndr_sig [] ss tc binders <+> equals)
              2 (sep [ pprIfaceForAll tvs, pprIfaceContextArr theta, ppr_tau
                     , ppUnless (isIfaceLiftedTypeKind res_kind) (dcolon <+> ppr res_kind) ])
         ]
  where
    (tvs, theta, tau) = splitIfaceSigmaTy mono_ty
    name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tc)

    -- See Note [Printing type abbreviations] in GHC.Iface.Type
    ppr_tau | tc `hasKey` liftedTypeKindTyConKey ||
              tc `hasKey` unrestrictedFunTyConKey ||
              tc `hasKey` constraintKindTyConKey
            = updSDocContext (\ctx -> ctx { sdocPrintTypeAbbreviations = False }) $ ppr tau
            | otherwise = ppr tau

    -- See Note [Suppressing binder signatures] in GHC.Iface.Type
    suppress_bndr_sig = SuppressBndrSig True

pprIfaceDecl ss (IfaceFamily { ifName = tycon
                             , ifFamFlav = rhs, ifBinders = binders
                             , ifResKind = res_kind
                             , ifResVar = res_var, ifFamInj = inj })
  | IfaceDataFamilyTyCon <- rhs
  = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind)
         , text "data family" <+> pprIfaceDeclHead suppress_bndr_sig [] ss tycon binders ]

  | otherwise
  = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind)
         , hang (text "type family"
                   <+> pprIfaceDeclHead suppress_bndr_sig [] ss tycon binders
                   <+> pp_inj res_var inj
                   <+> ppShowRhs ss (pp_where rhs))
              2 (ppShowRhs ss (pp_rhs rhs))
           $$
           nest 2 (ppShowRhs ss (pp_branches rhs))
         ]
  where
    name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tycon)

    pp_where (IfaceClosedSynFamilyTyCon {}) = text "where"
    pp_where _                              = empty

    -- Print the result variable and (when injective) the injectivity
    -- annotation, e.g. @= r | r -> a@
    pp_inj Nothing    _   = empty
    pp_inj (Just res) inj
       | Injective injectivity <- inj
       = hsep [ equals, ppr res, pp_inj_cond res injectivity]
       | otherwise = hsep [ equals, ppr res ]

    pp_inj_cond res inj = case filterByList inj binders of
       []  -> empty
       tvs -> hsep [vbar, ppr res, text "->", interppSP (map ifTyConBinderName tvs)]

    pp_rhs IfaceDataFamilyTyCon
      = ppShowIface ss (text "data")
    pp_rhs IfaceOpenSynFamilyTyCon
      = ppShowIface ss (text "open")
    pp_rhs IfaceAbstractClosedSynFamilyTyCon
      = ppShowIface ss (text "closed, abstract")
    pp_rhs (IfaceClosedSynFamilyTyCon {})
      = empty -- see pp_branches
    pp_rhs IfaceBuiltInSynFamTyCon
      = ppShowIface ss (text "built-in")

    pp_branches (IfaceClosedSynFamilyTyCon (Just (ax, brs)))
      = vcat (unzipWith (pprAxBranch
                          (pprPrefixIfDeclBndr
                            (ss_how_much ss)
                            (occName tycon))
                        ) $ zip [0..] brs)
        $$ ppShowIface ss (text "axiom" <+> ppr ax)
    pp_branches _ = Outputable.empty

    -- See Note [Suppressing binder signatures] in GHC.Iface.Type
    suppress_bndr_sig = SuppressBndrSig True

pprIfaceDecl _ (IfacePatSyn { ifName = name,
                              ifPatUnivBndrs = univ_bndrs, ifPatExBndrs = ex_bndrs,
                              ifPatProvCtxt = prov_ctxt, ifPatReqCtxt = req_ctxt,
                              ifPatArgs = arg_tys, ifFieldLabels = pat_fldlbls,
                              ifPatTy = pat_ty} )
  = sdocWithContext mk_msg
  where
    pat_keywrd = text "pattern"
    mk_msg sdocCtx
      = vcat [ ppr_pat_ty
             -- only print this for record pattern synonyms
             , if null pat_fldlbls then Outputable.empty
               else pat_keywrd <+> pprPrefixOcc name <+> pat_body]
      where
        ppr_pat_ty =
          hang (pat_keywrd <+> pprPrefixOcc name)
             2 (dcolon <+> sep [ univ_msg
                               , pprIfaceContextArr req_ctxt
                               , ppWhen insert_empty_ctxt $ parens empty <+> darrow
                               , ex_msg
                               , pprIfaceContextArr prov_ctxt
                               , pprIfaceType $ foldr (IfaceFunTy visArgTypeLike many_ty) pat_ty arg_tys ])
        pat_body = braces $ sep $ punctuate comma $ map ppr pat_fldlbls
        univ_msg = pprUserIfaceForAll $ tyVarSpecToBinders univ_bndrs
        ex_msg   = pprUserIfaceForAll $ tyVarSpecToBinders ex_bndrs

        -- Insert a () => placeholder so the provided context is not mistaken
        -- for the (absent) required context
        insert_empty_ctxt = null req_ctxt
            && not (null prov_ctxt && isEmpty sdocCtx ex_msg)

pprIfaceDecl ss (IfaceId { ifName = var, ifType = ty,
                           ifIdDetails = details, ifIdInfo = info })
  = vcat [ hang (pprPrefixIfDeclBndr (ss_how_much ss) (occName var) <+> dcolon)
              2 (pprIfaceSigmaType (ss_forall ss) ty)
         , ppShowIface ss (ppr details)
         , ppShowIface ss (ppr info) ]

pprIfaceDecl _ (IfaceAxiom { ifName = name, ifTyCon = tycon
                           , ifAxBranches = branches })
  = hang (text "axiom" <+> ppr name <+> dcolon)
       2 (vcat $ unzipWith (pprAxBranch (ppr tycon)) $ zip [0..] branches)

-- | Show a @C type:@ annotation, if one is present.
pprCType :: Maybe CType -> SDoc
pprCType Nothing      = Outputable.empty
pprCType (Just cType) = text "C type:" <+> ppr cType

-- if, for each role, suppress_if role is True, then suppress the role
-- output
pprRoles :: (Role -> Bool) -> SDoc -> [IfaceTyConBinder]
         -> [Role] -> SDoc
pprRoles suppress_if tyCon bndrs roles
  = sdocOption sdocPrintExplicitKinds $ \print_kinds ->
      let froles = suppressIfaceInvisibles (PrintExplicitKinds print_kinds) bndrs roles
      in ppUnless (all suppress_if froles || null froles) $
         text "type role" <+> tyCon <+> hsep (map ppr froles)

-- | Print @type T :: kind@.
pprStandaloneKindSig :: SDoc -> IfaceType -> SDoc
pprStandaloneKindSig tyCon ty = text "type" <+> tyCon <+> text "::" <+> ppr ty

-- Use the alternative pretty-printer from the ShowHowMuch, if one was given
pprInfixIfDeclBndr :: ShowHowMuch -> OccName -> SDoc
pprInfixIfDeclBndr (ShowSome _ (AltPpr (Just ppr_bndr))) name
  = pprInfixVar (isSymOcc name) (ppr_bndr name)
pprInfixIfDeclBndr _ name
  = pprInfixVar (isSymOcc name) (ppr name)

-- Use the alternative pretty-printer from the ShowHowMuch, if one was given
pprPrefixIfDeclBndr :: ShowHowMuch -> OccName -> SDoc
pprPrefixIfDeclBndr (ShowHeader (AltPpr (Just ppr_bndr))) name
  = parenSymOcc name (ppr_bndr name)
pprPrefixIfDeclBndr (ShowSome _ (AltPpr (Just ppr_bndr))) name
  = parenSymOcc name (ppr_bndr name)
pprPrefixIfDeclBndr _ name
  = parenSymOcc name (ppr name)

instance Outputable IfaceClassOp where
   ppr = pprIfaceClassOp showToIface

-- | Print a class method signature, and its generic default-method
-- signature if it has one.
pprIfaceClassOp :: ShowSub -> IfaceClassOp -> SDoc
pprIfaceClassOp ss (IfaceClassOp n ty dm)
  = pp_sig n ty $$ generic_dm
  where
   generic_dm | Just (GenericDM dm_ty) <- dm
              = text "default" <+> pp_sig n dm_ty
              | otherwise
              = empty
   pp_sig n ty
     = pprPrefixIfDeclBndr (ss_how_much ss) (occName n)
       <+> dcolon
       <+> pprIfaceSigmaType ShowForAllWhen ty

instance Outputable IfaceAT where
   ppr = pprIfaceAT showToIface
-- | Print an associated type declaration, plus its default (if any).
pprIfaceAT :: ShowSub -> IfaceAT -> SDoc
pprIfaceAT ss (IfaceAT d mb_def)
  = vcat [ pprIfaceDecl ss d
         , case mb_def of
              Nothing  -> Outputable.empty
              Just rhs -> nest 2 $ text "Default:" <+> ppr rhs ]

instance Outputable IfaceTyConParent where
  ppr p = pprIfaceTyConParent p

-- | For a data family instance, print the parent family applied to the
-- instance arguments; print nothing otherwise.
pprIfaceTyConParent :: IfaceTyConParent -> SDoc
pprIfaceTyConParent IfNoParent
  = Outputable.empty
pprIfaceTyConParent (IfDataInstance _ tc tys)
  = pprIfaceTypeApp topPrec tc tys

-- | Print @context => T binders@, suppressing invisible binders unless
-- -fprint-explicit-kinds is on.
pprIfaceDeclHead :: SuppressBndrSig
                 -> IfaceContext -> ShowSub -> Name
                 -> [IfaceTyConBinder]   -- of the tycon, for invisible-suppression
                 -> SDoc
pprIfaceDeclHead suppress_sig context ss tc_occ bndrs
  = sdocOption sdocPrintExplicitKinds $ \print_kinds ->
    sep [ pprIfaceContextArr context
        , pprPrefixIfDeclBndr (ss_how_much ss) (occName tc_occ)
          <+> pprIfaceTyConBinders suppress_sig
                (suppressIfaceInvisibles (PrintExplicitKinds print_kinds) bndrs bndrs) ]

-- | Print a single data constructor, in either GADT or Haskell-98 style
-- (selected by the Bool argument).
pprIfaceConDecl :: ShowSub -> Bool
                -> IfaceTopBndr
                -> [IfaceTyConBinder]
                -> IfaceTyConParent
                -> IfaceConDecl -> SDoc
pprIfaceConDecl ss gadt_style tycon tc_binders parent
        (IfCon { ifConName = name, ifConInfix = is_infix,
                 ifConUserTvBinders = user_tvbs,
                 ifConEqSpec = eq_spec, ifConCtxt = ctxt, ifConArgTys = arg_tys,
                 ifConStricts = stricts, ifConFields = fields })
  | gadt_style = pp_prefix_con <+> dcolon <+> ppr_gadt_ty
  | otherwise  = ppr_ex_quant pp_h98_con
  where
    pp_h98_con
      | not (null fields) = pp_prefix_con <+> pp_field_args
      | is_infix
      , [ty1, ty2] <- pp_args
      = sep [ ty1
            , pprInfixIfDeclBndr how_much (occName name)
            , ty2 ]
      | otherwise = pp_prefix_con <+> sep pp_args

    how_much = ss_how_much ss
    tys_w_strs :: [(IfaceBang, IfaceType)]
    tys_w_strs = zip stricts (map snd arg_tys)
    pp_prefix_con = pprPrefixIfDeclBndr how_much (occName name)

    -- If we're pretty-printing a H98-style declaration with existential
    -- quantification, then user_tvbs will always consist of the universal
    -- tyvar binders followed by the existential tyvar binders. So to recover
    -- the visibilities of the existential tyvar binders, we can simply drop
    -- the universal tyvar binders from user_tvbs.
    ex_tvbs = dropList tc_binders user_tvbs
    ppr_ex_quant = pprIfaceForAllPartMust (ifaceForAllSpecToBndrs ex_tvbs) ctxt
    pp_gadt_res_ty = mk_user_con_res_ty eq_spec
    ppr_gadt_ty = pprIfaceForAllPart (ifaceForAllSpecToBndrs user_tvbs) ctxt pp_tau

    -- A bit gruesome this, but we can't form the full con_tau, and ppr it,
    -- because we don't have a Name for the tycon, only an OccName
    pp_tau | null fields
           = case pp_args ++ [pp_gadt_res_ty] of
                (t:ts) -> fsep (t : zipWithEqual "pprIfaceConDecl"
                                      (\(w,_) d -> ppr_arr w <+> d) arg_tys ts)
                []     -> panic "pp_con_taus"
           | otherwise
           = sep [pp_field_args, arrow <+> pp_gadt_res_ty]

    -- Constructors are linear by default, but we don't want to show
    -- linear arrows when -XLinearTypes is disabled
    ppr_arr w = sdocOption sdocLinearTypes $ \linearTypes ->
                if linearTypes then pprTypeArrow visArgTypeLike w else arrow

    ppr_bang IfNoBang = whenPprDebug $ char '_'
    ppr_bang IfStrict = char '!'
    ppr_bang IfUnpack = text "{-# UNPACK #-}"
    ppr_bang (IfUnpackCo co) = text "! {-# UNPACK #-}" <>
                               pprParendIfaceCoercion co

    pprFieldArgTy, pprArgTy :: (IfaceBang, IfaceType) -> SDoc
    -- If using record syntax, the only reason one would need to parenthesize
    -- a compound field type is if it's preceded by a bang pattern.
    pprFieldArgTy (bang, ty) = ppr_arg_ty (bang_prec bang) bang ty
    -- If not using record syntax, a compound field type might need to be
    -- parenthesized if one of the following holds:
    --
    -- 1. We're using Haskell98 syntax.
    -- 2. The field type is preceded with a bang pattern.
    pprArgTy (bang, ty) = ppr_arg_ty (max gadt_prec (bang_prec bang)) bang ty

    ppr_arg_ty :: PprPrec -> IfaceBang -> IfaceType -> SDoc
    ppr_arg_ty prec bang ty = ppr_bang bang <> pprPrecIfaceType prec ty

    -- If we're displaying the fields GADT-style, e.g.,
    --
    --   data Foo a where
    --     MkFoo :: (Int -> Int) -> Maybe a -> Foo
    --
    -- Then we use `funPrec`, since that will ensure `Int -> Int` gets the
    -- parentheses that it requires, but simple compound types like `Maybe a`
    -- (which don't require parentheses in a function argument position) won't
    -- get them, assuming that there are no bang patterns (see bang_prec).
    --
    -- If we're displaying the fields Haskell98-style, e.g.,
    --
    --   data Foo a = MkFoo (Int -> Int) (Maybe a)
    --
    -- Then not only must we parenthesize `Int -> Int`, we must also
    -- parenthesize compound fields like (Maybe a). Therefore, we pick
    -- `appPrec`, which has higher precedence than `funPrec`.
    gadt_prec :: PprPrec
    gadt_prec
      | gadt_style = funPrec
      | otherwise  = appPrec

    -- The presence of bang patterns or UNPACK annotations requires
    -- surrounding the type with parentheses, if needed (#13699)
    bang_prec :: IfaceBang -> PprPrec
    bang_prec IfNoBang     = topPrec
    bang_prec IfStrict     = appPrec
    bang_prec IfUnpack     = appPrec
    bang_prec IfUnpackCo{} = appPrec

    pp_args :: [SDoc] -- No records, e.g., ` Maybe a -> Int -> ...` or
                      -- `!(Maybe a) -> !Int -> ...`
    pp_args = map pprArgTy tys_w_strs

    pp_field_args :: SDoc -- Records, e.g., { x :: Maybe a, y :: Int } or
                          -- { x :: !(Maybe a), y :: !Int }
    pp_field_args = braces $ sep $ punctuate comma $ ppr_trim $
                    zipWith maybe_show_label fields tys_w_strs

    maybe_show_label :: FieldLabel -> (IfaceBang, IfaceType) -> Maybe SDoc
    maybe_show_label lbl bty
      | showSub ss sel = Just (pprPrefixIfDeclBndr how_much occ
                                 <+> dcolon <+> pprFieldArgTy bty)
      | otherwise      = Nothing
      where
        sel = flSelector lbl
        occ = nameOccName sel

    mk_user_con_res_ty :: IfaceEqSpec -> SDoc
    -- See Note [Result type of a data family GADT]
    mk_user_con_res_ty eq_spec
      | IfDataInstance _ tc tys <- parent
      = pprIfaceType (IfaceTyConApp tc (substIfaceAppArgs gadt_subst tys))
      | otherwise
      = ppr_tc_app gadt_subst
      where
        gadt_subst = mkIfaceTySubst eq_spec

    -- When pretty-printing a GADT return type, we:
    --
    -- 1. Take the data tycon binders, extract their variable names and
    --    visibilities, and construct suitable arguments from them. (This is
    --    the role of mk_tc_app_args.)
    -- 2. Apply the GADT substitution constructed from the eq_spec.
    --    (See Note [Result type of a data family GADT].)
    -- 3. Pretty-print the data type constructor applied to its arguments.
    --    This process will omit any invisible arguments, such as coercion
    --    variables, if necessary. (See Note
    --    [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep.)
    ppr_tc_app gadt_subst
      = pprPrefixIfDeclBndr how_much (occName tycon)
        <+> pprParendIfaceAppArgs
              (substIfaceAppArgs gadt_subst (mk_tc_app_args tc_binders))

    mk_tc_app_args :: [IfaceTyConBinder] -> IfaceAppArgs
    mk_tc_app_args [] = IA_Nil
    mk_tc_app_args (Bndr bndr vis:tc_bndrs) =
      IA_Arg (IfaceTyVar (ifaceBndrName bndr)) (tyConBndrVisForAllTyFlag vis)
             (mk_tc_app_args tc_bndrs)

instance Outputable IfaceRule where
  ppr (IfaceRule { ifRuleName = name, ifActivation = act, ifRuleBndrs = bndrs,
                   ifRuleHead = fn, ifRuleArgs = args, ifRuleRhs = rhs,
                   ifRuleOrph = orph })
    = sep [ hsep [ pprRuleName name
                 , if isOrphan orph then text "[orphan]" else Outputable.empty
                 , ppr act
                 , pp_foralls ]
          , nest 2 (sep [ppr fn <+> sep (map pprParendIfaceExpr args),
                         text "=" <+> ppr rhs]) ]
    where
      pp_foralls = ppUnless (null bndrs) $ forAllLit <+> pprIfaceBndrs bndrs <> dot

instance Outputable IfaceDefault where
  ppr (IfaceDefault { ifDefaultCls = cls, ifDefaultTys = tcs })
    = text "default" <+> ppr cls <+> parens (pprWithCommas ppr tcs)

instance Outputable IfaceClsInst where
  ppr (IfaceClsInst { ifDFun = dfun_id, ifOFlag = flag
                    , ifInstCls = cls, ifInstTys = mb_tcs
                    , ifInstOrph = orph })
    = hang (text "instance" <+> ppr flag
              <+> (if isOrphan orph then text "[orphan]" else Outputable.empty)
              <+> ppr cls <+> brackets (pprWithCommas ppr_rough mb_tcs))
         2 (equals <+> ppr dfun_id)

instance Outputable IfaceFamInst where
  ppr (IfaceFamInst { ifFamInstFam = fam, ifFamInstTys = mb_tcs
                    , ifFamInstAxiom = tycon_ax, ifFamInstOrph = orph })
    = hang (text "family instance"
              <+> (if isOrphan orph then text "[orphan]" else Outputable.empty)
              <+> ppr fam <+> pprWithCommas (brackets . ppr_rough) mb_tcs)
         2 (equals <+> ppr tycon_ax)

-- Print a "rough match" tycon: a dot stands for a wildcard
ppr_rough :: Maybe IfaceTyCon -> SDoc
ppr_rough Nothing   = dot
ppr_rough (Just tc) = ppr tc

{- Note [Result type of a data family GADT]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   data family T a
   data instance T (p,q) where
      T1 :: T (Int, Maybe c)
      T2 :: T (Bool, q)

The IfaceDecl actually looks like

   data TPr p q where
      T1 :: forall p q. forall c. (p~Int,q~Maybe c) => TPr p q
      T2 :: forall p q. (p~Bool) => TPr p q

To reconstruct the result types for T1 and T2 that we want to
pretty print, we substitute the eq-spec [p->Int, q->Maybe c] in
the arg pattern (p,q) to give
   T (Int, Maybe c)
Remember that in IfaceSyn, the TyCon and DataCon share the same
universal type variables.

----------------------------- Printing IfaceExpr ------------------------------------
-}

instance Outputable IfaceExpr where
    ppr e = pprIfaceExpr noParens e

-- | The identity "parenthesiser": adds no parentheses.
noParens :: SDoc -> SDoc
noParens pp = pp

pprParendIfaceExpr :: IfaceExpr -> SDoc
pprParendIfaceExpr = pprIfaceExpr parens

-- | Pretty Print an IfaceExpr
--
-- The first argument should be a function that adds parens in context that need
-- an atomic value (e.g. function args)
pprIfaceExpr :: (SDoc -> SDoc) -> IfaceExpr -> SDoc
pprIfaceExpr _       (IfaceLcl v)       = ppr v
pprIfaceExpr _       (IfaceExt v)       = ppr v
pprIfaceExpr _       (IfaceLit l)       = ppr l
pprIfaceExpr _       (IfaceFCall cc ty) = braces (ppr cc <+> ppr ty)
pprIfaceExpr _       (IfaceType ty)     = char '@' <> pprParendIfaceType ty
pprIfaceExpr _       (IfaceCo co)       = text "@~" <> pprParendIfaceCoercion co
pprIfaceExpr _       (IfaceTuple c as)  = tupleParens c (pprWithCommas ppr as)
pprIfaceExpr _       (IfaceLitRubbish tc r)
  = text "RUBBISH"
    <> (case tc of { TypeLike -> empty; ConstraintLike -> text "[c]" })
    <> parens (ppr r)

pprIfaceExpr add_par app@(IfaceApp _ _) = add_par (pprIfaceApp app [])

-- Collect nested lambdas so they print as a single \x y z -> ...
pprIfaceExpr add_par i@(IfaceLam _ _)
  = add_par (sep [char '\\' <+> sep (map pprIfaceLamBndr bndrs) <+> arrow,
                  pprIfaceExpr noParens body])
  where
    (bndrs,body) = collect [] i
    collect bs (IfaceLam b e) = collect (b:bs) e
    collect bs e              = (reverse bs, e)

pprIfaceExpr add_par (IfaceECase scrut ty)
  = add_par (sep [ text "case" <+> pprIfaceExpr noParens scrut
                 , text "ret_ty" <+> pprParendIfaceType ty
                 , text "of {}" ])

-- Single-alternative case: print the whole thing on a compact layout
pprIfaceExpr add_par (IfaceCase scrut bndr [IfaceAlt con bs rhs])
  = add_par (sep [text "case"
                    <+> pprIfaceExpr noParens scrut <+> text "of"
                    <+> ppr bndr <+> char '{' <+> ppr_con_bs con bs <+> arrow,
                  pprIfaceExpr noParens rhs <+> char '}'])

pprIfaceExpr add_par (IfaceCase scrut bndr alts)
  = add_par (sep [text "case"
                    <+> pprIfaceExpr noParens scrut <+> text "of"
                    <+> ppr bndr <+> char '{',
                  nest 2 (sep (map pprIfaceAlt alts)) <+> char '}'])

pprIfaceExpr _ (IfaceCast expr co)
  = sep [pprParendIfaceExpr expr,
         nest 2 (text "`cast`"),
         pprParendIfaceCoercion co]

pprIfaceExpr add_par (IfaceLet (IfaceNonRec b rhs) body)
  = add_par (sep [text "let {",
                  nest 2 (ppr_bind (b, rhs)),
                  text "} in",
                  pprIfaceExpr noParens body])

pprIfaceExpr add_par (IfaceLet (IfaceRec pairs) body)
  = add_par (sep [text "letrec {",
                  nest 2 (sep (map ppr_bind pairs)),
                  text "} in",
                  pprIfaceExpr noParens body])

pprIfaceExpr add_par (IfaceTick tickish e)
  = add_par (pprIfaceTickish tickish <+> pprIfaceExpr noParens e)

pprIfaceAlt :: IfaceAlt -> SDoc
pprIfaceAlt (IfaceAlt con bs rhs)
  = sep [ppr_con_bs con bs, arrow <+> pprIfaceExpr noParens rhs]

-- Print a case alternative's constructor together with its binders
ppr_con_bs :: IfaceConAlt -> [IfLclName] -> SDoc
ppr_con_bs con bs = ppr con <+> hsep (map ppr bs)

-- Print one let-binding: signature, id info, and the right-hand side
ppr_bind :: (IfaceLetBndr, IfaceExpr) -> SDoc
ppr_bind (IfLetBndr b ty info ji, rhs)
  = sep [hang (ppr b <+> dcolon <+> ppr ty) 2 (ppr ji <+> ppr info),
         equals <+> pprIfaceExpr noParens rhs]

------------------
pprIfaceTickish :: IfaceTickish -> SDoc
pprIfaceTickish (IfaceHpcTick m ix)
  = braces (text "tick" <+> ppr m <+> ppr ix)
pprIfaceTickish (IfaceSCC cc tick scope)
  = braces (pprCostCentreCore cc <+> ppr tick <+> ppr scope)
pprIfaceTickish (IfaceSource src _names)
  = braces (pprUserRealSpan True src)
pprIfaceTickish (IfaceBreakpoint m ix fvs)
  = braces (text "break" <+> ppr m <+> ppr ix <+> ppr fvs)

------------------
-- Unwind a nest of IfaceApp nodes, accumulating printed arguments
pprIfaceApp :: IfaceExpr -> [SDoc] -> SDoc
pprIfaceApp (IfaceApp fun arg) args = pprIfaceApp fun $
                                      nest 2 (pprParendIfaceExpr arg) : args
pprIfaceApp fun args = sep (pprParendIfaceExpr fun : args)

------------------
instance Outputable IfaceConAlt where
    ppr IfaceDefaultAlt   = text "DEFAULT"
    ppr (IfaceLitAlt l)   = ppr l
    ppr (IfaceDataAlt d)  = ppr d

------------------
instance Outputable IfaceIdDetails where
  ppr IfVanillaId       = Outputable.empty
  ppr (IfWorkerLikeId dmd) = text "StrWork" <> parens (ppr dmd)
  ppr (IfRecSelId tc _c b _fl) = text "RecSel" <+> ppr tc
                                 <+> if b then text "" else Outputable.empty
  ppr IfDFunId          = text "DFunId"

instance Outputable IfaceInfoItem where
  ppr (HsUnfold lb unf)  = text "Unfolding"
                           <> ppWhen lb (text "(loop-breaker)")
                           <> colon <+> ppr unf
  ppr (HsInline prag)    = text "Inline:" <+> ppr prag
  ppr (HsArity arity)    = text "Arity:" <+> int arity
  ppr (HsDmdSig str)     = text "Strictness:" <+> ppr str
  ppr (HsCprSig cpr)     = text "CPR:" <+> ppr cpr
  ppr HsNoCafRefs        = text "HasNoCafRefs"
  ppr (HsLFInfo lf_info) = text "LambdaFormInfo:" <+> ppr lf_info
  ppr (HsTagSig tag_sig) = text "TagSig:" <+> ppr tag_sig

instance Outputable IfaceUnfolding where
  ppr (IfCoreUnfold src _ guide e)
    = sep [ text "Core:" <+> ppr src <+> ppr guide, ppr e ]
  ppr (IfDFunUnfold bs es)
    = hang (text "DFun:" <+> sep (map ppr bs) <> dot)
         2 (sep (map pprParendIfaceExpr es))

instance Outputable IfGuidance where
  ppr IfNoGuidance   = empty
  ppr (IfWhen a u b) = angleBrackets (ppr a <> comma <> ppr u <> ppr b)

{-
************************************************************************
*                                                                      *
              Finding the Names in Iface syntax
*                                                                      *
************************************************************************

This is used for dependency analysis in GHC.Iface.Make, so that we
fingerprint a declaration before the things that depend on it.  It is
specific to interface-file fingerprinting in the sense that we don't
collect *all* Names: for example, the DFun of an instance is recorded
textually rather than by its fingerprint when fingerprinting the
instance, so DFuns are not dependencies.
-}

freeNamesIfDecl :: IfaceDecl -> NameSet
freeNamesIfDecl (IfaceId { ifType = t, ifIdDetails = d, ifIdInfo = i})
  = freeNamesIfType t &&&
    freeNamesIfIdInfo i &&&
    freeNamesIfIdDetails d

freeNamesIfDecl (IfaceData { ifBinders = bndrs, ifResKind = res_k
                           , ifParent = p, ifCtxt = ctxt, ifCons = cons })
  = freeNamesIfVarBndrs bndrs &&&
    freeNamesIfType res_k &&&
    freeNamesIfaceTyConParent p &&&
    freeNamesIfContext ctxt &&&
    freeNamesIfConDecls cons

freeNamesIfDecl (IfaceSynonym { ifBinders = bndrs, ifResKind = res_k
                              , ifSynRhs = rhs })
  = freeNamesIfVarBndrs bndrs &&&
    freeNamesIfKind res_k &&&
    freeNamesIfType rhs

freeNamesIfDecl (IfaceFamily { ifBinders = bndrs, ifResKind = res_k
                             , ifFamFlav = flav })
  = freeNamesIfVarBndrs bndrs &&&
    freeNamesIfKind res_k &&&
    freeNamesIfFamFlav flav

freeNamesIfDecl (IfaceClass{ ifBinders = bndrs, ifBody = cls_body })
  = freeNamesIfVarBndrs bndrs &&&
    freeNamesIfClassBody cls_body

freeNamesIfDecl (IfaceAxiom { ifTyCon = tc, ifAxBranches = branches })
  = freeNamesIfTc tc &&&
    fnList freeNamesIfAxBranch branches

freeNamesIfDecl (IfacePatSyn { ifPatMatcher = (matcher, _)
                             , ifPatBuilder = mb_builder
                             , ifPatUnivBndrs = univ_bndrs
                             , ifPatExBndrs = ex_bndrs
                             , ifPatProvCtxt = prov_ctxt
                             , ifPatReqCtxt = req_ctxt
                             , ifPatArgs = args
                             , ifPatTy = pat_ty
                             , ifFieldLabels = lbls })
  = unitNameSet matcher &&&
    maybe emptyNameSet (unitNameSet . fst) mb_builder &&&
    freeNamesIfVarBndrs univ_bndrs &&&
    freeNamesIfVarBndrs ex_bndrs &&&
    freeNamesIfContext prov_ctxt &&&
    freeNamesIfContext req_ctxt &&&
    fnList freeNamesIfType args &&&
    freeNamesIfType pat_ty &&&
    mkNameSet (map flSelector lbls)

freeNamesIfClassBody :: IfaceClassBody -> NameSet
freeNamesIfClassBody IfAbstractClass
  = emptyNameSet
freeNamesIfClassBody (IfConcreteClass{ ifClassCtxt = ctxt, ifATs = ats, ifSigs = sigs })
  = freeNamesIfContext ctxt &&&
    fnList freeNamesIfAT ats &&&
    fnList freeNamesIfClsSig sigs

freeNamesIfAxBranch :: IfaceAxBranch -> NameSet
freeNamesIfAxBranch (IfaceAxBranch { ifaxbTyVars = tyvars
                                   , ifaxbCoVars = covars
                                   , ifaxbLHS    = lhs
                                   , ifaxbRHS    = rhs })
  = fnList freeNamesIfTvBndr tyvars &&&
    fnList freeNamesIfIdBndr covars &&&
    freeNamesIfAppArgs lhs &&&
    freeNamesIfType rhs

freeNamesIfIdDetails :: IfaceIdDetails -> NameSet
freeNamesIfIdDetails (IfRecSelId tc first_con _ fl) =
  either freeNamesIfTc freeNamesIfDecl tc &&&
  unitFV first_con &&&
  unitFV (flSelector fl)
freeNamesIfIdDetails IfVanillaId         = emptyNameSet
freeNamesIfIdDetails (IfWorkerLikeId {}) = emptyNameSet
freeNamesIfIdDetails IfDFunId            = emptyNameSet

-- All other changes are handled via the version info on the tycon
freeNamesIfFamFlav :: IfaceFamTyConFlav -> NameSet
freeNamesIfFamFlav IfaceOpenSynFamilyTyCon             = emptyNameSet
freeNamesIfFamFlav IfaceDataFamilyTyCon                = emptyNameSet
freeNamesIfFamFlav (IfaceClosedSynFamilyTyCon (Just (ax, br)))
  = unitNameSet ax &&& fnList freeNamesIfAxBranch br
freeNamesIfFamFlav (IfaceClosedSynFamilyTyCon Nothing) = emptyNameSet
freeNamesIfFamFlav IfaceAbstractClosedSynFamilyTyCon   = emptyNameSet
freeNamesIfFamFlav IfaceBuiltInSynFamTyCon             = emptyNameSet

freeNamesIfContext :: IfaceContext -> NameSet
freeNamesIfContext = fnList freeNamesIfType

freeNamesIfAT :: IfaceAT -> NameSet
freeNamesIfAT (IfaceAT decl mb_def)
  = freeNamesIfDecl decl &&&
    case mb_def of
      Nothing  -> emptyNameSet
      Just rhs -> freeNamesIfType rhs

freeNamesIfClsSig :: IfaceClassOp -> NameSet
freeNamesIfClsSig (IfaceClassOp _n ty dm) = freeNamesIfType ty &&& freeNamesDM dm

freeNamesDM :: Maybe (DefMethSpec IfaceType) -> NameSet
freeNamesDM (Just (GenericDM ty)) = freeNamesIfType ty
freeNamesDM _                     = emptyNameSet

freeNamesIfConDecls :: IfaceConDecls -> NameSet
freeNamesIfConDecls (IfDataTyCon _ cs) = fnList freeNamesIfConDecl cs
freeNamesIfConDecls (IfNewTyCon  c)    = freeNamesIfConDecl c
freeNamesIfConDecls _                  = emptyNameSet

freeNamesIfConDecl :: IfaceConDecl -> NameSet
freeNamesIfConDecl (IfCon { ifConExTCvs  = ex_tvs, ifConCtxt = ctxt
                          , ifConArgTys  = arg_tys
                          , ifConFields  = flds
                          , ifConEqSpec  = eq_spec
                          , ifConStricts = bangs })
  = fnList freeNamesIfBndr ex_tvs &&&
    freeNamesIfContext ctxt &&&
    fnList freeNamesIfType (map fst arg_tys) &&& -- these are multiplicities, represented as types
    fnList freeNamesIfType (map snd arg_tys) &&&
    mkNameSet (map flSelector flds) &&&
    fnList freeNamesIfType (map snd eq_spec) &&& -- equality constraints
    fnList freeNamesIfBang bangs

freeNamesIfBang :: IfaceBang -> NameSet
freeNamesIfBang (IfUnpackCo co) = freeNamesIfCoercion co
freeNamesIfBang _               = emptyNameSet

freeNamesIfKind :: IfaceType -> NameSet
freeNamesIfKind = freeNamesIfType

freeNamesIfAppArgs :: IfaceAppArgs -> NameSet
freeNamesIfAppArgs (IA_Arg t _ ts) = freeNamesIfType t &&& freeNamesIfAppArgs ts
freeNamesIfAppArgs IA_Nil          = emptyNameSet

freeNamesIfType :: IfaceType -> NameSet
freeNamesIfType (IfaceFreeTyVar _)    = emptyNameSet
freeNamesIfType (IfaceTyVar _)        = emptyNameSet
freeNamesIfType (IfaceAppTy s t)      = freeNamesIfType s &&& freeNamesIfAppArgs t
freeNamesIfType (IfaceTyConApp tc ts) = freeNamesIfTc tc &&& freeNamesIfAppArgs ts
freeNamesIfType (IfaceTupleTy _ _ ts) = freeNamesIfAppArgs ts
freeNamesIfType (IfaceLitTy _)        = emptyNameSet
freeNamesIfType (IfaceForAllTy tv t)  = freeNamesIfVarBndr tv &&& freeNamesIfType t
freeNamesIfType (IfaceFunTy _ w s t)  = freeNamesIfType s &&& freeNamesIfType t &&& freeNamesIfType w
freeNamesIfType (IfaceCastTy t c)     = freeNamesIfType t &&& freeNamesIfCoercion c
freeNamesIfType (IfaceCoercionTy c)   = freeNamesIfCoercion c

freeNamesIfMCoercion :: IfaceMCoercion -> NameSet
freeNamesIfMCoercion IfaceMRefl    = emptyNameSet
freeNamesIfMCoercion (IfaceMCo co) = freeNamesIfCoercion co

freeNamesIfCoercion :: IfaceCoercion -> NameSet
freeNamesIfCoercion (IfaceReflCo t) = freeNamesIfType t
freeNamesIfCoercion (IfaceGReflCo _ t mco)
  = freeNamesIfType t &&& freeNamesIfMCoercion mco
freeNamesIfCoercion (IfaceFunCo _ c_mult c1 c2)
  = freeNamesIfCoercion c_mult &&& freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2
freeNamesIfCoercion (IfaceTyConAppCo _ tc cos)
  = freeNamesIfTc tc &&& fnList freeNamesIfCoercion cos
freeNamesIfCoercion (IfaceAppCo c1 c2)
  = freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2
freeNamesIfCoercion (IfaceForAllCo _tcv _visL _visR kind_co co)
  = freeNamesIfCoercion kind_co &&& freeNamesIfCoercion co
freeNamesIfCoercion (IfaceFreeCoVar _) = emptyNameSet
freeNamesIfCoercion (IfaceCoVarCo _)   = emptyNameSet
freeNamesIfCoercion (IfaceHoleCo _)    = emptyNameSet
freeNamesIfCoercion (IfaceUnivCo _ _ t1 t2 cos)
  = freeNamesIfType t1 &&& freeNamesIfType t2 &&& fnList freeNamesIfCoercion cos
freeNamesIfCoercion (IfaceSymCo c)
  = freeNamesIfCoercion c
freeNamesIfCoercion (IfaceTransCo c1 c2)
  = freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2
freeNamesIfCoercion (IfaceSelCo _ co)
  = freeNamesIfCoercion co
freeNamesIfCoercion (IfaceLRCo _ co)
  = freeNamesIfCoercion co
freeNamesIfCoercion (IfaceInstCo co co2)
  = freeNamesIfCoercion co &&& freeNamesIfCoercion co2
freeNamesIfCoercion (IfaceKindCo c)
  = freeNamesIfCoercion c
freeNamesIfCoercion (IfaceSubCo co)
  = freeNamesIfCoercion co
freeNamesIfCoercion (IfaceAxiomCo ax cos)
  = fnAxRule ax &&& fnList freeNamesIfCoercion cos

fnAxRule :: IfaceAxiomRule -> NameSet
fnAxRule (IfaceAR_X _)   = emptyNameSet -- the axiom is just a string, so we don't count it as a name.
fnAxRule (IfaceAR_U n)   = unitNameSet n
fnAxRule (IfaceAR_B n _) = unitNameSet n

freeNamesIfVarBndr :: VarBndr IfaceBndr vis -> NameSet
freeNamesIfVarBndr (Bndr bndr _) = freeNamesIfBndr bndr

freeNamesIfVarBndrs :: [VarBndr IfaceBndr vis] -> NameSet
freeNamesIfVarBndrs = fnList freeNamesIfVarBndr

freeNamesIfBndr :: IfaceBndr -> NameSet
freeNamesIfBndr (IfaceIdBndr b) = freeNamesIfIdBndr b
freeNamesIfBndr (IfaceTvBndr b) = freeNamesIfTvBndr b

freeNamesIfBndrs :: [IfaceBndr] -> NameSet
freeNamesIfBndrs = fnList freeNamesIfBndr

freeNamesIfLetBndr :: IfaceLetBndr -> NameSet
-- Remember IfaceLetBndr is used only for *nested* bindings
-- The IdInfo can contain an unfolding (in the case of
-- local INLINE pragmas), so look there too
freeNamesIfLetBndr (IfLetBndr _name ty info _ji)
  = freeNamesIfType ty &&& freeNamesIfIdInfo info

freeNamesIfTvBndr :: IfaceTvBndr -> NameSet
freeNamesIfTvBndr (_fs,k) = freeNamesIfKind k
    -- kinds can have Names inside, because of promotion

freeNamesIfIdBndr :: IfaceIdBndr -> NameSet
freeNamesIfIdBndr (_, _fs,k) = freeNamesIfKind k

freeNamesIfIdInfo :: IfaceIdInfo -> NameSet
freeNamesIfIdInfo = fnList freeNamesItem

freeNamesItem :: IfaceInfoItem -> NameSet
freeNamesItem (HsUnfold _ u)         = freeNamesIfUnfold u
freeNamesItem (HsLFInfo (IfLFCon n)) = unitNameSet n
freeNamesItem _                      = emptyNameSet

freeNamesIfUnfold :: IfaceUnfolding -> NameSet
freeNamesIfUnfold (IfCoreUnfold _ _ _ e) = freeNamesIfExpr e
freeNamesIfUnfold (IfDFunUnfold bs es)   = freeNamesIfBndrs bs &&& fnList freeNamesIfExpr es

freeNamesIfExpr :: IfaceExpr -> NameSet
freeNamesIfExpr (IfaceExt v)        = unitNameSet v
freeNamesIfExpr (IfaceFCall _ ty)   = freeNamesIfType ty
freeNamesIfExpr (IfaceType ty)      = freeNamesIfType ty
freeNamesIfExpr (IfaceCo co)        = freeNamesIfCoercion co
freeNamesIfExpr (IfaceTuple _ as)   = fnList freeNamesIfExpr as
freeNamesIfExpr (IfaceLam (b,_) body) = freeNamesIfBndr b &&& freeNamesIfExpr body
freeNamesIfExpr (IfaceApp f a)      = freeNamesIfExpr f &&& freeNamesIfExpr a
freeNamesIfExpr (IfaceCast e co)    = freeNamesIfExpr e &&& freeNamesIfCoercion co
freeNamesIfExpr (IfaceTick t e)     = freeNamesIfTickish t &&& freeNamesIfExpr e
freeNamesIfExpr (IfaceECase e ty)   = freeNamesIfExpr e &&& freeNamesIfType ty
freeNamesIfExpr (IfaceCase s _ alts)
  = freeNamesIfExpr s &&& fnList fn_alt alts &&& fn_cons alts
  where
    fn_alt (IfaceAlt _con _bs r) = freeNamesIfExpr r

    -- Depend on the data constructors.  Just one will do!
    -- Note [Tracking data constructors]
    fn_cons []                                         = emptyNameSet
    fn_cons (IfaceAlt IfaceDefaultAlt    _ _ : xs)     = fn_cons xs
    fn_cons (IfaceAlt (IfaceDataAlt con) _ _ : _ )     = unitNameSet con
    fn_cons (_                               : _ )     = emptyNameSet

freeNamesIfExpr (IfaceLet (IfaceNonRec bndr rhs) body)
  = freeNamesIfLetBndr bndr &&& freeNamesIfExpr rhs &&& freeNamesIfExpr body

freeNamesIfExpr (IfaceLet (IfaceRec as) x)
  = fnList fn_pair as &&& freeNamesIfExpr x
  where
    fn_pair (bndr, rhs) = freeNamesIfLetBndr bndr &&& freeNamesIfExpr rhs

freeNamesIfExpr _ = emptyNameSet

freeNamesIfTc :: IfaceTyCon -> NameSet
freeNamesIfTc tc = unitNameSet (ifaceTyConName tc)
-- ToDo: shouldn't we include IfaceIntTc & co.?
freeNamesIfRule :: IfaceRule -> NameSet freeNamesIfRule (IfaceRule { ifRuleBndrs = bs, ifRuleHead = f , ifRuleArgs = es, ifRuleRhs = rhs }) = unitNameSet f &&& fnList freeNamesIfBndr bs &&& fnList freeNamesIfExpr es &&& freeNamesIfExpr rhs freeNamesIfFamInst :: IfaceFamInst -> NameSet freeNamesIfFamInst (IfaceFamInst { ifFamInstFam = famName , ifFamInstAxiom = axName }) = unitNameSet famName &&& unitNameSet axName freeNamesIfaceTyConParent :: IfaceTyConParent -> NameSet freeNamesIfaceTyConParent IfNoParent = emptyNameSet freeNamesIfaceTyConParent (IfDataInstance ax tc tys) = unitNameSet ax &&& freeNamesIfTc tc &&& freeNamesIfAppArgs tys freeNamesIfTickish :: IfaceTickish -> NameSet freeNamesIfTickish (IfaceBreakpoint _ fvs _) = fnList freeNamesIfExpr fvs freeNamesIfTickish _ = emptyNameSet -- helpers (&&&) :: NameSet -> NameSet -> NameSet (&&&) = unionNameSet fnList :: (a -> NameSet) -> [a] -> NameSet fnList f = foldr (&&&) emptyNameSet . map f {- Note [Tracking data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a case expression case e of { C a -> ...; ... } You might think that we don't need to include the datacon C in the free names, because its type will probably show up in the free names of 'e'. But in rare circumstances this may not happen. Here's the one that bit me: module DynFlags where import {-# SOURCE #-} Packages( PackageState ) data DynFlags = DF ... PackageState ... module Packages where import GHC.Driver.DynFlags data PackageState = PS ... lookupModule (df :: DynFlags) = case df of DF ...p... -> case p of PS ... -> ... Now, lookupModule depends on DynFlags, but the transitive dependency on the *locally-defined* type PackageState is not visible. We need to take account of the use of the data constructor PS in the pattern match. 
************************************************************************ * * Binary instances * * ************************************************************************ Note that there is a bit of subtlety here when we encode names. While IfaceTopBndrs is really just a synonym for Name, we need to take care to encode them with {get,put}IfaceTopBndr. The difference becomes important when we go to fingerprint an IfaceDecl. See Note [Fingerprinting IfaceDecls] for details. -} instance Binary IfaceDecl where put_ bh (IfaceId name ty details idinfo) = do putByte bh 0 putIfaceTopBndr bh name lazyPut bh (ty, details, idinfo) -- See Note [Lazy deserialization of IfaceId] put_ bh (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9) = do putByte bh 2 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh a9 put_ bh (IfaceSynonym a1 a2 a3 a4 a5) = do putByte bh 3 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh (IfaceFamily a1 a2 a3 a4 a5 a6) = do putByte bh 4 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 -- NB: Written in a funny way to avoid an interface change put_ bh (IfaceClass { ifName = a2, ifRoles = a3, ifBinders = a4, ifFDs = a5, ifBody = IfConcreteClass { ifClassCtxt = a1, ifATs = a6, ifSigs = a7, ifMinDef = a8 }}) = do putByte bh 5 put_ bh a1 putIfaceTopBndr bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh (IfaceAxiom a1 a2 a3 a4) = do putByte bh 6 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh (IfacePatSyn a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) = do putByte bh 7 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh a9 put_ bh a10 put_ bh a11 put_ bh (IfaceClass { ifName = a1, ifRoles = a2, ifBinders = a3, ifFDs = a4, ifBody = IfAbstractClass }) = do putByte bh 8 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 get bh = do h <- getByte bh case h of 0 -> do name <- get 
bh ~(ty, details, idinfo) <- lazyGet bh -- See Note [Lazy deserialization of IfaceId] return (IfaceId name ty details idinfo) 1 -> error "Binary.get(TyClDecl): ForeignType" 2 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh a9 <- get bh return (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9) 3 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh return (IfaceSynonym a1 a2 a3 a4 a5) 4 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh return (IfaceFamily a1 a2 a3 a4 a5 a6) 5 -> do a1 <- get bh a2 <- getIfaceTopBndr bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh return (IfaceClass { ifName = a2, ifRoles = a3, ifBinders = a4, ifFDs = a5, ifBody = IfConcreteClass { ifClassCtxt = a1, ifATs = a6, ifSigs = a7, ifMinDef = a8 }}) 6 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh return (IfaceAxiom a1 a2 a3 a4) 7 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh a9 <- get bh a10 <- get bh a11 <- get bh return (IfacePatSyn a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) 8 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh return (IfaceClass { ifName = a1, ifRoles = a2, ifBinders = a3, ifFDs = a4, ifBody = IfAbstractClass }) _ -> panic (unwords ["Unknown IfaceDecl tag:", show h]) instance Binary IfaceBooleanFormula where put_ bh = \case IfVar a1 -> putByte bh 0 >> put_ bh a1 IfAnd a1 -> putByte bh 1 >> put_ bh a1 IfOr a1 -> putByte bh 2 >> put_ bh a1 IfParens a1 -> putByte bh 3 >> put_ bh a1 get bh = do getByte bh >>= \case 0 -> IfVar <$> get bh 1 -> IfAnd <$> get bh 2 -> IfOr <$> get bh _ -> IfParens <$> get bh {- Note [Lazy deserialization of IfaceId] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The use of lazyPut and lazyGet in the IfaceId Binary instance is purely for performance reasons, to avoid 
deserializing details about identifiers that will never be used. It's not involved in tying the knot in the type checker. It saved ~1% of the total build time of GHC. When we read an interface file, we extend the PTE, a mapping of Names to TyThings, with the declarations we have read. The extension of the PTE is strict in the Names, but not in the TyThings themselves. GHC.IfaceToCore.tcIfaceDecls calculates the list of (Name, TyThing) bindings to add to the PTE. For an IfaceId, there's just one binding to add; and the ty, details, and idinfo fields of an IfaceId are used only in the TyThing. So by reading those fields lazily we may be able to save the work of ever having to deserialize them (into IfaceType, etc.). For IfaceData and IfaceClass, tcIfaceDecls creates extra implicit bindings (the constructors and field selectors of the data declaration, or the methods of the class), whose Names depend on more than just the Name of the type constructor or class itself. So deserializing them lazily would be more involved. Similar comments apply to the other constructors of IfaceDecl with the additional point that they probably represent a small proportion of all declarations. 
-} instance Binary IfaceFamTyConFlav where put_ bh IfaceDataFamilyTyCon = putByte bh 0 put_ bh IfaceOpenSynFamilyTyCon = putByte bh 1 put_ bh (IfaceClosedSynFamilyTyCon mb) = putByte bh 2 >> put_ bh mb put_ bh IfaceAbstractClosedSynFamilyTyCon = putByte bh 3 put_ _ IfaceBuiltInSynFamTyCon = pprPanic "Cannot serialize IfaceBuiltInSynFamTyCon, used for pretty-printing only" Outputable.empty get bh = do { h <- getByte bh ; case h of 0 -> return IfaceDataFamilyTyCon 1 -> return IfaceOpenSynFamilyTyCon 2 -> do { mb <- get bh ; return (IfaceClosedSynFamilyTyCon mb) } 3 -> return IfaceAbstractClosedSynFamilyTyCon _ -> pprPanic "Binary.get(IfaceFamTyConFlav): Invalid tag" (ppr (fromIntegral h :: Int)) } instance Binary IfaceClassOp where put_ bh (IfaceClassOp n ty def) = do putIfaceTopBndr bh n put_ bh ty put_ bh def get bh = do n <- getIfaceTopBndr bh ty <- get bh def <- get bh return (IfaceClassOp n ty def) instance Binary IfaceAT where put_ bh (IfaceAT dec defs) = do put_ bh dec put_ bh defs get bh = do dec <- get bh defs <- get bh return (IfaceAT dec defs) instance Binary IfaceAxBranch where put_ bh (IfaceAxBranch a1 a2 a3 a4 a5 a6 a7) = do put_ bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 get bh = do a1 <- get bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh return (IfaceAxBranch a1 a2 a3 a4 a5 a6 a7) instance Binary IfaceConDecls where put_ bh IfAbstractTyCon = putByte bh 0 put_ bh (IfDataTyCon False cs) = putByte bh 1 >> put_ bh cs put_ bh (IfDataTyCon True cs) = putByte bh 2 >> put_ bh cs put_ bh (IfNewTyCon c) = putByte bh 3 >> put_ bh c get bh = do h <- getByte bh case h of 0 -> return IfAbstractTyCon 1 -> liftM (IfDataTyCon False) (get bh) 2 -> liftM (IfDataTyCon True) (get bh) 3 -> liftM IfNewTyCon (get bh) _ -> error "Binary(IfaceConDecls).get: Invalid IfaceConDecls" instance Binary IfaceConDecl where put_ bh (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) = do putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 
put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh (length a9) mapM_ (put_ bh) a9 put_ bh a10 put_ bh a11 get bh = do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh n_fields <- get bh a9 <- replicateM n_fields (get bh) a10 <- get bh a11 <- get bh return (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) instance Binary IfaceBang where put_ bh IfNoBang = putByte bh 0 put_ bh IfStrict = putByte bh 1 put_ bh IfUnpack = putByte bh 2 put_ bh (IfUnpackCo co) = putByte bh 3 >> put_ bh co get bh = do h <- getByte bh case h of 0 -> return IfNoBang 1 -> return IfStrict 2 -> return IfUnpack _ -> IfUnpackCo <$> get bh instance Binary IfaceSrcBang where put_ bh (IfSrcBang a1 a2) = do put_ bh a1 put_ bh a2 get bh = do a1 <- get bh a2 <- get bh return (IfSrcBang a1 a2) instance Binary IfaceDefault where put_ bh (IfaceDefault cls tys warn) = do put_ bh cls put_ bh tys put_ bh warn get bh = do cls <- get bh tys <- get bh warn <- get bh return (IfaceDefault cls tys warn) instance Binary IfaceClsInst where put_ bh (IfaceClsInst cls tys dfun flag orph warn) = do put_ bh cls put_ bh tys put_ bh dfun put_ bh flag put_ bh orph put_ bh warn get bh = do cls <- get bh tys <- get bh dfun <- get bh flag <- get bh orph <- get bh warn <- get bh return (IfaceClsInst cls tys dfun flag orph warn) instance Binary IfaceFamInst where put_ bh (IfaceFamInst fam tys name orph) = do put_ bh fam put_ bh tys put_ bh name put_ bh orph get bh = do fam <- get bh tys <- get bh name <- get bh orph <- get bh return (IfaceFamInst fam tys name orph) instance Binary IfaceRule where put_ bh (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) = do put_ bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 get bh = do a1 <- get bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh return (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) instance Binary IfaceWarnings where put_ bh = \case IfWarnAll txt -> 
putByte bh 0 *> put_ bh txt IfWarnSome vs ds -> putByte bh 1 *> put_ bh vs *> put_ bh ds get bh = getByte bh >>= \case 0 -> pure IfWarnAll <*> get bh 1 -> pure IfWarnSome <*> get bh <*> get bh _ -> fail "invalid tag(IfaceWarnings)" instance Binary IfaceWarningTxt where put_ bh = \case IfWarningTxt a1 a2 a3 -> putByte bh 0 *> put_ bh a1 *> put_ bh a2 *> put_ bh a3 IfDeprecatedTxt a1 a2 -> putByte bh 1 *> put_ bh a1 *> put_ bh a2 get bh = getByte bh >>= \case 0 -> pure IfWarningTxt <*> get bh <*> get bh <*> get bh _ -> pure IfDeprecatedTxt <*> get bh <*> get bh instance Binary IfaceStringLiteral where put_ bh (IfStringLiteral a1 a2) = put_ bh a1 *> put_ bh a2 get bh = IfStringLiteral <$> get bh <*> get bh instance Binary IfaceAnnotation where put_ bh (IfaceAnnotation a1 a2) = do put_ bh a1 put_ bh a2 get bh = do a1 <- get bh a2 <- get bh return (IfaceAnnotation a1 a2) instance Binary IfaceIdDetails where put_ bh IfVanillaId = putByte bh 0 put_ bh (IfRecSelId a b c d) = do { putByte bh 1 ; put_ bh a ; put_ bh b ; put_ bh c ; put_ bh d } put_ bh (IfWorkerLikeId dmds) = putByte bh 2 >> put_ bh dmds put_ bh IfDFunId = putByte bh 3 get bh = do h <- getByte bh case h of 0 -> return IfVanillaId 1 -> do { a <- get bh ; b <- get bh ; c <- get bh ; d <- get bh ; return (IfRecSelId a b c d) } 2 -> do { dmds <- get bh ; return (IfWorkerLikeId dmds) } _ -> return IfDFunId instance Binary IfaceInfoItem where put_ bh (HsArity aa) = putByte bh 0 >> put_ bh aa put_ bh (HsDmdSig ab) = putByte bh 1 >> put_ bh ab put_ bh (HsUnfold lb ad) = putByte bh 2 >> put_ bh lb >> put_ bh ad put_ bh (HsInline ad) = putByte bh 3 >> put_ bh ad put_ bh HsNoCafRefs = putByte bh 4 put_ bh (HsCprSig cpr) = putByte bh 6 >> put_ bh cpr put_ bh (HsLFInfo lf_info) = putByte bh 7 >> put_ bh lf_info put_ bh (HsTagSig sig) = putByte bh 8 >> put_ bh sig get bh = do h <- getByte bh case h of 0 -> liftM HsArity $ get bh 1 -> liftM HsDmdSig $ get bh 2 -> do lb <- get bh ad <- get bh return (HsUnfold lb ad) 3 -> 
liftM HsInline $ get bh 4 -> return HsNoCafRefs 6 -> HsCprSig <$> get bh 7 -> HsLFInfo <$> get bh _ -> HsTagSig <$> get bh instance Binary IfaceUnfolding where put_ bh (IfCoreUnfold s c g e) = do putByte bh 0 put_ bh s putUnfoldingCache bh c put_ bh g put_ bh e put_ bh (IfDFunUnfold as bs) = do putByte bh 1 put_ bh as put_ bh bs get bh = do h <- getByte bh case h of 0 -> do s <- get bh c <- getUnfoldingCache bh g <- get bh e <- get bh return (IfCoreUnfold s c g e) _ -> do as <- get bh bs <- get bh return (IfDFunUnfold as bs) instance Binary IfGuidance where put_ bh IfNoGuidance = putByte bh 0 put_ bh (IfWhen a b c ) = do putByte bh 1 put_ bh a put_ bh b put_ bh c get bh = do h <- getByte bh case h of 0 -> return IfNoGuidance _ -> do a <- get bh b <- get bh c <- get bh return (IfWhen a b c) putUnfoldingCache :: WriteBinHandle -> IfUnfoldingCache -> IO () putUnfoldingCache bh (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) = do let b = zeroBits .<<|. hnf .<<|. conlike .<<|. wf .<<|. exp putByte bh b getUnfoldingCache :: ReadBinHandle -> IO IfUnfoldingCache getUnfoldingCache bh = do b <- getByte bh let hnf = testBit b 3 conlike = testBit b 2 wf = testBit b 1 exp = testBit b 0 return (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) infixl 9 .<<|. (.<<|.) :: (Num a, Bits a) => a -> Bool -> a x .<<|. b = (if b then (`setBit` 0) else id) (x `shiftL` 1) {-# INLINE (.<<|.) 
#-} instance Binary IfaceAlt where put_ bh (IfaceAlt a b c) = do put_ bh a put_ bh b put_ bh c get bh = do a <- get bh b <- get bh c <- get bh return (IfaceAlt a b c) instance Binary IfaceExpr where put_ bh (IfaceLcl aa) = do putByte bh 0 put_ bh aa put_ bh (IfaceType ab) = do putByte bh 1 put_ bh ab put_ bh (IfaceCo ab) = do putByte bh 2 put_ bh ab put_ bh (IfaceTuple ac ad) = do putByte bh 3 put_ bh ac put_ bh ad put_ bh (IfaceLam (ae, os) af) = do putByte bh 4 put_ bh ae put_ bh os put_ bh af put_ bh (IfaceApp ag ah) = do putByte bh 5 put_ bh ag put_ bh ah put_ bh (IfaceCase ai aj ak) = do putByte bh 6 put_ bh ai put_ bh aj put_ bh ak put_ bh (IfaceLet al am) = do putByte bh 7 put_ bh al put_ bh am put_ bh (IfaceTick an ao) = do putByte bh 8 put_ bh an put_ bh ao put_ bh (IfaceLit ap) = do putByte bh 9 put_ bh ap put_ bh (IfaceFCall as at) = do putByte bh 10 put_ bh as put_ bh at put_ bh (IfaceExt aa) = do putByte bh 11 put_ bh aa put_ bh (IfaceCast ie ico) = do putByte bh 12 put_ bh ie put_ bh ico put_ bh (IfaceECase a b) = do putByte bh 13 put_ bh a put_ bh b put_ bh (IfaceLitRubbish TypeLike r) = do putByte bh 14 put_ bh r put_ bh (IfaceLitRubbish ConstraintLike r) = do putByte bh 15 put_ bh r get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (IfaceLcl aa) 1 -> do ab <- get bh return (IfaceType ab) 2 -> do ab <- get bh return (IfaceCo ab) 3 -> do ac <- get bh ad <- get bh return (IfaceTuple ac ad) 4 -> do ae <- get bh os <- get bh af <- get bh return (IfaceLam (ae, os) af) 5 -> do ag <- get bh ah <- get bh return (IfaceApp ag ah) 6 -> do ai <- get bh aj <- get bh ak <- get bh return (IfaceCase ai aj ak) 7 -> do al <- get bh am <- get bh return (IfaceLet al am) 8 -> do an <- get bh ao <- get bh return (IfaceTick an ao) 9 -> do ap <- get bh return (IfaceLit ap) 10 -> do as <- get bh at <- get bh return (IfaceFCall as at) 11 -> do aa <- get bh return (IfaceExt aa) 12 -> do ie <- get bh ico <- get bh return (IfaceCast ie ico) 13 -> do a <- get bh 
b <- get bh return (IfaceECase a b) 14 -> do r <- get bh return (IfaceLitRubbish TypeLike r) 15 -> do r <- get bh return (IfaceLitRubbish ConstraintLike r) _ -> panic ("get IfaceExpr " ++ show h) instance Binary IfaceTickish where put_ bh (IfaceHpcTick m ix) = do putByte bh 0 put_ bh m put_ bh ix put_ bh (IfaceSCC cc tick push) = do putByte bh 1 put_ bh cc put_ bh tick put_ bh push put_ bh (IfaceSource src name) = do putByte bh 2 put_ bh (srcSpanFile src) put_ bh (srcSpanStartLine src) put_ bh (srcSpanStartCol src) put_ bh (srcSpanEndLine src) put_ bh (srcSpanEndCol src) put_ bh name put_ bh (IfaceBreakpoint m ix fvs) = do putByte bh 3 put_ bh m put_ bh ix put_ bh fvs get bh = do h <- getByte bh case h of 0 -> do m <- get bh ix <- get bh return (IfaceHpcTick m ix) 1 -> do cc <- get bh tick <- get bh push <- get bh return (IfaceSCC cc tick push) 2 -> do file <- get bh sl <- get bh sc <- get bh el <- get bh ec <- get bh let start = mkRealSrcLoc file sl sc end = mkRealSrcLoc file el ec name <- get bh return (IfaceSource (mkRealSrcSpan start end) name) 3 -> do m <- get bh ix <- get bh fvs <- get bh return (IfaceBreakpoint m ix fvs) _ -> panic ("get IfaceTickish " ++ show h) instance Binary IfaceConAlt where put_ bh IfaceDefaultAlt = putByte bh 0 put_ bh (IfaceDataAlt aa) = putByte bh 1 >> put_ bh aa put_ bh (IfaceLitAlt ac) = putByte bh 2 >> put_ bh ac get bh = do h <- getByte bh case h of 0 -> return IfaceDefaultAlt 1 -> liftM IfaceDataAlt $ get bh _ -> liftM IfaceLitAlt $ get bh instance (Binary r, Binary b) => Binary (IfaceBindingX b r) where put_ bh (IfaceNonRec aa ab) = putByte bh 0 >> put_ bh aa >> put_ bh ab put_ bh (IfaceRec ac) = putByte bh 1 >> put_ bh ac get bh = do h <- getByte bh case h of 0 -> do { aa <- get bh; ab <- get bh; return (IfaceNonRec aa ab) } _ -> do { ac <- get bh; return (IfaceRec ac) } instance Binary IfaceLetBndr where put_ bh (IfLetBndr a b c d) = do put_ bh a put_ bh b put_ bh c put_ bh d get bh = do a <- get bh b <- get bh c <- get bh d 
<- get bh return (IfLetBndr a b c d) instance Binary IfaceTopBndrInfo where put_ bh (IfLclTopBndr lcl ty info dets) = do putByte bh 0 put_ bh lcl put_ bh ty put_ bh info put_ bh dets put_ bh (IfGblTopBndr gbl) = do putByte bh 1 put_ bh gbl get bh = do tag <- getByte bh case tag of 0 -> IfLclTopBndr <$> get bh <*> get bh <*> get bh <*> get bh 1 -> IfGblTopBndr <$> get bh _ -> pprPanic "IfaceTopBndrInfo" (intWithCommas tag) instance Binary IfaceMaybeRhs where put_ bh IfUseUnfoldingRhs = putByte bh 0 put_ bh (IfRhs e) = do putByte bh 1 put_ bh e get bh = do b <- getByte bh case b of 0 -> return IfUseUnfoldingRhs 1 -> IfRhs <$> get bh _ -> pprPanic "IfaceMaybeRhs" (intWithCommas b) instance Binary IfaceTyConParent where put_ bh IfNoParent = putByte bh 0 put_ bh (IfDataInstance ax pr ty) = do putByte bh 1 put_ bh ax put_ bh pr put_ bh ty get bh = do h <- getByte bh case h of 0 -> return IfNoParent _ -> do ax <- get bh pr <- get bh ty <- get bh return $ IfDataInstance ax pr ty instance Binary IfaceCompleteMatch where put_ bh (IfaceCompleteMatch cs mtc) = put_ bh cs >> put_ bh mtc get bh = IfaceCompleteMatch <$> get bh <*> get bh {- ************************************************************************ * * NFData instances See Note [Avoiding space leaks in toIface*] in GHC.CoreToIface * * ************************************************************************ -} instance NFData IfaceImport where rnf (IfaceImport a b) = rnf a `seq` rnf b instance NFData ImpIfaceList where rnf ImpIfaceAll = () rnf (ImpIfaceEverythingBut ns) = rnf ns rnf (ImpIfaceExplicit gre) = rnf gre instance NFData IfaceDecl where rnf = \case IfaceId f1 f2 f3 f4 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceData f1 f2 f3 f4 f5 f6 f7 f8 f9 -> f1 `seq` seqList f2 `seq` f3 `seq` f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 `seq` rnf f8 `seq` rnf f9 IfaceSynonym f1 f2 f3 f4 f5 -> rnf f1 `seq` f2 `seq` seqList f3 `seq` rnf f4 `seq` rnf f5 IfaceFamily f1 f2 f3 f4 f5 f6 -> rnf f1 `seq` rnf f2 `seq` seqList 
f3 `seq` rnf f4 `seq` rnf f5 `seq` f6 `seq` () IfaceClass f1 f2 f3 f4 f5 -> rnf f1 `seq` f2 `seq` seqList f3 `seq` rnf f4 `seq` rnf f5 IfaceAxiom nm tycon role ax -> rnf nm `seq` rnf tycon `seq` role `seq` rnf ax IfacePatSyn f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` f6 `seq` rnf f7 `seq` rnf f8 `seq` rnf f9 `seq` rnf f10 `seq` f11 `seq` () instance NFData IfaceAxBranch where rnf (IfaceAxBranch f1 f2 f3 f4 f5 f6 f7) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 instance NFData IfaceClassBody where rnf = \case IfAbstractClass -> () IfConcreteClass f1 f2 f3 f4 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` () instance NFData IfaceBooleanFormula where rnf = \case IfVar f1 -> rnf f1 IfAnd f1 -> rnf f1 IfOr f1 -> rnf f1 IfParens f1 -> rnf f1 instance NFData IfaceAT where rnf (IfaceAT f1 f2) = rnf f1 `seq` rnf f2 instance NFData IfaceClassOp where rnf (IfaceClassOp f1 f2 f3) = rnf f1 `seq` rnf f2 `seq` f3 `seq` () instance NFData IfaceTyConParent where rnf = \case IfNoParent -> () IfDataInstance f1 f2 f3 -> rnf f1 `seq` rnf f2 `seq` rnf f3 instance NFData IfaceConDecls where rnf = \case IfAbstractTyCon -> () IfDataTyCon _ f1 -> rnf f1 IfNewTyCon f1 -> rnf f1 instance NFData IfaceConDecl where rnf (IfCon f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 `seq` rnf f8 `seq` f9 `seq` rnf f10 `seq` rnf f11 instance NFData IfaceSrcBang where rnf (IfSrcBang f1 f2) = f1 `seq` f2 `seq` () instance NFData IfaceBang where rnf x = x `seq` () instance NFData IfaceIdDetails where rnf = \case IfVanillaId -> () IfWorkerLikeId dmds -> dmds `seqList` () IfRecSelId (Left tycon) b c d -> rnf tycon `seq` rnf b `seq` rnf c `seq` rnf d IfRecSelId (Right decl) b c d -> rnf decl `seq` rnf b `seq` rnf c `seq` rnf d IfDFunId -> () instance NFData IfaceInfoItem where rnf = \case HsArity a -> rnf a HsDmdSig str -> seqDmdSig str 
HsInline p -> p `seq` () -- TODO: seq further? HsUnfold b unf -> rnf b `seq` rnf unf HsNoCafRefs -> () HsCprSig cpr -> cpr `seq` () HsLFInfo lf_info -> lf_info `seq` () -- TODO: seq further? HsTagSig sig -> sig `seq` () instance NFData IfGuidance where rnf = \case IfNoGuidance -> () IfWhen a b c -> a `seq` b `seq` c `seq` () instance NFData IfaceUnfolding where rnf = \case IfCoreUnfold src cache guidance expr -> src `seq` cache `seq` rnf guidance `seq` rnf expr IfDFunUnfold bndrs exprs -> rnf bndrs `seq` rnf exprs -- See Note [UnfoldingCache] in GHC.Core for why it suffices to merely `seq` on cache instance NFData IfaceExpr where rnf = \case IfaceLcl nm -> rnf nm IfaceExt nm -> rnf nm IfaceType ty -> rnf ty IfaceCo co -> rnf co IfaceTuple sort exprs -> sort `seq` rnf exprs IfaceLam bndr expr -> rnf bndr `seq` rnf expr IfaceApp e1 e2 -> rnf e1 `seq` rnf e2 IfaceCase e nm alts -> rnf e `seq` nm `seq` rnf alts IfaceECase e ty -> rnf e `seq` rnf ty IfaceLet bind e -> rnf bind `seq` rnf e IfaceCast e co -> rnf e `seq` rnf co IfaceLit l -> l `seq` () -- FIXME IfaceLitRubbish tc r -> tc `seq` rnf r `seq` () IfaceFCall fc ty -> fc `seq` rnf ty IfaceTick tick e -> rnf tick `seq` rnf e instance NFData IfaceAlt where rnf (IfaceAlt con bndrs rhs) = rnf con `seq` rnf bndrs `seq` rnf rhs instance (NFData b, NFData a) => NFData (IfaceBindingX a b) where rnf = \case IfaceNonRec bndr e -> rnf bndr `seq` rnf e IfaceRec binds -> rnf binds instance NFData IfaceTopBndrInfo where rnf (IfGblTopBndr n) = n `seq` () rnf (IfLclTopBndr fs ty info dets) = rnf fs `seq` rnf ty `seq` rnf info `seq` rnf dets `seq` () instance NFData IfaceMaybeRhs where rnf IfUseUnfoldingRhs = () rnf (IfRhs ce) = rnf ce `seq` () instance NFData IfaceLetBndr where rnf (IfLetBndr nm ty id_info join_info) = rnf nm `seq` rnf ty `seq` rnf id_info `seq` rnf join_info instance NFData IfaceFamTyConFlav where rnf = \case IfaceDataFamilyTyCon -> () IfaceOpenSynFamilyTyCon -> () IfaceClosedSynFamilyTyCon f1 -> rnf f1 
IfaceAbstractClosedSynFamilyTyCon -> () IfaceBuiltInSynFamTyCon -> () instance NFData IfaceTickish where rnf = \case IfaceHpcTick m i -> rnf m `seq` rnf i IfaceSCC cc b1 b2 -> cc `seq` rnf b1 `seq` rnf b2 IfaceSource src str -> src `seq` rnf str IfaceBreakpoint m i fvs -> rnf m `seq` rnf i `seq` rnf fvs instance NFData IfaceConAlt where rnf = \case IfaceDefaultAlt -> () IfaceDataAlt nm -> rnf nm IfaceLitAlt lit -> lit `seq` () instance NFData IfaceCompleteMatch where rnf (IfaceCompleteMatch f1 mtc) = rnf f1 `seq` rnf mtc instance NFData IfaceRule where rnf (IfaceRule f1 f2 f3 f4 f5 f6 f7 f8) = rnf f1 `seq` f2 `seq` rnf f3 `seq` rnf f4 `seq` rnf f5 `seq` rnf f6 `seq` rnf f7 `seq` f8 `seq` () instance NFData IfaceDefault where rnf (IfaceDefault f1 f2 f3) = rnf f1 `seq` rnf f2 `seq` rnf f3 instance NFData IfaceFamInst where rnf (IfaceFamInst f1 f2 f3 f4) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` f4 `seq` () instance NFData IfaceClsInst where rnf (IfaceClsInst f1 f2 f3 f4 f5 f6) = f1 `seq` rnf f2 `seq` rnf f3 `seq` f4 `seq` f5 `seq` rnf f6 instance NFData IfaceWarnings where rnf = \case IfWarnAll txt -> rnf txt IfWarnSome vs ds -> rnf vs `seq` rnf ds instance NFData IfaceWarningTxt where rnf = \case IfWarningTxt f1 f2 f3 -> rnf f1 `seq` rnf f2 `seq` rnf f3 IfDeprecatedTxt f1 f2 -> rnf f1 `seq` rnf f2 instance NFData IfaceStringLiteral where rnf (IfStringLiteral f1 f2) = rnf f1 `seq` rnf f2 instance NFData IfaceAnnotation where rnf (IfaceAnnotation f1 f2) = f1 `seq` f2 `seq` () ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Type.hs0000644000000000000000000030106407346545000020176 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 This module defines interface types and binders -} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE LambdaCase #-} module GHC.Iface.Type ( IfExtName, IfLclName(..), mkIfLclName, ifLclNameFS, IfaceType(..), IfacePredType, IfaceKind, IfaceCoercion(..), 
IfaceAxiomRule(..),IfaceMCoercion(..), IfaceMult, IfaceTyCon(..), IfaceTyConInfo(..), mkIfaceTyConInfo, IfaceTyConSort(..), IfaceTyLit(..), IfaceAppArgs(..), IfaceContext, IfaceBndr(..), IfaceOneShot(..), IfaceLamBndr, IfaceTvBndr, IfaceIdBndr, IfaceTyConBinder, IfaceForAllSpecBndr, IfaceForAllBndr, ForAllTyFlag(..), FunTyFlag(..), ShowForAllFlag(..), ShowSub(..), ShowHowMuch(..), AltPpr(..), mkIfaceForAllTvBndr, mkIfaceTyConKind, ifaceForAllSpecToBndrs, ifaceForAllSpecToBndr, ifForAllBndrVar, ifForAllBndrName, ifaceBndrName, ifTyConBinderVar, ifTyConBinderName, -- Binary utilities putIfaceType, getIfaceType, ifaceTypeSharedByte, -- Equality testing isIfaceLiftedTypeKind, -- Conversion from IfaceAppArgs to IfaceTypes/ForAllTyFlags appArgsIfaceTypes, appArgsIfaceTypesForAllTyFlags, -- Printing SuppressBndrSig(..), UseBndrParens(..), PrintExplicitKinds(..), pprIfaceType, pprParendIfaceType, pprPrecIfaceType, pprIfaceContext, pprIfaceContextArr, pprIfaceIdBndr, pprIfaceLamBndr, pprIfaceTvBndr, pprIfaceTyConBinders, pprIfaceBndrs, pprIfaceAppArgs, pprParendIfaceAppArgs, pprIfaceForAllPart, pprIfaceForAllPartMust, pprIfaceForAll, pprIfaceSigmaType, pprIfaceTyLit, pprIfaceCoercion, pprParendIfaceCoercion, splitIfaceSigmaTy, pprIfaceTypeApp, pprUserIfaceForAll, pprIfaceCoTcApp, pprTyTcApp, pprIfacePrefixApp, isIfaceRhoType, suppressIfaceInvisibles, stripIfaceInvisVars, stripInvisArgs, mkIfaceTySubst, substIfaceTyVar, substIfaceAppArgs, inDomIfaceTySubst, many_ty, pprTypeArrow ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types ( coercibleTyCon, heqTyCon , constraintKindTyConName , tupleTyConName , tupleDataConName , manyDataConTyCon , liftedRepTyCon, liftedDataConTyCon , sumTyCon ) import GHC.Core.Type ( isRuntimeRepTy, isMultiplicityTy, isLevityTy, funTyFlagTyCon ) import GHC.Core.TyCo.Rep( CoSel, UnivCoProvenance(..) 
) import GHC.Core.TyCo.Compare( eqForAllVis ) import GHC.Core.TyCon hiding ( pprPromotionQuote ) import GHC.Core.Coercion.Axiom import GHC.Types.Var import GHC.Builtin.Names import {-# SOURCE #-} GHC.Builtin.Types ( liftedTypeKindTyConName ) import GHC.Types.Name import GHC.Types.Basic import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Panic import {-# SOURCE #-} GHC.Tc.Utils.TcType ( isMetaTyVar, isTyConableTyVar ) import Data.Maybe (isJust) import Data.Proxy import qualified Data.Semigroup as Semi import Data.Word (Word8) import Control.Arrow (first) import Control.DeepSeq import Control.Monad ((<$!>)) {- ************************************************************************ * * Local (nested) binders * * ************************************************************************ -} -- | A local name in iface syntax newtype IfLclName = IfLclName { getIfLclName :: LexicalFastString } deriving (Eq, Ord, Show) ifLclNameFS :: IfLclName -> FastString ifLclNameFS = getLexicalFastString . getIfLclName mkIfLclName :: FastString -> IfLclName mkIfLclName = IfLclName . 
    LexicalFastString

type IfExtName = Name   -- An External or WiredIn Name can appear in Iface syntax
                        -- (However Internal or System Names never should)

data IfaceBndr          -- Local (non-top-level) binders
  = IfaceIdBndr {-# UNPACK #-} !IfaceIdBndr
  | IfaceTvBndr {-# UNPACK #-} !IfaceTvBndr
  deriving (Eq, Ord)

-- (multiplicity, name, type) of a term-level binder
type IfaceIdBndr  = (IfaceType, IfLclName, IfaceType)
-- (name, kind) of a type-level binder
type IfaceTvBndr  = (IfLclName, IfaceKind)

ifaceTvBndrName :: IfaceTvBndr -> IfLclName
ifaceTvBndrName (n,_) = n

ifaceIdBndrName :: IfaceIdBndr -> IfLclName
ifaceIdBndrName (_,n,_) = n

ifaceBndrName :: IfaceBndr -> IfLclName
ifaceBndrName (IfaceTvBndr bndr) = ifaceTvBndrName bndr
ifaceBndrName (IfaceIdBndr bndr) = ifaceIdBndrName bndr

-- | The type of a term binder, or the kind of a type binder.
ifaceBndrType :: IfaceBndr -> IfaceType
ifaceBndrType (IfaceIdBndr (_, _, t)) = t
ifaceBndrType (IfaceTvBndr (_, t)) = t

type IfaceLamBndr = (IfaceBndr, IfaceOneShot)

data IfaceOneShot    -- See Note [Preserve OneShotInfo] in "GHC.Core.Tidy"
  = IfaceNoOneShot   -- and Note [oneShot magic] in "GHC.Types.Id.Make"
  | IfaceOneShot

instance Outputable IfaceOneShot where
  ppr IfaceNoOneShot = text "NoOneShotInfo"
  ppr IfaceOneShot = text "OneShot"

{-
%************************************************************************
%*                                                                      *
                IfaceType
%*                                                                      *
%************************************************************************
-}

-------------------------------
type IfaceKind = IfaceType

-- | A kind of universal type, used for types and kinds.
--
-- Any time a 'Type' is pretty-printed, it is first converted to an 'IfaceType'
-- before being printed. See Note [Pretty printing via Iface syntax] in "GHC.Types.TyThing.Ppr"
data IfaceType
  = IfaceFreeTyVar TyVar          -- See Note [Free TyVars and CoVars in IfaceType]
  | IfaceTyVar     IfLclName      -- Type/coercion variable only, not tycon
  | IfaceLitTy     IfaceTyLit
  | IfaceAppTy     IfaceType IfaceAppArgs
                        -- See Note [Suppressing invisible arguments] for
                        -- an explanation of why the second field isn't
                        -- IfaceType, analogous to AppTy.
  | IfaceFunTy     FunTyFlag IfaceMult IfaceType IfaceType
  | IfaceForAllTy  IfaceForAllBndr IfaceType
  | IfaceTyConApp  IfaceTyCon IfaceAppArgs  -- Not necessarily saturated
                                            -- Includes newtypes, synonyms, tuples
  | IfaceCastTy     IfaceType IfaceCoercion
  | IfaceCoercionTy IfaceCoercion
  | IfaceTupleTy                  -- Saturated tuples (unsaturated ones use IfaceTyConApp)
       TupleSort                  -- What sort of tuple?
       PromotionFlag              -- A bit like IfaceTyCon
       IfaceAppArgs               -- arity = length args
          -- For promoted data cons, the kind args are omitted
          -- Why have this? Only for efficiency: IfaceTupleTy can omit the
          -- type arguments, as they can be recreated when deserializing.
          -- In an experiment, removing IfaceTupleTy resulted in a 0.75% regression
          -- in interface file size (in GHC's boot libraries).
          -- See !3987.
  deriving (Eq, Ord)   -- See Note [Ord instance of IfaceType]

{- Note [Ord instance of IfaceType]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need an 'Ord' instance to have a 'Map' keyed by 'IfaceType'. This 'Map' is
required for implementing the deduplication table during interface file
serialisation. See Note [Deduplication during iface binary serialisation] for
the implementation details.

We experimented with a 'TrieMap' based implementation, but it seems to be
slower than using a straight-forward 'Map IfaceType'. The experiments loaded
the full agda library into a ghci session with the following scenarios:

* normal: a plain ghci session.
* cold: a ghci session that uses '-fwrite-if-simplified-core -fforce-recomp',
  forcing a cold-cache.
* warm: a subsequent ghci session that uses a warm cache for
  '-fwrite-if-simplified-core', e.g. nothing needs to be recompiled.

The implementation was up to 5% slower in some execution runs. However, on
'lib:Cabal', the performance difference between 'Map IfaceType' and
'TrieMap IfaceType' was negligible.

We share our implementation of the 'TrieMap' in the ticket #24816, so that
further performance analysis and improvements don't need to start from
scratch.
-}

type IfaceMult = IfaceType

type IfacePredType = IfaceType
type IfaceContext = [IfacePredType]

data IfaceTyLit
  = IfaceNumTyLit Integer
  | IfaceStrTyLit LexicalFastString
  | IfaceCharTyLit Char
  deriving (Eq, Ord)

type IfaceTyConBinder    = VarBndr IfaceBndr TyConBndrVis
type IfaceForAllBndr     = VarBndr IfaceBndr ForAllTyFlag
type IfaceForAllSpecBndr = VarBndr IfaceBndr Specificity

-- | Make an 'IfaceForAllBndr' from an 'IfaceTvBndr'.
mkIfaceForAllTvBndr :: ForAllTyFlag -> IfaceTvBndr -> IfaceForAllBndr
mkIfaceForAllTvBndr vis var = Bndr (IfaceTvBndr var) vis

-- | Build the 'tyConKind' from the binders and the result kind.
-- Keep in sync with 'mkTyConKind' in "GHC.Core.TyCon".
mkIfaceTyConKind :: [IfaceTyConBinder] -> IfaceKind -> IfaceKind
mkIfaceTyConKind bndrs res_kind = foldr mk res_kind bndrs
  where
    mk :: IfaceTyConBinder -> IfaceKind -> IfaceKind
    mk (Bndr tv AnonTCB)        k = IfaceFunTy FTF_T_T many_ty (ifaceBndrType tv) k
    mk (Bndr tv (NamedTCB vis)) k = IfaceForAllTy (Bndr tv vis) k

ifaceForAllSpecToBndrs :: [IfaceForAllSpecBndr] -> [IfaceForAllBndr]
ifaceForAllSpecToBndrs = map ifaceForAllSpecToBndr

ifaceForAllSpecToBndr :: IfaceForAllSpecBndr -> IfaceForAllBndr
ifaceForAllSpecToBndr (Bndr tv spec) = Bndr tv (Invisible spec)

-- | Stores the arguments in a type application as a list.
-- See @Note [Suppressing invisible arguments]@.
data IfaceAppArgs
  = IA_Nil
  | IA_Arg IfaceType    -- The type argument
           ForAllTyFlag -- The argument's visibility. We store this here so
                        -- that we can:
                        --
                        -- 1. Avoid pretty-printing invisible (i.e., specified
                        --    or inferred) arguments when
                        --    -fprint-explicit-kinds isn't enabled, or
                        -- 2. When -fprint-explicit-kinds *is* enabled, print
                        --    specified arguments in @(...) and inferred
                        --    arguments in @{...}.
           IfaceAppArgs -- The rest of the arguments
  deriving (Eq, Ord)

instance Semi.Semigroup IfaceAppArgs where
  IA_Nil <> xs              = xs
  IA_Arg ty argf rest <> xs = IA_Arg ty argf (rest Semi.<> xs)

instance Monoid IfaceAppArgs where
  mempty  = IA_Nil
  mappend = (Semi.<>)

-- Encodes type constructors, kind constructors,
-- coercion constructors, the lot.
-- We have to tag them in order to pretty print them
-- properly.
data IfaceTyCon
  = IfaceTyCon { ifaceTyConName :: IfExtName
               , ifaceTyConInfo :: !IfaceTyConInfo
                 -- ^ We add a bang to this field as heap analysis
                 -- showed that this constructor retains a thunk to
                 -- a value that is usually shared.
                 --
                 -- See !12200 for how this bang saved ~10% residency
                 -- when loading 'mi_extra_decls' on the agda
                 -- code base.
                 --
                 -- See Note [Sharing IfaceTyConInfo] for why
                 -- sharing is so important for 'IfaceTyConInfo'.
               }
  deriving (Eq, Ord)

-- | The various types of TyCons which have special, built-in syntax.
data IfaceTyConSort
  = IfaceNormalTyCon               -- ^ a regular tycon
  | IfaceTupleTyCon !Arity !TupleSort
      -- ^ a tuple, e.g. @(a, b, c)@ or @(#a, b, c#)@.
      -- The arity is the tuple width, not the tycon arity
      -- (which is twice the width in the case of unboxed
      -- tuples).
  | IfaceSumTyCon !Arity
      -- ^ an unboxed sum, e.g. @(# a | b | c #)@
  | IfaceEqualityTyCon
      -- ^ A heterogeneous equality TyCon
      --   (i.e. eqPrimTyCon, eqReprPrimTyCon, heqTyCon)
      --   that is actually being applied to two types
      --   of the same kind.  This affects pretty-printing
      --   only: see Note [Equality predicates in IfaceType]
  deriving (Eq, Ord)

instance Outputable IfaceTyConSort where
  ppr IfaceNormalTyCon         = text "normal"
  ppr (IfaceTupleTyCon n sort) = ppr sort <> colon <> ppr n
  ppr (IfaceSumTyCon n)        = text "sum:" <> ppr n
  ppr IfaceEqualityTyCon       = text "equality"

{- Note [Free TyVars and CoVars in IfaceType]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Nowadays (since Nov 16, 2016) we pretty-print a Type by converting to
an IfaceType and pretty printing that.
This eliminates a lot of pretty-print duplication, and it matches what we do with pretty- printing TyThings. See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr. It works fine for closed types, but when printing debug traces (e.g. when using -ddump-tc-trace) we print a lot of /open/ types. These types are full of TcTyVars, and it's absolutely crucial to print them in their full glory, with their unique, TcTyVarDetails etc. So we simply embed a TyVar in IfaceType with the IfaceFreeTyVar constructor. Note that: * We never expect to serialise an IfaceFreeTyVar into an interface file, nor to deserialise one. IfaceFreeTyVar is used only in the "convert to IfaceType and then pretty-print" pipeline. We do the same for covars, naturally. Note [Equality predicates in IfaceType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC has several varieties of type equality (see Note [The equality types story] in GHC.Builtin.Types.Prim for details). In an effort to avoid confusing users, we suppress the differences during pretty printing unless certain flags are enabled. 
Here is how each equality predicate* is printed in homogeneous and heterogeneous contexts, depending on which combination of the -fprint-explicit-kinds and -fprint-equality-relations flags is used: -------------------------------------------------------------------------------------------- | Predicate | Neither flag | -fprint-explicit-kinds | |-------------------------------|----------------------------|-----------------------------| | a ~ b (homogeneous) | a ~ b | (a :: Type) ~ (b :: Type) | | a ~~ b, homogeneously | a ~ b | (a :: Type) ~ (b :: Type) | | a ~~ b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | a ~# b, homogeneously | a ~ b | (a :: Type) ~ (b :: Type) | | a ~# b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | Coercible a b (homogeneous) | Coercible a b | Coercible @Type a b | | a ~R# b, homogeneously | Coercible a b | Coercible @Type a b | | a ~R# b, heterogeneously | a ~R# b | (a :: Type) ~R# (c :: k) | |-------------------------------|----------------------------|-----------------------------| | Predicate | -fprint-equality-relations | Both flags | |-------------------------------|----------------------------|-----------------------------| | a ~ b (homogeneous) | a ~ b | (a :: Type) ~ (b :: Type) | | a ~~ b, homogeneously | a ~~ b | (a :: Type) ~~ (b :: Type) | | a ~~ b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | a ~# b, homogeneously | a ~# b | (a :: Type) ~# (b :: Type) | | a ~# b, heterogeneously | a ~# c | (a :: Type) ~# (c :: k) | | Coercible a b (homogeneous) | Coercible a b | Coercible @Type a b | | a ~R# b, homogeneously | a ~R# b | (a :: Type) ~R# (b :: Type) | | a ~R# b, heterogeneously | a ~R# b | (a :: Type) ~R# (c :: k) | -------------------------------------------------------------------------------------------- (* There is no heterogeneous, representational, lifted equality counterpart to (~~). There could be, but there seems to be no use for it.) This table adheres to the following rules: A. 
With -fprint-equality-relations, print the true equality relation.
B. Without -fprint-equality-relations:
     i. If the equality is representational and homogeneous, use Coercible.
    ii. Otherwise, if the equality is representational, use ~R#.
   iii. If the equality is nominal and homogeneous, use ~.
    iv. Otherwise, if the equality is nominal, use ~~.
C. With -fprint-explicit-kinds, print kinds on both sides of an infix operator,
   as above; or print the kind with Coercible.
D. Without -fprint-explicit-kinds, don't print kinds.

A hetero-kinded equality is used homogeneously when it is applied to two
identical kinds. Unfortunately, determining this from an IfaceType isn't
possible since we can't see through type synonyms. Consequently, we need to
record whether this particular application is homogeneous in IfaceTyConSort
for the purposes of pretty-printing.

See Note [The equality types story] in GHC.Builtin.Types.Prim.
-}

data IfaceTyConInfo   -- Used only to guide pretty-printing
  = IfaceTyConInfo { ifaceTyConIsPromoted :: PromotionFlag
                        -- A PromotionFlag value of IsPromoted indicates
                        -- that the type constructor came from a data
                        -- constructor promoted by -XDataKinds, and thus
                        -- should be printed as 'D to distinguish it from
                        -- an existing type constructor D.
                   , ifaceTyConSort       :: IfaceTyConSort }
  deriving (Eq, Ord)

-- | This smart constructor allows sharing of the two most common
-- cases.  See Note [Sharing IfaceTyConInfo]
mkIfaceTyConInfo :: PromotionFlag -> IfaceTyConSort -> IfaceTyConInfo
mkIfaceTyConInfo IsPromoted  IfaceNormalTyCon = promotedNormalTyConInfo
mkIfaceTyConInfo NotPromoted IfaceNormalTyCon = notPromotedNormalTyConInfo
mkIfaceTyConInfo prom        sort             = IfaceTyConInfo prom sort

{-# NOINLINE promotedNormalTyConInfo #-}
-- | See Note [Sharing IfaceTyConInfo]
promotedNormalTyConInfo :: IfaceTyConInfo
promotedNormalTyConInfo = IfaceTyConInfo IsPromoted IfaceNormalTyCon

{-# NOINLINE notPromotedNormalTyConInfo #-}
-- | See Note [Sharing IfaceTyConInfo]
notPromotedNormalTyConInfo :: IfaceTyConInfo
notPromotedNormalTyConInfo = IfaceTyConInfo NotPromoted IfaceNormalTyCon

{- Note [Sharing IfaceTyConInfo]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'IfaceTyConInfo' occurs an awful lot in 'ModIface', see #19194 for an example.
But almost all of them are

   IfaceTyConInfo IsPromoted IfaceNormalTyCon
   IfaceTyConInfo NotPromoted IfaceNormalTyCon.

The smart constructor `mkIfaceTyConInfo` arranges to share these instances,
thus:

  promotedNormalTyConInfo    = IfaceTyConInfo IsPromoted  IfaceNormalTyCon
  notPromotedNormalTyConInfo = IfaceTyConInfo NotPromoted IfaceNormalTyCon
  mkIfaceTyConInfo IsPromoted  IfaceNormalTyCon = promotedNormalTyConInfo
  mkIfaceTyConInfo NotPromoted IfaceNormalTyCon = notPromotedNormalTyConInfo
  mkIfaceTyConInfo prom        sort             = IfaceTyConInfo prom sort

But ALAS, the (nested) CPR transform can lose this sharing, completely
negating the effect of `mkIfaceTyConInfo`: see #24530 and #19326.

Sticking-plaster solution: add a NOINLINE pragma to those top-level constants.
When we fix the CPR bug we can remove the NOINLINE pragmas.

This one change leads to a 15% reduction in residency for GHC when embedding
'mi_extra_decls': see !12222.
-}

-- A coercion that is either reflexivity (no payload) or a real coercion.
data IfaceMCoercion
  = IfaceMRefl
  | IfaceMCo IfaceCoercion
  deriving (Eq, Ord)

data IfaceCoercion
  = IfaceReflCo       IfaceType
  | IfaceGReflCo      Role IfaceType (IfaceMCoercion)
  | IfaceFunCo        Role IfaceCoercion IfaceCoercion IfaceCoercion
  | IfaceTyConAppCo   Role IfaceTyCon [IfaceCoercion]
  | IfaceAppCo        IfaceCoercion IfaceCoercion
  | IfaceForAllCo     IfaceBndr !ForAllTyFlag !ForAllTyFlag IfaceCoercion IfaceCoercion
  | IfaceCoVarCo      IfLclName
  | IfaceAxiomCo      IfaceAxiomRule [IfaceCoercion]
      -- ^ There are only a fixed number of CoAxiomRules, so it suffices
      -- to use an IfaceLclName to distinguish them.
      -- See Note [Adding built-in type families] in GHC.Builtin.Types.Literals
  | IfaceUnivCo       UnivCoProvenance Role IfaceType IfaceType [IfaceCoercion]
  | IfaceSymCo        IfaceCoercion
  | IfaceTransCo      IfaceCoercion IfaceCoercion
  | IfaceSelCo        CoSel IfaceCoercion
  | IfaceLRCo         LeftOrRight IfaceCoercion
  | IfaceInstCo       IfaceCoercion IfaceCoercion
  | IfaceKindCo       IfaceCoercion
  | IfaceSubCo        IfaceCoercion
  | IfaceFreeCoVar    CoVar    -- ^ See Note [Free TyVars and CoVars in IfaceType]
  | IfaceHoleCo       CoVar    -- ^ See Note [Holes in IfaceCoercion]
  deriving (Eq, Ord)
  -- Why Ord? See Note [Ord instance of IfaceType]

data IfaceAxiomRule
  = IfaceAR_X IfLclName             -- Built-in
  | IfaceAR_B IfExtName BranchIndex -- Branched
  | IfaceAR_U IfExtName             -- Unbranched
  deriving (Eq, Ord)

{- Note [Holes in IfaceCoercion]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When typechecking fails the typechecker will produce a HoleCo to stand
in place of the unproven assertion. While we generally don't want to
let these unproven assertions leak into interface files, we still need
to be able to pretty-print them as we use IfaceType's pretty-printer
to render Types. For this reason IfaceCoercion has a IfaceHoleCo
constructor; however, we fail when asked to serialise an IfaceHoleCo,
to ensure that they don't end up in an interface file.
%************************************************************************
%*                                                                      *
                Functions over IfaceTypes
*                                                                      *
************************************************************************
-}

-- | Does this 'IfaceTyCon' have the given 'Unique' as its name's key?
ifaceTyConHasKey :: IfaceTyCon -> Unique -> Bool
ifaceTyConHasKey tc key = ifaceTyConName tc `hasKey` key

-- | Returns true for Type or (TYPE LiftedRep)
isIfaceLiftedTypeKind :: IfaceKind -> Bool
isIfaceLiftedTypeKind (IfaceTyConApp tc args)
  | tc `ifaceTyConHasKey` liftedTypeKindTyConKey
  , IA_Nil <- args
  = True   -- Type
  | tc `ifaceTyConHasKey` tYPETyConKey
  , IA_Arg arg1 Required IA_Nil <- args
  , isIfaceLiftedRep arg1
  = True   -- TYPE LiftedRep
isIfaceLiftedTypeKind _ = False

-- | Returns true for Constraint or (CONSTRAINT LiftedRep)
isIfaceConstraintKind :: IfaceKind -> Bool
isIfaceConstraintKind (IfaceTyConApp tc args)
  | tc `ifaceTyConHasKey` constraintKindTyConKey
  , IA_Nil <- args
  = True   -- Constraint
  | tc `ifaceTyConHasKey` cONSTRAINTTyConKey
  , IA_Arg arg1 Required IA_Nil <- args
  , isIfaceLiftedRep arg1
  = True   -- CONSTRAINT LiftedRep
isIfaceConstraintKind _ = False

isIfaceLiftedRep :: IfaceKind -> Bool
-- Returns true for LiftedRep, or BoxedRep Lifted
isIfaceLiftedRep (IfaceTyConApp tc args)
  | tc `ifaceTyConHasKey` liftedRepTyConKey
  , IA_Nil <- args
  = True   -- LiftedRep
  | tc `ifaceTyConHasKey` boxedRepDataConKey
  , IA_Arg arg1 Required IA_Nil <- args
  , isIfaceLifted arg1
  = True   -- BoxedRep Lifted
isIfaceLiftedRep _ = False

isIfaceLifted :: IfaceKind -> Bool
-- Returns true for Lifted
isIfaceLifted (IfaceTyConApp tc args)
  | tc `ifaceTyConHasKey` liftedDataConKey
  , IA_Nil <- args
  = True
isIfaceLifted _ = False

splitIfaceSigmaTy :: IfaceType -> ([IfaceForAllBndr], [IfacePredType], IfaceType)
-- Mainly for printing purposes
--
-- Here we split nested IfaceSigmaTy properly.
--
-- @
-- forall t. T t => forall m a b. M m => (a -> m b) -> t a -> m (t b)
-- @
--
-- Calling @splitIfaceSigmaTy@ on this type would yield:
--
-- @
-- ([t, m, a, b], [T t, M m], (a -> m b) -> t a -> m (t b))
-- @
splitIfaceSigmaTy ty
  = case (bndrs, theta) of
      ([], []) -> (bndrs, theta, tau)
      _        -> let (bndrs', theta', tau') = splitIfaceSigmaTy tau
                  in (bndrs ++ bndrs', theta ++ theta', tau')
  where
    (bndrs, rho)  = split_foralls ty
    (theta, tau)  = split_rho rho

    -- Collect leading invisible foralls
    split_foralls (IfaceForAllTy bndr ty)
      | isInvisibleForAllTyFlag (binderFlag bndr)
      = case split_foralls ty of { (bndrs, rho) -> (bndr:bndrs, rho) }
    split_foralls rho = ([], rho)

    -- Collect leading invisible (=>) arguments as a theta
    split_rho (IfaceFunTy af _ ty1 ty2)
      | isInvisibleFunArg af
      = case split_rho ty2 of { (ps, tau) -> (ty1:ps, tau) }
    split_rho tau = ([], tau)

-- | Split off the leading /visible/ (required) foralls only.
splitIfaceReqForallTy :: IfaceType -> ([IfaceForAllBndr], IfaceType)
splitIfaceReqForallTy (IfaceForAllTy bndr ty)
  | isVisibleForAllTyFlag (binderFlag bndr)
  = case splitIfaceReqForallTy ty of { (bndrs, rho) -> (bndr:bndrs, rho) }
splitIfaceReqForallTy rho = ([], rho)

-- | Drop the elements of @xs@ that correspond to invisible binders,
-- unless -fprint-explicit-kinds is on.
suppressIfaceInvisibles :: PrintExplicitKinds -> [IfaceTyConBinder] -> [a] -> [a]
suppressIfaceInvisibles (PrintExplicitKinds True)  _tys xs = xs
suppressIfaceInvisibles (PrintExplicitKinds False) tys  xs = suppress tys xs
  where
    suppress _      []  = []
    suppress []     a   = a
    suppress (k:ks) (x:xs)
      | isInvisibleTyConBinder k = suppress ks xs
      | otherwise                = x : suppress ks xs

stripIfaceInvisVars :: PrintExplicitKinds -> [IfaceTyConBinder] -> [IfaceTyConBinder]
stripIfaceInvisVars (PrintExplicitKinds True)  tyvars = tyvars
stripIfaceInvisVars (PrintExplicitKinds False) tyvars
  = filterOut isInvisibleTyConBinder tyvars

-- | Extract an 'IfaceBndr' from an 'IfaceForAllBndr'.
ifForAllBndrVar :: IfaceForAllBndr -> IfaceBndr
ifForAllBndrVar = binderVar

-- | Extract the variable name from an 'IfaceForAllBndr'.
ifForAllBndrName :: IfaceForAllBndr -> IfLclName
ifForAllBndrName fab = ifaceBndrName (ifForAllBndrVar fab)

-- | Extract an 'IfaceBndr' from an 'IfaceTyConBinder'.
ifTyConBinderVar :: IfaceTyConBinder -> IfaceBndr
ifTyConBinderVar = binderVar

-- | Extract the variable name from an 'IfaceTyConBinder'.
ifTyConBinderName :: IfaceTyConBinder -> IfLclName
ifTyConBinderName tcb = ifaceBndrName (ifTyConBinderVar tcb)

ifTypeIsVarFree :: IfaceType -> Bool
-- Returns True if the type definitely has no variables at all
-- Just used to control pretty printing
ifTypeIsVarFree ty = go ty
  where
    go (IfaceTyVar {})          = False
    go (IfaceFreeTyVar {})      = False
    go (IfaceAppTy fun args)    = go fun && go_args args
    go (IfaceFunTy _ w arg res) = go w && go arg && go res
    go (IfaceForAllTy {})       = False
    go (IfaceTyConApp _ args)   = go_args args
    go (IfaceTupleTy _ _ args)  = go_args args
    go (IfaceLitTy _)           = True
    go (IfaceCastTy {})         = False -- Safe
    go (IfaceCoercionTy {})     = False -- Safe

    go_args IA_Nil = True
    go_args (IA_Arg arg _ args) = go arg && go_args args

{- Note [Substitution on IfaceType]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Substitutions on IfaceType are done only during pretty-printing to
construct the result type of a GADT, and does not deal with binders
(eg IfaceForAll), so it doesn't need fancy capture stuff.
-}

type IfaceTySubst = FastStringEnv IfaceType -- Note [Substitution on IfaceType]

mkIfaceTySubst :: [(IfLclName,IfaceType)] -> IfaceTySubst
-- See Note [Substitution on IfaceType]
mkIfaceTySubst eq_spec = mkFsEnv (map (first ifLclNameFS) eq_spec)

inDomIfaceTySubst :: IfaceTySubst -> IfaceTvBndr -> Bool
-- See Note [Substitution on IfaceType]
inDomIfaceTySubst subst (fs, _) = isJust (lookupFsEnv subst (ifLclNameFS fs))

substIfaceType :: IfaceTySubst -> IfaceType -> IfaceType
-- See Note [Substitution on IfaceType]
-- Panics on foralls (both in types and coercions): the substitution is
-- capture-naive, and callers never pass quantified types.
substIfaceType env ty
  = go ty
  where
    go (IfaceFreeTyVar tv)     = IfaceFreeTyVar tv
    go (IfaceTyVar tv)         = substIfaceTyVar env tv
    go (IfaceAppTy t ts)       = IfaceAppTy (go t) (substIfaceAppArgs env ts)
    go (IfaceFunTy af w t1 t2) = IfaceFunTy af (go w) (go t1) (go t2)
    go ty@(IfaceLitTy {})      = ty
    go (IfaceTyConApp tc tys)  = IfaceTyConApp tc (substIfaceAppArgs env tys)
    go (IfaceTupleTy s i tys)  = IfaceTupleTy s i (substIfaceAppArgs env tys)
    go (IfaceForAllTy {})      = pprPanic "substIfaceType" (ppr ty)
    go (IfaceCastTy ty co)     = IfaceCastTy (go ty) (go_co co)
    go (IfaceCoercionTy co)    = IfaceCoercionTy (go_co co)

    go_mco IfaceMRefl    = IfaceMRefl
    go_mco (IfaceMCo co) = IfaceMCo $ go_co co

    go_co (IfaceReflCo ty)           = IfaceReflCo (go ty)
    go_co (IfaceGReflCo r ty mco)    = IfaceGReflCo r (go ty) (go_mco mco)
    go_co (IfaceFunCo r w c1 c2)     = IfaceFunCo r (go_co w) (go_co c1) (go_co c2)
    go_co (IfaceTyConAppCo r tc cos) = IfaceTyConAppCo r tc (go_cos cos)
    go_co (IfaceAppCo c1 c2)         = IfaceAppCo (go_co c1) (go_co c2)
    go_co (IfaceForAllCo {})         = pprPanic "substIfaceCoercion" (ppr ty)
    go_co (IfaceFreeCoVar cv)        = IfaceFreeCoVar cv
    go_co (IfaceCoVarCo cv)          = IfaceCoVarCo cv
    go_co (IfaceHoleCo cv)           = IfaceHoleCo cv
    go_co (IfaceUnivCo p r t1 t2 ds) = IfaceUnivCo p r (go t1) (go t2) (go_cos ds)
    go_co (IfaceSymCo co)            = IfaceSymCo (go_co co)
    go_co (IfaceTransCo co1 co2)     = IfaceTransCo (go_co co1) (go_co co2)
    go_co (IfaceSelCo n co)          = IfaceSelCo n (go_co co)
    go_co (IfaceLRCo lr co)          = IfaceLRCo lr (go_co co)
    go_co (IfaceInstCo c1 c2)        = IfaceInstCo (go_co c1) (go_co c2)
    go_co (IfaceKindCo co)           = IfaceKindCo (go_co co)
    go_co (IfaceSubCo co)            = IfaceSubCo (go_co co)
    go_co (IfaceAxiomCo n cos)       = IfaceAxiomCo n (go_cos cos)

    go_cos = map go_co

substIfaceAppArgs :: IfaceTySubst -> IfaceAppArgs -> IfaceAppArgs
substIfaceAppArgs env args
  = go args
  where
    go IA_Nil              = IA_Nil
    go (IA_Arg ty arg tys) = IA_Arg (substIfaceType env ty) arg (go tys)

substIfaceTyVar :: IfaceTySubst -> IfLclName -> IfaceType
substIfaceTyVar env tv
  | Just ty <- lookupFsEnv env (ifLclNameFS tv) = ty
  | otherwise                                   = IfaceTyVar tv

{-
************************************************************************
*                                                                      *
                Functions over IfaceAppArgs
*                                                                      *
************************************************************************
-}

stripInvisArgs :: PrintExplicitKinds -> IfaceAppArgs -> IfaceAppArgs
stripInvisArgs (PrintExplicitKinds True)  tys = tys
stripInvisArgs (PrintExplicitKinds False) tys = suppress_invis tys
    where
      suppress_invis c
        = case c of
            IA_Nil -> IA_Nil
            IA_Arg t argf ts
              | isVisibleForAllTyFlag argf
              -> IA_Arg t argf $ suppress_invis ts
              -- Keep recursing through the remainder of the arguments, as it's
              -- possible that there are remaining invisible ones.
              -- See the "In type declarations" section of Note [VarBndrs,
              -- ForAllTyBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep.
              | otherwise
              -> suppress_invis ts

appArgsIfaceTypes :: IfaceAppArgs -> [IfaceType]
appArgsIfaceTypes IA_Nil = []
appArgsIfaceTypes (IA_Arg t _ ts) = t : appArgsIfaceTypes ts

appArgsIfaceTypesForAllTyFlags :: IfaceAppArgs -> [(IfaceType, ForAllTyFlag)]
appArgsIfaceTypesForAllTyFlags IA_Nil = []
appArgsIfaceTypesForAllTyFlags (IA_Arg t a ts)
                                 = (t, a) : appArgsIfaceTypesForAllTyFlags ts

-- | Count only the visible arguments.
ifaceVisAppArgsLength :: IfaceAppArgs -> Int
ifaceVisAppArgsLength = go 0
  where
    go !n IA_Nil = n
    go n  (IA_Arg _ argf rest)
      | isVisibleForAllTyFlag argf = go (n+1) rest
      | otherwise                  = go n rest

ifaceAppArgsLength :: IfaceAppArgs -> Int
ifaceAppArgsLength = go 0
  where
    go !n IA_Nil = n
    go !n (IA_Arg _ _ ts) = go (n + 1) ts

{- Note [Suppressing invisible arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We use the IfaceAppArgs data type to specify which of the arguments to a type
should be displayed when pretty-printing, under the control of
-fprint-explicit-kinds.
See also Type.filterOutInvisibleTypes.
For example, given

    T :: forall k. (k->*) -> k -> *    -- Ordinary kind polymorphism
    'Just :: forall k. k -> 'Maybe k   -- Promoted

we want

    T * Tree Int    prints as    T Tree Int
    'Just *         prints as    Just *

For type constructors (IfaceTyConApp), IfaceAppArgs is a quite natural fit,
since the corresponding Core constructor:

    data Type
      = ...
      | TyConApp TyCon [Type]

Already puts all of its arguments into a list. So when converting a Type to an
IfaceType (see toIfaceAppArgsX in GHC.Core.ToIface), we simply use the kind of
the TyCon (which is cached) to guide the process of converting the argument
Types into an IfaceAppArgs list.

We also want this behavior for IfaceAppTy, since given:

    data Proxy (a :: k)
    f :: forall (t :: forall a. a -> Type). Proxy Type (t Bool True)

We want to print the return type as `Proxy (t True)` without the use of
-fprint-explicit-kinds (#15330). Accomplishing this is trickier than in the
tycon case, because the corresponding Core constructor for IfaceAppTy:

    data Type
      = ...
      | AppTy Type Type

Only stores one argument at a time. Therefore, when converting an AppTy to an
IfaceAppTy (in toIfaceTypeX in GHC.CoreToIface), we:

1. Flatten the chain of AppTys down as much as possible
2. Use typeKind to determine the function Type's kind
3. Use this kind to guide the process of converting the argument Types into an
   IfaceAppArgs list.

By flattening the arguments like this, we obtain two benefits:

(a) We can reuse the same machinery to pretty-print IfaceTyConApp arguments as
    we do IfaceTyApp arguments, which means that we only need to implement the
    logic to filter out invisible arguments once.
(b) Unlike for tycons, finding the kind of a type in general (through typeKind)
    is not a constant-time operation, so by flattening the arguments first, we
    decrease the number of times we have to call typeKind.

Note [Pretty-printing invisible arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note [Suppressing invisible arguments] is all about how to avoid printing
invisible arguments when the -fprint-explicit-kinds flag is disabled. Well,
what about when it's enabled? Then we can and should print invisible kind
arguments, and this Note explains how we do it.

As two running examples, consider the following code:

  {-# LANGUAGE PolyKinds #-}
  data T1 a
  data T2 (a :: k)

When displaying these types (with -fprint-explicit-kinds on), we could just
do the following:

  T1 k a
  T2 k a

That certainly gets the job done. But it lacks a crucial piece of information:
is the `k` argument inferred or specified? To communicate this, we use visible
kind application syntax to distinguish the two cases:

  T1 @{k} a
  T2 @k   a

Here, @{k} indicates that `k` is an inferred argument, and @k indicates that
`k` is a specified argument. (See
Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep for
a lengthier explanation on what "inferred" and "specified" mean.)
************************************************************************
*                                                                      *
                Pretty-printing
*                                                                      *
************************************************************************
-}

if_print_coercions :: SDoc  -- ^ if printing coercions
                   -> SDoc  -- ^ otherwise
                   -> SDoc
-- Chooses the first document when explicit-coercion printing is on,
-- or when we are in a dump style or debug mode.
if_print_coercions yes no
  = sdocOption sdocPrintExplicitCoercions $ \print_co ->
    getPprStyle $ \style ->
    getPprDebug $ \debug ->
    if print_co || dumpStyle style || debug
    then yes
    else no

pprIfaceInfixApp :: PprPrec -> SDoc -> SDoc -> SDoc -> SDoc
pprIfaceInfixApp ctxt_prec pp_tc pp_ty1 pp_ty2
  = maybeParen ctxt_prec opPrec $
    sep [pp_ty1, pp_tc <+> pp_ty2]

pprIfacePrefixApp :: PprPrec -> SDoc -> [SDoc] -> SDoc
pprIfacePrefixApp ctxt_prec pp_fun pp_tys
  | null pp_tys = pp_fun
  | otherwise   = maybeParen ctxt_prec appPrec $
                  hang pp_fun 2 (sep pp_tys)

-- | A rho type: no top-level forall and no invisible (constraint) arrow.
isIfaceRhoType :: IfaceType -> Bool
isIfaceRhoType (IfaceForAllTy _ _)   = False
isIfaceRhoType (IfaceFunTy af _ _ _) = isVisibleFunArg af
isIfaceRhoType _                     = True

-- ----------------------------- Printing binders ------------------------------------

instance Outputable IfaceBndr where
    ppr (IfaceIdBndr bndr) = pprIfaceIdBndr bndr
    ppr (IfaceTvBndr bndr) = char '@' <> pprIfaceTvBndr bndr (SuppressBndrSig False)
                                                             (UseBndrParens False)

pprIfaceBndrs :: [IfaceBndr] -> SDoc
pprIfaceBndrs bs = sep (map ppr bs)

pprIfaceLamBndr :: IfaceLamBndr -> SDoc
pprIfaceLamBndr (b, IfaceNoOneShot) = ppr b
pprIfaceLamBndr (b, IfaceOneShot)   = ppr b <> text "[OneShot]"

-- Prints (name[mult] :: type); the brackets carry the multiplicity.
pprIfaceIdBndr :: IfaceIdBndr -> SDoc
pprIfaceIdBndr (w, name, ty)
  = parens (ppr name <> brackets (ppr_ty_nested w) <+> dcolon <+> ppr_ty_nested ty)

{- Note [Suppressing binder signatures]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When printing the binders in a 'forall', we want to keep the kind annotations:

    forall (a :: k).
blah
          ^^^^
          good

On the other hand, when we print the binders of a data declaration in :info,
the kind information would be redundant due to the standalone kind signature:

   type F :: Symbol -> Type
   type F (s :: Symbol) = blah
             ^^^^^^^^^
             redundant

Here we'd like to omit the kind annotation:

   type F :: Symbol -> Type
   type F s = blah

Note [Printing type abbreviations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Normally, we pretty-print
   `TYPE 'LiftedRep` as `Type` (or `*`)
   `CONSTRAINT 'LiftedRep` as `Constraint`
   `FUN 'Many` as `(->)`
This way, error messages don't refer to representation polymorphism
or linearity if it is not necessary.  Normally we would represent
these types using their synonyms (see GHC.Core.Type
Note [Using synonyms to compress types]), but in the :kind! GHCi command
we specifically expand synonyms (see GHC.Tc.Module.tcRnExpr).
So here in the pretty-printing we effectively collapse back Type and
Constraint to their synonym forms.  A bit confusing!

However, when printing the definition of Type, Constraint or (->) with :info,
this would give confusing output: `type (->) = (->)` (#18594).
Solution: detect when we are in :info and disable displaying the synonym
with the SDoc option sdocPrintTypeAbbreviations. If you are creating a similar
synonym, make sure it is listed in pprIfaceDecl, see reference to this Note.

If there will be a need, in the future we could expose it as a flag
-fprint-type-abbreviations or even three separate flags controlling
TYPE 'LiftedRep, CONSTRAINT 'LiftedRep and FUN 'Many.
-}

-- | Do we want to suppress kind annotations on binders?
-- See Note [Suppressing binder signatures]
newtype SuppressBndrSig = SuppressBndrSig Bool

newtype UseBndrParens      = UseBndrParens Bool
newtype PrintExplicitKinds = PrintExplicitKinds Bool

pprIfaceTvBndr :: IfaceTvBndr -> SuppressBndrSig -> UseBndrParens -> SDoc
-- The kind annotation is omitted when suppressed or when it is simply Type.
pprIfaceTvBndr (tv, ki) (SuppressBndrSig suppress_sig) (UseBndrParens use_parens)
  | suppress_sig             = ppr tv
  | isIfaceLiftedTypeKind ki = ppr tv
  | otherwise                = maybe_parens (ppr tv <+> dcolon <+> ppr_ty_nested ki)
  where
    maybe_parens | use_parens = parens
                 | otherwise  = id

pprIfaceTyConBinders :: SuppressBndrSig -> [IfaceTyConBinder] -> SDoc
pprIfaceTyConBinders suppress_sig = sep . map go
  where
    go :: IfaceTyConBinder -> SDoc
    go (Bndr (IfaceIdBndr bndr) _) = pprIfaceIdBndr bndr
    go (Bndr (IfaceTvBndr bndr) vis) =
      -- See Note [Pretty-printing invisible arguments]
      case vis of
        AnonTCB            -> ppr_bndr (UseBndrParens True)
        NamedTCB Required  -> ppr_bndr (UseBndrParens True)
        NamedTCB Specified -> char '@' <> ppr_bndr (UseBndrParens True)
        NamedTCB Inferred  -> char '@' <> braces (ppr_bndr (UseBndrParens False))
      where
        ppr_bndr = pprIfaceTvBndr bndr suppress_sig

instance Binary IfaceBndr where
    put_ bh (IfaceIdBndr aa) = do
            putByte bh 0
            put_ bh aa
    put_ bh (IfaceTvBndr ab) = do
            putByte bh 1
            put_ bh ab
    get bh = do
            h <- getByte bh
            case h of
              0 -> do aa <- get bh
                      return (IfaceIdBndr aa)
              _ -> do ab <- get bh
                      return (IfaceTvBndr ab)

instance Binary IfaceOneShot where
    put_ bh IfaceNoOneShot = putByte bh 0
    put_ bh IfaceOneShot   = putByte bh 1
    get bh = do
            h <- getByte bh
            case h of
              0 -> return IfaceNoOneShot
              _ -> return IfaceOneShot

-- ----------------------------- Printing IfaceType ------------------------------------

---------------------------------
instance Outputable IfaceType where
  ppr ty = pprIfaceType ty

-- The purpose of 'ppr_ty_nested' is to distinguish calls that should not
-- trigger 'hideNonStandardTypes', see Note [Defaulting RuntimeRep variables]
-- wrinkle (W2).
pprIfaceType, pprParendIfaceType, ppr_ty_nested :: IfaceType -> SDoc
pprIfaceType       = pprPrecIfaceType topPrec
pprParendIfaceType = pprPrecIfaceType appPrec
ppr_ty_nested      = ppr_ty topPrec

pprPrecIfaceType :: PprPrec -> IfaceType -> SDoc
-- We still need `hideNonStandardTypes`, since the `pprPrecIfaceType` may be
-- called from other places, besides `:type` and `:info`.
pprPrecIfaceType prec ty = hideNonStandardTypes (ppr_ty prec) ty

pprTypeArrow :: FunTyFlag -> IfaceMult -> SDoc
pprTypeArrow af mult
  = pprArrow (mb_conc, pprPrecIfaceType) af mult
  where
    mb_conc (IfaceTyConApp tc _) = Just tc
    mb_conc _                    = Nothing

pprArrow :: (a -> Maybe IfaceTyCon, PprPrec -> a -> SDoc)
         -> FunTyFlag -> a -> SDoc
-- Prints a thin arrow (->) with its multiplicity
-- Used for both FunTy and FunCo, hence higher order arguments
pprArrow (mb_conc, ppr_mult) af mult
  | isFUNArg af
  = case mb_conc mult of
      Just tc | tc `ifaceTyConHasKey` manyDataConKey -> arrow
              | tc `ifaceTyConHasKey` oneDataConKey  -> lollipop
      _ -> text "%" <> ppr_mult appPrec mult <+> arrow
  | otherwise
  = ppr (funTyFlagTyCon af)

ppr_ty :: PprPrec -> IfaceType -> SDoc
ppr_ty ctxt_prec ty
  | not (isIfaceRhoType ty)   = ppr_sigma ShowForAllMust ctxt_prec ty
ppr_ty _ (IfaceForAllTy {})   = panic "ppr_ty"  -- Covered by not.isIfaceRhoType
ppr_ty _ (IfaceFreeTyVar tyvar) = ppr tyvar  -- This is the main reason for IfaceFreeTyVar!
ppr_ty _ (IfaceTyVar tyvar) = ppr tyvar -- See Note [Free TyVars and CoVars in IfaceType] ppr_ty ctxt_prec (IfaceTyConApp tc tys) = pprTyTcApp ctxt_prec tc tys ppr_ty ctxt_prec (IfaceTupleTy i p tys) = ppr_tuple ctxt_prec i p tys -- always fully saturated ppr_ty _ (IfaceLitTy n) = pprIfaceTyLit n -- Function types ppr_ty ctxt_prec ty@(IfaceFunTy af w ty1 ty2) -- Should be a visible argument = assertPpr (isVisibleFunArg af) (ppr ty) $ -- Ensured by isIfaceRhoType above -- We want to print a chain of arrows in a column -- type1 -- -> type2 -- -> type3 maybeParen ctxt_prec funPrec $ sep [ppr_ty funPrec ty1, sep (ppr_fun_tail w ty2)] where ppr_fun_tail wthis (IfaceFunTy af wnext ty1 ty2) | isVisibleFunArg af = (pprTypeArrow af wthis <+> ppr_ty funPrec ty1) : ppr_fun_tail wnext ty2 ppr_fun_tail wthis other_ty = [pprTypeArrow af wthis <+> ppr_ty_nested other_ty] ppr_ty ctxt_prec (IfaceAppTy t ts) = if_print_coercions ppr_app_ty ppr_app_ty_no_casts where ppr_app_ty = sdocOption sdocPrintExplicitKinds $ \print_kinds -> let tys_wo_kinds = appArgsIfaceTypesForAllTyFlags $ stripInvisArgs (PrintExplicitKinds print_kinds) ts in pprIfacePrefixApp ctxt_prec (ppr_ty funPrec t) (map (ppr_app_arg appPrec) tys_wo_kinds) -- Strip any casts from the head of the application ppr_app_ty_no_casts = case t of IfaceCastTy head _ -> ppr_ty ctxt_prec (mk_app_tys head ts) _ -> ppr_app_ty mk_app_tys :: IfaceType -> IfaceAppArgs -> IfaceType mk_app_tys (IfaceTyConApp tc tys1) tys2 = IfaceTyConApp tc (tys1 `mappend` tys2) mk_app_tys t1 tys2 = IfaceAppTy t1 tys2 ppr_ty ctxt_prec (IfaceCastTy ty co) = if_print_coercions (parens (ppr_ty topPrec ty <+> text "|>" <+> ppr co)) (ppr_ty ctxt_prec ty) ppr_ty ctxt_prec (IfaceCoercionTy co) = if_print_coercions (ppr_co ctxt_prec co) (text "<>") {- Note [Defaulting RuntimeRep variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RuntimeRep variables are considered by many (most?) users to be little more than syntactic noise. 
When the notion was introduced there was a significant and understandable
push-back from those with pedagogy in mind, which argued that RuntimeRep
variables would throw a wrench into nearly any teaching approach since they
appear in even the lowly ($) function's type,

    ($) :: forall (w :: RuntimeRep) a (b :: TYPE w). (a -> b) -> a -> b

which is significantly less readable than its non RuntimeRep-polymorphic type of

    ($) :: (a -> b) -> a -> b

Moreover, unboxed types don't appear all that often in run-of-the-mill
Haskell programs, so it makes little sense to make all users pay this
syntactic overhead.

For this reason it was decided that we would hide RuntimeRep variables
for now (see #11549). We do this right in the pretty-printer, by
pre-processing the type we are about to print, to default any type variables
of kind RuntimeRep that are bound by toplevel invisible quantification to
LiftedRep. Likewise, we default Multiplicity variables to Many and Levity
variables to Lifted.

This is done in a pass right before pretty-printing
(defaultIfaceTyVarsOfKind, controlled by
-fprint-explicit-runtime-reps and -XLinearTypes)

This applies to /quantified/ variables like 'w' above.  What about variables
that are /free/ in the type being printed, which certainly happens in error
messages.  Suppose (#16074, #19361) we are reporting a mismatch between
skolems
          (a :: RuntimeRep) ~ (b :: RuntimeRep)
   or    (m :: Multiplicity) ~ Many
We certainly don't want to say "Can't match LiftedRep with LiftedRep" or
"Can't match Many with Many"!

But if we are printing the type
    (forall (a :: TYPE r). blah)
we do want to turn that (free) r into LiftedRep, so it prints as
    (forall a. blah)

We use isMetaTyVar to distinguish between those two situations:
metavariables are converted, skolem variables are not.

There's one exception though: TyVarTv metavariables should not be defaulted,
as they appear during kind-checking of "newtype T :: TYPE r where..."
(test T18357a).
Therefore, we additionally test for isTyConableTyVar.

Wrinkles:

(W1) The loop 'go' in 'defaultIfaceTyVarsOfKind' passes a Bool flag, 'rank1',
     around that indicates whether we haven't yet descended into the arguments
     of a function type. This is used to decide whether newly bound variables
     are eligible for defaulting – we do not want contravariant foralls to be
     defaulted because that would result in an incorrect, rather than
     specialized, type. For example:

     ∀ p (r1 :: RuntimeRep) . (∀ (r2 :: RuntimeRep) . p r2) -> p r1

     We want to default 'r1', but not 'r2'.

     When examining the first forall, 'rank1' is True.
     The toplevel function type is matched as IfaceFunTy, where we recurse into
     'go' by passing False for 'rank1'.
     The forall in the first argument then skips adding a substitution for 'r2'.

(W2) 'defaultIfaceTyVarsOfKind' ought to be called only once when printing a
     type. A few components of the printing machinery used to invoke 'ppr' on
     types nested in secondary structures like IfaceBndr, which would repeat
     the defaulting process, but treating the type as if it were top-level,
     causing unwanted defaulting.

     In order to prevent future developers from using 'ppr' again or being
     confused that @ppr_ty topPrec@ is used, we introduced a marker function,
     'ppr_ty_nested'.
-}

-- | Default 'RuntimeRep' variables to 'LiftedRep',
-- 'Levity' variables to 'Lifted', and 'Multiplicity'
-- variables to 'Many'. For example:
--
-- @
-- ($) :: forall (r :: GHC.Types.RuntimeRep) a (b :: TYPE r).
--        (a -> b) -> a -> b
-- Just :: forall (k :: Multiplicity) a. a % k -> Maybe a
-- @
--
-- turns into,
--
-- @ ($) :: forall a (b :: *). (a -> b) -> a -> b @
-- @ Just :: forall a . a -> Maybe a @
--
-- We do this to prevent RuntimeRep, Levity and Multiplicity variables from
-- incurring a significant syntactic overhead in otherwise simple
-- type signatures (e.g. ($)). See Note [Defaulting RuntimeRep variables]
-- and #11549 for further discussion.
defaultIfaceTyVarsOfKind :: Bool -- ^ default 'RuntimeRep'/'Levity' variables?
                         -> Bool -- ^ default 'Multiplicity' variables?
                         -> IfaceType -> IfaceType
defaultIfaceTyVarsOfKind def_rep def_mult ty = go emptyFsEnv True ty
  where
    -- Traversal worker.  The environment maps forall-bound variables that
    -- we decided to default to their replacement types; the Bool is the
    -- 'rank1' flag from Note [Defaulting RuntimeRep variables] (W1):
    -- defaulting is only allowed while we have not yet gone under a
    -- function-type argument (i.e. only covariant, top-level foralls).
    go :: FastStringEnv IfaceType -- Set of enclosing forall-ed RuntimeRep/Levity/Multiplicity variables
       -> Bool -- Are we in a toplevel forall, where defaulting is allowed?
       -> IfaceType
       -> IfaceType
    go subs True (IfaceForAllTy (Bndr (IfaceTvBndr (var, var_kind)) argf) ty)
      | isInvisibleForAllTyFlag argf  -- Don't default *visible* quantification
                                      -- or we get the mess in #13963
      , Just substituted_ty <- check_substitution var_kind
      = let subs' = extendFsEnv subs (ifLclNameFS var) substituted_ty
            -- Record that we should replace it with LiftedRep/Lifted/Many,
            -- and recurse, discarding the forall
        in go subs' True ty

    -- Any other forall: keep the binder (defaulting its kind as a nested
    -- type) and keep scanning the body with the same rank1 flag.
    go subs rank1 (IfaceForAllTy bndr ty)
      = IfaceForAllTy (go_ifacebndr subs bndr) (go subs rank1 ty)

    -- A bound variable occurrence: substitute if we decided to default it.
    go subs _ ty@(IfaceTyVar tv) = case lookupFsEnv subs (ifLclNameFS tv) of
      Just s  -> s
      Nothing -> ty

    go _ _ ty@(IfaceFreeTyVar tv)
      -- See Note [Defaulting RuntimeRep variables], about free vars
      | def_rep
      , GHC.Core.Type.isRuntimeRepTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv = liftedRep_ty
      | def_rep
      , GHC.Core.Type.isLevityTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv = lifted_ty
      | def_mult
      , GHC.Core.Type.isMultiplicityTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv = many_ty
      | otherwise = ty

    go subs _ (IfaceTyConApp tc tc_args)
      = IfaceTyConApp tc (go_args subs tc_args)

    go subs _ (IfaceTupleTy sort is_prom tc_args)
      = IfaceTupleTy sort is_prom (go_args subs tc_args)

    -- Function type: the multiplicity and argument are contravariant /
    -- nested positions (rank1 = False); only the result keeps rank1.
    go subs rank1 (IfaceFunTy af w arg res)
      = IfaceFunTy af (go subs False w) (go subs False arg) (go subs rank1 res)

    go subs _ (IfaceAppTy t ts)
      = IfaceAppTy (go subs False t) (go_args subs ts)

    go subs rank1 (IfaceCastTy x co)
      = IfaceCastTy (go subs rank1 x) co

    -- Literals and coercions contain no defaultable type variables.
    go _ _ ty@(IfaceLitTy {}) = ty
    go _ _ ty@(IfaceCoercionTy {}) = ty

    -- Default variables occurring in a binder's kind (a nested position).
    go_ifacebndr :: FastStringEnv IfaceType -> IfaceForAllBndr -> IfaceForAllBndr
    go_ifacebndr subs (Bndr (IfaceIdBndr (w, n, t)) argf)
      = Bndr (IfaceIdBndr (w, n, go subs False t)) argf
    go_ifacebndr subs (Bndr (IfaceTvBndr (n, t)) argf)
      = Bndr (IfaceTvBndr (n, go subs False t)) argf

    go_args :: FastStringEnv IfaceType -> IfaceAppArgs -> IfaceAppArgs
    go_args _ IA_Nil = IA_Nil
    go_args subs (IA_Arg ty argf args)
      = IA_Arg (go subs False ty) argf (go_args subs args)

    -- Decide, from a binder's kind, whether it should be defaulted and,
    -- if so, to which replacement type.
    check_substitution :: IfaceType -> Maybe IfaceType
    check_substitution (IfaceTyConApp tc _)
        | def_rep
        , tc `ifaceTyConHasKey` runtimeRepTyConKey
        = Just liftedRep_ty
        | def_rep
        , tc `ifaceTyConHasKey` levityTyConKey
        = Just lifted_ty
        | def_mult
        , tc `ifaceTyConHasKey` multiplicityTyConKey
        = Just many_ty
    check_substitution _ = Nothing

-- | The type ('BoxedRep 'Lifted), also known as LiftedRep.
liftedRep_ty :: IfaceType
liftedRep_ty =
  IfaceTyConApp liftedRep IA_Nil
  where
    liftedRep :: IfaceTyCon
    liftedRep = IfaceTyCon tc_name (mkIfaceTyConInfo NotPromoted IfaceNormalTyCon)
      where tc_name = getName liftedRepTyCon

-- | The type 'Lifted :: Levity'.
lifted_ty :: IfaceType
lifted_ty =
    IfaceTyConApp (IfaceTyCon dc_name (mkIfaceTyConInfo IsPromoted IfaceNormalTyCon))
                  IA_Nil
  where dc_name = getName liftedDataConTyCon

-- | The type 'Many :: Multiplicity'.
many_ty :: IfaceType
many_ty =
    IfaceTyConApp (IfaceTyCon dc_name (mkIfaceTyConInfo IsPromoted IfaceNormalTyCon))
                  IA_Nil
  where dc_name = getName manyDataConTyCon

-- | Apply the printer @f@ to @ty@, first defaulting RuntimeRep/Levity and
-- Multiplicity variables (see Note [Defaulting RuntimeRep variables])
-- unless the user asked for them explicitly, and only in user style.
hideNonStandardTypes :: (IfaceType -> SDoc) -> IfaceType -> SDoc
hideNonStandardTypes f ty
  = sdocOption sdocPrintExplicitRuntimeReps $ \printExplicitRuntimeReps ->
    sdocOption sdocLinearTypes $ \linearTypes ->
    getPprStyle      $ \sty ->
    let def_rep  = not printExplicitRuntimeReps
        def_mult = not linearTypes
    in if userStyle sty
       then f (defaultIfaceTyVarsOfKind def_rep def_mult ty)
       else f ty

instance Outputable IfaceAppArgs where
  ppr tca = pprIfaceAppArgs tca

pprIfaceAppArgs, pprParendIfaceAppArgs :: IfaceAppArgs -> SDoc
pprIfaceAppArgs       = ppr_app_args topPrec
pprParendIfaceAppArgs = ppr_app_args appPrec

-- | Print a sequence of application arguments, each at the given precedence.
ppr_app_args :: PprPrec -> IfaceAppArgs -> SDoc
ppr_app_args ctx_prec = go
  where
    go :: IfaceAppArgs -> SDoc
    go IA_Nil             = empty
    go (IA_Arg t argf ts) = ppr_app_arg ctx_prec (t, argf) <+> go ts

-- See Note [Pretty-printing invisible arguments]
-- Invisible (Specified/Inferred) arguments are only shown when
-- -fprint-explicit-kinds is on, prefixed with '@' (and braces for Inferred).
ppr_app_arg :: PprPrec -> (IfaceType, ForAllTyFlag) -> SDoc
ppr_app_arg ctx_prec (t, argf) =
  sdocOption sdocPrintExplicitKinds $ \print_kinds ->
  case argf of
       Required  -> ppr_ty ctx_prec t
       Specified |  print_kinds
                 -> char '@' <> ppr_ty appPrec t
       Inferred  |  print_kinds
                 -> char '@' <> braces (ppr_ty_nested t)
       _         -> empty

-------------------
pprIfaceForAllPart :: [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
pprIfaceForAllPart tvs ctxt sdoc
  = ppr_iface_forall_part ShowForAllWhen tvs ctxt sdoc

-- | Like 'pprIfaceForAllPart', but always uses an explicit @forall@.
pprIfaceForAllPartMust :: [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
pprIfaceForAllPartMust tvs ctxt sdoc
  = ppr_iface_forall_part ShowForAllMust tvs ctxt sdoc

-- | Render a coercion's forall telescope followed by the given body doc.
pprIfaceForAllCoPart :: [(IfLclName, IfaceCoercion, ForAllTyFlag, ForAllTyFlag)]
                     -> SDoc -> SDoc
pprIfaceForAllCoPart tvs sdoc
  = sep [ pprIfaceForAllCo tvs, sdoc ]

-- | Shared worker for printing @forall tvs. ctxt => body@; the flag decides
-- whether the forall is printed unconditionally or per
-- Note [When to print foralls].
ppr_iface_forall_part :: ShowForAllFlag
                      -> [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
ppr_iface_forall_part show_forall tvs ctxt sdoc
  = sep [ case show_forall of
            ShowForAllMust -> pprIfaceForAll tvs
            ShowForAllWhen -> pprUserIfaceForAll tvs
        , pprIfaceContextArr ctxt
        , sdoc]

-- | Render the "forall ... ." or "forall ... ->" bit of a type.
-- Consecutive binders with the same visibility are grouped into one
-- @forall@; the function recurses for each change of visibility.
pprIfaceForAll :: [IfaceForAllBndr] -> SDoc
pprIfaceForAll [] = empty
pprIfaceForAll bndrs@(Bndr _ vis : _)
  = sep [ add_separator (forAllLit <+> fsep docs)
        , pprIfaceForAll bndrs' ]
  where
    (bndrs', docs) = ppr_itv_bndrs bndrs vis

    -- Visible (Required) foralls end in "->", invisible ones in "."
    add_separator stuff = case vis of
                            Required -> stuff <+> arrow
                            _inv     -> stuff <>  dot

-- | Render the ... in @(forall ... .)@ or @(forall ... ->)@.
-- Returns both the list of not-yet-rendered binders and the doc.
-- No anonymous binders here!
ppr_itv_bndrs :: [IfaceForAllBndr]
              -> ForAllTyFlag -- ^ visibility of the first binder in the list
              -> ([IfaceForAllBndr], [SDoc])
-- Take the maximal prefix of binders whose visibility matches vis1,
-- rendering each; the rest are returned for the caller to handle.
ppr_itv_bndrs all_bndrs@(bndr@(Bndr _ vis) : bndrs) vis1
  | vis `eqForAllVis` vis1 = let (bndrs', doc) = ppr_itv_bndrs bndrs vis1 in
                             (bndrs', pprIfaceForAllBndr bndr : doc)
  | otherwise = (all_bndrs, [])
ppr_itv_bndrs [] _ = ([], [])

-- | Render a coercion's @forall bndrs.@ prefix (empty for no binders).
pprIfaceForAllCo :: [(IfLclName, IfaceCoercion, ForAllTyFlag, ForAllTyFlag)] -> SDoc
pprIfaceForAllCo []  = empty
pprIfaceForAllCo tvs = text "forall" <+> pprIfaceForAllCoBndrs tvs <> dot

pprIfaceForAllCoBndrs :: [(IfLclName, IfaceCoercion, ForAllTyFlag, ForAllTyFlag)] -> SDoc
pprIfaceForAllCoBndrs bndrs = hsep $ map pprIfaceForAllCoBndr bndrs

-- | Print one forall binder: Inferred binders are braced, all others
-- parenthesized; kind signatures are never suppressed here.
pprIfaceForAllBndr :: IfaceForAllBndr -> SDoc
pprIfaceForAllBndr bndr =
  case bndr of
    Bndr (IfaceTvBndr tv) Inferred ->
      braces $ pprIfaceTvBndr tv suppress_sig (UseBndrParens False)
    Bndr (IfaceTvBndr tv) _ ->
      pprIfaceTvBndr tv suppress_sig (UseBndrParens True)
    Bndr (IfaceIdBndr idv) _ -> pprIfaceIdBndr idv
  where
    -- See Note [Suppressing binder signatures]
    suppress_sig = SuppressBndrSig False

-- | Print one coercion forall binder, as @(tv visL~visR :: kind_co)@;
-- the visibility pair is omitted when both sides are the core lambda flag.
pprIfaceForAllCoBndr :: (IfLclName, IfaceCoercion, ForAllTyFlag, ForAllTyFlag) -> SDoc
pprIfaceForAllCoBndr (tv, kind_co, visL, visR)
  = parens (ppr tv <> pp_vis <+> dcolon <+> pprIfaceCoercion kind_co)
  where
    pp_vis | visL == coreTyLamForAllTyFlag
           , visR == coreTyLamForAllTyFlag
           = empty
           | otherwise
           = ppr visL <> char '~' <> ppr visR -- "[spec]~[reqd]"

-- | Show forall flag
--
-- Unconditionally show the forall quantifier with ('ShowForAllMust')
-- or when ('ShowForAllWhen') the names used are free in the binder
-- or when compiling with -fprint-explicit-foralls.
data ShowForAllFlag = ShowForAllMust | ShowForAllWhen

data ShowSub
  = ShowSub
      { ss_how_much :: ShowHowMuch
      , ss_forall :: ShowForAllFlag }

-- See Note [Printing IfaceDecl binders]
-- The alternative pretty printer referred to in the note.
newtype AltPpr = AltPpr (Maybe (OccName -> SDoc)) data ShowHowMuch = ShowHeader AltPpr -- ^ Header information only, not rhs | ShowSome (Maybe (OccName -> Bool)) AltPpr -- ^ Show the declaration and its RHS. The @Maybe@ predicate -- allows filtering of the sub-components which should be printing; -- any sub-components filtered out will be elided with @...@. | ShowIface -- ^ Everything including GHC-internal information (used in --show-iface) instance Outputable ShowHowMuch where ppr (ShowHeader _) = text "ShowHeader" ppr ShowIface = text "ShowIface" ppr (ShowSome _ _) = text "ShowSome" pprIfaceSigmaType :: ShowForAllFlag -> IfaceType -> SDoc pprIfaceSigmaType show_forall ty = hideNonStandardTypes (ppr_sigma show_forall topPrec) ty ppr_sigma :: ShowForAllFlag -> PprPrec -> IfaceType -> SDoc ppr_sigma show_forall ctxt_prec iface_ty = maybeParen ctxt_prec funPrec $ let (invis_tvs, theta, tau) = splitIfaceSigmaTy iface_ty (req_tvs, tau') = splitIfaceReqForallTy tau -- splitIfaceSigmaTy is recursive, so it will gather the binders after -- the theta, i.e. forall a. theta => forall b. tau -- will give you ([a,b], theta, tau). -- -- This isn't right when it comes to visible forall (see -- testsuite/tests/polykinds/T18522-ppr), -- so we split off required binders separately, -- using splitIfaceReqForallTy. -- -- An alternative solution would be to make splitIfaceSigmaTy -- non-recursive (see #18458). -- Then it could handle both invisible and required binders, and -- splitIfaceReqForallTy wouldn't be necessary here. in ppr_iface_forall_part show_forall invis_tvs theta $ sep [pprIfaceForAll req_tvs, ppr_ty_nested tau'] pprUserIfaceForAll :: [IfaceForAllBndr] -> SDoc pprUserIfaceForAll tvs = sdocOption sdocPrintExplicitForalls $ \print_foralls -> -- See Note [When to print foralls] in this module. 
ppWhen (any tv_has_kind_var tvs || any tv_is_required tvs || print_foralls) $ pprIfaceForAll tvs where tv_has_kind_var (Bndr (IfaceTvBndr (_,kind)) _) = not (ifTypeIsVarFree kind) tv_has_kind_var _ = False tv_is_required = isVisibleForAllTyFlag . binderFlag {- Note [When to print foralls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We opt to explicitly pretty-print `forall`s if any of the following criteria are met: 1. -fprint-explicit-foralls is on. 2. A bound type variable has a polymorphic kind. E.g., forall k (a::k). Proxy a -> Proxy a Since a's kind mentions a variable k, we print the foralls. 3. A bound type variable is a visible argument (#14238). Suppose we are printing the kind of: T :: forall k -> k -> Type The "forall k ->" notation means that this kind argument is required. That is, it must be supplied at uses of T. E.g., f :: T (Type->Type) Monad -> Int So we print an explicit "T :: forall k -> k -> Type", because omitting it and printing "T :: k -> Type" would be utterly misleading. See Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep. N.B. Until now (Aug 2018) we didn't check anything for coercion variables. Note [Printing foralls in type family instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use the same criteria as in Note [When to print foralls] to determine whether a type family instance should be pretty-printed with an explicit `forall`. Example: type family Foo (a :: k) :: k where Foo Maybe = [] Foo (a :: Type) = Int Foo a = a Without -fprint-explicit-foralls enabled, this will be pretty-printed as: type family Foo (a :: k) :: k where Foo Maybe = [] Foo a = Int forall k (a :: k). Foo a = a Note that only the third equation has an explicit forall, since it has a type variable with a non-Type kind. (If -fprint-explicit-foralls were enabled, then the second equation would be preceded with `forall a.`.) 
There is one tricky point in the implementation: what visibility do we give the type variables in a type family instance? Type family instances only store type *variables*, not type variable *binders*, and only the latter has visibility information. We opt to default the visibility of each of these type variables to Specified because users can't ever instantiate these variables manually, so the choice of visibility is only relevant to pretty-printing. (This is why the `k` in `forall k (a :: k). ...` above is printed the way it is, even though it wasn't written explicitly in the original source code.) We adopt the same strategy for data family instances. Example: data family DF (a :: k) data instance DF '[a, b] = DFList That data family instance is pretty-printed as: data instance forall j (a :: j) (b :: j). DF '[a, b] = DFList This is despite that the representation tycon for this data instance (call it $DF:List) actually has different visibilities for its binders. However, the visibilities of these binders are utterly irrelevant to the programmer, who cares only about the specificity of variables in `DF`'s type, not $DF:List's type. Therefore, we opt to pretty-print all variables in data family instances as Specified. Note [Printing promoted type constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this GHCi session (#14343) > _ :: Proxy '[ 'True ] error: Found hole: _ :: Proxy '['True] This would be bad, because the '[' looks like a character literal. A similar issue arises if the element is a character literal (#22488) ghci> type T = '[ 'x' ] ghci> :kind! T T :: [Char] = '['x'] Solution: in type-level lists and tuples, add a leading space if the first element is printed with a single quote. -} ------------------- -- See equivalent function in "GHC.Core.TyCo.Rep" pprIfaceTyList :: PprPrec -> IfaceType -> IfaceType -> SDoc -- Given a type-level list (t1 ': t2), see if we can print -- it in list notation [t1, ...]. 
-- Precondition: Opt_PrintExplicitKinds is off pprIfaceTyList ctxt_prec ty1 ty2 = case gather ty2 of (arg_tys, Nothing) -> sdocWithContext $ \ctx -> let items = ty1:arg_tys eos = isListEmptyOrSingleton items ticked = promTick (sdocStyle ctx) (PromotedItemListSyntax eos) (preBracket, postBracket) = if ticked then (char '\'', spaceIfSingleQuote) else (empty, id) in preBracket <> brackets (postBracket (fsep (punctuate comma (map (ppr_ty topPrec) items)))) (arg_tys, Just tl) -> maybeParen ctxt_prec funPrec $ hang (ppr_ty funPrec ty1) 2 (fsep [ colon <+> ppr_ty funPrec ty | ty <- arg_tys ++ [tl]]) where gather :: IfaceType -> ([IfaceType], Maybe IfaceType) -- (gather ty) = (tys, Nothing) means ty is a list [t1, .., tn] -- = (tys, Just tl) means ty is of form t1:t2:...tn:tl gather (IfaceTyConApp tc tys) | tc `ifaceTyConHasKey` consDataConKey , IA_Arg _ argf (IA_Arg ty1 Required (IA_Arg ty2 Required IA_Nil)) <- tys , isInvisibleForAllTyFlag argf , (args, tl) <- gather ty2 = (ty1:args, tl) | tc `ifaceTyConHasKey` nilDataConKey = ([], Nothing) gather ty = ([], Just ty) pprIfaceTypeApp :: PprPrec -> IfaceTyCon -> IfaceAppArgs -> SDoc pprIfaceTypeApp prec tc args = pprTyTcApp prec tc args pprTyTcApp :: PprPrec -> IfaceTyCon -> IfaceAppArgs -> SDoc pprTyTcApp ctxt_prec tc tys = sdocOption sdocPrintExplicitKinds $ \print_kinds -> sdocOption sdocPrintTypeAbbreviations $ \print_type_abbreviations -> getPprDebug $ \debug -> if | ifaceTyConName tc `hasKey` ipClassKey , IA_Arg (IfaceLitTy (IfaceStrTyLit n)) Required (IA_Arg ty Required IA_Nil) <- tys -> maybeParen ctxt_prec funPrec $ char '?' <> ftext (getLexicalFastString n) <> text "::" <> ppr_ty topPrec ty | IfaceTupleTyCon arity sort <- ifaceTyConSort info , not debug , arity == ifaceVisAppArgsLength tys -> ppr_tuple ctxt_prec sort (ifaceTyConIsPromoted info) tys -- NB: ppr_tuple requires a saturated tuple. 
| IfaceSumTyCon arity <- ifaceTyConSort info , not debug , arity == ifaceVisAppArgsLength tys -> ppr_sum ctxt_prec (ifaceTyConIsPromoted info) tys -- NB: ppr_sum requires a saturated unboxed sum. | tc `ifaceTyConHasKey` consDataConKey , False <- print_kinds , IA_Arg _ argf (IA_Arg ty1 Required (IA_Arg ty2 Required IA_Nil)) <- tys , isInvisibleForAllTyFlag argf -> pprIfaceTyList ctxt_prec ty1 ty2 | isIfaceLiftedTypeKind (IfaceTyConApp tc tys) , print_type_abbreviations -- See Note [Printing type abbreviations] -> ppr_kind_type ctxt_prec | isIfaceConstraintKind (IfaceTyConApp tc tys) , print_type_abbreviations -- See Note [Printing type abbreviations] -> pprPrefixOcc constraintKindTyConName | tc `ifaceTyConHasKey` fUNTyConKey , IA_Arg (IfaceTyConApp rep IA_Nil) Required args <- tys , rep `ifaceTyConHasKey` manyDataConKey , print_type_abbreviations -- See Note [Printing type abbreviations] -> pprIfacePrefixApp ctxt_prec (parens arrow) (map (ppr_app_arg appPrec) $ appArgsIfaceTypesForAllTyFlags $ stripInvisArgs (PrintExplicitKinds print_kinds) args) -- Use appArgsIfaceTypesForAllTyFlags to print invisible arguments -- correctly (#19310) | tc `ifaceTyConHasKey` errorMessageTypeErrorFamKey , not debug -- Suppress detail unless you _really_ want to see -> text "(TypeError ...)" | Just doc <- ppr_equality ctxt_prec tc (appArgsIfaceTypes tys) -> doc | otherwise -> ppr_iface_tc_app ppr_app_arg ctxt_prec tc $ appArgsIfaceTypesForAllTyFlags $ stripInvisArgs (PrintExplicitKinds print_kinds) tys where info = ifaceTyConInfo tc ppr_kind_type :: PprPrec -> SDoc ppr_kind_type ctxt_prec = sdocOption sdocStarIsType $ \case False -> pprPrefixOcc liftedTypeKindTyConName True -> maybeParen ctxt_prec starPrec $ unicodeSyntax (char '★') (char '*') -- | Pretty-print a type-level equality. 
-- Returns (Just doc) if the argument is a /saturated/ application -- of eqTyCon (~) -- eqPrimTyCon (~#) -- eqReprPrimTyCon (~R#) -- heqTyCon (~~) -- -- See Note [Equality predicates in IfaceType] -- and Note [The equality types story] in GHC.Builtin.Types.Prim ppr_equality :: PprPrec -> IfaceTyCon -> [IfaceType] -> Maybe SDoc ppr_equality ctxt_prec tc args | hetero_eq_tc , [k1, k2, t1, t2] <- args = Just $ print_equality (k1, k2, t1, t2) | hom_eq_tc , [k, t1, t2] <- args = Just $ print_equality (k, k, t1, t2) | otherwise = Nothing where homogeneous = tc_name `hasKey` eqTyConKey -- (~) || hetero_tc_used_homogeneously where hetero_tc_used_homogeneously = case ifaceTyConSort $ ifaceTyConInfo tc of IfaceEqualityTyCon -> True _other -> False -- True <=> a heterogeneous equality whose arguments -- are (in this case) of the same kind tc_name = ifaceTyConName tc pp = ppr_ty hom_eq_tc = tc_name `hasKey` eqTyConKey -- (~) hetero_eq_tc = tc_name `hasKey` eqPrimTyConKey -- (~#) || tc_name `hasKey` eqReprPrimTyConKey -- (~R#) || tc_name `hasKey` heqTyConKey -- (~~) nominal_eq_tc = tc_name `hasKey` heqTyConKey -- (~~) || tc_name `hasKey` eqPrimTyConKey -- (~#) print_equality args = sdocOption sdocPrintExplicitKinds $ \print_kinds -> sdocOption sdocPrintEqualityRelations $ \print_eqs -> getPprStyle $ \style -> getPprDebug $ \debug -> print_equality' args print_kinds (print_eqs || dumpStyle style || debug) print_equality' (ki1, ki2, ty1, ty2) print_kinds print_eqs | -- If -fprint-equality-relations is on, just print the original TyCon print_eqs = ppr_infix_eq (ppr tc) | -- Homogeneous use of heterogeneous equality (ty1 ~~ ty2) -- or unlifted equality (ty1 ~# ty2) nominal_eq_tc, homogeneous = ppr_infix_eq (text "~") | -- Heterogeneous use of unlifted equality (ty1 ~# ty2) not homogeneous = ppr_infix_eq (ppr heqTyCon) | -- Homogeneous use of representational unlifted equality (ty1 ~R# ty2) tc_name `hasKey` eqReprPrimTyConKey, homogeneous = let ki | print_kinds = [pp appPrec ki1] | 
otherwise = [] in pprIfacePrefixApp ctxt_prec (ppr coercibleTyCon) (ki ++ [pp appPrec ty1, pp appPrec ty2]) -- The other cases work as you'd expect | otherwise = ppr_infix_eq (ppr tc) where ppr_infix_eq :: SDoc -> SDoc ppr_infix_eq eq_op = pprIfaceInfixApp ctxt_prec eq_op (pp_ty_ki ty1 ki1) (pp_ty_ki ty2 ki2) where pp_ty_ki ty ki | print_kinds = parens (pp topPrec ty <+> dcolon <+> pp opPrec ki) | otherwise = pp opPrec ty pprIfaceCoTcApp :: PprPrec -> IfaceTyCon -> [IfaceCoercion] -> SDoc pprIfaceCoTcApp ctxt_prec tc tys = ppr_iface_tc_app (\prec (co, _) -> ppr_co prec co) ctxt_prec tc (map (, Required) tys) -- We are trying to re-use ppr_iface_tc_app here, which requires its -- arguments to be accompanied by visibilities. But visibility is -- irrelevant when printing coercions, so just default everything to -- Required. -- | Pretty-prints an application of a type constructor to some arguments -- (whose visibilities are known). This is polymorphic (over @a@) since we use -- this function to pretty-print two different things: -- -- 1. Types (from `pprTyTcApp'`) -- -- 2. Coercions (from 'pprIfaceCoTcApp') ppr_iface_tc_app :: (PprPrec -> (a, ForAllTyFlag) -> SDoc) -> PprPrec -> IfaceTyCon -> [(a, ForAllTyFlag)] -> SDoc ppr_iface_tc_app pp ctxt_prec tc tys = sdocOption sdocListTuplePuns $ \listTuplePuns -> if | listTuplePuns, tc `ifaceTyConHasKey` listTyConKey, [ty] <- tys -> brackets (pp topPrec ty) | tc `ifaceTyConHasKey` liftedTypeKindTyConKey -> ppr_kind_type ctxt_prec | isSymOcc (nameOccName (ifaceTyConName tc)) , [ ty1@(_, Required), ty2@(_, Required) ] <- tys -- Infix, two visible arguments (we know nothing of precedence though). -- Don't apply this special case if one of the arguments is invisible, -- lest we print something like (@LiftedRep -> @LiftedRep) (#15941). 
-> pprIfaceInfixApp ctxt_prec (pprIfaceTyCon tc) (pp opPrec ty1) (pp opPrec ty2) | otherwise -> pprIfacePrefixApp ctxt_prec (pprParendIfaceTyCon tc) (map (pp appPrec) tys) data TupleOrSum = IsSum | IsTuple TupleSort deriving (Eq) -- | Pretty-print a boxed tuple datacon in regular tuple syntax. -- Used when -XListTuplePuns is disabled. ppr_tuple_no_pun :: PprPrec -> [IfaceType] -> SDoc ppr_tuple_no_pun ctxt_prec = \case [t] -> maybeParen ctxt_prec appPrec (text "MkSolo" <+> pprPrecIfaceType appPrec t) tys -> tupleParens BoxedTuple (pprWithCommas pprIfaceType tys) -- | Pretty-print an unboxed tuple or sum type in its parenthesized, punned, form. -- Used when -XListTuplePuns is enabled. -- -- The tycon should be saturated: -- as many visible arguments as the arity of the sum or tuple. -- -- NB: this always strips off the invisible 'RuntimeRep' arguments, -- even with `-fprint-explicit-runtime-reps` and `-fprint-explicit-kinds`. ppr_tuple_sum_pun :: PprPrec -> TupleOrSum -> PromotionFlag -> IfaceType -> Arity -> [IfaceType] -> SDoc ppr_tuple_sum_pun ctxt_prec sort promoted tc arity tys | IsSum <- sort = sumParens (pprWithBars (ppr_ty topPrec) tys) | IsTuple ConstraintTuple <- sort , NotPromoted <- promoted , arity == 0 = maybeParen ctxt_prec sigPrec $ text "() :: Constraint" -- Special-case unary boxed tuples so that they are pretty-printed as -- `Solo x`, not `(x)` | IsTuple BoxedTuple <- sort , arity == 1 = pprPrecIfaceType ctxt_prec tc | IsTuple tupleSort <- sort = pprPromotionQuoteI promoted <> tupleParens tupleSort (quote_space (pprWithCommas pprIfaceType tys)) where quote_space = case promoted of IsPromoted -> spaceIfSingleQuote NotPromoted -> id -- | Pretty-print an unboxed tuple or sum type either in the punned or unpunned form, -- depending on whether -XListTuplePuns is enabled. 
ppr_tuple_sum :: PprPrec -> TupleOrSum -> PromotionFlag -> IfaceAppArgs -> SDoc ppr_tuple_sum ctxt_prec sort is_promoted args = sdocOption sdocListTuplePuns $ \case True -> ppr_tuple_sum_pun ctxt_prec sort is_promoted prefix_tc arity non_rep_tys False | IsPromoted <- is_promoted , IsTuple BoxedTuple <- sort -> ppr_tuple_no_pun ctxt_prec non_rep_tys | otherwise -> pprPrecIfaceType ctxt_prec prefix_tc where -- This tycon is used to print in prefix notation for the punned Solo -- case and the unabbreviated case. prefix_tc = IfaceTyConApp (IfaceTyCon (mk_name arity) info) args info = mkIfaceTyConInfo NotPromoted IfaceNormalTyCon mk_name = case (sort, is_promoted) of (IsTuple BoxedTuple, IsPromoted) -> tupleDataConName Boxed (IsTuple s, _) -> tupleTyConName s (IsSum, _) -> tyConName . sumTyCon -- drop the RuntimeRep vars. -- See Note [Unboxed tuple RuntimeRep vars] in GHC.Core.TyCon non_rep_tys = if strip_reps then drop arity all_tys else all_tys arity = if strip_reps then count `div` 2 else count count = length all_tys all_tys = appArgsIfaceTypes args strip_reps = case is_promoted of IsPromoted -> True NotPromoted -> strip_reps_sort strip_reps_sort = case sort of IsTuple BoxedTuple -> False IsTuple UnboxedTuple -> True IsTuple ConstraintTuple -> False IsSum -> True -- | Pretty-print an unboxed sum type. -- The sum should be saturated: as many visible arguments as the arity of -- the sum. ppr_sum :: PprPrec -> PromotionFlag -> IfaceAppArgs -> SDoc ppr_sum ctxt_prec = ppr_tuple_sum ctxt_prec IsSum -- | Pretty-print a tuple type (boxed tuple, constraint tuple, unboxed tuple). -- The tuple should be saturated: as many visible arguments as the arity of -- the tuple. 
ppr_tuple :: PprPrec -> TupleSort -> PromotionFlag -> IfaceAppArgs -> SDoc ppr_tuple ctxt_prec sort = ppr_tuple_sum ctxt_prec (IsTuple sort) pprIfaceTyLit :: IfaceTyLit -> SDoc pprIfaceTyLit (IfaceNumTyLit n) = integer n pprIfaceTyLit (IfaceStrTyLit n) = text (show n) pprIfaceTyLit (IfaceCharTyLit c) = text (show c) pprIfaceCoercion, pprParendIfaceCoercion :: IfaceCoercion -> SDoc pprIfaceCoercion = ppr_co topPrec pprParendIfaceCoercion = ppr_co appPrec ppr_co :: PprPrec -> IfaceCoercion -> SDoc ppr_co _ (IfaceReflCo ty) = angleBrackets (ppr ty) <> ppr_role Nominal ppr_co _ (IfaceGReflCo r ty IfaceMRefl) = angleBrackets (ppr ty) <> ppr_role r ppr_co ctxt_prec (IfaceGReflCo r ty (IfaceMCo co)) = ppr_special_co ctxt_prec (text "GRefl" <+> ppr r <+> pprParendIfaceType ty) [co] ppr_co ctxt_prec (IfaceFunCo r co_mult co1 co2) = maybeParen ctxt_prec funPrec $ sep (ppr_co funPrec co1 : ppr_fun_tail co_mult co2) where ppr_fun_tail co_mult1 (IfaceFunCo r co_mult2 co1 co2) = (ppr_arrow co_mult1 <> ppr_role r <+> ppr_co funPrec co1) : ppr_fun_tail co_mult2 co2 ppr_fun_tail co_mult1 other_co = [ppr_arrow co_mult1 <> ppr_role r <+> pprIfaceCoercion other_co] ppr_arrow = pprArrow (mb_conc, ppr_co) visArgTypeLike mb_conc (IfaceTyConAppCo _ tc _) = Just tc mb_conc _ = Nothing ppr_co _ (IfaceTyConAppCo r tc cos) = parens (pprIfaceCoTcApp topPrec tc cos) <> ppr_role r ppr_co ctxt_prec (IfaceAppCo co1 co2) = maybeParen ctxt_prec appPrec $ ppr_co funPrec co1 <+> pprParendIfaceCoercion co2 ppr_co ctxt_prec co@(IfaceForAllCo {}) = maybeParen ctxt_prec funPrec $ -- FIXME: collect and pretty-print visibility info? 
pprIfaceForAllCoPart tvs (pprIfaceCoercion inner_co) where (tvs, inner_co) = split_co co split_co (IfaceForAllCo (IfaceTvBndr (name, _)) visL visR kind_co co') = let (tvs, co'') = split_co co' in ((name,kind_co,visL,visR):tvs,co'') split_co (IfaceForAllCo (IfaceIdBndr (_, name, _)) visL visR kind_co co') = let (tvs, co'') = split_co co' in ((name,kind_co,visL,visR):tvs,co'') split_co co' = ([], co') -- Why these three? See Note [Free TyVars and CoVars in IfaceType] ppr_co _ (IfaceFreeCoVar covar) = ppr covar ppr_co _ (IfaceCoVarCo covar) = ppr covar ppr_co _ (IfaceHoleCo covar) = braces (ppr covar) ppr_co _ (IfaceUnivCo prov role ty1 ty2 ds) = text "Univ" <> (parens $ sep [ ppr role <+> ppr prov <> ppr ds , dcolon <+> ppr ty1 <> comma <+> ppr ty2 ]) ppr_co ctxt_prec (IfaceInstCo co ty) = maybeParen ctxt_prec appPrec $ text "Inst" <+> sep [ pprParendIfaceCoercion co , pprParendIfaceCoercion ty ] ppr_co ctxt_prec (IfaceAxiomCo ax cos) | null cos = pprIfAxRule ax -- Don't add parens | otherwise = ppr_special_co ctxt_prec (pprIfAxRule ax) cos ppr_co ctxt_prec (IfaceSymCo co) = ppr_special_co ctxt_prec (text "Sym") [co] ppr_co ctxt_prec (IfaceTransCo co1 co2) -- chain nested TransCo = let ppr_trans (IfaceTransCo c1 c2) = semi <+> ppr_co topPrec c1 : ppr_trans c2 ppr_trans c = [semi <+> ppr_co opPrec c] in maybeParen ctxt_prec opPrec $ vcat (ppr_co topPrec co1 : ppr_trans co2) ppr_co ctxt_prec (IfaceSelCo d co) = ppr_special_co ctxt_prec (text "SelCo:" <> ppr d) [co] ppr_co ctxt_prec (IfaceLRCo lr co) = ppr_special_co ctxt_prec (ppr lr) [co] ppr_co ctxt_prec (IfaceSubCo co) = ppr_special_co ctxt_prec (text "Sub") [co] ppr_co ctxt_prec (IfaceKindCo co) = ppr_special_co ctxt_prec (text "Kind") [co] ppr_special_co :: PprPrec -> SDoc -> [IfaceCoercion] -> SDoc ppr_special_co ctxt_prec doc cos = maybeParen ctxt_prec appPrec (sep [doc, nest 4 (sep (map pprParendIfaceCoercion cos))]) pprIfAxRule :: IfaceAxiomRule -> SDoc pprIfAxRule (IfaceAR_X n) = ppr n pprIfAxRule (IfaceAR_U 
n) = ppr n pprIfAxRule (IfaceAR_B n i) = ppr n <> brackets (int i) ppr_role :: Role -> SDoc ppr_role r = underscore <> pp_role where pp_role = case r of Nominal -> char 'N' Representational -> char 'R' Phantom -> char 'P' ------------------- instance Outputable IfLclName where ppr = ppr . ifLclNameFS instance Outputable IfaceTyCon where ppr = pprIfaceTyCon -- | Print an `IfaceTyCon` with a promotion tick if needed, without parens, -- suitable for use in infix contexts pprIfaceTyCon :: IfaceTyCon -> SDoc pprIfaceTyCon tc = pprPromotionQuote tc <> ppr (ifaceTyConName tc) -- | Print an `IfaceTyCon` with a promotion tick if needed, possibly with parens, -- suitable for use in prefix contexts pprParendIfaceTyCon :: IfaceTyCon -> SDoc pprParendIfaceTyCon tc = pprPromotionQuote tc <> pprPrefixVar (isSymOcc (nameOccName tc_name)) (ppr tc_name) where tc_name = ifaceTyConName tc instance Outputable IfaceTyConInfo where ppr (IfaceTyConInfo { ifaceTyConIsPromoted = prom , ifaceTyConSort = sort }) = angleBrackets $ ppr prom <> comma <+> ppr sort pprPromotionQuote :: IfaceTyCon -> SDoc pprPromotionQuote tc = getPprStyle $ \sty -> let name = getOccName (ifaceTyConName tc) ticked = case ifaceTyConIsPromoted (ifaceTyConInfo tc) of NotPromoted -> False IsPromoted -> promTick sty (PromotedItemDataCon name) in if ticked then char '\'' else empty pprPromotionQuoteI :: PromotionFlag -> SDoc pprPromotionQuoteI NotPromoted = empty pprPromotionQuoteI IsPromoted = char '\'' instance Outputable IfaceCoercion where ppr = pprIfaceCoercion instance Binary IfaceTyCon where put_ bh (IfaceTyCon n i) = put_ bh n >> put_ bh i get bh = do n <- get bh i <- get bh return (IfaceTyCon n i) instance Binary IfaceTyConSort where put_ bh IfaceNormalTyCon = putByte bh 0 put_ bh (IfaceTupleTyCon arity sort) = putByte bh 1 >> put_ bh arity >> put_ bh sort put_ bh (IfaceSumTyCon arity) = putByte bh 2 >> put_ bh arity put_ bh IfaceEqualityTyCon = putByte bh 3 get bh = do n <- getByte bh case n of 0 -> return 
IfaceNormalTyCon 1 -> IfaceTupleTyCon <$> get bh <*> get bh 2 -> IfaceSumTyCon <$> get bh _ -> return IfaceEqualityTyCon instance Binary IfaceTyConInfo where put_ bh (IfaceTyConInfo i s) = put_ bh i >> put_ bh s get bh = mkIfaceTyConInfo <$!> get bh <*> get bh -- We want to make sure, when reading from disk, as the most common case -- is supposed to be shared. Any thunk adds an additional indirection -- making sharing less useful. -- -- See !12200 for how this bang and the one in 'IfaceTyCon' reduces the -- residency by ~10% when loading 'mi_extra_decls' from disk. instance Outputable IfaceTyLit where ppr = pprIfaceTyLit instance Binary IfaceTyLit where put_ bh (IfaceNumTyLit n) = putByte bh 1 >> put_ bh n put_ bh (IfaceStrTyLit n) = putByte bh 2 >> put_ bh n put_ bh (IfaceCharTyLit n) = putByte bh 3 >> put_ bh n get bh = do tag <- getByte bh case tag of 1 -> do { n <- get bh ; return (IfaceNumTyLit n) } 2 -> do { n <- get bh ; return (IfaceStrTyLit n) } 3 -> do { n <- get bh ; return (IfaceCharTyLit n) } _ -> panic ("get IfaceTyLit " ++ show tag) instance Binary IfaceAppArgs where put_ bh tk = do -- Int is variable length encoded so only -- one byte for small lists. put_ bh (ifaceAppArgsLength tk) go tk where go IA_Nil = pure () go (IA_Arg a b t) = do put_ bh a put_ bh b go t get bh = do n <- get bh :: IO Int go n where go 0 = return IA_Nil go c = do a <- get bh b <- get bh IA_Arg a b <$> go (c - 1) ------------------- -- Some notes about printing contexts -- -- In the event that we are printing a singleton context (e.g. @Eq a@) we can -- omit parentheses. However, we must take care to set the precedence correctly -- to opPrec, since something like @a :~: b@ must be parenthesized (see -- #9658). -- -- When printing a larger context we use 'fsep' instead of 'sep' so that -- the context doesn't get displayed as a giant column. 
Rather than, -- instance (Eq a, -- Eq b, -- Eq c, -- Eq d, -- Eq e, -- Eq f, -- Eq g, -- Eq h, -- Eq i, -- Eq j, -- Eq k, -- Eq l) => -- Eq (a, b, c, d, e, f, g, h, i, j, k, l) -- -- we want -- -- instance (Eq a, Eq b, Eq c, Eq d, Eq e, Eq f, Eq g, Eq h, Eq i, -- Eq j, Eq k, Eq l) => -- Eq (a, b, c, d, e, f, g, h, i, j, k, l) -- | Prints "(C a, D b) =>", including the arrow. -- Used when we want to print a context in a type, so we -- use 'funPrec' to decide whether to parenthesise a singleton -- predicate; e.g. Num a => a -> a pprIfaceContextArr :: [IfacePredType] -> SDoc pprIfaceContextArr [] = empty pprIfaceContextArr [pred] = ppr_ty funPrec pred <+> darrow pprIfaceContextArr preds = ppr_parend_preds preds <+> darrow -- | Prints a context or @()@ if empty -- You give it the context precedence pprIfaceContext :: PprPrec -> [IfacePredType] -> SDoc pprIfaceContext _ [] = text "()" pprIfaceContext prec [pred] = ppr_ty prec pred pprIfaceContext _ preds = ppr_parend_preds preds ppr_parend_preds :: [IfacePredType] -> SDoc ppr_parend_preds preds = parens (fsep (punctuate comma (map ppr preds))) instance Binary IfaceType where put_ bh ty = case findUserDataWriter Proxy bh of tbl -> putEntry tbl bh ty get bh = getIfaceTypeShared bh -- | This is the byte tag we expect to read when the next -- value is not an 'IfaceType' value, but an offset into a -- lookup table. -- See Note [Deduplication during iface binary serialisation]. -- -- Must not overlap with any byte tag in 'getIfaceType'. ifaceTypeSharedByte :: Word8 ifaceTypeSharedByte = 99 -- | Like 'getIfaceType' but checks for a specific byte tag -- that indicates that we won't be able to read a 'IfaceType' value -- but rather an offset into a lookup table. Consequentially, -- we look up the value for the 'IfaceType' in the look up table. -- -- See Note [Deduplication during iface binary serialisation] -- for details. 
getIfaceTypeShared :: ReadBinHandle -> IO IfaceType getIfaceTypeShared bh = do start <- tellBinReader bh tag <- getByte bh if ifaceTypeSharedByte == tag then case findUserDataReader Proxy bh of tbl -> getEntry tbl bh else seekBinReader bh start >> getIfaceType bh -- | Serialises an 'IfaceType' to the given 'WriteBinHandle'. -- -- Serialising inner 'IfaceType''s uses the 'Binary.put' of 'IfaceType' which may be using -- a deduplication table. See Note [Deduplication during iface binary serialisation]. putIfaceType :: WriteBinHandle -> IfaceType -> IO () putIfaceType _ (IfaceFreeTyVar tv) = pprPanic "Can't serialise IfaceFreeTyVar" (ppr tv) -- See Note [Free TyVars and CoVars in IfaceType] putIfaceType bh (IfaceForAllTy aa ab) = do putByte bh 0 put_ bh aa put_ bh ab putIfaceType bh (IfaceTyVar ad) = do putByte bh 1 put_ bh ad putIfaceType bh (IfaceAppTy ae af) = do putByte bh 2 put_ bh ae put_ bh af putIfaceType bh (IfaceFunTy af aw ag ah) = do putByte bh 3 put_ bh af put_ bh aw put_ bh ag put_ bh ah putIfaceType bh (IfaceTyConApp tc tys) = do { putByte bh 5; put_ bh tc; put_ bh tys } putIfaceType bh (IfaceCastTy a b) = do { putByte bh 6; put_ bh a; put_ bh b } putIfaceType bh (IfaceCoercionTy a) = do { putByte bh 7; put_ bh a } putIfaceType bh (IfaceTupleTy s i tys) = do { putByte bh 8; put_ bh s; put_ bh i; put_ bh tys } putIfaceType bh (IfaceLitTy n) = do { putByte bh 9; put_ bh n } -- | Deserialises an 'IfaceType' from the given 'ReadBinHandle'. -- -- Reading inner 'IfaceType''s uses the 'Binary.get' of 'IfaceType' which may be using -- a deduplication table. See Note [Deduplication during iface binary serialisation]. 
getIfaceType :: HasCallStack => ReadBinHandle -> IO IfaceType getIfaceType bh = do h <- getByte bh case h of 0 -> do aa <- get bh ab <- get bh return (IfaceForAllTy aa ab) 1 -> do ad <- get bh return (IfaceTyVar ad) 2 -> do ae <- get bh af <- get bh return (IfaceAppTy ae af) 3 -> do af <- get bh aw <- get bh ag <- get bh ah <- get bh return (IfaceFunTy af aw ag ah) 5 -> do { tc <- get bh; tys <- get bh ; return (IfaceTyConApp tc tys) } 6 -> do { a <- get bh; b <- get bh ; return (IfaceCastTy a b) } 7 -> do { a <- get bh ; return (IfaceCoercionTy a) } 8 -> do { s <- get bh; i <- get bh; tys <- get bh ; return (IfaceTupleTy s i tys) } _ -> do n <- get bh return (IfaceLitTy n) instance Binary IfLclName where put_ bh = put_ bh . ifLclNameFS get bh = do fs <- get bh pure $ IfLclName $ LexicalFastString fs instance Binary IfaceMCoercion where put_ bh IfaceMRefl = putByte bh 1 put_ bh (IfaceMCo co) = do putByte bh 2 put_ bh co get bh = do tag <- getByte bh case tag of 1 -> return IfaceMRefl 2 -> do a <- get bh return $ IfaceMCo a _ -> panic ("get IfaceMCoercion " ++ show tag) instance Binary IfaceCoercion where put_ bh (IfaceReflCo a) = do putByte bh 1 put_ bh a put_ bh (IfaceGReflCo a b c) = do putByte bh 2 put_ bh a put_ bh b put_ bh c put_ bh (IfaceFunCo a w b c) = do putByte bh 3 put_ bh a put_ bh w put_ bh b put_ bh c put_ bh (IfaceTyConAppCo a b c) = do putByte bh 4 put_ bh a put_ bh b put_ bh c put_ bh (IfaceAppCo a b) = do putByte bh 5 put_ bh a put_ bh b put_ bh (IfaceForAllCo a visL visR b c) = do putByte bh 6 put_ bh a put_ bh visL put_ bh visR put_ bh b put_ bh c put_ bh (IfaceCoVarCo a) = do putByte bh 7 put_ bh a put_ bh (IfaceUnivCo a b c d deps) = do putByte bh 9 put_ bh a put_ bh b put_ bh c put_ bh d put_ bh deps put_ bh (IfaceSymCo a) = do putByte bh 10 put_ bh a put_ bh (IfaceTransCo a b) = do putByte bh 11 put_ bh a put_ bh b put_ bh (IfaceSelCo a b) = do putByte bh 12 put_ bh a put_ bh b put_ bh (IfaceLRCo a b) = do putByte bh 13 put_ bh a put_ bh b 
put_ bh (IfaceInstCo a b) = do putByte bh 14 put_ bh a put_ bh b put_ bh (IfaceKindCo a) = do putByte bh 15 put_ bh a put_ bh (IfaceSubCo a) = do putByte bh 16 put_ bh a put_ bh (IfaceAxiomCo a b) = do putByte bh 17 put_ bh a put_ bh b put_ _ (IfaceFreeCoVar cv) = pprPanic "Can't serialise IfaceFreeCoVar" (ppr cv) -- See Note [Free TyVars and CoVars in IfaceType] put_ _ (IfaceHoleCo cv) = pprPanic "Can't serialise IfaceHoleCo" (ppr cv) -- See Note [Holes in IfaceCoercion] get bh = do tag <- getByte bh case tag of 1 -> do a <- get bh return $ IfaceReflCo a 2 -> do a <- get bh b <- get bh c <- get bh return $ IfaceGReflCo a b c 3 -> do a <- get bh w <- get bh b <- get bh c <- get bh return $ IfaceFunCo a w b c 4 -> do a <- get bh b <- get bh c <- get bh return $ IfaceTyConAppCo a b c 5 -> do a <- get bh b <- get bh return $ IfaceAppCo a b 6 -> do a <- get bh visL <- get bh visR <- get bh b <- get bh c <- get bh return $ IfaceForAllCo a visL visR b c 7 -> do a <- get bh return $ IfaceCoVarCo a 9 -> do a <- get bh b <- get bh c <- get bh d <- get bh deps <- get bh return $ IfaceUnivCo a b c d deps 10-> do a <- get bh return $ IfaceSymCo a 11-> do a <- get bh b <- get bh return $ IfaceTransCo a b 12-> do a <- get bh b <- get bh return $ IfaceSelCo a b 13-> do a <- get bh b <- get bh return $ IfaceLRCo a b 14-> do a <- get bh b <- get bh return $ IfaceInstCo a b 15-> do a <- get bh return $ IfaceKindCo a 16-> do a <- get bh return $ IfaceSubCo a 17-> do a <- get bh b <- get bh return $ IfaceAxiomCo a b _ -> panic ("get IfaceCoercion " ++ show tag) instance Binary IfaceAxiomRule where put_ bh (IfaceAR_X n) = putByte bh 0 >> put_ bh n put_ bh (IfaceAR_U n) = putByte bh 1 >> put_ bh n put_ bh (IfaceAR_B n i) = putByte bh 2 >> put_ bh n >> put_ bh i get bh = do h <- getByte bh case h of 0 -> do { n <- get bh; return (IfaceAR_X n) } 1 -> do { n <- get bh; return (IfaceAR_U n) } _ -> do { n <- get bh; i <- get bh; return (IfaceAR_B n i) } instance Binary (DefMethSpec 
IfaceType) where put_ bh VanillaDM = putByte bh 0 put_ bh (GenericDM t) = putByte bh 1 >> put_ bh t get bh = do h <- getByte bh case h of 0 -> return VanillaDM _ -> do { t <- get bh; return (GenericDM t) } instance NFData IfaceType where rnf = \case IfaceFreeTyVar f1 -> f1 `seq` () IfaceTyVar f1 -> rnf f1 IfaceLitTy f1 -> rnf f1 IfaceAppTy f1 f2 -> rnf f1 `seq` rnf f2 IfaceFunTy f1 f2 f3 f4 -> f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceForAllTy f1 f2 -> f1 `seq` rnf f2 IfaceTyConApp f1 f2 -> rnf f1 `seq` rnf f2 IfaceCastTy f1 f2 -> rnf f1 `seq` rnf f2 IfaceCoercionTy f1 -> rnf f1 IfaceTupleTy f1 f2 f3 -> f1 `seq` f2 `seq` rnf f3 instance NFData IfaceTyLit where rnf = \case IfaceNumTyLit f1 -> rnf f1 IfaceStrTyLit f1 -> rnf f1 IfaceCharTyLit f1 -> rnf f1 instance NFData IfaceCoercion where rnf = \case IfaceReflCo f1 -> rnf f1 IfaceGReflCo f1 f2 f3 -> f1 `seq` rnf f2 `seq` rnf f3 IfaceFunCo f1 f2 f3 f4 -> f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceTyConAppCo f1 f2 f3 -> f1 `seq` rnf f2 `seq` rnf f3 IfaceAppCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceForAllCo f1 f2 f3 f4 f5 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` rnf f5 IfaceCoVarCo f1 -> rnf f1 IfaceAxiomCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceUnivCo f1 f2 f3 f4 deps -> rnf f1 `seq` f2 `seq` rnf f3 `seq` rnf f4 `seq` rnf deps IfaceSymCo f1 -> rnf f1 IfaceTransCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceSelCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceLRCo f1 f2 -> f1 `seq` rnf f2 IfaceInstCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceKindCo f1 -> rnf f1 IfaceSubCo f1 -> rnf f1 IfaceFreeCoVar f1 -> f1 `seq` () IfaceHoleCo f1 -> f1 `seq` () instance NFData IfaceAxiomRule where rnf = \case IfaceAR_X n -> rnf n IfaceAR_U n -> rnf n IfaceAR_B n i -> rnf n `seq` rnf i instance NFData IfaceMCoercion where rnf x = seq x () instance NFData IfaceOneShot where rnf x = seq x () instance NFData IfaceTyConSort where rnf = \case IfaceNormalTyCon -> () IfaceTupleTyCon arity sort -> rnf arity `seq` sort `seq` () IfaceSumTyCon arity -> rnf arity 
IfaceEqualityTyCon -> () instance NFData IfLclName where rnf (IfLclName lfs) = rnf lfs instance NFData IfaceTyConInfo where rnf (IfaceTyConInfo f s) = f `seq` rnf s instance NFData IfaceTyCon where rnf (IfaceTyCon nm info) = rnf nm `seq` rnf info instance NFData IfaceBndr where rnf = \case IfaceIdBndr id_bndr -> rnf id_bndr IfaceTvBndr tv_bndr -> rnf tv_bndr instance NFData IfaceAppArgs where rnf = \case IA_Nil -> () IA_Arg f1 f2 f3 -> rnf f1 `seq` f2 `seq` rnf f3 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Iface/Type.hs-boot0000644000000000000000000000061307346545000021133 0ustar0000000000000000module GHC.Iface.Type ( IfaceType, IfaceTyCon, IfaceBndr , IfaceCoercion, IfaceTyLit, IfaceAppArgs , ShowSub ) where -- Empty import to influence the compilation ordering. -- See W1 of Note [Tracking dependencies on primitives] in GHC.Internal.Base import GHC.Base () data IfaceAppArgs data IfaceType data IfaceTyCon data IfaceTyLit data IfaceCoercion data IfaceBndr data ShowSub ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/0000755000000000000000000000000007346545000016222 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/Ident.hs0000644000000000000000000000440207346545000017621 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.Ident -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.Ident defines identifiers for the JS backend. We keep this module -- separate to prevent coupling between GHC and the backend and between -- unrelated modules is the JS backend. 
-- -- * Consumers -- -- The entire JavaScript Backend consumes this module including modules in -- GHC.JS.\* and modules in GHC.StgToJS.\* -- -- * Additional Notes -- -- This module should be kept as small as possible. Anything added to it -- will be coupled to the JS backend EDSL and the JS Backend including the -- linker and rts. You have been warned. -- ----------------------------------------------------------------------------- module GHC.JS.Ident ( Ident(..) , name ) where import GHC.Prelude import GHC.Data.FastString import GHC.Types.Unique import GHC.Utils.Outputable -------------------------------------------------------------------------------- -- Identifiers -------------------------------------------------------------------------------- -- | A newtype wrapper around 'FastString' for JS identifiers. newtype Ident = TxtI { identFS :: FastString } deriving stock (Show, Eq) deriving newtype (Uniquable, Outputable) -- | To give a thing a name is to have power over it. This smart constructor -- serves two purposes: first, it isolates the JS backend from the rest of GHC. -- The backend should not explicitly use types provided by GHC but instead -- should wrap them such as we do here. Second it creates a symbol in the JS -- backend, but it does not yet give that symbols meaning. Giving the symbol -- meaning only occurs once it is used with a combinator from @GHC.JS.Make@. 
name :: FastString -> Ident name = TxtI ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/JStg/0000755000000000000000000000000007346545000017071 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/JStg/Monad.hs0000644000000000000000000000763207346545000020473 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE OverloadedStrings #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.JStg.Monad -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.JStg.Monad defines the computational environment for the eDSL that -- we use to write the JS Backend's RTS. Its purpose is to ensure unique -- identifiers are generated throughout the backend and that we can use the -- host language to ensure references are not mixed. -- -- * Strategy -- -- The monad is a straightforward state monad which holds an environment -- holds a pointer to a prefix to tag identifiers with and an infinite -- stream of identifiers. -- -- * Usage -- -- One should almost never need to directly use the functions in this -- module. Instead one should opt to use the combinators in 'GHC.JS.Make', -- the sole exception to this is the @withTag@ function which is used to -- change the prefix of identifiers for a given computation. For example, -- the rts uses this function to tag all identifiers generated by the RTS -- code as RTS_N, where N is some unique. 
----------------------------------------------------------------------------- module GHC.JS.JStg.Monad ( runJSM , JSM , withTag , newIdent , initJSM ) where import Prelude import GHC.JS.Ident import GHC.Types.Unique import GHC.Types.Unique.Supply import Control.Monad.Trans.State.Strict import GHC.Data.FastString -------------------------------------------------------------------------------- -- JSM Monad -------------------------------------------------------------------------------- -- | Environment for the JSM Monad. We maintain the prefix of each ident in the -- environment to allow consumers to tag idents with a new prefix. See @withTag@ data JEnv = JEnv { prefix :: !FastString -- ^ prefix for generated names, e.g., -- prefix = "RTS" will generate names -- such as 'h$RTS_jUt' , ids :: UniqSupply -- ^ The supply of uniques for names -- generated by the JSM monad. } type JSM a = State JEnv a runJSM :: JEnv -> JSM a -> a runJSM env m = evalState m env -- | create a new environment using the input tag. initJSMState :: FastString -> UniqSupply -> JEnv initJSMState tag supply = JEnv { prefix = tag , ids = supply } initJSM :: IO JEnv initJSM = do supply <- mkSplitUniqSupply 'j' return (initJSMState "js" supply) update_stream :: UniqSupply -> JSM () update_stream new = modify' $ \env -> env {ids = new} -- | generate a fresh Ident newIdent :: JSM Ident newIdent = do env <- get let tag = prefix env supply = ids env (id,rest) = takeUniqFromSupply supply update_stream rest return $ mk_ident tag id mk_ident :: FastString -> Unique -> Ident mk_ident t i = name (mconcat [t, "_", mkFastString (show i)]) -- | Set the tag for @Ident@s for all remaining computations. tag_names :: FastString -> JSM () tag_names tag = modify' (\env -> env {prefix = tag}) -- | tag the name generater with a prefix for the monadic action. 
withTag :: FastString -- ^ new name to tag with -> JSM a -- ^ action to run with new tags -> JSM a -- ^ result withTag tag go = do old <- gets prefix tag_names tag result <- go tag_names old return result ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/JStg/Syntax.hs0000644000000000000000000003033507346545000020717 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE PatternSynonyms #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.JStg.Syntax -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.JStg.Syntax defines the eDSL that the JS backend's runtime system -- is written in. Nothing fancy, its just a straightforward deeply embedded -- DSL. -- -- In general, one should not use these constructors explicitly in the JS -- backend. Instead, prefer using the combinators in GHC.JS.Make, if those -- are suitable then prefer using the patterns exported from this module ----------------------------------------------------------------------------- module GHC.JS.JStg.Syntax ( -- * Deeply embedded JS datatypes JStgStat(..) , JStgExpr(..) , JVal(..) , Op(..) , AOp(..) , UOp(..) , JsLabel -- * pattern synonyms over JS operators , pattern New , pattern Not , pattern Negate , pattern Add , pattern Sub , pattern Mul , pattern Div , pattern Mod , pattern BOr , pattern BAnd , pattern BXor , pattern BNot , pattern LOr , pattern LAnd , pattern Int , pattern String , pattern Var , pattern PreInc , pattern PostInc , pattern PreDec , pattern PostDec -- * Utility , SaneDouble(..) 
, pattern Func , global , local ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.JS.Ident import Control.DeepSeq import Data.Data import qualified Data.Semigroup as Semigroup import GHC.Generics import GHC.Data.FastString import GHC.Types.Unique.Map import GHC.Types.SaneDouble -------------------------------------------------------------------------------- -- Statements -------------------------------------------------------------------------------- -- | JavaScript statements, see the [ECMA262 -- Reference](https://tc39.es/ecma262/#sec-ecmascript-language-statements-and-declarations) -- for details data JStgStat = DeclStat !Ident !(Maybe JStgExpr) -- ^ Variable declarations: var foo [= e] | ReturnStat JStgExpr -- ^ Return | IfStat JStgExpr JStgStat JStgStat -- ^ If | WhileStat Bool JStgExpr JStgStat -- ^ While, bool is "do" when True | ForStat JStgStat JStgExpr JStgStat JStgStat -- ^ For | ForInStat Bool Ident JStgExpr JStgStat -- ^ For-in, bool is "each' when True | SwitchStat JStgExpr [(JStgExpr, JStgStat)] JStgStat -- ^ Switch | TryStat JStgStat Ident JStgStat JStgStat -- ^ Try | BlockStat [JStgStat] -- ^ Blocks | ApplStat JStgExpr [JStgExpr] -- ^ Application | UOpStat UOp JStgExpr -- ^ Unary operators | AssignStat JStgExpr AOp JStgExpr -- ^ Binding form: @foo = bar@ | LabelStat JsLabel JStgStat -- ^ Statement Labels, makes me nostalgic for qbasic | BreakStat (Maybe JsLabel) -- ^ Break | ContinueStat (Maybe JsLabel) -- ^ Continue | FuncStat !Ident [Ident] JStgStat -- ^ an explicit function definition deriving (Eq, Generic) -- | A Label used for 'JStgStat', specifically 'BreakStat', 'ContinueStat' and of -- course 'LabelStat' type JsLabel = LexicalFastString instance Semigroup JStgStat where (<>) = appendJStgStat instance Monoid JStgStat where mempty = BlockStat [] -- | Append a statement to another statement. 'appendJStgStat' only returns a -- 'JStgStat' that is /not/ a 'BlockStat' when either @mx@ or @my is an empty -- 'BlockStat'. 
That is: -- > (BlockStat [] , y ) = y -- > (x , BlockStat []) = x appendJStgStat :: JStgStat -> JStgStat -> JStgStat appendJStgStat mx my = case (mx,my) of (BlockStat [] , y ) -> y (x , BlockStat []) -> x (BlockStat xs , BlockStat ys) -> BlockStat $ xs ++ ys (BlockStat xs , ys ) -> BlockStat $ xs ++ [ys] (xs , BlockStat ys) -> BlockStat $ xs : ys (xs , ys ) -> BlockStat [xs,ys] -------------------------------------------------------------------------------- -- Expressions -------------------------------------------------------------------------------- -- | JavaScript Expressions data JStgExpr = ValExpr JVal -- ^ All values are trivially expressions | SelExpr JStgExpr Ident -- ^ Selection: Obj.foo, see 'GHC.JS.Make..^' | IdxExpr JStgExpr JStgExpr -- ^ Indexing: Obj[foo], see 'GHC.JS.Make..!' | InfixExpr Op JStgExpr JStgExpr -- ^ Infix Expressions, see 'JStgExpr' pattern synonyms | UOpExpr UOp JStgExpr -- ^ Unary Expressions | IfExpr JStgExpr JStgExpr JStgExpr -- ^ If-expression | ApplExpr JStgExpr [JStgExpr] -- ^ Application deriving (Eq, Generic) instance Outputable JStgExpr where ppr x = case x of ValExpr _ -> text ("ValExpr" :: String) SelExpr x' _ -> text ("SelExpr" :: String) <+> ppr x' IdxExpr x' y' -> text ("IdxExpr" :: String) <+> ppr (x', y') InfixExpr _ x' y' -> text ("InfixExpr" :: String) <+> ppr (x', y') UOpExpr _ x' -> text ("UOpExpr" :: String) <+> ppr x' IfExpr p t e -> text ("IfExpr" :: String) <+> ppr (p, t, e) ApplExpr x' xs -> text ("ApplExpr" :: String) <+> ppr (x', xs) -- * Useful pattern synonyms to ease programming with the deeply embedded JS -- AST. Each pattern wraps @UOp@ and @Op@ into a @JStgExpr@s to save typing and -- for convienience. In addition we include a string wrapper for JS string -- and Integer literals. 
-- | pattern synonym for a unary operator new pattern New :: JStgExpr -> JStgExpr pattern New x = UOpExpr NewOp x -- | pattern synonym for prefix increment @++x@ pattern PreInc :: JStgExpr -> JStgExpr pattern PreInc x = UOpExpr PreIncOp x -- | pattern synonym for postfix increment @x++@ pattern PostInc :: JStgExpr -> JStgExpr pattern PostInc x = UOpExpr PostIncOp x -- | pattern synonym for prefix decrement @--x@ pattern PreDec :: JStgExpr -> JStgExpr pattern PreDec x = UOpExpr PreDecOp x -- | pattern synonym for postfix decrement @--x@ pattern PostDec :: JStgExpr -> JStgExpr pattern PostDec x = UOpExpr PostDecOp x -- | pattern synonym for logical not @!@ pattern Not :: JStgExpr -> JStgExpr pattern Not x = UOpExpr NotOp x -- | pattern synonym for unary negation @-@ pattern Negate :: JStgExpr -> JStgExpr pattern Negate x = UOpExpr NegOp x -- | pattern synonym for addition @+@ pattern Add :: JStgExpr -> JStgExpr -> JStgExpr pattern Add x y = InfixExpr AddOp x y -- | pattern synonym for subtraction @-@ pattern Sub :: JStgExpr -> JStgExpr -> JStgExpr pattern Sub x y = InfixExpr SubOp x y -- | pattern synonym for multiplication @*@ pattern Mul :: JStgExpr -> JStgExpr -> JStgExpr pattern Mul x y = InfixExpr MulOp x y -- | pattern synonym for division @*@ pattern Div :: JStgExpr -> JStgExpr -> JStgExpr pattern Div x y = InfixExpr DivOp x y -- | pattern synonym for remainder @%@ pattern Mod :: JStgExpr -> JStgExpr -> JStgExpr pattern Mod x y = InfixExpr ModOp x y -- | pattern synonym for Bitwise Or @|@ pattern BOr :: JStgExpr -> JStgExpr -> JStgExpr pattern BOr x y = InfixExpr BOrOp x y -- | pattern synonym for Bitwise And @&@ pattern BAnd :: JStgExpr -> JStgExpr -> JStgExpr pattern BAnd x y = InfixExpr BAndOp x y -- | pattern synonym for Bitwise XOr @^@ pattern BXor :: JStgExpr -> JStgExpr -> JStgExpr pattern BXor x y = InfixExpr BXorOp x y -- | pattern synonym for Bitwise Not @~@ pattern BNot :: JStgExpr -> JStgExpr pattern BNot x = UOpExpr BNotOp x -- | pattern synonym 
for logical Or @||@ pattern LOr :: JStgExpr -> JStgExpr -> JStgExpr pattern LOr x y = InfixExpr LOrOp x y -- | pattern synonym for logical And @&&@ pattern LAnd :: JStgExpr -> JStgExpr -> JStgExpr pattern LAnd x y = InfixExpr LAndOp x y -- | pattern synonym to create integer values pattern Int :: Integer -> JStgExpr pattern Int x = ValExpr (JInt x) -- | pattern synonym to create string values pattern String :: FastString -> JStgExpr pattern String x = ValExpr (JStr x) -- | pattern synonym to create a local variable reference pattern Var :: Ident -> JStgExpr pattern Var x = ValExpr (JVar x) -- | pattern synonym to create an anonymous function pattern Func :: [Ident] -> JStgStat -> JStgExpr pattern Func args body = ValExpr (JFunc args body) -------------------------------------------------------------------------------- -- Values -------------------------------------------------------------------------------- -- | JavaScript values data JVal = JVar Ident -- ^ A variable reference | JList [JStgExpr] -- ^ A JavaScript list, or what JS -- calls an Array | JDouble SaneDouble -- ^ A Double | JInt Integer -- ^ A BigInt | JStr FastString -- ^ A String | JRegEx FastString -- ^ A Regex | JBool Bool -- ^ A Boolean | JHash (UniqMap FastString JStgExpr) -- ^ A JS HashMap: @{"foo": 0}@ | JFunc [Ident] JStgStat -- ^ A function deriving (Eq, Generic) -------------------------------------------------------------------------------- -- Operators -------------------------------------------------------------------------------- -- | JS Binary Operators. 
We do not deeply embed the comma operator and the -- assignment operators data Op = EqOp -- ^ Equality: `==` | StrictEqOp -- ^ Strict Equality: `===` | NeqOp -- ^ InEquality: `!=` | StrictNeqOp -- ^ Strict InEquality `!==` | GtOp -- ^ Greater Than: `>` | GeOp -- ^ Greater Than or Equal: `>=` | LtOp -- ^ Less Than: < | LeOp -- ^ Less Than or Equal: <= | AddOp -- ^ Addition: + | SubOp -- ^ Subtraction: - | MulOp -- ^ Multiplication \* | DivOp -- ^ Division: \/ | ModOp -- ^ Remainder: % | LeftShiftOp -- ^ Left Shift: \<\< | RightShiftOp -- ^ Right Shift: \>\> | ZRightShiftOp -- ^ Unsigned RightShift: \>\>\> | BAndOp -- ^ Bitwise And: & | BOrOp -- ^ Bitwise Or: | | BXorOp -- ^ Bitwise XOr: ^ | LAndOp -- ^ Logical And: && | LOrOp -- ^ Logical Or: || | InstanceofOp -- ^ @instanceof@ | InOp -- ^ @in@ deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData Op -- | JS Unary Operators data UOp = NotOp -- ^ Logical Not: @!@ | BNotOp -- ^ Bitwise Not: @~@ | NegOp -- ^ Negation: @-@ | PlusOp -- ^ Unary Plus: @+x@ | NewOp -- ^ new x | TypeofOp -- ^ typeof x | DeleteOp -- ^ delete x | YieldOp -- ^ yield x | VoidOp -- ^ void x | PreIncOp -- ^ Prefix Increment: @++x@ | PostIncOp -- ^ Postfix Increment: @x++@ | PreDecOp -- ^ Prefix Decrement: @--x@ | PostDecOp -- ^ Postfix Decrement: @x--@ deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData UOp -- | JS Unary Operators data AOp = AssignOp -- ^ Vanilla Assignment: = | AddAssignOp -- ^ Addition Assignment: += | SubAssignOp -- ^ Subtraction Assignment: -= deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData AOp -- | construct a JS reference, intended to refer to a global name global :: FastString -> JStgExpr global = Var . name -- | construct a JS reference, intended to refer to a local name local :: FastString -> JStgExpr local = Var . 
name ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/Make.hs0000644000000000000000000007055207346545000017444 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- only for Num, Fractional on JStgExpr ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.Make -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.Make defines helper functions to ease the creation of JavaScript -- ASTs as defined in 'GHC.JS.Syntax'. Its purpose is twofold: make the EDSL -- more ergonomic to program in, and make errors in the EDSL /look/ obvious -- because the EDSL is untyped. It is primarily concerned with injecting -- terms into the domain of the EDSL to construct JS programs in Haskell. -- -- * Strategy -- -- The strategy for this module comes straight from gentzen; where we have -- two types of helper functions. Functions which inject terms into the -- EDSL, and combinator functions which operate on terms in the EDSL to -- construct new terms in the EDSL. Crucially, missing from this module are -- corresponding /elimination/ or /destructing/ functions which would -- project information from the EDSL back to Haskell. See -- 'GHC.StgToJS.Utils' for such functions. -- -- * /Introduction/ functions -- -- We define various primitive helpers which /introduce/ terms in the -- EDSL, for example 'jVar', 'jLam', and 'var' and 'jString'. -- Similarly this module exports four typeclasses 'ToExpr', 'ToStat', -- 'JVarMagic', 'JSArgument'. 
'ToExpr' injects values as a JS -- expression into the EDSL. 'ToStat' injects values as JS statements -- into the EDSL. @JVarMagic@ provides a polymorphic way to introduce -- a new name into the EDSL and @JSArgument@ provides a polymorphic -- way to bind variable names for use in JS functions with different -- arities. -- -- * /Combinator/ functions -- -- The rest of the module defines combinators which create terms in -- the EDSL from terms in the EDSL. Notable examples are '|=' and -- '||=', '|=' is sugar for 'AssignStat', it is a binding form that -- declares @foo = bar@ /assuming/ foo has been already declared. -- '||=' is more sugar on top of '|=', it is also a binding form that -- declares the LHS of '|=' before calling '|=' to bind a value, bar, -- to a variable foo. Other common examples are the 'if_' and 'math_' -- helpers such as 'math_cos'. -- -- * Consumers -- -- The entire JS backend consumes this module, e.g., the modules in -- GHC.StgToJS.\*. -- -- * Notation -- -- In this module we use @==>@ in docstrings to show the translation from -- the JS EDSL domain to JS code. For example, @foo ||= bar ==> var foo; foo -- = bar;@ should be read as @foo ||= bar@ is in the EDSL domain and results -- in the JS code @var foo; foo = bar;@ when compiled. -- -- In most cases functions prefixed with a 'j' are monadic because the -- observably allocate. Notable exceptions are `jwhenS`, 'jString' and the -- helpers for HashMaps. ----------------------------------------------------------------------------- module GHC.JS.Make ( -- * Injection Type classes -- $classes ToJExpr(..) , ToStat(..) , JVarMagic(..) , JSArgument(..) -- * Introduction functions -- $intro_funcs , jString , jLam, jLam', jFunction, jFunctionSized, jFunction' , jVar, jVars, jFor, jForIn, jForEachIn, jTryCatchFinally -- * Combinators -- $combinators , (||=), (|=), (.==.), (.===.), (.!=.), (.!==.), (.!) , (.>.), (.>=.), (.<.), (.<=.) , (.<<.), (.>>.), (.>>>.) , (.|.), (.||.), (.&&.) 
, if_, if10, if01, ifS, ifBlockS, jBlock, jIf , jwhenS , app, appS, returnS , loop, loopBlockS , preIncrS, postIncrS , preDecrS, postDecrS , off8, off16, off32, off64 , mask8, mask16 , signExtend8, signExtend16 , typeOf , returnStack, assignAllEqual, assignAll, assignAllReverseOrder , declAssignAll , nullStat, (.^) -- ** Hash combinators , jhEmpty , jhSingle , jhAdd , jhFromList -- * Literals -- $literals , null_ , undefined_ , false_ , true_ , zero_ , one_ , two_ , three_ -- ** Math functions -- $math , math_log, math_sin, math_cos, math_tan, math_exp, math_acos, math_asin, math_atan, math_abs, math_pow, math_sqrt, math_asinh, math_acosh, math_atanh, math_cosh, math_sinh, math_tanh, math_expm1, math_log1p, math_fround, math_min, math_max -- * Statement helpers , Solo(..) , decl #if __GLASGOW_HASKELL__ < 905 , pattern MkSolo #endif ) where import GHC.Prelude hiding ((.|.)) import GHC.JS.Ident import GHC.JS.JStg.Syntax import GHC.JS.JStg.Monad import GHC.JS.Transform import Control.Arrow ((***)) import Control.Monad (replicateM) import Data.Tuple import qualified Data.Map as M import GHC.Data.FastString import GHC.Utils.Misc import GHC.Types.Unique.Map -------------------------------------------------------------------------------- -- Type Classes -------------------------------------------------------------------------------- -- $classes -- The 'ToJExpr' class handles injection of of things into the EDSL as a JS -- expression -- | Things that can be marshalled into javascript values. -- Instantiate for any necessary data structures. class ToJExpr a where toJExpr :: a -> JStgExpr toJExprFromList :: [a] -> JStgExpr toJExprFromList = ValExpr . JList . 
map toJExpr instance ToJExpr a => ToJExpr [a] where toJExpr = toJExprFromList instance ToJExpr JStgExpr where toJExpr = id instance ToJExpr () where toJExpr _ = ValExpr $ JList [] instance ToJExpr Bool where toJExpr True = global "true" toJExpr False = global "false" instance ToJExpr JVal where toJExpr = ValExpr instance ToJExpr a => ToJExpr (UniqMap FastString a) where toJExpr = ValExpr . JHash . mapUniqMap toJExpr instance ToJExpr a => ToJExpr (M.Map String a) where toJExpr = ValExpr . JHash . listToUniqMap . map (mkFastString *** toJExpr) . M.toList instance ToJExpr Double where toJExpr = ValExpr . JDouble . SaneDouble instance ToJExpr Int where toJExpr = ValExpr . JInt . fromIntegral instance ToJExpr Integer where toJExpr = ValExpr . JInt instance ToJExpr Char where toJExpr = ValExpr . JStr . mkFastString . (:[]) toJExprFromList = ValExpr . JStr . mkFastString -- where escQuotes = tailDef "" . initDef "" . show instance ToJExpr Ident where toJExpr = ValExpr . JVar instance ToJExpr FastString where toJExpr = ValExpr . JStr instance (ToJExpr a, ToJExpr b) => ToJExpr (a,b) where toJExpr (a,b) = ValExpr . JList $ [toJExpr a, toJExpr b] instance (ToJExpr a, ToJExpr b, ToJExpr c) => ToJExpr (a,b,c) where toJExpr (a,b,c) = ValExpr . JList $ [toJExpr a, toJExpr b, toJExpr c] instance (ToJExpr a, ToJExpr b, ToJExpr c, ToJExpr d) => ToJExpr (a,b,c,d) where toJExpr (a,b,c,d) = ValExpr . JList $ [toJExpr a, toJExpr b, toJExpr c, toJExpr d] instance (ToJExpr a, ToJExpr b, ToJExpr c, ToJExpr d, ToJExpr e) => ToJExpr (a,b,c,d,e) where toJExpr (a,b,c,d,e) = ValExpr . JList $ [toJExpr a, toJExpr b, toJExpr c, toJExpr d, toJExpr e] instance (ToJExpr a, ToJExpr b, ToJExpr c, ToJExpr d, ToJExpr e, ToJExpr f) => ToJExpr (a,b,c,d,e,f) where toJExpr (a,b,c,d,e,f) = ValExpr . JList $ [toJExpr a, toJExpr b, toJExpr c, toJExpr d, toJExpr e, toJExpr f] -- | The 'ToStat' class handles injection of of things into the EDSL as a JS -- statement. 
This ends up being polymorphic sugar for JS blocks, see helper -- function 'GHC.JS.Make.expr2stat'. Instantiate for any necessary data -- structures. class ToStat a where toStat :: a -> JStgStat instance ToStat JStgStat where toStat = id instance ToStat [JStgStat] where toStat = BlockStat instance ToStat JStgExpr where toStat = expr2stat instance ToStat [JStgExpr] where toStat = BlockStat . map expr2stat -- | Convert A JS expression to a JS statement where applicable. This only -- affects applications; 'ApplExpr', If-expressions; 'IfExpr', and Unary -- expression; 'UOpExpr'. expr2stat :: JStgExpr -> JStgStat expr2stat (ApplExpr x y) = (ApplStat x y) expr2stat (IfExpr x y z) = IfStat x (expr2stat y) (expr2stat z) expr2stat (UOpExpr o x) = UOpStat o x expr2stat _ = nullStat -------------------------------------------------------------------------------- -- Introduction Functions -------------------------------------------------------------------------------- -- $intro_functions -- Introduction functions are functions that map values or terms in the Haskell -- domain to the JS EDSL domain -- | Create a new anonymous function. The result is a 'GHC.JS.Syntax.JExpr' -- expression. -- Usage: -- -- > jLam $ \x -> jVar x + one_ -- > jLam $ \f -> (jLam $ \x -> (f `app` (x `app` x))) `app` (jLam $ \x -> (f `app` (x `app` x))) jLam :: JSArgument args => (args -> JSM JStgStat) -> JSM JStgExpr jLam body = do xs <- args ValExpr . JFunc (argList xs) <$> body xs -- | Special case of @jLam@ where the anonymous function requires no fresh -- arguments. jLam' :: JStgStat -> JStgExpr jLam' body = ValExpr $ JFunc mempty body -- | Introduce only one new variable into scope for the duration of the -- enclosed expression. The result is a block statement. Usage: -- -- 'jVar $ \x -> mconcat [jVar x ||= one_, ...' 
jVar :: (JVarMagic t, ToJExpr t) => (t -> JSM JStgStat) -> JSM JStgStat
jVar f = jVars $ \(MkSolo only_one) -> f only_one

-- | Introduce one or many new variables into scope for the duration of the
-- enclosed expression. This function reifies the number of arguments based on
-- the container of the input function. We intentionally avoid lists and instead
-- opt for tuples because lists are not sized in general. The result is a block
-- statement. Usage:
--
-- @jVars $ \(x,y) -> mconcat [ x |= one_, y |= two_, x + y]@
jVars :: (JSArgument args) => (args -> JSM JStgStat) -> JSM JStgStat
jVars f = do
  as   <- args
  body <- f as
  return $ mconcat $ fmap decl (argList as) ++ [body]

-- | Construct a top-level function subject to JS hoisting. This combinator is
-- polymorphic over function arity, so you can use it to define a JS syntax
-- object in Haskell which is a function in JS taking 2 or 4 or however many
-- arguments. For a singleton function use the @Solo@ constructor @MkSolo@.
-- Usage:
--
-- an example from the Rts that defines a 1-arity JS function
-- > jFunction (global "h$getReg") (\(MkSolo n) -> return $ SwitchStat n getRegCases mempty)
--
-- an example of a two argument function from the Rts
-- > jFunction (global "h$bh_lne") (\(x, frameSize) -> bhLneStats s x frameSize)
jFunction
  :: (JSArgument args)
  => Ident                   -- ^ global name
  -> (args -> JSM JStgStat)  -- ^ function body, input is locally unique generated variables
  -> JSM JStgStat
jFunction name body = do
  func_args <- args
  FuncStat name (argList func_args) <$> (body func_args)

-- | Construct a top-level function subject to JS hoisting. Special case where
-- the arity cannot be deduced from the 'args' parameter (at least not without
-- dependent types).
jFunctionSized
  :: Ident                         -- ^ global name
  -> Int                           -- ^ arity
  -> ([JStgExpr] -> JSM JStgStat)  -- ^ function body, input is locally unique generated variables
  -> JSM JStgStat
jFunctionSized name arity body = do
  func_args <- replicateM arity newIdent
  FuncStat name func_args <$> (body $ toJExpr <$> func_args)

-- | Construct a top-level function subject to JS hoisting. Special case where
-- the function binds no parameters.
jFunction'
  :: Ident         -- ^ global name
  -> JSM JStgStat  -- ^ function body
  -> JSM JStgStat
jFunction' name body = FuncStat name mempty <$> body

-- | Sequence a list of monadic statements and concatenate their results into
-- a single block.
jBlock :: Monoid a => [JSM a] -> JSM a
jBlock = fmap mconcat . sequence

-- | Create a 'for in' statement.
-- Usage:
--
-- @jForIn {expression} $ \x -> {block involving x}@
jForIn :: JStgExpr -> (JStgExpr -> JStgStat) -> JSM JStgStat
jForIn e f = do
  i <- newIdent
  return $ decl i `mappend` ForInStat False i e (f (ValExpr $! JVar i))

-- | As with 'jForIn' but creating a \"for each in\" statement.
jForEachIn :: JStgExpr -> (JStgExpr -> JStgStat) -> JSM JStgStat
jForEachIn e f = do
  i <- newIdent
  return $ decl i `mappend` ForInStat True i e (f (ValExpr $! JVar i))

-- | Create a 'for' statement given a function for initialization, a predicate
-- to step to, a step and a body
-- Usage:
--
-- @ jFor (|= zero_) (.<. Int 65536) preIncrS
--       (\j -> ...something with the counter j...)@
--
jFor :: (JStgExpr -> JStgStat)  -- ^ initialization function
     -> (JStgExpr -> JStgExpr)  -- ^ predicate
     -> (JStgExpr -> JStgStat)  -- ^ step function
     -> (JStgExpr -> JStgStat)  -- ^ body
     -> JSM JStgStat
jFor init pred step body = do
  id <- newIdent
  let i = ValExpr (JVar id)
  return $ decl id `mappend` ForStat (init i) (pred i) (step i) (body i)

-- | Create a try\/catch\/finally statement. Each continuation receives the
-- identifier bound in the catch clause.
jTryCatchFinally :: (Ident -> JStgStat)  -- ^ try block, given the catch-clause binder
                 -> (Ident -> JStgStat)  -- ^ catch block
                 -> (Ident -> JStgStat)  -- ^ finally block
                 -> JSM JStgStat
jTryCatchFinally c f f2 = do
  i <- newIdent
  return $ TryStat (c i) i (f i) (f2 i)

-- | Convert a FastString to a JavaScript String
jString :: FastString -> JStgExpr
jString = toJExpr

-- | construct a js declaration with the given identifier
decl :: Ident -> JStgStat
decl i = DeclStat i Nothing

-- | The empty JS HashMap
jhEmpty :: M.Map k JStgExpr
jhEmpty = M.empty

-- | A singleton JS HashMap
jhSingle :: (Ord k, ToJExpr a) => k -> a -> M.Map k JStgExpr
jhSingle k v = jhAdd k v jhEmpty

-- | insert a key-value pair into a JS HashMap
jhAdd :: (Ord k, ToJExpr a) => k -> a -> M.Map k JStgExpr -> M.Map k JStgExpr
jhAdd k v m = M.insert k (toJExpr v) m

-- | Construct a JS HashMap from a list of key-value pairs
jhFromList :: [(FastString, JStgExpr)] -> JVal
jhFromList = JHash . listToUniqMap

-- | The empty JS statement
nullStat :: JStgStat
nullStat = BlockStat []

--------------------------------------------------------------------------------
--                            Combinators
--------------------------------------------------------------------------------
-- $combinators
-- Combinators operate on terms in the JS EDSL domain to create new terms in the
-- EDSL domain.

-- | JS infix Equality operators
(.==.), (.===.), (.!=.), (.!==.) :: JStgExpr -> JStgExpr -> JStgExpr
(.==.)  = InfixExpr EqOp
(.===.) = InfixExpr StrictEqOp
(.!=.)  = InfixExpr NeqOp
(.!==.) = InfixExpr StrictNeqOp

infixl 6 .==., .===., .!=., .!==.

-- | JS infix Ord operators
(.>.), (.>=.), (.<.), (.<=.) :: JStgExpr -> JStgExpr -> JStgExpr
(.>.)  = InfixExpr GtOp
(.>=.) = InfixExpr GeOp
(.<.)  = InfixExpr LtOp
(.<=.) = InfixExpr LeOp

infixl 7 .>., .>=., .<., .<=.

-- | JS infix bit operators
(.|.), (.||.), (.&&.) :: JStgExpr -> JStgExpr -> JStgExpr
(.|.)  = InfixExpr BOrOp
(.||.) = InfixExpr LOrOp
(.&&.) = InfixExpr LAndOp

infixl 8 .||., .&&.

-- | JS infix bit shift operators
(.<<.), (.>>.), (.>>>.) :: JStgExpr -> JStgExpr -> JStgExpr
(.<<.)  = InfixExpr LeftShiftOp
(.>>.)  = InfixExpr RightShiftOp
(.>>>.) = InfixExpr ZRightShiftOp

infixl 9 .<<., .>>., .>>>.

-- | Given a 'JStgExpr', return its type.
typeOf :: JStgExpr -> JStgExpr
typeOf = UOpExpr TypeofOp

-- | JS if-expression
--
-- > if_ e1 e2 e3 ==> e1 ? e2 : e3
if_ :: JStgExpr -> JStgExpr -> JStgExpr -> JStgExpr
if_ e1 e2 e3 = IfExpr e1 e2 e3

-- | If-expression which returns statements, see related 'ifBlockS'
--
-- > if e s1 s2 ==> if(e) { s1 } else { s2 }
ifS :: JStgExpr -> JStgStat -> JStgStat -> JStgStat
ifS e s1 s2 = IfStat e s1 s2

-- | Version of a JS if-expression which admits monadic actions in its branches
jIf :: JStgExpr -> JSM JStgStat -> JSM JStgStat -> JSM JStgStat
jIf e ma mb = do
  !a <- ma
  !b <- mb
  pure $ IfStat e a b

-- | A when-statement as syntactic sugar via `ifS`
--
-- > jwhenS cond block ==> if(cond) { block } else { }
jwhenS :: JStgExpr -> JStgStat -> JStgStat
jwhenS cond block = IfStat cond block mempty

-- | If-expression which returns blocks
--
-- > ifBlockS e s1 s2 ==> if(e) { s1 } else { s2 }
ifBlockS :: JStgExpr -> [JStgStat] -> [JStgStat] -> JStgStat
ifBlockS e s1 s2 = IfStat e (mconcat s1) (mconcat s2)

-- | if-expression that returns 1 if condition <=> true, 0 otherwise
--
-- > if10 e ==> e ? 1 : 0
if10 :: JStgExpr -> JStgExpr
if10 e = IfExpr e one_ zero_

-- | if-expression that returns 0 if condition <=> true, 1 otherwise
--
-- > if01 e ==> e ? 0 : 1
if01 :: JStgExpr -> JStgExpr
if01 e = IfExpr e zero_ one_

-- | an application expression, see related 'appS'
--
-- > app f xs ==> f(xs)
app :: FastString -> [JStgExpr] -> JStgExpr
app f xs = ApplExpr (global f) xs

-- | A statement application, see the expression form 'app'
appS :: FastString -> [JStgExpr] -> JStgStat
appS f xs = ApplStat (global f) xs

-- | Return a 'JStgExpr'
returnS :: JStgExpr -> JStgStat
returnS e = ReturnStat e

-- | "for" loop with increment at end of body
loop :: JStgExpr -> (JStgExpr -> JStgExpr) -> (JStgExpr -> JSM JStgStat) -> JSM JStgStat
loop initial test body_ = jVar $ \i -> do
  body <- body_ i
  return $ mconcat
    [ i |= initial
    , WhileStat False (test i) body
    ]

-- | "for" loop over a block of statements, with increment at end of body
loopBlockS :: JStgExpr -> (JStgExpr -> JStgExpr) -> (JStgExpr -> [JStgStat]) -> JSM JStgStat
loopBlockS initial test body = jVar $ \i -> return $ mconcat
  [ i |= initial
  , WhileStat False (test i) (mconcat (body i))
  ]

-- | Prefix-increment a 'JStgExpr'
preIncrS :: JStgExpr -> JStgStat
preIncrS x = UOpStat PreIncOp x

-- | Postfix-increment a 'JStgExpr'
postIncrS :: JStgExpr -> JStgStat
postIncrS x = UOpStat PostIncOp x

-- | Prefix-decrement a 'JStgExpr'
preDecrS :: JStgExpr -> JStgStat
preDecrS x = UOpStat PreDecOp x

-- | Postfix-decrement a 'JStgExpr'
postDecrS :: JStgExpr -> JStgStat
postDecrS x = UOpStat PostDecOp x

-- | Byte indexing of o with a 64-bit offset
off64 :: JStgExpr -> JStgExpr -> JStgExpr
off64 o i = Add o (i .<<. three_)

-- | Byte indexing of o with a 32-bit offset
off32 :: JStgExpr -> JStgExpr -> JStgExpr
off32 o i = Add o (i .<<. two_)

-- | Byte indexing of o with a 16-bit offset
off16 :: JStgExpr -> JStgExpr -> JStgExpr
off16 o i = Add o (i .<<. one_)

-- | Byte indexing of o with a 8-bit offset
off8 :: JStgExpr -> JStgExpr -> JStgExpr
off8 o i = Add o i

-- | a bit mask to retrieve the lower 8-bits
mask8 :: JStgExpr -> JStgExpr
mask8 x = BAnd x (Int 0xFF)

-- | a bit mask to retrieve the lower 16-bits
mask16 :: JStgExpr -> JStgExpr
mask16 x = BAnd x (Int 0xFFFF)

-- | Sign-extend/narrow a 8-bit value
signExtend8 :: JStgExpr -> JStgExpr
signExtend8 x = (BAnd x (Int 0x7F )) `Sub` (BAnd x (Int 0x80))

-- | Sign-extend/narrow a 16-bit value
signExtend16 :: JStgExpr -> JStgExpr
signExtend16 x = (BAnd x (Int 0x7FFF)) `Sub` (BAnd x (Int 0x8000))

-- | Select a property 'prop' from an object 'obj'
--
-- > obj .^ prop ==> obj.prop
(.^) :: JStgExpr -> FastString -> JStgExpr
obj .^ prop = SelExpr obj (name prop)

infixl 8 .^

-- | Assign a variable to an expression. Assumes the variable has already been
-- declared, see '(||=)' for the declaring form.
--
-- > foo |= expr ==> foo = expr;
(|=) :: JStgExpr -> JStgExpr -> JStgStat
(|=) l r = AssignStat l AssignOp r

-- | Declare a variable and then assign the variable to an expression
--
-- > foo ||= expr ==> var foo; foo = expr;
(||=) :: Ident -> JStgExpr -> JStgStat
i ||= ex = DeclStat i (Just ex)

infixl 2 ||=, |=

-- | return the expression at idx of obj
--
-- > obj .! idx ==> obj[idx]
(.!) :: JStgExpr -> JStgExpr -> JStgExpr
(.!) = IdxExpr

infixl 8 .!
-- | Assign each expression in the second list to the corresponding expression
-- in the first; in debug builds, 'zipWithEqual' panics on a length mismatch.
assignAllEqual :: HasDebugCallStack => [JStgExpr] -> [JStgExpr] -> JStgStat
assignAllEqual xs ys = mconcat (zipWithEqual "assignAllEqual" (|=) xs ys)

-- | Pairwise assignment of @ys@ into @xs@; surplus elements on either side
-- are silently dropped.
assignAll :: [JStgExpr] -> [JStgExpr] -> JStgStat
assignAll xs ys = mconcat (zipWith (|=) xs ys)

-- | As 'assignAll', but emit the assignments in reverse order.
assignAllReverseOrder :: [JStgExpr] -> [JStgExpr] -> JStgStat
assignAllReverseOrder xs ys = mconcat (reverse (zipWith (|=) xs ys))

-- | Pairwise declare-and-assign via '(||=)'.
declAssignAll :: [Ident] -> [JStgExpr] -> JStgStat
declAssignAll xs ys = mconcat (zipWith (||=) xs ys)

--------------------------------------------------------------------------------
--                            Literals
--------------------------------------------------------------------------------
-- $literals
-- Literals in the JS EDSL are constants in the Haskell domain. These are useful
-- helper values and never change

-- | The JS literal 'null'
null_ :: JStgExpr
null_ = global "null"

-- | The JS literal 0
zero_ :: JStgExpr
zero_ = Int 0

-- | The JS literal 1
one_ :: JStgExpr
one_ = Int 1

-- | The JS literal 2
two_ :: JStgExpr
two_ = Int 2

-- | The JS literal 3
three_ :: JStgExpr
three_ = Int 3

-- | The JS literal 'undefined'
undefined_ :: JStgExpr
undefined_ = global "undefined"

-- | The JS literal 'true'
true_ :: JStgExpr
true_ = ValExpr (JBool True)

-- | The JS literal 'false'
false_ :: JStgExpr
false_ = ValExpr (JBool False)

-- | Return from the runtime's JS stack: @return h$rs();@
returnStack :: JStgStat
returnStack = ReturnStat (ApplExpr (global "h$rs") [])

--------------------------------------------------------------------------------
--                            Math functions
--------------------------------------------------------------------------------
-- $math
-- Math functions in the EDSL are literals, with the exception of 'math_' which
-- is the sole math introduction function.
-- | Reference to the JS global @Math@ object.
math :: JStgExpr
math = global "Math"

-- | Apply a method of the @Math@ object, e.g. @math_ "sin" [x] ==> Math.sin(x)@.
math_ :: FastString -> [JStgExpr] -> JStgExpr
math_ op args = ApplExpr (math .^ op) args

math_log, math_sin, math_cos, math_tan, math_exp, math_acos, math_asin,
  math_atan, math_abs, math_pow, math_sqrt, math_asinh, math_acosh, math_atanh,
  math_sign, math_sinh, math_cosh, math_tanh, math_expm1, math_log1p,
  math_fround, math_min, math_max :: [JStgExpr] -> JStgExpr
math_log    = math_ "log"
math_sin    = math_ "sin"
math_cos    = math_ "cos"
math_tan    = math_ "tan"
math_exp    = math_ "exp"
math_acos   = math_ "acos"
math_asin   = math_ "asin"
math_atan   = math_ "atan"
math_abs    = math_ "abs"
math_pow    = math_ "pow"
math_sign   = math_ "sign"
math_sqrt   = math_ "sqrt"
math_asinh  = math_ "asinh"
math_acosh  = math_ "acosh"
math_atanh  = math_ "atanh"
math_sinh   = math_ "sinh"
math_cosh   = math_ "cosh"
math_tanh   = math_ "tanh"
math_expm1  = math_ "expm1"
math_log1p  = math_ "log1p"
math_fround = math_ "fround"
math_min    = math_ "min"
math_max    = math_ "max"

-- Numeric literals and arithmetic build JS expressions directly (orphan
-- instance, see the OPTIONS_GHC pragma at the top of the file).
instance Num JStgExpr where
  x + y         = InfixExpr AddOp x y
  x - y         = InfixExpr SubOp x y
  x * y         = InfixExpr MulOp x y
  abs x         = math_abs [x]
  negate x      = UOpExpr NegOp x
  signum x      = math_sign [x]
  fromInteger x = ValExpr (JInt x)

instance Fractional JStgExpr where
  x / y          = InfixExpr DivOp x y
  fromRational x = ValExpr (JDouble (realToFrac x))

-- The Solo constructor was renamed to MkSolo in ghc 9.5
#if __GLASGOW_HASKELL__ < 905
pattern MkSolo :: a -> Solo a
pattern MkSolo a = Solo a
{-# COMPLETE MkSolo #-}
#endif

--------------------------------------------------------------------------------
--                            New Identifiers
--------------------------------------------------------------------------------

-- | Type class that generates fresh @a@'s for the JS backend. You should almost
-- never need to use this directly. Instead use @JSArgument@; for examples of
-- how to employ these classes please see @jVar@, @jFunction@ and call sites in
-- the Rts.
class JVarMagic a where fresh :: JSM a -- | Type class that finds the form of arguments required for a JS syntax -- object. This class gives us a single interface to generate variables for -- functions that have different arities. Thus with it, we can have only one -- @jFunction@ which is polymorphic over its arity, instead of 'jFunction2', -- 'jFunction3' and so on. class JSArgument args where argList :: args -> [Ident] args :: JSM args instance JVarMagic Ident where fresh = newIdent instance JVarMagic JVal where fresh = JVar <$> fresh instance JVarMagic JStgExpr where fresh = do i <- fresh return $ ValExpr $ JVar i instance (JVarMagic a, ToJExpr a) => JSArgument (Solo a) where argList (MkSolo a) = concatMap identsE [toJExpr a] args = do i <- fresh return $ MkSolo i instance (JVarMagic a, JVarMagic b, ToJExpr a, ToJExpr b) => JSArgument (a,b) where argList (a,b) = concatMap identsE [toJExpr a , toJExpr b] args = (,) <$> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c ) => JSArgument (a,b,c) where argList (a,b,c) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c] args = (,,) <$> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d ) => JSArgument (a,b,c,d) where argList (a,b,c,d) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d] args = (,,,) <$> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e ) => JSArgument (a,b,c,d,e) where argList (a,b,c,d,e) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d, toJExpr e] args = (,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e , JVarMagic f, ToJExpr f ) => JSArgument (a,b,c,d,e,f) where argList 
(a,b,c,d,e,f) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d, toJExpr e, toJExpr f] args = (,,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e , JVarMagic f, ToJExpr f , JVarMagic g, ToJExpr g ) => JSArgument (a,b,c,d,e,f,g) where argList (a,b,c,d,e,f,g) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d, toJExpr e, toJExpr f, toJExpr g] args = (,,,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e , JVarMagic f, ToJExpr f , JVarMagic g, ToJExpr g , JVarMagic h, ToJExpr h ) => JSArgument (a,b,c,d,e,f,g,h) where argList (a,b,c,d,e,f,g,h) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d, toJExpr e, toJExpr f, toJExpr g, toJExpr h] args = (,,,,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e , JVarMagic f, ToJExpr f , JVarMagic g, ToJExpr g , JVarMagic h, ToJExpr h , JVarMagic i, ToJExpr i ) => JSArgument (a,b,c,d,e,f,g,h,i) where argList (a,b,c,d,e,f,g,h,i) = concatMap identsE [toJExpr a , toJExpr b, toJExpr c, toJExpr d, toJExpr e, toJExpr f, toJExpr g, toJExpr h, toJExpr i] args = (,,,,,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh instance ( JVarMagic a, ToJExpr a , JVarMagic b, ToJExpr b , JVarMagic c, ToJExpr c , JVarMagic d, ToJExpr d , JVarMagic e, ToJExpr e , JVarMagic f, ToJExpr f , JVarMagic g, ToJExpr g , JVarMagic h, ToJExpr h , JVarMagic i, ToJExpr i , JVarMagic j, ToJExpr j ) => JSArgument (a,b,c,d,e,f,g,h,i,j) where argList (a,b,c,d,e,f,g,h,i,j) = concatMap identsE [toJExpr a , toJExpr b, 
toJExpr c, toJExpr d, toJExpr e, toJExpr f, toJExpr g, toJExpr h, toJExpr i, toJExpr j] args = (,,,,,,,,,) <$> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh <*> fresh ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/Ppr.hs0000644000000000000000000003610307346545000017322 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE BlockArguments #-} -- For Outputable instances for JS syntax {-# OPTIONS_GHC -Wno-orphans #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.Ppr -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.Ppr defines the code generation facilities for the JavaScript -- backend. That is, this module exports a function from the JS backend IR -- to JavaScript compliant concrete syntax that can readily be executed by -- nodejs or called in a browser. -- -- * Design -- -- This module follows the architecture and style of the other backends in -- GHC: it instances Outputable for the relevant types, creates a class that -- describes a morphism from the IR domain to JavaScript concrete Syntax and -- then generates that syntax on a case by case basis. -- -- * How to use -- -- The key functions are @renderJS@, @jsToDoc@, and the @RenderJS@ record. -- Use the @RenderJS@ record and @jsToDoc@ to define a custom renderers for -- specific parts of the backend, for example in 'GHC.StgToJS.Linker.Opt' a -- custom renderer ensures all @Ident@ generated by the linker optimization -- pass are prefixed differently than the default. 
Use @renderJS@ to -- generate JavaScript concrete syntax in the general case, suitable for -- human consumption. ----------------------------------------------------------------------------- module GHC.JS.Ppr ( renderJs , renderPrefixJs , renderPrefixJs' , JsToDoc(..) , defaultRenderJs , RenderJs(..) , JsRender(..) , jsToDoc , pprStringLit , interSemi , braceNest , hangBrace ) where import GHC.Prelude import GHC.JS.Ident import GHC.JS.Syntax import Data.Char (isControl, ord) import Data.List (sortOn) import Numeric(showHex) import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Types.Unique.Map instance Outputable JExpr where ppr = renderJs instance Outputable JVal where ppr = renderJs -------------------------------------------------------------------------------- -- Top level API -------------------------------------------------------------------------------- -- | Render a syntax tree as a pretty-printable document -- (simply showing the resultant doc produces a nice, -- well formatted String). renderJs :: (JsToDoc a) => a -> SDoc renderJs = renderJs' defaultRenderJs {-# SPECIALISE renderJs' :: JsToDoc a => RenderJs HLine -> a -> HLine #-} {-# SPECIALISE renderJs' :: JsToDoc a => RenderJs SDoc -> a -> SDoc #-} renderJs' :: (JsToDoc a, JsRender doc) => RenderJs doc -> a -> doc renderJs' r = jsToDocR r data RenderJs doc = RenderJs { renderJsS :: !(JsRender doc => RenderJs doc -> JStat -> doc) , renderJsE :: !(JsRender doc => RenderJs doc -> JExpr -> doc) , renderJsV :: !(JsRender doc => RenderJs doc -> JVal -> doc) , renderJsI :: !(JsRender doc => RenderJs doc -> Ident -> doc) } defaultRenderJs :: RenderJs doc defaultRenderJs = RenderJs defRenderJsS defRenderJsE defRenderJsV defRenderJsI jsToDoc :: JsToDoc a => a -> SDoc jsToDoc = jsToDocR defaultRenderJs -- | Render a syntax tree as a pretty-printable document, using a given prefix -- to all generated names. 
Use this with distinct prefixes to ensure distinct -- generated names between independent calls to render(Prefix)Js. renderPrefixJs :: (JsToDoc a) => a -> SDoc renderPrefixJs = renderPrefixJs' defaultRenderJs renderPrefixJs' :: (JsToDoc a, JsRender doc) => RenderJs doc -> a -> doc renderPrefixJs' r = jsToDocR r -------------------------------------------------------------------------------- -- Code Generator -------------------------------------------------------------------------------- class JsToDoc a where jsToDocR :: JsRender doc => RenderJs doc -> a -> doc instance JsToDoc JStat where jsToDocR r = renderJsS r r instance JsToDoc JExpr where jsToDocR r = renderJsE r r instance JsToDoc JVal where jsToDocR r = renderJsV r r instance JsToDoc Ident where jsToDocR r = renderJsI r r instance JsToDoc [JExpr] where jsToDocR r = jcat . map (addSemi . jsToDocR r) instance JsToDoc [JStat] where jsToDocR r = jcat . map (addSemi . jsToDocR r) defRenderJsS :: JsRender doc => RenderJs doc -> JStat -> doc defRenderJsS r = \case IfStat cond x y -> jcat [ hangBrace (text "if" <+?> parens (jsToDocR r cond)) (optBlock r x) , mbElse ] where mbElse | y == BlockStat [] = empty | otherwise = hangBrace (text "else") (optBlock r y) DeclStat x Nothing -> text "var" <+> jsToDocR r x -- special treatment for functions, otherwise there is too much left padding -- (more than the length of the expression assigned to). E.g. -- -- var long_variable_name = (function() -- { -- ... 
-- }); -- DeclStat x (Just (ValExpr f@(JFunc {}))) -> jhang (text "var" <+> jsToDocR r x <+?> char '=') (jsToDocR r f) DeclStat x (Just e) -> text "var" <+> jsToDocR r x <+?> char '=' <+?> jsToDocR r e WhileStat False p b -> hangBrace (text "while" <+?> parens (jsToDocR r p)) (optBlock r b) WhileStat True p b -> hangBrace (text "do") (optBlock r b) <+?> text "while" <+?> parens (jsToDocR r p) BreakStat l -> addSemi $ maybe (text "break") (\(LexicalFastString s) -> (text "break" <+> ftext s)) l ContinueStat l -> addSemi $ maybe (text "continue") (\(LexicalFastString s) -> (text "continue" <+> ftext s)) l LabelStat (LexicalFastString l) s -> ftext l <> char ':' $$$ printBS s where printBS (BlockStat ss) = interSemi $ map (jsToDocR r) ss printBS x = jsToDocR r x ForStat init p s1 sb -> hangBrace (text "for" <+?> parens forCond) (optBlock r sb) where forCond = jsToDocR r init <> semi <+?> jsToDocR r p <> semi <+?> parens (jsToDocR r s1) ForInStat each i e b -> hangBrace (text txt <+?> parens (jsToDocR r i <+> text "in" <+> jsToDocR r e)) (optBlock r b) where txt | each = "for each" | otherwise = "for" SwitchStat e l d -> hangBrace (text "switch" <+?> parens (jsToDocR r e)) cases where l' = map (\(c,s) -> (text "case" <+?> parens (jsToDocR r c) <> colon) $$$ jnest (optBlock r s)) l ++ [(text "default:") $$$ jnest (optBlock r d)] cases = foldl1 ($$$) l' ReturnStat e -> text "return" <+> jsToDocR r e ApplStat e es -> jsToDocR r e <> (parens . foldl' (<+?>) empty . punctuate comma $ map (jsToDocR r) es) FuncStat i is b -> hangBrace (text "function" <+> jsToDocR r i <> parens (foldl' (<+?>) empty . punctuate comma . 
map (jsToDocR r) $ is)) (optBlock r b) TryStat s i s1 s2 -> hangBrace (text "try") (jsToDocR r s) <+?> mbCatch <+?> mbFinally where mbCatch | s1 == BlockStat [] = empty | otherwise = hangBrace (text "catch" <+?> parens (jsToDocR r i)) (optBlock r s1) mbFinally | s2 == BlockStat [] = empty | otherwise = hangBrace (text "finally") (optBlock r s2) AssignStat i op x -> case x of -- special treatment for functions, otherwise there is too much left padding -- (more than the length of the expression assigned to). E.g. -- -- long_variable_name = (function() -- { -- ... -- }); -- ValExpr f@(JFunc {}) -> jhang (jsToDocR r i <> ftext (aOpText op)) (jsToDocR r f) _ -> jsToDocR r i <+?> ftext (aOpText op) <+?> jsToDocR r x UOpStat op x | isPre op && isAlphaOp op -> ftext (uOpText op) <+> optParens r x | isPre op -> ftext (uOpText op) <+> optParens r x | otherwise -> optParens r x <+> ftext (uOpText op) BlockStat xs -> jsToDocR r xs -- | Remove one Block layering if we know we already have braces around the -- statement optBlock :: JsRender doc => RenderJs doc -> JStat -> doc optBlock r x = case x of BlockStat{} -> jsToDocR r x _ -> addSemi (jsToDocR r x) optParens :: JsRender doc => RenderJs doc -> JExpr -> doc optParens r x = case x of UOpExpr _ _ -> parens (jsToDocR r x) _ -> jsToDocR r x defRenderJsE :: JsRender doc => RenderJs doc -> JExpr -> doc defRenderJsE r = \case ValExpr x -> jsToDocR r x SelExpr x y -> jsToDocR r x <> char '.' <> jsToDocR r y IdxExpr x y -> jsToDocR r x <> brackets (jsToDocR r y) IfExpr x y z -> parens (jsToDocR r x <+?> char '?' <+?> jsToDocR r y <+?> colon <+?> jsToDocR r z) InfixExpr op x y -> parens $ jsToDocR r x <+?> ftext (opText op) <+?> jsToDocR r y UOpExpr op x | isPre op && isAlphaOp op -> ftext (uOpText op) <+> optParens r x | isPre op -> ftext (uOpText op) <+> optParens r x | otherwise -> optParens r x <+> ftext (uOpText op) ApplExpr je xs -> jsToDocR r je <> (parens . foldl' (<+?>) empty . 
punctuate comma $ map (jsToDocR r) xs) defRenderJsV :: JsRender doc => RenderJs doc -> JVal -> doc defRenderJsV r = \case JVar i -> jsToDocR r i JList xs -> brackets . foldl' (<+?>) empty . punctuate comma $ map (jsToDocR r) xs JDouble (SaneDouble d) | d < 0 || isNegativeZero d -> parens (double d) | otherwise -> double d JInt i | i < 0 -> parens (integer i) | otherwise -> integer i JStr s -> pprStringLit s JRegEx s -> char '/' <> ftext s <> char '/' JBool b -> text (if b then "true" else "false") JHash m | isNullUniqMap m -> text "{}" | otherwise -> braceNest . foldl' (<+?>) empty . punctuate comma . map (\(x,y) -> char '\'' <> ftext x <> char '\'' <> colon <+?> jsToDocR r y) -- nonDetKeysUniqMap doesn't introduce non-determinism here -- because we sort the elements lexically $ sortOn (LexicalFastString . fst) (nonDetUniqMapToList m) JFunc is b -> parens $ hangBrace (text "function" <> parens (foldl' (<+?>) empty . punctuate comma . map (jsToDocR r) $ is)) (jsToDocR r b) defRenderJsI :: JsRender doc => RenderJs doc -> Ident -> doc defRenderJsI _ t = ftext (identFS t) aOpText :: AOp -> FastString aOpText = \case AssignOp -> "=" AddAssignOp -> "+=" SubAssignOp -> "-=" uOpText :: UOp -> FastString uOpText = \case NotOp -> "!" 
BNotOp -> "~" NegOp -> "-" PlusOp -> "+" NewOp -> "new" TypeofOp -> "typeof" DeleteOp -> "delete" YieldOp -> "yield" VoidOp -> "void" PreIncOp -> "++" PostIncOp -> "++" PreDecOp -> "--" PostDecOp -> "--" opText :: Op -> FastString opText = \case EqOp -> "==" StrictEqOp -> "===" NeqOp -> "!=" StrictNeqOp -> "!==" GtOp -> ">" GeOp -> ">=" LtOp -> "<" LeOp -> "<=" AddOp -> "+" SubOp -> "-" MulOp -> "*" DivOp -> "/" ModOp -> "%" LeftShiftOp -> "<<" RightShiftOp -> ">>" ZRightShiftOp -> ">>>" BAndOp -> "&" BOrOp -> "|" BXorOp -> "^" LAndOp -> "&&" LOrOp -> "||" InstanceofOp -> "instanceof" InOp -> "in" isPre :: UOp -> Bool isPre = \case PostIncOp -> False PostDecOp -> False _ -> True isAlphaOp :: UOp -> Bool isAlphaOp = \case NewOp -> True TypeofOp -> True DeleteOp -> True YieldOp -> True VoidOp -> True _ -> False pprStringLit :: IsLine doc => FastString -> doc pprStringLit s = char '\"' <> encodeJson s <> char '\"' -------------------------------------------------------------------------------- -- Utilities -------------------------------------------------------------------------------- encodeJson :: IsLine doc => FastString -> doc encodeJson xs = hcat (map encodeJsonChar (unpackFS xs)) encodeJsonChar :: IsLine doc => Char -> doc encodeJsonChar = \case '/' -> text "\\/" '\b' -> text "\\b" '\f' -> text "\\f" '\n' -> text "\\n" '\r' -> text "\\r" '\t' -> text "\\t" '"' -> text "\\\"" '\\' -> text "\\\\" c | not (isControl c) && ord c <= 127 -> char c | ord c <= 0xff -> hexxs "\\x" 2 (ord c) | ord c <= 0xffff -> hexxs "\\u" 4 (ord c) | otherwise -> let cp0 = ord c - 0x10000 -- output surrogate pair in hexxs "\\u" 4 ((cp0 `shiftR` 10) + 0xd800) <> hexxs "\\u" 4 ((cp0 .&. 0x3ff) + 0xdc00) where hexxs prefix pad cp = let h = showHex cp "" in text (prefix ++ replicate (pad - length h) '0' ++ h) interSemi :: JsRender doc => [doc] -> doc interSemi = foldl ($$$) empty . 
punctuateFinal semi semi -- | The structure `{body}`, optionally indented over multiple lines {-# INLINE braceNest #-} braceNest :: JsRender doc => doc -> doc braceNest x = lbrace $$$ jnest x $$$ rbrace -- | The structure `hdr {body}`, optionally indented over multiple lines {-# INLINE hangBrace #-} hangBrace :: JsRender doc => doc -> doc -> doc hangBrace hdr body = jcat [ hdr <> char ' ' <> char '{', jnest body, char '}' ] {-# INLINE jhang #-} jhang :: JsRender doc => doc -> doc -> doc jhang hdr body = jcat [ hdr, jnest body] -- | JsRender controls the differences in whitespace between HLine and SDoc. -- Generally, this involves the indentation and newlines in the human-readable -- SDoc implementation being replaced in the HLine version by the minimal -- whitespace required for valid JavaScript syntax. class IsLine doc => JsRender doc where -- | Concatenate with an optional single space (<+?>) :: doc -> doc -> doc -- | Concatenate with an optional newline ($$$) :: doc -> doc -> doc -- | Concatenate these `doc`s, either vertically (SDoc) or horizontally (HLine) jcat :: [doc] -> doc -- | Optionally indent the following jnest :: doc -> doc -- | Append semi-colon (and line-break in HLine mode) addSemi :: doc -> doc instance JsRender SDoc where (<+?>) = (<+>) {-# INLINE (<+?>) #-} ($$$) = ($+$) {-# INLINE ($$$) #-} jcat = vcat {-# INLINE jcat #-} jnest = nest 2 {-# INLINE jnest #-} addSemi x = x <> semi {-# INLINE addSemi #-} instance JsRender HLine where (<+?>) = (<>) {-# INLINE (<+?>) #-} ($$$) = (<>) {-# INLINE ($$$) #-} jcat = hcat {-# INLINE jcat #-} jnest = id {-# INLINE jnest #-} addSemi x = x <> semi <> char '\n' -- we add a line-break to avoid issues with lines too long in minified outputs {-# INLINE addSemi #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/Syntax.hs0000644000000000000000000002723207346545000020052 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# 
LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE PatternSynonyms #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.JS.Syntax -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- * Domain and Purpose -- -- GHC.JS.Syntax defines the Syntax for the JS backend in GHC. It comports -- with the [ECMA-262](https://tc39.es/ecma262/) although not every -- production rule of the standard is represented. Code in this module is a -- fork of [JMacro](https://hackage.haskell.org/package/jmacro) (BSD 3 -- Clause) by Gershom Bazerman, heavily modified to accomodate GHC's -- constraints. -- -- -- * Strategy -- -- Nothing fancy in this module, this is a classic deeply embedded AST for -- JS. We define numerous ADTs and pattern synonyms to make pattern matching -- and constructing ASTs easier. -- -- -- * Consumers -- -- The entire JS backend consumes this module, e.g., the modules in -- GHC.StgToJS.\*. Please see 'GHC.JS.Make' for a module which provides -- helper functions that use the deeply embedded DSL defined in this module -- to provide some of the benefits of a shallow embedding. -- ----------------------------------------------------------------------------- module GHC.JS.Syntax ( -- * Deeply embedded JS datatypes JStat(..) , JExpr(..) , JVal(..) , Op(..) , UOp(..) , AOp(..) , Ident(..) 
, JLabel -- * pattern synonyms over JS operators , pattern New , pattern Not , pattern Negate , pattern Add , pattern Sub , pattern Mul , pattern Div , pattern Mod , pattern BOr , pattern BAnd , pattern BXor , pattern BNot , pattern LOr , pattern LAnd , pattern Int , pattern String , pattern Var , pattern PreInc , pattern PostInc , pattern PreDec , pattern PostDec -- * Utility , SaneDouble(..) , var , true_ , false_ ) where import GHC.Prelude import GHC.JS.Ident import GHC.Data.FastString import GHC.Types.Unique.Map import GHC.Types.SaneDouble import Control.DeepSeq import Data.Data import qualified Data.Semigroup as Semigroup import GHC.Generics -------------------------------------------------------------------------------- -- Statements -------------------------------------------------------------------------------- -- | JavaScript statements, see the [ECMA262 -- Reference](https://tc39.es/ecma262/#sec-ecmascript-language-statements-and-declarations) -- for details data JStat = DeclStat !Ident !(Maybe JExpr) -- ^ Variable declarations: var foo [= e] | ReturnStat JExpr -- ^ Return | IfStat JExpr JStat JStat -- ^ If | WhileStat Bool JExpr JStat -- ^ While, bool is "do" when True | ForStat JStat JExpr JStat JStat -- ^ For | ForInStat Bool Ident JExpr JStat -- ^ For-in, bool is "each' when True | SwitchStat JExpr [(JExpr, JStat)] JStat -- ^ Switch | TryStat JStat Ident JStat JStat -- ^ Try | BlockStat [JStat] -- ^ Blocks | ApplStat JExpr [JExpr] -- ^ Application | UOpStat UOp JExpr -- ^ Unary operators | AssignStat JExpr AOp JExpr -- ^ Binding form: @ @ | LabelStat JLabel JStat -- ^ Statement Labels, makes me nostalgic for qbasic | BreakStat (Maybe JLabel) -- ^ Break | ContinueStat (Maybe JLabel) -- ^ Continue | FuncStat !Ident [Ident] JStat -- ^ an explicit function definition deriving (Eq, Generic) -- | A Label used for 'JStat', specifically 'BreakStat', 'ContinueStat' and of -- course 'LabelStat' type JLabel = LexicalFastString instance Semigroup JStat where (<>) 
= appendJStat instance Monoid JStat where mempty = BlockStat [] -- | Append a statement to another statement. 'appendJStat' only returns a -- 'JStat' that is /not/ a 'BlockStat' when either @mx@ or @my is an empty -- 'BlockStat'. That is: -- > (BlockStat [] , y ) = y -- > (x , BlockStat []) = x appendJStat :: JStat -> JStat -> JStat appendJStat mx my = case (mx,my) of (BlockStat [] , y ) -> y (x , BlockStat []) -> x (BlockStat xs , BlockStat ys) -> BlockStat $! xs ++ ys (BlockStat xs , ys ) -> BlockStat $! xs ++ [ys] (xs , BlockStat ys) -> BlockStat $! xs : ys (xs , ys ) -> BlockStat [xs,ys] -------------------------------------------------------------------------------- -- Expressions -------------------------------------------------------------------------------- -- | JavaScript Expressions data JExpr = ValExpr JVal -- ^ All values are trivially expressions | SelExpr JExpr Ident -- ^ Selection: Obj.foo, see 'GHC.JS.Make..^' | IdxExpr JExpr JExpr -- ^ Indexing: Obj[foo], see 'GHC.JS.Make..!' | InfixExpr Op JExpr JExpr -- ^ Infix Expressions, see 'JExpr' pattern synonyms | UOpExpr UOp JExpr -- ^ Unary Expressions | IfExpr JExpr JExpr JExpr -- ^ If-expression | ApplExpr JExpr [JExpr] -- ^ Application deriving (Eq, Generic) -- * Useful pattern synonyms to ease programming with the deeply embedded JS -- AST. Each pattern wraps @UOp@ and @Op@ into a @JExpr@s to save typing and -- for convienience. In addition we include a string wrapper for JS string -- and Integer literals. 
-- | pattern synonym for a unary operator new pattern New :: JExpr -> JExpr pattern New x = UOpExpr NewOp x -- | pattern synonym for prefix increment @++x@ pattern PreInc :: JExpr -> JExpr pattern PreInc x = UOpExpr PreIncOp x -- | pattern synonym for postfix increment @x++@ pattern PostInc :: JExpr -> JExpr pattern PostInc x = UOpExpr PostIncOp x -- | pattern synonym for prefix decrement @--x@ pattern PreDec :: JExpr -> JExpr pattern PreDec x = UOpExpr PreDecOp x -- | pattern synonym for postfix decrement @--x@ pattern PostDec :: JExpr -> JExpr pattern PostDec x = UOpExpr PostDecOp x -- | pattern synonym for logical not @!@ pattern Not :: JExpr -> JExpr pattern Not x = UOpExpr NotOp x -- | pattern synonym for unary negation @-@ pattern Negate :: JExpr -> JExpr pattern Negate x = UOpExpr NegOp x -- | pattern synonym for addition @+@ pattern Add :: JExpr -> JExpr -> JExpr pattern Add x y = InfixExpr AddOp x y -- | pattern synonym for subtraction @-@ pattern Sub :: JExpr -> JExpr -> JExpr pattern Sub x y = InfixExpr SubOp x y -- | pattern synonym for multiplication @*@ pattern Mul :: JExpr -> JExpr -> JExpr pattern Mul x y = InfixExpr MulOp x y -- | pattern synonym for division @*@ pattern Div :: JExpr -> JExpr -> JExpr pattern Div x y = InfixExpr DivOp x y -- | pattern synonym for remainder @%@ pattern Mod :: JExpr -> JExpr -> JExpr pattern Mod x y = InfixExpr ModOp x y -- | pattern synonym for Bitwise Or @|@ pattern BOr :: JExpr -> JExpr -> JExpr pattern BOr x y = InfixExpr BOrOp x y -- | pattern synonym for Bitwise And @&@ pattern BAnd :: JExpr -> JExpr -> JExpr pattern BAnd x y = InfixExpr BAndOp x y -- | pattern synonym for Bitwise XOr @^@ pattern BXor :: JExpr -> JExpr -> JExpr pattern BXor x y = InfixExpr BXorOp x y -- | pattern synonym for Bitwise Not @~@ pattern BNot :: JExpr -> JExpr pattern BNot x = UOpExpr BNotOp x -- | pattern synonym for logical Or @||@ pattern LOr :: JExpr -> JExpr -> JExpr pattern LOr x y = InfixExpr LOrOp x y -- | pattern synonym for 
logical And @&&@ pattern LAnd :: JExpr -> JExpr -> JExpr pattern LAnd x y = InfixExpr LAndOp x y -- | pattern synonym to create integer values pattern Int :: Integer -> JExpr pattern Int x = ValExpr (JInt x) -- | pattern synonym to create string values pattern String :: FastString -> JExpr pattern String x = ValExpr (JStr x) -- | pattern synonym to create a local variable reference pattern Var :: Ident -> JExpr pattern Var x = ValExpr (JVar x) -------------------------------------------------------------------------------- -- Values -------------------------------------------------------------------------------- -- | JavaScript values data JVal = JVar Ident -- ^ A variable reference | JList [JExpr] -- ^ A JavaScript list, or what JS calls an Array | JDouble SaneDouble -- ^ A Double | JInt Integer -- ^ A BigInt | JStr FastString -- ^ A String | JRegEx FastString -- ^ A Regex | JBool Bool -- ^ A Boolean | JHash (UniqMap FastString JExpr) -- ^ A JS HashMap: @{"foo": 0}@ | JFunc [Ident] JStat -- ^ A function deriving (Eq, Generic) -------------------------------------------------------------------------------- -- Operators -------------------------------------------------------------------------------- -- | JS Binary Operators. 
We do not deeply embed the comma operator and the -- assignment operators data Op = EqOp -- ^ Equality: `==` | StrictEqOp -- ^ Strict Equality: `===` | NeqOp -- ^ InEquality: `!=` | StrictNeqOp -- ^ Strict InEquality `!==` | GtOp -- ^ Greater Than: `>` | GeOp -- ^ Greater Than or Equal: `>=` | LtOp -- ^ Less Than: < | LeOp -- ^ Less Than or Equal: <= | AddOp -- ^ Addition: + | SubOp -- ^ Subtraction: - | MulOp -- ^ Multiplication \* | DivOp -- ^ Division: \/ | ModOp -- ^ Remainder: % | LeftShiftOp -- ^ Left Shift: \<\< | RightShiftOp -- ^ Right Shift: \>\> | ZRightShiftOp -- ^ Unsigned RightShift: \>\>\> | BAndOp -- ^ Bitwise And: & | BOrOp -- ^ Bitwise Or: | | BXorOp -- ^ Bitwise XOr: ^ | LAndOp -- ^ Logical And: && | LOrOp -- ^ Logical Or: || | InstanceofOp -- ^ @instanceof@ | InOp -- ^ @in@ deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData Op -- | JS Unary Operators data UOp = NotOp -- ^ Logical Not: @!@ | BNotOp -- ^ Bitwise Not: @~@ | NegOp -- ^ Negation: @-@ | PlusOp -- ^ Unary Plus: @+x@ | NewOp -- ^ new x | TypeofOp -- ^ typeof x | DeleteOp -- ^ delete x | YieldOp -- ^ yield x | VoidOp -- ^ void x | PreIncOp -- ^ Prefix Increment: @++x@ | PostIncOp -- ^ Postfix Increment: @x++@ | PreDecOp -- ^ Prefix Decrement: @--x@ | PostDecOp -- ^ Postfix Decrement: @x--@ deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData UOp -- | JS Unary Operators data AOp = AssignOp -- ^ Vanilla Assignment: = | AddAssignOp -- ^ Addition Assignment: += | SubAssignOp -- ^ Subtraction Assignment: -= deriving (Show, Eq, Ord, Enum, Data, Generic) instance NFData AOp -- | construct a JS variable reference var :: FastString -> JExpr var = Var . 
name -- | The JS literal 'true' true_ :: JExpr true_ = ValExpr (JBool True) -- | The JS literal 'false' false_ :: JExpr false_ = ValExpr (JBool False) ghc-lib-parser-9.12.2.20250421/compiler/GHC/JS/Transform.hs0000644000000000000000000001472207346545000020537 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE TupleSections #-} module GHC.JS.Transform ( identsS , identsV , identsE , jStgExprToJS , jStgStatToJS ) where import GHC.Prelude import GHC.JS.Ident import GHC.JS.JStg.Syntax import qualified GHC.JS.Syntax as JS import Data.List (sortBy) import GHC.Data.FastString import GHC.Types.Unique.Map import GHC.Types.Unique.FM {-# INLINE identsS #-} identsS :: JStgStat -> [Ident] identsS = \case DeclStat i e -> [i] ++ maybe [] identsE e ReturnStat e -> identsE e IfStat e s1 s2 -> identsE e ++ identsS s1 ++ identsS s2 WhileStat _ e s -> identsE e ++ identsS s ForStat init p step body -> identsS init ++ identsE p ++ identsS step ++ identsS body ForInStat _ i e s -> [i] ++ identsE e ++ identsS s SwitchStat e xs s -> identsE e ++ concatMap traverseCase xs ++ identsS s where traverseCase (e,s) = identsE e ++ identsS s TryStat s1 i s2 s3 -> identsS s1 ++ [i] ++ identsS s2 ++ identsS s3 BlockStat xs -> concatMap identsS xs ApplStat e es -> identsE e ++ concatMap identsE es UOpStat _op e -> identsE e AssignStat e1 _op e2 -> identsE e1 ++ identsE e2 LabelStat _l s -> identsS s BreakStat{} -> [] ContinueStat{} -> [] FuncStat i args body -> [i] ++ args ++ identsS body {-# INLINE identsE #-} identsE :: JStgExpr -> [Ident] identsE = \case ValExpr v -> identsV v SelExpr e _i -> identsE e -- do not rename properties IdxExpr e1 e2 -> identsE e1 ++ identsE e2 InfixExpr _ e1 e2 -> identsE e1 ++ identsE e2 UOpExpr _ e -> identsE e IfExpr e1 e2 e3 -> identsE e1 ++ identsE e2 ++ identsE e3 
ApplExpr e es -> identsE e ++ concatMap identsE es {-# INLINE identsV #-} identsV :: JVal -> [Ident] identsV = \case JVar i -> [i] JList xs -> concatMap identsE xs JDouble{} -> [] JInt{} -> [] JStr{} -> [] JRegEx{} -> [] JBool{} -> [] JHash m -> concatMap identsE (nonDetEltsUniqMap m) JFunc args s -> args ++ identsS s -------------------------------------------------------------------------------- -- Translation -- -------------------------------------------------------------------------------- jStgStatToJS :: JStgStat -> JS.JStat jStgStatToJS = \case DeclStat i rhs -> JS.DeclStat i $ fmap jStgExprToJS rhs ReturnStat e -> JS.ReturnStat $ jStgExprToJS e IfStat c t e -> JS.IfStat (jStgExprToJS c) (jStgStatToJS t) (jStgStatToJS e) WhileStat is_do c e -> JS.WhileStat is_do (jStgExprToJS c) (jStgStatToJS e) ForStat init p step body -> JS.ForStat (jStgStatToJS init) (jStgExprToJS p) (jStgStatToJS step) (jStgStatToJS body) ForInStat is_each i iter body -> JS.ForInStat (is_each) i (jStgExprToJS iter) (jStgStatToJS body) SwitchStat struct ps def -> JS.SwitchStat (jStgExprToJS struct) (map (\(p1, p2) -> (jStgExprToJS p1, jStgStatToJS p2)) ps) (jStgStatToJS def) TryStat t i c f -> JS.TryStat (jStgStatToJS t) i (jStgStatToJS c) (jStgStatToJS f) BlockStat bs -> JS.BlockStat $ map jStgStatToJS bs ApplStat rator rand -> JS.ApplStat (jStgExprToJS rator) $ map jStgExprToJS rand UOpStat rator rand -> JS.UOpStat (jStgUOpToJS rator) (jStgExprToJS rand) AssignStat lhs op rhs -> JS.AssignStat (jStgExprToJS lhs) (jStgAOpToJS op) (jStgExprToJS rhs) LabelStat lbl stmt -> JS.LabelStat lbl (jStgStatToJS stmt) BreakStat m_l -> JS.BreakStat $! m_l ContinueStat m_l -> JS.ContinueStat $! 
m_l FuncStat i args body -> JS.FuncStat i args $ jStgStatToJS body jStgExprToJS :: JStgExpr -> JS.JExpr jStgExprToJS = \case ValExpr v -> JS.ValExpr $ jStgValToJS v SelExpr obj i -> JS.SelExpr (jStgExprToJS obj) i IdxExpr o i -> JS.IdxExpr (jStgExprToJS o) (jStgExprToJS i) InfixExpr op l r -> JS.InfixExpr (jStgOpToJS op) (jStgExprToJS l) (jStgExprToJS r) UOpExpr op r -> JS.UOpExpr (jStgUOpToJS op) (jStgExprToJS r) IfExpr c t e -> JS.IfExpr (jStgExprToJS c) (jStgExprToJS t) (jStgExprToJS e) ApplExpr rator rands -> JS.ApplExpr (jStgExprToJS rator) $ map jStgExprToJS rands jStgValToJS :: JVal -> JS.JVal jStgValToJS = \case JVar i -> JS.JVar i JList xs -> JS.JList $ map jStgExprToJS xs JDouble d -> JS.JDouble d JInt i -> JS.JInt i JStr s -> JS.JStr s JRegEx f -> JS.JRegEx f JBool b -> JS.JBool b JHash m -> JS.JHash $ mapUniqMapM satHash m where satHash (i, x) = (i,) . (i,) $ jStgExprToJS x compareHash (i,_) (j,_) = lexicalCompareFS i j -- By lexically sorting the elements, the non-determinism introduced by nonDetEltsUFM is avoided mapUniqMapM f (UniqMap m) = UniqMap . listToUFM $ (map f . 
sortBy compareHash $ nonDetEltsUFM m) JFunc args body -> JS.JFunc args $ jStgStatToJS body jStgOpToJS :: Op -> JS.Op jStgOpToJS = go where go EqOp = JS.EqOp go StrictEqOp = JS.StrictEqOp go NeqOp = JS.NeqOp go StrictNeqOp = JS.StrictNeqOp go GtOp = JS.GtOp go GeOp = JS.GeOp go LtOp = JS.LtOp go LeOp = JS.LeOp go AddOp = JS.AddOp go SubOp = JS.SubOp go MulOp = JS.MulOp go DivOp = JS.DivOp go ModOp = JS.ModOp go LeftShiftOp = JS.LeftShiftOp go RightShiftOp = JS.RightShiftOp go ZRightShiftOp = JS.ZRightShiftOp go BAndOp = JS.BAndOp go BOrOp = JS.BOrOp go BXorOp = JS.BXorOp go LAndOp = JS.LAndOp go LOrOp = JS.LOrOp go InstanceofOp = JS.InstanceofOp go InOp = JS.InOp jStgUOpToJS :: UOp -> JS.UOp jStgUOpToJS = go where go NotOp = JS.NotOp go BNotOp = JS.BNotOp go NegOp = JS.NegOp go PlusOp = JS.PlusOp go NewOp = JS.NewOp go TypeofOp = JS.TypeofOp go DeleteOp = JS.DeleteOp go YieldOp = JS.YieldOp go VoidOp = JS.VoidOp go PreIncOp = JS.PreIncOp go PostIncOp = JS.PostIncOp go PreDecOp = JS.PreDecOp go PostDecOp = JS.PostDecOp jStgAOpToJS :: AOp -> JS.AOp jStgAOpToJS AssignOp = JS.AssignOp jStgAOpToJS AddAssignOp = JS.AddAssignOp jStgAOpToJS SubAssignOp = JS.SubAssignOp ghc-lib-parser-9.12.2.20250421/compiler/GHC/Linker/0000755000000000000000000000000007346545000017132 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Linker/Config.hs0000644000000000000000000000135407346545000020676 0ustar0000000000000000-- | Linker configuration module GHC.Linker.Config ( FrameworkOpts(..) , LinkerConfig(..) 
) where import GHC.Prelude import GHC.Utils.TmpFs import GHC.Utils.CliOption -- used on darwin only data FrameworkOpts = FrameworkOpts { foFrameworkPaths :: [String] , foCmdlineFrameworks :: [String] } -- | External linker configuration data LinkerConfig = LinkerConfig { linkerProgram :: String -- ^ Linker program , linkerOptionsPre :: [Option] -- ^ Linker options (before user options) , linkerOptionsPost :: [Option] -- ^ Linker options (after user options) , linkerTempDir :: TempDir -- ^ Temporary directory to use , linkerFilter :: String -> String -- ^ Output filter } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Linker/Static/0000755000000000000000000000000007346545000020361 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Linker/Static/Utils.hs0000644000000000000000000000216007346545000022014 0ustar0000000000000000{-# LANGUAGE MultiWayIf #-} module GHC.Linker.Static.Utils where import GHC.Prelude import GHC.Platform import System.FilePath -- | Compute the output file name of a program. -- -- StaticLink boolean is used to indicate if the program is actually a static library -- (e.g., on iOS). -- -- Use the provided filename (if any), otherwise use "main.exe" (Windows), -- "a.out (otherwise without StaticLink set), "liba.a". In every case, add the -- extension if it is missing. 
exeFileName :: ArchOS -> Bool -> Maybe FilePath -> FilePath exeFileName (ArchOS arch os) staticLink output_fn | Just s <- output_fn = if | OSMinGW32 <- os -> s "exe" | ArchJavaScript <- arch -> s "jsexe" | ArchWasm32 <- arch -> s "wasm" | staticLink -> s "a" | otherwise -> s | otherwise = if | OSMinGW32 <- os -> "main.exe" | ArchJavaScript <- arch -> "main.jsexe" | staticLink -> "liba.a" | otherwise -> "a.out" where s ext | null (takeExtension s) = s <.> ext | otherwise = s ghc-lib-parser-9.12.2.20250421/compiler/GHC/Linker/Types.hs0000644000000000000000000004106707346545000020602 0ustar0000000000000000{-# LANGUAGE TypeApplications #-} {-# LANGUAGE LambdaCase #-} ----------------------------------------------------------------------------- -- -- Types for the linkers and the loader -- -- (c) The University of Glasgow 2019 -- ----------------------------------------------------------------------------- module GHC.Linker.Types ( Loader (..) , LoaderState (..) , uninitializedLoader , modifyClosureEnv , LinkerEnv(..) , filterLinkerEnv , ClosureEnv , emptyClosureEnv , extendClosureEnv , LinkableSet , mkLinkableSet , unionLinkableSet , ObjFile , SptEntry(..) , LibrarySpec(..) , LoadedPkgInfo(..) , PkgsLoaded -- * Linkable , Linkable(..) , LinkablePart(..) , LinkableObjectSort (..) 
, linkableIsNativeCodeOnly , linkableObjs , linkableLibs , linkableFiles , linkableBCOs , linkableNativeParts , linkablePartitionParts , linkablePartPath , linkablePartAllBCOs , isNativeCode , isNativeLib , linkableFilterByteCode , linkableFilterNative , partitionLinkables ) where import GHC.Prelude import GHC.Unit ( UnitId, Module ) import GHC.ByteCode.Types ( ItblEnv, AddrEnv, CompiledByteCode ) import GHCi.RemoteTypes ( ForeignHValue, RemotePtr ) import GHCi.Message ( LoadedDLL ) import GHC.Types.Name.Env ( NameEnv, emptyNameEnv, extendNameEnvList, filterNameEnv ) import GHC.Types.Name ( Name ) import GHC.Types.SptEntry import GHC.Utils.Outputable import Control.Concurrent.MVar import Data.Time ( UTCTime ) import GHC.Unit.Module.Env import GHC.Types.Unique.DSet import GHC.Types.Unique.DFM import GHC.Unit.Module.WholeCoreBindings import Data.Maybe (mapMaybe) import Data.List.NonEmpty (NonEmpty, nonEmpty) import qualified Data.List.NonEmpty as NE {- ********************************************************************** The Loader's state ********************************************************************* -} {- The loader state *must* match the actual state of the C dynamic linker at all times. The MVar used to hold the LoaderState contains a Maybe LoaderState. The MVar serves to ensure mutual exclusion between multiple loaded copies of the GHC library. The Maybe may be Nothing to indicate that the linker has not yet been initialised. The LinkerEnv maps Names to actual closures (for interpreted code only), for use during linking. Note [Looking up symbols in the relevant objects] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In #23415, we determined that a lot of time (>10s, or even up to >35s!) was being spent on dynamically loading symbols before actually interpreting code when `:main` was run in GHCi. 
The root cause was that for each symbol we wanted to lookup, we would traverse the list of loaded objects and try find the symbol in each of them with dlsym (i.e. looking up a symbol was, worst case, linear in the amount of loaded objects). To drastically improve load time (from +-38 seconds down to +-2s), we now: 1. For every of the native objects loaded for a given unit, store the handles returned by `dlopen`. - In `pkgs_loaded` of the `LoaderState`, which maps `UnitId`s to `LoadedPkgInfo`s, where the handles live in its field `loaded_pkg_hs_dlls`. 2. When looking up a Name (e.g. `lookupHsSymbol`), find that name's `UnitId` in the `pkgs_loaded` mapping, 3. And only look for the symbol (with `dlsym`) on the /handles relevant to that unit/, rather than in every loaded object. Note [Symbols may not be found in pkgs_loaded] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently the `pkgs_loaded` mapping only contains the dynamic objects associated with loaded units. Symbols defined in a static object (e.g. from a statically-linked Haskell library) are found via the generic `lookupSymbol` function call by `lookupHsSymbol` when the symbol is not found in any of the dynamic objects of `pkgs_loaded`. The rationale here is two-fold: * we have only observed major link-time issues in dynamic linking; lookups in the RTS linker's static symbol table seem to be fast enough * allowing symbol lookups restricted to a single ObjectCode would require the maintenance of a symbol table per `ObjectCode`, which would introduce time and space overhead This fallback is further needed because we don't look in the haskell objects loaded for the home units (see the call to `loadModuleLinkables` in `loadDependencies`, as opposed to the call to `loadPackages'` in the same function which updates `pkgs_loaded`). 
We should ultimately keep track of the objects loaded (probably in `objs_loaded`, for which `LinkableSet` is a bit unsatisfactory, see a suggestion in 51c5c4eb1f2a33e4dc88e6a37b7b7c135234ce9b) and be able to lookup symbols specifically in them too (similarly to `lookupSymbolInDLL`). -} newtype Loader = Loader { loader_state :: MVar (Maybe LoaderState) } data LoaderState = LoaderState { linker_env :: !LinkerEnv -- ^ Current global mapping from Names to their true values , bcos_loaded :: !LinkableSet -- ^ The currently loaded interpreted modules (home package) , objs_loaded :: !LinkableSet -- ^ And the currently-loaded compiled modules (home package) , pkgs_loaded :: !PkgsLoaded -- ^ The currently-loaded packages; always object code -- haskell libraries, system libraries, transitive dependencies , temp_sos :: ![(FilePath, String)] -- ^ We need to remember the name of previous temporary DLL/.so -- libraries so we can link them (see #10322) } uninitializedLoader :: IO Loader uninitializedLoader = Loader <$> newMVar Nothing modifyClosureEnv :: LoaderState -> (ClosureEnv -> ClosureEnv) -> LoaderState modifyClosureEnv pls f = let le = linker_env pls ce = closure_env le in pls { linker_env = le { closure_env = f ce } } data LinkerEnv = LinkerEnv { closure_env :: !ClosureEnv -- ^ Current global mapping from closure Names to their true values , itbl_env :: !ItblEnv -- ^ The current global mapping from RdrNames of DataCons to -- info table addresses. -- When a new LinkablePart is linked into the running image, or an existing -- module in the image is replaced, the itbl_env must be updated -- appropriately. , addr_env :: !AddrEnv -- ^ Like 'closure_env' and 'itbl_env', but for top-level 'Addr#' literals, -- see Note [Generating code for top-level string literal bindings] in GHC.StgToByteCode. } filterLinkerEnv :: (Name -> Bool) -> LinkerEnv -> LinkerEnv filterLinkerEnv f le = LinkerEnv { closure_env = filterNameEnv (f . fst) (closure_env le) , itbl_env = filterNameEnv (f . 
fst) (itbl_env le) , addr_env = filterNameEnv (f . fst) (addr_env le) } type ClosureEnv = NameEnv (Name, ForeignHValue) emptyClosureEnv :: ClosureEnv emptyClosureEnv = emptyNameEnv extendClosureEnv :: ClosureEnv -> [(Name,ForeignHValue)] -> ClosureEnv extendClosureEnv cl_env pairs = extendNameEnvList cl_env [ (n, (n,v)) | (n,v) <- pairs] type PkgsLoaded = UniqDFM UnitId LoadedPkgInfo data LoadedPkgInfo = LoadedPkgInfo { loaded_pkg_uid :: !UnitId , loaded_pkg_hs_objs :: ![LibrarySpec] , loaded_pkg_non_hs_objs :: ![LibrarySpec] , loaded_pkg_hs_dlls :: ![RemotePtr LoadedDLL] -- ^ See Note [Looking up symbols in the relevant objects] , loaded_pkg_trans_deps :: UniqDSet UnitId } instance Outputable LoadedPkgInfo where ppr (LoadedPkgInfo uid hs_objs non_hs_objs _ trans_deps) = vcat [ppr uid , ppr hs_objs , ppr non_hs_objs , ppr trans_deps ] -- | Information we can use to dynamically link modules into the compiler data Linkable = Linkable { linkableTime :: !UTCTime -- ^ Time at which this linkable was built -- (i.e. when the bytecodes were produced, -- or the mod date on the files) , linkableModule :: !Module -- ^ The linkable module itself , linkableParts :: NonEmpty LinkablePart -- ^ Files and chunks of code to link. } type LinkableSet = ModuleEnv Linkable mkLinkableSet :: [Linkable] -> LinkableSet mkLinkableSet ls = mkModuleEnv [(linkableModule l, l) | l <- ls] -- | Union of LinkableSets. -- -- In case of conflict, keep the most recent Linkable (as per linkableTime) unionLinkableSet :: LinkableSet -> LinkableSet -> LinkableSet unionLinkableSet = plusModuleEnv_C go where go l1 l2 | linkableTime l1 > linkableTime l2 = l1 | otherwise = l2 instance Outputable Linkable where ppr (Linkable when_made mod parts) = (text "Linkable" <+> parens (text (show when_made)) <+> ppr mod) $$ nest 3 (ppr parts) type ObjFile = FilePath -- | Classify the provenance of @.o@ products. data LinkableObjectSort = -- | The object is the final product for a module. 
-- When linking splices, its file extension will be adapted to the -- interpreter's way if needed. ModuleObject | -- | The object was created from generated code for foreign stubs or foreign -- sources added by the user. -- Its file extension must be preserved, since there are no objects for -- alternative ways available. ForeignObject -- | Objects which have yet to be linked by the compiler data LinkablePart = DotO ObjFile -- ^ An object file (.o) LinkableObjectSort -- ^ Whether the object is an internal, intermediate build product that -- should not be adapted to the interpreter's way. Used for foreign stubs -- loaded from interfaces. | DotA FilePath -- ^ Static archive file (.a) | DotDLL FilePath -- ^ Dynamically linked library file (.so, .dll, .dylib) | CoreBindings WholeCoreBindings -- ^ Serialised core which we can turn into BCOs (or object files), or -- used by some other backend See Note [Interface Files with Core -- Definitions] | LazyBCOs CompiledByteCode -- ^ Some BCOs generated on-demand when forced. This is used for -- WholeCoreBindings, see Note [Interface Files with Core Definitions] [FilePath] -- ^ Objects containing foreign stubs and files | BCOs CompiledByteCode -- ^ A byte-code object, lives only in memory. instance Outputable LinkablePart where ppr (DotO path sort) = text "DotO" <+> text path <+> pprSort sort where pprSort = \case ModuleObject -> empty ForeignObject -> brackets (text "foreign") ppr (DotA path) = text "DotA" <+> text path ppr (DotDLL path) = text "DotDLL" <+> text path ppr (BCOs bco) = text "BCOs" <+> ppr bco ppr (LazyBCOs{}) = text "LazyBCOs" ppr (CoreBindings {}) = text "CoreBindings" -- | Return true if the linkable only consists of native code (no BCO) linkableIsNativeCodeOnly :: Linkable -> Bool linkableIsNativeCodeOnly l = all isNativeCode (NE.toList (linkableParts l)) -- | List the BCOs parts of a linkable. 
-- -- This excludes the LazyBCOs and the CoreBindings parts linkableBCOs :: Linkable -> [CompiledByteCode] linkableBCOs l = [ cbc | BCOs cbc <- NE.toList (linkableParts l) ] -- | List the native linkable parts (.o/.so/.dll) of a linkable linkableNativeParts :: Linkable -> [LinkablePart] linkableNativeParts l = NE.filter isNativeCode (linkableParts l) -- | Split linkable parts into (native code parts, BCOs parts) linkablePartitionParts :: Linkable -> ([LinkablePart],[LinkablePart]) linkablePartitionParts l = NE.partition isNativeCode (linkableParts l) -- | List the native objects (.o) of a linkable linkableObjs :: Linkable -> [FilePath] linkableObjs l = concatMap linkablePartObjectPaths (linkableParts l) -- | List the native libraries (.so/.dll) of a linkable linkableLibs :: Linkable -> [LinkablePart] linkableLibs l = NE.filter isNativeLib (linkableParts l) -- | List the paths of the native objects and libraries (.o/.so/.dll) linkableFiles :: Linkable -> [FilePath] linkableFiles l = concatMap linkablePartNativePaths (NE.toList (linkableParts l)) ------------------------------------------- -- | Is the part a native object or library? (.o/.so/.dll) isNativeCode :: LinkablePart -> Bool isNativeCode = \case DotO {} -> True DotA {} -> True DotDLL {} -> True BCOs {} -> False LazyBCOs{} -> False CoreBindings {} -> False -- | Is the part a native library? (.so/.dll) isNativeLib :: LinkablePart -> Bool isNativeLib = \case DotO {} -> False DotA {} -> True DotDLL {} -> True BCOs {} -> False LazyBCOs{} -> False CoreBindings {} -> False -- | Get the FilePath of linkable part (if applicable) linkablePartPath :: LinkablePart -> Maybe FilePath linkablePartPath = \case DotO fn _ -> Just fn DotA fn -> Just fn DotDLL fn -> Just fn CoreBindings {} -> Nothing LazyBCOs {} -> Nothing BCOs {} -> Nothing -- | Return the paths of all object code files (.o, .a, .so) contained in this -- 'LinkablePart'. 
linkablePartNativePaths :: LinkablePart -> [FilePath] linkablePartNativePaths = \case DotO fn _ -> [fn] DotA fn -> [fn] DotDLL fn -> [fn] CoreBindings {} -> [] LazyBCOs _ fos -> fos BCOs {} -> [] -- | Return the paths of all object files (.o) contained in this 'LinkablePart'. linkablePartObjectPaths :: LinkablePart -> [FilePath] linkablePartObjectPaths = \case DotO fn _ -> [fn] DotA _ -> [] DotDLL _ -> [] CoreBindings {} -> [] LazyBCOs _ fos -> fos BCOs {} -> [] -- | Retrieve the compiled byte-code from the linkable part. -- -- Contrary to linkableBCOs, this includes byte-code from LazyBCOs. linkablePartAllBCOs :: LinkablePart -> [CompiledByteCode] linkablePartAllBCOs = \case BCOs bco -> [bco] LazyBCOs bcos _ -> [bcos] _ -> [] linkableFilter :: (LinkablePart -> [LinkablePart]) -> Linkable -> Maybe Linkable linkableFilter f linkable = do new <- nonEmpty (concatMap f (linkableParts linkable)) Just linkable {linkableParts = new} linkablePartNative :: LinkablePart -> [LinkablePart] linkablePartNative = \case u@DotO {} -> [u] u@DotA {} -> [u] u@DotDLL {} -> [u] LazyBCOs _ os -> [DotO f ForeignObject | f <- os] _ -> [] linkablePartByteCode :: LinkablePart -> [LinkablePart] linkablePartByteCode = \case u@BCOs {} -> [u] LazyBCOs bcos _ -> [BCOs bcos] _ -> [] -- | Transform the 'LinkablePart' list in this 'Linkable' to contain only -- object code files (.o, .a, .so) without 'LazyBCOs'. -- If no 'LinkablePart' remains, return 'Nothing'. linkableFilterNative :: Linkable -> Maybe Linkable linkableFilterNative = linkableFilter linkablePartNative -- | Transform the 'LinkablePart' list in this 'Linkable' to contain only byte -- code without 'LazyBCOs'. -- If no 'LinkablePart' remains, return 'Nothing'. 
linkableFilterByteCode :: Linkable -> Maybe Linkable linkableFilterByteCode = linkableFilter linkablePartByteCode -- | Split the 'LinkablePart' lists in each 'Linkable' into only object code -- files (.o, .a, .so) and only byte code, without 'LazyBCOs', and return two -- lists containing the nonempty 'Linkable's for each. partitionLinkables :: [Linkable] -> ([Linkable], [Linkable]) partitionLinkables linkables = ( mapMaybe linkableFilterNative linkables, mapMaybe linkableFilterByteCode linkables ) {- ********************************************************************** Loading packages ********************************************************************* -} data LibrarySpec = Objects [FilePath] -- Full path names of set of .o files, including trailing .o -- We allow batched loading to ensure that cyclic symbol -- references can be resolved (see #13786). -- For dynamic objects only, try to find the object -- file in all the directories specified in -- v_Library_paths before giving up. | Archive FilePath -- Full path name of a .a file, including trailing .a | DLL String -- "Unadorned" name of a .DLL/.so -- e.g. On unix "qt" denotes "libqt.so" -- On Windows "burble" denotes "burble.DLL" or "libburble.dll" -- loadDLL is platform-specific and adds the lib/.so/.DLL -- suffixes platform-dependently | DLLPath FilePath -- Absolute or relative pathname to a dynamic library -- (ends with .dll or .so). 
| Framework String -- Only used for darwin, but does no harm instance Outputable LibrarySpec where ppr (Objects objs) = text "Objects" <+> ppr (map (text @SDoc) objs) ppr (Archive a) = text "Archive" <+> text a ppr (DLL s) = text "DLL" <+> text s ppr (DLLPath f) = text "DLLPath" <+> text f ppr (Framework s) = text "Framework" <+> text s ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser.hs-boot0000644000000000000000000000025507346545000020441 0ustar0000000000000000module GHC.Parser where import GHC.Types.Name.Reader (RdrName) import GHC.Parser.Lexer (P) import GHC.Parser.Annotation (LocatedN) parseIdentifier :: P (LocatedN RdrName) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser.y0000644000000000000000000070217007346545000017343 0ustar0000000000000000-- -*-haskell-*- -- --------------------------------------------------------------------------- -- (c) The University of Glasgow 1997-2003 --- -- The GHC grammar. -- -- Author(s): Simon Marlow, Sven Panne 1997, 1998, 1999 -- --------------------------------------------------------------------------- { {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE LambdaCase #-} -- | This module provides the generated Happy parser for Haskell. It exports -- a number of parsers which may be used in any library that uses the GHC API. 
-- A common usage pattern is to initialize the parser state with a given string -- and then parse that string: -- -- @ -- runParser :: ParserOpts -> String -> P a -> ParseResult a -- runParser opts str parser = unP parser parseState -- where -- filename = "\" -- location = mkRealSrcLoc (mkFastString filename) 1 1 -- buffer = stringToStringBuffer str -- parseState = initParserState opts buffer location -- @ module GHC.Parser ( parseModule, parseSignature, parseImport, parseStatement, parseBackpack , parseDeclaration, parseExpression, parsePattern , parseTypeSignature , parseStmt, parseIdentifier , parseType, parseHeader , parseModuleNoHaddock ) where -- base import Control.Monad ( unless, liftM, when, (<=<) ) import GHC.Exts import Data.Maybe ( maybeToList ) import Data.List.NonEmpty ( NonEmpty(..) ) import qualified Data.List.NonEmpty as NE import qualified Prelude -- for happy-generated code import GHC.Hs import GHC.Driver.Backpack.Syntax import GHC.Unit.Info import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Data.OrdList import GHC.Data.BooleanFormula ( BooleanFormula(..), LBooleanFormula, mkTrue ) import GHC.Data.FastString import GHC.Data.Maybe ( orElse ) import GHC.Utils.Outputable import GHC.Utils.Error import GHC.Utils.Misc ( looksLikePackageName, fstOf3, sndOf3, thdOf3 ) import GHC.Utils.Panic import GHC.Prelude import qualified GHC.Data.Strict as Strict import GHC.Types.Name.Reader import GHC.Types.Name.Occurrence ( varName, dataName, tcClsName, tvName, occNameFS, mkVarOccFS) import GHC.Types.SrcLoc import GHC.Types.Basic import GHC.Types.Error ( GhcHint(..) ) import GHC.Types.Fixity import GHC.Types.ForeignCall import GHC.Types.SourceFile import GHC.Types.SourceText import GHC.Types.PkgQual import GHC.Core.Type ( Specificity(..) 
) import GHC.Core.Class ( FunDep ) import GHC.Core.DataCon ( DataCon, dataConName ) import GHC.Parser.PostProcess import GHC.Parser.PostProcess.Haddock import GHC.Parser.Lexer import GHC.Parser.HaddockLex import GHC.Parser.Annotation import GHC.Parser.Errors.Types import GHC.Parser.Errors.Ppr () import GHC.Builtin.Types ( unitTyCon, unitDataCon, sumTyCon, tupleTyCon, tupleDataCon, nilDataCon, unboxedUnitTyCon, unboxedUnitDataCon, listTyCon_RDR, consDataCon_RDR, unrestrictedFunTyCon ) import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import qualified Data.Semigroup as Semi } %expect 0 -- shift/reduce conflicts {- Note [shift/reduce conflicts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'happy' tool turns this grammar into an efficient parser that follows the shift-reduce parsing model. There's a parse stack that contains items parsed so far (both terminals and non-terminals). Every next token produced by the lexer results in one of two actions: SHIFT: push the token onto the parse stack REDUCE: pop a few items off the parse stack and combine them with a function (reduction rule) However, sometimes it's unclear which of the two actions to take. Consider this code example: if x then y else f z There are two ways to parse it: (if x then y else f) z if x then y else (f z) How is this determined? At some point, the parser gets to the following state: parse stack: 'if' exp 'then' exp 'else' "f" next token: "z" Scenario A (simplified): 1. REDUCE, parse stack: 'if' exp 'then' exp 'else' exp next token: "z" (Note that "f" reduced to exp here) 2. REDUCE, parse stack: exp next token: "z" 3. SHIFT, parse stack: exp "z" next token: ... 4. REDUCE, parse stack: exp next token: ... This way we get: (if x then y else f) z Scenario B (simplified): 1. SHIFT, parse stack: 'if' exp 'then' exp 'else' "f" "z" next token: ... 2. REDUCE, parse stack: 'if' exp 'then' exp 'else' exp next token: ... 3. REDUCE, parse stack: exp next token: ... 
This way we get: if x then y else (f z) The end result is determined by the chosen action. When Happy detects this, it reports a shift/reduce conflict. At the top of the file, we have the following directive: %expect 0 It means that we expect no unresolved shift/reduce conflicts in this grammar. If you modify the grammar and get shift/reduce conflicts, follow the steps below to resolve them. STEP ONE is to figure out what causes the conflict. That's where the -i flag comes in handy: happy -agc --strict compiler/GHC/Parser.y -idetailed-info By analysing the output of this command, in a new file `detailed-info`, you can figure out which reduction rule causes the issue. At the top of the generated report, you will see a line like this: state 147 contains 67 shift/reduce conflicts. Scroll down to section State 147 (in your case it could be a different state). The start of the section lists the reduction rules that can fire and shows their context: exp10 -> fexp . (rule 492) fexp -> fexp . aexp (rule 498) fexp -> fexp . PREFIX_AT atype (rule 499) And then, for every token, it tells you the parsing action: ']' reduce using rule 492 '::' reduce using rule 492 '(' shift, and enter state 178 QVARID shift, and enter state 44 DO shift, and enter state 182 ... But if you look closer, some of these tokens also have another parsing action in parentheses: QVARID shift, and enter state 44 (reduce using rule 492) That's how you know rule 492 is causing trouble. Scroll back to the top to see what this rule is: ---------------------------------- Grammar ---------------------------------- ... ... exp10 -> fexp (492) optSemi -> ';' (493) ... ... Hence the shift/reduce conflict is caused by this parser production: exp10 :: { ECP } : '-' fexp { ... } | fexp { ... } -- problematic rule STEP TWO is to mark the problematic rule with the %shift pragma. This signals to 'happy' that any shift/reduce conflicts involving this rule must be resolved in favor of a shift. 
There's currently no dedicated pragma to resolve in favor of the reduce. STEP THREE is to add a dedicated Note for this specific conflict, as is done for all other conflicts below. -} {- Note [%shift: rule_activation -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: rule -> STRING . rule_activation rule_foralls infixexp '=' exp Example: {-# RULES "name" [0] f = rhs #-} Ambiguity: If we reduced, then we'd get an empty activation rule, and [0] would be parsed as part of the left-hand side expression. We shift, so [0] is parsed as an activation rule. -} {- Note [%shift: rule_foralls -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: rule -> STRING rule_activation . rule_foralls infixexp '=' exp Example: {-# RULES "name" forall a1. lhs = rhs #-} Ambiguity: If we reduced, then we would get an empty rule_foralls; the 'forall', being a valid term-level identifier, would be parsed as part of the left-hand side expression. We shift, so the 'forall' is parsed as part of rule_foralls. -} {- Note [%shift: type -> btype] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: context -> btype . type -> btype . type -> btype . '->' ctype type -> btype . '->.' ctype Example: a :: Maybe Integer -> Bool Ambiguity: If we reduced, we would get: (a :: Maybe Integer) -> Bool We shift to get this instead: a :: (Maybe Integer -> Bool) -} {- Note [%shift: infixtype -> ftype] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: infixtype -> ftype . infixtype -> ftype . tyop infixtype ftype -> ftype . tyarg ftype -> ftype . PREFIX_AT tyarg Example: a :: Maybe Integer Ambiguity: If we reduced, we would get: (a :: Maybe) Integer We shift to get this instead: a :: (Maybe Integer) -} {- Note [%shift: atype -> tyvar] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: atype -> tyvar . tv_bndr_no_braces -> '(' tyvar . '::' kind ')' Example: class C a where type D a = (a :: Type ... 
Ambiguity: If we reduced, we could specify a default for an associated type like this: class C a where type D a type D a = (a :: Type) But we shift in order to allow injectivity signatures like this: class C a where type D a = (r :: Type) | r -> a -} {- Note [%shift: exp -> infixexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp -> infixexp . '::' sigtype exp -> infixexp . '-<' exp exp -> infixexp . '>-' exp exp -> infixexp . '-<<' exp exp -> infixexp . '>>-' exp exp -> infixexp . infixexp -> infixexp . qop exp10p Examples: 1) if x then y else z -< e 2) if x then y else z :: T 3) if x then y else z + 1 -- (NB: '+' is in VARSYM) Ambiguity: If we reduced, we would get: 1) (if x then y else z) -< e 2) (if x then y else z) :: T 3) (if x then y else z) + 1 We shift to get this instead: 1) if x then y else (z -< e) 2) if x then y else (z :: T) 3) if x then y else (z + 1) -} {- Note [%shift: exp10 -> '-' fexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp10 -> '-' fexp . fexp -> fexp . aexp fexp -> fexp . PREFIX_AT atype Examples & Ambiguity: Same as in Note [%shift: exp10 -> fexp], but with a '-' in front. -} {- Note [%shift: exp10 -> fexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp10 -> fexp . fexp -> fexp . aexp fexp -> fexp . PREFIX_AT atype Examples: 1) if x then y else f z 2) if x then y else f @z Ambiguity: If we reduced, we would get: 1) (if x then y else f) z 2) (if x then y else f) @z We shift to get this instead: 1) if x then y else (f z) 2) if x then y else (f @z) -} {- Note [%shift: aexp2 -> ipvar] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: aexp2 -> ipvar . dbind -> ipvar . '=' exp Example: let ?x = ... Ambiguity: If we reduced, ?x would be parsed as the LHS of a normal binding, eventually producing an error. We shift, so it is parsed as the LHS of an implicit binding. -} {- Note [%shift: aexp2 -> TH_TY_QUOTE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: aexp2 -> TH_TY_QUOTE . tyvar aexp2 -> TH_TY_QUOTE . gtycon aexp2 -> TH_TY_QUOTE . 
Examples: 1) x = '' 2) x = ''a 3) x = ''T Ambiguity: If we reduced, the '' would result in reportEmptyDoubleQuotes even when followed by a type variable or a type constructor. But the only reason this reduction rule exists is to improve error messages. Naturally, we shift instead, so that ''a and ''T work as expected. -} {- Note [%shift: tup_tail -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: tup_exprs -> commas . tup_tail sysdcon_nolist -> '(' commas . ')' sysdcon_nolist -> '(#' commas . '#)' commas -> commas . ',' Example: (,,) Ambiguity: A tuple section with no components is indistinguishable from the Haskell98 data constructor for a tuple. If we reduced, (,,) would be parsed as a tuple section. We shift, so (,,) is parsed as a data constructor. This is preferable because we want to accept (,,) without -XTupleSections. See also Note [ExplicitTuple] in GHC.Hs.Expr. -} {- Note [%shift: qtyconop -> qtyconsym] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: oqtycon -> '(' qtyconsym . ')' qtyconop -> qtyconsym . Example: foo :: (:%) Ambiguity: If we reduced, (:%) would be parsed as a parenthesized infix type expression without arguments, resulting in the 'failOpFewArgs' error. We shift, so it is parsed as a type constructor. -} {- Note [%shift: special_id -> 'group'] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: transformqual -> 'then' 'group' . 'using' exp transformqual -> 'then' 'group' . 'by' exp 'using' exp special_id -> 'group' . Example: [ ... | then group by dept using groupWith , then take 5 ] Ambiguity: If we reduced, 'group' would be parsed as a term-level identifier, just as 'take' in the other clause. We shift, so it is parsed as part of the 'group by' clause introduced by the -XTransformListComp extension. -} {- Note [%shift: activation -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: sigdecl -> '{-# INLINE' . 
activation qvarcon '#-}' activation -> {- empty -} activation -> explicit_activation Example: {-# INLINE [0] Something #-} Ambiguity: We don't know whether the '[' is the start of the activation or the beginning of the [] data constructor. We parse this as having '[0]' activation for inlining 'Something', rather than empty activation and inlining '[0] Something'. -} {- Note [%shift: orpats -> exp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: texp -> exp . orpats -> exp . texp -> exp . '->' texp orpats -> exp . ';' orpats in Lookahead ')': reduce/reduce conflict between the two first productions Example: f (True) = 3 ----^ Ambiguity: We don't know whether the ')' encloses a parenthesized pat (reduce with first production) or a unary Or pattern (reduce with second production). We want to parse it as a parenthesized pat, because * That is the status quo * Parsing it as a unary Or patterns prompts the user to activate -XOrPatterns. Thus, we add a %shift pragma to `orpats -> exp` to lower its precedence, which has the effect of letting `texp -> exp` win (!). An alternative to resolve this ambiguity would be to accept only OrPatterns with at least two patterns in `orpats`, just as in `tup_exprs`. But the present code seems simpler, because it just needs one non-terminal, at the expense of using a small pragma. -} {- Note [Parser API Annotations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A lot of the productions are now cluttered with calls to aa,am,acs,acsA etc. These are helper functions to make sure that the locations of the various keywords such as do / let / in are captured for use by tools that want to do source to source conversions, such as refactorers or structured editors. The helper functions are defined at the bottom of this file. See https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations and https://gitlab.haskell.org/ghc/ghc/wikis/ghc-ast-annotations for some background. 
-} {- Note [Parsing lists] ~~~~~~~~~~~~~~~~~~~~~~~ You might be wondering why we spend so much effort encoding our lists this way: importdecls : importdecls ';' importdecl | importdecls ';' | importdecl | {- empty -} This might seem like an awfully roundabout way to declare a list; plus, to add insult to injury you have to reverse the results at the end. The answer is that left recursion prevents us from running out of stack space when parsing long sequences. See: https://haskell-happy.readthedocs.io/en/latest/using.html#parsing-sequences for more guidance. By adding/removing branches, you can affect what lists are accepted. Here are the most common patterns, rewritten as regular expressions for clarity: -- Equivalent to: ';'* (x ';'+)* x? (can be empty, permits leading/trailing semis) xs : xs ';' x | xs ';' | x | {- empty -} -- Equivalent to x (';' x)* ';'* (non-empty, permits trailing semis) xs : xs ';' x | xs ';' | x -- Equivalent to ';'* alts (';' alts)* ';'* (non-empty, permits leading/trailing semis) alts : alts1 | ';' alts alts1 : alts1 ';' alt | alts1 ';' | alt -- Equivalent to x (',' x)+ (non-empty, no trailing semis) xs : x | x ',' xs -} %token '_' { L _ ITunderscore } -- Haskell keywords 'as' { L _ ITas } 'case' { L _ ITcase } 'class' { L _ ITclass } 'data' { L _ ITdata } 'default' { L _ ITdefault } 'deriving' { L _ ITderiving } 'else' { L _ ITelse } 'hiding' { L _ IThiding } 'if' { L _ ITif } 'import' { L _ ITimport } 'in' { L _ ITin } 'infix' { L _ ITinfix } 'infixl' { L _ ITinfixl } 'infixr' { L _ ITinfixr } 'instance' { L _ ITinstance } 'let' { L _ ITlet } 'module' { L _ ITmodule } 'newtype' { L _ ITnewtype } 'of' { L _ ITof } 'qualified' { L _ ITqualified } 'then' { L _ ITthen } 'type' { L _ ITtype } 'where' { L _ ITwhere } 'forall' { L _ (ITforall _) } -- GHC extension keywords 'foreign' { L _ ITforeign } 'export' { L _ ITexport } 'label' { L _ ITlabel } 'dynamic' { L _ ITdynamic } 'safe' { L _ ITsafe } 'interruptible' { L _ ITinterruptible } 
'unsafe' { L _ ITunsafe } 'family' { L _ ITfamily } 'role' { L _ ITrole } 'stdcall' { L _ ITstdcallconv } 'ccall' { L _ ITccallconv } 'capi' { L _ ITcapiconv } 'prim' { L _ ITprimcallconv } 'javascript' { L _ ITjavascriptcallconv } 'proc' { L _ ITproc } -- for arrow notation extension 'rec' { L _ ITrec } -- for arrow notation extension 'group' { L _ ITgroup } -- for list transform extension 'by' { L _ ITby } -- for list transform extension 'using' { L _ ITusing } -- for list transform extension 'pattern' { L _ ITpattern } -- for pattern synonyms 'static' { L _ ITstatic } -- for static pointers extension 'stock' { L _ ITstock } -- for DerivingStrategies extension 'anyclass' { L _ ITanyclass } -- for DerivingStrategies extension 'via' { L _ ITvia } -- for DerivingStrategies extension 'unit' { L _ ITunit } 'signature' { L _ ITsignature } 'dependency' { L _ ITdependency } '{-# INLINE' { L _ (ITinline_prag _ _ _) } -- INLINE or INLINABLE '{-# OPAQUE' { L _ (ITopaque_prag _) } '{-# SPECIALISE' { L _ (ITspec_prag _) } '{-# SPECIALISE_INLINE' { L _ (ITspec_inline_prag _ _) } '{-# SOURCE' { L _ (ITsource_prag _) } '{-# RULES' { L _ (ITrules_prag _) } '{-# SCC' { L _ (ITscc_prag _)} '{-# DEPRECATED' { L _ (ITdeprecated_prag _) } '{-# WARNING' { L _ (ITwarning_prag _) } '{-# UNPACK' { L _ (ITunpack_prag _) } '{-# NOUNPACK' { L _ (ITnounpack_prag _) } '{-# ANN' { L _ (ITann_prag _) } '{-# MINIMAL' { L _ (ITminimal_prag _) } '{-# CTYPE' { L _ (ITctype _) } '{-# OVERLAPPING' { L _ (IToverlapping_prag _) } '{-# OVERLAPPABLE' { L _ (IToverlappable_prag _) } '{-# OVERLAPS' { L _ (IToverlaps_prag _) } '{-# INCOHERENT' { L _ (ITincoherent_prag _) } '{-# COMPLETE' { L _ (ITcomplete_prag _) } '#-}' { L _ ITclose_prag } '..' { L _ ITdotdot } -- reserved symbols ':' { L _ ITcolon } '::' { L _ (ITdcolon _) } '=' { L _ ITequal } '\\' { L _ ITlam } 'lcase' { L _ ITlcase } 'lcases' { L _ ITlcases } '|' { L _ ITvbar } '<-' { L _ (ITlarrow _) } '->' { L _ (ITrarrow _) } '->.' 
{ L _ ITlolly } TIGHT_INFIX_AT { L _ ITat } '=>' { L _ (ITdarrow _) } '-' { L _ ITminus } PREFIX_TILDE { L _ ITtilde } PREFIX_BANG { L _ ITbang } PREFIX_MINUS { L _ ITprefixminus } '*' { L _ (ITstar _) } '-<' { L _ (ITlarrowtail _) } -- for arrow notation '>-' { L _ (ITrarrowtail _) } -- for arrow notation '-<<' { L _ (ITLarrowtail _) } -- for arrow notation '>>-' { L _ (ITRarrowtail _) } -- for arrow notation '.' { L _ ITdot } PREFIX_PROJ { L _ (ITproj True) } -- RecordDotSyntax TIGHT_INFIX_PROJ { L _ (ITproj False) } -- RecordDotSyntax PREFIX_AT { L _ ITtypeApp } PREFIX_PERCENT { L _ ITpercent } -- for linear types '{' { L _ ITocurly } -- special symbols '}' { L _ ITccurly } vocurly { L _ ITvocurly } -- virtual open curly (from layout) vccurly { L _ ITvccurly } -- virtual close curly (from layout) '[' { L _ ITobrack } ']' { L _ ITcbrack } '(' { L _ IToparen } ')' { L _ ITcparen } '(#' { L _ IToubxparen } '#)' { L _ ITcubxparen } '(|' { L _ (IToparenbar _) } '|)' { L _ (ITcparenbar _) } ';' { L _ ITsemi } ',' { L _ ITcomma } '`' { L _ ITbackquote } SIMPLEQUOTE { L _ ITsimpleQuote } -- 'x VARID { L _ (ITvarid _) } -- identifiers CONID { L _ (ITconid _) } VARSYM { L _ (ITvarsym _) } CONSYM { L _ (ITconsym _) } QVARID { L _ (ITqvarid _) } QCONID { L _ (ITqconid _) } QVARSYM { L _ (ITqvarsym _) } QCONSYM { L _ (ITqconsym _) } -- QualifiedDo DO { L _ (ITdo _) } MDO { L _ (ITmdo _) } IPDUPVARID { L _ (ITdupipvarid _) } -- GHC extension LABELVARID { L _ (ITlabelvarid _ _) } CHAR { L _ (ITchar _ _) } STRING { L _ (ITstring _ _) } STRING_MULTI { L _ (ITstringMulti _ _) } INTEGER { L _ (ITinteger _) } RATIONAL { L _ (ITrational _) } PRIMCHAR { L _ (ITprimchar _ _) } PRIMSTRING { L _ (ITprimstring _ _) } PRIMINTEGER { L _ (ITprimint _ _) } PRIMWORD { L _ (ITprimword _ _) } PRIMINTEGER8 { L _ (ITprimint8 _ _) } PRIMINTEGER16 { L _ (ITprimint16 _ _) } PRIMINTEGER32 { L _ (ITprimint32 _ _) } PRIMINTEGER64 { L _ (ITprimint64 _ _) } PRIMWORD8 { L _ (ITprimword8 _ _) } PRIMWORD16 
{ L _ (ITprimword16 _ _) } PRIMWORD32 { L _ (ITprimword32 _ _) } PRIMWORD64 { L _ (ITprimword64 _ _) } PRIMFLOAT { L _ (ITprimfloat _) } PRIMDOUBLE { L _ (ITprimdouble _) } -- Template Haskell '[|' { L _ (ITopenExpQuote _ _) } '[p|' { L _ ITopenPatQuote } '[t|' { L _ ITopenTypQuote } '[d|' { L _ ITopenDecQuote } '|]' { L _ (ITcloseQuote _) } '[||' { L _ (ITopenTExpQuote _) } '||]' { L _ ITcloseTExpQuote } PREFIX_DOLLAR { L _ ITdollar } PREFIX_DOLLAR_DOLLAR { L _ ITdollardollar } TH_TY_QUOTE { L _ ITtyQuote } -- ''T TH_QUASIQUOTE { L _ (ITquasiQuote _) } TH_QQUASIQUOTE { L _ (ITqQuasiQuote _) } %monad { P } { >>= } { return } %lexer { (lexer True) } { L _ ITeof } -- Replace 'lexer' above with 'lexerDbg' -- to dump the tokens fed to the parser. %tokentype { (Located Token) } -- Exported parsers %name parseModuleNoHaddock module %name parseSignatureNoHaddock signature %name parseImport importdecl %name parseStatement e_stmt %name parseDeclaration topdecl %name parseExpression exp %name parsePattern pat %name parseTypeSignature sigdecl %name parseStmt maybe_stmt %name parseIdentifier identifier %name parseType ktype %name parseBackpack backpack %partial parseHeader header %% ----------------------------------------------------------------------------- -- Identifiers; one of the entry points identifier :: { LocatedN RdrName } : qvar { $1 } | qcon { $1 } | qvarop { $1 } | qconop { $1 } | '->' {% amsr (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnnRArrow Nothing (epUniTok $1) Nothing []) } ----------------------------------------------------------------------------- -- Backpack stuff backpack :: { [LHsUnit PackageName] } : implicit_top units close { fromOL $2 } | '{' units '}' { fromOL $2 } units :: { OrdList (LHsUnit PackageName) } : units ';' unit { $1 `appOL` unitOL $3 } | units ';' { $1 } | unit { unitOL $1 } unit :: { LHsUnit PackageName } : 'unit' pkgname 'where' unitbody { sL1 $1 $ HsUnit { hsunitName = $2 , hsunitBody = fromOL $4 } } unitid :: { LHsUnitId 
PackageName } : pkgname { sL1 $1 $ HsUnitId $1 [] } | pkgname '[' msubsts ']' { sLL $1 $> $ HsUnitId $1 (fromOL $3) } msubsts :: { OrdList (LHsModuleSubst PackageName) } : msubsts ',' msubst { $1 `appOL` unitOL $3 } | msubsts ',' { $1 } | msubst { unitOL $1 } msubst :: { LHsModuleSubst PackageName } : modid '=' moduleid { sLL $1 $> $ (reLoc $1, $3) } | modid VARSYM modid VARSYM { sLL $1 $> $ (reLoc $1, sLL $2 $> $ HsModuleVar (reLoc $3)) } moduleid :: { LHsModuleId PackageName } : VARSYM modid VARSYM { sLL $1 $> $ HsModuleVar (reLoc $2) } | unitid ':' modid { sLL $1 $> $ HsModuleId $1 (reLoc $3) } pkgname :: { Located PackageName } : STRING { sL1 $1 $ PackageName (getSTRING $1) } | litpkgname { sL1 $1 $ PackageName (unLoc $1) } litpkgname_segment :: { Located FastString } : VARID { sL1 $1 $ getVARID $1 } | CONID { sL1 $1 $ getCONID $1 } | special_id { $1 } -- Parse a minus sign regardless of whether -XLexicalNegation is turned on or off. -- See Note [Minus tokens] in GHC.Parser.Lexer HYPHEN :: { () } : '-' { () } | PREFIX_MINUS { () } | VARSYM { () } litpkgname :: { Located FastString } : litpkgname_segment { $1 } -- a bit of a hack, means p - b is parsed same as p-b, enough for now. 
| litpkgname_segment HYPHEN litpkgname { sLL $1 $> $ concatFS [unLoc $1, fsLit "-", (unLoc $3)] } mayberns :: { Maybe [LRenaming] } : {- empty -} { Nothing } | '(' rns ')' { Just (fromOL $2) } rns :: { OrdList LRenaming } : rns ',' rn { $1 `appOL` unitOL $3 } | rns ',' { $1 } | rn { unitOL $1 } rn :: { LRenaming } : modid 'as' modid { sLL $1 $> $ Renaming (reLoc $1) (Just (reLoc $3)) } | modid { sL1 $1 $ Renaming (reLoc $1) Nothing } unitbody :: { OrdList (LHsUnitDecl PackageName) } : '{' unitdecls '}' { $2 } | vocurly unitdecls close { $2 } unitdecls :: { OrdList (LHsUnitDecl PackageName) } : unitdecls ';' unitdecl { $1 `appOL` unitOL $3 } | unitdecls ';' { $1 } | unitdecl { unitOL $1 } unitdecl :: { LHsUnitDecl PackageName } : 'module' maybe_src modid maybe_warning_pragma maybeexports 'where' body -- XXX not accurate { sL1 $1 $ DeclD (case snd $2 of NotBoot -> HsSrcFile IsBoot -> HsBootFile) (reLoc $3) (sL1 $1 (HsModule (XModulePs noAnn (thdOf3 $7) $4 Nothing) (Just $3) $5 (fst $ sndOf3 $7) (snd $ sndOf3 $7))) } | 'signature' modid maybe_warning_pragma maybeexports 'where' body { sL1 $1 $ DeclD HsigFile (reLoc $2) (sL1 $1 (HsModule (XModulePs noAnn (thdOf3 $6) $3 Nothing) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6))) } | 'dependency' unitid mayberns { sL1 $1 $ IncludeD (IncludeDecl { idUnitId = $2 , idModRenaming = $3 , idSignatureInclude = False }) } | 'dependency' 'signature' unitid { sL1 $1 $ IncludeD (IncludeDecl { idUnitId = $3 , idModRenaming = Nothing , idSignatureInclude = True }) } ----------------------------------------------------------------------------- -- Module Header -- The place for module deprecation is really too restrictive, but if it -- was allowed at its natural place just before 'module', we get an ugly -- s/r conflict with the second alternative. 
Another solution would be the -- introduction of a new pragma DEPRECATED_MODULE, but this is not very nice, -- either, and DEPRECATED is only expected to be used by people who really -- know what they are doing. :-) signature :: { Located (HsModule GhcPs) } : 'signature' modid maybe_warning_pragma maybeexports 'where' body {% fileSrcSpan >>= \ loc -> acs loc (\loc cs-> (L loc (HsModule (XModulePs (EpAnn (spanAsAnchor loc) (AnnsModule (epTok $1) NoEpTok (epTok $5) (fstOf3 $6) [] Nothing) cs) (thdOf3 $6) $3 Nothing) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6))) ) } module :: { Located (HsModule GhcPs) } : 'module' modid maybe_warning_pragma maybeexports 'where' body {% fileSrcSpan >>= \ loc -> acsFinal (\cs eof -> (L loc (HsModule (XModulePs (EpAnn (spanAsAnchor loc) (AnnsModule NoEpTok (epTok $1) (epTok $5) (fstOf3 $6) [] eof) cs) (thdOf3 $6) $3 Nothing) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6)) )) } | body2 {% fileSrcSpan >>= \ loc -> acsFinal (\cs eof -> (L loc (HsModule (XModulePs (EpAnn (spanAsAnchor loc) (AnnsModule NoEpTok NoEpTok NoEpTok (fstOf3 $1) [] eof) cs) (thdOf3 $1) Nothing Nothing) Nothing Nothing (fst $ sndOf3 $1) (snd $ sndOf3 $1)))) } missing_module_keyword :: { () } : {- empty -} {% pushModuleContext } implicit_top :: { () } : {- empty -} {% pushModuleContext } body :: { ([TrailingAnn] ,([LImportDecl GhcPs], [LHsDecl GhcPs]) ,EpLayout) } : '{' top '}' { (fst $2, snd $2, epExplicitBraces $1 $3) } | vocurly top close { (fst $2, snd $2, EpVirtualBraces (getVOCURLY $1)) } body2 :: { ([TrailingAnn] ,([LImportDecl GhcPs], [LHsDecl GhcPs]) ,EpLayout) } : '{' top '}' { (fst $2, snd $2, epExplicitBraces $1 $3) } | missing_module_keyword top close { ([], snd $2, EpVirtualBraces leftmostColumn) } top :: { ([TrailingAnn] ,([LImportDecl GhcPs], [LHsDecl GhcPs])) } : semis top1 { (reverse $1, $2) } top1 :: { ([LImportDecl GhcPs], [LHsDecl GhcPs]) } : importdecls_semi topdecls_cs_semi { (reverse $1, cvTopDecls $2) } | importdecls_semi topdecls_cs { 
(reverse $1, cvTopDecls $2) } | importdecls { (reverse $1, []) } ----------------------------------------------------------------------------- -- Module declaration & imports only header :: { Located (HsModule GhcPs) } : 'module' modid maybe_warning_pragma maybeexports 'where' header_body {% fileSrcSpan >>= \ loc -> acs loc (\loc cs -> (L loc (HsModule (XModulePs (EpAnn (spanAsAnchor loc) (AnnsModule NoEpTok (epTok $1) (epTok $5) [] [] Nothing) cs) EpNoLayout $3 Nothing) (Just $2) $4 $6 [] ))) } | 'signature' modid maybe_warning_pragma maybeexports 'where' header_body {% fileSrcSpan >>= \ loc -> acs loc (\loc cs -> (L loc (HsModule (XModulePs (EpAnn (spanAsAnchor loc) (AnnsModule NoEpTok (epTok $1) (epTok $5) [] [] Nothing) cs) EpNoLayout $3 Nothing) (Just $2) $4 $6 [] ))) } | header_body2 {% fileSrcSpan >>= \ loc -> return (L loc (HsModule (XModulePs noAnn EpNoLayout Nothing Nothing) Nothing Nothing $1 [])) } header_body :: { [LImportDecl GhcPs] } : '{' header_top { $2 } | vocurly header_top { $2 } header_body2 :: { [LImportDecl GhcPs] } : '{' header_top { $2 } | missing_module_keyword header_top { $2 } header_top :: { [LImportDecl GhcPs] } : semis header_top_importdecls { $2 } header_top_importdecls :: { [LImportDecl GhcPs] } : importdecls_semi { $1 } | importdecls { $1 } ----------------------------------------------------------------------------- -- The Export List maybeexports :: { (Maybe (LocatedLI [LIE GhcPs])) } : '(' exportlist ')' {% fmap Just $ amsr (sLL $1 $> (fromOL $ snd $2)) (AnnList Nothing (ListParens (epTok $1) (epTok $3)) [] (noAnn,fst $2) []) } | {- empty -} { Nothing } exportlist :: { ([EpToken ","], OrdList (LIE GhcPs)) } : exportlist1 { ([], $1) } | {- empty -} { ([], nilOL) } -- trailing comma: | exportlist1 ',' {% case $1 of SnocOL hs t -> do t' <- addTrailingCommaA t (epTok $2) return ([], snocOL hs t')} | ',' { ([epTok $1], nilOL) } exportlist1 :: { OrdList (LIE GhcPs) } : exportlist1 ',' export_cs {% let ls = $1 in if isNilOL ls then 
return (ls `appOL` $3) else case ls of SnocOL hs t -> do t' <- addTrailingCommaA t (epTok $2) return (snocOL hs t' `appOL` $3)} | export_cs { $1 } export_cs :: { OrdList (LIE GhcPs) } export_cs : export {% return (unitOL $1) } -- No longer allow things like [] and (,,,) to be exported -- They are built in syntax, always available export :: { LIE GhcPs } : maybe_warning_pragma qcname_ext export_subspec {% do { let { span = (maybe comb2 comb3 $1) $2 $> } ; impExp <- mkModuleImpExp $1 (fst $ unLoc $3) $2 (snd $ unLoc $3) ; return $ reLoc $ sL span $ impExp } } | maybe_warning_pragma 'module' modid {% do { let { span = (maybe comb2 comb3 $1) $2 $> ; anchor = (maybe glR (\loc -> spanAsAnchor . comb2 loc) $1) $2 } ; locImpExp <- return (sL span (IEModuleContents ($1, (epTok $2)) $3)) ; return $ reLoc $ locImpExp } } | maybe_warning_pragma 'pattern' qcon { let span = (maybe comb2 comb3 $1) $2 $> in reLoc $ sL span $ IEVar $1 (sLLa $2 $> (IEPattern (epTok $2) $3)) Nothing } | maybe_warning_pragma 'default' qtycon {% do { let { span = (maybe comb2 comb3 $1) $2 $> } ; locImpExp <- return (sL span (IEThingAbs $1 (sLLa $2 $> (IEDefault (epTok $2) $3)) Nothing)) ; return $ reLoc $ locImpExp } } export_subspec :: { Located ((EpToken "(", EpToken ")"), ImpExpSubSpec) } : {- empty -} { sL0 (noAnn,ImpExpAbs) } | '(' qcnames ')' {% mkImpExpSubSpec (reverse $2) >>= \ie -> return $ sLL $1 $> ((epTok $1, epTok $3), ie) } qcnames :: { [LocatedA ImpExpQcSpec] } : {- empty -} { [] } | qcnames1 { $1 } qcnames1 :: { [LocatedA ImpExpQcSpec] } -- A reversed list : qcnames1 ',' qcname_ext_w_wildcard {% case $1 of ((L la (ImpExpQcWildcard tok _)):t) -> do { return ($3 : L la (ImpExpQcWildcard tok (epTok $2)) : t) } (l:t) -> do { l' <- addTrailingCommaA l (epTok $2) ; return ($3 : l' : t)} } -- Annotations re-added in mkImpExpSubSpec | qcname_ext_w_wildcard { [$1] } -- Variable, data constructor or wildcard -- or tagged type constructor qcname_ext_w_wildcard :: { LocatedA ImpExpQcSpec } : 
qcname_ext { $1 } | '..' { sL1a $1 (ImpExpQcWildcard (epTok $1) NoEpTok) } qcname_ext :: { LocatedA ImpExpQcSpec } : qcname { sL1a $1 (ImpExpQcName $1) } | 'type' oqtycon {% do { n <- mkTypeImpExp $2 ; return $ sLLa $1 $> (ImpExpQcType (epTok $1) n) }} qcname :: { LocatedN RdrName } -- Variable or type constructor : qvar { $1 } -- Things which look like functions -- Note: This includes record selectors but -- also (-.->), see #11432 | oqtycon_no_varcon { $1 } -- see Note [Type constructors in export list] ----------------------------------------------------------------------------- -- Import Declarations -- importdecls and topdecls must contain at least one declaration; -- top handles the fact that these may be optional. -- One or more semicolons semis1 :: { Located [TrailingAnn] } semis1 : semis1 ';' { if isZeroWidthSpan (gl $2) then (sL1 $1 $ unLoc $1) else (sLL $1 $> $ AddSemiAnn (epTok $2) : (unLoc $1)) } | ';' { case msemi $1 of [] -> noLoc [] ms -> sL1 $1 $ ms } -- Zero or more semicolons semis :: { [TrailingAnn] } semis : semis ';' { if isZeroWidthSpan (gl $2) then $1 else (AddSemiAnn (epTok $2) : $1) } | {- empty -} { [] } -- No trailing semicolons, non-empty importdecls :: { [LImportDecl GhcPs] } importdecls : importdecls_semi importdecl { $2 : $1 } -- May have trailing semicolons, can be empty importdecls_semi :: { [LImportDecl GhcPs] } importdecls_semi : importdecls_semi importdecl semis1 {% do { i <- amsAl $2 (comb2 $2 $3) (reverse $ unLoc $3) ; return (i : $1)} } | {- empty -} { [] } importdecl :: { LImportDecl GhcPs } : 'import' maybe_src maybe_safe optqualified maybe_pkg modid optqualified maybeas maybeimpspec {% do { ; let { ; mPreQual = unLoc $4 ; mPostQual = unLoc $7 } ; checkImportDecl mPreQual mPostQual ; let anns = EpAnnImportDecl { importDeclAnnImport = epTok $1 , importDeclAnnPragma = fst $ fst $2 , importDeclAnnSafe = fst $3 , importDeclAnnQualified = fst $ importDeclQualifiedStyle mPreQual mPostQual , importDeclAnnPackage = fst $5 , 
importDeclAnnAs = fst $8 } ; let loc = (comb5 $1 $6 $7 (snd $8) $9); ; fmap reLoc $ acs loc (\loc cs -> L loc $ ImportDecl { ideclExt = XImportDeclPass (EpAnn (spanAsAnchor loc) anns cs) (snd $ fst $2) False , ideclName = $6, ideclPkgQual = snd $5 , ideclSource = snd $2, ideclSafe = snd $3 , ideclQualified = snd $ importDeclQualifiedStyle mPreQual mPostQual , ideclAs = unLoc (snd $8) , ideclImportList = unLoc $9 }) } } maybe_src :: { ((Maybe (EpaLocation,EpToken "#-}"),SourceText),IsBootInterface) } : '{-# SOURCE' '#-}' { ((Just (glR $1,epTok $2),getSOURCE_PRAGs $1) , IsBoot) } | {- empty -} { ((Nothing,NoSourceText),NotBoot) } maybe_safe :: { (Maybe (EpToken "safe"),Bool) } : 'safe' { (Just (epTok $1),True) } | {- empty -} { (Nothing, False) } maybe_pkg :: { (Maybe EpaLocation, RawPkgQual) } : STRING {% do { let { pkgFS = getSTRING $1 } ; unless (looksLikePackageName (unpackFS pkgFS)) $ addError $ mkPlainErrorMsgEnvelope (getLoc $1) $ (PsErrInvalidPackageName pkgFS) ; return (Just (glR $1), RawPkgQual (StringLiteral (getSTRINGs $1) pkgFS Nothing)) } } | {- empty -} { (Nothing,NoRawPkgQual) } optqualified :: { Located (Maybe (EpToken "qualified")) } : 'qualified' { sL1 $1 (Just (epTok $1)) } | {- empty -} { noLoc Nothing } maybeas :: { (Maybe (EpToken "as"),Located (Maybe (LocatedA ModuleName))) } : 'as' modid { (Just (epTok $1) ,sLL $1 $> (Just $2)) } | {- empty -} { (Nothing,noLoc Nothing) } maybeimpspec :: { Located (Maybe (ImportListInterpretation, LocatedLI [LIE GhcPs])) } : impspec {% let (b, ie) = unLoc $1 in checkImportSpec ie >>= \checkedIe -> return (L (gl $1) (Just (b, checkedIe))) } | {- empty -} { noLoc Nothing } impspec :: { Located (ImportListInterpretation, LocatedLI [LIE GhcPs]) } : '(' importlist ')' {% do { es <- amsr (sLL $1 $> $ fromOL $ snd $2) (AnnList Nothing (ListParens (epTok $1) (epTok $3)) [] (noAnn,fst $2) []) ; return $ sLL $1 $> (Exactly, es)} } | 'hiding' '(' importlist ')' {% do { es <- amsr (sLL $1 $> $ fromOL $ snd $3) (AnnList 
Nothing (ListParens (epTok $2) (epTok $4)) [] (epTok $1,fst $3) []) ; return $ sLL $1 $> (EverythingBut, es)} } importlist :: { ([EpToken ","], OrdList (LIE GhcPs)) } : importlist1 { ([], $1) } | {- empty -} { ([], nilOL) } -- trailing comma: | importlist1 ',' {% case $1 of SnocOL hs t -> do t' <- addTrailingCommaA t (epTok $2) return ([], snocOL hs t')} | ',' { ([epTok $1], nilOL) } importlist1 :: { OrdList (LIE GhcPs) } : importlist1 ',' import {% let ls = $1 in if isNilOL ls then return (ls `appOL` $3) else case ls of SnocOL hs t -> do t' <- addTrailingCommaA t (epTok $2) return (snocOL hs t' `appOL` $3)} | import { $1 } import :: { OrdList (LIE GhcPs) } : qcname_ext export_subspec {% fmap (unitOL . reLoc . (sLL $1 $>)) $ mkModuleImpExp Nothing (fst $ unLoc $2) $1 (snd $ unLoc $2) } | 'module' modid {% fmap (unitOL . reLoc) $ return (sLL $1 $> (IEModuleContents (Nothing, (epTok $1)) $2)) } | 'pattern' qcon { unitOL $ reLoc $ sLL $1 $> $ IEVar Nothing (sLLa $1 $> (IEPattern (epTok $1) $2)) Nothing } ----------------------------------------------------------------------------- -- Fixity Declarations prec :: { Maybe (Located (SourceText,Int)) } : {- empty -} { Nothing } | INTEGER { Just (sL1 $1 (getINTEGERs $1,fromInteger (il_value (getINTEGER $1)))) } infix :: { Located FixityDirection } : 'infix' { sL1 $1 InfixN } | 'infixl' { sL1 $1 InfixL } | 'infixr' { sL1 $1 InfixR } ops :: { Located (OrdList (LocatedN RdrName)) } : ops ',' op {% case (unLoc $1) of SnocOL hs t -> do t' <- addTrailingCommaN t (gl $2) return (sLL $1 $> (snocOL hs t' `appOL` unitOL $3)) } | op { sL1 $1 (unitOL $1) } ----------------------------------------------------------------------------- -- Top-Level Declarations -- No trailing semicolons, non-empty topdecls :: { OrdList (LHsDecl GhcPs) } : topdecls_semi topdecl { $1 `snocOL` $2 } -- May have trailing semicolons, can be empty topdecls_semi :: { OrdList (LHsDecl GhcPs) } : topdecls_semi topdecl semis1 {% do { t <- amsAl $2 (comb2 $2 $3) 
(reverse $ unLoc $3) ; return ($1 `snocOL` t) }} | {- empty -} { nilOL } ----------------------------------------------------------------------------- -- Each topdecl accumulates prior comments -- No trailing semicolons, non-empty topdecls_cs :: { OrdList (LHsDecl GhcPs) } : topdecls_cs_semi topdecl_cs { $1 `snocOL` $2 } -- May have trailing semicolons, can be empty topdecls_cs_semi :: { OrdList (LHsDecl GhcPs) } : topdecls_cs_semi topdecl_cs semis1 {% do { t <- amsAl $2 (comb2 $2 $3) (reverse $ unLoc $3) ; return ($1 `snocOL` t) }} | {- empty -} { nilOL } -- Each topdecl accumulates prior comments topdecl_cs :: { LHsDecl GhcPs } topdecl_cs : topdecl {% commentsPA $1 } ----------------------------------------------------------------------------- topdecl :: { LHsDecl GhcPs } : cl_decl { L (getLoc $1) (TyClD noExtField (unLoc $1)) } | ty_decl { L (getLoc $1) (TyClD noExtField (unLoc $1)) } | standalone_kind_sig { L (getLoc $1) (KindSigD noExtField (unLoc $1)) } | inst_decl { L (getLoc $1) (InstD noExtField (unLoc $1)) } | stand_alone_deriving { L (getLoc $1) (DerivD noExtField (unLoc $1)) } | role_annot { L (getLoc $1) (RoleAnnotD noExtField (unLoc $1)) } | default_decl { L (getLoc $1) (DefD noExtField (unLoc $1)) } | 'foreign' fdecl {% amsA' (sLL $1 $> ((unLoc $2) (epTok $1))) } | '{-# DEPRECATED' deprecations '#-}' {% amsA' (sLL $1 $> $ WarningD noExtField (Warnings ((glR $1,epTok $3), (getDEPRECATED_PRAGs $1)) (fromOL $2))) } | '{-# WARNING' warnings '#-}' {% amsA' (sLL $1 $> $ WarningD noExtField (Warnings ((glR $1,epTok $3), (getWARNING_PRAGs $1)) (fromOL $2))) } | '{-# RULES' rules '#-}' {% amsA' (sLL $1 $> $ RuleD noExtField (HsRules ((glR $1,epTok $3), (getRULES_PRAGs $1)) (reverse $2))) } | annotation { $1 } | decl_no_th { $1 } -- Template Haskell Extension -- The $(..) form is one possible form of infixexp -- but we treat an arbitrary expression just as if -- it had a $(..) 
wrapped around it | infixexp {% runPV (unECP $1) >>= \ $1 -> commentsPA $ mkSpliceDecl $1 } -- Type classes -- cl_decl :: { LTyClDecl GhcPs } : 'class' tycl_hdr fds where_cls {% do { let {(wtok, (oc,semis,cc)) = fstOf3 $ unLoc $4} ; mkClassDecl (comb4 $1 $2 $3 $4) $2 $3 (sndOf3 $ unLoc $4) (thdOf3 $ unLoc $4) (AnnClassDecl (epTok $1) [] [] (fst $ unLoc $3) wtok oc cc semis) }} -- Default declarations (toplevel) -- default_decl :: { LDefaultDecl GhcPs } : 'default' opt_class '(' comma_types0 ')' {% amsA' (sLL $1 $> (DefaultDecl (epTok $1,epTok $3,epTok $5) $2 $4)) } -- Type declarations (toplevel) -- ty_decl :: { LTyClDecl GhcPs } -- ordinary type synonyms : 'type' type '=' ktype -- Note ktype, not sigtype, on the right of '=' -- We allow an explicit for-all but we don't insert one -- in type Foo a = (b,b) -- Instead we just say b is out of scope -- -- Note the use of type for the head; this allows -- infix type constructors to be declared {% mkTySynonym (comb2 $1 $4) $2 $4 (epTok $1) (epTok $3) } -- type family declarations | 'type' 'family' type opt_tyfam_kind_sig opt_injective_info where_type_family -- Note the use of type for the head; this allows -- infix type constructors to be declared {% do { let { (tdcolon, tequal) = fst $ unLoc $4 } ; let { tvbar = fst $ unLoc $5 } ; let { (twhere, (toc, tdd, tcc)) = fst $ unLoc $6 } ; mkFamDecl (comb5 $1 $3 $4 $5 $6) (snd $ unLoc $6) TopLevel $3 (snd $ unLoc $4) (snd $ unLoc $5) (AnnFamilyDecl [] [] (epTok $1) noAnn (epTok $2) tdcolon tequal tvbar twhere toc tdd tcc) }} -- ordinary data type or newtype declaration | type_data_or_newtype capi_ctype tycl_hdr constrs maybe_derivings {% do { let { (tdata, tnewtype, ttype) = fstOf3 $ unLoc $1} ; let { tequal = fst $ unLoc $4 } ; mkTyData (comb4 $1 $3 $4 $5) (sndOf3 $ unLoc $1) (thdOf3 $ unLoc $1) $2 $3 Nothing (reverse (snd $ unLoc $4)) (fmap reverse $5) (AnnDataDefn [] [] ttype tnewtype tdata NoEpTok NoEpUniTok NoEpTok NoEpTok NoEpTok tequal) }} -- We need the location on 
tycl_hdr in case -- constrs and deriving are both empty -- ordinary GADT declaration | type_data_or_newtype capi_ctype tycl_hdr opt_kind_sig gadt_constrlist maybe_derivings {% do { let { (tdata, tnewtype, ttype) = fstOf3 $ unLoc $1} ; let { tdcolon = fst $ unLoc $4 } ; let { (twhere, oc, cc) = fst $ unLoc $5 } ; mkTyData (comb5 $1 $3 $4 $5 $6) (sndOf3 $ unLoc $1) (thdOf3 $ unLoc $1) $2 $3 (snd $ unLoc $4) (snd $ unLoc $5) (fmap reverse $6) (AnnDataDefn [] [] ttype tnewtype tdata NoEpTok tdcolon twhere oc cc NoEpTok)}} -- We need the location on tycl_hdr in case -- constrs and deriving are both empty -- data/newtype family | 'data' 'family' type opt_datafam_kind_sig {% do { let { tdcolon = fst $ unLoc $4 } ; mkFamDecl (comb4 $1 $2 $3 $4) DataFamily TopLevel $3 (snd $ unLoc $4) Nothing (AnnFamilyDecl [] [] noAnn (epTok $1) (epTok $2) tdcolon noAnn noAnn noAnn noAnn noAnn noAnn) }} -- standalone kind signature standalone_kind_sig :: { LStandaloneKindSig GhcPs } : 'type' sks_vars '::' sigktype {% mkStandaloneKindSig (comb2 $1 $4) (L (gl $2) $ unLoc $2) $4 (epTok $1,epUniTok $3)} -- See also: sig_vars sks_vars :: { Located [LocatedN RdrName] } -- Returned in reverse order : sks_vars ',' oqtycon {% case unLoc $1 of (h:t) -> do h' <- addTrailingCommaN h (gl $2) return (sLL $1 $> ($3 : h' : t)) } | oqtycon { sL1 $1 [$1] } inst_decl :: { LInstDecl GhcPs } : 'instance' maybe_warning_pragma overlap_pragma inst_type where_inst {% do { (binds, sigs, _, ats, adts, _) <- cvBindsAndSigs (snd $ unLoc $5) ; let (twhere, (openc, closec, semis)) = fst $ unLoc $5 ; let anns = AnnClsInstDecl (epTok $1) twhere openc semis closec ; let cid = ClsInstDecl { cid_ext = ($2, anns, NoAnnSortKey) , cid_poly_ty = $4, cid_binds = binds , cid_sigs = mkClassOpSigs sigs , cid_tyfam_insts = ats , cid_overlap_mode = $3 , cid_datafam_insts = adts } ; amsA' (L (comb3 $1 $4 $5) (ClsInstD { cid_d_ext = noExtField, cid_inst = cid })) } } -- type instance declarations | 'type' 'instance' ty_fam_inst_eqn {% 
mkTyFamInst (comb2 $1 $3) (unLoc $3) (epTok $1) (epTok $2) } -- data/newtype instance declaration | data_or_newtype 'instance' capi_ctype datafam_inst_hdr constrs maybe_derivings {% do { let { (tdata, tnewtype) = fst $ unLoc $1 } ; let { tequal = fst $ unLoc $5 } ; mkDataFamInst (comb4 $1 $4 $5 $6) (snd $ unLoc $1) $3 (unLoc $4) Nothing (reverse (snd $ unLoc $5)) (fmap reverse $6) (AnnDataDefn [] [] NoEpTok tnewtype tdata (epTok $2) NoEpUniTok NoEpTok NoEpTok NoEpTok tequal)}} -- GADT instance declaration | data_or_newtype 'instance' capi_ctype datafam_inst_hdr opt_kind_sig gadt_constrlist maybe_derivings {% do { let { (tdata, tnewtype) = fst $ unLoc $1 } ; let { dcolon = fst $ unLoc $5 } ; let { (twhere, oc, cc) = fst $ unLoc $6 } ; mkDataFamInst (comb4 $1 $4 $6 $7) (snd $ unLoc $1) $3 (unLoc $4) (snd $ unLoc $5) (snd $ unLoc $6) (fmap reverse $7) (AnnDataDefn [] [] NoEpTok tnewtype tdata (epTok $2) dcolon twhere oc cc NoEpTok)}} overlap_pragma :: { Maybe (LocatedP OverlapMode) } : '{-# OVERLAPPABLE' '#-}' {% fmap Just $ amsr (sLL $1 $> (Overlappable (getOVERLAPPABLE_PRAGs $1))) (AnnPragma (glR $1) (epTok $2) noAnn noAnn noAnn noAnn noAnn) } | '{-# OVERLAPPING' '#-}' {% fmap Just $ amsr (sLL $1 $> (Overlapping (getOVERLAPPING_PRAGs $1))) (AnnPragma (glR $1) (epTok $2) noAnn noAnn noAnn noAnn noAnn) } | '{-# OVERLAPS' '#-}' {% fmap Just $ amsr (sLL $1 $> (Overlaps (getOVERLAPS_PRAGs $1))) (AnnPragma (glR $1) (epTok $2) noAnn noAnn noAnn noAnn noAnn) } | '{-# INCOHERENT' '#-}' {% fmap Just $ amsr (sLL $1 $> (Incoherent (getINCOHERENT_PRAGs $1))) (AnnPragma (glR $1) (epTok $2) noAnn noAnn noAnn noAnn noAnn) } | {- empty -} { Nothing } deriv_strategy_no_via :: { LDerivStrategy GhcPs } : 'stock' {% amsA' (sL1 $1 (StockStrategy (epTok $1))) } | 'anyclass' {% amsA' (sL1 $1 (AnyclassStrategy (epTok $1))) } | 'newtype' {% amsA' (sL1 $1 (NewtypeStrategy (epTok $1))) } deriv_strategy_via :: { LDerivStrategy GhcPs } : 'via' sigktype {% amsA' (sLL $1 $> (ViaStrategy 
(XViaStrategyPs (epTok $1) $2))) } deriv_standalone_strategy :: { Maybe (LDerivStrategy GhcPs) } : 'stock' {% fmap Just $ amsA' (sL1 $1 (StockStrategy (epTok $1))) } | 'anyclass' {% fmap Just $ amsA' (sL1 $1 (AnyclassStrategy (epTok $1))) } | 'newtype' {% fmap Just $ amsA' (sL1 $1 (NewtypeStrategy (epTok $1))) } | deriv_strategy_via { Just $1 } | {- empty -} { Nothing } -- Optional class reference for default declarations opt_class :: { Maybe (LIdP GhcPs) } : {- empty -} { Nothing } | qtycon {% fmap Just $ amsA' (reLoc $1) } -- Injective type families opt_injective_info :: { Located (EpToken "|", Maybe (LInjectivityAnn GhcPs)) } : {- empty -} { noLoc (noAnn, Nothing) } | '|' injectivity_cond { sLL $1 $> ((epTok $1) , Just ($2)) } injectivity_cond :: { LInjectivityAnn GhcPs } : tyvarid '->' inj_varids {% amsA' (sLL $1 $> (InjectivityAnn (epUniTok $2) $1 (reverse (unLoc $3)))) } inj_varids :: { Located [LocatedN RdrName] } : inj_varids tyvarid { sLL $1 $> ($2 : unLoc $1) } | tyvarid { sL1 $1 [$1] } -- Closed type families where_type_family :: { Located ((EpToken "where", (EpToken "{", EpToken "..", EpToken "}")),FamilyInfo GhcPs) } : {- empty -} { noLoc (noAnn,OpenTypeFamily) } | 'where' ty_fam_inst_eqn_list { sLL $1 $> ((epTok $1,(fst $ unLoc $2)) ,ClosedTypeFamily (fmap reverse $ snd $ unLoc $2)) } ty_fam_inst_eqn_list :: { Located ((EpToken "{", EpToken "..", EpToken "}"),Maybe [LTyFamInstEqn GhcPs]) } : '{' ty_fam_inst_eqns '}' { sLL $1 $> ((epTok $1,noAnn, epTok $3) ,Just (unLoc $2)) } | vocurly ty_fam_inst_eqns close { let (L loc _) = $2 in L loc (noAnn,Just (unLoc $2)) } | '{' '..' '}' { sLL $1 $> ((epTok $1,epTok $2 ,epTok $3),Nothing) } | vocurly '..' 
close { let (L loc _) = $2 in L loc ((noAnn,epTok $2, noAnn),Nothing) } ty_fam_inst_eqns :: { Located [LTyFamInstEqn GhcPs] } : ty_fam_inst_eqns ';' ty_fam_inst_eqn {% let (L loc eqn) = $3 in case unLoc $1 of [] -> return (sLL $1 $> (L loc eqn : unLoc $1)) (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (sLL $1 $> ($3 : h' : t)) } | ty_fam_inst_eqns ';' {% case unLoc $1 of [] -> return (sLZ $1 $> (unLoc $1)) (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (sLZ $1 $> (h':t)) } | ty_fam_inst_eqn { sLL $1 $> [$1] } | {- empty -} { noLoc [] } ty_fam_inst_eqn :: { LTyFamInstEqn GhcPs } : 'forall' tv_bndrs '.' type '=' ktype {% do { hintExplicitForall $1 ; tvbs <- fromSpecTyVarBndrs $2 ; let loc = comb2 $1 $> ; !cs <- getCommentsFor loc ; mkTyFamInstEqn loc (mkHsOuterExplicit (EpAnn (glEE $1 $3) (epUniTok $1, epTok $3) cs) tvbs) $4 $6 (epTok $5) }} | type '=' ktype {% mkTyFamInstEqn (comb2 $1 $>) mkHsOuterImplicit $1 $3 (epTok $2) } -- Note the use of type for the head; this allows -- infix type constructors and type patterns -- Associated type family declarations -- -- * They have a different syntax than on the toplevel (no family special -- identifier). -- -- * They also need to be separate from instances; otherwise, data family -- declarations without a kind signature cause parsing conflicts with empty -- data declarations. 
-- at_decl_cls :: { LHsDecl GhcPs } : -- data family declarations, with optional 'family' keyword 'data' opt_family type opt_datafam_kind_sig {% do { let { tdcolon = fst $ unLoc $4 } ; liftM mkTyClD (mkFamDecl (comb3 $1 $3 $4) DataFamily NotTopLevel $3 (snd $ unLoc $4) Nothing (AnnFamilyDecl [] [] noAnn (epTok $1) $2 tdcolon noAnn noAnn noAnn noAnn noAnn noAnn)) }} -- type family declarations, with optional 'family' keyword -- (can't use opt_instance because you get shift/reduce errors | 'type' type opt_at_kind_inj_sig {% do { let { (tdcolon, tequal, tvbar) = fst $ unLoc $3 } ; liftM mkTyClD (mkFamDecl (comb3 $1 $2 $3) OpenTypeFamily NotTopLevel $2 (fst . snd $ unLoc $3) (snd . snd $ unLoc $3) (AnnFamilyDecl [] [] (epTok $1) noAnn noAnn tdcolon tequal tvbar noAnn noAnn noAnn noAnn)) }} | 'type' 'family' type opt_at_kind_inj_sig {% do { let { (tdcolon, tequal, tvbar) = fst $ unLoc $4 } ; liftM mkTyClD (mkFamDecl (comb3 $1 $3 $4) OpenTypeFamily NotTopLevel $3 (fst . snd $ unLoc $4) (snd . 
snd $ unLoc $4) (AnnFamilyDecl [] [] (epTok $1) noAnn (epTok $2) tdcolon tequal tvbar noAnn noAnn noAnn noAnn)) }} -- default type instances, with optional 'instance' keyword | 'type' ty_fam_inst_eqn {% liftM mkInstD (mkTyFamInst (comb2 $1 $2) (unLoc $2) (epTok $1) NoEpTok) } | 'type' 'instance' ty_fam_inst_eqn {% liftM mkInstD (mkTyFamInst (comb2 $1 $3) (unLoc $3) (epTok $1) (epTok $2) )} opt_family :: { EpToken "family" } : {- empty -} { noAnn } | 'family' { (epTok $1) } opt_instance :: { EpToken "instance" } : {- empty -} { NoEpTok } | 'instance' { epTok $1 } -- Associated type instances -- at_decl_inst :: { LInstDecl GhcPs } -- type instance declarations, with optional 'instance' keyword : 'type' opt_instance ty_fam_inst_eqn -- Note the use of type for the head; this allows -- infix type constructors and type patterns {% mkTyFamInst (comb2 $1 $3) (unLoc $3) (epTok $1) $2 } -- data/newtype instance declaration, with optional 'instance' keyword | data_or_newtype opt_instance capi_ctype datafam_inst_hdr constrs maybe_derivings {% do { let { (tdata, tnewtype) = fst $ unLoc $1 } ; let { tequal = fst $ unLoc $5 } ; mkDataFamInst (comb4 $1 $4 $5 $6) (snd $ unLoc $1) $3 (unLoc $4) Nothing (reverse (snd $ unLoc $5)) (fmap reverse $6) (AnnDataDefn [] [] NoEpTok tnewtype tdata $2 NoEpUniTok NoEpTok NoEpTok NoEpTok tequal)}} -- GADT instance declaration, with optional 'instance' keyword | data_or_newtype opt_instance capi_ctype datafam_inst_hdr opt_kind_sig gadt_constrlist maybe_derivings {% do { let { (tdata, tnewtype) = fst $ unLoc $1 } ; let { dcolon = fst $ unLoc $5 } ; let { (twhere, oc, cc) = fst $ unLoc $6 } ; mkDataFamInst (comb4 $1 $4 $6 $7) (snd $ unLoc $1) $3 (unLoc $4) (snd $ unLoc $5) (snd $ unLoc $6) (fmap reverse $7) (AnnDataDefn [] [] NoEpTok tnewtype tdata $2 dcolon twhere oc cc NoEpTok)}} type_data_or_newtype :: { Located ((EpToken "data", EpToken "newtype", EpToken "type") , Bool, NewOrData) } : 'data' { sL1 $1 ((epTok $1, NoEpTok, NoEpTok), 
False,DataType) } | 'newtype' { sL1 $1 ((NoEpTok, epTok $1, NoEpTok), False,NewType) } | 'type' 'data' { sL1 $1 ((epTok $2, NoEpTok, epTok $1), True ,DataType) } data_or_newtype :: { Located ((EpToken "data", EpToken "newtype"), NewOrData) } : 'data' { sL1 $1 ((epTok $1, NoEpTok), DataType) } | 'newtype' { sL1 $1 ((NoEpTok, epTok $1),NewType) } -- Family result/return kind signatures opt_kind_sig :: { Located (TokDcolon, Maybe (LHsKind GhcPs)) } : { noLoc (NoEpUniTok , Nothing) } | '::' kind { sLL $1 $> (epUniTok $1, Just $2) } opt_datafam_kind_sig :: { Located (TokDcolon, LFamilyResultSig GhcPs) } : { noLoc (noAnn, noLocA (NoSig noExtField) )} | '::' kind { sLL $1 $> (epUniTok $1, sLLa $1 $> (KindSig noExtField $2))} opt_tyfam_kind_sig :: { Located ((TokDcolon, EpToken "="), LFamilyResultSig GhcPs) } : { noLoc (noAnn , noLocA (NoSig noExtField) )} | '::' kind { sLL $1 $> ((epUniTok $1, noAnn), sLLa $1 $> (KindSig noExtField $2))} | '=' tv_bndr {% do { tvb <- fromSpecTyVarBndr $2 ; return $ sLL $1 $> ((noAnn, epTok $1), sLLa $1 $> (TyVarSig noExtField tvb))} } opt_at_kind_inj_sig :: { Located ((TokDcolon, EpToken "=", EpToken "|"), ( LFamilyResultSig GhcPs , Maybe (LInjectivityAnn GhcPs)))} : { noLoc (noAnn, (noLocA (NoSig noExtField), Nothing)) } | '::' kind { sLL $1 $> ( (epUniTok $1, noAnn, noAnn) , (sL1a $> (KindSig noExtField $2), Nothing)) } | '=' tv_bndr_no_braces '|' injectivity_cond {% do { tvb <- fromSpecTyVarBndr $2 ; return $ sLL $1 $> ((noAnn, epTok $1, epTok $3) , (sLLa $1 $2 (TyVarSig noExtField tvb), Just $4))} } -- tycl_hdr parses the header of a class or data type decl, -- which takes the form -- T a b -- Eq a => T a -- (Eq a, Ord b) => T a b -- T Int [a] -- for associated types -- Rather a lot of inlining here, else we get reduce/reduce errors tycl_hdr :: { Located (Maybe (LHsContext GhcPs), LHsType GhcPs) } : context '=>' type {% acs (comb2 $1 $>) (\loc cs -> (L loc (Just (addTrailingDarrowC $1 $2 cs), $3))) } | type { sL1 $1 (Nothing, $1) } 
datafam_inst_hdr :: { Located (Maybe (LHsContext GhcPs), HsOuterFamEqnTyVarBndrs GhcPs, LHsType GhcPs) } : 'forall' tv_bndrs '.' context '=>' type {% hintExplicitForall $1 >> fromSpecTyVarBndrs $2 >>= \tvbs -> (acs (comb2 $1 $>) (\loc cs -> (L loc (Just ( addTrailingDarrowC $4 $5 cs) , mkHsOuterExplicit (EpAnn (glEE $1 $3) (epUniTok $1, epTok $3) emptyComments) tvbs, $6)))) } | 'forall' tv_bndrs '.' type {% do { hintExplicitForall $1 ; tvbs <- fromSpecTyVarBndrs $2 ; let loc = comb2 $1 $> ; !cs <- getCommentsFor loc ; return (sL loc (Nothing, mkHsOuterExplicit (EpAnn (glEE $1 $3) (epUniTok $1, epTok $3) cs) tvbs, $4)) } } | context '=>' type {% acs (comb2 $1 $>) (\loc cs -> (L loc (Just (addTrailingDarrowC $1 $2 cs), mkHsOuterImplicit, $3))) } | type { sL1 $1 (Nothing, mkHsOuterImplicit, $1) } capi_ctype :: { Maybe (LocatedP CType) } capi_ctype : '{-# CTYPE' STRING STRING '#-}' {% fmap Just $ amsr (sLL $1 $> (CType (getCTYPEs $1) (Just (Header (getSTRINGs $2) (getSTRING $2))) (getSTRINGs $3,getSTRING $3))) (AnnPragma (glR $1) (epTok $4) noAnn (glR $2) (glR $3) noAnn noAnn) } | '{-# CTYPE' STRING '#-}' {% fmap Just $ amsr (sLL $1 $> (CType (getCTYPEs $1) Nothing (getSTRINGs $2, getSTRING $2))) (AnnPragma (glR $1) (epTok $3) noAnn noAnn (glR $2) noAnn noAnn) } | { Nothing } ----------------------------------------------------------------------------- -- Stand-alone deriving -- Glasgow extension: stand-alone deriving declarations stand_alone_deriving :: { LDerivDecl GhcPs } : 'deriving' deriv_standalone_strategy 'instance' maybe_warning_pragma overlap_pragma inst_type {% do { let { err = text "in the stand-alone deriving instance" <> colon <+> quotes (ppr $6) } ; amsA' (sLL $1 $> (DerivDecl ($4, (epTok $1, epTok $3)) (mkHsWildCardBndrs $6) $2 $5)) }} ----------------------------------------------------------------------------- -- Role annotations role_annot :: { LRoleAnnotDecl GhcPs } role_annot : 'type' 'role' oqtycon maybe_roles {% mkRoleAnnotDecl (comb3 $1 $4 $3) 
$3 (reverse (unLoc $4)) (epTok $1,epTok $2) } -- Reversed! maybe_roles :: { Located [Located (Maybe FastString)] } maybe_roles : {- empty -} { noLoc [] } | roles { $1 } roles :: { Located [Located (Maybe FastString)] } roles : role { sLL $1 $> [$1] } | roles role { sLL $1 $> $ $2 : unLoc $1 } -- read it in as a varid for better error messages role :: { Located (Maybe FastString) } role : VARID { sL1 $1 $ Just $ getVARID $1 } | '_' { sL1 $1 Nothing } -- Pattern synonyms -- Glasgow extension: pattern synonyms pattern_synonym_decl :: { LHsDecl GhcPs } : 'pattern' pattern_synonym_lhs '=' pat_syn_pat {% let (name, args, (mo, mc) ) = $2 in amsA' (sLL $1 $> . ValD noExtField $ mkPatSynBind name args $4 ImplicitBidirectional (AnnPSB (epTok $1) mo mc Nothing (Just (epTok $3)))) } | 'pattern' pattern_synonym_lhs '<-' pat_syn_pat {% let (name, args, (mo,mc)) = $2 in amsA' (sLL $1 $> . ValD noExtField $ mkPatSynBind name args $4 Unidirectional (AnnPSB (epTok $1) mo mc (Just (epUniTok $3)) Nothing)) } | 'pattern' pattern_synonym_lhs '<-' pat_syn_pat where_decls {% do { let (name, args, (mo,mc)) = $2 ; mg <- mkPatSynMatchGroup name $5 ; amsA' (sLL $1 $> . 
ValD noExtField $ mkPatSynBind name args $4 (ExplicitBidirectional mg) (AnnPSB (epTok $1) mo mc (Just (epUniTok $3)) Nothing)) }} pattern_synonym_lhs :: { (LocatedN RdrName, HsPatSynDetails GhcPs, (Maybe (EpToken "{"), Maybe (EpToken "}"))) } : con vars0 { ($1, PrefixCon noTypeArgs $2, noAnn) } | varid conop varid { ($2, InfixCon $1 $3, noAnn) } | con '{' cvars1 '}' { ($1, RecCon $3, (Just (epTok $2), Just (epTok $4))) } vars0 :: { [LocatedN RdrName] } : {- empty -} { [] } | varid vars0 { $1 : $2 } cvars1 :: { [RecordPatSynField GhcPs] } : var { [RecordPatSynField (mkFieldOcc $1) $1] } | var ',' cvars1 {% do { h <- addTrailingCommaN $1 (gl $2) ; return ((RecordPatSynField (mkFieldOcc h) h) : $3 )}} where_decls :: { LocatedLW (OrdList (LHsDecl GhcPs)) } : 'where' '{' decls '}' {% amsr (sLL $1 $> (thdOf3 $ unLoc $3)) (AnnList (Just (fstOf3 $ unLoc $3)) (ListBraces (epTok $2) (epTok $4)) (sndOf3 $ unLoc $3) (epTok $1) []) } | 'where' vocurly decls close {% amsr (sLL $1 $3 (thdOf3 $ unLoc $3)) (AnnList (Just (fstOf3 $ unLoc $3)) ListNone (sndOf3 $ unLoc $3) (epTok $1) []) } pattern_synonym_sig :: { LSig GhcPs } : 'pattern' con_list '::' sigtype {% amsA' (sLL $1 $> $ PatSynSig (AnnSig (epUniTok $3) (Just (epTok $1)) Nothing) (toList $ unLoc $2) $4) } qvarcon :: { LocatedN RdrName } : qvar { $1 } | qcon { $1 } ----------------------------------------------------------------------------- -- Nested declarations -- Declaration in class bodies -- decl_cls :: { LHsDecl GhcPs } decl_cls : at_decl_cls { $1 } | decl { $1 } -- A 'default' signature used with the generic-programming extension | 'default' infixexp '::' sigtype {% runPV (unECP $2) >>= \ $2 -> do { v <- checkValSigLhs $2 ; let err = text "in default signature" <> colon <+> quotes (ppr $2) ; amsA' (sLL $1 $> $ SigD noExtField $ ClassOpSig (AnnSig (epUniTok $3) Nothing (Just (epTok $1))) True [v] $4) }} decls_cls :: { Located ([EpToken ";"],OrdList (LHsDecl GhcPs)) } -- Reversed : decls_cls ';' decl_cls {% if isNilOL 
(snd $ unLoc $1) then return (sLL $1 $> ((fst $ unLoc $1) ++ [mzEpTok $2] , unitOL $3)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs t' `appOL` unitOL $3)) } | decls_cls ';' {% if isNilOL (snd $ unLoc $1) then return (sLZ $1 $> ( (fst $ unLoc $1) ++ [mzEpTok $2] ,snd $ unLoc $1)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (sLZ $1 $> (fst $ unLoc $1 , snocOL hs t')) } | decl_cls { sL1 $1 ([], unitOL $1) } | {- empty -} { noLoc ([],nilOL) } decllist_cls :: { Located ((EpToken "{", [EpToken ";"], EpToken "}") , OrdList (LHsDecl GhcPs) , EpLayout) } -- Reversed : '{' decls_cls '}' { sLL $1 $> ((epTok $1, fst $ unLoc $2, epTok $3) ,snd $ unLoc $2, epExplicitBraces $1 $3) } | vocurly decls_cls close { let { L l (anns, decls) = $2 } in L l ((NoEpTok, anns, NoEpTok), decls, EpVirtualBraces (getVOCURLY $1)) } -- Class body -- where_cls :: { Located ((EpToken "where", (EpToken "{", [EpToken ";"], EpToken "}")) ,(OrdList (LHsDecl GhcPs)) -- Reversed ,EpLayout) } -- No implicit parameters -- May have type declarations : 'where' decllist_cls { sLL $1 $> ((epTok $1,fstOf3 $ unLoc $2) ,sndOf3 $ unLoc $2,thdOf3 $ unLoc $2) } | {- empty -} { noLoc ((noAnn, noAnn),nilOL,EpNoLayout) } -- Declarations in instance bodies -- decl_inst :: { Located (OrdList (LHsDecl GhcPs)) } decl_inst : at_decl_inst { sL1 $1 (unitOL (sL1a $1 (InstD noExtField (unLoc $1)))) } | decl { sL1 $1 (unitOL $1) } decls_inst :: { Located ([EpToken ";"],OrdList (LHsDecl GhcPs)) } -- Reversed : decls_inst ';' decl_inst {% if isNilOL (snd $ unLoc $1) then return (sLL $1 $> ((fst $ unLoc $1) ++ [mzEpTok $2] , unLoc $3)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs t' `appOL` unLoc $3)) } | decls_inst ';' {% if isNilOL (snd $ unLoc $1) then return (sLZ $1 $> ((fst $ unLoc $1) ++ [mzEpTok $2] ,snd $ unLoc 
$1)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (sLZ $1 $> (fst $ unLoc $1 , snocOL hs t')) } | decl_inst { sL1 $1 ([],unLoc $1) } | {- empty -} { noLoc ([],nilOL) } decllist_inst :: { Located ((EpToken "{", EpToken "}", [EpToken ";"]) , OrdList (LHsDecl GhcPs)) } -- Reversed : '{' decls_inst '}' { sLL $1 $> ((epTok $1,epTok $3,fst $ unLoc $2),snd $ unLoc $2) } | vocurly decls_inst close { L (gl $2) ((noAnn,noAnn,fst $ unLoc $2),snd $ unLoc $2) } -- Instance body -- where_inst :: { Located ((EpToken "where", (EpToken "{", EpToken "}", [EpToken ";"])) , OrdList (LHsDecl GhcPs)) } -- Reversed -- No implicit parameters -- May have type declarations : 'where' decllist_inst { sLL $1 $> ((epTok $1,(fst $ unLoc $2)) ,snd $ unLoc $2) } | {- empty -} { noLoc (noAnn,nilOL) } -- Declarations in binding groups other than classes and instances -- decls :: { Located (EpaLocation, [EpToken ";"], OrdList (LHsDecl GhcPs)) } : decls ';' decl {% if isNilOL (thdOf3 $ unLoc $1) then return (sLL $2 $> (glR $3, (sndOf3 $ unLoc $1) ++ (msemiA $2) , unitOL $3)) else case (thdOf3 $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) let { this = unitOL $3; rest = snocOL hs t'; these = rest `appOL` this } return (rest `seq` this `seq` these `seq` (sLL $1 $> (glEE (fstOf3 $ unLoc $1) $3, sndOf3 $ unLoc $1, these))) } | decls ';' {% if isNilOL (thdOf3 $ unLoc $1) then return (sLZ $1 $> (glR $2, (sndOf3 $ unLoc $1) ++ (msemiA $2) ,thdOf3 $ unLoc $1)) else case (thdOf3 $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (sLZ $1 $> (glEEz $1 $2, sndOf3 $ unLoc $1, snocOL hs t')) } | decl { sL1 $1 (glR $1, [], unitOL $1) } | {- empty -} { noLoc (noAnn, [],nilOL) } decllist :: { Located (AnnList (),Located (OrdList (LHsDecl GhcPs))) } : '{' decls '}' { sLL $1 $> (AnnList (Just (fstOf3 $ unLoc $2)) (ListBraces (epTok $1) (epTok $3)) (sndOf3 $ unLoc $2) noAnn [] ,sL1 $2 $ thdOf3 $ unLoc $2) } | vocurly decls close 
{ sL1 $2 (AnnList (Just (fstOf3 $ unLoc $2)) ListNone (sndOf3 $ unLoc $2) noAnn [] ,sL1 $2 $ thdOf3 $ unLoc $2) } -- Binding groups other than those of class and instance declarations -- binds :: { Located (HsLocalBinds GhcPs) } -- May have implicit parameters -- No type declarations : decllist {% do { let { (AnnList anc p s _ t, decls) = unLoc $1 } ; val_binds <- cvBindGroup (unLoc $ decls) ; !cs <- getCommentsFor (gl $1) ; return (sL1 $1 $ HsValBinds (EpAnn (glR $1) (AnnList anc p s noAnn t) cs) val_binds)} } | '{' dbinds '}' {% acs (comb3 $1 $2 $3) (\loc cs -> (L loc $ HsIPBinds (EpAnn (spanAsAnchor (comb3 $1 $2 $3)) (AnnList (Just$ glR $2) (ListBraces (epTok $1) (epTok $3)) [] noAnn []) cs) (IPBinds noExtField (reverse $ unLoc $2)))) } | vocurly dbinds close {% acs (gl $2) (\loc cs -> (L loc $ HsIPBinds (EpAnn (glR $1) (AnnList (Just $ glR $2) ListNone [] noAnn []) cs) (IPBinds noExtField (reverse $ unLoc $2)))) } wherebinds :: { Maybe (Located (HsLocalBinds GhcPs, Maybe EpAnnComments )) } -- May have implicit parameters -- No type declarations : 'where' binds {% do { r <- acs (comb2 $1 $>) (\loc cs -> (L loc (annBinds (epTok $1) cs (unLoc $2)))) ; return $ Just r} } | {- empty -} { Nothing } ----------------------------------------------------------------------------- -- Transformation Rules rules :: { [LRuleDecl GhcPs] } -- Reversed : rules ';' rule {% case $1 of [] -> return ($3:$1) (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return ($3:h':t) } | rules ';' {% case $1 of [] -> return $1 (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (h':t) } | rule { [$1] } | {- empty -} { [] } rule :: { LRuleDecl GhcPs } : STRING rule_activation rule_foralls infixexp '=' exp {%runPV (unECP $4) >>= \ $4 -> runPV (unECP $6) >>= \ $6 -> amsA' (sLL $1 $> $ HsRule { rd_ext =(((fstOf3 $3) (epTok $5) (fst $2)), getSTRINGs $1) , rd_name = L (noAnnSrcSpan $ gl $1) (getSTRING $1) , rd_act = (snd $2) `orElse` AlwaysActive , rd_tyvs = sndOf3 $3, rd_tmvs = thdOf3 $3 , rd_lhs 
= $4, rd_rhs = $6 }) } -- Rules can be specified to be NeverActive, unlike inline/specialize pragmas rule_activation :: { (ActivationAnn, Maybe Activation) } -- See Note [%shift: rule_activation -> {- empty -}] : {- empty -} %shift { (noAnn, Nothing) } | rule_explicit_activation { (fst $1,Just (snd $1)) } -- This production is used to parse the tilde syntax in pragmas such as -- * {-# INLINE[~2] ... #-} -- * {-# SPECIALISE [~ 001] ... #-} -- * {-# RULES ... [~0] ... g #-} -- Note that it can be written either -- without a space [~1] (the PREFIX_TILDE case), or -- with a space [~ 1] (the VARSYM case). -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer rule_activation_marker :: { (Maybe (EpToken "~")) } : PREFIX_TILDE { (Just (epTok $1)) } | VARSYM {% if (getVARSYM $1 == fsLit "~") then return (Just (epTok $1)) else do { addError $ mkPlainErrorMsgEnvelope (getLoc $1) $ PsErrInvalidRuleActivationMarker ; return Nothing } } rule_explicit_activation :: { ( ActivationAnn , Activation) } -- In brackets : '[' INTEGER ']' { ( ActivationAnn (epTok $1) (epTok $3) Nothing (Just (glR $2)) , ActiveAfter (getINTEGERs $2) (fromInteger (il_value (getINTEGER $2)))) } | '[' rule_activation_marker INTEGER ']' { ( ActivationAnn (epTok $1) (epTok $4) $2 (Just (glR $3)) , ActiveBefore (getINTEGERs $3) (fromInteger (il_value (getINTEGER $3)))) } | '[' rule_activation_marker ']' { ( ActivationAnn (epTok $1) (epTok $3) $2 Nothing , NeverActive) } rule_foralls :: { (EpToken "=" -> ActivationAnn -> HsRuleAnn, Maybe [LHsTyVarBndr () GhcPs], [LRuleBndr GhcPs]) } : 'forall' rule_vars '.' 'forall' rule_vars '.' {% let tyvs = mkRuleTyVarBndrs $2 in hintExplicitForall $1 >> checkRuleTyVarBndrNames (mkRuleTyVarBndrs $2) >> return (\an_eq an_act -> HsRuleAnn (Just (epUniTok $1,epTok $3)) (Just (epUniTok $4,epTok $6)) an_eq an_act, Just (mkRuleTyVarBndrs $2), mkRuleBndrs $5) } | 'forall' rule_vars '.' 
{ (\an_eq an_act -> HsRuleAnn Nothing (Just (epUniTok $1,epTok $3)) an_eq an_act, Nothing, mkRuleBndrs $2) } -- See Note [%shift: rule_foralls -> {- empty -}] | {- empty -} %shift { (\an_eq an_act -> HsRuleAnn Nothing Nothing an_eq an_act, Nothing, []) } rule_vars :: { [LRuleTyTmVar] } : rule_var rule_vars { $1 : $2 } | {- empty -} { [] } rule_var :: { LRuleTyTmVar } : varid { sL1a $1 (RuleTyTmVar noAnn $1 Nothing) } | '(' varid '::' ctype ')' {% amsA' (sLL $1 $> (RuleTyTmVar (AnnTyVarBndr [glR $1] [glR $5] noAnn (epUniTok $3)) $2 (Just $4))) } {- Note [Parsing explicit foralls in Rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We really want the above definition of rule_foralls to be: rule_foralls : 'forall' tv_bndrs '.' 'forall' rule_vars '.' | 'forall' rule_vars '.' | {- empty -} where rule_vars (term variables) can be named "family" or "role", but tv_vars (type variables) cannot be. However, such a definition results in a reduce/reduce conflict. For example, when parsing: > {-# RULE "name" forall a ... #-} before the '...' it is impossible to determine whether we should be in the first or second case of the above. This is resolved by using rule_vars (which is more general) for both, and ensuring that type-level quantified variables do not have the names "forall", "family", or "role" in the function 'checkRuleTyVarBndrNames' in GHC.Parser.PostProcess. Thus, whenever the definition of tyvarid (used for tv_bndrs) is changed relative to varid (used for rule_vars), 'checkRuleTyVarBndrNames' must be updated. -} ----------------------------------------------------------------------------- -- Warnings and deprecations (c.f. 
rules) maybe_warning_pragma :: { Maybe (LWarningTxt GhcPs) } : '{-# DEPRECATED' strings '#-}' {% fmap Just $ amsr (sLL $1 $> $ DeprecatedTxt (getDEPRECATED_PRAGs $1) (map stringLiteralToHsDocWst $ snd $ unLoc $2)) (AnnPragma (glR $1) (epTok $3) (fst $ unLoc $2) noAnn noAnn noAnn noAnn) } | '{-# WARNING' warning_category strings '#-}' {% fmap Just $ amsr (sLL $1 $> $ WarningTxt $2 (getWARNING_PRAGs $1) (map stringLiteralToHsDocWst $ snd $ unLoc $3)) (AnnPragma (glR $1) (epTok $4) (fst $ unLoc $3) noAnn noAnn noAnn noAnn)} | {- empty -} { Nothing } warning_category :: { Maybe (LocatedE InWarningCategory) } : 'in' STRING { Just (reLoc $ sLL $1 $> $ InWarningCategory (epTok $1) (getSTRINGs $2) (reLoc $ sL1 $2 $ mkWarningCategory (getSTRING $2))) } | {- empty -} { Nothing } warnings :: { OrdList (LWarnDecl GhcPs) } : warnings ';' warning {% if isNilOL $1 then return ($1 `appOL` $3) else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (snocOL hs t' `appOL` $3) } | warnings ';' {% if isNilOL $1 then return $1 else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (snocOL hs t') } | warning { $1 } | {- empty -} { nilOL } -- SUP: TEMPORARY HACK, not checking for `module Foo' warning :: { OrdList (LWarnDecl GhcPs) } : warning_category namespace_spec namelist strings {% fmap unitOL $ amsA' (L (comb4 $1 $2 $3 $4) (Warning (unLoc $2, fst $ unLoc $4) (unLoc $3) (WarningTxt $1 NoSourceText $ map stringLiteralToHsDocWst $ snd $ unLoc $4))) } namespace_spec :: { Located NamespaceSpecifier } : 'type' { sL1 $1 $ TypeNamespaceSpecifier (epTok $1) } | 'data' { sL1 $1 $ DataNamespaceSpecifier (epTok $1) } | {- empty -} { sL0 $ NoNamespaceSpecifier } deprecations :: { OrdList (LWarnDecl GhcPs) } : deprecations ';' deprecation {% if isNilOL $1 then return ($1 `appOL` $3) else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (snocOL hs t' `appOL` $3) } | deprecations ';' {% if isNilOL $1 then return $1 else case $1 of 
SnocOL hs t -> do t' <- addTrailingSemiA t (epTok $2) return (snocOL hs t') } | deprecation { $1 } | {- empty -} { nilOL } -- SUP: TEMPORARY HACK, not checking for `module Foo' deprecation :: { OrdList (LWarnDecl GhcPs) } : namespace_spec namelist strings {% fmap unitOL $ amsA' (sL (comb3 $1 $2 $>) $ (Warning (unLoc $1, fst $ unLoc $3) (unLoc $2) (DeprecatedTxt NoSourceText $ map stringLiteralToHsDocWst $ snd $ unLoc $3))) } strings :: { Located ((EpToken "[", EpToken "]"),[Located StringLiteral]) } : STRING { sL1 $1 (noAnn,[L (gl $1) (getStringLiteral $1)]) } | '[' stringlist ']' { sLL $1 $> $ ((epTok $1,epTok $3),fromOL (unLoc $2)) } stringlist :: { Located (OrdList (Located StringLiteral)) } : stringlist ',' STRING {% if isNilOL (unLoc $1) then return (sLL $1 $> (unLoc $1 `snocOL` (L (gl $3) (getStringLiteral $3)))) else case (unLoc $1) of SnocOL hs t -> do let { t' = addTrailingCommaS t (glR $2) } return (sLL $1 $> (snocOL hs t' `snocOL` (L (gl $3) (getStringLiteral $3)))) } | STRING { sLL $1 $> (unitOL (L (gl $1) (getStringLiteral $1))) } | {- empty -} { noLoc nilOL } ----------------------------------------------------------------------------- -- Annotations annotation :: { LHsDecl GhcPs } : '{-# ANN' name_var aexp '#-}' {% runPV (unECP $3) >>= \ $3 -> amsA' (sLL $1 $> (AnnD noExtField $ HsAnnotation (AnnPragma (glR $1) (epTok $4) noAnn noAnn noAnn noAnn noAnn, (getANN_PRAGs $1)) (ValueAnnProvenance $2) $3)) } | '{-# ANN' 'type' otycon aexp '#-}' {% runPV (unECP $4) >>= \ $4 -> amsA' (sLL $1 $> (AnnD noExtField $ HsAnnotation (AnnPragma (glR $1) (epTok $5) noAnn noAnn noAnn (epTok $2) noAnn, (getANN_PRAGs $1)) (TypeAnnProvenance $3) $4)) } | '{-# ANN' 'module' aexp '#-}' {% runPV (unECP $3) >>= \ $3 -> amsA' (sLL $1 $> (AnnD noExtField $ HsAnnotation (AnnPragma (glR $1) (epTok $4) noAnn noAnn noAnn noAnn (epTok $2), (getANN_PRAGs $1)) ModuleAnnProvenance $3)) } ----------------------------------------------------------------------------- -- Foreign import and 
export declarations fdecl :: { Located (EpToken "foreign" -> HsDecl GhcPs) } fdecl : 'import' callconv safety fspec {% mkImport $2 $3 (snd $ unLoc $4) (epTok $1, fst $ unLoc $4) >>= \i -> return (sLL $1 $> i) } | 'import' callconv fspec {% do { d <- mkImport $2 (noLoc PlaySafe) (snd $ unLoc $3) (epTok $1, fst $ unLoc $3); return (sLL $1 $> d) }} | 'export' callconv fspec {% mkExport $2 (snd $ unLoc $3) (epTok $1, fst $ unLoc $3) >>= \i -> return (sLL $1 $> i ) } callconv :: { Located CCallConv } : 'stdcall' { sLL $1 $> StdCallConv } | 'ccall' { sLL $1 $> CCallConv } | 'capi' { sLL $1 $> CApiConv } | 'prim' { sLL $1 $> PrimCallConv} | 'javascript' { sLL $1 $> JavaScriptCallConv } safety :: { Located Safety } : 'unsafe' { sLL $1 $> PlayRisky } | 'safe' { sLL $1 $> PlaySafe } | 'interruptible' { sLL $1 $> PlayInterruptible } fspec :: { Located (TokDcolon ,(Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs)) } : STRING var '::' sigtype { sLL $1 $> (epUniTok $3 ,(L (getLoc $1) (getStringLiteral $1), $2, $4)) } | var '::' sigtype { sLL $1 $> (epUniTok $2 ,(noLoc (StringLiteral NoSourceText nilFS Nothing), $1, $3)) } -- if the entity string is missing, it defaults to the empty string; -- the meaning of an empty entity string depends on the calling -- convention ----------------------------------------------------------------------------- -- Type signatures opt_sig :: { Maybe (EpUniToken "::" "∷", LHsType GhcPs) } : {- empty -} { Nothing } | '::' ctype { Just (epUniTok $1, $2) } opt_tyconsig :: { (Maybe (EpUniToken "::" "∷"), Maybe (LocatedN RdrName)) } : {- empty -} { (Nothing, Nothing) } | '::' gtycon { (Just (epUniTok $1), Just $2) } -- Like ktype, but for types that obey the forall-or-nothing rule. -- See Note [forall-or-nothing rule] in GHC.Hs.Type. 
sigktype :: { LHsSigType GhcPs } : sigtype { $1 } | ctype '::' kind {% amsA' (sLL $1 $> $ mkHsImplicitSigType $ sLLa $1 $> $ HsKindSig (epUniTok $2) $1 $3) } -- Like ctype, but for types that obey the forall-or-nothing rule. -- See Note [forall-or-nothing rule] in GHC.Hs.Type. To avoid duplicating the -- logic in ctype here, we simply reuse the ctype production and perform -- surgery on the LHsType it returns to turn it into an LHsSigType. sigtype :: { LHsSigType GhcPs } : ctype { hsTypeToHsSigType $1 } sig_vars :: { Located [LocatedN RdrName] } -- Returned in reversed order : sig_vars ',' var {% case unLoc $1 of [] -> return (sLL $1 $> ($3 : unLoc $1)) (h:t) -> do h' <- addTrailingCommaN h (gl $2) return (sLL $1 $> ($3 : h' : t)) } | var { sL1 $1 [$1] } sigtypes1 :: { OrdList (LHsSigType GhcPs) } : sigtype { unitOL $1 } | sigtype ',' sigtypes1 {% do { st <- addTrailingCommaA $1 (epTok $2) ; return $ unitOL st `appOL` $3 } } ----------------------------------------------------------------------------- -- Types unpackedness :: { Located UnpackednessPragma } : '{-# UNPACK' '#-}' { sLL $1 $> (UnpackednessPragma (glR $1, epTok $2) (getUNPACK_PRAGs $1) SrcUnpack) } | '{-# NOUNPACK' '#-}' { sLL $1 $> (UnpackednessPragma (glR $1, epTok $2) (getNOUNPACK_PRAGs $1) SrcNoUnpack) } forall_telescope :: { Located (HsForAllTelescope GhcPs) } : 'forall' tv_bndrs '.' 
{% do { hintExplicitForall $1 ; acs (comb2 $1 $>) (\loc cs -> (L loc $ mkHsForAllInvisTele (EpAnn (glEE $1 $>) (epUniTok $1,epTok $3) cs) $2 )) }} | 'forall' tv_bndrs '->' {% do { hintExplicitForall $1 ; req_tvbs <- fromSpecTyVarBndrs $2 ; acs (comb2 $1 $>) (\loc cs -> (L loc $ mkHsForAllVisTele (EpAnn (glEE $1 $>) (epUniTok $1,epUniTok $3) cs) req_tvbs )) }} -- A ktype is a ctype, possibly with a kind annotation ktype :: { LHsType GhcPs } : ctype { $1 } | ctype '::' kind {% amsA' (sLL $1 $> $ HsKindSig (epUniTok $2) $1 $3) } -- A ctype is a for-all type ctype :: { LHsType GhcPs } : forall_telescope ctype { sLLa $1 $> $ HsForAllTy { hst_tele = unLoc $1 , hst_xforall = noExtField , hst_body = $2 } } | context '=>' ctype {% acsA (comb2 $1 $>) (\loc cs -> (L loc $ HsQualTy { hst_ctxt = addTrailingDarrowC $1 $2 cs , hst_xqual = NoExtField , hst_body = $3 })) } | ipvar '::' ctype {% amsA' (sLL $1 $> (HsIParamTy (epUniTok $2) (reLoc $1) $3)) } | type { $1 } ---------------------- -- Notes for 'context' -- We parse a context as a btype so that we don't get reduce/reduce -- errors in ctype. The basic problem is that -- (Eq a, Ord a) -- looks so much like a tuple type. We can't tell until we find the => context :: { LHsContext GhcPs } : btype {% checkContext $1 } expcontext :: {forall b. DisambECP b => PV (LocatedC [LocatedA b])} : infixexp { unECP $1 >>= \ $1 -> checkContextPV $1 } {- Note [GADT decl discards annotations] ~~~~~~~~~~~~~~~~~~~~~ The type production for btype `->` ctype add the AnnRarrow annotation twice, in different places. This is because if the type is processed as usual, it belongs on the annotations for the type as a whole. But if the type is passed to mkGadtDecl, it discards the top level SrcSpan, and the top-level annotation will be disconnected. Hence for this specific case it is connected to the first type too. 
-} type :: { LHsType GhcPs } -- See Note [%shift: type -> btype] : btype %shift { $1 } | btype '->' ctype {% amsA' (sLL $1 $> $ HsFunTy noExtField (HsUnrestrictedArrow (epUniTok $2)) $1 $3) } | btype mult '->' ctype {% hintLinear (getLoc $2) >> let arr = (unLoc $2) (epUniTok $3) in amsA' (sLL $1 $> $ HsFunTy noExtField arr $1 $4) } | btype '->.' ctype {% hintLinear (getLoc $2) >> amsA' (sLL $1 $> $ HsFunTy noExtField (HsLinearArrow (EpLolly (epTok $2))) $1 $3) } mult :: { Located (EpUniToken "->" "\8594" -> HsArrow GhcPs) } : PREFIX_PERCENT atype { sLL $1 $> (mkMultTy (epTok $1) $2) } expmult :: { forall b. DisambECP b => PV (Located (EpUniToken "->" "\8594" -> HsArrowOf (LocatedA b) GhcPs)) } expmult : PREFIX_PERCENT aexp { unECP $2 >>= \ $2 -> fmap (sLL $1 $>) (mkHsMultPV (epTok $1) $2) } btype :: { LHsType GhcPs } : infixtype {% runPV $1 } infixtype :: { forall b. DisambTD b => PV (LocatedA b) } -- See Note [%shift: infixtype -> ftype] : ftype %shift { $1 } | ftype tyop infixtype { $1 >>= \ $1 -> $3 >>= \ $3 -> do { let (op, prom) = $2 ; when (looksLikeMult $1 op $3) $ hintLinear (getLocA op) ; mkHsOpTyPV prom $1 op $3 } } | unpackedness infixtype { $2 >>= \ $2 -> mkUnpackednessPV $1 $2 } ftype :: { forall b. 
DisambTD b => PV (LocatedA b) } : atype { mkHsAppTyHeadPV $1 } | tyop { failOpFewArgs (fst $1) } | ftype tyarg { $1 >>= \ $1 -> mkHsAppTyPV $1 $2 } | ftype PREFIX_AT atype { $1 >>= \ $1 -> mkHsAppKindTyPV $1 (epTok $2) $3 } tyarg :: { LHsType GhcPs } : atype { $1 } | unpackedness atype {% addUnpackednessP $1 $2 } tyop :: { (LocatedN RdrName, PromotionFlag) } : qtyconop { ($1, NotPromoted) } | tyvarop { ($1, NotPromoted) } | SIMPLEQUOTE qconop {% do { op <- amsr (sLL $1 $> (unLoc $2)) (NameAnnQuote (epTok $1) (gl $2) []) ; return (op, IsPromoted) } } | SIMPLEQUOTE varop {% do { op <- amsr (sLL $1 $> (unLoc $2)) (NameAnnQuote (epTok $1) (gl $2) []) ; return (op, IsPromoted) } } atype :: { LHsType GhcPs } : ntgtycon {% amsA' (sL1 $1 (HsTyVar noAnn NotPromoted $1)) } -- Not including unit tuples -- See Note [%shift: atype -> tyvar] | tyvar %shift {% amsA' (sL1 $1 (HsTyVar noAnn NotPromoted $1)) } -- (See Note [Unit tuples]) | '_' %shift { sL1a $1 $ mkAnonWildCardTy (epTok $1) } | '*' {% do { warnStarIsType (getLoc $1) ; return $ sL1a $1 (HsStarTy noExtField (isUnicode $1)) } } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | PREFIX_TILDE atype {% amsA' (sLL $1 $> (mkBangTy (glR $1) SrcLazy $2)) } | PREFIX_BANG atype {% amsA' (sLL $1 $> (mkBangTy (glR $1) SrcStrict $2)) } | '{' fielddecls '}' {% do { decls <- amsA' (sLL $1 $> $ HsRecTy (AnnList (listAsAnchorM $2) (ListBraces (epTok $1) (epTok $3)) [] noAnn []) $2) ; checkRecordSyntax decls }} -- Constructor sigs only -- List and tuple syntax whose interpretation depends on the extension ListTuplePuns. | '(' ')' {% amsA' . sLL $1 $> =<< (mkTupleSyntaxTy (epTok $1) [] (epTok $>)) } | '(' ktype ',' comma_types1 ')' {% do { h <- addTrailingCommaA $2 (epTok $3) ; amsA' . 
sLL $1 $> =<< (mkTupleSyntaxTy (epTok $1) (h : $4) (epTok $>)) }} | '(#' '#)' {% do { requireLTPuns PEP_TupleSyntaxType $1 $> ; amsA' (sLL $1 $> $ HsTupleTy (AnnParensHash (epTok $1) (epTok $2)) HsUnboxedTuple []) } } | '(#' comma_types1 '#)' {% do { requireLTPuns PEP_TupleSyntaxType $1 $> ; amsA' (sLL $1 $> $ HsTupleTy (AnnParensHash (epTok $1) (epTok $3)) HsUnboxedTuple $2) } } | '(#' bar_types2 '#)' {% do { requireLTPuns PEP_SumSyntaxType $1 $> ; amsA' (sLL $1 $> $ HsSumTy (AnnParensHash (epTok $1) (epTok $3)) $2) } } | '[' ktype ']' {% amsA' . sLL $1 $> =<< (mkListSyntaxTy1 (epTok $1) $2 (epTok $3)) } | '(' ktype ')' {% amsA' (sLL $1 $> $ HsParTy (epTok $1, epTok $3) $2) } -- see Note [Promotion] for the followings | SIMPLEQUOTE '(' ')' {% do { requireLTPuns PEP_QuoteDisambiguation $1 $> ; amsA' (sLL $1 $> $ HsExplicitTupleTy (epTok $1,epTok $2,epTok $3) IsPromoted []) }} | SIMPLEQUOTE gen_qcon {% amsA' (sLL $1 $> $ HsTyVar (epTok $1) IsPromoted $2) } | SIMPLEQUOTE sysdcon_nolist {% do { requireLTPuns PEP_QuoteDisambiguation $1 (reLoc $>) ; amsA' (sLL $1 $> $ HsTyVar (epTok $1) IsPromoted (L (getLoc $2) $ nameRdrName (dataConName (unLoc $2)))) }} | SIMPLEQUOTE '(' ktype ',' comma_types1 ')' {% do { requireLTPuns PEP_QuoteDisambiguation $1 $> ; h <- addTrailingCommaA $3 (epTok $4) ; amsA' (sLL $1 $> $ HsExplicitTupleTy (epTok $1,epTok $2,epTok $6) IsPromoted (h : $5)) }} | '[' ']' {% withCombinedComments $1 $> (mkListSyntaxTy0 (epTok $1) (epTok $2)) } | SIMPLEQUOTE '[' comma_types0 ']' {% do { requireLTPuns PEP_QuoteDisambiguation $1 $> ; amsA' (sLL $1 $> $ HsExplicitListTy (epTok $1, epTok $2, epTok $4) IsPromoted $3) }} | SIMPLEQUOTE var {% amsA' (sLL $1 $> $ HsTyVar (epTok $1) IsPromoted $2) } | quasiquote { mapLocA (HsSpliceTy noExtField) $1 } | splice_untyped { mapLocA (HsSpliceTy noExtField) $1 } -- Two or more [ty, ty, ty] must be a promoted list type, just as -- if you had written '[ty, ty, ty] -- (One means a list type, zero means the list type 
constructor, -- so you have to quote those.) | '[' ktype ',' comma_types1 ']' {% do { h <- addTrailingCommaA $2 (epTok $3) ; amsA' (sLL $1 $> $ HsExplicitListTy (NoEpTok,epTok $1,epTok $5) NotPromoted (h:$4)) }} | INTEGER { sLLa $1 $> $ HsTyLit noExtField $ HsNumTy (getINTEGERs $1) (il_value (getINTEGER $1)) } | CHAR { sLLa $1 $> $ HsTyLit noExtField $ HsCharTy (getCHARs $1) (getCHAR $1) } | STRING { sLLa $1 $> $ HsTyLit noExtField $ HsStrTy (getSTRINGs $1) (getSTRING $1) } | STRING_MULTI { sLLa $1 $> $ HsTyLit noExtField $ HsStrTy (getSTRINGMULTIs $1) (getSTRINGMULTI $1) } -- Type variables are never exported, so `M.tyvar` will be rejected by the renamer. -- We let it pass the parser because the renamer can generate a better error message. | QVARID {% let qname = mkQual tvName (getQVARID $1) in amsA' (sL1 $1 (HsTyVar noAnn NotPromoted (sL1n $1 $ qname)))} -- An inst_type is what occurs in the head of an instance decl -- e.g. (Foo a, Gaz b) => Wibble a b -- It's kept as a single type for convenience. 
inst_type :: { LHsSigType GhcPs } : sigtype { $1 } deriv_types :: { [LHsSigType GhcPs] } : sigktype { [$1] } | sigktype ',' deriv_types {% do { h <- addTrailingCommaA $1 (epTok $2) ; return (h : $3) } } comma_types0 :: { [LHsType GhcPs] } -- Zero or more: ty,ty,ty : comma_types1 { $1 } | {- empty -} { [] } comma_types1 :: { [LHsType GhcPs] } -- One or more: ty,ty,ty : ktype { [$1] } | ktype ',' comma_types1 {% do { h <- addTrailingCommaA $1 (epTok $2) ; return (h : $3) }} bar_types2 :: { [LHsType GhcPs] } -- Two or more: ty|ty|ty : ktype '|' ktype {% do { h <- addTrailingVbarA $1 (epTok $2) ; return [h,$3] }} | ktype '|' bar_types2 {% do { h <- addTrailingVbarA $1 (epTok $2) ; return (h : $3) }} tv_bndrs :: { [LHsTyVarBndr Specificity GhcPs] } : tv_bndr tv_bndrs { $1 : $2 } | {- empty -} { [] } tv_bndr :: { LHsTyVarBndr Specificity GhcPs } : tv_bndr_no_braces { $1 } | '{' tyvar '}' {% amsA' (sLL $1 $> (HsTvb { tvb_ext = AnnTyVarBndr [glR $1] [glR $3] noAnn noAnn , tvb_flag = InferredSpec , tvb_var = HsBndrVar noExtField $2 , tvb_kind = HsBndrNoKind noExtField })) } | '{' tyvar '::' kind '}' {% amsA' (sLL $1 $> (HsTvb { tvb_ext = AnnTyVarBndr [glR $1] [glR $5] noAnn (epUniTok $3) , tvb_flag = InferredSpec , tvb_var = HsBndrVar noExtField $2 , tvb_kind = HsBndrKind noExtField $4 })) } tv_bndr_no_braces :: { LHsTyVarBndr Specificity GhcPs } : tyvar_wc {% amsA' (sL1 $1 (HsTvb { tvb_ext = noAnn , tvb_flag = SpecifiedSpec , tvb_var = unLoc $1 , tvb_kind = HsBndrNoKind noExtField })) } | '(' tyvar_wc '::' kind ')' {% amsA' (sLL $1 $> (HsTvb { tvb_ext = AnnTyVarBndr [glR $1] [glR $5] noAnn (epUniTok $3) , tvb_flag = SpecifiedSpec , tvb_var = unLoc $2 , tvb_kind = HsBndrKind noExtField $4 })) } tyvar_wc :: { Located (HsBndrVar GhcPs) } : tyvar { sL1 $1 (HsBndrVar noExtField $1) } | '_' { sL1 $1 (HsBndrWildCard (epTok $1)) } fds :: { Located (EpToken "|",[LHsFunDep GhcPs]) } : {- empty -} { noLoc (NoEpTok,[]) } | '|' fds1 { (sLL $1 $> (epTok $1 ,reverse (unLoc $2))) } fds1 
:: { Located [LHsFunDep GhcPs] } : fds1 ',' fd {% do { let (h:t) = unLoc $1 -- Safe from fds1 rules ; h' <- addTrailingCommaA h (epTok $2) ; return (sLL $1 $> ($3 : h' : t)) }} | fd { sL1 $1 [$1] } fd :: { LHsFunDep GhcPs } : varids0 '->' varids0 {% amsA' (L (comb3 $1 $2 $3) (FunDep (epUniTok $2) (reverse (unLoc $1)) (reverse (unLoc $3)))) } varids0 :: { Located [LocatedN RdrName] } : {- empty -} { noLoc [] } | varids0 tyvar { sLL $1 $> ($2 : (unLoc $1)) } ----------------------------------------------------------------------------- -- Kinds kind :: { LHsKind GhcPs } : ctype { $1 } {- Note [Promotion] ~~~~~~~~~~~~~~~~ - Syntax of promoted qualified names We write 'Nat.Zero instead of Nat.'Zero when dealing with qualified names. Moreover ticks are only allowed in types, not in kinds, for a few reasons: 1. we don't need quotes since we cannot define names in kinds 2. if one day we merge types and kinds, tick would mean look in DataName 3. we don't have a kind namespace anyway - Name resolution When the user write Zero instead of 'Zero in types, we parse it a HsTyVar ("Zero", TcClsName) instead of HsTyVar ("Zero", DataName). We deal with this in the renamer. If a HsTyVar ("Zero", TcClsName) is not bounded in the type level, then we look for it in the term level (we change its namespace to DataName, see Note [Demotion] in GHC.Types.Names.OccName). And both become a HsTyVar ("Zero", DataName) after the renamer. - ListTuplePuns When this extension is disabled, ticked constructors for lists and tuples are not accepted, while the unticked variants are unconditionally parsed as data constructors. 
-} ----------------------------------------------------------------------------- -- Datatype declarations gadt_constrlist :: { Located ((EpToken "where", EpToken "{", EpToken "}") ,[LConDecl GhcPs]) } -- Returned in order : 'where' '{' gadt_constrs '}' {% checkEmptyGADTs $ L (comb2 $1 $4) ((epTok $1 ,epTok $2 ,epTok $4) , unLoc $3) } | 'where' vocurly gadt_constrs close {% checkEmptyGADTs $ L (comb2 $1 $3) ((epTok $1, NoEpTok, NoEpTok) , unLoc $3) } | {- empty -} { noLoc (noAnn,[]) } gadt_constrs :: { Located [LConDecl GhcPs] } : gadt_constr ';' gadt_constrs {% do { h <- addTrailingSemiA $1 (epTok $2) ; return (L (comb2 $1 $3) (h : unLoc $3)) }} | gadt_constr { L (glA $1) [$1] } | {- empty -} { noLoc [] } -- We allow the following forms: -- C :: Eq a => a -> T a -- C :: forall a. Eq a => !a -> T a -- D { x,y :: a } :: T a -- forall a. Eq a => D { x,y :: a } :: T a gadt_constr :: { LConDecl GhcPs } -- see Note [Difference in parsing GADT and data constructors] -- Returns a list because of: C,D :: ty -- TODO:AZ capture the optSemi. Why leading? : optSemi con_list '::' sigtype {% mkGadtDecl (comb2 $2 $>) (unLoc $2) (epUniTok $3) $4 } {- Note [Difference in parsing GADT and data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GADT constructors have simpler syntax than usual data constructors: in GADTs, types cannot occur to the left of '::', so they cannot be mixed with constructor names (see Note [Parsing data constructors is hard]). Due to simplified syntax, GADT constructor names (left-hand side of '::') use simpler grammar production than usual data constructor names. As a consequence, GADT constructor names are restricted (names like '(*)' are allowed in usual data constructors, but not in GADTs). 
-} constrs :: { Located (EpToken "=",[LConDecl GhcPs]) } : '=' constrs1 { sLL $1 $2 (epTok $1,unLoc $2)} constrs1 :: { Located [LConDecl GhcPs] } : constrs1 '|' constr {% do { let (h:t) = unLoc $1 ; h' <- addTrailingVbarA h (epTok $2) ; return (sLL $1 $> ($3 : h' : t)) }} | constr { sL1 $1 [$1] } constr :: { LConDecl GhcPs } : forall context '=>' constr_stuff {% amsA' (let (con,details) = unLoc $4 in (L (comb4 $1 $2 $3 $4) (mkConDeclH98 (epUniTok $3,(fst $ unLoc $1)) con (snd $ unLoc $1) (Just $2) details))) } | forall constr_stuff {% amsA' (let (con,details) = unLoc $2 in (L (comb2 $1 $2) (mkConDeclH98 (noAnn, fst $ unLoc $1) con (snd $ unLoc $1) Nothing -- No context details))) } forall :: { Located ((TokForall, EpToken "."), Maybe [LHsTyVarBndr Specificity GhcPs]) } : 'forall' tv_bndrs '.' { sLL $1 $> ((epUniTok $1,epTok $3), Just $2) } | {- empty -} { noLoc (noAnn, Nothing) } constr_stuff :: { Located (LocatedN RdrName, HsConDeclH98Details GhcPs) } : infixtype {% do { b <- runPV $1 ; return (sL1 b (dataConBuilderCon b, dataConBuilderDetails b)) }} | '(#' usum_constr '#)' {% let (t, tag, arity) = $2 in pure (sLL $1 $3 $ mkUnboxedSumCon t tag arity)} usum_constr :: { (LHsType GhcPs, Int, Int) } -- constructor for the data decls SumN# : ktype bars { ($1, 1, (snd $2 + 1)) } | bars ktype bars0 { ($2, snd $1 + 1, snd $1 + snd $3 + 1) } fielddecls :: { [LConDeclField GhcPs] } : {- empty -} { [] } | fielddecls1 { $1 } fielddecls1 :: { [LConDeclField GhcPs] } : fielddecl ',' fielddecls1 {% do { h <- addTrailingCommaA $1 (epTok $2) ; return (h : $3) }} | fielddecl { [$1] } fielddecl :: { LConDeclField GhcPs } -- A list because of f,g :: Int : sig_vars '::' ctype {% amsA' (L (comb2 $1 $3) (ConDeclField (epUniTok $2) (reverse (map (\ln@(L l n) -> L (fromTrailingN l) $ FieldOcc noExtField (L (noTrailingN l) n)) (unLoc $1))) $3 Nothing))} -- Reversed! 
maybe_derivings :: { Located (HsDeriving GhcPs) } : {- empty -} { noLoc [] } | derivings { $1 } -- A list of one or more deriving clauses at the end of a datatype derivings :: { Located (HsDeriving GhcPs) } : derivings deriving { sLL $1 $> ($2 : unLoc $1) } -- AZ: order? | deriving { sL1 $> [$1] } -- The outer Located is just to allow the caller to -- know the rightmost extremity of the 'deriving' clause deriving :: { LHsDerivingClause GhcPs } : 'deriving' deriv_clause_types {% let { full_loc = comb2 $1 $> } in amsA' (L full_loc $ HsDerivingClause (epTok $1) Nothing $2) } | 'deriving' deriv_strategy_no_via deriv_clause_types {% let { full_loc = comb2 $1 $> } in amsA' (L full_loc $ HsDerivingClause (epTok $1) (Just $2) $3) } | 'deriving' deriv_clause_types deriv_strategy_via {% let { full_loc = comb2 $1 $> } in amsA' (L full_loc $ HsDerivingClause (epTok $1) (Just $3) $2) } deriv_clause_types :: { LDerivClauseTys GhcPs } : qtycon { let { tc = sL1a $1 $ mkHsImplicitSigType $ sL1a $1 $ HsTyVar noAnn NotPromoted $1 } in sL1a $1 (DctSingle noExtField tc) } | '(' ')' {% amsr (sLL $1 $> (DctMulti noExtField [])) (AnnContext Nothing [epTok $1] [epTok $2]) } | '(' deriv_types ')' {% amsr (sLL $1 $> (DctMulti noExtField $2)) (AnnContext Nothing [epTok $1] [epTok $3])} ----------------------------------------------------------------------------- -- Value definitions {- Note [Declaration/signature overlap] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There's an awkward overlap with a type signature. Consider f :: Int -> Int = ...rhs... Then we can't tell whether it's a type signature or a value definition with a result signature until we see the '='. So we have to inline enough to postpone reductions until we know. -} {- ATTENTION: Dirty Hackery Ahead! If the second alternative of vars is var instead of qvar, we get another shift/reduce-conflict. Consider the following programs: { (^^) :: Int->Int ; } Type signature; only var allowed { (^^) :: Int->Int = ... 
; } Value defn with result signature; qvar allowed (because of instance decls) We can't tell whether to reduce var to qvar until after we've read the signatures. -} decl_no_th :: { LHsDecl GhcPs } : sigdecl { $1 } | infixexp opt_sig rhs {% runPV (unECP $1) >>= \ $1 -> do { let { l = comb2 $1 $> } ; r <- checkValDef l $1 (HsNoMultAnn noExtField, $2) $3; -- Depending upon what the pattern looks like we might get either -- a FunBind or PatBind back from checkValDef. See Note -- [FunBind vs PatBind] ; !cs <- getCommentsFor l ; return $! (sL (commentsA l cs) $ ValD noExtField r) } } | PREFIX_PERCENT atype infixexp opt_sig rhs {% runPV (unECP $3) >>= \ $3 -> do { let { l = comb2 $1 $> } ; r <- checkValDef l $3 (mkMultAnn (epTok $1) $2, $4) $5; -- parses bindings of the form %p x or -- %p x :: sig -- -- Depending upon what the pattern looks like we might get either -- a FunBind or PatBind back from checkValDef. See Note -- [FunBind vs PatBind] ; !cs <- getCommentsFor l ; return $! (sL (commentsA l cs) $ ValD noExtField r) } } | pattern_synonym_decl { $1 } decl :: { LHsDecl GhcPs } : decl_no_th { $1 } -- Why do we only allow naked declaration splices in top-level -- declarations and not here? Short answer: because readFail009 -- fails terribly with a panic in cvBindsAndSigs otherwise. 
| splice_exp { mkSpliceDecl $1 } rhs :: { Located (GRHSs GhcPs (LHsExpr GhcPs)) } : '=' exp wherebinds {% runPV (unECP $2) >>= \ $2 -> do { let L l (bs, csw) = adaptWhereBinds $3 ; let loc = (comb3 $1 $2 (L l bs)) ; let locg = (comb2 $1 $2) ; acs loc (\loc cs -> sL loc (GRHSs csw (unguardedRHS (EpAnn (spanAsAnchor locg) (GrhsAnn Nothing (Left $ epTok $1)) cs) locg $2) bs)) } } | gdrhs wherebinds {% do { let {L l (bs, csw) = adaptWhereBinds $2} ; acs (comb2 $1 (L l bs)) (\loc cs -> L loc (GRHSs (cs Semi.<> csw) (reverse (unLoc $1)) bs)) }} gdrhs :: { Located [LGRHS GhcPs (LHsExpr GhcPs)] } : gdrhs gdrh { sLL $1 $> ($2 : unLoc $1) } | gdrh { sL1 $1 [$1] } gdrh :: { LGRHS GhcPs (LHsExpr GhcPs) } : '|' guardquals '=' exp {% runPV (unECP $4) >>= \ $4 -> acsA (comb2 $1 $>) (\loc cs -> L loc $ GRHS (EpAnn (glEE $1 $>) (GrhsAnn (Just $ epTok $1) (Left $ epTok $3)) cs) (unLoc $2) $4) } sigdecl :: { LHsDecl GhcPs } : -- See Note [Declaration/signature overlap] for why we need infixexp here infixexp '::' sigtype {% do { $1 <- runPV (unECP $1) ; v <- checkValSigLhs $1 ; amsA' (sLL $1 $> $ SigD noExtField $ TypeSig (AnnSig (epUniTok $2) Nothing Nothing) [v] (mkHsWildCardBndrs $3))} } | var ',' sig_vars '::' sigtype {% do { v <- addTrailingCommaN $1 (gl $2) ; let sig = TypeSig (AnnSig (epUniTok $4) Nothing Nothing) (v : reverse (unLoc $3)) (mkHsWildCardBndrs $5) ; amsA' (sLL $1 $> $ SigD noExtField sig ) }} | infix prec namespace_spec ops {% do { mbPrecAnn <- traverse (\l2 -> do { checkPrecP l2 $4 ; pure (glR l2) }) $2 ; let (fixText, fixPrec) = case $2 of -- If an explicit precedence isn't supplied, -- it defaults to maxPrecedence Nothing -> (NoSourceText, maxPrecedence) Just l2 -> (fst $ unLoc l2, snd $ unLoc l2) ; amsA' (sLL $1 $> $ SigD noExtField (FixSig ((glR $1, mbPrecAnn), fixText) (FixitySig (unLoc $3) (fromOL $ unLoc $4) (Fixity fixPrec (unLoc $1))))) }} | pattern_synonym_sig { L (getLoc $1) . SigD noExtField . 
unLoc $ $1 } | '{-# COMPLETE' qcon_list opt_tyconsig '#-}' {% let (dcolon, tc) = $3 in amsA' (sLL $1 $> (SigD noExtField (CompleteMatchSig ((glR $1,dcolon,epTok $4), (getCOMPLETE_PRAGs $1)) $2 tc))) } -- This rule is for both INLINE and INLINABLE pragmas | '{-# INLINE' activation qvarcon '#-}' {% amsA' (sLL $1 $> $ SigD noExtField (InlineSig (glR $1, epTok $4, fst $2) $3 (mkInlinePragma (getINLINE_PRAGs $1) (getINLINE $1) (snd $2)))) } | '{-# OPAQUE' qvar '#-}' {% amsA' (sLL $1 $> $ SigD noExtField (InlineSig (glR $1, epTok $3, noAnn) $2 (mkOpaquePragma (getOPAQUE_PRAGs $1)))) } | '{-# SCC' qvar '#-}' {% amsA' (sLL $1 $> (SigD noExtField (SCCFunSig ((glR $1, epTok $3), (getSCC_PRAGs $1)) $2 Nothing))) } | '{-# SCC' qvar STRING '#-}' {% do { scc <- getSCC $3 ; let str_lit = StringLiteral (getSTRINGs $3) scc Nothing ; amsA' (sLL $1 $> (SigD noExtField (SCCFunSig ((glR $1, epTok $4), (getSCC_PRAGs $1)) $2 (Just ( sL1a $3 str_lit))))) }} | '{-# SPECIALISE' activation qvar '::' sigtypes1 '#-}' {% amsA' ( let inl_prag = mkInlinePragma (getSPEC_PRAGs $1) (NoUserInlinePrag, FunLike) (snd $2) in sLL $1 $> $ SigD noExtField (SpecSig (AnnSpecSig (glR $1) (epTok $6) (epUniTok $4) (fst $2)) $3 (fromOL $5) inl_prag)) } | '{-# SPECIALISE_INLINE' activation qvar '::' sigtypes1 '#-}' {% amsA' (sLL $1 $> $ SigD noExtField (SpecSig (AnnSpecSig (glR $1) (epTok $6) (epUniTok $4) (fst $2)) $3 (fromOL $5) (mkInlinePragma (getSPEC_INLINE_PRAGs $1) (getSPEC_INLINE $1) (snd $2)))) } | '{-# SPECIALISE' 'instance' inst_type '#-}' {% amsA' (sLL $1 $> $ SigD noExtField (SpecInstSig ((glR $1,epTok $2,epTok $4), (getSPEC_PRAGs $1)) $3)) } -- A minimal complete definition | '{-# MINIMAL' name_boolformula_opt '#-}' {% amsA' (sLL $1 $> $ SigD noExtField (MinimalSig ((glR $1,epTok $3), (getMINIMAL_PRAGs $1)) $2)) } activation :: { (ActivationAnn,Maybe Activation) } -- See Note [%shift: activation -> {- empty -}] : {- empty -} %shift { (noAnn ,Nothing) } | explicit_activation { (fst $1,Just (snd $1)) 
} explicit_activation :: { (ActivationAnn, Activation) } -- In brackets : '[' INTEGER ']' { (ActivationAnn (epTok $1) (epTok $3) Nothing (Just (glR $2)) ,ActiveAfter (getINTEGERs $2) (fromInteger (il_value (getINTEGER $2)))) } | '[' rule_activation_marker INTEGER ']' { (ActivationAnn (epTok $1) (epTok $4) $2 (Just (glR $3)) ,ActiveBefore (getINTEGERs $3) (fromInteger (il_value (getINTEGER $3)))) } ----------------------------------------------------------------------------- -- Expressions quasiquote :: { Located (HsUntypedSplice GhcPs) } : TH_QUASIQUOTE { let { loc = getLoc $1 ; ITquasiQuote (quoter, quote, quoteSpan) = unLoc $1 ; quoterId = mkUnqual varName quoter } in sL1 $1 (HsQuasiQuote noExtField quoterId (L (noAnnSrcSpan (mkSrcSpanPs quoteSpan)) quote)) } | TH_QQUASIQUOTE { let { loc = getLoc $1 ; ITqQuasiQuote (qual, quoter, quote, quoteSpan) = unLoc $1 ; quoterId = mkQual varName (qual, quoter) } in sL1 $1 (HsQuasiQuote noExtField quoterId (L (noAnnSrcSpan (mkSrcSpanPs quoteSpan)) quote)) } exp :: { ECP } : exp_gen(infixexp) { $1 } exp2 :: { ECP } : exp_gen(infixexp2) { $1 } exp_gen(IEXP) :: { ECP } : IEXP '::' ctype { ECP $ unECP $1 >>= \ $1 -> rejectPragmaPV $1 >> mkHsTySigPV (noAnnSrcSpan $ comb2 $1 $>) $1 $3 (epUniTok $2) } | IEXP '-<' exp_gen(IEXP) {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ amsA' (sLL $1 $> $ HsCmdArrApp (isUnicodeSyntax $2, glR $2) $1 $3 HsFirstOrderApp True) } | IEXP '>-' exp_gen(IEXP) {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ amsA' (sLL $1 $> $ HsCmdArrApp (isUnicodeSyntax $2, glR $2) $3 $1 HsFirstOrderApp False) } | IEXP '-<<' exp_gen(IEXP) {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ amsA' (sLL $1 $> $ HsCmdArrApp (isUnicodeSyntax $2, glR $2) $1 $3 HsHigherOrderApp True) } | IEXP '>>-' exp_gen(IEXP) {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ amsA' (sLL $1 $> $ HsCmdArrApp (isUnicodeSyntax 
$2, glR $2) $3 $1 HsHigherOrderApp False) } -- See Note [%shift: exp -> infixexp] | IEXP %shift { $1 } | exp_prag(exp_gen(IEXP)) { $1 } -- See Note [Pragmas and operator fixity] -- Embed types into expressions and patterns for required type arguments | 'type' atype { ECP $ mkHsEmbTyPV (comb2 $1 $>) (epTok $1) $2 } infixexp2 :: { ECP } : infixexp %shift { $1 } -- View patterns and function arrows | infixexp '->' infixexp2 { ECP $ withArrowParsingMode' $ \mode -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> let arr = HsUnrestrictedArrow (epUniTok $2) in mkHsArrowPV (comb2 $1 $>) mode $1 arr $3 } | infixexp expmult '->' infixexp2 { ECP $ unECP $1 >>= \ $1 -> $2 >>= \ $2 -> unECP $4 >>= \ $4 -> hintLinear (getLoc $2) >> let arr = (unLoc $2) (epUniTok $3) in mkHsArrowPV (comb2 $1 $>) ArrowIsFunType $1 arr $4 } | infixexp '->.' infixexp2 { ECP $ hintLinear (getLoc $2) >> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> let arr = HsLinearArrow (EpLolly (epTok $2)) in mkHsArrowPV (comb2 $1 $>) ArrowIsFunType $1 arr $3 } | expcontext '=>' infixexp2 { ECP $ $1 >>= \ $1 -> unECP $3 >>= \ $3 -> mkQualPV (comb2 $1 $>) (addTrailingDarrowC $1 $2 emptyComments) $3} | forall_telescope infixexp2 { ECP $ unECP $2 >>= \ $2 -> mkHsForallPV (comb2 $1 $>) (unLoc $1) $2 } infixexp :: { ECP } : exp10 { $1 } | infixexp qop exp10p -- See Note [Pragmas and operator fixity] { ECP $ superInfixOp $ $2 >>= \ $2 -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> rejectPragmaPV $1 >> (mkHsOpAppPV (comb2 $1 $3) $1 $2 $3) } -- AnnVal annotation for NPlusKPat, which discards the operator exp10p :: { ECP } : exp10 { $1 } | exp_prag(exp10p) { $1 } -- See Note [Pragmas and operator fixity] exp_prag(e) :: { ECP } : prag_e e -- See Note [Pragmas and operator fixity] {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ amsA' $ (sLL $1 $> $ HsPragE noExtField (unLoc $1) $2) } exp10 :: { ECP } -- See Note [%shift: exp10 -> '-' fexp] : '-' fexp %shift { ECP $ unECP $2 >>= \ $2 -> mkHsNegAppPV (comb2 $1 $>) $2 (epTok $1) } -- See 
Note [%shift: exp10 -> fexp] | fexp %shift { $1 } optSemi :: { (Maybe (EpToken ";"),Bool) } : ';' { (msemim $1,True) } | {- empty -} { (Nothing,False) } {- Note [Pragmas and operator fixity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'prag_e' is an expression pragma, such as {-# SCC ... #-}. It must be used with care, or else #15730 happens. Consider this infix expression: 1 / 2 / 2 There are two ways to parse it: 1. (1 / 2) / 2 = 0.25 2. 1 / (2 / 2) = 1.0 Due to the fixity of the (/) operator (assuming it comes from Prelude), option 1 is the correct parse. However, in the past GHC's parser used to get confused by the SCC annotation when it occurred in the middle of an infix expression: 1 / {-# SCC ann #-} 2 / 2 -- used to get parsed as option 2 There are several ways to address this issue, see GHC Proposal #176 for a detailed exposition: https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0176-scc-parsing.rst The accepted fix is to disallow pragmas that occur within infix expressions. Infix expressions are assembled out of 'exp10', so 'exp10' must not accept pragmas. Instead, we accept them in exactly two places: * at the start of an expression or a parenthesized subexpression: f = {-# SCC ann #-} 1 / 2 / 2 -- at the start of the expression g = 5 + ({-# SCC ann #-} 1 / 2 / 2) -- at the start of a parenthesized subexpression * immediately after the last operator: f = 1 / 2 / {-# SCC ann #-} 2 In both cases, the parse does not depend on operator fixity. The second case may sound unnecessary, but it's actually needed to support a common idiom: f $ {-# SCC ann $-} ... 
-} prag_e :: { Located (HsPragE GhcPs) } : '{-# SCC' STRING '#-}' {% do { scc <- getSCC $2 ; return (sLL $1 $> (HsPragSCC (AnnPragma (glR $1) (epTok $3) noAnn (glR $2) noAnn noAnn noAnn, (getSCC_PRAGs $1)) (StringLiteral (getSTRINGs $2) scc Nothing)))} } | '{-# SCC' VARID '#-}' { sLL $1 $> (HsPragSCC (AnnPragma (glR $1) (epTok $3) noAnn (glR $2) noAnn noAnn noAnn, (getSCC_PRAGs $1)) (StringLiteral NoSourceText (getVARID $2) Nothing)) } fexp :: { ECP } : fexp aexp { ECP $ superFunArg $ unECP $1 >>= \ $1 -> unECP $2 >>= \ $2 -> spanWithComments (comb2 $1 $>) >>= \l -> mkHsAppPV l $1 $2 } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | fexp PREFIX_AT atype { ECP $ unECP $1 >>= \ $1 -> mkHsAppTypePV (noAnnSrcSpan $ comb2 $1 $>) $1 (epTok $2) $3 } | 'static' aexp {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ amsA' (sLL $1 $> $ HsStatic (epTok $1) $2) } | aexp { $1 } aexp :: { ECP } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : qvar TIGHT_INFIX_AT aexp { ECP $ unECP $3 >>= \ $3 -> mkHsAsPatPV (comb2 $1 $>) $1 (epTok $2) $3 } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | PREFIX_TILDE aexp { ECP $ unECP $2 >>= \ $2 -> mkHsLazyPatPV (comb2 $1 $>) $2 (epTok $1) } | PREFIX_BANG aexp { ECP $ unECP $2 >>= \ $2 -> mkHsBangPatPV (comb2 $1 $>) $2 (epTok $1) } | PREFIX_MINUS aexp { ECP $ unECP $2 >>= \ $2 -> mkHsNegAppPV (comb2 $1 $>) $2 (epTok $1) } | 'let' binds 'in' exp { ECP $ unECP $4 >>= \ $4 -> mkHsLetPV (comb2 $1 $>) (epTok $1) (unLoc $2) (epTok $3) $4 } | '\\' argpats '->' exp { ECP $ unECP $4 >>= \ $4 -> mkHsLamPV (comb2 $1 $>) LamSingle (sLLld $1 $> [sLLa $1 $> $ Match { m_ext = noExtField , m_ctxt = LamAlt LamSingle , m_pats = L (listLocation $2) $2 , m_grhss = unguardedGRHSs (comb2 $3 $4) $4 (EpAnn (glR $3) (GrhsAnn Nothing (Right $ epUniTok $3)) emptyComments) }]) (EpAnnLam (epTok $1) Nothing) } | '\\' 'lcase' altslist(pats1) { ECP $ $3 >>= \ $3 -> mkHsLamPV (comb3 $1 $2 $>) LamCase 
$3 (EpAnnLam (epTok $1) (Just (glR $2))) } | '\\' 'lcases' altslist(argpats) { ECP $ $3 >>= \ $3 -> mkHsLamPV (comb3 $1 $2 $>) LamCases $3 (EpAnnLam (epTok $1) (Just (glR $2))) } | 'if' exp optSemi 'then' exp optSemi 'else' exp {% runPV (unECP $2) >>= \ ($2 :: LHsExpr GhcPs) -> return $ ECP $ unECP $5 >>= \ $5 -> unECP $8 >>= \ $8 -> mkHsIfPV (comb2 $1 $>) $2 (snd $3) $5 (snd $6) $8 (AnnsIf { aiIf = epTok $1 , aiThen = epTok $4 , aiElse = epTok $7 , aiThenSemi = fst $3 , aiElseSemi = fst $6})} | 'if' ifgdpats {% hintMultiWayIf (getLoc $1) >>= \_ -> fmap ecpFromExp $ do { let (L _ ((o,c),_)) = $2 ; amsA' (sLL $1 $> $ HsMultiIf (epTok $1, o, c) (reverse $ snd $ unLoc $2)) }} | 'case' exp 'of' altslist(pats1) {% runPV (unECP $2) >>= \ ($2 :: LHsExpr GhcPs) -> return $ ECP $ $4 >>= \ $4 -> mkHsCasePV (comb3 $1 $3 $4) $2 $4 (EpAnnHsCase (epTok $1) (epTok $3)) } -- QualifiedDo. | DO stmtlist {% do hintQualifiedDo $1 return $ ECP $ $2 >>= \ $2 -> mkHsDoPV (comb2 $1 $2) (fmap mkModuleNameFS (getDO $1)) $2 (glR $1) (glR $2) } | MDO stmtlist {% hintQualifiedDo $1 >> runPV $2 >>= \ $2 -> fmap ecpFromExp $ amsA' (L (comb2 $1 $2) (mkMDo (MDoExpr $ fmap mkModuleNameFS (getMDO $1)) $2 (glR $1) (glR $2))) } | 'proc' aexp '->' exp {% (checkPattern <=< runPV) (unECP $2) >>= \ p -> runPV (unECP $4) >>= \ $4@cmd -> fmap ecpFromExp $ amsA' (sLL $1 $> $HsProc (epTok $1, epUniTok $3) p (sLLa $1 $> $ HsCmdTop noExtField cmd)) } | aexp1 { $1 } aexp1 :: { ECP } : aexp1 '{' fbinds '}' { ECP $ getBit OverloadedRecordUpdateBit >>= \ overloaded -> unECP $1 >>= \ $1 -> $3 >>= \ $3 -> mkHsRecordPV overloaded (comb2 $1 $>) (comb2 $2 $4) $1 $3 (Just (epTok $2), Just (epTok $4)) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | aexp1 TIGHT_INFIX_PROJ field {% runPV (unECP $1) >>= \ $1 -> fmap ecpFromExp $ amsA' ( let fl = sLLa $2 $> (DotFieldOcc (AnnFieldLabel (Just $ epTok $2)) $3) in sLL $1 $> $ mkRdrGetField $1 fl) } | aexp2 { $1 } aexp2 :: { ECP } : qvar { ECP $ 
mkHsVarPV $! $1 } | qcon { ECP $ mkHsVarPV $! $1 } -- See Note [%shift: aexp2 -> ipvar] | ipvar %shift {% fmap ecpFromExp (ams1 $1 (HsIPVar NoExtField $! unLoc $1)) } | overloaded_label {% fmap ecpFromExp (ams1 $1 (HsOverLabel (fst $! unLoc $1) (snd $! unLoc $1))) } | literal { ECP $ mkHsLitPV $! $1 } -- This will enable overloaded strings permanently. Normally the renamer turns HsString -- into HsOverLit when -XOverloadedStrings is on. -- | STRING { sL (getLoc $1) (HsOverLit $! mkHsIsString (getSTRINGs $1) -- (getSTRING $1) noExtField) } | INTEGER { ECP $ mkHsOverLitPV (sL1a $1 $ mkHsIntegral (getINTEGER $1)) } | RATIONAL { ECP $ mkHsOverLitPV (sL1a $1 $ mkHsFractional (getRATIONAL $1)) } -- N.B.: sections get parsed by these next two productions. -- This allows you to write, e.g., '(+ 3, 4 -)', which isn't -- correct Haskell (you'd have to write '((+ 3), (4 -))') -- but the less cluttered version fell out of having texps. | '(' texp ')' { ECP $ unECP $2 >>= \ $2 -> mkHsParPV (comb2 $1 $>) (epTok $1) $2 (epTok $3) } | '(' tup_exprs ')' { ECP $ $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Boxed $2 (glR $1,glR $3)} | '(' orpats(exp2) ')' {% do { pat <- hintOrPats (sL1a $2 (OrPat NoExtField (unLoc $2))) ; fmap ecpFromPat (amsA' (sLL $1 $> (ParPat (epTok $1, epTok $>) pat))) }} -- This case is only possible when 'OverloadedRecordDotBit' is enabled. 
| '(' projection ')' { ECP $ amsA' (sLL $1 $> $ mkRdrProjection (NE.reverse (unLoc $2)) (AnnProjection (epTok $1) (epTok $3)) ) >>= ecpFromExp' } | '(#' texp '#)' { ECP $ unECP $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Unboxed (Tuple [Right $2]) (glR $1,glR $3) } | '(#' tup_exprs '#)' { ECP $ $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Unboxed $2 (glR $1,glR $3) } | '[' list ']' { ECP $ $2 (comb2 $1 $>) (glR $1,glR $3) } | '_' { ECP $ mkHsWildCardPV (getLoc $1) } -- Template Haskell Extension | splice_untyped { ECP $ mkHsSplicePV $1 } | splice_typed { ecpFromExp $ fmap (uncurry HsTypedSplice) (reLoc $1) } | SIMPLEQUOTE qvar {% fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (VarBr (glR $1) True $2)) } | SIMPLEQUOTE qcon {% fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (VarBr (glR $1) True $2)) } | TH_TY_QUOTE tyvar {% fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (VarBr (glR $1) False $2)) } | TH_TY_QUOTE gtycon {% fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (VarBr (glR $1) False $2)) } -- See Note [%shift: aexp2 -> TH_TY_QUOTE] | TH_TY_QUOTE %shift {% reportEmptyDoubleQuotes (getLoc $1) } | '[|' exp '|]' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (ExpBr (if (hasE $1) then (BracketHasE (epTok $1), epUniTok $3) else (BracketNoE (epUniTok $1), epUniTok $3)) $2)) } | '[||' exp '||]' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ amsA' (sLL $1 $> $ HsTypedBracket (if (hasE $1) then (BracketHasE (epTok $1),epTok $3) else (BracketNoE (epTok $1),epTok $3)) $2) } | '[t|' ktype '|]' {% fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (TypBr (epTok $1,epUniTok $3) $2)) } | '[p|' infixexp '|]' {% (checkPattern <=< runPV) (unECP $2) >>= \p -> fmap ecpFromExp $ amsA' (sLL $1 $> $ HsUntypedBracket noExtField (PatBr (epTok $1,epUniTok $3) p)) } | '[d|' cvtopbody '|]' {% fmap ecpFromExp $ amsA' (sLL $1 
$> $ HsUntypedBracket noExtField (DecBrL (epTok $1,epUniTok $3, fst $2) (snd $2))) } | quasiquote { ECP $ mkHsSplicePV $1 } -- arrow notation extension | '(|' aexp cmdargs '|)' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromCmd $ amsA' (sLL $1 $> $ HsCmdArrForm (AnnList (glRM $1) (ListBanana (epUniTok $1) (epUniTok $4)) [] noAnn []) $2 Prefix (reverse $3)) } projection :: { Located (NonEmpty (LocatedAn NoEpAnns (DotFieldOcc GhcPs))) } projection -- See Note [Whitespace-sensitive operator parsing] in GHC.Parsing.Lexer : projection TIGHT_INFIX_PROJ field { sLL $1 $> ((sLLa $2 $> $ DotFieldOcc (AnnFieldLabel (Just $ epTok $2)) $3) `NE.cons` unLoc $1) } | PREFIX_PROJ field { sLL $1 $> ((sLLa $1 $> $ DotFieldOcc (AnnFieldLabel (Just $ epTok $1)) $2) :| [])} splice_exp :: { LHsExpr GhcPs } : splice_untyped { fmap (HsUntypedSplice noExtField) (reLoc $1) } | splice_typed { fmap (uncurry HsTypedSplice) (reLoc $1) } splice_untyped :: { Located (HsUntypedSplice GhcPs) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : PREFIX_DOLLAR aexp2 {% runPV (unECP $2) >>= \ $2 -> return (sLL $1 $> $ HsUntypedSpliceExpr (epTok $1) $2) } splice_typed :: { Located (EpToken "$$", LHsExpr GhcPs) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : PREFIX_DOLLAR_DOLLAR aexp2 {% runPV (unECP $2) >>= \ $2 -> return (sLL $1 $> $ (epTok $1, $2)) } cmdargs :: { [LHsCmdTop GhcPs] } : cmdargs acmd { $2 : $1 } | {- empty -} { [] } acmd :: { LHsCmdTop GhcPs } : aexp {% runPV (unECP $1) >>= \ (cmd :: LHsCmd GhcPs) -> runPV (checkCmdBlockArguments cmd) >>= \ _ -> return (sL1a cmd $ HsCmdTop noExtField cmd) } cvtopbody :: { ((EpToken "{", EpToken "}"),[LHsDecl GhcPs]) } : '{' cvtopdecls0 '}' { ((epTok $1 ,epTok $3),$2) } | vocurly cvtopdecls0 close { ((NoEpTok, NoEpTok),$2) } cvtopdecls0 :: { [LHsDecl GhcPs] } : topdecls_semi { cvTopDecls $1 } | topdecls { cvTopDecls $1 } ----------------------------------------------------------------------------- -- Tuple 
expressions -- "texp" is short for tuple expressions: -- things that can appear unparenthesized as long as they're -- inside parens or delimited by commas texp :: { ECP } : exp2 { $1 } -- Note [Parsing sections] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- We include left and right sections here, which isn't -- technically right according to the Haskell standard. -- For example (3 +, True) isn't legal. -- However, we want to parse bang patterns like -- (!x, !y) -- and it's convenient to do so here as a section -- Then when converting expr to pattern we unravel it again -- Meanwhile, the renamer checks that real sections appear -- inside parens. | infixexp qop {% runPV (unECP $1) >>= \ $1 -> runPV (rejectPragmaPV $1) >> runPV $2 >>= \ $2 -> return $ ecpFromExp $ sLLa $1 $> $ SectionL noExtField $1 (n2l $2) } | qopm infixexp { ECP $ superInfixOp $ unECP $2 >>= \ $2 -> $1 >>= \ $1 -> mkHsSectionR_PV (comb2 $1 $>) (n2l $1) $2 } orpats(EXP) :: { Located (NonEmpty (LPat GhcPs)) } -- See Note [%shift: orpats -> exp] : EXP %shift {% do { pat <- (checkPattern <=< runPV) (unECP $1) ; return (sL1 pat (NE.singleton pat)) }} | EXP ';' orpats(EXP) {% do { pat <- (checkPattern <=< runPV) (unECP $1) ; pat <- addTrailingSemiA pat (epTok $2) ; return (sLL pat $> (pat NE.<| unLoc $3)) }} -- Always at least one comma or bar. -- Though this can parse just commas (without any expressions), it won't -- in practice, because (,,,) is parsed as a name. See Note [ExplicitTuple] -- in GHC.Hs.Expr. tup_exprs :: { forall b. 
DisambECP b => PV (SumOrTuple b) } : texp commas_tup_tail { unECP $1 >>= \ $1 -> $2 >>= \ $2 -> do { t <- amsA $1 [AddCommaAnn (EpTok $ srcSpan2e $ fst $2)] ; return (Tuple (Right t : snd $2)) } } | commas tup_tail { $2 >>= \ $2 -> do { let {cos = map (\ll -> (Left (EpAnn (spanAsAnchor ll) True emptyComments))) (fst $1) } ; return (Tuple (cos ++ $2)) } } | texp bars { unECP $1 >>= \ $1 -> return $ (Sum 1 (snd $2 + 1) $1 [] (fst $2)) } | bars texp bars0 { unECP $2 >>= \ $2 -> return $ (Sum (snd $1 + 1) (snd $1 + snd $3 + 1) $2 (fst $1) (fst $3)) } -- Always starts with commas; always follows an expr commas_tup_tail :: { forall b. DisambECP b => PV (SrcSpan,[Either (EpAnn Bool) (LocatedA b)]) } commas_tup_tail : commas tup_tail { $2 >>= \ $2 -> do { let {cos = map (\l -> (Left (EpAnn (spanAsAnchor l) True emptyComments))) (tail $ fst $1) } ; return ((head $ fst $1, cos ++ $2)) } } -- Always follows a comma tup_tail :: { forall b. DisambECP b => PV [Either (EpAnn Bool) (LocatedA b)] } : texp commas_tup_tail { unECP $1 >>= \ $1 -> $2 >>= \ $2 -> do { t <- amsA $1 [AddCommaAnn (EpTok $ srcSpan2e $ fst $2)] ; return (Right t : snd $2) } } | texp { unECP $1 >>= \ $1 -> return [Right $1] } -- See Note [%shift: tup_tail -> {- empty -}] | {- empty -} %shift { return [Left noAnn] } ----------------------------------------------------------------------------- -- List expressions -- The rules below are little bit contorted to keep lexps left-recursive while -- avoiding another shift/reduce-conflict. -- Never empty. list :: { forall b. DisambECP b => SrcSpan -> (EpaLocation, EpaLocation) -> PV (LocatedA b) } : texp { \loc (ao,ac) -> unECP $1 >>= \ $1 -> mkHsExplicitListPV loc [$1] (AnnList Nothing (ListSquare (EpTok ao) (EpTok ac)) [] noAnn []) } | lexps { \loc (ao,ac) -> $1 >>= \ $1 -> mkHsExplicitListPV loc (reverse $1) (AnnList Nothing (ListSquare (EpTok ao) (EpTok ac)) [] noAnn []) } | texp '..' 
{ \loc (ao,ac) -> unECP $1 >>= \ $1 -> amsA' (L loc $ ArithSeq (AnnArithSeq (EpTok ao) Nothing (epTok $2) (EpTok ac)) Nothing (From $1)) >>= ecpFromExp' } | texp ',' exp2 '..' { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> amsA' (L loc $ ArithSeq (AnnArithSeq (EpTok ao) (Just (epTok $2)) (epTok $4) (EpTok ac)) Nothing (FromThen $1 $3)) >>= ecpFromExp' } | texp '..' exp2 { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> amsA' (L loc $ ArithSeq (AnnArithSeq (EpTok ao) Nothing (epTok $2) (EpTok ac)) Nothing (FromTo $1 $3)) >>= ecpFromExp' } | texp ',' exp2 '..' exp2 { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> unECP $5 >>= \ $5 -> amsA' (L loc $ ArithSeq (AnnArithSeq (EpTok ao) (Just (epTok $2)) (epTok $4) (EpTok ac)) Nothing (FromThenTo $1 $3 $5)) >>= ecpFromExp' } | texp '|' flattenedpquals { \loc (ao,ac) -> checkMonadComp >>= \ ctxt -> unECP $1 >>= \ $1 -> do { t <- addTrailingVbarA $1 (epTok $2) ; amsA' (L loc $ mkHsCompAnns ctxt (unLoc $3) t (AnnList Nothing (ListSquare (EpTok ao) (EpTok ac)) [] noAnn [])) >>= ecpFromExp' } } lexps :: { forall b. DisambECP b => PV [LocatedA b] } : lexps ',' texp { $1 >>= \ $1 -> unECP $3 >>= \ $3 -> case $1 of (h:t) -> do h' <- addTrailingCommaA h (epTok $2) return (((:) $! $3) $! 
(h':t)) } | texp ',' texp { unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> do { h <- addTrailingCommaA $1 (epTok $2) ; return [$3,h] }} ----------------------------------------------------------------------------- -- List Comprehensions flattenedpquals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : pquals { case (unLoc $1) of [qs] -> sL1 $1 qs -- We just had one thing in our "parallel" list so -- we simply return that thing directly qss -> sL1 $1 [sL1a $1 $ ParStmt noExtField [ParStmtBlock noExtField qs [] noSyntaxExpr | qs <- qss] noExpr noSyntaxExpr] -- We actually found some actual parallel lists so -- we wrap them into as a ParStmt } pquals :: { Located [[LStmt GhcPs (LHsExpr GhcPs)]] } : squals '|' pquals {% case unLoc $1 of (h:t) -> do h' <- addTrailingVbarA h (epTok $2) return (sLL $1 $> (reverse (h':t) : unLoc $3)) } | squals { L (getLoc $1) [reverse (unLoc $1)] } squals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } -- In reverse order, because the last -- one can "grab" the earlier ones : squals ',' transformqual {% case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (epTok $2) return (sLL $1 $> [sLLa $1 $> ((unLoc $3) (reverse (h':t)))]) } | squals ',' qual {% runPV $3 >>= \ $3 -> case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (epTok $2) return (sLL $1 $> ($3 : (h':t))) } | transformqual { sLL $1 $> [L (getLocAnn $1) ((unLoc $1) [])] } | qual {% runPV $1 >>= \ $1 -> return $ sL1 $1 [$1] } -- | transformquals1 ',' '{|' pquals '|}' { sLL $1 $> ($4 : unLoc $1) } -- | '{|' pquals '|}' { sL1 $1 [$2] } -- It is possible to enable bracketing (associating) qualifier lists -- by uncommenting the lines with {| |} above. Due to a lack of -- consensus on the syntax, this feature is not being used until we -- get user demand. 
transformqual :: { Located ([LStmt GhcPs (LHsExpr GhcPs)] -> Stmt GhcPs (LHsExpr GhcPs)) } -- Function is applied to a list of stmts *in order* : 'then' exp {% runPV (unECP $2) >>= \ $2 -> return ( sLL $1 $> (\ss -> (mkTransformStmt (AnnTransStmt (epTok $1) noAnn noAnn noAnn) ss $2))) } | 'then' exp 'by' exp {% runPV (unECP $2) >>= \ $2 -> runPV (unECP $4) >>= \ $4 -> return (sLL $1 $> (\ss -> (mkTransformByStmt (AnnTransStmt (epTok $1) noAnn (Just (epTok $3)) noAnn) ss $2 $4))) } | 'then' 'group' 'using' exp {% runPV (unECP $4) >>= \ $4 -> return (sLL $1 $> (\ss -> (mkGroupUsingStmt (AnnTransStmt (epTok $1) (Just (epTok $2)) noAnn (Just (epTok $3))) ss $4))) } | 'then' 'group' 'by' exp 'using' exp {% runPV (unECP $4) >>= \ $4 -> runPV (unECP $6) >>= \ $6 -> return (sLL $1 $> (\ss -> (mkGroupByUsingStmt (AnnTransStmt (epTok $1) (Just (epTok $2)) (Just (epTok $3)) (Just (epTok $5))) ss $4 $6))) } -- Note that 'group' is a special_id, which means that you can enable -- TransformListComp while still using Data.List.group. However, this -- introduces a shift/reduce conflict. Happy chooses to resolve the conflict -- in by choosing the "group by" variant, which is what we want. ----------------------------------------------------------------------------- -- Guards guardquals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : guardquals1 { L (getLoc $1) (reverse (unLoc $1)) } guardquals1 :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : guardquals1 ',' qual {% runPV $3 >>= \ $3 -> case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (epTok $2) return (sLL $1 $> ($3 : (h':t))) } | qual {% runPV $1 >>= \ $1 -> return $ sL1 $1 [$1] } ----------------------------------------------------------------------------- -- Case alternatives altslist(PATS) :: { forall b. 
DisambECP b => PV (LocatedLW [LMatch GhcPs (LocatedA b)]) } : '{' alts(PATS) '}' { $2 >>= \ $2 -> amsr (sLL $1 $> (reverse (snd $ unLoc $2))) (AnnList (Just $ glR $2) (ListBraces (epTok $1) (epTok $3)) (fst $ unLoc $2) noAnn []) } | vocurly alts(PATS) close { $2 >>= \ $2 -> amsr (L (getLoc $2) (reverse (snd $ unLoc $2))) (AnnList (Just $ glR $2) ListNone (fst $ unLoc $2) noAnn []) } | '{' '}' { amsr (sLL $1 $> []) (AnnList Nothing (ListBraces (epTok $1) (epTok $2)) [] noAnn []) } | vocurly close { return $ noLocA [] } alts(PATS) :: { forall b. DisambECP b => PV (Located ([EpToken ";"],[LMatch GhcPs (LocatedA b)])) } : alts1(PATS) { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts(PATS) { $2 >>= \ $2 -> return $ sLL $1 $> (((mzEpTok $1) : (fst $ unLoc $2) ) ,snd $ unLoc $2) } alts1(PATS) :: { forall b. DisambECP b => PV (Located ([EpToken ";"],[LMatch GhcPs (LocatedA b)])) } : alts1(PATS) ';' alt(PATS) { $1 >>= \ $1 -> $3 >>= \ $3 -> case snd $ unLoc $1 of [] -> return (sLL $1 $> ((fst $ unLoc $1) ++ [mzEpTok $2] ,[$3])) (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (sLL $1 $> (fst $ unLoc $1,$3 : h' : t)) } | alts1(PATS) ';' { $1 >>= \ $1 -> case snd $ unLoc $1 of [] -> return (sLZ $1 $> ((fst $ unLoc $1) ++ [mzEpTok $2] ,[])) (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (sLZ $1 $> (fst $ unLoc $1, h' : t)) } | alt(PATS) { $1 >>= \ $1 -> return $ sL1 $1 ([],[$1]) } alt(PATS) :: { forall b. DisambECP b => PV (LMatch GhcPs (LocatedA b)) } : PATS alt_rhs { $2 >>= \ $2 -> amsA' (sLLAsl $1 $> (Match { m_ext = noExtField , m_ctxt = CaseAlt -- for \case and \cases, this will be changed during post-processing , m_pats = L (listLocation $1) $1 , m_grhss = unLoc $2 }))} alt_rhs :: { forall b. 
DisambECP b => PV (Located (GRHSs GhcPs (LocatedA b))) } : ralt wherebinds { $1 >>= \alt -> do { let {L l (bs, csw) = adaptWhereBinds $2} ; acs (comb2 alt (L l bs)) (\loc cs -> L loc (GRHSs (cs Semi.<> csw) (unLoc alt) bs)) }} ralt :: { forall b. DisambECP b => PV (Located [LGRHS GhcPs (LocatedA b)]) } : '->' exp { unECP $2 >>= \ $2 -> acs (comb2 $1 $>) (\loc cs -> L loc (unguardedRHS (EpAnn (spanAsAnchor $ comb2 $1 $2) (GrhsAnn Nothing (Right $ epUniTok $1)) cs) (comb2 $1 $2) $2)) } | gdpats { $1 >>= \gdpats -> return $ sL1 gdpats (reverse (unLoc gdpats)) } gdpats :: { forall b. DisambECP b => PV (Located [LGRHS GhcPs (LocatedA b)]) } : gdpats gdpat { $1 >>= \gdpats -> $2 >>= \gdpat -> return $ sLL gdpats gdpat (gdpat : unLoc gdpats) } | gdpat { $1 >>= \gdpat -> return $ sL1 gdpat [gdpat] } -- layout for MultiWayIf doesn't begin with an open brace, because it's hard to -- generate the open brace in addition to the vertical bar in the lexer, and -- we don't need it. ifgdpats :: { Located ((EpToken "{", EpToken "}"), [LGRHS GhcPs (LHsExpr GhcPs)]) } : '{' gdpats '}' {% runPV $2 >>= \ $2 -> return $ sLL $1 $> ((epTok $1,epTok $3),unLoc $2) } | gdpats close {% runPV $1 >>= \ $1 -> return $ sL1 $1 ((NoEpTok, NoEpTok),unLoc $1) } gdpat :: { forall b. DisambECP b => PV (LGRHS GhcPs (LocatedA b)) } : '|' guardquals '->' exp { unECP $4 >>= \ $4 -> acsA (comb2 $1 $>) (\loc cs -> sL loc $ GRHS (EpAnn (glEE $1 $>) (GrhsAnn (Just $ epTok $1) (Right $ epUniTok $3)) cs) (unLoc $2) $4) } -- 'pat' recognises a pattern, including one with a bang at the top -- e.g. 
"!x" or "!(x,y)" or "C a b" etc -- Bangs inside are parsed as infix operator applications, so that -- we parse them right when bang-patterns are off pat_syn_pat :: { LPat GhcPs } pat_syn_pat : exp {% (checkPattern <=< runPV) (unECP $1) } pat :: { LPat GhcPs } pat : orpats(exp) {% case unLoc $1 of pat :| [] -> return pat pats -> hintOrPats (sL1a $1 (OrPat NoExtField pats)) } -- 'pats1' does the same thing as 'pat', but returns it as a singleton -- list so that it can be used with a parameterized production rule -- -- It is used only for parsing patterns in `\case` and `case of` pats1 :: { [LPat GhcPs] } pats1 : pat { [ $1 ] } bindpat :: { LPat GhcPs } bindpat : exp {% -- See Note [Parser-Validator Details] in GHC.Parser.PostProcess checkPattern_details incompleteDoBlock (unECP $1) } argpat :: { LPat GhcPs } argpat : apat { $1 } | PREFIX_AT atype { sLLa $1 $> (InvisPat (epTok $1, SpecifiedSpec) (mkHsTyPat $2)) } argpats :: { [LPat GhcPs] } : argpat argpats { $1 : $2 } | {- empty -} { [] } apat :: { LPat GhcPs } apat : aexp {% (checkPattern <=< runPV) (unECP $1) } ----------------------------------------------------------------------------- -- Statement sequences stmtlist :: { forall b. DisambECP b => PV (LocatedLW [LocatedA (Stmt GhcPs (LocatedA b))]) } : '{' stmts '}' { $2 >>= \ $2 -> amsr (sLL $1 $> (reverse $ snd $ unLoc $2)) (AnnList (stmtsAnchor $2) (ListBraces (epTok $1) (epTok $3)) (fromOL $ fst $ unLoc $2) noAnn []) } | vocurly stmts close { $2 >>= \ $2 -> amsr (L (stmtsLoc $2) (reverse $ snd $ unLoc $2)) (AnnList (stmtsAnchor $2) ListNone (fromOL $ fst $ unLoc $2) noAnn []) } -- do { ;; s ; s ; ; s ;; } -- The last Stmt should be an expression, but that's hard to enforce -- here, because we need too much lookahead if we see do { e ; } -- So we use BodyStmts throughout, and switch the last one over -- in ParseUtils.checkDo instead stmts :: { forall b. 
DisambECP b => PV (Located (OrdList (EpToken ";"),[LStmt GhcPs (LocatedA b)])) } : stmts ';' stmt { $1 >>= \ $1 -> $3 >>= \ ($3 :: LStmt GhcPs (LocatedA b)) -> case (snd $ unLoc $1) of [] -> return (sLL $1 $> ( (fst $ unLoc $1) `snocOL` (epTok $2) , $3 : (snd $ unLoc $1))) (h:t) -> do { h' <- addTrailingSemiA h (epTok $2) ; return $ sLL $1 $> (fst $ unLoc $1,$3 :(h':t)) }} | stmts ';' { $1 >>= \ $1 -> case (snd $ unLoc $1) of [] -> return (sLZ $1 $> ((fst $ unLoc $1) `snocOL` (epTok $2),snd $ unLoc $1)) (h:t) -> do { h' <- addTrailingSemiA h (epTok $2) ; return $ sLZ $1 $> (fst $ unLoc $1,h':t) }} | stmt { $1 >>= \ $1 -> return $ sL1 $1 (nilOL,[$1]) } | {- empty -} { return $ noLoc (nilOL,[]) } -- For typing stmts at the GHCi prompt, where -- the input may consist of just comments. maybe_stmt :: { Maybe (LStmt GhcPs (LHsExpr GhcPs)) } : stmt {% fmap Just (runPV $1) } | {- nothing -} { Nothing } -- For GHC API. e_stmt :: { LStmt GhcPs (LHsExpr GhcPs) } : stmt {% runPV $1 } stmt :: { forall b. DisambECP b => PV (LStmt GhcPs (LocatedA b)) } : qual { $1 } | 'rec' stmtlist { $2 >>= \ $2 -> amsA' (sLL $1 $> $ mkRecStmt (hsDoAnn (epTok $1) $2) $2) } qual :: { forall b. DisambECP b => PV (LStmt GhcPs (LocatedA b)) } : bindpat '<-' exp { unECP $3 >>= \ $3 -> amsA' (sLL $1 $> $ mkPsBindStmt (epUniTok $2) $1 $3) } | exp { unECP $1 >>= \ $1 -> return $ sL1a $1 $ mkBodyStmt $1 } | 'let' binds { amsA' (sLL $1 $> $ mkLetStmt (epTok $1) (unLoc $2)) } ----------------------------------------------------------------------------- -- Record Field Update/Construction fbinds :: { forall b. DisambECP b => PV ([Fbind b], Maybe SrcSpan) } : fbinds1 { $1 } | {- empty -} { return ([], Nothing) } fbinds1 :: { forall b. DisambECP b => PV ([Fbind b], Maybe SrcSpan) } : fbind ',' fbinds1 { $1 >>= \ $1 -> $3 >>= \ $3 -> do h <- addTrailingCommaFBind $1 (epTok $2) return (case $3 of (flds, dd) -> (h : flds, dd)) } | fbind { $1 >>= \ $1 -> return ([$1], Nothing) } | '..' 
{ return ([], Just (getLoc $1)) } fbind :: { forall b. DisambECP b => PV (Fbind b) } : qvar '=' texp { unECP $3 >>= \ $3 -> fmap Left $ amsA' (sLL $1 $> $ HsFieldBind (Just (epTok $2)) (sL1a $1 $ mkFieldOcc $1) $3 False) } -- RHS is a 'texp', allowing view patterns (#6038) -- and, incidentally, sections. Eg -- f (R { x = show -> s }) = ... | qvar { placeHolderPunRhs >>= \rhs -> fmap Left $ amsA' (sL1 $1 $ HsFieldBind Nothing (sL1a $1 $ mkFieldOcc $1) rhs True) } -- In the punning case, use a place-holder -- The renamer fills in the final value -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer -- AZ: need to pull out the let block into a helper | field TIGHT_INFIX_PROJ fieldToUpdate '=' texp { do let top = sL1a $1 $ DotFieldOcc noAnn $1 ((L lf (DotFieldOcc _ f)):t) = reverse (unLoc $3) lf' = comb2 $2 (L lf ()) fields = top : L (noAnnSrcSpan lf') (DotFieldOcc (AnnFieldLabel (Just $ epTok $2)) f) : t final = last fields l = comb2 $1 $3 isPun = False $5 <- unECP $5 fmap Right $ mkHsProjUpdatePV (comb2 $1 $5) (L l fields) $5 isPun (Just (epTok $4)) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer -- AZ: need to pull out the let block into a helper | field TIGHT_INFIX_PROJ fieldToUpdate { do let top = sL1a $1 $ DotFieldOcc noAnn $1 ((L lf (DotFieldOcc _ f)):t) = reverse (unLoc $3) lf' = comb2 $2 (L lf ()) fields = top : L (noAnnSrcSpan lf') (DotFieldOcc (AnnFieldLabel (Just $ epTok $2)) f) : t final = last fields l = comb2 $1 $3 isPun = True var <- mkHsVarPV (L (noAnnSrcSpan $ getLocA final) (mkRdrUnqual . mkVarOccFS . field_label . unLoc . dfoLabel . 
unLoc $ final)) fmap Right $ mkHsProjUpdatePV l (L l fields) var isPun Nothing } fieldToUpdate :: { Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] } fieldToUpdate -- See Note [Whitespace-sensitive operator parsing] in Lexer.x : fieldToUpdate TIGHT_INFIX_PROJ field { sLL $1 $> ((sLLa $2 $> (DotFieldOcc (AnnFieldLabel $ Just $ epTok $2) $3)) : unLoc $1) } | field { sL1 $1 [sL1a $1 (DotFieldOcc (AnnFieldLabel Nothing) $1)] } ----------------------------------------------------------------------------- -- Implicit Parameter Bindings dbinds :: { Located [LIPBind GhcPs] } -- reversed : dbinds ';' dbind {% case unLoc $1 of (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (let { this = $3; rest = h':t } in rest `seq` this `seq` sLL $1 $> (this : rest)) } | dbinds ';' {% case unLoc $1 of (h:t) -> do h' <- addTrailingSemiA h (epTok $2) return (sLZ $1 $> (h':t)) } | dbind { let this = $1 in this `seq` (sL1 $1 [this]) } -- | {- empty -} { [] } dbind :: { LIPBind GhcPs } dbind : ipvar '=' exp {% runPV (unECP $3) >>= \ $3 -> amsA' (sLL $1 $> (IPBind (epTok $2) (reLoc $1) $3)) } ipvar :: { Located HsIPName } : IPDUPVARID { sL1 $1 (HsIPName (getIPDUPVARID $1)) } ----------------------------------------------------------------------------- -- Overloaded labels overloaded_label :: { Located (SourceText, FastString) } : LABELVARID { sL1 $1 (getLABELVARIDs $1, getLABELVARID $1) } ----------------------------------------------------------------------------- -- Warnings and deprecations name_boolformula_opt :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula { $1 } | {- empty -} { noLocA mkTrue } name_boolformula :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula_and { $1 } | name_boolformula_and '|' name_boolformula {% do { h <- addTrailingVbarL $1 (epTok $2) ; return (sLLa $1 $> (Or [h,$3])) } } name_boolformula_and :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula_and_list { sLLa (head $1) (last $1) (And ($1)) } name_boolformula_and_list :: { 
[LBooleanFormula (LocatedN RdrName)] } : name_boolformula_atom { [$1] } | name_boolformula_atom ',' name_boolformula_and_list {% do { h <- addTrailingCommaL $1 (epTok $2) ; return (h : $3) } } name_boolformula_atom :: { LBooleanFormula (LocatedN RdrName) } : '(' name_boolformula ')' {% amsr (sLL $1 $> (Parens $2)) (AnnList Nothing (ListParens (epTok $1) (epTok $3)) [] noAnn []) } | name_var { sL1a $1 (Var $1) } namelist :: { Located [LocatedN RdrName] } namelist : name_var { sL1 $1 [$1] } | name_var ',' namelist {% do { h <- addTrailingCommaN $1 (gl $2) ; return (sLL $1 $> (h : unLoc $3)) }} name_var :: { LocatedN RdrName } name_var : var { $1 } | con { $1 } ----------------------------------------- -- Data constructors -- There are two different productions here as lifted list constructors -- are parsed differently. qcon :: { LocatedN RdrName } : gen_qcon { $1 } | syscon { $1 } gen_qcon :: { LocatedN RdrName } : qconid { $1 } | '(' qconsym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } con :: { LocatedN RdrName } : conid { $1 } | '(' consym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } | syscon { $1 } con_list :: { Located (NonEmpty (LocatedN RdrName)) } con_list : con { sL1 $1 (pure $1) } | con ',' con_list {% sLL $1 $> . (:| toList (unLoc $3)) <\$> addTrailingCommaN $1 (gl $2) } qcon_list :: { [LocatedN RdrName] } qcon_list : qcon { [$1] } | qcon ',' qcon_list {% do { h <- addTrailingCommaN $1 (gl $2) ; return (h : $3) }} -- See Note [ExplicitTuple] in GHC.Hs.Expr sysdcon_nolist :: { LocatedN DataCon } -- Wired in data constructors : '(' commas ')' {% amsr (sLL $1 $> $ tupleDataCon Boxed (snd $2 + 1)) (NameAnnCommas (NameParens (epTok $1) (epTok $3)) (map (EpTok . 
srcSpan2e) (fst $2)) []) } | '(#' '#)' {% amsr (sLL $1 $> $ unboxedUnitDataCon) (NameAnnOnly (NameParensHash (epTok $1) (epTok $2)) []) } | '(#' commas '#)' {% amsr (sLL $1 $> $ tupleDataCon Unboxed (snd $2 + 1)) (NameAnnCommas (NameParensHash (epTok $1) (epTok $3)) (map (EpTok . srcSpan2e) (fst $2)) []) } syscon :: { LocatedN RdrName } : sysdcon { L (getLoc $1) $ nameRdrName (dataConName (unLoc $1)) } | '(' '->' ')' {% amsr (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnnRArrow (Just $ epTok $1) (epUniTok $2) (Just $ epTok $3) []) } -- See Note [Empty lists] in GHC.Hs.Expr sysdcon :: { LocatedN DataCon } : sysdcon_nolist { $1 } | '(' ')' {% amsr (sLL $1 $> unitDataCon) (NameAnnOnly (NameParens (epTok $1) (epTok $2)) []) } | '[' ']' {% amsr (sLL $1 $> nilDataCon) (NameAnnOnly (NameSquare (epTok $1) (epTok $2)) []) } conop :: { LocatedN RdrName } : consym { $1 } | '`' conid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } qconop :: { LocatedN RdrName } : qconsym { $1 } | '`' qconid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } ---------------------------------------------------------------------------- -- Type constructors -- See Note [Unit tuples] in GHC.Hs.Type for the distinction -- between gtycon and ntgtycon gtycon :: { LocatedN RdrName } -- A "general" qualified tycon, including unit tuples : ntgtycon { $1 } | '(' ')' {% amsr (sLL $1 $> $ getRdrName unitTyCon) (NameAnnOnly (NameParens (epTok $1) (epTok $2)) []) } | '(#' '#)' {% amsr (sLL $1 $> $ getRdrName unboxedUnitTyCon) (NameAnnOnly (NameParensHash (epTok $1) (epTok $2)) []) } | '[' ']' {% amsr (sLL $1 $> $ listTyCon_RDR) (NameAnnOnly (NameSquare (epTok $1) (epTok $2)) []) } ntgtycon :: { LocatedN RdrName } -- A "general" qualified tycon, excluding unit tuples : oqtycon { $1 } | '(' commas ')' {% do { n <- mkTupleSyntaxTycon Boxed (snd $2 + 1) ; amsr (sLL $1 $> n) (NameAnnCommas (NameParens (epTok $1) 
(epTok $3)) (map (EpTok . srcSpan2e) (fst $2)) []) }} | '(#' commas '#)' {% do { n <- mkTupleSyntaxTycon Unboxed (snd $2 + 1) ; amsr (sLL $1 $> n) (NameAnnCommas (NameParensHash (epTok $1) (epTok $3)) (map (EpTok . srcSpan2e) (fst $2)) []) }} | '(#' bars '#)' {% do { requireLTPuns PEP_SumSyntaxType $1 $> ; amsr (sLL $1 $> $ (getRdrName (sumTyCon (snd $2 + 1)))) (NameAnnBars (epTok $1, epTok $3) (fst $2) []) } } | '(' '->' ')' {% amsr (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnnRArrow (Just $ epTok $1) (epUniTok $2) (Just $ epTok $3) []) } oqtycon :: { LocatedN RdrName } -- An "ordinary" qualified tycon; -- These can appear in export lists : qtycon { $1 } | '(' qtyconsym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } oqtycon_no_varcon :: { LocatedN RdrName } -- Type constructor which cannot be mistaken -- for variable constructor in export lists -- see Note [Type constructors in export list] : qtycon { $1 } | '(' QCONSYM ')' {% let { name :: Located RdrName ; name = sL1 $2 $! mkQual tcClsName (getQCONSYM $2) } in amsr (sLL $1 $> (unLoc name)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } | '(' CONSYM ')' {% let { name :: Located RdrName ; name = sL1 $2 $! mkUnqual tcClsName (getCONSYM $2) } in amsr (sLL $1 $> (unLoc name)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } | '(' ':' ')' {% let { name :: Located RdrName ; name = sL1 $2 $! consDataCon_RDR } in amsr (sLL $1 $> (unLoc name)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } {- Note [Type constructors in export list] ~~~~~~~~~~~~~~~~~~~~~ Mixing type constructors and data constructors in export lists introduces ambiguity in grammar: e.g. (*) may be both a type constructor and a function. -XExplicitNamespaces allows to disambiguate by explicitly prefixing type constructors with 'type' keyword. This ambiguity causes reduce/reduce conflicts in parser, which are always resolved in favour of data constructors. 
To get rid of conflicts we demand that ambiguous type constructors (those, which are formed by the same productions as variable constructors) are always prefixed with 'type' keyword. Unambiguous type constructors may occur both with or without 'type' keyword. Note that in the parser we still parse data constructors as type constructors. As such, they still end up in the type constructor namespace until after renaming when we resolve the proper namespace for each exported child. -} qtyconop :: { LocatedN RdrName } -- Qualified or unqualified -- See Note [%shift: qtyconop -> qtyconsym] : qtyconsym %shift { $1 } | '`' qtycon '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } qtycon :: { LocatedN RdrName } -- Qualified or unqualified : QCONID { sL1n $1 $! mkQual tcClsName (getQCONID $1) } | tycon { $1 } tycon :: { LocatedN RdrName } -- Unqualified : CONID { sL1n $1 $! mkUnqual tcClsName (getCONID $1) } qtyconsym :: { LocatedN RdrName } : QCONSYM { sL1n $1 $! mkQual tcClsName (getQCONSYM $1) } | QVARSYM { sL1n $1 $! mkQual tcClsName (getQVARSYM $1) } | tyconsym { $1 } tyconsym :: { LocatedN RdrName } : CONSYM { sL1n $1 $! mkUnqual tcClsName (getCONSYM $1) } | VARSYM { sL1n $1 $! mkUnqual tcClsName (getVARSYM $1) } | ':' { sL1n $1 $! consDataCon_RDR } | '-' { sL1n $1 $! mkUnqual tcClsName (fsLit "-") } | '.' { sL1n $1 $! mkUnqual tcClsName (fsLit ".") } -- An "ordinary" unqualified tycon. See `oqtycon` for the qualified version. -- These can appear in `ANN type` declarations (#19374). 
otycon :: { LocatedN RdrName } : tycon { $1 } | '(' tyconsym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } ----------------------------------------------------------------------------- -- Operators op :: { LocatedN RdrName } -- used in infix decls : varop { $1 } | conop { $1 } | '->' {% amsr (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnnRArrow Nothing (epUniTok $1) Nothing []) } varop :: { LocatedN RdrName } : varsym { $1 } | '`' varid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } qop :: { forall b. DisambInfixOp b => PV (LocatedN b) } -- used in sections : qvarop { mkHsVarOpPV $1 } | qconop { mkHsConOpPV $1 } | hole_op { mkHsInfixHolePV $1 } qopm :: { forall b. DisambInfixOp b => PV (LocatedN b) } -- used in sections : qvaropm { mkHsVarOpPV $1 } | qconop { mkHsConOpPV $1 } | hole_op { mkHsInfixHolePV $1 } hole_op :: { LocatedN (HsExpr GhcPs) } -- used in sections hole_op : '`' '_' '`' { sLLa $1 $> (hsHoleExpr (Just $ EpAnnUnboundVar (epTok $1, epTok $3) (epTok $2))) } qvarop :: { LocatedN RdrName } : qvarsym { $1 } | '`' qvarid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } qvaropm :: { LocatedN RdrName } : qvarsym_no_minus { $1 } | '`' qvarid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } ----------------------------------------------------------------------------- -- Type variables tyvar :: { LocatedN RdrName } tyvar : tyvarid { $1 } tyvarop :: { LocatedN RdrName } tyvarop : '`' tyvarid '`' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameBackquotes (epTok $1) (epTok $3)) (glR $2) []) } tyvarid :: { LocatedN RdrName } : VARID { sL1n $1 $! mkUnqual tvName (getVARID $1) } | special_id { sL1n $1 $! mkUnqual tvName (unLoc $1) } | 'unsafe' { sL1n $1 $! mkUnqual tvName (fsLit "unsafe") } | 'safe' { sL1n $1 $! 
mkUnqual tvName (fsLit "safe") } | 'interruptible' { sL1n $1 $! mkUnqual tvName (fsLit "interruptible") } -- If this changes relative to varid, update 'checkRuleTyVarBndrNames' -- in GHC.Parser.PostProcess -- See Note [Parsing explicit foralls in Rules] ----------------------------------------------------------------------------- -- Variables var :: { LocatedN RdrName } : varid { $1 } | '(' varsym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } qvar :: { LocatedN RdrName } : qvarid { $1 } | '(' varsym ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } | '(' qvarsym1 ')' {% amsr (sLL $1 $> (unLoc $2)) (NameAnn (NameParens (epTok $1) (epTok $3)) (glR $2) []) } -- We've inlined qvarsym here so that the decision about -- whether it's a qvar or a var can be postponed until -- *after* we see the close paren. field :: { LocatedN FieldLabelString } : varid { fmap (FieldLabelString . occNameFS . rdrNameOcc) $1 } qvarid :: { LocatedN RdrName } : varid { $1 } | QVARID { sL1n $1 $! mkQual varName (getQVARID $1) } -- Note that 'role' and 'family' get lexed separately regardless of -- the use of extensions. However, because they are listed here, -- this is OK and they can be used as normal varids. -- See Note [Lexing type pseudo-keywords] in GHC.Parser.Lexer varid :: { LocatedN RdrName } : VARID { sL1n $1 $! mkUnqual varName (getVARID $1) } | special_id { sL1n $1 $! mkUnqual varName (unLoc $1) } | 'unsafe' { sL1n $1 $! mkUnqual varName (fsLit "unsafe") } | 'safe' { sL1n $1 $! mkUnqual varName (fsLit "safe") } | 'interruptible' { sL1n $1 $! mkUnqual varName (fsLit "interruptible")} | 'family' { sL1n $1 $! mkUnqual varName (fsLit "family") } | 'role' { sL1n $1 $! 
mkUnqual varName (fsLit "role") } -- If this changes relative to tyvarid, update 'checkRuleTyVarBndrNames' -- in GHC.Parser.PostProcess -- See Note [Parsing explicit foralls in Rules] qvarsym :: { LocatedN RdrName } : varsym { $1 } | qvarsym1 { $1 } qvarsym_no_minus :: { LocatedN RdrName } : varsym_no_minus { $1 } | qvarsym1 { $1 } qvarsym1 :: { LocatedN RdrName } qvarsym1 : QVARSYM { sL1n $1 $ mkQual varName (getQVARSYM $1) } varsym :: { LocatedN RdrName } : varsym_no_minus { $1 } | '-' { sL1n $1 $ mkUnqual varName (fsLit "-") } varsym_no_minus :: { LocatedN RdrName } -- varsym not including '-' : VARSYM { sL1n $1 $ mkUnqual varName (getVARSYM $1) } | special_sym { sL1n $1 $ mkUnqual varName (unLoc $1) } -- These special_ids are treated as keywords in various places, -- but as ordinary ids elsewhere. 'special_id' collects all these -- except 'unsafe', 'interruptible', 'forall', 'family', 'role', 'stock', and -- 'anyclass', whose treatment differs depending on context special_id :: { Located FastString } special_id : 'as' { sL1 $1 (fsLit "as") } | 'qualified' { sL1 $1 (fsLit "qualified") } | 'hiding' { sL1 $1 (fsLit "hiding") } | 'export' { sL1 $1 (fsLit "export") } | 'label' { sL1 $1 (fsLit "label") } | 'dynamic' { sL1 $1 (fsLit "dynamic") } | 'stdcall' { sL1 $1 (fsLit "stdcall") } | 'ccall' { sL1 $1 (fsLit "ccall") } | 'capi' { sL1 $1 (fsLit "capi") } | 'prim' { sL1 $1 (fsLit "prim") } | 'javascript' { sL1 $1 (fsLit "javascript") } -- See Note [%shift: special_id -> 'group'] | 'group' %shift { sL1 $1 (fsLit "group") } | 'stock' { sL1 $1 (fsLit "stock") } | 'anyclass' { sL1 $1 (fsLit "anyclass") } | 'via' { sL1 $1 (fsLit "via") } | 'unit' { sL1 $1 (fsLit "unit") } | 'dependency' { sL1 $1 (fsLit "dependency") } | 'signature' { sL1 $1 (fsLit "signature") } special_sym :: { Located FastString } special_sym : '.' 
{ sL1 $1 (fsLit ".") } | '*' { sL1 $1 (starSym (isUnicode $1)) } ----------------------------------------------------------------------------- -- Data constructors qconid :: { LocatedN RdrName } -- Qualified or unqualified : conid { $1 } | QCONID { sL1n $1 $! mkQual dataName (getQCONID $1) } conid :: { LocatedN RdrName } : CONID { sL1n $1 $ mkUnqual dataName (getCONID $1) } qconsym :: { LocatedN RdrName } -- Qualified or unqualified : consym { $1 } | QCONSYM { sL1n $1 $ mkQual dataName (getQCONSYM $1) } consym :: { LocatedN RdrName } : CONSYM { sL1n $1 $ mkUnqual dataName (getCONSYM $1) } -- ':' means only list cons | ':' { sL1n $1 $ consDataCon_RDR } ----------------------------------------------------------------------------- -- Literals literal :: { Located (HsLit GhcPs) } : CHAR { sL1 $1 $ HsChar (getCHARs $1) $ getCHAR $1 } | STRING { sL1 $1 $ HsString (getSTRINGs $1) $ getSTRING $1 } | STRING_MULTI { sL1 $1 $ HsMultilineString (getSTRINGMULTIs $1) $ getSTRINGMULTI $1 } | PRIMINTEGER { sL1 $1 $ HsIntPrim (getPRIMINTEGERs $1) $ getPRIMINTEGER $1 } | PRIMWORD { sL1 $1 $ HsWordPrim (getPRIMWORDs $1) $ getPRIMWORD $1 } | PRIMINTEGER8 { sL1 $1 $ HsInt8Prim (getPRIMINTEGER8s $1) $ getPRIMINTEGER8 $1 } | PRIMINTEGER16 { sL1 $1 $ HsInt16Prim (getPRIMINTEGER16s $1) $ getPRIMINTEGER16 $1 } | PRIMINTEGER32 { sL1 $1 $ HsInt32Prim (getPRIMINTEGER32s $1) $ getPRIMINTEGER32 $1 } | PRIMINTEGER64 { sL1 $1 $ HsInt64Prim (getPRIMINTEGER64s $1) $ getPRIMINTEGER64 $1 } | PRIMWORD8 { sL1 $1 $ HsWord8Prim (getPRIMWORD8s $1) $ getPRIMWORD8 $1 } | PRIMWORD16 { sL1 $1 $ HsWord16Prim (getPRIMWORD16s $1) $ getPRIMWORD16 $1 } | PRIMWORD32 { sL1 $1 $ HsWord32Prim (getPRIMWORD32s $1) $ getPRIMWORD32 $1 } | PRIMWORD64 { sL1 $1 $ HsWord64Prim (getPRIMWORD64s $1) $ getPRIMWORD64 $1 } | PRIMCHAR { sL1 $1 $ HsCharPrim (getPRIMCHARs $1) $ getPRIMCHAR $1 } | PRIMSTRING { sL1 $1 $ HsStringPrim (getPRIMSTRINGs $1) $ getPRIMSTRING $1 } | PRIMFLOAT { sL1 $1 $ HsFloatPrim noExtField $ getPRIMFLOAT $1 } 
| PRIMDOUBLE { sL1 $1 $ HsDoublePrim noExtField $ getPRIMDOUBLE $1 } ----------------------------------------------------------------------------- -- Layout {- Note [Layout and error] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The Haskell 2010 report (Section 10.3, Note 5) dictates the use of the error token in `close`. To recall why that is necessary, consider f x = case x of True -> False where y = x+1 The virtual pass L inserts vocurly, semi, vccurly to return a laid-out token stream. It must insert a vccurly before `where` to close the layout block introduced by `of`. But there is no good way to do so other than L becoming aware of the grammar! Thus, L is specified to detect the ensuing parse error (implemented via happy's `error` token) and then insert the vccurly. Thus in effect, L is distributed between Lexer.x and Parser.y. There are a bunch of other, less "tricky" examples: let x = x {- vccurly -} in x -- could just track bracketing of -- let..in on layout stack to fix (case x of True -> False {- vccurly -}) -- ditto for surrounding delimiters such as () data T = T;{- vccurly -} -- Need insert vccurly at EOF Many of these are not that hard to fix, but still tedious and prone to break when the grammar changes; but the `of`/`where` example is especially gnarly, because it demonstrates a grammatical interaction between two lexically unrelated tokens. -} close :: { () } : vccurly { () } -- context popped in lexer. 
| error {% popContext } -- See Note [Layout and error] ----------------------------------------------------------------------------- -- Miscellaneous (mostly renamings) modid :: { LocatedA ModuleName } : CONID { sL1a $1 $ mkModuleNameFS (getCONID $1) } | QCONID { sL1a $1 $ let (mod,c) = getQCONID $1 in mkModuleNameFS (concatFS [mod, fsLit ".", c]) } commas :: { ([SrcSpan],Int) } -- One or more commas : commas ',' { ((fst $1)++[gl $2],snd $1 + 1) } | ',' { ([gl $1],1) } bars0 :: { ([EpToken "|"],Int) } -- Zero or more bars : bars { $1 } | { ([], 0) } bars :: { ([EpToken "|"],Int) } -- One or more bars : bars '|' { ((fst $1)++[epTok $2],snd $1 + 1) } | '|' { ([epTok $1],1) } { happyError :: P a happyError = srcParseFail getVARID (L _ (ITvarid x)) = x getCONID (L _ (ITconid x)) = x getVARSYM (L _ (ITvarsym x)) = x getCONSYM (L _ (ITconsym x)) = x getDO (L _ (ITdo x)) = x getMDO (L _ (ITmdo x)) = x getQVARID (L _ (ITqvarid x)) = x getQCONID (L _ (ITqconid x)) = x getQVARSYM (L _ (ITqvarsym x)) = x getQCONSYM (L _ (ITqconsym x)) = x getIPDUPVARID (L _ (ITdupipvarid x)) = x getLABELVARID (L _ (ITlabelvarid _ x)) = x getCHAR (L _ (ITchar _ x)) = x getSTRING (L _ (ITstring _ x)) = x getSTRINGMULTI (L _ (ITstringMulti _ x)) = x getINTEGER (L _ (ITinteger x)) = x getRATIONAL (L _ (ITrational x)) = x getPRIMCHAR (L _ (ITprimchar _ x)) = x getPRIMSTRING (L _ (ITprimstring _ x)) = x getPRIMINTEGER (L _ (ITprimint _ x)) = x getPRIMWORD (L _ (ITprimword _ x)) = x getPRIMINTEGER8 (L _ (ITprimint8 _ x)) = x getPRIMINTEGER16 (L _ (ITprimint16 _ x)) = x getPRIMINTEGER32 (L _ (ITprimint32 _ x)) = x getPRIMINTEGER64 (L _ (ITprimint64 _ x)) = x getPRIMWORD8 (L _ (ITprimword8 _ x)) = x getPRIMWORD16 (L _ (ITprimword16 _ x)) = x getPRIMWORD32 (L _ (ITprimword32 _ x)) = x getPRIMWORD64 (L _ (ITprimword64 _ x)) = x getPRIMFLOAT (L _ (ITprimfloat x)) = x getPRIMDOUBLE (L _ (ITprimdouble x)) = x getINLINE (L _ (ITinline_prag _ inl conl)) = (inl,conl) getSPEC_INLINE (L _ (ITspec_inline_prag 
src True)) = (Inline src,FunLike) getSPEC_INLINE (L _ (ITspec_inline_prag src False)) = (NoInline src,FunLike) getCOMPLETE_PRAGs (L _ (ITcomplete_prag x)) = x getVOCURLY (L (RealSrcSpan l _) ITvocurly) = srcSpanStartCol l getINTEGERs (L _ (ITinteger (IL src _ _))) = src getCHARs (L _ (ITchar src _)) = src getSTRINGs (L _ (ITstring src _)) = src getSTRINGMULTIs (L _ (ITstringMulti src _)) = src getPRIMCHARs (L _ (ITprimchar src _)) = src getPRIMSTRINGs (L _ (ITprimstring src _)) = src getPRIMINTEGERs (L _ (ITprimint src _)) = src getPRIMWORDs (L _ (ITprimword src _)) = src getPRIMINTEGER8s (L _ (ITprimint8 src _)) = src getPRIMINTEGER16s (L _ (ITprimint16 src _)) = src getPRIMINTEGER32s (L _ (ITprimint32 src _)) = src getPRIMINTEGER64s (L _ (ITprimint64 src _)) = src getPRIMWORD8s (L _ (ITprimword8 src _)) = src getPRIMWORD16s (L _ (ITprimword16 src _)) = src getPRIMWORD32s (L _ (ITprimword32 src _)) = src getPRIMWORD64s (L _ (ITprimword64 src _)) = src getLABELVARIDs (L _ (ITlabelvarid src _)) = src -- See Note [Pragma source text] in "GHC.Types.SourceText" for the following getINLINE_PRAGs (L _ (ITinline_prag _ inl _)) = inlineSpecSource inl getOPAQUE_PRAGs (L _ (ITopaque_prag src)) = src getSPEC_PRAGs (L _ (ITspec_prag src)) = src getSPEC_INLINE_PRAGs (L _ (ITspec_inline_prag src _)) = src getSOURCE_PRAGs (L _ (ITsource_prag src)) = src getRULES_PRAGs (L _ (ITrules_prag src)) = src getWARNING_PRAGs (L _ (ITwarning_prag src)) = src getDEPRECATED_PRAGs (L _ (ITdeprecated_prag src)) = src getSCC_PRAGs (L _ (ITscc_prag src)) = src getUNPACK_PRAGs (L _ (ITunpack_prag src)) = src getNOUNPACK_PRAGs (L _ (ITnounpack_prag src)) = src getANN_PRAGs (L _ (ITann_prag src)) = src getMINIMAL_PRAGs (L _ (ITminimal_prag src)) = src getOVERLAPPABLE_PRAGs (L _ (IToverlappable_prag src)) = src getOVERLAPPING_PRAGs (L _ (IToverlapping_prag src)) = src getOVERLAPS_PRAGs (L _ (IToverlaps_prag src)) = src getINCOHERENT_PRAGs (L _ (ITincoherent_prag src)) = src getCTYPEs (L _ (ITctype 
src)) = src getStringLiteral l = StringLiteral (getSTRINGs l) (getSTRING l) Nothing isUnicode :: Located Token -> Bool isUnicode (L _ (ITforall iu)) = iu == UnicodeSyntax isUnicode (L _ (ITdarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITdcolon iu)) = iu == UnicodeSyntax isUnicode (L _ (ITlarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITrarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITlarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITrarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITLarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITRarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (IToparenbar iu)) = iu == UnicodeSyntax isUnicode (L _ (ITcparenbar iu)) = iu == UnicodeSyntax isUnicode (L _ (ITopenExpQuote _ iu)) = iu == UnicodeSyntax isUnicode (L _ (ITcloseQuote iu)) = iu == UnicodeSyntax isUnicode (L _ (ITstar iu)) = iu == UnicodeSyntax isUnicode (L _ ITlolly) = True isUnicode _ = False hasE :: Located Token -> Bool hasE (L _ (ITopenExpQuote HasE _)) = True hasE (L _ (ITopenTExpQuote HasE)) = True hasE _ = False getSCC :: Located Token -> P FastString getSCC lt = do let s = getSTRING lt -- We probably actually want to be more restrictive than this if ' ' `elem` unpackFS s then addFatalError $ mkPlainErrorMsgEnvelope (getLoc lt) $ PsErrSpaceInSCC else return s stringLiteralToHsDocWst :: Located StringLiteral -> LocatedE (WithHsDocIdentifiers StringLiteral GhcPs) stringLiteralToHsDocWst sl = reLoc $ lexStringLiteral parseIdentifier sl -- Utilities for combining source spans comb2 :: (HasLoc a, HasLoc b) => a -> b -> SrcSpan comb2 !a !b = combineHasLocs a b comb3 :: (HasLoc a, HasLoc b, HasLoc c) => a -> b -> c -> SrcSpan comb3 !a !b !c = combineSrcSpans (getHasLoc a) (combineHasLocs b c) comb4 :: (HasLoc a, HasLoc b, HasLoc c, HasLoc d) => a -> b -> c -> d -> SrcSpan comb4 !a !b !c !d = combineSrcSpans (getHasLoc a) $ combineSrcSpans (getHasLoc b) $ combineSrcSpans (getHasLoc c) (getHasLoc d) comb5 :: (HasLoc a, HasLoc b, HasLoc c, 
HasLoc d, HasLoc e) => a -> b -> c -> d -> e -> SrcSpan comb5 !a !b !c !d !e = combineSrcSpans (getHasLoc a) $ combineSrcSpans (getHasLoc b) $ combineSrcSpans (getHasLoc c) $ combineSrcSpans (getHasLoc d) (getHasLoc e) -- strict constructor version: {-# INLINE sL #-} sL :: l -> a -> GenLocated l a sL !loc !a = L loc a -- See Note [Adding location info] for how these utility functions are used -- replaced last 3 CPP macros in this file {-# INLINE sL0 #-} sL0 :: a -> Located a sL0 = L noSrcSpan -- #define L0 L noSrcSpan {-# INLINE sL1 #-} sL1 :: HasLoc a => a -> b -> Located b sL1 !x = sL (getHasLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1a #-} sL1a :: (HasLoc a, HasAnnotation t) => a -> b -> GenLocated t b sL1a !x = sL (noAnnSrcSpan $ getHasLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1n #-} sL1n :: HasLoc a => a -> b -> LocatedN b sL1n !x = L (noAnnSrcSpan $ getHasLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sLL #-} sLL :: (HasLoc a, HasLoc b) => a -> b -> c -> Located c sLL !x !y = sL (comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLa #-} sLLa :: (HasLoc a, HasLoc b, NoAnn t) => a -> b -> c -> LocatedAn t c sLLa !x !y = sL (noAnnSrcSpan $ comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLl #-} sLLl :: (HasLoc a, HasLoc b) => a -> b -> c -> LocatedL c sLLl !x !y = sL (noAnnSrcSpan $ comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLld #-} sLLld :: (HasLoc a, HasLoc b) => a -> b -> c -> LocatedLW c sLLld !x !y = sL (noAnnSrcSpan $ comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLAsl #-} sLLAsl :: (HasLoc a) => [a] -> Located b -> c -> Located c sLLAsl [] = sL1 sLLAsl (!x:_) = sLL x {-# INLINE sLZ #-} sLZ :: (HasLoc a, HasLoc b) => a -> b -> c -> Located c sLZ !x !y = if isZeroWidthSpan (getHasLoc y) then sL (getHasLoc x) else sL (comb2 x y) {- Note [Adding location info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is done using the three functions below, sL0, sL1 and sLL. 
Note that these functions were mechanically converted from the three macros that used to exist before, namely L0, L1 and LL. They each add a SrcSpan to their argument. sL0 adds 'noSrcSpan', used for empty productions -- This doesn't seem to work anymore -=chak sL1 for a production with a single token on the lhs. Grabs the SrcSpan from that token. sLL for a production with >1 token on the lhs. Makes up a SrcSpan from the first and last tokens. These suffice for the majority of cases. However, we must be especially careful with empty productions: sLL won't work if the first or last token on the lhs can represent an empty span. In these cases, we have to calculate the span using more of the tokens from the lhs, eg. | 'newtype' tycl_hdr '=' newconstr deriving { L (comb3 $1 $4 $5) (mkTyData NewType (unLoc $2) $4 (unLoc $5)) } We provide comb3 and comb4 functions which are useful in such cases. Be careful: there's no checking that you actually got this right, the only symptom will be that the SrcSpans of your syntax will be incorrect. -} -- Make a source location for the file. We're a bit lazy here and just -- make a point SrcSpan at line 1, column 0. Strictly speaking we should -- try to find the span of the whole file (ToDo). fileSrcSpan :: P SrcSpan fileSrcSpan = do l <- getRealSrcLoc; let loc = mkSrcLoc (srcLocFile l) 1 1; return (mkSrcSpan loc loc) -- Hint about linear types hintLinear :: MonadP m => SrcSpan -> m () hintLinear span = do linearEnabled <- getBit LinearTypesBit unless linearEnabled $ addError $ mkPlainErrorMsgEnvelope span $ PsErrLinearFunction -- Does this look like (a %m)? 
looksLikeMult :: LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> Bool looksLikeMult ty1 l_op ty2 | Unqual op_name <- unLoc l_op , occNameFS op_name == fsLit "%" , Strict.Just ty1_pos <- getBufSpan (getLocA ty1) , Strict.Just pct_pos <- getBufSpan (getLocA l_op) , Strict.Just ty2_pos <- getBufSpan (getLocA ty2) , bufSpanEnd ty1_pos /= bufSpanStart pct_pos , bufSpanEnd pct_pos == bufSpanStart ty2_pos = True | otherwise = False -- Hint about or-patterns hintOrPats :: MonadP m => LPat GhcPs -> m (LPat GhcPs) hintOrPats pat = do orPatsEnabled <- getBit OrPatternsBit unless orPatsEnabled $ addError $ mkPlainErrorMsgEnvelope (locA (getLoc pat)) $ PsErrIllegalOrPat pat return pat -- Hint about the MultiWayIf extension hintMultiWayIf :: SrcSpan -> P () hintMultiWayIf span = do mwiEnabled <- getBit MultiWayIfBit unless mwiEnabled $ addError $ mkPlainErrorMsgEnvelope span PsErrMultiWayIf -- Hint about explicit-forall hintExplicitForall :: Located Token -> P () hintExplicitForall tok = do explicit_forall_enabled <- getBit ExplicitForallBit in_rule_prag <- getBit InRulePragBit unless (explicit_forall_enabled || in_rule_prag) $ addError $ mkPlainErrorMsgEnvelope (getLoc tok) $ PsErrExplicitForall (isUnicode tok) -- Hint about qualified-do hintQualifiedDo :: Located Token -> P () hintQualifiedDo tok = do qualifiedDo <- getBit QualifiedDoBit case maybeQDoDoc of Just qdoDoc | not qualifiedDo -> addError $ mkPlainErrorMsgEnvelope (getLoc tok) $ (PsErrIllegalQualifiedDo qdoDoc) _ -> return () where maybeQDoDoc = case unLoc tok of ITdo (Just m) -> Just $ ftext m <> text ".do" ITmdo (Just m) -> Just $ ftext m <> text ".mdo" t -> Nothing -- When two single quotes don't followed by tyvar or gtycon, we report the -- error as empty character literal, or TH quote that missing proper type -- variable or constructor. See #13450. 
reportEmptyDoubleQuotes :: SrcSpan -> P a reportEmptyDoubleQuotes span = do thQuotes <- getBit ThQuotesBit addFatalError $ mkPlainErrorMsgEnvelope span $ PsErrEmptyDoubleQuotes thQuotes {- %************************************************************************ %* * Helper functions for generating annotations in the parser %* * %************************************************************************ For the general principles of the following routines, see Note [exact print annotations] in GHC.Parser.Annotation -} msemi :: Located Token -> [TrailingAnn] msemi !l = if isZeroWidthSpan (gl l) then [] else [AddSemiAnn (epTok l)] msemiA :: Located Token -> [EpToken ";"] msemiA !l = if isZeroWidthSpan (gl l) then [] else [epTok l] msemim :: Located Token -> Maybe (EpToken ";") msemim !l = if isZeroWidthSpan (gl l) then Nothing else Just (epTok l) toUnicode :: Located Token -> IsUnicodeSyntax toUnicode t = if isUnicode t then UnicodeSyntax else NormalSyntax -- ------------------------------------- gl :: GenLocated l a -> l gl = getLoc glA :: HasLoc a => a -> SrcSpan glA = getHasLoc glR :: HasLoc a => a -> EpaLocation glR !la = EpaSpan (getHasLoc la) glEE :: (HasLoc a, HasLoc b) => a -> b -> EpaLocation glEE !x !y = spanAsAnchor $ comb2 x y glEEz :: (HasLoc a, HasLoc b) => a -> b -> EpaLocation glEEz !x !y = if isZeroWidthSpan (getHasLoc y) then spanAsAnchor (getHasLoc x) else spanAsAnchor $ comb2 x y glRM :: Located a -> Maybe EpaLocation glRM (L !l _) = Just $ spanAsAnchor l n2l :: LocatedN a -> LocatedA a n2l (L !la !a) = L (l2l la) a -- Called at the very end to pick up the EOF position, as well as any comments not allocated yet. 
acsFinal :: (EpAnnComments -> Maybe (RealSrcSpan, RealSrcSpan) -> Located a) -> P (Located a) acsFinal a = do let (L l _) = a emptyComments Nothing !cs <- getCommentsFor l csf <- getFinalCommentsFor l meof <- getEofPos let ce = case meof of Strict.Nothing -> Nothing Strict.Just (pos `Strict.And` gap) -> Just (pos,gap) return (a (cs Semi.<> csf) ce) acs :: (HasLoc l, MonadP m) => l -> (l -> EpAnnComments -> GenLocated l a) -> m (GenLocated l a) acs !l a = do !cs <- getCommentsFor (locA l) return (a l cs) acsA :: (HasLoc l, HasAnnotation t, MonadP m) => l -> (l -> EpAnnComments -> Located a) -> m (GenLocated t a) acsA !l a = do !cs <- getCommentsFor (locA l) return $ reLoc (a l cs) ams1 :: MonadP m => Located a -> b -> m (LocatedA b) ams1 (L l a) b = do !cs <- getCommentsFor l return (L (EpAnn (spanAsAnchor l) noAnn cs) b) amsA' :: (NoAnn t, MonadP m) => Located a -> m (GenLocated (EpAnn t) a) amsA' (L l a) = do !cs <- getCommentsFor l return (L (EpAnn (spanAsAnchor l) noAnn cs) a) amsA :: MonadP m => LocatedA a -> [TrailingAnn] -> m (LocatedA a) amsA (L !l a) bs = do !cs <- getCommentsFor (locA l) return (L (addAnnsA l bs cs) a) amsAl :: MonadP m => LocatedA a -> SrcSpan -> [TrailingAnn] -> m (LocatedA a) amsAl (L l a) loc bs = do !cs <- getCommentsFor loc return (L (addAnnsA l bs cs) a) amsr :: MonadP m => Located a -> an -> m (LocatedAn an a) amsr (L l a) an = do !cs <- getCommentsFor l return (L (EpAnn (spanAsAnchor l) an cs) a) -- | Parse a Haskell module with Haddock comments. This is done in two steps: -- -- * 'parseModuleNoHaddock' to build the AST -- * 'addHaddockToModule' to insert Haddock comments into it -- -- This and the signature module parser are the only parser entry points that -- deal with Haddock comments. The other entry points ('parseDeclaration', -- 'parseExpression', etc) do not insert them into the AST. 
parseModule :: P (Located (HsModule GhcPs)) parseModule = parseModuleNoHaddock >>= addHaddockToModule -- | Parse a Haskell signature module with Haddock comments. This is done in two -- steps: -- -- * 'parseSignatureNoHaddock' to build the AST -- * 'addHaddockToModule' to insert Haddock comments into it -- -- This and the module parser are the only parser entry points that deal with -- Haddock comments. The other entry points ('parseDeclaration', -- 'parseExpression', etc) do not insert them into the AST. parseSignature :: P (Located (HsModule GhcPs)) parseSignature = parseSignatureNoHaddock >>= addHaddockToModule commentsA :: (NoAnn ann) => SrcSpan -> EpAnnComments -> EpAnn ann commentsA loc cs = EpAnn (EpaSpan loc) noAnn cs spanWithComments :: (NoAnn ann, MonadP m) => SrcSpan -> m (EpAnn ann) spanWithComments l = do !cs <- getCommentsFor l return (commentsA l cs) -- | Instead of getting the *enclosed* comments, this includes the -- *preceding* ones. It is used at the top level to get comments -- between top level declarations. 
commentsPA :: (NoAnn ann) => LocatedAn ann a -> P (LocatedAn ann a) commentsPA la@(L l a) = do !cs <- getPriorCommentsFor (getLocA la) return (L (addCommentsToEpAnn l cs) a) hsDoAnn :: EpToken "rec" -> LocatedAn t b -> AnnList (EpToken "rec") hsDoAnn tok (L ll _) = AnnList (Just $ spanAsAnchor (locA ll)) ListNone [] tok [] listAsAnchorM :: [LocatedAn t a] -> Maybe EpaLocation listAsAnchorM [] = Nothing listAsAnchorM (L l _:_) = case locA l of RealSrcSpan ll _ -> Just $ realSpanAsAnchor ll _ -> Nothing epTok :: Located Token -> EpToken tok epTok (L !l _) = EpTok (EpaSpan l) epUniTok :: Located Token -> EpUniToken tok utok epUniTok t@(L !l _) = EpUniTok (EpaSpan l) u where u = if isUnicode t then UnicodeSyntax else NormalSyntax -- |Construct an EpToken from the location of the token, provided the span is not zero width mzEpTok :: Located Token -> EpToken tok mzEpTok !l = if isZeroWidthSpan (gl l) then NoEpTok else (epTok l) epExplicitBraces :: Located Token -> Located Token -> EpLayout epExplicitBraces !t1 !t2 = EpExplicitBraces (epTok t1) (epTok t2) -- ------------------------------------- addTrailingCommaFBind :: MonadP m => Fbind b -> EpToken "," -> m (Fbind b) addTrailingCommaFBind (Left b) l = fmap Left (addTrailingCommaA b l) addTrailingCommaFBind (Right b) l = fmap Right (addTrailingCommaA b l) addTrailingVbarA :: MonadP m => LocatedA a -> EpToken "|" -> m (LocatedA a) addTrailingVbarA la tok = addTrailingAnnA la tok AddVbarAnn addTrailingSemiA :: MonadP m => LocatedA a -> EpToken ";" -> m (LocatedA a) addTrailingSemiA la span = addTrailingAnnA la span AddSemiAnn addTrailingCommaA :: MonadP m => LocatedA a -> EpToken "," -> m (LocatedA a) addTrailingCommaA la span = addTrailingAnnA la span AddCommaAnn addTrailingAnnA :: (MonadP m, HasLoc tok) => LocatedA a -> tok -> (tok -> TrailingAnn) -> m (LocatedA a) addTrailingAnnA (L anns a) tok ta = do let cs = emptyComments -- AZ:TODO: generalise updating comments into an annotation let anns' = if isZeroWidthSpan 
(getHasLoc tok) then anns else addTrailingAnnToA (ta tok) cs anns return (L anns' a) -- ------------------------------------- addTrailingVbarL :: MonadP m => LocatedL a -> EpToken "|" -> m (LocatedL a) addTrailingVbarL la tok = addTrailingAnnL la (AddVbarAnn tok) addTrailingCommaL :: MonadP m => LocatedL a -> EpToken "," -> m (LocatedL a) addTrailingCommaL la tok = addTrailingAnnL la (AddCommaAnn tok) addTrailingAnnL :: MonadP m => LocatedL a -> TrailingAnn -> m (LocatedL a) addTrailingAnnL (L anns a) ta = do !cs <- getCommentsFor (locA anns) let anns' = addTrailingAnnToL ta cs anns return (L anns' a) -- ------------------------------------- -- Mostly use to add AnnComma, special case it to NOP if adding a zero-width annotation addTrailingCommaN :: MonadP m => LocatedN a -> SrcSpan -> m (LocatedN a) addTrailingCommaN (L anns a) span = do let cs = emptyComments -- AZ:TODO: generalise updating comments into an annotation let anns' = if isZeroWidthSpan span then anns else addTrailingCommaToN anns (srcSpan2e span) return (L anns' a) addTrailingCommaS :: Located StringLiteral -> EpaLocation -> Located StringLiteral addTrailingCommaS (L l sl) span = L (widenSpanL l [span]) (sl { sl_tc = Just (epaToNoCommentsLocation span) }) -- ------------------------------------- addTrailingDarrowC :: LocatedC a -> Located Token -> EpAnnComments -> LocatedC a addTrailingDarrowC (L (EpAnn lr (AnnContext _ o c) csc) a) lt cs = let u = if (isUnicode lt) then UnicodeSyntax else NormalSyntax in L (EpAnn lr (AnnContext (Just (epUniTok lt)) o c) (cs Semi.<> csc)) a -- ------------------------------------- isUnicodeSyntax :: Located Token -> IsUnicodeSyntax isUnicodeSyntax lt = if isUnicode lt then UnicodeSyntax else NormalSyntax -- We need a location for the where binds, when computing the SrcSpan -- for the AST element using them. Where there is a span, we return -- it, else noLoc, which is ignored in the comb2 call. 
adaptWhereBinds :: Maybe (Located (HsLocalBinds GhcPs, Maybe EpAnnComments)) -> Located (HsLocalBinds GhcPs, EpAnnComments) adaptWhereBinds Nothing = noLoc (EmptyLocalBinds noExtField, emptyComments) adaptWhereBinds (Just (L l (b, mc))) = L l (b, maybe emptyComments id mc) combineHasLocs :: (HasLoc a, HasLoc b) => a -> b -> SrcSpan combineHasLocs a b = combineSrcSpans (getHasLoc a) (getHasLoc b) fromTrailingN :: SrcSpanAnnN -> SrcSpanAnnA fromTrailingN (EpAnn anc ann cs) = EpAnn anc (AnnListItem (nann_trailing ann)) cs } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/0000755000000000000000000000000007346545000017142 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Annotation.hs0000644000000000000000000013131307346545000021612 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE StandaloneDeriving #-} module GHC.Parser.Annotation ( -- * Core Exact Print Annotation types EpToken(..), EpUniToken(..), getEpTokenSrcSpan, getEpTokenLocs, getEpTokenLoc, getEpUniTokenLoc, TokDcolon, TokDarrow, TokRarrow, TokForall, EpLayout(..), EpaComment(..), EpaCommentTok(..), IsUnicodeSyntax(..), HasE(..), -- * In-tree Exact Print Annotations EpaLocation, EpaLocation'(..), epaLocationRealSrcSpan, TokenLocation(..), DeltaPos(..), deltaPos, getDeltaLine, EpAnn(..), spanAsAnchor, realSpanAsAnchor, noSpanAnchor, NoAnn(..), -- ** Comments in Annotations EpAnnComments(..), LEpaComment, NoCommentsLocation, NoComments(..), emptyComments, epaToNoCommentsLocation, noCommentsToEpaLocation, getFollowingComments, setFollowingComments, setPriorComments, EpAnnCO, -- ** Annotations in 'GenLocated' LocatedA, LocatedL, LocatedC, LocatedN, LocatedAn, LocatedP, LocatedLC, LocatedLS, LocatedLW, LocatedLI, SrcSpanAnnA, SrcSpanAnnL, SrcSpanAnnP, SrcSpanAnnC, SrcSpanAnnN, SrcSpanAnnLC, SrcSpanAnnLW, 
SrcSpanAnnLS, SrcSpanAnnLI, LocatedE, -- ** Annotation data types used in 'GenLocated' AnnListItem(..), AnnList(..), AnnListBrackets(..), AnnParen(..), AnnPragma(..), AnnContext(..), NameAnn(..), NameAdornment(..), NoEpAnns(..), AnnSortKey(..), DeclTag(..), BindTag(..), -- ** Trailing annotations in lists TrailingAnn(..), ta_location, addTrailingAnnToA, addTrailingAnnToL, addTrailingCommaToN, noTrailingN, -- ** Utilities for converting between different 'GenLocated' when -- ** we do not care about the annotations. l2l, la2la, reLoc, HasLoc(..), getHasLocList, srcSpan2e, realSrcSpan, -- ** Building up annotations reAnnL, reAnnC, addAnnsA, widenSpanL, widenSpanT, widenAnchorT, widenAnchorS, widenLocatedAnL, listLocation, -- ** Querying annotations getLocAnn, epAnnComments, -- ** Working with locations of annotations sortLocatedA, mapLocA, combineLocsA, combineSrcSpansA, addCLocA, -- ** Constructing 'GenLocated' annotation types when we do not care -- about annotations. HasAnnotation(..), locA, noLocA, getLocA, noSrcSpanA, -- ** Working with comments in annotations noComments, comment, addCommentsToEpAnn, setCommentsEpAnn, transferAnnsA, transferAnnsOnlyA, transferCommentsOnlyA, transferPriorCommentsA, transferFollowingA, commentsOnlyA, removeCommentsA, placeholderRealSpan, ) where import GHC.Prelude import Data.Data import Data.Function (on) import Data.List (sortBy, foldl1') import Data.Semigroup import GHC.Data.FastString import GHC.TypeLits (Symbol, KnownSymbol, symbolVal) import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Hs.DocString import GHC.Utils.Outputable hiding ( (<>) ) import GHC.Utils.Panic import qualified GHC.Data.Strict as Strict import GHC.Types.SourceText (SourceText (NoSourceText)) {- Note [exact print annotations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Given a parse tree of a Haskell module, how can we reconstruct the original Haskell source code, retaining all whitespace and source code comments? 
We need to track the locations of all elements from the original source: this includes keywords such as 'let' / 'in' / 'do' etc as well as punctuation such as commas and braces, and also comments. We collectively refer to this metadata as the "exact print annotations". NON-COMMENT ELEMENTS Intuitively, every AST element directly contains a bag of keywords (keywords can show up more than once in a node: a semicolon i.e. newline can show up multiple times before the next AST element), each of which needs to be associated with its location in the original source code. These keywords are recorded directly in the AST element in which they occur, for the GhcPs phase. For any given element in the AST, there is only a set number of keywords that are applicable for it (e.g., you'll never see an 'import' keyword associated with a let-binding.) The set of allowed keywords is documented in a comment associated with the constructor of a given AST element, although the ground truth is in GHC.Parser and GHC.Parser.PostProcess (which actually add the annotations). COMMENT ELEMENTS We associate comments with the lowest (most specific) AST element enclosing them PARSER STATE There are three fields in PState (the parser state) which play a role with annotation comments. > comment_q :: [LEpaComment], > header_comments :: Maybe [LEpaComment], > eof_pos :: Maybe (RealSrcSpan, RealSrcSpan), -- pos, gap to prior token The 'comment_q' field captures comments as they are seen in the token stream, so that when they are ready to be allocated via the parser they are available. The 'header_comments' capture the comments coming at the top of the source file. They are moved there from the `comment_q` when comments are allocated for the first top-level declaration. The 'eof_pos' captures the final location in the file, and the location of the immediately preceding token to the last location, so that the exact-printer can work out how far to advance to add the trailing whitespace. 
PARSER EMISSION OF ANNOTATIONS The parser interacts with the lexer using the functions > getCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments > getPriorCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments > getFinalCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments The 'getCommentsFor' function is the one used most often. It takes the AST element SrcSpan and removes and returns any comments in the 'comment_q' that are inside the span. 'allocateComments' in 'Lexer' is responsible for making sure we only return comments that actually fit in the 'SrcSpan'. The 'getPriorCommentsFor' function is used for top-level declarations, and removes and returns any comments in the 'comment_q' that either precede or are included in the given SrcSpan. This is to ensure that preceding documentation comments are kept together with the declaration they belong to. The 'getFinalCommentsFor' function is called right at the end when EOF is hit. This drains the 'comment_q' completely, and returns the 'header_comments', remaining 'comment_q' entries and the 'eof_pos'. These values are inserted into the 'HsModule' AST element. The wiki page describing this feature is https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations -} -- -------------------------------------------------------------------- -- | Certain tokens can have alternate representations when unicode syntax is -- enabled. This flag is attached to those tokens in the lexer so that the -- original source representation can be reproduced in the corresponding -- 'EpAnnotation' data IsUnicodeSyntax = UnicodeSyntax | NormalSyntax deriving (Eq, Ord, Data, Show) -- | Some template haskell tokens have two variants, one with an `e` the other -- not: -- -- > [| or [e| -- > [|| or [e|| -- -- This type indicates whether the 'e' is present or not. data HasE = HasE | NoE deriving (Eq, Ord, Data, Show) -- --------------------------------------------------------------------- -- | A token stored in the syntax tree. 
For example, when parsing a -- let-expression, we store @EpToken "let"@ and @EpToken "in"@. -- The locations of those tokens can be used to faithfully reproduce -- (exactprint) the original program text. data EpToken (tok :: Symbol) = NoEpTok | EpTok !EpaLocation instance KnownSymbol tok => Outputable (EpToken tok) where ppr _ = text (symbolVal (Proxy @tok)) -- | With @UnicodeSyntax@, there might be multiple ways to write the same -- token. For example an arrow could be either @->@ or @→@. This choice must be -- recorded in order to exactprint such tokens, so instead of @EpToken "->"@ we -- introduce @EpUniToken "->" "→"@. data EpUniToken (tok :: Symbol) (utok :: Symbol) = NoEpUniTok | EpUniTok !EpaLocation !IsUnicodeSyntax deriving instance Eq (EpToken tok) deriving instance Eq (EpUniToken tok utok) deriving instance KnownSymbol tok => Data (EpToken tok) deriving instance (KnownSymbol tok, KnownSymbol utok) => Data (EpUniToken tok utok) instance (KnownSymbol tok, KnownSymbol utok) => Outputable (EpUniToken tok utok) where ppr NoEpUniTok = text $ symbolVal (Proxy @tok) ppr (EpUniTok _ NormalSyntax) = text $ symbolVal (Proxy @tok) ppr (EpUniTok _ UnicodeSyntax) = text $ symbolVal (Proxy @utok) getEpTokenSrcSpan :: EpToken tok -> SrcSpan getEpTokenSrcSpan NoEpTok = noSrcSpan getEpTokenSrcSpan (EpTok EpaDelta{}) = noSrcSpan getEpTokenSrcSpan (EpTok (EpaSpan span)) = span getEpTokenLocs :: [EpToken tok] -> [EpaLocation] getEpTokenLocs ls = concatMap go ls where go NoEpTok = [] go (EpTok l) = [l] getEpTokenLoc :: EpToken tok -> EpaLocation getEpTokenLoc NoEpTok = noAnn getEpTokenLoc (EpTok l) = l getEpUniTokenLoc :: EpUniToken tok toku -> EpaLocation getEpUniTokenLoc NoEpUniTok = noAnn getEpUniTokenLoc (EpUniTok l _) = l -- TODO:AZ: check we have all of the unicode tokens type TokDcolon = EpUniToken "::" "∷" type TokDarrow = EpUniToken "=>" "⇒" type TokRarrow = EpUniToken "->" "→" type TokForall = EpUniToken "forall" "∀" -- | Layout information for declarations. 
data EpLayout = -- | Explicit braces written by the user. -- -- @ -- class C a where { foo :: a; bar :: a } -- @ EpExplicitBraces !(EpToken "{") !(EpToken "}") | -- | Virtual braces inserted by the layout algorithm. -- -- @ -- class C a where -- foo :: a -- bar :: a -- @ EpVirtualBraces !Int -- ^ Layout column (indentation level, begins at 1) | -- | Empty or compiler-generated blocks do not have layout information -- associated with them. EpNoLayout deriving instance Data EpLayout -- --------------------------------------------------------------------- data EpaComment = EpaComment { ac_tok :: EpaCommentTok , ac_prior_tok :: RealSrcSpan -- ^ The location of the prior token, used in exact printing. The -- 'EpaComment' appears as an 'LEpaComment' containing its -- location. The difference between the end of the prior token -- and the start of this location is used for the spacing when -- exact printing the comment. } deriving (Eq, Data, Show) data EpaCommentTok = -- Documentation annotations EpaDocComment HsDocString -- ^ a docstring that can be pretty printed using pprHsDocString | EpaDocOptions String -- ^ doc options (prune, ignore-exports, etc) | EpaLineComment String -- ^ comment starting by "--" | EpaBlockComment String -- ^ comment in {- -} deriving (Eq, Data, Show) -- Note: these are based on the Token versions, but the Token type is -- defined in GHC.Parser.Lexer and bringing it in here would create a loop instance Outputable EpaComment where ppr x = text (show x) -- --------------------------------------------------------------------- type EpaLocation = EpaLocation' [LEpaComment] epaToNoCommentsLocation :: EpaLocation -> NoCommentsLocation epaToNoCommentsLocation (EpaSpan ss) = EpaSpan ss epaToNoCommentsLocation (EpaDelta ss dp []) = EpaDelta ss dp NoComments epaToNoCommentsLocation (EpaDelta _ _ _ ) = panic "epaToNoCommentsLocation" noCommentsToEpaLocation :: NoCommentsLocation -> EpaLocation noCommentsToEpaLocation (EpaSpan ss) = EpaSpan ss 
noCommentsToEpaLocation (EpaDelta ss dp NoComments) = EpaDelta ss dp [] -- | Tokens embedded in the AST have an EpaLocation, unless they come from -- generated code (e.g. by TH). data TokenLocation = NoTokenLoc | TokenLoc !EpaLocation deriving (Data,Eq) instance Outputable a => Outputable (GenLocated TokenLocation a) where ppr (L _ x) = ppr x -- | Used in the parser only, extract the 'RealSrcSpan' from an -- 'EpaLocation'. The parser will never insert a 'DeltaPos', so the -- partial function is safe. epaLocationRealSrcSpan :: EpaLocation' a -> RealSrcSpan epaLocationRealSrcSpan (EpaSpan (RealSrcSpan r _)) = r epaLocationRealSrcSpan _ = panic "epaLocationRealSrcSpan" -- --------------------------------------------------------------------- -- | The exact print annotations (EPAs) are kept in the HsSyn AST for -- the GhcPs phase. They are usually inserted into the AST by the parser, -- and in case of generated code (e.g. by TemplateHaskell) they are usually -- initialized using 'NoAnn' type class. -- -- A goal of the annotations is that an AST can be edited, including -- moving subtrees from one place to another, duplicating them, and so -- on. This means that each fragment must be self-contained. To this -- end, each annotated fragment keeps track of the anchor position it -- was originally captured at, being simply the start span of the -- topmost element of the ast fragment. This gives us a way to later -- re-calculate all Located items in this layer of the AST, as well as -- any annotations captured. The comments associated with the AST -- fragment are also captured here. -- -- The 'ann' type parameter allows this general structure to be -- specialised to the specific set of locations of original exact -- print annotation elements. For example -- -- @ -- type SrcSpannAnnA = EpAnn AnnListItem -- @ -- -- is a commonly used type alias that specializes the 'ann' type parameter to -- 'AnnListItem'. 
-- -- The spacing between the items under the scope of a given EpAnn is -- normally derived from the original 'Anchor'. But if a sub-element -- is not in its original position, the required spacing can be -- captured using an appropriate 'EpaDelta' value for the 'entry' Anchor. -- This allows us to freely move elements around, and stitch together -- new AST fragments out of old ones, and have them still printed out -- in a precise way. data EpAnn ann = EpAnn { entry :: !EpaLocation -- ^ Base location for the start of the syntactic element -- holding the annotations. , anns :: !ann -- ^ Annotations added by the Parser , comments :: !EpAnnComments -- ^ Comments enclosed in the SrcSpan of the element -- this `EpAnn` is attached to } deriving (Data, Eq, Functor) -- See Note [XRec and Anno in the AST] spanAsAnchor :: SrcSpan -> (EpaLocation' a) spanAsAnchor ss = EpaSpan ss realSpanAsAnchor :: RealSrcSpan -> (EpaLocation' a) realSpanAsAnchor s = EpaSpan (RealSrcSpan s Strict.Nothing) noSpanAnchor :: (NoAnn a) => EpaLocation' a noSpanAnchor = EpaDelta noSrcSpan (SameLine 0) noAnn -- --------------------------------------------------------------------- -- | When we are parsing we add comments that belong to a particular AST -- element, and print them together with the element, interleaving -- them into the output stream. But when editing the AST to move -- fragments around it is useful to be able to first separate the -- comments into those occurring before the AST element and those -- following it. The 'EpaCommentsBalanced' constructor is used to do -- this. The GHC parser will only insert the 'EpaComments' form. 
data EpAnnComments = EpaComments { priorComments :: ![LEpaComment] } | EpaCommentsBalanced { priorComments :: ![LEpaComment] , followingComments :: ![LEpaComment] } deriving (Data, Eq) type LEpaComment = GenLocated NoCommentsLocation EpaComment emptyComments :: EpAnnComments emptyComments = EpaComments [] -- --------------------------------------------------------------------- -- Annotations attached to a 'SrcSpan'. -- --------------------------------------------------------------------- type LocatedA = GenLocated SrcSpanAnnA type LocatedN = GenLocated SrcSpanAnnN type LocatedL = GenLocated SrcSpanAnnL type LocatedLC = GenLocated SrcSpanAnnLC type LocatedLS = GenLocated SrcSpanAnnLS type LocatedLW = GenLocated SrcSpanAnnLW type LocatedLI = GenLocated SrcSpanAnnLI type LocatedP = GenLocated SrcSpanAnnP type LocatedC = GenLocated SrcSpanAnnC type SrcSpanAnnA = EpAnn AnnListItem type SrcSpanAnnN = EpAnn NameAnn type SrcSpanAnnL = EpAnn (AnnList ()) type SrcSpanAnnLC = EpAnn (AnnList [EpToken ","]) type SrcSpanAnnLS = EpAnn (AnnList ()) type SrcSpanAnnLW = EpAnn (AnnList (EpToken "where")) type SrcSpanAnnLI = EpAnn (AnnList (EpToken "hiding", [EpToken ","])) type SrcSpanAnnP = EpAnn AnnPragma type SrcSpanAnnC = EpAnn AnnContext type LocatedE = GenLocated EpaLocation -- | General representation of a 'GenLocated' type carrying a -- parameterised annotation type. type LocatedAn an = GenLocated (EpAnn an) {- Note [XRec and Anno in the AST] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The exact print annotations are captured directly inside the AST, using TTG extension points. However certain annotations need to be captured on the Located versions too. There is a general form for these, captured in the type 'EpAnn ann' with the specific usage captured in the 'ann' parameter in different contexts. 
Some of the particular use cases are 1) RdrNames, which can have additional items such as backticks or parens 2) Items which occur in lists, and the annotation relates purely to its usage inside a list. See the section above this note for the rest. The Anno type family maps to the specific EpAnn variant for a given item. So type instance XRec (GhcPass p) a = GenLocated (Anno a) a type instance Anno RdrName = SrcSpanAnnN type LocatedN = GenLocated SrcSpanAnnN meaning we can have type LocatedN RdrName -} -- --------------------------------------------------------------------- -- Annotations for items in a list -- --------------------------------------------------------------------- -- | Captures the location of punctuation occurring between items, -- normally in a list. It is captured as a trailing annotation. data TrailingAnn = AddSemiAnn (EpToken ";") -- ^ Trailing ';' | AddCommaAnn (EpToken ",") -- ^ Trailing ',' | AddVbarAnn (EpToken "|") -- ^ Trailing '|' | AddDarrowAnn TokDarrow -- ^ Trailing '=>' / '⇒' deriving (Data, Eq) ta_location :: TrailingAnn -> EpaLocation ta_location (AddSemiAnn tok) = getEpTokenLoc tok ta_location (AddCommaAnn tok) = getEpTokenLoc tok ta_location (AddVbarAnn tok) = getEpTokenLoc tok ta_location (AddDarrowAnn tok) = getEpUniTokenLoc tok instance Outputable TrailingAnn where ppr (AddSemiAnn tok) = text "AddSemiAnn" <+> ppr tok ppr (AddCommaAnn tok) = text "AddCommaAnn" <+> ppr tok ppr (AddVbarAnn tok) = text "AddVbarAnn" <+> ppr tok ppr (AddDarrowAnn tok) = text "AddDarrowAnn" <+> ppr tok -- | Annotation for items appearing in a list. They can have one or -- more trailing punctuations items, such as commas or semicolons. 
data AnnListItem = AnnListItem { lann_trailing :: [TrailingAnn] } deriving (Data, Eq) -- --------------------------------------------------------------------- -- Annotations for the context of a list of items -- --------------------------------------------------------------------- -- | Annotation for the "container" of a list. This captures -- surrounding items such as braces if present, and introductory -- keywords such as 'where'. data AnnList a = AnnList { al_anchor :: !(Maybe EpaLocation), -- ^ start point of a list having layout al_brackets :: !AnnListBrackets, al_semis :: [EpToken ";"], -- decls al_rest :: !a, al_trailing :: ![TrailingAnn] -- ^ items appearing after the -- list, such as '=>' for a -- context } deriving (Data,Eq) data AnnListBrackets = ListParens (EpToken "(") (EpToken ")") | ListBraces (EpToken "{") (EpToken "}") | ListSquare (EpToken "[") (EpToken "]") | ListBanana (EpUniToken "(|" "⦇") (EpUniToken "|)" "⦈") | ListNone deriving (Data,Eq) -- --------------------------------------------------------------------- -- Annotations for parenthesised elements, such as tuples, lists -- --------------------------------------------------------------------- -- | exact print annotation for an item having surrounding "brackets", such as -- tuples or lists data AnnParen = AnnParens (EpToken "(") (EpToken ")") -- ^ '(', ')' | AnnParensHash (EpToken "(#") (EpToken "#)") -- ^ '(#', '#)' | AnnParensSquare (EpToken "[") (EpToken "]") -- ^ '[', ']' deriving Data -- --------------------------------------------------------------------- -- | Exact print annotation for the 'Context' data type. data AnnContext = AnnContext { ac_darrow :: Maybe TokDarrow, -- ^ location of the '=>', if present. ac_open :: [EpToken "("], -- ^ zero or more opening parentheses. ac_close :: [EpToken ")"] -- ^ zero or more closing parentheses. 
} deriving (Data) -- --------------------------------------------------------------------- -- Annotations for names -- --------------------------------------------------------------------- -- | exact print annotations for a 'RdrName'. There are many kinds of -- adornment that can be attached to a given 'RdrName'. This type -- captures them, as detailed on the individual constructors. data NameAnn -- | Used for a name with an adornment, so '`foo`', '(bar)' = NameAnn { nann_adornment :: NameAdornment, nann_name :: EpaLocation, nann_trailing :: [TrailingAnn] } -- | Used for @(,,,)@, or @(#,,,#)@ | NameAnnCommas { nann_adornment :: NameAdornment, nann_commas :: [EpToken ","], nann_trailing :: [TrailingAnn] } -- | Used for @(# | | #)@ | NameAnnBars { nann_parensh :: (EpToken "(#", EpToken "#)"), nann_bars :: [EpToken "|"], nann_trailing :: [TrailingAnn] } -- | Used for @()@, @(##)@, @[]@ | NameAnnOnly { nann_adornment :: NameAdornment, nann_trailing :: [TrailingAnn] } -- | Used for @->@, as an identifier | NameAnnRArrow { nann_mopen :: Maybe (EpToken "("), nann_arrow :: TokRarrow, nann_mclose :: Maybe (EpToken ")"), nann_trailing :: [TrailingAnn] } -- | Used for an item with a leading @'@. The annotation for -- unquoted item is stored in 'nann_quoted'. | NameAnnQuote { nann_quote :: EpToken "'", nann_quoted :: SrcSpanAnnN, nann_trailing :: [TrailingAnn] } -- | Used when adding a 'TrailingAnn' to an existing 'LocatedN' -- which has no Api Annotation. | NameAnnTrailing { nann_trailing :: [TrailingAnn] } deriving (Data, Eq) -- | A 'NameAnn' can capture the locations of surrounding adornments, -- such as parens or backquotes. This data type identifies what -- particular pair are being used. 
data NameAdornment = NameParens (EpToken "(") (EpToken ")") | NameParensHash (EpToken "(#") (EpToken "#)") | NameBackquotes (EpToken "`") (EpToken "`") | NameSquare (EpToken "[") (EpToken "]") | NameNoAdornment deriving (Eq, Data) -- --------------------------------------------------------------------- -- | exact print annotation used for capturing the locations of -- annotations in pragmas. data AnnPragma = AnnPragma { apr_open :: EpaLocation, apr_close :: EpToken "#-}", apr_squares :: (EpToken "[", EpToken "]"), apr_loc1 :: EpaLocation, apr_loc2 :: EpaLocation, apr_type :: EpToken "type", apr_module :: EpToken "module" } deriving (Data,Eq) -- --------------------------------------------------------------------- -- | Captures the sort order of sub elements for `ValBinds`, -- `ClassDecl`, `ClsInstDecl` data AnnSortKey tag -- See Note [AnnSortKey] below = NoAnnSortKey | AnnSortKey [tag] deriving (Data, Eq) -- | Used to track of interleaving of binds and signatures for ValBind data BindTag -- See Note [AnnSortKey] below = BindTag | SigDTag deriving (Eq,Data,Ord,Show) -- | Used to track interleaving of class methods, class signatures, -- associated types and associate type defaults in `ClassDecl` and -- `ClsInstDecl`. data DeclTag -- See Note [AnnSortKey] below = ClsMethodTag | ClsSigTag | ClsAtTag | ClsAtdTag deriving (Eq,Data,Ord,Show) {- Note [AnnSortKey] ~~~~~~~~~~~~~~~~~ For some constructs in the ParsedSource we have mixed lists of items that can be freely intermingled. An example is the binds in a where clause, captured in ValBinds (XValBinds idL idR) (LHsBindsLR idL idR) [LSig idR] This keeps separate ordered collections of LHsBind GhcPs and LSig GhcPs. But there is no constraint on the original source code as to how these should appear, so they can have all the signatures first, then their binds, or grouped with a signature preceding each bind. 
fa :: Int fa = 1 fb :: Char fb = 'c' Or fa :: Int fb :: Char fb = 'c' fa = 1 When exact printing these, we need to restore the original order. As initially parsed we have the SrcSpan, and can sort on those. But if we have modified the AST prior to printing, we cannot rely on the SrcSpans for order any more. The bag of LHsBind GhcPs is physically ordered, as is the list of LSig GhcPs. So in effect we have a list of binds in the order we care about, and a list of sigs in the order we care about. The only problem is to know how to merge the lists. This is where AnnSortKey comes in, which we store in the TTG extension point for ValBinds. data AnnSortKey tag = NoAnnSortKey | AnnSortKey [tag] When originally parsed, with SrcSpans we can rely on, we do not need any extra information, so we tag it with NoAnnSortKey. If the binds and signatures are updated in any way, such that we can no longer rely on their SrcSpans (e.g. they are copied from elsewhere, parsed from scratch for insertion, have a fake SrcSpan), we use `AnnSortKey [BindTag]` to keep track. data BindTag = BindTag | SigDTag We use it as a merge selector, and have one entry for each bind and signature. So for the first example we have binds: fa = 1 , fb = 'c' sigs: fa :: Int, fb :: Char tags: SigDTag, BindTag, SigDTag, BindTag so we draw first from the signatures, then the binds, and same again. For the second example we have binds: fb = 'c', fa = 1 sigs: fa :: Int, fb :: Char tags: SigDTag, SigDTag, BindTag, BindTag so we draw two signatures, then two binds. We do similar for ClassDecl and ClsInstDecl, but we have four different lists we must manage. For this we use DeclTag. -} -- --------------------------------------------------------------------- -- | Helper function used in the parser to add a 'TrailingAnn' items -- to an existing annotation. 
addTrailingAnnToL :: TrailingAnn -> EpAnnComments -> EpAnn (AnnList a) -> EpAnn (AnnList a) addTrailingAnnToL t cs n = n { anns = addTrailing (anns n) , comments = comments n <> cs } where -- See Note [list append in addTrailing*] addTrailing n = n { al_trailing = al_trailing n ++ [t]} -- | Helper function used in the parser to add a 'TrailingAnn' items -- to an existing annotation. addTrailingAnnToA :: TrailingAnn -> EpAnnComments -> EpAnn AnnListItem -> EpAnn AnnListItem addTrailingAnnToA t cs n = n { anns = addTrailing (anns n) , comments = comments n <> cs } where -- See Note [list append in addTrailing*] addTrailing n = n { lann_trailing = lann_trailing n ++ [t] } -- | Helper function used in the parser to add a comma location to an -- existing annotation. addTrailingCommaToN :: EpAnn NameAnn -> EpaLocation -> EpAnn NameAnn addTrailingCommaToN n l = n { anns = addTrailing (anns n) l } where -- See Note [list append in addTrailing*] addTrailing :: NameAnn -> EpaLocation -> NameAnn addTrailing n l = n { nann_trailing = nann_trailing n ++ [AddCommaAnn (EpTok l)]} noTrailingN :: SrcSpanAnnN -> SrcSpanAnnN noTrailingN s = s { anns = (anns s) { nann_trailing = [] } } {- Note [list append in addTrailing*] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The addTrailingAnnToL, addTrailingAnnToA and addTrailingCommaToN functions are used to add a separator for an item when it occurs in a list. So they are used to capture a comma, vbar, semicolon and similar. In general, a given element will have zero or one of these. In extreme (test) cases, there may be multiple semicolons. In exact printing we sometimes convert the EpaLocation variant for an trailing annotation to the EpaDelta variant, which cannot be sorted. Hence it is critical that these annotations are captured in the order they appear in the original source file. And so we use the less efficient list append to preserve the order, knowing that in most cases the original list is empty. 
-} -- --------------------------------------------------------------------- -- |Helper function for converting annotation types. -- Discards any annotations l2l :: (HasLoc a, HasAnnotation b) => a -> b l2l a = noAnnSrcSpan (getHasLoc a) -- |Helper function for converting annotation types. -- Discards any annotations la2la :: (HasLoc l, HasAnnotation l2) => GenLocated l a -> GenLocated l2 a la2la (L la a) = L (noAnnSrcSpan (getHasLoc la)) a locA :: (HasLoc a) => a -> SrcSpan locA = getHasLoc reLoc :: (HasLoc (GenLocated a e), HasAnnotation b) => GenLocated a e -> GenLocated b e reLoc (L la a) = L (noAnnSrcSpan $ locA (L la a) ) a -- --------------------------------------------------------------------- class HasAnnotation e where noAnnSrcSpan :: SrcSpan -> e instance HasAnnotation SrcSpan where noAnnSrcSpan l = l instance HasAnnotation EpaLocation where noAnnSrcSpan l = EpaSpan l instance (NoAnn ann) => HasAnnotation (EpAnn ann) where noAnnSrcSpan l = EpAnn (spanAsAnchor l) noAnn emptyComments noLocA :: (HasAnnotation e) => a -> GenLocated e a noLocA = L (noAnnSrcSpan noSrcSpan) getLocA :: (HasLoc a) => GenLocated a e -> SrcSpan getLocA = getHasLoc noSrcSpanA :: (HasAnnotation e) => e noSrcSpanA = noAnnSrcSpan noSrcSpan -- --------------------------------------------------------------------- class NoAnn a where -- | equivalent of `mempty`, but does not need Semigroup noAnn :: a -- --------------------------------------------------------------------- class HasLoc a where -- ^ conveniently calculate locations for things without locations attached getHasLoc :: a -> SrcSpan instance (HasLoc l) => HasLoc (GenLocated l a) where getHasLoc (L l _) = getHasLoc l instance HasLoc SrcSpan where getHasLoc l = l instance (HasLoc a) => (HasLoc (Maybe a)) where getHasLoc (Just a) = getHasLoc a getHasLoc Nothing = noSrcSpan instance HasLoc (EpAnn a) where getHasLoc (EpAnn l _ _) = getHasLoc l instance HasLoc EpaLocation where getHasLoc (EpaSpan l) = l getHasLoc (EpaDelta l _ _) = l 
instance HasLoc (EpToken tok) where getHasLoc = getEpTokenSrcSpan instance HasLoc (EpUniToken tok utok) where getHasLoc NoEpUniTok = noSrcSpan getHasLoc (EpUniTok l _) = getHasLoc l getHasLocList :: HasLoc a => [a] -> SrcSpan getHasLocList [] = noSrcSpan getHasLocList xs = foldl1' combineSrcSpans $ map getHasLoc xs -- --------------------------------------------------------------------- realSrcSpan :: SrcSpan -> RealSrcSpan realSrcSpan (RealSrcSpan s _) = s realSrcSpan _ = mkRealSrcSpan l l -- AZ temporary where l = mkRealSrcLoc (fsLit "realSrcSpan") (-1) (-1) srcSpan2e :: SrcSpan -> EpaLocation srcSpan2e ss@(RealSrcSpan _ _) = EpaSpan ss srcSpan2e span = EpaSpan (RealSrcSpan (realSrcSpan span) Strict.Nothing) reAnnC :: AnnContext -> EpAnnComments -> Located a -> LocatedC a reAnnC anns cs (L l a) = L (EpAnn (spanAsAnchor l) anns cs) a reAnnL :: ann -> EpAnnComments -> Located e -> GenLocated (EpAnn ann) e reAnnL anns cs (L l a) = L (EpAnn (spanAsAnchor l) anns cs) a getLocAnn :: Located a -> SrcSpanAnnA getLocAnn (L l _) = noAnnSrcSpan l -- AZ:TODO use widenSpan here too addAnnsA :: SrcSpanAnnA -> [TrailingAnn] -> EpAnnComments -> SrcSpanAnnA addAnnsA (EpAnn l as1 cs) as2 cs2 = EpAnn l (AnnListItem (lann_trailing as1 ++ as2)) (cs <> cs2) -- | The annotations need to all come after the anchor. Make sure -- this is the case. 
widenSpanL :: SrcSpan -> [EpaLocation] -> SrcSpan widenSpanL s as = foldl combineSrcSpans s (go as) where go [] = [] go ((EpaSpan (RealSrcSpan s mb):rest)) = RealSrcSpan s mb : go rest go ((EpaSpan _):rest) = go rest go ((EpaDelta _ _ _):rest) = go rest widenSpanT :: SrcSpan -> EpToken tok -> SrcSpan widenSpanT l (EpTok loc) = widenSpanL l [loc] widenSpanT l NoEpTok = l listLocation :: [LocatedAn an a] -> EpaLocation listLocation as = EpaSpan (go noSrcSpan as) where combine l r = combineSrcSpans l r go acc [] = acc go acc (L (EpAnn (EpaSpan s) _ _) _:rest) = go (combine acc s) rest go acc (_:rest) = go acc rest widenAnchorT :: EpaLocation -> EpToken tok -> EpaLocation widenAnchorT (EpaSpan ss) (EpTok l) = widenAnchorS l ss widenAnchorT ss _ = ss widenAnchorS :: EpaLocation -> SrcSpan -> EpaLocation widenAnchorS (EpaSpan (RealSrcSpan s mbe)) (RealSrcSpan r mbr) = EpaSpan (RealSrcSpan (combineRealSrcSpans s r) (liftA2 combineBufSpans mbe mbr)) widenAnchorS (EpaSpan us) _ = EpaSpan us widenAnchorS EpaDelta{} (RealSrcSpan r mb) = EpaSpan (RealSrcSpan r mb) widenAnchorS anc _ = anc widenLocatedAnL :: EpAnn an -> [EpaLocation] -> EpAnn an widenLocatedAnL (EpAnn (EpaSpan l) a cs) as = EpAnn (spanAsAnchor l') a cs where l' = widenSpanL l as widenLocatedAnL (EpAnn anc a cs) _as = EpAnn anc a cs epAnnComments :: EpAnn an -> EpAnnComments epAnnComments (EpAnn _ _ cs) = cs -- --------------------------------------------------------------------- sortLocatedA :: (HasLoc (EpAnn a)) => [GenLocated (EpAnn a) e] -> [GenLocated (EpAnn a) e] sortLocatedA = sortBy (leftmost_smallest `on` getLocA) mapLocA :: (NoAnn ann) => (a -> b) -> GenLocated SrcSpan a -> GenLocated (EpAnn ann) b mapLocA f (L l a) = L (noAnnSrcSpan l) (f a) -- AZ:TODO: move this somewhere sane combineLocsA :: Semigroup a => GenLocated (EpAnn a) e1 -> GenLocated (EpAnn a) e2 -> EpAnn a combineLocsA (L a _) (L b _) = combineSrcSpansA a b combineSrcSpansA :: Semigroup a => EpAnn a -> EpAnn a -> EpAnn a combineSrcSpansA 
aa ab = aa <> ab -- | Combine locations from two 'Located' things and add them to a third thing addCLocA :: (HasLoc a, HasLoc b, HasAnnotation l) => a -> b -> c -> GenLocated l c addCLocA a b c = L (noAnnSrcSpan $ combineSrcSpans (getHasLoc a) (getHasLoc b)) c -- --------------------------------------------------------------------- -- Utilities for manipulating EpAnnComments -- --------------------------------------------------------------------- getFollowingComments :: EpAnnComments -> [LEpaComment] getFollowingComments (EpaComments _) = [] getFollowingComments (EpaCommentsBalanced _ cs) = cs setFollowingComments :: EpAnnComments -> [LEpaComment] -> EpAnnComments setFollowingComments (EpaComments ls) cs = EpaCommentsBalanced ls cs setFollowingComments (EpaCommentsBalanced ls _) cs = EpaCommentsBalanced ls cs setPriorComments :: EpAnnComments -> [LEpaComment] -> EpAnnComments setPriorComments (EpaComments _) cs = EpaComments cs setPriorComments (EpaCommentsBalanced _ ts) cs = EpaCommentsBalanced cs ts -- --------------------------------------------------------------------- -- Comment-only annotations -- --------------------------------------------------------------------- type EpAnnCO = EpAnn NoEpAnns -- ^ Api Annotations for comments only data NoEpAnns = NoEpAnns deriving (Data,Eq,Ord) noComments ::EpAnnCO noComments = EpAnn noSpanAnchor NoEpAnns emptyComments -- TODO:AZ get rid of this placeholderRealSpan :: RealSrcSpan placeholderRealSpan = realSrcLocSpan (mkRealSrcLoc (mkFastString "placeholder") (-1) (-1)) comment :: RealSrcSpan -> EpAnnComments -> EpAnnCO comment loc cs = EpAnn (EpaSpan (RealSrcSpan loc Strict.Nothing)) NoEpAnns cs -- --------------------------------------------------------------------- -- Utilities for managing comments in an `EpAnn a` structure. -- --------------------------------------------------------------------- -- | Add additional comments to a 'EpAnn', used for manipulating the -- AST prior to exact printing the changed one. 
addCommentsToEpAnn :: (NoAnn ann) => EpAnn ann -> EpAnnComments -> EpAnn ann addCommentsToEpAnn (EpAnn a an cs) cs' = EpAnn a an (cs <> cs') -- | Replace any existing comments on a 'EpAnn', used for manipulating the -- AST prior to exact printing the changed one. setCommentsEpAnn :: (NoAnn ann) => EpAnn ann -> EpAnnComments -> EpAnn ann setCommentsEpAnn (EpAnn a an _) cs = (EpAnn a an cs) -- | Transfer comments and trailing items from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. transferAnnsA :: SrcSpanAnnA -> SrcSpanAnnA -> (SrcSpanAnnA, SrcSpanAnnA) transferAnnsA (EpAnn a an cs) (EpAnn a' an' cs') = (EpAnn a noAnn emptyComments, EpAnn a' (an' <> an) (cs' <> cs)) -- | Transfer trailing items but not comments from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. transferFollowingA :: SrcSpanAnnA -> SrcSpanAnnA -> (SrcSpanAnnA, SrcSpanAnnA) transferFollowingA (EpAnn a1 an1 cs1) (EpAnn a2 an2 cs2) = (EpAnn a1 noAnn cs1', EpAnn a2 (an1 <> an2) cs2') where pc = priorComments cs1 fc = getFollowingComments cs1 cs1' = setPriorComments emptyComments pc cs2' = setFollowingComments cs2 fc -- | Transfer trailing items from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. transferAnnsOnlyA :: SrcSpanAnnA -> SrcSpanAnnA -> (SrcSpanAnnA, SrcSpanAnnA) transferAnnsOnlyA (EpAnn a an cs) (EpAnn a' an' cs') = (EpAnn a noAnn cs, EpAnn a' (an' <> an) cs') -- | Transfer comments from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. transferCommentsOnlyA :: EpAnn a -> EpAnn b -> (EpAnn a, EpAnn b) transferCommentsOnlyA (EpAnn a an cs) (EpAnn a' an' cs') = (EpAnn a an emptyComments, EpAnn a' an' (cs <> cs')) -- | Transfer prior comments only from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. 
transferPriorCommentsA :: SrcSpanAnnA -> SrcSpanAnnA -> (SrcSpanAnnA, SrcSpanAnnA) transferPriorCommentsA (EpAnn a1 an1 cs1) (EpAnn a2 an2 cs2) = (EpAnn a1 an1 cs1', EpAnn a2 an2 cs2') where pc = priorComments cs1 fc = getFollowingComments cs1 cs1' = setFollowingComments emptyComments fc cs2' = setPriorComments cs2 (priorComments cs2 <> pc) -- | Remove the exact print annotations payload, leaving only the -- anchor and comments. commentsOnlyA :: NoAnn ann => EpAnn ann -> EpAnn ann commentsOnlyA (EpAnn a _ cs) = EpAnn a noAnn cs -- | Remove the comments, leaving the exact print annotations payload removeCommentsA :: EpAnn ann -> EpAnn ann removeCommentsA (EpAnn a an _) = EpAnn a an emptyComments -- --------------------------------------------------------------------- -- Semigroup instances, to allow easy combination of annotation elements -- --------------------------------------------------------------------- instance (Semigroup a) => Semigroup (EpAnn a) where (EpAnn l1 a1 b1) <> (EpAnn l2 a2 b2) = EpAnn (l1 <> l2) (a1 <> a2) (b1 <> b2) -- The critical part about the anchor is its left edge, and all -- annotations must follow it. 
So we combine them which yields the -- largest span instance Semigroup EpaLocation where EpaSpan s1 <> EpaSpan s2 = EpaSpan (combineSrcSpans s1 s2) EpaSpan s1 <> _ = EpaSpan s1 _ <> EpaSpan s2 = EpaSpan s2 EpaDelta s1 dp1 cs1 <> EpaDelta s2 _dp2 cs2 = EpaDelta (combineSrcSpans s1 s2) dp1 (cs1<>cs2) instance Semigroup EpAnnComments where EpaComments cs1 <> EpaComments cs2 = EpaComments (cs1 ++ cs2) EpaComments cs1 <> EpaCommentsBalanced cs2 as2 = EpaCommentsBalanced (cs1 ++ cs2) as2 EpaCommentsBalanced cs1 as1 <> EpaComments cs2 = EpaCommentsBalanced (cs1 ++ cs2) as1 EpaCommentsBalanced cs1 as1 <> EpaCommentsBalanced cs2 as2 = EpaCommentsBalanced (cs1 ++ cs2) (as1++as2) instance Semigroup AnnListItem where (AnnListItem l1) <> (AnnListItem l2) = AnnListItem (l1 <> l2) instance Semigroup (AnnSortKey tag) where NoAnnSortKey <> x = x x <> NoAnnSortKey = x AnnSortKey ls1 <> AnnSortKey ls2 = AnnSortKey (ls1 <> ls2) instance Monoid (AnnSortKey tag) where mempty = NoAnnSortKey -- --------------------------------------------------------------------- -- NoAnn instances -- --------------------------------------------------------------------- instance NoAnn EpaLocation where noAnn = EpaDelta noSrcSpan (SameLine 0) [] instance NoAnn [a] where noAnn = [] instance NoAnn (Maybe a) where noAnn = Nothing instance NoAnn a => NoAnn (Either a b) where noAnn = Left noAnn instance (NoAnn a, NoAnn b) => NoAnn (a, b) where noAnn = (noAnn, noAnn) instance (NoAnn a, NoAnn b, NoAnn c) => NoAnn (a, b, c) where noAnn = (noAnn, noAnn, noAnn) instance (NoAnn a, NoAnn b, NoAnn c, NoAnn d) => NoAnn (a, b, c, d) where noAnn = (noAnn, noAnn, noAnn, noAnn) instance NoAnn Bool where noAnn = False instance NoAnn () where noAnn = () instance (NoAnn ann) => NoAnn (EpAnn ann) where noAnn = EpAnn noSpanAnchor noAnn emptyComments instance NoAnn NoEpAnns where noAnn = NoEpAnns instance NoAnn AnnListItem where noAnn = AnnListItem [] instance NoAnn AnnContext where noAnn = AnnContext Nothing [] [] instance NoAnn 
a => NoAnn (AnnList a) where noAnn = AnnList Nothing ListNone noAnn noAnn [] instance NoAnn NameAnn where noAnn = NameAnnTrailing [] instance NoAnn AnnPragma where noAnn = AnnPragma noAnn noAnn noAnn noAnn noAnn noAnn noAnn instance NoAnn AnnParen where noAnn = AnnParens noAnn noAnn instance NoAnn (EpToken s) where noAnn = NoEpTok instance NoAnn (EpUniToken s t) where noAnn = NoEpUniTok instance NoAnn SourceText where noAnn = NoSourceText -- --------------------------------------------------------------------- instance (Outputable a) => Outputable (EpAnn a) where ppr (EpAnn l a c) = text "EpAnn" <+> ppr l <+> ppr a <+> ppr c instance Outputable NoEpAnns where ppr NoEpAnns = text "NoEpAnns" instance Outputable (GenLocated NoCommentsLocation EpaComment) where ppr (L l c) = text "L" <+> ppr l <+> ppr c instance Outputable EpAnnComments where ppr (EpaComments cs) = text "EpaComments" <+> ppr cs ppr (EpaCommentsBalanced cs ts) = text "EpaCommentsBalanced" <+> ppr cs <+> ppr ts instance (NamedThing (Located a)) => NamedThing (LocatedAn an a) where getName (L l a) = getName (L (locA l) a) instance Outputable AnnContext where ppr (AnnContext a o c) = text "AnnContext" <+> ppr a <+> ppr o <+> ppr c instance Outputable BindTag where ppr tag = text $ show tag instance Outputable DeclTag where ppr tag = text $ show tag instance Outputable tag => Outputable (AnnSortKey tag) where ppr NoAnnSortKey = text "NoAnnSortKey" ppr (AnnSortKey ls) = text "AnnSortKey" <+> ppr ls instance Outputable IsUnicodeSyntax where ppr = text . show instance (Outputable a, Outputable e) => Outputable (GenLocated (EpAnn a) e) where ppr = pprLocated instance (Outputable a, OutputableBndr e) => OutputableBndr (GenLocated (EpAnn a) e) where pprInfixOcc = pprInfixOcc . unLoc pprPrefixOcc = pprPrefixOcc . 
unLoc instance (Outputable e) => Outputable (GenLocated EpaLocation e) where ppr = pprLocated instance Outputable AnnParen where ppr (AnnParens o c) = text "AnnParens" <+> ppr o <+> ppr c ppr (AnnParensHash o c) = text "AnnParensHash" <+> ppr o <+> ppr c ppr (AnnParensSquare o c) = text "AnnParensSquare" <+> ppr o <+> ppr c instance Outputable AnnListItem where ppr (AnnListItem ts) = text "AnnListItem" <+> ppr ts instance Outputable NameAdornment where ppr (NameParens o c) = text "NameParens" <+> ppr o <+> ppr c ppr (NameParensHash o c) = text "NameParensHash" <+> ppr o <+> ppr c ppr (NameBackquotes o c) = text "NameBackquotes" <+> ppr o <+> ppr c ppr (NameSquare o c) = text "NameSquare" <+> ppr o <+> ppr c ppr NameNoAdornment = text "NameNoAdornment" instance Outputable NameAnn where ppr (NameAnn a n t) = text "NameAnn" <+> ppr a <+> ppr n <+> ppr t ppr (NameAnnCommas a n t) = text "NameAnnCommas" <+> ppr a <+> ppr n <+> ppr t ppr (NameAnnBars a n t) = text "NameAnnBars" <+> ppr a <+> ppr n <+> ppr t ppr (NameAnnOnly a t) = text "NameAnnOnly" <+> ppr a <+> ppr t ppr (NameAnnRArrow o n c t) = text "NameAnnRArrow" <+> ppr o <+> ppr n <+> ppr c <+> ppr t ppr (NameAnnQuote q n t) = text "NameAnnQuote" <+> ppr q <+> ppr n <+> ppr t ppr (NameAnnTrailing t) = text "NameAnnTrailing" <+> ppr t instance (Outputable a) => Outputable (AnnList a) where ppr (AnnList anc p s a t) = text "AnnList" <+> ppr anc <+> ppr p <+> ppr s <+> ppr a <+> ppr t instance Outputable AnnListBrackets where ppr (ListParens o c) = text "ListParens" <+> ppr o <+> ppr c ppr (ListBraces o c) = text "ListBraces" <+> ppr o <+> ppr c ppr (ListSquare o c) = text "ListSquare" <+> ppr o <+> ppr c ppr (ListBanana o c) = text "ListBanana" <+> ppr o <+> ppr c ppr ListNone = text "ListNone" instance Outputable AnnPragma where ppr (AnnPragma o c s l ca t m) = text "AnnPragma" <+> ppr o <+> ppr c <+> ppr s <+> ppr l <+> ppr ca <+> ppr ca <+> ppr t <+> ppr m 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/CharClass.hs0000644000000000000000000002031307346545000021340 0ustar0000000000000000-- Character classification module GHC.Parser.CharClass ( is_ident -- Char# -> Bool , is_symbol -- Char# -> Bool , is_any -- Char# -> Bool , is_space -- Char# -> Bool , is_lower -- Char# -> Bool , is_upper -- Char# -> Bool , is_digit -- Char# -> Bool , is_alphanum -- Char# -> Bool , is_decdigit, is_hexdigit, is_octdigit, is_bindigit , hexDigit, octDecDigit ) where import GHC.Prelude import Data.Char ( ord, chr ) import Data.Word import GHC.Utils.Panic -- Bit masks cIdent, cSymbol, cAny, cSpace, cLower, cUpper, cDigit :: Word8 cIdent = 1 cSymbol = 2 cAny = 4 cSpace = 8 cLower = 16 cUpper = 32 cDigit = 64 -- | The predicates below look costly, but aren't, GHC+GCC do a great job -- at the big case below. {-# INLINABLE is_ctype #-} is_ctype :: Word8 -> Char -> Bool is_ctype mask c = c <= '\127' && (charType c .&. mask) /= 0 is_ident, is_symbol, is_any, is_space, is_lower, is_upper, is_digit, is_alphanum :: Char -> Bool is_ident = is_ctype cIdent is_symbol = is_ctype cSymbol is_any = is_ctype cAny is_space = is_ctype cSpace is_lower = is_ctype cLower is_upper = is_ctype cUpper is_digit = is_ctype cDigit is_alphanum = is_ctype (cLower+cUpper+cDigit) -- Utils hexDigit :: Char -> Int hexDigit c | is_decdigit c = ord c - ord '0' | otherwise = ord (to_lower c) - ord 'a' + 10 octDecDigit :: Char -> Int octDecDigit c = ord c - ord '0' is_decdigit :: Char -> Bool is_decdigit c = c >= '0' && c <= '9' is_hexdigit :: Char -> Bool is_hexdigit c = is_decdigit c || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') is_octdigit :: Char -> Bool is_octdigit c = c >= '0' && c <= '7' is_bindigit :: Char -> Bool is_bindigit c = c == '0' || c == '1' to_lower :: Char -> Char to_lower c | c >= 'A' && c <= 'Z' = chr (ord c - (ord 'A' - ord 'a')) | otherwise = c charType :: Char -> Word8 charType c = case c of '\0' -> 0 -- \000 '\1' -> 0 -- \001 '\2' -> 0 -- \002 
'\3' -> 0 -- \003 '\4' -> 0 -- \004 '\5' -> 0 -- \005 '\6' -> 0 -- \006 '\7' -> 0 -- \007 '\8' -> 0 -- \010 '\9' -> cSpace -- \t (not allowed in strings, so !cAny) '\10' -> cSpace -- \n (ditto) '\11' -> cSpace -- \v (ditto) '\12' -> cSpace -- \f (ditto) '\13' -> cSpace -- ^M (ditto) '\14' -> 0 -- \016 '\15' -> 0 -- \017 '\16' -> 0 -- \020 '\17' -> 0 -- \021 '\18' -> 0 -- \022 '\19' -> 0 -- \023 '\20' -> 0 -- \024 '\21' -> 0 -- \025 '\22' -> 0 -- \026 '\23' -> 0 -- \027 '\24' -> 0 -- \030 '\25' -> 0 -- \031 '\26' -> 0 -- \032 '\27' -> 0 -- \033 '\28' -> 0 -- \034 '\29' -> 0 -- \035 '\30' -> 0 -- \036 '\31' -> 0 -- \037 '\32' -> cAny .|. cSpace -- '\33' -> cAny .|. cSymbol -- ! '\34' -> cAny -- " '\35' -> cAny .|. cSymbol -- # '\36' -> cAny .|. cSymbol -- $ '\37' -> cAny .|. cSymbol -- % '\38' -> cAny .|. cSymbol -- & '\39' -> cAny .|. cIdent -- ' '\40' -> cAny -- ( '\41' -> cAny -- ) '\42' -> cAny .|. cSymbol -- * '\43' -> cAny .|. cSymbol -- + '\44' -> cAny -- , '\45' -> cAny .|. cSymbol -- - '\46' -> cAny .|. cSymbol -- . '\47' -> cAny .|. cSymbol -- / '\48' -> cAny .|. cIdent .|. cDigit -- 0 '\49' -> cAny .|. cIdent .|. cDigit -- 1 '\50' -> cAny .|. cIdent .|. cDigit -- 2 '\51' -> cAny .|. cIdent .|. cDigit -- 3 '\52' -> cAny .|. cIdent .|. cDigit -- 4 '\53' -> cAny .|. cIdent .|. cDigit -- 5 '\54' -> cAny .|. cIdent .|. cDigit -- 6 '\55' -> cAny .|. cIdent .|. cDigit -- 7 '\56' -> cAny .|. cIdent .|. cDigit -- 8 '\57' -> cAny .|. cIdent .|. cDigit -- 9 '\58' -> cAny .|. cSymbol -- : '\59' -> cAny -- ; '\60' -> cAny .|. cSymbol -- < '\61' -> cAny .|. cSymbol -- = '\62' -> cAny .|. cSymbol -- > '\63' -> cAny .|. cSymbol -- ? '\64' -> cAny .|. cSymbol -- @ '\65' -> cAny .|. cIdent .|. cUpper -- A '\66' -> cAny .|. cIdent .|. cUpper -- B '\67' -> cAny .|. cIdent .|. cUpper -- C '\68' -> cAny .|. cIdent .|. cUpper -- D '\69' -> cAny .|. cIdent .|. cUpper -- E '\70' -> cAny .|. cIdent .|. cUpper -- F '\71' -> cAny .|. cIdent .|. cUpper -- G '\72' -> cAny .|. 
cIdent .|. cUpper -- H '\73' -> cAny .|. cIdent .|. cUpper -- I '\74' -> cAny .|. cIdent .|. cUpper -- J '\75' -> cAny .|. cIdent .|. cUpper -- K '\76' -> cAny .|. cIdent .|. cUpper -- L '\77' -> cAny .|. cIdent .|. cUpper -- M '\78' -> cAny .|. cIdent .|. cUpper -- N '\79' -> cAny .|. cIdent .|. cUpper -- O '\80' -> cAny .|. cIdent .|. cUpper -- P '\81' -> cAny .|. cIdent .|. cUpper -- Q '\82' -> cAny .|. cIdent .|. cUpper -- R '\83' -> cAny .|. cIdent .|. cUpper -- S '\84' -> cAny .|. cIdent .|. cUpper -- T '\85' -> cAny .|. cIdent .|. cUpper -- U '\86' -> cAny .|. cIdent .|. cUpper -- V '\87' -> cAny .|. cIdent .|. cUpper -- W '\88' -> cAny .|. cIdent .|. cUpper -- X '\89' -> cAny .|. cIdent .|. cUpper -- Y '\90' -> cAny .|. cIdent .|. cUpper -- Z '\91' -> cAny -- [ '\92' -> cAny .|. cSymbol -- backslash '\93' -> cAny -- ] '\94' -> cAny .|. cSymbol -- ^ '\95' -> cAny .|. cIdent .|. cLower -- _ '\96' -> cAny -- ` '\97' -> cAny .|. cIdent .|. cLower -- a '\98' -> cAny .|. cIdent .|. cLower -- b '\99' -> cAny .|. cIdent .|. cLower -- c '\100' -> cAny .|. cIdent .|. cLower -- d '\101' -> cAny .|. cIdent .|. cLower -- e '\102' -> cAny .|. cIdent .|. cLower -- f '\103' -> cAny .|. cIdent .|. cLower -- g '\104' -> cAny .|. cIdent .|. cLower -- h '\105' -> cAny .|. cIdent .|. cLower -- i '\106' -> cAny .|. cIdent .|. cLower -- j '\107' -> cAny .|. cIdent .|. cLower -- k '\108' -> cAny .|. cIdent .|. cLower -- l '\109' -> cAny .|. cIdent .|. cLower -- m '\110' -> cAny .|. cIdent .|. cLower -- n '\111' -> cAny .|. cIdent .|. cLower -- o '\112' -> cAny .|. cIdent .|. cLower -- p '\113' -> cAny .|. cIdent .|. cLower -- q '\114' -> cAny .|. cIdent .|. cLower -- r '\115' -> cAny .|. cIdent .|. cLower -- s '\116' -> cAny .|. cIdent .|. cLower -- t '\117' -> cAny .|. cIdent .|. cLower -- u '\118' -> cAny .|. cIdent .|. cLower -- v '\119' -> cAny .|. cIdent .|. cLower -- w '\120' -> cAny .|. cIdent .|. cLower -- x '\121' -> cAny .|. cIdent .|. cLower -- y '\122' -> cAny .|. 
cIdent .|. cLower -- z '\123' -> cAny -- { '\124' -> cAny .|. cSymbol -- | '\125' -> cAny -- } '\126' -> cAny .|. cSymbol -- ~ '\127' -> 0 -- \177 _ -> panic ("charType: " ++ show c) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Errors/0000755000000000000000000000000007346545000020416 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Errors/Basic.hs0000644000000000000000000000156207346545000021777 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Parser.Errors.Basic where import GHC.Utils.Outputable ( SDoc, text ) -- | The operator symbol in the 'PsOperatorWhitespaceExtConflictMessage' diagnostic. data OperatorWhitespaceSymbol = OperatorWhitespaceSymbol_PrefixPercent | OperatorWhitespaceSymbol_PrefixDollar | OperatorWhitespaceSymbol_PrefixDollarDollar pprOperatorWhitespaceSymbol :: OperatorWhitespaceSymbol -> SDoc pprOperatorWhitespaceSymbol = \case OperatorWhitespaceSymbol_PrefixPercent -> text "%" OperatorWhitespaceSymbol_PrefixDollar -> text "$" OperatorWhitespaceSymbol_PrefixDollarDollar -> text "$$" -- | The operator occurrence type in the 'PsOperatorWhitespaceMessage' diagnostic. 
data OperatorWhitespaceOccurrence = OperatorWhitespaceOccurrence_Prefix | OperatorWhitespaceOccurrence_Suffix | OperatorWhitespaceOccurrence_TightInfix ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Errors/Ppr.hs0000644000000000000000000014130407346545000021516 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic PsMessage module GHC.Parser.Errors.Ppr where import GHC.Prelude import GHC.Driver.Flags import GHC.Parser.Errors.Basic import GHC.Parser.Errors.Types import GHC.Parser.Types import GHC.Types.Basic import GHC.Types.Hint import GHC.Types.Error import GHC.Types.Hint.Ppr (perhapsAsPat) import GHC.Types.SrcLoc import GHC.Types.Error.Codes import GHC.Types.Name.Reader ( opIsAt, rdrNameOcc, mkUnqual ) import GHC.Types.Name.Occurrence (isSymOcc, occNameFS, varName) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Data.FastString import GHC.Data.Maybe (catMaybes) import GHC.Hs.Expr (prependQualified, HsExpr(..), HsLamVariant(..), lamCaseKeyword) import GHC.Hs.Type (pprLHsContext, pprHsArrow, pprHsForAll) import GHC.Builtin.Names (allNameStringList) import GHC.Builtin.Types (filterCTuple) import qualified GHC.LanguageExtensions as LangExt import Data.List.NonEmpty (NonEmpty((:|))) import GHC.Hs.Pat (Pat(..), LPat) import GHC.Hs.Extension import GHC.Parser.Annotation (noAnn) instance Diagnostic PsMessage where type DiagnosticOpts PsMessage = NoDiagnosticOpts diagnosticMessage opts = \case PsUnknownMessage (UnknownDiagnostic f m) -> diagnosticMessage (f opts) m PsHeaderMessage m -> psHeaderMessageDiagnostic m PsWarnHaddockInvalidPos -> mkSimpleDecorated $ text "A Haddock comment cannot appear in this position and will be 
ignored." PsWarnHaddockIgnoreMulti -> mkSimpleDecorated $ text "Multiple Haddock comments for a single entity are not allowed." $$ text "The extraneous comment will be ignored." PsWarnBidirectionalFormatChars ((loc,_,desc) :| xs) -> mkSimpleDecorated $ text "A unicode bidirectional formatting character" <+> parens (text desc) $$ text "was found at offset" <+> ppr (bufPos (psBufPos loc)) <+> text "in the file" $$ (case xs of [] -> empty xs -> text "along with further bidirectional formatting characters at" <+> pprChars xs where pprChars [] = empty pprChars ((loc,_,desc):xs) = text "offset" <+> ppr (bufPos (psBufPos loc)) <> text ":" <+> text desc $$ pprChars xs ) $$ text "Bidirectional formatting characters may be rendered misleadingly in certain editors" PsWarnTab tc -> mkSimpleDecorated $ text "Tab character found here" <> (if tc == 1 then text "" else text ", and in" <+> speakNOf (fromIntegral (tc - 1)) (text "further location")) <> text "." PsWarnTransitionalLayout reason -> mkSimpleDecorated $ text "transitional layout will not be accepted in the future:" $$ (case reason of TransLayout_Where -> text "`where' clause at the same depth as implicit layout block" TransLayout_Pipe -> text "`|' at the same depth as implicit layout block" ) PsWarnOperatorWhitespaceExtConflict sym -> let mk_prefix_msg extension_name syntax_meaning = text "The prefix use of a" <+> quotes (pprOperatorWhitespaceSymbol sym) <+> text "would denote" <+> syntax_meaning $$ nest 2 (text "were the" <+> extension_name <+> text "extension enabled.") in mkSimpleDecorated $ case sym of OperatorWhitespaceSymbol_PrefixPercent -> mk_prefix_msg (text "LinearTypes") (text "a multiplicity annotation") OperatorWhitespaceSymbol_PrefixDollar -> mk_prefix_msg (text "TemplateHaskell") (text "an untyped splice") OperatorWhitespaceSymbol_PrefixDollarDollar -> mk_prefix_msg (text "TemplateHaskell") (text "a typed splice") PsWarnOperatorWhitespace sym occ_type -> let mk_msg occ_type_str = text "The" <+> text 
occ_type_str <+> text "use of a" <+> quotes (ftext sym) <+> text "might be repurposed as special syntax" $$ nest 2 (text "by a future language extension.") in mkSimpleDecorated $ case occ_type of OperatorWhitespaceOccurrence_Prefix -> mk_msg "prefix" OperatorWhitespaceOccurrence_Suffix -> mk_msg "suffix" OperatorWhitespaceOccurrence_TightInfix -> mk_msg "tight infix" PsWarnStarBinder -> mkSimpleDecorated $ text "Found binding occurrence of" <+> quotes (text "*") <+> text "yet StarIsType is enabled." PsWarnStarIsType -> mkSimpleDecorated $ text "Using" <+> quotes (text "*") <+> text "(or its Unicode variant) to mean" <+> quotes (text "Data.Kind.Type") $$ text "relies on the StarIsType extension, which will become" $$ text "deprecated in the future." PsWarnUnrecognisedPragma prag _ -> mkSimpleDecorated $ text "Unrecognised pragma" <> if null prag then empty else text ":" <+> text prag PsWarnMisplacedPragma prag -> mkSimpleDecorated $ text "Misplaced" <+> pprFileHeaderPragmaType prag <+> text "pragma" PsWarnImportPreQualified -> mkSimpleDecorated $ text "Found" <+> quotes (text "qualified") <+> text "in prepositive position" PsWarnViewPatternSignatures old new -> mkDecorated $ [ text "Found an unparenthesized pattern signature on the RHS of a view pattern." , vcat [ text "This code might stop working in a future GHC release" , text "due to a planned change to the precedence of view patterns," , text "unless the view function is an endofunction." 
] , nest 2 $ vcat [ text "Current parse:" <+> quotes (ppr (add_parens_sig old)) , text "Future parse:" <+> quotes (ppr (add_parens_view new)) ] ] where add_parens_sig :: LPat GhcPs -> LPat GhcPs add_parens_sig = go where go (L l (ViewPat x e p)) = L l (ViewPat x e (go p)) go (L l (SigPat x p sig)) = par_pat (L l (SigPat x p sig)) go p = p add_parens_view :: LPat GhcPs -> LPat GhcPs add_parens_view = go where go (L l (ViewPat x e p)) = par_pat (L l (ViewPat x e p)) go (L l (SigPat x p sig)) = L l (SigPat x (go p) sig) go p = p par_pat :: LPat GhcPs -> LPat GhcPs par_pat p = L noAnn (ParPat noAnn p) PsErrLexer err kind -> mkSimpleDecorated $ hcat [ case err of LexError -> text "lexical error" LexUnknownPragma -> text "unknown pragma" LexErrorInPragma -> text "lexical error in pragma" LexNumEscapeRange -> text "numeric escape sequence out of range" LexUnterminatedComment -> text "unterminated `{-'" LexUnterminatedOptions -> text "unterminated OPTIONS pragma" LexUnterminatedQQ -> text "unterminated quasiquotation" , case kind of LexErrKind_EOF -> text " at end of input" LexErrKind_UTF8 -> text " (UTF-8 decoding error)" LexErrKind_Char c -> text $ " at character " ++ show c ] PsErrParse token _details | null token -> mkSimpleDecorated $ text "parse error (possibly incorrect indentation or mismatched brackets)" | otherwise -> mkSimpleDecorated $ text "parse error on input" <+> quotes (text token) PsErrCmmLexer -> mkSimpleDecorated $ text "Cmm lexical error" PsErrCmmParser cmm_err -> mkSimpleDecorated $ case cmm_err of CmmUnknownPrimitive name -> text "unknown primitive" <+> ftext name CmmUnknownMacro fun -> text "unknown macro" <+> ftext fun CmmUnknownCConv cconv -> text "unknown calling convention:" <+> text cconv CmmUnrecognisedSafety safety -> text "unrecognised safety" <+> text safety CmmUnrecognisedHint hint -> text "unrecognised hint:" <+> text hint PsErrTypeAppWithoutSpace v e -> mkSimpleDecorated $ sep [ text "@-pattern in expression context:" , nest 4 
(pprPrefixOcc v <> text "@" <> ppr e) ] $$ text "Type application syntax requires a space before '@'" PsErrLazyPatWithoutSpace e -> mkSimpleDecorated $ sep [ text "Lazy pattern in expression context:" , nest 4 (text "~" <> ppr e) ] $$ text "Did you mean to add a space after the '~'?" PsErrBangPatWithoutSpace e -> mkSimpleDecorated $ sep [ text "Bang pattern in expression context:" , nest 4 (text "!" <> ppr e) ] $$ text "Did you mean to add a space after the '!'?" PsErrInvalidInfixHole -> mkSimpleDecorated $ text "Invalid infix hole, expected an infix operator" PsErrExpectedHyphen -> mkSimpleDecorated $ text "Expected a hyphen" PsErrSpaceInSCC -> mkSimpleDecorated $ text "Spaces are not allowed in SCCs" PsErrEmptyDoubleQuotes _th_on -> mkSimpleDecorated $ vcat msg where msg = [ text "Parser error on `''`" , text "Character literals may not be empty" ] PsErrLambdaCase -- we can't get this error for \cases, since without -XLambdaCase, that's -- just a regular lambda expression -> mkSimpleDecorated $ text "Illegal" <+> lamCaseKeyword LamCase PsErrEmptyLambda -> mkSimpleDecorated $ text "A lambda requires at least one parameter" PsErrLinearFunction -> mkSimpleDecorated $ text "Illegal use of linear functions" PsErrOverloadedRecordUpdateNotEnabled -> mkSimpleDecorated $ text "Illegal overloaded record update" PsErrMultiWayIf -> mkSimpleDecorated $ text "Illegal multi-way if-expression" PsErrNumUnderscores reason -> mkSimpleDecorated $ text $ case reason of NumUnderscore_Integral -> "Illegal underscores in integer literals" NumUnderscore_Float -> "Illegal underscores in floating literals" PsErrIllegalBangPattern e -> mkSimpleDecorated $ text "Illegal bang-pattern or strict binding" $$ ppr e PsErrOverloadedRecordDotInvalid -> mkSimpleDecorated $ text "Use of OverloadedRecordDot '.' not valid ('.' 
isn't allowed when constructing records or in record patterns)" PsErrIllegalPatSynExport -> mkSimpleDecorated $ text "Illegal export form" PsErrOverloadedRecordUpdateNoQualifiedFields -> mkSimpleDecorated $ text "Fields cannot be qualified when OverloadedRecordUpdate is enabled" PsErrExplicitForall is_unicode -> mkSimpleDecorated $ text "Illegal symbol" <+> quotes (forallSym is_unicode) <+> text "in type" PsErrIllegalQualifiedDo qdoDoc -> mkSimpleDecorated $ text "Illegal qualified" <+> quotes qdoDoc <+> text "block" PsErrQualifiedDoInCmd m -> mkSimpleDecorated $ hang (text "Parse error in command:") 2 $ text "Found a qualified" <+> ppr m <> text ".do block in a command, but" $$ text "qualified 'do' is not supported in commands." PsErrRecordSyntaxInPatSynDecl pat -> mkSimpleDecorated $ text "record syntax not supported for pattern synonym declarations:" $$ ppr pat PsErrEmptyWhereInPatSynDecl patsyn_name -> mkSimpleDecorated $ text "pattern synonym 'where' clause cannot be empty" $$ text "In the pattern synonym declaration for: " <+> ppr (patsyn_name) PsErrInvalidWhereBindInPatSynDecl patsyn_name decl -> mkSimpleDecorated $ text "pattern synonym 'where' clause must bind the pattern synonym's name" <+> quotes (ppr patsyn_name) $$ ppr decl PsErrNoSingleWhereBindInPatSynDecl _patsyn_name decl -> mkSimpleDecorated $ text "pattern synonym 'where' clause must contain a single binding:" $$ ppr decl PsErrDeclSpliceNotAtTopLevel d -> mkSimpleDecorated $ hang (text "Declaration splices are allowed only" <+> text "at the top level:") 2 (ppr d) PsErrMultipleNamesInStandaloneKindSignature vs -> mkSimpleDecorated $ vcat [ hang (text "Standalone kind signatures do not support multiple names at the moment:") 2 (pprWithCommas ppr vs) , text "See https://gitlab.haskell.org/ghc/ghc/issues/16754 for details." 
] PsErrIllegalExplicitNamespace -> mkSimpleDecorated $ text "Illegal keyword 'type'" PsErrUnallowedPragma prag -> mkSimpleDecorated $ hang (text "A pragma is not allowed in this position:") 2 (ppr prag) PsErrImportPostQualified -> mkSimpleDecorated $ text "Found" <+> quotes (text "qualified") <+> text "in postpositive position. " PsErrImportQualifiedTwice -> mkSimpleDecorated $ text "Multiple occurrences of 'qualified'" PsErrIllegalImportBundleForm -> mkSimpleDecorated $ text "Illegal import form, this syntax can only be used to bundle" $+$ text "pattern synonyms with types in module exports." PsErrInvalidRuleActivationMarker -> mkSimpleDecorated $ text "Invalid rule activation marker" PsErrMissingBlock -> mkSimpleDecorated $ text "Missing block" PsErrUnsupportedBoxedSumExpr s -> mkSimpleDecorated $ hang (text "Boxed sums not supported:") 2 (pprSumOrTuple Boxed s) PsErrUnsupportedBoxedSumPat s -> mkSimpleDecorated $ hang (text "Boxed sums not supported:") 2 (pprSumOrTuple Boxed s) PsErrUnexpectedQualifiedConstructor v -> mkSimpleDecorated $ hang (text "Expected an unqualified type constructor:") 2 (ppr v) PsErrTupleSectionInPat -> mkSimpleDecorated $ text "Tuple section in pattern context" PsErrOpFewArgs _ op -> mkSimpleDecorated $ text "Operator applied to too few arguments:" <+> ppr op PsErrVarForTyCon name -> mkSimpleDecorated $ text "Expecting a type constructor but found a variable," <+> quotes (ppr name) <> text "." $$ if isSymOcc $ rdrNameOcc name then text "If" <+> quotes (ppr name) <+> text "is a type constructor" <+> text "then enable ExplicitNamespaces and use the 'type' keyword." else empty PsErrMalformedEntityString -> mkSimpleDecorated $ text "Malformed entity string" PsErrDotsInRecordUpdate -> mkSimpleDecorated $ text "You cannot use `..' 
in a record update" PsErrInvalidDataCon t -> mkSimpleDecorated $ hang (text "Cannot parse data constructor in a data/newtype declaration:") 2 (ppr t) PsErrInvalidInfixDataCon lhs tc rhs -> mkSimpleDecorated $ hang (text "Cannot parse an infix data constructor in a data/newtype declaration:") 2 (ppr lhs <+> ppr tc <+> ppr rhs) PsErrIllegalPromotionQuoteDataCon name -> mkSimpleDecorated $ text "Illegal promotion quote mark in the declaration of" $$ text "data/newtype constructor" <+> pprPrefixOcc name PsErrUnpackDataCon -> mkSimpleDecorated $ text "{-# UNPACK #-} cannot be applied to a data constructor." PsErrUnexpectedKindAppInDataCon lhs ki -> mkSimpleDecorated $ hang (text "Unexpected kind application in a data/newtype declaration:") 2 (ppr lhs <+> text "@" <> ppr ki) PsErrInvalidRecordCon p -> mkSimpleDecorated $ text "Not a record constructor:" <+> ppr p PsErrIllegalUnboxedStringInPat lit -> mkSimpleDecorated $ text "Illegal unboxed string literal in pattern:" $$ ppr lit PsErrIllegalUnboxedFloatingLitInPat lit -> mkSimpleDecorated $ text "Illegal unboxed floating point literal in pattern:" $$ ppr lit PsErrDoNotationInPat -> mkSimpleDecorated $ text "do-notation in pattern" PsErrIfThenElseInPat -> mkSimpleDecorated $ text "(if ... then ... else ...)-syntax in pattern" PsErrCaseInPat -> mkSimpleDecorated $ text "(case ... of ...)-syntax in pattern" PsErrLetInPat -> mkSimpleDecorated $ text "(let ... 
in ...)-syntax in pattern" PsErrLambdaInPat lam_variant -> mkSimpleDecorated $ text "Illegal" <+> lamCaseKeyword lam_variant <> text "-syntax in pattern" PsErrArrowExprInPat e -> mkSimpleDecorated $ text "Expression syntax in pattern:" <+> ppr e PsErrArrowCmdInPat c -> mkSimpleDecorated $ text "Command syntax in pattern:" <+> ppr c PsErrArrowCmdInExpr c -> mkSimpleDecorated $ vcat [ text "Arrow command found where an expression was expected:" , nest 2 (ppr c) ] PsErrOrPatInExpr p -> mkSimpleDecorated $ sep [ text "Or pattern in expression context:" , nest 4 (ppr p) ] PsErrCaseCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "case command") a PsErrLambdaCmdInFunAppCmd lam_variant a -> mkSimpleDecorated $ pp_unexpected_fun_app (lamCaseKeyword lam_variant <+> text "command") a PsErrIfCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "if command") a PsErrLetCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "let command") a PsErrDoCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "do command") a PsErrDoInFunAppExpr m a -> mkSimpleDecorated $ pp_unexpected_fun_app (prependQualified m (text "do block")) a PsErrMDoInFunAppExpr m a -> mkSimpleDecorated $ pp_unexpected_fun_app (prependQualified m (text "mdo block")) a PsErrCaseInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "case expression") a PsErrLambdaInFunAppExpr lam_variant a -> mkSimpleDecorated $ pp_unexpected_fun_app (lamCaseKeyword lam_variant <+> text "expression") a PsErrLetInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "let expression") a PsErrIfInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "if expression") a PsErrProcInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "proc expression") a PsErrMalformedTyOrClDecl ty -> mkSimpleDecorated $ text "Malformed head of type or class declaration:" <+> ppr ty PsErrIllegalWhereInDataDecl -> mkSimpleDecorated $ text "Illegal 
keyword 'where' in data declaration" PsErrIllegalDataTypeContext c -> mkSimpleDecorated $ text "Illegal datatype context:" <+> pprLHsContext (Just c) PsErrPrimStringInvalidChar -> mkSimpleDecorated $ text "primitive string literal must contain only characters <= \'\\xFF\'" PsErrSuffixAT -> mkSimpleDecorated $ text "The symbol '@' occurs as a suffix." $$ text "For an as-pattern, there must not be any whitespace surrounding '@'." PsErrPrecedenceOutOfRange i -> mkSimpleDecorated $ text "Precedence out of range: " <> int i PsErrSemiColonsInCondExpr c st t se e -> mkSimpleDecorated $ text "Unexpected semi-colons in conditional:" $$ nest 4 expr where pprOptSemi True = semi pprOptSemi False = empty expr = text "if" <+> ppr c <> pprOptSemi st <+> text "then" <+> ppr t <> pprOptSemi se <+> text "else" <+> ppr e PsErrSemiColonsInCondCmd c st t se e -> mkSimpleDecorated $ text "Unexpected semi-colons in conditional:" $$ nest 4 expr where pprOptSemi True = semi pprOptSemi False = empty expr = text "if" <+> ppr c <> pprOptSemi st <+> text "then" <+> ppr t <> pprOptSemi se <+> text "else" <+> ppr e PsErrAtInPatPos -> mkSimpleDecorated $ text "Found a binding for the" <+> quotes (text "@") <+> text "operator in a pattern position." 
$$ perhapsAsPat PsErrParseErrorOnInput occ -> mkSimpleDecorated $ text "parse error on input" <+> ftext (occNameFS occ) PsErrMalformedDecl what for -> mkSimpleDecorated $ text "Malformed" <+> what <+> text "declaration for" <+> quotes (ppr for) PsErrNotADataCon name -> mkSimpleDecorated $ text "Not a data constructor:" <+> quotes (ppr name) PsErrInferredTypeVarNotAllowed -> mkSimpleDecorated $ text "Inferred type variables are not allowed here" PsErrIllegalTraditionalRecordSyntax s -> mkSimpleDecorated $ text "Illegal record syntax:" <+> s PsErrParseErrorInCmd s -> mkSimpleDecorated $ hang (text "Parse error in command:") 2 s PsErrInPat s details -> let msg = parse_error_in_pat body = case details of PEIP_NegApp -> text "-" <> ppr s PEIP_TypeArgs peipd_tyargs | not (null peipd_tyargs) -> ppr s <+> vcat [ hsep (map ppr peipd_tyargs) , text "Type applications in patterns are only allowed on data constructors." ] | otherwise -> ppr s PEIP_OtherPatDetails (ParseContext (Just fun) _) -> ppr s <+> text "In a function binding for the" <+> quotes (ppr fun) <+> text "operator." $$ if opIsAt fun then perhapsAsPat else empty _ -> ppr s in mkSimpleDecorated $ msg <+> body PsErrParseRightOpSectionInPat infixOcc s -> mkSimpleDecorated $ parse_error_in_pat <+> pprInfixOcc infixOcc <> ppr s PsErrIllegalRoleName role _nearby -> mkSimpleDecorated $ text "Illegal role name" <+> quotes (ppr role) PsErrInvalidTypeSignature reason lhs -> mkSimpleDecorated $ case reason of PsErrInvalidTypeSig_DataCon -> text "Invalid data constructor" <+> quotes (ppr lhs) <+> text "in type signature" <> colon $$ text "You can only define data constructors in data type declarations." PsErrInvalidTypeSig_Qualified -> text "Invalid qualified name in type signature." PsErrInvalidTypeSig_Other -> text "Invalid type signature" <> colon $$ text "A type signature should be of form" <+> placeHolder "variables" <+> dcolon <+> placeHolder "type" <> dot where placeHolder = angleBrackets . 
text PsErrUnexpectedTypeInDecl t what tc tparms equals_or_where -> mkSimpleDecorated $ vcat [ text "Unexpected type" <+> quotes (ppr t) , text "In the" <+> what <+> text "declaration for" <+> quotes tc' , vcat[ (text "A" <+> what <+> text "declaration should have form") , nest 2 (what <+> tc' <+> hsep (map text (takeList tparms allNameStringList)) <+> equals_or_where) ] ] where -- Avoid printing a constraint tuple in the error message. Print -- a plain old tuple instead (since that's what the user probably -- wrote). See #14907 tc' = ppr $ filterCTuple tc PsErrInvalidPackageName pkg -> mkSimpleDecorated $ vcat [ text "Parse error" <> colon <+> quotes (ftext pkg) , text "Version number or non-alphanumeric" <+> text "character in package name" ] PsErrIllegalGadtRecordMultiplicity arr -> mkSimpleDecorated $ vcat [ text "Parse error" <> colon <+> quotes (ppr arr) , text "Record constructors in GADTs must use an ordinary, non-linear arrow." ] PsErrInvalidCApiImport {} -> mkSimpleDecorated $ vcat [ text "Wrapper stubs can't be used with CApiFFI."] PsErrMultipleConForNewtype tycon n -> mkSimpleDecorated $ vcat [ sep [ text "A newtype must have exactly one constructor," , nest 2 $ text "but" <+> quotes (ppr tycon) <+> text "has" <+> speakN n ] , text "In the newtype declaration for" <+> quotes (ppr tycon) ] PsErrUnicodeCharLooksLike bad_char looks_like_char looks_like_char_name -> mkSimpleDecorated $ hsep [ text "Unicode character" -- purposefully not using `quotes (text [bad_char])`, because the quotes function adds smart quotes, -- and smart quotes may be the topic of this error message , text "'" <> text [bad_char] <> text "' (" <> text (show bad_char) <> text ")" , text "looks like" , text "'" <> text [looks_like_char] <> text "' (" <> text looks_like_char_name <> text ")" <> comma , text "but it is not" ] PsErrInvalidPun PEP_QuoteDisambiguation -> mkSimpleDecorated $ vcat [ text "Disambiguating data constructors of tuples and lists is disabled." 
, text "Remove the quote to use the data constructor." ] PsErrInvalidPun PEP_TupleSyntaxType -> mkSimpleDecorated $ vcat [ text "Unboxed tuple data constructors are not supported in types." , text "Use" <+> quotes (text "Tuple# a b c ...") <+> text "to refer to the type constructor." ] PsErrInvalidPun PEP_SumSyntaxType -> mkSimpleDecorated $ vcat [ text "Unboxed sum data constructors are not supported in types." , text "Use" <+> quotes (text "Sum# a b c ...") <+> text "to refer to the type constructor." ] PsErrTypeSyntaxInPat ctx -> mkSimpleDecorated $ vcat [ text "Illegal" <+> text what <+> "in pattern:" <+> quotes ctx' , text "Type syntax in patterns isn't supported at the time"] where (what, ctx') = case ctx of PETS_FunctionArrow arg arr res -> ("function arrow", ppr arg <+> pprHsArrow arr <+> ppr res) PETS_Multiplicity tok p -> ("multiplicity annotation", ppr tok <> ppr p) PETS_ForallTelescope tele body -> ("forall telescope", pprHsForAll tele Nothing <+> ppr body) PETS_ConstraintContext ctx -> ("constraint context", ppr ctx) PsErrIllegalOrPat pat -> mkSimpleDecorated $ vcat [text "Illegal or-pattern:" <+> ppr (unLoc pat)] diagnosticReason = \case PsUnknownMessage m -> diagnosticReason m PsHeaderMessage m -> psHeaderMessageReason m PsWarnBidirectionalFormatChars{} -> WarningWithFlag Opt_WarnUnicodeBidirectionalFormatCharacters PsWarnTab{} -> WarningWithFlag Opt_WarnTabs PsWarnTransitionalLayout{} -> WarningWithFlag Opt_WarnAlternativeLayoutRuleTransitional PsWarnOperatorWhitespaceExtConflict{} -> WarningWithFlag Opt_WarnOperatorWhitespaceExtConflict PsWarnOperatorWhitespace{} -> WarningWithFlag Opt_WarnOperatorWhitespace PsWarnHaddockInvalidPos -> WarningWithFlag Opt_WarnInvalidHaddock PsWarnHaddockIgnoreMulti -> WarningWithFlag Opt_WarnInvalidHaddock PsWarnStarBinder -> WarningWithFlag Opt_WarnStarBinder PsWarnStarIsType -> WarningWithFlag Opt_WarnStarIsType PsWarnUnrecognisedPragma{} -> WarningWithFlag Opt_WarnUnrecognisedPragmas PsWarnMisplacedPragma{} -> 
WarningWithFlag Opt_WarnMisplacedPragmas PsWarnImportPreQualified -> WarningWithFlag Opt_WarnPrepositiveQualifiedModule PsWarnViewPatternSignatures{} -> WarningWithFlag Opt_WarnViewPatternSignatures PsErrLexer{} -> ErrorWithoutFlag PsErrCmmLexer -> ErrorWithoutFlag PsErrCmmParser{} -> ErrorWithoutFlag PsErrParse{} -> ErrorWithoutFlag PsErrTypeAppWithoutSpace{} -> ErrorWithoutFlag PsErrLazyPatWithoutSpace{} -> ErrorWithoutFlag PsErrBangPatWithoutSpace{} -> ErrorWithoutFlag PsErrInvalidInfixHole -> ErrorWithoutFlag PsErrExpectedHyphen -> ErrorWithoutFlag PsErrSpaceInSCC -> ErrorWithoutFlag PsErrEmptyDoubleQuotes{} -> ErrorWithoutFlag PsErrLambdaCase{} -> ErrorWithoutFlag PsErrEmptyLambda{} -> ErrorWithoutFlag PsErrLinearFunction{} -> ErrorWithoutFlag PsErrMultiWayIf{} -> ErrorWithoutFlag PsErrOverloadedRecordUpdateNotEnabled{} -> ErrorWithoutFlag PsErrNumUnderscores{} -> ErrorWithoutFlag PsErrIllegalBangPattern{} -> ErrorWithoutFlag PsErrOverloadedRecordDotInvalid{} -> ErrorWithoutFlag PsErrIllegalPatSynExport -> ErrorWithoutFlag PsErrOverloadedRecordUpdateNoQualifiedFields -> ErrorWithoutFlag PsErrExplicitForall{} -> ErrorWithoutFlag PsErrIllegalQualifiedDo{} -> ErrorWithoutFlag PsErrQualifiedDoInCmd{} -> ErrorWithoutFlag PsErrRecordSyntaxInPatSynDecl{} -> ErrorWithoutFlag PsErrEmptyWhereInPatSynDecl{} -> ErrorWithoutFlag PsErrInvalidWhereBindInPatSynDecl{} -> ErrorWithoutFlag PsErrNoSingleWhereBindInPatSynDecl{} -> ErrorWithoutFlag PsErrDeclSpliceNotAtTopLevel{} -> ErrorWithoutFlag PsErrMultipleNamesInStandaloneKindSignature{} -> ErrorWithoutFlag PsErrIllegalExplicitNamespace -> ErrorWithoutFlag PsErrUnallowedPragma{} -> ErrorWithoutFlag PsErrImportPostQualified -> ErrorWithoutFlag PsErrImportQualifiedTwice -> ErrorWithoutFlag PsErrIllegalImportBundleForm -> ErrorWithoutFlag PsErrInvalidRuleActivationMarker -> ErrorWithoutFlag PsErrMissingBlock -> ErrorWithoutFlag PsErrUnsupportedBoxedSumExpr{} -> ErrorWithoutFlag PsErrUnsupportedBoxedSumPat{} -> ErrorWithoutFlag 
PsErrUnexpectedQualifiedConstructor{} -> ErrorWithoutFlag PsErrTupleSectionInPat{} -> ErrorWithoutFlag PsErrOpFewArgs{} -> ErrorWithoutFlag PsErrVarForTyCon{} -> ErrorWithoutFlag PsErrMalformedEntityString -> ErrorWithoutFlag PsErrDotsInRecordUpdate -> ErrorWithoutFlag PsErrInvalidDataCon{} -> ErrorWithoutFlag PsErrInvalidInfixDataCon{} -> ErrorWithoutFlag PsErrIllegalPromotionQuoteDataCon{} -> ErrorWithoutFlag PsErrUnpackDataCon -> ErrorWithoutFlag PsErrUnexpectedKindAppInDataCon{} -> ErrorWithoutFlag PsErrInvalidRecordCon{} -> ErrorWithoutFlag PsErrIllegalUnboxedStringInPat{} -> ErrorWithoutFlag PsErrIllegalUnboxedFloatingLitInPat{} -> ErrorWithoutFlag PsErrDoNotationInPat{} -> ErrorWithoutFlag PsErrIfThenElseInPat -> ErrorWithoutFlag PsErrCaseInPat -> ErrorWithoutFlag PsErrLetInPat -> ErrorWithoutFlag PsErrLambdaInPat{} -> ErrorWithoutFlag PsErrArrowExprInPat{} -> ErrorWithoutFlag PsErrArrowCmdInPat{} -> ErrorWithoutFlag PsErrArrowCmdInExpr{} -> ErrorWithoutFlag PsErrOrPatInExpr{} -> ErrorWithoutFlag PsErrCaseCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrLambdaCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrIfCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrLetCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrDoCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrDoInFunAppExpr{} -> ErrorWithoutFlag PsErrMDoInFunAppExpr{} -> ErrorWithoutFlag PsErrLambdaInFunAppExpr{} -> ErrorWithoutFlag PsErrCaseInFunAppExpr{} -> ErrorWithoutFlag PsErrLetInFunAppExpr{} -> ErrorWithoutFlag PsErrIfInFunAppExpr{} -> ErrorWithoutFlag PsErrProcInFunAppExpr{} -> ErrorWithoutFlag PsErrMalformedTyOrClDecl{} -> ErrorWithoutFlag PsErrIllegalWhereInDataDecl -> ErrorWithoutFlag PsErrIllegalDataTypeContext{} -> ErrorWithoutFlag PsErrPrimStringInvalidChar -> ErrorWithoutFlag PsErrSuffixAT -> ErrorWithoutFlag PsErrPrecedenceOutOfRange{} -> ErrorWithoutFlag PsErrSemiColonsInCondExpr{} -> ErrorWithoutFlag PsErrSemiColonsInCondCmd{} -> ErrorWithoutFlag PsErrAtInPatPos -> ErrorWithoutFlag PsErrParseErrorOnInput{} -> ErrorWithoutFlag 
PsErrMalformedDecl{} -> ErrorWithoutFlag PsErrNotADataCon{} -> ErrorWithoutFlag PsErrInferredTypeVarNotAllowed -> ErrorWithoutFlag PsErrIllegalTraditionalRecordSyntax{} -> ErrorWithoutFlag PsErrParseErrorInCmd{} -> ErrorWithoutFlag PsErrInPat{} -> ErrorWithoutFlag PsErrIllegalRoleName{} -> ErrorWithoutFlag PsErrInvalidTypeSignature{} -> ErrorWithoutFlag PsErrUnexpectedTypeInDecl{} -> ErrorWithoutFlag PsErrInvalidPackageName{} -> ErrorWithoutFlag PsErrParseRightOpSectionInPat{} -> ErrorWithoutFlag PsErrIllegalGadtRecordMultiplicity{} -> ErrorWithoutFlag PsErrInvalidCApiImport {} -> ErrorWithoutFlag PsErrMultipleConForNewtype {} -> ErrorWithoutFlag PsErrUnicodeCharLooksLike{} -> ErrorWithoutFlag PsErrInvalidPun {} -> ErrorWithoutFlag PsErrIllegalOrPat{} -> ErrorWithoutFlag PsErrTypeSyntaxInPat{} -> ErrorWithoutFlag diagnosticHints = \case PsUnknownMessage m -> diagnosticHints m PsHeaderMessage m -> psHeaderMessageHints m PsWarnBidirectionalFormatChars{} -> noHints PsWarnTab{} -> [SuggestUseSpaces] PsWarnTransitionalLayout{} -> noHints PsWarnOperatorWhitespaceExtConflict sym -> [SuggestUseWhitespaceAfter sym] PsWarnOperatorWhitespace sym occ -> [SuggestUseWhitespaceAround (unpackFS sym) occ] PsWarnHaddockInvalidPos -> noHints PsWarnHaddockIgnoreMulti -> noHints PsWarnStarBinder -> [SuggestQualifyStarOperator] PsWarnStarIsType -> [SuggestUseTypeFromDataKind Nothing] PsWarnUnrecognisedPragma "" _ -> noHints PsWarnUnrecognisedPragma p avail -> let suggestions = fuzzyMatch p avail in if null suggestions then noHints else [SuggestCorrectPragmaName suggestions] PsWarnMisplacedPragma{} -> [SuggestPlacePragmaInHeader] PsWarnImportPreQualified -> [ SuggestQualifiedAfterModuleName , suggestExtension LangExt.ImportQualifiedPost] PsWarnViewPatternSignatures{} -> [SuggestParenthesizePatternRHS] PsErrLexer{} -> noHints PsErrCmmLexer -> noHints PsErrCmmParser{} -> noHints PsErrParse token PsErrParseDetails{..} -> case token of "" -> [] "$" | not ped_th_enabled -> [suggestExtension 
LangExt.TemplateHaskell] -- #7396 "$$" | not ped_th_enabled -> [suggestExtension LangExt.TemplateHaskell] -- #20157 "<-" | ped_mdo_in_last_100 -> [suggestExtension LangExt.RecursiveDo] | otherwise -> [SuggestMissingDo] "=" | ped_do_in_last_100 -> [SuggestLetInDo] -- #15849 _ | not ped_pat_syn_enabled , ped_pattern_parsed -> [suggestExtension LangExt.PatternSynonyms] -- #12429 | otherwise -> [] PsErrTypeAppWithoutSpace{} -> noHints PsErrLazyPatWithoutSpace{} -> noHints PsErrBangPatWithoutSpace{} -> noHints PsErrInvalidInfixHole -> noHints PsErrExpectedHyphen -> noHints PsErrSpaceInSCC -> noHints PsErrEmptyDoubleQuotes th_on | th_on -> [SuggestThQuotationSyntax] | otherwise -> noHints PsErrLambdaCase{} -> [suggestExtension LangExt.LambdaCase] PsErrEmptyLambda{} -> noHints PsErrLinearFunction{} -> [suggestExtension LangExt.LinearTypes] PsErrMultiWayIf{} -> [suggestExtension LangExt.MultiWayIf] PsErrOverloadedRecordUpdateNotEnabled{} -> [suggestExtension LangExt.OverloadedRecordUpdate] PsErrNumUnderscores{} -> [suggestExtension LangExt.NumericUnderscores] PsErrIllegalBangPattern{} -> [suggestExtension LangExt.BangPatterns] PsErrOverloadedRecordDotInvalid{} -> noHints PsErrIllegalPatSynExport -> [suggestExtension LangExt.PatternSynonyms] PsErrOverloadedRecordUpdateNoQualifiedFields -> noHints PsErrExplicitForall is_unicode -> [useExtensionInOrderTo info LangExt.ExplicitForAll] where info = "to enable syntax:" <+> forallSym is_unicode <+> angleBrackets "tvs" <> dot <+> angleBrackets "type" PsErrIllegalQualifiedDo{} -> [suggestExtension LangExt.QualifiedDo] PsErrQualifiedDoInCmd{} -> noHints PsErrRecordSyntaxInPatSynDecl{} -> noHints PsErrEmptyWhereInPatSynDecl{} -> noHints PsErrInvalidWhereBindInPatSynDecl{} -> noHints PsErrNoSingleWhereBindInPatSynDecl{} -> noHints PsErrDeclSpliceNotAtTopLevel{} -> noHints PsErrMultipleNamesInStandaloneKindSignature{} -> noHints PsErrIllegalExplicitNamespace -> [suggestExtension LangExt.ExplicitNamespaces] PsErrUnallowedPragma{} -> 
noHints PsErrImportPostQualified -> [suggestExtension LangExt.ImportQualifiedPost] PsErrImportQualifiedTwice -> noHints PsErrIllegalImportBundleForm -> noHints PsErrInvalidRuleActivationMarker -> noHints PsErrMissingBlock -> noHints PsErrUnsupportedBoxedSumExpr{} -> noHints PsErrUnsupportedBoxedSumPat{} -> noHints PsErrUnexpectedQualifiedConstructor{} -> noHints PsErrTupleSectionInPat{} -> noHints PsErrOpFewArgs star_is_type op -> noStarIsTypeHints star_is_type op PsErrVarForTyCon{} -> noHints PsErrMalformedEntityString -> noHints PsErrDotsInRecordUpdate -> noHints PsErrInvalidDataCon{} -> noHints PsErrInvalidInfixDataCon{} -> noHints PsErrIllegalPromotionQuoteDataCon{} -> noHints PsErrUnpackDataCon -> noHints PsErrUnexpectedKindAppInDataCon{} -> noHints PsErrInvalidRecordCon{} -> noHints PsErrIllegalUnboxedStringInPat{} -> noHints PsErrIllegalUnboxedFloatingLitInPat{} -> noHints PsErrDoNotationInPat{} -> noHints PsErrIfThenElseInPat -> noHints PsErrCaseInPat -> noHints PsErrLetInPat -> noHints PsErrLambdaInPat{} -> noHints PsErrArrowExprInPat{} -> noHints PsErrArrowCmdInPat{} -> noHints PsErrArrowCmdInExpr{} -> noHints PsErrOrPatInExpr{} -> noHints PsErrLambdaCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrCaseCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrIfCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrLetCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrDoCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrDoInFunAppExpr{} -> suggestParensAndBlockArgs PsErrMDoInFunAppExpr{} -> suggestParensAndBlockArgs PsErrLambdaInFunAppExpr{} -> suggestParensAndBlockArgs PsErrCaseInFunAppExpr{} -> suggestParensAndBlockArgs PsErrLetInFunAppExpr{} -> suggestParensAndBlockArgs PsErrIfInFunAppExpr{} -> suggestParensAndBlockArgs PsErrProcInFunAppExpr{} -> suggestParensAndBlockArgs PsErrMalformedTyOrClDecl{} -> noHints PsErrIllegalWhereInDataDecl -> [useExtensionInOrderTo "to enable syntax: data T where" LangExt.GADTSyntax] PsErrIllegalDataTypeContext{} -> 
[suggestExtension LangExt.DatatypeContexts] PsErrPrimStringInvalidChar -> noHints PsErrSuffixAT -> noHints PsErrPrecedenceOutOfRange{} -> noHints PsErrSemiColonsInCondExpr{} -> [suggestExtension LangExt.DoAndIfThenElse] PsErrSemiColonsInCondCmd{} -> [suggestExtension LangExt.DoAndIfThenElse] PsErrAtInPatPos -> noHints PsErrParseErrorOnInput{} -> noHints PsErrMalformedDecl{} -> noHints PsErrNotADataCon{} -> noHints PsErrInferredTypeVarNotAllowed -> noHints PsErrIllegalTraditionalRecordSyntax{} -> [suggestExtension LangExt.TraditionalRecordSyntax] PsErrParseErrorInCmd{} -> noHints PsErrInPat _ details -> case details of PEIP_RecPattern args YesPatIsRecursive ctx | length args /= 0 -> catMaybes [sug_recdo, sug_missingdo ctx] | otherwise -> catMaybes [sug_missingdo ctx] PEIP_OtherPatDetails ctx -> catMaybes [sug_missingdo ctx] _ -> [] where sug_recdo = Just (suggestExtension LangExt.RecursiveDo) sug_missingdo (ParseContext _ YesIncompleteDoBlock) = Just SuggestMissingDo sug_missingdo _ = Nothing PsErrParseRightOpSectionInPat{} -> noHints PsErrIllegalRoleName _ nearby -> [SuggestRoles nearby] PsErrInvalidTypeSignature reason lhs -> if | foreign_RDR `looks_like` lhs -> [suggestExtension LangExt.ForeignFunctionInterface] | default_RDR `looks_like` lhs -> [suggestExtension LangExt.DefaultSignatures] | pattern_RDR `looks_like` lhs -> [suggestExtension LangExt.PatternSynonyms] | PsErrInvalidTypeSig_Qualified <- reason -> [SuggestTypeSignatureRemoveQualifier] | otherwise -> [] where -- A common error is to forget the ForeignFunctionInterface flag -- so check for that, and suggest. 
cf #3805 -- Sadly 'foreign import' still barfs 'parse error' because -- 'import' is a keyword -- looks_like :: RdrName -> LHsExpr GhcPsErr -> Bool -- AZ looks_like s (L _ (HsVar _ (L _ v))) = v == s looks_like s (L _ (HsApp _ lhs _)) = looks_like s lhs looks_like _ _ = False foreign_RDR = mkUnqual varName (fsLit "foreign") default_RDR = mkUnqual varName (fsLit "default") pattern_RDR = mkUnqual varName (fsLit "pattern") PsErrUnexpectedTypeInDecl{} -> noHints PsErrInvalidPackageName{} -> noHints PsErrIllegalGadtRecordMultiplicity{} -> noHints PsErrInvalidCApiImport {} -> noHints PsErrMultipleConForNewtype {} -> noHints PsErrUnicodeCharLooksLike{} -> noHints PsErrInvalidPun {} -> [suggestExtension LangExt.ListTuplePuns] PsErrIllegalOrPat{} -> [suggestExtension LangExt.OrPatterns] PsErrTypeSyntaxInPat{} -> noHints diagnosticCode = constructorCode psHeaderMessageDiagnostic :: PsHeaderMessage -> DecoratedSDoc psHeaderMessageDiagnostic = \case PsErrParseLanguagePragma -> mkSimpleDecorated $ vcat [ text "Cannot parse LANGUAGE pragma" , text "Expecting comma-separated list of language options," , text "each starting with a capital letter" , nest 2 (text "E.g. {-# LANGUAGE TemplateHaskell, GADTs #-}") ] PsErrUnsupportedExt unsup _ -> mkSimpleDecorated $ text "Unsupported extension: " <> text unsup PsErrParseOptionsPragma str -> mkSimpleDecorated $ vcat [ text "Error while parsing OPTIONS_GHC pragma." , text "Expecting whitespace-separated list of GHC options." , text " E.g. 
{-# OPTIONS_GHC -Wall -O2 #-}" , text ("Input was: " ++ show str) ] PsErrUnknownOptionsPragma flag -> mkSimpleDecorated $ text "Unknown flag in {-# OPTIONS_GHC #-} pragma:" <+> text flag psHeaderMessageReason :: PsHeaderMessage -> DiagnosticReason psHeaderMessageReason = \case PsErrParseLanguagePragma -> ErrorWithoutFlag PsErrUnsupportedExt{} -> ErrorWithoutFlag PsErrParseOptionsPragma{} -> ErrorWithoutFlag PsErrUnknownOptionsPragma{} -> ErrorWithoutFlag psHeaderMessageHints :: PsHeaderMessage -> [GhcHint] psHeaderMessageHints = \case PsErrParseLanguagePragma -> noHints PsErrUnsupportedExt unsup supported -> if null suggestions then noHints -- FIXME(adn) To fix the compiler crash in #19923 we just rewrap this into an -- UnknownHint, but we should have here a proper hint, but that would require -- changing 'supportedExtensions' to emit a list of 'Extension'. else [UnknownHint $ text "Perhaps you meant" <+> quotedListWithOr (map text suggestions)] where suggestions :: [String] suggestions = fuzzyMatch unsup supported PsErrParseOptionsPragma{} -> noHints PsErrUnknownOptionsPragma{} -> noHints suggestParensAndBlockArgs :: [GhcHint] suggestParensAndBlockArgs = [SuggestParentheses, suggestExtension LangExt.BlockArguments] pp_unexpected_fun_app :: Outputable a => SDoc -> a -> SDoc pp_unexpected_fun_app e a = text "Unexpected " <> e <> text " in function application:" $$ nest 4 (ppr a) parse_error_in_pat :: SDoc parse_error_in_pat = text "Parse error in pattern:" forallSym :: Bool -> SDoc forallSym True = text "∀" forallSym False = text "forall" pprFileHeaderPragmaType :: FileHeaderPragmaType -> SDoc pprFileHeaderPragmaType OptionsPrag = text "OPTIONS" pprFileHeaderPragmaType IncludePrag = text "INCLUDE" pprFileHeaderPragmaType LanguagePrag = text "LANGUAGE" pprFileHeaderPragmaType DocOptionsPrag = text "OPTIONS_HADDOCK" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Errors/Types.hs0000644000000000000000000004577307346545000022076 0ustar0000000000000000{-# LANGUAGE 
DeriveGeneric #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE DataKinds #-} module GHC.Parser.Errors.Types where import GHC.Prelude import GHC.Core.TyCon (Role) import GHC.Data.FastString import GHC.Hs import GHC.Parser.Types import GHC.Parser.Errors.Basic import GHC.Types.Error import GHC.Types.Hint import GHC.Types.Name.Occurrence (OccName) import GHC.Types.Name.Reader import Data.List.NonEmpty (NonEmpty) import GHC.Types.SrcLoc (PsLoc) import GHC.Generics ( Generic ) -- The type aliases below are useful to make some type signatures a bit more -- descriptive, like 'handleWarningsThrowErrors' in 'GHC.Driver.Main'. type PsWarning = PsMessage -- /INVARIANT/: The diagnosticReason is a Warning reason type PsError = PsMessage -- /INVARIANT/: The diagnosticReason is ErrorWithoutFlag {- Note [Messages from GHC.Parser.Header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We group the messages from 'GHC.Parser.Header' because we need to be able to pattern match on them in the driver code. This is because in functions like 'GHC.Driver.Pipeline.preprocess' we want to handle only a specific subset of parser messages, during dependency analysis, and having a single constructor to handle them all is handy. -} data PsHeaderMessage = PsErrParseLanguagePragma | PsErrUnsupportedExt !String ![String] | PsErrParseOptionsPragma !String {-| PsErrUnsupportedOptionsPragma is an error that occurs when an unknown OPTIONS_GHC pragma is supplied is found. Example(s): {-# OPTIONS_GHC foo #-} Test case(s): tests/safeHaskell/flags/SafeFlags28 tests/safeHaskell/flags/SafeFlags19 tests/safeHaskell/flags/SafeFlags29 tests/parser/should_fail/T19923c tests/parser/should_fail/T19923b tests/parser/should_fail/readFail044 tests/driver/T2499 -} | PsErrUnknownOptionsPragma !String deriving Generic data PsMessage = {-| An \"unknown\" message from the parser. This type constructor allows arbitrary messages to be embedded. The typical use case would be GHC plugins willing to emit custom diagnostics. 
-} PsUnknownMessage (UnknownDiagnostic (DiagnosticOpts PsMessage)) {-| A group of parser messages emitted in 'GHC.Parser.Header'. See Note [Messages from GHC.Parser.Header]. -} | PsHeaderMessage !PsHeaderMessage {-| PsWarnBidirectionalFormatChars is a warning (controlled by the -Wwarn-bidirectional-format-characters flag) that occurs when unicode bi-directional format characters are found within in a file The 'PsLoc' contains the exact position in the buffer the character occurred, and the string contains a description of the character. -} | PsWarnBidirectionalFormatChars (NonEmpty (PsLoc, Char, String)) {-| PsWarnTab is a warning (controlled by the -Wwarn-tabs flag) that occurs when tabulations (tabs) are found within a file. Test case(s): parser/should_fail/T12610 parser/should_compile/T9723b parser/should_compile/T9723a parser/should_compile/read043 parser/should_fail/T16270 warnings/should_compile/T9230 -} | PsWarnTab !Word -- ^ Number of other occurrences other than the first one {-| PsWarnTransitionalLayout is a warning (controlled by the -Walternative-layout-rule-transitional flag) that occurs when pipes ('|') or 'where' are at the same depth of an implicit layout block. Example(s): f :: IO () f | True = do let x = () y = () return () | True = return () Test case(s): layout/layout006 layout/layout003 layout/layout001 -} | PsWarnTransitionalLayout !TransLayoutReason -- | Unrecognised pragma. First field is the actual pragma name which -- might be empty. Second field is the set of valid candidate pragmas. 
| PsWarnUnrecognisedPragma !String ![String] | PsWarnMisplacedPragma !FileHeaderPragmaType -- | Invalid Haddock comment position | PsWarnHaddockInvalidPos -- | Multiple Haddock comment for the same entity | PsWarnHaddockIgnoreMulti -- | Found binding occurrence of "*" while StarIsType is enabled | PsWarnStarBinder -- | Using "*" for "Type" without StarIsType enabled | PsWarnStarIsType -- | Pre qualified import with 'WarnPrepositiveQualifiedModule' enabled | PsWarnImportPreQualified | PsWarnOperatorWhitespaceExtConflict !OperatorWhitespaceSymbol | PsWarnOperatorWhitespace !FastString !OperatorWhitespaceOccurrence {- | PsWarnViewPatternSignatures is a warning triggered by view patterns whose RHS is an unparenthesised pattern signature. It warns on code that is highly likely to break when the precedence of view patterns relative to pattern signatures is changed per GHC Proposal #281. The suggested fix is to add parentheses. Example: f1 (isJust -> True :: Bool) = () Suggested fix: f1 (isJust -> (True :: Bool)) = () Test cases: T24159_viewpat -} | PsWarnViewPatternSignatures !(LPat GhcPs) !(LPat GhcPs) -- | LambdaCase syntax used without the extension enabled | PsErrLambdaCase -- | A lambda requires at least one parameter | PsErrEmptyLambda -- | Underscores in literals without the extension enabled | PsErrNumUnderscores !NumUnderscoreReason -- | Invalid character in primitive string | PsErrPrimStringInvalidChar -- | Missing block | PsErrMissingBlock -- | Lexer error | PsErrLexer !LexErr !LexErrKind -- | Suffix occurrence of `@` | PsErrSuffixAT -- | Parse errors | PsErrParse !String !PsErrParseDetails -- | Cmm lexer error | PsErrCmmLexer -- | Unsupported boxed sum in expression | PsErrUnsupportedBoxedSumExpr !(SumOrTuple (HsExpr GhcPs)) -- | Unsupported boxed sum in pattern | PsErrUnsupportedBoxedSumPat !(SumOrTuple (PatBuilder GhcPs)) -- | Unexpected qualified constructor | PsErrUnexpectedQualifiedConstructor !RdrName -- | Tuple section in pattern context | 
PsErrTupleSectionInPat -- | Bang-pattern without BangPattterns enabled | PsErrIllegalBangPattern !(Pat GhcPs) -- | Operator applied to too few arguments | PsErrOpFewArgs !StarIsType !RdrName -- | Import: multiple occurrences of 'qualified' | PsErrImportQualifiedTwice -- | Post qualified import without 'ImportQualifiedPost' | PsErrImportPostQualified -- | Explicit namespace keyword without 'ExplicitNamespaces' | PsErrIllegalExplicitNamespace -- | Expecting a type constructor but found a variable | PsErrVarForTyCon !RdrName -- | Illegal export form allowed by PatternSynonyms | PsErrIllegalPatSynExport -- | Malformed entity string | PsErrMalformedEntityString -- | Dots used in record update | PsErrDotsInRecordUpdate -- | Precedence out of range | PsErrPrecedenceOutOfRange !Int -- | Invalid use of record dot syntax `.' | PsErrOverloadedRecordDotInvalid -- | `OverloadedRecordUpdate` is not enabled. | PsErrOverloadedRecordUpdateNotEnabled -- | Can't use qualified fields when OverloadedRecordUpdate is enabled. 
| PsErrOverloadedRecordUpdateNoQualifiedFields -- | Cannot parse data constructor in a data/newtype declaration | PsErrInvalidDataCon !(HsType GhcPs) -- | Cannot parse data constructor in a data/newtype declaration | PsErrInvalidInfixDataCon !(HsType GhcPs) !RdrName !(HsType GhcPs) -- | Illegal DataKinds quote mark in data/newtype constructor declaration | PsErrIllegalPromotionQuoteDataCon !RdrName -- | UNPACK applied to a data constructor | PsErrUnpackDataCon -- | Unexpected kind application in data/newtype declaration | PsErrUnexpectedKindAppInDataCon !DataConBuilder !(HsType GhcPs) -- | Not a record constructor | PsErrInvalidRecordCon !(PatBuilder GhcPs) -- | Illegal unboxed string literal in pattern | PsErrIllegalUnboxedStringInPat !(HsLit GhcPs) -- | Illegal primitive floating point literal in pattern | PsErrIllegalUnboxedFloatingLitInPat !(HsLit GhcPs) -- | Do-notation in pattern | PsErrDoNotationInPat -- | If-then-else syntax in pattern | PsErrIfThenElseInPat -- | Lambda or Lambda-case in pattern | PsErrLambdaInPat HsLamVariant -- | case..of in pattern | PsErrCaseInPat -- | let-syntax in pattern | PsErrLetInPat -- | Arrow expression-syntax in pattern | PsErrArrowExprInPat !(HsExpr GhcPs) -- | Arrow command-syntax in pattern | PsErrArrowCmdInPat !(HsCmd GhcPs) -- | Arrow command-syntax in expression | PsErrArrowCmdInExpr !(HsCmd GhcPs) -- | Or-pattern in expression | PsErrOrPatInExpr !(LPat GhcPs) -- | Type-application without space before '@' | PsErrTypeAppWithoutSpace !RdrName !(LHsExpr GhcPs) -- | Lazy-pattern ('~') without space after it | PsErrLazyPatWithoutSpace !(LHsExpr GhcPs) -- | Bang-pattern ('!') without space after it | PsErrBangPatWithoutSpace !(LHsExpr GhcPs) -- | Pragma not allowed in this position | PsErrUnallowedPragma !(HsPragE GhcPs) -- | Qualified do block in command | PsErrQualifiedDoInCmd !ModuleName -- | Invalid infix hole, expected an infix operator | PsErrInvalidInfixHole -- | Unexpected semi-colons in conditional expression | 
PsErrSemiColonsInCondExpr !(HsExpr GhcPs) -- ^ conditional expr !Bool -- ^ "then" semi-colon? !(HsExpr GhcPs) -- ^ "then" expr !Bool -- ^ "else" semi-colon? !(HsExpr GhcPs) -- ^ "else" expr -- | Unexpected semi-colons in conditional command | PsErrSemiColonsInCondCmd !(HsExpr GhcPs) -- ^ conditional expr !Bool -- ^ "then" semi-colon? !(HsCmd GhcPs) -- ^ "then" expr !Bool -- ^ "else" semi-colon? !(HsCmd GhcPs) -- ^ "else" expr -- | @-operator in a pattern position | PsErrAtInPatPos -- | Unexpected case command in function application | PsErrCaseCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected lambda or \case(s) command in function application | PsErrLambdaCmdInFunAppCmd !HsLamVariant !(LHsCmd GhcPs) -- | Unexpected if command in function application | PsErrIfCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected let command in function application | PsErrLetCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected do command in function application | PsErrDoCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected do block in function application | PsErrDoInFunAppExpr !(Maybe ModuleName) !(LHsExpr GhcPs) -- | Unexpected mdo block in function application | PsErrMDoInFunAppExpr !(Maybe ModuleName) !(LHsExpr GhcPs) -- | Unexpected case expression in function application | PsErrCaseInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected lambda or \case(s) expression in function application | PsErrLambdaInFunAppExpr !HsLamVariant !(LHsExpr GhcPs) -- | Unexpected let expression in function application | PsErrLetInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected if expression in function application | PsErrIfInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected proc expression in function application | PsErrProcInFunAppExpr !(LHsExpr GhcPs) -- | Malformed head of type or class declaration | PsErrMalformedTyOrClDecl !(LHsType GhcPs) -- | Illegal 'where' keyword in data declaration | PsErrIllegalWhereInDataDecl -- | Illegal datatype context | PsErrIllegalDataTypeContext !(LHsContext GhcPs) -- | Parse error on input | 
PsErrParseErrorOnInput !OccName -- | Malformed ... declaration for ... | PsErrMalformedDecl !SDoc !RdrName -- | Not a data constructor | PsErrNotADataCon !RdrName -- | Record syntax used in pattern synonym declaration | PsErrRecordSyntaxInPatSynDecl !(LPat GhcPs) -- | Empty 'where' clause in pattern-synonym declaration | PsErrEmptyWhereInPatSynDecl !RdrName -- | Invalid binding name in 'where' clause of pattern-synonym declaration | PsErrInvalidWhereBindInPatSynDecl !RdrName !(HsDecl GhcPs) -- | Multiple bindings in 'where' clause of pattern-synonym declaration | PsErrNoSingleWhereBindInPatSynDecl !RdrName !(HsDecl GhcPs) -- | Declaration splice not a top-level | PsErrDeclSpliceNotAtTopLevel !(SpliceDecl GhcPs) -- | Inferred type variables not allowed here | PsErrInferredTypeVarNotAllowed -- | Multiple names in standalone kind signatures | PsErrMultipleNamesInStandaloneKindSignature [LIdP GhcPs] -- | Illegal import bundle form | PsErrIllegalImportBundleForm -- | Illegal role name | PsErrIllegalRoleName !FastString [Role] -- | Invalid type signature | PsErrInvalidTypeSignature !PsInvalidTypeSignature !(LHsExpr GhcPs) -- | Unexpected type in declaration | PsErrUnexpectedTypeInDecl !(LHsType GhcPs) !SDoc !RdrName [LHsTypeArg GhcPs] !SDoc -- | Expected a hyphen | PsErrExpectedHyphen -- | Found a space in a SCC | PsErrSpaceInSCC -- | Found two single quotes | PsErrEmptyDoubleQuotes !Bool -- ^ Is TH on? -- | Invalid package name | PsErrInvalidPackageName !FastString -- | Invalid rule activation marker | PsErrInvalidRuleActivationMarker -- | Linear function found but LinearTypes not enabled | PsErrLinearFunction -- | Multi-way if-expression found but MultiWayIf not enabled | PsErrMultiWayIf -- | Explicit forall found but no extension allowing it is enabled | PsErrExplicitForall !Bool -- ^ is Unicode forall? 
-- | Found qualified-do without QualifiedDo enabled | PsErrIllegalQualifiedDo !SDoc -- | Cmm parser error | PsErrCmmParser !CmmParserError -- | Illegal traditional record syntax -- -- TODO: distinguish errors without using SDoc | PsErrIllegalTraditionalRecordSyntax !SDoc -- | Parse error in command -- -- TODO: distinguish errors without using SDoc | PsErrParseErrorInCmd !SDoc -- | Parse error in pattern | PsErrInPat !(PatBuilder GhcPs) !PsErrInPatDetails -- | Parse error in right operator section pattern -- TODO: embed the proper operator, if possible | PsErrParseRightOpSectionInPat !RdrName !(PatBuilder GhcPs) -- | Illegal linear arrow or multiplicity annotation in GADT record syntax | PsErrIllegalGadtRecordMultiplicity !(HsArrow GhcPs) | PsErrInvalidCApiImport | PsErrMultipleConForNewtype !RdrName !Int | PsErrUnicodeCharLooksLike Char -- ^ the problematic character Char -- ^ the character it looks like String -- ^ the name of the character that it looks like | PsErrInvalidPun !PsErrPunDetails -- | Or pattern used without -XOrPatterns | PsErrIllegalOrPat (LPat GhcPs) -- | Temporary error until GHC gains support for type syntax in patterns. -- Test cases: T24159_pat_parse_error_1 -- T24159_pat_parse_error_2 -- T24159_pat_parse_error_3 -- T24159_pat_parse_error_4 -- T24159_pat_parse_error_5 -- T24159_pat_parse_error_6 | PsErrTypeSyntaxInPat !PsErrTypeSyntaxDetails deriving Generic -- | Extra details about a parse error, which helps -- us in determining which should be the hints to -- suggest. data PsErrParseDetails = PsErrParseDetails { ped_th_enabled :: !Bool -- Is 'TemplateHaskell' enabled? , ped_do_in_last_100 :: !Bool -- ^ Is there a 'do' in the last 100 characters? , ped_mdo_in_last_100 :: !Bool -- ^ Is there an 'mdo' in the last 100 characters? , ped_pat_syn_enabled :: !Bool -- ^ Is 'PatternSynonyms' enabled? , ped_pattern_parsed :: !Bool -- ^ Did we parse a \"pattern\" keyword? 
} data PsInvalidTypeSignature = PsErrInvalidTypeSig_Qualified | PsErrInvalidTypeSig_DataCon | PsErrInvalidTypeSig_Other -- | Is the parsed pattern recursive? data PatIsRecursive = YesPatIsRecursive | NoPatIsRecursive data PatIncompleteDoBlock = YesIncompleteDoBlock | NoIncompleteDoBlock deriving Eq -- | Extra information for the expression GHC is currently inspecting/parsing. -- It can be used to generate more informative parser diagnostics and hints. data ParseContext = ParseContext { is_infix :: !(Maybe RdrName) -- ^ If 'Just', this is an infix -- pattern with the bound operator name , incomplete_do_block :: !PatIncompleteDoBlock -- ^ Did the parser likely fail due to an incomplete do block? } deriving Eq data PsErrInPatDetails = PEIP_NegApp -- ^ Negative application pattern? | PEIP_TypeArgs [HsConPatTyArg GhcPs] -- ^ The list of type arguments for the pattern | PEIP_RecPattern [LPat GhcPs] -- ^ The pattern arguments !PatIsRecursive -- ^ Is the parsed pattern recursive? !ParseContext | PEIP_OtherPatDetails !ParseContext data PsErrPunDetails = PEP_QuoteDisambiguation | PEP_TupleSyntaxType | PEP_SumSyntaxType data PsErrTypeSyntaxDetails = PETS_FunctionArrow !(LocatedA (PatBuilder GhcPs)) !(HsArrowOf (LocatedA (PatBuilder GhcPs)) GhcPs) !(LocatedA (PatBuilder GhcPs)) | PETS_Multiplicity !(EpToken "%") !(LocatedA (PatBuilder GhcPs)) | PETS_ForallTelescope !(HsForAllTelescope GhcPs) !(LocatedA (PatBuilder GhcPs)) | PETS_ConstraintContext !(LocatedA (PatBuilder GhcPs)) noParseContext :: ParseContext noParseContext = ParseContext Nothing NoIncompleteDoBlock incompleteDoBlock :: ParseContext incompleteDoBlock = ParseContext Nothing YesIncompleteDoBlock -- | Builds a 'PsErrInPatDetails' with the information provided by the 'ParseContext'. 
fromParseContext :: ParseContext -> PsErrInPatDetails fromParseContext = PEIP_OtherPatDetails data NumUnderscoreReason = NumUnderscore_Integral | NumUnderscore_Float deriving (Show,Eq,Ord) data LexErrKind = LexErrKind_EOF -- ^ End of input | LexErrKind_UTF8 -- ^ UTF-8 decoding error | LexErrKind_Char !Char -- ^ Error at given character deriving (Show,Eq,Ord) data LexErr = LexError -- ^ Lexical error | LexUnknownPragma -- ^ Unknown pragma | LexErrorInPragma -- ^ Lexical error in pragma | LexNumEscapeRange -- ^ Numeric escape sequence out of range | LexUnterminatedComment -- ^ Unterminated `{-' | LexUnterminatedOptions -- ^ Unterminated OPTIONS pragma | LexUnterminatedQQ -- ^ Unterminated quasiquotation -- | Errors from the Cmm parser data CmmParserError = CmmUnknownPrimitive !FastString -- ^ Unknown Cmm primitive | CmmUnknownMacro !FastString -- ^ Unknown macro | CmmUnknownCConv !String -- ^ Unknown calling convention | CmmUnrecognisedSafety !String -- ^ Unrecognised safety | CmmUnrecognisedHint !String -- ^ Unrecognised hint data TransLayoutReason = TransLayout_Where -- ^ "`where' clause at the same depth as implicit layout block" | TransLayout_Pipe -- ^ "`|' at the same depth as implicit layout block") data FileHeaderPragmaType = OptionsPrag | IncludePrag | LanguagePrag | DocOptionsPrag ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/HaddockLex.x0000644000000000000000000001574407346545000021354 0ustar0000000000000000{ {-# OPTIONS_GHC -funbox-strict-fields #-} module GHC.Parser.HaddockLex (lexHsDoc, lexStringLiteral) where import GHC.Prelude import GHC.Data.FastString import GHC.Hs.Doc import GHC.Parser.Lexer import GHC.Parser.Annotation import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Data.StringBuffer import qualified GHC.Data.Strict as Strict import GHC.Types.Name.Reader import GHC.Utils.Error import GHC.Utils.Encoding import GHC.Hs.Extension import qualified GHC.Data.EnumSet as EnumSet import Data.Maybe import Data.Word import Data.ByteString ( 
ByteString ) import qualified Data.ByteString as BS import qualified GHC.LanguageExtensions as LangExt } -- ----------------------------------------------------------------------------- -- Alex "Character set macros" -- Copied from GHC/Parser/Lexer.x -- NB: The logic behind these definitions is also reflected in "GHC.Utils.Lexeme" -- Any changes here should likely be reflected there. $unispace = \x05 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $nl = [\n\r\f] $whitechar = [$nl\v\ $unispace] $white_no_nl = $whitechar # \n -- TODO #8424 $tab = \t $ascdigit = 0-9 $unidigit = \x03 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $decdigit = $ascdigit -- exactly $ascdigit, no more no less. $digit = [$ascdigit $unidigit] $special = [\(\)\,\;\[\]\`\{\}] $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~\:] $unisymbol = \x04 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $symbol = [$ascsymbol $unisymbol] # [$special \_\"\'] $unilarge = \x01 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $asclarge = [A-Z] $large = [$asclarge $unilarge] $unismall = \x02 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $ascsmall = [a-z] $small = [$ascsmall $unismall \_] $uniidchar = \x07 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $idchar = [$small $large $digit $uniidchar \'] $unigraphic = \x06 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $graphic = [$small $large $symbol $digit $idchar $special $unigraphic \"\'] $alpha = [$small $large] -- The character sets marked "TODO" are mostly overly inclusive -- and should be defined more precisely once alex has better -- support for unicode character sets (see -- https://github.com/simonmar/alex/issues/126). 
@id = $alpha $idchar* \#* | $symbol+ @modname = $large $idchar* @qualid = (@modname \.)* @id :- \' @qualid \' | \` @qualid \` { getIdentifier 1 } \'\` @qualid \`\' | \'\( @qualid \)\' | \`\( @qualid \)\` { getIdentifier 2 } [. \n] ; { data AlexInput = AlexInput { alexInput_position :: !RealSrcLoc , alexInput_string :: !ByteString } -- NB: As long as we don't use a left-context we don't need to track the -- previous input character. alexInputPrevChar :: AlexInput -> Word8 alexInputPrevChar = error "Left-context not supported" alexGetByte :: AlexInput -> Maybe (Word8, AlexInput) alexGetByte (AlexInput p s) = case utf8UnconsByteString s of Nothing -> Nothing Just (c,bs) -> Just (adjustChar c, AlexInput (advanceSrcLoc p c) bs) alexScanTokens :: RealSrcLoc -> ByteString -> [(RealSrcSpan, ByteString)] alexScanTokens start str0 = go (AlexInput start str0) where go inp@(AlexInput pos str) = case alexScan inp 0 of AlexSkip inp' _ln -> go inp' AlexToken inp'@(AlexInput _ str') _ act -> act pos (BS.length str - BS.length str') str : go inp' AlexEOF -> [] AlexError (AlexInput p _) -> error $ "lexical error at " ++ show p -------------------------------------------------------------------------------- -- | Extract identifier from Alex state. 
getIdentifier :: Int -- ^ adornment length -> RealSrcLoc -> Int -- ^ Token length -> ByteString -- ^ The remaining input beginning with the found token -> (RealSrcSpan, ByteString) getIdentifier !i !loc0 !len0 !s0 = (mkRealSrcSpan loc1 loc2, ident) where (adornment, s1) = BS.splitAt i s0 ident = BS.take (len0 - 2*i) s1 loc1 = advanceSrcLocBS loc0 adornment loc2 = advanceSrcLocBS loc1 ident advanceSrcLocBS :: RealSrcLoc -> ByteString -> RealSrcLoc advanceSrcLocBS !loc bs = case utf8UnconsByteString bs of Nothing -> loc Just (c, bs') -> advanceSrcLocBS (advanceSrcLoc loc c) bs' -- | Lex 'StringLiteral' for warning messages lexStringLiteral :: P (LocatedN RdrName) -- ^ A precise identifier parser -> Located StringLiteral -> Located (WithHsDocIdentifiers StringLiteral GhcPs) lexStringLiteral identParser (L l sl@(StringLiteral _ fs _)) = L l (WithHsDocIdentifiers sl idents) where bs = bytesFS fs idents = mapMaybe (uncurry (validateIdentWith identParser)) plausibleIdents plausibleIdents :: [(SrcSpan,ByteString)] plausibleIdents = case l of RealSrcSpan span _ -> [(RealSrcSpan span' Strict.Nothing, tok) | (span', tok) <- alexScanTokens (realSrcSpanStart span) bs] UnhelpfulSpan reason -> [(UnhelpfulSpan reason, tok) | (_, tok) <- alexScanTokens fakeLoc bs] fakeLoc = mkRealSrcLoc nilFS 0 0 -- | Lex identifiers from a docstring. 
lexHsDoc :: P (LocatedN RdrName) -- ^ A precise identifier parser -> HsDocString -> HsDoc GhcPs lexHsDoc identParser doc = WithHsDocIdentifiers doc idents where docStrings = docStringChunks doc idents = concat [mapMaybe maybeDocIdentifier (plausibleIdents doc) | doc <- docStrings] maybeDocIdentifier :: (SrcSpan, ByteString) -> Maybe (Located RdrName) maybeDocIdentifier = uncurry (validateIdentWith identParser) plausibleIdents :: LHsDocStringChunk -> [(SrcSpan,ByteString)] plausibleIdents (L (RealSrcSpan span _) (HsDocStringChunk s)) = [(RealSrcSpan span' Strict.Nothing, tok) | (span', tok) <- alexScanTokens (realSrcSpanStart span) s] plausibleIdents (L (UnhelpfulSpan reason) (HsDocStringChunk s)) = [(UnhelpfulSpan reason, tok) | (_, tok) <- alexScanTokens fakeLoc s] -- preserve the original reason fakeLoc = mkRealSrcLoc nilFS 0 0 validateIdentWith :: P (LocatedN RdrName) -> SrcSpan -> ByteString -> Maybe (Located RdrName) validateIdentWith identParser mloc str0 = let -- These ParserFlags should be as "inclusive" as possible, allowing -- identifiers defined with any language extension. pflags = mkParserOpts (EnumSet.fromList [LangExt.MagicHash]) dopts [] False False False False dopts = emptyDiagOpts buffer = stringBufferFromByteString str0 realSrcLc = case mloc of RealSrcSpan loc _ -> realSrcSpanStart loc UnhelpfulSpan _ -> mkRealSrcLoc nilFS 0 0 pstate = initParserState pflags buffer realSrcLc in case unP identParser pstate of POk _ name -> Just $ case mloc of RealSrcSpan _ _ -> reLoc name UnhelpfulSpan _ -> L mloc (unLoc name) -- Preserve the original reason _ -> Nothing } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Header.hs0000644000000000000000000004713707346545000020702 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} ----------------------------------------------------------------------------- -- -- | Parsing the top of a Haskell source file to get its module name, -- imports and options. 
-- -- (c) Simon Marlow 2005 -- (c) Lemmih 2006 -- ----------------------------------------------------------------------------- module GHC.Parser.Header ( getImports , mkPrelImports -- used by the renamer too , getOptionsFromFile , getOptions , toArgs , checkProcessArgsResult ) where import GHC.Prelude import GHC.Data.Bag import GHC.Driver.Errors.Types -- Unfortunate, needed due to the fact we throw exceptions! import GHC.Parser.Errors.Types import GHC.Parser ( parseHeader ) import GHC.Parser.Lexer import GHC.Hs import GHC.Unit.Module import GHC.Builtin.Names import GHC.Types.Error import GHC.Types.SrcLoc import GHC.Types.SourceError import GHC.Types.SourceText import GHC.Types.PkgQual import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Monad import GHC.Utils.Error import GHC.Utils.Exception as Exception import GHC.Data.StringBuffer import GHC.Data.Maybe import GHC.Data.FastString import qualified GHC.Data.Strict as Strict import Control.Monad import System.IO import System.IO.Unsafe import Data.List (partition) import Data.Char (isSpace) import Text.ParserCombinators.ReadP (readP_to_S, gather) import Text.ParserCombinators.ReadPrec (readPrec_to_P) import Text.Read (readPrec) ------------------------------------------------------------------------------ -- | Parse the imports of a source file. -- -- Throws a 'SourceError' if parsing fails. getImports :: ParserOpts -- ^ Parser options -> Bool -- ^ Implicit Prelude? -> StringBuffer -- ^ Parse this. -> FilePath -- ^ Filename the buffer came from. Used for -- reporting parse error locations. -> FilePath -- ^ The original source filename (used for locations -- in the function result) -> IO (Either (Messages PsMessage) ([(RawPkgQual, Located ModuleName)], [(RawPkgQual, Located ModuleName)], Bool, -- Is GHC.Prim imported or not Located ModuleName)) -- ^ The source imports and normal imports (with optional package -- names from -XPackageImports), and the module name. 
getImports popts implicit_prelude buf filename source_filename = do let loc = mkRealSrcLoc (mkFastString filename) 1 1 case unP parseHeader (initParserState popts buf loc) of PFailed pst -> -- assuming we're not logging warnings here as per below return $ Left $ getPsErrorMessages pst POk pst rdr_module -> fmap Right $ do let (_warns, errs) = getPsMessages pst -- don't log warnings: they'll be reported when we parse the file -- for real. See #2500. if not (isEmptyMessages errs) then throwErrors (GhcPsMessage <$> errs) else let hsmod = unLoc rdr_module mb_mod = hsmodName hsmod imps = hsmodImports hsmod main_loc = srcLocSpan (mkSrcLoc (mkFastString source_filename) 1 1) mod = mb_mod `orElse` L (noAnnSrcSpan main_loc) mAIN_NAME (src_idecls, ord_idecls) = partition ((== IsBoot) . ideclSource . unLoc) imps -- GHC.Prim doesn't exist physically, so don't go looking for it. (ordinary_imps, ghc_prim_import) = partition ((/= moduleName gHC_PRIM) . unLoc . ideclName . unLoc) ord_idecls implicit_imports = mkPrelImports (unLoc mod) main_loc implicit_prelude imps convImport (L _ i) = (ideclPkgQual i, reLoc $ ideclName i) in return (map convImport src_idecls , map convImport (implicit_imports ++ ordinary_imps) , not (null ghc_prim_import) , reLoc mod) mkPrelImports :: ModuleName -> SrcSpan -- Attribute the "import Prelude" to this location -> Bool -> [LImportDecl GhcPs] -> [LImportDecl GhcPs] -- Construct the implicit declaration "import Prelude" (or not) -- -- NB: opt_NoImplicitPrelude is slightly different to import Prelude (); -- because the former doesn't even look at Prelude.hi for instance -- declarations, whereas the latter does. 
-- No implicit Prelude import is generated when: compiling Prelude itself,
-- the user already wrote an explicit (non-package-qualified) Prelude import,
-- or implicit_prelude is off (NoImplicitPrelude).
mkPrelImports this_mod loc implicit_prelude import_decls
  | this_mod == pRELUDE_NAME
    || explicit_prelude_import
    || not implicit_prelude
  = []
  | otherwise = [preludeImportDecl]
  where
    explicit_prelude_import = any is_prelude_import import_decls

    is_prelude_import (L _ decl) =
      unLoc (ideclName decl) == pRELUDE_NAME
      -- See #17045, package qualified imports are never counted as
      -- explicit prelude imports
      && case ideclPkgQual decl of
           NoRawPkgQual -> True
           RawPkgQual {} -> False

    loc' = noAnnSrcSpan loc

    -- The synthetic "import Prelude" declaration, marked ideclImplicit
    -- so later passes know the user did not write it.
    preludeImportDecl :: LImportDecl GhcPs
    preludeImportDecl
      = L loc' $ ImportDecl { ideclExt = XImportDeclPass
                                           { ideclAnn = noAnn
                                           , ideclSourceText = NoSourceText
                                           , ideclImplicit = True   -- Implicit!
                                           },
                              ideclName      = L loc' pRELUDE_NAME,
                              ideclPkgQual   = NoRawPkgQual,
                              ideclSource    = NotBoot,
                              ideclSafe      = False,  -- Not a safe import
                              ideclQualified = NotQualified,
                              ideclAs        = Nothing,
                              ideclImportList = Nothing  }

--------------------------------------------------------------
-- Get options
--------------------------------------------------------------

-- | Parse OPTIONS and LANGUAGE pragmas of the source file.
--
-- Throws a 'SourceError' if flag parsing fails (including unsupported flags.)
getOptionsFromFile :: ParserOpts
                   -> FilePath            -- ^ Input file
                   -> IO (Messages PsMessage, [Located String]) -- ^ Parsed options, if any.
getOptionsFromFile opts filename
    -- bracket guarantees the handle is closed even if pragma
    -- parsing throws.
    = Exception.bracket
              (openBinaryFile filename ReadMode)
              (hClose)
              (\handle -> do
                  (warns, opts) <- fmap (getOptions' opts)
                               (lazyGetToks opts' filename handle)
                  -- Force the results before the handle is closed by
                  -- bracket: the token list is produced lazily from the
                  -- handle (lazyGetToks / unsafeInterleaveIO).
                  seqList opts
                    $ seqList (bagToList $ getMessages warns)
                    $ return (warns, opts))
    where -- We don't need to get haddock doc tokens when we're just
          -- getting the options from pragmas, and lazily lexing them
          -- correctly is a little tricky: If there is "\n" or "\n-"
          -- left at the end of a buffer then the haddock doc may
          -- continue past the end of the buffer, despite the fact that
          -- we already have an apparently-complete token.
          -- We therefore just turn Opt_Haddock off when doing the lazy
          -- lex.
          opts' = disableHaddock opts

blockSize :: Int
-- blockSize = 17 -- for testing :-)
blockSize = 1024

-- Lazily lex a file a block at a time, so that reading the header
-- pragmas does not force reading the whole file.
lazyGetToks :: ParserOpts -> FilePath -> Handle -> IO [Located Token]
lazyGetToks popts filename handle = do
  buf <- hGetStringBufferBlock handle blockSize
  let prag_state = initPragState popts buf loc
  unsafeInterleaveIO $ lazyLexBuf handle prag_state False blockSize
 where
  loc = mkRealSrcLoc (mkFastString filename) 1 1

  -- eof: True once we know the handle has no more input.
  lazyLexBuf :: Handle -> PState -> Bool -> Int -> IO [Located Token]
  lazyLexBuf handle state eof size =
    case unP (lexer False return) state of
      POk state' t -> do
        -- pprTrace "lazyLexBuf" (text (show (buffer state'))) (return ())
        if atEnd (buffer state') && not eof
           -- if this token reached the end of the buffer, and we haven't
           -- necessarily read up to the end of the file, then the token might
           -- be truncated, so read some more of the file and lex it again.
           then getMore handle state size
           else case unLoc t of
                  ITeof  -> return [t]
                  _other -> do rest <- lazyLexBuf handle state' eof size
                               return (t : rest)
      _ | not eof   -> getMore handle state size
        | otherwise -> return [L (mkSrcSpanPs (last_loc state)) ITeof]
                       -- parser assumes an ITeof sentinel at the end

  -- Append another block from the handle to the buffer and re-lex
  -- from the *old* state (the truncated token is thrown away).
  getMore :: Handle -> PState -> Int -> IO [Located Token]
  getMore handle state size = do
     -- pprTrace "getMore" (text (show (buffer state))) (return ())
     let new_size = size * 2
       -- double the buffer size each time we read a new block.  This
       -- counteracts the quadratic slowdown we otherwise get for very
       -- large module names (#5981)
     nextbuf <- hGetStringBufferBlock handle new_size
     if (len nextbuf == 0) then lazyLexBuf handle state True new_size else do
       newbuf <- appendStringBuffers (buffer state) nextbuf
       unsafeInterleaveIO $ lazyLexBuf handle state{buffer=newbuf} False new_size

-- Strictly lex a whole in-memory buffer into tokens (pragma-state lexer).
getToks :: ParserOpts -> FilePath -> StringBuffer -> [Located Token]
getToks popts filename buf = lexAll pstate
 where
  pstate = initPragState popts buf loc
  loc = mkRealSrcLoc (mkFastString filename) 1 1

  lexAll state = case unP (lexer False return) state of
                   POk _ t@(L _ ITeof) -> [t]
                   POk state' t -> t : lexAll state'
                   -- On a lexer error, stop with an ITeof sentinel.
                   _ -> [L (mkSrcSpanPs (last_loc state)) ITeof]

-- | Parse OPTIONS and LANGUAGE pragmas of the source file.
--
-- Throws a 'SourceError' if flag parsing fails (including unsupported flags.)
getOptions :: ParserOpts
           -> StringBuffer -- ^ Input Buffer
           -> FilePath     -- ^ Source filename.  Used for location info.
           -> (Messages PsMessage,[Located String]) -- ^ warnings and parsed options.
getOptions opts buf filename
    = getOptions' opts (getToks opts filename buf)

-- The token parser is written manually because Happy can't
-- return a partial result when it encounters a lexer error.
-- We want to extract options before the buffer is passed through
-- CPP, so we can't use the same trick as 'getImports'.
getOptions' :: ParserOpts
            -> [Located Token]      -- Input buffer
            -> (Messages PsMessage,[Located String])     -- Options.
-- Walk the token stream of the file header, turning OPTIONS, INCLUDE,
-- OPTIONS_HADDOCK and LANGUAGE pragmas into flag strings; once a
-- non-pragma, non-comment token is seen, any later pragma tokens are
-- reported as misplaced-pragma warnings.
getOptions' opts toks
  = parseToks toks
  where
    parseToks (open:close:xs)
      | IToptions_prag str <- unLoc open
      , ITclose_prag       <- unLoc close
      = case toArgs starting_loc str of
          Left _err -> optionsParseError str $   -- #15053
                       combineSrcSpans (getLoc open) (getLoc close)
          Right args -> fmap (args ++) (parseToks xs)
      where
        src_span      = getLoc open
        real_src_span = expectJust "getOptions'" (srcSpanToRealSrcSpan src_span)
        starting_loc  = realSrcSpanStart real_src_span
    parseToks (open:close:xs)
      | ITinclude_prag str <- unLoc open
      , ITclose_prag       <- unLoc close
      = fmap (map (L (getLoc open)) ["-#include",removeSpaces str] ++)
             (parseToks xs)
    parseToks (open:close:xs)
      | ITdocOptions str _ <- unLoc open
      , ITclose_prag       <- unLoc close
      = fmap (map (L (getLoc open)) ["-haddock-opts", removeSpaces str] ++)
             (parseToks xs)
    parseToks (open:xs)
      | ITlanguage_prag <- unLoc open
      = parseLanguage xs
    parseToks (comment:xs) -- Skip over comments
      | isComment (unLoc comment)
      = parseToks xs
    -- At the end of the header, warn about all the misplaced pragmas
    parseToks xs = (unionManyMessages $ mapMaybe mkMessage xs ,[])

    -- Inside a LANGUAGE pragma: each extension name becomes a "-X..."
    -- flag (via checkExtension); commas separate names; "#-}" returns
    -- to the main loop.
    parseLanguage ((L loc (ITconid fs)):rest)
      = fmap (checkExtension opts (L loc fs) :) $
        case rest of
          (L _loc ITcomma):more      -> parseLanguage more
          (L _loc ITclose_prag):more -> parseToks more
          (L loc _):_                -> languagePragParseError loc
          []                         -> panic "getOptions'.parseLanguage(1) went past eof token"
    parseLanguage (tok:_)
      = languagePragParseError (getLoc tok)
    parseLanguage []
      = panic "getOptions'.parseLanguage(2) went past eof token"

    -- Warn for all the misplaced pragmas
    mkMessage :: Located Token -> Maybe (Messages PsMessage)
    mkMessage (L loc token)
      | IToptions_prag _ <- token
      = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma OptionsPrag))
      | ITinclude_prag _ <- token
      = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma IncludePrag))
      | ITdocOptions _ _ <- token
      = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma DocOptionsPrag))
      | ITlanguage_prag <- token
      = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma LanguagePrag))
      | otherwise = Nothing
      where diag_opts = pDiagOpts opts

    isComment :: Token -> Bool
    isComment c =
      case c of
        (ITlineComment {})  -> True
        (ITblockComment {}) -> True
        (ITdocComment {})   -> True
        _                   -> False

-- Split the payload of an OPTIONS pragma into individual (located)
-- arguments.  Accepts either a plain whitespace-separated form or a
-- Haskell-list form ["a", "b"]; each argument keeps a source span for
-- error reporting.
toArgs :: RealSrcLoc
       -> String -> Either String   -- Error
                           [Located String] -- Args
toArgs starting_loc orig_str
    = let (after_spaces_loc, after_spaces_str) = consume_spaces starting_loc orig_str in
      case after_spaces_str of
        -- A leading '[' selects the Haskell-list syntax.
        '[':after_bracket ->
          let after_bracket_loc = advanceSrcLoc after_spaces_loc '['
              (after_bracket_spaces_loc, after_bracket_spaces_str)
                = consume_spaces after_bracket_loc after_bracket
          in case after_bracket_spaces_str of
               ']':rest | all isSpace rest -> Right []
               _ -> readAsList after_bracket_spaces_loc after_bracket_spaces_str

        _ -> toArgs' after_spaces_loc after_spaces_str
 where
  -- Skip whitespace, keeping the source location in step.
  consume_spaces :: RealSrcLoc -> String -> (RealSrcLoc, String)
  consume_spaces loc [] = (loc, [])
  consume_spaces loc (c:cs)
    | isSpace c = consume_spaces (advanceSrcLoc loc c) cs
    | otherwise = (loc, c:cs)

  break_with_loc :: (Char -> Bool) -> RealSrcLoc -> String
                 -> (String, RealSrcLoc, String)  -- location is start of second string
  break_with_loc p = go []
    where
      go reversed_acc loc [] = (reverse reversed_acc, loc, [])
      go reversed_acc loc (c:cs)
        | p c       = (reverse reversed_acc, loc, c:cs)
        | otherwise = go (c:reversed_acc) (advanceSrcLoc loc c) cs

  advance_src_loc_many :: RealSrcLoc -> String -> RealSrcLoc
  advance_src_loc_many = foldl' advanceSrcLoc

  -- Wrap a value in a Located spanning [begin, end).
  locate :: RealSrcLoc -> RealSrcLoc -> a -> Located a
  locate begin end x = L (RealSrcSpan (mkRealSrcSpan begin end) Strict.Nothing) x

  toArgs' :: RealSrcLoc -> String -> Either String [Located String]
  -- Remove outer quotes:
  -- > toArgs' "\"foo\" \"bar baz\""
  -- Right ["foo", "bar baz"]
  --
  -- Keep inner quotes:
  -- > toArgs' "-DFOO=\"bar baz\""
  -- Right ["-DFOO=\"bar baz\""]
  toArgs' loc s =
    let (after_spaces_loc, after_spaces_str) = consume_spaces loc s in
    case after_spaces_str of
      [] -> Right []
      '"' : _ -> do
        -- readAsString removes outer quotes
        (arg, new_loc, rest) <- readAsString after_spaces_loc after_spaces_str
        check_for_space rest
        (locate after_spaces_loc new_loc arg:)
          `fmap` toArgs' new_loc rest
      _ -> case break_with_loc (isSpace <||> (== '"')) after_spaces_loc after_spaces_str of
             (argPart1, loc2, s''@('"':_)) -> do
               (argPart2, loc3, rest) <- readAsString loc2 s''
               check_for_space rest
               -- show argPart2 to keep inner quotes
               (locate after_spaces_loc loc3 (argPart1 ++ show argPart2):)
                 `fmap` toArgs' loc3 rest
             (arg, loc2, s'') -> (locate after_spaces_loc loc2 arg:)
                                   `fmap` toArgs' loc2 s''

  check_for_space :: String -> Either String ()
  check_for_space [] = Right ()
  check_for_space (c:_)
    | isSpace c = Right ()
    | otherwise = Left ("Whitespace expected after string in " ++ show orig_str)

  reads_with_consumed :: Read a => String -> [((String, a), String)]
  -- ((consumed string, parsed result), remainder of input)
  reads_with_consumed = readP_to_S (gather (readPrec_to_P readPrec 0))

  -- Parse one Haskell String literal; on success also return the
  -- location just past it and the remaining input.
  readAsString :: RealSrcLoc
               -> String
               -> Either String (String, RealSrcLoc, String)
  readAsString loc s = case reads_with_consumed s of
              [((consumed, arg), rest)] ->
                  Right (arg, advance_src_loc_many loc consumed, rest)
              _ ->
                  Left ("Couldn't read " ++ show s ++ " as String")

  -- input has had the '[' stripped off
  readAsList :: RealSrcLoc -> String -> Either String [Located String]
  readAsList loc s = do
    let (after_spaces_loc, after_spaces_str) = consume_spaces loc s
    (arg, after_arg_loc, after_arg_str) <- readAsString after_spaces_loc after_spaces_str
    let (after_arg_spaces_loc, after_arg_spaces_str)
          = consume_spaces after_arg_loc after_arg_str
    (locate after_spaces_loc after_arg_loc arg :) <$>
      case after_arg_spaces_str of
        ',':after_comma -> readAsList (advanceSrcLoc after_arg_spaces_loc ',') after_comma
        ']':after_bracket | all isSpace after_bracket -> Right []
        _ -> Left ("Couldn't read " ++ show ('[' : s) ++ " as [String]")
             -- reinsert missing '[' for clarity.

-----------------------------------------------------------------------------

-- | Complain about non-dynamic flags in OPTIONS pragmas.
--
-- Throws a 'SourceError' if the input list is non-empty claiming that the
-- input flags are unknown.
checkProcessArgsResult :: MonadIO m => [Located String] -> m ()
checkProcessArgsResult flags
  = when (notNull flags) $
      liftIO $ throwErrors $ foldMap (singleMessage . mkMsg) flags
    where mkMsg (L loc flag)
              = mkPlainErrorMsgEnvelope loc $
                GhcPsMessage $ PsHeaderMessage $ PsErrUnknownOptionsPragma flag

-----------------------------------------------------------------------------

checkExtension :: ParserOpts -> Located FastString -> Located String
checkExtension opts (L l ext)
-- Checks if a given extension is valid, and if so returns
-- its corresponding flag. Otherwise it throws an exception.
  = if ext' `elem` (pSupportedExts opts)
    then L l ("-X"++ext')
    else unsupportedExtnError opts l ext'
  where
    ext' = unpackFS ext

-- The helpers below all diverge: they raise a SourceError via throwErr.
languagePragParseError :: SrcSpan -> a
languagePragParseError loc =
  throwErr loc $ PsErrParseLanguagePragma

unsupportedExtnError :: ParserOpts -> SrcSpan -> String -> a
unsupportedExtnError opts loc unsup =
  throwErr loc $ PsErrUnsupportedExt unsup (pSupportedExts opts)

optionsParseError :: String -> SrcSpan -> a     -- #15053
optionsParseError str loc =
  throwErr loc $ PsErrParseOptionsPragma str

-- Wrap a header-level parser message as a SourceError and throw it.
throwErr :: SrcSpan -> PsHeaderMessage -> a     -- #15053
throwErr loc ps_msg =
  let msg = mkPlainErrorMsgEnvelope loc $ GhcPsMessage (PsHeaderMessage ps_msg)
  in throw $ mkSrcErr $ singleMessage msg
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Lexer.x0000644000000000000000000045472607346545000020424 0ustar0000000000000000-----------------------------------------------------------------------------
-- (c) The University of Glasgow, 2006
--
-- GHC's lexer for Haskell 2010 [1].
-- -- This is a combination of an Alex-generated lexer [2] from a regex -- definition, with some hand-coded bits. [3] -- -- Completely accurate information about token-spans within the source -- file is maintained. Every token has a start and end RealSrcLoc -- attached to it. -- -- References: -- [1] https://www.haskell.org/onlinereport/haskell2010/haskellch2.html -- [2] http://www.haskell.org/alex/ -- [3] https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/parser -- ----------------------------------------------------------------------------- -- ToDo / known bugs: -- - parsing integers is a bit slow -- - readRational is a bit slow -- -- Known bugs, that were also in the previous version: -- - M... should be 3 tokens, not 1. -- - pragma-end should be only valid in a pragma -- qualified operator NOTES. -- -- - If M.(+) is a single lexeme, then.. -- - Probably (+) should be a single lexeme too, for consistency. -- Otherwise ( + ) would be a prefix operator, but M.( + ) would not be. -- - But we have to rule out reserved operators, otherwise (..) becomes -- a different lexeme. -- - Should we therefore also rule out reserved operators in the qualified -- form? This is quite difficult to achieve. We don't do it for -- qualified varids. 
-- ----------------------------------------------------------------------------- -- Alex "Haskell code fragment top" { {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE UnboxedSums #-} {-# LANGUAGE UnliftedNewtypes #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE DataKinds #-} {-# OPTIONS_GHC -funbox-strict-fields #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Parser.Lexer ( Token(..), lexer, lexerDbg, ParserOpts(..), mkParserOpts, PState (..), initParserState, initPragState, P(..), ParseResult(POk, PFailed), allocateComments, allocatePriorComments, allocateFinalComments, MonadP(..), getBit, getRealSrcLoc, getPState, failMsgP, failLocMsgP, srcParseFail, getPsErrorMessages, getPsMessages, popContext, pushModuleContext, setLastToken, setSrcLoc, activeContext, nextIsEOF, getLexState, popLexState, pushLexState, ExtBits(..), xtest, xunset, xset, disableHaddock, lexTokenStream, mkParensEpToks, mkParensLocs, getCommentsFor, getPriorCommentsFor, getFinalCommentsFor, getEofPos, commentToAnnotation, HdkComment(..), warnopt, adjustChar, addPsMessage ) where import GHC.Prelude import qualified GHC.Data.Strict as Strict -- base import Control.Monad import Control.Applicative import Data.Char import Data.List (stripPrefix, isInfixOf, partition) import Data.List.NonEmpty ( NonEmpty(..) 
) import qualified Data.List.NonEmpty as NE import Data.Maybe import Data.Word import Debug.Trace (trace) import GHC.Data.EnumSet as EnumSet -- ghc-boot import qualified GHC.LanguageExtensions as LangExt -- bytestring import Data.ByteString (ByteString) -- containers import Data.Map (Map) import qualified Data.Map as Map -- compiler import GHC.Utils.Error import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.StringBuffer import GHC.Data.FastString import GHC.Types.Error import GHC.Types.Unique.FM import GHC.Data.Maybe import GHC.Data.OrdList import GHC.Utils.Misc ( readSignificandExponentPair, readHexSignificandExponentPair ) import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Types.Basic ( InlineSpec(..), RuleMatchInfo(..)) import GHC.Hs.Doc import GHC.Parser.CharClass import GHC.Parser.Annotation import GHC.Driver.Flags import GHC.Parser.Errors.Basic import GHC.Parser.Errors.Types import GHC.Parser.Errors.Ppr () import GHC.Parser.String } -- ----------------------------------------------------------------------------- -- Alex "Character set macros" -- NB: The logic behind these definitions is also reflected in "GHC.Utils.Lexeme" -- Any changes here should likely be reflected there. $unispace = \x05 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $nl = [\n\r\f] $space = [\ $unispace] $whitechar = [$nl \v $space] $white_no_nl = $whitechar # \n -- TODO #8424 $tab = \t $ascdigit = 0-9 $unidigit = \x03 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $decdigit = $ascdigit -- exactly $ascdigit, no more no less. $digit = [$ascdigit $unidigit] $special = [\(\)\,\;\[\]\`\{\}] $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~\:] $unisymbol = \x04 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $symbol = [$ascsymbol $unisymbol] # [$special \_\"\'] $unilarge = \x01 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. 
$asclarge = [A-Z] $large = [$asclarge $unilarge] $unismall = \x02 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $ascsmall = [a-z] $small = [$ascsmall $unismall \_] $uniidchar = \x07 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $idchar = [$small $large $digit $uniidchar \'] $unigraphic = \x06 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $graphic = [$small $large $symbol $digit $idchar $special $unigraphic \"\'] $charesc = [a b f n r t v \\ \" \' \&] $binit = 0-1 $octit = 0-7 $hexit = [$decdigit A-F a-f] $pragmachar = [$small $large $digit $uniidchar ] $docsym = [\| \^ \* \$] -- ----------------------------------------------------------------------------- -- Alex "Regular expression macros" @varid = $small $idchar* -- variable identifiers @conid = $large $idchar* -- constructor identifiers @varsym = ($symbol # \:) $symbol* -- variable (operator) symbol @consym = \: $symbol* -- constructor (operator) symbol -- See Note [Lexing NumericUnderscores extension] and #14473 @numspc = _* -- numeric spacer (#14473) @decimal = $decdigit(@numspc $decdigit)* @binary = $binit(@numspc $binit)* @octal = $octit(@numspc $octit)* @hexadecimal = $hexit(@numspc $hexit)* @exponent = @numspc [eE] [\-\+]? @decimal @bin_exponent = @numspc [pP] [\-\+]? @decimal @binarylit = 0[bB] @numspc @binary @octallit = 0[oO] @numspc @octal @hexadecimallit = 0[xX] @numspc @hexadecimal @qual = (@conid \.)+ @qvarid = @qual @varid @qconid = @qual @conid @qvarsym = @qual @varsym @qconsym = @qual @consym -- QualifiedDo needs to parse "M.do" not as a variable, so as to keep the -- layout rules. @qdo = @qual "do" @qmdo = @qual "mdo" @floating_point = @numspc @decimal \. @decimal @exponent? | @numspc @decimal @exponent @hex_floating_point = @numspc @hexadecimal \. @hexadecimal @bin_exponent? 
| @numspc @hexadecimal @bin_exponent @gap = \\ $whitechar+ \\ @cntrl = $asclarge | \@ | \[ | \\ | \] | \^ | \_ @ascii = \^ @cntrl | "NUL" | "SOH" | "STX" | "ETX" | "EOT" | "ENQ" | "ACK" | "BEL" | "BS" | "HT" | "LF" | "VT" | "FF" | "CR" | "SO" | "SI" | "DLE" | "DC1" | "DC2" | "DC3" | "DC4" | "NAK" | "SYN" | "ETB" | "CAN" | "EM" | "SUB" | "ESC" | "FS" | "GS" | "RS" | "US" | "SP" | "DEL" -- N.B. ideally, we would do `@escape # \\ \&` instead of duplicating in @escapechar, -- which is what the Haskell Report says, but this isn't valid Alex syntax, as only -- character sets can be subtracted, not strings @escape = \\ ( $charesc | @ascii | @decimal | o @octal | x @hexadecimal ) @escapechar = \\ ( $charesc # \& | @ascii | @decimal | o @octal | x @hexadecimal ) @stringchar = ($graphic # [\\ \"]) | $space | @escape | @gap @char = ($graphic # [\\ \']) | $space | @escapechar -- normal signed numerical literals can only be explicitly negative, -- not explicitly positive (contrast @exponent) @negative = \- -- ----------------------------------------------------------------------------- -- Alex "Identifier" haskell :- -- ----------------------------------------------------------------------------- -- Alex "Rules" -- everywhere: skip whitespace $white_no_nl+ ; $tab { warnTab } -- Everywhere: deal with nested comments. We explicitly rule out -- pragmas, "{-#", so that we don't accidentally treat them as comments. -- (this can happen even though pragmas will normally take precedence due to -- longest-match, because pragmas aren't valid in every state, but comments -- are). We also rule out nested Haddock comments, if the -haddock flag is -- set. "{-" / { isNormalComment } { nested_comment } -- Single-line comments are a bit tricky. Haskell 98 says that two or -- more dashes followed by a symbol should be parsed as a varsym, so we -- have to exclude those. -- Since Haddock comments aren't valid in every state, we need to rule them -- out here. 
-- The following two rules match comments that begin with two dashes, but -- continue with a different character. The rules test that this character -- is not a symbol (in which case we'd have a varsym), and that it's not a -- space followed by a Haddock comment symbol (docsym) (in which case we'd -- have a Haddock comment). The rules then munch the rest of the line. "-- " ~$docsym .* { lineCommentToken } "--" [^$symbol \ ] .* { lineCommentToken } -- Next, match Haddock comments if no -haddock flag "-- " $docsym .* / { alexNotPred (ifExtension HaddockBit) } { lineCommentToken } -- Now, when we've matched comments that begin with 2 dashes and continue -- with a different character, we need to match comments that begin with three -- or more dashes (which clearly can't be Haddock comments). We only need to -- make sure that the first non-dash character isn't a symbol, and munch the -- rest of the line. "---"\-* ~$symbol .* { lineCommentToken } -- Since the previous rules all match dashes followed by at least one -- character, we also need to match a whole line filled with just dashes. "--"\-* / { atEOL } { lineCommentToken } -- We need this rule since none of the other single line comment rules -- actually match this case. "-- " / { atEOL } { lineCommentToken } -- Everywhere: check for smart quotes--they are not allowed outside of strings $unigraphic / { isSmartQuote } { smart_quote_error } -- 'bol' state: beginning of a line. Slurp up all the whitespace (including -- blank lines) until we find a non-whitespace character, then do layout -- processing. -- -- One slight wibble here: what if the line begins with {-#? In -- theory, we have to lex the pragma to see if it's one we recognise, -- and if it is, then we backtrack and do_bol, otherwise we treat it -- as a nested comment. We don't bother with this: if the line begins -- with {-#, then we'll assume it's a pragma we know about and go for do_bol. 
{ \n ; ^\# line { begin line_prag1 } ^\# / { followedByDigit } { begin line_prag1 } ^\# pragma .* \n ; -- GCC 3.3 CPP generated, apparently ^\# \! .* \n ; -- #!, for scripts -- gcc ^\ \# \! .* \n ; -- #!, for scripts -- clang; See #6132 () { do_bol } } -- after a layout keyword (let, where, do, of), we begin a new layout -- context if the curly brace is missing. -- Careful! This stuff is quite delicate. { \{ / { notFollowedBy '-' } { hopefully_open_brace } -- we might encounter {-# here, but {- has been handled already \n ; ^\# (line)? { begin line_prag1 } } -- after an 'if', a vertical bar starts a layout context for MultiWayIf { \| / { notFollowedBySymbol } { new_layout_context True dontGenerateSemic ITvbar } () { pop } } -- do is treated in a subtly different way, see new_layout_context () { new_layout_context True generateSemic ITvocurly } () { new_layout_context False generateSemic ITvocurly } -- after a new layout context which was found to be to the left of the -- previous context, we have generated a '{' token, and we now need to -- generate a matching '}' token. () { do_layout_left } <0,option_prags> \n { begin bol } "{-#" $whitechar* $pragmachar+ / { known_pragma linePrags } { dispatch_pragmas linePrags } -- single-line line pragmas, of the form -- # "" \n { @decimal $white_no_nl+ \" [$graphic \ ]* \" { setLineAndFile line_prag1a } () { failLinePrag1 } } .* { popLinePrag1 } -- Haskell-style line pragmas, of the form -- {-# LINE "" #-} { @decimal $white_no_nl+ \" [$graphic \ ]* \" { setLineAndFile line_prag2a } } "#-}"|"-}" { pop } -- NOTE: accept -} at the end of a LINE pragma, for compatibility -- with older versions of GHC which generated these. 
-- Haskell-style column pragmas, of the form -- {-# COLUMN #-} @decimal $whitechar* "#-}" { setColumn } <0,option_prags> { "{-#" $whitechar* $pragmachar+ $whitechar+ $pragmachar+ / { known_pragma twoWordPrags } { dispatch_pragmas twoWordPrags } "{-#" $whitechar* $pragmachar+ / { known_pragma oneWordPrags } { dispatch_pragmas oneWordPrags } -- We ignore all these pragmas, but don't generate a warning for them "{-#" $whitechar* $pragmachar+ / { known_pragma ignoredPrags } { dispatch_pragmas ignoredPrags } -- ToDo: should only be valid inside a pragma: "#-}" { endPrag } } { "{-#" $whitechar* $pragmachar+ / { known_pragma fileHeaderPrags } { dispatch_pragmas fileHeaderPrags } } <0> { -- In the "0" mode we ignore these pragmas "{-#" $whitechar* $pragmachar+ / { known_pragma fileHeaderPrags } { nested_comment } } <0,option_prags> { -- This code would eagerly accept and hence discard, e.g., "LANGUAGE MagicHash". -- "{-#" $whitechar* $pragmachar+ -- $whitechar+ $pragmachar+ -- { warn_unknown_prag twoWordPrags } "{-#" $whitechar* $pragmachar+ { warn_unknown_prag (Map.unions [ oneWordPrags, fileHeaderPrags, ignoredPrags, linePrags ]) } "{-#" { warn_unknown_prag Map.empty } } -- '0' state: ordinary lexemes -- Haddock comments "-- " $docsym / { ifExtension HaddockBit } { multiline_doc_comment } "{-" \ ? $docsym / { ifExtension HaddockBit } { nested_doc_comment } -- "special" symbols <0> { -- Don't check ThQuotesBit here as the renamer can produce a better -- error message than the lexer (see the thQuotesEnabled check in rnBracket). "[|" { token (ITopenExpQuote NoE NormalSyntax) } "[||" { token (ITopenTExpQuote NoE) } "|]" { token (ITcloseQuote NormalSyntax) } "||]" { token ITcloseTExpQuote } -- Check ThQuotesBit here as to not steal syntax. 
"[e|" / { ifExtension ThQuotesBit } { token (ITopenExpQuote HasE NormalSyntax) } "[e||" / { ifExtension ThQuotesBit } { token (ITopenTExpQuote HasE) } "[p|" / { ifExtension ThQuotesBit } { token ITopenPatQuote } "[d|" / { ifExtension ThQuotesBit } { layout_token ITopenDecQuote } "[t|" / { ifExtension ThQuotesBit } { token ITopenTypQuote } "[" @varid "|" / { ifExtension QqBit } { lex_quasiquote_tok } -- qualified quasi-quote (#5555) "[" @qvarid "|" / { ifExtension QqBit } { lex_qquasiquote_tok } $unigraphic -- ⟦ / { ifCurrentChar '⟦' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ThQuotesBit } { token (ITopenExpQuote NoE UnicodeSyntax) } $unigraphic -- ⟧ / { ifCurrentChar '⟧' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ThQuotesBit } { token (ITcloseQuote UnicodeSyntax) } } <0> { "(|" / { ifExtension ArrowsBit `alexAndPred` notFollowedBySymbol } { special (IToparenbar NormalSyntax) } "|)" / { ifExtension ArrowsBit } { special (ITcparenbar NormalSyntax) } $unigraphic -- ⦇ / { ifCurrentChar '⦇' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ArrowsBit } { special (IToparenbar UnicodeSyntax) } $unigraphic -- ⦈ / { ifCurrentChar '⦈' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ArrowsBit } { special (ITcparenbar UnicodeSyntax) } } <0> { \? 
@varid / { ifExtension IpBit } { skip_one_varid ITdupipvarid } } <0> { "#" $idchar+ / { ifExtension OverloadedLabelsBit } { skip_one_varid_src ITlabelvarid } "#" \" @stringchar* \" / { ifExtension OverloadedLabelsBit } { tok_quoted_label } } <0> { "(#" / { ifExtension UnboxedParensBit } { token IToubxparen } "#)" / { ifExtension UnboxedParensBit } { token ITcubxparen } } <0,option_prags> { \( { special IToparen } \) { special ITcparen } \[ { special ITobrack } \] { special ITcbrack } \, { special ITcomma } \; { special ITsemi } \` { special ITbackquote } \{ { open_brace } \} { close_brace } } <0,option_prags> { @qdo { qdo_token ITdo } @qmdo / { ifExtension RecursiveDoBit } { qdo_token ITmdo } @qvarid { idtoken qvarid } @qconid { idtoken qconid } @varid { varid } @conid { idtoken conid } } <0> { @qvarid "#"+ / { ifExtension MagicHashBit } { idtoken qvarid } @qconid "#"+ / { ifExtension MagicHashBit } { idtoken qconid } @varid "#"+ / { ifExtension MagicHashBit } { varid } @conid "#"+ / { ifExtension MagicHashBit } { idtoken conid } } -- ToDo: - move `var` and (sym) into lexical syntax? -- - remove backquote from $special? <0> { @qvarsym { idtoken qvarsym } @qconsym { idtoken qconsym } @varsym { with_op_ws varsym } @consym { with_op_ws consym } } -- For the normal boxed literals we need to be careful -- when trying to be close to Haskell98 -- Note [Lexing NumericUnderscores extension] (#14473) -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- NumericUnderscores extension allows underscores in numeric literals. -- Multiple underscores are represented with @numspc macro. -- To be simpler, we have only the definitions with underscores. -- And then we have a separate function (tok_integral and tok_frac) -- that validates the literals. -- If extensions are not enabled, check that there are no underscores. 
-- <0> { -- Normal integral literals (:: Num a => a, from Integer) @decimal { tok_num positive 0 0 decimal } @binarylit / { ifExtension BinaryLiteralsBit } { tok_num positive 2 2 binary } @octallit { tok_num positive 2 2 octal } @hexadecimallit { tok_num positive 2 2 hexadecimal } @negative @decimal / { negLitPred } { tok_num negative 1 1 decimal } @negative @binarylit / { negLitPred `alexAndPred` ifExtension BinaryLiteralsBit } { tok_num negative 3 3 binary } @negative @octallit / { negLitPred } { tok_num negative 3 3 octal } @negative @hexadecimallit / { negLitPred } { tok_num negative 3 3 hexadecimal } -- Normal rational literals (:: Fractional a => a, from Rational) @floating_point { tok_frac 0 tok_float } @negative @floating_point / { negLitPred } { tok_frac 0 tok_float } 0[xX] @numspc @hex_floating_point / { ifExtension HexFloatLiteralsBit } { tok_frac 0 tok_hex_float } @negative 0[xX] @numspc @hex_floating_point / { ifExtension HexFloatLiteralsBit `alexAndPred` negLitPred } { tok_frac 0 tok_hex_float } } <0> { -- Unboxed ints (:: Int#) and words (:: Word#) -- It's simpler (and faster?) to give separate cases to the negatives, -- especially considering octal/hexadecimal prefixes. 
@decimal \# / { ifExtension MagicHashBit } { tok_primint positive 0 1 decimal } @binarylit \# / { ifExtension MagicHashBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primint positive 2 3 binary } @octallit \# / { ifExtension MagicHashBit } { tok_primint positive 2 3 octal } @hexadecimallit \# / { ifExtension MagicHashBit } { tok_primint positive 2 3 hexadecimal } @negative @decimal \# / { negHashLitPred MagicHashBit } { tok_primint negative 1 2 decimal } @negative @binarylit \# / { negHashLitPred MagicHashBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primint negative 3 4 binary } @negative @octallit \# / { negHashLitPred MagicHashBit } { tok_primint negative 3 4 octal } @negative @hexadecimallit \# / { negHashLitPred MagicHashBit } { tok_primint negative 3 4 hexadecimal } @decimal \# \# / { ifExtension MagicHashBit } { tok_primword 0 2 decimal } @binarylit \# \# / { ifExtension MagicHashBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primword 2 4 binary } @octallit \# \# / { ifExtension MagicHashBit } { tok_primword 2 4 octal } @hexadecimallit \# \# / { ifExtension MagicHashBit } { tok_primword 2 4 hexadecimal } @decimal \# $idchar+ / { ifExtension ExtendedLiteralsBit } { tok_prim_num_ext positive 0 decimal } @binarylit \# $idchar+ / { ifExtension ExtendedLiteralsBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_prim_num_ext positive 2 binary } @octallit \# $idchar+ / { ifExtension ExtendedLiteralsBit } { tok_prim_num_ext positive 2 octal } @hexadecimallit \# $idchar+ / { ifExtension ExtendedLiteralsBit } { tok_prim_num_ext positive 2 hexadecimal } @negative @decimal \# $idchar+ / { negHashLitPred ExtendedLiteralsBit } { tok_prim_num_ext negative 1 decimal } @negative @binarylit \# $idchar+ / { negHashLitPred ExtendedLiteralsBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_prim_num_ext negative 3 binary } @negative @octallit \# $idchar+ / { negHashLitPred ExtendedLiteralsBit } { tok_prim_num_ext negative 3 octal } @negative 
@hexadecimallit \# $idchar+ / { negHashLitPred ExtendedLiteralsBit } { tok_prim_num_ext negative 3 hexadecimal } -- Unboxed floats and doubles (:: Float#, :: Double#) -- prim_{float,double} work with signed literals @floating_point \# / { ifExtension MagicHashBit } { tok_frac 1 tok_primfloat } @floating_point \# \# / { ifExtension MagicHashBit } { tok_frac 2 tok_primdouble } @negative @floating_point \# / { negHashLitPred MagicHashBit } { tok_frac 1 tok_primfloat } @negative @floating_point \# \# / { negHashLitPred MagicHashBit } { tok_frac 2 tok_primdouble } 0[xX] @numspc @hex_floating_point \# / { ifExtension MagicHashBit `alexAndPred` ifExtension HexFloatLiteralsBit } { tok_frac 1 tok_prim_hex_float } 0[xX] @numspc @hex_floating_point \# \# / { ifExtension MagicHashBit `alexAndPred` ifExtension HexFloatLiteralsBit } { tok_frac 2 tok_prim_hex_double } @negative 0[xX] @numspc @hex_floating_point \# / { ifExtension HexFloatLiteralsBit `alexAndPred` negHashLitPred MagicHashBit } { tok_frac 1 tok_prim_hex_float } @negative 0[xX] @numspc @hex_floating_point \# \# / { ifExtension HexFloatLiteralsBit `alexAndPred` negHashLitPred MagicHashBit } { tok_frac 2 tok_prim_hex_double } } <0> { \"\"\" / { ifExtension MultilineStringsBit } { tok_string_multi } \" @stringchar* \" { tok_string } \" @stringchar* \" \# / { ifExtension MagicHashBit } { tok_string } \' @char \' { tok_char } \' @char \' \# / { ifExtension MagicHashBit } { tok_char } -- Check for smart quotes and throw better errors than a plain lexical error (#21843) \' \\ $unigraphic / { isSmartQuote } { smart_quote_error } \" @stringchar* \\ $unigraphic / { isSmartQuote } { smart_quote_error } -- See Note [Bare smart quote error] -- The valid string rule will take precedence because it'll match more -- characters than this rule, so this rule will only fire if the string -- could not be lexed correctly \" @stringchar* $unigraphic / { isSmartQuote } { smart_quote_error } } { -- Parse as much of the multiline string as 
possible, except for quotes @stringchar* ($nl ([\ $tab] | @gap)* @stringchar*)* { tok_string_multi_content } -- Allow bare quotes if it's not a triple quote (\" | \"\") / ([\n .] # \") { tok_string_multi_content } } <0> { \'\' { token ITtyQuote } -- The normal character match takes precedence over this because it matches -- more characters. However, if that pattern didn't match, then this quote -- could be a quoted identifier, like 'x. Here, just return ITsimpleQuote, -- as the parser will lex the varid separately. \' / ($graphic # \\ | " ") { token ITsimpleQuote } } -- Note [Whitespace-sensitive operator parsing] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- In accord with GHC Proposal #229 https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0229-whitespace-bang-patterns.rst -- we classify operator occurrences into four categories: -- -- a ! b -- a loose infix occurrence -- a!b -- a tight infix occurrence -- a !b -- a prefix occurrence -- a! b -- a suffix occurrence -- -- The rules are a bit more elaborate than simply checking for whitespace, in -- order to accommodate the following use cases: -- -- f (!a) = ... -- prefix occurrence -- g (a !) -- loose infix occurrence -- g (! a) -- loose infix occurrence -- -- The precise rules are as follows: -- -- * Identifiers, literals, and opening brackets (, (#, (|, [, [|, [||, [p|, -- [e|, [t|, {, ⟦, ⦇, are considered "opening tokens". The function -- followedByOpeningToken tests whether the next token is an opening token. -- -- * Identifiers, literals, and closing brackets ), #), |), ], |], }, ⟧, ⦈, -- are considered "closing tokens". The function precededByClosingToken tests -- whether the previous token is a closing token. -- -- * Whitespace, comments, separators, and other tokens, are considered -- neither opening nor closing. 
-- -- * Any unqualified operator occurrence is classified as prefix, suffix, or -- tight/loose infix, based on preceding and following tokens: -- -- precededByClosingToken | followedByOpeningToken | Occurrence -- ------------------------+------------------------+------------ -- False | True | prefix -- True | False | suffix -- True | True | tight infix -- False | False | loose infix -- ------------------------+------------------------+------------ -- -- A loose infix occurrence is always considered an operator. Other types of -- occurrences may be assigned a special per-operator meaning override: -- -- Operator | Occurrence | Token returned -- ----------+---------------+------------------------------------------ -- ! | prefix | ITbang -- | | strictness annotation or bang pattern, -- | | e.g. f !x = rhs, data T = MkT !a -- | not prefix | ITvarsym "!" -- | | ordinary operator or type operator, -- | | e.g. xs ! 3, (! x), Int ! Bool -- ----------+---------------+------------------------------------------ -- ~ | prefix | ITtilde -- | | laziness annotation or lazy pattern, -- | | e.g. f ~x = rhs, data T = MkT ~a -- | not prefix | ITvarsym "~" -- | | ordinary operator or type operator, -- | | e.g. xs ~ 3, (~ x), Int ~ Bool -- ----------+---------------+------------------------------------------ -- . | prefix | ITproj True -- | | field projection, -- | | e.g. .x -- | tight infix | ITproj False -- | | field projection, -- | | e.g. r.x -- | suffix | ITdot -- | | function composition, -- | | e.g. f. g -- | loose infix | ITdot -- | | function composition, -- | | e.g. f . g -- ----------+---------------+------------------------------------------ -- $ $$ | prefix | ITdollar, ITdollardollar -- | | untyped or typed Template Haskell splice, -- | | e.g. $(f x), $$(f x), $$"str" -- | not prefix | ITvarsym "$", ITvarsym "$$" -- | | ordinary operator or type operator, -- | | e.g. 
f $ g x, a $$ b -- ----------+---------------+------------------------------------------ -- @ | prefix | ITtypeApp -- | | type application, e.g. fmap @Maybe -- | tight infix | ITat -- | | as-pattern, e.g. f p@(a,b) = rhs -- | suffix | parse error -- | | e.g. f p@ x = rhs -- | loose infix | ITvarsym "@" -- | | ordinary operator or type operator, -- | | e.g. f @ g, (f @) -- ----------+---------------+------------------------------------------ -- -- Also, some of these overrides are guarded behind language extensions. -- According to the specification, we must determine the occurrence based on -- surrounding *tokens* (see the proposal for the exact rules). However, in -- the implementation we cheat a little and do the classification based on -- characters, for reasons of both simplicity and efficiency (see -- 'followedByOpeningToken' and 'precededByClosingToken') -- -- When an operator is subject to a meaning override, it is mapped to special -- token: ITbang, ITtilde, ITat, ITdollar, ITdollardollar. Otherwise, it is -- returned as ITvarsym. -- -- For example, this is how we process the (!): -- -- precededByClosingToken | followedByOpeningToken | Token -- ------------------------+------------------------+------------- -- False | True | ITbang -- True | False | ITvarsym "!" -- True | True | ITvarsym "!" -- False | False | ITvarsym "!" -- ------------------------+------------------------+------------- -- -- And this is how we process the (@): -- -- precededByClosingToken | followedByOpeningToken | Token -- ------------------------+------------------------+------------- -- False | True | ITtypeApp -- True | False | parse error -- True | True | ITat -- False | False | ITvarsym "@" -- ------------------------+------------------------+------------- -- ----------------------------------------------------------------------------- -- Alex "Haskell code fragment bottom" { -- Operator whitespace occurrence. See Note [Whitespace-sensitive operator parsing]. 
data OpWs = OpWsPrefix -- a !b | OpWsSuffix -- a! b | OpWsTightInfix -- a!b | OpWsLooseInfix -- a ! b deriving Show -- ----------------------------------------------------------------------------- -- The token type data Token = ITas -- Haskell keywords | ITcase | ITclass | ITdata | ITdefault | ITderiving | ITdo (Maybe FastString) | ITelse | IThiding | ITforeign | ITif | ITimport | ITin | ITinfix | ITinfixl | ITinfixr | ITinstance | ITlet | ITmodule | ITnewtype | ITof | ITqualified | ITthen | ITtype | ITwhere | ITforall IsUnicodeSyntax -- GHC extension keywords | ITexport | ITlabel | ITdynamic | ITsafe | ITinterruptible | ITunsafe | ITstdcallconv | ITccallconv | ITcapiconv | ITprimcallconv | ITjavascriptcallconv | ITmdo (Maybe FastString) | ITfamily | ITrole | ITgroup | ITby | ITusing | ITpattern | ITstatic | ITstock | ITanyclass | ITvia -- Backpack tokens | ITunit | ITsignature | ITdependency | ITrequires -- Pragmas, see Note [Pragma source text] in "GHC.Types.SourceText" | ITinline_prag SourceText InlineSpec RuleMatchInfo | ITopaque_prag SourceText | ITspec_prag SourceText -- SPECIALISE | ITspec_inline_prag SourceText Bool -- SPECIALISE INLINE (or NOINLINE) | ITsource_prag SourceText | ITrules_prag SourceText | ITwarning_prag SourceText | ITdeprecated_prag SourceText | ITline_prag SourceText -- not usually produced, see 'UsePosPragsBit' | ITcolumn_prag SourceText -- not usually produced, see 'UsePosPragsBit' | ITscc_prag SourceText | ITunpack_prag SourceText | ITnounpack_prag SourceText | ITann_prag SourceText | ITcomplete_prag SourceText | ITclose_prag | IToptions_prag String | ITinclude_prag String | ITlanguage_prag | ITminimal_prag SourceText | IToverlappable_prag SourceText -- instance overlap mode | IToverlapping_prag SourceText -- instance overlap mode | IToverlaps_prag SourceText -- instance overlap mode | ITincoherent_prag SourceText -- instance overlap mode | ITctype SourceText | ITcomment_line_prag -- See Note [Nested comment line pragmas] | ITdotdot -- 
reserved symbols | ITcolon | ITdcolon IsUnicodeSyntax | ITequal | ITlam | ITlcase | ITlcases | ITvbar | ITlarrow IsUnicodeSyntax | ITrarrow IsUnicodeSyntax | ITdarrow IsUnicodeSyntax | ITlolly -- The (⊸) arrow (for LinearTypes) | ITminus -- See Note [Minus tokens] | ITprefixminus -- See Note [Minus tokens] | ITbang -- Prefix (!) only, e.g. f !x = rhs | ITtilde -- Prefix (~) only, e.g. f ~x = rhs | ITat -- Tight infix (@) only, e.g. f x@pat = rhs | ITtypeApp -- Prefix (@) only, e.g. f @t | ITpercent -- Prefix (%) only, e.g. a %1 -> b | ITstar IsUnicodeSyntax | ITdot | ITproj Bool -- Extension: OverloadedRecordDotBit | ITbiglam -- GHC-extension symbols | ITocurly -- special symbols | ITccurly | ITvocurly | ITvccurly | ITobrack | ITopabrack -- [:, for parallel arrays with -XParallelArrays | ITcpabrack -- :], for parallel arrays with -XParallelArrays | ITcbrack | IToparen | ITcparen | IToubxparen | ITcubxparen | ITsemi | ITcomma | ITunderscore | ITbackquote | ITsimpleQuote -- ' | ITvarid FastString -- identifiers | ITconid FastString | ITvarsym FastString | ITconsym FastString | ITqvarid (FastString,FastString) | ITqconid (FastString,FastString) | ITqvarsym (FastString,FastString) | ITqconsym (FastString,FastString) | ITdupipvarid FastString -- GHC extension: implicit param: ?x | ITlabelvarid SourceText FastString -- Overloaded label: #x -- The SourceText is required because we can -- have a string literal as a label -- Note [Literal source text] in "GHC.Types.SourceText" | ITchar SourceText Char -- Note [Literal source text] in "GHC.Types.SourceText" | ITstring SourceText FastString -- Note [Literal source text] in "GHC.Types.SourceText" | ITstringMulti SourceText FastString -- Note [Literal source text] in "GHC.Types.SourceText" | ITinteger IntegralLit -- Note [Literal source text] in "GHC.Types.SourceText" | ITrational FractionalLit | ITprimchar SourceText Char -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimstring SourceText ByteString -- Note 
[Literal source text] in "GHC.Types.SourceText" | ITprimint SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimword SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimint8 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimint16 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimint32 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimint64 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimword8 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimword16 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimword32 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimword64 SourceText Integer -- Note [Literal source text] in "GHC.Types.SourceText" | ITprimfloat FractionalLit | ITprimdouble FractionalLit -- Template Haskell extension tokens | ITopenExpQuote HasE IsUnicodeSyntax -- [| or [e| | ITopenPatQuote -- [p| | ITopenDecQuote -- [d| | ITopenTypQuote -- [t| | ITcloseQuote IsUnicodeSyntax -- |] | ITopenTExpQuote HasE -- [|| or [e|| | ITcloseTExpQuote -- ||] | ITdollar -- prefix $ | ITdollardollar -- prefix $$ | ITtyQuote -- '' | ITquasiQuote (FastString,FastString,PsSpan) -- ITquasiQuote(quoter, quote, loc) -- represents a quasi-quote of the form -- [quoter| quote |] | ITqQuasiQuote (FastString,FastString,FastString,PsSpan) -- ITqQuasiQuote(Qual, quoter, quote, loc) -- represents a qualified quasi-quote of the form -- [Qual.quoter| quote |] -- Arrow notation extension | ITproc | ITrec | IToparenbar IsUnicodeSyntax -- ^ @(|@ | ITcparenbar IsUnicodeSyntax -- ^ @|)@ | ITlarrowtail IsUnicodeSyntax -- ^ @-<@ | ITrarrowtail IsUnicodeSyntax -- ^ @>-@ | ITLarrowtail IsUnicodeSyntax -- ^ @-<<@ | ITRarrowtail IsUnicodeSyntax -- ^ @>>-@ | ITunknown String -- ^ Used when the lexer can't make sense 
of it | ITeof -- ^ end of file token -- Documentation annotations. See Note [PsSpan in Comments] | ITdocComment HsDocString PsSpan -- ^ The HsDocString contains more details about what -- this is and how to pretty print it | ITdocOptions String PsSpan -- ^ doc options (prune, ignore-exports, etc) | ITlineComment String PsSpan -- ^ comment starting by "--" | ITblockComment String PsSpan -- ^ comment in {- -} deriving Show instance Outputable Token where ppr x = text (show x) {- Note [PsSpan in Comments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using the Api Annotations to exact print a modified AST, managing the space before a comment is important. The PsSpan in the comment token allows this to happen, and this location is tracked in prev_loc in PState. This only tracks physical tokens, so is not updated for zero-width ones. We also use this to track the space before the end-of-file marker. -} {- Note [Minus tokens] ~~~~~~~~~~~~~~~~~~~~~~ A minus sign can be used in prefix form (-x) and infix form (a - b). When LexicalNegation is on: * ITprefixminus represents the prefix form * ITvarsym "-" represents the infix form * ITminus is not used When LexicalNegation is off: * ITminus represents all forms * ITprefixminus is not used * ITvarsym "-" is not used -} {- Note [Why not LexicalNegationBit] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One might wonder why we define NoLexicalNegationBit instead of LexicalNegationBit. The problem lies in the following line in reservedSymsFM: ,("-", ITminus, NormalSyntax, xbit NoLexicalNegationBit) We want to generate ITminus only when LexicalNegation is off. How would one do it if we had LexicalNegationBit? I (int-index) tried to use bitwise complement: ,("-", ITminus, NormalSyntax, complement (xbit LexicalNegationBit)) This did not work, so I opted for NoLexicalNegationBit instead. 
-} -- the bitmap provided as the third component indicates whether the -- corresponding extension keyword is valid under the extension options -- provided to the compiler; if the extension corresponding to *any* of the -- bits set in the bitmap is enabled, the keyword is valid (this setup -- facilitates using a keyword in two different extensions that can be -- activated independently) -- reservedWordsFM :: UniqFM FastString (Token, ExtsBitmap) reservedWordsFM = listToUFM $ map (\(x, y, z) -> (mkFastString x, (y, z))) [( "_", ITunderscore, 0 ), ( "as", ITas, 0 ), ( "case", ITcase, 0 ), ( "cases", ITlcases, xbit LambdaCaseBit ), ( "class", ITclass, 0 ), ( "data", ITdata, 0 ), ( "default", ITdefault, 0 ), ( "deriving", ITderiving, 0 ), ( "do", ITdo Nothing, 0 ), ( "else", ITelse, 0 ), ( "hiding", IThiding, 0 ), ( "if", ITif, 0 ), ( "import", ITimport, 0 ), ( "in", ITin, 0 ), ( "infix", ITinfix, 0 ), ( "infixl", ITinfixl, 0 ), ( "infixr", ITinfixr, 0 ), ( "instance", ITinstance, 0 ), ( "let", ITlet, 0 ), ( "module", ITmodule, 0 ), ( "newtype", ITnewtype, 0 ), ( "of", ITof, 0 ), ( "qualified", ITqualified, 0 ), ( "then", ITthen, 0 ), ( "type", ITtype, 0 ), ( "where", ITwhere, 0 ), ( "forall", ITforall NormalSyntax, 0), ( "mdo", ITmdo Nothing, xbit RecursiveDoBit), -- See Note [Lexing type pseudo-keywords] ( "family", ITfamily, 0 ), ( "role", ITrole, 0 ), ( "pattern", ITpattern, xbit PatternSynonymsBit), ( "static", ITstatic, xbit StaticPointersBit ), ( "stock", ITstock, 0 ), ( "anyclass", ITanyclass, 0 ), ( "via", ITvia, 0 ), ( "group", ITgroup, xbit TransformComprehensionsBit), ( "by", ITby, xbit TransformComprehensionsBit), ( "using", ITusing, xbit TransformComprehensionsBit), ( "foreign", ITforeign, xbit FfiBit), ( "export", ITexport, xbit FfiBit), ( "label", ITlabel, xbit FfiBit), ( "dynamic", ITdynamic, xbit FfiBit), ( "safe", ITsafe, xbit FfiBit .|. 
xbit SafeHaskellBit), ( "interruptible", ITinterruptible, xbit InterruptibleFfiBit), ( "unsafe", ITunsafe, xbit FfiBit), ( "stdcall", ITstdcallconv, xbit FfiBit), ( "ccall", ITccallconv, xbit FfiBit), ( "capi", ITcapiconv, xbit CApiFfiBit), ( "prim", ITprimcallconv, xbit FfiBit), ( "javascript", ITjavascriptcallconv, xbit FfiBit), ( "unit", ITunit, 0 ), ( "dependency", ITdependency, 0 ), ( "signature", ITsignature, 0 ), ( "rec", ITrec, xbit ArrowsBit .|. xbit RecursiveDoBit), ( "proc", ITproc, xbit ArrowsBit) ] {----------------------------------- Note [Lexing type pseudo-keywords] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One might think that we wish to treat 'family' and 'role' as regular old varids whenever -XTypeFamilies and -XRoleAnnotations are off, respectively. But, there is no need to do so. These pseudo-keywords are not stolen syntax: they are only used after the keyword 'type' at the top-level, where varids are not allowed. Furthermore, checks further downstream (GHC.Tc.TyCl) ensure that type families and role annotations are never declared without their extensions on. In fact, by unconditionally lexing these pseudo-keywords as special, we can get better error messages. Also, note that these are included in the `varid` production in the parser -- a key detail to make all this work. 
-------------------------------------} reservedSymsFM :: UniqFM FastString (Token, IsUnicodeSyntax, ExtsBitmap) reservedSymsFM = listToUFM $ map (\ (x,w,y,z) -> (mkFastString x,(w,y,z))) [ ("..", ITdotdot, NormalSyntax, 0 ) -- (:) is a reserved op, meaning only list cons ,(":", ITcolon, NormalSyntax, 0 ) ,("::", ITdcolon NormalSyntax, NormalSyntax, 0 ) ,("=", ITequal, NormalSyntax, 0 ) ,("\\", ITlam, NormalSyntax, 0 ) ,("|", ITvbar, NormalSyntax, 0 ) ,("<-", ITlarrow NormalSyntax, NormalSyntax, 0 ) ,("->", ITrarrow NormalSyntax, NormalSyntax, 0 ) ,("=>", ITdarrow NormalSyntax, NormalSyntax, 0 ) ,("-", ITminus, NormalSyntax, xbit NoLexicalNegationBit) ,("*", ITstar NormalSyntax, NormalSyntax, xbit StarIsTypeBit) ,("-<", ITlarrowtail NormalSyntax, NormalSyntax, xbit ArrowsBit) ,(">-", ITrarrowtail NormalSyntax, NormalSyntax, xbit ArrowsBit) ,("-<<", ITLarrowtail NormalSyntax, NormalSyntax, xbit ArrowsBit) ,(">>-", ITRarrowtail NormalSyntax, NormalSyntax, xbit ArrowsBit) ,("∷", ITdcolon UnicodeSyntax, UnicodeSyntax, 0 ) ,("⇒", ITdarrow UnicodeSyntax, UnicodeSyntax, 0 ) ,("∀", ITforall UnicodeSyntax, UnicodeSyntax, 0 ) ,("→", ITrarrow UnicodeSyntax, UnicodeSyntax, 0 ) ,("←", ITlarrow UnicodeSyntax, UnicodeSyntax, 0 ) ,("⊸", ITlolly, UnicodeSyntax, 0) ,("⤙", ITlarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit) ,("⤚", ITrarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit) ,("⤛", ITLarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit) ,("⤜", ITRarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit) ,("★", ITstar UnicodeSyntax, UnicodeSyntax, xbit StarIsTypeBit) -- ToDo: ideally, → and ∷ should be "specials", so that they cannot -- form part of a large operator. This would let us have a better -- syntax for kinds: ɑ∷*→* would be a legal kind signature. (maybe). 
] -- ----------------------------------------------------------------------------- -- Lexer actions type Action = PsSpan -> StringBuffer -> Int -> StringBuffer -> P (PsLocated Token) special :: Token -> Action special tok span _buf _len _buf2 = return (L span tok) token, layout_token :: Token -> Action token t span _buf _len _buf2 = return (L span t) layout_token t span _buf _len _buf2 = pushLexState layout >> return (L span t) idtoken :: (StringBuffer -> Int -> Token) -> Action idtoken f span buf len _buf2 = return (L span $! (f buf len)) qdo_token :: (Maybe FastString -> Token) -> Action qdo_token con span buf len _buf2 = do maybe_layout token return (L span $! token) where !token = con $! Just $! fst $! splitQualName buf len False skip_one_varid :: (FastString -> Token) -> Action skip_one_varid f span buf len _buf2 = return (L span $! f (lexemeToFastString (stepOn buf) (len-1))) skip_one_varid_src :: (SourceText -> FastString -> Token) -> Action skip_one_varid_src f span buf len _buf2 = return (L span $! f (SourceText $ lexemeToFastString (stepOn buf) (len-1)) (lexemeToFastString (stepOn buf) (len-1))) skip_two_varid :: (FastString -> Token) -> Action skip_two_varid f span buf len _buf2 = return (L span $! f (lexemeToFastString (stepOn (stepOn buf)) (len-2))) strtoken :: (String -> Token) -> Action strtoken f span buf len _buf2 = return (L span $! (f $! lexemeToString buf len)) fstrtoken :: (FastString -> Token) -> Action fstrtoken f span buf len _buf2 = return (L span $! (f $! 
lexemeToFastString buf len)) begin :: Int -> Action begin code _span _str _len _buf2 = do pushLexState code; lexToken pop :: Action pop _span _buf _len _buf2 = do _ <- popLexState lexToken -- See Note [Nested comment line pragmas] failLinePrag1 :: Action failLinePrag1 span _buf _len _buf2 = do b <- getBit InNestedCommentBit if b then return (L span ITcomment_line_prag) else lexError LexErrorInPragma -- See Note [Nested comment line pragmas] popLinePrag1 :: Action popLinePrag1 span _buf _len _buf2 = do b <- getBit InNestedCommentBit if b then return (L span ITcomment_line_prag) else do _ <- popLexState lexToken hopefully_open_brace :: Action hopefully_open_brace span buf len buf2 = do relaxed <- getBit RelaxedLayoutBit ctx <- getContext (AI l _) <- getInput let offset = srcLocCol (psRealLoc l) isOK = relaxed || case ctx of Layout prev_off _ : _ -> prev_off < offset _ -> True if isOK then pop_and open_brace span buf len buf2 else addFatalError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs span) PsErrMissingBlock pop_and :: Action -> Action pop_and act span buf len buf2 = do _ <- popLexState act span buf len buf2 -- See Note [Whitespace-sensitive operator parsing] followedByOpeningToken, precededByClosingToken :: AlexAccPred ExtsBitmap followedByOpeningToken _ _ _ (AI _ buf) = followedByOpeningToken' buf precededByClosingToken _ (AI _ buf) _ _ = precededByClosingToken' buf -- The input is the buffer *after* the token. followedByOpeningToken' :: StringBuffer -> Bool followedByOpeningToken' buf | atEnd buf = False | otherwise = case nextChar buf of ('{', buf') -> nextCharIsNot buf' (== '-') ('(', _) -> True ('[', _) -> True ('\"', _) -> True ('\'', _) -> True ('_', _) -> True ('⟦', _) -> True ('⦇', _) -> True (c, _) -> isAlphaNum c -- The input is the buffer *before* the token. 
-- | Does the character just before @buf@ terminate a \"closing token\"
-- (an identifier, a literal, or a closing bracket)?
-- See Note [Whitespace-sensitive operator parsing].
-- The input is the buffer *before* the token.
precededByClosingToken' :: StringBuffer -> Bool
precededByClosingToken' buf =
  case prevChar buf '\n' of
    -- A '}' counts as closing only if it is not the tail of a
    -- comment closer "-}": check the character before it.
    '}' -> decodePrevNChars 1 buf /= "-"
    ')' -> True
    ']' -> True
    '\"' -> True
    '\'' -> True
    '_' -> True
    '⟧' -> True
    '⦈' -> True
    c -> isAlphaNum c

-- | Classify an operator occurrence (prefix/suffix/tight/loose infix)
-- from the buffer before the operator (@buf1@) and the buffer after it
-- (@buf2@). See Note [Whitespace-sensitive operator parsing] for the
-- classification table this implements.
get_op_ws :: StringBuffer -> StringBuffer -> OpWs
get_op_ws buf1 buf2 =
    mk_op_ws (precededByClosingToken' buf1) (followedByOpeningToken' buf2)
  where
    mk_op_ws False True = OpWsPrefix
    mk_op_ws True False = OpWsSuffix
    mk_op_ws True True = OpWsTightInfix
    mk_op_ws False False = OpWsLooseInfix

{-# INLINE with_op_ws #-}
-- | Run an action that needs to know the whitespace classification of
-- the operator just lexed; @buf@/@buf2@ are the buffers before and
-- after the token, as supplied to every 'Action'.
with_op_ws :: (OpWs -> Action) -> Action
with_op_ws act span buf len buf2 = act (get_op_ws buf buf2) span buf len buf2

{-# INLINE nextCharIs #-}
-- | Does the next character in the buffer exist and satisfy @p@?
nextCharIs :: StringBuffer -> (Char -> Bool) -> Bool
nextCharIs buf p = not (atEnd buf) && p (currentChar buf)

{-# INLINE nextCharIsNot #-}
-- | Negation of 'nextCharIs': also True at end of buffer.
nextCharIsNot :: StringBuffer -> (Char -> Bool) -> Bool
nextCharIsNot buf p = not (nextCharIs buf p)

-- | Alex predicate: succeeds when the character after the candidate
-- lexeme is not @char@ (or the buffer is at its end).
notFollowedBy :: Char -> AlexAccPred ExtsBitmap
notFollowedBy char _ _ _ (AI _ buf)
  = nextCharIsNot buf (== char)

-- | Alex predicate: succeeds when the next character is not an ASCII
-- symbol character (the set that can extend an operator lexeme).
notFollowedBySymbol :: AlexAccPred ExtsBitmap
notFollowedBySymbol _ _ _ (AI _ buf)
  = nextCharIsNot buf (`elem` "!#$%&*+./<=>?@\\^|-~")

-- | Alex predicate: succeeds when the next character — after at most
-- one optional space — is a decimal digit.
followedByDigit :: AlexAccPred ExtsBitmap
followedByDigit _ _ _ (AI _ buf)
  = afterOptionalSpace buf (\b -> nextCharIs b (`elem` ['0'..'9']))

-- | Alex predicate on the *input* position (note the argument order:
-- it inspects the buffer before the candidate lexeme, not after it):
-- succeeds when the current character is @char@.
ifCurrentChar :: Char -> AlexAccPred ExtsBitmap
ifCurrentChar char _ (AI _ buf) _ _
  = nextCharIs buf (== char)

-- We must reject doc comments as being ordinary comments everywhere.
-- In some cases the doc comment will be selected as the lexeme due to
-- maximal munch, but not always, because the nested comment rule is
-- valid in all states, but the doc-comment rules are only valid in
-- the non-layout states.
isNormalComment :: AlexAccPred ExtsBitmap isNormalComment bits _ _ (AI _ buf) | HaddockBit `xtest` bits = notFollowedByDocOrPragma | otherwise = nextCharIsNot buf (== '#') where notFollowedByDocOrPragma = afterOptionalSpace buf (\b -> nextCharIsNot b (`elem` "|^*$#")) afterOptionalSpace :: StringBuffer -> (StringBuffer -> Bool) -> Bool afterOptionalSpace buf p = if nextCharIs buf (== ' ') then p (snd (nextChar buf)) else p buf atEOL :: AlexAccPred ExtsBitmap atEOL _ _ _ (AI _ buf) = atEnd buf || currentChar buf == '\n' -- Check if we should parse a negative literal (e.g. -123) as a single token. negLitPred :: AlexAccPred ExtsBitmap negLitPred = prefix_minus `alexAndPred` (negative_literals `alexOrPred` lexical_negation) where negative_literals = ifExtension NegativeLiteralsBit lexical_negation = -- See Note [Why not LexicalNegationBit] alexNotPred (ifExtension NoLexicalNegationBit) prefix_minus = -- Note [prefix_minus in negLitPred and negHashLitPred] alexNotPred precededByClosingToken -- Check if we should parse an unboxed negative literal (e.g. -123#) as a single token. negHashLitPred :: ExtBits -> AlexAccPred ExtsBitmap negHashLitPred ext = prefix_minus `alexAndPred` magic_hash where magic_hash = ifExtension ext -- Either MagicHashBit or ExtendedLiteralsBit prefix_minus = -- Note [prefix_minus in negLitPred and negHashLitPred] alexNotPred precededByClosingToken {- Note [prefix_minus in negLitPred and negHashLitPred] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want to parse -1 as a single token, but x-1 as three tokens. So in negLitPred (and negHashLitPred) we require that we have a prefix occurrence of the minus sign. See Note [Whitespace-sensitive operator parsing] for a detailed definition of a prefix occurrence. The condition for a prefix occurrence of an operator is: not precededByClosingToken && followedByOpeningToken but we don't check followedByOpeningToken when parsing a negative literal. 
It holds simply because we immediately lex a literal after the minus. -} ifExtension :: ExtBits -> AlexAccPred ExtsBitmap ifExtension extBits bits _ _ _ = extBits `xtest` bits alexNotPred p userState in1 len in2 = not (p userState in1 len in2) alexOrPred p1 p2 userState in1 len in2 = p1 userState in1 len in2 || p2 userState in1 len in2 multiline_doc_comment :: Action multiline_doc_comment span buf _len _buf2 = {-# SCC "multiline_doc_comment" #-} withLexedDocType worker where worker input@(AI start_loc _) docType checkNextLine = go start_loc "" [] input where go start_loc curLine prevLines input@(AI end_loc _) = case alexGetChar' input of Just ('\n', input') | checkNextLine -> case checkIfCommentLine input' of Just input@(AI next_start _) -> go next_start "" (locatedLine : prevLines) input -- Start a new line Nothing -> endComment | otherwise -> endComment Just (c, input) -> go start_loc (c:curLine) prevLines input Nothing -> endComment where lineSpan = mkSrcSpanPs $ mkPsSpan start_loc end_loc locatedLine = L lineSpan (mkHsDocStringChunk $ reverse curLine) commentLines = NE.reverse $ locatedLine :| prevLines endComment = docCommentEnd input (docType (\dec -> MultiLineDocString dec commentLines)) buf span -- Check if the next line of input belongs to this doc comment as well. -- A doc comment continues onto the next line when the following -- conditions are met: -- * The line starts with "--" -- * The line doesn't start with "---". -- * The line doesn't start with "-- $", because that would be the -- start of a /new/ named haddock chunk (#10398). 
-- | Check if the next line is a line comment that could continue the current
-- doc comment. Leading non-newline whitespace is skipped first. Returns
-- 'Nothing' when the line does NOT continue the comment (it starts with
-- @---@, or with @-- $@ which begins a new named doc chunk); otherwise
-- returns the input positioned at the start of the @--@.
checkIfCommentLine :: AlexInput -> Maybe AlexInput
checkIfCommentLine input = check (dropNonNewlineSpace input)
  where
    check input = do
      ('-', input) <- alexGetChar' input
      ('-', input) <- alexGetChar' input
      (c, after_c) <- alexGetChar' input
      case c of
        '-' -> Nothing
        ' ' -> case alexGetChar' after_c of
                 Just ('$', _) -> Nothing
                 _ -> Just input
        _   -> Just input

    dropNonNewlineSpace input = case alexGetChar' input of
      Just (c, input')
        | isSpace c && c /= '\n' -> dropNonNewlineSpace input'
        | otherwise -> input
      Nothing -> input

-- | Lex an ordinary @--@ line comment. When the raw token stream is
-- requested the comment is emitted as an 'ITlineComment' token; otherwise
-- it is skipped and the next real token is lexed.
lineCommentToken :: Action
lineCommentToken span buf len buf2 = do
  b <- getBit RawTokenStreamBit
  if b then do
         lt <- getLastLocIncludingComments
         strtoken (\s -> ITlineComment s lt) span buf len buf2
       else lexToken

{-
  nested comments require traversing by hand, they can't be parsed
  using regular expressions.
-}
nested_comment :: Action
nested_comment span buf len _buf2 = {-# SCC "nested_comment" #-} do
  l <- getLastLocIncludingComments
  let endComment input (L _ comment) =
        commentEnd lexToken input (Nothing, ITblockComment comment l) buf span
  input <- getInput
  -- Include decorator in comment
  let start_decorator = reverse $ lexemeToString buf len
  nested_comment_logic endComment start_decorator input span

-- | Lex a nested documentation comment (@{-|@, @{-^@, etc.). The opening
-- decorator determines the 'HsDocStringDecorator'; the trailing @-}@ is
-- stripped from the collected text by 'dropTrailingDec'.
nested_doc_comment :: Action
nested_doc_comment span buf _len _buf2 =
  {-# SCC "nested_doc_comment" #-} withLexedDocType worker
  where
    worker input@(AI start_loc _) docType _checkNextLine =
      nested_comment_logic endComment "" input (mkPsSpan start_loc (psSpanEnd span))
      where
        endComment input lcomment =
          docCommentEnd input
            (docType (\d -> NestedDocString d (mkHsDocStringChunk . dropTrailingDec <$> lcomment)))
            buf span

    -- Remove the closing "-}" decorator from the end of the comment text.
    dropTrailingDec [] = []
    dropTrailingDec "-}" = ""
    dropTrailingDec (x:xs) = x:dropTrailingDec xs

{-# INLINE nested_comment_logic #-}
-- | Includes the trailing '-}' decorators
-- drop the last two elements with the callback if you don't want them to be included
--
-- Traverses the input by hand, tracking brace-comment nesting depth @n@;
-- the accumulator is kept reversed for O(1) consing.
nested_comment_logic
  :: (AlexInput -> Located String -> P (PsLocated Token))
     -- ^ Continuation that gets the rest of the input and the lexed comment
  -> String -- ^ starting value for accumulator (reversed) - When we want to include a decorator '{-' in the comment
  -> AlexInput
  -> PsSpan
  -> P (PsLocated Token)
nested_comment_logic endComment commentAcc input span = go commentAcc (1::Int) input
  where
    go commentAcc 0 input@(AI end_loc _) = do
      let comment = reverse commentAcc
          cspan = mkSrcSpanPs $ mkPsSpan (psSpanStart span) end_loc
          lcomment = L cspan comment
      endComment input lcomment
    go commentAcc n input = case alexGetChar' input of
      Nothing -> errBrace input (psRealSpan span)
      Just ('-',input) -> case alexGetChar' input of
        Nothing -> errBrace input (psRealSpan span)
        Just ('\125',input) -> go ('\125':'-':commentAcc) (n-1) input -- '}'
        Just (_,_)          -> go ('-':commentAcc) n input
      Just ('\123',input) -> case alexGetChar' input of -- '{' char
        Nothing -> errBrace input (psRealSpan span)
        Just ('-',input) -> go ('-':'\123':commentAcc) (n+1) input
        Just (_,_)       -> go ('\123':commentAcc) n input
      -- See Note [Nested comment line pragmas]
      Just ('\n',input) -> case alexGetChar' input of
        Nothing -> errBrace input (psRealSpan span)
        Just ('#',_) -> do
          (parsedAcc,input) <- parseNestedPragma input
          go (parsedAcc ++ '\n':commentAcc) n input
        Just (_,_) -> go ('\n':commentAcc) n input
      Just (c,input) -> go (c:commentAcc) n input

-- See Note [Nested comment line pragmas]
parseNestedPragma :: AlexInput -> P (String,AlexInput)
parseNestedPragma input@(AI _ buf) = do
  origInput <- getInput
  setInput input
  setExts (.|. xbit InNestedCommentBit)
  pushLexState bol
  lt <- lexToken
  _ <- popLexState
  setExts (.&. complement (xbit InNestedCommentBit))
  postInput@(AI _ postBuf) <- getInput
  setInput origInput
  case unLoc lt of
    ITcomment_line_prag -> do
      -- Recover the exact text of the pragma that was just lexed, so the
      -- caller can splice it (reversed) into its comment accumulator.
      let bytes = byteDiff buf postBuf
          diff = lexemeToString buf bytes
      return (reverse diff, postInput)
    lt' -> panic ("parseNestedPragma: unexpected token" ++ (show lt'))

{-
Note [Nested comment line pragmas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to ignore cpp-preprocessor-generated #line pragmas if they were inside
nested comments.

Now, when parsing a nested comment, if we encounter a line starting with '#' we
call parseNestedPragma, which executes the following:
1. Save the current lexer input (loc, buf) for later
2. Set the current lexer input to the beginning of the line starting with '#'
3. Turn the 'InNestedComment' extension on
4. Push the 'bol' lexer state
5. Lex a token. Due to (2), (3), and (4), this should always lex a single line
   or less and return the ITcomment_line_prag token. This may set source line
   and file location if a #line pragma is successfully parsed
6. Restore lexer input and state to what they were before we did all this
7. Return control to the function parsing a nested comment, informing it of
   what the lexer parsed

Regarding (5) above:
Every exit from the 'bol' lexer state (do_bol, popLinePrag1, failLinePrag1)
checks if the 'InNestedComment' extension is set. If it is, that function will
return control to parseNestedPragma by returning the ITcomment_line_prag token.

See #314 for more background on the bug this fixes.
-}

{-# INLINE withLexedDocType #-}
-- | Dispatch on the doc-comment decorator character (@|@, @^@, @$@ or @*@)
-- that was just lexed, and hand the appropriate 'HdkComment' constructor to
-- the given continuation.
withLexedDocType :: (AlexInput
                     -> ((HsDocStringDecorator -> HsDocString) -> (HdkComment, Token))
                     -> Bool
                     -> P (PsLocated Token))
                 -> P (PsLocated Token)
withLexedDocType lexDocComment = do
  input@(AI _ buf) <- getInput
  l <- getLastLocIncludingComments
  case prevChar buf ' ' of
    -- The `Bool` argument to lexDocComment signals whether or not the next
    -- line of input might also belong to this doc comment.
    '|' -> lexDocComment input (mkHdkCommentNext l) True
    '^' -> lexDocComment input (mkHdkCommentPrev l) True
    '$' -> case lexDocName input of
       Nothing -> do setInput input; lexToken -- eof reached, lex it normally
       Just (name, input) -> lexDocComment input (mkHdkCommentNamed l name) True
    '*' -> lexDocSection l 1 input
    _ -> panic "withLexedDocType: Bad doc type"
 where
    -- Count the '*'s to determine the section heading level.
    lexDocSection l n input = case alexGetChar' input of
      Just ('*', input) -> lexDocSection l (n+1) input
      Just (_, _) -> lexDocComment input (mkHdkCommentSection l n) False
      Nothing -> do setInput input; lexToken -- eof reached, lex it normally

    -- Lex the chunk name following "-- $": everything up to whitespace.
    lexDocName :: AlexInput -> Maybe (String, AlexInput)
    lexDocName = go ""
      where
        go acc input = case alexGetChar' input of
          Just (c, input')
            | isSpace c -> Just (reverse acc, input)
            | otherwise -> go (c:acc) input'
          Nothing -> Nothing

mkHdkCommentNext, mkHdkCommentPrev
  :: PsSpan -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentNext loc mkDS = (HdkCommentNext ds,ITdocComment ds loc)
  where ds = mkDS HsDocStringNext
mkHdkCommentPrev loc mkDS = (HdkCommentPrev ds,ITdocComment ds loc)
  where ds = mkDS HsDocStringPrevious

mkHdkCommentNamed
  :: PsSpan -> String -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentNamed loc name mkDS = (HdkCommentNamed name ds, ITdocComment ds loc)
  where ds = mkDS (HsDocStringNamed name)

mkHdkCommentSection
  :: PsSpan -> Int -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentSection loc n mkDS = (HdkCommentSection n ds, ITdocComment ds loc)
  where ds = mkDS (HsDocStringGroup n)

-- RULES pragmas turn on the forall and '.' keywords, and we turn them
-- off again at the end of the pragma.
rulePrag :: Action
rulePrag span buf len _buf2 = do
  setExts (.|. xbit InRulePragBit)
  let !src = lexemeToFastString buf len
  return (L span (ITrules_prag (SourceText src)))

-- When 'UsePosPragsBit' is not set, it is expected that we emit a token instead
-- of updating the position in 'PState'
linePrag :: Action
linePrag span buf len buf2 = do
  usePosPrags <- getBit UsePosPragsBit
  if usePosPrags
    then begin line_prag2 span buf len buf2
    else let !src = lexemeToFastString buf len
         in return (L span (ITline_prag (SourceText src)))

-- When 'UsePosPragsBit' is not set, it is expected that we emit a token instead
-- of updating the position in 'PState'
columnPrag :: Action
columnPrag span buf len buf2 = do
  usePosPrags <- getBit UsePosPragsBit
  if usePosPrags
    then begin column_prag span buf len buf2
    else let !src = lexemeToFastString buf len
         in return (L span (ITcolumn_prag (SourceText src)))

-- | Close a @{-# RULES ... #-}@ (or similar) pragma: turn the forall/'.'
-- keywords back off and emit the closing token.
endPrag :: Action
endPrag span _buf _len _buf2 = do
  setExts (.&. complement (xbit InRulePragBit))
  return (L span ITclose_prag)

-- docCommentEnd
-------------------------------------------------------------------------------
-- This function is quite tricky. We can't just return a new token, we also
-- need to update the state of the parser. Why? Because the token is longer
-- than what was lexed by Alex, and the lexToken function doesn't know this, so
-- it writes the wrong token length to the parser state. This function is
-- called afterwards, so it can just update the state.
{-# INLINE commentEnd #-}
-- | Finish lexing a comment: fix up the token span/length in 'PState' (the
-- comment is longer than what Alex lexed — see the note above), record any
-- Haddock comment, and either emit the comment token (raw token stream) or
-- continue with @cont@.
commentEnd :: P (PsLocated Token)
           -> AlexInput
           -> (Maybe HdkComment, Token)
           -> StringBuffer
           -> PsSpan
           -> P (PsLocated Token)
commentEnd cont input (m_hdk_comment, hdk_token) buf span = do
  setInput input
  let (AI loc nextBuf) = input
      span' = mkPsSpan (psSpanStart span) loc
      last_len = byteDiff buf nextBuf
  span `seq` setLastToken span' last_len
  whenIsJust m_hdk_comment $ \hdk_comment ->
    P $ \s -> POk (s {hdk_comments = hdk_comments s `snocOL` L span' hdk_comment}) ()
  b <- getBit RawTokenStreamBit
  if b then return (L span' hdk_token)
       else cont

{-# INLINE docCommentEnd #-}
-- | 'commentEnd' specialised to doc comments, which always carry an
-- 'HdkComment'.
docCommentEnd :: AlexInput -> (HdkComment, Token) -> StringBuffer
              -> PsSpan -> P (PsLocated Token)
docCommentEnd input (hdk_comment, tok) buf span
  = commentEnd lexToken input (Just hdk_comment, tok) buf span

-- | Fail with an "unterminated comment" error at EOF.
errBrace :: AlexInput -> RealSrcSpan -> P a
errBrace (AI end _) span =
  failLocMsgP (realSrcSpanStart span)
              (psRealLoc end)
              (\srcLoc -> mkPlainErrorMsgEnvelope srcLoc
                            (PsErrLexer LexUnterminatedComment LexErrKind_EOF))

open_brace, close_brace :: Action
open_brace span _str _len _buf2 = do
  -- An explicit '{' suspends layout until the matching '}'.
  ctx <- getContext
  setContext (NoLayout:ctx)
  return (L span ITocurly)
close_brace span _str _len _buf2 = do
  popContext
  return (L span ITccurly)

qvarid, qconid :: StringBuffer -> Int -> Token
qvarid buf len = ITqvarid $! splitQualName buf len False
qconid buf len = ITqconid $! splitQualName buf len False

splitQualName :: StringBuffer -> Int -> Bool -> (FastString,FastString)
-- takes a StringBuffer and a length, and returns the module name
-- and identifier parts of a qualified name.  Splits at the *last* dot,
-- because of hierarchical module names.
--
-- Throws an error if the name is not qualified.
splitQualName orig_buf len parens = split orig_buf orig_buf
  where
    split buf dot_buf
        | orig_buf `byteDiff` buf >= len  = done dot_buf
        | c == '.'                        = found_dot buf'
        | otherwise                       = split buf' dot_buf
      where
       (c,buf') = nextChar buf

    -- careful, we might get names like M....
    -- so, if the character after the dot is not upper-case, this is
    -- the end of the qualifier part.
    found_dot buf -- buf points after the '.'
        | isUpper c    = split buf' buf
        | otherwise    = done buf
      where
       (c,buf') = nextChar buf

    done dot_buf
        | qual_size < 1 = error "splitQualName got an unqualified named"
        | otherwise =
        (lexemeToFastString orig_buf (qual_size - 1),
         if parens -- Prelude.(+)
            then lexemeToFastString (stepOn dot_buf) (len - qual_size - 2)
            else lexemeToFastString dot_buf (len - qual_size))
      where
       qual_size = orig_buf `byteDiff` dot_buf

-- | Lex a variable identifier, checking the reserved-words table.
-- @case@/@cases@ after @\\@ get special treatment for @\\case@/@\\cases@;
-- extension-gated keywords fall back to plain 'ITvarid' when the extension
-- is off.
varid :: Action
varid span buf len _buf2 =
  case lookupUFM reservedWordsFM fs of
    Just (ITcase, _) -> do
      lastTk <- getLastTk
      keyword <- case lastTk of
        Strict.Just (L _ ITlam) -> do
          lambdaCase <- getBit LambdaCaseBit
          unless lambdaCase $ do
            pState <- getPState
            addError $ mkPlainErrorMsgEnvelope
                         (mkSrcSpanPs (last_loc pState)) PsErrLambdaCase
          return ITlcase
        _ -> return ITcase
      maybe_layout keyword
      return $ L span keyword
    Just (ITlcases, _) -> do
      lastTk <- getLastTk
      lambdaCase <- getBit LambdaCaseBit
      token <- case lastTk of
        Strict.Just (L _ ITlam) | lambdaCase -> return ITlcases
        _ -> return $ ITvarid fs
      maybe_layout token
      return $ L span token
    Just (keyword, 0) -> do
      -- Unconditional keyword (no extension bit).
      maybe_layout keyword
      return $ L span keyword
    Just (keyword, i) -> do
      -- Keyword only when extension bit @i@ is set.
      exts <- getExts
      if exts .&. i /= 0
        then do
          maybe_layout keyword
          return $ L span keyword
        else
          return $ L span $ ITvarid fs
    Nothing ->
      return $ L span $ ITvarid fs
  where
    !fs = lexemeToFastString buf len

conid :: StringBuffer -> Int -> Token
conid buf len = ITconid $! lexemeToFastString buf len

qvarsym, qconsym :: StringBuffer -> Int -> Token
qvarsym buf len = ITqvarsym $! splitQualName buf len False
qconsym buf len = ITqconsym $! splitQualName buf len False

-- | Report a suffix @\@@ occurrence (illegal); the error span extends over
-- any following spaces up to the next non-space character.
errSuffixAt :: PsSpan -> P a
errSuffixAt span = do
  input <- getInput
  failLocMsgP start (go input start)
    (\srcSpan -> mkPlainErrorMsgEnvelope srcSpan $ PsErrSuffixAT)
  where
    start = psRealLoc (psSpanStart span)
    go inp loc
      | Just (c, i) <- alexGetChar inp
      , let next = advanceSrcLoc loc c
      = if c == ' ' then go i next else next
      | otherwise = loc

-- See Note [Whitespace-sensitive operator parsing]
varsym :: OpWs -> Action
varsym opws@OpWsPrefix = sym $ \span exts s ->
  let warnExtConflict errtok =
        do { addPsMessage (mkSrcSpanPs span) (PsWarnOperatorWhitespaceExtConflict errtok)
           ; return (ITvarsym s) }
  in
  if | s == fsLit "@" ->
         return ITtypeApp  -- regardless of TypeApplications for better error messages
     | s == fsLit "%" ->
         if xtest LinearTypesBit exts
         then return ITpercent
         else warnExtConflict OperatorWhitespaceSymbol_PrefixPercent
     | s == fsLit "$" ->
         if xtest ThQuotesBit exts
         then return ITdollar
         else warnExtConflict OperatorWhitespaceSymbol_PrefixDollar
     | s == fsLit "$$" ->
         if xtest ThQuotesBit exts
         then return ITdollardollar
         else warnExtConflict OperatorWhitespaceSymbol_PrefixDollarDollar
     | s == fsLit "-" ->
         return ITprefixminus -- Only when LexicalNegation is on, otherwise we get ITminus
                              -- and don't hit this code path. See Note [Minus tokens]
     | s == fsLit ".", OverloadedRecordDotBit `xtest` exts ->
         return (ITproj True) -- e.g. '(.x)'
     | s == fsLit "." -> return ITdot
     | s == fsLit "!" -> return ITbang
     | s == fsLit "~" -> return ITtilde
     | otherwise ->
         do { warnOperatorWhitespace opws span s
            ; return (ITvarsym s) }
varsym opws@OpWsSuffix = sym $ \span _ s ->
  if | s == fsLit "@" -> errSuffixAt span
     | s == fsLit "." -> return ITdot
     | otherwise ->
         do { warnOperatorWhitespace opws span s
            ; return (ITvarsym s) }
varsym opws@OpWsTightInfix = sym $ \span exts s ->
  if | s == fsLit "@" -> return ITat
     | s == fsLit ".", OverloadedRecordDotBit `xtest` exts -> return (ITproj False)
     | s == fsLit "." -> return ITdot
     | otherwise ->
         do { warnOperatorWhitespace opws span s
            ; return (ITvarsym s) }
varsym OpWsLooseInfix = sym $ \_ _ s ->
  if | s == fsLit "." -> return ITdot
     | otherwise -> return $ ITvarsym s

consym :: OpWs -> Action
consym opws = sym $ \span _exts s ->
  do { warnOperatorWhitespace opws span s
     ; return (ITconsym s) }

warnOperatorWhitespace :: OpWs -> PsSpan -> FastString -> P ()
warnOperatorWhitespace opws span s =
  whenIsJust (check_unusual_opws opws) $ \opws' ->
    addPsMessage (mkSrcSpanPs span) (PsWarnOperatorWhitespace s opws')

-- Check an operator occurrence for unusual whitespace (prefix, suffix,
-- tight infix). This determines if -Woperator-whitespace is triggered.
check_unusual_opws :: OpWs -> Maybe OperatorWhitespaceOccurrence
check_unusual_opws opws =
  case opws of
    OpWsPrefix     -> Just OperatorWhitespaceOccurrence_Prefix
    OpWsSuffix     -> Just OperatorWhitespaceOccurrence_Suffix
    OpWsTightInfix -> Just OperatorWhitespaceOccurrence_TightInfix
    OpWsLooseInfix -> Nothing

-- | Lex an operator: consult the reserved-symbols table (honouring
-- extension bits and UnicodeSyntax), falling back to @con@ for ordinary
-- operators.
sym :: (PsSpan -> ExtsBitmap -> FastString -> P Token) -> Action
sym con span buf len _buf2 =
  case lookupUFM reservedSymsFM fs of
    Just (keyword, NormalSyntax, 0) ->
      return $ L span keyword
    Just (keyword, NormalSyntax, i) -> do
      exts <- getExts
      if exts .&. i /= 0
        then return $ L span keyword
        else L span <$!> con span exts fs
    Just (keyword, UnicodeSyntax, 0) -> do
      exts <- getExts
      if xtest UnicodeSyntaxBit exts
        then return $ L span keyword
        else L span <$!> con span exts fs
    Just (keyword, UnicodeSyntax, i) -> do
      exts <- getExts
      if exts .&. i /= 0 && xtest UnicodeSyntaxBit exts
        then return $ L span keyword
        else L span <$!> con span exts fs
    Nothing -> do
      exts <- getExts
      L span <$!> con span exts fs
  where
    !fs = lexemeToFastString buf len

-- Variations on the integral numeric literal.
tok_integral :: (SourceText -> Integer -> Token)  -- ^ token constructor
             -> (Integer -> Integer)              -- ^ value transformation (e.g. negate)
             -> Int  -- ^ Offset of the unsigned value (e.g. 1 when we parsed "-", 2 for "0x", etc.)
             -> Int  -- ^ Number of non-numeric characters parsed (e.g. 6 in "-12#Int8")
             -> (Integer, (Char -> Int))  -- ^ (radix, char_to_int parsing function)
             -> Action
tok_integral mk_token transval offset translen (radix,char_to_int) span buf len _buf2 = do
  numericUnderscores <- getBit NumericUnderscoresBit  -- #14473
  let src = lexemeToFastString buf len
  when ((not numericUnderscores) && ('_' `elem` unpackFS src)) $ do
    pState <- getPState
    let msg = PsErrNumUnderscores NumUnderscore_Integral
    addError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
  return $ L span $ mk_token (SourceText src)
         $! transval $ parseUnsignedInteger
         (offsetBytes offset buf) (subtract translen len) radix char_to_int

-- | Helper to parse ExtendedLiterals (e.g. -0x10#Word32)
--
-- This function finds the offset of the "#" character and checks that the
-- suffix is valid. Then it calls tok_integral with the appropriate suffix
-- length taken into account.
tok_prim_num_ext :: (Integer -> Integer)  -- ^ value transformation (e.g. negate)
                 -> Int  -- ^ Offset of the unsigned value (e.g. 1 when we parsed "-", 2 for "0x", etc.)
                 -> (Integer, (Char -> Int))  -- ^ (radix, char_to_int parsing function)
                 -> Action
tok_prim_num_ext transval offset (radix,char_to_int) span buf len buf2 = do
  let !suffix_offset = findHashOffset buf + 1
  let !suffix_len = len - suffix_offset
  let !suffix = lexemeToFastString (offsetBytes suffix_offset buf) suffix_len
  mk_token <-
    if | suffix == fsLit "Word"   -> pure ITprimword
       | suffix == fsLit "Word8"  -> pure ITprimword8
       | suffix == fsLit "Word16" -> pure ITprimword16
       | suffix == fsLit "Word32" -> pure ITprimword32
       | suffix == fsLit "Word64" -> pure ITprimword64
       | suffix == fsLit "Int"    -> pure ITprimint
       | suffix == fsLit "Int8"   -> pure ITprimint8
       | suffix == fsLit "Int16"  -> pure ITprimint16
       | suffix == fsLit "Int32"  -> pure ITprimint32
       | suffix == fsLit "Int64"  -> pure ITprimint64
       | otherwise                -> srcParseFail
  -- '#' plus the suffix are non-numeric characters.
  let !translen = suffix_len+offset+1
  tok_integral mk_token transval offset translen (radix,char_to_int) span buf len buf2

tok_num :: (Integer -> Integer)
        -> Int -> Int
        -> (Integer, (Char->Int)) -> Action
tok_num = tok_integral $ \case
    st@(SourceText (unconsFS -> Just ('-',_))) -> itint st (const True)
    st@(SourceText _)                          -> itint st (const False)
    st@NoSourceText                            -> itint st (< 0)
  where
    itint :: SourceText -> (Integer -> Bool) -> Integer -> Token
    itint !st is_negative !val = ITinteger ((IL st $! is_negative val) val)

tok_primint :: (Integer -> Integer)
            -> Int -> Int
            -> (Integer, (Char->Int)) -> Action
tok_primint = tok_integral ITprimint

tok_primword :: Int -> Int
             -> (Integer, (Char->Int)) -> Action
tok_primword = tok_integral ITprimword positive

positive, negative :: (Integer -> Integer)
positive = id
negative = negate

-- (radix, digit-to-value) pairs for the integral lexers.
binary, octal, decimal, hexadecimal :: (Integer, Char -> Int)
binary = (2,octDecDigit)
octal = (8,octDecDigit)
decimal = (10,octDecDigit)
hexadecimal = (16,hexDigit)

-- readSignificandExponentPair can understand negative rationals, exponents,
-- everything.
-- | Lex a fractional literal, dropping @drop@ trailing characters (e.g. a
-- @#@/@##@ primitive suffix) and checking NumericUnderscores (#14473).
tok_frac :: Int -> (String -> Token) -> Action
tok_frac drop f span buf len _buf2 = do
  numericUnderscores <- getBit NumericUnderscoresBit  -- #14473
  let src = lexemeToString buf (len-drop)
  when ((not numericUnderscores) && ('_' `elem` src)) $ do
    pState <- getPState
    let msg = PsErrNumUnderscores NumUnderscore_Float
    addError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
  return (L span $! (f $! src))

tok_float, tok_primfloat, tok_primdouble, tok_prim_hex_float, tok_prim_hex_double :: String -> Token
tok_float           str = ITrational   $! readFractionalLit str
tok_hex_float       str = ITrational   $! readHexFractionalLit str
tok_primfloat       str = ITprimfloat  $! readFractionalLit str
tok_primdouble      str = ITprimdouble $! readFractionalLit str
tok_prim_hex_float  str = ITprimfloat  $! readHexFractionalLit str
tok_prim_hex_double str = ITprimdouble $! readHexFractionalLit str

readFractionalLit, readHexFractionalLit :: String -> FractionalLit
readHexFractionalLit = readFractionalLitX readHexSignificandExponentPair Base2
readFractionalLit    = readFractionalLitX readSignificandExponentPair    Base10

-- | Build a 'FractionalLit' from source text using the given
-- significand/exponent parser and exponent base.
readFractionalLitX :: (String -> (Integer, Integer))
                   -> FractionalExponentBase
                   -> String -> FractionalLit
readFractionalLitX readStr b str =
  mkSourceFractionalLit str is_neg i e b
  where
    is_neg = case str of
                    '-' : _ -> True
                    _       -> False
    (i, e) = readStr str

-- -----------------------------------------------------------------------------
-- Layout processing

-- we're at the first token on a line, insert layout tokens if necessary
do_bol :: Action
do_bol span _str _len _buf2 = do
        -- See Note [Nested comment line pragmas]
        b <- getBit InNestedCommentBit
        if b then return (L span ITcomment_line_prag) else do
          (pos, gen_semic) <- getOffside
          case pos of
              LT -> do
                  --trace "layout: inserting '}'" $ do
                  popContext
                  -- do NOT pop the lex state, we might have a ';' to insert
                  return (L span ITvccurly)
              EQ | gen_semic -> do
                  --trace "layout: inserting ';'" $ do
                  _ <- popLexState
                  return (L span ITsemi)
              _ -> do
                  _ <- popLexState
                  lexToken

-- certain keywords put us in the "layout" state, where we might
-- add an opening curly brace.
maybe_layout :: Token -> P ()
maybe_layout t = do -- If the alternative layout rule is enabled then
                    -- we never create an implicit layout context here.
                    -- Layout is handled XXX instead.
                    -- The code for closing implicit contexts, or
                    -- inserting implicit semi-colons, is therefore
                    -- irrelevant as it only applies in an implicit
                    -- context.
                    alr <- getBit AlternativeLayoutRuleBit
                    unless alr $ f t
    where f (ITdo _)  = pushLexState layout_do
          f (ITmdo _) = pushLexState layout_do
          f ITof      = pushLexState layout
          f ITlcase   = pushLexState layout
          f ITlcases  = pushLexState layout
          f ITlet     = pushLexState layout
          f ITwhere   = pushLexState layout
          f ITrec     = pushLexState layout
          f ITif      = pushLexState layout_if
          f _         = return ()

-- Pushing a new implicit layout context. If the indentation of the
-- next token is not greater than the previous layout context, then
-- Haskell 98 says that the new layout context should be empty; that is
-- the lexer must generate {}.
--
-- We are slightly more lenient than this: when the new context is started
-- by a 'do', then we allow the new context to be at the same indentation as
-- the previous context. This is what the 'strict' argument is for.
new_layout_context :: Bool -> Bool -> Token -> Action
new_layout_context strict gen_semic tok span _buf len _buf2 = do
    _ <- popLexState
    (AI l _) <- getInput
    let offset = srcLocCol (psRealLoc l) - len
    ctx <- getContext
    nondecreasing <- getBit NondecreasingIndentationBit
    let strict' = strict || not nondecreasing
    case ctx of
        Layout prev_off _ : _
           | (strict'     && prev_off >= offset ||
              not strict' && prev_off >  offset) -> do
                -- token is indented to the left of the previous context.
                -- we must generate a {} sequence now.
                pushLexState layout_left
                return (L span tok)
        _ -> do setContext (Layout offset gen_semic : ctx)
                return (L span tok)

-- | Emit the closing virtual brace for an empty implicit layout block and
-- return to beginning-of-line processing.
do_layout_left :: Action
do_layout_left span _buf _len _buf2 = do
    _ <- popLexState
    pushLexState bol  -- we must be at the start of a line
    return (L span ITvccurly)

-- -----------------------------------------------------------------------------
-- LINE pragmas

-- | Process a @#line@/@{-# LINE #-}@ pragma: update the lexer's notion of
-- the current file and line, then continue in lex state @code@.
setLineAndFile :: Int -> Action
setLineAndFile code (PsSpan span _) buf len _buf2 = do
  let src = lexemeToString buf (len - 1)  -- drop trailing quotation mark
      linenumLen = length $ head $ words src
      linenum = parseUnsignedInteger buf linenumLen 10 octDecDigit
      file = mkFastString $ go $ drop 1 $ dropWhile (/= '"') src
          -- skip everything through first quotation mark to get to the filename
        where go ('\\':c:cs) = c : go cs
              go (c:cs)      = c : go cs
              go []          = []
              -- decode escapes in the filename.  e.g. on Windows
              -- when our filenames have backslashes in, gcc seems to
              -- escape the backslashes.  One symptom of not doing this
              -- is that filenames in error messages look a bit strange:
              --   C:\\foo\bar.hs
              -- only the first backslash is doubled, because we apply
              -- System.FilePath.normalise before printing out
              -- filenames and it does not remove duplicate
              -- backslashes after the drive letter (should it?).
  resetAlrLastLoc file
  setSrcLoc (mkRealSrcLoc file (fromIntegral linenum - 1) (srcSpanEndCol span))
      -- subtract one: the line number refers to the *following* line
  addSrcFile file
  _ <- popLexState
  pushLexState code
  lexToken

-- | Process a @{-# COLUMN #-}@ pragma: reset the current column.
setColumn :: Action
setColumn (PsSpan span _) buf len _buf2 = do
  let column =
        case reads (lexemeToString buf len) of
          [(column, _)] -> column
          _ -> error "setColumn: expected integer"  -- shouldn't happen
  setSrcLoc (mkRealSrcLoc (srcSpanFile span) (srcSpanEndLine span)
                          (fromIntegral (column :: Integer)))
  _ <- popLexState
  lexToken

alrInitialLoc :: FastString -> RealSrcSpan
alrInitialLoc file = mkRealSrcSpan loc loc
    where -- This is a hack to ensure that the first line in a file
          -- looks like it is after the initial location:
          loc = mkRealSrcLoc file (-1) (-1)

-- -----------------------------------------------------------------------------
-- Options, includes and language pragmas.

-- | Lex the body of an OPTIONS/INCLUDE-style pragma up to the closing
-- @#-}@, producing a token from the collected string.
lex_string_prag :: (String -> Token) -> Action
lex_string_prag mkTok = lex_string_prag_comment mkTok'
  where
    mkTok' s _ = mkTok s

lex_string_prag_comment :: (String -> PsSpan -> Token) -> Action
lex_string_prag_comment mkTok span _buf _len _buf2
    = do input <- getInput
         start <- getParsedLoc
         l <- getLastLocIncludingComments
         tok <- go l [] input
         end <- getParsedLoc
         return (L (mkPsSpan start end) tok)
    where go l acc input
              = if isString input "#-}"
                   then do setInput input
                           return (mkTok (reverse acc) l)
                   else case alexGetChar input of
                          Just (c,i) -> go l (c:acc) i
                          Nothing -> err input
          -- Does the input start with the given literal string?
          isString _ [] = True
          isString i (x:xs)
              = case alexGetChar i of
                  Just (c,i') | c == x    -> isString i' xs
                  _other -> False
          err (AI end _) = failLocMsgP (realSrcSpanStart (psRealSpan span))
                                       (psRealLoc end)
                                       (\srcLoc -> mkPlainErrorMsgEnvelope srcLoc
                                                     $ PsErrLexer LexUnterminatedOptions LexErrKind_EOF)

-- -----------------------------------------------------------------------------
-- Strings & Chars

-- | Lex a string literal; a trailing @#@ makes it a primitive string,
-- which must contain only characters <= '\xFF'.
tok_string :: Action
tok_string span buf len _buf2 = do
  s <- lex_chars ("\"", "\"") span buf (if endsInHash then len - 1 else len)

  if endsInHash
    then do
      when (any (> '\xFF') s) $ do
        pState <- getPState
        let msg = PsErrPrimStringInvalidChar
        let err = mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
        addError err
      pure $ L span (ITprimstring src (unsafeMkByteString s))
    else
      pure $ L span (ITstring src (mkFastString s))
  where
    src = SourceText $ lexemeToFastString buf len
    endsInHash = currentChar (offsetBytes (len - 1) buf) == '#'

-- | Ideally, we would define this completely with Alex syntax, like normal
-- strings. Instead, this is defined as a hybrid solution by manually invoking
-- lex states, which we're doing for two reasons:
--   1. The multiline string should all be one lexical token, not multiple
--   2. We need to allow bare quotes, which can't be done with one regex
tok_string_multi :: Action
tok_string_multi startSpan startBuf _len _buf2 = do
  -- advance to the end of the multiline string
  let startLoc = psSpanStart startSpan
  let i@(AI _ contentStartBuf) =
        case lexDelim $ AI startLoc startBuf of
          Just i -> i
          Nothing -> panic "tok_string_multi did not start with a delimiter"
  (AI _ contentEndBuf, i'@(AI endLoc endBuf)) <- goContent i

  -- build the values pertaining to the entire multiline string, including
  -- delimiters
  let span = mkPsSpan startLoc endLoc
  let len = byteDiff startBuf endBuf
  let src = SourceText $ lexemeToFastString startBuf len

  -- load the content of the multiline string
  let contentLen = byteDiff contentStartBuf contentEndBuf
  s <- either (throwStringLexError (AI startLoc startBuf)) pure
        $ lexMultilineString contentLen contentStartBuf

  setInput i'
  pure $ L span $ ITstringMulti src (mkFastString s)
  where
    goContent i0 =
      case alexScan i0 string_multi_content of
        AlexToken i1 len _
          | Just i2 <- lexDelim i1 -> pure (i1, i2)
          | isEOF i1 -> checkSmartQuotes >> setInput i1 >> lexError LexError
          -- is the next token a tab character?
          -- need this explicitly because there's a global rule matching $tab
          | Just ('\t', _) <- alexGetChar' i1 -> setInput i1 >> lexError LexError
          -- Can happen if no patterns match, e.g. an unterminated gap
          | len == 0 -> setInput i1 >> lexError LexError
          | otherwise -> goContent i1
        AlexSkip i1 _ -> goContent i1
        _ -> setInput i0 >> lexError LexError

    -- Consume exactly three '"' characters (the multiline delimiter).
    lexDelim =
      let go 0 i = Just i
          go n i =
            case alexGetChar' i of
              Just ('"', i') -> go (n - 1) i'
              _ -> Nothing
       in go (3 :: Int)

    -- See Note [Bare smart quote error]
    checkSmartQuotes = do
      let findSmartQuote i0@(AI loc _) =
            case alexGetChar' i0 of
              Just ('\\', i1) | Just (_, i2) <- alexGetChar' i1 -> findSmartQuote i2
              Just (c, i1)
                | isDoubleSmartQuote c -> Just (c, loc)
                | otherwise -> findSmartQuote i1
              _ -> Nothing
      case findSmartQuote (AI (psSpanStart startSpan) startBuf) of
        Just (c, loc) -> throwSmartQuoteError c loc
        Nothing -> pure ()

-- | Dummy action that should never be called. Should only be used in lex
-- states that are manually lexed in tok_string_multi.
tok_string_multi_content :: Action
tok_string_multi_content = panic "tok_string_multi_content unexpectedly invoked"

-- | Lex the characters between the given delimiters (which are assumed to be
-- ASCII, one byte per 'Char'), resolving escapes.
lex_chars :: (String, String) -> PsSpan -> StringBuffer -> Int -> P String
lex_chars (startDelim, endDelim) span buf len =
  either (throwStringLexError i0) pure $ lexString contentLen contentBuf
  where
    i0@(AI _ contentBuf) = advanceInputBytes (length startDelim) $ AI (psSpanStart span) buf
    -- assumes delimiters are ASCII, with 1 byte per Char
    contentLen = len - length startDelim - length endDelim

throwStringLexError :: AlexInput -> StringLexError -> P a
throwStringLexError i (StringLexError e pos) =
  setInput (advanceInputTo pos i) >> lexError e

-- | Lex an overloaded-label literal of the form @#"..."@.
tok_quoted_label :: Action
tok_quoted_label span buf len _buf2 = do
  s <- lex_chars ("#\"", "\"") span buf len
  pure $ L span (ITlabelvarid src (mkFastString s))
  where
    -- skip leading '#'
    src = SourceText . mkFastString . drop 1 $ lexemeToString buf len

-- | Lex a character literal; a trailing @#@ makes it a primitive char.
tok_char :: Action
tok_char span buf len _buf2 = do
  c <- lex_chars ("'", "'") span buf (if endsInHash then len - 1 else len) >>= \case
    [c] -> pure c
    s -> panic $ "tok_char expected exactly one character, got: " ++ show s
  pure . L span $
    if endsInHash
      then ITprimchar src c
      else ITchar src c
  where
    src = SourceText $ lexemeToFastString buf len
    endsInHash = currentChar (offsetBytes (len - 1) buf) == '#'

-- -----------------------------------------------------------------------------
-- QuasiQuote

-- | Lex a qualified quasiquote, e.g. @[Mod.qq| ... |]@.
lex_qquasiquote_tok :: Action
lex_qquasiquote_tok span buf len _buf2 = do
  let (qual, quoter) = splitQualName (stepOn buf) (len - 2) False
  quoteStart <- getParsedLoc
  quote <- lex_quasiquote (psRealLoc quoteStart) ""
  end <- getParsedLoc
  return (L (mkPsSpan (psSpanStart span) end)
           (ITqQuasiQuote (qual,
                           quoter,
                           mkFastString (reverse quote),
                           mkPsSpan quoteStart end)))

-- | Lex an unqualified quasiquote, e.g. @[qq| ... |]@.
lex_quasiquote_tok :: Action
lex_quasiquote_tok span buf len _buf2 = do
  let quoter = tail (lexemeToString buf (len - 1))
                -- 'tail' drops the initial '[',
                -- while the -1 drops the trailing '|'
  quoteStart <- getParsedLoc
  quote <- lex_quasiquote (psRealLoc quoteStart) ""
  end <- getParsedLoc
  return (L (mkPsSpan (psSpanStart span) end)
           (ITquasiQuote (mkFastString quoter,
                          mkFastString (reverse quote),
                          mkPsSpan quoteStart end)))

-- | Collect quasiquote body characters (reversed accumulator) up to the
-- closing @|]@.
lex_quasiquote :: RealSrcLoc -> String -> P String
lex_quasiquote start s = do
  i <- getInput
  case alexGetChar' i of
    Nothing -> quasiquote_error start

    -- NB: The string "|]" terminates the quasiquote,
    -- with absolutely no escaping. See the extensive
    -- discussion on #5348 for why there is no
    -- escape handling.
    Just ('|',i)
        | Just (']',i) <- alexGetChar' i
        -> do { setInput i; return s }

    Just (c, i) -> do
         setInput i; lex_quasiquote start (c : s)

quasiquote_error :: RealSrcLoc -> P a
quasiquote_error start = do
  (AI end buf) <- getInput
  reportLexError start (psRealLoc end) buf
    (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc (PsErrLexer LexUnterminatedQQ k))

-- -----------------------------------------------------------------------------
-- Unicode Smart Quote detection (#21843)

-- | Alex predicate: did the rule just match a Unicode "smart" quote?
isSmartQuote :: AlexAccPred ExtsBitmap
isSmartQuote _ _ _ (AI _ buf) =
  let c = prevChar buf ' '
  in isSingleSmartQuote c || isDoubleSmartQuote c

throwSmartQuoteError :: Char -> PsLoc -> P a
throwSmartQuoteError c loc = addFatalError err
  where
    err =
      mkPlainErrorMsgEnvelope (mkSrcSpanPs (mkPsSpan loc loc)) $
        PsErrUnicodeCharLooksLike c correct_char correct_char_name
    (correct_char, correct_char_name) =
        if isSingleSmartQuote c
          then ('\'', "Single Quote")
          else ('"', "Quotation Mark")

-- | Throw a smart quote error, where the smart quote was the last character
-- lexed
smart_quote_error :: Action
smart_quote_error span _ _ buf2 = do
  let c = prevChar buf2
            (panic "smart_quote_error unexpectedly called on beginning of input")
  throwSmartQuoteError c (psSpanStart span)

-- Note [Bare smart quote error]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- A smart quote inside of a string is allowed, but if a complete valid string
-- couldn't be lexed, we want to see if there's a smart quote that the user
-- thought ended the string, but in fact didn't.
-- ----------------------------------------------------------------------------- -- Warnings warnTab :: Action warnTab srcspan _buf _len _buf2 = do addTabWarning (psRealSpan srcspan) lexToken warnThen :: PsMessage -> Action -> Action warnThen warning action srcspan buf len buf2 = do addPsMessage (RealSrcSpan (psRealSpan srcspan) Strict.Nothing) warning action srcspan buf len buf2 -- ----------------------------------------------------------------------------- -- The Parse Monad -- | Do we want to generate ';' layout tokens? In some cases we just want to -- generate '}', e.g. in MultiWayIf we don't need ';'s because '|' separates -- alternatives (unlike a `case` expression where we need ';' to as a separator -- between alternatives). type GenSemic = Bool generateSemic, dontGenerateSemic :: GenSemic generateSemic = True dontGenerateSemic = False data LayoutContext = NoLayout | Layout !Int !GenSemic deriving Show -- | The result of running a parser. newtype ParseResult a = PR (# (# PState, a #) | PState #) -- | The parser has consumed a (possibly empty) prefix of the input and produced -- a result. Use 'getPsMessages' to check for accumulated warnings and non-fatal -- errors. -- -- The carried parsing state can be used to resume parsing. pattern POk :: PState -> a -> ParseResult a pattern POk s a = PR (# (# s , a #) | #) -- | The parser has consumed a (possibly empty) prefix of the input and failed. -- -- The carried parsing state can be used to resume parsing. It is the state -- right before failure, including the fatal parse error. 'getPsMessages' and -- 'getPsErrorMessages' must return a non-empty bag of errors. pattern PFailed :: PState -> ParseResult a pattern PFailed s = PR (# | s #) {-# COMPLETE POk, PFailed #-} -- | Test whether a 'WarningFlag' is set warnopt :: WarningFlag -> ParserOpts -> Bool warnopt f options = f `EnumSet.member` pWarningFlags options -- | Parser options. -- -- See 'mkParserOpts' to construct this. 
data ParserOpts = ParserOpts
  { pExtsBitmap     :: !ExtsBitmap -- ^ bitmap of permitted extensions
  , pDiagOpts       :: !DiagOpts
    -- ^ Options to construct diagnostic messages.
  , pSupportedExts  :: [String]
    -- ^ supported extensions (only used for suggestions in error messages)
  }

-- | The warning flags in force, extracted from the diagnostic options.
pWarningFlags :: ParserOpts -> EnumSet WarningFlag
pWarningFlags opts = diag_warning_flags (pDiagOpts opts)

-- | Haddock comment as produced by the lexer. These are accumulated in 'PState'
-- and then processed in "GHC.Parser.PostProcess.Haddock". The location of the
-- 'HsDocString's spans over the contents of the docstring - i.e. it does not
-- include the decorator ("-- |", "{-|" etc.)
data HdkComment
  = HdkCommentNext HsDocString
  | HdkCommentPrev HsDocString
  | HdkCommentNamed String HsDocString
  | HdkCommentSection Int HsDocString
  deriving Show

-- | The full lexer/parser state threaded through the 'P' monad.
data PState = PState {
        buffer     :: StringBuffer,
        options    :: ParserOpts,
        warnings   :: Messages PsMessage,
        errors     :: Messages PsMessage,
        tab_first  :: Strict.Maybe RealSrcSpan,
                          -- pos of first tab warning in the file
        tab_count  :: !Word,
                          -- number of tab warnings in the file
        last_tk    :: Strict.Maybe (PsLocated Token),
                          -- last non-comment token
        prev_loc   :: PsSpan,
                          -- pos of previous non-virtual token, including comments,
        last_loc   :: PsSpan,  -- pos of current token
        last_len   :: !Int,    -- len of current token
        loc        :: PsLoc,   -- current loc (end of prev token + 1)
        context    :: [LayoutContext],
        lex_state  :: [Int],   -- stack of Alex start codes
        srcfiles   :: [FastString],
        -- Used in the alternative layout rule:
        -- These tokens are the next ones to be sent out. They are
        -- just blindly emitted, without the rule looking at them again:
        alr_pending_implicit_tokens :: [PsLocated Token],
        -- This is the next token to be considered or, if it is Nothing,
        -- we need to get the next token from the input stream:
        alr_next_token :: Maybe (PsLocated Token),
        -- This is what we consider to be the location of the last token
        -- emitted:
        alr_last_loc :: PsSpan,
        -- The stack of layout contexts:
        alr_context :: [ALRContext],
        -- Are we expecting a '{'? If it's Just, then the ALRLayout tells
        -- us what sort of layout the '{' will open:
        alr_expecting_ocurly :: Maybe ALRLayout,
        -- Have we just had the '}' for a let block? If so, than an 'in'
        -- token doesn't need to close anything:
        alr_justClosedExplicitLetBlock :: Bool,

        -- The next three are used to implement Annotations giving the
        -- locations of 'noise' tokens in the source, so that users of
        -- the GHC API can do source to source conversions.
        -- See Note [exact print annotations] in GHC.Parser.Annotation
        eof_pos :: Strict.Maybe (Strict.Pair RealSrcSpan RealSrcSpan),
                          -- pos, gap to prior token
        header_comments :: Strict.Maybe [LEpaComment],
        comment_q :: [LEpaComment],

        -- Haddock comments accumulated in ascending order of their location
        -- (BufPos). We use OrdList to get O(1) snoc.
        --
        -- See Note [Adding Haddock comments to the syntax tree] in
        -- GHC.Parser.PostProcess.Haddock
        hdk_comments :: OrdList (PsLocated HdkComment)
     }

-- last_loc and last_len are used when generating error messages,
-- and in pushCurrentContext only. Sigh, if only Happy passed the
-- current token to happyError, we could at least get rid of last_len.
-- Getting rid of last_loc would require finding another way to
-- implement pushCurrentContext (which is only called from one place).

-- AZ question: setLastToken which sets last_loc and last_len
-- is called when processing AlexToken, immediately prior to
-- calling the action in the token.  So from the perspective
-- of the action, it is the *current* token.  Do I understand
-- correctly?

-- | A context frame for the alternative layout rule.
data ALRContext = ALRNoLayout Bool{- does it contain commas? -}
                              Bool{- is it a 'let' block? -}
                | ALRLayout ALRLayout Int

-- | Which construct opened the current ALR layout block.
data ALRLayout = ALRLayoutLet
               | ALRLayoutWhere
               | ALRLayoutOf
               | ALRLayoutDo

-- | The parsing monad, isomorphic to @StateT PState Maybe@.
newtype P a = P { unP :: PState -> ParseResult a }

instance Functor P where
  fmap = liftM

instance Applicative P where
  pure = returnP
  (<*>) = ap

instance Monad P where
  (>>=) = thenP

-- | Lift a value into 'P'. NB: the value is forced with `seq` before being
-- stored, keeping the result shallow.
returnP :: a -> P a
returnP a = a `seq` (P $ \s -> POk s a)

-- | Monadic bind for 'P': thread the state on success, propagate the failing
-- state unchanged on failure.
thenP :: P a -> (a -> P b) -> P b
(P m) `thenP` k = P $ \ s ->
        case m s of
                POk s1 a   -> (unP (k a)) s1
                PFailed s1 -> PFailed s1

-- | Fail fatally with a message located at the span of the last token.
failMsgP :: (SrcSpan -> MsgEnvelope PsMessage) -> P a
failMsgP f = do
  pState <- getPState
  addFatalError (f (mkSrcSpanPs (last_loc pState)))

-- | Fail fatally with a message over an explicit start/end location pair.
failLocMsgP :: RealSrcLoc -> RealSrcLoc -> (SrcSpan -> MsgEnvelope PsMessage) -> P a
failLocMsgP loc1 loc2 f =
  addFatalError (f (RealSrcSpan (mkRealSrcSpan loc1 loc2) Strict.Nothing))

getPState :: P PState
getPState = P $ \s -> POk s s

getExts :: P ExtsBitmap
getExts = P $ \s -> POk s (pExtsBitmap . options $ s)

-- | Modify the extension bitmap in place (used e.g. by pragmas that toggle
-- extensions mid-file).
setExts :: (ExtsBitmap -> ExtsBitmap) -> P ()
setExts f = P $ \s -> POk s {
    options =
      let p = options s
      in  p { pExtsBitmap = f (pExtsBitmap p) }
  } ()

-- | Reset the real source location (e.g. for a LINE pragma) while keeping
-- the buffer position component untouched.
setSrcLoc :: RealSrcLoc -> P ()
setSrcLoc new_loc =
  P $ \s@(PState{ loc = PsLoc _ buf_loc }) ->
  POk s{ loc = PsLoc new_loc buf_loc } ()

getRealSrcLoc :: P RealSrcLoc
getRealSrcLoc = P $ \s@(PState{ loc=loc }) -> POk s (psRealLoc loc)

getParsedLoc :: P PsLoc
getParsedLoc = P $ \s@(PState{ loc=loc }) -> POk s loc

addSrcFile :: FastString -> P ()
addSrcFile f = P $ \s -> POk s{ srcfiles = f : srcfiles s } ()

-- | Record the EOF position plus the gap back to the prior token,
-- for exact-print annotations.
setEofPos :: RealSrcSpan -> RealSrcSpan -> P ()
setEofPos span gap = P $ \s -> POk s{ eof_pos = Strict.Just (span `Strict.And` gap) } ()

setLastToken :: PsSpan -> Int -> P ()
setLastToken loc len = P $ \s -> POk s {
  last_loc=loc,
  last_len=len
  } ()

-- | Remember the last real (non-comment) token. Zero-width (virtual) tokens
-- do not advance 'prev_loc'.
setLastTk :: PsLocated Token -> P ()
setLastTk tk@(L l _) = P $ \s ->
  if isPointRealSpan (psRealSpan l)
    then POk s { last_tk = Strict.Just tk } ()
    else POk s { last_tk = Strict.Just tk
               , prev_loc = l } ()

setLastComment :: PsLocated Token -> P ()
setLastComment (L l _) = P $ \s -> POk s { prev_loc = l } ()

getLastTk :: P (Strict.Maybe (PsLocated Token))
getLastTk = P $ \s@(PState { last_tk = last_tk }) -> POk s last_tk

-- see Note [PsSpan in Comments]
getLastLocIncludingComments :: P PsSpan
getLastLocIncludingComments = P $ \s@(PState { prev_loc = prev_loc }) -> POk s prev_loc

getLastLoc :: P PsSpan
getLastLoc = P $ \s@(PState { last_loc = last_loc }) -> POk s last_loc

-- | Alex's view of the input: current location plus the remaining buffer.
data AlexInput = AI !PsLoc !StringBuffer
  deriving (Show)

{- Note [Unicode in Alex]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Although newer versions of Alex support unicode, this grammar is processed with
the old style '--latin1' behaviour. This means that when implementing the
functions

    alexGetByte :: AlexInput -> Maybe (Word8,AlexInput)
    alexInputPrevChar :: AlexInput -> Char

which Alex uses to take apart our 'AlexInput', we must

  * return a latin1 character in the 'Word8' that 'alexGetByte' expects
  * return a latin1 character in 'alexInputPrevChar'.

We handle this in 'adjustChar' by squishing entire classes of unicode
characters into single bytes.
-}

{-# INLINE adjustChar #-}
-- | Squash a (possibly Unicode) 'Char' into the single latin1 byte that the
-- Alex-generated automaton consumes. See Note [Unicode in Alex].
adjustChar :: Char -> Word8
adjustChar c = adj_c
  where non_graphic     = 0x00
        upper           = 0x01
        lower           = 0x02
        digit           = 0x03
        symbol          = 0x04
        space           = 0x05
        other_graphic   = 0x06
        uniidchar       = 0x07

        adj_c
          | c <= '\x07' = non_graphic
          | c <= '\x7f' = fromIntegral (ord c)
          -- Alex doesn't handle Unicode, so when Unicode
          -- character is encountered we output these values
          -- with the actual character value hidden in the state.
          | otherwise =
                -- NB: The logic behind these definitions is also reflected
                -- in "GHC.Utils.Lexeme"
                -- Any changes here should likely be reflected there.
                case generalCategory c of
                  UppercaseLetter       -> upper
                  LowercaseLetter       -> lower
                  TitlecaseLetter       -> upper
                  ModifierLetter        -> uniidchar -- see #10196
                  OtherLetter           -> lower -- see #1103
                  NonSpacingMark        -> uniidchar -- see #7650
                  SpacingCombiningMark  -> other_graphic
                  EnclosingMark         -> other_graphic
                  DecimalNumber         -> digit
                  LetterNumber          -> digit
                  OtherNumber           -> digit -- see #4373
                  ConnectorPunctuation  -> symbol
                  DashPunctuation       -> symbol
                  OpenPunctuation       -> other_graphic
                  ClosePunctuation      -> other_graphic
                  InitialQuote          -> other_graphic
                  FinalQuote            -> other_graphic
                  OtherPunctuation      -> symbol
                  MathSymbol            -> symbol
                  CurrencySymbol        -> symbol
                  ModifierSymbol        -> symbol
                  OtherSymbol           -> symbol
                  Space                 -> space
                  _other                -> non_graphic

-- Getting the previous 'Char' isn't enough here - we need to convert it into
-- the same format that 'alexGetByte' would have produced.
--
-- See Note [Unicode in Alex] and #13986.
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (AI _ buf) = unsafeChr (fromIntegral (adjustChar pc))
  where pc = prevChar buf '\n'

-- | Convert an 'Int' to a 'Char' with no range check; callers guarantee the
-- value is a valid (here: latin1-range) code point.
unsafeChr :: Int -> Char
unsafeChr (I# c) = GHC.Exts.C# (GHC.Exts.chr# c)

-- backwards compatibility for Alex 2.x
alexGetChar :: AlexInput -> Maybe (Char,AlexInput)
alexGetChar inp = case alexGetByte inp of
                    Nothing    -> Nothing
                    Just (b,i) -> c `seq` Just (c,i)
                       where c = unsafeChr $ fromIntegral b

-- See Note [Unicode in Alex]
alexGetByte :: AlexInput -> Maybe (Word8,AlexInput)
alexGetByte (AI loc s)
  | atEnd s   = Nothing
  | otherwise = byte `seq` loc' `seq` s' `seq`
                --trace (show (ord c)) $
                Just (byte, (AI loc' s'))
  where (c,s') = nextChar s
        loc'   = advancePsLoc loc c
        byte   = adjustChar c

{-# INLINE alexGetChar' #-}
-- This version does not squash unicode characters, it is used when
-- lexing strings.
alexGetChar' :: AlexInput -> Maybe (Char,AlexInput)
alexGetChar' (AI loc s)
  | atEnd s   = Nothing
  | otherwise = c `seq` loc' `seq` s' `seq`
                --trace (show (ord c)) $
                Just (c, (AI loc' s'))
  where (c,s') = nextChar s
        loc'   = advancePsLoc loc c

-- | Advance the given input N bytes.
advanceInputBytes :: Int -> AlexInput -> AlexInput
advanceInputBytes n i0@(AI _ buf0) = advanceInputTo (cur buf0 + n) i0

-- | Advance the given input to the given position.
advanceInputTo :: Int -> AlexInput -> AlexInput
advanceInputTo pos = go
  where
    go i@(AI _ buf)
      | cur buf >= pos = i
      | Just (_, i') <- alexGetChar' i = go i'
      | otherwise = i  -- reached the end, just return the last input

getInput :: P AlexInput
getInput = P $ \s@PState{ loc=l, buffer=b } -> POk s (AI l b)

setInput :: AlexInput -> P ()
setInput (AI l b) = P $ \s -> POk s{ loc=l, buffer=b } ()

nextIsEOF :: P Bool
nextIsEOF = isEOF <$> getInput

isEOF :: AlexInput -> Bool
isEOF (AI _ buf) = atEnd buf

-- | Push a new Alex start code onto the lexer-state stack.
pushLexState :: Int -> P ()
pushLexState ls = P $ \s@PState{ lex_state=l } -> POk s{lex_state=ls:l} ()

-- | Pop and return the top Alex start code.
-- NOTE(review): the pattern match is partial; presumably the stack is
-- never empty by construction (initial state seeds it) — confirm upstream.
popLexState :: P Int
popLexState = P $ \s@PState{ lex_state=ls:l } -> POk s{ lex_state=l } ls

getLexState :: P Int
getLexState = P $ \s@PState{ lex_state=ls:_ } -> POk s ls

-- | Take (and clear) the token queued by the alternative layout rule, if any.
popNextToken :: P (Maybe (PsLocated Token))
popNextToken
    = P $ \s@PState{ alr_next_token = m } ->
              POk (s {alr_next_token = Nothing}) m

-- | Is the alternative layout rule currently inside any context, expecting
-- an open brace, or holding pending implicit tokens?
activeContext :: P Bool
activeContext = do
  ctxt <- getALRContext
  expc <- getAlrExpectingOCurly
  impt <- implicitTokenPending
  case (ctxt,expc) of
    ([],Nothing) -> return impt
    _other       -> return True

resetAlrLastLoc :: FastString -> P ()
resetAlrLastLoc file =
  P $ \s@(PState {alr_last_loc = PsSpan _ buf_span}) ->
  POk s{ alr_last_loc = PsSpan (alrInitialLoc file) buf_span } ()

setAlrLastLoc :: PsSpan -> P ()
setAlrLastLoc l = P $ \s -> POk (s {alr_last_loc = l}) ()

getAlrLastLoc :: P PsSpan
getAlrLastLoc = P $ \s@(PState {alr_last_loc = l}) -> POk s l

getALRContext :: P [ALRContext]
getALRContext = P $ \s@(PState {alr_context = cs}) -> POk s cs

setALRContext :: [ALRContext] -> P ()
setALRContext cs = P $ \s -> POk (s {alr_context = cs}) ()

getJustClosedExplicitLetBlock :: P Bool
getJustClosedExplicitLetBlock
 = P $ \s@(PState {alr_justClosedExplicitLetBlock = b}) -> POk s b

setJustClosedExplicitLetBlock :: Bool -> P ()
setJustClosedExplicitLetBlock b
 = P $ \s -> POk (s {alr_justClosedExplicitLetBlock = b}) ()

setNextToken :: PsLocated Token -> P ()
setNextToken t = P $ \s -> POk (s {alr_next_token = Just t}) ()

implicitTokenPending :: P Bool
implicitTokenPending
    = P $ \s@PState{ alr_pending_implicit_tokens = ts } ->
              case ts of
              [] -> POk s False
              _  -> POk s True

popPendingImplicitToken :: P (Maybe (PsLocated Token))
popPendingImplicitToken
    = P $ \s@PState{ alr_pending_implicit_tokens = ts } ->
              case ts of
              [] -> POk s Nothing
              (t : ts') -> POk (s {alr_pending_implicit_tokens = ts'}) (Just t)

setPendingImplicitTokens :: [PsLocated Token] -> P ()
setPendingImplicitTokens ts = P $ \s -> POk (s {alr_pending_implicit_tokens = ts}) ()

getAlrExpectingOCurly :: P (Maybe ALRLayout)
getAlrExpectingOCurly = P $ \s@(PState {alr_expecting_ocurly = b}) -> POk s b

setAlrExpectingOCurly :: Maybe ALRLayout -> P ()
setAlrExpectingOCurly b = P $ \s -> POk (s {alr_expecting_ocurly = b}) ()

-- | For reasons of efficiency, boolean parsing flags (eg, language extensions
-- or whether we are currently in a @RULE@ pragma) are represented by a bitmap
-- stored in a @Word64@.
type ExtsBitmap = Word64

xbit :: ExtBits -> ExtsBitmap
xbit = bit . fromEnum

xtest :: ExtBits -> ExtsBitmap -> Bool
xtest ext xmap = testBit xmap (fromEnum ext)

xset :: ExtBits -> ExtsBitmap -> ExtsBitmap
xset ext xmap = setBit xmap (fromEnum ext)

xunset :: ExtBits -> ExtsBitmap -> ExtsBitmap
xunset ext xmap = clearBit xmap (fromEnum ext)

-- | Various boolean flags, mostly language extensions, that impact lexing and
-- parsing. Note that a handful of these can change during lexing/parsing.
data ExtBits
  -- Flags that are constant once parsing starts
  = FfiBit
  | InterruptibleFfiBit
  | CApiFfiBit
  | ArrowsBit
  | ThBit
  | ThQuotesBit
  | IpBit
  | OverloadedLabelsBit -- #x overloaded labels
  | ExplicitForallBit -- the 'forall' keyword
  | BangPatBit -- Tells the parser to understand bang-patterns
               -- (doesn't affect the lexer)
  | PatternSynonymsBit -- pattern synonyms
  | HaddockBit -- Lex and parse Haddock comments
  | MagicHashBit -- "#" in both functions and operators
  | RecursiveDoBit -- mdo
  | QualifiedDoBit -- .do and .mdo
  | UnicodeSyntaxBit -- the forall symbol, arrow symbols, etc
  | UnboxedParensBit -- (# and #)
  | DatatypeContextsBit
  | MonadComprehensionsBit
  | TransformComprehensionsBit
  | QqBit -- enable quasiquoting
  | RawTokenStreamBit -- producing a token stream with all comments included
  | AlternativeLayoutRuleBit
  | ALRTransitionalBit
  | RelaxedLayoutBit
  | NondecreasingIndentationBit
  | SafeHaskellBit
  | TraditionalRecordSyntaxBit
  | ExplicitNamespacesBit
  | LambdaCaseBit
  | BinaryLiteralsBit
  | NegativeLiteralsBit
  | HexFloatLiteralsBit
  | StaticPointersBit
  | NumericUnderscoresBit
  | StarIsTypeBit
  | BlockArgumentsBit
  | NPlusKPatternsBit
  | DoAndIfThenElseBit
  | MultiWayIfBit
  | GadtSyntaxBit
  | ImportQualifiedPostBit
  | LinearTypesBit
  | NoLexicalNegationBit -- See Note [Why not LexicalNegationBit]
  | OverloadedRecordDotBit
  | OverloadedRecordUpdateBit
  | OrPatternsBit
  | ExtendedLiteralsBit
  | ListTuplePunsBit
  | ViewPatternsBit
  | RequiredTypeArgumentsBit
  | MultilineStringsBit

  -- Flags that are updated once parsing starts
  | InRulePragBit
  | InNestedCommentBit -- See Note [Nested comment line pragmas]
  | UsePosPragsBit
    -- ^ If this is enabled, '{-# LINE ... -#}' and '{-# COLUMN ... #-}'
    -- update the internal position. Otherwise, those pragmas are lexed as
    -- tokens of their own.
  deriving Enum
  -- NB: 'deriving Enum' fixes each constructor's bit index via 'fromEnum';
  -- the order of constructors above is therefore significant.

{-# INLINE mkParserOpts #-}
mkParserOpts
  :: EnumSet LangExt.Extension  -- ^ permitted language extensions enabled
  -> DiagOpts                   -- ^ diagnostic options
  -> [String]                   -- ^ Supported Languages and Extensions
  -> Bool                       -- ^ are safe imports on?
  -> Bool                       -- ^ keeping Haddock comment tokens
  -> Bool                       -- ^ keep regular comment tokens
  -> Bool
  -- ^ If this is enabled, '{-# LINE ... -#}' and '{-# COLUMN ... #-}' update
  -- the internal position kept by the parser. Otherwise, those pragmas are
  -- lexed as 'ITline_prag' and 'ITcolumn_prag' tokens.
  -> ParserOpts
  -- ^ Given exactly the information needed, set up the 'ParserOpts'
mkParserOpts extensionFlags diag_opts supported
  safeImports isHaddock rawTokStream usePosPrags =
    ParserOpts {
      pDiagOpts      = diag_opts
    , pExtsBitmap    = safeHaskellBit .|. langExtBits .|. optBits
    , pSupportedExts = supported
    }
  where
    safeHaskellBit = SafeHaskellBit `setBitIf` safeImports
    langExtBits =
          FfiBit                      `xoptBit` LangExt.ForeignFunctionInterface
      .|. InterruptibleFfiBit         `xoptBit` LangExt.InterruptibleFFI
      .|. CApiFfiBit                  `xoptBit` LangExt.CApiFFI
      .|. ArrowsBit                   `xoptBit` LangExt.Arrows
      .|. ThBit                       `xoptBit` LangExt.TemplateHaskell
      .|. ThQuotesBit                 `xoptBit` LangExt.TemplateHaskellQuotes
      .|. QqBit                       `xoptBit` LangExt.QuasiQuotes
      .|. IpBit                       `xoptBit` LangExt.ImplicitParams
      .|. OverloadedLabelsBit         `xoptBit` LangExt.OverloadedLabels
      .|. ExplicitForallBit           `xoptBit` LangExt.ExplicitForAll
      .|. BangPatBit                  `xoptBit` LangExt.BangPatterns
      .|. MagicHashBit                `xoptBit` LangExt.MagicHash
      .|. RecursiveDoBit              `xoptBit` LangExt.RecursiveDo
      .|. QualifiedDoBit              `xoptBit` LangExt.QualifiedDo
      .|. UnicodeSyntaxBit            `xoptBit` LangExt.UnicodeSyntax
      .|. UnboxedParensBit            `orXoptsBit` [LangExt.UnboxedTuples, LangExt.UnboxedSums]
      .|. DatatypeContextsBit         `xoptBit` LangExt.DatatypeContexts
      .|. TransformComprehensionsBit  `xoptBit` LangExt.TransformListComp
      .|. MonadComprehensionsBit      `xoptBit` LangExt.MonadComprehensions
      .|. AlternativeLayoutRuleBit    `xoptBit` LangExt.AlternativeLayoutRule
      .|. ALRTransitionalBit          `xoptBit` LangExt.AlternativeLayoutRuleTransitional
      .|. RelaxedLayoutBit            `xoptBit` LangExt.RelaxedLayout
      .|. NondecreasingIndentationBit `xoptBit` LangExt.NondecreasingIndentation
      .|. TraditionalRecordSyntaxBit  `xoptBit` LangExt.TraditionalRecordSyntax
      .|. ExplicitNamespacesBit       `xoptBit` LangExt.ExplicitNamespaces
      .|. LambdaCaseBit               `xoptBit` LangExt.LambdaCase
      .|. BinaryLiteralsBit           `xoptBit` LangExt.BinaryLiterals
      .|. NegativeLiteralsBit         `xoptBit` LangExt.NegativeLiterals
      .|. HexFloatLiteralsBit         `xoptBit` LangExt.HexFloatLiterals
      .|. PatternSynonymsBit          `xoptBit` LangExt.PatternSynonyms
      .|. StaticPointersBit           `xoptBit` LangExt.StaticPointers
      .|. NumericUnderscoresBit       `xoptBit` LangExt.NumericUnderscores
      .|. StarIsTypeBit               `xoptBit` LangExt.StarIsType
      .|. BlockArgumentsBit           `xoptBit` LangExt.BlockArguments
      .|. NPlusKPatternsBit           `xoptBit` LangExt.NPlusKPatterns
      .|. DoAndIfThenElseBit          `xoptBit` LangExt.DoAndIfThenElse
      .|. MultiWayIfBit               `xoptBit` LangExt.MultiWayIf
      .|. GadtSyntaxBit               `xoptBit` LangExt.GADTSyntax
      .|. ImportQualifiedPostBit      `xoptBit` LangExt.ImportQualifiedPost
      .|. LinearTypesBit              `xoptBit` LangExt.LinearTypes
      .|. NoLexicalNegationBit        `xoptNotBit` LangExt.LexicalNegation
            -- See Note [Why not LexicalNegationBit]
      .|. OverloadedRecordDotBit      `xoptBit` LangExt.OverloadedRecordDot
      .|. OverloadedRecordUpdateBit   `xoptBit` LangExt.OverloadedRecordUpdate
            -- Enable testing via 'getBit OverloadedRecordUpdateBit' in the parser
            -- (RecordDotSyntax parsing uses that information).
      .|. OrPatternsBit               `xoptBit` LangExt.OrPatterns
      .|. ExtendedLiteralsBit         `xoptBit` LangExt.ExtendedLiterals
      .|. ListTuplePunsBit            `xoptBit` LangExt.ListTuplePuns
      .|. ViewPatternsBit             `xoptBit` LangExt.ViewPatterns
      .|. RequiredTypeArgumentsBit    `xoptBit` LangExt.RequiredTypeArguments
      .|. MultilineStringsBit         `xoptBit` LangExt.MultilineStrings
    optBits =
          HaddockBit        `setBitIf` isHaddock
      .|. RawTokenStreamBit `setBitIf` rawTokStream
      .|. UsePosPragsBit    `setBitIf` usePosPrags

    xoptBit bit ext = bit `setBitIf` EnumSet.member ext extensionFlags
    xoptNotBit bit ext = bit `setBitIf` not (EnumSet.member ext extensionFlags)
    orXoptsBit bit exts = bit `setBitIf` any (`EnumSet.member` extensionFlags) exts

    setBitIf :: ExtBits -> Bool -> ExtsBitmap
    b `setBitIf` cond | cond      = xbit b
                      | otherwise = 0

-- | Turn off Haddock-comment lexing in an existing 'ParserOpts'.
disableHaddock :: ParserOpts -> ParserOpts
disableHaddock opts = upd_bitmap (xunset HaddockBit)
  where
    upd_bitmap f = opts { pExtsBitmap = f (pExtsBitmap opts) }

-- | Set parser options for parsing OPTIONS pragmas
initPragState :: ParserOpts -> StringBuffer -> RealSrcLoc -> PState
initPragState options buf loc = (initParserState options buf loc)
   { lex_state = [bol, option_prags, 0] }

-- | Creates a parse state from a 'ParserOpts' value
initParserState :: ParserOpts -> StringBuffer -> RealSrcLoc -> PState
initParserState options buf loc =
  PState {
      buffer        = buf,
      options       = options,
      errors        = emptyMessages,
      warnings      = emptyMessages,
      tab_first     = Strict.Nothing,
      tab_count     = 0,
      last_tk       = Strict.Nothing,
      prev_loc      = mkPsSpan init_loc init_loc,
      last_loc      = mkPsSpan init_loc init_loc,
      last_len      = 0,
      loc           = init_loc,
      context       = [],
      lex_state     = [bol, 0],
      srcfiles      = [],
      alr_pending_implicit_tokens = [],
      alr_next_token = Nothing,
      alr_last_loc = PsSpan (alrInitialLoc (fsLit "")) (BufSpan (BufPos 0) (BufPos 0)),
      alr_context = [],
      alr_expecting_ocurly = Nothing,
      alr_justClosedExplicitLetBlock = False,
      eof_pos = Strict.Nothing,
      header_comments = Strict.Nothing,
      comment_q = [],
      hdk_comments = nilOL
    }
  where init_loc = PsLoc loc (BufPos 0)

-- | An mtl-style class for monads that support parsing-related operations.
-- For example, sometimes we make a second pass over the parsing results to validate,
-- disambiguate, or rearrange them, and we do so in the PV monad which cannot consume
-- input but can report parsing errors, check for extension bits, and accumulate
-- parsing annotations. Both P and PV are instances of MonadP.
--
-- MonadP grants us convenient overloading. The other option is to have separate
-- operations for each monad: addErrorP vs addErrorPV, getBitP vs getBitPV, and so on.
--
class Monad m => MonadP m where
  -- | Add a non-fatal error. Use this when the parser can produce a result
  -- despite the error.
  --
  -- For example, when GHC encounters a @forall@ in a type,
  -- but @-XExplicitForAll@ is disabled, the parser constructs @ForAllTy@
  -- as if @-XExplicitForAll@ was enabled, adding a non-fatal error to
  -- the accumulator.
  --
  -- Control flow wise, non-fatal errors act like warnings: they are added
  -- to the accumulator and parsing continues. This allows GHC to report
  -- more than one parse error per file.
  --
  addError :: MsgEnvelope PsMessage -> m ()

  -- | Add a warning to the accumulator.
  -- Use 'getPsMessages' to get the accumulated warnings.
  addWarning :: MsgEnvelope PsMessage -> m ()

  -- | Add a fatal error. This will be the last error reported by the parser, and
  -- the parser will not produce any result, ending in a 'PFailed' state.
  addFatalError :: MsgEnvelope PsMessage -> m a

  -- | Get parser options
  getParserOpts :: m ParserOpts

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  -- that belong within the given span
  allocateCommentsP :: RealSrcSpan -> m EpAnnComments

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  -- that come before or within the given span
  allocatePriorCommentsP :: RealSrcSpan -> m EpAnnComments

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  -- that come after the given span
  allocateFinalCommentsP :: RealSrcSpan -> m EpAnnComments

instance MonadP P where
  addError err
   = P $ \s -> POk s { errors = err `addMessage` errors s} ()

  -- If the warning is meant to be suppressed, GHC will assign
  -- a `SevIgnore` severity and the message will be discarded,
  -- so we can simply add it no matter what.
  addWarning w
   = P $ \s -> POk (s { warnings = w `addMessage` warnings s }) ()

  addFatalError err =
    addError err >> P PFailed

  getParserOpts = P $ \s -> POk s $! options s

  allocateCommentsP ss = P $ \s ->
    if null (comment_q s)
    then POk s emptyComments  -- fast path
    else
      let (comment_q', newAnns) = allocateComments ss (comment_q s)
      in POk s { comment_q = comment_q' } (EpaComments newAnns)

  allocatePriorCommentsP ss = P $ \s ->
    let (header_comments', comment_q', newAnns)
          = allocatePriorComments ss (comment_q s) (header_comments s)
    in POk s { header_comments = header_comments',
               comment_q = comment_q' } (EpaComments newAnns)

  allocateFinalCommentsP ss = P $ \s ->
    let (header_comments', comment_q', newAnns)
          = allocateFinalComments ss (comment_q s) (header_comments s)
    in POk s { header_comments = header_comments',
               comment_q = comment_q' }
         (EpaCommentsBalanced (Strict.fromMaybe [] header_comments') newAnns)

-- | Check if a given flag is currently set in the bitmap.
getBit :: MonadP m => ExtBits -> m Bool
getBit ext = (\opts -> ext `xtest` pExtsBitmap opts) <$> getParserOpts

getCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getCommentsFor (RealSrcSpan l _) = allocateCommentsP l
getCommentsFor _ = return emptyComments

getPriorCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getPriorCommentsFor (RealSrcSpan l _) = allocatePriorCommentsP l
getPriorCommentsFor _ = return emptyComments

getFinalCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getFinalCommentsFor (RealSrcSpan l _) = allocateFinalCommentsP l
getFinalCommentsFor _ = return emptyComments

getEofPos :: P (Strict.Maybe (Strict.Pair RealSrcSpan RealSrcSpan))
getEofPos = P $ \s@(PState { eof_pos = pos }) -> POk s pos

-- | Wrap a 'PsMessage' in an envelope (using the current diagnostic options)
-- and add it to the warning accumulator.
addPsMessage :: MonadP m => SrcSpan -> PsMessage -> m ()
addPsMessage srcspan msg = do
  diag_opts <- pDiagOpts <$> getParserOpts
  addWarning (mkPlainMsgEnvelope diag_opts srcspan msg)

-- | Count a tab occurrence; only the first tab's position is remembered, and
-- nothing is recorded at all unless -Wtabs is enabled.
addTabWarning :: RealSrcSpan -> P ()
addTabWarning srcspan
 = P $ \s@PState{tab_first=tf, tab_count=tc, options=o} ->
       let tf' = tf <|> Strict.Just srcspan
           tc' = tc + 1
           s' = if warnopt Opt_WarnTabs o
                then s{tab_first = tf', tab_count = tc'}
                else s
       in POk s' ()

-- | Get a bag of the errors that have been accumulated so far.
--   Does not take -Werror into account.
getPsErrorMessages :: PState -> Messages PsMessage
getPsErrorMessages p = errors p

-- | Get the warnings and errors accumulated so far.
--   Does not take -Werror into account.
getPsMessages :: PState -> (Messages PsMessage, Messages PsMessage)
getPsMessages p =
  let ws = warnings p
      diag_opts = pDiagOpts (options p)
      -- we add the tabulation warning on the fly because
      -- we count the number of occurrences of tab characters
      ws' = case tab_first p of
        Strict.Nothing -> ws
        Strict.Just tf ->
          let msg = mkPlainMsgEnvelope diag_opts
                      (RealSrcSpan tf Strict.Nothing)
                      (PsWarnTab (tab_count p))
          in msg `addMessage` ws
  in (ws', errors p)

getContext :: P [LayoutContext]
getContext = P $ \s@PState{context=ctx} -> POk s ctx

setContext :: [LayoutContext] -> P ()
setContext ctx = P $ \s -> POk s{context=ctx} ()

-- | Pop the innermost layout context; a parse error at the position of the
-- last token if the context stack is already empty.
popContext :: P ()
popContext = P $ \ s@(PState{ buffer = buf, options = o, context = ctx,
                              last_len = len, last_loc = last_loc }) ->
  case ctx of
        (_:tl) ->
          POk s{ context = tl } ()
        []     ->
          unP (addFatalError $ srcParseErr o buf len (mkSrcSpanPs last_loc)) s

-- Push a new layout context at the indentation of the last token read.
pushCurrentContext :: GenSemic -> P ()
pushCurrentContext gen_semic = P $ \ s@PState{ last_loc=loc, context=ctx } ->
    POk s{context = Layout (srcSpanStartCol (psRealSpan loc)) gen_semic : ctx} ()

-- This is only used at the outer level of a module when the 'module' keyword is
-- missing.
pushModuleContext :: P ()
pushModuleContext = pushCurrentContext generateSemic

-- | Compare the current token's start column against the innermost implicit
-- layout context; also returns whether that context generates semicolons.
-- Outside any implicit layout, the answer is (GT, no-semicolons).
getOffside :: P (Ordering, Bool)
getOffside = P $ \s@PState{last_loc=loc, context=stk} ->
                let offs = srcSpanStartCol (psRealSpan loc) in
                let ord = case stk of
                            Layout n gen_semic : _ ->
                              --trace ("layout: " ++ show n ++ ", offs: " ++ show offs) $
                              (compare offs n, gen_semic)
                            _ ->
                              (GT, dontGenerateSemic)
                in POk s ord

-- ---------------------------------------------------------------------------
-- Construct a parse error

srcParseErr
  :: ParserOpts
  -> StringBuffer       -- current buffer (placed just after the last token)
  -> Int                -- length of the previous token
  -> SrcSpan
  -> MsgEnvelope PsMessage
srcParseErr options buf len loc = mkPlainErrorMsgEnvelope loc (PsErrParse token details)
  where
   -- the offending token, recovered by backing up 'len' bytes in the buffer
   token = lexemeToString (offsetBytes (-len) buf) len
   pattern_ = decodePrevNChars 8 buf
   last100 = decodePrevNChars 100 buf
   -- heuristics feeding better "perhaps you meant" suggestions
   doInLast100 = "do" `isInfixOf` last100
   mdoInLast100 = "mdo" `isInfixOf` last100
   th_enabled = ThQuotesBit `xtest` pExtsBitmap options
   ps_enabled = PatternSynonymsBit `xtest` pExtsBitmap options
   details = PsErrParseDetails {
     ped_th_enabled      = th_enabled
   , ped_do_in_last_100  = doInLast100
   , ped_mdo_in_last_100 = mdoInLast100
   , ped_pat_syn_enabled = ps_enabled
   , ped_pattern_parsed  = pattern_ == "pattern "
   }

-- Report a parse failure, giving the span of the previous token as
-- the location of the error.  This is the entry point for errors
-- detected during parsing.
srcParseFail :: P a
srcParseFail = P $ \s@PState{ buffer = buf, options = o, last_len = len,
                              last_loc = last_loc } ->
    unP (addFatalError $ srcParseErr o buf len (mkSrcSpanPs last_loc)) s

-- A lexical error is reported at a particular position in the source file,
-- not over a token range.
lexError :: LexErr -> P a
lexError e = do
  loc <- getRealSrcLoc
  (AI end buf) <- getInput
  reportLexError loc (psRealLoc end) buf
    (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrLexer e k)

-- -----------------------------------------------------------------------------
-- This is the top-level function: called from the parser each time a
-- new token is to be read from the input.

lexer, lexerDbg :: Bool -> (Located Token -> P a) -> P a

lexer queueComments cont = do
  alr <- getBit AlternativeLayoutRuleBit
  let lexTokenFun = if alr then lexTokenAlr else lexToken
  (L span tok) <- lexTokenFun
  --trace ("token: " ++ show tok) $ do
  -- Comments are either queued (for exact-print) and skipped, or passed on.
  if (queueComments && isComment tok)
    then queueComment (L (psRealSpan span) tok) >> lexer queueComments cont
    else cont (L (mkSrcSpanPs span) tok)

-- Use this instead of 'lexer' in GHC.Parser to dump the tokens for debugging.
lexerDbg queueComments cont = lexer queueComments contDbg
  where
    contDbg tok = trace ("token: " ++ show (unLoc tok)) (cont tok)

-- | Lex one token under the alternative layout rule: drain pending implicit
-- tokens first, then queued tokens, then the real input; finally note which
-- keywords make us expect an opening '{'.
lexTokenAlr :: P (PsLocated Token)
lexTokenAlr = do mPending <- popPendingImplicitToken
                 t <- case mPending of
                      Nothing ->
                          do mNext <- popNextToken
                             t <- case mNext of
                                  Nothing -> lexToken
                                  Just next -> return next
                             alternativeLayoutRuleToken t
                      Just t ->
                          return t
                 setAlrLastLoc (getLoc t)
                 case unLoc t of
                     ITwhere  -> setAlrExpectingOCurly (Just ALRLayoutWhere)
                     ITlet    -> setAlrExpectingOCurly (Just ALRLayoutLet)
                     ITof     -> setAlrExpectingOCurly (Just ALRLayoutOf)
                     ITlcase  -> setAlrExpectingOCurly (Just ALRLayoutOf)
                     ITlcases -> setAlrExpectingOCurly (Just ALRLayoutOf)
                     ITdo  _  -> setAlrExpectingOCurly (Just ALRLayoutDo)
                     ITmdo _  -> setAlrExpectingOCurly (Just ALRLayoutDo)
                     ITrec    -> setAlrExpectingOCurly (Just ALRLayoutDo)
                     _        -> return ()
                 return t

-- | The heart of the alternative layout rule: given the next real token,
-- possibly emit virtual open/close braces or semicolons instead, queueing
-- the real token (via 'setNextToken') or further implicit tokens for later.
-- The case order below is semantically significant.
alternativeLayoutRuleToken :: PsLocated Token -> P (PsLocated Token)
alternativeLayoutRuleToken t
    = do context <- getALRContext
         lastLoc <- getAlrLastLoc
         mExpectingOCurly <- getAlrExpectingOCurly
         transitional <- getBit ALRTransitionalBit
         justClosedExplicitLetBlock <- getJustClosedExplicitLetBlock
         setJustClosedExplicitLetBlock False
         let thisLoc = getLoc t
             thisCol = srcSpanStartCol (psRealSpan thisLoc)
             newLine = srcSpanStartLine (psRealSpan thisLoc) > srcSpanEndLine (psRealSpan lastLoc)
         case (unLoc t, context, mExpectingOCurly) of
             -- This case handles a GHC extension to the original H98
             -- layout rule...
             (ITocurly, _, Just alrLayout) ->
                 do setAlrExpectingOCurly Nothing
                    let isLet = case alrLayout of
                                ALRLayoutLet -> True
                                _ -> False
                    setALRContext (ALRNoLayout (containsCommas ITocurly) isLet : context)
                    return t
             -- ...and makes this case unnecessary
             {-
             -- I think our implicit open-curly handling is slightly
             -- different to John's, in how it interacts with newlines
             -- and "in"
             (ITocurly, _, Just _) ->
                 do setAlrExpectingOCurly Nothing
                    setNextToken t
                    lexTokenAlr
             -}
             (_, ALRLayout _ col : _ls, Just expectingOCurly)
              | (thisCol > col) ||
                (thisCol == col &&
                 isNonDecreasingIndentation expectingOCurly) ->
                 do setAlrExpectingOCurly Nothing
                    setALRContext (ALRLayout expectingOCurly thisCol : context)
                    setNextToken t
                    return (L thisLoc ITvocurly)
              | otherwise ->
                 do setAlrExpectingOCurly Nothing
                    setPendingImplicitTokens [L lastLoc ITvccurly]
                    setNextToken t
                    return (L lastLoc ITvocurly)
             (_, _, Just expectingOCurly) ->
                 do setAlrExpectingOCurly Nothing
                    setALRContext (ALRLayout expectingOCurly thisCol : context)
                    setNextToken t
                    return (L thisLoc ITvocurly)
             -- We do the [] cases earlier than in the spec, as we
             -- have an actual EOF token
             (ITeof, ALRLayout _ _ : ls, _) ->
                 do setALRContext ls
                    setNextToken t
                    return (L thisLoc ITvccurly)
             (ITeof, _, _) ->
                 return t
             -- the other ITeof case omitted; general case below covers it
             (ITin, _, _)
              | justClosedExplicitLetBlock ->
                 return t
             (ITin, ALRLayout ALRLayoutLet _ : ls, _)
              | newLine ->
                 do setPendingImplicitTokens [t]
                    setALRContext ls
                    return (L thisLoc ITvccurly)
             -- This next case is to handle a transitional issue:
             (ITwhere, ALRLayout _ col : ls, _)
              | newLine && thisCol == col && transitional ->
                 do addPsMessage
                      (mkSrcSpanPs thisLoc)
                      (PsWarnTransitionalLayout TransLayout_Where)
                    setALRContext ls
                    setNextToken t
                    -- Note that we use lastLoc, as we may need to close
                    -- more layouts, or give a semicolon
                    return (L lastLoc ITvccurly)
             -- This next case is to handle a transitional issue:
             (ITvbar, ALRLayout _ col : ls, _)
              | newLine && thisCol == col && transitional ->
                 do addPsMessage
                      (mkSrcSpanPs thisLoc)
                      (PsWarnTransitionalLayout TransLayout_Pipe)
                    setALRContext ls
                    setNextToken t
                    -- Note that we use lastLoc, as we may need to close
                    -- more layouts, or give a semicolon
                    return (L lastLoc ITvccurly)
             (_, ALRLayout _ col : ls, _)
              | newLine && thisCol == col ->
                 do setNextToken t
                    let loc = psSpanStart thisLoc
                        zeroWidthLoc = mkPsSpan loc loc
                    return (L zeroWidthLoc ITsemi)
              | newLine && thisCol < col ->
                 do setALRContext ls
                    setNextToken t
                    -- Note that we use lastLoc, as we may need to close
                    -- more layouts, or give a semicolon
                    return (L lastLoc ITvccurly)
             -- We need to handle close before open, as 'then' is both
             -- an open and a close
             (u, _, _)
              | isALRclose u ->
                 case context of
                 ALRLayout _ _ : ls ->
                     do setALRContext ls
                        setNextToken t
                        return (L thisLoc ITvccurly)
                 ALRNoLayout _ isLet : ls ->
                     do let ls' = if isALRopen u
                                     then ALRNoLayout (containsCommas u) False : ls
                                     else ls
                        setALRContext ls'
                        when isLet $ setJustClosedExplicitLetBlock True
                        return t
                 [] ->
                     do let ls = if isALRopen u
                                    then [ALRNoLayout (containsCommas u) False]
                                    else []
                        setALRContext ls
                        -- XXX This is an error in John's code, but
                        -- it looks reachable to me at first glance
                        return t
             (u, _, _)
              | isALRopen u ->
                 do setALRContext (ALRNoLayout (containsCommas u) False : context)
                    return t
             (ITin, ALRLayout ALRLayoutLet _ : ls, _) ->
                 do setALRContext ls
                    setPendingImplicitTokens [t]
                    return (L thisLoc ITvccurly)
             (ITin, ALRLayout _ _ : ls, _) ->
                 do setALRContext ls
                    setNextToken t
                    return (L thisLoc ITvccurly)
             -- the other ITin case omitted; general case below covers it
             (ITcomma, ALRLayout _ _ : ls, _)
              | topNoLayoutContainsCommas ls ->
                 do setALRContext ls
                    setNextToken t
                    return (L thisLoc ITvccurly)
             (ITwhere, ALRLayout ALRLayoutDo _ : ls, _) ->
                 do setALRContext ls
                    setPendingImplicitTokens [t]
                    return (L thisLoc ITvccurly)
             -- the other ITwhere case omitted; general case below covers it
             (_, _, _) -> return t

-- | Tokens that open an ALR no-layout region.
isALRopen :: Token -> Bool
isALRopen ITcase          = True
isALRopen ITif            = True
isALRopen ITthen          = True
isALRopen IToparen        = True
isALRopen ITobrack        = True
isALRopen ITocurly        = True
-- GHC Extensions:
isALRopen IToubxparen     = True
isALRopen _               = False

-- | Tokens that close an ALR region. NB: 'then' appears in both open and
-- close sets, hence close is checked before open above.
isALRclose :: Token -> Bool
isALRclose ITof     = True
isALRclose ITthen   = True
isALRclose ITelse   = True
isALRclose ITcparen = True
isALRclose ITcbrack = True
isALRclose ITccurly = True
-- GHC Extensions:
isALRclose ITcubxparen = True
isALRclose _        = False

isNonDecreasingIndentation :: ALRLayout -> Bool
isNonDecreasingIndentation ALRLayoutDo = True
isNonDecreasingIndentation _           = False

containsCommas :: Token -> Bool
containsCommas IToparen = True
containsCommas ITobrack = True
-- John doesn't have {} as containing commas, but records contain them,
-- which caused a problem parsing Cabal's Distribution.Simple.InstallDirs
-- (defaultInstallDirs).
containsCommas ITocurly = True
-- GHC Extensions:
containsCommas IToubxparen = True
containsCommas _        = False

topNoLayoutContainsCommas :: [ALRContext] -> Bool
topNoLayoutContainsCommas [] = False
topNoLayoutContainsCommas (ALRLayout _ _ : ls) = topNoLayoutContainsCommas ls
topNoLayoutContainsCommas (ALRNoLayout b _ : _) = b

-- If the generated alexScan/alexScanUser functions are called multiple times
-- in this file, alexScanUser gets broken out into a separate function and
-- increases memory usage. Make sure GHC inlines this function and optimizes it.
lexToken :: P (PsLocated Token) lexToken = do inp@(AI loc1 buf) <- getInput sc <- getLexState exts <- getExts case alexScanUser exts inp sc of AlexEOF -> do let span = mkPsSpan loc1 loc1 lc <- getLastLocIncludingComments setEofPos (psRealSpan span) (psRealSpan lc) setLastToken span 0 return (L span ITeof) AlexError (AI loc2 buf) -> reportLexError (psRealLoc loc1) (psRealLoc loc2) buf (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrLexer LexError k) AlexSkip inp2 _ -> do setInput inp2 lexToken AlexToken inp2@(AI end buf2) _ t -> do setInput inp2 let span = mkPsSpan loc1 end let bytes = byteDiff buf buf2 span `seq` setLastToken span bytes lt <- t span buf bytes buf2 let lt' = unLoc lt if (isComment lt') then setLastComment lt else setLastTk lt return lt reportLexError :: RealSrcLoc -> RealSrcLoc -> StringBuffer -> (LexErrKind -> SrcSpan -> MsgEnvelope PsMessage) -> P a reportLexError loc1 loc2 buf f | atEnd buf = failLocMsgP loc1 loc2 (f LexErrKind_EOF) | otherwise = let c = fst (nextChar buf) in if c == '\0' -- decoding errors are mapped to '\0', see utf8DecodeChar# then failLocMsgP loc2 loc2 (f LexErrKind_UTF8) else failLocMsgP loc1 loc2 (f (LexErrKind_Char c)) lexTokenStream :: ParserOpts -> StringBuffer -> RealSrcLoc -> ParseResult [Located Token] lexTokenStream opts buf loc = unP go initState{ options = opts' } where new_exts = xunset UsePosPragsBit -- parse LINE/COLUMN pragmas as tokens $ xset RawTokenStreamBit -- include comments $ pExtsBitmap opts opts' = opts { pExtsBitmap = new_exts } initState = initParserState opts' buf loc go = do ltok <- lexer False return case ltok of L _ ITeof -> return [] _ -> liftM (ltok:) go linePrags = Map.singleton "line" linePrag fileHeaderPrags = Map.fromList([("options", lex_string_prag IToptions_prag), ("options_ghc", lex_string_prag IToptions_prag), ("options_haddock", lex_string_prag_comment ITdocOptions), ("language", token ITlanguage_prag), ("include", lex_string_prag ITinclude_prag)]) ignoredPrags = Map.fromList (map 
ignored pragmas) where ignored opt = (opt, nested_comment) impls = ["hugs", "nhc98", "jhc", "yhc", "catch", "derive"] options_pragmas = map ("options_" ++) impls -- CFILES is a hugs-only thing. pragmas = options_pragmas ++ ["cfiles", "contract"] oneWordPrags = Map.fromList [ ("rules", rulePrag), ("inline", fstrtoken (\s -> (ITinline_prag (SourceText s) (Inline (SourceText s)) FunLike))), ("inlinable", fstrtoken (\s -> (ITinline_prag (SourceText s) (Inlinable (SourceText s)) FunLike))), ("inlineable", fstrtoken (\s -> (ITinline_prag (SourceText s) (Inlinable (SourceText s)) FunLike))), -- Spelling variant ("notinline", fstrtoken (\s -> (ITinline_prag (SourceText s) (NoInline (SourceText s)) FunLike))), ("opaque", fstrtoken (\s -> ITopaque_prag (SourceText s))), ("specialize", fstrtoken (\s -> ITspec_prag (SourceText s))), ("source", fstrtoken (\s -> ITsource_prag (SourceText s))), ("warning", fstrtoken (\s -> ITwarning_prag (SourceText s))), ("deprecated", fstrtoken (\s -> ITdeprecated_prag (SourceText s))), ("scc", fstrtoken (\s -> ITscc_prag (SourceText s))), ("unpack", fstrtoken (\s -> ITunpack_prag (SourceText s))), ("nounpack", fstrtoken (\s -> ITnounpack_prag (SourceText s))), ("ann", fstrtoken (\s -> ITann_prag (SourceText s))), ("minimal", fstrtoken (\s -> ITminimal_prag (SourceText s))), ("overlaps", fstrtoken (\s -> IToverlaps_prag (SourceText s))), ("overlappable", fstrtoken (\s -> IToverlappable_prag (SourceText s))), ("overlapping", fstrtoken (\s -> IToverlapping_prag (SourceText s))), ("incoherent", fstrtoken (\s -> ITincoherent_prag (SourceText s))), ("ctype", fstrtoken (\s -> ITctype (SourceText s))), ("complete", fstrtoken (\s -> ITcomplete_prag (SourceText s))), ("column", columnPrag) ] twoWordPrags = Map.fromList [ ("inline conlike", fstrtoken (\s -> (ITinline_prag (SourceText s) (Inline (SourceText s)) ConLike))), ("notinline conlike", fstrtoken (\s -> (ITinline_prag (SourceText s) (NoInline (SourceText s)) ConLike))), ("specialize inline", 
fstrtoken (\s -> (ITspec_inline_prag (SourceText s) True))), ("specialize notinline", fstrtoken (\s -> (ITspec_inline_prag (SourceText s) False))) ] dispatch_pragmas :: Map String Action -> Action dispatch_pragmas prags span buf len buf2 = case Map.lookup (clean_pragma (lexemeToString buf len)) prags of Just found -> found span buf len buf2 Nothing -> lexError LexUnknownPragma known_pragma :: Map String Action -> AlexAccPred ExtsBitmap known_pragma prags _ (AI _ startbuf) _ (AI _ curbuf) = isKnown && nextCharIsNot curbuf pragmaNameChar where l = lexemeToString startbuf (byteDiff startbuf curbuf) isKnown = isJust $ Map.lookup (clean_pragma l) prags pragmaNameChar c = isAlphaNum c || c == '_' clean_pragma :: String -> String clean_pragma prag = canon_ws (map toLower (unprefix prag)) where unprefix prag' = case stripPrefix "{-#" prag' of Just rest -> rest Nothing -> prag' canonical prag' = case prag' of "noinline" -> "notinline" "specialise" -> "specialize" "constructorlike" -> "conlike" _ -> prag' canon_ws s = unwords (map canonical (words s)) warn_unknown_prag :: Map String Action -> Action warn_unknown_prag prags span buf len buf2 = do let uppercase = map toUpper unknown_prag = uppercase (clean_pragma (lexemeToString buf len)) suggestions = map uppercase (Map.keys prags) addPsMessage (RealSrcSpan (psRealSpan span) Strict.Nothing) $ PsWarnUnrecognisedPragma unknown_prag suggestions nested_comment span buf len buf2 {- %************************************************************************ %* * Helper functions for generating annotations in the parser %* * %************************************************************************ -} -- TODO:AZ: we should have only mkParensEpToks. 
Delee mkParensEpAnn, mkParensLocs -- |Given a 'RealSrcSpan' that surrounds a 'HsPar' or 'HsParTy', generate -- 'EpToken' values for the opening and closing bordering on the start -- and end of the span mkParensEpToks :: RealSrcSpan -> (EpToken "(", EpToken ")") mkParensEpToks ss = (EpTok (EpaSpan (RealSrcSpan lo Strict.Nothing)), EpTok (EpaSpan (RealSrcSpan lc Strict.Nothing))) where f = srcSpanFile ss sl = srcSpanStartLine ss sc = srcSpanStartCol ss el = srcSpanEndLine ss ec = srcSpanEndCol ss lo = mkRealSrcSpan (realSrcSpanStart ss) (mkRealSrcLoc f sl (sc+1)) lc = mkRealSrcSpan (mkRealSrcLoc f el (ec - 1)) (realSrcSpanEnd ss) -- |Given a 'RealSrcSpan' that surrounds a 'HsPar' or 'HsParTy', generate -- 'EpaLocation' values for the opening and closing bordering on the start -- and end of the span mkParensLocs :: RealSrcSpan -> (EpaLocation, EpaLocation) mkParensLocs ss = (EpaSpan (RealSrcSpan lo Strict.Nothing), EpaSpan (RealSrcSpan lc Strict.Nothing)) where f = srcSpanFile ss sl = srcSpanStartLine ss sc = srcSpanStartCol ss el = srcSpanEndLine ss ec = srcSpanEndCol ss lo = mkRealSrcSpan (realSrcSpanStart ss) (mkRealSrcLoc f sl (sc+1)) lc = mkRealSrcSpan (mkRealSrcLoc f el (ec - 1)) (realSrcSpanEnd ss) queueComment :: RealLocated Token -> P() queueComment c = P $ \s -> POk s { comment_q = commentToAnnotation c : comment_q s } () allocateComments :: RealSrcSpan -> [LEpaComment] -> ([LEpaComment], [LEpaComment]) allocateComments ss comment_q = let (before,rest) = break (\(L l _) -> isRealSubspanOf (epaLocationRealSrcSpan l) ss) comment_q (middle,after) = break (\(L l _) -> not (isRealSubspanOf (epaLocationRealSrcSpan l) ss)) rest comment_q' = before ++ after newAnns = middle in (comment_q', reverse newAnns) -- Comments appearing without a line-break before the first -- declaration are associated with the declaration splitPriorComments :: RealSrcSpan -> [LEpaComment] -> ([LEpaComment], [LEpaComment]) splitPriorComments ss prior_comments = let -- True if there is only 
one line between the earlier and later span, -- And the token preceding the comment is on a different line cmp :: RealSrcSpan -> LEpaComment -> Bool cmp later (L l c) = srcSpanStartLine later - srcSpanEndLine (epaLocationRealSrcSpan l) == 1 && srcSpanEndLine (ac_prior_tok c) /= srcSpanStartLine (epaLocationRealSrcSpan l) go :: [LEpaComment] -> RealSrcSpan -> [LEpaComment] -> ([LEpaComment], [LEpaComment]) go decl_comments _ [] = ([],decl_comments) go decl_comments r (c@(L l _):cs) = if cmp r c then go (c:decl_comments) (epaLocationRealSrcSpan l) cs else (reverse (c:cs), decl_comments) in go [] ss prior_comments allocatePriorComments :: RealSrcSpan -> [LEpaComment] -> Strict.Maybe [LEpaComment] -> (Strict.Maybe [LEpaComment], [LEpaComment], [LEpaComment]) allocatePriorComments ss comment_q mheader_comments = let cmp (L l _) = epaLocationRealSrcSpan l <= ss (newAnns,after) = partition cmp comment_q comment_q'= after (prior_comments, decl_comments) = splitPriorComments ss newAnns in case mheader_comments of Strict.Nothing -> (Strict.Just prior_comments, comment_q', decl_comments) Strict.Just _ -> (mheader_comments, comment_q', reverse newAnns) allocateFinalComments :: RealSrcSpan -> [LEpaComment] -> Strict.Maybe [LEpaComment] -> (Strict.Maybe [LEpaComment], [LEpaComment], [LEpaComment]) allocateFinalComments _ss comment_q mheader_comments = -- We ignore the RealSrcSpan as the parser currently provides a -- point span at (1,1). 
case mheader_comments of Strict.Nothing -> (Strict.Just (reverse comment_q), [], []) Strict.Just _ -> (mheader_comments, [], reverse comment_q) commentToAnnotation :: RealLocated Token -> LEpaComment commentToAnnotation (L l (ITdocComment s ll)) = mkLEpaComment l ll (EpaDocComment s) commentToAnnotation (L l (ITdocOptions s ll)) = mkLEpaComment l ll (EpaDocOptions s) commentToAnnotation (L l (ITlineComment s ll)) = mkLEpaComment l ll (EpaLineComment s) commentToAnnotation (L l (ITblockComment s ll)) = mkLEpaComment l ll (EpaBlockComment s) commentToAnnotation _ = panic "commentToAnnotation" -- see Note [PsSpan in Comments] mkLEpaComment :: RealSrcSpan -> PsSpan -> EpaCommentTok -> LEpaComment mkLEpaComment l ll tok = L (realSpanAsAnchor l) (EpaComment tok (psRealSpan ll)) -- --------------------------------------------------------------------- isComment :: Token -> Bool isComment (ITlineComment _ _) = True isComment (ITblockComment _ _) = True isComment (ITdocComment _ _) = True isComment (ITdocOptions _ _) = True isComment _ = False } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/PostProcess.hs0000644000000000000000000045406707346545000022002 0ustar0000000000000000 {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE MultiWayIf #-} -- -- (c) The University of Glasgow 2002-2006 -- -- Functions over HsSyn specialised to RdrName. 
module GHC.Parser.PostProcess ( mkRdrGetField, mkRdrProjection, Fbind, -- RecordDot mkHsOpApp, mkHsIntegral, mkHsFractional, mkHsIsString, mkHsDo, mkMDo, mkSpliceDecl, mkRoleAnnotDecl, mkClassDecl, mkTyData, mkDataFamInst, mkTySynonym, mkTyFamInstEqn, mkStandaloneKindSig, mkTyFamInst, mkFamDecl, mkInlinePragma, mkOpaquePragma, mkPatSynMatchGroup, mkRecConstrOrUpdate, mkTyClD, mkInstD, mkRdrRecordCon, mkRdrRecordUpd, setRdrNameSpace, fromSpecTyVarBndr, fromSpecTyVarBndrs, annBinds, stmtsAnchor, stmtsLoc, cvBindGroup, cvBindsAndSigs, cvTopDecls, placeHolderPunRhs, -- Stuff to do with Foreign declarations mkImport, parseCImport, mkExport, mkExtName, -- RdrName -> CLabelString mkGadtDecl, -- [LocatedA RdrName] -> LHsType RdrName -> ConDecl RdrName mkConDeclH98, -- Bunch of functions in the parser monad for -- checking and constructing values checkImportDecl, checkExpBlockArguments, checkCmdBlockArguments, checkPrecP, -- Int -> P Int checkContext, -- HsType -> P HsContext checkPattern, -- HsExp -> P HsPat checkPattern_details, incompleteDoBlock, ParseContext(..), checkMonadComp, checkValDef, -- (SrcLoc, HsExp, HsRhs, [HsDecl]) -> P HsDecl checkValSigLhs, LRuleTyTmVar, RuleTyTmVar(..), mkRuleBndrs, mkRuleTyVarBndrs, checkRuleTyVarBndrNames, checkRecordSyntax, checkEmptyGADTs, addFatalError, hintBangPat, mkBangTy, UnpackednessPragma(..), mkMultTy, mkMultAnn, -- Token location mkTokenLocation, -- Help with processing exports ImpExpSubSpec(..), ImpExpQcSpec(..), mkModuleImpExp, mkTypeImpExp, mkImpExpSubSpec, checkImportSpec, -- Token symbols starSym, -- Warnings and errors warnStarIsType, warnPrepositiveQualifiedModule, failOpFewArgs, failNotEnabledImportQualifiedPost, failImportQualifiedTwice, SumOrTuple (..), -- Expression/command/pattern ambiguity resolution PV, runPV, ECP(ECP, unECP), DisambInfixOp(..), DisambECP(..), ecpFromExp, ecpFromCmd, ecpFromPat, ArrowParsingMode(..), withArrowParsingMode, withArrowParsingMode', setTelescopeBndrsNameSpace, PatBuilder, hsHoleExpr, 
-- Type/datacon ambiguity resolution DisambTD(..), addUnpackednessP, dataConBuilderCon, dataConBuilderDetails, mkUnboxedSumCon, -- ListTuplePuns related parsers mkTupleSyntaxTy, mkTupleSyntaxTycon, mkListSyntaxTy0, mkListSyntaxTy1, withCombinedComments, requireLTPuns, ) where import GHC.Prelude import GHC.Hs -- Lots of it import GHC.Core.TyCon ( TyCon, isTupleTyCon, tyConSingleDataCon_maybe ) import GHC.Core.DataCon ( DataCon, dataConTyCon, dataConName ) import GHC.Core.ConLike ( ConLike(..) ) import GHC.Core.Coercion.Axiom ( Role, fsFromRole ) import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Types.Basic import GHC.Types.Error import GHC.Types.Fixity import GHC.Types.Hint import GHC.Types.SourceText import GHC.Parser.Types import GHC.Parser.Lexer import GHC.Parser.Errors.Types import GHC.Utils.Lexeme ( okConOcc ) import GHC.Types.TyThing import GHC.Core.Type ( Specificity(..) ) import GHC.Builtin.Types( cTupleTyConName, tupleTyCon, tupleDataCon, nilDataConName, nilDataConKey, listTyConName, listTyConKey, sumDataCon, unrestrictedFunTyCon , listTyCon_RDR, unitDataCon ) import GHC.Types.ForeignCall import GHC.Types.SrcLoc import GHC.Types.Unique ( hasKey ) import GHC.Data.OrdList import GHC.Utils.Outputable as Outputable import GHC.Data.FastString import GHC.Data.Maybe import GHC.Utils.Error import GHC.Utils.Misc import GHC.Utils.Monad (unlessM) import Data.Either import Data.List ( findIndex ) import Data.Foldable import qualified Data.Semigroup as Semi import GHC.Unit.Module.Warnings import GHC.Utils.Panic import qualified GHC.Data.Strict as Strict import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import Control.Monad import Text.ParserCombinators.ReadP as ReadP import Data.Char import Data.Data ( dataTypeOf, fromConstr, dataTypeConstrs ) import Data.Kind ( Type ) import Data.List.NonEmpty (NonEmpty) {- ********************************************************************** Construction functions for Rdr stuff 
********************************************************************* -} -- | mkClassDecl builds a RdrClassDecl, filling in the names for tycon and -- datacon by deriving them from the name of the class. We fill in the names -- for the tycon and datacon corresponding to the class, by deriving them -- from the name of the class itself. This saves recording the names in the -- interface file (which would be equally good). -- Similarly for mkConDecl, mkClassOpSig and default-method names. -- *** See Note [The Naming story] in GHC.Hs.Decls **** mkTyClD :: LTyClDecl (GhcPass p) -> LHsDecl (GhcPass p) mkTyClD (L loc d) = L loc (TyClD noExtField d) mkInstD :: LInstDecl (GhcPass p) -> LHsDecl (GhcPass p) mkInstD (L loc d) = L loc (InstD noExtField d) mkClassDecl :: SrcSpan -> Located (Maybe (LHsContext GhcPs), LHsType GhcPs) -> Located (a,[LHsFunDep GhcPs]) -> OrdList (LHsDecl GhcPs) -> EpLayout -> AnnClassDecl -> P (LTyClDecl GhcPs) mkClassDecl loc' (L _ (mcxt, tycl_hdr)) fds where_cls layout annsIn = do { (binds, sigs, ats, at_defs, _, docs) <- cvBindsAndSigs where_cls ; (cls, tparams, fixity, ops, cps, cs) <- checkTyClHdr True tycl_hdr ; tyvars <- checkTyVars (text "class") whereDots cls tparams ; let anns' = annsIn { acd_openp = ops, acd_closep = cps} ; let loc = EpAnn (spanAsAnchor loc') noAnn cs ; return (L loc (ClassDecl { tcdCExt = (anns', layout, NoAnnSortKey) , tcdCtxt = mcxt , tcdLName = cls, tcdTyVars = tyvars , tcdFixity = fixity , tcdFDs = snd (unLoc fds) , tcdSigs = mkClassOpSigs sigs , tcdMeths = binds , tcdATs = ats, tcdATDefs = at_defs , tcdDocs = docs })) } mkTyData :: SrcSpan -> Bool -> NewOrData -> Maybe (LocatedP CType) -> Located (Maybe (LHsContext GhcPs), LHsType GhcPs) -> Maybe (LHsKind GhcPs) -> [LConDecl GhcPs] -> Located (HsDeriving GhcPs) -> AnnDataDefn -> P (LTyClDecl GhcPs) mkTyData loc' is_type_data new_or_data cType (L _ (mcxt, tycl_hdr)) ksig data_cons (L _ maybe_deriv) annsIn = do { (tc, tparams, fixity, ops, cps, cs) <- checkTyClHdr 
False tycl_hdr ; tyvars <- checkTyVars (ppr new_or_data) equalsDots tc tparams ; let anns = annsIn {andd_openp = ops, andd_closep = cps} ; data_cons <- checkNewOrData loc' (unLoc tc) is_type_data new_or_data data_cons ; defn <- mkDataDefn cType mcxt ksig data_cons maybe_deriv anns ; !cs' <- getCommentsFor loc' ; let loc = EpAnn (spanAsAnchor loc') noAnn (cs' Semi.<> cs) ; return (L loc (DataDecl { tcdDExt = noExtField, tcdLName = tc, tcdTyVars = tyvars, tcdFixity = fixity, tcdDataDefn = defn })) } mkDataDefn :: Maybe (LocatedP CType) -> Maybe (LHsContext GhcPs) -> Maybe (LHsKind GhcPs) -> DataDefnCons (LConDecl GhcPs) -> HsDeriving GhcPs -> AnnDataDefn -> P (HsDataDefn GhcPs) mkDataDefn cType mcxt ksig data_cons maybe_deriv anns = do { checkDatatypeContext mcxt ; return (HsDataDefn { dd_ext = anns , dd_cType = cType , dd_ctxt = mcxt , dd_cons = data_cons , dd_kindSig = ksig , dd_derivs = maybe_deriv }) } mkTySynonym :: SrcSpan -> LHsType GhcPs -- LHS -> LHsType GhcPs -- RHS -> EpToken "type" -> EpToken "=" -> P (LTyClDecl GhcPs) mkTySynonym loc lhs rhs antype aneq = do { (tc, tparams, fixity, ops, cps, cs) <- checkTyClHdr False lhs ; tyvars <- checkTyVars (text "type") equalsDots tc tparams ; let anns = AnnSynDecl ops cps antype aneq ; let loc' = EpAnn (spanAsAnchor loc) noAnn cs ; return (L loc' (SynDecl { tcdSExt = anns , tcdLName = tc, tcdTyVars = tyvars , tcdFixity = fixity , tcdRhs = rhs })) } mkStandaloneKindSig :: SrcSpan -> Located [LocatedN RdrName] -- LHS -> LHsSigType GhcPs -- RHS -> (EpToken "type", TokDcolon) -> P (LStandaloneKindSig GhcPs) mkStandaloneKindSig loc lhs rhs anns = do { vs <- mapM check_lhs_name (unLoc lhs) ; v <- check_singular_lhs (reverse vs) ; return $ L (noAnnSrcSpan loc) $ StandaloneKindSig anns v rhs } where check_lhs_name v@(unLoc->name) = if isUnqual name && isTcOcc (rdrNameOcc name) then return v else addFatalError $ mkPlainErrorMsgEnvelope (getLocA v) $ (PsErrUnexpectedQualifiedConstructor (unLoc v)) check_singular_lhs vs = 
case vs of [] -> panic "mkStandaloneKindSig: empty left-hand side" [v] -> return v _ -> addFatalError $ mkPlainErrorMsgEnvelope (getLoc lhs) $ (PsErrMultipleNamesInStandaloneKindSignature vs) mkTyFamInstEqn :: SrcSpan -> HsOuterFamEqnTyVarBndrs GhcPs -> LHsType GhcPs -> LHsType GhcPs -> EpToken "=" -> P (LTyFamInstEqn GhcPs) mkTyFamInstEqn loc bndrs lhs rhs annEq = do { (tc, tparams, fixity, ops, cps, cs) <- checkTyClHdr False lhs ; let loc' = EpAnn (spanAsAnchor loc) noAnn cs ; return (L loc' $ FamEqn { feqn_ext = (ops, cps, annEq) , feqn_tycon = tc , feqn_bndrs = bndrs , feqn_pats = tparams , feqn_fixity = fixity , feqn_rhs = rhs })} mkDataFamInst :: SrcSpan -> NewOrData -> Maybe (LocatedP CType) -> (Maybe ( LHsContext GhcPs), HsOuterFamEqnTyVarBndrs GhcPs , LHsType GhcPs) -> Maybe (LHsKind GhcPs) -> [LConDecl GhcPs] -> Located (HsDeriving GhcPs) -> AnnDataDefn -> P (LInstDecl GhcPs) mkDataFamInst loc new_or_data cType (mcxt, bndrs, tycl_hdr) ksig data_cons (L _ maybe_deriv) anns = do { (tc, tparams, fixity, ops, cps, cs) <- checkTyClHdr False tycl_hdr ; data_cons <- checkNewOrData loc (unLoc tc) False new_or_data data_cons ; let anns' = anns {andd_openp = ops, andd_closep = cps} ; defn <- mkDataDefn cType mcxt ksig data_cons maybe_deriv anns' ; let loc' = EpAnn (spanAsAnchor loc) noAnn cs ; return (L loc' (DataFamInstD noExtField (DataFamInstDecl (FamEqn { feqn_ext = ([], [], NoEpTok) , feqn_tycon = tc , feqn_bndrs = bndrs , feqn_pats = tparams , feqn_fixity = fixity , feqn_rhs = defn })))) } mkTyFamInst :: SrcSpan -> TyFamInstEqn GhcPs -> EpToken "type" -> EpToken "instance" -> P (LInstDecl GhcPs) mkTyFamInst loc eqn t i = do return (L (noAnnSrcSpan loc) (TyFamInstD noExtField (TyFamInstDecl (t,i) eqn))) mkFamDecl :: SrcSpan -> FamilyInfo GhcPs -> TopLevelFlag -> LHsType GhcPs -- LHS -> LFamilyResultSig GhcPs -- Optional result signature -> Maybe (LInjectivityAnn GhcPs) -- Injectivity annotation -> AnnFamilyDecl -> P (LTyClDecl GhcPs) mkFamDecl loc info 
topLevel lhs ksig injAnn annsIn = do { (tc, tparams, fixity, ops, cps, cs) <- checkTyClHdr False lhs ; tyvars <- checkTyVars (ppr info) equals_or_where tc tparams ; let loc' = EpAnn (spanAsAnchor loc) noAnn cs ; let anns' = annsIn { afd_openp = ops, afd_closep = cps } ; return (L loc' (FamDecl noExtField (FamilyDecl { fdExt = anns' , fdTopLevel = topLevel , fdInfo = info, fdLName = tc , fdTyVars = tyvars , fdFixity = fixity , fdResultSig = ksig , fdInjectivityAnn = injAnn }))) } where equals_or_where = case info of DataFamily -> empty OpenTypeFamily -> empty ClosedTypeFamily {} -> whereDots mkSpliceDecl :: LHsExpr GhcPs -> (LHsDecl GhcPs) -- If the user wrote -- [pads| ... ] then return a QuasiQuoteD -- $(e) then return a SpliceD -- but if they wrote, say, -- f x then behave as if they'd written $(f x) -- ie a SpliceD -- -- Typed splices are not allowed at the top level, thus we do not represent them -- as spliced declaration. See #10945 mkSpliceDecl lexpr@(L loc expr) | HsUntypedSplice _ splice@(HsUntypedSpliceExpr {}) <- expr = L loc $ SpliceD noExtField (SpliceDecl noExtField (L (l2l loc) splice) DollarSplice) | HsUntypedSplice _ splice@(HsQuasiQuote {}) <- expr = L loc $ SpliceD noExtField (SpliceDecl noExtField (L (l2l loc) splice) DollarSplice) | otherwise = L loc $ SpliceD noExtField (SpliceDecl noExtField (L (l2l loc) (HsUntypedSpliceExpr noAnn (la2la lexpr))) BareSplice) mkRoleAnnotDecl :: SrcSpan -> LocatedN RdrName -- type being annotated -> [Located (Maybe FastString)] -- roles -> (EpToken "type", EpToken "role") -> P (LRoleAnnotDecl GhcPs) mkRoleAnnotDecl loc tycon roles anns = do { roles' <- mapM parse_role roles ; !cs <- getCommentsFor loc ; return $ L (EpAnn (spanAsAnchor loc) noAnn cs) $ RoleAnnotDecl anns tycon roles' } where role_data_type = dataTypeOf (undefined :: Role) all_roles = map fromConstr $ dataTypeConstrs role_data_type possible_roles = [(fsFromRole role, role) | role <- all_roles] parse_role (L loc_role Nothing) = return $ L 
(noAnnSrcSpan loc_role) Nothing parse_role (L loc_role (Just role)) = case lookup role possible_roles of Just found_role -> return $ L (noAnnSrcSpan loc_role) $ Just found_role Nothing -> let nearby = fuzzyLookup (unpackFS role) (mapFst unpackFS possible_roles) in addFatalError $ mkPlainErrorMsgEnvelope loc_role $ (PsErrIllegalRoleName role nearby) mkMDo :: HsDoFlavour -> LocatedLW [ExprLStmt GhcPs] -> EpaLocation -> EpaLocation -> HsExpr GhcPs mkMDo ctxt stmts tok loc = mkHsDoAnns ctxt stmts (AnnList (Just loc) ListNone [] tok []) -- | Converts a list of 'LHsTyVarBndr's annotated with their 'Specificity' to -- binders without annotations. Only accepts specified variables, and errors if -- any of the provided binders has an 'InferredSpec' annotation. fromSpecTyVarBndrs :: [LHsTyVarBndr Specificity GhcPs] -> P [LHsTyVarBndr () GhcPs] fromSpecTyVarBndrs = mapM fromSpecTyVarBndr -- | Converts 'LHsTyVarBndr' annotated with its 'Specificity' to one without -- annotations. Only accepts specified variables, and errors if the provided -- binder has an 'InferredSpec' annotation. 
fromSpecTyVarBndr :: LHsTyVarBndr Specificity GhcPs -> P (LHsTyVarBndr () GhcPs) fromSpecTyVarBndr (L loc (HsTvb xtv flag idp k)) = do case flag of SpecifiedSpec -> return () InferredSpec -> addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ PsErrInferredTypeVarNotAllowed return $ L loc (HsTvb xtv () idp k) -- | Add the annotation for a 'where' keyword to existing @HsLocalBinds@ annBinds :: EpToken "where" -> EpAnnComments -> HsLocalBinds GhcPs -> (HsLocalBinds GhcPs, Maybe EpAnnComments) annBinds w cs (HsValBinds an bs) = (HsValBinds (add_where w an cs) bs, Nothing) annBinds w cs (HsIPBinds an bs) = (HsIPBinds (add_where w an cs) bs, Nothing) annBinds _ cs (EmptyLocalBinds x) = (EmptyLocalBinds x, Just cs) add_where :: EpToken "where" -> EpAnn (AnnList (EpToken "where")) -> EpAnnComments -> EpAnn (AnnList (EpToken "where")) add_where w@(EpTok (EpaSpan (RealSrcSpan rs _))) (EpAnn a al cs) cs2 | valid_anchor a = EpAnn (widenAnchorT a w) (al { al_rest = w}) (cs Semi.<> cs2) | otherwise = EpAnn (patch_anchor rs a) (al { al_anchor = (fmap (patch_anchor rs) (al_anchor al)) , al_rest = w}) (cs Semi.<> cs2) add_where _ _ _ = panic "add_where" -- EpaDelta should only be used for transformations valid_anchor :: EpaLocation -> Bool valid_anchor (EpaSpan (RealSrcSpan r _)) = srcSpanStartLine r >= 0 valid_anchor _ = False -- If the decl list for where binds is empty, the anchor ends up -- invalid. In this case, use the parent one patch_anchor :: RealSrcSpan -> EpaLocation -> EpaLocation patch_anchor r EpaDelta{} = EpaSpan (RealSrcSpan r Strict.Nothing) patch_anchor r1 (EpaSpan (RealSrcSpan r0 mb)) = EpaSpan (RealSrcSpan r mb) where r = if srcSpanStartLine r0 < 0 then r1 else r0 patch_anchor _ (EpaSpan ss) = EpaSpan ss -- | The anchor for a stmtlist is based on either the location or -- the first semicolon annotion. 
stmtsAnchor :: Located (OrdList (EpToken tok),a) -> Maybe EpaLocation stmtsAnchor (L (RealSrcSpan l mb) ((ConsOL (EpTok (EpaSpan (RealSrcSpan r rb))) _), _)) = Just $ widenAnchorS (EpaSpan (RealSrcSpan l mb)) (RealSrcSpan r rb) stmtsAnchor (L (RealSrcSpan l mb) _) = Just $ EpaSpan (RealSrcSpan l mb) stmtsAnchor _ = Nothing stmtsLoc :: Located (OrdList (EpToken tok),a) -> SrcSpan stmtsLoc (L l ((ConsOL aa _), _)) = widenSpanT l aa stmtsLoc (L l _) = l {- ********************************************************************** #cvBinds-etc# Converting to @HsBinds@, etc. ********************************************************************* -} -- | Function definitions are restructured here. Each is assumed to be recursive -- initially, and non recursive definitions are discovered by the dependency -- analyser. -- | Groups together bindings for a single function cvTopDecls :: OrdList (LHsDecl GhcPs) -> [LHsDecl GhcPs] cvTopDecls decls = getMonoBindAll (fromOL decls) -- Declaration list may only contain value bindings and signatures. cvBindGroup :: OrdList (LHsDecl GhcPs) -> P (HsValBinds GhcPs) cvBindGroup binding = do { (mbs, sigs, fam_ds, tfam_insts , dfam_insts, _) <- cvBindsAndSigs binding ; massert (null fam_ds && null tfam_insts && null dfam_insts) ; return $ ValBinds NoAnnSortKey mbs sigs } cvBindsAndSigs :: OrdList (LHsDecl GhcPs) -> P (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs] , [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs]) -- Input decls contain just value bindings and signatures -- and in case of class or instance declarations also -- associated type declarations. They might also contain Haddock comments. 
cvBindsAndSigs fb = do
  fb' <- drop_bad_decls (fromOL fb)
  return (partitionBindsAndSigs (getMonoBindAll fb'))
  where
    -- cvBindsAndSigs is called in several places in the parser,
    -- and its items can be produced by various productions:
    --
    -- * decl (when parsing a where clause or a let-expression)
    -- * decl_inst (when parsing an instance declaration)
    -- * decl_cls (when parsing a class declaration)
    --
    -- partitionBindsAndSigs can handle almost all declaration forms produced
    -- by the aforementioned productions, except for SpliceD, which we filter
    -- out here (in drop_bad_decls).
    --
    -- We're not concerned with every declaration form possible, such as those
    -- produced by the topdecl parser production, because cvBindsAndSigs is not
    -- called on top-level declarations.
    drop_bad_decls [] = return []
    drop_bad_decls (L l (SpliceD _ d) : ds) = do
      addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrDeclSpliceNotAtTopLevel d
      drop_bad_decls ds
    drop_bad_decls (d:ds) = (d:) <$> drop_bad_decls ds

-----------------------------------------------------------------------------
-- Group function bindings into equation groups

getMonoBind :: LHsBind GhcPs -> [LHsDecl GhcPs]
  -> (LHsBind GhcPs, [LHsDecl GhcPs])
-- Suppose      (b',ds') = getMonoBind b ds
--      ds is a list of parsed bindings
--      b is a MonoBinds that has just been read off the front
-- Then b' is the result of grouping more equations from ds that
-- belong with b into a single MonoBinds, and ds' is the depleted
-- list of parsed bindings.
--
-- All Haddock comments between equations inside the group are
-- discarded.
--
-- No AndMonoBinds or EmptyMonoBinds here; just single equations

getMonoBind (L loc1 (FunBind { fun_id = fun_id1@(L _ f1)
                             , fun_matches =
                                 MG { mg_alts = (L _ m1@[L _ mtchs1]) } }))
            binds
  | has_args m1
  = go [L loc1 mtchs1] (noAnnSrcSpan $ locA loc1) binds []
  where
    -- See Note [Exact Print Annotations for FunBind]
    go :: [LMatch GhcPs (LHsExpr GhcPs)] -- accumulates matches for current fun
       -> SrcSpanAnnA                    -- current top level loc
       -> [LHsDecl GhcPs]                -- Any docbinds seen
       -> [LHsDecl GhcPs]                -- rest of decls to be processed
       -> (LHsBind GhcPs, [LHsDecl GhcPs]) -- FunBind, rest of decls
    go mtchs loc
       ((L loc2 (ValD _ (FunBind { fun_id = (L _ f2)
                                 , fun_matches =
                                     MG { mg_alts = (L _ [L lm2 mtchs2]) } })))
         : binds) _
        | f1 == f2 =
          let (loc2', lm2') = transferAnnsA loc2 lm2
          in go (L lm2' mtchs2 : mtchs) (combineSrcSpansA loc loc2') binds []

    go mtchs loc (doc_decl@(L loc2 (DocD {})) : binds) doc_decls
        = let doc_decls' = doc_decl : doc_decls
          in go mtchs (combineSrcSpansA loc loc2) binds doc_decls'

    go mtchs loc binds doc_decls
        = let
            -- See Note [Exact Print Annotations for FunBind]: push the
            -- accumulated trailing/leading annotations back out to the
            -- first and last match of the completed group.
            L llm last_m = head mtchs -- Guaranteed at least one
            (llm',loc') = transferAnnsOnlyA llm loc -- Keep comments, transfer trailing

            matches' = reverse (L llm' last_m:tail mtchs)
            L lfm first_m = head matches'
            (lfm', loc'') = transferCommentsOnlyA lfm loc'
          in
            ( L loc'' (makeFunBind fun_id1 (mkLocatedList $ (L lfm' first_m:tail matches')))
            , (reverse doc_decls) ++ binds)
        -- Reverse the final matches, to get it back in the right order
        -- Do the same thing with the trailing doc comments

getMonoBind bind binds = (bind, binds)

{- Note [Exact Print Annotations for FunBind]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An individual Match that ends up in a FunBind MatchGroup is initially
parsed as a LHsDecl. This takes the form

   L loc (ValD NoExtField (FunBind ... [L lm (Match ..)]))

The loc contains the annotations, in particular comments, which are to
precede the declaration when printed, and [TrailingAnn] which are to
follow it. The [TrailingAnn] captures semicolons that may appear after
it when using the braces and semis style of coding.

The match location (lm) has only a location in it at this point, no
annotations. Its location is the same as the top level location in
loc.

What getMonoBind does is to take a sequence of FunBind LHsDecls that
belong to the same function and group them into a single function with
the component declarations all combined into the single MatchGroup as
[LMatch GhcPs].

Given that when exact printing a FunBind the exact printer simply
iterates over all the matches and prints each in turn, the simplest
behaviour would be to simply take the top level annotations (loc) for
each declaration, and use them for the individual component matches
(lm).

The problem is the exact printer first has to deal with the top level
LHsDecl, which means annotations for the loc. This needs to be able to
be exact printed in the context of surrounding declarations, and if
some refactor decides to move the declaration elsewhere, the leading
comments and trailing semicolons need to be handled at that level.

So the solution is to combine all the matches into one, pushing the
annotations into the LMatch's, and then at the end extract the
comments from the first match and [TrailingAnn] from the last to go in
the top level LHsDecl.
-}

-- Group together adjacent FunBinds for every function.
getMonoBindAll :: [LHsDecl GhcPs] -> [LHsDecl GhcPs]
getMonoBindAll [] = []
getMonoBindAll (L l (ValD _ b) : ds) =
  let (L l' b', ds') = getMonoBind (L l b) ds
  in L l' (ValD noExtField b') : getMonoBindAll ds'
getMonoBindAll (d : ds) = d : getMonoBindAll ds

has_args :: [LMatch GhcPs (LHsExpr GhcPs)] -> Bool
has_args []                                    = panic "GHC.Parser.PostProcess.has_args"
has_args (L _ (Match { m_pats = L _ args }) : _) = not (null args)
        -- Don't group together FunBinds if they have
        -- no arguments.  This is necessary now that variable bindings
        -- with no arguments are now treated as FunBinds rather
        -- than pattern bindings (tests/rename/should_fail/rnfail002).

{- **********************************************************************

  #PrefixToHS-utils# Utilities for conversion

  ********************************************************************* -}

{- Note [Parsing data constructors is hard]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The problem with parsing data constructors is that they look a lot like types.
Compare:

  (s1)   data T = C t1 t2
  (s2)   type T = C t1 t2

Syntactically, there's little difference between these declarations, except in
(s1) 'C' is a data constructor, but in (s2) 'C' is a type constructor.

This similarity would pose no problem if we knew ahead of time if we are
parsing a type or a constructor declaration. Looking at (s1) and (s2), a simple
(but wrong!) rule comes to mind: in 'data' declarations assume we are parsing
data constructors, and in other contexts (e.g. 'type' declarations) assume we
are parsing type constructors.

This simple rule does not work because of two problematic cases:

  (p1)   data T = C t1 t2 :+ t3
  (p2)   data T = C t1 t2 => t3

In (p1) we encounter (:+) and it turns out we are parsing an infix data
declaration, so (C t1 t2) is a type and 'C' is a type constructor.
In (p2) we encounter (=>) and it turns out we are parsing an existential
context, so (C t1 t2) is a constraint and 'C' is a type constructor.

As the result, in order to determine whether (C t1 t2) declares a data
constructor, a type, or a context, we would need unlimited lookahead which
'happy' is not so happy with.
-}

-- | Reinterpret a type constructor, including type operators, as a data
--   constructor.
-- See Note [Parsing data constructors is hard]
tyConToDataCon :: LocatedN RdrName -> Either (MsgEnvelope PsMessage) (LocatedN RdrName)
tyConToDataCon (L loc tc)
  | okConOcc (occNameString occ)
  = return (L loc (setRdrNameSpace tc srcDataName))

  | otherwise
  = Left $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrNotADataCon tc)
  where
    occ = rdrNameOcc tc

-- | Build the 'MatchGroup' for a pattern-synonym @where@ clause from its
-- parsed declarations; every declaration must be a binding for the
-- synonym's own name (checked in 'fromDecl', fatal errors otherwise).
mkPatSynMatchGroup :: LocatedN RdrName
                   -> LocatedLW (OrdList (LHsDecl GhcPs))
                   -> P (MatchGroup GhcPs (LHsExpr GhcPs))
mkPatSynMatchGroup (L loc patsyn_name) (L ld decls) =
    do { matches <- mapM fromDecl (fromOL decls)
       ; when (null matches) (wrongNumberErr (locA loc))
       ; return $ mkMatchGroup FromSource (L ld matches) }
  where
    fromDecl (L loc decl@(ValD _ (PatBind _
                             pat@(L _ (ConPat _conAnn ln@(L _ name) details))
                             _ rhs))) =
        do { unless (name == patsyn_name) $
               wrongNameBindingErr (locA loc) decl
           -- conAnn should only be AnnOpenP, AnnCloseP, so the rest should be empty
           ; let ann_fun = mk_ann_funrhs [] []
           ; match <- case details of
               PrefixCon _ pats -> return $ Match { m_ext = noExtField
                                                  , m_ctxt = ctxt, m_pats = L l pats
                                                  , m_grhss = rhs }
                   where
                     l = listLocation pats
                     ctxt = FunRhs { mc_fun = ln
                                   , mc_fixity = Prefix
                                   , mc_strictness = NoSrcStrict
                                   , mc_an = ann_fun }

               InfixCon p1 p2 -> return $ Match { m_ext = noExtField
                                                , m_ctxt = ctxt
                                                , m_pats = L l [p1, p2]
                                                , m_grhss = rhs }
                   where
                     l = listLocation [p1, p2]
                     ctxt = FunRhs { mc_fun = ln
                                   , mc_fixity = Infix
                                   , mc_strictness = NoSrcStrict
                                   , mc_an = ann_fun }

               RecCon{} -> recordPatSynErr (locA loc) pat
           ; return $ L loc match }
    fromDecl (L loc decl) = extraDeclErr (locA loc) decl

    extraDeclErr loc decl =
      addFatalError $ mkPlainErrorMsgEnvelope loc $
        (PsErrNoSingleWhereBindInPatSynDecl patsyn_name decl)

    wrongNameBindingErr loc decl =
      addFatalError $ mkPlainErrorMsgEnvelope loc $
        (PsErrInvalidWhereBindInPatSynDecl patsyn_name decl)

    wrongNumberErr loc =
      addFatalError $ mkPlainErrorMsgEnvelope loc $
        (PsErrEmptyWhereInPatSynDecl patsyn_name)

recordPatSynErr :: SrcSpan -> LPat GhcPs -> P a
recordPatSynErr loc pat =
    addFatalError $ mkPlainErrorMsgEnvelope loc $
      (PsErrRecordSyntaxInPatSynDecl pat)

-- | Assemble a Haskell-98-style data constructor declaration from its
-- parsed pieces (tokens for @=>@, @forall@ and @.@, the constructor name,
-- optional existential binders, optional context, and arguments).
mkConDeclH98 :: (TokDarrow, (TokForall, EpToken "."))
             -> LocatedN RdrName
             -> Maybe [LHsTyVarBndr Specificity GhcPs]
             -> Maybe (LHsContext GhcPs)
             -> HsConDeclH98Details GhcPs
             -> ConDecl GhcPs
mkConDeclH98 (tdarrow, (tforall,tdot)) name mb_forall mb_cxt args
  = ConDeclH98 { con_ext    = AnnConDeclH98 tforall tdot tdarrow
               , con_name   = name
               , con_forall = isJust mb_forall
               , con_ex_tvs = mb_forall `orElse` []
               , con_mb_cxt = mb_cxt
               , con_args   = args
               , con_doc    = Nothing }

-- | Construct a GADT-style data constructor from the constructor names and
-- their type. Some interesting aspects of this function:
--
-- * This splits up the constructor type into its quantified type variables (if
--   provided), context (if provided), argument types, and result type, and
--   records whether this is a prefix or record GADT constructor. See
--   Note [GADT abstract syntax] in "GHC.Hs.Decls" for more details.
mkGadtDecl :: SrcSpan
           -> NonEmpty (LocatedN RdrName)
           -> TokDcolon
           -> LHsSigType GhcPs
           -> P (LConDecl GhcPs)
mkGadtDecl loc names dcol ty = do
  (args, res_ty, (ops, cps), csa) <-
    case body_ty of
     L ll (HsFunTy _ hsArr (L (EpAnn anc _ cs) (HsRecTy an rf)) res_ty) -> do
       -- Record GADT constructor: only an unrestricted arrow is legal
       -- before the result type.
       arr <- case hsArr of
         HsUnrestrictedArrow arr -> return arr
         _ -> do addError $ mkPlainErrorMsgEnvelope (getLocA body_ty) $
                              (PsErrIllegalGadtRecordMultiplicity hsArr)
                 return noAnn
       return ( RecConGADT arr (L (EpAnn anc an cs) rf), res_ty
              , ([], []), epAnnComments ll)
     _ -> do
       let ((ops, cps), cs, arg_types, res_type) = splitHsFunType body_ty
       return (PrefixConGADT noExtField arg_types, res_type, (ops,cps), cs)

  let bndrs_loc = case outer_bndrs of
        HsOuterImplicit{} -> getLoc ty
        HsOuterExplicit an _ -> EpAnn (entry an) noAnn emptyComments

  let l = EpAnn (spanAsAnchor loc) noAnn csa

  pure $ L l ConDeclGADT
                     { con_g_ext  = AnnConDeclGADT ops cps dcol
                     , con_names  = names
                     , con_bndrs  = L bndrs_loc outer_bndrs
                     , con_mb_cxt = mcxt
                     , con_g_args = args
                     , con_res_ty = res_ty
                     , con_doc    = Nothing }
  where
    (outer_bndrs, mcxt, body_ty) = splitLHsGadtTy ty

setRdrNameSpace :: RdrName -> NameSpace -> RdrName
-- ^ This rather gruesome function is used mainly by the parser.
--
-- Case #1. When parsing:
--
-- > data T a = T | T1 Int
--
-- we parse the data constructors as /types/ because of parser ambiguities,
-- so then we need to change the /type constr/ to a /data constr/
--
-- The exact-name case /can/ occur when parsing:
--
-- > data [] a = [] | a : [a]
--
-- For the exact-name case we return an original name.
--
-- Case #2. When parsing:
--
-- > x = fn (forall a. a)  -- RequiredTypeArguments
--
-- we use setRdrNameSpace to set the namespace of forall-bound variables.
--
setRdrNameSpace (Unqual occ) ns = Unqual (setOccNameSpace ns occ)
setRdrNameSpace (Qual m occ) ns = Qual m (setOccNameSpace ns occ)
setRdrNameSpace (Orig m occ) ns = Orig m (setOccNameSpace ns occ)
setRdrNameSpace (Exact n)    ns
  | Just thing <- wiredInNameTyThing_maybe n
  = setWiredInNameSpace thing ns
    -- Preserve Exact Names for wired-in things,
    -- notably tuples and lists

  | isExternalName n
  = Orig (nameModule n) occ

  | otherwise   -- This can happen when quoting and then
                -- splicing a fixity declaration for a type
  = Exact (mkSystemNameAt (nameUnique n) occ (nameSrcSpan n))
  where
    occ = setOccNameSpace ns (nameOccName n)

setWiredInNameSpace :: TyThing -> NameSpace -> RdrName
setWiredInNameSpace (ATyCon tc) ns
  | isDataConNameSpace ns
  = ty_con_data_con tc
  | isTcClsNameSpace ns
  = Exact (getName tc)      -- No-op

setWiredInNameSpace (AConLike (RealDataCon dc)) ns
  | isTcClsNameSpace ns
  = data_con_ty_con dc
  | isDataConNameSpace ns
  = Exact (getName dc)      -- No-op

setWiredInNameSpace thing ns
  = pprPanic "setWiredinNameSpace" (pprNameSpace ns <+> ppr thing)

ty_con_data_con :: TyCon -> RdrName
ty_con_data_con tc
  | isTupleTyCon tc
  , Just dc <- tyConSingleDataCon_maybe tc
  = Exact (getName dc)

  | tc `hasKey` listTyConKey
  = Exact nilDataConName

  | otherwise  -- See Note [setRdrNameSpace for wired-in names]
  = Unqual (setOccNameSpace srcDataName (getOccName tc))

data_con_ty_con :: DataCon -> RdrName
data_con_ty_con dc
  | let tc = dataConTyCon dc
  , isTupleTyCon tc
  = Exact (getName tc)

  | dc `hasKey` nilDataConKey
  = Exact listTyConName

  | otherwise  -- See Note [setRdrNameSpace for wired-in names]
  = Unqual (setOccNameSpace tcClsName (getOccName dc))

{- Note [setRdrNameSpace for wired-in names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In GHC.Types, which declares (:), we have

  infixr 5 :

The ambiguity about which ":" is meant is resolved by parsing it as a
data constructor, but then using dataTcOccs to try the type constructor too;
and that in turn calls setRdrNameSpace to change the name-space of ":" to
tcClsName.  There isn't a corresponding ":" type constructor, but it's painful
to make setRdrNameSpace partial, so we just make an Unqual name instead. It
really doesn't matter!
-}

eitherToP :: MonadP m => Either (MsgEnvelope PsMessage) a -> m a
-- Adapts the Either monad to the P monad
eitherToP (Left err)    = addFatalError err
eitherToP (Right thing) = return thing

checkTyVars :: SDoc -> SDoc -> LocatedN RdrName -> [LHsTypeArg GhcPs]
            -> P (LHsQTyVars GhcPs)  -- the synthesized type variables
-- ^ Check whether the given list of type parameters are all type variables
-- (possibly with a kind signature).
checkTyVars pp_what equals_or_where tc tparms
  = do { tvs <- mapM check tparms
       ; return (mkHsQTvs tvs) }
  where
    check (HsTypeArg at ki) = chkParens [] [] (HsBndrInvisible at) ki
    check (HsValArg _ ty) = chkParens [] [] (HsBndrRequired noExtField) ty
    check (HsArgPar sp) = addFatalError $ mkPlainErrorMsgEnvelope sp $
                            (PsErrMalformedDecl pp_what (unLoc tc))

    -- Keep around an action for adjusting the annotations of extra parens
    chkParens :: [EpaLocation] -> [EpaLocation] -> HsBndrVis GhcPs -> LHsType GhcPs
              -> P (LHsTyVarBndr (HsBndrVis GhcPs) GhcPs)
    chkParens ops cps bvis (L l (HsParTy _ (L lt ty)))
      = let
          (o,c) = mkParensLocs (realSrcSpan $ locA l)
          (_,lt') = transferCommentsOnlyA l lt
        in
          chkParens (o:ops) (c:cps) bvis (L lt' ty)
    chkParens ops cps bvis ty = chk ops cps bvis ty

    -- Check that the name space is correct!
    chk :: [EpaLocation] -> [EpaLocation] -> HsBndrVis GhcPs -> LHsType GhcPs
        -> P (LHsTyVarBndr (HsBndrVis GhcPs) GhcPs)
    chk ops cps bvis (L l (HsKindSig tok_dc (L annt t) k))
        | Just (ann, bvar) <- match_bndr_var t
            = let
                bkind = HsBndrKind noExtField k
                an = (reverse ops) ++ cps
              in
                return (L (widenLocatedAnL (l Semi.<> annt) (for_widening bvis:an))
                          (HsTvb (AnnTyVarBndr (reverse ops) cps ann tok_dc) bvis bvar bkind))
    chk ops cps bvis (L l t)
        | Just (ann, bvar) <- match_bndr_var t
            = let
                bkind = HsBndrNoKind noExtField
                an = (reverse ops) ++ cps
              in
                return (L (widenLocatedAnL l (for_widening bvis:an))
                          (HsTvb (AnnTyVarBndr (reverse ops) cps ann noAnn) bvis bvar bkind))
    chk _ _ _ t@(L loc _)
        = addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $
            (PsErrUnexpectedTypeInDecl t pp_what (unLoc tc) tparms equals_or_where)

    match_bndr_var :: HsType GhcPs -> Maybe (EpToken "'", HsBndrVar GhcPs)
    match_bndr_var (HsTyVar ann _ tv) | isRdrTyVar (unLoc tv)
      = Just (ann, HsBndrVar noExtField tv)
    match_bndr_var (HsWildCardTy tok)
      = Just (noAnn, HsBndrWildCard tok)
    match_bndr_var _ = Nothing

    -- Return a EpaLocation for use in widenLocatedAnL.
    for_widening :: HsBndrVis GhcPs -> EpaLocation
    for_widening (HsBndrInvisible (EpTok loc)) = loc
    for_widening _                             = noAnn

whereDots, equalsDots :: SDoc
-- Second argument to checkTyVars
whereDots  = text "where ..."
equalsDots = text "= ..."

checkDatatypeContext :: Maybe (LHsContext GhcPs) -> P ()
checkDatatypeContext Nothing = return ()
checkDatatypeContext (Just c)
    = do allowed <- getBit DatatypeContextsBit
         unless allowed $ addError $ mkPlainErrorMsgEnvelope (getLocA c) $
                                       (PsErrIllegalDataTypeContext c)

type LRuleTyTmVar = LocatedAn NoEpAnns RuleTyTmVar
data RuleTyTmVar = RuleTyTmVar AnnTyVarBndr (LocatedN RdrName) (Maybe (LHsType GhcPs))
-- ^ Essentially a wrapper for a @RuleBndr GhcPs@

-- turns RuleTyTmVars into RuleBnrs - this is straightforward
mkRuleBndrs :: [LRuleTyTmVar] -> [LRuleBndr GhcPs]
mkRuleBndrs = fmap (fmap cvt_one)
  where cvt_one (RuleTyTmVar ann v Nothing) = RuleBndr ann v
        cvt_one (RuleTyTmVar ann v (Just sig)) =
          RuleBndrSig ann v (mkHsPatSigType noAnn sig)

-- turns RuleTyTmVars into HsTyVarBndrs - this is more interesting
mkRuleTyVarBndrs :: [LRuleTyTmVar] -> [LHsTyVarBndr () GhcPs]
mkRuleTyVarBndrs = fmap (setLHsTyVarBndrNameSpace tvName . cvt_one)
  where cvt_one (L l (RuleTyTmVar ann v msig))
          = L (l2l l) (HsTvb ann () (HsBndrVar noExtField v) (cvt_sig msig))
        cvt_sig Nothing = HsBndrNoKind noExtField
        cvt_sig (Just sig) = HsBndrKind noExtField sig

-- See Note [Parsing explicit foralls in Rules] in Parser.y
checkRuleTyVarBndrNames :: [LHsTyVarBndr flag GhcPs] -> P ()
checkRuleTyVarBndrNames = mapM_ check . mapMaybe (hsTyVarLName . unLoc)
  where check (L loc (Unqual occ)) =
          -- "family" and "role" are pseudo-keywords that cannot be
          -- bound in a RULES forall.
          when (occNameFS occ `elem` [fsLit "family",fsLit "role"])
            (addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $
               (PsErrParseErrorOnInput occ))
        check _ = panic "checkRuleTyVarBndrNames"

checkRecordSyntax :: (MonadP m, Outputable a) => LocatedA a -> m (LocatedA a)
checkRecordSyntax lr@(L loc r)
    = do allowed <- getBit TraditionalRecordSyntaxBit
         unless allowed $ addError $ mkPlainErrorMsgEnvelope (locA loc) $
                                       (PsErrIllegalTraditionalRecordSyntax (ppr r))
         return lr

-- | Check if the gadt_constrlist is empty. Only raise parse error for
-- `data T where` to avoid affecting existing error message, see #8258.
checkEmptyGADTs :: Located ((EpToken "where", EpToken "{", EpToken "}"), [LConDecl GhcPs])
                -> P (Located ((EpToken "where", EpToken "{", EpToken "}"), [LConDecl GhcPs]))
checkEmptyGADTs gadts@(L span (_, []))           -- Empty GADT declaration.
    = do gadtSyntax <- getBit GadtSyntaxBit      -- GADTs implies GADTSyntax
         unless gadtSyntax $ addError $ mkPlainErrorMsgEnvelope span $
                                          PsErrIllegalWhereInDataDecl
         return gadts
checkEmptyGADTs gadts = return gadts             -- Ordinary GADT declaration.

checkTyClHdr :: Bool               -- True  <=> class header
                                   -- False <=> type header
             -> LHsType GhcPs
             -> P (LocatedN RdrName,     -- the head symbol (type or class name)
                   [LHsTypeArg GhcPs],   -- parameters of head symbol
                   LexicalFixity,        -- the declaration is in infix format
                   [EpToken "("],        -- API Annotation for HsParTy
                   [EpToken ")"],        -- when stripping parens
                   EpAnnComments)        -- Accumulated comments from re-arranging
-- Well-formedness check and decomposition of type and class heads.
-- Decomposes   T ty1 .. tyn   into    (T, [ty1, ..., tyn])
--              Int :*: Bool   into    (:*:, [Int, Bool])
-- returning the pieces
checkTyClHdr is_cls ty = goL emptyComments ty [] [] [] Prefix
  where
    goL cs (L l ty) acc ops cps fix = go cs l ty acc ops cps fix

    -- workaround to define '*' despite StarIsType
    go cs ll (HsParTy an (L l (HsStarTy _ isUni))) acc ops' cps' fix
      = do { addPsMessage (locA l) PsWarnStarBinder
           ; let name = mkOccNameFS tcClsName (starSym isUni)
           ; let a' = newAnns ll l an
           ; return (L a' (Unqual name), acc, fix
                    , (reverse ops'), cps', cs) }

    go cs l (HsTyVar _ _ ltc@(L _ tc)) acc ops cps fix
      | isRdrTc tc
      = return (ltc, acc, fix, (reverse ops), cps, cs Semi.<> comments l)
    go cs l (HsOpTy _ _ t1 ltc@(L _ tc) t2) acc ops cps _fix
      | isRdrTc tc
      = return (ltc, lhs:rhs:acc, Infix, (reverse ops), cps, cs Semi.<> comments l)
      where lhs = HsValArg noExtField t1
            rhs = HsValArg noExtField t2
    go cs l (HsParTy (o,c) ty) acc ops cps fix
      = goL (cs Semi.<> comments l) ty acc (o:ops) (c:cps) fix
    go cs l (HsAppTy _ t1 t2) acc ops cps fix
      = goL (cs Semi.<> comments l) t1 (HsValArg noExtField t2:acc) ops cps fix
    go cs l (HsAppKindTy at ty ki) acc ops cps fix
      = goL (cs Semi.<> comments l) ty (HsTypeArg at ki:acc) ops cps fix
    go cs l (HsTupleTy _ HsBoxedOrConstraintTuple ts) [] ops cps fix
      = return (L (l2l l) (nameRdrName tup_name)
               , map (HsValArg noExtField) ts, fix, (reverse ops), cps, cs Semi.<> comments l)
      where
        arity = length ts
        tup_name | is_cls    = cTupleTyConName arity
                 | otherwise = getName (tupleTyCon Boxed arity)
          -- See Note [Unit tuples] in GHC.Hs.Type  (TODO: is this still relevant?)
    go _ l _ _ _ _ _
      = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $
          (PsErrMalformedTyOrClDecl ty)

    -- Combine the annotations from the HsParTy and HsStarTy into a
    -- new one for the LocatedN RdrName
    newAnns :: SrcSpanAnnA -> SrcSpanAnnA -> (EpToken "(", EpToken ")") -> SrcSpanAnnN
    newAnns l@(EpAnn _ (AnnListItem _) csp0) l1@(EpAnn ap (AnnListItem ta) csp) (o,c) =
      let
        lr = combineSrcSpans (locA l1) (locA l)
      in
        EpAnn (EpaSpan lr) (NameAnn (NameParens o c) ap ta) (csp0 Semi.<> csp)

-- | Yield a parse error if we have a function applied directly to a do block
-- etc. and BlockArguments is not enabled.
checkExpBlockArguments :: LHsExpr GhcPs -> PV ()
checkCmdBlockArguments :: LHsCmd GhcPs -> PV ()
(checkExpBlockArguments, checkCmdBlockArguments) = (checkExpr, checkCmd)
  where
    checkExpr :: LHsExpr GhcPs -> PV ()
    checkExpr expr = case unLoc expr of
      HsDo _ (DoExpr m) _   -> check (PsErrDoInFunAppExpr m) expr
      HsDo _ (MDoExpr m) _  -> check (PsErrMDoInFunAppExpr m) expr
      HsCase {}             -> check PsErrCaseInFunAppExpr expr
      HsLam _ lam_variant _ -> check (PsErrLambdaInFunAppExpr lam_variant) expr
      HsLet {}              -> check PsErrLetInFunAppExpr expr
      HsIf {}               -> check PsErrIfInFunAppExpr expr
      HsProc {}             -> check PsErrProcInFunAppExpr expr
      _                     -> return ()

    checkCmd :: LHsCmd GhcPs -> PV ()
    checkCmd cmd = case unLoc cmd of
      HsCmdLam _ lam_variant _ -> check (PsErrLambdaCmdInFunAppCmd lam_variant) cmd
      HsCmdCase {}             -> check PsErrCaseCmdInFunAppCmd cmd
      HsCmdIf {}               -> check PsErrIfCmdInFunAppCmd cmd
      HsCmdLet {}              -> check PsErrLetCmdInFunAppCmd cmd
      HsCmdDo {}               -> check PsErrDoCmdInFunAppCmd cmd
      _                        -> return ()

    check err a = do
      blockArguments <- getBit BlockArgumentsBit
      unless blockArguments $
        addError $ mkPlainErrorMsgEnvelope (getLocA a) $ (err a)

-- | Validate the context constraints and break up a context into a list
-- of predicates.
--
-- @
--     (Eq a, Ord b)        -->  [Eq a, Ord b]
--     Eq a                 -->  [Eq a]
--     (Eq a)               -->  [Eq a]
--     (((Eq a)))           -->  [Eq a]
-- @
checkContext :: LHsType GhcPs -> P (LHsContext GhcPs)
checkContext orig_t@(L (EpAnn l _ cs) _orig_t) =
  check ([],[],cs) orig_t
 where
  check :: ([EpToken "("],[EpToken ")"],EpAnnComments)
        -> LHsType GhcPs -> P (LHsContext GhcPs)
  check (oparens,cparens,cs) (L _l (HsTupleTy (AnnParens o c) HsBoxedOrConstraintTuple ts))
    -- (Eq a, Ord b) shows up as a tuple type. Only boxed tuples can
    -- be used as context constraints.
    -- Ditto ()
    = mkCTuple (oparens ++ [o], c : cparens, cs) ts
  -- With NoListTuplePuns, contexts are parsed as data constructors, which causes failure
  -- downstream.
  -- This converts them just like when they are parsed as types in the punned case.
  check (oparens,cparens,cs) (L _l (HsExplicitTupleTy (q,o,c) _ ts))
    = punsAllowed >>= \case
        True -> unprocessed
        False -> do
          let (op, cp) = case q of
                EpTok ql -> ([EpTok ql], [c])
                _ -> ([o], [c])
          mkCTuple (oparens ++ op, cp ++ cparens, cs) ts
  check (opi,cpi,csi) (L _lp1 (HsParTy (o,c) ty))
                                  -- to be sure HsParTy doesn't get into the way
    = check (o:opi, c:cpi, csi) ty

  -- No need for anns, returning original
  check (_opi,_cpi,_csi) _t = unprocessed

  unprocessed = return (L (EpAnn l (AnnContext Nothing [] []) emptyComments) [orig_t])

  mkCTuple (oparens, cparens, cs) ts =
    -- Append parens so that the original order in the source is maintained
    return (L (EpAnn l (AnnContext Nothing oparens cparens) cs) ts)

-- | The same as `checkContext`, but for expressions.
--
-- Validate the context constraints and break up a context into a list
-- of predicates.
--
-- @
--     (Eq a, Ord b)        -->  [Eq a, Ord b]
--     Eq a                 -->  [Eq a]
--     (Eq a)               -->  [Eq a]
--     (((Eq a)))           -->  [Eq a]
-- @
checkContextExpr :: LHsExpr GhcPs -> PV (LocatedC [LHsExpr GhcPs])
checkContextExpr orig_expr@(L (EpAnn l _ cs) _) =
  check ([],[], cs) orig_expr
  where
    check :: ([EpToken "("],[EpToken ")"],EpAnnComments)
          -> LHsExpr GhcPs -> PV (LocatedC [LHsExpr GhcPs])
    check (oparens,cparens,cs) (L _ (ExplicitTuple (ap_open, ap_close) tup_args boxity))
      -- Neither unboxed tuples (#e1,e2#) nor tuple sections (e1,,e2,) can be a context
      | isBoxed boxity
      , Just es <- tupArgsPresent_maybe tup_args
      = mkCTuple (oparens ++ [EpTok ap_open], EpTok ap_close : cparens, cs) es
    check (opi, cpi, csi) (L _ (HsPar (open_tok, close_tok) expr))
      = check (opi ++ [open_tok], close_tok : cpi, csi) expr
    check (oparens,cparens,cs)
          (L _ (HsVar _ (L (EpAnn _ (NameAnnOnly (NameParens open closed) []) _) name)))
      | name == nameRdrName (dataConName unitDataCon)
      = mkCTuple (oparens ++ [open], closed : cparens, cs) []
    check _ _ = unprocessed

    unprocessed = return (L (EpAnn l (AnnContext Nothing [] []) emptyComments) [orig_expr])

    mkCTuple (oparens, cparens, cs) ts =
      -- Append parens so that the original order in the source is maintained
      return (L (EpAnn l (AnnContext Nothing oparens cparens) cs) ts)

checkImportDecl :: Maybe (EpToken "qualified")
                -> Maybe (EpToken "qualified")
                -> P ()
checkImportDecl mPre mPost = do
  let whenJust mg f = maybe (pure ()) f mg
      tokenSpan tok = RealSrcSpan (epaLocationRealSrcSpan $ getEpTokenLoc tok) Strict.Nothing

  importQualifiedPostEnabled <- getBit ImportQualifiedPostBit

  -- Error if 'qualified' found in postpositive position and
  -- 'ImportQualifiedPost' is not in effect.
  whenJust mPost $ \post ->
    when (not importQualifiedPostEnabled) $
      failNotEnabledImportQualifiedPost (tokenSpan post)

  -- Error if 'qualified' occurs in both pre and postpositive
  -- positions.
  whenJust mPost $ \post ->
    when (isJust mPre) $
      failImportQualifiedTwice (tokenSpan post)

  -- Warn if 'qualified' found in prepositive position and
  -- 'Opt_WarnPrepositiveQualifiedModule' is enabled.
  whenJust mPre $ \pre ->
    warnPrepositiveQualifiedModule (tokenSpan pre)

-- -------------------------------------------------------------------------
-- Checking Patterns.

-- We parse patterns as expressions and check for valid patterns below,
-- converting the expression into a pattern at the same time.

checkPattern :: LocatedA (PatBuilder GhcPs) -> P (LPat GhcPs)
checkPattern = runPV . checkLPat

-- | Like 'checkPattern', but runs with an extra 'ParseContext' used to
-- improve the error messages.
checkPattern_details :: ParseContext -> PV (LocatedA (PatBuilder GhcPs)) -> P (LPat GhcPs)
checkPattern_details extraDetails pp = runPV_details extraDetails (pp >>= checkLPat)

checkLArgPat :: LocatedA (ArgPatBuilder GhcPs) -> PV (LPat GhcPs)
checkLArgPat (L l (ArgPatBuilderVisPat p)) = checkLPat (L l p)
checkLArgPat (L l (ArgPatBuilderArgPat p)) = return (L l p)

checkLPat :: LocatedA (PatBuilder GhcPs) -> PV (LPat GhcPs)
checkLPat (L l@(EpAnn anc an _) p) = do
  (L l' p', cs) <- checkPat (EpAnn anc an emptyComments) emptyComments (L l p) [] []
  return (L (addCommentsToEpAnn l' cs) p')

checkPat :: SrcSpanAnnA -> EpAnnComments -> LocatedA (PatBuilder GhcPs)
         -> [HsConPatTyArg GhcPs] -> [LPat GhcPs]
         -> PV (LPat GhcPs, EpAnnComments)
-- SG: I think this function checks what Haskell2010 calls the `pat` and `lpat`
-- productions
checkPat loc cs (L l e@(PatBuilderVar (L ln c))) tyargs args
  | isRdrDataCon c || isRdrTc c
  = return ( L loc $ ConPat
      { pat_con_ext = noAnn -- AZ: where should this come from?
      , pat_con = L ln c
      , pat_args = PrefixCon tyargs args
      }, comments l Semi.<> cs)
  | (not (null args) && patIsRec c) = do
      ctx <- askParseContext
      patFail (locA l) . PsErrInPat e $ PEIP_RecPattern args YesPatIsRecursive ctx
checkPat loc cs (L la (PatBuilderAppType f at t)) tyargs args =
  checkPat loc (cs Semi.<> comments la) f (HsConPatTyArg at t : tyargs) args
checkPat loc cs (L la (PatBuilderApp f e)) [] args = do
  p <- checkLPat e
  checkPat loc (cs Semi.<> comments la) f [] (p : args)
checkPat loc cs (L l e) [] [] = do
  p <- checkAPat loc e
  return (L l p, cs)
checkPat loc _ e _ _ = do
  details <- fromParseContext <$> askParseContext
  patFail (locA loc) (PsErrInPat (unLoc e) details)

checkAPat :: SrcSpanAnnA -> PatBuilder GhcPs -> PV (Pat GhcPs)
checkAPat loc e0 = do
  nPlusKPatterns <- getBit NPlusKPatternsBit
  case e0 of
    PatBuilderPat p -> return p
    PatBuilderVar x -> return (VarPat noExtField x)

    -- Overloaded numeric patterns (e.g. f 0 x = x)
    -- Negation is recorded separately, so that the literal is zero or +ve
    -- NB. Negative *primitive* literals are already handled by the lexer
    PatBuilderOverLit pos_lit -> return (mkNPat (L (l2l loc) pos_lit) Nothing noAnn)

    -- n+k patterns
    PatBuilderOpApp
            (L _ (PatBuilderVar (L nloc n)))
            (L l plus)
            (L lloc (PatBuilderOverLit lit@(OverLit {ol_val = HsIntegral {}})))
            _
      | nPlusKPatterns && (plus == plus_RDR)
      -> return (mkNPlusKPat (L nloc n) (L (l2l lloc) lit) (EpTok $ entry l))

    -- Improve error messages for the @-operator when the user meant an @-pattern
    PatBuilderOpApp _ op _ _ | opIsAt (unLoc op) -> do
      addError $ mkPlainErrorMsgEnvelope (getLocA op) PsErrAtInPatPos
      return (WildPat noExtField)

    PatBuilderOpApp l (L cl c) r (_os,_cs)
      | isRdrDataCon c || isRdrTc c -> do
          l <- checkLPat l
          r <- checkLPat r
          return $ ConPat
            { pat_con_ext = noAnn
            , pat_con = L cl c
            , pat_args = InfixCon l r
            }

    PatBuilderPar lpar e rpar -> do
      p <- checkLPat e
      return (ParPat (lpar, rpar) p)

    _ -> do
      details <- fromParseContext <$> askParseContext
      patFail (locA loc) (PsErrInPat e0 details)

placeHolderPunRhs :: DisambECP b => PV (LocatedA b)
-- The RHS of a punned record field will be filled in by the renamer
-- It's better not to make it an error, in case we want to print it when
-- debugging
placeHolderPunRhs = mkHsVarPV (noLocA pun_RDR)

plus_RDR, pun_RDR :: RdrName
plus_RDR = mkUnqual varName (fsLit "+") -- Hack
pun_RDR  = mkUnqual varName (fsLit "pun-right-hand-side")

checkPatField :: LHsRecField GhcPs (LocatedA (PatBuilder GhcPs))
              -> PV (LHsRecField GhcPs (LPat GhcPs))
checkPatField (L l fld) = do p <- checkLPat (hfbRHS fld)
                             return (L l (fld { hfbRHS = p }))

-- | Abort pattern checking with a fatal parse error at the given location.
patFail :: SrcSpan -> PsMessage -> PV a
patFail loc msg = addFatalError $ mkPlainErrorMsgEnvelope loc $ msg

-- | True when the name is the bare identifier "rec"; used by 'checkPat'
-- to report a better error for @rec@ used as a pattern head.
patIsRec :: RdrName -> Bool
patIsRec e = e == mkUnqual varName (fsLit "rec")

---------------------------------------------------------------------------
-- Check Equation Syntax

checkValDef :: SrcSpan
            -> LocatedA (PatBuilder GhcPs)
            -> (HsMultAnn GhcPs, Maybe (TokDcolon, LHsType GhcPs))
            -> Located (GRHSs GhcPs (LHsExpr GhcPs))
            -> P (HsBind GhcPs)

checkValDef loc lhs (mult, Just (sigAnn, sig)) grhss
        -- x :: ty = rhs  parses as a *pattern* binding
  = do lhs' <- runPV $ mkHsTySigPV (combineLocsA lhs sig) lhs sig sigAnn
                         >>= checkLPat
       checkPatBind loc lhs' grhss mult

checkValDef loc lhs (mult_ann, Nothing) grhss
  | HsNoMultAnn{} <- mult_ann
  = do { mb_fun <- isFunLhs lhs
       ; case mb_fun of
           Just (fun, is_infix, pats, ops, cps) -> do
             let ann_fun = mk_ann_funrhs ops cps
             let l = listLocation pats
             checkFunBind loc ann_fun fun is_infix (L l pats) grhss
           Nothing -> do
             lhs' <- checkPattern lhs
             checkPatBind loc lhs' grhss mult_ann }

checkValDef loc lhs (mult_ann, Nothing) ghrss
        -- %p x = rhs  parses as a *pattern* binding
  = do lhs' <- checkPattern lhs
       checkPatBind loc lhs' ghrss mult_ann

-- | Build an 'AnnFunRhs' from the paren tokens collected while peeling a
-- function LHS.  The leading 'NoEpTok' is presumably the (absent)
-- strictness-bang token slot -- confirm against the 'AnnFunRhs' definition.
mk_ann_funrhs :: [EpToken "("] -> [EpToken ")"] -> AnnFunRhs
mk_ann_funrhs ops cps = AnnFunRhs NoEpTok ops cps

checkFunBind :: SrcSpan
             -> AnnFunRhs
             -> LocatedN RdrName
             -> LexicalFixity
             -> LocatedE [LocatedA (ArgPatBuilder GhcPs)]
             -> Located (GRHSs GhcPs (LHsExpr GhcPs))
             -> P (HsBind GhcPs)
checkFunBind locF ann_fun (L lf fun) is_infix (L lp pats) (L _ grhss)
  = do  ps <- runPV_details extraDetails (mapM checkLArgPat pats)
        let match_span = noAnnSrcSpan $ locF
        return (makeFunBind (L (l2l lf) fun) (L (noAnnSrcSpan $ locA match_span)
                 [L match_span (Match { m_ext = noExtField
                                      , m_ctxt = FunRhs
                                          { mc_fun = L lf fun
                                          , mc_fixity = is_infix
                                          , mc_strictness = NoSrcStrict
                                          , mc_an = ann_fun }
                                      , m_pats = L lp ps
                                      , m_grhss = grhss })]))
        -- The span of the match covers the entire equation.
        -- That isn't quite right, but it'll do for now.
  where
    extraDetails
      | Infix <- is_infix = ParseContext (Just fun) NoIncompleteDoBlock
      | otherwise         = noParseContext

makeFunBind :: LocatedN RdrName -> LocatedLW [LMatch GhcPs (LHsExpr GhcPs)]
            -> HsBind GhcPs
-- Like GHC.Hs.Utils.mkFunBind, but we need to be able to set the fixity too
makeFunBind fn ms
  = FunBind { fun_ext = noExtField,
              fun_id = fn,
              fun_matches = mkMatchGroup FromSource ms }

-- See Note [FunBind vs PatBind]
checkPatBind :: SrcSpan
             -> LPat GhcPs
             -> Located (GRHSs GhcPs (LHsExpr GhcPs))
             -> HsMultAnn GhcPs
             -> P (HsBind GhcPs)
checkPatBind loc (L _ (BangPat an (L _ (VarPat _ v))))
                 (L _match_span grhss) (HsNoMultAnn _)
      -- A strict variable binding (!x = rhs) becomes a FunBind with a
      -- single, argument-less, SrcStrict match.
      = return (makeFunBind v (L (noAnnSrcSpan loc)
                 [L (noAnnSrcSpan loc) (m an v)]))
  where
    m a v = Match { m_ext = noExtField
                  , m_ctxt = FunRhs { mc_fun    = v
                                    , mc_fixity = Prefix
                                    , mc_strictness = SrcStrict
                                    , mc_an = AnnFunRhs a [] [] }
                  , m_pats = noLocA []
                  , m_grhss = grhss }

checkPatBind _loc lhs (L _ grhss) mult = do
  return (PatBind noExtField lhs mult grhss)

-- | Check that the LHS of a type signature is a single, unqualified,
-- non-data-constructor variable; otherwise fail with the matching
-- 'PsErrInvalidTypeSignature' reason.
checkValSigLhs :: LHsExpr GhcPs -> P (LocatedN RdrName)
checkValSigLhs lhs@(L l lhs_expr) =
  case lhs_expr of
    HsVar _ lrdr@(L _ v) -> check_var v lrdr
    _ -> make_err PsErrInvalidTypeSig_Other
  where
    check_var v lrdr
      | not (isUnqual v) = make_err PsErrInvalidTypeSig_Qualified
      | isDataOcc occ_n  = make_err PsErrInvalidTypeSig_DataCon
      | otherwise = pure lrdr
      where occ_n = rdrNameOcc v
    make_err reason = addFatalError $
      mkPlainErrorMsgEnvelope (locA l) (PsErrInvalidTypeSignature reason lhs)

checkDoAndIfThenElse
  :: (Outputable a, Outputable b, Outputable c)
  => (a -> Bool -> b -> Bool -> c -> PsMessage)
  -> LocatedA a -> Bool -> LocatedA b -> Bool -> LocatedA c -> PV ()
checkDoAndIfThenElse err guardExpr semiThen thenExpr semiElse elseExpr
  | semiThen || semiElse = do
      doAndIfThenElse <- getBit DoAndIfThenElseBit
      let e   = err (unLoc guardExpr)
                    semiThen (unLoc thenExpr)
                    semiElse (unLoc elseExpr)
          loc = combineLocs (reLoc guardExpr) (reLoc elseExpr)
      unless doAndIfThenElse $ addError (mkPlainErrorMsgEnvelope loc e)
  | otherwise = return ()

isFunLhs :: LocatedA (PatBuilder GhcPs)
         -> P (Maybe (LocatedN RdrName, LexicalFixity,
                      [LocatedA (ArgPatBuilder GhcPs)],[EpToken "("],[EpToken ")"]))
-- A variable binding is parsed as a FunBind.
-- Just (fun, is_infix, arg_pats) if e is a function LHS
isFunLhs e = go e [] [] []
 where
   mk = fmap ArgPatBuilderVisPat

   go (L l (PatBuilderVar (L loc f))) es ops cps
       | not (isRdrDataCon f) = do
           let (_l, loc') = transferCommentsOnlyA l loc
           return (Just (L loc' f, Prefix, es, (reverse ops), cps))
   go (L l (PatBuilderApp (L lf f) e)) es ops cps = do
       let (_l, lf') = transferCommentsOnlyA l lf
       go (L lf' f) (mk e:es) ops cps
   go (L l (PatBuilderPar _ (L le e) _)) es@(_:_) ops cps
       = go (L le' e) es (o:ops) (c:cps)
       -- NB: es@(_:_) means that there must be an arg after the parens for the
       -- LHS to be a function LHS. This corresponds to the Haskell Report's definition
       -- of funlhs.
     where
       (_l, le') = transferCommentsOnlyA l le
       (o,c) = mkParensEpToks (realSrcSpan $ locA l)
   go (L loc (PatBuilderOpApp (L ll l) (L loc' op) r (os,cs))) es ops cps
       | not (isRdrDataCon op)         -- We have found the function!
       = do { let (_l, ll') = transferCommentsOnlyA loc ll
            ; return (Just (L loc' op, Infix, (mk (L ll' l):mk r:es), (os ++ reverse ops), (cs ++ cps))) }
       | otherwise                     -- Infix data con; keep going
       = do { let (_l, ll') = transferCommentsOnlyA loc ll
            ; mb_l <- go (L ll' l) es ops cps
            ; return (reassociate =<< mb_l) }
     where
       reassociate (op', Infix, j : L k_loc (ArgPatBuilderVisPat k) : es', ops', cps')
         = Just (op', Infix, j : op_app : es', ops', cps')
         where
           op_app = mk $ L loc (PatBuilderOpApp (L k_loc k)
                                 (L loc' op) r (reverse ops, cps))
       reassociate _other = Nothing
   go (L l (PatBuilderAppType (L lp pat) tok ty_pat@(HsTP _ (L (EpAnn anc ann cs) _)))) es ops cps
       = go (L lp' pat) (L (EpAnn anc' ann cs) (ArgPatBuilderArgPat invis_pat) : es) ops cps
     where
       invis_pat = InvisPat (tok, SpecifiedSpec) ty_pat
       anc' = widenAnchorT anc tok
       (_l, lp') = transferCommentsOnlyA l lp
   go _ _ _ _ = return Nothing

data ArgPatBuilder p
  = ArgPatBuilderVisPat (PatBuilder p)
  | ArgPatBuilderArgPat (Pat p)

instance Outputable (ArgPatBuilder GhcPs) where
  ppr (ArgPatBuilderVisPat p) = ppr p
  ppr (ArgPatBuilderArgPat p) = ppr p

-- | Build a strictness-annotated type carrying no UNPACK pragma
-- ('NoSrcUnpack' / 'NoSourceText').
mkBangTy :: EpaLocation -> SrcStrictness -> LHsType GhcPs -> HsType GhcPs
mkBangTy tok_loc strictness =
  HsBangTy ((noAnn, noAnn, tok_loc), NoSourceText) (HsBang NoSrcUnpack strictness)

-- | Result of parsing @{-\# UNPACK \#-}@ or @{-\# NOUNPACK \#-}@.
data UnpackednessPragma =
  UnpackednessPragma (EpaLocation, EpToken "#-}") SourceText SrcUnpackedness

-- | Annotate a type with either an @{-\# UNPACK \#-}@ or a @{-\# NOUNPACK \#-}@ pragma.
addUnpackednessP :: MonadP m => Located UnpackednessPragma -> LHsType GhcPs -> m (LHsType GhcPs)
addUnpackednessP (L lprag (UnpackednessPragma anns prag unpk)) ty = do
    let l' = combineSrcSpans lprag (getLocA ty)
    let t' = addUnpackedness anns ty
    return (L (noAnnSrcSpan l') t')
  where
    -- If we have a HsBangTy that only has a strictness annotation,
    -- such as ~T or !T, then add the pragma to the existing HsBangTy.
    --
    -- Otherwise, wrap the type in a new HsBangTy constructor.
    addUnpackedness (o,c) (L _ (HsBangTy ((_,_,tl), NoSourceText) bang t))
      | HsBang NoSrcUnpack strictness <- bang
      = HsBangTy ((o,c,tl), prag) (HsBang unpk strictness) t
    addUnpackedness (o,c) t
      = HsBangTy ((o,c,noAnn), prag) (HsBang unpk NoSrcStrict) t

---------------------------------------------------------------------------
-- | Check for monad comprehensions
--
-- If the flag MonadComprehensions is set, return a 'MonadComp' context,
-- otherwise use the usual 'ListComp' context

checkMonadComp :: PV HsDoFlavour
checkMonadComp = do
    monadComprehensions <- getBit MonadComprehensionsBit
    return $ if monadComprehensions
                then MonadComp
                else ListComp

-- -------------------------------------------------------------------------
-- Expression/command/pattern ambiguity.
-- See Note [Ambiguous syntactic categories]
--

-- See Note [Ambiguous syntactic categories]
--
-- This newtype is required to avoid impredicative types in monadic
-- productions. That is, in a production that looks like
--
--    | ... {% return (ECP ...) }
--
-- we are dealing with
--    P ECP
-- whereas without a newtype we would be dealing with
--    P (forall b. DisambECP b => PV (Located b))
--
newtype ECP =
  ECP { unECP :: forall b. DisambECP b => PV (LocatedA b) }

ecpFromExp :: LHsExpr GhcPs -> ECP
ecpFromExp a = ECP (ecpFromExp' a)

ecpFromCmd :: LHsCmd GhcPs -> ECP
ecpFromCmd a = ECP (ecpFromCmd' a)

ecpFromPat :: LPat GhcPs -> ECP
ecpFromPat a = ECP (ecpFromPat' a)

-- The 'fbinds' parser rule produces values of this type. See Note
-- [RecordDotSyntax field updates].
type Fbind b = Either (LHsRecField GhcPs (LocatedA b)) (LHsRecProj GhcPs (LocatedA b))

-- | Disambiguate infix operators.
-- See Note [Ambiguous syntactic categories] class DisambInfixOp b where mkHsVarOpPV :: LocatedN RdrName -> PV (LocatedN b) mkHsConOpPV :: LocatedN RdrName -> PV (LocatedN b) mkHsInfixHolePV :: LocatedN (HsExpr GhcPs) -> PV (LocatedN b) instance DisambInfixOp (HsExpr GhcPs) where mkHsVarOpPV v = return $ L (getLoc v) (HsVar noExtField v) mkHsConOpPV v = return $ L (getLoc v) (HsVar noExtField v) mkHsInfixHolePV h = return h instance DisambInfixOp RdrName where mkHsConOpPV (L l v) = return $ L l v mkHsVarOpPV (L l v) = return $ L l v mkHsInfixHolePV (L l _) = addFatalError $ mkPlainErrorMsgEnvelope (getHasLoc l) $ PsErrInvalidInfixHole type AnnoBody b = ( Anno (GRHS GhcPs (LocatedA (Body b GhcPs))) ~ EpAnnCO , Anno [LocatedA (Match GhcPs (LocatedA (Body b GhcPs)))] ~ SrcSpanAnnLW , Anno (Match GhcPs (LocatedA (Body b GhcPs))) ~ SrcSpanAnnA , Anno (StmtLR GhcPs GhcPs (LocatedA (Body (Body b GhcPs) GhcPs))) ~ SrcSpanAnnA , Anno [LocatedA (StmtLR GhcPs GhcPs (LocatedA (Body (Body (Body b GhcPs) GhcPs) GhcPs)))] ~ SrcSpanAnnLW ) -- | Disambiguate constructs that may appear when we do not know ahead of time whether we are -- parsing an expression, a command, or a pattern. -- See Note [Ambiguous syntactic categories] class (b ~ (Body b) GhcPs, AnnoBody b) => DisambECP b where -- | See Note [Body in DisambECP] type Body b :: Type -> Type -- | Return a command without ambiguity, or fail in a non-command context. ecpFromCmd' :: LHsCmd GhcPs -> PV (LocatedA b) -- | Return an expression without ambiguity, or fail in a non-expression context. ecpFromExp' :: LHsExpr GhcPs -> PV (LocatedA b) -- | Return a pattern without ambiguity, or fail in a non-pattern context. ecpFromPat' :: LPat GhcPs -> PV (LocatedA b) mkHsProjUpdatePV :: SrcSpan -> Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] -> LocatedA b -> Bool -> Maybe (EpToken "=") -> PV (LHsRecProj GhcPs (LocatedA b)) -- | Disambiguate "let ... in ..." 
mkHsLetPV :: SrcSpan -> EpToken "let" -> HsLocalBinds GhcPs -> EpToken "in" -> LocatedA b -> PV (LocatedA b) -- | Infix operator representation type InfixOp b -- | Bring superclass constraints on InfixOp into scope. -- See Note [UndecidableSuperClasses for associated types] superInfixOp :: (DisambInfixOp (InfixOp b) => PV (LocatedA b )) -> PV (LocatedA b) -- | Disambiguate "f # x" (infix operator) mkHsOpAppPV :: SrcSpan -> LocatedA b -> LocatedN (InfixOp b) -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "case ... of ..." mkHsCasePV :: SrcSpan -> LHsExpr GhcPs -> (LocatedLW [LMatch GhcPs (LocatedA b)]) -> EpAnnHsCase -> PV (LocatedA b) -- | Disambiguate "\... -> ..." (lambda), "\case" and "\cases" mkHsLamPV :: SrcSpan -> HsLamVariant -> (LocatedLW [LMatch GhcPs (LocatedA b)]) -> EpAnnLam -> PV (LocatedA b) -- | Function argument representation type FunArg b -- | Bring superclass constraints on FunArg into scope. -- See Note [UndecidableSuperClasses for associated types] superFunArg :: (DisambECP (FunArg b) => PV (LocatedA b)) -> PV (LocatedA b) -- | Disambiguate "f x" (function application) mkHsAppPV :: SrcSpanAnnA -> LocatedA b -> LocatedA (FunArg b) -> PV (LocatedA b) -- | Disambiguate "f @t" (visible type application) mkHsAppTypePV :: SrcSpanAnnA -> LocatedA b -> EpToken "@" -> LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate "if ... then ... else ..." mkHsIfPV :: SrcSpan -> LHsExpr GhcPs -> Bool -- semicolon? -> LocatedA b -> Bool -- semicolon? -> LocatedA b -> AnnsIf -> PV (LocatedA b) -- | Disambiguate "do { ... }" (do notation) mkHsDoPV :: SrcSpan -> Maybe ModuleName -> LocatedLW [LStmt GhcPs (LocatedA b)] -> EpaLocation -> -- Token EpaLocation -> -- Anchor PV (LocatedA b) -- | Disambiguate "( ... )" (parentheses) mkHsParPV :: SrcSpan -> EpToken "(" -> LocatedA b -> EpToken ")" -> PV (LocatedA b) -- | Disambiguate a variable "f" or a data constructor "MkF". 
mkHsVarPV :: LocatedN RdrName -> PV (LocatedA b) -- | Disambiguate a monomorphic literal mkHsLitPV :: Located (HsLit GhcPs) -> PV (LocatedA b) -- | Disambiguate an overloaded literal mkHsOverLitPV :: LocatedAn a (HsOverLit GhcPs) -> PV (LocatedAn a b) -- | Disambiguate a wildcard mkHsWildCardPV :: (NoAnn a) => SrcSpan -> PV (LocatedAn a b) -- | Disambiguate "a :: t" (type annotation) mkHsTySigPV :: SrcSpanAnnA -> LocatedA b -> LHsType GhcPs -> TokDcolon -> PV (LocatedA b) -- | Disambiguate "[a,b,c]" (list syntax) mkHsExplicitListPV :: SrcSpan -> [LocatedA b] -> AnnList () -> PV (LocatedA b) -- | Disambiguate "$(...)" and "[quasi|...|]" (TH splices) mkHsSplicePV :: Located (HsUntypedSplice GhcPs) -> PV (LocatedA b) -- | Disambiguate "f { a = b, ... }" syntax (record construction and record updates) mkHsRecordPV :: Bool -> -- Is OverloadedRecordUpdate in effect? SrcSpan -> SrcSpan -> LocatedA b -> ([Fbind b], Maybe SrcSpan) -> (Maybe (EpToken "{"), Maybe (EpToken "}")) -> PV (LocatedA b) -- | Disambiguate "-a" (negation) mkHsNegAppPV :: SrcSpan -> LocatedA b -> EpToken "-" -> PV (LocatedA b) -- | Disambiguate "(# a)" (right operator section) mkHsSectionR_PV :: SrcSpan -> LocatedA (InfixOp b) -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "(a -> b)" (view pattern or function type arrow) mkHsArrowPV :: SrcSpan -> ArrowParsingMode lhs b -> LocatedA lhs -> HsArrowOf (LocatedA b) GhcPs -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "%m" to the left of "->" (multiplicity) mkHsMultPV :: EpToken "%" -> LocatedA b -> PV (TokRarrow -> HsArrowOf (LocatedA b) GhcPs) -- | Disambiguate "forall a. 
b" and "forall a -> b" (forall telescope) mkHsForallPV :: SrcSpan -> HsForAllTelescope GhcPs -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "(a,b,c)" to the left of "=>" (constraint list) checkContextPV :: LocatedA b -> PV (LocatedC [LocatedA b]) -- | Disambiguate "a => b" (constraint context) mkQualPV :: SrcSpan -> LocatedC [LocatedA b] -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "a@b" (as-pattern) mkHsAsPatPV :: SrcSpan -> LocatedN RdrName -> EpToken "@" -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "~a" (lazy pattern) mkHsLazyPatPV :: SrcSpan -> LocatedA b -> EpToken "~" -> PV (LocatedA b) -- | Disambiguate "!a" (bang pattern) mkHsBangPatPV :: SrcSpan -> LocatedA b -> EpToken "!" -> PV (LocatedA b) -- | Disambiguate tuple sections and unboxed sums mkSumOrTuplePV :: SrcSpanAnnA -> Boxity -> SumOrTuple b -> (EpaLocation, EpaLocation) -> PV (LocatedA b) -- | Disambiguate "type t" (embedded type) mkHsEmbTyPV :: SrcSpan -> EpToken "type" -> LHsType GhcPs -> PV (LocatedA b) -- | Validate infixexp LHS to reject unwanted {-# SCC ... #-} pragmas rejectPragmaPV :: LocatedA b -> PV () {- Note [UndecidableSuperClasses for associated types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (This Note is about the code in GHC, not about the user code that we are parsing) Assume we have a class C with an associated type T: class C a where type T a ... If we want to add 'C (T a)' as a superclass, we need -XUndecidableSuperClasses: {-# LANGUAGE UndecidableSuperClasses #-} class C (T a) => C a where type T a ... Unfortunately, -XUndecidableSuperClasses don't work all that well, sometimes making GHC loop. The workaround is to bring this constraint into scope manually with a helper method: class C a where type T a superT :: (C (T a) => r) -> r In order to avoid ambiguous types, 'r' must mention 'a'. For consistency, we use this approach for all constraints on associated types, even when -XUndecidableSuperClasses are not required. 
-} {- Note [Body in DisambECP] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are helper functions (mkBodyStmt, mkBindStmt, unguardedRHS, etc) that require their argument to take a form of (body GhcPs) for some (body :: Type -> *). To satisfy this requirement, we say that (b ~ Body b GhcPs) in the superclass constraints of DisambECP. The alternative is to change mkBodyStmt, mkBindStmt, unguardedRHS, etc, to drop this requirement. It is possible and would allow removing the type index of PatBuilder, but leads to worse type inference, breaking some code in the typechecker. -} instance DisambECP (HsCmd GhcPs) where type Body (HsCmd GhcPs) = HsCmd ecpFromCmd' = return ecpFromExp' (L l e) = cmdFail (locA l) (ppr e) ecpFromPat' (L l p) = cmdFail (locA l) (ppr p) mkHsProjUpdatePV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrOverloadedRecordDotInvalid mkHsLamPV l lam_variant (L lm m) anns = do !cs <- getCommentsFor l let mg = mkLamCaseMatchGroup FromSource lam_variant (L lm m) return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCmdLam anns lam_variant mg) mkHsLetPV l tkLet bs tkIn e = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCmdLet (tkLet, tkIn) bs e) type InfixOp (HsCmd GhcPs) = HsExpr GhcPs superInfixOp m = m mkHsOpAppPV l c1 op c2 = do let cmdArg c = L (l2l $ getLoc c) $ HsCmdTop noExtField c !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) $ HsCmdArrForm noAnn (reLoc op) Infix [cmdArg c1, cmdArg c2] mkHsCasePV l c (L lm m) anns = do !cs <- getCommentsFor l let mg = mkMatchGroup FromSource (L lm m) return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCmdCase anns c mg) type FunArg (HsCmd GhcPs) = HsExpr GhcPs superFunArg m = m mkHsAppPV l c e = do checkCmdBlockArguments c checkExpBlockArguments e return $ L l (HsCmdApp noExtField c e) mkHsAppTypePV l c _ t = cmdFail (locA l) (ppr c <+> text "@" <> ppr t) mkHsIfPV l c semi1 a semi2 b anns = do checkDoAndIfThenElse PsErrSemiColonsInCondCmd c semi1 a semi2 b !cs <- 
getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (mkHsCmdIf c a b anns) mkHsDoPV l Nothing stmts tok_loc anc = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCmdDo (AnnList (Just anc) ListNone [] tok_loc []) stmts) mkHsDoPV l (Just m) _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrQualifiedDoInCmd m mkHsParPV l lpar c rpar = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCmdPar (lpar, rpar) c) mkHsVarPV (L l v) = cmdFail (locA l) (ppr v) mkHsLitPV (L l a) = cmdFail l (ppr a) mkHsOverLitPV (L l a) = cmdFail (locA l) (ppr a) mkHsWildCardPV l = cmdFail l (text "_") mkHsTySigPV l a sig _ = cmdFail (locA l) (ppr a <+> text "::" <+> ppr sig) mkHsExplicitListPV l xs _ = cmdFail l $ brackets (pprWithCommas ppr xs) mkHsSplicePV (L l sp) = cmdFail l (pprUntypedSplice True Nothing sp) mkHsRecordPV _ l _ a (fbinds, ddLoc) _ = do let (fs, ps) = partitionEithers fbinds if not (null ps) then addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrOverloadedRecordDotInvalid else cmdFail l $ ppr a <+> ppr (mk_rec_fields fs ddLoc) mkHsNegAppPV l a _ = cmdFail l (text "-" <> ppr a) mkHsSectionR_PV l op c = cmdFail l $ let pp_op = fromMaybe (panic "cannot print infix operator") (ppr_infix_expr (unLoc op)) in pp_op <> ppr c mkHsArrowPV l mode a arr b = cmdFail l $ case mode of -- matching on the mode brings Outputable instances into scope ArrowIsViewPat -> ppr a <+> pprHsArrow arr <+> ppr b ArrowIsFunType -> ppr a <+> pprHsArrow arr <+> ppr b mkHsMultPV pct mult = cmdFail l $ ppr pct <> ppr mult where l = getHasLoc pct `combineSrcSpans` getHasLoc mult mkHsForallPV l tele cmd = cmdFail l $ pprHsForAll tele Nothing <+> ppr cmd checkContextPV ctxt = cmdFail (getLocA ctxt) $ ppr ctxt mkQualPV l ctxt cmd = cmdFail l $ ppr ctxt <+> text "=>" <+> ppr cmd mkHsAsPatPV l v _ c = cmdFail l $ pprPrefixOcc (unLoc v) <> text "@" <> ppr c mkHsLazyPatPV l c _ = cmdFail l $ text "~" <> ppr c mkHsBangPatPV l c _ = cmdFail l $ text 
"!" <> ppr c mkSumOrTuplePV l boxity a _ = cmdFail (locA l) (pprSumOrTuple boxity a) mkHsEmbTyPV l _ ty = cmdFail l (text "type" <+> ppr ty) rejectPragmaPV _ = return () cmdFail :: SrcSpan -> SDoc -> PV a cmdFail loc e = addFatalError $ mkPlainErrorMsgEnvelope loc $ PsErrParseErrorInCmd e checkLamMatchGroup :: SrcSpan -> HsLamVariant -> MatchGroup GhcPs (LHsExpr GhcPs) -> PV () checkLamMatchGroup l LamSingle (MG { mg_alts = (L _ (matches:_))}) = do when (null (hsLMatchPats matches)) $ addError $ mkPlainErrorMsgEnvelope l PsErrEmptyLambda checkLamMatchGroup _ _ _ = return () instance DisambECP (HsExpr GhcPs) where type Body (HsExpr GhcPs) = HsExpr ecpFromCmd' (L l c) = do addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowCmdInExpr c return (L l (hsHoleExpr noAnn)) ecpFromExp' = return ecpFromPat' p@(L l _) = do addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrOrPatInExpr p return (L l (hsHoleExpr noAnn)) mkHsProjUpdatePV l fields arg isPun anns = do !cs <- getCommentsFor l return $ mkRdrProjUpdate (EpAnn (spanAsAnchor l) noAnn cs) fields arg isPun anns mkHsLetPV l tkLet bs tkIn c = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsLet (tkLet, tkIn) bs c) type InfixOp (HsExpr GhcPs) = HsExpr GhcPs superInfixOp m = m mkHsOpAppPV l e1 op e2 = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) $ OpApp noExtField e1 (reLoc op) e2 mkHsCasePV l e (L lm m) anns = do !cs <- getCommentsFor l let mg = mkMatchGroup FromSource (L lm m) return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsCase anns e mg) mkHsLamPV l lam_variant (L lm m) anns = do !cs <- getCommentsFor l let mg = mkLamCaseMatchGroup FromSource lam_variant (L lm m) checkLamMatchGroup l lam_variant mg return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsLam anns lam_variant mg) type FunArg (HsExpr GhcPs) = HsExpr GhcPs superFunArg m = m mkHsAppPV l e1 e2 = do checkExpBlockArguments e1 checkExpBlockArguments e2 return $ L l (HsApp noExtField e1 e2) mkHsAppTypePV l e at t = 
do checkExpBlockArguments e return $ L l (HsAppType at e (mkHsWildCardBndrs t)) mkHsIfPV l c semi1 a semi2 b anns = do checkDoAndIfThenElse PsErrSemiColonsInCondExpr c semi1 a semi2 b !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (mkHsIf c a b anns) mkHsDoPV l mod stmts loc_tok anc = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsDo (AnnList (Just anc) ListNone [] loc_tok []) (DoExpr mod) stmts) mkHsParPV l lpar e rpar = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsPar (lpar, rpar) e) mkHsVarPV v@(L l@(EpAnn anc _ _) _) = do !cs <- getCommentsFor (getHasLoc l) return $ L (EpAnn anc noAnn cs) (HsVar noExtField v) mkHsLitPV (L l a) = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (HsLit noExtField a) mkHsOverLitPV (L (EpAnn l an csIn) a) = do !cs <- getCommentsFor (locA l) return $ L (EpAnn l an (cs Semi.<> csIn)) (HsOverLit NoExtField a) mkHsWildCardPV l = return $ L (noAnnSrcSpan l) (hsHoleExpr noAnn) mkHsTySigPV l@(EpAnn anc an csIn) a sig anns = do !cs <- getCommentsFor (locA l) return $ L (EpAnn anc an (csIn Semi.<> cs)) (ExprWithTySig anns a (hsTypeToHsSigWcType sig)) mkHsExplicitListPV l xs anns = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (ExplicitList anns xs) mkHsSplicePV (L l a) = do !cs <- getCommentsFor l return $ fmap (HsUntypedSplice NoExtField) (L (EpAnn (spanAsAnchor l) noAnn cs) a) mkHsRecordPV opts l lrec a (fbinds, ddLoc) anns = do !cs <- getCommentsFor l r <- mkRecConstrOrUpdate opts a lrec (fbinds, ddLoc) anns checkRecordSyntax (L (EpAnn (spanAsAnchor l) noAnn cs) r) mkHsNegAppPV l a anns = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (NegApp anns a noSyntaxExpr) mkHsSectionR_PV l op e = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (SectionR noExtField op e) mkHsAsPatPV l v _ e = addError (mkPlainErrorMsgEnvelope l $ PsErrTypeAppWithoutSpace (unLoc v) e) >> 
return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkHsLazyPatPV l e _ = addError (mkPlainErrorMsgEnvelope l $ PsErrLazyPatWithoutSpace e) >> return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkHsBangPatPV l e _ = addError (mkPlainErrorMsgEnvelope l $ PsErrBangPatWithoutSpace e) >> return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkSumOrTuplePV = mkSumOrTupleExpr mkHsEmbTyPV l toktype ty = return $ L (noAnnSrcSpan l) $ HsEmbTy toktype (mkHsWildCardBndrs ty) mkHsArrowPV l mode arg arr res = -- In expressions, (e1 -> e2) is always parsed as a function type, -- even if ViewPatterns are enabled. exprArrowParsingMode mode $ return $ L (noAnnSrcSpan l) $ HsFunArr noExtField arr arg res mkHsMultPV pct t = return $ mkMultExpr pct t mkHsForallPV l telescope ty = return $ L (noAnnSrcSpan l) $ HsForAll noExtField (setTelescopeBndrsNameSpace varName telescope) ty checkContextPV = checkContextExpr mkQualPV l qual ty = return $ L (noAnnSrcSpan l) $ HsQual noExtField qual ty rejectPragmaPV (L _ (OpApp _ _ _ e)) = -- assuming left-associative parsing of operators rejectPragmaPV e rejectPragmaPV (L l (HsPragE _ prag _)) = addError $ mkPlainErrorMsgEnvelope (locA l) $ (PsErrUnallowedPragma prag) rejectPragmaPV _ = return () hsHoleExpr :: Maybe EpAnnUnboundVar -> HsExpr GhcPs hsHoleExpr anns = HsUnboundVar anns (mkRdrUnqual (mkVarOccFS (fsLit "_"))) instance DisambECP (PatBuilder GhcPs) where type Body (PatBuilder GhcPs) = PatBuilder ecpFromCmd' (L l c) = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowCmdInPat c ecpFromExp' (L l e) = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowExprInPat e ecpFromPat' (L l p) = return $ L l (PatBuilderPat p) mkHsLetPV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrLetInPat mkHsProjUpdatePV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrOverloadedRecordDotInvalid type InfixOp (PatBuilder GhcPs) = RdrName superInfixOp m = m mkHsOpAppPV l p1 op p2 = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor 
l) noAnn cs) $ PatBuilderOpApp p1 op p2 ([],[]) mkHsLamPV l lam_variant _ _ = addFatalError $ mkPlainErrorMsgEnvelope l (PsErrLambdaInPat lam_variant) mkHsCasePV l _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrCaseInPat type FunArg (PatBuilder GhcPs) = PatBuilder GhcPs superFunArg m = m mkHsAppPV l p1 p2 = return $ L l (PatBuilderApp p1 p2) mkHsAppTypePV l p at t = do !cs <- getCommentsFor (locA l) return $ L (addCommentsToEpAnn l cs) (PatBuilderAppType p at (mkHsTyPat t)) mkHsIfPV l _ _ _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrIfThenElseInPat mkHsDoPV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrDoNotationInPat mkHsParPV l lpar p rpar = return $ L (noAnnSrcSpan l) (PatBuilderPar lpar p rpar) mkHsVarPV v@(getLoc -> l) = return $ L (l2l l) (PatBuilderVar v) mkHsLitPV lit@(L l a) = do checkUnboxedLitPat lit !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (LitPat noExtField a)) mkHsOverLitPV (L l a) = return $ L l (PatBuilderOverLit a) mkHsWildCardPV l = return $ L (noAnnSrcSpan l) (PatBuilderPat (WildPat noExtField)) mkHsTySigPV l p t anns = do p' <- checkLPat p let sig = mkHsPatSigType noAnn t sig_pat <- addSigPatP l p' sig anns return $ fmap PatBuilderPat sig_pat mkHsExplicitListPV l xs anns = do ps <- traverse checkLPat xs !cs <- getCommentsFor l return (L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (ListPat anns ps))) mkHsSplicePV (L l sp) = do !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (SplicePat noExtField sp)) mkHsRecordPV _ l _ a (fbinds, ddLoc) anns = do let (fs, ps) = partitionEithers fbinds if not (null ps) then addFatalError $ mkPlainErrorMsgEnvelope l PsErrOverloadedRecordDotInvalid else do !cs <- getCommentsFor l r <- mkPatRec a (mk_rec_fields fs ddLoc) anns checkRecordSyntax (L (EpAnn (spanAsAnchor l) noAnn cs) r) mkHsNegAppPV l (L lp p) anns = do lit <- case p of PatBuilderOverLit pos_lit -> return (L (l2l lp) pos_lit) _ -> patFail 
l $ PsErrInPat p PEIP_NegApp !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (mkNPat lit (Just noSyntaxExpr) anns)) mkHsSectionR_PV l op p = patFail l (PsErrParseRightOpSectionInPat (unLoc op) (unLoc p)) mkHsArrowPV l ArrowIsViewPat a arr b = do p <- checkLPat b !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (ViewPat tok a p)) where tok :: TokRarrow tok = case arr of HsUnrestrictedArrow x -> x _ -> -- unreachable case because in Parser.y the reduction rules for -- (a %m -> b) and (a ->. b) use ArrowIsFunType panic "mkHsArrowPV ArrowIsViewPat: expected HsUnrestrictedArrow" mkHsArrowPV l ArrowIsFunType a arr b = patFail l (PsErrTypeSyntaxInPat (PETS_FunctionArrow a arr b)) mkHsMultPV tok arg = let l = getHasLoc tok `combineSrcSpans` getLocA arg in patFail l (PsErrTypeSyntaxInPat (PETS_Multiplicity tok arg)) mkHsForallPV l tele body = patFail l (PsErrTypeSyntaxInPat (PETS_ForallTelescope tele body)) checkContextPV ctx = patFail (getLocA ctx) (PsErrTypeSyntaxInPat (PETS_ConstraintContext ctx)) mkQualPV _ _ _ = -- unreachable because mkQualPV is only called on the result -- of checkContextPV, which fails in patterns panic "mkQualPV in a pattern" mkHsAsPatPV l v at e = do p <- checkLPat e !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (AsPat at v p)) mkHsLazyPatPV l e a = do p <- checkLPat e !cs <- getCommentsFor l return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat (LazyPat a p)) mkHsBangPatPV l e an = do p <- checkLPat e !cs <- getCommentsFor l let pb = BangPat an p hintBangPat l pb return $ L (EpAnn (spanAsAnchor l) noAnn cs) (PatBuilderPat pb) mkSumOrTuplePV = mkSumOrTuplePat mkHsEmbTyPV l toktype ty = return $ L (noAnnSrcSpan l) $ PatBuilderPat (EmbTyPat toktype (mkHsTyPat ty)) rejectPragmaPV _ = return () -- For reasons of backwards compatibility, we can't simply add the pattern -- signature if the inner pattern is a view pattern. 
Consider: -- (f -> p :: t) -- There are two ways to parse it -- (f -> (p :: t)) -- legacy parse -- ((f -> p) :: t) -- future parse -- The grammar in Parser.y is structured in such a way that we get the -- future parse by default. Until we're ready to make the breaking change, -- we need to do some extra work here to push the signature under the view -- pattern (and emit a warning). addSigPatP :: SrcSpanAnnA -> LPat GhcPs -> HsPatSigType GhcPs -> TokDcolon -> PV (LPat GhcPs) addSigPatP l viewpat@(L _ ViewPat{}) sig anns = -- Test case: T24159_viewpat do { let futureParse = L l (SigPat anns viewpat sig) ; legacyParse <- go viewpat ; addPsMessage (locA l) (PsWarnViewPatternSignatures legacyParse futureParse) ; return legacyParse } where sig_loc_no_comments :: SrcSpan sig_loc_no_comments = getLocA (hsps_body sig) -- Test case for comments and locations preservation: Test24159 go :: LPat GhcPs -> PV (LPat GhcPs) go (L (EpAnn (EpaSpan view_pat_loc) anns cs1) (ViewPat anns' e' p')) = do sig' <- go p' let new_loc = view_pat_loc `combineSrcSpans` sig_loc_no_comments cs2 <- getCommentsFor new_loc let ep_ann_loc = EpAnn (spanAsAnchor new_loc) anns (cs1 Semi.<> cs2) pure (L ep_ann_loc (ViewPat anns' e' sig')) go p = pure $ L new_loc (SigPat anns p sig) where new_loc = noAnnSrcSpan ((getLocA p) `combineSrcSpans` sig_loc_no_comments) addSigPatP l p sig anns = do return $ L l (SigPat anns p sig) {- Note [Arrow parsing mode] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this example: f (K (a -> b)) = () A pattern of the form (a -> b) could be parsed in one of two ways: * a view pattern `viewfn -> pat` (with ViewPatterns) * a function type `t1 -> t2` (with RequiredTypeArguments) This depends on the enabled extensions: NoViewPatterns, RequiredTypeArguments => function type NoViewPatterns, NoRequiredTypeArguments => error (suggest ViewPatterns) ViewPatterns, RequiredTypeArguments => view pattern ViewPatterns, NoRequiredTypeArguments => view pattern The decision how to parse arrow patterns 
(p1 -> p2) is captured by the `ArrowParsingMode` data type, produced in `withArrowParsingMode` and consumed in `mkHsArrowPV`. Naively, one might expect to see the following definition: -- a simple (but insufficient) definition data ArrowParsingMode = ArrowIsViewPat | ArrowIsFunType However, there is a slight complication that leads us to parameterize these constructor with GADT type indices. In a pattern (p1 -> p2), what is the AST type to represent the LHS `p1`? It depends: * if (p1 -> p2) is a view pattern, `p1` is an HsExpr * if (p1 -> p2) is a function type, `p1` is a Pat (PatBuilder) And since the decision how to parse `p1` depends on the arrow parsing mode, we could try to encode the LHS type as a GADT index: -- a less simple (but still insufficient) definition data ArrowParsingMode lhs where ArrowIsViewPat :: ArrowParsingMode (PatBuilder GhcPs) ArrowIsFunType :: ArrowParsingMode (HsExpr GhcPs) This definition would suffice for parsing patterns, but remember that expressions, commands, and patterns are all parsed using a unified framework `DisambECP`, as described in Note [Ambiguous syntactic categories]. In an expression (e1 -> e2), the LHS is always represented by an HsExpr. We can account for this with a further refinement of the definition: -- actual definition data ArrowParsingMode lhs rhs where ArrowIsViewPat :: ArrowParsingMode (HsExpr GhcPs) b ArrowIsFunType :: ArrowParsingMode b b So when parsing a view pattern, the LHS is an HsExpr; and when parsing a function type, the type of the LHS is assumed to match the type of the RHS, which works out just right both for expressions and patterns. -} -- The arrow parsing mode is selected depending on the enabled extensions and -- determines how we parse patterns of the form (p1 -> p2). 
See Note [Arrow parsing mode] data ArrowParsingMode lhs rhs where ArrowIsViewPat :: ArrowParsingMode (HsExpr GhcPs) b -- the LHS is always of type HsExpr ArrowIsFunType :: ArrowParsingMode b b -- the LHS is of the same type as RHS -- When parsing an expression (e1 -> e2), the LHS `e1` is an HsExpr regardless of -- the arrow parsing mode. `exprArrowParsingMode` proves this to the type checker. -- See Note [Arrow parsing mode] exprArrowParsingMode :: ArrowParsingMode lhs (HsExpr GhcPs) -> (lhs ~ HsExpr GhcPs => r) -> r exprArrowParsingMode ArrowIsViewPat k = k exprArrowParsingMode ArrowIsFunType k = k -- Check the enabled extensions and select the appropriate ArrowParsingMode, -- then pass it to a continuation. See Note [Arrow parsing mode] withArrowParsingMode :: DisambECP b => (forall lhs. DisambECP lhs => ArrowParsingMode lhs b -> PV r) -> PV r withArrowParsingMode cont = do vpEnabled <- getBit ViewPatternsBit rtaEnabled <- getBit RequiredTypeArgumentsBit if | vpEnabled -> cont ArrowIsViewPat | rtaEnabled -> cont ArrowIsFunType | otherwise -> cont ArrowIsViewPat -- Error message should suggest ViewPatterns in patterns -- Type-restricted variant of `withArrowParsingMode` to aid type inference (#25103) withArrowParsingMode' :: DisambECP b => (forall lhs. DisambECP lhs => ArrowParsingMode lhs b -> PV (LocatedA b)) -> PV (LocatedA b) withArrowParsingMode' = withArrowParsingMode -- When a forall-type occurs in term syntax, forall-bound variables should -- inhabit the term namespace `varName` rather than the usual `tvName`. -- See Note [Types in terms]. -- -- Since type variable binders in a `HsForAllTelescope` produced by the -- `forall_telescope` nonterminal have their namespaces set to `tvName`, -- we use `setTelescopeBndrsNameSpace` to fix them up. 
setTelescopeBndrsNameSpace :: NameSpace -> HsForAllTelescope GhcPs -> HsForAllTelescope GhcPs setTelescopeBndrsNameSpace ns forall_telescope = case forall_telescope of HsForAllInvis x bndrs -> HsForAllInvis x (set_bndrs_ns bndrs) HsForAllVis x bndrs -> HsForAllVis x (set_bndrs_ns bndrs) where set_bndrs_ns :: [LHsTyVarBndr flag GhcPs] -> [LHsTyVarBndr flag GhcPs] set_bndrs_ns = map (setLHsTyVarBndrNameSpace ns) setLHsTyVarBndrNameSpace :: NameSpace -> LHsTyVarBndr flag GhcPs -> LHsTyVarBndr flag GhcPs setLHsTyVarBndrNameSpace ns (L l tvb) = L l tvb' where tvb' = tvb { tvb_var = setHsBndrVarNameSpace ns (tvb_var tvb) } setHsBndrVarNameSpace :: NameSpace -> HsBndrVar GhcPs -> HsBndrVar GhcPs setHsBndrVarNameSpace ns (HsBndrVar x (L l rdr)) = HsBndrVar x (L l rdr') where rdr' = setRdrNameSpace rdr ns setHsBndrVarNameSpace _ (HsBndrWildCard x) = HsBndrWildCard x -- | Ensure that a literal pattern isn't of type Addr#, Float#, Double#. checkUnboxedLitPat :: Located (HsLit GhcPs) -> PV () checkUnboxedLitPat (L loc lit) = case lit of -- Don't allow primitive string literal patterns. -- See #13260. HsStringPrim {} -> addError $ mkPlainErrorMsgEnvelope loc $ (PsErrIllegalUnboxedStringInPat lit) -- Don't allow Float#/Double# literal patterns. -- See #9238 and Note [Rules for floating-point comparisons] -- in GHC.Core.Opt.ConstantFold. 
_ | is_floating_lit lit -> addError $ mkPlainErrorMsgEnvelope loc $ (PsErrIllegalUnboxedFloatingLitInPat lit) | otherwise -> return () where is_floating_lit :: HsLit GhcPs -> Bool is_floating_lit (HsFloatPrim {}) = True is_floating_lit (HsDoublePrim {}) = True is_floating_lit _ = False mkPatRec :: LocatedA (PatBuilder GhcPs) -> HsRecFields GhcPs (LocatedA (PatBuilder GhcPs)) -> (Maybe (EpToken "{"), Maybe (EpToken "}")) -> PV (PatBuilder GhcPs) mkPatRec (unLoc -> PatBuilderVar c) (HsRecFields x fs dd) anns | isRdrDataCon (unLoc c) = do fs <- mapM checkPatField fs return $ PatBuilderPat $ ConPat { pat_con_ext = anns , pat_con = c , pat_args = RecCon (HsRecFields x fs dd) } mkPatRec p _ _ = addFatalError $ mkPlainErrorMsgEnvelope (getLocA p) $ (PsErrInvalidRecordCon (unLoc p)) -- | Disambiguate constructs that may appear when we do not know -- ahead of time whether we are parsing a type or a newtype/data constructor. -- -- See Note [Ambiguous syntactic categories] for the general idea. -- -- See Note [Parsing data constructors is hard] for the specific issue this -- particular class is solving. -- class DisambTD b where -- | Process the head of a type-level function/constructor application, -- i.e. the @H@ in @H a b c@. mkHsAppTyHeadPV :: LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate @f x@ (function application or prefix data constructor). 
  mkHsAppTyPV :: LocatedA b -> LHsType GhcPs -> PV (LocatedA b)
  -- | Disambiguate @f \@t@ (visible kind application)
  mkHsAppKindTyPV :: LocatedA b -> EpToken "@" -> LHsType GhcPs -> PV (LocatedA b)
  -- | Disambiguate @f \# x@ (infix operator)
  mkHsOpTyPV :: PromotionFlag -> LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> PV (LocatedA b)
  -- | Disambiguate @{-\# UNPACK \#-} t@ (unpack/nounpack pragma)
  mkUnpackednessPV :: Located UnpackednessPragma -> LocatedA b -> PV (LocatedA b)

-- In the type interpretation, each method is a direct AST constructor:
-- no validation is needed when we already know we are parsing a type.
instance DisambTD (HsType GhcPs) where
  mkHsAppTyHeadPV = return
  mkHsAppTyPV t1 t2 = return (mkHsAppTy t1 t2)
  mkHsAppKindTyPV t at ki = return (mkHsAppKindTy at t ki)
  mkHsOpTyPV prom t1 op t2 = do
    let (L l ty) = mkLHsOpTy prom t1 op t2
    !cs <- getCommentsFor (locA l)
    return (L (addCommentsToEpAnn l cs) ty)
  mkUnpackednessPV = addUnpackednessP

-- | The constructor name of an intermediate data-constructor builder.
dataConBuilderCon :: LocatedA DataConBuilder -> LocatedN RdrName
dataConBuilderCon (L _ (PrefixDataConBuilder _ dc)) = dc
dataConBuilderCon (L _ (InfixDataConBuilder _ dc _)) = dc

-- | Convert an intermediate data-constructor builder into the argument
-- details (record / prefix / infix) of an H98 constructor declaration.
dataConBuilderDetails :: LocatedA DataConBuilder -> HsConDeclH98Details GhcPs

-- Detect when the record syntax is used:
--   data T = MkT { ... }
dataConBuilderDetails (L _ (PrefixDataConBuilder flds _))
  | [L (EpAnn anc _ cs) (HsRecTy an fields)] <- toList flds
  = RecCon (L (EpAnn anc an cs) fields)

-- Normal prefix constructor, e.g.  data T = MkT A B C
dataConBuilderDetails (L _ (PrefixDataConBuilder flds _))
  = PrefixCon noTypeArgs (map hsLinear (toList flds))

-- Infix constructor, e.g. data T = Int :! Bool
dataConBuilderDetails (L (EpAnn _ _ csl) (InfixDataConBuilder (L (EpAnn anc ann csll) lhs) _ rhs))
  = InfixCon (hsLinear (L (EpAnn anc ann (csl Semi.<> csll)) lhs)) (hsLinear rhs)

instance DisambTD DataConBuilder where
  mkHsAppTyHeadPV = tyToDataConBuilder

  mkHsAppTyPV (L l (PrefixDataConBuilder flds fn)) t =
    return $
      L (noAnnSrcSpan $ combineSrcSpans (locA l) (getLocA t))
        (PrefixDataConBuilder (flds `snocOL` t) fn)
  mkHsAppTyPV (L _ InfixDataConBuilder{}) _ =
    -- This case is impossible because of the way
    -- the grammar in Parser.y is written (see infixtype/ftype).
    panic "mkHsAppTyPV: InfixDataConBuilder"

  mkHsAppKindTyPV lhs at ki =
    addFatalError $ mkPlainErrorMsgEnvelope (getEpTokenSrcSpan at) $
      (PsErrUnexpectedKindAppInDataCon (unLoc lhs) (unLoc ki))

  mkHsOpTyPV prom lhs tc rhs = do
      check_no_ops (unLoc rhs)  -- check the RHS because parsing type operators is right-associative
      data_con <- eitherToP $ tyConToDataCon tc
      !cs <- getCommentsFor (locA l)
      checkNotPromotedDataCon prom data_con
      return $ L (addCommentsToEpAnn l cs) (InfixDataConBuilder lhs data_con rhs)
    where
      l = combineLocsA lhs rhs
      check_no_ops (HsBangTy _ _ t) = check_no_ops (unLoc t)
      check_no_ops (HsOpTy{}) =
        addError $ mkPlainErrorMsgEnvelope (locA l) $
          (PsErrInvalidInfixDataCon (unLoc lhs) (unLoc tc) (unLoc rhs))
      check_no_ops _ = return ()

  mkUnpackednessPV unpk constr_stuff
    | L _ (InfixDataConBuilder lhs data_con rhs) <- constr_stuff =
        -- When the user writes  data T = {-# UNPACK #-} Int :+ Bool
        -- we apply {-# UNPACK #-} to the LHS
        do lhs' <- addUnpackednessP unpk lhs
           let l = combineLocsA (reLoc unpk) constr_stuff
           return $ L l (InfixDataConBuilder lhs' data_con rhs)
    | otherwise =
        do addError $ mkPlainErrorMsgEnvelope (getLoc unpk) PsErrUnpackDataCon
           return constr_stuff

-- | Interpret a parsed type as the head of a data-constructor builder.
-- Only a (non-promoted) constructor name or a tuple type is acceptable
-- as a constructor head; anything else is a (fatal) parse error.
tyToDataConBuilder :: LHsType GhcPs -> PV (LocatedA DataConBuilder)
tyToDataConBuilder (L l (HsTyVar _ prom v)) = do
  data_con <- eitherToP $ tyConToDataCon v
  checkNotPromotedDataCon prom data_con
  return $ L l (PrefixDataConBuilder nilOL data_con)
tyToDataConBuilder (L l (HsTupleTy _ HsBoxedOrConstraintTuple ts)) = do
  let data_con = L (l2l l) (getRdrName (tupleDataCon Boxed (length ts)))
  return $ L l (PrefixDataConBuilder (toOL ts) data_con)
tyToDataConBuilder (L l (HsTupleTy _ HsUnboxedTuple ts)) = do
  let data_con = L (l2l l) (getRdrName (tupleDataCon Unboxed (length ts)))
  return $ L l (PrefixDataConBuilder (toOL ts) data_con)
tyToDataConBuilder t =
  addFatalError $ mkPlainErrorMsgEnvelope (getLocA t) $
    (PsErrInvalidDataCon (unLoc t))

-- | Rejects declarations such as @data T = 'MkT@ (note the leading tick).
checkNotPromotedDataCon :: PromotionFlag -> LocatedN RdrName -> PV ()
checkNotPromotedDataCon NotPromoted _ = return ()
checkNotPromotedDataCon IsPromoted (L l name) =
  addError $ mkPlainErrorMsgEnvelope (locA l) $
    PsErrIllegalPromotionQuoteDataCon name

-- | Build the constructor of an unboxed sum alternative, @(# | t | #)@.
mkUnboxedSumCon :: LHsType GhcPs -> ConTag -> Arity -> (LocatedN RdrName, HsConDeclH98Details GhcPs)
mkUnboxedSumCon t tag arity =
  (noLocA (getRdrName (sumDataCon tag arity)), PrefixCon noTypeArgs [hsLinear t])

{- Note [Ambiguous syntactic categories]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are places in the grammar where we do not know whether we are parsing an
expression or a pattern without unlimited lookahead (which we do not have in
'happy'):

View patterns:

    f (Con a b     ) = ...  -- 'Con a b' is a pattern
    f (Con a b -> x) = ...  -- 'Con a b' is an expression

do-notation:

    do { Con a b <- x } -- 'Con a b' is a pattern
    do { Con a b }      -- 'Con a b' is an expression

Guards:

    x | True <- p && q = ...  -- 'True' is a pattern
    x | True = ...            -- 'True' is an expression

Top-level value/function declarations (FunBind/PatBind):

    f ! a         -- TH splice
    f ! a = ...   -- function declaration

Until we encounter the = sign, we don't know if it's a top-level
TemplateHaskell splice where ! is used, or if it's a function declaration where
! is bound.
There are also places in the grammar where we do not know whether we are parsing an expression or a command: proc x -> do { (stuff) -< x } -- 'stuff' is an expression proc x -> do { (stuff) } -- 'stuff' is a command Until we encounter arrow syntax (-<) we don't know whether to parse 'stuff' as an expression or a command. In fact, do-notation is subject to both ambiguities: proc x -> do { (stuff) -< x } -- 'stuff' is an expression proc x -> do { (stuff) <- f -< x } -- 'stuff' is a pattern proc x -> do { (stuff) } -- 'stuff' is a command There are many possible solutions to this problem. For an overview of the ones we decided against, see Note [Resolving parsing ambiguities: non-taken alternatives] The solution that keeps basic definitions (such as HsExpr) clean, keeps the concerns local to the parser, and does not require duplication of hsSyn types, or an extra pass over the entire AST, is to parse into an overloaded parser-validator (a so-called tagless final encoding): class DisambECP b where ... instance DisambECP (HsCmd GhcPs) where ... instance DisambECP (HsExp GhcPs) where ... instance DisambECP (PatBuilder GhcPs) where ... The 'DisambECP' class contains functions to build and validate 'b'. For example, to add parentheses we have: mkHsParPV :: DisambECP b => SrcSpan -> Located b -> PV (Located b) 'mkHsParPV' will wrap the inner value in HsCmdPar for commands, HsPar for expressions, and 'PatBuilderPar' for patterns (later transformed into ParPat, see Note [PatBuilder]). Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } We abstract over LHsExpr GhcPs, and it becomes: alts :: { forall b. 
DisambECP b => PV (Located ([AddEpAnn],[LMatch GhcPs (Located b)])) } : alts1 { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Compared to the initial definition, the added bits are: forall b. DisambECP b => PV ( ... ) -- in the type signature $1 >>= \ $1 -> return $ -- in one reduction rule $2 >>= \ $2 -> return $ -- in another reduction rule The overhead is constant relative to the size of the rest of the reduction rule, so this approach scales well to large parser productions. Note that we write ($1 >>= \ $1 -> ...), so the second $1 is in a binding position and shadows the previous $1. We can do this because internally 'happy' desugars $n to happy_var_n, and the rationale behind this idiom is to be able to write (sLL $1 $>) later on. The alternative would be to write this as ($1 >>= \ fresh_name -> ...), but then we couldn't refer to the last fresh name as $>. Finally, we instantiate the polymorphic type to a concrete one, and run the parser-validator, for example: stmt :: { forall b. DisambECP b => PV (LStmt GhcPs (Located b)) } e_stmt :: { LStmt GhcPs (LHsExpr GhcPs) } : stmt {% runPV $1 } In e_stmt, three things happen: 1. we instantiate: b ~ HsExpr GhcPs 2. we embed the PV computation into P by using runPV 3. we run validation by using a monadic production, {% ... } At this point the ambiguity is resolved. -} {- Note [Resolving parsing ambiguities: non-taken alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Alternative I, extra constructors in GHC.Hs.Expr ------------------------------------------------ We could add extra constructors to HsExpr to represent command-specific and pattern-specific syntactic constructs. Under this scheme, we parse patterns and commands as expressions and rejig later. 
This is what GHC used to do, and it polluted 'HsExpr' with irrelevant constructors: * for commands: 'HsArrForm', 'HsArrApp' * for patterns: 'EWildPat', 'EAsPat', 'EViewPat', 'ELazyPat' (As of now, we still do that for patterns, but we plan to fix it). There are several issues with this: * The implementation details of parsing are leaking into hsSyn definitions. * Code that uses HsExpr has to panic on these impossible-after-parsing cases. * HsExpr is arbitrarily selected as the extension basis. Why not extend HsCmd or HsPat with extra constructors instead? Alternative II, extra constructors in GHC.Hs.Expr for GhcPs ----------------------------------------------------------- We could address some of the problems with Alternative I by using Trees That Grow and extending HsExpr only in the GhcPs pass. However, GhcPs corresponds to the output of parsing, not to its intermediate results, so we wouldn't want them there either. Alternative III, extra constructors in GHC.Hs.Expr for GhcPrePs --------------------------------------------------------------- We could introduce a new pass, GhcPrePs, to keep GhcPs pristine. Unfortunately, creating a new pass would significantly bloat conversion code and slow down the compiler by adding another linear-time pass over the entire AST. For example, in order to build HsExpr GhcPrePs, we would need to build HsLocalBinds GhcPrePs (as part of HsLet), and we never want HsLocalBinds GhcPrePs. Alternative IV, sum type and bottom-up data flow ------------------------------------------------ Expressions and commands are disjoint. 
There are no user inputs that could be interpreted as either an expression or a command depending on outer context: 5 -- definitely an expression x -< y -- definitely a command Even though we have both 'HsLam' and 'HsCmdLam', we can look at the body to disambiguate: \p -> 5 -- definitely an expression \p -> x -< y -- definitely a command This means we could use a bottom-up flow of information to determine whether we are parsing an expression or a command, using a sum type for intermediate results: Either (LHsExpr GhcPs) (LHsCmd GhcPs) There are two problems with this: * We cannot handle the ambiguity between expressions and patterns, which are not disjoint. * Bottom-up flow of information leads to poor error messages. Consider if ... then 5 else (x -< y) Do we report that '5' is not a valid command or that (x -< y) is not a valid expression? It depends on whether we want the entire node to be 'HsIf' or 'HsCmdIf', and this information flows top-down, from the surrounding parsing context (are we in 'proc'?) Alternative V, backtracking with parser combinators --------------------------------------------------- One might think we could sidestep the issue entirely by using a backtracking parser and doing something along the lines of (try pExpr <|> pPat). Turns out, this wouldn't work very well, as there can be patterns inside expressions (e.g. via 'case', 'let', 'do') and expressions inside patterns (e.g. view patterns). To handle this, we would need to backtrack while backtracking, and unbound levels of backtracking lead to very fragile performance. Alternative VI, an intermediate data type ----------------------------------------- There are common syntactic elements of expressions, commands, and patterns (e.g. 
all of them must have balanced parentheses), and we can capture this common structure in an intermediate data type, Frame: data Frame = FrameVar RdrName -- ^ Identifier: Just, map, BS.length | FrameTuple [LTupArgFrame] Boxity -- ^ Tuple (section): (a,b) (a,b,c) (a,,) (,a,) | FrameTySig LFrame (LHsSigWcType GhcPs) -- ^ Type signature: x :: ty | FramePar (SrcSpan, SrcSpan) LFrame -- ^ Parentheses | FrameIf LFrame LFrame LFrame -- ^ If-expression: if p then x else y | FrameCase LFrame [LFrameMatch] -- ^ Case-expression: case x of { p1 -> e1; p2 -> e2 } | FrameDo HsStmtContextRn [LFrameStmt] -- ^ Do-expression: do { s1; a <- s2; s3 } ... | FrameExpr (HsExpr GhcPs) -- unambiguously an expression | FramePat (HsPat GhcPs) -- unambiguously a pattern | FrameCommand (HsCmd GhcPs) -- unambiguously a command To determine which constructors 'Frame' needs to have, we take the union of intersections between HsExpr, HsCmd, and HsPat. The intersection between HsPat and HsExpr: HsPat = VarPat | TuplePat | SigPat | ParPat | ... HsExpr = HsVar | ExplicitTuple | ExprWithTySig | HsPar | ... ------------------------------------------------------------------- Frame = FrameVar | FrameTuple | FrameTySig | FramePar | ... The intersection between HsCmd and HsExpr: HsCmd = HsCmdIf | HsCmdCase | HsCmdDo | HsCmdPar HsExpr = HsIf | HsCase | HsDo | HsPar ------------------------------------------------ Frame = FrameIf | FrameCase | FrameDo | FramePar The intersection between HsCmd and HsPat: HsPat = ParPat | ... HsCmd = HsCmdPar | ... ----------------------- Frame = FramePar | ... Take the union of each intersection and this yields the final 'Frame' data type. The problem with this approach is that we end up duplicating a good portion of hsSyn: Frame for HsExpr, HsPat, HsCmd TupArgFrame for HsTupArg FrameMatch for Match FrameStmt for StmtLR FrameGRHS for GRHS FrameGRHSs for GRHSs ... 
Alternative VII, a product type ------------------------------- We could avoid the intermediate representation of Alternative VI by parsing into a product of interpretations directly: type ExpCmdPat = ( PV (LHsExpr GhcPs) , PV (LHsCmd GhcPs) , PV (LHsPat GhcPs) ) This means that in positions where we do not know whether to produce expression, a pattern, or a command, we instead produce a parser-validator for each possible option. Then, as soon as we have parsed far enough to resolve the ambiguity, we pick the appropriate component of the product, discarding the rest: checkExpOf3 (e, _, _) = e -- interpret as an expression checkCmdOf3 (_, c, _) = c -- interpret as a command checkPatOf3 (_, _, p) = p -- interpret as a pattern We can easily define ambiguities between arbitrary subsets of interpretations. For example, when we know ahead of type that only an expression or a command is possible, but not a pattern, we can use a smaller type: type ExpCmd = (PV (LHsExpr GhcPs), PV (LHsCmd GhcPs)) checkExpOf2 (e, _) = e -- interpret as an expression checkCmdOf2 (_, c) = c -- interpret as a command However, there is a slight problem with this approach, namely code duplication in parser productions. 
Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Under the new scheme, we have to completely duplicate its type signature and each reduction rule: alts :: { ( PV (Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)])) -- as an expression , PV (Located ([AddEpAnn],[LMatch GhcPs (LHsCmd GhcPs)])) -- as a command ) } : alts1 { ( checkExpOf2 $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) , checkCmdOf2 $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) ) } | ';' alts { ( checkExpOf2 $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) , checkCmdOf2 $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) ) } And the same goes for other productions: 'altslist', 'alts1', 'alt', 'alt_rhs', 'ralt', 'gdpats', 'gdpat', 'exp', ... and so on. That is a lot of code! Alternative VIII, a function from a GADT ---------------------------------------- We could avoid code duplication of the Alternative VII by representing the product as a function from a GADT: data ExpCmdG b where ExpG :: ExpCmdG HsExpr CmdG :: ExpCmdG HsCmd type ExpCmd = forall b. ExpCmdG b -> PV (Located (b GhcPs)) checkExp :: ExpCmd -> PV (LHsExpr GhcPs) checkCmd :: ExpCmd -> PV (LHsCmd GhcPs) checkExp f = f ExpG -- interpret as an expression checkCmd f = f CmdG -- interpret as a command Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } We abstract over LHsExpr, and it becomes: alts :: { forall b. 
ExpCmdG b -> PV (Located ([AddEpAnn],[LMatch GhcPs (Located (b GhcPs))])) } : alts1 { \tag -> $1 tag >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { \tag -> $2 tag >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Note that 'ExpCmdG' is a singleton type, the value is completely determined by the type: when (b~HsExpr), tag = ExpG when (b~HsCmd), tag = CmdG This is a clear indication that we can use a class to pass this value behind the scenes: class ExpCmdI b where expCmdG :: ExpCmdG b instance ExpCmdI HsExpr where expCmdG = ExpG instance ExpCmdI HsCmd where expCmdG = CmdG And now the 'alts' production is simplified, as we no longer need to thread 'tag' explicitly: alts :: { forall b. ExpCmdI b => PV (Located ([AddEpAnn],[LMatch GhcPs (Located (b GhcPs))])) } : alts1 { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } This encoding works well enough, but introduces an extra GADT unlike the tagless final encoding, and there's no need for this complexity. -} {- Note [PatBuilder] ~~~~~~~~~~~~~~~~~~~~ Unlike HsExpr or HsCmd, the Pat type cannot accommodate all intermediate forms, so we introduce the notion of a PatBuilder. Consider a pattern like this: Con a b c We parse arguments to "Con" one at a time in the fexp aexp parser production, building the result with mkHsAppPV, so the intermediate forms are: 1. Con 2. Con a 3. Con a b 4. Con a b c In 'HsExpr', we have 'HsApp', so the intermediate forms are represented like this (pseudocode): 1. "Con" 2. HsApp "Con" "a" 3. HsApp (HsApp "Con" "a") "b" 3. HsApp (HsApp (HsApp "Con" "a") "b") "c" Similarly, in 'HsCmd' we have 'HsCmdApp'. In 'Pat', however, what we have instead is 'ConPatIn', which is very awkward to modify and thus unsuitable for the intermediate forms. 
We also need an intermediate representation to postpone disambiguation between FunBind and PatBind. Consider: a `Con` b = ... a `fun` b = ... How do we know that (a `Con` b) is a PatBind but (a `fun` b) is a FunBind? We learn this by inspecting an intermediate representation in 'isFunLhs' and seeing that 'Con' is a data constructor but 'f' is not. We need an intermediate representation capable of representing both a FunBind and a PatBind, so Pat is insufficient. PatBuilder is an extension of Pat that is capable of representing intermediate parsing results for patterns and function bindings: data PatBuilder p = PatBuilderPat (Pat p) | PatBuilderApp (LocatedA (PatBuilder p)) (LocatedA (PatBuilder p)) | PatBuilderOpApp (LocatedA (PatBuilder p)) (LocatedA RdrName) (LocatedA (PatBuilder p)) ... It can represent any pattern via 'PatBuilderPat', but it also has a variety of other constructors which were added by following a simple principle: we never pattern match on the pattern stored inside 'PatBuilderPat'. -} --------------------------------------------------------------------------- -- Miscellaneous utilities -- | Check if a fixity is valid. We support bypassing the usual bound checks -- for some special operators. 
checkPrecP :: Located (SourceText,Int) -- ^ precedence -> Located (OrdList (LocatedN RdrName)) -- ^ operators -> P () checkPrecP (L l (_,i)) (L _ ol) | 0 <= i, i <= maxPrecedence = pure () | all specialOp ol = pure () | otherwise = addFatalError $ mkPlainErrorMsgEnvelope l (PsErrPrecedenceOutOfRange i) where -- If you change this, consider updating Note [Fixity of (->)] in GHC/Types.hs specialOp op = unLoc op == getRdrName unrestrictedFunTyCon mkRecConstrOrUpdate :: Bool -> LHsExpr GhcPs -> SrcSpan -> ([Fbind (HsExpr GhcPs)], Maybe SrcSpan) -> (Maybe (EpToken "{"), Maybe (EpToken "}")) -> PV (HsExpr GhcPs) mkRecConstrOrUpdate _ (L _ (HsVar _ (L l c))) _lrec (fbinds,dd) anns | isRdrDataCon c = do let (fs, ps) = partitionEithers fbinds case ps of p:_ -> addFatalError $ mkPlainErrorMsgEnvelope (getLocA p) $ PsErrOverloadedRecordDotInvalid _ -> return (mkRdrRecordCon (L l c) (mk_rec_fields fs dd) anns) mkRecConstrOrUpdate overloaded_update exp _ (fs,dd) anns | Just dd_loc <- dd = addFatalError $ mkPlainErrorMsgEnvelope dd_loc $ PsErrDotsInRecordUpdate | otherwise = mkRdrRecordUpd overloaded_update exp fs anns mkRdrRecordUpd :: Bool -> LHsExpr GhcPs -> [Fbind (HsExpr GhcPs)] -> (Maybe (EpToken "{"), Maybe (EpToken "}")) -> PV (HsExpr GhcPs) mkRdrRecordUpd overloaded_on exp@(L loc _) fbinds anns = do -- We do not need to know if OverloadedRecordDot is in effect. We do -- however need to know if OverloadedRecordUpdate (passed in -- overloaded_on) is in effect because it affects the Left/Right nature -- of the RecordUpd value we calculate. let (fs, ps) = partitionEithers fbinds fs' :: [LHsRecUpdField GhcPs GhcPs] fs' = map (fmap mk_rec_upd_field) fs case overloaded_on of False | not $ null ps -> -- A '.' was found in an update and OverloadedRecordUpdate isn't on. addFatalError $ mkPlainErrorMsgEnvelope (locA loc) PsErrOverloadedRecordUpdateNotEnabled False -> -- This is just a regular record update. 
return RecordUpd { rupd_ext = anns , rupd_expr = exp , rupd_flds = RegularRecUpdFields { xRecUpdFields = noExtField , recUpdFields = fs' } } -- This is a RecordDotSyntax update. True -> do let qualifiedFields = [ L l lbl | L _ (HsFieldBind _ (L l lbl) _ _) <- fs' , isQual . fieldOccRdrName $ lbl ] case qualifiedFields of qf:_ -> addFatalError $ mkPlainErrorMsgEnvelope (getLocA qf) $ PsErrOverloadedRecordUpdateNoQualifiedFields _ -> return $ RecordUpd { rupd_ext = anns , rupd_expr = exp , rupd_flds = OverloadedRecUpdFields { xOLRecUpdFields = noExtField , olRecUpdFields = toProjUpdates fbinds } } where toProjUpdates :: [Fbind (HsExpr GhcPs)] -> [LHsRecUpdProj GhcPs] toProjUpdates = map (\case { Right p -> p; Left f -> recFieldToProjUpdate f }) -- Convert a top-level field update like {foo=2} or {bar} (punned) -- to a projection update. recFieldToProjUpdate :: LHsRecField GhcPs (LHsExpr GhcPs) -> LHsRecUpdProj GhcPs recFieldToProjUpdate (L l (HsFieldBind anns (L _ (FieldOcc _ (L loc rdr))) arg pun)) = -- The idea here is to convert the label to a singleton [FastString]. let f = occNameFS . rdrNameOcc $ rdr fl = DotFieldOcc noAnn (L loc (FieldLabelString f)) lf = locA loc in mkRdrProjUpdate l (L lf [L (l2l loc) fl]) (punnedVar f) pun anns where -- If punning, compute HsVar "f" otherwise just arg. This -- has the effect that sentinel HsVar "pun-rhs" is replaced -- by HsVar "f" here, before the update is written to a -- setField expressions. punnedVar :: FastString -> LHsExpr GhcPs punnedVar f = if not pun then arg else noLocA . HsVar noExtField . noLocA . mkRdrUnqual . 
mkVarOccFS $ f mkRdrRecordCon :: LocatedN RdrName -> HsRecordBinds GhcPs -> (Maybe (EpToken "{"), Maybe (EpToken "}")) -> HsExpr GhcPs mkRdrRecordCon con flds anns = RecordCon { rcon_ext = anns, rcon_con = con, rcon_flds = flds } mk_rec_fields :: [LocatedA (HsRecField GhcPs arg)] -> Maybe SrcSpan -> HsRecFields GhcPs arg mk_rec_fields fs Nothing = HsRecFields { rec_ext = noExtField, rec_flds = fs, rec_dotdot = Nothing } mk_rec_fields fs (Just s) = HsRecFields { rec_ext = noExtField, rec_flds = fs , rec_dotdot = Just (L (l2l s) (RecFieldsDotDot $ length fs)) } mk_rec_upd_field :: HsRecField GhcPs (LHsExpr GhcPs) -> HsRecUpdField GhcPs GhcPs mk_rec_upd_field (HsFieldBind noAnn (L loc (FieldOcc _ rdr)) arg pun) = HsFieldBind noAnn (L loc (FieldOcc noExtField rdr)) arg pun mkInlinePragma :: SourceText -> (InlineSpec, RuleMatchInfo) -> Maybe Activation -> InlinePragma -- The (Maybe Activation) is because the user can omit -- the activation spec (and usually does) mkInlinePragma src (inl, match_info) mb_act = InlinePragma { inl_src = src -- See Note [Pragma source text] in "GHC.Types.SourceText" , inl_inline = inl , inl_sat = Nothing , inl_act = act , inl_rule = match_info } where act = case mb_act of Just act -> act Nothing -> -- No phase specified case inl of NoInline _ -> NeverActive Opaque _ -> NeverActive _other -> AlwaysActive mkOpaquePragma :: SourceText -> InlinePragma mkOpaquePragma src = InlinePragma { inl_src = src , inl_inline = Opaque src , inl_sat = Nothing -- By marking the OPAQUE pragma NeverActive we stop -- (constructor) specialisation on OPAQUE things. 
-- -- See Note [OPAQUE pragma] , inl_act = NeverActive , inl_rule = FunLike } checkNewOrData :: SrcSpan -> RdrName -> Bool -> NewOrData -> [LConDecl GhcPs] -> P (DataDefnCons (LConDecl GhcPs)) checkNewOrData span name is_type_data = curry $ \ case (NewType, [a]) -> pure $ NewTypeCon a (DataType, as) -> pure $ DataTypeCons is_type_data (handle_type_data as) (NewType, as) -> addFatalError $ mkPlainErrorMsgEnvelope span $ PsErrMultipleConForNewtype name (length as) where -- In a "type data" declaration, the constructors are in the type/class -- namespace rather than the data constructor namespace. -- See Note [Type data declarations] in GHC.Rename.Module. handle_type_data | is_type_data = map (fmap promote_constructor) | otherwise = id promote_constructor (dc@ConDeclGADT { con_names = cons }) = dc { con_names = fmap (fmap promote_name) cons } promote_constructor (dc@ConDeclH98 { con_name = con }) = dc { con_name = fmap promote_name con } promote_constructor dc = dc promote_name name = fromMaybe name (promoteRdrName name) ----------------------------------------------------------------------------- -- utilities for foreign declarations -- construct a foreign import declaration -- mkImport :: Located CCallConv -> Located Safety -> (Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs) -> (EpToken "import", TokDcolon) -> P (EpToken "foreign" -> HsDecl GhcPs) mkImport cconv safety (L loc (StringLiteral esrc entity _), v, ty) (timport, td) = case unLoc cconv of CCallConv -> returnSpec =<< mkCImport CApiConv -> do imp <- mkCImport if isCWrapperImport imp then addFatalError $ mkPlainErrorMsgEnvelope loc PsErrInvalidCApiImport else returnSpec imp StdCallConv -> returnSpec =<< mkCImport PrimCallConv -> mkOtherImport JavaScriptCallConv -> mkOtherImport where -- Parse a C-like entity string of the following form: -- "[static] [chname] [&] [cid]" | "dynamic" | "wrapper" -- If 'cid' is missing, the function name 'v' is used instead as symbol -- name (cf section 8.5.1 in 
Haskell 2010 report). mkCImport = do let e = unpackFS entity case parseCImport (reLoc cconv) (reLoc safety) (mkExtName (unLoc v)) e (L loc esrc) of Nothing -> addFatalError $ mkPlainErrorMsgEnvelope loc $ PsErrMalformedEntityString Just importSpec -> return importSpec isCWrapperImport (CImport _ _ _ _ CWrapper) = True isCWrapperImport _ = False -- currently, all the other import conventions only support a symbol name in -- the entity string. If it is missing, we use the function name instead. mkOtherImport = returnSpec importSpec where entity' = if nullFS entity then mkExtName (unLoc v) else entity funcTarget = CFunction (StaticTarget esrc entity' Nothing True) importSpec = CImport (L (l2l loc) esrc) (reLoc cconv) (reLoc safety) Nothing funcTarget returnSpec spec = return $ \tforeign -> ForD noExtField $ ForeignImport { fd_i_ext = (tforeign, timport, td) , fd_name = v , fd_sig_ty = ty , fd_fi = spec } -- the string "foo" is ambiguous: either a header or a C identifier. The -- C identifier case comes first in the alternatives below, so we pick -- that one. 
parseCImport :: LocatedE CCallConv -> LocatedE Safety -> FastString -> String -> Located SourceText -> Maybe (ForeignImport (GhcPass p)) parseCImport cconv safety nm str sourceText = listToMaybe $ map fst $ filter (null.snd) $ readP_to_S parse str where parse = do skipSpaces r <- choice [ string "dynamic" >> return (mk Nothing (CFunction DynamicTarget)), string "wrapper" >> return (mk Nothing CWrapper), do optional (token "static" >> skipSpaces) ((mk Nothing <$> cimp nm) +++ (do h <- munch1 hdr_char skipSpaces let src = mkFastString h mk (Just (Header (SourceText src) src)) <$> cimp nm)) ] skipSpaces return r token str = do _ <- string str toks <- look case toks of c : _ | id_char c -> pfail _ -> return () mk h n = CImport (reLoc sourceText) (reLoc cconv) (reLoc safety) h n hdr_char c = not (isSpace c) -- header files are filenames, which can contain -- pretty much any char (depending on the platform), -- so just accept any non-space character id_first_char c = isAlpha c || c == '_' id_char c = isAlphaNum c || c == '_' cimp nm = (ReadP.char '&' >> skipSpaces >> CLabel <$> cid) +++ (do isFun <- case unLoc cconv of CApiConv -> option True (do token "value" skipSpaces return False) _ -> return True cid' <- cid return (CFunction (StaticTarget NoSourceText cid' Nothing isFun))) where cid = return nm +++ (do c <- satisfy id_first_char cs <- many (satisfy id_char) return (mkFastString (c:cs))) -- construct a foreign export declaration -- mkExport :: Located CCallConv -> (Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs) -> ( EpToken "export", TokDcolon) -> P (EpToken "foreign" -> HsDecl GhcPs) mkExport (L lc cconv) (L le (StringLiteral esrc entity _), v, ty) (texport, td) = return $ \tforeign -> ForD noExtField $ ForeignExport { fd_e_ext = (tforeign, texport, td), fd_name = v, fd_sig_ty = ty , fd_fe = CExport (L (l2l le) esrc) (L (l2l lc) (CExportStatic esrc entity' cconv)) } where entity' | nullFS entity = mkExtName (unLoc v) | otherwise = entity -- Supplying 
the ext_name in a foreign decl is optional; if it -- isn't there, the Haskell name is assumed. Note that no transformation -- of the Haskell name is then performed, so if you foreign export (++), -- it's external name will be "++". Too bad; it's important because we don't -- want z-encoding (e.g. names with z's in them shouldn't be doubled) -- mkExtName :: RdrName -> CLabelString mkExtName rdrNm = occNameFS (rdrNameOcc rdrNm) -------------------------------------------------------------------------------- -- Help with module system imports/exports data ImpExpSubSpec = ImpExpAbs | ImpExpAll (EpToken "..") | ImpExpList [LocatedA ImpExpQcSpec] | ImpExpAllWith [LocatedA ImpExpQcSpec] data ImpExpQcSpec = ImpExpQcName (LocatedN RdrName) | ImpExpQcType (EpToken "type") (LocatedN RdrName) | ImpExpQcWildcard (EpToken "..") (EpToken ",") mkModuleImpExp :: Maybe (LWarningTxt GhcPs) -> (EpToken "(", EpToken ")") -> LocatedA ImpExpQcSpec -> ImpExpSubSpec -> P (IE GhcPs) mkModuleImpExp warning (top, tcp) (L l specname) subs = do case subs of ImpExpAbs | isVarNameSpace (rdrNameSpace name) -> return $ IEVar warning (L l (ieNameFromSpec specname)) Nothing | otherwise -> IEThingAbs warning . L l <$> nameT <*> pure noExportDoc ImpExpAll tok -> IEThingAll (warning, (top, tok, tcp)) . L l <$> nameT <*> pure noExportDoc ImpExpList xs -> (\newName -> IEThingWith (warning, (top,NoEpTok,NoEpTok,tcp)) (L l newName) NoIEWildcard (wrapped xs)) <$> nameT <*> pure noExportDoc ImpExpAllWith xs -> do allowed <- getBit PatternSynonymsBit if allowed then let withs = map unLoc xs pos = maybe NoIEWildcard IEWildcard (findIndex isImpExpQcWildcard withs) (td,tc) = case find isImpExpQcWildcard withs of Just (ImpExpQcWildcard td tc) -> (td,tc) _ -> (NoEpTok, NoEpTok) ies :: [LocatedA (IEWrappedName GhcPs)] ies = wrapped $ filter (not . isImpExpQcWildcard . 
unLoc) xs in (\newName -> IEThingWith (warning, (top,td,tc,tcp)) (L l newName) pos ies) <$> nameT <*> pure noExportDoc else addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrIllegalPatSynExport where noExportDoc :: Maybe (LHsDoc GhcPs) noExportDoc = Nothing name = ieNameVal specname nameT = if isVarNameSpace (rdrNameSpace name) then addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ (PsErrVarForTyCon name) else return $ ieNameFromSpec specname ieNameVal (ImpExpQcName ln) = unLoc ln ieNameVal (ImpExpQcType _ ln) = unLoc ln ieNameVal ImpExpQcWildcard{} = panic "ieNameVal got wildcard" ieNameFromSpec :: ImpExpQcSpec -> IEWrappedName GhcPs ieNameFromSpec (ImpExpQcName (L l n)) = IEName noExtField (L l n) ieNameFromSpec (ImpExpQcType r (L l n)) = IEType r (L l n) ieNameFromSpec ImpExpQcWildcard{} = panic "ieName got wildcard" wrapped = map (fmap ieNameFromSpec) mkTypeImpExp :: LocatedN RdrName -- TcCls or Var name space -> P (LocatedN RdrName) mkTypeImpExp name = do requireExplicitNamespaces (getLocA name) return (fmap (`setRdrNameSpace` tcClsName) name) checkImportSpec :: LocatedLI [LIE GhcPs] -> P (LocatedLI [LIE GhcPs]) checkImportSpec ie@(L _ specs) = case [l | (L l (IEThingWith _ _ (IEWildcard _) _ _)) <- specs] of [] -> return ie (l:_) -> importSpecError (locA l) where importSpecError l = addFatalError $ mkPlainErrorMsgEnvelope l PsErrIllegalImportBundleForm -- In the correct order mkImpExpSubSpec :: [LocatedA ImpExpQcSpec] -> P ImpExpSubSpec mkImpExpSubSpec [] = return (ImpExpList []) mkImpExpSubSpec [L _ (ImpExpQcWildcard td _tc)] = return (ImpExpAll td) mkImpExpSubSpec xs = if (any (isImpExpQcWildcard . 
unLoc) xs) then return $ (ImpExpAllWith xs) else return $ (ImpExpList xs) isImpExpQcWildcard :: ImpExpQcSpec -> Bool isImpExpQcWildcard (ImpExpQcWildcard _ _) = True isImpExpQcWildcard _ = False ----------------------------------------------------------------------------- -- Warnings and failures warnPrepositiveQualifiedModule :: SrcSpan -> P () warnPrepositiveQualifiedModule span = addPsMessage span PsWarnImportPreQualified failNotEnabledImportQualifiedPost :: SrcSpan -> P () failNotEnabledImportQualifiedPost loc = addError $ mkPlainErrorMsgEnvelope loc $ PsErrImportPostQualified failImportQualifiedTwice :: SrcSpan -> P () failImportQualifiedTwice loc = addError $ mkPlainErrorMsgEnvelope loc $ PsErrImportQualifiedTwice warnStarIsType :: SrcSpan -> P () warnStarIsType span = addPsMessage span PsWarnStarIsType failOpFewArgs :: MonadP m => LocatedN RdrName -> m a failOpFewArgs (L loc op) = do { star_is_type <- getBit StarIsTypeBit ; let is_star_type = if star_is_type then StarIsType else StarIsNotType ; addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrOpFewArgs is_star_type op) } requireExplicitNamespaces :: MonadP m => SrcSpan -> m () requireExplicitNamespaces l = do allowed <- getBit ExplicitNamespacesBit unless allowed $ addError $ mkPlainErrorMsgEnvelope l PsErrIllegalExplicitNamespace ----------------------------------------------------------------------------- -- Misc utils data PV_Context = PV_Context { pv_options :: ParserOpts , pv_details :: ParseContext -- See Note [Parser-Validator Details] } data PV_Accum = PV_Accum { pv_warnings :: Messages PsMessage , pv_errors :: Messages PsMessage , pv_header_comments :: Strict.Maybe [LEpaComment] , pv_comment_q :: [LEpaComment] } data PV_Result a = PV_Ok PV_Accum a | PV_Failed PV_Accum deriving (Foldable, Functor, Traversable) -- During parsing, we make use of several monadic effects: reporting parse errors, -- accumulating warnings, adding API annotations, and checking for extensions. 
These -- effects are captured by the 'MonadP' type class. -- -- Sometimes we need to postpone some of these effects to a later stage due to -- ambiguities described in Note [Ambiguous syntactic categories]. -- We could use two layers of the P monad, one for each stage: -- -- abParser :: forall x. DisambAB x => P (P x) -- -- The outer layer of P consumes the input and builds the inner layer, which -- validates the input. But this type is not particularly helpful, as it obscures -- the fact that the inner layer of P never consumes any input. -- -- For clarity, we introduce the notion of a parser-validator: a parser that does -- not consume any input, but may fail or use other effects. Thus we have: -- -- abParser :: forall x. DisambAB x => P (PV x) -- newtype PV a = PV { unPV :: PV_Context -> PV_Accum -> PV_Result a } deriving (Functor) instance Applicative PV where pure a = a `seq` PV (\_ acc -> PV_Ok acc a) (<*>) = ap instance Monad PV where m >>= f = PV $ \ctx acc -> case unPV m ctx acc of PV_Ok acc' a -> unPV (f a) ctx acc' PV_Failed acc' -> PV_Failed acc' runPV :: PV a -> P a runPV = runPV_details noParseContext askParseContext :: PV ParseContext askParseContext = PV $ \(PV_Context _ details) acc -> PV_Ok acc details runPV_details :: ParseContext -> PV a -> P a runPV_details details m = P $ \s -> let pv_ctx = PV_Context { pv_options = options s , pv_details = details } pv_acc = PV_Accum { pv_warnings = warnings s , pv_errors = errors s , pv_header_comments = header_comments s , pv_comment_q = comment_q s } mkPState acc' = s { warnings = pv_warnings acc' , errors = pv_errors acc' , comment_q = pv_comment_q acc' } in case unPV m pv_ctx pv_acc of PV_Ok acc' a -> POk (mkPState acc') a PV_Failed acc' -> PFailed (mkPState acc') instance MonadP PV where addError err = PV $ \_ctx acc -> PV_Ok acc{pv_errors = err `addMessage` pv_errors acc} () addWarning w = PV $ \_ctx acc -> -- No need to check for the warning flag to be set, GHC will correctly discard suppressed -- 
diagnostics. PV_Ok acc{pv_warnings= w `addMessage` pv_warnings acc} () addFatalError err = addError err >> PV (const PV_Failed) getParserOpts = PV $ \ctx acc -> PV_Ok acc $! pv_options ctx allocateCommentsP ss = PV $ \_ s -> if null (pv_comment_q s) then PV_Ok s emptyComments else -- fast path let (comment_q', newAnns) = allocateComments ss (pv_comment_q s) in PV_Ok s { pv_comment_q = comment_q' } (EpaComments newAnns) allocatePriorCommentsP ss = PV $ \_ s -> let (header_comments', comment_q', newAnns) = allocatePriorComments ss (pv_comment_q s) (pv_header_comments s) in PV_Ok s { pv_header_comments = header_comments', pv_comment_q = comment_q' } (EpaComments newAnns) allocateFinalCommentsP ss = PV $ \_ s -> let (header_comments', comment_q', newAnns) = allocateFinalComments ss (pv_comment_q s) (pv_header_comments s) in PV_Ok s { pv_header_comments = header_comments', pv_comment_q = comment_q' } (EpaCommentsBalanced (Strict.fromMaybe [] header_comments') newAnns) {- Note [Parser-Validator Details] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A PV computation is parameterized by some 'ParseContext' for diagnostic messages, which can be set depending on validation context. We use this in checkPattern to fix #984. Consider this example, where the user has forgotten a 'do': f _ = do x <- computation case () of _ -> result <- computation case () of () -> undefined GHC parses it as follows: f _ = do x <- computation (case () of _ -> result) <- computation case () of () -> undefined Note that this fragment is parsed as a pattern: case () of _ -> result We attempt to detect such cases and add a hint to the diagnostic messages: T984.hs:6:9: Parse error in pattern: case () of { _ -> result } Possibly caused by a missing 'do'? The "Possibly caused by a missing 'do'?" suggestion is the hint that is computed out of the 'ParseContext', which are read by functions like 'patFail' when constructing the 'PsParseErrorInPatDetails' data structure. 
When validating in a context other than 'bindpat' (a pattern to the left of <-), we set the details to 'noParseContext' and it has no effect on the diagnostic messages. -} -- | Hint about bang patterns, assuming @BangPatterns@ is off. hintBangPat :: SrcSpan -> Pat GhcPs -> PV () hintBangPat span e = do bang_on <- getBit BangPatBit unless bang_on $ addError $ mkPlainErrorMsgEnvelope span $ PsErrIllegalBangPattern e mkSumOrTupleExpr :: SrcSpanAnnA -> Boxity -> SumOrTuple (HsExpr GhcPs) -> (EpaLocation, EpaLocation) -> PV (LHsExpr GhcPs) -- Tuple mkSumOrTupleExpr l@(EpAnn anc an csIn) boxity (Tuple es) anns = do !cs <- getCommentsFor (locA l) return $ L (EpAnn anc an (csIn Semi.<> cs)) (ExplicitTuple anns (map toTupArg es) boxity) where toTupArg :: Either (EpAnn Bool) (LHsExpr GhcPs) -> HsTupArg GhcPs toTupArg (Left ann) = missingTupArg ann toTupArg (Right a) = Present noExtField a -- Sum -- mkSumOrTupleExpr l Unboxed (Sum alt arity e) = -- return $ L l (ExplicitSum noExtField alt arity e) mkSumOrTupleExpr l@(EpAnn anc anIn csIn) Unboxed (Sum alt arity e barsp barsa) (o, c) = do let an = AnnExplicitSum o barsp barsa c !cs <- getCommentsFor (locA l) return $ L (EpAnn anc anIn (csIn Semi.<> cs)) (ExplicitSum an alt arity e) mkSumOrTupleExpr l Boxed a@Sum{} _ = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrUnsupportedBoxedSumExpr a mkSumOrTuplePat :: SrcSpanAnnA -> Boxity -> SumOrTuple (PatBuilder GhcPs) -> (EpaLocation, EpaLocation) -> PV (LocatedA (PatBuilder GhcPs)) -- Tuple mkSumOrTuplePat l boxity (Tuple ps) anns = do ps' <- traverse toTupPat ps return $ L l (PatBuilderPat (TuplePat anns ps' boxity)) where toTupPat :: Either (EpAnn Bool) (LocatedA (PatBuilder GhcPs)) -> PV (LPat GhcPs) -- Ignore the element location so that the error message refers to the -- entire tuple. See #19504 (and the discussion) for details. 
toTupPat p = case p of Left _ -> addFatalError $ mkPlainErrorMsgEnvelope (locA l) PsErrTupleSectionInPat Right p' -> checkLPat p' -- Sum mkSumOrTuplePat l Unboxed (Sum alt arity p barsb barsa) anns = do p' <- checkLPat p let an = EpAnnSumPat anns barsb barsa return $ L l (PatBuilderPat (SumPat an p' alt arity)) mkSumOrTuplePat l Boxed a@Sum{} _ = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrUnsupportedBoxedSumPat a mkLHsOpTy :: PromotionFlag -> LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> LHsType GhcPs mkLHsOpTy prom x op y = let loc = locA x `combineSrcSpans` locA op `combineSrcSpans` locA y in L (noAnnSrcSpan loc) (mkHsOpTy prom x op y) mkMultTy :: EpToken "%" -> LHsType GhcPs -> TokRarrow -> HsArrow GhcPs mkMultTy pct t@(L _ (HsTyLit _ (HsNumTy (SourceText (unpackFS -> "1")) 1))) arr -- See #18888 for the use of (SourceText "1") above = HsLinearArrow (EpPct1 pct1 arr) where -- The location of "%" combined with the location of "1". pct1 :: EpToken "%1" pct1 = epTokenWidenR pct (locA (getLoc t)) mkMultTy pct t arr = HsExplicitMult (pct, arr) t mkMultExpr :: EpToken "%" -> LHsExpr GhcPs -> TokRarrow -> HsArrowOf (LHsExpr GhcPs) GhcPs mkMultExpr pct t@(L _ (HsOverLit _ (OverLit _ (HsIntegral (IL (SourceText (unpackFS -> "1")) _ 1))))) arr -- See #18888 for the use of (SourceText "1") above = HsLinearArrow (EpPct1 pct1 arr) where -- The location of "%" combined with the location of "1". pct1 :: EpToken "%1" pct1 = epTokenWidenR pct (locA (getLoc t)) mkMultExpr pct t arr = HsExplicitMult (pct, arr) t mkMultAnn :: EpToken "%" -> LHsType GhcPs -> HsMultAnn GhcPs mkMultAnn pct t@(L _ (HsTyLit _ (HsNumTy (SourceText (unpackFS -> "1")) 1))) -- See #18888 for the use of (SourceText "1") above = HsPct1Ann pct1 where -- The location of "%" combined with the location of "1". 
pct1 :: EpToken "%1" pct1 = epTokenWidenR pct (locA (getLoc t)) mkMultAnn pct t = HsMultAnn pct t mkTokenLocation :: SrcSpan -> TokenLocation mkTokenLocation (UnhelpfulSpan _) = NoTokenLoc mkTokenLocation (RealSrcSpan r mb) = TokenLoc (EpaSpan (RealSrcSpan r mb)) -- Precondition: the EpToken has EpaSpan, never EpaDelta. epTokenWidenR :: EpToken tok -> SrcSpan -> EpToken tok' epTokenWidenR NoEpTok _ = NoEpTok epTokenWidenR (EpTok l) (UnhelpfulSpan _) = EpTok l epTokenWidenR (EpTok (EpaSpan s1)) s2 = EpTok (EpaSpan (combineSrcSpans s1 s2)) epTokenWidenR (EpTok EpaDelta{}) _ = -- Never happens because the parser does not produce EpaDelta. panic "epTokenWidenR: EpaDelta" ----------------------------------------------------------------------------- -- Token symbols starSym :: Bool -> FastString starSym True = fsLit "★" starSym False = fsLit "*" ----------------------------------------- -- Bits and pieces for RecordDotSyntax. mkRdrGetField :: LHsExpr GhcPs -> LocatedAn NoEpAnns (DotFieldOcc GhcPs) -> HsExpr GhcPs mkRdrGetField arg field = HsGetField { gf_ext = NoExtField , gf_expr = arg , gf_field = field } mkRdrProjection :: NonEmpty (LocatedAn NoEpAnns (DotFieldOcc GhcPs)) -> AnnProjection -> HsExpr GhcPs mkRdrProjection flds anns = HsProjection { proj_ext = anns , proj_flds = fmap unLoc flds } mkRdrProjUpdate :: SrcSpanAnnA -> Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] -> LHsExpr GhcPs -> Bool -> Maybe (EpToken "=") -> LHsRecProj GhcPs (LHsExpr GhcPs) mkRdrProjUpdate _ (L _ []) _ _ _ = panic "mkRdrProjUpdate: The impossible has happened!" 
mkRdrProjUpdate loc (L l flds) arg isPun anns = L loc HsFieldBind { hfbAnn = anns , hfbLHS = L (noAnnSrcSpan l) (FieldLabelStrings flds) , hfbRHS = arg , hfbPun = isPun } ----------------------------------------------------------------------------- -- Tuple and list punning punsAllowed :: P Bool punsAllowed = getBit ListTuplePunsBit -- | Check whether @ListTuplePuns@ is enabled and return the first arg if it is, -- the second arg otherwise. punsIfElse :: a -> a -> P a punsIfElse enabled disabled = do allowed <- punsAllowed pure (if allowed then enabled else disabled) -- | Emit an error of type 'PsErrInvalidPun' with a location from @start@ to -- @end@ if the extension @ListTuplePuns@ is disabled. -- -- This is used in Parser.y to guard rules that require punning. requireLTPuns :: PsErrPunDetails -> Located a -> Located b -> P () requireLTPuns err start end = unlessM punsAllowed $ do addError (mkPlainErrorMsgEnvelope loc (PsErrInvalidPun err)) where loc = (combineSrcSpans (getLoc start) (getLoc end)) -- | Call a parser with a span and its comments given by a start and end token. withCombinedComments :: HasLoc l1 => HasLoc l2 => l1 -> l2 -> (SrcSpan -> P a) -> P (LocatedA a) withCombinedComments start end use = do cs <- getCommentsFor fullSpan a <- use fullSpan pure (L (EpAnn (spanAsAnchor fullSpan) noAnn cs) a) where fullSpan = combineSrcSpans (getHasLoc start) (getHasLoc end) -- | Decide whether to parse tuple syntax @(Int, Double)@ in a type as a -- type or data constructor, based on the extension @ListTuplePuns@. -- The case with an explicit promotion quote, @'(Int, Double)@, is handled -- by 'mkExplicitTupleTy'. 
mkTupleSyntaxTy :: EpToken "(" -> [LocatedA (HsType GhcPs)] -> EpToken ")" -> P (HsType GhcPs) mkTupleSyntaxTy parOpen args parClose = punsIfElse enabled disabled where enabled = HsTupleTy annParen HsBoxedOrConstraintTuple args disabled = HsExplicitTupleTy annsKeyword NotPromoted args annParen = AnnParens parOpen parClose annsKeyword = (NoEpTok, parOpen, parClose) -- | Decide whether to parse tuple con syntax @(,)@ in a type as a -- type or data constructor, based on the extension @ListTuplePuns@. -- The case with an explicit promotion quote, @'(,)@, is handled -- by the rule @SIMPLEQUOTE sysdcon_nolist@ in @atype@. mkTupleSyntaxTycon :: Boxity -> Int -> P RdrName mkTupleSyntaxTycon boxity n = punsIfElse (getRdrName (tupleTyCon boxity n)) (getRdrName (tupleDataCon boxity n)) -- | Decide whether to parse list tycon syntax @[]@ in a type as a type or data -- constructor, based on the extension @ListTuplePuns@. -- The case with an explicit promotion quote, @'[]@, is handled by -- 'mkExplicitListTy'. mkListSyntaxTy0 :: EpToken "[" -> EpToken "]" -> SrcSpan -> P (HsType GhcPs) mkListSyntaxTy0 brkOpen brkClose span = punsIfElse enabled disabled where enabled = HsTyVar noAnn NotPromoted rn -- attach the comments only to the RdrName since it's the innermost AST node rn = L (EpAnn fullLoc rdrNameAnn emptyComments) listTyCon_RDR disabled = HsExplicitListTy annsKeyword NotPromoted [] rdrNameAnn = NameAnnOnly (NameSquare brkOpen brkClose) [] annsKeyword = (NoEpTok, brkOpen, brkClose) fullLoc = EpaSpan span -- | Decide whether to parse list type syntax @[Int]@ in a type as a -- type or data constructor, based on the extension @ListTuplePuns@. -- The case with an explicit promotion quote, @'[Int]@, is handled -- by 'mkExplicitListTy'. 
mkListSyntaxTy1 :: EpToken "[" -> LocatedA (HsType GhcPs) -> EpToken "]" -> P (HsType GhcPs) mkListSyntaxTy1 brkOpen t brkClose = punsIfElse enabled disabled where enabled = HsListTy annParen t disabled = HsExplicitListTy annsKeyword NotPromoted [t] annsKeyword = (NoEpTok, brkOpen, brkClose) annParen = AnnParensSquare brkOpen brkClose ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/PostProcess/0000755000000000000000000000000007346545000021426 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/PostProcess/Haddock.hs0000644000000000000000000016204307346545000023325 0ustar0000000000000000{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {- | This module implements 'addHaddockToModule', which inserts Haddock comments accumulated during parsing into the AST (#17544). We process Haddock comments in two phases: 1. Parse the program (via the Happy parser in `Parser.y`), generating an AST, and (quite separately) a list of all the Haddock comments found in the file. More precisely, the Haddock comments are accumulated in the `hdk_comments` field of the `PState`, the parser state (see Lexer.x): data PState = PState { ... , hdk_comments :: [PsLocated HdkComment] } Each of these Haddock comments has a `PsSpan`, which gives the `BufPos` of the beginning and end of the Haddock comment. 2. Walk over the AST, attaching the Haddock comments to the correct parts of the tree. This step is called `addHaddockToModule`, and is implemented in this module. See Note [Adding Haddock comments to the syntax tree]. This approach codifies an important principle: The presence or absence of a Haddock comment should never change the parsing of a program. Alternative approaches that did not work properly: 1. 
Using 'RealSrcLoc' instead of 'BufPos'. This led to failures in presence of {-# LANGUAGE CPP #-} and other sources of line pragmas. See documentation on 'BufPos' (in GHC.Types.SrcLoc) for the details. 2. In earlier versions of GHC, the Haddock comments were incorporated into the Parser.y grammar. The parser constructed the AST and attached comments to it in a single pass. See Note [Old solution: Haddock in the grammar] for the details. -} module GHC.Parser.PostProcess.Haddock (addHaddockToModule) where import GHC.Prelude hiding (head, init, last, mod, tail) import GHC.Hs import GHC.Types.SrcLoc import Data.Semigroup import Data.Foldable import Data.Traversable import qualified Data.List.NonEmpty as NE import Control.Applicative import Control.Monad import Control.Monad.Trans.State.Strict import Control.Monad.Trans.Reader import Data.Functor.Identity import {-# SOURCE #-} GHC.Parser (parseIdentifier) import GHC.Parser.Lexer import GHC.Parser.HaddockLex import GHC.Parser.Errors.Types import GHC.Utils.Misc (mergeListsBy, filterOut, (<&&>)) import qualified GHC.Data.Strict as Strict {- Note [Adding Haddock comments to the syntax tree] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'addHaddock' traverses the AST in concrete syntax order, building a computation (represented by HdkA) that reconstructs the AST but with Haddock comments inserted in appropriate positions: addHaddock :: HasHaddock a => a -> HdkA a Consider this code example: f :: Int -- ^ comment on argument -> Bool -- ^ comment on result In the AST, the "Int" part of this snippet is represented like this (pseudo-code): L (BufSpan 6 8) (HsTyVar "Int") :: LHsType GhcPs And the comments are represented like this (pseudo-code): L (BufSpan 11 35) (HdkCommentPrev "comment on argument") L (BufSpan 46 69) (HdkCommentPrev "comment on result") So when we are traversing the AST and 'addHaddock' is applied to HsTyVar "Int", how does it know to associate it with "comment on argument" but not with "comment on 
result"? The trick is to look in the space between syntactic elements. In the example above, the location range in which we search for HdkCommentPrev is as follows: f :: Int████████████████████████ ████Bool -- ^ comment on result We search for comments after HsTyVar "Int" and until the next syntactic element, in this case HsTyVar "Bool". Ignoring the "->" allows us to accommodate alternative coding styles: f :: Int -> -- ^ comment on argument Bool -- ^ comment on result Sometimes we also need to take indentation information into account. Compare the following examples: class C a where f :: a -> Int -- ^ comment on f class C a where f :: a -> Int -- ^ comment on C Notice how "comment on f" and "comment on C" differ only by indentation level. Therefore, in order to know the location range in which the comments are applicable to a syntactic elements, we need three nuggets of information: 1. lower bound on the BufPos of a comment 2. upper bound on the BufPos of a comment 3. minimum indentation level of a comment This information is represented by the 'LocRange' type. In order to propagate this information, we have the 'HdkA' applicative. 'HdkA' is defined as follows: data HdkA a = HdkA (Maybe BufSpan) (HdkM a) The first field contains a 'BufSpan', which represents the location span taken by a syntactic element: addHaddock (L bufSpan ...) = HdkA (Just bufSpan) ... 
The second field, 'HdkM', is a stateful computation that looks up Haddock comments in the specified location range: HdkM a ≈ LocRange -- The allowed location range -> [PsLocated HdkComment] -- Unallocated comments -> (a, -- AST with comments inserted into it [PsLocated HdkComment]) -- Leftover comments The 'Applicative' instance for 'HdkA' is defined in such a way that the location range of every computation is defined by its neighbours: addHaddock aaa <*> addHaddock bbb <*> addHaddock ccc Here, the 'LocRange' passed to the 'HdkM' computation of addHaddock bbb is determined by the BufSpan recorded in addHaddock aaa and addHaddock ccc. This is why it's important to traverse the AST in the order of the concrete syntax. In the example above we assume that aaa, bbb, ccc are ordered by location: * getBufSpan (getLoc aaa) < getBufSpan (getLoc bbb) * getBufSpan (getLoc bbb) < getBufSpan (getLoc ccc) Violation of this assumption would lead to bugs, and care must be taken to traverse the AST correctly. For example, when dealing with class declarations, we have to use 'flattenBindsAndSigs' to traverse it in the correct order. -} -- | Add Haddock documentation accumulated in the parser state -- to a parsed HsModule. -- -- Reports badly positioned comments when -Winvalid-haddock is enabled. 
addHaddockToModule :: Located (HsModule GhcPs) -> P (Located (HsModule GhcPs)) addHaddockToModule lmod = do pState <- getPState let all_comments = toList (hdk_comments pState) initial_hdk_st = HdkSt all_comments [] (lmod', final_hdk_st) = runHdkA (addHaddock lmod) initial_hdk_st hdk_warnings = collectHdkWarnings final_hdk_st -- lmod': module with Haddock comments inserted into the AST -- hdk_warnings: warnings accumulated during AST/comment processing mapM_ reportHdkWarning hdk_warnings return lmod' reportHdkWarning :: HdkWarn -> P () reportHdkWarning (HdkWarnInvalidComment (L l _)) = addPsMessage (mkSrcSpanPs l) PsWarnHaddockInvalidPos reportHdkWarning (HdkWarnExtraComment (L l _)) = addPsMessage l PsWarnHaddockIgnoreMulti collectHdkWarnings :: HdkSt -> [HdkWarn] collectHdkWarnings HdkSt{ hdk_st_pending, hdk_st_warnings } = map HdkWarnInvalidComment hdk_st_pending -- leftover Haddock comments not inserted into the AST ++ hdk_st_warnings {- ********************************************************************* * * * addHaddock: a family of functions that processes the AST * * in concrete syntax order, adding documentation comments to it * * * ********************************************************************* -} -- HasHaddock is a convenience class for overloading the addHaddock operation. -- Alternatively, we could define a family of monomorphic functions: -- -- addHaddockSomeTypeX :: SomeTypeX -> HdkA SomeTypeX -- addHaddockAnotherTypeY :: AnotherTypeY -> HdkA AnotherTypeY -- addHaddockOneMoreTypeZ :: OneMoreTypeZ -> HdkA OneMoreTypeZ -- -- But having a single name for all of them is just easier to read, and makes it clear -- that they all are of the form t -> HdkA t for some t. -- -- If you need to handle a more complicated scenario that doesn't fit this -- pattern, it's always possible to define separate functions outside of this -- class, as is done in case of e.g. addHaddockConDeclField. -- -- See Note [Adding Haddock comments to the syntax tree]. 
class HasHaddock a where addHaddock :: a -> HdkA a instance HasHaddock a => HasHaddock [a] where addHaddock = traverse addHaddock -- -- | Module header comment -- module M ( -- -- * Export list comment -- Item1, -- Item2, -- -- * Export list comment -- item3, -- item4 -- ) where -- instance HasHaddock (Located (HsModule GhcPs)) where addHaddock (L l_mod mod) = do -- Step 1, get the module header documentation comment: -- -- -- | Module header comment -- module M where -- -- Only do this when the module header exists. headerDocs <- for @Maybe (hsmodName mod) $ \(L l_name _) -> extendHdkA (locA l_name) $ liftHdkA $ do -- todo: register keyword location of 'module', see Note [Register keyword location] docs <- inLocRange (locRangeTo (getBufPos (srcSpanStart (locA l_name)))) $ takeHdkComments mkDocNext dc <- selectDocString docs pure $ lexLHsDocString <$> dc -- Step 2, process documentation comments in the export list: -- -- module M ( -- -- * Export list comment -- Item1, -- Item2, -- -- * Export list comment -- item3, -- item4 -- ) where -- -- Only do this when the export list exists. 
hsmodExports' <- traverse @Maybe addHaddock (hsmodExports mod) -- Step 3, register the import section to reject invalid comments: -- -- import Data.Maybe -- -- | rejected comment (cannot appear here) -- import Data.Bool -- traverse_ registerHdkA (hsmodImports mod) -- Step 4, process declarations: -- -- module M where -- -- | Comment on D -- data D = MkD -- ^ Comment on MkD -- data C = MkC -- ^ Comment on MkC -- -- ^ Comment on C -- let layout = hsmodLayout (hsmodExt mod) hsmodDecls' <- addHaddockInterleaveItems layout (mkDocHsDecl layout) (hsmodDecls mod) pure $ L l_mod $ mod { hsmodExports = hsmodExports' , hsmodDecls = hsmodDecls' , hsmodExt = (hsmodExt mod) { hsmodHaddockModHeader = join @Maybe headerDocs } } lexHsDocString :: HsDocString -> HsDoc GhcPs lexHsDocString = lexHsDoc parseIdentifier lexLHsDocString :: Located HsDocString -> LHsDoc GhcPs lexLHsDocString = fmap lexHsDocString -- | Only for module exports, not module imports. -- -- module M (a, b, c) where -- use on this [LIE GhcPs] -- import I (a, b, c) -- do not use here! -- -- Imports cannot have documentation comments anyway. instance HasHaddock (LocatedLI [LocatedA (IE GhcPs)]) where addHaddock (L l_exports exports) = extendHdkA (locA l_exports) $ do exports' <- addHaddockInterleaveItems EpNoLayout mkDocIE exports registerLocHdkA (srcLocSpan (srcSpanEnd (locA l_exports))) -- Do not consume comments after the closing parenthesis pure $ L l_exports exports' -- Needed to use 'addHaddockInterleaveItems' in 'instance HasHaddock (Located [LIE GhcPs])'. 
instance HasHaddock (LocatedA (IE GhcPs)) where addHaddock (L l_export ie ) = extendHdkA (locA l_export) $ liftHdkA $ do docs <- inLocRange (locRangeFrom (getBufPos (srcSpanEnd (locA l_export)))) $ takeHdkComments mkDocPrev mb_doc <- selectDocString docs let mb_ldoc = lexLHsDocString <$> mb_doc let ie' = case ie of IEVar ext nm _ -> IEVar ext nm mb_ldoc IEThingAbs ext nm _ -> IEThingAbs ext nm mb_ldoc IEThingAll ext nm _ -> IEThingAll ext nm mb_ldoc IEThingWith ext nm wild subs _ -> IEThingWith ext nm wild subs mb_ldoc x -> x pure $ L l_export ie' {- Add Haddock items to a list of non-Haddock items. Used to process export lists (with mkDocIE) and declarations (with mkDocHsDecl). For example: module M where -- | Comment on D data D = MkD -- ^ Comment on MkD data C = MkC -- ^ Comment on MkC -- ^ Comment on C In this case, we should produce four HsDecl items (pseudo-code): 1. DocD (DocCommentNext "Comment on D") 2. TyClD (DataDecl "D" ... [ConDeclH98 "MkD" ... (Just "Comment on MkD")]) 3. TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... (Just "Comment on MkC")]) 4. DocD (DocCommentPrev "Comment on C") The inputs to addHaddockInterleaveItems are: * layout :: EpLayout In the example above, note that the indentation level inside the module is 2 spaces. It would be represented as layout = EpVirtualBraces 2. It is used to delimit the search space for comments when processing declarations. Here, we restrict indentation levels to >=(2+1), so that when we look up comment on MkC, we get "Comment on MkC" but not "Comment on C". * get_doc_item :: PsLocated HdkComment -> Maybe a This is the function used to look up documentation comments. In the above example, get_doc_item = mkDocHsDecl layout, and it will produce the following parts of the output: DocD (DocCommentNext "Comment on D") DocD (DocCommentPrev "Comment on C") * The list of items. These are the declarations that will be annotated with documentation comments. Before processing: TyClD (DataDecl "D" ... [ConDeclH98 "MkD" ... 
Nothing]) TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... Nothing]) After processing: TyClD (DataDecl "D" ... [ConDeclH98 "MkD" ... (Just "Comment on MkD")]) TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... (Just "Comment on MkC")]) -} addHaddockInterleaveItems :: forall a. HasHaddock a => EpLayout -> (PsLocated HdkComment -> Maybe a) -- Get a documentation item -> [a] -- Unprocessed (non-documentation) items -> HdkA [a] -- Documentation items & processed non-documentation items addHaddockInterleaveItems layout get_doc_item = go where go :: [a] -> HdkA [a] go [] = liftHdkA (takeHdkComments get_doc_item) go (item : items) = do docItems <- liftHdkA (takeHdkComments get_doc_item) item' <- with_layout (addHaddock item) other_items <- go items pure $ docItems ++ item':other_items with_layout :: HdkA a -> HdkA a with_layout = case layout of EpNoLayout -> id EpExplicitBraces{} -> id EpVirtualBraces n -> let loc_range = mempty { loc_range_col = ColumnFrom (n+1) } in hoistHdkA (inLocRange loc_range) instance HasHaddock (LocatedA (HsDecl GhcPs)) where addHaddock ldecl = extendHdkA (getLocA ldecl) $ traverse @LocatedA addHaddock ldecl -- Process documentation comments *inside* a declaration, for example: -- -- data T = MkT -- ^ Comment on MkT (inside DataDecl) -- f, g -- :: Int -- ^ Comment on Int (inside TypeSig) -- -> Bool -- ^ Comment on Bool (inside TypeSig) -- -- Comments that relate to the entire declaration are processed elsewhere: -- -- -- | Comment on T (not processed in this instance) -- data T = MkT -- -- -- | Comment on f, g (not processed in this instance) -- f, g :: Int -> Bool -- f = ... -- g = ... -- -- Such comments are inserted into the syntax tree as DocD declarations -- by addHaddockInterleaveItems, and then associated with other declarations -- in GHC.HsToCore.Docs (see DeclDocMap). -- -- In this instance, we only process comments that relate to parts of the -- declaration, not to the declaration itself. 
instance HasHaddock (HsDecl GhcPs) where -- Type signatures: -- -- f, g -- :: Int -- ^ Comment on Int -- -> Bool -- ^ Comment on Bool -- addHaddock (SigD _ (TypeSig x names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (TypeSig x names t')) -- Pattern synonym type signatures: -- -- pattern MyPat -- :: Bool -- ^ Comment on Bool -- -> Maybe Bool -- ^ Comment on Maybe Bool -- addHaddock (SigD _ (PatSynSig x names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (PatSynSig x names t')) -- Class method signatures and default signatures: -- -- class C x where -- method_of_c -- :: Maybe x -- ^ Comment on Maybe x -- -> IO () -- ^ Comment on IO () -- default method_of_c -- :: Eq x -- => Maybe x -- ^ Comment on Maybe x -- -> IO () -- ^ Comment on IO () -- addHaddock (SigD _ (ClassOpSig x is_dflt names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (ClassOpSig x is_dflt names t')) -- Data/newtype declarations: -- -- data T = MkT -- ^ Comment on MkT -- A -- ^ Comment on A -- B -- ^ Comment on B -- -- data G where -- -- | Comment on MkG -- MkG :: A -- ^ Comment on A -- -> B -- ^ Comment on B -- -> G -- -- newtype N = MkN { getN :: Natural } -- ^ Comment on N -- deriving newtype (Eq {- ^ Comment on Eq N -}) -- deriving newtype (Ord {- ^ Comment on Ord N -}) -- addHaddock (TyClD x decl) | DataDecl { tcdDExt, tcdLName, tcdTyVars, tcdFixity, tcdDataDefn = defn } <- decl = do registerHdkA tcdLName defn' <- addHaddock defn pure $ TyClD x (DataDecl { tcdDExt, tcdLName, tcdTyVars, tcdFixity, tcdDataDefn = defn' }) -- Class declarations: -- -- class C a where -- -- | Comment on the first method -- first_method :: a -> Bool -- second_method :: a -> String -- -- ^ Comment on the second method -- addHaddock (TyClD _ decl) | ClassDecl { tcdCExt = (x, layout, NoAnnSortKey), tcdCtxt, tcdLName, tcdTyVars, tcdFixity, tcdFDs, tcdSigs, tcdMeths, tcdATs, tcdATDefs } <- decl = do registerHdkA tcdLName -- 
todo: register keyword location of 'where', see Note [Register keyword location] where_cls' <- addHaddockInterleaveItems layout (mkDocHsDecl layout) $ flattenBindsAndSigs (tcdMeths, tcdSigs, tcdATs, tcdATDefs, [], []) pure $ let (tcdMeths', tcdSigs', tcdATs', tcdATDefs', _, tcdDocs) = partitionBindsAndSigs where_cls' decl' = ClassDecl { tcdCExt = (x, layout, NoAnnSortKey) , tcdCtxt, tcdLName, tcdTyVars, tcdFixity, tcdFDs , tcdSigs = tcdSigs' , tcdMeths = tcdMeths' , tcdATs = tcdATs' , tcdATDefs = tcdATDefs' , tcdDocs } in TyClD noExtField decl' -- Data family instances: -- -- data instance D Bool where ... (same as data/newtype declarations) -- data instance D Bool = ... (same as data/newtype declarations) -- addHaddock (InstD _ decl) | DataFamInstD { dfid_ext, dfid_inst } <- decl , DataFamInstDecl { dfid_eqn } <- dfid_inst = do dfid_eqn' <- case dfid_eqn of FamEqn { feqn_ext, feqn_tycon, feqn_bndrs, feqn_pats, feqn_fixity, feqn_rhs } -> do registerHdkA feqn_tycon feqn_rhs' <- addHaddock feqn_rhs pure $ FamEqn { feqn_ext, feqn_tycon, feqn_bndrs, feqn_pats, feqn_fixity, feqn_rhs = feqn_rhs' } pure $ InstD noExtField (DataFamInstD { dfid_ext, dfid_inst = DataFamInstDecl { dfid_eqn = dfid_eqn' } }) -- Type synonyms: -- -- type T = Int -- ^ Comment on Int -- addHaddock (TyClD _ decl) | SynDecl { tcdSExt, tcdLName, tcdTyVars, tcdFixity, tcdRhs } <- decl = do registerHdkA tcdLName -- todo: register keyword location of '=', see Note [Register keyword location] tcdRhs' <- addHaddock tcdRhs pure $ TyClD noExtField (SynDecl { tcdSExt, tcdLName, tcdTyVars, tcdFixity, tcdRhs = tcdRhs' }) -- Foreign imports: -- -- foreign import ccall unsafe -- o :: Float -- ^ The input float -- -> IO Float -- ^ The output float -- addHaddock (ForD _ decl) = do registerHdkA (fd_name decl) fd_sig_ty' <- addHaddock (fd_sig_ty decl) pure $ ForD noExtField (decl{ fd_sig_ty = fd_sig_ty' }) -- Other declarations addHaddock d = pure d -- The right-hand side of a data/newtype declaration or data family 
instance. instance HasHaddock (HsDataDefn GhcPs) where addHaddock defn@HsDataDefn{} = do -- Register the kind signature: -- data D :: Type -> Type where ... -- data instance D Bool :: Type where ... traverse_ @Maybe registerHdkA (dd_kindSig defn) -- todo: register keyword location of '=' or 'where', see Note [Register keyword location] -- Process the data constructors: -- -- data T -- = MkT1 Int Bool -- ^ Comment on MkT1 -- | MkT2 Char Int -- ^ Comment on MkT2 -- dd_cons' <- traverse addHaddock (dd_cons defn) -- Process the deriving clauses: -- -- newtype N = MkN Natural -- deriving (Eq {- ^ Comment on Eq N -}) -- deriving (Ord {- ^ Comment on Ord N -}) -- dd_derivs' <- addHaddock (dd_derivs defn) pure $ defn { dd_cons = dd_cons', dd_derivs = dd_derivs' } -- Process the deriving clauses of a data/newtype declaration. -- Not used for standalone deriving. instance HasHaddock (Located [LocatedAn NoEpAnns (HsDerivingClause GhcPs)]) where addHaddock lderivs = extendHdkA (getLoc lderivs) $ traverse @Located addHaddock lderivs -- Process a single deriving clause of a data/newtype declaration: -- -- newtype N = MkN Natural -- deriving newtype (Eq {- ^ Comment on Eq N -}) -- deriving (Ord {- ^ Comment on Ord N -}) via Down N -- -- Not used for standalone deriving. instance HasHaddock (LocatedAn NoEpAnns (HsDerivingClause GhcPs)) where addHaddock lderiv = extendHdkA (getLocA lderiv) $ for @(LocatedAn NoEpAnns) lderiv $ \deriv -> case deriv of HsDerivingClause { deriv_clause_ext, deriv_clause_strategy, deriv_clause_tys } -> do let -- 'stock', 'anyclass', and 'newtype' strategies come -- before the clause types. -- -- 'via' comes after. 
-- -- See tests/.../T11768.hs (register_strategy_before, register_strategy_after) = case deriv_clause_strategy of Nothing -> (pure (), pure ()) Just (L l (ViaStrategy _)) -> (pure (), registerLocHdkA (locA l)) Just (L l _) -> (registerLocHdkA (locA l), pure ()) register_strategy_before deriv_clause_tys' <- addHaddock deriv_clause_tys register_strategy_after pure HsDerivingClause { deriv_clause_ext, deriv_clause_strategy, deriv_clause_tys = deriv_clause_tys' } -- Process the types in a single deriving clause, which may come in one of the -- following forms: -- -- 1. A singular type constructor: -- deriving Eq -- ^ Comment on Eq -- -- 2. A list of comma-separated types surrounded by enclosing parentheses: -- deriving ( Eq -- ^ Comment on Eq -- , C a -- ^ Comment on C a -- ) instance HasHaddock (LocatedC (DerivClauseTys GhcPs)) where addHaddock (L l_dct dct) = extendHdkA (locA l_dct) $ case dct of DctSingle x ty -> do ty' <- addHaddock ty pure $ L l_dct $ DctSingle x ty' DctMulti x tys -> do tys' <- addHaddock tys pure $ L l_dct $ DctMulti x tys' -- Process a single data constructor declaration, which may come in one of the -- following forms: -- -- 1. H98-syntax PrefixCon: -- data T = -- MkT -- ^ Comment on MkT -- Int -- ^ Comment on Int -- Bool -- ^ Comment on Bool -- -- 2. H98-syntax InfixCon: -- data T = -- Int -- ^ Comment on Int -- :+ -- ^ Comment on (:+) -- Bool -- ^ Comment on Bool -- -- 3. H98-syntax RecCon: -- data T = -- MkT { int_field :: Int, -- ^ Comment on int_field -- bool_field :: Bool } -- ^ Comment on bool_field -- -- 4. GADT-syntax PrefixCon: -- data T where -- -- | Comment on MkT -- MkT :: Int -- ^ Comment on Int -- -> Bool -- ^ Comment on Bool -- -> T -- -- 5. 
GADT-syntax RecCon: -- data T where -- -- | Comment on MkT -- MkT :: { int_field :: Int, -- ^ Comment on int_field -- bool_field :: Bool } -- ^ Comment on bool_field -- -> T -- instance HasHaddock (LocatedA (ConDecl GhcPs)) where addHaddock (L l_con_decl con_decl) = extendHdkA (locA l_con_decl) $ case con_decl of ConDeclGADT { con_g_ext, con_names, con_bndrs, con_mb_cxt, con_g_args, con_res_ty } -> do con_doc' <- getConDoc (getLocA (NE.head con_names)) con_g_args' <- case con_g_args of PrefixConGADT x ts -> PrefixConGADT x <$> addHaddock ts RecConGADT arr (L l_rec flds) -> do flds' <- traverse addHaddockConDeclField flds pure $ RecConGADT arr (L l_rec flds') con_res_ty' <- addHaddock con_res_ty pure $ L l_con_decl $ ConDeclGADT { con_g_ext, con_names, con_bndrs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_g_args = con_g_args', con_res_ty = con_res_ty' } ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_args } -> let -- See Note [Leading and trailing comments on H98 constructors] getTrailingLeading :: HdkM (LocatedA (ConDecl GhcPs)) getTrailingLeading = do con_doc' <- getPrevNextDoc (locA l_con_decl) return $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_args , con_doc = lexLHsDocString <$> con_doc' } -- See Note [Leading and trailing comments on H98 constructors] getMixed :: HdkA (LocatedA (ConDecl GhcPs)) getMixed = case con_args of PrefixCon _ ts -> do con_doc' <- getConDoc (getLocA con_name) ts' <- traverse addHaddockConDeclFieldTy ts pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_args = PrefixCon noTypeArgs ts' } InfixCon t1 t2 -> do t1' <- addHaddockConDeclFieldTy t1 con_doc' <- getConDoc (getLocA con_name) t2' <- addHaddockConDeclFieldTy t2 pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_args = InfixCon t1' t2' } 
RecCon (L l_rec flds) -> do con_doc' <- getConDoc (getLocA con_name) flds' <- traverse addHaddockConDeclField flds pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_args = RecCon (L l_rec flds') } in hoistHdkA (\m -> do { a <- onlyTrailingOrLeading (locA l_con_decl) ; if a then getTrailingLeading else m }) getMixed -- See Note [Leading and trailing comments on H98 constructors] onlyTrailingOrLeading :: SrcSpan -> HdkM Bool onlyTrailingOrLeading l = peekHdkM $ do leading <- inLocRange (locRangeTo (getBufPos (srcSpanStart l))) $ takeHdkComments mkDocNext inner <- inLocRange (locRangeIn (getBufSpan l)) $ takeHdkComments (\x -> mkDocNext x <|> mkDocPrev x) trailing <- inLocRange (locRangeFrom (getBufPos (srcSpanEnd l))) $ takeHdkComments mkDocPrev return $ case (leading, inner, trailing) of (_:_, [], []) -> True -- leading comment only ([], [], _:_) -> True -- trailing comment only _ -> False -- Get the documentation comment associated with the data constructor in a -- data/newtype declaration. getConDoc :: SrcSpan -- Location of the data constructor -> HdkA (Maybe (Located HsDocString)) getConDoc l = extendHdkA l $ liftHdkA $ getPrevNextDoc l -- Add documentation comment to a data constructor field. -- Used for PrefixCon and InfixCon. addHaddockConDeclFieldTy :: HsScaled GhcPs (LHsType GhcPs) -> HdkA (HsScaled GhcPs (LHsType GhcPs)) addHaddockConDeclFieldTy (HsScaled mult (L l t)) = extendHdkA (locA l) $ liftHdkA $ do mDoc <- getPrevNextDoc (locA l) return (HsScaled mult (mkLHsDocTy (L l t) mDoc)) -- Add documentation comment to a data constructor field. -- Used for RecCon. 
addHaddockConDeclField :: LConDeclField GhcPs -> HdkA (LConDeclField GhcPs) addHaddockConDeclField (L l_fld fld) = extendHdkA (locA l_fld) $ liftHdkA $ do cd_fld_doc <- fmap lexLHsDocString <$> getPrevNextDoc (locA l_fld) return (L l_fld (fld { cd_fld_doc })) {- Note [Leading and trailing comments on H98 constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The trailing comment after a constructor declaration is associated with the constructor itself when it is the only comment: data T = MkT A B -- ^ Comment on MkT data T = MkT { x :: A } -- ^ Comment on MkT data T = A `MkT` B -- ^ Comment on MkT When there are other comments, the trailing comment applies to the last field: data T = MkT -- ^ Comment on MkT A -- ^ Comment on A B -- ^ Comment on B data T = MkT { a :: A -- ^ Comment on a , b :: B -- ^ Comment on b , c :: C } -- ^ Comment on c data T = A -- ^ Comment on A `MkT` -- ^ Comment on MkT B -- ^ Comment on B When it comes to the leading comment, there is no such ambiguity in /prefix/ constructor declarations (plain or record syntax): data T = -- | Comment on MkT MkT A B data T = -- | Comment on MkT MkT -- | Comment on A A -- | Comment on B B data T = -- | Comment on MkT MkT { x :: A } data T = -- | Comment on MkT MkT { -- | Comment on a a :: A, -- | Comment on b b :: B, -- | Comment on c c :: C } However, in /infix/ constructor declarations the leading comment is associated with the constructor itself if it is the only comment, and with the first field if there are other comments: data T = -- | Comment on MkT A `MkT` B data T = -- | Comment on A A -- | Comment on MkT `MkT` -- | Comment on B B This makes the leading and trailing comments context-sensitive. Example: data T = -- | comment 1 MkT Int Bool -- ^ comment 2 Here, "comment 2" applies to the Bool field. But if we removed "comment 1", then "comment 2" would be apply to the data constructor rather than its field. All of this applies to H98-style data declarations only. 
GADTSyntax data constructors don't have any special treatment for the trailing comment. We implement this in two steps: 1. Gather information about available comments using `onlyTrailingOrLeading`. It inspects available comments but does not consume them, and returns a boolean that tells us what algorithm we should use True <=> expect a single leading/trailing comment False <=> expect inner comments or more than one comment 2. Collect the comments using the algorithm determined in the previous step a) `getTrailingLeading`: a single leading/trailing comment is applied to the entire constructor declaration as a whole; see the `con_doc` field b) `getMixed`: comments apply to individual parts of a constructor declaration, including its field types -} instance HasHaddock a => HasHaddock (HsScaled GhcPs a) where addHaddock (HsScaled mult a) = HsScaled mult <$> addHaddock a instance HasHaddock a => HasHaddock (HsWildCardBndrs GhcPs a) where addHaddock (HsWC _ t) = HsWC noExtField <$> addHaddock t instance HasHaddock (LocatedA (HsSigType GhcPs)) where addHaddock (L l (HsSig{sig_bndrs = outer_bndrs, sig_body = body})) = extendHdkA (locA l) $ do case outer_bndrs of HsOuterImplicit{} -> pure () HsOuterExplicit{hso_bndrs = bndrs} -> registerLocHdkA (getLHsTyVarBndrsLoc bndrs) body' <- addHaddock body pure $ L l $ HsSig noExtField outer_bndrs body' -- Process a type, adding documentation comments to function arguments -- and the result. Many formatting styles are supported. -- -- my_function :: -- forall a. -- Eq a => -- Maybe a -> -- ^ Comment on Maybe a (function argument) -- Bool -> -- ^ Comment on Bool (function argument) -- String -- ^ Comment on String (the result) -- -- my_function -- :: forall a. Eq a -- => Maybe a -- ^ Comment on Maybe a (function argument) -- -> Bool -- ^ Comment on Bool (function argument) -- -> String -- ^ Comment on String (the result) -- -- my_function :: -- forall a. 
Eq a => -- -- | Comment on Maybe a (function argument) -- Maybe a -> -- -- | Comment on Bool (function argument) -- Bool -> -- -- | Comment on String (the result) -- String -- -- This is achieved by simply ignoring (not registering the location of) the -- function arrow (->). instance HasHaddock (LocatedA (HsType GhcPs)) where addHaddock (L l t) = extendHdkA (locA l) $ case t of -- forall a b c. t HsForAllTy x tele body -> do registerLocHdkA (getForAllTeleLoc tele) body' <- addHaddock body pure $ L l (HsForAllTy x tele body') -- (Eq a, Num a) => t HsQualTy x lhs rhs -> do registerHdkA lhs rhs' <- addHaddock rhs pure $ L l (HsQualTy x lhs rhs') -- arg -> res HsFunTy u mult lhs rhs -> do lhs' <- addHaddock lhs rhs' <- addHaddock rhs pure $ L l (HsFunTy u mult lhs' rhs') -- other types _ -> liftHdkA $ do mDoc <- getPrevNextDoc (locA l) return (mkLHsDocTy (L l t) mDoc) {- ********************************************************************* * * * HdkA: a layer over HdkM that propagates location information * * * ********************************************************************* -} -- See Note [Adding Haddock comments to the syntax tree]. -- -- 'HdkA' provides a way to propagate location information from surrounding -- computations: -- -- left_neighbour <*> HdkA inner_span inner_m <*> right_neighbour -- -- Here, the following holds: -- -- * the 'left_neighbour' will only see Haddock comments until 'bufSpanStart' of 'inner_span' -- * the 'right_neighbour' will only see Haddock comments after 'bufSpanEnd' of 'inner_span' -- * the 'inner_m' will only see Haddock comments between its 'left_neighbour' and its 'right_neighbour' -- -- In other words, every computation: -- -- * delimits the surrounding computations -- * is delimited by the surrounding computations -- -- Therefore, a 'HdkA' computation must be always considered in the context in -- which it is used. data HdkA a = HdkA !(Strict.Maybe BufSpan) -- Just b <=> BufSpan occupied by the processed AST element. 
-- The surrounding computations will not look inside. -- -- Nothing <=> No BufSpan (e.g. when the HdkA is constructed by 'pure' or 'liftHdkA'). -- The surrounding computations are not delimited. !(HdkM a) -- The stateful computation that looks up Haddock comments and -- adds them to the resulting AST node. deriving (Functor) instance Applicative HdkA where HdkA l1 m1 <*> HdkA l2 m2 = HdkA (l1 <> l2) -- The combined BufSpan that covers both subcomputations. -- -- The Semigroup instance for Maybe quite conveniently does the right thing: -- Nothing <> b = b -- a <> Nothing = a -- Just a <> Just b = Just (a <> b) (delim1 m1 <*> delim2 m2) -- Stateful computations are run in left-to-right order, -- without any smart reordering strategy. So users of this -- operation must take care to traverse the AST -- in concrete syntax order. -- See Note [Smart reordering in HdkA (or lack thereof)] -- -- Each computation is delimited ("sandboxed") -- in a way that it doesn't see any Haddock -- comments past the neighbouring AST node. -- These delim1/delim2 are key to how HdkA operates. where -- Delimit the LHS by the location information from the RHS delim1 = inLocRange (locRangeTo (fmap @Strict.Maybe bufSpanStart l2)) -- Delimit the RHS by the location information from the LHS delim2 = inLocRange (locRangeFrom (fmap @Strict.Maybe bufSpanEnd l1)) pure a = -- Return a value without performing any stateful computation, and without -- any delimiting effect on the surrounding computations. liftHdkA (pure a) {- Note [Smart reordering in HdkA (or lack thereof)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When traversing the AST, the user must take care to traverse it in concrete syntax order. For example, when processing HsFunTy, it's important to get it right and write it like so: HsFunTy _ mult lhs rhs -> do lhs' <- addHaddock lhs rhs' <- addHaddock rhs pure $ L l (HsFunTy noExtField mult lhs' rhs') Rather than like so: HsFunTy _ mult lhs rhs -> do rhs' <- addHaddock rhs -- bad! 
wrong order lhs' <- addHaddock lhs -- bad! wrong order pure $ L l (HsFunTy noExtField mult lhs' rhs') This is somewhat bug-prone, so we could try to fix this with some Applicative magic. When we define (<*>) for HdkA, why not reorder the computations as necessary? In pseudo-code: a1 <*> a2 | a1 `before` a2 = ... normal processing ... | otherwise = a1 <**> a2 While this trick could work for any two *adjacent* AST elements out of order (as in HsFunTy example above), it would fail in more elaborate scenarios (e.g. processing a list of declarations out of order). If it's not obvious why this trick doesn't work, ponder this: it's a bit like trying to get a sorted list by defining a 'smart' concatenation operator in the following manner: a ?++ b | a <= b = a ++ b | otherwise = b ++ a At first glance it seems to work: ghci> [1] ?++ [2] ?++ [3] [1,2,3] ghci> [2] ?++ [1] ?++ [3] [1,2,3] -- wow, sorted! But it actually doesn't: ghci> [3] ?++ [1] ?++ [2] [1,3,2] -- not sorted... -} -- Run a HdkA computation in an unrestricted LocRange. This is only used at the -- top level to run the final computation for the entire module. runHdkA :: HdkA a -> HdkSt -> (a, HdkSt) runHdkA (HdkA _ m) = unHdkM m mempty -- Let the neighbours know about an item at this location. -- -- Consider this example: -- -- class -- | peculiarly placed comment -- MyClass a where -- my_method :: a -> a -- -- How do we know to reject the "peculiarly placed comment" instead of -- associating it with my_method? Its indentation level matches. -- -- But clearly, there's "MyClass a where" separating the comment and my_method. -- To take it into account, we must register its location using registerLocHdkA -- or registerHdkA. -- -- See Note [Register keyword location]. -- See Note [Adding Haddock comments to the syntax tree]. registerLocHdkA :: SrcSpan -> HdkA () registerLocHdkA l = HdkA (getBufSpan l) (pure ()) -- Let the neighbours know about an item at this location. -- A small wrapper over registerLocHdkA. 
--
-- See Note [Adding Haddock comments to the syntax tree].
registerHdkA :: GenLocated (EpAnn a) e -> HdkA ()
registerHdkA a = registerLocHdkA (getLocA a)

-- Modify the action of a HdkA computation while keeping its declared
-- location span intact.
--
-- Used in the ConDeclH98 case of 'addHaddock' to switch between the
-- two comment-collection strategies (see 'onlyTrailingOrLeading').
hoistHdkA :: (HdkM a -> HdkM b) -> HdkA a -> HdkA b
hoistHdkA f (HdkA l m) = HdkA l (f m)

-- Lift a HdkM computation to HdkA.
--
-- The lifted computation declares no location span of its own, so it has
-- no delimiting effect on neighbouring computations.
liftHdkA :: HdkM a -> HdkA a
liftHdkA = HdkA mempty

-- Extend the declared location span of a 'HdkA' computation:
--
--    left_neighbour <*> extendHdkA l x <*> right_neighbour
--
-- The declared location of 'x' now includes 'l', so that the surrounding
-- computations 'left_neighbour' and 'right_neighbour' will not look for
-- Haddock comments inside the 'l' location span.
extendHdkA :: SrcSpan -> HdkA a -> HdkA a
extendHdkA l' (HdkA l m) = HdkA (getBufSpan l' <> l) m

{- *********************************************************************
*                                                                      *
*       HdkM: a stateful computation to associate                      *
*       accumulated documentation comments with AST nodes              *
*                                                                      *
********************************************************************* -}

-- The state of 'HdkM' contains a list of pending Haddock comments. We go
-- over the AST, looking up these comments using 'takeHdkComments' and removing
-- them from the state. The remaining, un-removed ones are ignored with a
-- warning (-Winvalid-haddock). Also, using a state means we never use the same
-- Haddock twice.
--
-- The 'LocRange' argument restricts which pending comments are visible to
-- the computation; see 'inLocRange'.
--
-- See Note [Adding Haddock comments to the syntax tree].
newtype HdkM a = HdkM { unHdkM :: LocRange -> HdkSt -> (a, HdkSt) }
  deriving (Functor, Applicative, Monad) via (ReaderT LocRange (State HdkSt))

-- | The state of HdkM.
data HdkSt =
  HdkSt
    { hdk_st_pending :: [PsLocated HdkComment]
        -- a list of pending (unassociated with an AST node)
        -- Haddock comments, sorted by location:
        -- in ascending order of the starting 'BufPos'
    , hdk_st_warnings :: [HdkWarn]
        -- accumulated warnings (order doesn't matter)
    }

-- | Warnings accumulated in HdkM.
data HdkWarn
  = HdkWarnInvalidComment (PsLocated HdkComment)  -- comment in an invalid position (-Winvalid-haddock)
  | HdkWarnExtraComment (Located HsDocString)     -- more than one comment for the same AST node

-- Restrict the range in which a HdkM computation will look up comments:
--
--   inLocRange r1 $
--   inLocRange r2 $
--     takeHdkComments ...  -- Only takes comments in the (r1 <> r2) location range.
--
-- Note that it does not blindly override the range but tightens it using (<>).
-- At many use sites, you will see something along the lines of:
--
--   inLocRange (locRangeTo end_pos) $ ...
--
-- And 'locRangeTo' defines a location range from the start of the file to
-- 'end_pos'. This does not mean that we now search for every comment from the
-- start of the file, as this restriction will be combined with other
-- restrictions. Somewhere up the callstack we might have:
--
--   inLocRange (locRangeFrom start_pos) $ ...
--
-- The net result is that the location range is delimited by 'start_pos' on
-- one side and by 'end_pos' on the other side.
--
-- In 'HdkA', every (<*>) may restrict the location range of its
-- subcomputations.
inLocRange :: LocRange -> HdkM a -> HdkM a
inLocRange r (HdkM m) = HdkM (\r' -> m (r <> r'))

-- Take the Haddock comments that satisfy the matching function,
-- leaving the rest pending.
--
-- Relies on 'hdk_st_pending' being sorted by starting 'BufPos': the pending
-- list is split into before/in/after the current 'LocRange' with a single
-- 'break' and a single 'span'.
takeHdkComments :: forall a. (PsLocated HdkComment -> Maybe a) -> HdkM [a]
takeHdkComments f =
  HdkM $ \(LocRange hdk_from hdk_to hdk_col) ->
         \hdk_st ->
    let comments = hdk_st_pending hdk_st
        (comments_before_range, comments') = break (is_after hdk_from) comments
        (comments_in_range, comments_after_range) =
          span (is_before hdk_to <&&> is_indented hdk_col) comments'
        (items, other_comments) = foldr add_comment ([], []) comments_in_range
        -- Comments that matched 'f' are consumed; everything else is put
        -- back into the pending list in its original order.
        remaining_comments = comments_before_range ++ other_comments ++ comments_after_range
        hdk_st' = hdk_st{ hdk_st_pending = remaining_comments }
    in (items, hdk_st')
  where
    is_after StartOfFile _ = True
    is_after (StartLoc l) (L l_comment _) = bufSpanStart (psBufSpan l_comment) >= l
    is_before EndOfFile _ = True
    is_before (EndLoc l) (L l_comment _) = bufSpanStart (psBufSpan l_comment) <= l
    is_indented (ColumnFrom n) (L l_comment _) = srcSpanStartCol (psRealSpan l_comment) >= n
    -- Partition the in-range comments into matched items and leftovers,
    -- preserving relative order (hence foldr).
    add_comment
      :: PsLocated HdkComment
      -> ([a], [PsLocated HdkComment])
      -> ([a], [PsLocated HdkComment])
    add_comment hdk_comment (items, other_hdk_comments) =
      case f hdk_comment of
        Just item -> (item : items, other_hdk_comments)
        Nothing -> (items, hdk_comment : other_hdk_comments)

-- Run a HdkM action and restore the original state: the action can inspect
-- pending comments without consuming them (used by 'onlyTrailingOrLeading').
peekHdkM :: HdkM a -> HdkM a
peekHdkM m =
  HdkM $ \r s ->
    case unHdkM m r s of
      (a, _) -> (a, s)

-- Get the docnext or docprev comment for an AST node at the given source span.
getPrevNextDoc :: SrcSpan -> HdkM (Maybe (Located HsDocString))
getPrevNextDoc l = do
  let (l_start, l_end) = (srcSpanStart l, srcSpanEnd l)
      before_t = locRangeTo (getBufPos l_start)    -- docnext comments must precede the node
      after_t = locRangeFrom (getBufPos l_end)     -- docprev comments must follow the node
  nextDocs <- inLocRange before_t $ takeHdkComments mkDocNext
  prevDocs <- inLocRange after_t $ takeHdkComments mkDocPrev
  selectDocString (nextDocs ++ prevDocs)

-- Record a warning to be reported after post-processing (order irrelevant).
appendHdkWarning :: HdkWarn -> HdkM ()
appendHdkWarning e = HdkM $ \_ hdk_st ->
  let hdk_st' = hdk_st { hdk_st_warnings = e : hdk_st_warnings hdk_st }
  in ((), hdk_st')

-- Pick at most one non-empty doc string; any extra candidates are reported
-- via 'HdkWarnExtraComment' rather than silently dropped.
selectDocString :: [Located HsDocString] -> HdkM (Maybe (Located HsDocString))
selectDocString = select . filterOut (isEmptyDocString . unLoc)
  where
    select [] = return Nothing
    select [doc] = return (Just doc)
    select (doc : extra_docs) = do
      reportExtraDocs extra_docs
      return (Just doc)

reportExtraDocs :: [Located HsDocString] -> HdkM ()
reportExtraDocs =
  traverse_ (\extra_doc -> appendHdkWarning (HdkWarnExtraComment extra_doc))

{- *********************************************************************
*                                                                      *
*      Matching functions for extracting documentation comments        *
*                                                                      *
********************************************************************* -}

-- Match a comment as a documentation declaration, wrapped as an 'HsDecl'.
mkDocHsDecl :: EpLayout -> PsLocated HdkComment -> Maybe (LHsDecl GhcPs)
mkDocHsDecl layout a = fmap (DocD noExtField) <$> mkDocDecl layout a

-- Match a comment as a documentation declaration, rejecting comments whose
-- indentation does not match the enclosing layout context.
mkDocDecl :: EpLayout -> PsLocated HdkComment -> Maybe (LDocDecl GhcPs)
mkDocDecl layout (L l_comment hdk_comment)
  | indent_mismatch = Nothing
  | otherwise =
    Just $ L (noAnnSrcSpan span) $
      case hdk_comment of
        HdkCommentNext doc -> DocCommentNext (L span $ lexHsDocString doc)
        HdkCommentPrev doc -> DocCommentPrev (L span $ lexHsDocString doc)
        HdkCommentNamed s doc -> DocCommentNamed s (L span $ lexHsDocString doc)
        HdkCommentSection n doc -> DocGroup n (L span $ lexHsDocString doc)
  where
    span = mkSrcSpanPs l_comment

    -- 'indent_mismatch' checks if the documentation comment has the exact
    -- indentation level expected by the parent node.
    --
    -- For example, when extracting documentation comments between class
    -- method declarations, there are three cases to consider:
    --
    -- 1. Indent matches (indent_mismatch=False):
    --         class C a where
    --           f :: a -> a
    --           -- ^ doc on f
    --
    -- 2. Indented too much (indent_mismatch=True):
    --         class C a where
    --           f :: a -> a
    --             -- ^ indent mismatch
    --
    -- 3. Indented too little (indent_mismatch=True):
    --         class C a where
    --           f :: a -> a
    --         -- ^ indent mismatch
    indent_mismatch = case layout of
      EpNoLayout -> False
      EpExplicitBraces{} -> False
      EpVirtualBraces n -> n /= srcSpanStartCol (psRealSpan l_comment)

-- Match a comment as a documentation item in an import/export list.
mkDocIE :: PsLocated HdkComment -> Maybe (LIE GhcPs)
mkDocIE (L l_comment hdk_comment) =
  case hdk_comment of
    HdkCommentSection n doc -> Just $ L l (IEGroup noExtField n $ L span $ lexHsDocString doc)
    HdkCommentNamed s _doc -> Just $ L l (IEDocNamed noExtField s)
    HdkCommentNext doc -> Just $ L l (IEDoc noExtField $ L span $ lexHsDocString doc)
    HdkCommentPrev doc -> Just $ L l (IEDoc noExtField $ L span $ lexHsDocString doc)
  where
    l = noAnnSrcSpan span
    span = mkSrcSpanPs l_comment

-- Match a "-- | ..." (docnext) comment.
mkDocNext :: PsLocated HdkComment -> Maybe (Located HsDocString)
mkDocNext (L l (HdkCommentNext doc)) = Just (L (mkSrcSpanPs l) doc)
mkDocNext _ = Nothing

-- Match a "-- ^ ..." (docprev) comment.
mkDocPrev :: PsLocated HdkComment -> Maybe (Located HsDocString)
mkDocPrev (L l (HdkCommentPrev doc)) = Just (L (mkSrcSpanPs l) doc)
mkDocPrev _ = Nothing

{- *********************************************************************
*                                                                      *
*                   LocRange: a location range                         *
*                                                                      *
********************************************************************* -}

-- A location range for extracting documentation comments.
-- A location range for extracting documentation comments: a lower bound,
-- an upper bound, and a minimum column.
data LocRange =
  LocRange
    { loc_range_from :: !LowerLocBound,
      loc_range_to :: !UpperLocBound,
      loc_range_col :: !ColumnBound }

-- Ranges combine pointwise; each component's (<>) acts as (&&) on the
-- corresponding predicate, so (r1 <> r2) is the intersection of r1 and r2.
instance Semigroup LocRange where
  r1 <> r2 =
    LocRange
      { loc_range_from = loc_range_from r1 <> loc_range_from r2
      , loc_range_to   = loc_range_to r1 <> loc_range_to r2
      , loc_range_col  = loc_range_col r1 <> loc_range_col r2
      }

-- The unrestricted range: matches every comment.
instance Monoid LocRange where
  mempty =
    LocRange
      { loc_range_from = mempty
      , loc_range_to   = mempty
      , loc_range_col  = mempty
      }

-- The location range from the specified position to the end of the file.
locRangeFrom :: Strict.Maybe BufPos -> LocRange
locRangeFrom mpos =
  case mpos of
    Strict.Nothing -> mempty
    Strict.Just p -> mempty { loc_range_from = StartLoc p }

-- The location range from the start of the file to the specified position.
locRangeTo :: Strict.Maybe BufPos -> LocRange
locRangeTo mpos =
  case mpos of
    Strict.Nothing -> mempty
    Strict.Just p -> mempty { loc_range_to = EndLoc p }

-- The location range within the specified span.
locRangeIn :: Strict.Maybe BufSpan -> LocRange
locRangeIn mspan =
  case mspan of
    Strict.Nothing -> mempty
    Strict.Just sp ->
      mempty
        { loc_range_from = StartLoc (bufSpanStart sp)
        , loc_range_to = EndLoc (bufSpanEnd sp)
        }

-- Represents a predicate on BufPos:
--
--   LowerLocBound | BufPos -> Bool
--   --------------+-----------------
--   StartOfFile   | const True
--   StartLoc p    | (>= p)
--
-- The semigroup instance corresponds to (&&).
--
-- We don't use the BufPos -> Bool representation
-- as it would lead to redundant checks.
--
-- That is, instead of
--
--   (pos >= 20) && (pos >= 30) && (pos >= 40)
--
-- We'd rather only do the (>= 40) check. So we reify the predicate to make
-- sure we only check for the most restrictive bound.
data LowerLocBound = StartOfFile | StartLoc !BufPos
  deriving Show

-- 'StartOfFile' is the identity; two real bounds intersect to the later
-- (more restrictive) one.
instance Semigroup LowerLocBound where
  a <> b =
    case (a, b) of
      (StartOfFile, bound) -> bound
      (bound, StartOfFile) -> bound
      (StartLoc p, StartLoc q) -> StartLoc (max p q)

instance Monoid LowerLocBound where
  mempty = StartOfFile

-- Represents a predicate on BufPos:
--
--   UpperLocBound | BufPos -> Bool
--   --------------+-----------------
--   EndOfFile     | const True
--   EndLoc p      | (<= p)
--
-- The semigroup instance corresponds to (&&).
--
-- As with 'LowerLocBound', we reify the predicate so that combining bounds
-- keeps only the most restrictive one instead of accumulating checks.
data UpperLocBound = EndOfFile | EndLoc !BufPos
  deriving Show

-- 'EndOfFile' is the identity; two real bounds intersect to the earlier
-- (more restrictive) one.
instance Semigroup UpperLocBound where
  a <> b =
    case (a, b) of
      (EndOfFile, bound) -> bound
      (bound, EndOfFile) -> bound
      (EndLoc p, EndLoc q) -> EndLoc (min p q)

instance Monoid UpperLocBound where
  mempty = EndOfFile

-- | Represents a predicate on the column number.
--
--   ColumnBound   | Int -> Bool
--   --------------+-----------------
--   ColumnFrom n  | (>= n)
--
-- The semigroup instance corresponds to (&&).
--
newtype ColumnBound = ColumnFrom Int -- n >= GHC.Types.SrcLoc.leftmostColumn
  deriving Show

-- Intersection of column bounds: keep the larger (more restrictive) minimum.
instance Semigroup ColumnBound where
  ColumnFrom a <> ColumnFrom b = ColumnFrom (max a b)

-- The identity bound is the leftmost possible column, i.e. no restriction.
instance Monoid ColumnBound where
  mempty = ColumnFrom leftmostColumn

{- *********************************************************************
*                                                                      *
*                 AST manipulation utilities                           *
*                                                                      *
********************************************************************* -}

-- Attach a documentation comment to a type, if one was found.
mkLHsDocTy :: LHsType GhcPs -> Maybe (Located HsDocString) -> LHsType GhcPs
mkLHsDocTy t mDoc =
  maybe t
        (\doc -> L (getLoc t) (HsDocTy noExtField t (lexLHsDocString doc)))
        mDoc

-- The combined source span of the binders of a forall telescope
-- (visible or invisible).
getForAllTeleLoc :: HsForAllTelescope GhcPs -> SrcSpan
getForAllTeleLoc (HsForAllVis { hsf_vis_bndrs = bndrs }) = getLHsTyVarBndrsLoc bndrs
getForAllTeleLoc (HsForAllInvis { hsf_invis_bndrs = bndrs }) = getLHsTyVarBndrsLoc bndrs

-- The combined source span of a list of type variable binders
-- ('noSrcSpan' for an empty list).
getLHsTyVarBndrsLoc :: [LHsTyVarBndr flag GhcPs] -> SrcSpan
getLHsTyVarBndrsLoc bndrs = foldr (combineSrcSpans . getLocA) noSrcSpan bndrs

-- | The inverse of 'partitionBindsAndSigs' that merges partitioned items back
-- into a flat list. Elements are put back into the order in which they
-- appeared in the original program before partitioning, using BufPos to order
-- them.
--
-- Precondition (unchecked): the input lists are already sorted.
flattenBindsAndSigs
  :: (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs],
      [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs])
  -> [LHsDecl GhcPs]
flattenBindsAndSigs (all_bs, all_ss, all_ts, all_tfis, all_dfis, all_docs) =
  -- 'cmpBufSpan' is safe here with the following assumptions:
  --
  -- - 'LHsDecl' produced by 'decl_cls' in Parser.y always have a 'BufSpan'
  -- - 'partitionBindsAndSigs' does not discard this 'BufSpan'
  --
  -- Each input list is wrapped back into the 'HsDecl' constructor it was
  -- partitioned out of, then the six sorted lists are merged by position.
  mergeListsBy cmpBufSpanA [
    mapLL (\b -> ValD noExtField b) all_bs,
    mapLL (\s -> SigD noExtField s) all_ss,
    mapLL (\t -> TyClD noExtField (FamDecl noExtField t)) all_ts,
    mapLL (\tfi -> InstD noExtField (TyFamInstD noExtField tfi)) all_tfis,
    mapLL (\dfi -> InstD noExtField (DataFamInstD noExtField dfi)) all_dfis,
    mapLL (\d -> DocD noExtField d) all_docs
  ]

-- Compare two located items by their buffer spans, ignoring the annotation
-- types of the locations.
cmpBufSpanA :: GenLocated (EpAnn a1) a2 -> GenLocated (EpAnn a3) a2 -> Ordering
cmpBufSpanA (L la a) (L lb b) = cmpBufSpan (L (locA la) a) (L (locA lb) b)

{- *********************************************************************
*                                                                      *
*                   General purpose utilities                          *
*                                                                      *
********************************************************************* -}

-- Map a function over a list of located items.
mapLL :: (a -> b) -> [GenLocated l a] -> [GenLocated l b]
mapLL f = map (fmap f)

{- Note [Old solution: Haddock in the grammar]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the past, Haddock comments were incorporated into the grammar (Parser.y).
This led to excessive complexity and duplication.
For example, here's the grammar production for types without documentation: type : btype | btype '->' ctype To support Haddock, we had to also maintain an additional grammar production for types with documentation on function arguments and function result: typedoc : btype | btype docprev | docnext btype | btype '->' ctypedoc | btype docprev '->' ctypedoc | docnext btype '->' ctypedoc Sometimes handling documentation comments during parsing led to bugs (#17561), and sometimes it simply made it hard to modify and extend the grammar. Another issue was that sometimes Haddock would fail to parse code that GHC could parse successfully: class BadIndent where f :: a -> Int -- ^ comment g :: a -> Int This declaration was accepted by ghc but rejected by ghc -haddock. -} {- Note [Register keyword location] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ At the moment, 'addHaddock' erroneously associates some comments with constructs that are separated by a keyword. For example: data Foo -- | Comment for MkFoo where MkFoo :: Foo We could use EPA (exactprint annotations) to fix this, but not without modification. For example, EpaLocation contains RealSrcSpan but not BufSpan. Also, the fix would be more straightforward after #19623. 
For examples, see tests/haddock/should_compile_flag_haddock/T17544_kw.hs -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/String.hs0000644000000000000000000003171607346545000020754 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE ViewPatterns #-} module GHC.Parser.String ( StringLexError (..), lexString, lexMultilineString, -- * Unicode smart quote helpers isDoubleSmartQuote, isSingleSmartQuote, ) where import GHC.Prelude hiding (getChar) import Control.Arrow ((>>>)) import Control.Monad (when) import Data.Char (chr, ord) import qualified Data.Foldable1 as Foldable1 import qualified Data.List.NonEmpty as NonEmpty import Data.Maybe (listToMaybe, mapMaybe) import GHC.Data.StringBuffer (StringBuffer) import qualified GHC.Data.StringBuffer as StringBuffer import GHC.Parser.CharClass ( hexDigit, is_decdigit, is_hexdigit, is_octdigit, is_space, octDecDigit, ) import GHC.Parser.Errors.Types (LexErr (..)) import GHC.Utils.Panic (panic) type BufPos = Int data StringLexError = StringLexError LexErr BufPos lexString :: Int -> StringBuffer -> Either StringLexError String lexString = lexStringWith processChars processChars where processChars :: HasChar c => [c] -> Either (c, LexErr) [c] processChars = collapseGaps >>> resolveEscapes -- ----------------------------------------------------------------------------- -- Lexing interface {- Note [Lexing strings] ~~~~~~~~~~~~~~~~~~~~~ After verifying if a string is lexically valid with Alex, we still need to do some post processing of the string, namely: 1. Collapse string gaps 2. Resolve escape characters The problem: 'lexemeToString' is more performant than manually reading characters from the StringBuffer. However, that completely erases the position of each character, which we need in order to report the correct position for error messages (e.g. when resolving escape characters). So what we'll do is do two passes. 
The first pass is optimistic; just convert to a plain String and process it. If this results in an error, we do a second pass, this time where each character is annotated with its position. Now, the error has all the information it needs. Ideally, lexStringWith would take a single (forall c. HasChar c => ...) function, but to help the specializer, we pass it in twice to concretize it for the two types we actually use. -} -- | See Note [Lexing strings] lexStringWith :: ([Char] -> Either (Char, LexErr) [Char]) -> ([CharPos] -> Either (CharPos, LexErr) [CharPos]) -> Int -> StringBuffer -> Either StringLexError String lexStringWith processChars processCharsPos len buf = case processChars $ bufferChars buf len of Right s -> Right s Left _ -> case processCharsPos $ bufferLocatedChars buf len of Right _ -> panic "expected lex error on second pass" Left ((_, pos), e) -> Left $ StringLexError e pos class HasChar c where getChar :: c -> Char setChar :: Char -> c -> c instance HasChar Char where getChar = id setChar = const instance HasChar (Char, x) where getChar = fst setChar c (_, x) = (c, x) pattern Char :: HasChar c => Char -> c pattern Char c <- (getChar -> c) {-# COMPLETE Char #-} bufferChars :: StringBuffer -> Int -> [Char] bufferChars = StringBuffer.lexemeToString type CharPos = (Char, BufPos) bufferLocatedChars :: StringBuffer -> Int -> [CharPos] bufferLocatedChars initialBuf len = go initialBuf where go buf | atEnd buf = [] | otherwise = let (c, buf') = StringBuffer.nextChar buf in (c, StringBuffer.cur buf) : go buf' atEnd buf = StringBuffer.byteDiff initialBuf buf >= len -- ----------------------------------------------------------------------------- -- Lexing phases collapseGaps :: HasChar c => [c] -> [c] collapseGaps = go where go = \case c1@(Char '\\') : c2@(Char c) : cs | is_space c -> go $ dropGap cs | otherwise -> c1 : c2 : go cs c : cs -> c : go cs [] -> [] dropGap = \case Char '\\' : cs -> cs _ : cs -> dropGap cs [] -> panic "gap unexpectedly ended" 
-- | Replace every escape sequence in the (gap-collapsed) character stream
-- with the single character it denotes. On an out-of-range numeric escape,
-- returns the offending annotated character together with the lexer error.
resolveEscapes :: HasChar c => [c] -> Either (c, LexErr) [c]
resolveEscapes = go dlistEmpty
  where
    go !acc = \case
      [] -> pure $ dlistToList acc
      -- "\&" is the empty escape: it contributes no character at all
      Char '\\' : Char '&' : cs -> go acc cs
      backslash@(Char '\\') : cs ->
        case resolveEscapeChar cs of
          -- keep the backslash's annotation (position) but swap in the
          -- resolved character, so later errors still point at the escape
          Right (esc, cs') -> go (acc `dlistSnoc` setChar esc backslash) cs'
          Left (c, e) -> Left (c, e)
      c : cs -> go (acc `dlistSnoc` c) cs

-- -----------------------------------------------------------------------------
-- Escape characters

-- | Resolve an escape character, after having just lexed a backslash.
-- Assumes escape character is valid.
resolveEscapeChar :: HasChar c => [c] -> Either (c, LexErr) (Char, [c])
resolveEscapeChar = \case
  Char 'a'  : cs -> pure ('\a', cs)
  Char 'b'  : cs -> pure ('\b', cs)
  Char 'f'  : cs -> pure ('\f', cs)
  Char 'n'  : cs -> pure ('\n', cs)
  Char 'r'  : cs -> pure ('\r', cs)
  Char 't'  : cs -> pure ('\t', cs)
  Char 'v'  : cs -> pure ('\v', cs)
  Char '\\' : cs -> pure ('\\', cs)
  Char '"'  : cs -> pure ('\"', cs)
  Char '\'' : cs -> pure ('\'', cs)
  -- escape codes
  Char 'x' : cs -> parseNum is_hexdigit 16 hexDigit cs
  Char 'o' : cs -> parseNum is_octdigit 8 octDecDigit cs
  cs@(Char c : _) | is_decdigit c -> parseNum is_decdigit 10 octDecDigit cs
  -- control characters (e.g. '\^M')
  Char '^' : Char c : cs -> pure (chr $ ord c - ord '@', cs)
  -- long form escapes (e.g. '\NUL')
  cs | Just (esc, cs') <- parseLongEscape cs -> pure (esc, cs')
  -- shouldn't happen
  Char c : _ -> panic $ "found unexpected escape character: " ++ show c
  [] -> panic "escape character unexpectedly ended"
  where
    -- Accumulate digits in the given base; fail with LexNumEscapeRange as
    -- soon as the value exceeds the maximum Unicode code point (0x10ffff).
    parseNum isDigit base toDigit =
      let go x = \case
            ch@(Char c) : cs | isDigit c -> do
              let x' = x * base + toDigit c
              when (x' > 0x10ffff) $ Left (ch, LexNumEscapeRange)
              go x' cs
            cs -> pure (chr x, cs)
       in go 0

-- | Try to match one of the named ASCII control-code escapes (e.g. "NUL")
-- at the head of the stream; returns the character and the rest on success.
parseLongEscape :: HasChar c => [c] -> Maybe (Char, [c])
parseLongEscape cs = listToMaybe (mapMaybe tryParse longEscapeCodes)
  where
    tryParse (code, esc) =
      case splitAt (length code) cs of
        (pre, cs') | map getChar pre == code -> Just (esc, cs')
        _ -> Nothing

    -- NOTE: order matters for prefixes, e.g. "SO" must come after "SOH"
    -- so that listToMaybe picks the longer match first.
    longEscapeCodes =
      [ ("NUL", '\NUL')
      , ("SOH", '\SOH')
      , ("STX", '\STX')
      , ("ETX", '\ETX')
      , ("EOT", '\EOT')
      , ("ENQ", '\ENQ')
      , ("ACK", '\ACK')
      , ("BEL", '\BEL')
      , ("BS" , '\BS' )
      , ("HT" , '\HT' )
      , ("LF" , '\LF' )
      , ("VT" , '\VT' )
      , ("FF" , '\FF' )
      , ("CR" , '\CR' )
      , ("SO" , '\SO' )
      , ("SI" , '\SI' )
      , ("DLE", '\DLE')
      , ("DC1", '\DC1')
      , ("DC2", '\DC2')
      , ("DC3", '\DC3')
      , ("DC4", '\DC4')
      , ("NAK", '\NAK')
      , ("SYN", '\SYN')
      , ("ETB", '\ETB')
      , ("CAN", '\CAN')
      , ("EM" , '\EM' )
      , ("SUB", '\SUB')
      , ("ESC", '\ESC')
      , ("FS" , '\FS' )
      , ("GS" , '\GS' )
      , ("RS" , '\RS' )
      , ("US" , '\US' )
      , ("SP" , '\SP' )
      , ("DEL", '\DEL')
      ]

-- -----------------------------------------------------------------------------
-- Unicode Smart Quote detection (#21843)

isDoubleSmartQuote :: Char -> Bool
isDoubleSmartQuote = \case
  '“' -> True
  '”' -> True
  _ -> False

isSingleSmartQuote :: Char -> Bool
isSingleSmartQuote = \case
  '‘' -> True
  '’' -> True
  _ -> False

-- -----------------------------------------------------------------------------
-- Multiline strings

-- | See Note [Multiline string literals]
--
-- Assumes string is lexically valid. Skips the steps about splitting
-- and rejoining lines, and instead manually find newline characters,
-- for performance.
-- | Post-process a lexically valid multiline string literal of the given
-- byte length. Runs the pipeline from Note [Multiline string literals];
-- step numbers below refer to that Note. Step 2/6 (split/rejoin on
-- newlines) is fused away: we normalize EOLs in place instead.
lexMultilineString :: Int -> StringBuffer -> Either StringLexError String
lexMultilineString = lexStringWith processChars processChars
  where
    processChars :: HasChar c => [c] -> Either (c, LexErr) [c]
    processChars =
        collapseGaps             -- Step 1
          >>> normalizeEOL
          >>> expandLeadingTabs  -- Step 3
          >>> rmCommonWhitespacePrefix -- Step 4
          >>> collapseOnlyWsLines -- Step 5
          >>> rmFirstNewline     -- Step 7a
          >>> rmLastNewline      -- Step 7b
          >>> resolveEscapes     -- Step 8

    -- Normalize line endings to LF. The spec dictates that lines should be
    -- split on newline characters and rejoined with ``\n``. But because we
    -- aren't actually splitting/rejoining, we'll manually normalize here
    normalizeEOL :: HasChar c => [c] -> [c]
    normalizeEOL =
      let go = \case
            -- CRLF collapses to the LF (keeping the LF's annotation)
            Char '\r' : c@(Char '\n') : cs -> c : go cs
            c@(Char '\r') : cs -> setChar '\n' c : go cs
            c@(Char '\f') : cs -> setChar '\n' c : go cs
            c : cs -> c : go cs
            [] -> []
       in go

    -- expands all tabs, since the lexer will verify that tabs can only appear
    -- as leading indentation
    expandLeadingTabs :: HasChar c => [c] -> [c]
    expandLeadingTabs =
      let go !col = \case
            c@(Char '\t') : cs ->
              -- pad to the next 8-column tab stop
              let fill = 8 - (col `mod` 8)
               in replicate fill (setChar ' ' c) ++ go (col + fill) cs
            c : cs -> c : go (if getChar c == '\n' then 0 else col + 1) cs
            [] -> []
       in go 0

    rmCommonWhitespacePrefix :: HasChar c => [c] -> [c]
    rmCommonWhitespacePrefix cs0 =
      let commonWSPrefix = getCommonWsPrefix (map getChar cs0)
          go = \case
            -- only strip the prefix after a newline, so the first line
            -- (before any newline) is left untouched
            c@(Char '\n') : cs -> c : go (dropLine commonWSPrefix cs)
            c : cs -> c : go cs
            [] -> []
          -- drop x characters from the string, or up to a newline, whichever
          -- comes first
          dropLine !x = \case
            cs | x <= 0 -> cs
            cs@(Char '\n' : _) -> cs
            _ : cs -> dropLine (x - 1) cs
            [] -> []
       in go cs0

    collapseOnlyWsLines :: HasChar c => [c] -> [c]
    collapseOnlyWsLines =
      let go = \case
            c@(Char '\n') : cs | Just cs' <- checkAllWs cs -> c : go cs'
            c : cs -> c : go cs
            [] -> []
          checkAllWs = \case
            -- got all the way to a newline or the end of the string, return
            cs@(Char '\n' : _) -> Just cs
            cs@[] -> Just cs
            -- found whitespace, continue
            Char c : cs | is_space c -> checkAllWs cs
            -- anything else, stop
            _ -> Nothing
       in go

    rmFirstNewline :: HasChar c => [c] -> [c]
    rmFirstNewline = \case
      Char '\n' : cs -> cs
      cs -> cs

    rmLastNewline :: HasChar c => [c] -> [c]
    rmLastNewline =
      let go = \case
            [] -> []
            [Char '\n'] -> []
            c : cs -> c : go cs
       in go

-- | See step 4 in Note [Multiline string literals]
--
-- Assumes tabs have already been expanded.
getCommonWsPrefix :: String -> Int
getCommonWsPrefix s =
  case NonEmpty.nonEmpty includedLines of
    Nothing -> 0
    Just ls -> Foldable1.minimum $ NonEmpty.map (length . takeWhile is_space) ls
  where
    includedLines =
        filter (not . all is_space) -- ignore whitespace-only lines
      . drop 1                      -- ignore first line in calculation
      $ lines s

{- Note [Multiline string literals]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Multiline string literals were added following the acceptance of the
proposal: https://github.com/ghc-proposals/ghc-proposals/pull/569

Multiline string literals are syntax sugar for normal string literals,
with an extra post processing step. This all happens in the Lexer; that
is, HsMultilineString will contain the post-processed string. This matches
the same behavior as HsString, which contains the normalized string
(see Note [Literal source text]).

The canonical steps for post processing a multiline string are:
1. Collapse string gaps
2. Split the string by newlines
3. Convert leading tabs into spaces
    * In each line, any tabs preceding non-whitespace characters are replaced with spaces up to the next tab stop
4. Remove common whitespace prefix in every line except the first (see below)
5. If a line contains only whitespace, remove all of the whitespace
6. Join the string back with `\n` delimiters
7a. If the first character of the string is a newline, remove it
7b. If the last character of the string is a newline, remove it
8.
Interpret escaped characters The common whitespace prefix can be informally defined as "The longest prefix of whitespace shared by all lines in the string, excluding the first line and any whitespace-only lines". It's more precisely defined with the following algorithm: 1. Take a list representing the lines in the string 2. Ignore the following elements in the list: * The first line (we want to ignore everything before the first newline) * Empty lines * Lines with only whitespace characters 3. Calculate the longest prefix of whitespace shared by all lines in the remaining list -} -- ----------------------------------------------------------------------------- -- DList newtype DList a = DList ([a] -> [a]) dlistEmpty :: DList a dlistEmpty = DList id dlistToList :: DList a -> [a] dlistToList (DList f) = f [] dlistSnoc :: DList a -> a -> DList a dlistSnoc (DList f) x = DList (f . (x :)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Parser/Types.hs0000644000000000000000000001005307346545000020601 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DataKinds #-} module GHC.Parser.Types ( SumOrTuple(..) , pprSumOrTuple , PatBuilder(..) , DataConBuilder(..) ) where import GHC.Prelude import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Types.Name.Reader import GHC.Hs.Extension import GHC.Hs.Lit import GHC.Hs.Pat import GHC.Hs.Type import GHC.Utils.Outputable as Outputable import GHC.Data.OrdList import Data.Foldable import GHC.Parser.Annotation import Language.Haskell.Syntax data SumOrTuple b = Sum ConTag Arity (LocatedA b) [EpToken "|"] [EpToken "|"] -- ^ Last two are the locations of the '|' before and after the payload | Tuple [Either (EpAnn Bool) (LocatedA b)] pprSumOrTuple :: Outputable b => Boxity -> SumOrTuple b -> SDoc pprSumOrTuple boxity = \case Sum alt arity e _ _ -> parOpen <+> ppr_bars (alt - 1) <+> ppr e <+> ppr_bars (arity - alt) <+> parClose Tuple xs -> parOpen <> (fcat . 
punctuate comma $ map ppr_tup xs) <> parClose where ppr_tup (Left _) = empty ppr_tup (Right e) = ppr e ppr_bars n = hsep (replicate n (Outputable.char '|')) (parOpen, parClose) = case boxity of Boxed -> (text "(", text ")") Unboxed -> (text "(#", text "#)") -- | See Note [Ambiguous syntactic categories] and Note [PatBuilder] data PatBuilder p = PatBuilderPat (Pat p) | PatBuilderPar (EpToken "(") (LocatedA (PatBuilder p)) (EpToken ")") | PatBuilderApp (LocatedA (PatBuilder p)) (LocatedA (PatBuilder p)) | PatBuilderAppType (LocatedA (PatBuilder p)) (EpToken "@") (HsTyPat GhcPs) | PatBuilderOpApp (LocatedA (PatBuilder p)) (LocatedN RdrName) (LocatedA (PatBuilder p)) ([EpToken "("], [EpToken ")"]) | PatBuilderVar (LocatedN RdrName) | PatBuilderOverLit (HsOverLit GhcPs) -- These instances are here so that they are not orphans type instance Anno (GRHS GhcPs (LocatedA (PatBuilder GhcPs))) = EpAnnCO type instance Anno [LocatedA (Match GhcPs (LocatedA (PatBuilder GhcPs)))] = SrcSpanAnnLW type instance Anno (Match GhcPs (LocatedA (PatBuilder GhcPs))) = SrcSpanAnnA type instance Anno (StmtLR GhcPs GhcPs (LocatedA (PatBuilder GhcPs))) = SrcSpanAnnA instance Outputable (PatBuilder GhcPs) where ppr (PatBuilderPat p) = ppr p ppr (PatBuilderPar _ (L _ p) _) = parens (ppr p) ppr (PatBuilderApp (L _ p1) (L _ p2)) = ppr p1 <+> ppr p2 ppr (PatBuilderAppType (L _ p) _ t) = ppr p <+> text "@" <> ppr t ppr (PatBuilderOpApp (L _ p1) op (L _ p2) _) = ppr p1 <+> ppr op <+> ppr p2 ppr (PatBuilderVar v) = ppr v ppr (PatBuilderOverLit l) = ppr l -- | An accumulator to build a prefix data constructor, -- e.g. when parsing @MkT A B C@, the accumulator will evolve as follows: -- -- @ -- 1. PrefixDataConBuilder [] MkT -- 2. PrefixDataConBuilder [A] MkT -- 3. PrefixDataConBuilder [A, B] MkT -- 4. PrefixDataConBuilder [A, B, C] MkT -- @ -- -- There are two reasons we have a separate builder type instead of using -- @HsConDeclDetails GhcPs@ directly: -- -- 1. 
It's faster, because 'OrdList' gives us constant-time snoc. -- 2. Having a separate type helps ensure that we don't forget to finalize a -- 'RecTy' into a 'RecCon' (we do that in 'dataConBuilderDetails'). -- -- See Note [PatBuilder] for another builder type used in the parser. -- Here the technique is similar, but the motivation is different. data DataConBuilder = PrefixDataConBuilder (OrdList (LHsType GhcPs)) -- Data constructor fields (LocatedN RdrName) -- Data constructor name | InfixDataConBuilder (LHsType GhcPs) -- LHS field (LocatedN RdrName) -- Data constructor name (LHsType GhcPs) -- RHS field instance Outputable DataConBuilder where ppr (PrefixDataConBuilder flds data_con) = hang (ppr data_con) 2 (sep (map ppr (toList flds))) ppr (InfixDataConBuilder lhs data_con rhs) = ppr lhs <+> ppr data_con <+> ppr rhs type instance Anno [LocatedA (StmtLR GhcPs GhcPs (LocatedA (PatBuilder GhcPs)))] = SrcSpanAnnLW ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform.hs0000644000000000000000000003030107346545000020023 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE LambdaCase #-} -- | Platform description module GHC.Platform ( Platform (..) , PlatformWordSize(..) , platformArch , platformOS , ArchOS(..) , Arch(..) , OS(..) , ArmISA(..) , ArmISAExt(..) , ArmABI(..) , PPC_64ABI(..) , ByteOrder(..) , target32Bit , isARM , isPPC , osElfTarget , osMachOTarget , osSubsectionsViaSymbols , platformUsesFrameworks , platformWordSizeInBytes , platformWordSizeInBits , platformMinInt , platformMaxInt , platformMaxWord , platformInIntRange , platformInWordRange , platformCConvNeedsExtension , platformHasRTSLinker , PlatformMisc(..) , SseVersion (..) , BmiVersion (..) , wordAlignment -- * SSE and AVX , isSseEnabled , isSse2Enabled -- * Platform constants , PlatformConstants(..) 
, lookupPlatformConstants , platformConstants -- * Shared libraries , platformSOName , platformHsSOName , platformSOExt , genericPlatform ) where import Prelude -- See Note [Why do we import Prelude here?] import GHC.Read import GHC.ByteOrder (ByteOrder(..)) import GHC.Platform.Constants import GHC.Platform.ArchOS import GHC.Types.Basic (Alignment, alignmentOf) import GHC.Utils.Panic.Plain import Data.Word import Data.Int import System.FilePath import System.Directory -- | Platform description -- -- This is used to describe platforms so that we can generate code for them. data Platform = Platform { platformArchOS :: !ArchOS -- ^ Architecture and OS , platformWordSize :: !PlatformWordSize -- ^ Word size , platformByteOrder :: !ByteOrder -- ^ Byte order (endianness) , platformUnregisterised :: !Bool , platformHasGnuNonexecStack :: !Bool , platformHasIdentDirective :: !Bool , platformHasSubsectionsViaSymbols :: !Bool -- ^ Enable Darwin .subsections_via_symbols directive -- -- See Note [Subsections Via Symbols] in GHC.CmmToAsm.X86.Ppr , platformIsCrossCompiling :: !Bool , platformLeadingUnderscore :: !Bool -- ^ Symbols need underscore prefix , platformTablesNextToCode :: !Bool -- ^ Determines whether we will be compiling info tables that reside just -- before the entry code, or with an indirection to the entry code. See -- TABLES_NEXT_TO_CODE in @rts/include/rts/storage/InfoTables.h@. , platformHasLibm :: !Bool -- ^ Some platforms require that we explicitly link against @libm@ if any -- math-y things are used (which we assume to include all programs). See -- #14022. , platform_constants :: !(Maybe PlatformConstants) -- ^ Constants such as structure offsets, type sizes, etc. } deriving (Read, Show, Eq, Ord) wordAlignment :: Platform -> Alignment wordAlignment platform = alignmentOf (platformWordSizeInBytes platform) -- ----------------------------------------------------------------------------- -- SSE and AVX -- TODO: Instead of using a separate predicate (i.e. 
isSse2Enabled) to -- check if SSE is enabled, we might have x86-64 imply the -msse2 -- flag. isSseEnabled :: Platform -> Bool isSseEnabled platform = case platformArch platform of ArchX86_64 -> True ArchX86 -> True _ -> False isSse2Enabled :: Platform -> Bool isSse2Enabled platform = case platformArch platform of -- We assume SSE1 and SSE2 operations are available on both -- x86 and x86_64. Historically we didn't default to SSE2 and -- SSE1 on x86, which results in defacto nondeterminism for how -- rounding behaves in the associated x87 floating point instructions -- because variations in the spill/fpu stack placement of arguments for -- operations would change the precision and final result of what -- would otherwise be the same expressions with respect to single or -- double precision IEEE floating point computations. ArchX86_64 -> True ArchX86 -> True _ -> False -- ----------------------------------------------------------------------------- -- Platform Constants platformConstants :: Platform -> PlatformConstants platformConstants platform = case platform_constants platform of Nothing -> panic "Platform constants not available!" 
Just c -> c genericPlatform :: Platform genericPlatform = Platform { platformArchOS = ArchOS ArchX86_64 OSLinux , platformWordSize = PW8 , platformByteOrder = LittleEndian , platformUnregisterised = False , platformHasGnuNonexecStack = False , platformHasIdentDirective = False , platformHasSubsectionsViaSymbols= False , platformHasLibm = False , platformIsCrossCompiling = False , platformLeadingUnderscore = False , platformTablesNextToCode = True , platform_constants = Nothing } data PlatformWordSize = PW4 -- ^ A 32-bit platform | PW8 -- ^ A 64-bit platform deriving (Eq, Ord) instance Show PlatformWordSize where show PW4 = "4" show PW8 = "8" instance Read PlatformWordSize where readPrec = do i :: Int <- readPrec case i of 4 -> return PW4 8 -> return PW8 other -> fail ("Invalid PlatformWordSize: " ++ show other) platformWordSizeInBytes :: Platform -> Int platformWordSizeInBytes p = case platformWordSize p of PW4 -> 4 PW8 -> 8 platformWordSizeInBits :: Platform -> Int platformWordSizeInBits p = platformWordSizeInBytes p * 8 -- | Platform architecture platformArch :: Platform -> Arch platformArch platform = case platformArchOS platform of ArchOS arch _ -> arch -- | Platform OS platformOS :: Platform -> OS platformOS platform = case platformArchOS platform of ArchOS _ os -> os -- | This predicate tells us whether the platform is 32-bit. target32Bit :: Platform -> Bool target32Bit p = case platformWordSize p of PW4 -> True PW8 -> False osUsesFrameworks :: OS -> Bool osUsesFrameworks OSDarwin = True osUsesFrameworks _ = False platformUsesFrameworks :: Platform -> Bool platformUsesFrameworks = osUsesFrameworks . 
platformOS osSubsectionsViaSymbols :: OS -> Bool osSubsectionsViaSymbols OSDarwin = True osSubsectionsViaSymbols _ = False -- | Minimum representable Int value for the given platform platformMinInt :: Platform -> Integer platformMinInt p = case platformWordSize p of PW4 -> toInteger (minBound :: Int32) PW8 -> toInteger (minBound :: Int64) -- | Maximum representable Int value for the given platform platformMaxInt :: Platform -> Integer platformMaxInt p = case platformWordSize p of PW4 -> toInteger (maxBound :: Int32) PW8 -> toInteger (maxBound :: Int64) -- | Maximum representable Word value for the given platform platformMaxWord :: Platform -> Integer platformMaxWord p = case platformWordSize p of PW4 -> toInteger (maxBound :: Word32) PW8 -> toInteger (maxBound :: Word64) -- | Test if the given Integer is representable with a platform Int platformInIntRange :: Platform -> Integer -> Bool platformInIntRange platform x = x >= platformMinInt platform && x <= platformMaxInt platform -- | Test if the given Integer is representable with a platform Word platformInWordRange :: Platform -> Integer -> Bool platformInWordRange platform x = x >= 0 && x <= platformMaxWord platform -- | For some architectures the C calling convention is that any -- integer shorter than 64 bits is replaced by its 64 bits -- representation using sign or zero extension. platformCConvNeedsExtension :: Platform -> Bool platformCConvNeedsExtension platform = case platformArch platform of ArchPPC_64 _ -> True ArchS390X -> True ArchRISCV64 -> True ArchLoongArch64 -> True ArchAArch64 -- Apple's AArch64 ABI requires that the caller sign-extend -- small integer arguments. See -- https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms | OSDarwin <- platformOS platform -> True _ -> False -- | Does this platform have an RTS linker? platformHasRTSLinker :: Platform -> Bool -- Note that we've inlined this logic in hadrian's -- Settings.Builders.RunTest.inTreeCompilerArgs. 
-- If you change this, be sure to change it too platformHasRTSLinker p = case archOS_arch (platformArchOS p) of ArchPPC -> False -- powerpc ArchPPC_64 ELF_V1 -> False -- powerpc64 ArchPPC_64 ELF_V2 -> False -- powerpc64le ArchS390X -> False ArchLoongArch64 -> False ArchJavaScript -> False _ -> True -------------------------------------------------- -- Instruction sets -------------------------------------------------- -- | x86 SSE instructions data SseVersion = SSE1 | SSE2 | SSE3 | SSE4 | SSE42 deriving (Eq, Ord) -- | x86 BMI (bit manipulation) instructions data BmiVersion = BMI1 | BMI2 deriving (Eq, Ord) -- | Platform-specific settings formerly hard-coded in Config.hs. -- -- These should probably be all be triaged whether they can be computed from -- other settings or belong in another another place (like 'Platform' above). data PlatformMisc = PlatformMisc { -- TODO Recalculate string from richer info? platformMisc_targetPlatformString :: String , platformMisc_ghcWithInterpreter :: Bool , platformMisc_libFFI :: Bool , platformMisc_llvmTarget :: String , platformMisc_targetRTSLinkerOnlySupportsSharedLibs :: Bool } platformSOName :: Platform -> FilePath -> FilePath platformSOName platform root = case platformOS platform of OSMinGW32 -> root <.> platformSOExt platform _ -> ("lib" ++ root) <.> platformSOExt platform platformHsSOName :: Platform -> FilePath -> FilePath platformHsSOName platform root = ("lib" ++ root) <.> platformSOExt platform platformSOExt :: Platform -> FilePath platformSOExt platform = case platformOS platform of OSDarwin -> "dylib" OSMinGW32 -> "dll" _ -> "so" -- Note [Platform constants] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- The RTS is partly written in C, hence we use an external C compiler to build -- it. Thus GHC must somehow retrieve some information about the produced code -- (sizes of types, offsets of struct fields, etc.) to produce compatible code. 
-- -- This is the role of utils/deriveConstants utility: it produces a C -- source, compiles it with the same toolchain that will be used to build the -- RTS, and finally retrieves the constants from the built artefact. We can't -- directly run the produced program because we may be cross-compiling. -- -- These constants are then stored in GhclibDerivedConstants.h header file that is -- bundled with the RTS unit. This file is directly imported by Cmm codes and it -- is also read by GHC. deriveConstants also produces the Haskell definition of -- the PlatformConstants datatype and the Haskell parser for the -- GhclibDerivedConstants.h file. -- -- For quite some time, constants used by GHC were globally installed in -- ${libdir}/platformConstants but now GHC reads the GhclibDerivedConstants.h header -- bundled with the RTS unit. GHC detects when it builds the RTS unit itself and -- in this case it loads the header from the include-dirs passed on the -- command-line. -- -- Note that GHC doesn't parse every "#define SOME_CONSTANT 123" individually. -- Instead there is a single #define that contains all the constants useful to -- GHC in a comma separated list: -- -- #define HS_CONSTANTS "123,45,..." -- -- Note that GHC mustn't directly import GhclibDerivedConstants.h as these constants -- are only valid for a specific target platform and we want GHC to be target -- agnostic. -- -- | Try to locate "GhclibDerivedConstants.h" file in the given dirs and to parse the -- PlatformConstants from it. 
-- -- See Note [Platform constants] lookupPlatformConstants :: [FilePath] -> IO (Maybe PlatformConstants) lookupPlatformConstants include_dirs = find_constants include_dirs where try_parse d = do let p = d "GhclibDerivedConstants.h" doesFileExist p >>= \case True -> Just <$> parseConstantsHeader p False -> return Nothing find_constants [] = return Nothing find_constants (x:xs) = try_parse x >>= \case Nothing -> find_constants xs Just c -> return (Just c) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/0000755000000000000000000000000007346545000017472 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/AArch64.hs0000644000000000000000000000024107346545000021153 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.AArch64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_aarch64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/ARM.hs0000644000000000000000000000023207346545000020442 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.ARM where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_arm 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/LoongArch64.hs0000644000000000000000000000025007346545000022051 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.LoongArch64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_loongarch64 1 #include "CodeGen.Platform.h"ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/NoRegs.hs0000644000000000000000000000020607346545000021221 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.NoRegs where import GHC.Prelude #define MACHREGS_NO_REGS 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/PPC.hs0000644000000000000000000000023607346545000020451 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.PPC where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_powerpc 1 #include 
"CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Profile.hs0000644000000000000000000000317607346545000021435 0ustar0000000000000000-- | Platform profiles module GHC.Platform.Profile ( Profile (..) , profileBuildTag , profileConstants , profileIsProfiling , profileWordSizeInBytes ) where import GHC.Prelude import GHC.Platform import GHC.Platform.Ways -- | A platform profile fully describes the kind of objects that are generated -- for a platform. -- -- 'Platform' doesn't fully describe the ABI of an object. Compiler ways -- (profiling, debug, dynamic) also modify the ABI. -- data Profile = Profile { profilePlatform :: !Platform -- ^ Platform , profileWays :: !Ways -- ^ Ways } deriving (Eq, Ord, Show, Read) -- | Get platform constants profileConstants :: Profile -> PlatformConstants {-# INLINE profileConstants #-} profileConstants profile = platformConstants (profilePlatform profile) -- | Is profiling enabled profileIsProfiling :: Profile -> Bool {-# INLINE profileIsProfiling #-} profileIsProfiling profile = profileWays profile `hasWay` WayProf -- | Word size in bytes profileWordSizeInBytes :: Profile -> Int {-# INLINE profileWordSizeInBytes #-} profileWordSizeInBytes profile = platformWordSizeInBytes (profilePlatform profile) -- | Unique build tag for the profile profileBuildTag :: Profile -> String profileBuildTag profile -- profiles using unregisterised convention are not binary compatible with -- those that don't. Make sure to make it apparent in the tag so that our -- interface files can't be mismatched by mistake. 
| platformUnregisterised platform = 'u':wayTag | otherwise = wayTag where platform = profilePlatform profile wayTag = waysBuildTag (profileWays profile) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/RISCV64.hs0000644000000000000000000000024207346545000021064 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.RISCV64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_riscv64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg.hs0000644000000000000000000001744607346545000020557 0ustar0000000000000000{-# LANGUAGE MagicHash #-} -- | An architecture independent description of a register. -- This needs to stay architecture independent because it is used -- by NCGMonad and the register allocators, which are shared -- by all architectures. -- module GHC.Platform.Reg ( RegNo, Reg(..), regSingle, realRegSingle, isRealReg, takeRealReg, isVirtualReg, takeVirtualReg, VirtualReg(..), renameVirtualReg, classOfVirtualReg, getHiVirtualRegFromLo, getHiVRegFromLo, RealReg(..), regNosOfRealReg, realRegsAlias, liftPatchFnToRegReg ) where import GHC.Prelude import GHC.Exts ( Int(I#), dataToTag# ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Platform.Reg.Class import qualified GHC.Platform.Reg.Class.Unified as Unified import qualified GHC.Platform.Reg.Class.Separate as Separate import qualified GHC.Platform.Reg.Class.NoVectors as NoVectors import GHC.Platform.ArchOS -- | An identifier for a primitive real machine register. type RegNo = Int -- VirtualRegs are virtual registers. The register allocator will -- eventually have to map them into RealRegs, or into spill slots. -- -- VirtualRegs are allocated on the fly, usually to represent a single -- value in the abstract assembly code (i.e. dynamic registers are -- usually single assignment). 
-- -- The single assignment restriction isn't necessary to get correct code, -- although a better register allocation will result if single -- assignment is used -- because the allocator maps a VirtualReg into -- a single RealReg, even if the VirtualReg has multiple live ranges. -- -- Virtual regs can be of either class, so that info is attached. -- data VirtualReg -- | Integer virtual register = VirtualRegI { virtualRegUnique :: {-# UNPACK #-} !Unique } -- | High part of 2-word virtual register | VirtualRegHi { virtualRegUnique :: {-# UNPACK #-} !Unique } -- | Double virtual register | VirtualRegD { virtualRegUnique :: {-# UNPACK #-} !Unique } -- | 128-bit wide vector virtual register | VirtualRegV128 { virtualRegUnique :: {-# UNPACK #-} !Unique } deriving (Eq, Show) -- We can't derive Ord, because Unique doesn't have an Ord instance. -- Note nonDetCmpUnique in the implementation. See Note [No Ord for Unique]. -- This is non-deterministic but we do not currently support deterministic -- code-generation. 
See Note [Unique Determinism and code generation] instance Ord VirtualReg where compare vr1 vr2 = case compare (I# (dataToTag# vr1)) (I# (dataToTag# vr2)) of LT -> LT GT -> GT EQ -> nonDetCmpUnique (virtualRegUnique vr1) (virtualRegUnique vr2) instance Uniquable VirtualReg where getUnique = virtualRegUnique instance Outputable VirtualReg where ppr reg = case reg of VirtualRegI u -> text "%vI_" <> pprUniqueAlways u VirtualRegHi u -> text "%vHi_" <> pprUniqueAlways u VirtualRegD u -> text "%vDouble_" <> pprUniqueAlways u VirtualRegV128 u -> text "%vV128_" <> pprUniqueAlways u renameVirtualReg :: Unique -> VirtualReg -> VirtualReg renameVirtualReg u r = r { virtualRegUnique = u } classOfVirtualReg :: Arch -> VirtualReg -> RegClass classOfVirtualReg arch vr = case vr of VirtualRegI{} -> case regArch of Unified -> Unified.RcInteger Separate -> Separate.RcInteger NoVectors -> NoVectors.RcInteger VirtualRegHi{} -> case regArch of Unified -> Unified.RcInteger Separate -> Separate.RcInteger NoVectors -> NoVectors.RcInteger VirtualRegD{} -> case regArch of Unified -> Unified.RcFloatOrVector Separate -> Separate.RcFloat NoVectors -> NoVectors.RcFloat VirtualRegV128{} -> case regArch of Unified -> Unified.RcFloatOrVector Separate -> Separate.RcVector NoVectors -> pprPanic "classOfVirtualReg VirtualRegV128" ( text "arch:" <+> text ( show arch ) ) where regArch = registerArch arch -- Determine the upper-half vreg for a 64-bit quantity on a 32-bit platform -- when supplied with the vreg for the lower-half of the quantity. -- (NB. Not reversible). 
getHiVirtualRegFromLo :: VirtualReg -> VirtualReg getHiVirtualRegFromLo reg = case reg of -- makes a pseudo-unique with tag 'H' VirtualRegI u -> VirtualRegHi (newTagUnique u 'H') _ -> panic "Reg.getHiVirtualRegFromLo" getHiVRegFromLo :: Reg -> Reg getHiVRegFromLo reg = case reg of RegVirtual vr -> RegVirtual (getHiVirtualRegFromLo vr) RegReal _ -> panic "Reg.getHiVRegFromLo" ------------------------------------------------------------------------------------ -- | RealRegs are machine regs which are available for allocation, in -- the usual way. We know what class they are, because that's part of -- the processor's architecture. -- newtype RealReg = RealRegSingle RegNo deriving (Eq, Show, Ord) instance Uniquable RealReg where getUnique reg = case reg of RealRegSingle i -> mkRegSingleUnique i instance Outputable RealReg where ppr reg = case reg of RealRegSingle i -> text "%r" <> int i regNosOfRealReg :: RealReg -> [RegNo] regNosOfRealReg rr = case rr of RealRegSingle r1 -> [r1] realRegsAlias :: RealReg -> RealReg -> Bool realRegsAlias rr1 rr2 = -- used to be `not $ null $ intersect (regNosOfRealReg rr1) (regNosOfRealReg rr2)` -- but that resulted in some gnarly, gnarly, allocating code. So we manually -- write out all the cases which gives us nice non-allocating code. case rr1 of RealRegSingle r1 -> case rr2 of RealRegSingle r2 -> r1 == r2 -------------------------------------------------------------------------------- -- | A register, either virtual or real data Reg = RegVirtual !VirtualReg | RegReal !RealReg deriving (Eq, Ord, Show) regSingle :: RegNo -> Reg regSingle regNo = RegReal (realRegSingle regNo) realRegSingle :: RegNo -> RealReg realRegSingle regNo = RealRegSingle regNo -- We like to have Uniques for Reg so that we can make UniqFM and UniqSets -- in the register allocator. 
instance Uniquable Reg where getUnique reg = case reg of RegVirtual vr -> getUnique vr RegReal rr -> getUnique rr -- | Print a reg in a generic manner -- If you want the architecture specific names, then use the pprReg -- function from the appropriate Ppr module. instance Outputable Reg where ppr reg = case reg of RegVirtual vr -> ppr vr RegReal rr -> ppr rr isRealReg :: Reg -> Bool isRealReg reg = case reg of RegReal _ -> True RegVirtual _ -> False takeRealReg :: Reg -> Maybe RealReg takeRealReg reg = case reg of RegReal rr -> Just rr _ -> Nothing isVirtualReg :: Reg -> Bool isVirtualReg reg = case reg of RegReal _ -> False RegVirtual _ -> True takeVirtualReg :: Reg -> Maybe VirtualReg takeVirtualReg reg = case reg of RegReal _ -> Nothing RegVirtual vr -> Just vr -- | The patch function supplied by the allocator maps VirtualReg to RealReg -- regs, but sometimes we want to apply it to plain old Reg. -- liftPatchFnToRegReg :: (VirtualReg -> RealReg) -> (Reg -> Reg) liftPatchFnToRegReg patchF reg = case reg of RegVirtual vr -> RegReal (patchF vr) RegReal _ -> reg ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/0000755000000000000000000000000007346545000020207 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/Class.hs0000644000000000000000000000326107346545000021612 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Platform.Reg.Class ( RegClass (..), RegArch(..), registerArch ) where import GHC.Prelude import GHC.Types.Unique import GHC.Builtin.Uniques ( mkRegClassUnique ) import GHC.Platform.ArchOS import GHC.Utils.Outputable ( Outputable(ppr), text ) -- | The class of a register. -- Used in the register allocator. -- We treat all registers in a class as being interchangeable. -- newtype RegClass = RegClass Int deriving ( Eq, Ord, Show ) instance Uniquable RegClass where getUnique ( RegClass i ) = mkRegClassUnique i -- | This instance is just used for the graph colouring register allocator. 
-- Prefer using either 'GHC.Platform.Reg.Class.Separate.pprRegClass' -- or 'GHC.Platform.Reg.Class.Unified.pprRegClass', which is more informative. instance Outputable RegClass where ppr (RegClass i) = ppr i -- | The register architecture of a given machine. data RegArch -- | Floating-point and vector registers are unified (e.g. X86, AArch64). = Unified -- | Floating-point and vector registers are separate (e.g. RISC-V). | Separate -- | No vector registers. | NoVectors deriving ( Eq, Ord, Show ) instance Outputable RegArch where ppr regArch = text (show regArch) -- | What is the register architecture of the given architecture? registerArch :: Arch -> RegArch registerArch arch = case arch of ArchX86 -> Unified ArchX86_64 -> Unified ArchPPC -> Unified ArchPPC_64 {} -> Unified ArchAArch64 -> Unified -- Support for vector registers not yet implemented for RISC-V -- see panic in `getFreeRegs`. --ArchRISCV64 -> Separate ArchRISCV64 -> NoVectors _ -> NoVectors ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/Class/0000755000000000000000000000000007346545000021254 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/Class/NoVectors.hs0000644000000000000000000000121707346545000023533 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} -- | Register classes for architectures which don't have any vector registers. module GHC.Platform.Reg.Class.NoVectors ( RegClass ( RcInteger, RcFloat ) , pprRegClass, allRegClasses ) where import GHC.Utils.Outputable ( SDoc, text ) import GHC.Platform.Reg.Class ( RegClass(..) 
) pattern RcInteger, RcFloat :: RegClass pattern RcInteger = RegClass 0 pattern RcFloat = RegClass 1 {-# COMPLETE RcInteger, RcFloat #-} pprRegClass :: RegClass -> SDoc pprRegClass = \case RcInteger -> text "I" RcFloat -> text "F" allRegClasses :: [RegClass] allRegClasses = [RcInteger, RcFloat] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/Class/Separate.hs0000644000000000000000000000145407346545000023360 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} -- | Register classes for architectures which have registers -- for scalar floating-point values that are separate from all vector registers. module GHC.Platform.Reg.Class.Separate ( RegClass ( RcInteger, RcFloat, RcVector ) , pprRegClass, allRegClasses ) where import GHC.Utils.Outputable ( SDoc, text ) import GHC.Platform.Reg.Class ( RegClass(..) ) pattern RcInteger, RcFloat, RcVector :: RegClass pattern RcInteger = RegClass 0 pattern RcFloat = RegClass 1 pattern RcVector = RegClass 2 {-# COMPLETE RcInteger, RcFloat, RcVector #-} pprRegClass :: RegClass -> SDoc pprRegClass = \case RcInteger -> text "I" RcFloat -> text "F" RcVector -> text "V" allRegClasses :: [RegClass] allRegClasses = [RcInteger, RcFloat, RcVector] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Reg/Class/Unified.hs0000644000000000000000000000140607346545000023174 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} -- | Register classes for architectures which don't have separate registers -- for scalar floating-point values separate from vector registers. module GHC.Platform.Reg.Class.Unified ( RegClass ( RcInteger, RcFloatOrVector ) , pprRegClass, allRegClasses ) where import GHC.Utils.Outputable ( SDoc, text ) import GHC.Platform.Reg.Class ( RegClass(..) 
) pattern RcInteger, RcFloatOrVector :: RegClass pattern RcInteger = RegClass 0 pattern RcFloatOrVector = RegClass 1 {-# COMPLETE RcInteger, RcFloatOrVector #-} pprRegClass :: RegClass -> SDoc pprRegClass = \case RcInteger -> text "I" RcFloatOrVector -> text "F" allRegClasses :: [RegClass] allRegClasses = [RcInteger, RcFloatOrVector] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Regs.hs0000644000000000000000000001013207346545000020723 0ustar0000000000000000module GHC.Platform.Regs (callerSaves, activeStgRegs, haveRegBase, globalRegMaybe, freeReg) where import GHC.Prelude import GHC.Cmm.Expr import GHC.Platform import GHC.Platform.Reg import qualified GHC.Platform.ARM as ARM import qualified GHC.Platform.AArch64 as AArch64 import qualified GHC.Platform.PPC as PPC import qualified GHC.Platform.S390X as S390X import qualified GHC.Platform.X86 as X86 import qualified GHC.Platform.X86_64 as X86_64 import qualified GHC.Platform.RISCV64 as RISCV64 import qualified GHC.Platform.Wasm32 as Wasm32 import qualified GHC.Platform.LoongArch64 as LoongArch64 import qualified GHC.Platform.NoRegs as NoRegs -- | Returns 'True' if this global register is stored in a caller-saves -- machine register. callerSaves :: Platform -> GlobalReg -> Bool callerSaves platform | platformUnregisterised platform = NoRegs.callerSaves | otherwise = case platformArch platform of ArchX86 -> X86.callerSaves ArchX86_64 -> X86_64.callerSaves ArchS390X -> S390X.callerSaves ArchARM {} -> ARM.callerSaves ArchAArch64 -> AArch64.callerSaves ArchRISCV64 -> RISCV64.callerSaves ArchWasm32 -> Wasm32.callerSaves ArchLoongArch64 -> LoongArch64.callerSaves arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.callerSaves | otherwise -> NoRegs.callerSaves -- | Here is where the STG register map is defined for each target arch. -- The order matters (for the llvm backend anyway)! We must make sure to -- maintain the order here with the order used in the LLVM calling conventions. 
-- Note that also, this isn't all registers, just the ones that are currently -- possibly mapped to real registers. activeStgRegs :: Platform -> [GlobalReg] activeStgRegs platform | platformUnregisterised platform = NoRegs.activeStgRegs | otherwise = case platformArch platform of ArchX86 -> X86.activeStgRegs ArchX86_64 -> X86_64.activeStgRegs ArchS390X -> S390X.activeStgRegs ArchARM {} -> ARM.activeStgRegs ArchAArch64 -> AArch64.activeStgRegs ArchRISCV64 -> RISCV64.activeStgRegs ArchWasm32 -> Wasm32.activeStgRegs ArchLoongArch64 -> LoongArch64.activeStgRegs arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.activeStgRegs | otherwise -> NoRegs.activeStgRegs haveRegBase :: Platform -> Bool haveRegBase platform | platformUnregisterised platform = NoRegs.haveRegBase | otherwise = case platformArch platform of ArchX86 -> X86.haveRegBase ArchX86_64 -> X86_64.haveRegBase ArchS390X -> S390X.haveRegBase ArchARM {} -> ARM.haveRegBase ArchAArch64 -> AArch64.haveRegBase ArchRISCV64 -> RISCV64.haveRegBase ArchWasm32 -> Wasm32.haveRegBase ArchLoongArch64 -> LoongArch64.haveRegBase arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.haveRegBase | otherwise -> NoRegs.haveRegBase globalRegMaybe :: Platform -> GlobalReg -> Maybe RealReg globalRegMaybe platform | platformUnregisterised platform = NoRegs.globalRegMaybe | otherwise = case platformArch platform of ArchX86 -> X86.globalRegMaybe ArchX86_64 -> X86_64.globalRegMaybe ArchS390X -> S390X.globalRegMaybe ArchARM {} -> ARM.globalRegMaybe ArchAArch64 -> AArch64.globalRegMaybe ArchRISCV64 -> RISCV64.globalRegMaybe ArchWasm32 -> Wasm32.globalRegMaybe ArchLoongArch64 -> LoongArch64.globalRegMaybe arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.globalRegMaybe | otherwise -> NoRegs.globalRegMaybe freeReg :: Platform -> RegNo -> Bool freeReg platform | platformUnregisterised platform = NoRegs.freeReg | otherwise = case platformArch platform of ArchX86 -> X86.freeReg 
ArchX86_64 -> X86_64.freeReg ArchS390X -> S390X.freeReg ArchARM {} -> ARM.freeReg ArchAArch64 -> AArch64.freeReg ArchRISCV64 -> RISCV64.freeReg ArchWasm32 -> Wasm32.freeReg ArchLoongArch64 -> LoongArch64.freeReg arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.freeReg | otherwise -> NoRegs.freeReg ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/S390X.hs0000644000000000000000000000023607346545000020615 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.S390X where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_s390x 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Wasm32.hs0000644000000000000000000000023707346545000021104 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.Wasm32 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_wasm32 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/Ways.hs0000644000000000000000000002064607346545000020761 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Ways -- -- The central concept of a "way" is that all objects in a given -- program must be compiled in the same "way". Certain options change -- parameters of the virtual machine, eg. profiling adds an extra word -- to the object header, so profiling objects cannot be linked with -- non-profiling objects. -- -- After parsing the command-line options, we determine which "way" we -- are building - this might be a combination way, eg. profiling+threaded. -- -- There are two kinds of ways: -- - RTS only: only affect the runtime system (RTS) and don't affect code -- generation (e.g. threaded, debug) -- - Full ways: affect code generation and the RTS (e.g. profiling, dynamic -- linking) -- -- We then find the "build-tag" associated with this way, and this -- becomes the suffix used to find .hi files and libraries used in -- this compilation. module GHC.Platform.Ways ( Way(..) 
, Ways , hasWay , hasNotWay , addWay , removeWay , allowed_combination , wayGeneralFlags , wayUnsetGeneralFlags , wayOptc , wayOptcxx , wayOptl , wayOptP , wayDesc , wayRTSOnly , wayTag , waysTag , waysBuildTag , fullWays , rtsWays -- * Host GHC ways , hostWays , hostFullWays , hostIsProfiled , hostIsDynamic , hostIsThreaded , hostIsDebugged , hostIsTracing ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Flags import qualified Data.Set as Set import Data.Set (Set) import Data.List (intersperse) -- | A way -- -- Don't change the constructor order as it is used by `waysTag` to create a -- unique tag (e.g. thr_debug_p) which is expected by other tools (e.g. Cabal). data Way = WayCustom String -- ^ for GHC API clients building custom variants | WayThreaded -- ^ (RTS only) Multithreaded runtime system | WayDebug -- ^ Debugging, enable trace messages and extra checks | WayProf -- ^ Profiling, enable cost-centre stacks and profiling reports | WayDyn -- ^ Dynamic linking deriving (Eq, Ord, Show, Read) type Ways = Set Way -- | Test if a way is enabled hasWay :: Ways -> Way -> Bool hasWay ws w = Set.member w ws -- | Test if a way is not enabled hasNotWay :: Ways -> Way -> Bool hasNotWay ws w = Set.notMember w ws -- | Add a way addWay :: Way -> Ways -> Ways addWay = Set.insert -- | Remove a way removeWay :: Way -> Ways -> Ways removeWay = Set.delete -- | Check if a combination of ways is allowed allowed_combination :: Ways -> Bool allowed_combination ways = not disallowed where disallowed = or [ hasWay ways x && hasWay ways y | (x,y) <- couples ] -- List of disallowed couples of ways couples = [] -- we don't have any disallowed combination of ways nowadays -- | Unique tag associated to a list of ways waysTag :: Ways -> String waysTag = concat . intersperse "_" . map wayTag . Set.toAscList -- | Unique build-tag associated to a list of ways -- -- RTS only ways are filtered out because they have no impact on the build. 
waysBuildTag :: Ways -> String waysBuildTag ws = waysTag (Set.filter (not . wayRTSOnly) ws) -- | Unique build-tag associated to a way wayTag :: Way -> String wayTag (WayCustom xs) = xs wayTag WayThreaded = "thr" wayTag WayDebug = "debug" wayTag WayDyn = "dyn" wayTag WayProf = "p" -- | Return true for ways that only impact the RTS, not the generated code wayRTSOnly :: Way -> Bool wayRTSOnly (WayCustom {}) = False wayRTSOnly WayDyn = False wayRTSOnly WayProf = False wayRTSOnly WayThreaded = True wayRTSOnly WayDebug = True -- | Filter ways that have an impact on compilation fullWays :: Ways -> Ways fullWays ws = Set.filter (not . wayRTSOnly) ws -- | Filter RTS-only ways (ways that don't have an impact on compilation) rtsWays :: Ways -> Ways rtsWays ws = Set.filter wayRTSOnly ws wayDesc :: Way -> String wayDesc (WayCustom xs) = xs wayDesc WayThreaded = "Threaded" wayDesc WayDebug = "Debug" wayDesc WayDyn = "Dynamic" wayDesc WayProf = "Profiling" -- | Turn these flags on when enabling this way wayGeneralFlags :: Platform -> Way -> [GeneralFlag] wayGeneralFlags _ (WayCustom {}) = [] wayGeneralFlags _ WayThreaded = [] wayGeneralFlags _ WayDebug = [] wayGeneralFlags _ WayDyn = [Opt_PIC, Opt_ExternalDynamicRefs] -- We could get away without adding -fPIC when compiling the -- modules of a program that is to be linked with -dynamic; the -- program itself does not need to be position-independent, only -- the libraries need to be. HOWEVER, GHCi links objects into a -- .so before loading the .so using the system linker. Since only -- PIC objects can be linked into a .so, we have to compile even -- modules of the main program with -fPIC when using -dynamic. 
wayGeneralFlags _ WayProf = [] -- | Turn these flags off when enabling this way wayUnsetGeneralFlags :: Platform -> Way -> [GeneralFlag] wayUnsetGeneralFlags _ (WayCustom {}) = [] wayUnsetGeneralFlags _ WayThreaded = [] wayUnsetGeneralFlags _ WayDebug = [] wayUnsetGeneralFlags _ WayDyn = [Opt_SplitSections] -- There's no point splitting when we're going to be dynamically linking. -- Plus it breaks compilation on OSX x86. wayUnsetGeneralFlags _ WayProf = [] -- | Pass these options to the C compiler when enabling this way wayOptc :: Platform -> Way -> [String] wayOptc _ (WayCustom {}) = [] wayOptc platform WayThreaded = case platformOS platform of OSOpenBSD -> ["-pthread"] OSNetBSD -> ["-pthread"] _ -> [] wayOptc _ WayDebug = [] wayOptc _ WayDyn = [] wayOptc _ WayProf = ["-DPROFILING"] wayOptcxx :: Platform -> Way -> [String] wayOptcxx = wayOptc -- Use the same flags as C -- | Pass these options to linker when enabling this way wayOptl :: Platform -> Way -> [String] wayOptl _ (WayCustom {}) = [] wayOptl platform WayThreaded = case platformOS platform of -- N.B. FreeBSD cc throws a warning if we pass -pthread without -- actually using any pthread symbols. OSFreeBSD -> ["-pthread", "-Wno-unused-command-line-argument"] OSOpenBSD -> ["-pthread"] OSNetBSD -> ["-pthread"] _ -> [] wayOptl _ WayDebug = [] wayOptl _ WayDyn = [] wayOptl _ WayProf = [] -- | Pass these options to the preprocessor when enabling this way wayOptP :: Platform -> Way -> [String] wayOptP _ (WayCustom {}) = [] wayOptP _ WayThreaded = [] wayOptP _ WayDebug = [] wayOptP _ WayDyn = [] wayOptP _ WayProf = ["-DPROFILING"] -- | Consult the RTS to find whether it has been built with profiling enabled. hostIsProfiled :: Bool hostIsProfiled = rtsIsProfiled_ /= 0 foreign import ccall unsafe "rts_isProfiled" rtsIsProfiled_ :: Int -- | Consult the RTS to find whether GHC itself has been built with -- dynamic linking. 
This can't be statically known at compile-time, -- because we build both the static and dynamic versions together with -- -dynamic-too. hostIsDynamic :: Bool hostIsDynamic = rtsIsDynamic_ /= 0 foreign import ccall unsafe "rts_isDynamic" rtsIsDynamic_ :: Int -- we need this until the bootstrap GHC is always recent enough #if MIN_VERSION_GLASGOW_HASKELL(9,1,0,0) -- | Consult the RTS to find whether it is threaded. hostIsThreaded :: Bool hostIsThreaded = rtsIsThreaded_ /= 0 foreign import ccall unsafe "rts_isThreaded" rtsIsThreaded_ :: Int -- | Consult the RTS to find whether it is debugged. hostIsDebugged :: Bool hostIsDebugged = rtsIsDebugged_ /= 0 foreign import ccall unsafe "rts_isDebugged" rtsIsDebugged_ :: Int -- | Consult the RTS to find whether it is tracing. hostIsTracing :: Bool hostIsTracing = rtsIsTracing_ /= 0 foreign import ccall unsafe "rts_isTracing" rtsIsTracing_ :: Int #else hostIsThreaded :: Bool hostIsThreaded = False hostIsDebugged :: Bool hostIsDebugged = False hostIsTracing :: Bool hostIsTracing = False #endif -- | Host ways. hostWays :: Ways hostWays = Set.unions [ if hostIsDynamic then Set.singleton WayDyn else Set.empty , if hostIsProfiled then Set.singleton WayProf else Set.empty , if hostIsThreaded then Set.singleton WayThreaded else Set.empty , if hostIsDebugged then Set.singleton WayDebug else Set.empty ] -- | Host "full" ways (i.e. ways that have an impact on the compilation, -- not RTS only ways). -- -- These ways must be used when compiling codes targeting the internal -- interpreter. 
hostFullWays :: Ways hostFullWays = fullWays hostWays ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/X86.hs0000644000000000000000000000023307346545000020411 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.X86 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_i386 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Platform/X86_64.hs0000644000000000000000000000024007346545000020720 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.X86_64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_x86_64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Prelude.hs0000644000000000000000000000323707346545000017647 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_HADDOCK not-home #-} {-# OPTIONS_GHC -O2 #-} -- See Note [-O2 Prelude] -- | Custom GHC "Prelude" -- -- This module serves as a replacement for the "Prelude" module -- and abstracts over differences between the bootstrapping -- GHC version, and may also provide a common default vocabulary. -- Every module in GHC -- * Is compiled with -XNoImplicitPrelude -- * Explicitly imports GHC.Prelude module GHC.Prelude (module GHC.Prelude ,module GHC.Utils.Trace ) where {- Note [-O2 Prelude] ~~~~~~~~~~~~~~~~~~~~~ There is some code in GHC that is *always* compiled with -O[2] because of it's impact on compile time performance. Some of this code might depend on the definitions like shiftL being defined here being performant. So we always compile this module with -O2. It's (currently) tiny so I have little reason to suspect this impacts overall GHC compile times negatively. -} -- We export the 'Semigroup' class but w/o the (<>) operator to avoid -- clashing with the (Outputable.<>) operator which is heavily used -- through GHC's code-base. {- Note [Why do we import Prelude here?] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The files ghc-boot-th.cabal, ghc-boot.cabal, ghci.cabal and ghc-heap.cabal contain the directive default-extensions: NoImplicitPrelude. There are two motivations for this: - Consistency with the compiler directory, which enables NoImplicitPrelude; - Allows loading the above dependent packages with ghc-in-ghci, giving a smoother development experience when adding new extensions. -} import GHC.Prelude.Basic as GHC.Prelude -- import {-# SOURCE #-} GHC.Utils.Trace import GHC.Utils.Trace hiding ( trace ) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Prelude/0000755000000000000000000000000007346545000017306 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Prelude/Basic.hs0000644000000000000000000001121307346545000020661 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_HADDOCK not-home #-} {-# OPTIONS_GHC -O2 #-} -- See Note [-O2 Prelude] -- See Note [Proxies for head and tail] {-# OPTIONS_GHC -Wno-unrecognised-warning-flags -Wno-x-partial #-} -- | Custom minimal GHC "Prelude" -- -- This module serves as a replacement for the "Prelude" module -- and abstracts over differences between the bootstrapping -- GHC version, and may also provide a common default vocabulary. -- Every module in GHC -- * Is compiled with -XNoImplicitPrelude -- * Explicitly imports GHC.Prelude.Basic or GHC.Prelude -- * The later provides some functionality with within ghc itself -- like pprTrace. module GHC.Prelude.Basic ( module X , Applicative (..) , module Bits , bit , shiftL, shiftR , setBit, clearBit , head, tail ) where {- Note [-O2 Prelude] ~~~~~~~~~~~~~~~~~~~~~ There is some code in GHC that is *always* compiled with -O[2] because of it's impact on compile time performance. Some of this code might depend on the definitions like shiftL being defined here being performant. So we always compile this module with -O2. It's (currently) tiny so I have little reason to suspect this impacts overall GHC compile times negatively. 
-} -- We export the 'Semigroup' class but w/o the (<>) operator to avoid -- clashing with the (Outputable.<>) operator which is heavily used -- through GHC's code-base. {- Note [Why do we import Prelude here?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The files ghc-boot-th.cabal, ghc-boot.cabal, ghci.cabal and ghc-heap.cabal contain the directive default-extensions: NoImplicitPrelude. There are two motivations for this: - Consistency with the compiler directory, which enables NoImplicitPrelude; - Allows loading the above dependent packages with ghc-in-ghci, giving a smoother development experience when adding new extensions. -} import qualified Prelude import Prelude as X hiding ((<>), Applicative(..), Foldable(..), head, tail) import Control.Applicative (Applicative(..)) import Data.Foldable as X (Foldable(elem, foldMap, foldr, foldl, foldl', foldr1, foldl1, maximum, minimum, product, sum, null, length)) import GHC.Stack.Types (HasCallStack) #if MIN_VERSION_base(4,16,0) import GHC.Bits as Bits hiding (bit, shiftL, shiftR, setBit, clearBit) # if defined(DEBUG) import qualified GHC.Bits as Bits (shiftL, shiftR) # endif #else --base <4.15 import Data.Bits as Bits hiding (bit, shiftL, shiftR, setBit, clearBit) # if defined(DEBUG) import qualified Data.Bits as Bits (shiftL, shiftR) # endif #endif {- Note [Default to unsafe shifts inside GHC] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The safe shifts can introduce branches which come at the cost of performance. We still want the additional debuggability for debug builds. So we define it as one or the other depending on the DEBUG setting. Why do we then continue on to re-export the rest of Data.Bits? If we would not what is likely to happen is: * Someone imports Data.Bits, uses xor. Things are fine. * They add a shift and get an ambiguous definition error. * The are puzzled for a bit. * They either: + Remove the import of Data.Bits and get an error because xor is not in scope. 
+ Add the hiding clause to the Data.Bits import for the shifts. Either is quite annoying. Simply re-exporting all of Data.Bits avoids this making for a smoother developer experience. At the cost of having a few more names in scope at all time. But that seems like a fair tradeoff. See also #19618 -} -- We always want the Data.Bits method to show up for rules etc. {-# INLINE shiftL #-} {-# INLINE shiftR #-} shiftL, shiftR :: Bits.Bits a => a -> Int -> a #if defined(DEBUG) shiftL = Bits.shiftL shiftR = Bits.shiftR #else shiftL = Bits.unsafeShiftL shiftR = Bits.unsafeShiftR #endif {-# INLINE bit #-} bit :: (Num a, Bits.Bits a) => Int -> a bit = \ i -> 1 `shiftL` i {-# INLINE setBit #-} setBit :: (Num a, Bits.Bits a) => a -> Int -> a setBit = \ x i -> x Bits..|. bit i {-# INLINE clearBit #-} clearBit :: (Num a, Bits.Bits a) => a -> Int -> a clearBit = \ x i -> x Bits..&. Bits.complement (bit i) {- Note [Proxies for head and tail] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prelude.head and Prelude.tail have recently acquired {-# WARNING in "x-partial" #-}, but the GHC codebase uses them fairly extensively and insists on building warning-free. Thus, instead of adding {-# OPTIONS_GHC -Wno-x-partial #-} to every module which employs them, we define warning-less proxies and export them from GHC.Prelude. -} -- See Note [Proxies for head and tail] head :: HasCallStack => [a] -> a head = Prelude.head {-# INLINE head #-} -- See Note [Proxies for head and tail] tail :: HasCallStack => [a] -> [a] tail = Prelude.tail {-# INLINE tail #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/0000755000000000000000000000000007346545000017331 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Context.hs0000644000000000000000000004453507346545000021324 0ustar0000000000000000module GHC.Runtime.Context ( InteractiveContext (..) , InteractiveImport (..) 
, emptyInteractiveContext , extendInteractiveContext , extendInteractiveContextWithIds , setInteractivePrintName , substInteractiveContext , replaceImportEnv , icReaderEnv , icExtendGblRdrEnv , icInteractiveModule , icInScopeTTs , icNamePprCtx ) where import GHC.Prelude import GHC.Hs import GHC.Driver.DynFlags import {-# SOURCE #-} GHC.Driver.Plugins import GHC.Runtime.Eval.Types ( IcGlobalRdrEnv(..), Resume ) import GHC.Unit import GHC.Unit.Env import GHC.Core.FamInstEnv import GHC.Core.InstEnv import GHC.Core.Type import GHC.Types.DefaultEnv ( DefaultEnv, emptyDefaultEnv ) import GHC.Types.Fixity.Env import GHC.Types.Id.Info ( IdDetails(..) ) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Reader import GHC.Types.Name.Ppr import GHC.Types.TyThing import GHC.Types.Var import GHC.Builtin.Names ( ioTyConName, printName, mkInteractiveModule ) import GHC.Utils.Outputable {- Note [The interactive package] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Type, class, and value declarations at the command prompt are treated as if they were defined in modules interactive:Ghci1 interactive:Ghci2 ...etc... with each bunch of declarations using a new module, all sharing a common package 'interactive' (see Module.interactiveUnitId, and GHC.Builtin.Names.mkInteractiveModule). This scheme deals well with shadowing. For example: ghci> data T = A ghci> data T = B ghci> :i A data Ghci1.T = A -- Defined at :2:10 Here we must display info about constructor A, but its type T has been shadowed by the second declaration. But it has a respectable qualified name (Ghci1.T), and its source location says where it was defined, and it can also be used with the qualified name. So the main invariant continues to hold, that in any session an original name M.T only refers to one unique thing. (In a previous iteration both the T's above were called :Interactive.T, albeit with different uniques, which gave rise to all sorts of trouble.) 
The details are a bit tricky though: * The field ic_mod_index counts which Ghci module we've got up to. It is incremented when extending ic_tythings * ic_tythings contains only things from the 'interactive' package. * Module from the 'interactive' package (Ghci1, Ghci2 etc) never go in the Home Package Table (HPT). When you say :load, that's when we extend the HPT. * The 'homeUnitId' field of DynFlags is *not* set to 'interactive'. It stays as 'main' (or whatever -this-unit-id says), and is the package to which :load'ed modules are added to. * So how do we arrange that declarations at the command prompt get to be in the 'interactive' package? Simply by setting the tcg_mod field of the TcGblEnv to "interactive:Ghci1". This is done by the call to initTc in initTcInteractive, which in turn get the module from it 'icInteractiveModule' field of the interactive context. The 'homeUnitId' field stays as 'main' (or whatever -this-unit-id says). * The main trickiness is that the type environment (tcg_type_env) and fixity envt (tcg_fix_env), now contain entities from all the interactive-package modules (Ghci1, Ghci2, ...) together, rather than just a single module as is usually the case. So you can't use "nameIsLocalOrFrom" to decide whether to look in the TcGblEnv vs the HPT/PTE. This is a change, but not a problem provided you know. * However, the tcg_binds, tcg_sigs, tcg_insts, tcg_fam_insts, etc fields of the TcGblEnv, which collect "things defined in this module", all refer to stuff define in a single GHCi command, *not* all the commands so far. In contrast, tcg_inst_env, tcg_fam_inst_env, have instances from all GhciN modules, which makes sense -- they are all "home package" modules. 
Note [Interactively-bound Ids in GHCi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Ids bound by previous Stmts in GHCi are currently a) GlobalIds, with b) An External Name, like Ghci4.foo See Note [The interactive package] above c) A tidied type (a) They must be GlobalIds (not LocalIds) otherwise when we come to compile an expression using these ids later, the byte code generator will consider the occurrences to be free rather than global. (b) Having an External Name is important because of Note [GlobalRdrEnv shadowing] in GHC.Types.Names.RdrName (c) Their types are tidied. This is important, because :info may ask to look at them, and :info expects the things it looks up to have tidy types Where do interactively-bound Ids come from? - GHCi REPL Stmts e.g. ghci> let foo x = x+1 These start with an Internal Name because a Stmt is a local construct, so the renamer naturally builds an Internal name for each of its binders. Then in tcRnStmt they are externalised via GHC.Tc.Module.externaliseAndTidyId, so they get Names like Ghic4.foo. - Ids bound by the debugger etc have Names constructed by GHC.Iface.Env.newInteractiveBinder; at the call sites it is followed by mkVanillaGlobal or mkVanillaGlobalWithInfo. So again, they are all Global, External. - TyCons, Classes, and Ids bound by other top-level declarations in GHCi (eg foreign import, record selectors) also get External Names, with Ghci9 (or 8, or 7, etc) as the module name. 
Note [ic_tythings] ~~~~~~~~~~~~~~~~~~ The ic_tythings field contains * The TyThings declared by the user at the command prompt (eg Ids, TyCons, Classes) * The user-visible Ids that arise from such things, which *don't* come from 'implicitTyThings', notably: - record selectors - class ops The implicitTyThings are readily obtained from the TyThings but record selectors etc are not It does *not* contain * DFunIds (they can be gotten from ic_instances) * CoAxioms (ditto) See also Note [Interactively-bound Ids in GHCi] Note [Override identical instances in GHCi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you declare a new instance in GHCi that is identical to a previous one, we simply override the previous one; we don't regard it as overlapping. e.g. Prelude> data T = A | B Prelude> instance Eq T where ... Prelude> instance Eq T where ... -- This one overrides It's exactly the same for type-family instances. See #7102 Note [icReaderEnv recalculation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The GlobalRdrEnv describing what’s in scope at the prompts consists of all the imported things, followed by all the things defined on the prompt, with shadowing. Defining new things on the prompt is easy: we shadow as needed, and then extend the environment. But changing the set of imports, which can happen later as well, is tricky we need to re-apply the shadowing from all the things defined at the prompt! For example: ghci> let empty = True ghci> import Data.IntMap.Strict -- Exports 'empty' ghci> empty -- Still gets the 'empty' defined at the prompt True It would be correct to re-construct the env from scratch based on `ic_tythings`, but that'd be quite expensive if there are many entries in `ic_tythings` that shadow each other. Therefore we keep around a `GlobalRdrEnv` in `igre_prompt_env` that contains _just_ the things defined at the prompt, and use that in `replaceImportEnv` to rebuild the full env. 
Conveniently, `shadowNames` takes such an `OccEnv` to denote the set of names to shadow. INVARIANT: Every `OccName` in `igre_prompt_env` is present unqualified as well (else it would not be right to pass `igre_prompt_env` to `shadowNames`.) The definition of the IcGlobalRdrEnv type should conceptually be in this module, and made abstract, but it’s used in `Resume`, so it lives in GHC.Runtime.Eval.Type. -} -- | Interactive context, recording information about the state of the -- context in which statements are executed in a GHCi session. data InteractiveContext = InteractiveContext { ic_dflags :: DynFlags, -- ^ The 'DynFlags' used to evaluate interactive expressions -- and statements. ic_mod_index :: Int, -- ^ Each GHCi stmt or declaration brings some new things into -- scope. We give them names like interactive:Ghci9.T, -- where the ic_index is the '9'. The ic_mod_index is -- incremented whenever we add something to ic_tythings -- See Note [The interactive package] ic_imports :: [InteractiveImport], -- ^ The GHCi top-level scope (icReaderEnv) is extended with -- these imports -- -- This field is only stored here so that the client -- can retrieve it with GHC.getContext. GHC itself doesn't -- use it, but does reset it to empty sometimes (such -- as before a GHC.load). The context is set with GHC.setContext. ic_tythings :: [TyThing], -- ^ TyThings defined by the user, in reverse order of -- definition (ie most recent at the front). -- Also used in GHC.Tc.Module.runTcInteractive to fill the type -- checker environment. -- See Note [ic_tythings] ic_gre_cache :: IcGlobalRdrEnv, -- ^ Essentially the cached 'GlobalRdrEnv'. -- -- The GlobalRdrEnv contains everything in scope at the command -- line, both imported and everything in ic_tythings, with the -- correct shadowing. -- -- The IcGlobalRdrEnv contains extra data to allow efficient -- recalculation when the set of imports change. 
-- See Note [icReaderEnv recalculation] ic_instances :: (InstEnv, [FamInst]), -- ^ All instances and family instances created during -- this session. These are grabbed en masse after each -- update to be sure that proper overlapping is retained. -- That is, rather than re-check the overlapping each -- time we update the context, we just take the results -- from the instance code that already does that. ic_fix_env :: FixityEnv, -- ^ Fixities declared in let statements ic_default :: DefaultEnv, -- ^ The current default classes and types, set by 'default' declarations ic_resume :: [Resume], -- ^ The stack of breakpoint contexts ic_monad :: Name, -- ^ The monad that GHCi is executing in ic_int_print :: Name, -- ^ The function that is used for printing results -- of expressions in ghci and -e mode. ic_cwd :: Maybe FilePath, -- ^ virtual CWD of the program ic_plugins :: !Plugins -- ^ Cache of loaded plugins. We store them here to avoid having to -- load them every time we switch to the interactive context. } data InteractiveImport = IIDecl (ImportDecl GhcPs) -- ^ Bring the exports of a particular module -- (filtered by an import decl) into scope | IIModule ModuleName -- ^ Bring into scope the entire top-level envt of -- of this module, including the things imported -- into it. emptyIcGlobalRdrEnv :: IcGlobalRdrEnv emptyIcGlobalRdrEnv = IcGlobalRdrEnv { igre_env = emptyGlobalRdrEnv , igre_prompt_env = emptyGlobalRdrEnv } -- | Constructs an empty InteractiveContext. 
emptyInteractiveContext :: DynFlags -> InteractiveContext emptyInteractiveContext dflags = InteractiveContext { ic_dflags = dflags, ic_imports = [], ic_gre_cache = emptyIcGlobalRdrEnv, ic_mod_index = 1, ic_tythings = [], ic_instances = (emptyInstEnv,[]), ic_fix_env = emptyNameEnv, ic_monad = ioTyConName, -- IO monad by default ic_int_print = printName, -- System.IO.print by default ic_default = emptyDefaultEnv, ic_resume = [], ic_cwd = Nothing, ic_plugins = emptyPlugins } icReaderEnv :: InteractiveContext -> GlobalRdrEnv icReaderEnv = igre_env . ic_gre_cache icInteractiveModule :: InteractiveContext -> Module icInteractiveModule (InteractiveContext { ic_mod_index = index }) = mkInteractiveModule (show index) -- | This function returns the list of visible TyThings (useful for -- e.g. showBindings). -- -- It picks only those TyThings that are not shadowed by later definitions on the interpreter, -- to not clutter :showBindings with shadowed ids, which would show up as Ghci9.foo. -- -- Some TyThings define many names; we include them if _any_ name is still -- available unqualified. icInScopeTTs :: InteractiveContext -> [TyThing] icInScopeTTs ictxt = filter in_scope_unqualified (ic_tythings ictxt) where in_scope_unqualified thing = or [ unQualOK gre | gre <- tyThingLocalGREs thing , let name = greName gre , Just gre <- [lookupGRE_Name (icReaderEnv ictxt) name] ] -- | Get the NamePprCtx function based on the flags and this InteractiveContext icNamePprCtx :: UnitEnv -> InteractiveContext -> NamePprCtx icNamePprCtx unit_env ictxt = mkNamePprCtx ptc unit_env (icReaderEnv ictxt) where ptc = initPromotionTickContext (ic_dflags ictxt) -- | extendInteractiveContext is called with new TyThings recently defined to update the -- InteractiveContext to include them. By putting new things first, unqualified -- use will pick the most recently defined thing with a given name, while -- still keeping the old names in scope in their qualified form (Ghci1.foo). 
extendInteractiveContext :: InteractiveContext -> [TyThing] -> InstEnv -> [FamInst] -> DefaultEnv -> FixityEnv -> InteractiveContext extendInteractiveContext ictxt new_tythings new_cls_insts new_fam_insts defaults fix_env = ictxt { ic_mod_index = ic_mod_index ictxt + 1 -- Always bump this; even instances should create -- a new mod_index (#9426) , ic_tythings = new_tythings ++ ic_tythings ictxt , ic_gre_cache = ic_gre_cache ictxt `icExtendIcGblRdrEnv` new_tythings , ic_instances = ( new_cls_insts `unionInstEnv` old_cls_insts , new_fam_insts ++ fam_insts ) -- we don't shadow old family instances (#7102), -- so don't need to remove them here , ic_default = defaults , ic_fix_env = fix_env -- See Note [Fixity declarations in GHCi] } where -- Discard old instances that have been fully overridden -- See Note [Override identical instances in GHCi] (cls_insts, fam_insts) = ic_instances ictxt old_cls_insts = filterInstEnv (\i -> not $ anyInstEnv (identicalClsInstHead i) new_cls_insts) cls_insts extendInteractiveContextWithIds :: InteractiveContext -> [Id] -> InteractiveContext -- Just a specialised version extendInteractiveContextWithIds ictxt new_ids | null new_ids = ictxt | otherwise = ictxt { ic_mod_index = ic_mod_index ictxt + 1 , ic_tythings = new_tythings ++ ic_tythings ictxt , ic_gre_cache = ic_gre_cache ictxt `icExtendIcGblRdrEnv` new_tythings } where new_tythings = map AnId new_ids setInteractivePrintName :: InteractiveContext -> Name -> InteractiveContext setInteractivePrintName ic n = ic{ic_int_print = n} icExtendIcGblRdrEnv :: IcGlobalRdrEnv -> [TyThing] -> IcGlobalRdrEnv icExtendIcGblRdrEnv igre tythings = IcGlobalRdrEnv { igre_env = icExtendGblRdrEnv False (igre_env igre) tythings , igre_prompt_env = icExtendGblRdrEnv True (igre_prompt_env igre) tythings -- Pass 'True' <=> drop names that are only available qualified. 
-- This is done to maintain the invariant of Note [icReaderEnv recalculation] -- that igre_prompt_env should only contain Names that are available unqualified. } -- This is used by setContext in GHC.Runtime.Eval when the set of imports -- changes, and recalculates the GlobalRdrEnv. See Note [icReaderEnv recalculation] replaceImportEnv :: IcGlobalRdrEnv -> GlobalRdrEnv -> IcGlobalRdrEnv replaceImportEnv igre import_env = igre { igre_env = new_env } where import_env_shadowed = shadowNames False import_env (igre_prompt_env igre) new_env = import_env_shadowed `plusGlobalRdrEnv` igre_prompt_env igre -- | Add 'TyThings' to the 'GlobalRdrEnv', earlier ones in the list shadowing -- later ones, and shadowing existing entries in the 'GlobalRdrEnv'. icExtendGblRdrEnv :: Bool -- ^ discard names that are only available qualified? -> GlobalRdrEnv -> [TyThing] -> GlobalRdrEnv icExtendGblRdrEnv drop_only_qualified env tythings = foldr add env tythings -- Foldr makes things in the front of -- the list shadow things at the back where -- One at a time, to ensure each shadows the previous ones add thing env | is_sub_bndr thing = env | otherwise = foldl' extendGlobalRdrEnv env1 new_gres where new_gres = tyThingLocalGREs thing env1 = shadowNames drop_only_qualified env $ mkGlobalRdrEnv new_gres -- Ugh! The new_tythings may include record selectors, since they -- are not implicit-ids, and must appear in the TypeEnv. But they -- will also be brought into scope by the corresponding (ATyCon -- tc). 
And we want the latter, because that has the correct -- parent (#10520) is_sub_bndr (AnId f) = case idDetails f of RecSelId {} -> True ClassOpId {} -> True _ -> False is_sub_bndr _ = False substInteractiveContext :: InteractiveContext -> Subst -> InteractiveContext substInteractiveContext ictxt@InteractiveContext{ ic_tythings = tts } subst | isEmptyTCvSubst subst = ictxt | otherwise = ictxt { ic_tythings = map subst_ty tts } where subst_ty (AnId id) = AnId $ updateIdTypeAndMult (substTyAddInScope subst) id -- Variables in the interactive context *can* mention free type variables -- because of the runtime debugger. Otherwise you'd expect all -- variables bound in the interactive context to be closed. subst_ty tt = tt instance Outputable InteractiveImport where ppr (IIModule m) = char '*' <> ppr m ppr (IIDecl d) = ppr d ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Eval/0000755000000000000000000000000007346545000020220 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Eval/Types.hs0000644000000000000000000000647507346545000021674 0ustar0000000000000000-- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow, 2005-2007 -- -- Running statements interactively -- -- ----------------------------------------------------------------------------- module GHC.Runtime.Eval.Types ( Resume(..), ResumeBindings, IcGlobalRdrEnv(..), History(..), ExecResult(..), SingleStep(..), isStep, ExecOptions(..) 
) where import GHC.Prelude import GHCi.RemoteTypes import GHCi.Message (EvalExpr, ResumeContext) import GHC.Types.Id import GHC.Types.Name import GHC.Types.TyThing import GHC.Types.Breakpoint import GHC.Types.Name.Reader import GHC.Types.SrcLoc import GHC.Utils.Exception import Data.Word import GHC.Stack.CCS data ExecOptions = ExecOptions { execSingleStep :: SingleStep -- ^ stepping mode , execSourceFile :: String -- ^ filename (for errors) , execLineNumber :: Int -- ^ line number (for errors) , execWrap :: ForeignHValue -> EvalExpr ForeignHValue } data SingleStep = RunToCompletion | SingleStep | RunAndLogSteps isStep :: SingleStep -> Bool isStep RunToCompletion = False isStep _ = True data ExecResult = ExecComplete { execResult :: Either SomeException [Name] , execAllocation :: Word64 } | ExecBreak { breakNames :: [Name] , breakPointId :: Maybe InternalBreakpointId } -- | Essentially a GlobalRdrEnv, but with additional cached values to allow -- efficient re-calculation when the imports change. -- Fields are strict to avoid space leaks (see T4029) -- All operations are in GHC.Runtime.Context. -- See Note [icReaderEnv recalculation] data IcGlobalRdrEnv = IcGlobalRdrEnv { igre_env :: !GlobalRdrEnv -- ^ The final environment , igre_prompt_env :: !GlobalRdrEnv -- ^ Just the things defined at the prompt (excluding imports!) } data Resume = Resume { resumeStmt :: String -- the original statement , resumeContext :: ForeignRef (ResumeContext [HValueRef]) , resumeBindings :: ResumeBindings , resumeFinalIds :: [Id] -- [Id] to bind on completion , resumeApStack :: ForeignHValue -- The object from which we can get -- value of the free variables. , resumeBreakpointId :: Maybe InternalBreakpointId -- ^ the breakpoint we stopped at -- (Nothing <=> exception) , resumeSpan :: SrcSpan -- just a copy of the SrcSpan -- from the ModBreaks, -- otherwise it's a pain to -- fetch the ModDetails & -- ModBreaks to get this. 
, resumeDecl :: String -- ditto , resumeCCS :: RemotePtr CostCentreStack , resumeHistory :: [History] , resumeHistoryIx :: Int -- 0 <==> at the top of the history } type ResumeBindings = ([TyThing], IcGlobalRdrEnv) data History = History { historyApStack :: ForeignHValue , historyBreakpointId :: InternalBreakpointId -- ^ breakpoint identifier , historyEnclosingDecls :: [String] -- ^ declarations enclosing the breakpoint } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Heap/0000755000000000000000000000000007346545000020206 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Heap/Layout.hs0000644000000000000000000005114007346545000022020 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -- -- Storage manager representation of closures {-# LANGUAGE CPP #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Runtime.Heap.Layout ( -- * Words and bytes WordOff, ByteOff, wordsToBytes, bytesToWordsRoundUp, roundUpToWords, roundUpTo, StgWord, fromStgWord, toStgWord, StgHalfWord, fromStgHalfWord, toStgHalfWord, halfWordSize, halfWordSizeInBits, -- * Closure representation SMRep(..), -- CmmInfo sees the rep; no one else does IsStatic, ClosureTypeInfo(..), ArgDescr(..), Liveness, ConstrDescription, -- ** Construction mkHeapRep, blackHoleRep, indStaticRep, mkStackRep, mkRTSRep, arrPtrsRep, smallArrPtrsRep, arrWordsRep, -- ** Predicates isStaticRep, isConRep, isThunkRep, isFunRep, isStaticNoCafCon, isStackRep, -- ** Size-related things heapClosureSizeW, fixedHdrSizeW, arrWordsHdrSize, arrWordsHdrSizeW, arrPtrsHdrSize, arrPtrsHdrSizeW, profHdrSize, thunkHdrSize, nonHdrSize, nonHdrSizeW, smallArrPtrsHdrSize, smallArrPtrsHdrSizeW, hdrSize, hdrSizeW, fixedHdrSize, -- ** RTS closure types rtsClosureType, rET_SMALL, rET_BIG, aRG_GEN, aRG_GEN_BIG, -- ** Arrays card, cardRoundUp, cardTableSizeB, cardTableSizeW ) where import GHC.Prelude import GHC.Types.Basic( ConTagZ ) import 
GHC.Platform import GHC.Platform.Profile import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Word import Data.ByteString (ByteString) {- ************************************************************************ * * Words and bytes * * ************************************************************************ -} -- | Byte offset, or byte count type ByteOff = Int -- | Word offset, or word count type WordOff = Int -- | Round up the given byte count to the next byte count that's a -- multiple of the machine's word size. roundUpToWords :: Platform -> ByteOff -> ByteOff roundUpToWords platform n = roundUpTo n (platformWordSizeInBytes platform) -- | Round up @base@ to a multiple of @size@. roundUpTo :: ByteOff -> ByteOff -> ByteOff roundUpTo base size = (base + (size - 1)) .&. (complement (size - 1)) -- | Convert the given number of words to a number of bytes. -- -- This function morally has type @WordOff -> ByteOff@, but uses @Num -- a@ to allow for overloading. wordsToBytes :: Num a => Platform -> a -> a wordsToBytes platform n = fromIntegral (platformWordSizeInBytes platform) * n {-# SPECIALIZE wordsToBytes :: Platform -> Int -> Int #-} {-# SPECIALIZE wordsToBytes :: Platform -> Word -> Word #-} {-# SPECIALIZE wordsToBytes :: Platform -> Integer -> Integer #-} -- | First round the given byte count up to a multiple of the -- machine's word size and then convert the result to words. bytesToWordsRoundUp :: Platform -> ByteOff -> WordOff bytesToWordsRoundUp platform n = (n + word_size - 1) `quot` word_size where word_size = platformWordSizeInBytes platform -- StgWord is a type representing an StgWord on the target platform. 
-- A Word64 is large enough to hold a Word for either a 32bit or 64bit platform newtype StgWord = StgWord Word64 deriving (Eq, Bits) fromStgWord :: StgWord -> Integer fromStgWord (StgWord i) = toInteger i toStgWord :: Platform -> Integer -> StgWord toStgWord platform i = case platformWordSize platform of -- These conversions mean that things like toStgWord (-1) -- do the right thing PW4 -> StgWord (fromIntegral (fromInteger i :: Word32)) PW8 -> StgWord (fromInteger i) instance Outputable StgWord where ppr (StgWord i) = integer (toInteger i) -- -- A Word32 is large enough to hold half a Word for either a 32bit or -- 64bit platform newtype StgHalfWord = StgHalfWord Word32 deriving Eq fromStgHalfWord :: StgHalfWord -> Integer fromStgHalfWord (StgHalfWord w) = toInteger w toStgHalfWord :: Platform -> Integer -> StgHalfWord toStgHalfWord platform i = case platformWordSize platform of -- These conversions mean that things like toStgHalfWord (-1) -- do the right thing PW4 -> StgHalfWord (fromIntegral (fromInteger i :: Word16)) PW8 -> StgHalfWord (fromInteger i :: Word32) instance Outputable StgHalfWord where ppr (StgHalfWord w) = integer (toInteger w) -- | Half word size in bytes halfWordSize :: Platform -> ByteOff halfWordSize platform = platformWordSizeInBytes platform `div` 2 halfWordSizeInBits :: Platform -> Int halfWordSizeInBits platform = platformWordSizeInBits platform `div` 2 {- ************************************************************************ * * \subsubsection[SMRep-datatype]{@SMRep@---storage manager representation} * * ************************************************************************ -} -- | A description of the layout of a closure. Corresponds directly -- to the closure types in includes\/rts\/storage\/ClosureTypes.h. 
data SMRep = HeapRep -- GC routines consult sizes in info tbl IsStatic !WordOff -- # ptr words !WordOff -- # non-ptr words INCLUDING SLOP (see mkHeapRep below) ClosureTypeInfo -- type-specific info | ArrayPtrsRep !WordOff -- # ptr words !WordOff -- # card table words | SmallArrayPtrsRep !WordOff -- # ptr words | ArrayWordsRep !WordOff -- # bytes expressed in words, rounded up | StackRep -- Stack frame (RET_SMALL or RET_BIG) Liveness | RTSRep -- The RTS needs to declare info tables with specific Int -- type tags, so this form lets us override the default SMRep -- tag for an SMRep. deriving (Eq, Ord) -- | True \<=> This is a static closure. Affects how we garbage-collect it. -- Static closure have an extra static link field at the end. -- Constructors do not have a static variant; see Note [static constructors] type IsStatic = Bool -- From an SMRep you can get to the closure type defined in -- rts/include/rts/storage/ClosureTypes.h. Described by the function -- rtsClosureType below. data ClosureTypeInfo = Constr ConTagZ ConstrDescription | Fun FunArity ArgDescr | Thunk | ThunkSelector SelectorOffset | BlackHole | IndStatic deriving (Eq, Ord) type ConstrDescription = ByteString -- result of dataConIdentity type FunArity = Int type SelectorOffset = Int -- | We represent liveness bitmaps as a Bitmap (whose internal representation -- really is a bitmap). These are pinned onto case return vectors to indicate -- the state of the stack for the garbage collector. -- -- In the compiled program, liveness bitmaps that fit inside a single word -- (StgWord) are stored as a single word, while larger bitmaps are stored as a -- pointer to an array of words. 
type Liveness = [Bool] -- One Bool per word; True <=> non-ptr or dead -- False <=> ptr -------------------------------------------------------------------------------- -- | An ArgDescr describes the argument pattern of a function data ArgDescr = ArgSpec -- Fits one of the standard patterns !Int -- RTS type identifier ARG_P, ARG_N, ... | ArgGen -- General case Liveness -- Details about the arguments | ArgUnknown -- For imported binds. -- Invariant: Never Unknown for binds of the module -- we are compiling. deriving (Eq, Ord) instance Outputable ArgDescr where ppr (ArgSpec n) = text "ArgSpec" <+> ppr n ppr (ArgGen ls) = text "ArgGen" <+> ppr ls ppr ArgUnknown = text "ArgUnknown" ----------------------------------------------------------------------------- -- Construction mkHeapRep :: Profile -> IsStatic -> WordOff -> WordOff -> ClosureTypeInfo -> SMRep mkHeapRep profile is_static ptr_wds nonptr_wds cl_type_info = HeapRep is_static ptr_wds (nonptr_wds + slop_wds) cl_type_info where slop_wds | is_static = 0 | otherwise = max 0 (minClosureSize profile - (hdr_size + payload_size)) hdr_size = closureTypeHdrSize profile cl_type_info payload_size = ptr_wds + nonptr_wds mkRTSRep :: Int -> SMRep -> SMRep mkRTSRep = RTSRep mkStackRep :: [Bool] -> SMRep mkStackRep liveness = StackRep liveness blackHoleRep :: SMRep blackHoleRep = HeapRep False 0 0 BlackHole indStaticRep :: SMRep indStaticRep = HeapRep True 1 0 IndStatic arrPtrsRep :: Platform -> WordOff -> SMRep arrPtrsRep platform elems = ArrayPtrsRep elems (cardTableSizeW platform elems) smallArrPtrsRep :: WordOff -> SMRep smallArrPtrsRep elems = SmallArrayPtrsRep elems arrWordsRep :: Platform -> ByteOff -> SMRep arrWordsRep platform bytes = ArrayWordsRep (bytesToWordsRoundUp platform bytes) ----------------------------------------------------------------------------- -- Predicates isStaticRep :: SMRep -> IsStatic isStaticRep (HeapRep is_static _ _ _) = is_static isStaticRep (RTSRep _ rep) = isStaticRep rep isStaticRep _ = 
False isStackRep :: SMRep -> Bool isStackRep StackRep{} = True isStackRep (RTSRep _ rep) = isStackRep rep isStackRep _ = False isConRep :: SMRep -> Bool isConRep (HeapRep _ _ _ Constr{}) = True isConRep _ = False isThunkRep :: SMRep -> Bool isThunkRep (HeapRep _ _ _ Thunk) = True isThunkRep (HeapRep _ _ _ ThunkSelector{}) = True isThunkRep (HeapRep _ _ _ BlackHole) = True isThunkRep (HeapRep _ _ _ IndStatic) = True isThunkRep _ = False isFunRep :: SMRep -> Bool isFunRep (HeapRep _ _ _ Fun{}) = True isFunRep _ = False isStaticNoCafCon :: SMRep -> Bool -- This should line up exactly with CONSTR_NOCAF below -- See Note [Static NoCaf constructors] isStaticNoCafCon (HeapRep _ 0 _ Constr{}) = True isStaticNoCafCon _ = False ----------------------------------------------------------------------------- -- Size-related things fixedHdrSize :: Profile -> ByteOff fixedHdrSize profile = wordsToBytes (profilePlatform profile) (fixedHdrSizeW profile) -- | Size of a closure header (StgHeader in includes\/rts\/storage\/Closures.h) fixedHdrSizeW :: Profile -> WordOff fixedHdrSizeW profile = pc_STD_HDR_SIZE (profileConstants profile) + profHdrSize profile -- | Size of the profiling part of a closure header -- (StgProfHeader in includes\/rts\/storage\/Closures.h) profHdrSize :: Profile -> WordOff profHdrSize profile = if profileIsProfiling profile then pc_PROF_HDR_SIZE (profileConstants profile) else 0 -- | The garbage collector requires that every closure is at least as -- big as this. 
minClosureSize :: Profile -> WordOff minClosureSize profile = fixedHdrSizeW profile + pc_MIN_PAYLOAD_SIZE (profileConstants profile) arrWordsHdrSize :: Profile -> ByteOff arrWordsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgArrBytes_NoHdr (profileConstants profile) arrWordsHdrSizeW :: Profile -> WordOff arrWordsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgArrBytes_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) arrPtrsHdrSize :: Profile -> ByteOff arrPtrsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgMutArrPtrs_NoHdr (profileConstants profile) arrPtrsHdrSizeW :: Profile -> WordOff arrPtrsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgMutArrPtrs_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) smallArrPtrsHdrSize :: Profile -> ByteOff smallArrPtrsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgSmallMutArrPtrs_NoHdr (profileConstants profile) smallArrPtrsHdrSizeW :: Profile -> WordOff smallArrPtrsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgSmallMutArrPtrs_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) -- Thunks have an extra header word on SMP, so the update doesn't -- splat the payload. 
thunkHdrSize :: Profile -> WordOff thunkHdrSize profile = fixedHdrSizeW profile + smp_hdr where platform = profilePlatform profile smp_hdr = pc_SIZEOF_StgSMPThunkHeader (platformConstants platform) `quot` platformWordSizeInBytes platform hdrSize :: Profile -> SMRep -> ByteOff hdrSize profile rep = wordsToBytes (profilePlatform profile) (hdrSizeW profile rep) hdrSizeW :: Profile -> SMRep -> WordOff hdrSizeW profile (HeapRep _ _ _ ty) = closureTypeHdrSize profile ty hdrSizeW profile (ArrayPtrsRep _ _) = arrPtrsHdrSizeW profile hdrSizeW profile (SmallArrayPtrsRep _) = smallArrPtrsHdrSizeW profile hdrSizeW profile (ArrayWordsRep _) = arrWordsHdrSizeW profile hdrSizeW _ _ = panic "GHC.Runtime.Heap.Layout.hdrSizeW" nonHdrSize :: Platform -> SMRep -> ByteOff nonHdrSize platform rep = wordsToBytes platform (nonHdrSizeW rep) nonHdrSizeW :: SMRep -> WordOff nonHdrSizeW (HeapRep _ p np _) = p + np nonHdrSizeW (ArrayPtrsRep elems ct) = elems + ct nonHdrSizeW (SmallArrayPtrsRep elems) = elems nonHdrSizeW (ArrayWordsRep words) = words nonHdrSizeW (StackRep bs) = length bs nonHdrSizeW (RTSRep _ rep) = nonHdrSizeW rep -- | The total size of the closure, in words. heapClosureSizeW :: Profile -> SMRep -> WordOff heapClosureSizeW profile rep = case rep of HeapRep _ p np ty -> closureTypeHdrSize profile ty + p + np ArrayPtrsRep elems ct -> arrPtrsHdrSizeW profile + elems + ct SmallArrayPtrsRep elems -> smallArrPtrsHdrSizeW profile + elems ArrayWordsRep words -> arrWordsHdrSizeW profile + words _ -> panic "GHC.Runtime.Heap.Layout.heapClosureSize" closureTypeHdrSize :: Profile -> ClosureTypeInfo -> WordOff closureTypeHdrSize profile ty = case ty of Thunk -> thunkHdrSize profile ThunkSelector{} -> thunkHdrSize profile BlackHole -> thunkHdrSize profile IndStatic -> thunkHdrSize profile _ -> fixedHdrSizeW profile -- All thunks use thunkHdrSize, even if they are non-updatable. -- this is because we don't have separate closure types for -- updatable vs. 
non-updatable thunks, so the GC can't tell the -- difference. If we ever have significant numbers of non- -- updatable thunks, it might be worth fixing this. -- --------------------------------------------------------------------------- -- Arrays -- | The byte offset into the card table of the card for a given element card :: Platform -> Int -> Int card platform i = i `shiftR` pc_MUT_ARR_PTRS_CARD_BITS (platformConstants platform) -- | Convert a number of elements to a number of cards, rounding up cardRoundUp :: Platform -> Int -> Int cardRoundUp platform i = card platform (i + ((1 `shiftL` pc_MUT_ARR_PTRS_CARD_BITS (platformConstants platform)) - 1)) -- | The size of a card table, in bytes cardTableSizeB :: Platform -> Int -> ByteOff cardTableSizeB platform elems = cardRoundUp platform elems -- | The size of a card table, in words cardTableSizeW :: Platform -> Int -> WordOff cardTableSizeW platform elems = bytesToWordsRoundUp platform (cardTableSizeB platform elems) ----------------------------------------------------------------------------- -- deriving the RTS closure type from an SMRep #include "ClosureTypes.h" #include "FunTypes.h" -- Defines CONSTR, CONSTR_1_0 etc -- | Derives the RTS closure type from an 'SMRep' rtsClosureType :: SMRep -> Int rtsClosureType rep = case rep of RTSRep ty _ -> ty -- See Note [static constructors] HeapRep _ 1 0 Constr{} -> CONSTR_1_0 HeapRep _ 0 1 Constr{} -> CONSTR_0_1 HeapRep _ 2 0 Constr{} -> CONSTR_2_0 HeapRep _ 1 1 Constr{} -> CONSTR_1_1 HeapRep _ 0 2 Constr{} -> CONSTR_0_2 HeapRep _ 0 _ Constr{} -> CONSTR_NOCAF -- See Note [Static NoCaf constructors] HeapRep _ _ _ Constr{} -> CONSTR HeapRep False 1 0 Fun{} -> FUN_1_0 HeapRep False 0 1 Fun{} -> FUN_0_1 HeapRep False 2 0 Fun{} -> FUN_2_0 HeapRep False 1 1 Fun{} -> FUN_1_1 HeapRep False 0 2 Fun{} -> FUN_0_2 HeapRep False _ _ Fun{} -> FUN HeapRep False 1 0 Thunk -> THUNK_1_0 HeapRep False 0 1 Thunk -> THUNK_0_1 HeapRep False 2 0 Thunk -> THUNK_2_0 HeapRep False 1 1 Thunk -> 
THUNK_1_1 HeapRep False 0 2 Thunk -> THUNK_0_2 HeapRep False _ _ Thunk -> THUNK HeapRep False _ _ ThunkSelector{} -> THUNK_SELECTOR HeapRep True _ _ Fun{} -> FUN_STATIC HeapRep True _ _ Thunk -> THUNK_STATIC HeapRep False _ _ BlackHole -> BLACKHOLE HeapRep False _ _ IndStatic -> IND_STATIC StackRep _ -> STACK _ -> panic "rtsClosureType" -- We export these ones rET_SMALL, rET_BIG, aRG_GEN, aRG_GEN_BIG :: Int rET_SMALL = RET_SMALL rET_BIG = RET_BIG aRG_GEN = ARG_GEN aRG_GEN_BIG = ARG_GEN_BIG {- Note [static constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to have a CONSTR_STATIC closure type, and each constructor had two info tables: one with CONSTR (or CONSTR_1_0 etc.), and one with CONSTR_STATIC. This distinction was removed, because when copying a data structure into a compact region, we must copy static constructors into the compact region too. If we didn't do this, we would need to track the references from the compact region out to the static constructors, because they might (indirectly) refer to CAFs. Since static constructors will be copied to the heap, if we wanted to use different info tables for static and dynamic constructors, we would have to switch the info pointer when copying the constructor into the compact region, which means we would need an extra field of the static info table to point to the dynamic one. However, since the distinction between static and dynamic closure types is never actually needed (other than for assertions), we can just drop the distinction and use the same info table for both. The GC *does* need to distinguish between static and dynamic closures, but it does this using the HEAP_ALLOCED() macro which checks whether the address of the closure resides within the dynamic heap. HEAP_ALLOCED() doesn't read the closure's info table. 
Note [Static NoCaf constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we know that a top-level binding 'x' is not Caffy (ie no CAFs are reachable from 'x'), then a statically allocated constructor (Just x) is also not Caffy, and the garbage collector need not follow its argument fields. Exploiting this would require two static info tables for Just, for the two cases where the argument was Caffy or non-Caffy. Currently we don't do this; instead we treat nullary constructors as non-Caffy, and the others as potentially Caffy. ************************************************************************ * * Pretty printing of SMRep and friends * * ************************************************************************ -} instance Outputable ClosureTypeInfo where ppr = pprTypeInfo instance Outputable SMRep where ppr (HeapRep static ps nps tyinfo) = hang (header <+> lbrace) 2 (ppr tyinfo <+> rbrace) where header = text "HeapRep" <+> if static then text "static" else empty <+> pp_n "ptrs" ps <+> pp_n "nonptrs" nps pp_n :: String -> Int -> SDoc pp_n _ 0 = empty pp_n s n = int n <+> text s ppr (ArrayPtrsRep size _) = text "ArrayPtrsRep" <+> ppr size ppr (SmallArrayPtrsRep size) = text "SmallArrayPtrsRep" <+> ppr size ppr (ArrayWordsRep words) = text "ArrayWordsRep" <+> ppr words ppr (StackRep bs) = text "StackRep" <+> ppr bs ppr (RTSRep ty rep) = text "tag:" <> ppr ty <+> ppr rep pprTypeInfo :: ClosureTypeInfo -> SDoc pprTypeInfo (Constr tag descr) = text "Con" <+> braces (sep [ text "tag:" <+> ppr tag , text "descr:" <> text (show descr) ]) pprTypeInfo (Fun arity args) = text "Fun" <+> braces (sep [ text "arity:" <+> ppr arity , text "fun_type:" <+> ppr args ]) pprTypeInfo (ThunkSelector offset) = text "ThunkSel" <+> ppr offset pprTypeInfo Thunk = text "Thunk" pprTypeInfo BlackHole = text "BlackHole" pprTypeInfo IndStatic = text "IndStatic" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Interpreter/0000755000000000000000000000000007346545000021634 
5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Runtime/Interpreter/Types.hs0000644000000000000000000001355607346545000023306 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Types used by the runtime interpreter module GHC.Runtime.Interpreter.Types ( Interp(..) , InterpInstance(..) , InterpProcess (..) , ExtInterp (..) , ExtInterpStatusVar , ExtInterpInstance (..) , ExtInterpState (..) , InterpStatus(..) -- * IServ , IServ , IServConfig(..) -- * JSInterp , JSInterp , JSInterpExtra (..) , JSInterpConfig (..) , JSState (..) , NodeJsSettings (..) , defaultNodeJsSettings , WasmInterp , WasmInterpConfig (..) ) where import GHC.Prelude import GHC.Linker.Types import GHCi.RemoteTypes import GHCi.Message ( Pipe ) import GHC.Types.Unique.FM import GHC.Data.FastString ( FastString ) import Foreign import GHC.Platform import GHC.Utils.TmpFs import GHC.Utils.Logger import GHC.Unit.Env import GHC.Unit.State import GHC.Unit.Types import GHC.StgToJS.Types import GHC.StgToJS.Linker.Types import Control.Concurrent import System.Process ( ProcessHandle, CreateProcess ) import System.IO import GHC.Unit.Finder.Types (FinderCache, FinderOpts) -- | Interpreter data Interp = Interp { interpInstance :: !InterpInstance -- ^ Interpreter instance (internal, external) , interpLoader :: !Loader -- ^ Interpreter loader , interpLookupSymbolCache :: !(MVar (UniqFM FastString (Ptr ()))) -- ^ LookupSymbol cache } data InterpInstance = ExternalInterp !ExtInterp -- ^ External interpreter #if defined(HAVE_INTERNAL_INTERPRETER) | InternalInterp -- ^ Internal interpreter #endif data ExtInterp = ExtIServ !IServ | ExtJS !JSInterp | ExtWasm !WasmInterp -- | External interpreter -- -- The external interpreter is spawned lazily (on first use) to avoid slowing -- down sessions that don't require it. The contents of the MVar reflects the -- state of the interpreter (running or not). 
data ExtInterpState cfg details = ExtInterpState { interpConfig :: !cfg , interpStatus :: !(ExtInterpStatusVar details) } type ExtInterpStatusVar d = MVar (InterpStatus (ExtInterpInstance d)) type IServ = ExtInterpState IServConfig () type JSInterp = ExtInterpState JSInterpConfig JSInterpExtra type WasmInterp = ExtInterpState WasmInterpConfig () data InterpProcess = InterpProcess { interpPipe :: !Pipe -- ^ Pipe to communicate with the server , interpHandle :: !ProcessHandle -- ^ Process handle of the server , interpLock :: !(MVar ()) -- ^ Lock to prevent concurrent access to the stream } -- | Status of an external interpreter data InterpStatus inst = InterpPending -- ^ Not spawned yet | InterpRunning !inst -- ^ Running -- | Configuration needed to spawn an external interpreter data IServConfig = IServConfig { iservConfProgram :: !String -- ^ External program to run , iservConfOpts :: ![String] -- ^ Command-line options , iservConfProfiled :: !Bool -- ^ Use Profiling way , iservConfDynamic :: !Bool -- ^ Use Dynamic way , iservConfHook :: !(Maybe (CreateProcess -> IO ProcessHandle)) -- ^ Hook , iservConfTrace :: IO () -- ^ Trace action executed after spawn } -- | Common field between native external interpreter and the JS one data ExtInterpInstance c = ExtInterpInstance { instProcess :: {-# UNPACK #-} !InterpProcess -- ^ External interpreter process and its pipe (communication channel) , instPendingFrees :: !(MVar [HValueRef]) -- ^ Values that need to be freed before the next command is sent. -- Finalizers for ForeignRefs can append values to this list -- asynchronously. 
, instExtra :: !c -- ^ Instance specific extra fields } ------------------------ -- JS Stuff ------------------------ data JSInterpExtra = JSInterpExtra { instStdIn :: !Handle -- ^ Stdin for the process , instFinderCache :: !FinderCache , instFinderOpts :: !FinderOpts , instJSState :: !(MVar JSState) -- ^ Mutable state , instGhciUnitId :: !UnitId -- ^ GHCi unit-id } data JSState = JSState { jsLinkState :: !LinkPlan -- ^ Linker state of the interpreter , jsServerStarted :: !Bool -- ^ Is the Haskell server started? } -- | NodeJs configuration data NodeJsSettings = NodeJsSettings { nodeProgram :: FilePath -- ^ location of node.js program , nodePath :: Maybe FilePath -- ^ value of NODE_PATH environment variable (search path for Node modules; GHCJS used to provide some) , nodeExtraArgs :: [String] -- ^ extra arguments to pass to node.js , nodeKeepAliveMaxMem :: Integer -- ^ keep node.js (TH, GHCJSi) processes alive if they don't use more than this } defaultNodeJsSettings :: NodeJsSettings defaultNodeJsSettings = NodeJsSettings { nodeProgram = "node" , nodePath = Nothing , nodeExtraArgs = [] , nodeKeepAliveMaxMem = 536870912 } data JSInterpConfig = JSInterpConfig { jsInterpNodeConfig :: !NodeJsSettings -- ^ NodeJS settings , jsInterpScript :: !FilePath -- ^ Path to "ghc-interp.js" script , jsInterpTmpFs :: !TmpFs , jsInterpTmpDir :: !TempDir , jsInterpLogger :: !Logger , jsInterpCodegenCfg :: !StgToJSConfig , jsInterpUnitEnv :: !UnitEnv , jsInterpFinderOpts :: !FinderOpts , jsInterpFinderCache :: !FinderCache } ------------------------ -- Wasm Stuff ------------------------ data WasmInterpConfig = WasmInterpConfig { wasmInterpDyLD :: !FilePath -- ^ Location of dyld.mjs script , wasmInterpLibDir :: FilePath -- ^ wasi-sdk sysroot libdir containing libc.so, etc , wasmInterpOpts :: ![String] -- ^ Additional command line arguments for iserv , wasmInterpTargetPlatform :: !Platform , wasmInterpProfiled :: !Bool -- ^ Are we profiling yet? 
, wasmInterpHsSoSuffix :: !String -- ^ Shared lib filename common suffix sans .so, e.g. p-ghc9.13.20241001 , wasmInterpUnitState :: !UnitState } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Settings.hs0000644000000000000000000002606707346545000020055 0ustar0000000000000000 -- | Run-time settings module GHC.Settings ( Settings (..) , ToolSettings (..) , FileSettings (..) , GhcNameVersion (..) , Platform (..) , PlatformMisc (..) -- * Accessors , dynLibSuffix , sProgramName , sProjectVersion , sGhcUsagePath , sGhciUsagePath , sToolDir , sTopDir , sGlobalPackageDatabasePath , sLdSupportsCompactUnwind , sLdSupportsFilelist , sMergeObjsSupportsResponseFiles , sLdIsGnuLd , sGccSupportsNoPie , sUseInplaceMinGW , sArSupportsDashL , sPgm_L , sPgm_P , sPgm_JSP , sPgm_CmmP , sPgm_F , sPgm_c , sPgm_cxx , sPgm_cpp , sPgm_a , sPgm_l , sPgm_lm , sPgm_windres , sPgm_ar , sPgm_otool , sPgm_install_name_tool , sPgm_ranlib , sPgm_lo , sPgm_lc , sPgm_las , sPgm_i , sOpt_L , sOpt_P , sOpt_P_fingerprint , sOpt_JSP , sOpt_JSP_fingerprint , sOpt_CmmP , sOpt_CmmP_fingerprint , sOpt_F , sOpt_c , sOpt_cxx , sOpt_a , sOpt_l , sOpt_lm , sOpt_windres , sOpt_lo , sOpt_lc , sOpt_i , sExtraGccViaCFlags , sTargetPlatformString , sGhcWithInterpreter , sLibFFI , sTargetRTSLinkerOnlySupportsSharedLibs ) where import GHC.Prelude import GHC.Utils.CliOption import GHC.Utils.Fingerprint import GHC.Platform data Settings = Settings { sGhcNameVersion :: {-# UNPACk #-} !GhcNameVersion , sFileSettings :: {-# UNPACK #-} !FileSettings , sTargetPlatform :: Platform -- Filled in by SysTools , sToolSettings :: {-# UNPACK #-} !ToolSettings , sPlatformMisc :: {-# UNPACK #-} !PlatformMisc -- You shouldn't need to look things up in rawSettings directly. -- They should have their own fields instead. , sRawSettings :: [(String, String)] } -- | Settings for other executables GHC calls. -- -- Probably should further split down by phase, or split between -- platform-specific and platform-agnostic. 
data ToolSettings = ToolSettings { toolSettings_ldSupportsCompactUnwind :: Bool , toolSettings_ldSupportsFilelist :: Bool , toolSettings_ldSupportsSingleModule :: Bool , toolSettings_mergeObjsSupportsResponseFiles :: Bool , toolSettings_ldIsGnuLd :: Bool , toolSettings_ccSupportsNoPie :: Bool , toolSettings_useInplaceMinGW :: Bool , toolSettings_arSupportsDashL :: Bool , toolSettings_cmmCppSupportsG0 :: Bool -- commands for particular phases , toolSettings_pgm_L :: String , -- | The Haskell C preprocessor and default options (not added by -optP) toolSettings_pgm_P :: (String, [Option]) , -- | The JavaScript C preprocessor and default options (not added by -optP) toolSettings_pgm_JSP :: (String, [Option]) , -- | The C-- C Preprocessor and default options (not added by -optP) toolSettings_pgm_CmmP :: (String, [Option]) , toolSettings_pgm_F :: String , toolSettings_pgm_c :: String , toolSettings_pgm_cxx :: String , -- | The C preprocessor (distinct from the Haskell C preprocessor!) toolSettings_pgm_cpp :: (String, [Option]) , toolSettings_pgm_a :: (String, [Option]) , toolSettings_pgm_l :: (String, [Option]) , toolSettings_pgm_lm :: Maybe (String, [Option]) -- ^ N.B. On Windows we don't have a linker which supports object -- merging, hence the 'Maybe'. See Note [Object merging] in -- "GHC.Driver.Pipeline.Execute" for details. 
, toolSettings_pgm_windres :: String , toolSettings_pgm_ar :: String , toolSettings_pgm_otool :: String , toolSettings_pgm_install_name_tool :: String , toolSettings_pgm_ranlib :: String , -- | LLVM: opt llvm optimiser toolSettings_pgm_lo :: (String, [Option]) , -- | LLVM: llc static compiler toolSettings_pgm_lc :: (String, [Option]) -- | LLVM: assembler , toolSettings_pgm_las :: (String, [Option]) , toolSettings_pgm_i :: String -- options for particular phases , toolSettings_opt_L :: [String] , toolSettings_opt_P :: [String] , toolSettings_opt_JSP :: [String] , toolSettings_opt_CmmP :: [String] , -- | cached Fingerprint of sOpt_P -- See Note [Repeated -optP hashing] toolSettings_opt_P_fingerprint :: Fingerprint , -- | cached Fingerprint of sOpt_JSP -- See Note [Repeated -optP hashing] toolSettings_opt_JSP_fingerprint :: Fingerprint , -- | cached Fingerprint of sOpt_CmmP -- See Note [Repeated -optP hashing] toolSettings_opt_CmmP_fingerprint :: Fingerprint , toolSettings_opt_F :: [String] , toolSettings_opt_c :: [String] , toolSettings_opt_cxx :: [String] , toolSettings_opt_a :: [String] , toolSettings_opt_l :: [String] , toolSettings_opt_lm :: [String] , toolSettings_opt_windres :: [String] , -- | LLVM: llvm optimiser toolSettings_opt_lo :: [String] , -- | LLVM: llc static compiler toolSettings_opt_lc :: [String] , toolSettings_opt_las :: [String] , -- | iserv options toolSettings_opt_i :: [String] , toolSettings_extraGccViaCFlags :: [String] } -- | Paths to various files and directories used by GHC, including those that -- provide more settings. data FileSettings = FileSettings { fileSettings_ghcUsagePath :: FilePath -- ditto , fileSettings_ghciUsagePath :: FilePath -- ditto , fileSettings_toolDir :: Maybe FilePath -- ditto , fileSettings_topDir :: FilePath -- ditto , fileSettings_globalPackageDatabase :: FilePath } -- | Settings for what GHC this is. 
data GhcNameVersion = GhcNameVersion { ghcNameVersion_programName :: String , ghcNameVersion_projectVersion :: String } -- | Dynamic library suffix dynLibSuffix :: GhcNameVersion -> String dynLibSuffix (GhcNameVersion name ver) = '-':name ++ ver ----------------------------------------------------------------------------- -- Accessors from 'Settings' sProgramName :: Settings -> String sProgramName = ghcNameVersion_programName . sGhcNameVersion sProjectVersion :: Settings -> String sProjectVersion = ghcNameVersion_projectVersion . sGhcNameVersion sGhcUsagePath :: Settings -> FilePath sGhcUsagePath = fileSettings_ghcUsagePath . sFileSettings sGhciUsagePath :: Settings -> FilePath sGhciUsagePath = fileSettings_ghciUsagePath . sFileSettings sToolDir :: Settings -> Maybe FilePath sToolDir = fileSettings_toolDir . sFileSettings sTopDir :: Settings -> FilePath sTopDir = fileSettings_topDir . sFileSettings sGlobalPackageDatabasePath :: Settings -> FilePath sGlobalPackageDatabasePath = fileSettings_globalPackageDatabase . sFileSettings sLdSupportsCompactUnwind :: Settings -> Bool sLdSupportsCompactUnwind = toolSettings_ldSupportsCompactUnwind . sToolSettings sLdSupportsFilelist :: Settings -> Bool sLdSupportsFilelist = toolSettings_ldSupportsFilelist . sToolSettings sMergeObjsSupportsResponseFiles :: Settings -> Bool sMergeObjsSupportsResponseFiles = toolSettings_mergeObjsSupportsResponseFiles . sToolSettings sLdIsGnuLd :: Settings -> Bool sLdIsGnuLd = toolSettings_ldIsGnuLd . sToolSettings sGccSupportsNoPie :: Settings -> Bool sGccSupportsNoPie = toolSettings_ccSupportsNoPie . sToolSettings sUseInplaceMinGW :: Settings -> Bool sUseInplaceMinGW = toolSettings_useInplaceMinGW . sToolSettings sArSupportsDashL :: Settings -> Bool sArSupportsDashL = toolSettings_arSupportsDashL . sToolSettings sPgm_L :: Settings -> String sPgm_L = toolSettings_pgm_L . sToolSettings sPgm_P :: Settings -> (String, [Option]) sPgm_P = toolSettings_pgm_P . 
sToolSettings sPgm_JSP :: Settings -> (String, [Option]) sPgm_JSP = toolSettings_pgm_JSP . sToolSettings sPgm_CmmP :: Settings -> (String, [Option]) sPgm_CmmP = toolSettings_pgm_CmmP . sToolSettings sPgm_F :: Settings -> String sPgm_F = toolSettings_pgm_F . sToolSettings sPgm_c :: Settings -> String sPgm_c = toolSettings_pgm_c . sToolSettings sPgm_cxx :: Settings -> String sPgm_cxx = toolSettings_pgm_cxx . sToolSettings sPgm_cpp :: Settings -> (String, [Option]) sPgm_cpp = toolSettings_pgm_cpp . sToolSettings sPgm_a :: Settings -> (String, [Option]) sPgm_a = toolSettings_pgm_a . sToolSettings sPgm_l :: Settings -> (String, [Option]) sPgm_l = toolSettings_pgm_l . sToolSettings sPgm_lm :: Settings -> Maybe (String, [Option]) sPgm_lm = toolSettings_pgm_lm . sToolSettings sPgm_windres :: Settings -> String sPgm_windres = toolSettings_pgm_windres . sToolSettings sPgm_ar :: Settings -> String sPgm_ar = toolSettings_pgm_ar . sToolSettings sPgm_otool :: Settings -> String sPgm_otool = toolSettings_pgm_otool . sToolSettings sPgm_install_name_tool :: Settings -> String sPgm_install_name_tool = toolSettings_pgm_install_name_tool . sToolSettings sPgm_ranlib :: Settings -> String sPgm_ranlib = toolSettings_pgm_ranlib . sToolSettings sPgm_lo :: Settings -> (String, [Option]) sPgm_lo = toolSettings_pgm_lo . sToolSettings sPgm_lc :: Settings -> (String, [Option]) sPgm_lc = toolSettings_pgm_lc . sToolSettings sPgm_las :: Settings -> (String, [Option]) sPgm_las = toolSettings_pgm_las . sToolSettings sPgm_i :: Settings -> String sPgm_i = toolSettings_pgm_i . sToolSettings sOpt_L :: Settings -> [String] sOpt_L = toolSettings_opt_L . sToolSettings sOpt_P :: Settings -> [String] sOpt_P = toolSettings_opt_P . sToolSettings sOpt_P_fingerprint :: Settings -> Fingerprint sOpt_P_fingerprint = toolSettings_opt_P_fingerprint . sToolSettings sOpt_JSP :: Settings -> [String] sOpt_JSP = toolSettings_opt_JSP . 
sToolSettings sOpt_JSP_fingerprint :: Settings -> Fingerprint sOpt_JSP_fingerprint = toolSettings_opt_JSP_fingerprint . sToolSettings sOpt_CmmP :: Settings -> [String] sOpt_CmmP = toolSettings_opt_CmmP . sToolSettings sOpt_CmmP_fingerprint :: Settings -> Fingerprint sOpt_CmmP_fingerprint = toolSettings_opt_CmmP_fingerprint . sToolSettings sOpt_F :: Settings -> [String] sOpt_F = toolSettings_opt_F . sToolSettings sOpt_c :: Settings -> [String] sOpt_c = toolSettings_opt_c . sToolSettings sOpt_cxx :: Settings -> [String] sOpt_cxx = toolSettings_opt_cxx . sToolSettings sOpt_a :: Settings -> [String] sOpt_a = toolSettings_opt_a . sToolSettings sOpt_l :: Settings -> [String] sOpt_l = toolSettings_opt_l . sToolSettings sOpt_lm :: Settings -> [String] sOpt_lm = toolSettings_opt_lm . sToolSettings sOpt_windres :: Settings -> [String] sOpt_windres = toolSettings_opt_windres . sToolSettings sOpt_lo :: Settings -> [String] sOpt_lo = toolSettings_opt_lo . sToolSettings sOpt_lc :: Settings -> [String] sOpt_lc = toolSettings_opt_lc . sToolSettings sOpt_i :: Settings -> [String] sOpt_i = toolSettings_opt_i . sToolSettings sExtraGccViaCFlags :: Settings -> [String] sExtraGccViaCFlags = toolSettings_extraGccViaCFlags . sToolSettings sTargetPlatformString :: Settings -> String sTargetPlatformString = platformMisc_targetPlatformString . sPlatformMisc sGhcWithInterpreter :: Settings -> Bool sGhcWithInterpreter = platformMisc_ghcWithInterpreter . sPlatformMisc sLibFFI :: Settings -> Bool sLibFFI = platformMisc_libFFI . sPlatformMisc sTargetRTSLinkerOnlySupportsSharedLibs :: Settings -> Bool sTargetRTSLinkerOnlySupportsSharedLibs = platformMisc_targetRTSLinkerOnlySupportsSharedLibs . 
sPlatformMisc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Settings/0000755000000000000000000000000007346545000017506 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Settings/Constants.hs0000644000000000000000000000355107346545000022022 0ustar0000000000000000-- | Compile-time settings module GHC.Settings.Constants where import GHC.Prelude import GHC.Settings.Config hiVersion :: Integer hiVersion = read (cProjectVersionInt ++ cProjectPatchLevel) :: Integer -- All pretty arbitrary: mAX_TUPLE_SIZE :: Int mAX_TUPLE_SIZE = 64 -- Should really match the number -- of decls in GHC.Tuple mAX_CTUPLE_SIZE :: Int -- Constraint tuples mAX_CTUPLE_SIZE = 64 -- Should match the number of decls in GHC.Classes mAX_SUM_SIZE :: Int -- We use 6 bits to record sum size, mAX_SUM_SIZE = 63 -- so max sum size is 63. Sadly inconsistent. -- | Default maximum depth for both class instance search and type family -- reduction. See also #5395. mAX_REDUCTION_DEPTH :: Int mAX_REDUCTION_DEPTH = 200 -- | Default maximum constraint-solver iterations -- Typically there should be very few mAX_SOLVER_ITERATIONS :: Int mAX_SOLVER_ITERATIONS = 4 -- | In case of loopy quantified constraints constraints, -- how many times should we allow superclass expansions -- Should be less than mAX_SOLVER_ITERATIONS -- See Note [Expanding Recursive Superclasses and ExpansionFuel] mAX_QC_FUEL :: Int mAX_QC_FUEL = 3 -- | In case of loopy wanted constraints, -- how many times should we allow superclass expansions -- Should be less than mAX_GIVENS_FUEL -- See Note [Expanding Recursive Superclasses and ExpansionFuel] mAX_WANTEDS_FUEL :: Int mAX_WANTEDS_FUEL = 1 -- | In case of loopy given constraints, -- how many times should we allow superclass expansions -- Should be less than max_SOLVER_ITERATIONS -- See Note [Expanding Recursive Superclasses and ExpansionFuel] mAX_GIVENS_FUEL :: Int mAX_GIVENS_FUEL = 3 wORD64_SIZE :: Int wORD64_SIZE = 8 -- Size of float in bytes. 
fLOAT_SIZE :: Int fLOAT_SIZE = 4 -- Size of double in bytes. dOUBLE_SIZE :: Int dOUBLE_SIZE = 8 tARGET_MAX_CHAR :: Int tARGET_MAX_CHAR = 0x10ffff ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/InferTags/0000755000000000000000000000000007346545000020325 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/InferTags/TagSig.hs0000644000000000000000000000522707346545000022045 0ustar0000000000000000{-# LANGUAGE TypeFamilies, DataKinds, GADTs, FlexibleInstances #-} {-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} -- We export this type from this module instead of GHC.Stg.InferTags.Types -- because it's used by more than the analysis itself. For example in interface -- files where we record a tag signature for bindings. -- By putting the sig into its own module we can avoid module loops. module GHC.Stg.InferTags.TagSig where import GHC.Prelude import GHC.Types.Var import GHC.Types.Name.Env( NameEnv ) import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Utils.Panic.Plain import Data.Coerce -- | Information to be exposed in interface files which is produced -- by the stg2stg passes. type StgCgInfos = NameEnv TagSig newtype TagSig -- The signature for each binding, this is a newtype as we might -- want to track more information in the future. = TagSig TagInfo deriving (Eq) data TagInfo = TagDunno -- We don't know anything about the tag. | TagTuple [TagInfo] -- Represents a function/thunk which when evaluated -- will return a Unboxed tuple whos components have -- the given TagInfos. | TagProper -- Heap pointer to properly-tagged value | TagTagged -- Bottom of the domain. 
deriving (Eq) instance Outputable TagInfo where ppr TagTagged = text "TagTagged" ppr TagDunno = text "TagDunno" ppr TagProper = text "TagProper" ppr (TagTuple tis) = text "TagTuple" <> brackets (pprWithCommas ppr tis) instance Binary TagInfo where put_ bh TagDunno = putByte bh 1 put_ bh (TagTuple flds) = putByte bh 2 >> put_ bh flds put_ bh TagProper = putByte bh 3 put_ bh TagTagged = putByte bh 4 get bh = do tag <- getByte bh case tag of 1 -> return TagDunno 2 -> TagTuple <$> get bh 3 -> return TagProper 4 -> return TagTagged _ -> panic ("get TagInfo " ++ show tag) instance Outputable TagSig where ppr (TagSig ti) = char '<' <> ppr ti <> char '>' instance OutputableBndr (Id,TagSig) where pprInfixOcc = ppr pprPrefixOcc = ppr instance Binary TagSig where put_ bh (TagSig sig) = put_ bh sig get bh = pure TagSig <*> get bh isTaggedSig :: TagSig -> Bool isTaggedSig (TagSig TagProper) = True isTaggedSig (TagSig TagTagged) = True isTaggedSig _ = False seqTagSig :: TagSig -> () seqTagSig = coerce seqTagInfo seqTagInfo :: TagInfo -> () seqTagInfo TagTagged = () seqTagInfo TagDunno = () seqTagInfo TagProper = () seqTagInfo (TagTuple tis) = foldl' (\_unit sig -> seqTagSig (coerce sig)) () tis ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/Lift/0000755000000000000000000000000007346545000017341 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/Lift/Types.hs0000644000000000000000000000530707346545000021006 0ustar0000000000000000{-# LANGUAGE TypeFamilies, DataKinds, GADTs, FlexibleInstances #-} {-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} -- This module declares some basic types used by GHC.Stg.Lift -- We can import this module into GHC.Stg.Syntax, where the -- type instance declarations for BinderP etc live module GHC.Stg.Lift.Types( Skeleton(..), bothSk, altSk, rhsSk, BinderInfo(..), binderInfoBndr, binderInfoOccursAsArg ) where import GHC.Prelude import GHC.Types.Id import GHC.Types.Demand import GHC.Types.Var.Set import GHC.Utils.Outputable -- | 
Captures details of the syntax tree relevant to the cost model, such as -- closures, multi-shot lambdas and case expressions. data Skeleton = ClosureSk !Id !DIdSet {- ^ free vars -} !Skeleton | RhsSk !Card {- ^ how often the RHS was entered -} !Skeleton | AltSk !Skeleton !Skeleton | BothSk !Skeleton !Skeleton | NilSk bothSk :: Skeleton -> Skeleton -> Skeleton bothSk NilSk b = b bothSk a NilSk = a bothSk a b = BothSk a b altSk :: Skeleton -> Skeleton -> Skeleton altSk NilSk b = b altSk a NilSk = a altSk a b = AltSk a b rhsSk :: Card -> Skeleton -> Skeleton rhsSk _ NilSk = NilSk rhsSk body_dmd skel = RhsSk body_dmd skel -- | The type used in binder positions in 'GenStgExpr's. data BinderInfo = BindsClosure !Id !Bool -- ^ Let(-no-escape)-bound thing with a flag -- indicating whether it occurs as an argument -- or in a nullary application -- (see "GHC.Stg.Lift.Analysis#arg_occs"). | BoringBinder !Id -- ^ Every other kind of binder -- | Gets the bound 'Id' out a 'BinderInfo'. binderInfoBndr :: BinderInfo -> Id binderInfoBndr (BoringBinder bndr) = bndr binderInfoBndr (BindsClosure bndr _) = bndr -- | Returns 'Nothing' for 'BoringBinder's and 'Just' the flag indicating -- occurrences as argument or in a nullary applications otherwise. binderInfoOccursAsArg :: BinderInfo -> Maybe Bool binderInfoOccursAsArg BoringBinder{} = Nothing binderInfoOccursAsArg (BindsClosure _ b) = Just b instance Outputable Skeleton where ppr NilSk = text "" ppr (AltSk l r) = vcat [ text "{ " <+> ppr l , text "ALT" , text " " <+> ppr r , text "}" ] ppr (BothSk l r) = ppr l $$ ppr r ppr (ClosureSk f fvs body) = ppr f <+> ppr fvs $$ nest 2 (ppr body) ppr (RhsSk card body) = hcat [ lambda , ppr card , dot , ppr body ] instance Outputable BinderInfo where ppr = ppr . binderInfoBndr instance OutputableBndr BinderInfo where pprBndr b = pprBndr b . binderInfoBndr pprPrefixOcc = pprPrefixOcc . binderInfoBndr pprInfixOcc = pprInfixOcc . binderInfoBndr bndrIsJoin_maybe = bndrIsJoin_maybe . 
binderInfoBndr ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/0000755000000000000000000000000007346545000016443 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Stg/Syntax.hs0000644000000000000000000010313107346545000020264 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Shared term graph (STG) syntax for spineless-tagless code generation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This data type represents programs just before code generation (conversion to @Cmm@): basically, what we have is a stylised form of Core syntax, the style being one that happens to be ideally suited to spineless tagless code generation. -} module GHC.Stg.Syntax ( StgArg(..), GenStgTopBinding(..), GenStgBinding(..), GenStgExpr(..), GenStgRhs(..), GenStgAlt(..), AltType(..), StgPass(..), BinderP, XRhsClosure, XLet, XLetNoEscape, NoExtFieldSilent, noExtFieldSilent, OutputablePass, UpdateFlag(..), isUpdatable, ConstructorNumber(..), -- a set of synonyms for the vanilla parameterisation StgTopBinding, StgBinding, StgExpr, StgRhs, StgAlt, -- a set of synonyms for the code gen parameterisation CgStgTopBinding, CgStgBinding, CgStgExpr, CgStgRhs, CgStgAlt, -- Same for taggedness TgStgTopBinding, TgStgBinding, TgStgExpr, TgStgRhs, TgStgAlt, -- a set of synonyms for the lambda lifting parameterisation LlStgTopBinding, LlStgBinding, LlStgExpr, LlStgRhs, LlStgAlt, -- a set of synonyms to distinguish in- and out variants InStgArg, InStgTopBinding, InStgBinding, InStgExpr, InStgRhs, InStgAlt, OutStgArg, OutStgTopBinding, OutStgBinding, OutStgExpr, OutStgRhs, OutStgAlt, -- StgOp StgOp(..), -- utils stgRhsArity, freeVarsOfRhs, stgArgType, stgArgRep, stgArgRep1, stgArgRepU, 
stgArgRep_maybe, stgCaseBndrInScope, -- ppr StgPprOpts(..), panicStgPprOpts, shortStgPprOpts, pprStgArg, pprStgExpr, pprStgRhs, pprStgBinding, pprStgAlt, pprGenStgTopBinding, pprStgTopBinding, pprGenStgTopBindings, pprStgTopBindings ) where import GHC.Prelude import GHC.Stg.InferTags.TagSig( TagSig ) import GHC.Stg.Lift.Types -- To avoid having an orphan instances for BinderP, XLet etc import GHC.Types.CostCentre ( CostCentreStack ) import GHC.Core ( AltCon ) import GHC.Core.DataCon import GHC.Core.TyCon ( PrimRep(..), PrimOrVoidRep(..), TyCon ) import GHC.Core.Type ( Type ) import GHC.Core.Ppr( {- instances -} ) import GHC.Types.ForeignCall ( ForeignCall ) import GHC.Types.Id import GHC.Types.Tickish ( StgTickish ) import GHC.Types.Var.Set import GHC.Types.Literal ( Literal, literalType ) import GHC.Types.RepType ( typePrimRep, typePrimRep1, typePrimRepU, typePrimRep_maybe ) import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import GHC.Builtin.PrimOps ( PrimOp, PrimCall ) import Data.ByteString ( ByteString ) import Data.Data ( Data ) import Data.List ( intersperse ) {- ************************************************************************ * * GenStgBinding * * ************************************************************************ As usual, expressions are interesting; other things are boring. Here are the boring things (except note the @GenStgRhs@), parameterised with respect to binder and occurrence information (just as in @GHC.Core@): -} -- | A top-level binding. 
data GenStgTopBinding pass -- See Note [Core top-level string literals] = StgTopLifted (GenStgBinding pass) | StgTopStringLit Id ByteString data GenStgBinding pass = StgNonRec (BinderP pass) (GenStgRhs pass) | StgRec [(BinderP pass, GenStgRhs pass)] {- ************************************************************************ * * StgArg * * ************************************************************************ -} data StgArg = StgVarArg Id | StgLitArg Literal -- | Type of an @StgArg@ -- -- Very half baked because we have lost the type arguments. -- -- This function should be avoided: in STG we aren't supposed to -- look at types, but only PrimReps. -- Use 'stgArgRep', 'stgArgRep_maybe', 'stgArgRep1' instaed. stgArgType :: StgArg -> Type stgArgType (StgVarArg v) = idType v stgArgType (StgLitArg lit) = literalType lit stgArgRep :: StgArg -> [PrimRep] stgArgRep ty = typePrimRep (stgArgType ty) stgArgRep_maybe :: StgArg -> Maybe [PrimRep] stgArgRep_maybe ty = typePrimRep_maybe (stgArgType ty) -- | Assumes that the argument has at most one PrimRep, which holds after unarisation. -- See Note [Post-unarisation invariants] in GHC.Stg.Unarise. -- See Note [VoidRep] in GHC.Types.RepType. stgArgRep1 :: StgArg -> PrimOrVoidRep stgArgRep1 ty = typePrimRep1 (stgArgType ty) -- | Assumes that the argument has exactly one PrimRep. -- See Note [VoidRep] in GHC.Types.RepType. stgArgRepU :: StgArg -> PrimRep stgArgRepU ty = typePrimRepU (stgArgType ty) -- | Given an alt type and whether the program is unarised, return whether the -- case binder is in scope. -- -- Case binders of unboxed tuple or unboxed sum type always dead after the -- unariser has run. See Note [Post-unarisation invariants] in GHC.Stg.Unarise. stgCaseBndrInScope :: AltType -> Bool {- ^ unarised? 
-} -> Bool stgCaseBndrInScope alt_ty unarised = case alt_ty of AlgAlt _ -> True PrimAlt _ -> True MultiValAlt _ -> not unarised PolyAlt -> True {- ************************************************************************ * * STG expressions * * ************************************************************************ The @GenStgExpr@ data type is parameterised on binder and occurrence info, as before. ************************************************************************ * * GenStgExpr * * ************************************************************************ An application is of a function to a list of atoms (not expressions). Operationally, we want to push the arguments on the stack and call the function. (If the arguments were expressions, we would have to build their closures first.) There is no constructor for a lone variable; it would appear as @StgApp var []@. -} data GenStgExpr pass = StgApp Id -- function [StgArg] -- arguments; may be empty {- ************************************************************************ * * StgConApp and StgPrimApp --- saturated applications * * ************************************************************************ There are specialised forms of application, for constructors, primitives, and literals. Note [Constructor applications in STG] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After the unarisation pass: * In `StgConApp` and `StgRhsCon` and `StgAlt` we filter out the void arguments, leaving only non-void ones. * In `StgApp` and `StgOpApp` we retain void arguments. We can do this because we know that `StgConApp` and `StgRhsCon` are saturated applications, so we lose no information by dropping those void args. In contrast, in `StgApp` we need the void argument to compare the number of args in the call with the arity of the function. This is an open design choice. We could instead choose to treat all these applications consistently (keeping the void args). 
But for some reason we don't, and this Note simply documents that design choice. As an example, consider: data T a = MkT !Int a Void# The wrapper's representation and the worker's representation (i.e. the datacon's Core representation) are respectively: $WMkT :: Int -> a -> Void# -> T a MkT :: Int# -> a -> Void# -> T a T would end up being used in STG post-unarise as: let x = MkT 1# y in ... case x of MkT int a -> ... The Void# argument is dropped. In essence we only generate binders for runtime relevant values. We also flatten out unboxed tuples in this process. See the unarise pass for details on how this is done. But as an example consider `data S = MkS Bool (# Bool | Char #)` which when matched on would result in an alternative with three binders like this MkS bool tag tpl_field -> See Note [Translating unboxed sums to unboxed tuples] and Note [Unarisation] for the details of this transformation. -} | StgLit Literal -- StgConApp is vital for returning unboxed tuples or sums -- which can't be let-bound | StgConApp DataCon ConstructorNumber [StgArg] -- Saturated. See Note [Constructor applications in STG] [[PrimRep]] -- See Note [Representations in StgConApp] in GHC.Stg.Unarise | StgOpApp StgOp -- Primitive op or foreign call [StgArg] -- Saturated. Type -- Result type -- We need to know this so that we can -- assign result registers {- ************************************************************************ * * GenStgExpr: case-expressions * * ************************************************************************ This has the same boxed/unboxed business as Core case expressions. 
-} | StgCase (GenStgExpr pass) -- the thing to examine (BinderP pass) -- binds the result of evaluating the scrutinee AltType [GenStgAlt pass] -- The DEFAULT case is always *first* -- if it is there at all {- ************************************************************************ * * GenStgExpr: let(rec)-expressions * * ************************************************************************ The various forms of let(rec)-expression encode most of the interesting things we want to do. - let-closure x = [free-vars] [args] expr in e is equivalent to let x = (\free-vars -> \args -> expr) free-vars @args@ may be empty (and is for most closures). It isn't under circumstances like this: let x = (\y -> y+z) This gets mangled to let-closure x = [z] [y] (y+z) The idea is that we compile code for @(y+z)@ in an environment in which @z@ is bound to an offset from Node, and `y` is bound to an offset from the stack pointer. (A let-closure is an @StgLet@ with a @StgRhsClosure@ RHS.) - let-constructor x = Constructor [args] in e (A let-constructor is an @StgLet@ with a @StgRhsCon@ RHS.) - Letrec-expressions are essentially the same deal as let-closure/ let-constructor, so we use a common structure and distinguish between them with an @is_recursive@ boolean flag. - let-unboxed u = in e All the stuff on the RHS must be fully evaluated. No function calls either! (We've backed away from this toward case-expressions with suitably-magical alts ...) - Advanced stuff here! Not to start with, but makes pattern matching generate more efficient code. let-escapes-not fail = expr in e' Here the idea is that @e'@ guarantees not to put @fail@ in a data structure, or pass it to another function. All @e'@ will ever do is tail-call @fail@. Rather than build a closure for @fail@, all we need do is to record the stack level at the moment of the @let-escapes-not@; then entering @fail@ is just a matter of adjusting the stack pointer back down to that point and entering the code for it. 
Another example: f x y = let z = huge-expression in if y==1 then z else if y==2 then z else 1 (A let-escapes-not is an @StgLetNoEscape@.) - We may eventually want: let-literal x = Literal in e And so the code for let(rec)-things: -} | StgLet (XLet pass) (GenStgBinding pass) -- right hand sides (see below) (GenStgExpr pass) -- body | StgLetNoEscape (XLetNoEscape pass) (GenStgBinding pass) -- right hand sides (see below) (GenStgExpr pass) -- body {- ************************************************************************* * * GenStgExpr: hpc, scc and other debug annotations * * ************************************************************************* Finally for @hpc@ expressions we introduce a new STG construct. -} | StgTick StgTickish (GenStgExpr pass) -- sub expression -- END of GenStgExpr {- ************************************************************************ * * STG right-hand sides * * ************************************************************************ Here's the rest of the interesting stuff for @StgLet@s; the first flavour is for closures: -} data GenStgRhs pass = StgRhsClosure (XRhsClosure pass) -- ^ Extension point for non-global free var -- list just before 'CodeGen'. CostCentreStack -- ^ CCS to be attached (default is CurrentCCS) !UpdateFlag -- ^ 'ReEntrant' | 'Updatable' | 'SingleEntry' [BinderP pass] -- ^ arguments; if empty, then not a function; -- as above, order is important. (GenStgExpr pass) -- ^ body Type -- ^ result type {- An example may be in order. Consider: let t = \x -> \y -> ... x ... y ... p ... q in e Pulling out the free vars and stylising somewhat, we get the equivalent: let t = (\[p,q] -> \[x,y] -> ... x ... y ... p ...q) p q Stg-operationally, the @[x,y]@ are on the stack, the @[p,q]@ are offsets from @Node@ into the closure, and the code ptr for the closure will be exactly that in parentheses above. 
The second flavour of right-hand-side is for constructors (simple but important): -} | StgRhsCon CostCentreStack -- CCS to be attached (default is CurrentCCS). -- Top-level (static) ones will end up with -- DontCareCCS, because we don't count static -- data in heap profiles, and we don't set CCCS -- from static closure. DataCon -- Constructor. Never an unboxed tuple or sum, as those -- are not allocated. ConstructorNumber [StgTickish] [StgArg] -- Saturated Args. See Note [Constructor applications in STG] Type -- Type, for rewriting to an StgRhsClosure -- | Like 'GHC.Hs.Extension.NoExtField', but with an 'Outputable' instance that -- returns 'empty'. data NoExtFieldSilent = NoExtFieldSilent deriving (Data, Eq, Ord) instance Outputable NoExtFieldSilent where ppr _ = empty -- | Used when constructing a term with an unused extension point that should -- not appear in pretty-printed output at all. noExtFieldSilent :: NoExtFieldSilent noExtFieldSilent = NoExtFieldSilent -- TODO: Maybe move this to GHC.Hs.Extension? I'm not sure about the -- implications on build time... stgRhsArity :: StgRhs -> Int stgRhsArity (StgRhsClosure _ _ _ bndrs _ _) = assert (all isId bndrs) $ length bndrs -- The arity never includes type parameters, but they should have gone by now stgRhsArity (StgRhsCon {}) = 0 freeVarsOfRhs :: (XRhsClosure pass ~ DIdSet) => GenStgRhs pass -> DIdSet freeVarsOfRhs (StgRhsCon _ _ _ _ args _) = mkDVarSet [ id | StgVarArg id <- args ] freeVarsOfRhs (StgRhsClosure fvs _ _ _ _ _) = fvs {- ************************************************************************ * * STG case alternatives * * ************************************************************************ Very like in Core syntax (except no type-world stuff). The type constructor is guaranteed not to be abstract; that is, we can see its representation. This is important because the code generator uses it to determine return conventions etc. 
But it's not trivial where there's a module loop involved, because some versions of a type constructor might not have all the constructors visible. So mkStgAlgAlts (in CoreToStg) ensures that it gets the TyCon from the constructors or literals (which are guaranteed to have the Real McCoy) rather than from the scrutinee type. -} data GenStgAlt pass = GenStgAlt { alt_con :: !AltCon -- alts: data constructor, , alt_bndrs :: ![BinderP pass] -- constructor's parameters, , alt_rhs :: !(GenStgExpr pass) -- right-hand side. } data AltType = PolyAlt -- Polymorphic (a boxed type variable, lifted or unlifted) | MultiValAlt Int -- Multi value of this arity (unboxed tuple or sum) -- the arity could indeed be 1 for unary unboxed tuple -- or enum-like unboxed sums | AlgAlt TyCon -- Algebraic data type; the AltCons will be DataAlts | PrimAlt PrimRep -- Primitive data type; the AltCons (if any) will be LitAlts {- ************************************************************************ * * The Plain STG parameterisation * * ************************************************************************ Note [STG Extension points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We now make use of extension points in STG for different passes which want to associate information with AST nodes. Currently the pipeline is roughly: CoreToStg: Core -> Stg StgSimpl: Stg -> Stg CodeGen: Stg -> Cmm As part of StgSimpl we run late lambda lifting (Ll). Late lambda lift: Stg -> FvStg -> LlStg -> Stg CodeGen: As part of CodeGen we run tag inference. Tag Inference: Stg -> Stg 'InferTaggedBinders` -> Stg And at a last step we add the free Variables: Stg -> CgStg Which finally CgStg being used to generate Cmm. 
-} type StgTopBinding = GenStgTopBinding 'Vanilla type StgBinding = GenStgBinding 'Vanilla type StgExpr = GenStgExpr 'Vanilla type StgRhs = GenStgRhs 'Vanilla type StgAlt = GenStgAlt 'Vanilla type LlStgTopBinding = GenStgTopBinding 'LiftLams type LlStgBinding = GenStgBinding 'LiftLams type LlStgExpr = GenStgExpr 'LiftLams type LlStgRhs = GenStgRhs 'LiftLams type LlStgAlt = GenStgAlt 'LiftLams type CgStgTopBinding = GenStgTopBinding 'CodeGen type CgStgBinding = GenStgBinding 'CodeGen type CgStgExpr = GenStgExpr 'CodeGen type CgStgRhs = GenStgRhs 'CodeGen type CgStgAlt = GenStgAlt 'CodeGen type TgStgTopBinding = GenStgTopBinding 'CodeGen type TgStgBinding = GenStgBinding 'CodeGen type TgStgExpr = GenStgExpr 'CodeGen type TgStgRhs = GenStgRhs 'CodeGen type TgStgAlt = GenStgAlt 'CodeGen {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied. See GHC.Core for precedence in Core land -} type InStgTopBinding = StgTopBinding type InStgBinding = StgBinding type InStgArg = StgArg type InStgExpr = StgExpr type InStgRhs = StgRhs type InStgAlt = StgAlt type OutStgTopBinding = StgTopBinding type OutStgBinding = StgBinding type OutStgArg = StgArg type OutStgExpr = StgExpr type OutStgRhs = StgRhs type OutStgAlt = StgAlt -- | When `-fdistinct-constructor-tables` is turned on then -- each usage of a constructor is given an unique number and -- an info table is generated for each different constructor. data ConstructorNumber = NoNumber | Numbered Int instance Outputable ConstructorNumber where ppr NoNumber = empty ppr (Numbered n) = text "#" <> ppr n {- Note Stg Passes ~~~~~~~~~~~~~~~ Here is a short summary of the STG pipeline and where we use the different StgPass data type indexes: 1. CoreToStg.Prep performs several transformations that prepare the desugared and simplified core to be converted to STG. 
One of these transformations is making it so that value lambdas only exist as the RHS of a binding. See Note [CorePrep Overview]. 2. CoreToStg converts the prepared core to STG, specifically GenStg* parameterised by 'Vanilla. See the GHC.CoreToStg Module. 3. Stg.Pipeline does a number of passes on the generated STG. One of these is the lambda-lifting pass, which internally uses the 'LiftLams parameterisation to store information for deciding whether or not to lift each binding. See Note [Late lambda lifting in STG]. 4. Tag inference takes in 'Vanilla and produces 'InferTagged STG, while using the InferTaggedBinders annotated AST internally. See Note [Tag Inference]. 5. Stg.FVs annotates closures with their free variables. To store these annotations we use the 'CodeGen parameterisation. See the GHC.Stg.FVs module. 6. The Module Stg.StgToCmm generates Cmm from the CodeGen annotated STG. -} -- | Used as a data type index for the stgSyn AST data StgPass = Vanilla | LiftLams -- ^ Use internally by the lambda lifting pass | InferTaggedBinders -- ^ Tag inference information on binders. 
-- See Note [Tag inference passes] in GHC.Stg.InferTags | InferTagged -- ^ Tag inference information put on relevant StgApp nodes -- See Note [Tag inference passes] in GHC.Stg.InferTags | CodeGen type family BinderP (pass :: StgPass) type instance BinderP 'Vanilla = Id type instance BinderP 'CodeGen = Id type instance BinderP 'InferTagged = Id type instance BinderP 'InferTaggedBinders = (Id, TagSig) type instance BinderP 'LiftLams = BinderInfo type family XRhsClosure (pass :: StgPass) type instance XRhsClosure 'Vanilla = NoExtFieldSilent type instance XRhsClosure 'LiftLams = DIdSet type instance XRhsClosure 'InferTagged = NoExtFieldSilent type instance XRhsClosure 'InferTaggedBinders = XRhsClosure 'CodeGen -- | Code gen needs to track non-global free vars type instance XRhsClosure 'CodeGen = DIdSet type family XLet (pass :: StgPass) type instance XLet 'Vanilla = NoExtFieldSilent type instance XLet 'LiftLams = Skeleton type instance XLet 'InferTagged = NoExtFieldSilent type instance XLet 'InferTaggedBinders = XLet 'CodeGen type instance XLet 'CodeGen = NoExtFieldSilent type family XLetNoEscape (pass :: StgPass) type instance XLetNoEscape 'Vanilla = NoExtFieldSilent type instance XLetNoEscape 'LiftLams = Skeleton type instance XLetNoEscape 'InferTagged = NoExtFieldSilent type instance XLetNoEscape 'InferTaggedBinders = XLetNoEscape 'CodeGen type instance XLetNoEscape 'CodeGen = NoExtFieldSilent {- ************************************************************************ * * UpdateFlag * * ************************************************************************ This is also used in @LambdaFormInfo@ in the @ClosureInfo@ module. -} data UpdateFlag = ReEntrant -- ^ A @ReEntrant@ closure may be entered multiple times, but should not -- be updated or blackholed. | Updatable -- ^ An @Updatable@ closure should be updated after evaluation (and may be -- blackholed during evaluation). 
| SingleEntry -- ^ A @SingleEntry@ closure will only be entered once, and so need not be -- updated but may safely be blackholed. | JumpedTo -- ^ A @JumpedTo@ (join-point) closure is entered once or multiple times -- but has no heap-allocated associated closure. deriving (Show,Eq) instance Outputable UpdateFlag where ppr u = char $ case u of ReEntrant -> 'r' Updatable -> 'u' SingleEntry -> 's' JumpedTo -> 'j' isUpdatable :: UpdateFlag -> Bool isUpdatable ReEntrant = False isUpdatable SingleEntry = False isUpdatable Updatable = True isUpdatable JumpedTo = False {- ************************************************************************ * * StgOp * * ************************************************************************ An StgOp allows us to group together PrimOps and ForeignCalls. It's quite useful to move these around together, notably in StgOpApp and COpStmt. -} data StgOp = StgPrimOp PrimOp | StgPrimCallOp PrimCall | StgFCallOp ForeignCall Type -- The Type, which is obtained from the foreign import declaration -- itself, is needed by the stg-to-cmm pass to determine the offset to -- apply to unlifted boxed arguments in GHC.StgToCmm.Foreign. See Note -- [Unlifted boxed arguments to foreign calls] {- ************************************************************************ * * Pretty-printing * * ************************************************************************ Robin Popplestone asked for semi-colon separators on STG binds; here's hoping he likes terminators instead... Ditto for case alternatives. 
-} type OutputablePass pass = ( Outputable (XLet pass) , Outputable (XLetNoEscape pass) , Outputable (XRhsClosure pass) , OutputableBndr (BinderP pass) ) -- | STG pretty-printing options data StgPprOpts = StgPprOpts { stgSccEnabled :: !Bool -- ^ Enable cost-centres } -- | STG pretty-printing options used for panic messages panicStgPprOpts :: StgPprOpts panicStgPprOpts = StgPprOpts { stgSccEnabled = True } -- | STG pretty-printing options used for short messages shortStgPprOpts :: StgPprOpts shortStgPprOpts = StgPprOpts { stgSccEnabled = False } pprGenStgTopBinding :: OutputablePass pass => StgPprOpts -> GenStgTopBinding pass -> SDoc pprGenStgTopBinding opts b = case b of StgTopStringLit bndr str -> hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprHsBytes str <> semi) StgTopLifted bind -> pprGenStgBinding opts bind pprGenStgBinding :: OutputablePass pass => StgPprOpts -> GenStgBinding pass -> SDoc pprGenStgBinding opts b = case b of StgNonRec bndr rhs -> hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprStgRhs opts rhs <> semi) StgRec pairs -> vcat [ text "Rec {" , vcat (intersperse blankLine (map ppr_bind pairs)) , text "end Rec }" ] where ppr_bind (bndr, expr) = hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprStgRhs opts expr <> semi) instance OutputablePass pass => Outputable (GenStgBinding pass) where ppr = pprGenStgBinding panicStgPprOpts pprGenStgTopBindings :: (OutputablePass pass) => StgPprOpts -> [GenStgTopBinding pass] -> SDoc pprGenStgTopBindings opts binds = vcat $ intersperse blankLine (map (pprGenStgTopBinding opts) binds) pprStgBinding :: OutputablePass pass => StgPprOpts -> GenStgBinding pass -> SDoc pprStgBinding = pprGenStgBinding pprStgTopBinding :: OutputablePass pass => StgPprOpts -> GenStgTopBinding pass -> SDoc pprStgTopBinding = pprGenStgTopBinding pprStgTopBindings :: OutputablePass pass => StgPprOpts -> [GenStgTopBinding pass] -> SDoc pprStgTopBindings = pprGenStgTopBindings pprIdWithRep :: Id -> SDoc pprIdWithRep v = ppr v <> pprTypeRep 
(idType v) pprTypeRep :: Type -> SDoc pprTypeRep ty = ppUnlessOption sdocSuppressStgReps $ char ':' <> case typePrimRep ty of [r] -> ppr r r -> ppr r instance Outputable StgArg where ppr = pprStgArg pprStgArg :: StgArg -> SDoc pprStgArg (StgVarArg var) = pprIdWithRep var pprStgArg (StgLitArg con) = ppr con <> pprTypeRep (literalType con) instance OutputablePass pass => Outputable (GenStgExpr pass) where ppr = pprStgExpr panicStgPprOpts pprStgExpr :: OutputablePass pass => StgPprOpts -> GenStgExpr pass -> SDoc pprStgExpr opts e = case e of -- special case StgLit lit -> ppr lit -- general case StgApp func args | null args , Just sig <- idTagSig_maybe func -> ppr func <> ppr sig | otherwise -> hang (ppr func) 4 (interppSP args) -- TODO: Print taggedness StgConApp con n args _ -> hsep [ ppr con, ppr n, brackets (interppSP args) ] StgOpApp op args _ -> hsep [ pprStgOp op, brackets (interppSP args)] -- special case: let v = -- in -- let ... -- in -- ... -- -- Very special! Suspicious! (SLPJ) {- StgLet srt (StgNonRec bndr (StgRhsClosure cc bi free_vars upd_flag args rhs)) expr@(StgLet _ _)) -> ($$) (hang (hcat [text "let { ", ppr bndr, text " = ", ppr cc, pp_binder_info bi, text " [", whenPprDebug (interppSP free_vars), text "] \\", ppr upd_flag, text " [", interppSP args, char ']']) 8 (sep [hsep [ppr rhs, text "} in"]])) (ppr expr) -} -- special case: let ... in let ... 
StgLet ext bind expr@StgLet{} -> ($$) (sep [hang (text "let" <+> ppr ext <+> text "{") 2 (hsep [pprGenStgBinding opts bind, text "} in"])]) (pprStgExpr opts expr) -- general case StgLet ext bind expr -> sep [ hang (text "let" <+> ppr ext <+> text "{") 2 (pprGenStgBinding opts bind) , hang (text "} in ") 2 (pprStgExpr opts expr) ] StgLetNoEscape ext bind expr -> sep [ hang (text "let-no-escape" <+> ppr ext <+> text "{") 2 (pprGenStgBinding opts bind) , hang (text "} in ") 2 (pprStgExpr opts expr) ] StgTick tickish expr -> sdocOption sdocSuppressTicks $ \case True -> pprStgExpr opts expr False -> sep [ ppr tickish, pprStgExpr opts expr ] -- Don't indent for a single case alternative. StgCase expr bndr alt_type [alt] -> sep [ sep [ text "case" , nest 4 (hsep [ pprStgExpr opts expr , whenPprDebug (dcolon <+> ppr alt_type) ]) , text "of" , pprBndr CaseBind bndr , char '{' ] , pprStgAlt opts False alt , char '}' ] StgCase expr bndr alt_type alts -> sep [ sep [ text "case" , nest 4 (hsep [ pprStgExpr opts expr , whenPprDebug (dcolon <+> ppr alt_type) ]) , text "of" , pprBndr CaseBind bndr, char '{' ] , nest 2 (vcat (map (pprStgAlt opts True) alts)) , char '}' ] pprStgAlt :: OutputablePass pass => StgPprOpts -> Bool -> GenStgAlt pass -> SDoc pprStgAlt opts indent GenStgAlt{alt_con, alt_bndrs, alt_rhs} | indent = hang altPattern 4 (pprStgExpr opts alt_rhs <> semi) | otherwise = sep [altPattern, pprStgExpr opts alt_rhs <> semi] where altPattern = hsep [ ppr alt_con , sep (map (pprBndr CasePatBind) alt_bndrs) , text "->" ] pprStgOp :: StgOp -> SDoc pprStgOp (StgPrimOp op) = ppr op pprStgOp (StgPrimCallOp op)= ppr op pprStgOp (StgFCallOp op _) = ppr op instance Outputable StgOp where ppr = pprStgOp instance Outputable AltType where ppr PolyAlt = text "Polymorphic" ppr (MultiValAlt n) = text "MultiAlt" <+> ppr n ppr (AlgAlt tc) = text "Alg" <+> ppr tc ppr (PrimAlt tc) = text "Prim" <+> ppr tc pprStgRhs :: OutputablePass pass => StgPprOpts -> GenStgRhs pass -> SDoc pprStgRhs 
opts rhs = case rhs of StgRhsClosure ext cc upd_flag args body _ -> hang (hsep [ if stgSccEnabled opts then ppr cc else empty , ppUnlessOption sdocSuppressStgExts (ppr ext) , char '\\' <> ppr upd_flag, brackets (interppSP args) ]) 4 (pprStgExpr opts body) StgRhsCon cc con mid _ticks args _ -> hcat [ if stgSccEnabled opts then ppr cc <> space else empty , case mid of NoNumber -> empty Numbered n -> hcat [ppr n, space] -- The bang indicates this is an StgRhsCon instead of an StgConApp. , ppr con, text "! ", brackets (sep (map pprStgArg args))] instance OutputablePass pass => Outputable (GenStgRhs pass) where ppr = pprStgRhs panicStgPprOpts ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToCmm/0000755000000000000000000000000007346545000017403 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToCmm/CgUtils.hs0000644000000000000000000002427507346545000021323 0ustar0000000000000000{-# LANGUAGE GADTs #-} ----------------------------------------------------------------------------- -- -- Code generator utilities; mostly monadic -- -- (c) The University of Glasgow 2004-2006 -- ----------------------------------------------------------------------------- module GHC.StgToCmm.CgUtils ( fixStgRegisters, baseRegOffset, get_Regtable_addr_from_offset, regTableOffset, get_GlobalReg_addr, -- * Streaming for CG CgStream ) where import GHC.Prelude import GHC.Platform.Regs import GHC.Platform import GHC.Cmm import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Utils import GHC.Cmm.CLabel import GHC.Utils.Panic import GHC.Cmm.Dataflow.Label import GHC.Data.Stream (Stream) import GHC.Types.Unique.DSM (UniqDSMT) -- ----------------------------------------------------------------------------- -- Streaming -- | The Stream instantiation used for code generation. 
-- Note the underlying monad is @UniqDSMT IO@, where @UniqDSMT@ is a transformer -- that propagates a deterministic unique supply (essentially an incrementing -- counter) from which new uniques are deterministically created during the -- code generation stages following StgToCmm. -- See Note [Object determinism]. type CgStream = Stream (UniqDSMT IO) -- ----------------------------------------------------------------------------- -- Information about global registers baseRegOffset :: Platform -> GlobalReg -> Int baseRegOffset platform reg = case reg of VanillaReg 1 -> pc_OFFSET_StgRegTable_rR1 constants VanillaReg 2 -> pc_OFFSET_StgRegTable_rR2 constants VanillaReg 3 -> pc_OFFSET_StgRegTable_rR3 constants VanillaReg 4 -> pc_OFFSET_StgRegTable_rR4 constants VanillaReg 5 -> pc_OFFSET_StgRegTable_rR5 constants VanillaReg 6 -> pc_OFFSET_StgRegTable_rR6 constants VanillaReg 7 -> pc_OFFSET_StgRegTable_rR7 constants VanillaReg 8 -> pc_OFFSET_StgRegTable_rR8 constants VanillaReg 9 -> pc_OFFSET_StgRegTable_rR9 constants VanillaReg 10 -> pc_OFFSET_StgRegTable_rR10 constants VanillaReg n -> panic ("Registers above R10 are not supported (tried to use R" ++ show n ++ ")") FloatReg 1 -> pc_OFFSET_StgRegTable_rF1 constants FloatReg 2 -> pc_OFFSET_StgRegTable_rF2 constants FloatReg 3 -> pc_OFFSET_StgRegTable_rF3 constants FloatReg 4 -> pc_OFFSET_StgRegTable_rF4 constants FloatReg 5 -> pc_OFFSET_StgRegTable_rF5 constants FloatReg 6 -> pc_OFFSET_StgRegTable_rF6 constants FloatReg n -> panic ("Registers above F6 are not supported (tried to use F" ++ show n ++ ")") DoubleReg 1 -> pc_OFFSET_StgRegTable_rD1 constants DoubleReg 2 -> pc_OFFSET_StgRegTable_rD2 constants DoubleReg 3 -> pc_OFFSET_StgRegTable_rD3 constants DoubleReg 4 -> pc_OFFSET_StgRegTable_rD4 constants DoubleReg 5 -> pc_OFFSET_StgRegTable_rD5 constants DoubleReg 6 -> pc_OFFSET_StgRegTable_rD6 constants DoubleReg n -> panic ("Registers above D6 are not supported (tried to use D" ++ show n ++ ")") XmmReg 1 -> 
pc_OFFSET_StgRegTable_rXMM1 constants XmmReg 2 -> pc_OFFSET_StgRegTable_rXMM2 constants XmmReg 3 -> pc_OFFSET_StgRegTable_rXMM3 constants XmmReg 4 -> pc_OFFSET_StgRegTable_rXMM4 constants XmmReg 5 -> pc_OFFSET_StgRegTable_rXMM5 constants XmmReg 6 -> pc_OFFSET_StgRegTable_rXMM6 constants XmmReg n -> panic ("Registers above XMM6 are not supported (tried to use XMM" ++ show n ++ ")") YmmReg 1 -> pc_OFFSET_StgRegTable_rYMM1 constants YmmReg 2 -> pc_OFFSET_StgRegTable_rYMM2 constants YmmReg 3 -> pc_OFFSET_StgRegTable_rYMM3 constants YmmReg 4 -> pc_OFFSET_StgRegTable_rYMM4 constants YmmReg 5 -> pc_OFFSET_StgRegTable_rYMM5 constants YmmReg 6 -> pc_OFFSET_StgRegTable_rYMM6 constants YmmReg n -> panic ("Registers above YMM6 are not supported (tried to use YMM" ++ show n ++ ")") ZmmReg 1 -> pc_OFFSET_StgRegTable_rZMM1 constants ZmmReg 2 -> pc_OFFSET_StgRegTable_rZMM2 constants ZmmReg 3 -> pc_OFFSET_StgRegTable_rZMM3 constants ZmmReg 4 -> pc_OFFSET_StgRegTable_rZMM4 constants ZmmReg 5 -> pc_OFFSET_StgRegTable_rZMM5 constants ZmmReg 6 -> pc_OFFSET_StgRegTable_rZMM6 constants ZmmReg n -> panic ("Registers above ZMM6 are not supported (tried to use ZMM" ++ show n ++ ")") Sp -> pc_OFFSET_StgRegTable_rSp constants SpLim -> pc_OFFSET_StgRegTable_rSpLim constants LongReg 1 -> pc_OFFSET_StgRegTable_rL1 constants LongReg n -> panic ("Registers above L1 are not supported (tried to use L" ++ show n ++ ")") Hp -> pc_OFFSET_StgRegTable_rHp constants HpLim -> pc_OFFSET_StgRegTable_rHpLim constants CCCS -> pc_OFFSET_StgRegTable_rCCCS constants CurrentTSO -> pc_OFFSET_StgRegTable_rCurrentTSO constants CurrentNursery -> pc_OFFSET_StgRegTable_rCurrentNursery constants HpAlloc -> pc_OFFSET_StgRegTable_rHpAlloc constants EagerBlackholeInfo -> pc_OFFSET_stgEagerBlackholeInfo constants GCEnter1 -> pc_OFFSET_stgGCEnter1 constants GCFun -> pc_OFFSET_stgGCFun constants BaseReg -> panic "GHC.StgToCmm.CgUtils.baseRegOffset:BaseReg" PicBaseReg -> panic "GHC.StgToCmm.CgUtils.baseRegOffset:PicBaseReg" 
MachSp -> panic "GHC.StgToCmm.CgUtils.baseRegOffset:MachSp" UnwindReturnReg -> panic "GHC.StgToCmm.CgUtils.baseRegOffset:UnwindReturnReg" where !constants = platformConstants platform -- ----------------------------------------------------------------------------- -- -- STG/Cmm GlobalReg -- -- ----------------------------------------------------------------------------- -- | We map STG registers onto appropriate CmmExprs. Either they map -- to real machine registers or stored as offsets from BaseReg. Given -- a GlobalReg, get_GlobalReg_addr always produces the -- register table address for it. get_GlobalReg_addr :: Platform -> GlobalReg -> CmmExpr get_GlobalReg_addr platform BaseReg = regTableOffset platform 0 get_GlobalReg_addr platform mid = get_Regtable_addr_from_offset platform (baseRegOffset platform mid) -- Calculate a literal representing an offset into the register table. -- Used when we don't have an actual BaseReg to offset from. regTableOffset :: Platform -> Int -> CmmExpr regTableOffset platform n = CmmLit (CmmLabelOff mkMainCapabilityLabel (pc_OFFSET_Capability_r (platformConstants platform) + n)) get_Regtable_addr_from_offset :: Platform -> Int -> CmmExpr get_Regtable_addr_from_offset platform offset = if haveRegBase platform then cmmRegOff (baseReg platform) offset else regTableOffset platform offset -- | Fixup global registers so that they assign to locations within the -- RegTable if they aren't pinned for the current target. 
fixStgRegisters :: Platform -> RawCmmDecl -> RawCmmDecl fixStgRegisters _ top@(CmmData _ _) = top fixStgRegisters platform (CmmProc info lbl live graph) = let graph' = modifyGraph (mapGraphBlocks mapMap (fixStgRegBlock platform)) graph in CmmProc info lbl live graph' fixStgRegBlock :: Platform -> Block CmmNode e x -> Block CmmNode e x fixStgRegBlock platform block = mapBlock (fixStgRegStmt platform) block fixStgRegStmt :: Platform -> CmmNode e x -> CmmNode e x fixStgRegStmt platform stmt = fixAssign $ mapExpDeep fixExpr stmt where fixAssign stmt = case stmt of CmmAssign (CmmGlobal reg_use) src -- MachSp isn't an STG register; it's merely here for tracking unwind -- information | reg == MachSp -> stmt | otherwise -> let baseAddr = get_GlobalReg_addr platform reg in case reg `elem` activeStgRegs platform of True -> CmmAssign (CmmGlobal reg_use) src False -> CmmStore baseAddr src NaturallyAligned where reg = globalRegUse_reg reg_use other_stmt -> other_stmt fixExpr expr = case expr of -- MachSp isn't an STG; it's merely here for tracking unwind information CmmReg (CmmGlobal (GlobalRegUse MachSp _)) -> expr CmmReg (CmmGlobal reg_use) -> -- Replace register leaves with appropriate StixTrees for -- the given target. MagicIds which map to a reg on this -- arch are left unchanged. For the rest, BaseReg is taken -- to mean the address of the reg table in MainCapability, -- and for all others we generate an indirection to its -- location in the register table. let reg = globalRegUse_reg reg_use in case reg `elem` activeStgRegs platform of True -> expr False -> let baseAddr = get_GlobalReg_addr platform reg in case reg of BaseReg -> baseAddr _other -> CmmLoad baseAddr (globalRegUse_type reg_use) NaturallyAligned CmmRegOff greg@(CmmGlobal reg) offset -> -- RegOf leaves are just a shorthand form. If the reg maps -- to a real reg, we keep the shorthand, otherwise, we just -- expand it and defer to the above code. 
-- NB: to ensure type correctness we need to ensure the Add -- as well as the Int need to be of the same size as the -- register. case globalRegUse_reg reg `elem` activeStgRegs platform of True -> expr False -> CmmMachOp (MO_Add (cmmRegWidth greg)) [ fixExpr (CmmReg greg), CmmLit (CmmInt (fromIntegral offset) (cmmRegWidth greg))] other_expr -> other_expr ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToCmm/Config.hs0000644000000000000000000001570707346545000021156 0ustar0000000000000000-- | The stg to cmm code generator configuration module GHC.StgToCmm.Config ( StgToCmmConfig(..) , stgToCmmPlatform ) where import GHC.Platform.Profile import GHC.Platform import GHC.Unit.Module import GHC.Utils.Outputable import GHC.Utils.TmpFs import GHC.Cmm.MachOp ( FMASign(..) ) import GHC.Prelude -- This config is static and contains information only passed *downwards* by StgToCmm.Monad data StgToCmmConfig = StgToCmmConfig ----------------------------- General Settings -------------------------------- { stgToCmmProfile :: !Profile -- ^ Current profile , stgToCmmThisModule :: Module -- ^ The module being compiled. This field kept lazy for -- Cmm/Parser.y which preloads it with a panic , stgToCmmTmpDir :: !TempDir -- ^ Temp Dir for files used in compilation , stgToCmmContext :: !SDocContext -- ^ Context for StgToCmm phase , stgToCmmEmitDebugInfo :: !Bool -- ^ Whether we wish to output debug information , stgToCmmBinBlobThresh :: !(Maybe Word) -- ^ Threshold at which Binary literals (e.g. strings) -- are either dumped to a file and a CmmFileEmbed literal -- is emitted (over threshold), or become a CmmString -- Literal (under or at threshold). CmmFileEmbed is only supported -- with the NCG, thus a Just means two things: We have a threshold, -- and will be using the NCG. Conversely, a Nothing implies we are not -- using NCG and disables CmmFileEmbed. See Note -- [Embedding large binary blobs] in GHC.CmmToAsm.Ppr, and -- @cgTopBinding@ in GHC.StgToCmm. 
, stgToCmmMaxInlAllocSize :: !Int -- ^ Max size, in bytes, of inline array allocations. ------------------------------ Ticky Options ---------------------------------- , stgToCmmDoTicky :: !Bool -- ^ Ticky profiling enabled (cf @-ticky@) , stgToCmmTickyAllocd :: !Bool -- ^ True indicates ticky prof traces allocs of each named -- thing in addition to allocs _by_ that thing , stgToCmmTickyLNE :: !Bool -- ^ True indicates ticky uses name-specific counters for -- join-points (let-no-escape) , stgToCmmTickyDynThunk :: !Bool -- ^ True indicates ticky uses name-specific counters for -- dynamic thunks , stgToCmmTickyTag :: !Bool -- ^ True indicates ticky will count number of avoided tag checks by tag inference. ---------------------------------- Flags -------------------------------------- , stgToCmmLoopification :: !Bool -- ^ Loopification enabled (cf @-floopification@) , stgToCmmAlignCheck :: !Bool -- ^ Insert alignment check (cf @-falignment-sanitisation@) , stgToCmmFastPAPCalls :: !Bool -- ^ , stgToCmmSCCProfiling :: !Bool -- ^ Check if cost-centre profiling is enabled , stgToCmmEagerBlackHole :: !Bool -- ^ , stgToCmmOrigThunkInfo :: !Bool -- ^ Push @stg_orig_thunk_info@ frames during thunk update. 
, stgToCmmInfoTableMap :: !Bool -- ^ true means generate C Stub for IPE map, See Note [Mapping Info Tables to Source Positions] , stgToCmmInfoTableMapWithFallback :: !Bool -- ^ Include info tables with fallback source locations in the info table map , stgToCmmInfoTableMapWithStack :: !Bool -- ^ Include info tables for STACK closures in the info table map , stgToCmmOmitYields :: !Bool -- ^ true means omit heap checks when no allocation is performed , stgToCmmOmitIfPragmas :: !Bool -- ^ true means don't generate interface programs (implied by -O0) , stgToCmmPIC :: !Bool -- ^ true if @-fPIC@ , stgToCmmPIE :: !Bool -- ^ true if @-fPIE@ , stgToCmmExtDynRefs :: !Bool -- ^ true if @-fexternal-dynamic-refs@, meaning generate -- code for linking against dynamic libraries , stgToCmmDoBoundsCheck :: !Bool -- ^ decides whether to check array bounds in StgToCmm.Prim -- or not , stgToCmmDoTagCheck :: !Bool -- ^ Verify tag inference predictions. , stgToCmmObjectDeterminism :: !Bool -- ^ Enable deterministic code generation (more precisely, the deterministic unique-renaming pass in StgToCmm) ------------------------------ Backend Flags ---------------------------------- , stgToCmmAllowArith64 :: !Bool -- ^ Allowed to emit 64-bit arithmetic operations , stgToCmmAllowQuot64 :: !Bool -- ^ Allowed to emit 64-bit division operations , stgToCmmAllowQuotRemInstr :: !Bool -- ^ Allowed to generate QuotRem instructions , stgToCmmAllowQuotRem2 :: !Bool -- ^ Allowed to generate QuotRem , stgToCmmAllowExtendedAddSubInstrs :: !Bool -- ^ Allowed to generate AddWordC, SubWordC, Add2, etc. , stgToCmmAllowIntMul2Instr :: !Bool -- ^ Allowed to generate IntMul2 instruction , stgToCmmAllowWordMul2Instr :: !Bool -- ^ Allowed to generate WordMul2 instruction , stgToCmmAllowFMAInstr :: FMASign -> Bool -- ^ Allowed to generate FMA instruction , stgToCmmTickyAP :: !Bool -- ^ Disable use of precomputed standard thunks. 
, stgToCmmSaveFCallTargetToLocal :: !Bool -- ^ Save a foreign call target to a Cmm local, see -- Note [Saving foreign call target to local] for details ------------------------------ SIMD flags ------------------------------------ -- Each of these flags checks vector compatibility with the backend requested -- during compilation. In essence, this means checking for @-fllvm@ which is -- the only backend that currently allows SIMD instructions, see -- Ghc.StgToCmm.Prim.checkVecCompatibility for these flags only call site. , stgToCmmVecInstrsErr :: Maybe String -- ^ Error (if any) to raise when vector instructions are -- used, see @StgToCmm.Prim.checkVecCompatibility@ , stgToCmmAvx :: !Bool -- ^ check for Advanced Vector Extensions , stgToCmmAvx2 :: !Bool -- ^ check for Advanced Vector Extensions 2 , stgToCmmAvx512f :: !Bool -- ^ check for Advanced Vector 512-bit Extensions } stgToCmmPlatform :: StgToCmmConfig -> Platform stgToCmmPlatform = profilePlatform . stgToCmmProfile ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToCmm/Types.hs0000644000000000000000000002307507346545000021052 0ustar0000000000000000 module GHC.StgToCmm.Types ( CmmCgInfos (..) , LambdaFormInfo (..) , ModuleLFInfos , StandardFormInfo (..) , DoSCCProfiling , DoExtDynRefs ) where import GHC.Prelude import GHC.Core.DataCon import GHC.Runtime.Heap.Layout import GHC.Types.Basic import GHC.Types.ForeignStubs import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Utils.Outputable {- Note [Conveying CAF-info and LFInfo between modules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some information about an Id is generated in the code generator, and is not available earlier. Namely: * CAF info. Code motion in Cmm or earlier phases may move references around so we compute information about which bits of code refer to which CAF late in the Cmm pipeline. * LambdaFormInfo. 
This records the details of a closure representation, including - the final arity (for functions) - whether it is a data constructor, and if so its tag Collectively we call this CgInfo (see GHC.StgToCmm.Types). It's very useful for importing modules to have this information. We can always make a conservative assumption, but that is bad: e.g. * For CAF info, if we know nothing we have to assume it is a CAF which bloats the SRTs of the importing module. Conservative assumption here is made when creating new Ids. * For data constructors, we really like having well-tagged pointers. See #14677, #16559, #15155, and wiki: commentary/rts/haskell-execution/pointer-tagging Conservative assumption here is made when we import an Id without a LambdaFormInfo in the interface, in GHC.StgToCmm.Closure.importedIdLFInfo. So we arrange to always serialise this information into the interface file. The moving parts are: * We record the CgInfo in the IdInfo of the Id. * GHC.Driver.Pipeline: the call to updateModDetailsIdInfos augments the ModDetails constructed at the end of the Core pipeline, with CgInfo gleaned from the back end. The hard work is done in GHC.Iface.UpdateIdInfos. * For ModIface we generate the final ModIface with CgInfo in GHC.Iface.Make.mkFullIface. * We don't absolutely guarantee to serialise the CgInfo: we won't if you have -fomit-interface-pragmas or -fno-code; and we won't read it in if you have -fignore-interface-pragmas. (We could revisit this decision.) Note [Imported unlifted nullary datacon wrappers must have correct LFInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in `Note [Conveying CAF-info and LFInfo between modules]`, imported unlifted nullary datacons must have their LambdaFormInfo set to reflect the fact that they are evaluated. This is necessary as otherwise references to them may be passed untagged to code that expects tagged references because of the unlifted nature of the argument. 
For example, in type T :: UnliftedType data T = T1 | T2 f :: T -> Int f x = case x of T1 -> 1; T2 -> 2 `f` expects `x` to be evaluated and properly tagged due to its unliftedness. We can guarantee all occurrences of `T1` and `T2` are considered evaluated and are properly tagged by giving them the `LFCon` LambdaFormInfo which indicates they are fully saturated constructor applications. (The LambdaFormInfo is used to tag the pointer with the tag of the constructor, in `litIdInfo`) What may be less obvious is that this must be done for not only datacon workers but also *wrappers*. The reason is found in this program from #23146: module B where type NP :: [UnliftedType] -> UnliftedType data NP xs where UNil :: NP '[] module A where import B fieldsSam :: NP xs -> NP xs -> Bool fieldsSam UNil UNil = True x = fieldsSam UNil UNil Due to its GADT nature, `B.UNil` produces a trivial wrapper $WUNil :: NP '[] $WUNil = UNil @'[] @~() which is referenced in the RHS of `A.x`. If we fail to give `$WUNil` the correct `LFCon 0` `LambdaFormInfo` then we will end up passing an untagged pointer to `fieldsSam`. This is problematic as `fieldsSam` may take advantage of the unlifted nature of its arguments by omitting handling of the zero tag when scrutinising them. The fix is straightforward: ensure we always construct a /correct/ LFInfo for datacon workers and wrappers, and populate the `lfInfo` with it. See Note [LFInfo of DataCon workers and wrappers]. This fixed #23146. See also Note [The LFInfo of Imported Ids] -} -- | Codegen-generated Id infos, to be passed to downstream via interfaces. -- -- This stuff is for optimization purposes only, they're not compulsory. -- -- * When CafInfo of an imported Id is not known it's safe to treat it as CAFFY. -- * When LambdaFormInfo of an imported Id is not known it's safe to treat it as -- `LFUnknown True` (which just says "it could be anything" and we do slow -- entry). -- -- See also Note [Conveying CAF-info and LFInfo between modules] above. 
-- data CmmCgInfos = CmmCgInfos { cgNonCafs :: !NonCaffySet -- ^ Exported Non-CAFFY closures in the current module. Everything else is -- either not exported of CAFFY. , cgLFInfos :: !ModuleLFInfos -- ^ LambdaFormInfos of exported closures in the current module. , cgIPEStub :: !CStub -- ^ The C stub which is used for IPE information } -------------------------------------------------------------------------------- -- LambdaFormInfo -------------------------------------------------------------------------------- -- | Maps names in the current module to their LambdaFormInfos type ModuleLFInfos = NameEnv LambdaFormInfo -- | Information about an identifier, from the code generator's point of view. -- Every identifier is bound to a LambdaFormInfo in the environment, which gives -- the code generator enough info to be able to tail call or return that -- identifier. data LambdaFormInfo = LFReEntrant -- Reentrant closure (a function) !TopLevelFlag -- True if top level !RepArity -- Arity. Invariant: always > 0 !Bool -- True <=> no fvs !ArgDescr -- Argument descriptor (should really be in ClosureInfo) | LFThunk -- Thunk (zero arity) !TopLevelFlag !Bool -- True <=> no free vars !Bool -- True <=> updatable (i.e., *not* single-entry) !StandardFormInfo !Bool -- True <=> *might* be a function type | LFCon -- A saturated data constructor application !DataCon -- The constructor | LFUnknown -- Used for function arguments and imported things. -- We know nothing about this closure. -- Treat like updatable "LFThunk"... 
-- Imported things which we *do* know something about use -- one of the other LF constructors (eg LFReEntrant for -- known functions) !Bool -- True <=> *might* be a function type -- The False case is good when we want to enter it, -- because then we know the entry code will do -- For a function, the entry code is the fast entry point | LFUnlifted -- A value of unboxed type; -- always a value, needs evaluation | LFLetNoEscape -- See LetNoEscape module for precise description instance Outputable LambdaFormInfo where ppr (LFReEntrant top rep fvs argdesc) = text "LFReEntrant" <> brackets (ppr top <+> ppr rep <+> pprFvs fvs <+> ppr argdesc) ppr (LFThunk top hasfv updateable sfi m_function) = text "LFThunk" <> brackets (ppr top <+> pprFvs hasfv <+> pprUpdateable updateable <+> ppr sfi <+> pprFuncFlag m_function) ppr (LFCon con) = text "LFCon" <> brackets (ppr con) ppr (LFUnknown m_func) = text "LFUnknown" <> brackets (pprFuncFlag m_func) ppr LFUnlifted = text "LFUnlifted" ppr LFLetNoEscape = text "LFLetNoEscape" pprFvs :: Bool -> SDoc pprFvs True = text "no-fvs" pprFvs False = text "fvs" pprFuncFlag :: Bool -> SDoc pprFuncFlag True = text "mFunc" pprFuncFlag False = text "value" pprUpdateable :: Bool -> SDoc pprUpdateable True = text "updateable" pprUpdateable False = text "oneshot" -------------------------------------------------------------------------------- -- | StandardFormInfo tells whether this thunk has one of a small number of -- standard forms data StandardFormInfo = NonStandardThunk -- The usual case: not of the standard forms | SelectorThunk -- A SelectorThunk is of form -- case x of -- con a1,..,an -> ak -- and the constructor is from a single-constr type. !WordOff -- 0-origin offset of ak within the "goods" of -- constructor (Recall that the a1,...,an may be laid -- out in the heap in a non-obvious order.) | ApThunk -- An ApThunk is of form -- x1 ... xn -- The code for the thunk just pushes x2..xn on the stack and enters x1. 
-- There are a few of these (for 1 <= n <= MAX_SPEC_AP_SIZE) pre-compiled -- in the RTS to save space. !RepArity -- Arity, n deriving (Eq) instance Outputable StandardFormInfo where ppr NonStandardThunk = text "RegThunk" ppr (SelectorThunk w) = text "SelThunk:" <> ppr w ppr (ApThunk n) = text "ApThunk:" <> ppr n -------------------------------------------------------------------------------- -- Gaining sight in a sea of blindness -------------------------------------------------------------------------------- type DoSCCProfiling = Bool type DoExtDynRefs = Bool ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/Linker/0000755000000000000000000000000007346545000020427 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/Linker/Types.hs0000644000000000000000000000622107346545000022070 0ustar0000000000000000{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PartialTypeSignatures #-} {-# LANGUAGE LambdaCase #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.StgToJS.Linker.Types -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- ----------------------------------------------------------------------------- module GHC.StgToJS.Linker.Types ( JSLinkConfig (..) , LinkPlan (..) 
) where import GHC.StgToJS.Object import GHC.Unit.Types import GHC.Utils.Outputable (Outputable(..),text,ppr, hang, IsDoc (vcat), IsLine (hcat)) import Data.Map.Strict (Map) import Data.Set (Set) import qualified Data.Set as S import System.IO import Prelude -------------------------------------------------------------------------------- -- Linker Config -------------------------------------------------------------------------------- data JSLinkConfig = JSLinkConfig { lcNoJSExecutables :: !Bool -- ^ Dont' build JS executables , lcNoHsMain :: !Bool -- ^ Don't generate Haskell main entry , lcNoRts :: !Bool -- ^ Don't dump the generated RTS , lcNoStats :: !Bool -- ^ Disable .stats file generation , lcForeignRefs :: !Bool -- ^ Dump .frefs (foreign references) files , lcCombineAll :: !Bool -- ^ Generate all.js (combined js) + wrappers , lcForceEmccRts :: !Bool -- ^ Force the link with the emcc rts. Use this if you plan to dynamically -- load wasm modules made from C files (e.g. in iserv). , lcLinkCsources :: !Bool -- ^ Link C sources (compiled to JS/Wasm) with Haskell code compiled to -- JS. This implies the use of the Emscripten RTS to load this code. 
} data LinkPlan = LinkPlan { lkp_block_info :: Map Module LocatedBlockInfo -- ^ Block information , lkp_dep_blocks :: Set BlockRef -- ^ Blocks to link , lkp_archives :: !(Set FilePath) -- ^ Archives to load JS and Cc sources from (JS code corresponding to -- Haskell code is handled with blocks above) , lkp_objs_js :: !(Set FilePath) -- ^ JS objects to link , lkp_objs_cc :: !(Set FilePath) -- ^ Cc objects to link } instance Outputable LinkPlan where ppr s = hang (text "LinkPlan") 2 $ vcat -- Hidden because it's too verbose and it's not really part of the -- plan, just meta info used to retrieve actual block contents -- [ hcat [ text "Block info: ", ppr (lkp_block_info s)] [ hcat [ text "Blocks: ", ppr (S.size (lkp_dep_blocks s))] , hang (text "Archives:") 2 (vcat (fmap text (S.toList (lkp_archives s)))) , hang (text "Extra JS objects:") 2 (vcat (fmap text (S.toList (lkp_objs_js s)))) , hang (text "Extra Cc objects:") 2 (vcat (fmap text (S.toList (lkp_objs_cc s)))) ] ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/0000755000000000000000000000000007346545000017203 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/Object.hs0000644000000000000000000007027707346545000020762 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE Rank2Types #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE MultiWayIf #-} -- only for DB.Binary instances on Module {-# OPTIONS_GHC -fno-warn-orphans #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.StgToJS.Object -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Sylvain Henry -- Jeffrey Young -- Luite Stegeman -- Josh Meredith -- Stability : experimental -- -- Serialization/deserialization of binary .o files for the JavaScript backend -- 
----------------------------------------------------------------------------- module GHC.StgToJS.Object ( ObjectKind(..) , getObjectKind , getObjectKindBS -- * JS object , JSOptions(..) , defaultJSOptions , getOptionsFromJsFile , writeJSObject , readJSObject , parseJSObject , parseJSObjectBS -- * HS object , putObject , getObjectHeader , getObjectBody , getObject , readObject , getObjectBlocks , readObjectBlocks , readObjectBlockInfo , isGlobalBlock , Object(..) , IndexEntry(..) , LocatedBlockInfo (..) , BlockInfo (..) , BlockDeps (..) , BlockLocation (..) , BlockId , BlockIds , BlockRef (..) , ExportedFun (..) ) where import GHC.Prelude import Control.Monad import Data.Array import qualified Data.ByteString as B import qualified Data.ByteString.Unsafe as B import Data.Char (isSpace) import Data.Int import Data.IntSet (IntSet) import qualified Data.IntSet as IS import Data.List (sortOn) import qualified Data.List as List import Data.Map (Map) import qualified Data.Map as M import Data.Word import Data.Semigroup import System.IO import GHC.Settings.Constants (hiVersion) import GHC.JS.Ident import qualified GHC.JS.Syntax as Sat import GHC.StgToJS.Types import GHC.Unit.Module import GHC.Data.FastString import GHC.Types.Unique.Map import GHC.Utils.Binary hiding (SymbolTable) import GHC.Utils.Outputable (ppr, Outputable, hcat, vcat, text, hsep) import GHC.Utils.Monad (mapMaybeM) import GHC.Utils.Panic import GHC.Utils.Misc (dropWhileEndLE) import System.IO.Unsafe import qualified Control.Exception as Exception ---------------------------------------------- -- The JS backend supports 3 kinds of objects: -- 1. HS objects: produced from Haskell sources -- 2. JS objects: produced from JS sources -- 3. Cc objects: produced by emcc (e.g. from C sources) -- -- They all have a different header that allows them to be distinguished. -- See ObjectKind type. 
---------------------------------------------- -- | Different kinds of object (.o) supported by the JS backend data ObjectKind = ObjJs -- ^ JavaScript source embedded in a .o | ObjHs -- ^ JS backend object for Haskell code | ObjCc -- ^ Wasm module object as produced by emcc deriving (Show,Eq,Ord) -- | Get the kind of a file object, if any getObjectKind :: FilePath -> IO (Maybe ObjectKind) getObjectKind fp = withBinaryFile fp ReadMode $ \h -> do let !max_header_length = max (B.length jsHeader) $ max (B.length wasmHeader) (B.length hsHeader) bs <- B.hGet h max_header_length pure $! getObjectKindBS bs -- | Get the kind of an object stored in a bytestring, if any getObjectKindBS :: B.ByteString -> Maybe ObjectKind getObjectKindBS bs | jsHeader `B.isPrefixOf` bs = Just ObjJs | hsHeader `B.isPrefixOf` bs = Just ObjHs | wasmHeader `B.isPrefixOf` bs = Just ObjCc | otherwise = Nothing -- Header added to JS sources to discriminate them from other object files. -- They all have .o extension but JS sources have this header. 
jsHeader :: B.ByteString jsHeader = unsafePerformIO $ B.unsafePackAddressLen 8 "GHCJS_JS"# hsHeader :: B.ByteString hsHeader = unsafePerformIO $ B.unsafePackAddressLen 8 "GHCJS_HS"# wasmHeader :: B.ByteString wasmHeader = unsafePerformIO $ B.unsafePackAddressLen 4 "\0asm"# ------------------------------------------------ -- HS objects -- -- file layout: -- - magic "GHCJS_HS" -- - compiler version tag -- - module name -- - offsets of string table -- - dependencies -- - offset of the index -- - unit infos -- - index -- - string table -- ------------------------------------------------ -- | A HS object file data Object = Object { objModuleName :: !ModuleName -- ^ name of the module , objHandle :: !ReadBinHandle -- ^ BinHandle that can be used to read the ObjBlocks , objPayloadOffset :: !(Bin ObjBlock) -- ^ Offset of the payload (units) , objBlockInfo :: !BlockInfo -- ^ Information about blocks , objIndex :: !Index -- ^ Block index: symbols per block and block offset in the object file } type BlockId = Int type BlockIds = IntSet -- | Information about blocks (linkable units) data BlockInfo = BlockInfo { bi_module :: !Module -- ^ Module they were generated from , bi_must_link :: !BlockIds -- ^ blocks that always need to be linked when this object is loaded (e.g. 
-- everything that contains initializer code or foreign exports) , bi_exports :: !(Map ExportedFun BlockId) -- ^ exported Haskell functions -> block , bi_block_deps :: !(Array BlockId BlockDeps) -- ^ dependencies of each block } data LocatedBlockInfo = LocatedBlockInfo { lbi_loc :: !BlockLocation -- ^ Where to find the blocks , lbi_info :: !BlockInfo -- ^ Block information } instance Outputable BlockInfo where ppr d = vcat [ hcat [ text "module: ", pprModule (bi_module d) ] , hcat [ text "exports: ", ppr (M.keys (bi_exports d)) ] ] -- | Where are the blocks data BlockLocation = ObjectFile FilePath -- ^ In an object file at path | ArchiveFile FilePath -- ^ In a Ar file at path | InMemory String Object -- ^ In memory instance Outputable BlockLocation where ppr = \case ObjectFile fp -> hsep [text "ObjectFile", text fp] ArchiveFile fp -> hsep [text "ArchiveFile", text fp] InMemory s o -> hsep [text "InMemory", text s, ppr (objModuleName o)] -- | A @BlockRef@ is a pair of a module and the index of the block in the -- object file data BlockRef = BlockRef { block_ref_mod :: !Module -- ^ Module , block_ref_idx :: !BlockId -- ^ Block index in the object file } deriving (Eq,Ord) data BlockDeps = BlockDeps { blockBlockDeps :: [BlockId] -- ^ dependencies on blocks in this object , blockFunDeps :: [ExportedFun] -- ^ dependencies on exported symbols in other objects -- , blockForeignExported :: [ExpFun] -- , blockForeignImported :: [ForeignRef] } -- | we use the convention that the first block (0) is a module-global block -- that's always included when something from the module is loaded. everything -- in a module implicitly depends on the global block. 
The global block itself -- can't have dependencies isGlobalBlock :: BlockId -> Bool isGlobalBlock n = n == 0 -- | Exported Functions data ExportedFun = ExportedFun { funModule :: !Module -- ^ The module containing the function , funSymbol :: !LexicalFastString -- ^ The function } deriving (Eq, Ord) instance Outputable ExportedFun where ppr (ExportedFun m f) = vcat [ hcat [ text "module: ", pprModule m ] , hcat [ text "symbol: ", ppr f ] ] -- | Write an ObjBlock, except for the top level symbols which are stored in the -- index putObjBlock :: WriteBinHandle -> ObjBlock -> IO () putObjBlock bh (ObjBlock _syms b c d e f g) = do put_ bh b put_ bh c lazyPut bh d put_ bh e put_ bh f put_ bh g -- | Read an ObjBlock and associate it to the given symbols (that must have been -- read from the index) getObjBlock :: [FastString] -> ReadBinHandle -> IO ObjBlock getObjBlock syms bh = do b <- get bh c <- get bh d <- lazyGet bh e <- get bh f <- get bh g <- get bh pure $ ObjBlock { oiSymbols = syms , oiClInfo = b , oiStatic = c , oiStat = d , oiRaw = e , oiFExports = f , oiFImports = g } -- | Serialized block indexes and their exported symbols -- (the first block is module-global) type Index = [IndexEntry] data IndexEntry = IndexEntry { idxSymbols :: ![FastString] -- ^ Symbols exported by a block , idxOffset :: !(Bin ObjBlock) -- ^ Offset of the block in the object file } -------------------------------------------------------------------------------- -- Essential operations on Objects -------------------------------------------------------------------------------- -- | Given a handle to a Binary payload, add the module, 'mod_name', its -- dependencies, 'deps', and its linkable units to the payload. 
putObject :: WriteBinHandle -> ModuleName -- ^ module -> BlockInfo -- ^ block infos -> [ObjBlock] -- ^ linkable units and their symbols -> IO () putObject bh mod_name deps os = do putByteString bh hsHeader put_ bh (show hiVersion) -- we store the module name as a String because we don't want to have to -- decode the FastString table just to decode it when we're looking for an -- object in an archive. put_ bh (moduleNameString mod_name) (fs_tbl, fs_writer) <- initFastStringWriterTable let bh_fs = addWriterToUserData fs_writer bh forwardPut_ bh (const (putTable fs_tbl bh_fs)) $ do put_ bh_fs deps -- forward put the index forwardPut_ bh_fs (put_ bh_fs) $ do idx <- forM os $ \o -> do p <- tellBinWriter bh_fs -- write units without their symbols putObjBlock bh_fs o -- return symbols and offset to store in the index pure (oiSymbols o,p) pure idx -- | Parse object header getObjectHeader :: ReadBinHandle -> IO (Either String ModuleName) getObjectHeader bh = do magic <- getByteString bh (B.length hsHeader) case magic == hsHeader of False -> pure (Left "invalid magic header for HS object") True -> do is_correct_version <- ((== hiVersion) . read) <$> get bh case is_correct_version of False -> pure (Left "invalid header version") True -> do mod_name <- get bh pure (Right (mkModuleName (mod_name))) -- | Parse object body. 
Must be called after a successful getObjectHeader getObjectBody :: ReadBinHandle -> ModuleName -> IO Object getObjectBody bh0 mod_name = do -- Read the string table dict <- forwardGet bh0 (getDictionary bh0) let bh = setReaderUserData bh0 $ newReadState (panic "No name allowed") (getDictFastString dict) block_info <- get bh idx <- forwardGet bh (get bh) payload_pos <- tellBinReader bh pure $ Object { objModuleName = mod_name , objHandle = bh , objPayloadOffset = payload_pos , objBlockInfo = block_info , objIndex = idx } -- | Parse object getObject :: ReadBinHandle -> IO (Maybe Object) getObject bh = do getObjectHeader bh >>= \case Left _err -> pure Nothing Right mod_name -> Just <$> getObjectBody bh mod_name -- | Read object from file -- -- The object is still in memory after this (see objHandle). readObject :: FilePath -> IO (Maybe Object) readObject file = do bh <- readBinMem file getObject bh -- | Reads only the part necessary to get the block info readObjectBlockInfo :: FilePath -> IO (Maybe BlockInfo) readObjectBlockInfo file = do bh <- readBinMem file getObject bh >>= \case Just obj -> pure $! Just $! 
objBlockInfo obj Nothing -> pure Nothing -- | Get blocks in the object file, using the given filtering function getObjectBlocks :: Object -> BlockIds -> IO [ObjBlock] getObjectBlocks obj bids = mapMaybeM read_entry (zip (objIndex obj) [0..]) where bh = objHandle obj read_entry (IndexEntry syms offset,i) | IS.member i bids = do seekBinReader bh offset Just <$> getObjBlock syms bh | otherwise = pure Nothing -- | Read blocks in the object file, using the given filtering function readObjectBlocks :: FilePath -> BlockIds -> IO [ObjBlock] readObjectBlocks file bids = do readObject file >>= \case Nothing -> pure [] Just obj -> getObjectBlocks obj bids -------------------------------------------------------------------------------- -- Helper functions -------------------------------------------------------------------------------- putEnum :: Enum a => WriteBinHandle -> a -> IO () putEnum bh x | n > 65535 = error ("putEnum: out of range: " ++ show n) | otherwise = put_ bh n where n = fromIntegral $ fromEnum x :: Word16 getEnum :: Enum a => ReadBinHandle -> IO a getEnum bh = toEnum . fromIntegral <$> (get bh :: IO Word16) -- | Helper to convert Int to Int32 toI32 :: Int -> Int32 toI32 = fromIntegral -- | Helper to convert Int32 to Int fromI32 :: Int32 -> Int fromI32 = fromIntegral -------------------------------------------------------------------------------- -- Binary Instances -------------------------------------------------------------------------------- instance Binary IndexEntry where put_ bh (IndexEntry a b) = put_ bh a >> put_ bh b get bh = IndexEntry <$> get bh <*> get bh instance Binary BlockInfo where put_ bh (BlockInfo m r e b) = do put_ bh m put_ bh (map toI32 $ IS.toList r) put_ bh (map (\(x,y) -> (x, toI32 y)) $ M.toList e) put_ bh (elems b) get bh = BlockInfo <$> get bh <*> (IS.fromList . map fromI32 <$> get bh) <*> (M.fromList . 
map (\(x,y) -> (x, fromI32 y)) <$> get bh) <*> ((\xs -> listArray (0, length xs - 1) xs) <$> get bh) instance Binary BlockDeps where put_ bh (BlockDeps bbd bfd) = put_ bh bbd >> put_ bh bfd get bh = BlockDeps <$> get bh <*> get bh instance Binary ForeignJSRef where put_ bh (ForeignJSRef span pat safety cconv arg_tys res_ty) = put_ bh span >> put_ bh pat >> putEnum bh safety >> putEnum bh cconv >> put_ bh arg_tys >> put_ bh res_ty get bh = ForeignJSRef <$> get bh <*> get bh <*> getEnum bh <*> getEnum bh <*> get bh <*> get bh instance Binary ExpFun where put_ bh (ExpFun isIO args res) = put_ bh isIO >> put_ bh args >> put_ bh res get bh = ExpFun <$> get bh <*> get bh <*> get bh instance Binary Sat.JStat where put_ bh (Sat.DeclStat i e) = putByte bh 1 >> put_ bh i >> put_ bh e put_ bh (Sat.ReturnStat e) = putByte bh 2 >> put_ bh e put_ bh (Sat.IfStat e s1 s2) = putByte bh 3 >> put_ bh e >> put_ bh s1 >> put_ bh s2 put_ bh (Sat.WhileStat b e s) = putByte bh 4 >> put_ bh b >> put_ bh e >> put_ bh s put_ bh (Sat.ForStat is c s bd) = putByte bh 5 >> put_ bh is >> put_ bh c >> put_ bh s >> put_ bh bd put_ bh (Sat.ForInStat b i e s) = putByte bh 6 >> put_ bh b >> put_ bh i >> put_ bh e >> put_ bh s put_ bh (Sat.SwitchStat e ss s) = putByte bh 7 >> put_ bh e >> put_ bh ss >> put_ bh s put_ bh (Sat.TryStat s1 i s2 s3) = putByte bh 8 >> put_ bh s1 >> put_ bh i >> put_ bh s2 >> put_ bh s3 put_ bh (Sat.BlockStat xs) = putByte bh 9 >> put_ bh xs put_ bh (Sat.ApplStat e es) = putByte bh 10 >> put_ bh e >> put_ bh es put_ bh (Sat.UOpStat o e) = putByte bh 11 >> put_ bh o >> put_ bh e put_ bh (Sat.AssignStat e1 op e2) = putByte bh 12 >> put_ bh e1 >> put_ bh op >> put_ bh e2 put_ bh (Sat.LabelStat l s) = putByte bh 13 >> put_ bh l >> put_ bh s put_ bh (Sat.BreakStat ml) = putByte bh 14 >> put_ bh ml put_ bh (Sat.ContinueStat ml) = putByte bh 15 >> put_ bh ml put_ bh (Sat.FuncStat i is b) = putByte bh 16 >> put_ bh i >> put_ bh is >> put_ bh b get bh = getByte bh >>= \case 1 -> 
Sat.DeclStat <$> get bh <*> get bh 2 -> Sat.ReturnStat <$> get bh 3 -> Sat.IfStat <$> get bh <*> get bh <*> get bh 4 -> Sat.WhileStat <$> get bh <*> get bh <*> get bh 5 -> Sat.ForStat <$> get bh <*> get bh <*> get bh <*> get bh 6 -> Sat.ForInStat <$> get bh <*> get bh <*> get bh <*> get bh 7 -> Sat.SwitchStat <$> get bh <*> get bh <*> get bh 8 -> Sat.TryStat <$> get bh <*> get bh <*> get bh <*> get bh 9 -> Sat.BlockStat <$> get bh 10 -> Sat.ApplStat <$> get bh <*> get bh 11 -> Sat.UOpStat <$> get bh <*> get bh 12 -> Sat.AssignStat <$> get bh <*> get bh <*> get bh 13 -> Sat.LabelStat <$> get bh <*> get bh 14 -> Sat.BreakStat <$> get bh 15 -> Sat.ContinueStat <$> get bh 16 -> Sat.FuncStat <$> get bh <*> get bh <*> get bh n -> error ("Binary get bh JStat: invalid tag: " ++ show n) instance Binary Sat.JExpr where put_ bh (Sat.ValExpr v) = putByte bh 1 >> put_ bh v put_ bh (Sat.SelExpr e i) = putByte bh 2 >> put_ bh e >> put_ bh i put_ bh (Sat.IdxExpr e1 e2) = putByte bh 3 >> put_ bh e1 >> put_ bh e2 put_ bh (Sat.InfixExpr o e1 e2) = putByte bh 4 >> put_ bh o >> put_ bh e1 >> put_ bh e2 put_ bh (Sat.UOpExpr o e) = putByte bh 5 >> put_ bh o >> put_ bh e put_ bh (Sat.IfExpr e1 e2 e3) = putByte bh 6 >> put_ bh e1 >> put_ bh e2 >> put_ bh e3 put_ bh (Sat.ApplExpr e es) = putByte bh 7 >> put_ bh e >> put_ bh es get bh = getByte bh >>= \case 1 -> Sat.ValExpr <$> get bh 2 -> Sat.SelExpr <$> get bh <*> get bh 3 -> Sat.IdxExpr <$> get bh <*> get bh 4 -> Sat.InfixExpr <$> get bh <*> get bh <*> get bh 5 -> Sat.UOpExpr <$> get bh <*> get bh 6 -> Sat.IfExpr <$> get bh <*> get bh <*> get bh 7 -> Sat.ApplExpr <$> get bh <*> get bh n -> error ("Binary get bh UnsatExpr: invalid tag: " ++ show n) instance Binary Sat.JVal where put_ bh (Sat.JVar i) = putByte bh 1 >> put_ bh i put_ bh (Sat.JList es) = putByte bh 2 >> put_ bh es put_ bh (Sat.JDouble d) = putByte bh 3 >> put_ bh d put_ bh (Sat.JInt i) = putByte bh 4 >> put_ bh i put_ bh (Sat.JStr xs) = putByte bh 5 >> put_ bh xs put_ bh 
(Sat.JRegEx xs) = putByte bh 6 >> put_ bh xs put_ bh (Sat.JBool b) = putByte bh 7 >> put_ bh b put_ bh (Sat.JHash m) = putByte bh 8 >> put_ bh (sortOn (LexicalFastString . fst) $ nonDetUniqMapToList m) put_ bh (Sat.JFunc is s) = putByte bh 9 >> put_ bh is >> put_ bh s get bh = getByte bh >>= \case 1 -> Sat.JVar <$> get bh 2 -> Sat.JList <$> get bh 3 -> Sat.JDouble <$> get bh 4 -> Sat.JInt <$> get bh 5 -> Sat.JStr <$> get bh 6 -> Sat.JRegEx <$> get bh 7 -> Sat.JBool <$> get bh 8 -> Sat.JHash . listToUniqMap <$> get bh 9 -> Sat.JFunc <$> get bh <*> get bh n -> error ("Binary get bh Sat.JVal: invalid tag: " ++ show n) instance Binary Ident where put_ bh (identFS -> xs) = put_ bh xs get bh = name <$> get bh instance Binary ClosureInfo where put_ bh (ClosureInfo v regs name layo typ static) = do put_ bh v >> put_ bh regs >> put_ bh name >> put_ bh layo >> put_ bh typ >> put_ bh static get bh = ClosureInfo <$> get bh <*> get bh <*> get bh <*> get bh <*> get bh <*> get bh instance Binary JSFFIType where put_ bh = putEnum bh get bh = getEnum bh instance Binary JSRep where put_ bh = putEnum bh get bh = getEnum bh instance Binary CIRegs where put_ bh CIRegsUnknown = putByte bh 1 put_ bh (CIRegs skip types) = putByte bh 2 >> put_ bh skip >> put_ bh types get bh = getByte bh >>= \case 1 -> pure CIRegsUnknown 2 -> CIRegs <$> get bh <*> get bh n -> error ("Binary get bh CIRegs: invalid tag: " ++ show n) instance Binary Sat.Op where put_ bh = putEnum bh get bh = getEnum bh instance Binary Sat.UOp where put_ bh = putEnum bh get bh = getEnum bh instance Binary Sat.AOp where put_ bh = putEnum bh get bh = getEnum bh -- 16 bit sizes should be enough... 
instance Binary CILayout where put_ bh CILayoutVariable = putByte bh 1 put_ bh (CILayoutUnknown size) = putByte bh 2 >> put_ bh size put_ bh (CILayoutFixed size types) = putByte bh 3 >> put_ bh size >> put_ bh types get bh = getByte bh >>= \case 1 -> pure CILayoutVariable 2 -> CILayoutUnknown <$> get bh 3 -> CILayoutFixed <$> get bh <*> get bh n -> error ("Binary get bh CILayout: invalid tag: " ++ show n) instance Binary CIStatic where put_ bh (CIStaticRefs refs) = putByte bh 1 >> put_ bh refs get bh = getByte bh >>= \case 1 -> CIStaticRefs <$> get bh n -> error ("Binary get bh CIStatic: invalid tag: " ++ show n) instance Binary CIType where put_ bh (CIFun arity regs) = putByte bh 1 >> put_ bh arity >> put_ bh regs put_ bh CIThunk = putByte bh 2 put_ bh (CICon conTag) = putByte bh 3 >> put_ bh conTag put_ bh CIPap = putByte bh 4 put_ bh CIBlackhole = putByte bh 5 put_ bh CIStackFrame = putByte bh 6 get bh = getByte bh >>= \case 1 -> CIFun <$> get bh <*> get bh 2 -> pure CIThunk 3 -> CICon <$> get bh 4 -> pure CIPap 5 -> pure CIBlackhole 6 -> pure CIStackFrame n -> error ("Binary get bh CIType: invalid tag: " ++ show n) instance Binary ExportedFun where put_ bh (ExportedFun modu symb) = put_ bh modu >> put_ bh symb get bh = ExportedFun <$> get bh <*> get bh instance Binary StaticInfo where put_ bh (StaticInfo ident val cc) = put_ bh ident >> put_ bh val >> put_ bh cc get bh = StaticInfo <$> get bh <*> get bh <*> get bh instance Binary StaticVal where put_ bh (StaticFun f args) = putByte bh 1 >> put_ bh f >> put_ bh args put_ bh (StaticThunk t) = putByte bh 2 >> put_ bh t put_ bh (StaticUnboxed u) = putByte bh 3 >> put_ bh u put_ bh (StaticData dc args) = putByte bh 4 >> put_ bh dc >> put_ bh args put_ bh (StaticList xs t) = putByte bh 5 >> put_ bh xs >> put_ bh t get bh = getByte bh >>= \case 1 -> StaticFun <$> get bh <*> get bh 2 -> StaticThunk <$> get bh 3 -> StaticUnboxed <$> get bh 4 -> StaticData <$> get bh <*> get bh 5 -> StaticList <$> get bh <*> get bh n -> 
error ("Binary get bh StaticVal: invalid tag " ++ show n) instance Binary StaticUnboxed where put_ bh (StaticUnboxedBool b) = putByte bh 1 >> put_ bh b put_ bh (StaticUnboxedInt i) = putByte bh 2 >> put_ bh i put_ bh (StaticUnboxedDouble d) = putByte bh 3 >> put_ bh d put_ bh (StaticUnboxedString str) = putByte bh 4 >> put_ bh str put_ bh (StaticUnboxedStringOffset str) = putByte bh 5 >> put_ bh str get bh = getByte bh >>= \case 1 -> StaticUnboxedBool <$> get bh 2 -> StaticUnboxedInt <$> get bh 3 -> StaticUnboxedDouble <$> get bh 4 -> StaticUnboxedString <$> get bh 5 -> StaticUnboxedStringOffset <$> get bh n -> error ("Binary get bh StaticUnboxed: invalid tag " ++ show n) instance Binary StaticArg where put_ bh (StaticObjArg i) = putByte bh 1 >> put_ bh i put_ bh (StaticLitArg p) = putByte bh 2 >> put_ bh p put_ bh (StaticConArg c args) = putByte bh 3 >> put_ bh c >> put_ bh args get bh = getByte bh >>= \case 1 -> StaticObjArg <$> get bh 2 -> StaticLitArg <$> get bh 3 -> StaticConArg <$> get bh <*> get bh n -> error ("Binary get bh StaticArg: invalid tag " ++ show n) instance Binary StaticLit where put_ bh (BoolLit b) = putByte bh 1 >> put_ bh b put_ bh (IntLit i) = putByte bh 2 >> put_ bh i put_ bh NullLit = putByte bh 3 put_ bh (DoubleLit d) = putByte bh 4 >> put_ bh d put_ bh (StringLit t) = putByte bh 5 >> put_ bh t put_ bh (BinLit b) = putByte bh 6 >> put_ bh b put_ bh (LabelLit b t) = putByte bh 7 >> put_ bh b >> put_ bh t get bh = getByte bh >>= \case 1 -> BoolLit <$> get bh 2 -> IntLit <$> get bh 3 -> pure NullLit 4 -> DoubleLit <$> get bh 5 -> StringLit <$> get bh 6 -> BinLit <$> get bh 7 -> LabelLit <$> get bh <*> get bh n -> error ("Binary get bh StaticLit: invalid tag " ++ show n) ------------------------------------------------ -- JS objects ------------------------------------------------ -- | Options obtained from pragmas in JS files data JSOptions = JSOptions { enableCPP :: !Bool -- ^ Enable CPP on the JS file , emccExtraOptions :: ![String] -- ^ 
Pass additional options to emcc at link time , emccExportedFunctions :: ![String] -- ^ Arguments for `-sEXPORTED_FUNCTIONS` , emccExportedRuntimeMethods :: ![String] -- ^ Arguments for `-sEXPORTED_RUNTIME_METHODS` } deriving (Eq, Ord) instance Binary JSOptions where put_ bh (JSOptions a b c d) = do put_ bh a put_ bh b put_ bh c put_ bh d get bh = JSOptions <$> get bh <*> get bh <*> get bh <*> get bh instance Semigroup JSOptions where a <> b = JSOptions { enableCPP = enableCPP a || enableCPP b , emccExtraOptions = emccExtraOptions a ++ emccExtraOptions b , emccExportedFunctions = List.nub (List.sort (emccExportedFunctions a ++ emccExportedFunctions b)) , emccExportedRuntimeMethods = List.nub (List.sort (emccExportedRuntimeMethods a ++ emccExportedRuntimeMethods b)) } defaultJSOptions :: JSOptions defaultJSOptions = JSOptions { enableCPP = False , emccExtraOptions = [] , emccExportedRuntimeMethods = [] , emccExportedFunctions = [] } -- mimics `lines` implementation splitOnComma :: String -> [String] splitOnComma s = cons $ case break (== ',') s of (l, s') -> (l, case s' of [] -> [] _:s'' -> splitOnComma s'') where cons ~(h, t) = h : t -- | Get the JS option pragmas from .js files getJsOptions :: Handle -> IO JSOptions getJsOptions handle = do hSetEncoding handle utf8 let trim = dropWhileEndLE isSpace . 
dropWhile isSpace let go opts = do hIsEOF handle >>= \case True -> pure opts False -> do xs <- hGetLine handle if not ("//#OPTIONS:" `List.isPrefixOf` xs) then pure opts else do -- drop prefix and spaces let ys = trim (drop 11 xs) let opts' = if | ys == "CPP" -> opts {enableCPP = True} | Just s <- List.stripPrefix "EMCC:EXPORTED_FUNCTIONS=" ys , fns <- fmap trim (splitOnComma s) -> opts { emccExportedFunctions = emccExportedFunctions opts ++ fns } | Just s <- List.stripPrefix "EMCC:EXPORTED_RUNTIME_METHODS=" ys , fns <- fmap trim (splitOnComma s) -> opts { emccExportedRuntimeMethods = emccExportedRuntimeMethods opts ++ fns } | Just s <- List.stripPrefix "EMCC:EXTRA=" ys -> opts { emccExtraOptions = emccExtraOptions opts ++ [s] } | otherwise -> panic ("Unrecognized JS pragma: " ++ ys) go opts' go defaultJSOptions -- | Parse option pragma in JS file getOptionsFromJsFile :: FilePath -- ^ Input file -> IO JSOptions -- ^ Parsed options. getOptionsFromJsFile filename = Exception.bracket (openBinaryFile filename ReadMode) hClose getJsOptions -- | Write a JS object (embed some handwritten JS code) writeJSObject :: JSOptions -> B.ByteString -> FilePath -> IO () writeJSObject opts contents output_fn = do bh <- openBinMem (B.length contents + 1000) putByteString bh jsHeader put_ bh opts put_ bh contents writeBinMem bh output_fn -- | Read a JS object from BinHandle parseJSObject :: ReadBinHandle -> IO (JSOptions, B.ByteString) parseJSObject bh = do magic <- getByteString bh (B.length jsHeader) case magic == jsHeader of False -> panic "invalid magic header for JS object" True -> do opts <- get bh contents <- get bh pure (opts,contents) -- | Read a JS object from ByteString parseJSObjectBS :: B.ByteString -> IO (JSOptions, B.ByteString) parseJSObjectBS bs = do bh <- unsafeUnpackBinBuffer bs parseJSObject bh -- | Read a JS object from file readJSObject :: FilePath -> IO (JSOptions, B.ByteString) readJSObject input_fn = do bh <- readBinMem input_fn parseJSObject bh 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/Symbols.hs0000644000000000000000000007236307346545000021202 0ustar0000000000000000{-# LANGUAGE OverloadedStrings #-} -- | JS symbol generation module GHC.StgToJS.Symbols where import GHC.Prelude import GHC.JS.JStg.Syntax import GHC.JS.Ident import GHC.Data.FastString import GHC.Unit.Module import GHC.Utils.Word64 (intToWord64) import Data.ByteString (ByteString) import Data.Word (Word64) import qualified Data.ByteString.Char8 as BSC import qualified Data.ByteString.Builder as BSB import qualified Data.ByteString.Lazy as BSL import Data.Array import Data.Semigroup ((<>)) -- | Hexadecimal representation of an int -- -- Used for the sub indices. intBS :: Int -> ByteString intBS = word64BS . intToWord64 -- | Hexadecimal representation of a 64-bit word -- -- Used for uniques. We could use base-62 as GHC usually does but this is likely -- faster. word64BS :: Word64 -> ByteString word64BS = BSL.toStrict . BSB.toLazyByteString . BSB.word64Hex -- | Return z-encoded unit:module unitModuleStringZ :: Module -> ByteString unitModuleStringZ mod = mconcat [ fastZStringToByteString (zEncodeFS (unitIdFS (moduleUnitId mod))) , BSC.pack "ZC" -- z-encoding for ":" , fastZStringToByteString (zEncodeFS (moduleNameFS (moduleName mod))) ] -- | the global linkable unit of a module exports this symbol, depend on it to -- include that unit (used for cost centres) moduleGlobalSymbol :: Module -> FastString moduleGlobalSymbol m = mkFastStringByteString $ mconcat [ hdB , unitModuleStringZ m , BSC.pack "_" ] moduleExportsSymbol :: Module -> FastString moduleExportsSymbol m = mkFastStringByteString $ mconcat [ hdB , unitModuleStringZ m , BSC.pack "_" ] -- | Make JS symbol corresponding to the given Haskell symbol in the given -- module mkJsSymbolBS :: Bool -> Module -> FastString -> ByteString mkJsSymbolBS exported mod s = mconcat [ if exported then hdB else hddB , unitModuleStringZ mod , BSC.pack "zi" -- z-encoding of "." 
, fastZStringToByteString (zEncodeFS s) ] -- | Make JS symbol corresponding to the given Haskell symbol in the given -- module mkJsSymbol :: Bool -> Module -> FastString -> FastString mkJsSymbol exported mod s = mkFastStringByteString (mkJsSymbolBS exported mod s) -- | Make JS symbol for given module and unique. mkFreshJsSymbol :: Module -> Int -> FastString mkFreshJsSymbol mod i = mkFastStringByteString $ mconcat [ hddB , unitModuleStringZ mod , BSC.pack "_" , intBS i ] -- | Make symbol "h$XYZ" or "h$$XYZ" mkRawSymbol :: Bool -> FastString -> FastString mkRawSymbol exported fs | exported = mkFastStringByteString $ mconcat [ hdB, bytesFS fs ] | otherwise = mkFastStringByteString $ mconcat [ hddB, bytesFS fs ] -- | "h$$" constant string hddB :: ByteString hddB = BSC.pack "h$$" -- | "h$" constant string hdB :: ByteString hdB = BSC.take 2 hddB hd :: JStgExpr hd = global hdStr hdStr :: FastString hdStr = mkFastStringByteString hdB hdlB :: ByteString hdlB = BSC.pack "h$l" ----------------------------------------- Runtime ------------------------------- hdApply :: JStgExpr hdApply = global hdApplyStr hdApplyStr :: FastString hdApplyStr = fsLit "h$apply" hdMoveRegs2 :: FastString hdMoveRegs2 = fsLit "h$moveRegs2" hdPapGen :: JStgExpr hdPapGen = global hdPapGenStr hdPapGenStr :: FastString hdPapGenStr = fsLit "h$pap_gen" hdSetReg :: JStgExpr hdSetReg = global hdSetRegStr hdSetRegStr :: FastString hdSetRegStr = fsLit "h$setReg" hdGetReg :: JStgExpr hdGetReg = global hdGetRegStr hdGetRegStr :: FastString hdGetRegStr = fsLit "h$getReg" hdResetRegisters :: Ident hdResetRegisters = name "h$resetRegisters" hdResetResultVars :: Ident hdResetResultVars = name "h$resetResultVars" hdInitClosure :: FastString hdInitClosure = fsLit "h$init_closure" hdRegs :: JStgExpr hdRegs = global (identFS hdRegsStr) hdRegsStr :: Ident hdRegsStr = name "h$regs" hdReturn :: JStgExpr hdReturn = global (identFS hdReturnStr) hdReturnStr :: Ident hdReturnStr = name "h$return" hdStack :: JStgExpr hdStack 
= global (identFS hdStackStr) hdStackStr :: Ident hdStackStr = name "h$stack" hdStackPtr :: JStgExpr hdStackPtr = global (identFS hdStackPtrStr) hdStackPtrStr :: Ident hdStackPtrStr = name "h$sp" hdBlackHoleTrap :: JStgExpr hdBlackHoleTrap = global (identFS hdBlackHoleTrapStr) hdBlackHoleTrapStr :: Ident hdBlackHoleTrapStr = name "h$blackholeTrap" hdBlockOnBlackHoleStr :: FastString hdBlockOnBlackHoleStr = "h$blockOnBlackhole" hdBlackHoleLNE :: JStgExpr hdBlackHoleLNE = global (identFS hdBlackHoleLNEStr) hdBlackHoleLNEStr :: Ident hdBlackHoleLNEStr = name "h$bh_lne" hdClosureTypeName :: JStgExpr hdClosureTypeName = global (identFS hdClosureTypeNameStr) hdClosureTypeNameStr :: Ident hdClosureTypeNameStr = name "h$closureTypeName" hdBh :: JStgExpr hdBh = global hdBhStr hdBhStr :: FastString hdBhStr = fsLit "h$bh" hdBlackHole :: JStgExpr hdBlackHole = global (identFS hdBlackHoleStr) hdBlackHoleStr :: Ident hdBlackHoleStr = name "h$blackhole" hdUpdFrame :: JStgExpr hdUpdFrame = global (identFS hdUpdFrameStr) hdUpdFrameStr :: Ident hdUpdFrameStr = name $ fsLit "h$upd_frame" hdCSel :: JStgExpr hdCSel = global hdCSelStr hdCSelStr :: FastString hdCSelStr = "h$c_sel_" hdEntry :: Ident hdEntry = name hdEntryStr hdEntryStr :: FastString hdEntryStr = fsLit "h$e" hdApGen :: JStgExpr hdApGen = global (identFS hdApGenStr) hdApGenStr :: Ident hdApGenStr = name "h$ap_gen" hdApGenFastStr :: Ident hdApGenFastStr = name $ fsLit $ unpackFS (identFS hdApGenStr) ++ "_fast" hdLog :: JStgExpr hdLog = global hdLogStr hdLogStr :: FastString hdLogStr = fsLit "h$log" hdMkFunctionPtr :: JStgExpr hdMkFunctionPtr = global "h$mkFunctionPtr" hdInitStatic :: JStgExpr hdInitStatic = global (identFS hdInitStaticStr) hdInitStaticStr :: Ident hdInitStaticStr = name "h$initStatic" hdHsSptInsert :: JStgExpr hdHsSptInsert = global "h$hs_spt_insert" hdCurrentThread :: JStgExpr hdCurrentThread = global (identFS hdCurrentThreadStr) hdCurrentThreadStr :: Ident hdCurrentThreadStr = name "h$currentThread" 
hdWakeupThread :: FastString hdWakeupThread = fsLit "h$wakeupThread" hdPaps :: JStgExpr hdPaps = global hdPapsStr hdPapsStr :: FastString hdPapsStr = fsLit "h$paps" hdPapStr_ :: FastString hdPapStr_ = fsLit "h$pap_" hdLazyEntryStr :: Ident hdLazyEntryStr = name "h$lazy_e" hdUnboxEntry :: JStgExpr hdUnboxEntry = global (identFS hdUnboxEntryStr) hdUnboxEntryStr :: Ident hdUnboxEntryStr = name "h$unbox_e" hdMaskFrame :: JStgExpr hdMaskFrame = global (identFS hdMaskFrameStr) hdMaskFrameStr :: Ident hdMaskFrameStr = name "h$maskFrame" hdUnMaskFrameStr :: Ident hdUnMaskFrameStr = name "h$unmaskFrame" hdReturnF :: JStgExpr hdReturnF = global (identFS hdReturnFStr) hdReturnFStr :: Ident hdReturnFStr = name "h$returnf" hdResumeEntryStr :: Ident hdResumeEntryStr = name "h$resume_e" hdFlushStdout :: JStgExpr hdFlushStdout = global (identFS hdFlushStdoutStr) hdFlushStdoutStr :: Ident hdFlushStdoutStr = name "h$flushStdout" hdFlushStdoutEntry :: JStgExpr hdFlushStdoutEntry = global (identFS hdFlushStdoutEntryStr) hdFlushStdoutEntryStr :: Ident hdFlushStdoutEntryStr = name "h$flushStdout_e" hdRunIOEntry :: JStgExpr hdRunIOEntry = global (identFS hdRunIOEntryStr) hdRunIOEntryStr :: Ident hdRunIOEntryStr = name "h$runio_e" hdReduce :: JStgExpr hdReduce = global (identFS hdReduceStr) hdReduceStr :: Ident hdReduceStr = name "h$reduce" hdThrowStr :: FastString hdThrowStr = fsLit "h$throw" hdRaiseAsyncFrame :: JStgExpr hdRaiseAsyncFrame = global (identFS hdRaiseAsyncFrameStr) hdRaiseAsyncFrameStr :: Ident hdRaiseAsyncFrameStr = name "h$raiseAsync_frame" hdRaiseAsyncEntry :: JStgExpr hdRaiseAsyncEntry = global (identFS hdRaiseAsyncEntryStr) hdRaiseAsyncEntryStr :: Ident hdRaiseAsyncEntryStr = name "h$raiseAsync_e" hdRaiseEntry :: JStgExpr hdRaiseEntry = global (identFS hdRaiseEntryStr) hdRaiseEntryStr :: Ident hdRaiseEntryStr = name "h$raise_e" hdKeepAliveEntry :: JStgExpr hdKeepAliveEntry = global (identFS hdKeepAliveEntryStr) hdKeepAliveEntryStr :: Ident hdKeepAliveEntryStr = name 
"h$keepAlive_e" hdSelect2Return :: JStgExpr hdSelect2Return = global (identFS hdSelect2ReturnStr) hdSelect2ReturnStr :: Ident hdSelect2ReturnStr = name "h$select2_ret" hdSelect2Entry :: JStgExpr hdSelect2Entry = global (identFS hdSelect2EntryStr) hdSelect2EntryStr :: Ident hdSelect2EntryStr = name "h$select2_e" hdSelect1Ret :: JStgExpr hdSelect1Ret = global (identFS hdSelect1RetStr) hdSelect1RetStr :: Ident hdSelect1RetStr = name "h$select1_ret" hdSelect1EntryStr :: Ident hdSelect1EntryStr = name "h$select1_e" hdStaticThunkStr :: FastString hdStaticThunkStr = fsLit "h$static_thunk" hdStaticThunksStr , hdStaticThunksArrStr , hdCAFsStr , hdCAFsResetStr :: Ident hdStaticThunksStr = name "h$staticThunks" hdStaticThunksArrStr = name "h$staticThunksArr" hdCAFsStr = name "h$CAFs" hdCAFsResetStr = name "h$CAFsReset" hdUpdThunkEntryStr :: Ident hdUpdThunkEntryStr = name "h$upd_thunk_e" hdAp3EntryStr :: Ident hdAp3EntryStr = name "h$ap3_e" hdAp2EntryStr :: Ident hdAp2EntryStr = name "h$ap2_e" hdAp1EntryStr :: Ident hdAp1EntryStr = name "h$ap1_e" hdDataToTagEntryStr :: Ident hdDataToTagEntryStr = name "h$dataToTag_e" hdTagToEnum :: FastString hdTagToEnum = fsLit "h$tagToEnum" hdCatchEntryStr :: Ident hdCatchEntryStr = name "h$catch_e" hdNoopStr :: Ident hdNoopStr = name "h$noop" hdNoopEntry :: JStgExpr hdNoopEntry = global (identFS hdNoopEntryStr) hdNoopEntryStr :: Ident hdNoopEntryStr = name "h$noop_e" hdC0 :: JStgExpr hdC0 = global (identFS hdC0Str) hdC :: JStgExpr hdC = global (identFS hdCStr) hdC0Str :: Ident hdC0Str = name "h$c0" hdCStr :: Ident hdCStr = name "h$c" hdData2Entry :: Ident hdData2Entry = name "h$data2_e" hdData1Entry :: Ident hdData1Entry = name "h$data1_e" hdTrueEntry :: Ident hdTrueEntry = name "h$true_e" hdFalseEntry :: Ident hdFalseEntry = name "h$false_e" hdDoneMainEntry :: JStgExpr hdDoneMainEntry = global (identFS hdDoneMainEntryStr) hdDoneMainEntryStr :: Ident hdDoneMainEntryStr = name "h$doneMain_e" hdDoneMain :: JStgExpr hdDoneMain = global 
"h$doneMain" hdDone :: Ident hdDone = name "h$done" hdExitProcess :: FastString hdExitProcess = "h$exitProcess" hdTraceAlloc :: FastString hdTraceAlloc = fsLit "h$traceAlloc" hdDebugAllocNotifyAlloc :: FastString hdDebugAllocNotifyAlloc = fsLit "h$debugAlloc_notifyAlloc" hdRtsTraceForeign , hdRtsProfiling , hdCtFun , hdCtCon , hdCtThunk , hdCtPap , hdCtBlackhole , hdCtStackFrame , hdCtVtPtr , hdVtVoid , hdVtInt , hdVtDouble , hdVtLong , hdVtAddr , hdVtObj , hdVtArr :: Ident hdRtsTraceForeign = name "h$rts_traceForeign" hdRtsProfiling = name "h$rts_profiling" hdCtFun = name "h$ct_fun" hdCtCon = name "h$ct_con" hdCtThunk = name "h$ct_thunk" hdCtPap = name "h$ct_pap" hdCtBlackhole = name "h$ct_blackhole" hdCtStackFrame = name "h$ct_stackframe" hdCtVtPtr = name "h$vt_ptr" hdVtVoid = name "h$vt_void" hdVtInt = name "h$vt_int" hdVtDouble = name "h$vt_double" hdVtLong = name "h$vt_long" hdVtAddr = name "h$vt_addr" hdVtObj = name "h$vt_obj" hdVtArr = name "h$vt_arr" hdLoads :: Array Int Ident hdLoads = listArray (1,32) [ name . 
mkFastStringByteString $ hdlB <> BSC.pack (show n) | n <- [1..32::Int] ] ----------------------------------------- Precompiled Aps ---------------------- hdAp00 :: JStgExpr hdAp00 = global (identFS hdAp00Str) hdAp00Str :: Ident hdAp00Str = name "h$ap_0_0" hdAp00FastStr :: FastString hdAp00FastStr = fsLit "h$ap_0_0_fast" hdAp11Fast :: FastString hdAp11Fast = fsLit "h$ap_1_1_fast" hdAp10 :: JStgExpr hdAp10 = global "h$ap_1_0" hdAp33FastStr :: FastString hdAp33FastStr = fsLit "h$ap_3_3_fast" hdAp22FastStr :: FastString hdAp22FastStr = fsLit "h$ap_2_2_fast" ----------------------------------------- ByteArray ----------------------------- hdNewByteArrayStr :: FastString hdNewByteArrayStr = "h$newByteArray" hdCopyMutableByteArrayStr :: FastString hdCopyMutableByteArrayStr = "h$copyMutableByteArray" hdCheckOverlapByteArrayStr :: FastString hdCheckOverlapByteArrayStr = "h$checkOverlapByteArray" hdShrinkMutableCharArrayStr :: FastString hdShrinkMutableCharArrayStr = "h$shrinkMutableCharArray" ----------------------------------------- EventLog ----------------------------- hdTraceEventStr :: FastString hdTraceEventStr = "h$traceEvent" hdTraceEventBinaryStr :: FastString hdTraceEventBinaryStr = "h$traceEventBinary" hdTraceMarkerStr :: FastString hdTraceMarkerStr = "h$traceMarker" ----------------------------------------- FFI ---------------------------------- hdThrowJSException :: JStgExpr hdThrowJSException = global $ fsLit "h$throwJSException" hdUnboxFFIResult :: JStgExpr hdUnboxFFIResult = global (identFS hdUnboxFFIResultStr) hdUnboxFFIResultStr :: Ident hdUnboxFFIResultStr = name "h$unboxFFIResult" hdMkForeignCallback :: JStgExpr hdMkForeignCallback = global $ fsLit "h$mkForeignCallback" hdTraceForeign :: JStgExpr hdTraceForeign = global $ fsLit "h$traceForeign" hdBuildObject :: JStgExpr hdBuildObject = global hdBuildObjectStr hdBuildObjectStr :: FastString hdBuildObjectStr = fsLit "h$buildObject" hdCallDynamicStr :: FastString hdCallDynamicStr = fsLit "h$callDynamic" 
except :: JStgExpr except = global $ identFS exceptStr exceptStr :: Ident exceptStr = name $ fsLit "except" excepStr :: FastString excepStr = fsLit "excep" ----------------------------------------- Accessors ----------------------------- -- for almost all other symbols that are faststrings we turn 'foo' into 'fooStr' -- because these are overloaded with JStgExpr's. But for accessors we leave -- these as FastStrings because they will become Idents after the refactor. mv :: FastString mv = fsLit "mv" lngth :: FastString lngth = fsLit "length" -- | only for byte arrays. This is a JS byte array method len :: FastString len = fsLit "len" slice :: FastString slice = fsLit "slice" this :: JStgExpr this = global "this" arr :: FastString arr = fsLit "arr" dv :: FastString dv = fsLit "dv" d1, d2, d3 :: JStgExpr d1 = global d1Str d2 = global d2Str d3 = global d3Str d1Str, d2Str, d3Str :: FastString d1Str = fsLit "d1" d2Str = fsLit "d2" d3Str = fsLit "d3" getInt16 :: FastString getInt16 = "getInt16" getUint16 :: FastString getUint16 = "getUint16" getInt32 :: FastString getInt32 = "getInt32" getUint32 :: FastString getUint32 = "getUint32" getFloat32 :: FastString getFloat32 = "getFloat32" getFloat64 :: FastString getFloat64 = "getFloat64" setInt16 :: FastString setInt16 = "setInt16" setUint16 :: FastString setUint16 = "setUint16" setInt32 :: FastString setInt32 = "setInt32" setUint32 :: FastString setUint32 = "setUint32" setFloat32 :: FastString setFloat32 = "setFloat32" setFloat64 :: FastString setFloat64 = "setFloat64" i3, u8, u1, f6, f3 :: FastString i3 = "i3" u8 = "u8" u1 = "u1" f6 = "f6" f3 = "f3" val :: FastString val = fsLit "val" label :: FastString label = fsLit "label" mask :: FastString mask = fsLit "mask" unMask :: FastString unMask = fsLit "unmask" resume :: FastString resume = "resume" f :: FastString f = fsLit "f" n :: FastString n = fsLit "n" hasOwnProperty :: FastString hasOwnProperty = fsLit "hasOwnProperty" hdCollectProps :: FastString hdCollectProps = fsLit 
"h$collectProps" replace :: FastString replace = fsLit "replace" substring :: FastString substring = fsLit "substring" trace :: FastString trace = fsLit "trace" apply :: FastString apply = fsLit "apply" ----------------------------------------- STM ---------------------------------- hdMVar :: JStgExpr hdMVar = global hdMVarStr hdMVarStr :: FastString hdMVarStr = fsLit "h$MVar" hdTakeMVar :: JStgExpr hdTakeMVar = global hdTakeMVarStr hdTakeMVarStr :: FastString hdTakeMVarStr = fsLit "h$takeMVar" hdTryTakeMVarStr :: FastString hdTryTakeMVarStr = fsLit "h$tryTakeMVar" hdPutMVarStr :: FastString hdPutMVarStr = fsLit "h$putMVar" hdTryPutMVarStr :: FastString hdTryPutMVarStr = fsLit "h$tryPutMVar" hdNewTVar :: FastString hdNewTVar = fsLit "h$newTVar" hdReadTVar :: FastString hdReadTVar = fsLit "h$readTVar" hdReadTVarIO :: FastString hdReadTVarIO = fsLit "h$readTVarIO" hdWriteTVar :: FastString hdWriteTVar = fsLit "h$writeTVar" hdReadMVarStr :: FastString hdReadMVarStr = fsLit "h$readMVar" hdStmRemoveBlockedThreadStr :: FastString hdStmRemoveBlockedThreadStr = fsLit "h$stmRemoveBlockedThread" hdStmStartTransactionStr :: FastString hdStmStartTransactionStr = fsLit "h$stmStartTransaction" hdAtomicallyEntry :: JStgExpr hdAtomicallyEntry = global (identFS hdAtomicallyEntryStr) hdAtomicallyEntryStr :: Ident hdAtomicallyEntryStr = name $ fsLit "h$atomically_e" hdAtomicallyStr :: FastString hdAtomicallyStr = "h$atomically" hdStgResumeRetryEntry :: JStgExpr hdStgResumeRetryEntry = global (identFS hdStgResumeRetryEntryStr) hdStgResumeRetryEntryStr :: Ident hdStgResumeRetryEntryStr = name $ fsLit "h$stmResumeRetry_e" hdStmCommitTransactionStr :: FastString hdStmCommitTransactionStr = fsLit "h$stmCommitTransaction" hdStmValidateTransactionStr :: FastString hdStmValidateTransactionStr = "h$stmValidateTransaction" hdStmCatchRetryEntry :: JStgExpr hdStmCatchRetryEntry = global (identFS hdStmCatchRetryEntryStr) hdStmCatchRetryEntryStr :: Ident hdStmCatchRetryEntryStr = name $ fsLit 
"h$stmCatchRetry_e" hdStmRetryStr :: FastString hdStmRetryStr = fsLit "h$stmRetry" hdStmCatchRetryStr :: FastString hdStmCatchRetryStr = fsLit "h$stmCatchRetry" hdStmCatchEntry :: JStgExpr hdStmCatchEntry = global (identFS hdStmCatchEntryStr) hdCatchStmStr :: FastString hdCatchStmStr = fsLit "h$catchStm" hdStmCatchEntryStr :: Ident hdStmCatchEntryStr = name $ fsLit "h$catchStm_e" hdRetryInterrupted :: JStgExpr hdRetryInterrupted = global (identFS hdRetryInterruptedStr) hdRetryInterruptedStr :: Ident hdRetryInterruptedStr = name $ fsLit "h$retryInterrupted" hdMaskUnintFrame :: JStgExpr hdMaskUnintFrame = global (identFS hdMaskUnintFrameStr) hdMaskUnintFrameStr :: Ident hdMaskUnintFrameStr = name $ fsLit "h$maskUnintFrame" hdReschedule :: JStgExpr hdReschedule = global (identFS hdRescheduleStr) hdRescheduleStr :: Ident hdRescheduleStr = name $ fsLit "h$reschedule" hdRestoreThread :: JStgExpr hdRestoreThread = global (identFS hdRestoreThreadStr) hdRestoreThreadStr :: Ident hdRestoreThreadStr = name $ fsLit "h$restoreThread" hdFinishedThread :: FastString hdFinishedThread = fsLit "h$finishThread" ----------------------------------------- Z-Encodings --------------------------- hdPrimOpStr :: FastString hdPrimOpStr = fsLit "h$primop_" wrapperColonStr :: FastString wrapperColonStr = fsLit "ghczuwrapperZC" -- equivalent non-z-encoding => ghc_wrapper: hdInternalExceptionTypeDivZero :: JStgExpr hdInternalExceptionTypeDivZero = global "h$ghczminternalZCGHCziInternalziExceptionziTypezidivZZeroException" hdInternalExceptionTypeOverflow :: JStgExpr hdInternalExceptionTypeOverflow = global "h$ghczminternalZCGHCziInternalziExceptionziTypezioverflowException" hdInternalExceptionTypeUnderflow :: JStgExpr hdInternalExceptionTypeUnderflow = global "h$ghczminternalZCGHCziInternalziExceptionziTypeziunderflowException" hdInternalExceptionControlExceptionBaseNonTermination :: JStgExpr hdInternalExceptionControlExceptionBaseNonTermination = global 
"h$ghczminternalZCGHCziInternalziControlziExceptionziBasezinonTermination" hdGhcInternalIOHandleFlush :: JStgExpr hdGhcInternalIOHandleFlush = global "h$ghczminternalZCGHCziInternalziIOziHandlezihFlush" hdGhcInternalIOHandleFDStdout :: JStgExpr hdGhcInternalIOHandleFDStdout = global "h$ghczminternalZCGHCziInternalziIOziHandleziFDzistdout" hdGhcInternalJSPrimValConEntryStr :: FastString hdGhcInternalJSPrimValConEntryStr = fsLit "h$ghczminternalZCGHCziInternalziJSziPrimziJSVal_con_e" ----------------------------------------- Profiling ----------------------------- hdBuildCCSPtrStr :: FastString hdBuildCCSPtrStr = "h$buildCCSPtr" hdClearCCSStr :: FastString hdClearCCSStr = "h$clearCCS" hdRestoreCCSStr :: FastString hdRestoreCCSStr = fsLit "h$restoreCCS" hdSetCcsEntry :: JStgExpr hdSetCcsEntry = global (identFS hdSetCcsEntryStr) hdSetCcsEntryStr :: Ident hdSetCcsEntryStr = name $ fsLit "h$setCcs_e" ccStr :: FastString ccStr = fsLit "cc" ----------------------------------------- Others ------------------------------- unknown :: FastString unknown = fsLit "" typeof :: FastString typeof = fsLit "typeof" hdRawStr :: FastString hdRawStr = fsLit "h$rstr" throwStr :: FastString throwStr = fsLit "throw" hdCheckObj :: JStgExpr hdCheckObj = global $ fsLit "h$checkObj" console :: JStgExpr console = global consoleStr consoleStr :: FastString consoleStr = fsLit "console" arguments :: JStgExpr arguments = global argumentsStr argumentsStr :: FastString argumentsStr = fsLit "arguments" hdReportHeapOverflow :: JStgExpr hdReportHeapOverflow = global (identFS hdReportHeapOverflowStr) hdReportHeapOverflowStr :: Ident hdReportHeapOverflowStr = name $ fsLit "h$reportHeapOverflow" hdReportStackOverflow :: JStgExpr hdReportStackOverflow = global (identFS hdReportStackOverflowStr) hdReportStackOverflowStr :: Ident hdReportStackOverflowStr = name $ fsLit "h$reportStackOverflow" hdDumpRes :: JStgExpr hdDumpRes = global (identFS hdDumpResStr) hdDumpResStr :: Ident hdDumpResStr = name $ fsLit 
"h$dumpRes" ghcjsArray :: FastString ghcjsArray = fsLit "__ghcjsArray" ----------------------------------------- Compact ------------------------------- hdCompactSize :: FastString hdCompactSize = fsLit "h$compactSize" hdCompactAddWithSharing :: FastString hdCompactAddWithSharing = fsLit "h$compactAddWithSharing" hdCompactAdd :: FastString hdCompactAdd = fsLit "h$compactAdd" hdCompactFixupPointers :: FastString hdCompactFixupPointers = fsLit "h$compactFixupPointers" hdCompactAllocateBlock :: FastString hdCompactAllocateBlock = fsLit "h$compactAllocateBlock" hdCompactGetNextBlock :: FastString hdCompactGetNextBlock = fsLit "h$compactGetNextBlock" hdCompactGetFirstBlock :: FastString hdCompactGetFirstBlock = fsLit "h$compactGetFirstBlock" hdCompactContainsAny :: FastString hdCompactContainsAny = fsLit "h$compactContainsAny" hdCompactContains :: FastString hdCompactContains = fsLit "h$compactContains" hdCompactResize :: FastString hdCompactResize = fsLit "h$compactResize" hdCompactNew :: FastString hdCompactNew = fsLit "h$compactNew" ----------------------------------------- Stable Pointers ----------------------- hdStableNameInt :: FastString hdStableNameInt = fsLit "h$stableNameInt" hdMakeStableName :: FastString hdMakeStableName = fsLit "h$makeStableName" hdDeRefStablePtr :: FastString hdDeRefStablePtr = fsLit "h$deRefStablePtr" hdStablePtrBuf :: JStgExpr hdStablePtrBuf = global "h$stablePtrBuf" hdMakeStablePtrStr :: FastString hdMakeStablePtrStr = fsLit "h$makeStablePtr" ------------------------------- Weak Pointers ----------------------------------- hdKeepAlive :: FastString hdKeepAlive = fsLit "h$keepAlive" hdFinalizeWeak :: FastString hdFinalizeWeak = fsLit "h$finalizeWeak" hdMakeWeakNoFinalizer :: FastString hdMakeWeakNoFinalizer = fsLit "h$makeWeakNoFinalizer" hdMakeWeak :: FastString hdMakeWeak = fsLit "h$makeWeak" ------------------------------- Concurrency Primitives ------------------------- hdGetThreadLabel :: FastString hdGetThreadLabel = fsLit 
"h$getThreadLabel" hdListThreads :: FastString hdListThreads = fsLit "h$listThreads" hdThreadStatus :: FastString hdThreadStatus = fsLit "h$threadStatus" hdYield :: FastString hdYield = fsLit "h$yield" hdKillThread :: FastString hdKillThread = fsLit "h$killThread" hdFork :: FastString hdFork = fsLit "h$fork" ------------------------------- Delay/Wait Ops --------------------------------- hdWaitWrite :: FastString hdWaitWrite = fsLit "h$waitWrite" hdWaitRead :: FastString hdWaitRead = fsLit "h$waitRead" hdDelayThread :: FastString hdDelayThread = fsLit "h$delayThread" ------------------------------- Exceptions -------------------------------------- hdCatchStr :: FastString hdCatchStr = fsLit "h$catch" hdMaskAsyncStr :: FastString hdMaskAsyncStr = fsLit "h$maskAsync" hdMaskUnintAsyncStr :: FastString hdMaskUnintAsyncStr = fsLit "h$maskUnintAsync" hdUnmaskAsyncStr :: FastString hdUnmaskAsyncStr = fsLit "h$unmaskAsync" ------------------------------- Mutable variables -------------------------------------- hdMutVarStr :: FastString hdMutVarStr = fsLit "h$MutVar" hdAtomicModifyMutVar2Str :: FastString hdAtomicModifyMutVar2Str = fsLit "h$atomicModifyMutVar2" hdAtomicModifyMutVarStr :: FastString hdAtomicModifyMutVarStr = fsLit "h$atomicModifyMutVar" ------------------------------- Addr# ------------------------------------------ hdComparePointerStr :: FastString hdComparePointerStr = fsLit "h$comparePointer" ------------------------------- Byte Arrays ------------------------------------- hdCompareByteArraysStr :: FastString hdCompareByteArraysStr = fsLit "h$compareByteArrays" hdResizeMutableByteArrayStr :: FastString hdResizeMutableByteArrayStr = fsLit "h$resizeMutableByteArray" hdShrinkMutableByteArrayStr :: FastString hdShrinkMutableByteArrayStr = fsLit "h$shrinkMutableByteArray" ------------------------------- Arrays ------------------------------------------ hdCopyMutableArrayStr :: FastString hdCopyMutableArrayStr = fsLit "h$copyMutableArray" hdNewArrayStr :: 
FastString hdNewArrayStr = fsLit "h$newArray" hdSliceArrayStr :: FastString hdSliceArrayStr = fsLit "h$sliceArray" ------------------------------ Float -------------------------------------------- hdDecodeFloatIntStr :: FastString hdDecodeFloatIntStr = fsLit "h$decodeFloatInt" hdCastFloatToWord32Str :: FastString hdCastFloatToWord32Str = fsLit "h$castFloatToWord32" hdCastWord32ToFloatStr :: FastString hdCastWord32ToFloatStr = fsLit "h$castWord32ToFloat" ------------------------------ Double ------------------------------------------- hdDecodeDouble2IntStr :: FastString hdDecodeDouble2IntStr = fsLit "h$decodeDouble2Int" hdDecodeDoubleInt64Str :: FastString hdDecodeDoubleInt64Str = fsLit "h$decodeDoubleInt64" hdCastDoubleToWord64Str :: FastString hdCastDoubleToWord64Str = fsLit "h$castDoubleToWord64" hdCastWord64ToDoubleStr :: FastString hdCastWord64ToDoubleStr = fsLit "h$castWord64ToDouble" ------------------------------ Word ------------------------------------------- hdReverseWordStr :: FastString hdReverseWordStr = fsLit "h$reverseWord" hdClz8Str , hdClz16Str , hdClz32Str , hdClz64Str , hdCtz8Str , hdCtz16Str , hdCtz32Str , hdCtz64Str :: FastString hdClz8Str = fsLit "h$clz8" hdClz16Str = fsLit "h$clz16" hdClz32Str = fsLit "h$clz32" hdClz64Str = fsLit "h$clz64" hdCtz8Str = fsLit "h$ctz8" hdCtz16Str = fsLit "h$ctz16" hdCtz32Str = fsLit "h$ctz32" hdCtz64Str = fsLit "h$ctz64" hdBSwap64Str :: FastString hdBSwap64Str = "h$bswap64" hdPExit8Str , hdPExit16Str , hdPExit32Str , hdPExit64Str , hdPDep8Str , hdPDep16Str , hdPDep32Str , hdPDep64Str :: FastString hdPExit8Str = fsLit "h$pext8" hdPExit16Str = fsLit "h$pext16" hdPExit32Str = fsLit "h$pext32" hdPExit64Str = fsLit "h$pext64" hdPDep8Str = fsLit "h$pdep8" hdPDep16Str = fsLit "h$pdep16" hdPDep32Str = fsLit "h$pdep32" hdPDep64Str = fsLit "h$pdep64" hdPopCntTab :: JStgExpr hdPopCntTab = global "h$popCntTab" hdPopCnt32Str :: FastString hdPopCnt32Str = fsLit "h$popCnt32" hdPopCnt64Str :: FastString hdPopCnt64Str = fsLit 
"h$popCnt64" hdQuotRem2Word32Str :: FastString hdQuotRem2Word32Str = fsLit "h$quotRem2Word32" hdQuotRemWord32Str :: FastString hdQuotRemWord32Str = fsLit "h$quotRemWord32" hdRemWord32Str :: FastString hdRemWord32Str = fsLit "h$remWord32" hdQuotWord32Str :: FastString hdQuotWord32Str = fsLit "h$quotWord32" hdMul2Word32Str :: FastString hdMul2Word32Str = fsLit "h$mul2Word32" hdMulImulStr :: FastString hdMulImulStr = fsLit "Math.imul" hdWordAdd2 :: FastString hdWordAdd2 = fsLit "h$wordAdd2" hdHsPlusWord64Str :: FastString hdHsPlusWord64Str = fsLit "h$hs_plusWord64" hdHsMinusWord64Str :: FastString hdHsMinusWord64Str = fsLit "h$hs_minusWord64" hdHsTimesWord64Str :: FastString hdHsTimesWord64Str = fsLit "h$hs_timesWord64" hdHsQuotWord64Str :: FastString hdHsQuotWord64Str = fsLit "h$hs_quotWord64" hdHsRemWord64Str :: FastString hdHsRemWord64Str = fsLit "h$hs_remWord64" hdHsUncheckedShiftRWord64Str :: FastString hdHsUncheckedShiftRWord64Str = fsLit "h$hs_uncheckedShiftRWord64" hdHsUncheckedShiftLWord64Str :: FastString hdHsUncheckedShiftLWord64Str = fsLit "h$hs_uncheckedShiftLWord64" hdHsPlusInt64Str :: FastString hdHsPlusInt64Str = fsLit "h$hs_plusInt64" hdHsMinusInt64Str :: FastString hdHsMinusInt64Str = fsLit "h$hs_minusInt64" hdHsTimesInt64Str :: FastString hdHsTimesInt64Str = fsLit "h$hs_timesInt64" hdHsQuotInt64Str :: FastString hdHsQuotInt64Str = fsLit "h$hs_quotInt64" hdHsRemInt64Str :: FastString hdHsRemInt64Str = fsLit "h$hs_remInt64" hdHsUncheckedShiftLLInt64Str :: FastString hdHsUncheckedShiftLLInt64Str = fsLit "h$hs_uncheckedShiftLLInt64" hdHsUncheckedShiftRAInt64Str :: FastString hdHsUncheckedShiftRAInt64Str = fsLit "h$hs_uncheckedShiftRAInt64" hdHsUncheckedShiftRLInt64Str :: FastString hdHsUncheckedShiftRLInt64Str = fsLit "h$hs_uncheckedShiftRLInt64" hdHsTimesInt2Str :: FastString hdHsTimesInt2Str = fsLit "h$hs_timesInt2" ------------------------------ Linker ------------------------------------------- hdEncodeModifiedUtf8Str :: FastString 
hdEncodeModifiedUtf8Str = fsLit "h$encodeModifiedUtf8" hdRawStringDataStr :: FastString hdRawStringDataStr = fsLit "h$rawStringData" hdPStr :: FastString hdPStr = fsLit "h$p" hdDStr :: FastString hdDStr = fsLit "h$d" hdDiStr :: FastString hdDiStr = fsLit "h$di" hdStcStr :: FastString hdStcStr = fsLit "h$stc" hdStlStr :: FastString hdStlStr = fsLit "h$stl" hdStiStr :: FastString hdStiStr = fsLit "h$sti" hdStrStr :: FastString hdStrStr = fsLit "h$str" ghc-lib-parser-9.12.2.20250421/compiler/GHC/StgToJS/Types.hs0000644000000000000000000003725707346545000020661 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.StgToJS.Types -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : Jeffrey Young -- Luite Stegeman -- Sylvain Henry -- Josh Meredith -- Stability : experimental -- -- -- Module that holds the Types required for the StgToJS pass ----------------------------------------------------------------------------- module GHC.StgToJS.Types where import GHC.Prelude import GHC.JS.JStg.Syntax import GHC.JS.Ident import qualified GHC.JS.Syntax as Sat import GHC.JS.Make import GHC.JS.Ppr () -- expose Outputable instances to downstream modules import GHC.StgToJS.Symbols import GHC.Stg.Syntax import GHC.Core.TyCon import GHC.Linker.Config import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Var import GHC.Types.ForeignCall import Control.Monad.Trans.State.Strict import GHC.Utils.Outputable (Outputable (..), text, SDocContext) import GHC.Data.FastString import GHC.Data.FastMutInt import GHC.Unit.Module import Data.Array import Data.ByteString (ByteString) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BSC import Data.Char (toUpper) import qualified Data.Map as M import Data.Monoid import Data.Set 
(Set) import Data.Word -- | A State monad over IO holding the generator state. type G = StateT GenState IO -- | The JS code generator state data GenState = GenState { gsSettings :: !StgToJSConfig -- ^ codegen settings, read-only , gsModule :: !Module -- ^ current module , gsId :: {-# UNPACK #-} !FastMutInt -- ^ unique number for the id generator , gsIdents :: !IdCache -- ^ hash consing for identifiers from a Unique , gsUnfloated :: !(UniqFM Id CgStgExpr) -- ^ unfloated arguments , gsGroup :: GenGroupState -- ^ state for the current binding group , gsGlobal :: [JStgStat] -- ^ global (per module) statements (gets included when anything else from the module is used) } -- | The JS code generator state relevant for the current binding group data GenGroupState = GenGroupState { ggsToplevelStats :: [JStgStat] -- ^ extra toplevel statements for the binding group , ggsClosureInfo :: [ClosureInfo] -- ^ closure metadata (info tables) for the binding group , ggsStatic :: [StaticInfo] -- ^ static (CAF) data in our binding group , ggsStack :: [StackSlot] -- ^ stack info for the current expression , ggsStackDepth :: Int -- ^ current stack depth , ggsExtraDeps :: Set OtherSymb -- ^ extra dependencies for the linkable unit that contains this group , ggsGlobalIdCache :: GlobalIdCache , ggsForeignRefs :: [ForeignJSRef] } -- | The Configuration record for the StgToJS pass data StgToJSConfig = StgToJSConfig -- flags { csInlinePush :: !Bool , csInlineBlackhole :: !Bool , csInlineLoadRegs :: !Bool , csInlineEnter :: !Bool , csInlineAlloc :: !Bool , csPrettyRender :: !Bool , csTraceRts :: !Bool , csAssertRts :: !Bool , csBoundsCheck :: !Bool , csDebugAlloc :: !Bool , csTraceForeign :: !Bool , csProf :: !Bool -- ^ Profiling enabled , csRuntimeAssert :: !Bool -- ^ Enable runtime assertions -- settings , csContext :: !SDocContext , csLinkerConfig :: !LinkerConfig -- ^ Emscripten linker } -- | Closure info table data ClosureInfo = ClosureInfo { ciVar :: Ident -- ^ entry code identifier: 
infotable fields are stored as properties of this function , ciRegs :: CIRegs -- ^ size of the payload (in number of JS values) , ciName :: FastString -- ^ friendly name for printing , ciLayout :: CILayout -- ^ heap/stack layout of the object , ciType :: CIType -- ^ type of the object, with extra info where required , ciStatic :: CIStatic -- ^ static references of this object } deriving stock (Eq, Show) -- | Closure information, 'ClosureInfo', registers data CIRegs = CIRegsUnknown -- ^ A value witnessing a state of unknown registers | CIRegs { ciRegsSkip :: Int -- ^ unused registers before actual args start , ciRegsTypes :: [JSRep] -- ^ args } deriving stock (Eq, Ord, Show) -- | Closure Information, 'ClosureInfo', layout data CILayout = CILayoutVariable -- ^ layout stored in object itself, first position from the start | CILayoutUnknown -- ^ fixed size, but content unknown (for example stack apply frame) { layoutSize :: !Int } | CILayoutFixed -- ^ whole layout known { layoutSize :: !Int -- ^ closure size in array positions, including entry , layout :: [JSRep] -- ^ The list of JSReps to layout } deriving stock (Eq, Ord, Show) -- | The type of 'ClosureInfo' data CIType = CIFun { citArity :: !Int -- ^ function arity , citRegs :: !Int -- ^ number of registers for the args } | CIThunk -- ^ The closure is a THUNK | CICon { citConstructor :: !Int } -- ^ The closure is a Constructor | CIPap -- ^ The closure is a Partial Application | CIBlackhole -- ^ The closure is a black hole | CIStackFrame -- ^ The closure is a stack frame deriving stock (Eq, Ord, Show) -- | Static references that must be kept alive newtype CIStatic = CIStaticRefs { staticRefs :: [FastString] } deriving stock (Eq) deriving newtype (Semigroup, Monoid, Show) -- | static refs: array = references, null = nothing to report -- note: only works after all top-level objects have been created instance ToJExpr CIStatic where toJExpr (CIStaticRefs []) = null_ -- [je| null |] toJExpr (CIStaticRefs rs) = toJExpr (map 
global rs) -- | JS primitive representations data JSRep = PtrV -- ^ pointer = reference to heap object (closure object), lifted or not. -- Can also be some RTS object (e.g. TVar#, MVar#, MutVar#, Weak#) | VoidV -- ^ no fields | DoubleV -- ^ A Double: one field | IntV -- ^ An Int (32bit because JS): one field | LongV -- ^ A Long: two fields one for the upper 32bits, one for the lower (NB: JS is little endian) | AddrV -- ^ a pointer not to the heap: two fields, array + index | ObjV -- ^ some JS object, user supplied, be careful around these, can be anything | ArrV -- ^ boxed array deriving stock (Eq, Ord, Enum, Bounded, Show) instance ToJExpr JSRep where toJExpr = toJExpr . fromEnum -- | The type of identifiers. These determine the suffix of generated functions -- in JS Land. For example, the entry function for the 'Just' constructor is a -- 'IdConEntry' which compiles to: -- @ -- function h$ghczminternalZCGHCziInternalziMaybeziJust_con_e() { return h$rs() }; -- @ -- which just returns whatever the stack point is pointing to. Whereas the entry -- function to 'Just' is an 'IdEntry' and does the work. 
It compiles to: -- @ -- function h$ghczminternalZCGHCziInternalziMaybeziJust_e() { -- var h$$baseZCGHCziMaybezieta_8KXnScrCjF5 = h$r2; -- h$r1 = h$c1(h$ghczminternalZCGHCziInternalziMaybeziJust_con_e, h$$ghczminternalZCGHCziInternalziMaybezieta_8KXnScrCjF5); -- return h$rs(); -- }; -- @ -- Which loads some payload from register 2, and applies the Constructor Entry -- function for the Just to the payload, returns the result in register 1 and -- returns whatever is on top of the stack data IdType = IdPlain -- ^ A plain identifier for values, no suffix added | IdEntry -- ^ An entry function, suffix = "_e" in 'GHC.StgToJS.Ids.makeIdentForId' | IdConEntry -- ^ A Constructor entry function, suffix = "_con_e" in 'GHC.StgToJS.Ids.makeIdentForId' deriving (Enum, Eq, Ord) -- | Keys to differentiate Ident's in the ID Cache data IdKey = IdKey !Word64 !Int !IdType deriving (Eq, Ord) -- | Some other symbol data OtherSymb = OtherSymb !Module !FastString deriving Eq instance Ord OtherSymb where compare (OtherSymb m1 t1) (OtherSymb m2 t2) = stableModuleCmp m1 m2 <> lexicalCompareFS t1 t2 -- | The identifier cache indexed on 'IdKey' local to a module newtype IdCache = IdCache (M.Map IdKey Ident) -- | The global Identifier Cache newtype GlobalIdCache = GlobalIdCache (UniqFM Ident (IdKey, Id)) -- | A Stack Slot is either known or unknown. We avoid maybe here for more -- strictness. 
data StackSlot = SlotId !Id !Int | SlotUnknown deriving (Eq, Ord) data StaticInfo = StaticInfo { siVar :: !FastString -- ^ global object , siVal :: !StaticVal -- ^ static initialization , siCC :: !(Maybe Ident) -- ^ optional CCS name } deriving stock (Eq, Show) data StaticVal = StaticFun !FastString [StaticArg] -- ^ heap object for function | StaticThunk !(Maybe (FastString,[StaticArg])) -- ^ heap object for CAF (field is Nothing when thunk is initialized in an -- alternative way, like string thunks through h$str) | StaticUnboxed !StaticUnboxed -- ^ unboxed constructor (Bool, Int, Double etc) | StaticData !FastString [StaticArg] -- ^ regular datacon app | StaticList [StaticArg] (Maybe FastString) -- ^ list initializer (with optional tail) deriving stock (Eq, Show) data StaticUnboxed = StaticUnboxedBool !Bool | StaticUnboxedInt !Integer | StaticUnboxedDouble !SaneDouble | StaticUnboxedString !BS.ByteString | StaticUnboxedStringOffset !BS.ByteString deriving stock (Eq, Ord, Show) -- | Static Arguments. Static Arguments are things that are statically -- allocated, i.e., they exist at program startup. These are static heap objects -- or literals or things that have been floated to the top level binding by ghc. data StaticArg = StaticObjArg !FastString -- ^ reference to a heap object | StaticLitArg !StaticLit -- ^ literal | StaticConArg !FastString [StaticArg] -- ^ unfloated constructor deriving stock (Eq, Show) instance Outputable StaticArg where ppr x = text (show x) -- | A Static literal value data StaticLit = BoolLit !Bool | IntLit !Integer | NullLit | DoubleLit !SaneDouble -- should we actually use double here? 
| StringLit !FastString | BinLit !BS.ByteString | LabelLit !Bool !FastString -- ^ is function pointer, label (also used for string / binary init) deriving (Eq, Show) instance Outputable StaticLit where ppr x = text (show x) instance ToJExpr StaticLit where toJExpr (BoolLit b) = toJExpr b toJExpr (IntLit i) = toJExpr i toJExpr NullLit = null_ toJExpr (DoubleLit d) = toJExpr (unSaneDouble d) toJExpr (StringLit t) = app hdStrStr [toJExpr t] toJExpr (BinLit b) = app hdRawStr [toJExpr (map toInteger (BS.unpack b))] toJExpr (LabelLit _isFun lbl) = global lbl -- | A foreign reference to some JS code data ForeignJSRef = ForeignJSRef { foreignRefSrcSpan :: !FastString , foreignRefPattern :: !FastString , foreignRefSafety :: !Safety , foreignRefCConv :: !CCallConv , foreignRefArgs :: ![FastString] , foreignRefResult :: !FastString } -- | data used to generate one ObjBlock in our object file data LinkableUnit = LinkableUnit { luObjBlock :: ObjBlock -- ^ serializable unit info , luIdExports :: [Id] -- ^ exported names from haskell identifiers , luOtherExports :: [FastString] -- ^ other exports , luIdDeps :: [Id] -- ^ identifiers this unit depends on , luPseudoIdDeps :: [Unique] -- ^ pseudo-id identifiers this unit depends on (fixme) , luOtherDeps :: [OtherSymb] -- ^ symbols not from a haskell id that this unit depends on , luRequired :: Bool -- ^ always link this unit , luForeignRefs :: [ForeignJSRef] } -- | one toplevel block in the object file data ObjBlock = ObjBlock { oiSymbols :: ![FastString] -- ^ toplevel symbols (stored in index) , oiClInfo :: ![ClosureInfo] -- ^ closure information of all closures in block , oiStatic :: ![StaticInfo] -- ^ static closure data , oiStat :: Sat.JStat -- ^ the code , oiRaw :: !BS.ByteString -- ^ raw JS code , oiFExports :: ![ExpFun] , oiFImports :: ![ForeignJSRef] } data ExpFun = ExpFun { isIO :: !Bool , args :: [JSFFIType] , result :: !JSFFIType } deriving (Eq, Ord, Show) -- | Types of FFI values data JSFFIType = Int8Type | Int16Type | 
Int32Type | Int64Type | Word8Type | Word16Type | Word32Type | Word64Type | DoubleType | ByteArrayType | PtrType | RefType deriving (Show, Ord, Eq, Enum) -- | Typed expression data TypedExpr = TypedExpr { typex_typ :: !PrimRep , typex_expr :: [JStgExpr] } instance Outputable TypedExpr where ppr (TypedExpr typ x) = ppr (typ, x) -- | A Primop result is either an inlining of some JS payload, or a primitive -- call to a JS function defined in Shim files in base. data PrimRes = PrimInline JStgStat -- ^ primop is inline, result is assigned directly | PRPrimCall JStgStat -- ^ primop is async call, primop returns the next -- function to run. result returned to stack top in -- registers data ExprResult = ExprCont | ExprInline deriving (Eq) newtype ExprValData = ExprValData [JStgExpr] deriving newtype (Eq) -- | A Closure is one of six types data ClosureType = Thunk -- ^ The closure is a THUNK | Fun -- ^ The closure is a Function | Pap -- ^ The closure is a Partial Application | Con -- ^ The closure is a Constructor | Blackhole -- ^ The closure is a Blackhole | StackFrame -- ^ The closure is a stack frame deriving (Show, Eq, Ord, Enum, Bounded, Ix) -- | Convert 'ClosureType' to an Int ctNum :: ClosureType -> Int ctNum Fun = 1 ctNum Con = 2 ctNum Thunk = 0 ctNum Pap = 3 ctNum Blackhole = 5 ctNum StackFrame = -1 closureB :: ByteString closureB = BSC.pack "_CLOSURE" closureNames :: Array ClosureType Ident closureNames = listArray (minBound, maxBound) [ name $ mk_names n | n <- enumFromTo minBound maxBound ] where mk_names :: ClosureType -> FastString mk_names nm = mkFastStringByteString $ hdB <> BSC.pack (map toUpper (show nm)) <> closureB -- | Convert 'ClosureType' to a String ctJsName :: ClosureType -> String ctJsName = \case Thunk -> "CLOSURE_TYPE_THUNK" Fun -> "CLOSURE_TYPE_FUN" Pap -> "CLOSURE_TYPE_PAP" Con -> "CLOSURE_TYPE_CON" Blackhole -> "CLOSURE_TYPE_BLACKHOLE" StackFrame -> "CLOSURE_TYPE_STACKFRAME" instance ToJExpr ClosureType where toJExpr e = toJExpr (ctNum e) -- | 
A thread is in one of 4 states data ThreadStatus = Running -- ^ The thread is running | Blocked -- ^ The thread is blocked | Finished -- ^ The thread is done | Died -- ^ The thread has died deriving (Show, Eq, Ord, Enum, Bounded) -- | Convert the status of a thread in JS land to an Int threadStatusNum :: ThreadStatus -> Int threadStatusNum = \case Running -> 0 Blocked -> 1 Finished -> 16 Died -> 17 -- | convert the status of a thread in JS land to a string threadStatusJsName :: ThreadStatus -> String threadStatusJsName = \case Running -> "THREAD_RUNNING" Blocked -> "THREAD_BLOCKED" Finished -> "THREAD_FINISHED" Died -> "THREAD_DIED" ghc-lib-parser-9.12.2.20250421/compiler/GHC/SysTools/0000755000000000000000000000000007346545000017505 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/SysTools/BaseDir.hs0000644000000000000000000001712107346545000021354 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} {- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2001-2017 -- -- Finding the compiler's base directory. -- ----------------------------------------------------------------------------- -} module GHC.SysTools.BaseDir ( expandTopDir, expandToolDir , findTopDir, findToolDir , tryFindTopDir ) where import GHC.Prelude -- See Note [Base Dir] for why some of this logic is shared with ghc-pkg. import GHC.BaseDir import GHC.Utils.Panic import System.Environment (lookupEnv) import System.FilePath -- Windows #if defined(mingw32_HOST_OS) import System.Directory (doesDirectoryExist) #endif {- Note [topdir: How GHC finds its files] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC needs various support files (library packages, RTS etc), plus various auxiliary programs (cp, gcc, etc). It starts by finding topdir, the root of GHC's support files On Unix: - ghc always has a shell wrapper that passes a -B option On Windows: - ghc never has a shell wrapper. 
- we can find the location of the ghc binary, which is $topdir//.exe where may be "ghc", "ghc-stage2", or similar - we strip off the "/.exe" to leave $topdir. from topdir we can find package.conf, ghc-asm, etc. Note [tooldir: How GHC finds mingw on Windows] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC has some custom logic on Windows for finding the mingw toolchain. In general we will find the mingw toolchain in $topdir/../../mingw/. This story is long and with lots of twist and turns.. But let's talk about how the build system finds and wires through the toolchain information. 1) It all starts in configure.ac which has two modes it operates on: a) The default is where `EnableDistroToolchain` is false. This indicates that we want to use the in-tree bundled toolchains. In this mode we will download and unpack some custom toolchains into the `inplace/mingw` folder and everything is pointed to that folder. b) The second path is when `EnableDistroToolchain` is true. This makes the toolchain behave a lot like Linux, in that the environment is queried for information on the tools we require. From configure.ac we export the standard variables to set the paths to the tools for the build system to use. 2) After we have the path to the tools we have to generate the right paths to store in the settings file for ghc to use. This is done in aclocal.m4. Again we have two modes of operation: a) If not `EnableDistroToolchain` the paths are rewritten to paths using a variable `$tooldir` as we need an absolute path. $tooldir is filled in by the `expandToolDir` function in this module at GHC startup. b) When `EnableDistroToolchain` then instead of filling in a absolute path we fill in just the program name. The assumption here is that at runtime the environment GHC is operating on will be the same as the one configure was run in. This means we expect `gcc, ld, as` etc to be on the PATH. 
From `aclocal.m4` we export a couple of variables starting with `Settings` which will be used to generate the settings file. 3) The next step is to generate the settings file: The file `cfg/system.config.in` is preprocessed by configure and the output written to `system.config`. This serves the same purpose as `config.mk` but it rewrites the values that were exported. As an example `SettingsCCompilerCommand` is rewritten to `settings-c-compiler-command`. Next up is `src/Oracles/Settings.hs` which makes from some Haskell ADT to the settings `keys` in the `system.config`. As an example, `settings-c-compiler-command` is mapped to `SettingsFileSetting_CCompilerCommand`. The last part of this is the `generateSettings` in `src/Rules/Generate.hs` which produces the desired settings file out of Hadrian. This is the equivalent to `rts/include/ghc.mk`. -- So why do we have these? On Windows there's no such thing as a platform compiler and as such we need to provide GCC and binutils. The easiest way is to bundle these with the compiler and wire them up. This gives you a relocatable binball. This works fine for most users. However mingw-w64 have a different requirement. They require all packages in the repo to be compiled using the same version of the compiler. So it means when they are rebuilding the world to add support for GCC X, they expect all packages to have been compiled with GCC X which is a problem since we ship an older GCC version. GHC is a package in mingw-w64 because there are Haskell packages in the repository which of course requires a Haskell compiler. To help them we provide the override which allows GHC to instead of using an inplace compiler to play nice with the system compiler instead. -} -- | Expand occurrences of the @$tooldir@ interpolation in a string -- on Windows, leave the string untouched otherwise. 
expandToolDir :: Bool -- ^ whether we use the ambient mingw toolchain -> Maybe FilePath -- ^ tooldir -> String -> String #if defined(mingw32_HOST_OS) expandToolDir False (Just tool_dir) s = expandPathVar "tooldir" tool_dir s expandToolDir False Nothing _ = panic "Could not determine $tooldir" expandToolDir True _ s = s #else expandToolDir _ _ s = s #endif -- | Returns a Unix-format path pointing to TopDir. findTopDir :: Maybe String -- Maybe TopDir path (without the '-B' prefix). -> IO String -- TopDir (in Unix format '/' separated) findTopDir m_minusb = do maybe_exec_dir <- tryFindTopDir m_minusb case maybe_exec_dir of -- "Just" on Windows, "Nothing" on unix Nothing -> throwGhcExceptionIO $ InstallationError "missing -B option" Just dir -> return dir tryFindTopDir :: Maybe String -- ^ Maybe TopDir path (without the '-B' prefix). -> IO (Maybe String) -- ^ TopDir (in Unix format '/' separated) tryFindTopDir (Just minusb) = return $ Just $ normalise minusb tryFindTopDir Nothing = do -- The _GHC_TOP_DIR environment variable can be used to specify -- the top dir when the -B argument is not specified. It is not -- intended for use by users, it was added specifically for the -- purpose of running GHC within GHCi. maybe_env_top_dir <- lookupEnv "_GHC_TOP_DIR" case maybe_env_top_dir of Just env_top_dir -> return $ Just env_top_dir -- Try directory of executable Nothing -> getBaseDir -- See Note [tooldir: How GHC finds mingw on Windows] -- Returns @Nothing@ when not on Windows. -- When called on Windows, it either throws an error when the -- tooldir can't be located, or returns @Just tooldirpath@. 
-- If the distro toolchain is being used we treat Windows the same as Linux findToolDir :: Bool -- ^ whether we use the ambient mingw toolchain -> FilePath -- ^ topdir -> IO (Maybe FilePath) #if defined(mingw32_HOST_OS) findToolDir False top_dir = go 0 (top_dir "..") [] where maxDepth = 3 go :: Int -> FilePath -> [FilePath] -> IO (Maybe FilePath) go k path tried | k == maxDepth = throwGhcExceptionIO $ InstallationError $ "could not detect mingw toolchain in the following paths: " ++ show tried | otherwise = do let try = path "mingw" let tried' = tried ++ [try] oneLevel <- doesDirectoryExist try if oneLevel then return (Just path) else go (k+1) (path "..") tried' findToolDir True _ = return Nothing #else findToolDir _ _ = return Nothing #endif ghc-lib-parser-9.12.2.20250421/compiler/GHC/SysTools/Terminal.hs0000644000000000000000000000541407346545000021620 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} module GHC.SysTools.Terminal (stderrSupportsAnsiColors) where import GHC.Prelude #if !defined(mingw32_HOST_OS) import System.Environment (lookupEnv) import System.IO (hIsTerminalDevice, stderr) #else import GHC.IO (catchException) import GHC.Utils.Exception (try) import Foreign (Ptr, peek, with) import qualified Graphics.Win32 as Win32 import qualified System.Win32 as Win32 #endif import System.IO.Unsafe -- | Does the controlling terminal support ANSI color sequences? -- This memoized to avoid thread-safety issues in ncurses (see #17922). stderrSupportsAnsiColors :: Bool stderrSupportsAnsiColors = unsafePerformIO stderrSupportsAnsiColors' {-# NOINLINE stderrSupportsAnsiColors #-} -- | Check if ANSI escape sequences can be used to control color in stderr. 
stderrSupportsAnsiColors' :: IO Bool stderrSupportsAnsiColors' = do #if !defined(mingw32_HOST_OS) -- Equivalent of https://hackage.haskell.org/package/ansi-terminal/docs/System-Console-ANSI.html#v:hSupportsANSI isTerminal <- hIsTerminalDevice stderr term <- lookupEnv "TERM" pure $ isTerminal && term /= Just "dumb" #else h <- Win32.getStdHandle Win32.sTD_ERROR_HANDLE `catchException` \ (_ :: IOError) -> pure Win32.nullHANDLE if h == Win32.nullHANDLE then pure False else do eMode <- try (getConsoleMode h) case eMode of Left (_ :: IOError) -> Win32.isMinTTYHandle h -- Check if the we're in a MinTTY terminal -- (e.g., Cygwin or MSYS2) Right mode | modeHasVTP mode -> pure True | otherwise -> enableVTP h mode where enableVTP :: Win32.HANDLE -> Win32.DWORD -> IO Bool enableVTP h mode = do setConsoleMode h (modeAddVTP mode) modeHasVTP <$> getConsoleMode h `catchException` \ (_ :: IOError) -> pure False modeHasVTP :: Win32.DWORD -> Bool modeHasVTP mode = mode .&. eNABLE_VIRTUAL_TERMINAL_PROCESSING /= 0 modeAddVTP :: Win32.DWORD -> Win32.DWORD modeAddVTP mode = mode .|. 
eNABLE_VIRTUAL_TERMINAL_PROCESSING eNABLE_VIRTUAL_TERMINAL_PROCESSING :: Win32.DWORD eNABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 getConsoleMode :: Win32.HANDLE -> IO Win32.DWORD getConsoleMode h = with 64 $ \ mode -> do Win32.failIfFalse_ "GetConsoleMode" (c_GetConsoleMode h mode) peek mode setConsoleMode :: Win32.HANDLE -> Win32.DWORD -> IO () setConsoleMode h mode = do Win32.failIfFalse_ "SetConsoleMode" (c_SetConsoleMode h mode) foreign import ccall unsafe "windows.h GetConsoleMode" c_GetConsoleMode :: Win32.HANDLE -> Ptr Win32.DWORD -> IO Win32.BOOL foreign import ccall unsafe "windows.h SetConsoleMode" c_SetConsoleMode :: Win32.HANDLE -> Win32.DWORD -> IO Win32.BOOL #endif ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Hole/0000755000000000000000000000000007346545000020417 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Hole/FitTypes.hs0000644000000000000000000001126207346545000022524 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.Tc.Errors.Hole.FitTypes ( TypedHole (..), HoleFit (..), HoleFitCandidate (..), hfIsLcl, pprHoleFitCand ) where import GHC.Prelude import GHC.Tc.Types.Constraint import GHC.Tc.Utils.TcType import GHC.Types.Name.Reader import GHC.Hs.Doc import GHC.Types.Id import GHC.Utils.Outputable import GHC.Types.Name import GHC.Data.Bag import Data.Function ( on ) data TypedHole = TypedHole { th_relevant_cts :: Bag CtEvidence -- ^ Any relevant Cts to the hole , th_implics :: [Implication] -- ^ The nested implications of the hole with the -- innermost implication first. , th_hole :: Maybe Hole -- ^ The hole itself, if available. } instance Outputable TypedHole where ppr (TypedHole { th_relevant_cts = rels , th_implics = implics , th_hole = hole }) = hang (text "TypedHole") 2 (ppr rels $+$ ppr implics $+$ ppr hole) -- | HoleFitCandidates are passed to hole fit plugins and then -- checked whether they fit a given typed-hole. data HoleFitCandidate = IdHFCand Id -- An id, like locals. 
| NameHFCand Name -- A name, like built-in syntax. | GreHFCand GlobalRdrElt -- A global, like imported ids. instance Eq HoleFitCandidate where IdHFCand i1 == IdHFCand i2 = i1 == i2 NameHFCand n1 == NameHFCand n2 = n1 == n2 GreHFCand gre1 == GreHFCand gre2 = greName gre1 == greName gre2 _ == _ = False instance Outputable HoleFitCandidate where ppr = pprHoleFitCand pprHoleFitCand :: HoleFitCandidate -> SDoc pprHoleFitCand (IdHFCand cid) = text "Id HFC: " <> ppr cid pprHoleFitCand (NameHFCand cname) = text "Name HFC: " <> ppr cname pprHoleFitCand (GreHFCand cgre) = text "Gre HFC: " <> ppr cgre instance NamedThing HoleFitCandidate where getName hfc = case hfc of IdHFCand cid -> idName cid NameHFCand cname -> cname GreHFCand cgre -> greName cgre getOccName hfc = case hfc of IdHFCand cid -> occName cid NameHFCand cname -> occName cname GreHFCand cgre -> occName $ greName cgre instance HasOccName HoleFitCandidate where occName = getOccName instance Ord HoleFitCandidate where compare = compare `on` getName -- | HoleFit is the type we use for valid hole fits. It contains the -- element that was checked, the Id of that element as found by `tcLookup`, -- and the refinement level of the fit, which is the number of extra argument -- holes that this fit uses (e.g. if hfRefLvl is 2, the fit is for `Id _ _`). data HoleFit = HoleFit { hfId :: Id -- ^ The elements id in the TcM , hfCand :: HoleFitCandidate -- ^ The candidate that was checked. , hfType :: TcType -- ^ The type of the id, possibly zonked. , hfRefLvl :: Int -- ^ The number of holes in this fit. , hfWrap :: [TcType] -- ^ The wrapper for the match. , hfMatches :: [TcType] -- ^ What the refinement variables got matched with, if anything , hfDoc :: Maybe [HsDocString] -- ^ Documentation of this HoleFit, if available. } | RawHoleFit SDoc -- ^ A fit that is just displayed as is. Here so thatHoleFitPlugins -- can inject any fit they want. -- We define an Eq and Ord instance to be able to build a graph. 
instance Eq HoleFit where (==) = (==) `on` hfId instance Outputable HoleFit where ppr (RawHoleFit sd) = sd ppr (HoleFit _ cand ty _ _ mtchs _) = hang (name <+> holes) 2 (text "where" <+> name <+> dcolon <+> (ppr ty)) where name = ppr $ getName cand holes = sep $ map (parens . (text "_" <+> dcolon <+>) . ppr) mtchs -- We compare HoleFits by their name instead of their Id, since we don't -- want our tests to be affected by the non-determinism of `nonDetCmpVar`, -- which is used to compare Ids. When comparing, we want HoleFits with a lower -- refinement level to come first. instance Ord HoleFit where compare (RawHoleFit _) (RawHoleFit _) = EQ compare (RawHoleFit _) _ = LT compare _ (RawHoleFit _) = GT compare a@(HoleFit {}) b@(HoleFit {}) = cmp a b where cmp = if hfRefLvl a == hfRefLvl b then compare `on` (getName . hfCand) else compare `on` hfRefLvl hfIsLcl :: HoleFit -> Bool hfIsLcl hf@(HoleFit {}) = case hfCand hf of IdHFCand _ -> True NameHFCand _ -> False GreHFCand gre -> gre_lcl gre hfIsLcl _ = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Hole/Plugin.hs0000644000000000000000000000225707346545000022217 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.Tc.Errors.Hole.Plugin(CandPlugin, FitPlugin, HoleFitPlugin (..), HoleFitPluginR (..)) where import GHC.Tc.Errors.Hole.FitTypes import GHC.Tc.Types ( TcRef, TcM ) -- | A plugin for modifying the candidate hole fits *before* they're checked. type CandPlugin = TypedHole -> [HoleFitCandidate] -> TcM [HoleFitCandidate] -- | A plugin for modifying hole fits *after* they've been found. type FitPlugin = TypedHole -> [HoleFit] -> TcM [HoleFit] -- | A HoleFitPlugin is a pair of candidate and fit plugins. data HoleFitPlugin = HoleFitPlugin { candPlugin :: CandPlugin , fitPlugin :: FitPlugin } -- | HoleFitPluginR adds a TcRef to hole fit plugins so that plugins can -- track internal state. 
Note the existential quantification, ensuring that -- the state cannot be modified from outside the plugin. data HoleFitPluginR = forall s. HoleFitPluginR { hfPluginInit :: TcM (TcRef s) -- ^ Initializes the TcRef to be passed to the plugin , hfPluginRun :: TcRef s -> HoleFitPlugin -- ^ The function defining the plugin itself , hfPluginStop :: TcRef s -> TcM () -- ^ Cleanup of state, guaranteed to be called even on error }ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Hole/Plugin.hs-boot0000644000000000000000000000023407346545000023151 0ustar0000000000000000module GHC.Tc.Errors.Hole.Plugin where -- See W1 of Note [Tracking dependencies on primitives] in GHC.Internal.Base import GHC.Base () data HoleFitPlugin ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/0000755000000000000000000000000007346545000017530 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Ppr.hs0000644000000000000000000110017407346545000020631 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MonadComprehensions #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE OverloadedStrings #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic TcRnMessage {-# LANGUAGE InstanceSigs #-} module GHC.Tc.Errors.Ppr ( pprTypeDoesNotHaveFixedRuntimeRep , pprScopeError -- , tidySkolemInfo , tidySkolemInfoAnon -- , pprHsDocContext , inHsDocContext , TcRnMessageOpts(..) , pprTyThingUsedWrong , pprUntouchableVariable -- , mismatchMsg_ExpectedActuals -- | Useful when overriding message printing. 
, messageWithInfoDiagnosticMessage , messageWithHsDocContext ) where import GHC.Prelude import qualified GHC.Internal.TH.Syntax as TH -- In stage1: import "ghc-boot-th-next" qualified GHC.Internal.TH.Syntax as TH -- In stage2: import "ghc-boot-th" qualified GHC.Internal.TH.Syntax as TH -- which resolves to -- import "ghc-internal" qualified GHC.Internal.TH.Syntax as TH import qualified GHC.Internal.TH.Ppr as TH import GHC.Builtin.Names import GHC.Builtin.Types ( boxedRepDataConTyCon, tYPETyCon, filterCTuple, pretendNameIsInScope ) import GHC.Types.Name.Reader import GHC.Unit.Module.ModIface import GHC.Unit.Module.Warnings import GHC.Core.Coercion import GHC.Core.Unify ( tcMatchTys ) import GHC.Core.TyCon import GHC.Core.Class import GHC.Core.DataCon import GHC.Core.Coercion.Axiom (CoAxBranch, coAxiomTyCon, coAxiomSingleBranch) import GHC.Core.ConLike import GHC.Core.FamInstEnv ( FamInst(..), famInstAxiom, pprFamInst ) import GHC.Core.InstEnv import GHC.Core.TyCo.Rep (Type(..)) import GHC.Core.TyCo.Ppr (pprWithInvisibleBitsWhen, pprSourceTyCon, pprTyVars, pprWithTYPE, pprTyVar, pprTidiedType) import GHC.Core.PatSyn ( patSynName, pprPatSynType ) import GHC.Core.Predicate import GHC.Core.Type import GHC.Core.FVs( orphNamesOfTypes ) import GHC.CoreToIface import GHC.Driver.Flags import GHC.Driver.Backend import GHC.Hs import GHC.Tc.Errors.Types import GHC.Tc.Types.BasicTypes import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin hiding ( Position(..) ) import GHC.Tc.Types.CtLoc import GHC.Tc.Types.Rank (Rank(..)) import GHC.Tc.Types.TH import GHC.Tc.Utils.TcType import GHC.Types.DefaultEnv (ClassDefaults(ClassDefaults, cd_types, cd_module)) import GHC.Types.Error import GHC.Types.Error.Codes import GHC.Types.Hint import GHC.Types.Hint.Ppr () -- Outputable GhcHint import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Id.Info ( RecSelParent(..) 
) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Types.SourceFile import GHC.Types.SrcLoc import GHC.Types.TyThing import GHC.Types.TyThing.Ppr ( pprTyThingInContext ) import GHC.Types.Unique.Set ( nonDetEltsUniqSet ) import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Fixity (defaultFixity) import GHC.Iface.Errors.Types import GHC.Iface.Errors.Ppr import GHC.Iface.Syntax import GHC.Unit.State import GHC.Unit.Module import GHC.Data.Bag import GHC.Data.FastString import GHC.Data.List.SetOps ( nubOrdBy ) import GHC.Data.Maybe import GHC.Data.Pair import GHC.Settings.Constants (mAX_TUPLE_SIZE, mAX_CTUPLE_SIZE) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import qualified GHC.LanguageExtensions as LangExt import GHC.Data.BooleanFormula (pprBooleanFormulaNice) import Data.List.NonEmpty (NonEmpty(..)) import qualified Data.List.NonEmpty as NE import Data.Foldable ( fold ) import Data.Function (on) import Data.List ( groupBy, sortBy, tails , partition, unfoldr ) import Data.Ord ( comparing ) import Data.Bifunctor defaultTcRnMessageOpts :: TcRnMessageOpts defaultTcRnMessageOpts = TcRnMessageOpts { tcOptsShowContext = True , tcOptsIfaceOpts = defaultDiagnosticOpts @IfaceMessage } instance HasDefaultDiagnosticOpts TcRnMessageOpts where defaultOpts = defaultTcRnMessageOpts instance Diagnostic TcRnMessage where type DiagnosticOpts TcRnMessage = TcRnMessageOpts diagnosticMessage opts = \case TcRnUnknownMessage (UnknownDiagnostic f m) -> diagnosticMessage (f opts) m TcRnMessageWithInfo unit_state msg_with_info -> case msg_with_info of TcRnMessageDetailed err_info msg -> messageWithInfoDiagnosticMessage unit_state err_info (tcOptsShowContext opts) (diagnosticMessage opts msg) TcRnWithHsDocContext ctxt msg -> messageWithHsDocContext opts ctxt (diagnosticMessage opts msg) TcRnSolverReport msg _ -> mkSimpleDecorated $ pprSolverReportWithCtxt msg TcRnSolverDepthError ty depth -> 
mkSimpleDecorated msg where msg = vcat [ text "Reduction stack overflow; size =" <+> ppr depth , hang (text "When simplifying the following type:") 2 (ppr ty) ] TcRnRedundantConstraints redundants (info, show_info) -> mkSimpleDecorated $ text "Redundant constraint" <> plural redundants <> colon <+> pprEvVarTheta redundants $$ if show_info then text "In" <+> ppr info else empty TcRnInaccessibleCode implic contra -> mkSimpleDecorated $ hang (text "Inaccessible code in") 2 (ppr (ic_info implic)) $$ pprSolverReportWithCtxt contra TcRnInaccessibleCoAxBranch fam_tc cur_branch -> mkSimpleDecorated $ text "Type family instance equation is overlapped:" $$ nest 2 (pprCoAxBranchUser fam_tc cur_branch) TcRnTypeDoesNotHaveFixedRuntimeRep ty prov (ErrInfo extra supplementary) -> mkDecorated [pprTypeDoesNotHaveFixedRuntimeRep ty prov, extra, supplementary] TcRnImplicitLift id_or_name ErrInfo{..} -> mkDecorated $ ( text "The variable" <+> quotes (ppr id_or_name) <+> text "is implicitly lifted in the TH quotation" ) : [errInfoContext, errInfoSupplementary] TcRnUnusedPatternBinds bind -> mkDecorated [hang (text "This pattern-binding binds no variables:") 2 (ppr bind)] TcRnDodgyImports (DodgyImportsEmptyParent gre) -> mkDecorated [dodgy_msg (text "import") gre (dodgy_msg_insert gre)] TcRnDodgyImports (DodgyImportsHiding reason) -> mkSimpleDecorated $ pprImportLookup reason TcRnDodgyExports gre -> mkDecorated [dodgy_msg (text "export") gre (dodgy_msg_insert gre)] TcRnMissingImportList ie -> mkDecorated [ text "The import item" <+> quotes (ppr ie) <+> text "does not have an explicit import list" ] TcRnUnsafeDueToPlugin -> mkDecorated [text "Use of plugins makes the module unsafe"] TcRnModMissingRealSrcSpan mod -> mkDecorated [text "Module does not have a RealSrcSpan:" <+> ppr mod] TcRnIdNotExportedFromModuleSig name mod -> mkDecorated [ text "The identifier" <+> ppr (occName name) <+> text "does not exist in the signature for" <+> ppr mod ] TcRnIdNotExportedFromLocalSig name -> 
mkDecorated [ text "The identifier" <+> ppr (occName name) <+> text "does not exist in the local signature." ] TcRnShadowedName occ provenance -> let shadowed_locs = case provenance of ShadowedNameProvenanceLocal n -> [text "bound at" <+> ppr n] ShadowedNameProvenanceGlobal gres -> map pprNameProvenance gres in mkSimpleDecorated $ sep [text "This binding for" <+> quotes (ppr occ) <+> text "shadows the existing binding" <> plural shadowed_locs, nest 2 (vcat shadowed_locs)] TcRnInvalidWarningCategory cat -> mkSimpleDecorated $ vcat [text "Warning category" <+> quotes (ppr cat) <+> text "is not valid", text "(user-defined category names must begin with" <+> quotes (text "x-"), text "and contain only letters, numbers, apostrophes and dashes)" ] TcRnDuplicateWarningDecls d rdr_name -> mkSimpleDecorated $ vcat [text "Multiple warning declarations for" <+> quotes (ppr rdr_name), text "also at " <+> ppr (getLocA d)] TcRnSimplifierTooManyIterations simples limit wc -> mkSimpleDecorated $ hang (text "solveWanteds: too many iterations" <+> parens (text "limit =" <+> ppr limit)) 2 (vcat [ text "Unsolved:" <+> ppr wc , text "Simples:" <+> ppr simples ]) TcRnIllegalPatSynDecl rdrname -> mkSimpleDecorated $ hang (text "Illegal pattern synonym declaration for" <+> quotes (ppr rdrname)) 2 (text "Pattern synonym declarations are only valid at top level") TcRnLinearPatSyn ty -> mkSimpleDecorated $ hang (text "Pattern synonyms do not support linear fields (GHC #18806):") 2 (ppr ty) TcRnEmptyRecordUpdate -> mkSimpleDecorated $ text "Empty record update" TcRnIllegalFieldPunning fld -> mkSimpleDecorated $ text "Illegal use of punning for field" <+> quotes (ppr fld) TcRnIllegalWildcardsInRecord fld_part -> mkSimpleDecorated $ text "Illegal `..' 
in record" <+> pprRecordFieldPart fld_part TcRnIllegalWildcardInType mb_name bad -> mkSimpleDecorated $ case bad of WildcardNotLastInConstraint -> hang notAllowed 2 constraint_hint_msg ExtraConstraintWildcardNotAllowed allow_sole -> case allow_sole of SoleExtraConstraintWildcardNotAllowed -> notAllowed SoleExtraConstraintWildcardAllowed -> hang notAllowed 2 sole_msg WildcardsNotAllowedAtAll -> notAllowed WildcardBndrInForallTelescope -> notAllowed WildcardBndrInTyFamResultVar -> notAllowed where notAllowed, what, wildcard, how :: SDoc notAllowed = what <+> quotes wildcard <+> how wildcard = case mb_name of Nothing -> pprAnonWildCard Just name -> ppr name what | Just _ <- mb_name = text "Named wildcard" | ExtraConstraintWildcardNotAllowed {} <- bad = text "Extra-constraint wildcard" | WildcardBndrInForallTelescope {} <- bad = text "Wildcard binder" | WildcardBndrInTyFamResultVar {} <- bad = text "Wildcard binder" | otherwise = text "Wildcard" how = case bad of WildcardNotLastInConstraint -> text "not allowed in a constraint" WildcardBndrInForallTelescope -> text "not allowed in a forall telescope" WildcardBndrInTyFamResultVar -> text "not allowed in a type family result" _ -> text "not allowed" constraint_hint_msg :: SDoc constraint_hint_msg | Just _ <- mb_name = vcat [ text "Extra-constraint wildcards must be anonymous" , nest 2 (text "e.g f :: (Eq a, _) => blah") ] | otherwise = vcat [ text "except as the last top-level constraint of a type signature" , nest 2 (text "e.g f :: (Eq a, _) => blah") ] sole_msg :: SDoc sole_msg = vcat [ text "except as the sole constraint" , nest 2 (text "e.g., deriving instance _ => Eq (Foo a)") ] TcRnIllegalNamedWildcardInTypeArgument rdr -> mkSimpleDecorated $ hang (text "Illegal named wildcard in a required type argument:") 2 (quotes (ppr rdr)) TcRnIllegalImplicitTyVarInTypeArgument rdr -> mkSimpleDecorated $ hang (text "Illegal implicitly quantified type variable in a required type argument:") 2 (quotes (ppr rdr)) 
TcRnDuplicateFieldName fld_part dups -> mkSimpleDecorated $ hsep [ text "Duplicate field name" , quotes (ppr (rdrNameOcc $ NE.head dups)) , text "in record", pprRecordFieldPart fld_part ] TcRnIllegalViewPattern pat -> mkSimpleDecorated $ vcat [text "Illegal view pattern: " <+> ppr pat] TcRnCharLiteralOutOfRange c -> mkSimpleDecorated $ text "character literal out of range: '\\" <> char c <> char '\'' TcRnIllegalWildcardsInConstructor con -> mkSimpleDecorated $ vcat [ text "Illegal `{..}' notation for constructor" <+> quotes (ppr con) , nest 2 (text "Record wildcards may not be used for constructors with unlabelled fields.") , nest 2 (text "Possible fix: Remove the `{..}' and add a match for each field of the constructor.") ] TcRnIgnoringAnnotations anns -> mkSimpleDecorated $ text "Ignoring ANN annotation" <> plural anns <> comma <+> text "because this is a stage-1 compiler without -fexternal-interpreter or doesn't support GHCi" TcRnAnnotationInSafeHaskell -> mkSimpleDecorated $ vcat [ text "Annotations are not compatible with Safe Haskell." , text "See https://gitlab.haskell.org/ghc/ghc/issues/10826" ] TcRnInvalidTypeApplication fun_ty hs_ty -> mkSimpleDecorated $ text "Cannot apply expression of type" <+> quotes (ppr fun_ty) $$ text "to a visible type argument" <+> quotes (ppr hs_ty) TcRnTagToEnumMissingValArg -> mkSimpleDecorated $ text "tagToEnum# must appear applied to one value argument" TcRnTagToEnumUnspecifiedResTy ty -> mkSimpleDecorated $ hang (text "Bad call to tagToEnum# at type" <+> ppr ty) 2 (vcat [ text "Specify the type by giving a type signature" , text "e.g. 
(tagToEnum# x) :: Bool" ]) TcRnTagToEnumResTyNotAnEnum ty -> mkSimpleDecorated $ hang (text "Bad call to tagToEnum# at type" <+> ppr ty) 2 (text "Result type must be an enumeration type") TcRnTagToEnumResTyTypeData ty -> mkSimpleDecorated $ hang (text "Bad call to tagToEnum# at type" <+> ppr ty) 2 (text "Result type cannot be headed by a `type data` type") TcRnArrowIfThenElsePredDependsOnResultTy -> mkSimpleDecorated $ text "Predicate type of `ifThenElse' depends on result type" TcRnIllegalHsBootOrSigDecl boot_or_sig decls -> mkSimpleDecorated $ text "Illegal" <+> what <+> text "in" <+> whr <> dot where what = case decls of BootBindsPs {} -> text "binding" BootBindsRn {} -> text "binding" BootInstanceSigs {} -> text "instance body" BootFamInst {} -> text "family instance" BootSpliceDecls {} -> text "splice" BootForeignDecls {} -> text "foreign declaration" BootDefaultDecls {} -> text "default declaration" BootRuleDecls {} -> text "RULE pragma" whr = case boot_or_sig of HsBoot -> text "an hs-boot file" Hsig -> text "a backpack signature file" TcRnBootMismatch boot_or_sig err -> mkSimpleDecorated $ pprBootMismatch boot_or_sig err TcRnRecursivePatternSynonym binds -> mkSimpleDecorated $ hang (text "Recursive pattern synonym definition with following bindings:") 2 (vcat $ map pprLBind binds) where pprLoc loc = parens (text "defined at" <+> ppr loc) pprLBind :: CollectPass GhcRn => GenLocated (EpAnn a) (HsBindLR GhcRn idR) -> SDoc pprLBind (L loc bind) = pprWithCommas ppr (collectHsBindBinders CollNoDictBinders bind) <+> pprLoc (locA loc) TcRnPartialTypeSigTyVarMismatch n1 n2 fn_name hs_ty -> mkSimpleDecorated $ hang (text "Couldn't match" <+> quotes (ppr n1) <+> text "with" <+> quotes (ppr n2)) 2 (hang (text "both bound by the partial type signature:") 2 (ppr fn_name <+> dcolon <+> ppr hs_ty)) TcRnPartialTypeSigBadQuantifier n fn_name m_unif_ty hs_ty -> mkSimpleDecorated $ hang (text "Can't quantify over" <+> quotes (ppr n)) 2 (vcat [ hang (text "bound by the partial 
type signature:") 2 (ppr fn_name <+> dcolon <+> ppr hs_ty) , extra ]) where extra | Just rhs_ty <- m_unif_ty = sep [ quotes (ppr n), text "should really be", quotes (ppr rhs_ty) ] | otherwise = empty TcRnMissingSignature what _ -> mkSimpleDecorated $ case what of MissingPatSynSig p -> hang (text "Pattern synonym with no type signature:") 2 (text "pattern" <+> pprPrefixName (patSynName p) <+> dcolon <+> pprPatSynType p) MissingTopLevelBindingSig name ty -> hang (text "Top-level binding with no type signature:") 2 (pprPrefixName name <+> dcolon <+> pprSigmaType ty) MissingTyConKindSig tc cusks_enabled -> hang msg 2 (text "type" <+> pprPrefixName (tyConName tc) <+> dcolon <+> pprKind (tyConKind tc)) where msg | cusks_enabled = text "Top-level type constructor with no standalone kind signature or CUSK:" | otherwise = text "Top-level type constructor with no standalone kind signature:" TcRnPolymorphicBinderMissingSig n ty -> mkSimpleDecorated $ sep [ text "Polymorphic local binding with no type signature:" , nest 2 $ pprPrefixName n <+> dcolon <+> ppr ty ] TcRnOverloadedSig sig -> mkSimpleDecorated $ hang (text "Overloaded signature conflicts with monomorphism restriction") 2 (ppr sig) TcRnTupleConstraintInst _ -> mkSimpleDecorated $ text "You can't specify an instance for a tuple constraint" TcRnUserTypeError ty -> mkSimpleDecorated (pprUserTypeErrorTy ty) TcRnConstraintInKind ty -> mkSimpleDecorated $ text "Illegal constraint in a kind:" <+> pprType ty TcRnUnboxedTupleOrSumTypeFuncArg tuple_or_sum ty -> mkSimpleDecorated $ sep [ text "Illegal unboxed" <+> what <+> text "type as function argument:" , pprType ty ] where what = case tuple_or_sum of UnboxedTupleType -> text "tuple" UnboxedSumType -> text "sum" TcRnLinearFuncInKind ty -> mkSimpleDecorated $ text "Illegal linear function in a kind:" <+> pprType ty TcRnForAllEscapeError ty kind -> mkSimpleDecorated $ vcat [ hang (text "Quantified type's kind mentions quantified type variable") 2 (text "type:" <+> quotes (ppr 
ty)) , hang (text "where the body of the forall has this kind:") 2 (quotes (pprKind kind)) ] TcRnSimplifiableConstraint pred what -> mkSimpleDecorated $ vcat [ hang (text "The constraint" <+> quotes (pprType pred) <+> text "matches") 2 (ppr what) , hang (text "This makes type inference for inner bindings fragile;") 2 (text "either use MonoLocalBinds, or simplify it using the instance") ] TcRnArityMismatch thing thing_arity nb_args -> mkSimpleDecorated $ hsep [ text "The" <+> what, quotes (ppr $ getName thing), text "should have" , n_arguments <> comma, text "but has been given" , if nb_args == 0 then text "none" else int nb_args ] where what = case thing of ATyCon tc -> ppr (tyConFlavour tc) _ -> text (tyThingCategory thing) n_arguments | thing_arity == 0 = text "no arguments" | thing_arity == 1 = text "1 argument" | True = hsep [int thing_arity, text "arguments"] TcRnIllegalInstance reason -> mkSimpleDecorated $ pprIllegalInstance reason TcRnVDQInTermType mb_ty -> mkSimpleDecorated $ case mb_ty of Nothing -> main_msg Just ty -> hang (main_msg <> char ':') 2 (pprType ty) where main_msg = text "Illegal visible, dependent quantification" <+> text "in the type of a term" TcRnBadQuantPredHead ty -> mkSimpleDecorated $ hang (text "Quantified predicate must have a class or type variable head:") 2 (pprType ty) TcRnIllegalTupleConstraint ty -> mkSimpleDecorated $ text "Illegal tuple constraint:" <+> pprType ty TcRnNonTypeVarArgInConstraint ty -> mkSimpleDecorated $ hang (text "Non type-variable argument") 2 (text "in the constraint:" <+> pprType ty) TcRnIllegalImplicitParam ty -> mkSimpleDecorated $ text "Illegal implicit parameter" <+> quotes (pprType ty) TcRnIllegalConstraintSynonymOfKind kind -> mkSimpleDecorated $ text "Illegal constraint synonym of kind:" <+> quotes (pprKind kind) TcRnOversaturatedVisibleKindArg ty -> mkSimpleDecorated $ text "Illegal oversaturated visible kind argument:" <+> quotes (char '@' <> pprParendType ty) TcRnForAllRankErr rank ty -> let 
herald = case tcSplitForAllTyVars ty of ([], _) -> text "Illegal qualified type:" _ -> text "Illegal polymorphic type:" extra = case rank of MonoTypeConstraint -> text "A constraint must be a monotype" _ -> empty in mkSimpleDecorated $ vcat [hang herald 2 (pprType ty), extra] TcRnMonomorphicBindings bindings -> let pp_bndrs = pprBindings bindings in mkSimpleDecorated $ sep [ text "The Monomorphism Restriction applies to the binding" <> plural bindings , text "for" <+> pp_bndrs ] TcRnOrphanInstance (Left cls_inst) -> mkSimpleDecorated $ hang (text "Orphan class instance:") 2 (pprInstanceHdr cls_inst) TcRnOrphanInstance (Right fam_inst) -> mkSimpleDecorated $ hang (text "Orphan family instance:") 2 (pprFamInst fam_inst) TcRnFunDepConflict unit_state sorted -> let herald = text "Functional dependencies conflict between instance declarations:" in mkSimpleDecorated $ pprWithUnitState unit_state $ (hang herald 2 (pprInstances $ NE.toList sorted)) TcRnDupInstanceDecls unit_state sorted -> let herald = text "Duplicate instance declarations:" in mkSimpleDecorated $ pprWithUnitState unit_state $ (hang herald 2 (pprInstances $ NE.toList sorted)) TcRnConflictingFamInstDecls sortedNE -> let sorted = NE.toList sortedNE in mkSimpleDecorated $ hang (text "Conflicting family instance declarations:") 2 (vcat [ pprCoAxBranchUser (coAxiomTyCon ax) (coAxiomSingleBranch ax) | fi <- sorted , let ax = famInstAxiom fi ]) TcRnFamInstNotInjective rea fam_tc (eqn1 NE.:| rest_eqns) -> let (herald, show_kinds) = case rea of InjErrRhsBareTyVar tys -> (injectivityErrorHerald $$ text "RHS of injective type family equation is a bare" <+> text "type variable" $$ text "but these LHS type and kind patterns are not bare" <+> text "variables:" <+> pprQuotedList tys, False) InjErrRhsCannotBeATypeFam -> (injectivityErrorHerald $$ text "RHS of injective type family equation cannot" <+> text "be a type family:", False) InjErrRhsOverlap -> (text "Type family equation right-hand sides overlap; this violates" 
$$ text "the family's injectivity annotation:", False) InjErrCannotInferFromRhs tvs has_kinds _ -> let show_kinds = has_kinds == YesHasKinds what = if show_kinds then text "Type/kind" else text "Type" body = sep [ what <+> text "variable" <> pluralVarSet tvs <+> pprVarSet tvs (pprQuotedList . scopedSort) , text "cannot be inferred from the right-hand side." ] in (injectivityErrorHerald $$ body $$ text "In the type family equation:", show_kinds) in mkSimpleDecorated $ pprWithInvisibleBitsWhen show_kinds $ hang herald 2 (vcat (map (pprCoAxBranchUser fam_tc) (eqn1 : rest_eqns))) TcRnBangOnUnliftedType ty -> mkSimpleDecorated $ text "Strictness flag has no effect on unlifted type" <+> quotes (ppr ty) TcRnLazyBangOnUnliftedType ty -> mkSimpleDecorated $ text "Lazy flag has no effect on unlifted type" <+> quotes (ppr ty) TcRnMultipleDefaultDeclarations cls dup_things -> mkSimpleDecorated $ hang (text "Multiple default declarations for class" <+> quotes (ppr cls)) 2 (vcat (map pp dup_things)) where pp :: LDefaultDecl GhcRn -> SDoc pp (L locn DefaultDecl {}) = text "here was another default declaration" <+> ppr (locA locn) TcRnBadDefaultType ty deflt_clss -> mkSimpleDecorated $ hang (text "The default type" <+> quotes (ppr ty) <+> text "is not an instance of") 2 (foldr1 (\a b -> a <+> text "or" <+> b) (map (quotes. ppr) deflt_clss)) TcRnPatSynBundledWithNonDataCon -> mkSimpleDecorated $ text "Pattern synonyms can be bundled only with datatypes." 
TcRnPatSynBundledWithWrongType expected_res_ty res_ty -> mkSimpleDecorated $ text "Pattern synonyms can only be bundled with matching type constructors" $$ text "Couldn't match expected type of" <+> quotes (ppr expected_res_ty) <+> text "with actual type of" <+> quotes (ppr res_ty) TcRnDupeModuleExport mod -> mkSimpleDecorated $ hsep [ text "Duplicate" , quotes (text "Module" <+> ppr mod) , text "in export list" ] TcRnExportedModNotImported mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "is not imported" TcRnNullExportedModule mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "exports nothing" TcRnMissingExportList mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "is missing an export list" TcRnExportHiddenComponents export_item -> mkSimpleDecorated $ formatExportItemError (ppr export_item) "attempts to export constructors or class methods that are not visible here" TcRnExportHiddenDefault export_item -> mkSimpleDecorated $ formatExportItemError (ppr export_item) "attempts to export a default class declaration that is not visible here" TcRnDuplicateExport gre ie1 ie2 -> mkSimpleDecorated $ hsep [ quotes (ppr $ greName gre) , text "is exported by", quotes (ppr ie1) , text "and", quotes (ppr ie2) ] TcRnExportedParentChildMismatch parent_name ty_thing child parent_names -> mkSimpleDecorated $ text "The type constructor" <+> quotes (ppr parent_name) <+> text "is not the parent of the" <+> text what_is <+> quotes thing <> char '.' $$ text (capitalise what_is) <> text "s can only be exported with their parent type constructor." 
$$ (case parents of [] -> empty [_] -> text "Parent:" _ -> text "Parents:") <+> fsep (punctuate comma parents) where pp_category :: TyThing -> String pp_category (AnId i) | isRecordSelector i = "record selector" pp_category i = tyThingCategory i what_is = pp_category ty_thing thing = ppr $ nameOccName child parents = map ppr parent_names TcRnConflictingExports occ child_gre1 ie1 child_gre2 ie2 -> mkSimpleDecorated $ vcat [ text "Conflicting exports for" <+> quotes (ppr occ) <> colon , ppr_export child_gre1 ie1 , ppr_export child_gre2 ie2 ] where ppr_export gre ie = nest 3 $ hang (quotes (ppr ie) <+> text "exports" <+> quotes (ppr $ greName gre)) 2 (pprNameProvenance gre) TcRnDuplicateFieldExport (gre, ie1) gres_ies -> mkSimpleDecorated $ vcat ( hsep [ text "Duplicate record field" , quotes (ppr $ greOccName gre) , text "in export list" <> colon ] : map ppr_export ((gre,ie1) : NE.toList gres_ies) ) where ppr_export (gre,ie) = nest 3 $ hang (sep [ quotes (ppr ie) <+> text "exports the field" <+> quotes (ppr $ greName gre) , text "belonging to the constructor" <> plural fld_cons <+> pprQuotedList fld_cons ]) 2 (pprNameProvenance gre) where fld_cons :: [ConLikeName] fld_cons = nonDetEltsUniqSet $ recFieldCons $ fieldGREInfo gre TcRnAmbiguousFieldInUpdate (gre1, gre2, gres) -> mkSimpleDecorated $ vcat [ text "Ambiguous record field" <+> fld <> dot , hang (text "It could refer to any of the following:") 2 $ vcat (map pprSugg (gre1 : gre2 : gres)) ] where fld = quotes $ ppr (occNameFS $ greOccName gre1) pprSugg gre = vcat [ bullet <+> pprGRE gre <> comma , nest 2 (pprNameProvenance gre) ] pprGRE gre = case greInfo gre of IAmRecField {} -> let parent = par_is $ greParent gre in text "record field" <+> fld <+> text "of" <+> quotes (ppr parent) _ -> text "variable" <+> fld TcRnAmbiguousRecordUpdate _rupd tc -> mkSimpleDecorated $ vcat [ text "Ambiguous record update with parent" <+> what <> dot , hsep [ text "This type-directed disambiguation mechanism" , text "will not be 
supported by -XDuplicateRecordFields in future releases of GHC." ] , text "Consider disambiguating using module qualification instead." ] where what :: SDoc what = text "type constructor" <+> quotes (ppr $ RecSelData tc) TcRnMissingFields con fields -> mkSimpleDecorated $ vcat [header, nest 2 rest] where rest | null fields = empty | otherwise = vcat (fmap pprField fields) header = text "Fields of" <+> quotes (ppr con) <+> text "not initialised" <> if null fields then empty else colon TcRnFieldUpdateInvalidType prs -> mkSimpleDecorated $ hang (text "Record update for insufficiently polymorphic field" <> plural prs <> colon) 2 (vcat [ ppr f <+> dcolon <+> ppr ty | (f,ty) <- prs ]) TcRnMissingStrictFields con fields -> mkSimpleDecorated $ vcat [header, nest 2 rest] where rest | null fields = empty -- Happens for non-record constructors -- with strict fields | otherwise = vcat (fmap pprField fields) header = text "Constructor" <+> quotes (ppr con) <+> text "does not have the required strict field(s)" <> if null fields then empty else colon TcRnBadRecordUpdate upd_flds reason -> case reason of NoConstructorHasAllFields { conflictingFields = conflicts } | [fld] <- conflicts -> mkSimpleDecorated $ vcat [ header , text "No constructor in scope has the field" <+> quotes (ppr fld) ] | otherwise -> mkSimpleDecorated $ vcat [ header , hang (text "No constructor in scope has all of the following fields:") 2 (pprQuotedList conflicts) ] where header :: SDoc header = text "Invalid record update." 
MultiplePossibleParents (par1, par2, pars) -> mkSimpleDecorated $ vcat [ hang (text "Ambiguous record update with field" <> plural upd_flds) 2 ppr_flds , hang (thisOrThese upd_flds <+> text "field" <> plural upd_flds <+> what_parent) 2 (quotedListWithAnd (map ppr (par1:par2:pars))) ] where ppr_flds, what_parent, which :: SDoc ppr_flds = quotedListWithAnd $ map ppr upd_flds what_parent = case par1 of RecSelData {} -> text "appear" <> singular upd_flds <+> text "in" <+> which <+> text "datatypes" RecSelPatSyn {} -> isOrAre upd_flds <+> text "associated with" <+> which <+> text "pattern synonyms" which = case pars of [] -> text "both" _ -> text "all of the" InvalidTyConParent tc pars -> mkSimpleDecorated $ vcat [ hang (text "No data constructor of" <+> what $$ text "has all of the fields:") 2 (pprQuotedList upd_flds) , pat_syn_msg ] where what = text "type constructor" <+> quotes (ppr (RecSelData tc)) pat_syn_msg | any (\case { RecSelPatSyn {} -> True; _ -> False}) pars = note "Type-directed disambiguation is not supported for pattern synonym record fields" | otherwise = empty TcRnStaticFormNotClosed name reason -> mkSimpleDecorated $ quotes (ppr name) <+> text "is used in a static form but it is not closed" <+> text "because it" $$ sep (causes reason) where causes :: NotClosedReason -> [SDoc] causes NotLetBoundReason = [text "is not let-bound."] causes (NotTypeClosed vs) = [ text "has a non-closed type because it contains the" , text "type variables:" <+> pprVarSet vs (hsep . punctuate comma . map (quotes . 
ppr)) ] causes (NotClosed n reason) = let msg = text "uses" <+> quotes (ppr n) <+> text "which" in case reason of NotClosed _ _ -> msg : causes reason _ -> let (xs0, xs1) = splitAt 1 $ causes reason in fmap (msg <+>) xs0 ++ xs1 TcRnUselessTypeable -> mkSimpleDecorated $ text "Deriving" <+> quotes (ppr typeableClassName) <+> text "has no effect: all types now auto-derive Typeable" TcRnDerivingDefaults cls -> mkSimpleDecorated $ sep [ text "Both DeriveAnyClass and" <+> text "GeneralizedNewtypeDeriving are enabled" , text "Defaulting to the DeriveAnyClass strategy" <+> text "for instantiating" <+> ppr cls ] TcRnNonUnaryTypeclassConstraint ctxt ct -> mkSimpleDecorated $ quotes (ppr ct) <+> text "is not a unary constraint, as expected by" <+> pprUserTypeCtxt ctxt TcRnPartialTypeSignatures _ theta -> mkSimpleDecorated $ text "Found type wildcard" <+> quotes (char '_') <+> text "standing for" <+> quotes (pprTheta theta) TcRnCannotDeriveInstance cls cls_tys mb_strat newtype_deriving reason -> mkSimpleDecorated $ derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving True reason TcRnLookupInstance cls tys reason -> mkSimpleDecorated $ text "Couldn't match instance:" <+> lookupInstanceErrDiagnosticMessage cls tys reason TcRnLazyGADTPattern -> mkSimpleDecorated $ hang (text "An existential or GADT data constructor cannot be used") 2 (text "inside a lazy (~) pattern") TcRnArrowProcGADTPattern -> mkSimpleDecorated $ text "Proc patterns cannot use existential or GADT data constructors" TcRnTypeEqualityOutOfScope -> mkDecorated [ text "The" <+> quotes (text "~") <+> text "operator is out of scope." $$ text "Assuming it to stand for an equality constraint." , note $ quotes "~" <+> "used to be built-in syntax but now is a regular type operator" $$ "exported from Data.Type.Equality and Prelude." $$ "If you are using a custom Prelude, consider re-exporting it" , text "This will become an error in a future GHC release." 
] TcRnTypeEqualityRequiresOperators -> mkSimpleDecorated $ fsep [ text "The use of" <+> quotes (text "~") <+> text "without TypeOperators", text "will become an error in a future GHC release." ] TcRnIllegalTypeOperator overall_ty op -> mkSimpleDecorated $ text "Illegal operator" <+> quotes (ppr op) <+> text "in type" <+> quotes (ppr overall_ty) TcRnIllegalTypeOperatorDecl name -> mkSimpleDecorated $ text "Illegal declaration of a type or class operator" <+> quotes (ppr name) TcRnGADTMonoLocalBinds -> mkSimpleDecorated $ fsep [ text "Pattern matching on GADTs without MonoLocalBinds" , text "is fragile." ] TcRnIncorrectNameSpace name _ -> mkSimpleDecorated $ text "The" <+> what <+> text "does not live in" <+> other_ns where -- the other (opposite) namespace other_ns | isValNameSpace ns = text "the type-level namespace" | otherwise = text "the term-level namespace" ns = nameNameSpace name what = pprNameSpace ns <+> quotes (ppr name) TcRnNotInScope err name imp_errs _ -> mkSimpleDecorated $ pprScopeError name err $$ vcat (map ppr imp_errs) TcRnTermNameInType name _ -> mkSimpleDecorated $ quotes (ppr name) <+> (text "is a term-level binding") $+$ (text " and can not be used at the type level.") TcRnUntickedPromotedThing thing -> mkSimpleDecorated $ text "Unticked promoted" <+> what where what :: SDoc what = case thing of UntickedExplicitList -> text "list" <> dot UntickedConstructor fixity nm -> let con = pprUntickedConstructor fixity nm bare_sym = isBareSymbol fixity nm in text "constructor:" <+> con <> if bare_sym then empty else dot TcRnIllegalBuiltinSyntax what rdr_name -> mkSimpleDecorated $ hsep [text "Illegal", what, text "of built-in syntax:", ppr rdr_name] TcRnWarnDefaulting tidy_wanteds tidy_tv default_ty -> mkSimpleDecorated $ hang (hsep $ [ text "Defaulting" ] ++ (case tidy_tv of Nothing -> [] Just tv -> [text "the type variable" , quotes (ppr tv)]) ++ [ text "to type" , quotes (ppr default_ty) , text "in the following constraint" <> plural tidy_wanteds ]) 2 
(pprWithArising tidy_wanteds) TcRnWarnClashingDefaultImports cls Nothing imports -> mkSimpleDecorated $ hang (text "Clashing imported defaults for class" <+> quotes (ppr cls) <> colon) 2 (vcat $ defaultTypesAndImport <$> NE.toList imports) TcRnWarnClashingDefaultImports cls (Just local) imports -> mkSimpleDecorated $ sep [ hang (text "Imported defaults for class" <+> quotes (ppr cls) <> colon) 2 (vcat $ defaultTypesAndImport <$> NE.toList imports) , hang (text "are not subsumed by the local `default` declaration") 2 (parens $ pprWithCommas ppr local) ] TcRnForeignImportPrimExtNotSet _decl -> mkSimpleDecorated $ text "`foreign import prim' requires GHCForeignImportPrim." TcRnForeignImportPrimSafeAnn _decl -> mkSimpleDecorated $ text "The safe/unsafe annotation should not be used with `foreign import prim'." TcRnForeignFunctionImportAsValue _decl -> mkSimpleDecorated $ text "`value' imports cannot have function types" TcRnFunPtrImportWithoutAmpersand _decl -> mkSimpleDecorated $ text "possible missing & in foreign import of FunPtr" TcRnIllegalForeignDeclBackend _decl _backend expectedBknds -> mkSimpleDecorated $ fsep (text "Illegal foreign declaration: requires one of these back ends:" : commafyWith (text "or") (map (text . 
backendDescription) expectedBknds)) TcRnUnsupportedCallConv _decl unsupportedCC -> mkSimpleDecorated $ case unsupportedCC of StdCallConvUnsupported -> text "the 'stdcall' calling convention is unsupported on this platform," $$ text "treating as ccall" PrimCallConvUnsupported -> text "The `prim' calling convention can only be used with `foreign import'" JavaScriptCallConvUnsupported -> text "The `javascript' calling convention is unsupported on this platform" TcRnIllegalForeignType mArgOrResult reason -> mkSimpleDecorated $ hang msg 2 extra where arg_or_res = case mArgOrResult of Nothing -> empty Just Arg -> text "argument" Just Result -> text "result" msg = hsep [ text "Unacceptable", arg_or_res , text "type in foreign declaration:"] extra = case reason of TypeCannotBeMarshaled ty why -> let innerMsg = quotes (ppr ty) <+> text "cannot be marshalled in a foreign call" in case why of NotADataType -> quotes (ppr ty) <+> text "is not a data type" NewtypeDataConNotInScope _ [] -> hang innerMsg 2 $ text "because its data constructor is not in scope" NewtypeDataConNotInScope tc _ -> hang innerMsg 2 $ text "because the data constructor for" <+> quotes (ppr tc) <+> text "is not in scope" UnliftedFFITypesNeeded -> innerMsg $$ text "UnliftedFFITypes is required to marshal unlifted types" NotABoxedMarshalableTyCon -> innerMsg ForeignLabelNotAPtr -> innerMsg $$ text "A foreign-imported address (via &foo) must have type (Ptr a) or (FunPtr a)" NotSimpleUnliftedType -> innerMsg $$ text "foreign import prim only accepts simple unlifted types" NotBoxedKindAny -> text "Expected kind" <+> quotes (text "Type") <+> text "or" <+> quotes (text "UnliftedType") <> comma $$ text "but" <+> quotes (ppr ty) <+> text "has kind" <+> quotes (ppr (typeKind ty)) ForeignDynNotPtr expected ty -> vcat [ text "Expected: Ptr/FunPtr" <+> pprParendType expected <> comma, text " Actual:" <+> ppr ty ] SafeHaskellMustBeInIO -> text "Safe Haskell is on, all FFI imports must be in the IO monad" IOResultExpected 
-> text "IO result type expected" UnexpectedNestedForall -> text "Unexpected nested forall" LinearTypesNotAllowed -> text "Linear types are not supported in FFI declarations, see #18472" OneArgExpected -> text "One argument expected" AtLeastOneArgExpected -> text "At least one argument expected" TcRnInvalidCIdentifier target -> mkSimpleDecorated $ sep [quotes (ppr target) <+> text "is not a valid C identifier"] TcRnExpectedValueId thing -> mkSimpleDecorated $ ppr thing <+> text "used where a value identifier was expected" TcRnRecSelectorEscapedTyVar lbl -> mkSimpleDecorated $ text "Cannot use record selector" <+> quotes (ppr lbl) <+> text "as a function due to escaped type variables" TcRnPatSynNotBidirectional name -> mkSimpleDecorated $ text "non-bidirectional pattern synonym" <+> quotes (ppr name) <+> text "used in an expression" TcRnIllegalDerivingItem hs_ty -> mkSimpleDecorated $ text "Illegal deriving item" <+> quotes (ppr hs_ty) TcRnIllegalDefaultClass hs_ty -> mkSimpleDecorated $ quotes (ppr hs_ty) <+> text "is not a class" TcRnIllegalNamedDefault hs_decl -> mkSimpleDecorated $ text "Illegal use of default class name:" <+> quotes (ppr hs_decl) TcRnUnexpectedAnnotation ty bang -> mkSimpleDecorated $ let err = case bang of HsBang SrcUnpack _ -> "UNPACK" HsBang SrcNoUnpack _ -> "NOUNPACK" HsBang NoSrcUnpack SrcLazy -> "laziness" HsBang _ _ -> "strictness" in text "Unexpected" <+> text err <+> text "annotation:" <+> ppr ty $$ text err <+> text "annotation cannot appear nested inside a type" TcRnIllegalRecordSyntax either_ty_ty -> mkSimpleDecorated $ text "Record syntax is illegal here:" <+> either ppr ppr either_ty_ty TcRnInvalidVisibleKindArgument arg ty -> mkSimpleDecorated $ text "Cannot apply function of kind" <+> quotes (ppr ty) $$ text "to visible kind argument" <+> quotes (ppr arg) TcRnTooManyBinders ki bndrs -> mkSimpleDecorated $ hang (text "Not a function kind:") 4 (ppr ki) $$ hang (text "but extra binders found:") 4 (fsep (map ppr bndrs)) 
TcRnDifferentNamesForTyVar n1 n2 -> mkSimpleDecorated $ hang (text "Different names for the same type variable:") 2 info where info | nameOccName n1 /= nameOccName n2 = quotes (ppr n1) <+> text "and" <+> quotes (ppr n2) | otherwise -- Same OccNames! See C2 in -- Note [Swizzling the tyvars before generaliseTcTyCon] = vcat [ quotes (ppr n1) <+> text "bound at" <+> ppr (getSrcLoc n1) , quotes (ppr n2) <+> text "bound at" <+> ppr (getSrcLoc n2) ] TcRnDisconnectedTyVar n -> mkSimpleDecorated $ hang (text "Scoped type variable only appears non-injectively in declaration header:") 2 (quotes (ppr n) <+> text "bound at" <+> ppr (getSrcLoc n)) TcRnInvalidReturnKind data_sort allowed_kind kind _suggested_ext -> mkSimpleDecorated $ sep [ ppDataSort data_sort <+> text "has non-" <> allowed_kind_tycon , (if is_data_family then text "and non-variable" else empty) <+> text "return kind" <+> quotes (ppr kind) ] where is_data_family = case data_sort of DataDeclSort{} -> False DataInstanceSort{} -> False DataFamilySort -> True allowed_kind_tycon = case allowed_kind of AnyTYPEKind -> ppr tYPETyCon AnyBoxedKind -> ppr boxedRepDataConTyCon LiftedKind -> ppr liftedTypeKind TcRnClassKindNotConstraint _kind -> mkSimpleDecorated $ text "Kind signature on a class must end with" <+> ppr constraintKind $$ text "unobscured by type families" TcRnUnpromotableThing name err -> mkSimpleDecorated $ (hang (pprPECategory err <+> quotes (ppr name) <+> text "cannot be used here") 2 (parens reason)) where reason = case err of ConstrainedDataConPE theta -> text "it has an unpromotable context" <+> quotes (pprTheta theta) FamDataConPE -> text "it comes from a data family instance" PatSynPE -> text "pattern synonyms cannot be promoted" RecDataConPE -> same_rec_group_msg ClassPE -> same_rec_group_msg TyConPE -> same_rec_group_msg TermVariablePE -> text "term variables cannot be promoted" TypeVariablePE -> text "type variables bound in a kind signature cannot be used in the type" same_rec_group_msg = text "it 
is defined and used in the same recursive group" TcRnIllegalTermLevelUse name err -> mkSimpleDecorated $ text "Illegal term-level use of the" <+> text (teCategory err) <+> quotes (ppr name) TcRnMatchesHaveDiffNumArgs argsContext (MatchArgMatches match1 bad_matches) -> mkSimpleDecorated $ (vcat [ pprMatchContextNouns argsContext <+> text "have different numbers of arguments" , nest 2 (ppr (getLocA match1)) , nest 2 (ppr (getLocA (NE.head bad_matches)))]) TcRnCannotBindScopedTyVarInPatSig sig_tvs -> mkSimpleDecorated $ hang (text "You cannot bind scoped type variable" <> plural (NE.toList sig_tvs) <+> pprQuotedList (map fst $ NE.toList sig_tvs)) 2 (text "in a pattern binding signature") TcRnCannotBindTyVarsInPatBind _offenders -> mkSimpleDecorated $ text "Binding type variables is not allowed in pattern bindings" TcRnTooManyTyArgsInConPattern con_like expected_number actual_number -> mkSimpleDecorated $ text "Too many type arguments in constructor pattern for" <+> quotes (ppr con_like) $$ text "Expected no more than" <+> ppr expected_number <> semi <+> text "got" <+> ppr actual_number TcRnMultipleInlinePragmas poly_id fst_inl_prag inl_prags -> mkSimpleDecorated $ hang (text "Multiple INLINE pragmas for" <+> ppr poly_id) 2 (vcat (text "Ignoring all but the first" : map pp_inl (fst_inl_prag : NE.toList inl_prags))) where pp_inl (L loc prag) = ppr prag <+> parens (ppr loc) TcRnUnexpectedPragmas poly_id bad_sigs -> mkSimpleDecorated $ hang (text "Discarding unexpected pragmas for" <+> ppr poly_id) 2 (vcat (map (ppr . 
getLoc) $ NE.toList bad_sigs)) TcRnNonOverloadedSpecialisePragma fun_name -> mkSimpleDecorated $ text "SPECIALISE pragma for non-overloaded function" <+> quotes (ppr fun_name) TcRnSpecialiseNotVisible name -> mkSimpleDecorated $ text "You cannot SPECIALISE" <+> quotes (ppr name) <+> text "because its definition is not visible in this module" TcRnPragmaWarning { pragma_warning_info = PragmaWarningInstance{pwarn_dfunid, pwarn_ctorig} , pragma_warning_msg } -> mkSimpleDecorated $ sep [ hang (text "In the use of") 2 (pprDFunId pwarn_dfunid) , ppr pwarn_ctorig , pprWarningTxtForMsg pragma_warning_msg ] TcRnPragmaWarning { pragma_warning_info = PragmaWarningDefault{pwarn_class, pwarn_impmod} , pragma_warning_msg } -> mkSimpleDecorated $ sep [ sep [ text "In the use of class" <+> ppr pwarn_class <+> text "defaults imported from" <+> ppr pwarn_impmod <> colon ] , pprWarningTxtForMsg pragma_warning_msg ] TcRnPragmaWarning {pragma_warning_info, pragma_warning_msg} -> mkSimpleDecorated $ sep [ sep [ text "In the use of" <+> pprNonVarNameSpace (occNameSpace occ_name) <+> quotes (ppr occ_name) , parens imp_msg <> colon ] , pprWarningTxtForMsg pragma_warning_msg ] where occ_name = pwarn_occname pragma_warning_info imp_mod = pwarn_impmod pragma_warning_info imp_msg = text "imported from" <+> ppr imp_mod <> extra extra | PragmaWarningName {pwarn_declmod = decl_mod} <- pragma_warning_info , imp_mod /= decl_mod = text ", but defined in" <+> ppr decl_mod | otherwise = empty TcRnDifferentExportWarnings name locs -> mkSimpleDecorated $ vcat [quotes (ppr name) <+> text "exported with different error messages", text "at" <+> vcat (map ppr $ sortBy leftmost_smallest $ NE.toList locs)] TcRnIncompleteExportWarnings name locs -> mkSimpleDecorated $ vcat [quotes (ppr name) <+> text "will not have its export warned about", text "missing export warning at" <+> vcat (map ppr $ sortBy leftmost_smallest $ NE.toList locs)] TcRnIllegalHsigDefaultMethods name meths -> mkSimpleDecorated $ text 
"Illegal default method" <> plural (NE.toList meths) <+> text "in class definition of" <+> ppr name <+> text "in hsig file" TcRnHsigFixityMismatch real_thing real_fixity sig_fixity -> let ppr_fix f = ppr f <+> if f == defaultFixity then parens (text "default") else empty in mkSimpleDecorated $ vcat [ppr real_thing <+> text "has conflicting fixities in the module", text "and its hsig file", text "Main module:" <+> ppr_fix real_fixity, text "Hsig file:" <+> ppr_fix sig_fixity] TcRnHsigShapeMismatch (HsigShapeSortMismatch info1 info2) -> mkSimpleDecorated $ text "While merging export lists, could not combine" <+> ppr info1 <+> text "with" <+> ppr info2 <+> parens (text "one is a type, the other is a plain identifier") TcRnHsigShapeMismatch (HsigShapeNotUnifiable name1 name2 notHere) -> let extra = if notHere then text "Neither name variable originates from the current signature." else empty in mkSimpleDecorated $ text "While merging export lists, could not unify" <+> ppr name1 <+> text "with" <+> ppr name2 $$ extra TcRnHsigMissingModuleExport occ unit_state impl_mod -> mkSimpleDecorated $ quotes (ppr occ) <+> text "is exported by the hsig file, but not exported by the implementing module" <+> quotes (pprWithUnitState unit_state $ ppr impl_mod) TcRnBadGenericMethod clas op -> mkSimpleDecorated $ hsep [text "Class", quotes (ppr clas), text "has a generic-default signature without a binding", quotes (ppr op)] TcRnWarningMinimalDefIncomplete mindef -> mkSimpleDecorated $ vcat [ text "The MINIMAL pragma does not require:" , nest 2 (pprBooleanFormulaNice mindef) , text "but there is no default implementation." 
] TcRnDefaultMethodForPragmaLacksBinding sel_id prag -> mkSimpleDecorated $ text "The" <+> hsSigDoc prag <+> text "for default method" <+> quotes (ppr sel_id) <+> text "lacks an accompanying binding" TcRnIgnoreSpecialisePragmaOnDefMethod sel_name -> mkSimpleDecorated $ text "Ignoring SPECIALISE pragmas on default method" <+> quotes (ppr sel_name) TcRnBadMethodErr{badMethodErrClassName, badMethodErrMethodName} -> mkSimpleDecorated $ hsep [text "Class", quotes (ppr badMethodErrClassName), text "does not have a method", quotes (ppr badMethodErrMethodName)] TcRnIllegalTypeData -> mkSimpleDecorated $ text "Illegal type-level data declaration" TcRnTypeDataForbids feature -> mkSimpleDecorated $ ppr feature <+> text "are not allowed in type data declarations." TcRnIllegalNewtype con show_linear_types reason -> mkSimpleDecorated $ vcat [msg, additional] where (msg,additional) = case reason of DoesNotHaveSingleField n_flds -> (sep [ text "A newtype constructor must have exactly one field", nest 2 $ text "but" <+> quotes (ppr con) <+> text "has" <+> speakN n_flds ], ppr con <+> dcolon <+> ppr (dataConDisplayType show_linear_types con)) IsNonLinear -> (text "A newtype constructor must be linear", ppr con <+> dcolon <+> ppr (dataConDisplayType True con)) IsGADT -> (text "A newtype must not be a GADT", ppr con <+> dcolon <+> pprWithInvisibleBitsWhen sneaky_eq_spec (ppr $ dataConDisplayType show_linear_types con)) HasConstructorContext -> (text "A newtype constructor must not have a context in its type", ppr con <+> dcolon <+> ppr (dataConDisplayType show_linear_types con)) HasExistentialTyVar -> (text "A newtype constructor must not have existential type variables", ppr con <+> dcolon <+> ppr (dataConDisplayType show_linear_types con)) HasStrictnessAnnotation -> (text "A newtype constructor must not have a strictness annotation", empty) -- Is the data con a "covert" GADT? 
See Note [isCovertGadtDataCon] -- in GHC.Core.DataCon sneaky_eq_spec = isCovertGadtDataCon con TcRnOrPatBindsVariables bndrs -> mkSimpleDecorated $ text "An or-pattern may not bind term or type variables such as" <+> quotedListWithOr (map ppr (NE.toList bndrs)) TcRnUnsatisfiedMinimalDef mindef -> mkSimpleDecorated $ vcat [text "No explicit implementation for" ,nest 2 $ pprBooleanFormulaNice mindef ] TcRnMisplacedInstSig name hs_ty -> mkSimpleDecorated $ vcat [ hang (text "Illegal type signature in instance declaration:") 2 (hang (pprPrefixName name) 2 (dcolon <+> ppr hs_ty)) ] TcRnNoRebindableSyntaxRecordDot -> mkSimpleDecorated $ text "RebindableSyntax is required if OverloadedRecordUpdate is enabled." TcRnNoFieldPunsRecordDot -> mkSimpleDecorated $ text "For this to work enable NamedFieldPuns" TcRnIllegalStaticExpression e -> mkSimpleDecorated $ text "Illegal static expression:" <+> ppr e TcRnListComprehensionDuplicateBinding n -> mkSimpleDecorated $ (text "Duplicate binding in parallel list comprehension for:" <+> quotes (ppr n)) TcRnEmptyStmtsGroup cause -> mkSimpleDecorated $ case cause of EmptyStmtsGroupInParallelComp -> text "Empty statement group in parallel comprehension" EmptyStmtsGroupInTransformListComp -> text "Empty statement group preceding 'group' or 'then'" EmptyStmtsGroupInDoNotation ctxt -> text "Empty" <+> pprHsDoFlavour ctxt EmptyStmtsGroupInArrowNotation -> text "Empty 'do' block in an arrow command" TcRnLastStmtNotExpr ctxt (UnexpectedStatement stmt) -> mkSimpleDecorated $ hang last_error 2 (ppr stmt) where last_error = text "The last statement in" <+> pprAStmtContext ctxt <+> text "must be an expression" TcRnUnexpectedStatementInContext ctxt (UnexpectedStatement stmt) _ -> mkSimpleDecorated $ sep [ text "Unexpected" <+> pprStmtCat stmt <+> text "statement" , text "in" <+> pprAStmtContext ctxt ] TcRnIllegalTupleSection -> mkSimpleDecorated $ text "Illegal tuple section" TcRnIllegalImplicitParameterBindings eBinds -> mkSimpleDecorated $ either 
msg msg eBinds where msg binds = hang (text "Implicit-parameter bindings illegal in an mdo expression") 2 (ppr binds) TcRnSectionWithoutParentheses expr -> mkSimpleDecorated $ hang (text "A section must be enclosed in parentheses") 2 (text "thus:" <+> (parens (ppr expr))) TcRnMissingRoleAnnotation name roles -> mkSimpleDecorated $ hang (text "Missing role annotation" <> colon) 2 (text "type role" <+> ppr name <+> hsep (map ppr roles)) TcRnIllformedTypePattern p -> mkSimpleDecorated $ hang (text "Ill-formed type pattern:") 2 (ppr p) TcRnIllegalTypePattern -> mkSimpleDecorated $ text "Illegal type pattern." $$ text "A type pattern must be checked against a visible forall." TcRnIllformedTypeArgument e -> mkSimpleDecorated $ hang (text "Ill-formed type argument:") 2 (ppr e) TcRnIllegalTypeExpr syntax -> mkSimpleDecorated $ vcat [ text "Illegal" <+> pprTypeSyntaxName syntax , text "Type syntax may only be used in a required type argument," , text "i.e. to instantiate a visible forall." ] TcRnCapturedTermName tv_name shadowed_term_names -> mkSimpleDecorated $ text "The type variable" <+> quotes (ppr tv_name) <+> text "is implicitly quantified," $+$ text "even though another variable of the same name is in scope:" $+$ nest 2 var_names $+$ text "This is not compatible with the RequiredTypeArguments extension." 
where var_names = case shadowed_term_names of Left gbl_names -> vcat (map (\name -> quotes (ppr $ greName name) <+> pprNameProvenance name) gbl_names) Right lcl_name -> quotes (ppr lcl_name) <+> text "defined at" <+> ppr (nameSrcLoc lcl_name) TcRnBindingOfExistingName name -> mkSimpleDecorated $ text "Illegal binding of an existing name:" <+> ppr (filterCTuple name) TcRnMultipleFixityDecls loc rdr_name -> mkSimpleDecorated $ vcat [text "Multiple fixity declarations for" <+> quotes (ppr rdr_name), text "also at " <+> ppr loc] TcRnIllegalPatternSynonymDecl -> mkSimpleDecorated $ text "Illegal pattern synonym declaration" TcRnIllegalClassBinding dsort bind -> mkSimpleDecorated $ vcat [ what <+> text "not allowed in" <+> decl_sort , nest 2 (ppr bind) ] where decl_sort = case dsort of ClassDeclSort -> text "class declaration:" InstanceDeclSort -> text "instance declaration:" what = case bind of PatBind {} -> text "Pattern bindings (except simple variables)" PatSynBind {} -> text "Pattern synonyms" -- Associated pattern synonyms are not implemented yet _ -> pprPanic "rnMethodBind" (ppr bind) TcRnOrphanCompletePragma -> mkSimpleDecorated $ text "Orphan COMPLETE pragmas not supported" $$ text "A COMPLETE pragma must mention at least one data constructor" $$ text "or pattern synonym defined in the same module." 
TcRnEmptyCase ctxt -> mkSimpleDecorated message where pp_ctxt = case ctxt of CaseAlt -> text "case expression" LamAlt LamCase -> text "\\case expression" ArrowMatchCtxt (ArrowLamAlt LamSingle) -> text "kappa abstraction" ArrowMatchCtxt (ArrowLamAlt LamCase) -> text "\\case command" ArrowMatchCtxt ArrowCaseAlt -> text "case command" _ -> text "(unexpected)" <+> pprMatchContextNoun ctxt message = case ctxt of LamAlt LamCases -> lcases_msg <+> text "expression" ArrowMatchCtxt (ArrowLamAlt LamCases) -> lcases_msg <+> text "command" _ -> text "Empty list of alternatives in" <+> pp_ctxt lcases_msg = text "Empty list of alternatives is not allowed in \\cases" TcRnNonStdGuards (NonStandardGuards guards) -> mkSimpleDecorated $ text "accepting non-standard pattern guards" $$ nest 4 (interpp'SP guards) TcRnDuplicateSigDecl pairs@((L _ name, sig) :| _) -> mkSimpleDecorated $ vcat [ text "Duplicate" <+> what_it_is <> text "s for" <+> quotes (ppr name) , text "at" <+> vcat (map ppr $ sortBy leftmost_smallest $ map (getLocA . 
fst) $ NE.toList pairs) ] where what_it_is = hsSigDoc sig TcRnMisplacedSigDecl sig -> mkSimpleDecorated $ sep [text "Misplaced" <+> hsSigDoc sig <> colon, ppr sig] TcRnUnexpectedDefaultSig sig -> mkSimpleDecorated $ hang (text "Unexpected default signature:") 2 (ppr sig) TcRnDuplicateMinimalSig sig1 sig2 otherSigs -> mkSimpleDecorated $ vcat [ text "Multiple minimal complete definitions" , text "at" <+> vcat (map ppr $ sortBy leftmost_smallest $ map getLocA sigs) , text "Combine alternative minimal complete definitions with `|'" ] where sigs = sig1 : sig2 : otherSigs TcRnUnexpectedStandaloneDerivingDecl -> mkSimpleDecorated $ text "Illegal standalone deriving declaration" TcRnUnusedVariableInRuleDecl name var -> mkSimpleDecorated $ sep [text "Rule" <+> doubleQuotes (ftext name) <> colon, text "Forall'd variable" <+> quotes (ppr var) <+> text "does not appear on left hand side"] TcRnUnexpectedStandaloneKindSig -> mkSimpleDecorated $ text "Illegal standalone kind signature" TcRnIllegalRuleLhs errReason name lhs bad_e -> mkSimpleDecorated $ sep [text "Rule" <+> pprRuleName name <> colon, nest 2 (vcat [err, text "in left-hand side:" <+> ppr lhs])] $$ text "LHS must be of form (f e1 .. 
en) where f is not forall'd" where err = case errReason of UnboundVariable uv nis -> pprScopeError uv nis IllegalExpression -> text "Illegal expression:" <+> ppr bad_e TcRnDuplicateRoleAnnot list -> mkSimpleDecorated $ hang (text "Duplicate role annotations for" <+> quotes (ppr $ roleAnnotDeclName first_decl) <> colon) 2 (vcat $ map pp_role_annot $ NE.toList sorted_list) where sorted_list = NE.sortBy cmp_loc list ((L _ first_decl) :| _) = sorted_list pp_role_annot (L loc decl) = hang (ppr decl) 4 (text "-- written at" <+> ppr (locA loc)) cmp_loc = leftmost_smallest `on` getLocA TcRnDuplicateKindSig list -> mkSimpleDecorated $ hang (text "Duplicate standalone kind signatures for" <+> quotes (ppr $ standaloneKindSigName first_decl) <> colon) 2 (vcat $ map pp_kisig $ NE.toList sorted_list) where sorted_list = NE.sortBy cmp_loc list ((L _ first_decl) :| _) = sorted_list pp_kisig (L loc decl) = hang (ppr decl) 4 (text "-- written at" <+> ppr (locA loc)) cmp_loc = leftmost_smallest `on` getLocA TcRnIllegalDerivStrategy ds -> mkSimpleDecorated $ text "Illegal deriving strategy" <> colon <+> derivStrategyName ds TcRnIllegalMultipleDerivClauses -> mkSimpleDecorated $ text "Illegal use of multiple, consecutive deriving clauses" TcRnNoDerivStratSpecified{} -> mkSimpleDecorated $ text "No deriving strategy specified. Did you want stock, newtype, or anyclass?" 
TcRnStupidThetaInGadt{} -> mkSimpleDecorated $ vcat [text "No context is allowed on a GADT-style data declaration", text "(You can put a context on each constructor, though.)"] TcRnShadowedTyVarNameInFamResult resName -> mkSimpleDecorated $ hsep [ text "Type variable", quotes (ppr resName) <> comma , text "naming a type family result," ] $$ text "shadows an already bound type variable" TcRnIncorrectTyVarOnLhsOfInjCond resName injFrom -> mkSimpleDecorated $ vcat [ text $ "Incorrect type variable on the LHS of " ++ "injectivity condition" , nest 5 ( vcat [ text "Expected :" <+> ppr resName , text "Actual :" <+> ppr injFrom ])] TcRnUnknownTyVarsOnRhsOfInjCond errorVars -> mkSimpleDecorated $ hsep [ text "Unknown type variable" <> plural errorVars , text "on the RHS of injectivity condition:" , interpp'SP errorVars ] TcRnBadlyStaged reason bind_lvl use_lvl -> mkSimpleDecorated $ vcat $ [ text "Stage error:" <+> pprStageCheckReason reason <+> hsep [text "is bound at stage" <+> ppr bind_lvl, text "but used at stage" <+> ppr use_lvl] ] ++ [ hsep [ text "Hint: quoting" <+> thBrackets (ppUnless (isValName n) "t") (ppr n) , text "or an enclosing expression would allow the quotation to be used in an earlier stage" ] | StageCheckSplice n <- [reason] ] TcRnBadlyStagedType name bind_lvl use_lvl -> mkSimpleDecorated $ text "Badly staged type:" <+> ppr name <+> hsep [text "is bound at stage" <+> ppr bind_lvl, text "but used at stage" <+> ppr use_lvl] TcRnStageRestriction reason -> mkSimpleDecorated $ sep [ text "GHC stage restriction:" , nest 2 (vcat [ pprStageCheckReason reason <+> text "is used in a top-level splice, quasi-quote, or annotation," , text "and must be imported, not defined locally"])] TcRnTyThingUsedWrong sort thing name -> mkSimpleDecorated $ pprTyThingUsedWrong sort thing name TcRnCannotDefaultKindVar var knd -> mkSimpleDecorated $ (vcat [ text "Cannot default kind variable" <+> quotes (ppr var) , text "of kind:" <+> ppr knd , text "Perhaps enable PolyKinds or 
add a kind signature" ]) TcRnUninferrableTyVar tidied_tvs context -> mkSimpleDecorated $ pprWithInvisibleBitsWhen True $ vcat [ text "Uninferrable type variable" <> plural tidied_tvs <+> pprWithCommas pprTyVar tidied_tvs <+> text "in" , pprUninferrableTyVarCtx context ] TcRnSkolemEscape escapees tv orig_ty -> mkSimpleDecorated $ pprWithInvisibleBitsWhen True $ vcat [ sep [ text "Cannot generalise type; skolem" <> plural escapees , quotes $ pprTyVars escapees , text "would escape" <+> itsOrTheir escapees <+> text "scope" ] , sep [ text "if I tried to quantify" , pprTyVar tv , text "in this type:" ] , nest 2 (pprTidiedType orig_ty) , text "(Indeed, I sometimes struggle even printing this correctly," , text " due to its ill-scoped nature.)" ] TcRnPatSynEscapedCoercion arg bad_co_ne -> mkSimpleDecorated $ vcat [ text "Iceland Jack! Iceland Jack! Stop torturing me!" , hang (text "Pattern-bound variable") 2 (ppr arg <+> dcolon <+> ppr (idType arg)) , nest 2 $ hang (text "has a type that mentions pattern-bound coercion" <> plural bad_co_list <> colon) 2 (pprWithCommas ppr bad_co_list) , text "Hint: use -fprint-explicit-coercions to see the coercions" , text "Probable fix: add a pattern signature" ] where bad_co_list = NE.toList bad_co_ne TcRnPatSynExistentialInResult name pat_ty bad_tvs -> mkSimpleDecorated $ hang (sep [ text "The result type of the signature for" <+> quotes (ppr name) <> comma , text "namely" <+> quotes (ppr pat_ty) ]) 2 (text "mentions existential type variable" <> plural bad_tvs <+> pprQuotedList bad_tvs) TcRnPatSynArityMismatch name decl_arity missing -> mkSimpleDecorated $ hang (text "Pattern synonym" <+> quotes (ppr name) <+> text "has" <+> speakNOf decl_arity (text "argument")) 2 (text "but its type signature has" <+> int missing <+> text "fewer arrows") TcRnPatSynInvalidRhs ps_name lpat _ reason -> mkSimpleDecorated $ vcat [ hang (text "Invalid right-hand side of bidirectional pattern synonym" <+> quotes (ppr ps_name) <> colon) 2 
(pprPatSynInvalidRhsReason reason) , text "RHS pattern:" <+> ppr lpat ] TcRnTyFamDepsDisabled -> mkSimpleDecorated $ text "Illegal injectivity annotation" TcRnAbstractClosedTyFamDecl -> mkSimpleDecorated $ text "You may define an abstract closed type family" $$ text "only in a .hs-boot file" TcRnPartialFieldSelector fld -> mkSimpleDecorated $ vcat [ sep [ text "Definition of partial record field" <> colon , nest 2 $ quotes (ppr (occName fld)) ] , text "Record selection and update using this field will be partial." ] TcRnHasFieldResolvedIncomplete name -> mkSimpleDecorated $ text "The invocation of `getField` on the record field" <+> quotes (ppr name) <+> text "may produce an error since it is not defined for all data constructors" TcRnBadFieldAnnotation n con reason -> mkSimpleDecorated $ hang (pprBadFieldAnnotationReason reason) 2 (text "on the" <+> speakNth n <+> text "argument of" <+> quotes (ppr con)) TcRnSuperclassCycle (MkSuperclassCycle cls definite details) -> let herald | definite = text "Superclass cycle for" | otherwise = text "Potential superclass cycle for" in mkSimpleDecorated $ vcat [ herald <+> quotes (ppr cls), nest 2 (vcat (pprSuperclassCycleDetail <$> details))] TcRnDefaultSigMismatch sel_id dm_ty -> mkSimpleDecorated $ hang (text "The default type signature for" <+> ppr sel_id <> colon) 2 (ppr dm_ty) $$ (text "does not match its corresponding" <+> text "non-default type signature") TcRnTyFamsDisabled reason -> mkSimpleDecorated $ text "Illegal family" <+> text sort <+> text "for" <+> quotes name where (sort, name) = case reason of TyFamsDisabledFamily n -> ("declaration", ppr n) TyFamsDisabledInstance n -> ("instance", ppr n) TcRnBadTyConTelescope tc -> mkSimpleDecorated $ vcat [ hang (text "The kind of" <+> quotes (ppr tc) <+> text "is ill-scoped") 2 pp_tc_kind , extra , hang (text "Perhaps try this order instead:") 2 (pprTyVars sorted_tvs) ] where pp_tc_kind = text "Inferred kind:" <+> ppr tc <+> dcolon <+> ppr_untidy (tyConKind tc) ppr_untidy 
ty = pprIfaceType (toIfaceType ty) -- We need ppr_untidy here because pprType will tidy the type, which -- will turn the bogus kind we are trying to report -- T :: forall (a::k) k (b::k) -> blah -- into a misleadingly sanitised version -- T :: forall (a::k) k1 (b::k1) -> blah tcbs = tyConBinders tc tvs = binderVars tcbs sorted_tvs = scopedSort tvs inferred_tvs = [ binderVar tcb | tcb <- tcbs, Inferred == tyConBinderForAllTyFlag tcb ] specified_tvs = [ binderVar tcb | tcb <- tcbs, Specified == tyConBinderForAllTyFlag tcb ] extra | null inferred_tvs && null specified_tvs = empty | null inferred_tvs = note $ "Specified variables" <+> pp_spec <+> "always come first" | null specified_tvs = note inf_always_first | otherwise = note $ inf_always_first $$ "then specified variables" <+> pp_spec inf_always_first = "Inferred variables" <+> pp_inf $$ "always come first" pp_inf = parens (text "namely:" <+> pprTyVars inferred_tvs) pp_spec = parens (text "namely:" <+> pprTyVars specified_tvs) TcRnTyFamResultDisabled tc_name tvb -> mkSimpleDecorated $ text "Illegal result type variable" <+> ppr tvb <+> text "for" <+> quotes (ppr tc_name) TcRnRoleValidationFailed role reason -> mkSimpleDecorated $ vcat [text "Internal error in role inference:", pprRoleValidationFailedReason role reason, text "Please report this as a GHC bug: https://www.haskell.org/ghc/reportabug"] TcRnCommonFieldResultTypeMismatch con1 con2 field_name -> mkSimpleDecorated $ vcat [sep [text "Constructors" <+> ppr con1 <+> text "and" <+> ppr con2, text "have a common field" <+> quotes (ppr field_name) <> comma], nest 2 $ text "but have different result types"] TcRnCommonFieldTypeMismatch con1 con2 field_name -> mkSimpleDecorated $ sep [text "Constructors" <+> ppr con1 <+> text "and" <+> ppr con2, text "give different types for field", quotes (ppr field_name)] TcRnClassExtensionDisabled cls reason -> mkSimpleDecorated $ pprDisabledClassExtension cls reason TcRnDataConParentTypeMismatch data_con res_ty_tmpl -> 
mkSimpleDecorated $ hang (text "Data constructor" <+> quotes (ppr data_con) <+> text "returns type" <+> quotes (ppr actual_res_ty)) 2 (text "instead of an instance of its parent type" <+> quotes (ppr res_ty_tmpl)) where actual_res_ty = dataConOrigResTy data_con TcRnGADTsDisabled tc_name -> mkSimpleDecorated $ text "Illegal generalised algebraic data declaration for" <+> quotes (ppr tc_name) TcRnExistentialQuantificationDisabled con -> mkSimpleDecorated $ sdocOption sdocLinearTypes (\show_linear_types -> hang (text "Data constructor" <+> quotes (ppr con) <+> text "has existential type variables, a context, or a specialised result type") 2 (ppr con <+> dcolon <+> ppr (dataConDisplayType show_linear_types con))) TcRnGADTDataContext tc_name -> mkSimpleDecorated $ text "A data type declared in GADT style cannot have a context:" <+> quotes (ppr tc_name) TcRnMultipleConForNewtype tycon n -> mkSimpleDecorated $ sep [text "A newtype must have exactly one constructor,", nest 2 $ text "but" <+> quotes (ppr tycon) <+> text "has" <+> speakN n] TcRnKindSignaturesDisabled thing -> mkSimpleDecorated $ text "Illegal kind signature" <+> quotes (either ppr with_sig thing) where with_sig (tc_name, ksig) = ppr tc_name <+> dcolon <+> ppr ksig TcRnEmptyDataDeclsDisabled tycon -> mkSimpleDecorated $ quotes (ppr tycon) <+> text "has no constructors" TcRnRoleMismatch var annot inferred -> mkSimpleDecorated $ hang (text "Role mismatch on variable" <+> ppr var <> colon) 2 (sep [ text "Annotation says", ppr annot , text "but role", ppr inferred , text "is required" ]) TcRnRoleCountMismatch tyvars d@(L _ (RoleAnnotDecl _ _ annots)) -> mkSimpleDecorated $ hang (text "Wrong number of roles listed in role annotation;" $$ text "Expected" <+> (ppr tyvars) <> comma <+> text "got" <+> (ppr $ length annots) <> colon) 2 (ppr d) TcRnIllegalRoleAnnotation (RoleAnnotDecl _ tycon _) -> mkSimpleDecorated $ (text "Illegal role annotation for" <+> ppr tycon <> char ';' $$ text "they are allowed only for 
datatypes and classes.") TcRnRoleAnnotationsDisabled tc -> mkSimpleDecorated $ text "Illegal role annotation for" <+> ppr tc TcRnIncoherentRoles _ -> mkSimpleDecorated $ (text "Roles other than" <+> quotes (text "nominal") <+> text "for class parameters can lead to incoherence.") TcRnUnexpectedKindVar tv_name -> mkSimpleDecorated $ text "Unexpected kind variable" <+> quotes (ppr tv_name) TcRnNegativeNumTypeLiteral tyLit -> mkSimpleDecorated $ text "Illegal literal in type (type literals must not be negative):" <+> ppr tyLit TcRnIllegalKind ty_thing _ -> mkSimpleDecorated $ text "Illegal kind:" <+> (ppr ty_thing) TcRnPrecedenceParsingError op1 op2 -> mkSimpleDecorated $ hang (text "Precedence parsing error") 4 (hsep [text "cannot mix", ppr_opfix op1, text "and", ppr_opfix op2, text "in the same infix expression"]) TcRnSectionPrecedenceError op arg_op section -> mkSimpleDecorated $ vcat [text "The operator" <+> ppr_opfix op <+> text "of a section", nest 4 (sep [text "must have lower precedence than that of the operand,", nest 2 (text "namely" <+> ppr_opfix arg_op)]), nest 4 (text "in the section:" <+> quotes (ppr section))] TcRnUnexpectedPatSigType ty -> mkSimpleDecorated $ hang (text "Illegal type signature:" <+> quotes (ppr ty)) 2 (text "Type signatures are only allowed in patterns with ScopedTypeVariables") TcRnIllegalKindSignature ty -> mkSimpleDecorated $ text "Illegal kind signature:" <+> quotes (ppr ty) TcRnUnusedQuantifiedTypeVar doc tyVar -> mkSimpleDecorated $ vcat [ text "Unused quantified type variable" <+> quotes (ppr tyVar) , inHsDocContext doc ] TcRnDataKindsError typeOrKind thing -- See Note [Checking for DataKinds] (Wrinkle: Migration story for -- DataKinds typechecker errors) in GHC.Tc.Validity for why we give -- different diagnostic messages below. 
-> case thing of Left renamer_thing -> mkSimpleDecorated $ text "Illegal" <+> ppr_level <> colon <+> quotes (ppr renamer_thing) Right typechecker_thing -> mkSimpleDecorated $ vcat [ text "An occurrence of" <+> quotes (ppr typechecker_thing) <+> text "in a" <+> ppr_level <+> text "requires DataKinds." , text "Future versions of GHC will turn this warning into an error." ] where ppr_level = text $ levelString typeOrKind TcRnTypeSynonymCycle decl_or_tcs -> mkSimpleDecorated $ sep [ text "Cycle in type synonym declarations:" , nest 2 (vcat (map ppr_decl decl_or_tcs)) ] where ppr_decl = \case Right (L loc decl) -> ppr (locA loc) <> colon <+> ppr decl Left tc -> let n = tyConName tc in ppr (getSrcSpan n) <> colon <+> ppr (tyConName tc) <+> text "from external module" TcRnZonkerMessage err -> mkSimpleDecorated $ pprZonkerMessage err TcRnInterfaceError reason -> diagnosticMessage (tcOptsIfaceOpts opts) reason TcRnSelfImport imp_mod_name -> mkSimpleDecorated $ text "A module cannot import itself:" <+> ppr imp_mod_name TcRnNoExplicitImportList mod -> mkSimpleDecorated $ text "The module" <+> quotes (ppr mod) <+> text "does not have an explicit import list" TcRnSafeImportsDisabled _ -> mkSimpleDecorated $ text "safe import can't be used as Safe Haskell isn't on!" TcRnDeprecatedModule mod txt -> mkSimpleDecorated $ sep [ text "Module" <+> quotes (ppr mod) <> text extra <> colon, nest 2 (vcat (map (ppr . hsDocString . unLoc) msg)) ] where (extra, msg) = case txt of WarningTxt _ _ msg -> ("", msg) DeprecatedTxt _ msg -> (" is deprecated", msg) TcRnRedundantSourceImport mod_name -> mkSimpleDecorated $ text "Unnecessary {-# SOURCE #-} in the import of module" <+> quotes (ppr mod_name) TcRnImportLookup reason -> mkSimpleDecorated $ pprImportLookup reason TcRnUnusedImport decl reason -> mkSimpleDecorated $ pprUnusedImport decl reason TcRnDuplicateDecls name sorted_names -> mkSimpleDecorated $ vcat [text "Multiple declarations of" <+> quotes (ppr name), -- NB. 
print the OccName, not the Name, because the -- latter might not be in scope in the RdrEnv and so will -- be printed qualified. text "Declared at:" <+> vcat (NE.toList $ ppr . nameSrcLoc <$> sorted_names)] TcRnPackageImportsDisabled -> mkSimpleDecorated $ text "Package-qualified imports are not enabled" TcRnIllegalDataCon name -> mkSimpleDecorated $ hsep [text "Illegal data constructor name", quotes (ppr name)] TcRnNestedForallsContexts entity -> mkSimpleDecorated $ what <+> text "cannot contain nested" <+> quotes forAllLit <> text "s or contexts" where what = case entity of NFC_Specialize -> text "SPECIALISE instance type" NFC_ViaType -> quotes (text "via") <+> text "type" NFC_GadtConSig -> text "GADT constructor type signature" NFC_InstanceHead -> text "Instance head" NFC_StandaloneDerivedInstanceHead -> text "Standalone-derived instance head" NFC_DerivedClassType -> text "Derived class type" TcRnRedundantRecordWildcard -> mkSimpleDecorated $ text "Record wildcard does not bind any new variables" TcRnUnusedRecordWildcard _ -> mkSimpleDecorated $ text "No variables bound in the record wildcard match are used" TcRnUnusedName name reason -> mkSimpleDecorated $ pprUnusedName name reason TcRnQualifiedBinder rdr_name -> mkSimpleDecorated $ text "Qualified name in binding position:" <+> ppr rdr_name TcRnTypeApplicationsDisabled ty_app -> mkSimpleDecorated $ text "Illegal visible" <+> what <+> text "application" <+> ctx <> colon <+> ppr arg where arg = case ty_app of TypeApplication ty _ -> char '@' <> ppr ty TypeApplicationInPattern ty_app -> ppr ty_app what = case ty_app of TypeApplication _ ty_or_ki -> case ty_or_ki of TypeLevel -> text "type" KindLevel -> text "kind" TypeApplicationInPattern _ -> text "type" ctx = case ty_app of TypeApplicationInPattern _ -> text "in a pattern" _ -> empty TcRnInvalidRecordField con field -> mkSimpleDecorated $ hsep [text "Constructor" <+> quotes (ppr con), text "does not have field", quotes (ppr field)] TcRnTupleTooLarge tup_size -> 
mkSimpleDecorated $ sep [text "A" <+> int tup_size <> text "-tuple is too large for GHC", nest 2 (parens (text "max size is" <+> int mAX_TUPLE_SIZE)), nest 2 (text "Workaround: use nested tuples or define a data type")] TcRnCTupleTooLarge tup_size -> mkSimpleDecorated $ hang (text "Constraint tuple arity too large:" <+> int tup_size <+> parens (text "max arity =" <+> int mAX_CTUPLE_SIZE)) 2 (text "Instead, use a nested tuple") TcRnIllegalInferredTyVars _ -> mkSimpleDecorated $ text "Inferred type variables are not allowed" TcRnAmbiguousName gre_env name gres -> mkSimpleDecorated $ vcat [ text "Ambiguous occurrence" <+> quotes (ppr name) <> dot , text "It could refer to" , nest 3 (vcat msgs) ] where np1 NE.:| nps = gres msgs = punctuateFinal comma dot $ text "either" <+> ppr_gre np1 : [text " or" <+> ppr_gre np | np <- nps] ppr_gre gre = pprAmbiguousGreName gre_env gre TcRnBindingNameConflict name locs -> mkSimpleDecorated $ vcat [text "Conflicting definitions for" <+> quotes (ppr name), locations] where locations = text "Bound at:" <+> vcat (map ppr (sortBy leftmost_smallest (NE.toList locs))) TcRnNonCanonicalDefinition reason inst_ty -> mkSimpleDecorated $ pprNonCanonicalDefinition inst_ty reason TcRnDefaultedExceptionContext ct_loc -> mkSimpleDecorated $ vcat [ header, warning, proposal ] where header, warning, proposal :: SDoc header = vcat [ text "Solving for an implicit ExceptionContext constraint" , nest 2 $ pprCtOrigin (ctLocOrigin ct_loc) <> text "." ] warning = vcat [ text "Future versions of GHC will turn this warning into an error." ] proposal = vcat [ text "See GHC Proposal #330." ] TcRnImplicitImportOfPrelude -> mkSimpleDecorated $ text "Module" <+> quotes (text "Prelude") <+> text "implicitly imported." 
TcRnMissingMain explicit_export_list main_mod main_occ -> mkSimpleDecorated $ text "The" <+> ppMainFn main_occ <+> text "is not" <+> defOrExp <+> text "module" <+> quotes (ppr main_mod) where defOrExp :: SDoc defOrExp | explicit_export_list = text "exported by" | otherwise = text "defined in" TcRnGhciUnliftedBind id -> mkSimpleDecorated $ sep [ text "GHCi can't bind a variable of unlifted type:" , nest 2 (pprPrefixOcc id <+> dcolon <+> ppr (idType id)) ] TcRnGhciMonadLookupFail ty lookups -> mkSimpleDecorated $ hang (text "Can't find type" <+> pp_ty <> dot $$ ambig_msg) 2 (text "When checking that" <+> pp_ty <> text "is a monad that can execute GHCi statements.") where pp_ty = quotes (text ty) ambig_msg = case lookups of Just (_:_:_) -> text "The type is ambiguous." _ -> empty TcRnIllegalQuasiQuotes -> mkSimpleDecorated $ text "Quasi-quotes are not permitted without QuasiQuotes" TcRnTHError err -> pprTHError err TcRnPatersonCondFailure reason ctxt lhs rhs -> mkSimpleDecorated $ pprPatersonCondFailure reason ctxt lhs rhs TcRnIllegalInvisTyVarBndr bndr -> mkSimpleDecorated $ hang (text "Illegal invisible type variable binder:") 2 (ppr bndr) TcRnIllegalWildcardTyVarBndr bndr -> mkSimpleDecorated $ hang (text "Illegal wildcard binder:") 2 (ppr bndr) TcRnInvalidInvisTyVarBndr name hs_bndr -> mkSimpleDecorated $ vcat [ hang (text "Invalid invisible type variable binder:") 2 (ppr hs_bndr) , text "There is no matching forall-bound variable" , text "in the standalone kind signature for" <+> quotes (ppr name) <> dot , note $ vcat [ "Only" <+> quotes "forall a." <+> "-quantification matches invisible binders,", "whereas" <+> quotes "forall {a}." <+> "and" <+> quotes "forall a ->" <+> "do not" ]] TcRnDeprecatedInvisTyArgInConPat -> mkSimpleDecorated $ cat [ text "Type applications in constructor patterns will require" , text "the TypeAbstractions extension starting from GHC 9.14." 
] TcRnInvisBndrWithoutSig _ hs_bndr -> mkSimpleDecorated $ vcat [ hang (text "Invalid invisible type variable binder:") 2 (ppr hs_bndr) , text "Either a standalone kind signature (SAKS)" , text "or a complete user-supplied kind (CUSK, legacy feature)" , text "is required to use invisible binders." ] TcRnImplicitRhsQuantification kv -> mkSimpleDecorated $ vcat [ text "The variable" <+> quotes (ppr kv) <+> text "occurs free on the RHS of the type declaration" , text "In the future GHC will no longer implicitly quantify over such variables" ] TcRnInvalidDefaultedTyVar wanteds proposal bad_tvs -> mkSimpleDecorated $ pprWithInvisibleBitsWhen True $ vcat [ text "Invalid defaulting proposal." , hang (text "The following type variable" <> plural (NE.toList bad_tvs) <+> text "cannot be defaulted, as" <+> why <> colon) 2 (pprQuotedList (NE.toList bad_tvs)) , hang (text "Defaulting proposal:") 2 (ppr proposal) , hang (text "Wanted constraints:") 2 (pprQuotedList (map ctPred wanteds)) ] where why | _ :| [] <- bad_tvs = text "it is not an unfilled metavariable" | otherwise = text "they are not unfilled metavariables" TcRnNamespacedWarningPragmaWithoutFlag warning@(Warning (kw, _) _ txt) -> mkSimpleDecorated $ vcat [ text "Illegal use of the" <+> quotes (ppr kw) <+> text "keyword:" , nest 2 (ppr warning) , text "in a" <+> pragma_type <+> text "pragma" ] where pragma_type = case txt of WarningTxt{} -> text "WARNING" DeprecatedTxt{} -> text "DEPRECATED" TcRnIllegalInvisibleTypePattern tp -> mkSimpleDecorated $ text "Illegal invisible type pattern:" <+> ppr tp TcRnInvisPatWithNoForAll tp -> mkSimpleDecorated $ text "Invisible type pattern" <+> ppr tp <+> text "has no associated forall" TcRnNamespacedFixitySigWithoutFlag sig@(FixitySig kw _ _) -> mkSimpleDecorated $ vcat [ text "Illegal use of the" <+> quotes (ppr kw) <+> text "keyword:" , nest 2 (ppr sig) , text "in a fixity signature" ] TcRnOutOfArityTyVar ts_name tv_name -> mkDecorated [ vcat [ text "The arity of" <+> quotes (ppr 
ts_name) <+> text "is insufficiently high to accommodate" , text "an implicit binding for the" <+> quotes (ppr tv_name) <+> text "type variable." ] , suggestion ] where suggestion = text "Use" <+> quotes at_bndr <+> text "on the LHS" <+> text "or" <+> quotes forall_bndr <+> text "on the RHS" <+> text "to bring it into scope." at_bndr = char '@' <> ppr tv_name forall_bndr = text "forall" <+> ppr tv_name <> text "." TcRnMisplacedInvisPat tp -> mkSimpleDecorated $ text "Invisible type pattern" <+> ppr tp <+> text "is not allowed here" TcRnUnexpectedTypeSyntaxInTerms syntax -> mkSimpleDecorated $ text "Unexpected" <+> pprTypeSyntaxName syntax diagnosticReason :: TcRnMessage -> DiagnosticReason diagnosticReason = \case TcRnUnknownMessage m -> diagnosticReason m TcRnMessageWithInfo _ msg_with_info -> case msg_with_info of TcRnMessageDetailed _ m -> diagnosticReason m TcRnWithHsDocContext _ msg -> diagnosticReason msg TcRnSolverReport _ reason -> reason -- Error, or a Warning if we are deferring type errors TcRnSolverDepthError {} -> ErrorWithoutFlag TcRnRedundantConstraints {} -> WarningWithFlag Opt_WarnRedundantConstraints TcRnInaccessibleCode {} -> WarningWithFlag Opt_WarnInaccessibleCode TcRnInaccessibleCoAxBranch {} -> WarningWithFlag Opt_WarnInaccessibleCode TcRnTypeDoesNotHaveFixedRuntimeRep{} -> ErrorWithoutFlag TcRnImplicitLift{} -> WarningWithFlag Opt_WarnImplicitLift TcRnUnusedPatternBinds{} -> WarningWithFlag Opt_WarnUnusedPatternBinds TcRnDodgyImports{} -> WarningWithFlag Opt_WarnDodgyImports TcRnDodgyExports{} -> WarningWithFlag Opt_WarnDodgyExports TcRnMissingImportList{} -> WarningWithFlag Opt_WarnMissingImportList TcRnUnsafeDueToPlugin{} -> WarningWithoutFlag TcRnModMissingRealSrcSpan{} -> ErrorWithoutFlag TcRnIdNotExportedFromModuleSig{} -> ErrorWithoutFlag TcRnIdNotExportedFromLocalSig{} -> ErrorWithoutFlag TcRnShadowedName{} -> WarningWithFlag Opt_WarnNameShadowing TcRnInvalidWarningCategory{} -> ErrorWithoutFlag TcRnDuplicateWarningDecls{} -> 
ErrorWithoutFlag TcRnSimplifierTooManyIterations{} -> ErrorWithoutFlag TcRnIllegalPatSynDecl{} -> ErrorWithoutFlag TcRnLinearPatSyn{} -> ErrorWithoutFlag TcRnEmptyRecordUpdate -> ErrorWithoutFlag TcRnIllegalFieldPunning{} -> ErrorWithoutFlag TcRnIllegalWildcardsInRecord{} -> ErrorWithoutFlag TcRnIllegalWildcardInType{} -> ErrorWithoutFlag TcRnIllegalNamedWildcardInTypeArgument{} -> ErrorWithoutFlag TcRnIllegalImplicitTyVarInTypeArgument{} -> ErrorWithoutFlag TcRnDuplicateFieldName{} -> ErrorWithoutFlag TcRnIllegalViewPattern{} -> ErrorWithoutFlag TcRnCharLiteralOutOfRange{} -> ErrorWithoutFlag TcRnIllegalWildcardsInConstructor{} -> ErrorWithoutFlag TcRnIgnoringAnnotations{} -> WarningWithoutFlag TcRnAnnotationInSafeHaskell -> ErrorWithoutFlag TcRnInvalidTypeApplication{} -> ErrorWithoutFlag TcRnTagToEnumMissingValArg -> ErrorWithoutFlag TcRnTagToEnumUnspecifiedResTy{} -> ErrorWithoutFlag TcRnTagToEnumResTyNotAnEnum{} -> ErrorWithoutFlag TcRnTagToEnumResTyTypeData{} -> ErrorWithoutFlag TcRnArrowIfThenElsePredDependsOnResultTy -> ErrorWithoutFlag TcRnIllegalHsBootOrSigDecl {} -> ErrorWithoutFlag TcRnBootMismatch {} -> ErrorWithoutFlag TcRnRecursivePatternSynonym{} -> ErrorWithoutFlag TcRnPartialTypeSigTyVarMismatch{} -> ErrorWithoutFlag TcRnPartialTypeSigBadQuantifier{} -> ErrorWithoutFlag TcRnMissingSignature what exported -> WarningWithFlags $ missingSignatureWarningFlags what exported TcRnPolymorphicBinderMissingSig{} -> WarningWithFlag Opt_WarnMissingLocalSignatures TcRnOverloadedSig{} -> ErrorWithoutFlag TcRnTupleConstraintInst{} -> ErrorWithoutFlag TcRnUserTypeError{} -> ErrorWithoutFlag TcRnConstraintInKind{} -> ErrorWithoutFlag TcRnUnboxedTupleOrSumTypeFuncArg{} -> ErrorWithoutFlag TcRnLinearFuncInKind{} -> ErrorWithoutFlag TcRnForAllEscapeError{} -> ErrorWithoutFlag TcRnSimplifiableConstraint{} -> WarningWithFlag Opt_WarnSimplifiableClassConstraints TcRnArityMismatch{} -> ErrorWithoutFlag TcRnIllegalInstance rea -> illegalInstanceReason rea 
TcRnVDQInTermType{} -> ErrorWithoutFlag TcRnBadQuantPredHead{} -> ErrorWithoutFlag TcRnIllegalTupleConstraint{} -> ErrorWithoutFlag TcRnNonTypeVarArgInConstraint{} -> ErrorWithoutFlag TcRnIllegalImplicitParam{} -> ErrorWithoutFlag TcRnIllegalConstraintSynonymOfKind{} -> ErrorWithoutFlag TcRnOversaturatedVisibleKindArg{} -> ErrorWithoutFlag TcRnForAllRankErr{} -> ErrorWithoutFlag TcRnMonomorphicBindings{} -> WarningWithFlag Opt_WarnMonomorphism TcRnOrphanInstance{} -> WarningWithFlag Opt_WarnOrphans TcRnFunDepConflict{} -> ErrorWithoutFlag TcRnDupInstanceDecls{} -> ErrorWithoutFlag TcRnConflictingFamInstDecls{} -> ErrorWithoutFlag TcRnFamInstNotInjective{} -> ErrorWithoutFlag TcRnBangOnUnliftedType{} -> WarningWithFlag Opt_WarnRedundantStrictnessFlags TcRnLazyBangOnUnliftedType{} -> WarningWithFlag Opt_WarnRedundantStrictnessFlags TcRnMultipleDefaultDeclarations{} -> ErrorWithoutFlag TcRnBadDefaultType{} -> ErrorWithoutFlag TcRnPatSynBundledWithNonDataCon{} -> ErrorWithoutFlag TcRnPatSynBundledWithWrongType{} -> ErrorWithoutFlag TcRnDupeModuleExport{} -> WarningWithFlag Opt_WarnDuplicateExports TcRnExportedModNotImported{} -> ErrorWithoutFlag TcRnNullExportedModule{} -> WarningWithFlag Opt_WarnDodgyExports TcRnMissingExportList{} -> WarningWithFlag Opt_WarnMissingExportList TcRnExportHiddenComponents{} -> ErrorWithoutFlag TcRnExportHiddenDefault{} -> ErrorWithoutFlag TcRnDuplicateExport{} -> WarningWithFlag Opt_WarnDuplicateExports TcRnExportedParentChildMismatch{} -> ErrorWithoutFlag TcRnConflictingExports{} -> ErrorWithoutFlag TcRnDuplicateFieldExport {} -> ErrorWithoutFlag TcRnAmbiguousFieldInUpdate {} -> ErrorWithoutFlag TcRnAmbiguousRecordUpdate{} -> WarningWithFlag Opt_WarnAmbiguousFields TcRnMissingFields{} -> WarningWithFlag Opt_WarnMissingFields TcRnFieldUpdateInvalidType{} -> ErrorWithoutFlag TcRnMissingStrictFields{} -> ErrorWithoutFlag TcRnBadRecordUpdate{} -> ErrorWithoutFlag TcRnIllegalStaticExpression {} -> ErrorWithoutFlag TcRnStaticFormNotClosed{} 
-> ErrorWithoutFlag TcRnUselessTypeable -> WarningWithFlag Opt_WarnDerivingTypeable TcRnDerivingDefaults{} -> WarningWithFlag Opt_WarnDerivingDefaults TcRnNonUnaryTypeclassConstraint{} -> ErrorWithoutFlag TcRnPartialTypeSignatures{} -> WarningWithFlag Opt_WarnPartialTypeSignatures TcRnCannotDeriveInstance _ _ _ _ rea -> case rea of DerivErrNotWellKinded{} -> ErrorWithoutFlag DerivErrSafeHaskellGenericInst -> ErrorWithoutFlag DerivErrDerivingViaWrongKind{} -> ErrorWithoutFlag DerivErrNoEtaReduce{} -> ErrorWithoutFlag DerivErrBootFileFound -> ErrorWithoutFlag DerivErrDataConsNotAllInScope{} -> ErrorWithoutFlag DerivErrGNDUsedOnData -> ErrorWithoutFlag DerivErrNullaryClasses -> ErrorWithoutFlag DerivErrLastArgMustBeApp -> ErrorWithoutFlag DerivErrNoFamilyInstance{} -> ErrorWithoutFlag DerivErrNotStockDeriveable{} -> ErrorWithoutFlag DerivErrHasAssociatedDatatypes{} -> ErrorWithoutFlag DerivErrNewtypeNonDeriveableClass -> ErrorWithoutFlag DerivErrCannotEtaReduceEnough{} -> ErrorWithoutFlag DerivErrOnlyAnyClassDeriveable{} -> ErrorWithoutFlag DerivErrNotDeriveable{} -> ErrorWithoutFlag DerivErrNotAClass{} -> ErrorWithoutFlag DerivErrNoConstructors{} -> ErrorWithoutFlag DerivErrLangExtRequired{} -> ErrorWithoutFlag DerivErrDunnoHowToDeriveForType{} -> ErrorWithoutFlag DerivErrMustBeEnumType{} -> ErrorWithoutFlag DerivErrMustHaveExactlyOneConstructor{} -> ErrorWithoutFlag DerivErrMustHaveSomeParameters{} -> ErrorWithoutFlag DerivErrMustNotHaveClassContext{} -> ErrorWithoutFlag DerivErrBadConstructor{} -> ErrorWithoutFlag DerivErrGenerics{} -> ErrorWithoutFlag DerivErrEnumOrProduct{} -> ErrorWithoutFlag TcRnLookupInstance _ _ _ -> ErrorWithoutFlag TcRnLazyGADTPattern -> ErrorWithoutFlag TcRnArrowProcGADTPattern -> ErrorWithoutFlag TcRnTypeEqualityOutOfScope -> WarningWithFlag Opt_WarnTypeEqualityOutOfScope TcRnTypeEqualityRequiresOperators -> WarningWithFlag Opt_WarnTypeEqualityRequiresOperators TcRnIllegalTypeOperator {} -> ErrorWithoutFlag TcRnIllegalTypeOperatorDecl {} 
-> ErrorWithoutFlag TcRnGADTMonoLocalBinds {} -> WarningWithFlag Opt_WarnGADTMonoLocalBinds TcRnIncorrectNameSpace {} -> ErrorWithoutFlag TcRnNotInScope {} -> ErrorWithoutFlag TcRnTermNameInType {} -> ErrorWithoutFlag TcRnUntickedPromotedThing {} -> WarningWithFlag Opt_WarnUntickedPromotedConstructors TcRnIllegalBuiltinSyntax {} -> ErrorWithoutFlag TcRnWarnDefaulting {} -> WarningWithFlag Opt_WarnTypeDefaults TcRnWarnClashingDefaultImports {} -> WarningWithFlag Opt_WarnTypeDefaults TcRnForeignImportPrimExtNotSet{} -> ErrorWithoutFlag TcRnForeignImportPrimSafeAnn{} -> ErrorWithoutFlag TcRnForeignFunctionImportAsValue{} -> ErrorWithoutFlag TcRnFunPtrImportWithoutAmpersand{} -> WarningWithFlag Opt_WarnDodgyForeignImports TcRnIllegalForeignDeclBackend{} -> ErrorWithoutFlag TcRnUnsupportedCallConv _ unsupportedCC -> case unsupportedCC of StdCallConvUnsupported -> WarningWithFlag Opt_WarnUnsupportedCallingConventions _ -> ErrorWithoutFlag TcRnIllegalForeignType{} -> ErrorWithoutFlag TcRnInvalidCIdentifier{} -> ErrorWithoutFlag TcRnExpectedValueId{} -> ErrorWithoutFlag TcRnRecSelectorEscapedTyVar{} -> ErrorWithoutFlag TcRnPatSynNotBidirectional{} -> ErrorWithoutFlag TcRnIllegalDerivingItem{} -> ErrorWithoutFlag TcRnIllegalDefaultClass{} -> ErrorWithoutFlag TcRnIllegalNamedDefault{} -> ErrorWithoutFlag TcRnUnexpectedAnnotation{} -> ErrorWithoutFlag TcRnIllegalRecordSyntax{} -> ErrorWithoutFlag TcRnInvalidVisibleKindArgument{} -> ErrorWithoutFlag TcRnTooManyBinders{} -> ErrorWithoutFlag TcRnDifferentNamesForTyVar{} -> ErrorWithoutFlag TcRnDisconnectedTyVar{} -> ErrorWithoutFlag TcRnInvalidReturnKind{} -> ErrorWithoutFlag TcRnClassKindNotConstraint{} -> ErrorWithoutFlag TcRnUnpromotableThing{} -> ErrorWithoutFlag TcRnIllegalTermLevelUse{} -> ErrorWithoutFlag TcRnMatchesHaveDiffNumArgs{} -> ErrorWithoutFlag TcRnCannotBindScopedTyVarInPatSig{} -> ErrorWithoutFlag TcRnCannotBindTyVarsInPatBind{} -> ErrorWithoutFlag TcRnTooManyTyArgsInConPattern{} -> ErrorWithoutFlag 
TcRnMultipleInlinePragmas{} -> WarningWithoutFlag TcRnUnexpectedPragmas{} -> WarningWithoutFlag TcRnNonOverloadedSpecialisePragma{} -> WarningWithoutFlag TcRnSpecialiseNotVisible{} -> WarningWithoutFlag TcRnPragmaWarning{pragma_warning_msg} -> WarningWithCategory (warningTxtCategory pragma_warning_msg) TcRnDifferentExportWarnings _ _ -> ErrorWithoutFlag TcRnIncompleteExportWarnings _ _ -> WarningWithFlag Opt_WarnIncompleteExportWarnings TcRnIllegalHsigDefaultMethods{} -> ErrorWithoutFlag TcRnHsigFixityMismatch{} -> ErrorWithoutFlag TcRnHsigShapeMismatch{} -> ErrorWithoutFlag TcRnHsigMissingModuleExport{} -> ErrorWithoutFlag TcRnBadGenericMethod{} -> ErrorWithoutFlag TcRnWarningMinimalDefIncomplete{} -> WarningWithoutFlag TcRnDefaultMethodForPragmaLacksBinding{} -> ErrorWithoutFlag TcRnIgnoreSpecialisePragmaOnDefMethod{} -> WarningWithoutFlag TcRnBadMethodErr{} -> ErrorWithoutFlag TcRnIllegalTypeData -> ErrorWithoutFlag TcRnIllegalQuasiQuotes{} -> ErrorWithoutFlag TcRnTHError err -> thErrorReason err TcRnTypeDataForbids{} -> ErrorWithoutFlag TcRnIllegalNewtype{} -> ErrorWithoutFlag TcRnOrPatBindsVariables{} -> ErrorWithoutFlag TcRnUnsatisfiedMinimalDef{} -> WarningWithFlag (Opt_WarnMissingMethods) TcRnMisplacedInstSig{} -> ErrorWithoutFlag TcRnNoRebindableSyntaxRecordDot{} -> ErrorWithoutFlag TcRnNoFieldPunsRecordDot{} -> ErrorWithoutFlag TcRnListComprehensionDuplicateBinding{} -> ErrorWithoutFlag TcRnEmptyStmtsGroup{} -> ErrorWithoutFlag TcRnLastStmtNotExpr{} -> ErrorWithoutFlag TcRnUnexpectedStatementInContext{} -> ErrorWithoutFlag TcRnSectionWithoutParentheses{} -> ErrorWithoutFlag TcRnIllegalImplicitParameterBindings{} -> ErrorWithoutFlag TcRnIllegalTupleSection{} -> ErrorWithoutFlag TcRnCapturedTermName{} -> WarningWithFlag Opt_WarnTermVariableCapture TcRnBindingOfExistingName{} -> ErrorWithoutFlag TcRnMultipleFixityDecls{} -> ErrorWithoutFlag TcRnIllegalPatternSynonymDecl{} -> ErrorWithoutFlag TcRnIllegalClassBinding{} -> ErrorWithoutFlag 
TcRnOrphanCompletePragma{} -> ErrorWithoutFlag TcRnEmptyCase{} -> ErrorWithoutFlag TcRnNonStdGuards{} -> WarningWithoutFlag TcRnDuplicateSigDecl{} -> ErrorWithoutFlag TcRnMisplacedSigDecl{} -> ErrorWithoutFlag TcRnUnexpectedDefaultSig{} -> ErrorWithoutFlag TcRnDuplicateMinimalSig{} -> ErrorWithoutFlag TcRnUnexpectedStandaloneDerivingDecl{} -> ErrorWithoutFlag TcRnUnusedVariableInRuleDecl{} -> ErrorWithoutFlag TcRnUnexpectedStandaloneKindSig{} -> ErrorWithoutFlag TcRnIllegalRuleLhs{} -> ErrorWithoutFlag TcRnDuplicateRoleAnnot{} -> ErrorWithoutFlag TcRnDuplicateKindSig{} -> ErrorWithoutFlag TcRnIllegalDerivStrategy{} -> ErrorWithoutFlag TcRnIllegalMultipleDerivClauses{} -> ErrorWithoutFlag TcRnNoDerivStratSpecified{} -> WarningWithFlag Opt_WarnMissingDerivingStrategies TcRnStupidThetaInGadt{} -> ErrorWithoutFlag TcRnShadowedTyVarNameInFamResult{} -> ErrorWithoutFlag TcRnIncorrectTyVarOnLhsOfInjCond{} -> ErrorWithoutFlag TcRnUnknownTyVarsOnRhsOfInjCond{} -> ErrorWithoutFlag TcRnBadlyStaged{} -> ErrorWithoutFlag TcRnBadlyStagedType{} -> WarningWithFlag Opt_WarnBadlyStagedTypes TcRnStageRestriction{} -> ErrorWithoutFlag TcRnTyThingUsedWrong{} -> ErrorWithoutFlag TcRnCannotDefaultKindVar{} -> ErrorWithoutFlag TcRnUninferrableTyVar{} -> ErrorWithoutFlag TcRnSkolemEscape{} -> ErrorWithoutFlag TcRnPatSynEscapedCoercion{} -> ErrorWithoutFlag TcRnPatSynExistentialInResult{} -> ErrorWithoutFlag TcRnPatSynArityMismatch{} -> ErrorWithoutFlag TcRnPatSynInvalidRhs{} -> ErrorWithoutFlag TcRnTyFamDepsDisabled{} -> ErrorWithoutFlag TcRnAbstractClosedTyFamDecl{} -> ErrorWithoutFlag TcRnPartialFieldSelector{} -> WarningWithFlag Opt_WarnPartialFields TcRnHasFieldResolvedIncomplete{} -> WarningWithFlag Opt_WarnIncompleteRecordSelectors TcRnBadFieldAnnotation _ _ LazyFieldsDisabled -> ErrorWithoutFlag TcRnBadFieldAnnotation{} -> WarningWithoutFlag TcRnSuperclassCycle{} -> ErrorWithoutFlag TcRnDefaultSigMismatch{} -> ErrorWithoutFlag TcRnTyFamsDisabled{} -> ErrorWithoutFlag 
TcRnBadTyConTelescope {} -> ErrorWithoutFlag TcRnTyFamResultDisabled{} -> ErrorWithoutFlag TcRnRoleValidationFailed{} -> ErrorWithoutFlag TcRnCommonFieldResultTypeMismatch{} -> ErrorWithoutFlag TcRnCommonFieldTypeMismatch{} -> ErrorWithoutFlag TcRnClassExtensionDisabled{} -> ErrorWithoutFlag TcRnDataConParentTypeMismatch{} -> ErrorWithoutFlag TcRnGADTsDisabled{} -> ErrorWithoutFlag TcRnExistentialQuantificationDisabled{} -> ErrorWithoutFlag TcRnGADTDataContext{} -> ErrorWithoutFlag TcRnMultipleConForNewtype{} -> ErrorWithoutFlag TcRnKindSignaturesDisabled{} -> ErrorWithoutFlag TcRnEmptyDataDeclsDisabled{} -> ErrorWithoutFlag TcRnRoleMismatch{} -> ErrorWithoutFlag TcRnRoleCountMismatch{} -> ErrorWithoutFlag TcRnIllegalRoleAnnotation{} -> ErrorWithoutFlag TcRnRoleAnnotationsDisabled{} -> ErrorWithoutFlag TcRnIncoherentRoles{} -> ErrorWithoutFlag TcRnUnexpectedKindVar{} -> ErrorWithoutFlag TcRnNegativeNumTypeLiteral{} -> ErrorWithoutFlag TcRnIllegalKind{} -> ErrorWithoutFlag TcRnPrecedenceParsingError{} -> ErrorWithoutFlag TcRnSectionPrecedenceError{} -> ErrorWithoutFlag TcRnUnexpectedPatSigType{} -> ErrorWithoutFlag TcRnIllegalKindSignature{} -> ErrorWithoutFlag TcRnUnusedQuantifiedTypeVar{} -> WarningWithFlag Opt_WarnUnusedForalls TcRnDataKindsError _ thing -- DataKinds errors can arise from either the renamer (Left) or the -- typechecker (Right). The latter category of DataKinds errors are a -- fairly recent addition to GHC (introduced in GHC 9.10), and in order -- to prevent these new errors from breaking users' code, we temporarily -- downgrade these errors to warnings. See Note [Checking for DataKinds] -- (Wrinkle: Migration story for DataKinds typechecker errors) -- in GHC.Tc.Validity. 
-> case thing of Left _ -> ErrorWithoutFlag Right _ -> WarningWithFlag Opt_WarnDataKindsTC TcRnTypeSynonymCycle{} -> ErrorWithoutFlag TcRnZonkerMessage msg -> zonkerMessageReason msg TcRnInterfaceError err -> interfaceErrorReason err TcRnSelfImport{} -> ErrorWithoutFlag TcRnNoExplicitImportList{} -> WarningWithFlag Opt_WarnMissingImportList TcRnSafeImportsDisabled{} -> ErrorWithoutFlag TcRnDeprecatedModule _ txt -> WarningWithCategory (warningTxtCategory txt) TcRnRedundantSourceImport{} -> WarningWithoutFlag TcRnImportLookup{} -> ErrorWithoutFlag TcRnUnusedImport{} -> WarningWithFlag Opt_WarnUnusedImports TcRnDuplicateDecls{} -> ErrorWithoutFlag TcRnPackageImportsDisabled -> ErrorWithoutFlag TcRnIllegalDataCon{} -> ErrorWithoutFlag TcRnNestedForallsContexts{} -> ErrorWithoutFlag TcRnRedundantRecordWildcard -> WarningWithFlag Opt_WarnRedundantRecordWildcards TcRnUnusedRecordWildcard{} -> WarningWithFlag Opt_WarnUnusedRecordWildcards TcRnUnusedName _ prov -> WarningWithFlag $ case prov of UnusedNameTopDecl -> Opt_WarnUnusedTopBinds UnusedNameImported _ -> Opt_WarnUnusedTopBinds UnusedNameTypePattern -> Opt_WarnUnusedTypePatterns UnusedNameMatch -> Opt_WarnUnusedMatches UnusedNameLocalBind -> Opt_WarnUnusedLocalBinds TcRnQualifiedBinder{} -> ErrorWithoutFlag TcRnTypeApplicationsDisabled{} -> ErrorWithoutFlag TcRnInvalidRecordField{} -> ErrorWithoutFlag TcRnTupleTooLarge{} -> ErrorWithoutFlag TcRnCTupleTooLarge{} -> ErrorWithoutFlag TcRnIllegalInferredTyVars{} -> ErrorWithoutFlag TcRnAmbiguousName{} -> ErrorWithoutFlag TcRnBindingNameConflict{} -> ErrorWithoutFlag TcRnNonCanonicalDefinition (NonCanonicalMonoid _) _ -> WarningWithFlag Opt_WarnNonCanonicalMonoidInstances TcRnNonCanonicalDefinition (NonCanonicalMonad _) _ -> WarningWithFlag Opt_WarnNonCanonicalMonadInstances TcRnDefaultedExceptionContext{} -> WarningWithFlag Opt_WarnDefaultedExceptionContext TcRnImplicitImportOfPrelude {} -> WarningWithFlag Opt_WarnImplicitPrelude TcRnMissingMain {} -> ErrorWithoutFlag 
TcRnGhciUnliftedBind {} -> ErrorWithoutFlag TcRnGhciMonadLookupFail {} -> ErrorWithoutFlag TcRnMissingRoleAnnotation{} -> WarningWithFlag Opt_WarnMissingRoleAnnotations TcRnIllegalInvisTyVarBndr{} -> ErrorWithoutFlag TcRnIllegalWildcardTyVarBndr{} -> ErrorWithoutFlag TcRnDeprecatedInvisTyArgInConPat {} -> WarningWithFlag Opt_WarnDeprecatedTypeAbstractions TcRnInvalidInvisTyVarBndr{} -> ErrorWithoutFlag TcRnInvisBndrWithoutSig{} -> ErrorWithoutFlag TcRnImplicitRhsQuantification{} -> WarningWithFlag Opt_WarnImplicitRhsQuantification TcRnPatersonCondFailure{} -> ErrorWithoutFlag TcRnIllformedTypePattern{} -> ErrorWithoutFlag TcRnIllegalTypePattern{} -> ErrorWithoutFlag TcRnIllformedTypeArgument{} -> ErrorWithoutFlag TcRnIllegalTypeExpr{} -> ErrorWithoutFlag TcRnInvalidDefaultedTyVar{} -> ErrorWithoutFlag TcRnNamespacedWarningPragmaWithoutFlag{} -> ErrorWithoutFlag TcRnIllegalInvisibleTypePattern{} -> ErrorWithoutFlag TcRnInvisPatWithNoForAll{} -> ErrorWithoutFlag TcRnNamespacedFixitySigWithoutFlag{} -> ErrorWithoutFlag TcRnOutOfArityTyVar{} -> ErrorWithoutFlag TcRnMisplacedInvisPat{} -> ErrorWithoutFlag TcRnUnexpectedTypeSyntaxInTerms{} -> ErrorWithoutFlag diagnosticHints = \case TcRnUnknownMessage m -> diagnosticHints m TcRnMessageWithInfo _ msg_with_info -> case msg_with_info of TcRnMessageDetailed _ m -> diagnosticHints m TcRnWithHsDocContext _ msg -> diagnosticHints msg TcRnSolverReport (SolverReportWithCtxt ctxt msg) _ -> tcSolverReportMsgHints ctxt msg TcRnSolverDepthError {} -> [SuggestIncreaseReductionDepth] TcRnRedundantConstraints{} -> noHints TcRnInaccessibleCode{} -> noHints TcRnInaccessibleCoAxBranch{} -> noHints TcRnTypeDoesNotHaveFixedRuntimeRep{} -> noHints TcRnImplicitLift{} -> noHints TcRnUnusedPatternBinds{} -> noHints TcRnDodgyImports{} -> noHints TcRnDodgyExports{} -> noHints TcRnMissingImportList{} -> noHints TcRnUnsafeDueToPlugin{} -> noHints TcRnModMissingRealSrcSpan{} -> noHints TcRnIdNotExportedFromModuleSig name mod -> 
[SuggestAddToHSigExportList name $ Just mod] TcRnIdNotExportedFromLocalSig name -> [SuggestAddToHSigExportList name Nothing] TcRnShadowedName{} -> noHints TcRnInvalidWarningCategory{} -> noHints TcRnDuplicateWarningDecls{} -> noHints TcRnSimplifierTooManyIterations{} -> [SuggestIncreaseSimplifierIterations] TcRnIllegalPatSynDecl{} -> noHints TcRnLinearPatSyn{} -> noHints TcRnEmptyRecordUpdate{} -> noHints TcRnIllegalFieldPunning{} -> [suggestExtension LangExt.NamedFieldPuns] TcRnIllegalWildcardsInRecord{} -> [suggestExtension LangExt.RecordWildCards] TcRnIllegalWildcardInType{} -> noHints TcRnIllegalNamedWildcardInTypeArgument{} -> [SuggestAnonymousWildcard] TcRnIllegalImplicitTyVarInTypeArgument tv -> [SuggestExplicitQuantification tv] TcRnDuplicateFieldName{} -> noHints TcRnIllegalViewPattern{} -> [suggestExtension LangExt.ViewPatterns] TcRnCharLiteralOutOfRange{} -> noHints TcRnIllegalWildcardsInConstructor{} -> noHints TcRnIgnoringAnnotations{} -> noHints TcRnAnnotationInSafeHaskell -> noHints TcRnInvalidTypeApplication{} -> noHints TcRnTagToEnumMissingValArg -> noHints TcRnTagToEnumUnspecifiedResTy{} -> noHints TcRnTagToEnumResTyNotAnEnum{} -> noHints TcRnTagToEnumResTyTypeData{} -> noHints TcRnArrowIfThenElsePredDependsOnResultTy -> noHints TcRnIllegalHsBootOrSigDecl {} -> noHints TcRnBootMismatch boot_or_sig err | Hsig <- boot_or_sig , BootMismatch _ _ (BootMismatchedTyCons _boot_tc real_tc tc_errs) <- err , any is_synAbsData_etaReduce (NE.toList tc_errs) -> [SuggestEtaReduceAbsDataTySyn real_tc] | otherwise -> noHints where is_synAbsData_etaReduce (SynAbstractData SynAbsDataTySynNotNullary) = True is_synAbsData_etaReduce _ = False TcRnRecursivePatternSynonym{} -> noHints TcRnPartialTypeSigTyVarMismatch{} -> noHints TcRnPartialTypeSigBadQuantifier{} -> noHints TcRnMissingSignature {} -> noHints TcRnPolymorphicBinderMissingSig{} -> noHints TcRnOverloadedSig{} -> noHints TcRnTupleConstraintInst{} -> noHints TcRnUserTypeError{} -> noHints TcRnConstraintInKind{} 
-> noHints TcRnUnboxedTupleOrSumTypeFuncArg tuple_or_sum _ -> [suggestExtension $ unboxedTupleOrSumExtension tuple_or_sum] TcRnLinearFuncInKind{} -> noHints TcRnForAllEscapeError{} -> noHints TcRnVDQInTermType mb_ty | isJust mb_ty -> [suggestExtension LangExt.RequiredTypeArguments] | otherwise -> [] TcRnBadQuantPredHead{} -> noHints TcRnIllegalTupleConstraint{} -> [suggestExtension LangExt.ConstraintKinds] TcRnNonTypeVarArgInConstraint{} -> [suggestExtension LangExt.FlexibleContexts] TcRnIllegalImplicitParam{} -> noHints TcRnIllegalConstraintSynonymOfKind{} -> [suggestExtension LangExt.ConstraintKinds] TcRnOversaturatedVisibleKindArg{} -> noHints TcRnForAllRankErr rank _ -> case rank of LimitedRank{} -> [suggestExtension LangExt.RankNTypes] MonoTypeRankZero -> [suggestExtension LangExt.RankNTypes] MonoTypeTyConArg -> [suggestExtension LangExt.ImpredicativeTypes] MonoTypeSynArg -> [suggestExtension LangExt.LiberalTypeSynonyms] MonoTypeConstraint -> [suggestExtension LangExt.QuantifiedConstraints] _ -> noHints TcRnSimplifiableConstraint{} -> noHints TcRnArityMismatch{} -> noHints TcRnIllegalInstance rea -> illegalInstanceHints rea TcRnMonomorphicBindings bindings -> case bindings of [] -> noHints (x:xs) -> [SuggestAddTypeSignatures $ NamedBindings (x NE.:| xs)] TcRnOrphanInstance clsOrFamInst -> [SuggestFixOrphanInst { isFamilyInstance = isFam }] where isFam = case clsOrFamInst :: Either ClsInst FamInst of Left _clsInst -> Nothing Right famInst -> Just $ fi_flavor famInst TcRnFunDepConflict{} -> noHints TcRnDupInstanceDecls{} -> noHints TcRnConflictingFamInstDecls{} -> noHints TcRnFamInstNotInjective rea _ _ -> case rea of InjErrRhsBareTyVar{} -> noHints InjErrRhsCannotBeATypeFam -> noHints InjErrRhsOverlap -> noHints InjErrCannotInferFromRhs _ _ suggestUndInst | YesSuggestUndecidableInstaces <- suggestUndInst -> [suggestExtension LangExt.UndecidableInstances] | otherwise -> noHints TcRnBangOnUnliftedType{} -> noHints TcRnLazyBangOnUnliftedType{} -> noHints 
TcRnMultipleDefaultDeclarations{} -> noHints TcRnBadDefaultType{} -> noHints TcRnPatSynBundledWithNonDataCon{} -> noHints TcRnPatSynBundledWithWrongType{} -> noHints TcRnDupeModuleExport{} -> noHints TcRnExportedModNotImported{} -> noHints TcRnNullExportedModule{} -> noHints TcRnMissingExportList{} -> noHints TcRnExportHiddenComponents{} -> noHints TcRnExportHiddenDefault{} -> noHints TcRnDuplicateExport{} -> noHints TcRnExportedParentChildMismatch{} -> noHints TcRnConflictingExports{} -> noHints TcRnDuplicateFieldExport {} -> [suggestExtension LangExt.DuplicateRecordFields] TcRnAmbiguousFieldInUpdate {} -> [suggestExtension LangExt.DisambiguateRecordFields] TcRnAmbiguousRecordUpdate{} -> noHints TcRnMissingFields{} -> noHints TcRnFieldUpdateInvalidType{} -> noHints TcRnMissingStrictFields{} -> noHints TcRnBadRecordUpdate{} -> noHints TcRnIllegalStaticExpression {} -> [suggestExtension LangExt.StaticPointers] TcRnStaticFormNotClosed{} -> noHints TcRnUselessTypeable -> noHints TcRnDerivingDefaults{} -> [useDerivingStrategies] TcRnNonUnaryTypeclassConstraint{} -> noHints TcRnPartialTypeSignatures suggestParSig _ -> case suggestParSig of YesSuggestPartialTypeSignatures -> let info = text "to use the inferred type" in [suggestExtensionWithInfo info LangExt.PartialTypeSignatures] NoSuggestPartialTypeSignatures -> noHints TcRnCannotDeriveInstance cls _ _ newtype_deriving rea -> deriveInstanceErrReasonHints cls newtype_deriving rea TcRnLookupInstance _ _ _ -> noHints TcRnLazyGADTPattern -> noHints TcRnArrowProcGADTPattern -> noHints TcRnTypeEqualityOutOfScope -> noHints TcRnTypeEqualityRequiresOperators -> [suggestExtension LangExt.TypeOperators] TcRnIllegalTypeOperator {} -> [suggestExtension LangExt.TypeOperators] TcRnIllegalTypeOperatorDecl {} -> [suggestExtension LangExt.TypeOperators] TcRnGADTMonoLocalBinds {} -> [suggestAnyExtension [LangExt.GADTs, LangExt.TypeFamilies]] TcRnIncorrectNameSpace nm is_th_use | is_th_use -> [SuggestAppropriateTHTick $ nameNameSpace nm] 
| otherwise -> noHints TcRnNotInScope err _ _ hints -> scopeErrorHints err ++ hints TcRnTermNameInType _ hints -> hints TcRnUntickedPromotedThing thing -> [SuggestAddTick thing] TcRnIllegalBuiltinSyntax {} -> noHints TcRnWarnDefaulting {} -> noHints TcRnWarnClashingDefaultImports cls local imports -> suggestDefaultDeclaration cls (fold local) (cd_types <$> NE.toList imports) TcRnForeignImportPrimExtNotSet{} -> [suggestExtension LangExt.GHCForeignImportPrim] TcRnForeignImportPrimSafeAnn{} -> noHints TcRnForeignFunctionImportAsValue{} -> noHints TcRnFunPtrImportWithoutAmpersand{} -> noHints TcRnIllegalForeignDeclBackend{} -> noHints TcRnUnsupportedCallConv{} -> noHints TcRnIllegalForeignType _ reason -> case reason of TypeCannotBeMarshaled _ why | NewtypeDataConNotInScope tc _ <- why -> let tc_nm = tyConName tc dc = dataConName $ head $ tyConDataCons tc in [ ImportSuggestion (occName dc) $ ImportDataCon Nothing (nameOccName tc_nm) ] | UnliftedFFITypesNeeded <- why -> [suggestExtension LangExt.UnliftedFFITypes] _ -> noHints TcRnInvalidCIdentifier{} -> noHints TcRnExpectedValueId{} -> noHints TcRnRecSelectorEscapedTyVar{} -> [SuggestPatternMatchingSyntax] TcRnPatSynNotBidirectional{} -> noHints TcRnIllegalDerivingItem{} -> noHints TcRnIllegalDefaultClass{} -> noHints TcRnIllegalNamedDefault{} -> [suggestExtension LangExt.NamedDefaults] TcRnUnexpectedAnnotation{} -> noHints TcRnIllegalRecordSyntax{} -> noHints TcRnInvalidVisibleKindArgument{} -> noHints TcRnTooManyBinders{} -> noHints TcRnDifferentNamesForTyVar{} -> noHints TcRnDisconnectedTyVar n -> [SuggestBindTyVarExplicitly n] TcRnInvalidReturnKind _ _ _ mb_suggest_unlifted_ext -> case mb_suggest_unlifted_ext of Nothing -> noHints Just SuggestUnliftedNewtypes -> [suggestExtension LangExt.UnliftedNewtypes] Just SuggestUnliftedDatatypes -> [suggestExtension LangExt.UnliftedDatatypes] TcRnClassKindNotConstraint{} -> noHints TcRnUnpromotableThing{} -> noHints TcRnIllegalTermLevelUse{} -> noHints 
TcRnMatchesHaveDiffNumArgs{} -> noHints TcRnCannotBindScopedTyVarInPatSig{} -> noHints TcRnCannotBindTyVarsInPatBind{} -> noHints TcRnTooManyTyArgsInConPattern{} -> noHints TcRnMultipleInlinePragmas{} -> noHints TcRnUnexpectedPragmas{} -> noHints TcRnNonOverloadedSpecialisePragma{} -> noHints TcRnSpecialiseNotVisible name -> [SuggestSpecialiseVisibilityHints name] TcRnPragmaWarning{} -> noHints TcRnDifferentExportWarnings _ _ -> noHints TcRnIncompleteExportWarnings _ _ -> noHints TcRnIllegalHsigDefaultMethods{} -> noHints TcRnIllegalQuasiQuotes{} -> [suggestExtension LangExt.QuasiQuotes] TcRnTHError err -> thErrorHints err TcRnHsigFixityMismatch{} -> noHints TcRnHsigShapeMismatch{} -> noHints TcRnHsigMissingModuleExport{} -> noHints TcRnBadGenericMethod{} -> noHints TcRnWarningMinimalDefIncomplete{} -> noHints TcRnDefaultMethodForPragmaLacksBinding{} -> noHints TcRnIgnoreSpecialisePragmaOnDefMethod{} -> noHints TcRnBadMethodErr{} -> noHints TcRnIllegalTypeData -> [suggestExtension LangExt.TypeData] TcRnTypeDataForbids{} -> noHints TcRnIllegalNewtype{} -> noHints TcRnOrPatBindsVariables{} -> noHints TcRnUnsatisfiedMinimalDef{} -> noHints TcRnMisplacedInstSig{} -> [suggestExtension LangExt.InstanceSigs] TcRnNoRebindableSyntaxRecordDot{} -> noHints TcRnNoFieldPunsRecordDot{} -> noHints TcRnListComprehensionDuplicateBinding{} -> noHints TcRnEmptyStmtsGroup EmptyStmtsGroupInDoNotation{} -> [suggestExtension LangExt.NondecreasingIndentation] TcRnEmptyStmtsGroup{} -> noHints TcRnLastStmtNotExpr{} -> noHints TcRnUnexpectedStatementInContext _ _ mExt | Nothing <- mExt -> noHints | Just ext <- mExt -> [suggestExtension ext] TcRnSectionWithoutParentheses{} -> noHints TcRnIllegalImplicitParameterBindings{} -> noHints TcRnIllegalTupleSection{} -> [suggestExtension LangExt.TupleSections] TcRnCapturedTermName{} -> [SuggestRenameTypeVariable] TcRnBindingOfExistingName{} -> noHints TcRnMultipleFixityDecls{} -> noHints TcRnIllegalPatternSynonymDecl{} -> [suggestExtension 
LangExt.PatternSynonyms] TcRnIllegalClassBinding{} -> noHints TcRnOrphanCompletePragma{} -> noHints TcRnEmptyCase ctxt -> case ctxt of LamAlt LamCases -> noHints -- cases syntax doesn't support empty case. ArrowMatchCtxt (ArrowLamAlt LamCases) -> noHints _ -> [suggestExtension LangExt.EmptyCase] TcRnNonStdGuards{} -> [suggestExtension LangExt.PatternGuards] TcRnDuplicateSigDecl{} -> noHints TcRnMisplacedSigDecl{} -> noHints TcRnUnexpectedDefaultSig{} -> [suggestExtension LangExt.DefaultSignatures] TcRnDuplicateMinimalSig{} -> noHints TcRnUnexpectedStandaloneDerivingDecl{} -> [suggestExtension LangExt.StandaloneDeriving] TcRnUnusedVariableInRuleDecl{} -> noHints TcRnUnexpectedStandaloneKindSig{} -> [suggestExtension LangExt.StandaloneKindSignatures] TcRnIllegalRuleLhs{} -> noHints TcRnDuplicateRoleAnnot{} -> noHints TcRnDuplicateKindSig{} -> noHints TcRnIllegalDerivStrategy ds -> case ds of ViaStrategy{} -> [suggestExtension LangExt.DerivingVia] _ -> [suggestExtension LangExt.DerivingStrategies] TcRnIllegalMultipleDerivClauses{} -> [suggestExtension LangExt.DerivingStrategies] TcRnNoDerivStratSpecified is_ds_enabled info -> do let explicit_strategy_hint = case info of TcRnNoDerivingClauseStrategySpecified assumed_derivings -> SuggestExplicitDerivingClauseStrategies assumed_derivings TcRnNoStandaloneDerivingStrategySpecified assumed_strategy deriv_sig -> SuggestExplicitStandaloneDerivingStrategy assumed_strategy deriv_sig explicit_strategy_hint : [suggestExtension LangExt.DerivingStrategies | not is_ds_enabled] TcRnStupidThetaInGadt{} -> noHints TcRnShadowedTyVarNameInFamResult{} -> noHints TcRnIncorrectTyVarOnLhsOfInjCond{} -> noHints TcRnUnknownTyVarsOnRhsOfInjCond{} -> noHints TcRnBadlyStaged{} -> noHints TcRnBadlyStagedType{} -> noHints TcRnStageRestriction{} -> noHints TcRnTyThingUsedWrong{} -> noHints TcRnCannotDefaultKindVar{} -> noHints TcRnUninferrableTyVar{} -> noHints TcRnSkolemEscape{} -> noHints TcRnPatSynEscapedCoercion{} -> noHints 
TcRnPatSynExistentialInResult{} -> noHints TcRnPatSynArityMismatch{} -> noHints TcRnPatSynInvalidRhs name pat args (PatSynNotInvertible _) -> [SuggestExplicitBidiPatSyn name pat args] TcRnPatSynInvalidRhs{} -> noHints TcRnTyFamDepsDisabled{} -> [suggestExtension LangExt.TypeFamilyDependencies] TcRnAbstractClosedTyFamDecl{} -> noHints TcRnPartialFieldSelector{} -> noHints TcRnHasFieldResolvedIncomplete{} -> noHints TcRnBadFieldAnnotation _ _ LazyFieldsDisabled -> [suggestExtension LangExt.StrictData] TcRnBadFieldAnnotation{} -> noHints TcRnSuperclassCycle{} -> [suggestExtension LangExt.UndecidableSuperClasses] TcRnDefaultSigMismatch{} -> noHints TcRnTyFamsDisabled{} -> [suggestExtension LangExt.TypeFamilies] TcRnBadTyConTelescope{} -> noHints TcRnTyFamResultDisabled{} -> [suggestExtension LangExt.TypeFamilyDependencies] TcRnRoleValidationFailed{} -> noHints TcRnCommonFieldResultTypeMismatch{} -> noHints TcRnCommonFieldTypeMismatch{} -> noHints TcRnClassExtensionDisabled _ MultiParamDisabled{} -> [suggestExtension LangExt.MultiParamTypeClasses] TcRnClassExtensionDisabled _ FunDepsDisabled{} -> [suggestExtension LangExt.FunctionalDependencies] TcRnClassExtensionDisabled _ ConstrainedClassMethodsDisabled{} -> [suggestExtension LangExt.ConstrainedClassMethods] TcRnDataConParentTypeMismatch{} -> noHints TcRnGADTsDisabled{} -> [suggestExtension LangExt.GADTs] TcRnExistentialQuantificationDisabled{} -> [suggestAnyExtension [LangExt.ExistentialQuantification, LangExt.GADTs]] TcRnGADTDataContext{} -> noHints TcRnMultipleConForNewtype{} -> noHints TcRnKindSignaturesDisabled{} -> [suggestExtension LangExt.KindSignatures] TcRnEmptyDataDeclsDisabled{} -> [suggestExtension LangExt.EmptyDataDecls] TcRnRoleMismatch{} -> noHints TcRnRoleCountMismatch{} -> noHints TcRnIllegalRoleAnnotation{} -> noHints TcRnRoleAnnotationsDisabled{} -> [suggestExtension LangExt.RoleAnnotations] TcRnIncoherentRoles{} -> [suggestExtension LangExt.IncoherentInstances] TcRnUnexpectedKindVar{} -> 
[suggestExtension LangExt.PolyKinds] TcRnNegativeNumTypeLiteral{} -> noHints TcRnIllegalKind _ suggest_polyKinds -> if suggest_polyKinds then [suggestExtension LangExt.PolyKinds] else noHints TcRnPrecedenceParsingError{} -> noHints TcRnSectionPrecedenceError{} -> noHints TcRnUnexpectedPatSigType{} -> [suggestExtension LangExt.ScopedTypeVariables] TcRnIllegalKindSignature{} -> [suggestExtension LangExt.KindSignatures] TcRnUnusedQuantifiedTypeVar{} -> noHints TcRnDataKindsError{} -> [suggestExtension LangExt.DataKinds] TcRnTypeSynonymCycle{} -> noHints TcRnZonkerMessage msg -> zonkerMessageHints msg TcRnInterfaceError reason -> interfaceErrorHints reason TcRnSelfImport{} -> noHints TcRnNoExplicitImportList{} -> noHints TcRnSafeImportsDisabled{} -> [SuggestSafeHaskell] TcRnDeprecatedModule{} -> noHints TcRnRedundantSourceImport{} -> noHints TcRnImportLookup (ImportLookupBad k _ is ie patsyns_enabled) -> let mod_name = moduleName $ is_mod is occ = rdrNameOcc $ ieName ie in case k of BadImportAvailVar -> [ImportSuggestion occ $ CouldRemoveTypeKeyword mod_name] BadImportNotExported suggs -> suggs BadImportAvailTyCon ex_ns -> [useExtensionInOrderTo empty LangExt.ExplicitNamespaces | not ex_ns] ++ [ImportSuggestion occ $ CouldAddTypeKeyword mod_name] BadImportAvailDataCon par -> [ImportSuggestion occ $ ImportDataCon (Just (mod_name, patsyns_enabled)) par] BadImportNotExportedSubordinates{} -> noHints TcRnImportLookup{} -> noHints TcRnUnusedImport{} -> noHints TcRnDuplicateDecls{} -> noHints TcRnPackageImportsDisabled -> [suggestExtension LangExt.PackageImports] TcRnIllegalDataCon{} -> noHints TcRnNestedForallsContexts{} -> noHints TcRnRedundantRecordWildcard -> [SuggestRemoveRecordWildcard] TcRnUnusedRecordWildcard{} -> [SuggestRemoveRecordWildcard] TcRnUnusedName{} -> noHints TcRnQualifiedBinder{} -> noHints TcRnTypeApplicationsDisabled ty_app -> case ty_app of TypeApplication {} -> [suggestExtension LangExt.TypeApplications] TypeApplicationInPattern {} -> 
[suggestExtension LangExt.TypeAbstractions] TcRnInvalidRecordField{} -> noHints TcRnTupleTooLarge{} -> noHints TcRnCTupleTooLarge{} -> noHints TcRnIllegalInferredTyVars{} -> noHints TcRnAmbiguousName{} -> noHints TcRnBindingNameConflict{} -> noHints TcRnNonCanonicalDefinition reason _ -> suggestNonCanonicalDefinition reason TcRnDefaultedExceptionContext _ -> noHints TcRnImplicitImportOfPrelude {} -> noHints TcRnMissingMain {} -> noHints TcRnGhciUnliftedBind {} -> noHints TcRnGhciMonadLookupFail {} -> noHints TcRnMissingRoleAnnotation{} -> noHints TcRnIllegalInvisTyVarBndr{} -> [suggestExtension LangExt.TypeAbstractions] TcRnIllegalWildcardTyVarBndr{} -> [suggestExtension LangExt.TypeAbstractions] TcRnDeprecatedInvisTyArgInConPat{} -> [suggestExtension LangExt.TypeAbstractions] TcRnInvalidInvisTyVarBndr{} -> noHints TcRnInvisBndrWithoutSig name _ -> [SuggestAddStandaloneKindSignature name] TcRnImplicitRhsQuantification kv -> [SuggestBindTyVarOnLhs (unLoc kv)] TcRnPatersonCondFailure{} -> [suggestExtension LangExt.UndecidableInstances] TcRnIllformedTypePattern{} -> noHints TcRnIllegalTypePattern{} -> noHints TcRnIllformedTypeArgument{} -> noHints TcRnIllegalTypeExpr{} -> noHints TcRnInvalidDefaultedTyVar{} -> noHints TcRnNamespacedWarningPragmaWithoutFlag{} -> [suggestExtension LangExt.ExplicitNamespaces] TcRnIllegalInvisibleTypePattern{} -> [suggestExtension LangExt.TypeAbstractions] TcRnInvisPatWithNoForAll{} -> noHints TcRnNamespacedFixitySigWithoutFlag{} -> [suggestExtension LangExt.ExplicitNamespaces] TcRnOutOfArityTyVar{} -> noHints TcRnMisplacedInvisPat{} -> noHints TcRnUnexpectedTypeSyntaxInTerms syntax -> [suggestExtension (typeSyntaxExtension syntax)] diagnosticCode = constructorCode note :: SDoc -> SDoc note note = "Note" <> colon <+> note <> dot -- | Change [x] to "x", [x, y] to "x and y", [x, y, z] to "x, y, and z", -- and so on. The `and` stands for any `conjunction`, which is passed in. 
commafyWith :: SDoc -> [SDoc] -> [SDoc] commafyWith _ [] = [] commafyWith _ [x] = [x] commafyWith conjunction [x, y] = [x <+> conjunction <+> y] commafyWith conjunction xs = addConjunction $ punctuate comma xs where addConjunction [x, y] = [x, conjunction, y] addConjunction (x : xs) = x : addConjunction xs addConjunction _ = panic "commafyWith expected 2 or more elements" deriveInstanceErrReasonHints :: Class -> UsingGeneralizedNewtypeDeriving -> DeriveInstanceErrReason -> [GhcHint] deriveInstanceErrReasonHints cls newtype_deriving = \case DerivErrNotWellKinded _ _ n_args_to_keep | cls `hasKey` gen1ClassKey && n_args_to_keep >= 0 -> [suggestExtension LangExt.PolyKinds] | otherwise -> noHints DerivErrSafeHaskellGenericInst -> noHints DerivErrDerivingViaWrongKind{} -> noHints DerivErrNoEtaReduce{} -> noHints DerivErrBootFileFound -> noHints DerivErrDataConsNotAllInScope{} -> noHints DerivErrGNDUsedOnData -> noHints DerivErrNullaryClasses -> noHints DerivErrLastArgMustBeApp -> noHints DerivErrNoFamilyInstance{} -> noHints DerivErrNotStockDeriveable deriveAnyClassEnabled | deriveAnyClassEnabled == NoDeriveAnyClassEnabled -> [suggestExtension LangExt.DeriveAnyClass] | otherwise -> noHints DerivErrHasAssociatedDatatypes{} -> noHints DerivErrNewtypeNonDeriveableClass | newtype_deriving == NoGeneralizedNewtypeDeriving -> [useGND] | otherwise -> noHints DerivErrCannotEtaReduceEnough{} | newtype_deriving == NoGeneralizedNewtypeDeriving -> [useGND] | otherwise -> noHints DerivErrOnlyAnyClassDeriveable _ deriveAnyClassEnabled | deriveAnyClassEnabled == NoDeriveAnyClassEnabled -> [suggestExtension LangExt.DeriveAnyClass] | otherwise -> noHints DerivErrNotDeriveable deriveAnyClassEnabled | deriveAnyClassEnabled == NoDeriveAnyClassEnabled -> [suggestExtension LangExt.DeriveAnyClass] | otherwise -> noHints DerivErrNotAClass{} -> noHints DerivErrNoConstructors{} -> let info = text "to enable deriving for empty data types" in [useExtensionInOrderTo info LangExt.EmptyDataDeriving] 
DerivErrLangExtRequired{} -- This is a slightly weird corner case of GHC: we are failing -- to derive a typeclass instance because a particular 'Extension' -- is not enabled (and so we report in the main error), but here -- we don't want to /repeat/ to enable the extension in the hint. -> noHints DerivErrDunnoHowToDeriveForType{} -> noHints DerivErrMustBeEnumType rep_tc -- We want to suggest GND only if this /is/ a newtype. | newtype_deriving == NoGeneralizedNewtypeDeriving && isNewTyCon rep_tc -> [useGND] | otherwise -> noHints DerivErrMustHaveExactlyOneConstructor{} -> noHints DerivErrMustHaveSomeParameters{} -> noHints DerivErrMustNotHaveClassContext{} -> noHints DerivErrBadConstructor wcard _ -> case wcard of Nothing -> noHints Just YesHasWildcard -> [SuggestFillInWildcardConstraint] Just NoHasWildcard -> [SuggestAddStandaloneDerivation] DerivErrGenerics{} -> noHints DerivErrEnumOrProduct{} -> noHints messageWithInfoDiagnosticMessage :: UnitState -> ErrInfo -> Bool -> DecoratedSDoc -> DecoratedSDoc messageWithInfoDiagnosticMessage unit_state ErrInfo{..} show_ctxt important = let err_info' = map (pprWithUnitState unit_state) ([errInfoContext | show_ctxt] ++ [errInfoSupplementary]) in (mapDecoratedSDoc (pprWithUnitState unit_state) important) `unionDecoratedSDoc` mkDecorated err_info' messageWithHsDocContext :: TcRnMessageOpts -> HsDocContext -> DecoratedSDoc -> DecoratedSDoc messageWithHsDocContext opts ctxt main_msg = do if tcOptsShowContext opts then main_msg `unionDecoratedSDoc` ctxt_msg else main_msg where ctxt_msg = mkSimpleDecorated (inHsDocContext ctxt) dodgy_msg :: Outputable ie => SDoc -> GlobalRdrElt -> ie -> SDoc dodgy_msg kind tc ie = vcat [ text "The" <+> kind <+> text "item" <+> quotes (ppr ie) <+> text "suggests that" , quotes (ppr $ greName tc) <+> text "has" <+> sep rest ] where rest :: [SDoc] rest = case greInfo tc of IAmTyCon ClassFlavour -> [ text "(in-scope) class methods or associated types" <> comma , text "but it has none" ] IAmTyCon _ -> 
[ text "(in-scope) constructors or record fields" <> comma , text "but it has none" ] _ -> [ text "children" <> comma , text "but it is not a type constructor or a class" ] dodgy_msg_insert :: GlobalRdrElt -> IE GhcRn dodgy_msg_insert tc_gre = IEThingAll (Nothing, noAnn) ii Nothing where ii = noLocA (IEName noExtField $ noLocA $ greName tc_gre) pprTypeDoesNotHaveFixedRuntimeRep :: Type -> FixedRuntimeRepProvenance -> SDoc pprTypeDoesNotHaveFixedRuntimeRep ty prov = let what = pprFixedRuntimeRepProvenance prov in text "The" <+> what <+> text "does not have a fixed runtime representation:" $$ format_frr_err ty format_frr_err :: Type -- ^ the type which doesn't have a fixed runtime representation -> SDoc format_frr_err ty = (bullet <+> ppr tidy_ty <+> dcolon <+> ppr tidy_ki) where (tidy_env, tidy_ty) = tidyOpenTypeX emptyTidyEnv ty tidy_ki = tidyType tidy_env (typeKind ty) pprField :: (FieldLabelString, TcType) -> SDoc pprField (f,ty) = ppr f <+> dcolon <+> ppr ty pprRecordFieldPart :: RecordFieldPart -> SDoc pprRecordFieldPart = \case RecordFieldDecl {} -> text "declaration" RecordFieldConstructor{} -> text "construction" RecordFieldPattern{} -> text "pattern" RecordFieldUpdate -> text "update" ppr_opfix :: (OpName, Fixity) -> SDoc ppr_opfix (op, fixity) = pp_op <+> brackets (ppr fixity) where pp_op | NegateOp <- op = text "prefix `-'" | otherwise = quotes (ppr op) pprBindings :: [Name] -> SDoc pprBindings = pprWithCommas (quotes . ppr) injectivityErrorHerald :: SDoc injectivityErrorHerald = text "Type family equation violates the family's injectivity annotation." formatExportItemError :: SDoc -> String -> SDoc formatExportItemError exportedThing reason = hsep [ text "The export item" , quotes exportedThing , text reason ] -- | What warning flags are associated with the given missing signature? 
missingSignatureWarningFlags :: MissingSignature -> Exported -> NonEmpty WarningFlag missingSignatureWarningFlags (MissingTopLevelBindingSig {}) exported -- We prefer "bigger" warnings first: #14794 -- -- See Note [Warnings controlled by multiple flags] = Opt_WarnMissingSignatures :| [ Opt_WarnMissingExportedSignatures | IsExported == exported ] missingSignatureWarningFlags (MissingPatSynSig {}) exported = Opt_WarnMissingPatternSynonymSignatures :| [ Opt_WarnMissingExportedPatternSynonymSignatures | IsExported == exported ] missingSignatureWarningFlags (MissingTyConKindSig ty_con _) _ = Opt_WarnMissingKindSignatures :| [Opt_WarnMissingPolyKindSignatures | isForAllTy_invis_ty (tyConKind ty_con) ] useDerivingStrategies :: GhcHint useDerivingStrategies = useExtensionInOrderTo (text "to pick a different strategy") LangExt.DerivingStrategies useGND :: GhcHint useGND = let info = text "for GHC's" <+> text "newtype-deriving extension" in suggestExtensionWithInfo info LangExt.GeneralizedNewtypeDeriving cannotMakeDerivedInstanceHerald :: Class -> [Type] -> Maybe (DerivStrategy GhcTc) -> UsingGeneralizedNewtypeDeriving -> Bool -- ^ If False, only prints the why. 
-> SDoc -> SDoc cannotMakeDerivedInstanceHerald cls cls_args mb_strat newtype_deriving pprHerald why = if pprHerald then sep [(hang (text "Can't make a derived instance of") 2 (quotes (ppr pred) <+> via_mechanism) $$ nest 2 extra) <> colon, nest 2 why] else why where strat_used = isJust mb_strat extra | not strat_used, (newtype_deriving == YesGeneralizedNewtypeDeriving) = text "(even with cunning GeneralizedNewtypeDeriving)" | otherwise = empty pred = mkClassPred cls cls_args via_mechanism | strat_used , Just strat <- mb_strat = text "with the" <+> (derivStrategyName strat) <+> text "strategy" | otherwise = empty badCon :: DataCon -> SDoc -> SDoc badCon con msg = text "Constructor" <+> quotes (ppr con) <+> msg derivErrDiagnosticMessage :: Class -> [Type] -> Maybe (DerivStrategy GhcTc) -> UsingGeneralizedNewtypeDeriving -> Bool -- If True, includes the herald \"can't make a derived..\" -> DeriveInstanceErrReason -> SDoc derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving pprHerald = \case DerivErrNotWellKinded tc cls_kind _ -> sep [ hang (text "Cannot derive well-kinded instance of form" <+> quotes (pprClassPred cls cls_tys <+> parens (ppr tc <+> text "..."))) 2 empty , nest 2 (text "Class" <+> quotes (ppr cls) <+> text "expects an argument of kind" <+> quotes (pprKind cls_kind)) ] DerivErrSafeHaskellGenericInst -> text "Generic instances can only be derived in" <+> text "Safe Haskell using the stock strategy." DerivErrDerivingViaWrongKind cls_kind via_ty via_kind -> hang (text "Cannot derive instance via" <+> quotes (pprType via_ty)) 2 (text "Class" <+> quotes (ppr cls) <+> text "expects an argument of kind" <+> quotes (pprKind cls_kind) <> char ',' $+$ text "but" <+> quotes (pprType via_ty) <+> text "has kind" <+> quotes (pprKind via_kind)) DerivErrNoEtaReduce inst_ty -> sep [text "Cannot eta-reduce to an instance of form", nest 2 (text "instance (...) 
=>" <+> pprClassPred cls (cls_tys ++ [inst_ty]))] DerivErrBootFileFound -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "Cannot derive instances in hs-boot files" $+$ text "Write an instance declaration instead") DerivErrDataConsNotAllInScope tc -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (hang (text "The data constructors of" <+> quotes (ppr tc) <+> text "are not all in scope") 2 (text "so you cannot derive an instance for it")) DerivErrGNDUsedOnData -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "GeneralizedNewtypeDeriving cannot be used on non-newtypes") DerivErrNullaryClasses -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "Cannot derive instances for nullary classes") DerivErrLastArgMustBeApp -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald ( text "The last argument of the instance must be a" <+> text "data or newtype application") DerivErrNoFamilyInstance tc tc_args -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "No family instance for" <+> quotes (pprTypeApp tc tc_args)) DerivErrNotStockDeriveable _ -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (quotes (ppr cls) <+> text "is not a stock derivable class (Eq, Show, etc.)") DerivErrHasAssociatedDatatypes hasAdfs at_last_cls_tv_in_kinds at_without_last_cls_tv -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald $ vcat [ ppWhen (hasAdfs == YesHasAdfs) adfs_msg , case at_without_last_cls_tv of YesAssociatedTyNotParamOverLastTyVar tc -> at_without_last_cls_tv_msg tc NoAssociatedTyNotParamOverLastTyVar -> empty , case at_last_cls_tv_in_kinds of YesAssocTyLastVarInKind tc -> at_last_cls_tv_in_kinds_msg tc NoAssocTyLastVarInKind -> empty ] where adfs_msg = text "the class has associated data types" 
at_without_last_cls_tv_msg at_tc = hang (text "the associated type" <+> quotes (ppr at_tc) <+> text "is not parameterized over the last type variable") 2 (text "of the class" <+> quotes (ppr cls)) at_last_cls_tv_in_kinds_msg at_tc = hang (text "the associated type" <+> quotes (ppr at_tc) <+> text "contains the last type variable") 2 (text "of the class" <+> quotes (ppr cls) <+> text "in a kind, which is not (yet) allowed") DerivErrNewtypeNonDeriveableClass -> derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving pprHerald (DerivErrNotStockDeriveable NoDeriveAnyClassEnabled) DerivErrCannotEtaReduceEnough eta_ok -> let cant_derive_err = ppUnless eta_ok eta_msg eta_msg = text "cannot eta-reduce the representation type enough" in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald cant_derive_err DerivErrOnlyAnyClassDeriveable tc _ -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (quotes (ppr tc) <+> text "is a type class," <+> text "and can only have a derived instance" $+$ text "if DeriveAnyClass is enabled") DerivErrNotDeriveable _ -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald empty DerivErrNotAClass predType -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (quotes (ppr predType) <+> text "is not a class") DerivErrNoConstructors rep_tc -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (quotes (pprSourceTyCon rep_tc) <+> text "must have at least one data constructor") DerivErrLangExtRequired ext -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "You need " <> ppr ext <+> text "to derive an instance for this class") DerivErrDunnoHowToDeriveForType ty -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (hang (text "Don't know how to derive" <+> quotes (ppr cls)) 2 (text "for type" <+> quotes (ppr ty))) DerivErrMustBeEnumType 
rep_tc -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (sep [ quotes (pprSourceTyCon rep_tc) <+> text "must be an enumeration type" , text "(an enumeration consists of one or more nullary, non-GADT constructors)" ]) DerivErrMustHaveExactlyOneConstructor rep_tc -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (quotes (pprSourceTyCon rep_tc) <+> text "must have precisely one constructor") DerivErrMustHaveSomeParameters rep_tc -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "Data type" <+> quotes (ppr rep_tc) <+> text "must have some type parameters") DerivErrMustNotHaveClassContext rep_tc bad_stupid_theta -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (text "Data type" <+> quotes (ppr rep_tc) <+> text "must not have a class context:" <+> pprTheta bad_stupid_theta) DerivErrBadConstructor _ reasons -> let why = vcat $ map renderReason reasons in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald why where renderReason = \case DerivErrBadConExistential con -> badCon con $ text "must be truly polymorphic in the last argument of the data type" DerivErrBadConCovariant con -> badCon con $ text "must not use the type variable in a function argument" DerivErrBadConFunTypes con -> badCon con $ text "must not contain function types" DerivErrBadConWrongArg con -> badCon con $ text "must use the type variable only as the last argument of a data type" DerivErrBadConIsGADT con -> badCon con $ text "is a GADT" DerivErrBadConHasExistentials con -> badCon con $ text "has existential type variables in its type" DerivErrBadConHasConstraints con -> badCon con $ text "has constraints in its type" DerivErrBadConHasHigherRankType con -> badCon con $ text "has a higher-rank type" DerivErrGenerics reasons -> let why = vcat $ map renderReason reasons in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving 
pprHerald why where renderReason = \case DerivErrGenericsMustNotHaveDatatypeContext tc_name -> ppr tc_name <+> text "must not have a datatype context" DerivErrGenericsMustNotHaveExoticArgs dc -> ppr dc <+> text "must not have exotic unlifted or polymorphic arguments" DerivErrGenericsMustBeVanillaDataCon dc -> ppr dc <+> text "must be a vanilla data constructor" DerivErrGenericsMustHaveSomeTypeParams rep_tc -> text "Data type" <+> quotes (ppr rep_tc) <+> text "must have some type parameters" DerivErrGenericsMustNotHaveExistentials con -> badCon con $ text "must not have existential arguments" DerivErrGenericsWrongArgKind con -> badCon con $ text "applies a type to an argument involving the last parameter" $$ text "but the applied type is not of kind * -> *" DerivErrEnumOrProduct this that -> let ppr1 = derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving False this ppr2 = derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving False that in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald (ppr1 $$ text " or" $$ ppr2) lookupInstanceErrDiagnosticMessage :: Class -> [Type] -> LookupInstanceErrReason -> SDoc lookupInstanceErrDiagnosticMessage cls tys = \case LookupInstErrNotExact -> text "Not an exact match (i.e., some variables get instantiated)" LookupInstErrFlexiVar -> text "flexible type variable:" <+> (ppr $ mkTyConApp (classTyCon cls) tys) LookupInstErrNotFound -> text "instance not found" <+> (ppr $ mkTyConApp (classTyCon cls) tys) {- ********************************************************************* * * Outputable SolverReportErrCtxt (for debugging) * * **********************************************************************-} instance Outputable SolverReportErrCtxt where ppr (CEC { cec_binds = bvar , cec_defer_type_errors = dte , cec_expr_holes = eh , cec_type_holes = th , cec_out_of_scope_holes = osh , cec_warn_redundant = wr , cec_expand_syns = es , cec_suppress = sup }) = text "CEC" <+> braces (vcat [ text 
"cec_binds" <+> equals <+> ppr bvar , text "cec_defer_type_errors" <+> equals <+> ppr dte , text "cec_expr_holes" <+> equals <+> ppr eh , text "cec_type_holes" <+> equals <+> ppr th , text "cec_out_of_scope_holes" <+> equals <+> ppr osh , text "cec_warn_redundant" <+> equals <+> ppr wr , text "cec_expand_syns" <+> equals <+> ppr es , text "cec_suppress" <+> equals <+> ppr sup ]) {- ********************************************************************* * * Outputting TcSolverReportMsg errors * * **********************************************************************-} -- | Pretty-print a 'SolverReportWithCtxt', containing a 'TcSolverReportMsg' -- with its enclosing 'SolverReportErrCtxt'. pprSolverReportWithCtxt :: SolverReportWithCtxt -> SDoc pprSolverReportWithCtxt (SolverReportWithCtxt { reportContext = ctxt, reportContent = msg }) = pprTcSolverReportMsg ctxt msg -- | Pretty-print a 'TcSolverReportMsg', with its enclosing 'SolverReportErrCtxt'. pprTcSolverReportMsg :: SolverReportErrCtxt -> TcSolverReportMsg -> SDoc pprTcSolverReportMsg _ (BadTelescope telescope skols) = hang (text "These kind and type variables:" <+> ppr telescope $$ text "are out of dependency order. 
Perhaps try this ordering:") 2 (pprTyVars sorted_tvs) where sorted_tvs = scopedSort skols pprTcSolverReportMsg _ (UserTypeError ty) = pprUserTypeErrorTy ty pprTcSolverReportMsg _ (UnsatisfiableError ty) = pprUserTypeErrorTy ty pprTcSolverReportMsg ctxt (ReportHoleError hole err) = pprHoleError ctxt hole err pprTcSolverReportMsg ctxt (CannotUnifyVariable { mismatchMsg = msg , cannotUnifyReason = reason }) = pprMismatchMsg ctxt msg $$ pprCannotUnifyVariableReason ctxt reason pprTcSolverReportMsg ctxt (Mismatch { mismatchMsg = mismatch_msg , mismatchTyVarInfo = tv_info , mismatchAmbiguityInfo = ambig_infos , mismatchCoercibleInfo = coercible_info }) = vcat ([ pprMismatchMsg ctxt mismatch_msg , maybe empty (pprTyVarInfo ctxt) tv_info , maybe empty pprCoercibleMsg coercible_info ] ++ (map pprAmbiguityInfo ambig_infos)) pprTcSolverReportMsg _ (FixedRuntimeRepError frr_origs) = vcat (map make_msg frr_origs) where -- Assemble the error message: pair up each origin with the corresponding type, e.g. -- • FixedRuntimeRep origin msg 1 ... -- a :: TYPE r1 -- • FixedRuntimeRep origin msg 2 ... -- b :: TYPE r2 make_msg :: FixedRuntimeRepErrorInfo -> SDoc make_msg (FRR_Info { frr_info_origin = FixedRuntimeRepOrigin { frr_type = ty , frr_context = frr_ctxt } , frr_info_not_concrete = mb_not_conc }) = -- Add bullet points if there is more than one error. (if length frr_origs > 1 then (bullet <+>) else id) $ vcat [ sep [ pprFixedRuntimeRepContext frr_ctxt , text "does not have a fixed runtime representation." ] , type_printout ty , case mb_not_conc of Nothing -> empty Just (conc_tv, not_conc) -> unsolved_concrete_eq_explanation conc_tv not_conc ] -- Don't print out the type (only the kind), if the type includes -- a confusing cast, unless the user passed -fprint-explicit-coercions. 
-- -- Example: -- -- In T20363, we have a representation-polymorphism error with a type -- of the form -- -- ( (# #) |> co ) :: TYPE NilRep -- -- where NilRep is a nullary type family application which reduces to TupleRep '[]. -- We prefer avoiding showing the cast to the user, but we also don't want to -- print the confusing: -- -- (# #) :: TYPE NilRep -- -- So in this case we simply don't print the type, only the kind. confusing_cast :: Type -> Bool confusing_cast ty = case ty of CastTy inner_ty _ -- A confusing cast is one that is responsible -- for a representation-polymorphism error. -> isConcreteType (typeKind inner_ty) _ -> False type_printout :: Type -> SDoc type_printout ty = sdocOption sdocPrintExplicitCoercions $ \ show_coercions -> if confusing_cast ty && not show_coercions then vcat [ text "Its kind is:" , nest 2 $ pprWithTYPE (typeKind ty) , text "(Use -fprint-explicit-coercions to see the full type.)" ] else vcat [ text "Its type is:" , nest 2 $ ppr ty <+> dcolon <+> pprWithTYPE (typeKind ty) ] unsolved_concrete_eq_explanation :: TcTyVar -> Type -> SDoc unsolved_concrete_eq_explanation tv not_conc = text "Cannot unify" <+> quotes (ppr not_conc) <+> text "with the type variable" <+> quotes (ppr tv) $$ text "because the former is not a concrete" <+> what <> dot where ki = tyVarKind tv what :: SDoc what | isRuntimeRepTy ki = quotes (text "RuntimeRep") | isLevityTy ki = quotes (text "Levity") | otherwise = text "type" pprTcSolverReportMsg _ (BlockedEquality item) = vcat [ hang (text "Cannot use equality for substitution:") 2 (ppr (errorItemPred item)) , text "Doing so would be ill-kinded." 
] pprTcSolverReportMsg _ (ExpectingMoreArguments n thing) = text "Expecting" <+> speakN (abs n) <+> more <+> quotes (ppr thing) where more | n == 1 = text "more argument to" | otherwise = text "more arguments to" -- n > 1 pprTcSolverReportMsg ctxt (UnboundImplicitParams (item :| items)) = let givens = getUserGivens ctxt in if null givens then addArising (errorItemCtLoc item) $ sep [ text "Unbound implicit parameter" <> plural preds , nest 2 (pprParendTheta preds) ] else pprMismatchMsg ctxt (CouldNotDeduce givens (item :| items) Nothing) where preds = map errorItemPred (item : items) pprTcSolverReportMsg _ (AmbiguityPreventsSolvingCt item ambigs) = pprAmbiguityInfo (Ambiguity True ambigs) <+> pprArising (errorItemCtLoc item) $$ text "prevents the constraint" <+> quotes (pprParendType $ errorItemPred item) <+> text "from being solved." pprTcSolverReportMsg ctxt@(CEC {cec_encl = implics}) (CannotResolveInstance item unifiers candidates imp_errs suggs binds) = vcat [ no_inst_msg , nest 2 extra_note , mb_patsyn_prov `orElse` empty , ppWhen (has_ambigs && not (null unifiers && null useful_givens)) (vcat [ ppUnless lead_with_ambig $ pprAmbiguityInfo (Ambiguity False (ambig_kvs, ambig_tvs)) , pprRelevantBindings binds , potential_msg ]) , ppWhen (isNothing mb_patsyn_prov) $ -- Don't suggest fixes for the provided context of a pattern -- synonym; the right fix is to bind more in the pattern show_fixes (ctxtFixes has_ambigs pred implics ++ drv_fixes ++ naked_sc_fixes) , ppWhen (not (null candidates)) (hang (text "There are instances for similar types:") 2 (vcat (map ppr candidates))) -- See Note [Report candidate instances] , vcat $ map ppr imp_errs , vcat $ map ppr suggs ] where orig = errorItemOrigin item pred = errorItemPred item (clas, tys) = getClassPredTys pred -- See Note [Highlighting ambiguous type variables] in GHC.Tc.Errors (ambig_kvs, ambig_tvs) = ambigTkvsOfTy pred ambigs = ambig_kvs ++ ambig_tvs has_ambigs = not (null ambigs) useful_givens = 
discardProvCtxtGivens orig (getUserGivensFromImplics implics) -- useful_givens are the enclosing implications with non-empty givens, -- modulo the horrid discardProvCtxtGivens lead_with_ambig = not (null ambigs) && not (any isRuntimeUnkSkol ambigs) && not (null unifiers) && null useful_givens no_inst_msg :: SDoc no_inst_msg | lead_with_ambig = pprTcSolverReportMsg ctxt $ AmbiguityPreventsSolvingCt item (ambig_kvs, ambig_tvs) | otherwise = pprMismatchMsg ctxt $ CouldNotDeduce useful_givens (item :| []) Nothing -- Report "potential instances" only when the constraint arises -- directly from the user's use of an overloaded function want_potential (TypeEqOrigin {}) = False want_potential _ = True potential_msg = ppWhen (not (null unifiers) && want_potential orig) $ potential_hdr $$ potentialInstancesErrMsg (PotentialInstances { matches = [], unifiers }) potential_hdr = ppWhen lead_with_ambig $ text "Probable fix: use a type annotation to specify what" <+> pprQuotedList ambig_tvs <+> text "should be." 
mb_patsyn_prov :: Maybe SDoc mb_patsyn_prov | not lead_with_ambig , ProvCtxtOrigin PSB{ psb_def = L _ pat } <- orig = Just (vcat [ text "In other words, a successful match on the pattern" , nest 2 $ ppr pat , text "does not provide the constraint" <+> pprParendType pred ]) | otherwise = Nothing extra_note | any isFunTy (filterOutInvisibleTypes (classTyCon clas) tys) = text "(maybe you haven't applied a function to enough arguments?)" | className clas == typeableClassName -- Avoid mysterious "No instance for (Typeable T) , [_,ty] <- tys -- Look for (Typeable (k->*) (T k)) , Just (tc,_) <- tcSplitTyConApp_maybe ty , not (isTypeFamilyTyCon tc) = hang (text "GHC can't yet do polykinded") 2 (text "Typeable" <+> parens (ppr ty <+> dcolon <+> ppr (typeKind ty))) | otherwise = empty drv_fixes = case orig of DerivClauseOrigin -> [drv_fix False] StandAloneDerivOrigin -> [drv_fix True] DerivOriginDC _ _ standalone -> [drv_fix standalone] DerivOriginCoerce _ _ _ standalone -> [drv_fix standalone] _ -> [] drv_fix standalone_wildcard | standalone_wildcard = text "fill in the wildcard constraint yourself" | otherwise = hang (text "use a standalone 'deriving instance' declaration,") 2 (text "so you can specify the instance context yourself") -- naked_sc_fix: try to produce a helpful error message for -- superclass constraints caught by the subtleties described by -- Note [Recursive superclasses] in GHC.TyCl.Instance naked_sc_fixes | ScOrigin _ NakedSc <- orig -- A superclass wanted with no instance decls used yet , any non_tyvar_preds useful_givens -- Some non-tyvar givens = [vcat [ text "If the constraint looks soluble from a superclass of the instance context," , text "read 'Undecidable instances and loopy superclasses' in the user manual" ]] | otherwise = [] non_tyvar_preds :: UserGiven -> Bool non_tyvar_preds = any non_tyvar_pred . ic_given non_tyvar_pred :: EvVar -> Bool -- Tells if the Given is of form (C ty1 .. 
tyn), where the tys are not all tyvars non_tyvar_pred given = case getClassPredTys_maybe (idType given) of Just (_, tys) -> not (all isTyVarTy tys) Nothing -> False pprTcSolverReportMsg (CEC {cec_encl = implics}) (OverlappingInstances item matches unifiers) = vcat [ addArising ct_loc $ (text "Overlapping instances for" <+> pprType (mkClassPred clas tys)) , ppUnless (null matching_givens) $ sep [text "Matching givens (or their superclasses):" , nest 2 (vcat matching_givens)] , potentialInstancesErrMsg (PotentialInstances { matches = NE.toList matches, unifiers }) , ppWhen (null matching_givens && null (NE.tail matches) && null unifiers) $ -- Intuitively, some given matched the wanted in their -- flattened or rewritten (from given equalities) form -- but the matcher can't figure that out because the -- constraints are non-flat and non-rewritten so we -- simply report back the whole given -- context. Accelerate Smart.hs showed this problem. sep [ text "There exists a (perhaps superclass) match:" , nest 2 (vcat (pp_givens useful_givens))] , ppWhen (null $ NE.tail matches) $ parens (vcat [ ppUnless (null tyCoVars) $ text "The choice depends on the instantiation of" <+> quotes (pprWithCommas ppr tyCoVars) , ppUnless (null famTyCons) $ if (null tyCoVars) then text "The choice depends on the result of evaluating" <+> quotes (pprWithCommas ppr famTyCons) else text "and the result of evaluating" <+> quotes (pprWithCommas ppr famTyCons) , ppWhen (null (matching_givens)) $ vcat [ text "To pick the first instance above, use IncoherentInstances" , text "when compiling the other instance declarations"] ])] where ct_loc = errorItemCtLoc item orig = ctLocOrigin ct_loc pred = errorItemPred item (clas, tys) = getClassPredTys pred tyCoVars = tyCoVarsOfTypesList tys famTyCons = filter isFamilyTyCon $ concatMap (nonDetEltsUniqSet . 
tyConsOfType) tys useful_givens = discardProvCtxtGivens orig (getUserGivensFromImplics implics) matching_givens = mapMaybe matchable useful_givens matchable implic@(Implic { ic_given = evvars, ic_info = skol_info }) = case ev_vars_matching of [] -> Nothing _ -> Just $ hang (pprTheta ev_vars_matching) 2 (sep [ text "bound by" <+> ppr skol_info , text "at" <+> ppr (getCtLocEnvLoc (ic_env implic)) ]) where ev_vars_matching = [ pred | ev_var <- evvars , let pred = evVarPred ev_var , any can_match (pred : transSuperClasses pred) ] can_match pred = case getClassPredTys_maybe pred of Just (clas', tys') -> clas' == clas && isJust (tcMatchTys tys tys') Nothing -> False pprTcSolverReportMsg _ (UnsafeOverlap item match unsafe_overlapped) = vcat [ addArising ct_loc (text "Unsafe overlapping instances for" <+> pprType (mkClassPred clas tys)) , sep [text "The matching instance is:", nest 2 (pprInstance match)] , vcat [ text "It is compiled in a Safe module and as such can only" , text "overlap instances from the same module, however it" , text "overlaps the following instances from different" <+> text "modules:" , nest 2 (vcat [pprInstances $ NE.toList unsafe_overlapped]) ] ] where ct_loc = errorItemCtLoc item pred = errorItemPred item (clas, tys) = getClassPredTys pred pprCannotUnifyVariableReason :: SolverReportErrCtxt -> CannotUnifyVariableReason -> SDoc pprCannotUnifyVariableReason ctxt (CannotUnifyWithPolytype item tv1 ty2 mb_tv_info) = vcat [ (if isSkolemTyVar tv1 then text "Cannot equate type variable" else text "Cannot instantiate unification variable") <+> quotes (ppr tv1) , hang (text "with a" <+> what <+> text "involving polytypes:") 2 (ppr ty2) , maybe empty (pprTyVarInfo ctxt) mb_tv_info ] where what = text $ levelString $ ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel pprCannotUnifyVariableReason _ (SkolemEscape item implic esc_skols) = let esc_doc = sep [ text "because" <+> what <+> text "variable" <> plural esc_skols <+> pprQuotedList esc_skols , 
text "would escape" <+> if isSingleton esc_skols then text "its scope" else text "their scope" ] in vcat [ nest 2 $ esc_doc , sep [ (if isSingleton esc_skols then text "This (rigid, skolem)" <+> what <+> text "variable is" else text "These (rigid, skolem)" <+> what <+> text "variables are") <+> text "bound by" , nest 2 $ ppr (ic_info implic) , nest 2 $ text "at" <+> ppr (getCtLocEnvLoc (ic_env implic)) ] ] where what = text $ levelString $ ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel pprCannotUnifyVariableReason ctxt (OccursCheck { occursCheckInterestingTyVars = interesting_tvs , occursCheckAmbiguityInfos = ambig_infos }) = ppr_interesting_tyVars interesting_tvs $$ vcat (map pprAmbiguityInfo ambig_infos) where ppr_interesting_tyVars [] = empty ppr_interesting_tyVars (tv:tvs) = hang (text "Type variable kinds:") 2 $ vcat (map (tyvar_binding . tidyTyCoVarOcc (cec_tidy ctxt)) (tv:tvs)) tyvar_binding tyvar = ppr tyvar <+> dcolon <+> ppr (tyVarKind tyvar) pprCannotUnifyVariableReason ctxt (DifferentTyVars tv_info) = pprTyVarInfo ctxt tv_info pprCannotUnifyVariableReason ctxt (RepresentationalEq tv_info mb_coercible_msg) = pprTyVarInfo ctxt tv_info $$ maybe empty pprCoercibleMsg mb_coercible_msg pprUntouchableVariable :: TcTyVar -> Implication -> SDoc pprUntouchableVariable tv (Implic { ic_given = given, ic_info = skol_info, ic_env = env }) = sep [ quotes (ppr tv) <+> text "is untouchable" , nest 2 $ text "inside the constraints:" <+> pprEvVarTheta given , nest 2 $ text "bound by" <+> ppr skol_info , nest 2 $ text "at" <+> ppr (getCtLocEnvLoc env) ] pprMismatchMsg :: SolverReportErrCtxt -> MismatchMsg -> SDoc pprMismatchMsg ctxt (BasicMismatch { mismatch_ea = ea , mismatch_item = item , mismatch_ty1 = ty1 -- Expected , mismatch_ty2 = ty2 -- Actual , mismatch_whenMatching = mb_match_txt , mismatch_mb_same_occ = same_occ_info }) = vcat [ addArising (errorItemCtLoc item) msg , ea_extra , maybe empty (pprWhenMatching ctxt) mb_match_txt , maybe empty 
pprSameOccInfo same_occ_info ] where msg | (isLiftedRuntimeRep ty1 && isUnliftedRuntimeRep ty2) || (isLiftedRuntimeRep ty2 && isUnliftedRuntimeRep ty1) || (isLiftedLevity ty1 && isUnliftedLevity ty2) || (isLiftedLevity ty2 && isUnliftedLevity ty1) = text "Couldn't match a lifted type with an unlifted type" | isAtomicTy ty1 || isAtomicTy ty2 = -- Print with quotes sep [ text herald1 <+> quotes (ppr ty1) , nest padding $ text herald2 <+> quotes (ppr ty2) ] | otherwise = -- Print with vertical layout vcat [ text herald1 <> colon <+> ppr ty1 , nest padding $ text herald2 <> colon <+> ppr ty2 ] herald1 = conc [ "Couldn't match" , if is_repr then "representation of" else "" , if want_ea then "expected" else "" , what ] herald2 = conc [ "with" , if is_repr then "that of" else "" , if want_ea then ("actual " ++ what) else "" ] padding = length herald1 - length herald2 (want_ea, ea_extra) = case ea of NoEA -> (False, empty) EA mb_extra -> (True , maybe empty (pprExpectedActualInfo ctxt) mb_extra) is_repr = case errorItemEqRel item of { ReprEq -> True; NomEq -> False } what = levelString (ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel) conc :: [String] -> String conc = foldr1 add_space add_space :: String -> String -> String add_space s1 s2 | null s1 = s2 | null s2 = s1 | otherwise = s1 ++ (' ' : s2) pprMismatchMsg _ (KindMismatch { kmismatch_what = thing , kmismatch_expected = exp , kmismatch_actual = act }) = hang (text "Expected" <+> kind_desc <> comma) 2 (text "but" <+> quotes (ppr thing) <+> text "has kind" <+> quotes (ppr act)) where kind_desc | isConstraintLikeKind exp = text "a constraint" | Just arg <- kindRep_maybe exp -- TYPE t0 , tcIsTyVarTy arg = sdocOption sdocPrintExplicitRuntimeReps $ \case True -> text "kind" <+> quotes (ppr exp) False -> text "a type" | otherwise = text "kind" <+> quotes (ppr exp) pprMismatchMsg ctxt (TypeEqMismatch { teq_mismatch_item = item , teq_mismatch_ty1 = ty1 -- These types are the actual types , teq_mismatch_ty2 = 
ty2 -- that don't match; may be swapped , teq_mismatch_expected = exp -- These are the context of , teq_mismatch_actual = act -- the mis-match , teq_mismatch_what = mb_thing , teq_mb_same_occ = mb_same_occ }) = addArising ct_loc $ pprWithInvisibleBitsWhen ppr_invis_bits msg $$ maybe empty pprSameOccInfo mb_same_occ where msg | Just (torc, rep) <- sORTKind_maybe exp = msg_for_exp_sort torc rep | Just nargs_msg <- num_args_msg , Right ea_msg <- mk_ea_msg ctxt (Just item) level orig = nargs_msg $$ pprMismatchMsg ctxt ea_msg | ea_looks_same ty1 ty2 exp act , Right ea_msg <- mk_ea_msg ctxt (Just item) level orig = pprMismatchMsg ctxt ea_msg | otherwise = bale_out_msg -- bale_out_msg: the mismatched types are /inside/ exp and act bale_out_msg = vcat errs where errs = case mk_ea_msg ctxt Nothing level orig of Left ea_info -> pprMismatchMsg ctxt mismatch_err : map (pprExpectedActualInfo ctxt) ea_info Right ea_err -> [ pprMismatchMsg ctxt mismatch_err , pprMismatchMsg ctxt ea_err ] mismatch_err = mkBasicMismatchMsg NoEA item ty1 ty2 -- 'expected' is (TYPE rep) or (CONSTRAINT rep) msg_for_exp_sort exp_torc exp_rep | Just (act_torc, act_rep) <- sORTKind_maybe act = -- (TYPE exp_rep) ~ (CONSTRAINT act_rep) etc msg_torc_torc act_torc act_rep | otherwise = -- (TYPE _) ~ Bool, etc maybe_num_args_msg $$ sep [ text "Expected a" <+> ppr_torc exp_torc <> comma , text "but" <+> case mb_thing of Nothing -> text "found something with kind" Just thing -> quotes (ppr thing) <+> text "has kind" , quotes (pprWithTYPE act) ] where msg_torc_torc act_torc act_rep | exp_torc == act_torc = msg_same_torc act_torc act_rep | otherwise = sep [ text "Expected a" <+> ppr_torc exp_torc <> comma , text "but" <+> case mb_thing of Nothing -> text "found a" Just thing -> quotes (ppr thing) <+> text "is a" <+> ppr_torc act_torc ] msg_same_torc act_torc act_rep | Just exp_doc <- describe_rep exp_rep , Just act_doc <- describe_rep act_rep = sep [ text "Expected" <+> exp_doc <+> ppr_torc exp_torc <> comma , 
text "but" <+> case mb_thing of Just thing -> quotes (ppr thing) <+> text "is" Nothing -> text "got" <+> act_doc <+> ppr_torc act_torc ] msg_same_torc _ _ = bale_out_msg ct_loc = errorItemCtLoc item orig = errorItemOrigin item level = ctLocTypeOrKind_maybe ct_loc `orElse` TypeLevel ppr_invis_bits = shouldPprWithInvisibleBits ty1 ty2 orig num_args_msg = case level of KindLevel | not (isMetaTyVarTy exp) && not (isMetaTyVarTy act) -- if one is a meta-tyvar, then it's possible that the user -- has asked for something impredicative, and we couldn't unify. -- Don't bother with counting arguments. -> let n_act = count_args act n_exp = count_args exp in case n_act - n_exp of n | n > 0 -- we don't know how many args there are, so don't -- recommend removing args that aren't , Just thing <- mb_thing -> Just $ pprTcSolverReportMsg ctxt (ExpectingMoreArguments n thing) _ -> Nothing _ -> Nothing maybe_num_args_msg = num_args_msg `orElse` empty count_args ty = count isVisiblePiTyBinder $ fst $ splitPiTys ty ppr_torc TypeLike = text "type"; ppr_torc ConstraintLike = text "constraint" describe_rep :: RuntimeRepType -> Maybe SDoc -- describe_rep IntRep = Just "an IntRep" -- describe_rep (BoxedRep Lifted) = Just "a lifted" -- etc describe_rep rep | Just (rr_tc, rr_args) <- splitRuntimeRep_maybe rep = case rr_args of [lev_ty] | rr_tc `hasKey` boxedRepDataConKey , Just lev <- levityType_maybe lev_ty -> case lev of Lifted -> Just (text "a lifted") Unlifted -> Just (text "a boxed unlifted") [] | rr_tc `hasKey` tupleRepDataConTyConKey -> Just (text "a zero-bit") | starts_with_vowel rr_occ -> Just (text "an" <+> text rr_occ) | otherwise -> Just (text "a" <+> text rr_occ) where rr_occ = occNameString (getOccName rr_tc) _ -> Nothing -- Must be TupleRep [r1..rn] | otherwise = Nothing starts_with_vowel (c:_) = c `elem` ("AEIOU" :: String) starts_with_vowel [] = False pprMismatchMsg ctxt (CouldNotDeduce useful_givens (item :| others) mb_extra) = main_msg $$ case supplementary of Left infos -> 
vcat (map (pprExpectedActualInfo ctxt) infos) Right other_msg -> pprMismatchMsg ctxt other_msg where main_msg | null useful_givens = addArising ct_loc (no_instance_msg <+> missing) | otherwise = vcat (addArising ct_loc (no_deduce_msg <+> missing) : pp_givens useful_givens) supplementary = case mb_extra of Nothing -> Left [] Just (CND_Extra level ty1 ty2) -> mk_supplementary_ea_msg ctxt level ty1 ty2 orig ct_loc = errorItemCtLoc item orig = ctLocOrigin ct_loc wanteds = map errorItemPred (item:others) no_instance_msg = case wanteds of [wanted] | Just (tc, _) <- splitTyConApp_maybe wanted -- Don't say "no instance" for a constraint such as "c" for a type variable c. , isClassTyCon tc -> text "No instance for" _ -> text "Could not solve:" no_deduce_msg = case wanteds of [_wanted] -> text "Could not deduce" _ -> text "Could not deduce:" missing = case wanteds of [wanted] -> quotes (ppr wanted) _ -> pprTheta wanteds -- | Whether to print explicit kinds (with @-fprint-explicit-kinds@) -- in an 'SDoc' when a type mismatch occurs to due invisible parts of the types. -- See Note [Showing invisible bits of types in error messages] -- -- This function first checks to see if the 'CtOrigin' argument is a -- 'TypeEqOrigin'. If so, it first checks whether the equality is a visible -- equality; if it's not, definitely print the kinds. Even if the equality is -- a visible equality, check the expected/actual types to see if the types -- have equal visible components. If the 'CtOrigin' is -- not a 'TypeEqOrigin', fall back on the actual mismatched types themselves. shouldPprWithInvisibleBits :: Type -> Type -> CtOrigin -> Bool shouldPprWithInvisibleBits _ty1 _ty2 (TypeEqOrigin { uo_actual = act , uo_expected = exp , uo_visible = vis }) | not vis = True -- See tests T15870, T16204c | otherwise = mayLookIdentical act exp -- See tests T9171, T9144. 
shouldPprWithInvisibleBits ty1 ty2 _ct = mayLookIdentical ty1 ty2 {- Note [Showing invisible bits of types in error messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It can be terribly confusing to get an error message like (#9171) Couldn't match expected type ‘GetParam Base (GetParam Base Int)’ with actual type ‘GetParam Base (GetParam Base Int)’ The reason may be that the kinds don't match up. Typically you'll get more useful information, but not when it's as a result of ambiguity. To mitigate this, when we find a type or kind mis-match: * See if normally-visible parts of the type would make the two types look different. This check is made by `GHC.Core.TyCo.Compare.mayLookIdentical` * If not, display the types with their normally-visible parts made visible, by setting flags in the `SDocContext": Specifically: - Display kind arguments: sdocPrintExplicitKinds - Don't default away runtime-reps: sdocPrintExplicitRuntimeReps, which controls `GHC.Iface.Type.hideNonStandardTypes` (NB: foralls are always printed by pprType, it turns out.) As a result the above error message would instead be displayed as: Couldn't match expected type ‘GetParam @* @k2 @* Base (GetParam @* @* @k2 Base Int)’ with actual type ‘GetParam @* @k20 @* Base (GetParam @* @* @k20 Base Int)’ Which makes it clearer that the culprit is the mismatch between `k2` and `k20`. Another example of what goes wrong without this: #24553. -} {- ********************************************************************* * * Displaying potential instances * * **********************************************************************-} -- | Directly display the given matching and unifying instances, -- with a header for each: `Matching instances`/`Potentially matching instances`. 
pprPotentialInstances :: (ClsInst -> SDoc) -> PotentialInstances -> SDoc
pprPotentialInstances pp_inst (PotentialInstances { matches, unifiers }) =
  pp_section (text "Matching instance") matches
  $$ pp_section (text "Potentially matching instance") unifiers
  where
    -- One pluralised header line ending in a colon, followed by the
    -- instances indented underneath; collapses to 'empty' when the
    -- instance list is empty.
    pp_section herald insts =
      ppWhen (not (null insts)) $
        (herald <> plural insts <> colon)
        $$ nest 2 (vcat (map pp_inst insts))

-- | Display a summary of available instances, omitting those involving
-- out-of-scope types, in order to explain why we couldn't solve a particular
-- constraint, e.g. due to instance overlap or out-of-scope types.
--
-- To directly display a collection of matching/unifying instances,
-- use 'pprPotentialInstances'.
potentialInstancesErrMsg :: PotentialInstances -> SDoc
-- See Note [Displaying potential instances]
potentialInstancesErrMsg potentials =
  sdocOption sdocPrintPotentialInstances $ \print_all ->
    getPprStyle $ \sty ->
      potentials_msg_with_options potentials print_all sty

-- | Display a summary of available instances, omitting out-of-scope ones.
--
-- Use 'potentialInstancesErrMsg' to automatically set the pretty-printing
-- options.
potentials_msg_with_options
  :: PotentialInstances
  -> Bool -- ^ Whether to print /all/ potential instances
  -> PprStyle
  -> SDoc
potentials_msg_with_options
  (PotentialInstances { matches, unifiers })
  show_all_potentials sty
  -- Nothing to show at all: stay silent.
  | null matches && null unifiers
  = empty
  -- Everything we would show involves out-of-scope types:
  -- summarise them and point at the flag that reveals the full list.
  | null show_these_matches && null show_these_unifiers
  = vcat [ not_in_scope_msg empty
         , flag_hint ]
  | otherwise
  = vcat [ pprPotentialInstances
            pprInstance -- print instance + location info
            (PotentialInstances { matches  = show_these_matches
                                , unifiers = show_these_unifiers })
         , overlapping_but_not_more_specific_msg sorted_matches
         , nest 2 $ vcat
           [ ppWhen (n_in_scope_hidden > 0) $
             text "...plus"
               <+> speakNOf n_in_scope_hidden (text "other")
           , ppWhen (not_in_scopes > 0) $
              not_in_scope_msg (text "...plus")
           , flag_hint ] ]
  where
    -- Caps on how many in-scope matches/unifiers we display unless
    -- 'show_all_potentials' is set.  See Note [Displaying potential instances].
    n_show_matches, n_show_unifiers :: Int
    n_show_matches  = 3
    n_show_unifiers = 2

    (in_scope_matches, not_in_scope_matches) = partition inst_in_scope matches
    (in_scope_unifiers, not_in_scope_unifiers) = partition inst_in_scope unifiers
    sorted_matches = sortBy fuzzyClsInstCmp in_scope_matches
    sorted_unifiers = sortBy fuzzyClsInstCmp in_scope_unifiers
    (show_these_matches, show_these_unifiers)
       | show_all_potentials = (sorted_matches, sorted_unifiers)
       | otherwise           = (take n_show_matches  sorted_matches
                               ,take n_show_unifiers sorted_unifiers)
    -- How many in-scope instances were cut off by the caps above
    -- (reported as "...plus N other(s)").
    n_in_scope_hidden
      = length sorted_matches + length sorted_unifiers
      - length show_these_matches - length show_these_unifiers

    -- "in scope" means that all the type constructors
    -- are lexically in scope; these instances are likely
    -- to be more useful
    inst_in_scope :: ClsInst -> Bool
    inst_in_scope cls_inst = nameSetAll name_in_scope $
                             orphNamesOfTypes (is_tys cls_inst)

    name_in_scope name
      | pretendNameIsInScope name
      = True -- E.g. (->); see Note [pretendNameIsInScope] in GHC.Builtin.Names
      | Just mod <- nameModule_maybe name
      = qual_in_scope (qualName sty mod (nameOccName name))
      | otherwise
      = True

    -- A name that would print unqualified (or qualified by an in-scope
    -- module alias) counts as in scope for this purpose.
    qual_in_scope :: QualifyName -> Bool
    qual_in_scope NameUnqual    = True
    qual_in_scope (NameQual {}) = True
    qual_in_scope _             = False

    not_in_scopes :: Int
    not_in_scopes = length not_in_scope_matches + length not_in_scope_unifiers

    -- Summary line for instances mentioning out-of-scope types; the full
    -- list (headers only) is appended when 'show_all_potentials' is set.
    not_in_scope_msg herald =
      hang (herald <+> speakNOf not_in_scopes (text "instance")
                   <+> text "involving out-of-scope types")
        2 (ppWhen show_all_potentials $
            pprPotentialInstances
            pprInstanceHdr -- only print the header, not the instance location info
            (PotentialInstances { matches = not_in_scope_matches
                                , unifiers = not_in_scope_unifiers }))

    -- Mention -fprint-potential-instances only when something was elided.
    flag_hint = ppUnless (show_all_potentials
                          || (equalLength show_these_matches matches
                              && equalLength show_these_unifiers unifiers)) $
                text "(use -fprint-potential-instances to see them all)"

-- | Compute a message informing the user of any instances that are overlapped
-- but were not discarded because the instance overlapping them wasn't
-- strictly more specific.
overlapping_but_not_more_specific_msg :: [ClsInst] -> SDoc
overlapping_but_not_more_specific_msg insts
  -- Only print one example of "overlapping but not strictly more specific",
  -- to avoid information overload.
  | overlap : _ <- overlapping_but_not_more_specific
  = overlap_header $$ ppr_overlapping overlap
  | otherwise
  = empty
  where
    -- Singular/plural wording depending on how many offending pairs exist.
    overlap_header :: SDoc
    overlap_header
      | [_] <- overlapping_but_not_more_specific
      = text "An overlapping instance can only be chosen when it is strictly more specific."
      | otherwise
      = text "Overlapping instances can only be chosen when they are strictly more specific."
    overlapping_but_not_more_specific :: [(ClsInst, ClsInst)]
    overlapping_but_not_more_specific
      = nubOrdBy (comparing (is_dfun . fst))
        -- Pairs are grouped by class; only instances of the same class
        -- can overlap each other.
        [ (overlapper, overlappee)
        | these <- groupBy ((==) `on` is_cls_nm) insts
        -- Take all pairs of distinct instances...
        , one:others <- tails these -- if `these = [inst_1, inst_2, ...]`
        , other <- others           -- then we get pairs `(one, other) = (inst_i, inst_j)` with `i < j`
        -- ... such that one instance in the pair overlaps the other...
        , let mb_overlapping
                | hasOverlappingFlag (overlapMode $ is_flag one)
                || hasOverlappableFlag (overlapMode $ is_flag other)
                = [(one, other)]
                | hasOverlappingFlag (overlapMode $ is_flag other)
                || hasOverlappableFlag (overlapMode $ is_flag one)
                = [(other, one)]
                | otherwise
                = []
        , (overlapper, overlappee) <- mb_overlapping
        -- ... but the overlapper is not more specific than the overlappee.
        , not (overlapper `more_specific_than` overlappee)
        ]
    -- An instance is more specific when its head matches the other's,
    -- i.e. the other can be obtained by instantiation.
    more_specific_than :: ClsInst -> ClsInst -> Bool
    is1 `more_specific_than` is2 = isJust (tcMatchTys (is_tys is1) (is_tys is2))
    ppr_overlapping :: (ClsInst, ClsInst) -> SDoc
    ppr_overlapping (overlapper, overlappee) =
      text "The first instance that follows overlaps the second, but is not more specific than it:" $$
      nest 2 (vcat $ map pprInstanceHdr [overlapper, overlappee])

{- Note [Displaying potential instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When showing a list of instances for
 - overlapping instances (show ones that match)
 - no such instance (show ones that could match)
we want to give it a bit of structure.  Here's the plan

* Say that an instance is "in scope" if all of the
  type constructors it mentions are lexically in scope.
  These are the ones most likely to be useful to the programmer.

* Show at most n_show in-scope instances,
  and summarise the rest ("plus N others")

* Summarise the not-in-scope instances ("plus 4 not in scope")

* Add the flag -fshow-potential-instances which replaces the
  summary with the full list
-}

{- *********************************************************************
*                                                                      *
        Outputting additional solver report information
*                                                                      *
**********************************************************************-}

-- | Pretty-print an informational message, to accompany a 'TcSolverReportMsg'.
pprExpectedActualInfo :: SolverReportErrCtxt -> ExpectedActualInfo -> SDoc
pprExpectedActualInfo _ (ExpectedActual { ea_expected = exp, ea_actual = act }) =
  vcat
    [ text "Expected:" <+> ppr exp
    , text " Actual:" <+> ppr act ]
pprExpectedActualInfo _
  (ExpectedActualAfterTySynExpansion
    { ea_expanded_expected = exp
    , ea_expanded_actual   = act } ) =
  vcat
    [ text "Type synonyms expanded:"
    , text "Expected type:" <+> ppr exp
    , text " Actual type:" <+> ppr act ]

-- | Render a supplementary explanation of why a @Coercible@ constraint
-- could not be solved.
pprCoercibleMsg :: CoercibleMsg -> SDoc
pprCoercibleMsg (UnknownRoles ty) =
  note $ "We cannot know what roles the parameters to" <+>
         quotes (ppr ty) <+> "have;" $$
         "we must assume that the role is nominal"
pprCoercibleMsg (TyConIsAbstract tc) =
  note $ "The type constructor" <+> quotes (pprSourceTyCon tc) <+>
         "is abstract"
pprCoercibleMsg (OutOfScopeNewtypeConstructor tc dc) =
  hang (text "The data constructor" <+> quotes (ppr $ dataConName dc))
    2 (sep [ text "of newtype" <+> quotes (pprSourceTyCon tc)
           , text "is not in scope" ])

-- | Render the "When matching ..." context for a kind-mismatch arising while
-- matching two types; elides the second type when both print identically.
pprWhenMatching :: SolverReportErrCtxt -> WhenMatching -> SDoc
pprWhenMatching ctxt (WhenMatching cty1 cty2 sub_o mb_sub_t_or_k) =
  sdocOption sdocPrintExplicitCoercions $ \printExplicitCoercions ->
  if printExplicitCoercions
     || not (cty1 `pickyEqType` cty2)
  then vcat [ hang (text "When matching" <+> sub_whats)
                 2 (vcat [ ppr cty1 <+> dcolon <+> ppr (typeKind cty1)
                         , ppr cty2 <+> dcolon <+> ppr (typeKind cty2) ])
            , supplementary ]
  else text "When matching the kind of" <+> quotes (ppr cty1)
  where
    -- Default to the type level when the caller didn't say.
    sub_t_or_k = mb_sub_t_or_k `orElse` TypeLevel
    sub_whats  = text (levelString sub_t_or_k) <> char 's'
    supplementary =
      case mk_supplementary_ea_msg ctxt sub_t_or_k cty1 cty2 sub_o of
        Left infos -> vcat $ map (pprExpectedActualInfo ctxt) infos
        Right msg  -> pprMismatchMsg ctxt msg

-- | Describe the type variable(s) involved in an unsolved equality:
-- skolem provenance, interactive-debugger skolems, untouchability.
pprTyVarInfo :: SolverReportErrCtxt -> TyVarInfo -> SDoc
pprTyVarInfo ctxt
  (TyVarInfo { thisTyVar = tv1, otherTy = mb_tv2
             , thisTyVarIsUntouchable = mb_implic })
  = vcat [ mk_msg tv1
         , maybe empty (pprUntouchableVariable tv1) mb_implic
         , case mb_tv2 of { Nothing -> empty; Just tv2 -> mk_msg tv2 } ]
  where
    mk_msg tv = case tcTyVarDetails tv of
      SkolemTv sk_info _ _ -> pprSkols ctxt [(getSkolemInfo sk_info, [tv])]
      RuntimeUnk {} -> quotes (ppr tv) <+> text "is an interactive-debugger skolem"
      MetaTv {}     -> empty

-- | Report ambiguous type or kind variables (or unknown runtime types
-- in the interactive debugger).
pprAmbiguityInfo :: AmbiguityInfo -> SDoc
pprAmbiguityInfo (Ambiguity prepend_msg (ambig_kvs, ambig_tvs)) = msg
  where
    msg |  any isRuntimeUnkSkol ambig_kvs  -- See Note [Runtime skolems]
        || any isRuntimeUnkSkol ambig_tvs
        = vcat [ text "Cannot resolve unknown runtime type"
                 <> plural ambig_tvs <+> pprQuotedList ambig_tvs
               , text "Use :print or :force to determine these types"]
        | not (null ambig_tvs)
        = pp_ambig (text "type") ambig_tvs
        | otherwise
        = pp_ambig (text "kind") ambig_kvs

    pp_ambig what tkvs
      | prepend_msg -- "Ambiguous type variable 't0'"
      = text "Ambiguous" <+> what <+> text "variable"
        <> plural tkvs <+> pprQuotedList tkvs
      | otherwise -- "The type variable 't0' is ambiguous"
      = text "The" <+> what <+> text "variable" <> plural tkvs
        <+> pprQuotedList tkvs <+> isOrAre tkvs <+> text "ambiguous"
pprAmbiguityInfo (NonInjectiveTyFam tc) =
  note $ quotes (ppr tc) <+> text "is a non-injective type family"

-- | Point out two same-named entities, giving each one's definition site
-- (or defining module/package for imported things).
pprSameOccInfo :: SameOccInfo -> SDoc
pprSameOccInfo (SameOcc same_pkg n1 n2) =
  note (ppr_from same_pkg n1 $$ ppr_from same_pkg n2)
  where
    ppr_from same_pkg nm
      | isGoodSrcSpan loc
      = hang (quotes (ppr nm) <+> text "is defined at")
           2 (ppr loc)
      | otherwise  -- Imported things have an UnhelpfulSrcSpan
      = hang (quotes (ppr nm))
           2 (sep [ text "is defined in" <+> quotes (ppr (moduleName mod))
                  , ppUnless (same_pkg || pkg == mainUnit) $
                    nest 4 $ text "in package" <+> quotes (ppr pkg) ])
      where
        pkg = moduleUnit mod
        mod = nameModule nm
        loc = nameSrcSpan nm

{- *********************************************************************
*                                                                      *
        Outputting HoleError messages
*                                                                      *
**********************************************************************-}

pprHoleError :: SolverReportErrCtxt -> Hole -> HoleError -> SDoc
pprHoleError _
  (Hole { hole_ty, hole_occ = rdr })
  (OutOfScopeHole imp_errs _hints)
  = out_of_scope_msg $$ vcat (map ppr imp_errs)
  where
    herald
      | isDataOcc (rdrNameOcc rdr) = text "Data constructor not in scope:"
      | otherwise                  = text "Variable not in scope:"
    out_of_scope_msg -- Print v :: ty only if the type has structure
      | boring_type = hang herald 2 (ppr rdr)
      | otherwise   = hang herald 2 (pp_rdr_with_type rdr hole_ty)
    boring_type = isTyVarTy hole_ty
pprHoleError ctxt
  (Hole { hole_ty, hole_occ})
  (HoleError sort other_tvs hole_skol_info) =
  vcat [ hole_msg
       , tyvars_msg
       , case sort of { ExprHole {} -> expr_hole_hint; _ -> type_hole_hint } ]
  where
    hole_msg = case sort of
      ExprHole {} ->
        hang (text "Found hole:")
           2 (pp_rdr_with_type hole_occ hole_ty)
      TypeHole ->
        hang (text "Found type wildcard" <+> quotes (ppr hole_occ))
           2 (text "standing for" <+> quotes pp_hole_type_with_kind)
      ConstraintHole ->
        hang (text "Found extra-constraints wildcard standing for")
           2 (quotes $ pprType hole_ty)  -- always kind Constraint

    hole_kind = typeKind hole_ty

    pp_hole_type_with_kind
      | isLiftedTypeKind hole_kind
        || isCoVarType hole_ty -- Don't print the kind of unlifted
                               -- equalities (#15039)
      = pprType hole_ty
      | otherwise
      = pprType hole_ty <+> dcolon <+> pprKind hole_kind

    tyvars = tyCoVarsOfTypeList hole_ty
    tyvars_msg = ppUnless (null tyvars) $
                 text "Where:" <+> (vcat (map loc_msg other_tvs)
                                    $$ pprSkols ctxt hole_skol_info)
                      -- Coercion variables can be free in the
                      -- hole, via kind casts
    expr_hole_hint -- Give hint for, say,   f x = _x
      | lengthFS (occNameFS (rdrNameOcc hole_occ)) > 1  -- Don't give this hint for plain "_"
      = text "Or perhaps" <+> quotes (ppr hole_occ)
        <+> text "is mis-spelled, or not in scope"
      | otherwise
      = empty

    type_hole_hint
      | ErrorWithoutFlag <- cec_type_holes ctxt
      = text "To use the inferred type, enable PartialTypeSignatures"
      | otherwise
      = empty

    loc_msg tv
      | isTyVar tv
      = case tcTyVarDetails tv of
          MetaTv {} -> quotes (ppr tv) <+> text "is an ambiguous type variable"
          _         -> empty  -- Skolems dealt with already
      | otherwise  -- A coercion variable can be free in the hole type
      = ppWhenOption sdocPrintExplicitCoercions $
        quotes (ppr tv) <+> text "is a coercion variable"

-- | Print @occ :: ty@, hanging the type under the occurrence.
pp_rdr_with_type :: RdrName -> Type -> SDoc
pp_rdr_with_type occ hole_ty =
  hang (pprPrefixOcc occ) 2 (dcolon <+> pprType hole_ty)

{- *********************************************************************
*                                                                      *
        Outputting ScopeError messages
*                                                                      *
**********************************************************************-}

-- | Render the body of a not-in-scope error for the given 'RdrName'.
pprScopeError :: RdrName -> NotInScopeError -> SDoc
pprScopeError rdr_name scope_err =
  case scope_err of
    NotInScope ->
      hang (text "Not in scope:")
        2 (what <+> quotes (ppr rdr_name))
    NotARecordField ->
      hang (text "Not in scope:")
        2 (text "record field" <+> quotes (ppr rdr_name))
    NoExactName name ->
      text "The Name" <+> quotes (ppr name) <+> text "is not in scope."
    SameName gres ->
      assertPpr (length gres >= 2)
                (text "pprScopeError SameName: fewer than 2 elements"
                 $$ nest 2 (ppr gres)) $
      hang (text "Same Name in multiple name-spaces:")
        2 (vcat (map pp_one sorted_names))
      where
        sorted_names = sortBy (leftmost_smallest `on` nameSrcSpan)
                              (map greName gres)
        pp_one name =
          hang (pprNameSpace (occNameSpace (getOccName name))
                <+> quotes (ppr name) <> comma)
            2 (text "declared at:" <+> ppr (nameSrcLoc name))
    MissingBinding thing _ ->
      sep [ text "The" <+> thing
               <+> text "for" <+> quotes (ppr rdr_name)
          , nest 2 $ text "lacks an accompanying binding" ]
    NoTopLevelBinding ->
      hang (text "No top-level binding for")
        2 (what <+> quotes (ppr rdr_name) <+> text "in this module")
    UnknownSubordinate doc ->
      quotes (ppr rdr_name) <+> text "is not a (visible)" <+> doc
    NotInScopeTc env ->
      vcat[text "GHC internal error:" <+> quotes (ppr rdr_name) <+>
           text "is not in scope during type checking, but it passed the renamer",
           text "tcl_env of environment:" <+> ppr env]
  where
    what = pprNonVarNameSpace (occNameSpace (rdrNameOcc rdr_name))

-- | Hints to attach to a 'NotInScopeError'.
scopeErrorHints :: NotInScopeError -> [GhcHint]
scopeErrorHints scope_err =
  case scope_err
    of NotInScope             -> noHints
       NotARecordField        -> noHints
       NoExactName {}         -> [SuggestDumpSlices]
       SameName {}            -> [SuggestDumpSlices]
       MissingBinding _ hints -> hints
       NoTopLevelBinding      -> noHints
       UnknownSubordinate {}  -> noHints
       NotInScopeTc _         -> noHints

-- | Hints to attach to a 'TcSolverReportMsg'.
tcSolverReportMsgHints :: SolverReportErrCtxt -> TcSolverReportMsg -> [GhcHint]
tcSolverReportMsgHints ctxt = \case
  BadTelescope {}          -> noHints
  UserTypeError {}         -> noHints
  UnsatisfiableError {}    -> noHints
  ReportHoleError hole err -> holeErrorHints hole err
  CannotUnifyVariable mismatch_msg rea
    -> mismatchMsgHints ctxt mismatch_msg ++ cannotUnifyVariableHints rea
  Mismatch { mismatchMsg = mismatch_msg }
    -> mismatchMsgHints ctxt mismatch_msg
  FixedRuntimeRepError {}       -> noHints
  BlockedEquality {}            -> noHints
  ExpectingMoreArguments {}     -> noHints
  UnboundImplicitParams {}      -> noHints
  AmbiguityPreventsSolvingCt {} -> noHints
  CannotResolveInstance {}      -> noHints
  OverlappingInstances {}       -> noHints
  UnsafeOverlap {}              -> noHints

-- | Hints for a mismatch message: suggest a type signature when the
-- mismatch involves an inferred skolem (see 'suggestAddSig').
mismatchMsgHints :: SolverReportErrCtxt -> MismatchMsg -> [GhcHint]
mismatchMsgHints ctxt msg =
  maybeToList [ hint | (exp,act) <- mismatchMsg_ExpectedActuals msg
                     , hint <- suggestAddSig ctxt exp act ]

-- | Extract the expected/actual type pair from a mismatch message, if any.
mismatchMsg_ExpectedActuals :: MismatchMsg -> Maybe (Type, Type)
mismatchMsg_ExpectedActuals = \case
  BasicMismatch { mismatch_ty1 = exp, mismatch_ty2 = act } ->
    Just (exp, act)
  KindMismatch { kmismatch_expected = exp, kmismatch_actual = act } ->
    Just (exp, act)
  TypeEqMismatch { teq_mismatch_expected = exp, teq_mismatch_actual = act } ->
    Just (exp,act)
  CouldNotDeduce { cnd_extra = cnd_extra }
    | Just (CND_Extra _ exp act) <- cnd_extra
    -> Just (exp, act)
    | otherwise
    -> Nothing

-- | Hints to attach to a 'HoleError'.
holeErrorHints :: Hole -> HoleError -> [GhcHint]
holeErrorHints _hole = \case
  OutOfScopeHole _ hints -> hints
  HoleError {}           -> noHints

-- | Hints to attach to a 'CannotUnifyVariableReason' (currently none).
cannotUnifyVariableHints :: CannotUnifyVariableReason -> [GhcHint]
cannotUnifyVariableHints = \case
  CannotUnifyWithPolytype {} -> noHints
  OccursCheck {}             -> noHints
  SkolemEscape {}            -> noHints
  DifferentTyVars {}         -> noHints
  RepresentationalEq {}      -> noHints

suggestAddSig :: SolverReportErrCtxt -> TcType -> TcType -> Maybe GhcHint
-- See Note [Suggest adding a type signature]
suggestAddSig ctxt ty1 _ty2
  | bndr : bndrs <- inferred_bndrs
  = Just $ SuggestAddTypeSignatures $ NamedBindings (bndr :| bndrs)
  | otherwise
  = Nothing
  where
    inferred_bndrs =
      case getTyVar_maybe ty1 of
        Just tv | isSkolemTyVar tv -> find (cec_encl ctxt) False tv
        _                          -> []

    -- 'find' returns the binders of an InferSkol for 'tv',
    -- provided there is an intervening implication with
    -- ic_given_eqs /= NoGivenEqs (i.e. a GADT match)
    find [] _ _ = []
    find (implic:implics) seen_eqs tv
       | tv `elem` ic_skols implic
       , InferSkol prs <- ic_info implic
       , seen_eqs
       = map fst prs
       | otherwise
       = find implics (seen_eqs || ic_given_eqs implic /= NoGivenEqs) tv

{- Note [Suggest adding a type signature]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The OutsideIn algorithm rejects GADT programs that don't have a principal
type, and indeed some that do.  Example:
   data T a where
     MkT :: Int -> T Int

   f (MkT n) = n

Does this have type f :: T a -> a, or f :: T a -> Int?
The error that shows up tends to be an attempt to unify an
untouchable type variable.  So suggestAddSig sees if the offending
type variable is bound by an *inferred* signature, and suggests
adding a declared signature instead.

More specifically, we suggest adding a type sig if we have p ~ ty, and
p is a skolem bound by an InferSkol.  Those skolems were created from
unification variables in simplifyInfer.  Why didn't we unify?  It must
have been because of an intervening GADT or existential, making it
untouchable.  Either way, a type signature would help.  For GADTs, it
might make it typeable; for existentials the attempt to write a
signature will fail -- or at least will produce a better error message
next time

This initially came up in #8968, concerning pattern synonyms.
-}

{- *********************************************************************
*                                                                      *
        Outputting ImportError messages
*                                                                      *
**********************************************************************-}

instance Outputable ImportError where
  ppr err =
    note $ case err of
      MissingModule mod_name ->
        "No module named" <+> quoted mod_name <+> "is imported"
      ModulesDoNotExport mods occ_name
        | mod NE.:| [] <- mods
        -> "The module" <+> quoted mod <+> "does not export" <+> quoted occ_name
        | otherwise
        -> "Neither" <+> quotedListWithNor (map ppr $ NE.toList mods)
            <+> "export" <+> quoted occ_name
    where
      quoted :: Outputable a => a -> SDoc
      quoted = quotes . ppr

{- *********************************************************************
*                                                                      *
        Suggested fixes for implication constraints
*                                                                      *
**********************************************************************-}

-- TODO: these functions should use GhcHint instead.

-- | Render "Possible fix:" followed by the list of alternatives,
-- separated by "or".
show_fixes :: [SDoc] -> SDoc
show_fixes []     = empty
show_fixes (f:fs) = sep [ text "Possible fix:"
                        , nest 2 (vcat (f : map (text "or" <+>) fs))]

-- | Suggest adding the unsolved predicate to the context of a plausible
-- enclosing binding (see 'usefulContext' for which bindings qualify).
ctxtFixes :: Bool -> PredType -> [Implication] -> [SDoc]
ctxtFixes has_ambig_tvs pred implics
  | not has_ambig_tvs
  , isTyVarClassPred pred   -- Don't suggest adding (Eq T) to the context, say
  , (skol:skols) <- usefulContext implics pred
  , let what | null skols
             , SigSkol (PatSynCtxt {}) _ _ <- skol
             = text "\"required\""
             | otherwise
             = empty
  = [sep [ text "add" <+> pprParendType pred
           <+> text "to the" <+> what <+> text "context of"
         , nest 2 $ ppr_skol skol $$
                    vcat [ text "or" <+> ppr_skol skol
                         | skol <- skols ] ] ]
  | otherwise = []
  where
    ppr_skol (PatSkol (RealDataCon dc) _) = text "the data constructor" <+> quotes (ppr dc)
    ppr_skol (PatSkol (PatSynCon ps)   _) = text "the pattern synonym"  <+> quotes (ppr ps)
    ppr_skol skol_info                    = ppr skol_info

usefulContext :: [Implication] -> PredType -> [SkolemInfoAnon]
-- usefulContext picks out the implications whose context
-- the programmer might plausibly augment to solve 'pred'
usefulContext implics pred
  = go implics
  where
    pred_tvs = tyCoVarsOfType pred
    go [] = []
    go (ic : ics)
       | implausible ic = rest
       | otherwise      = ic_info ic : rest
       where
          -- Stop when the context binds a variable free in the predicate
          rest | any (`elemVarSet` pred_tvs) (ic_skols ic) = []
               | otherwise                                 = go ics

    implausible ic
      | null (ic_skols ic)            = True
      | implausible_info (ic_info ic) = True
      | otherwise                     = False

    implausible_info (SigSkol (InfSigCtxt {}) _ _) = True
    implausible_info _                             = False
    -- Do not suggest adding constraints to an *inferred* type signature

-- | Render the "from the context: ... bound by ..." lines for the
-- givens of the enclosing implications.
pp_givens :: [Implication] -> [SDoc]
pp_givens givens =
  case givens of
    []     -> []
    (g:gs) -> ppr_given (text "from the context:") g
              : map (ppr_given (text "or from:")) gs
  where
    ppr_given herald implic@(Implic { ic_given = gs, ic_info = skol_info })
      = hang (herald <+> pprEvVarTheta (mkMinimalBySCs evVarPred gs))
           -- See Note [Suppress redundant givens during error reporting]
           -- for why we use mkMinimalBySCs above.
           2 (sep [ text "bound by" <+> ppr skol_info
                  , text "at" <+> ppr (getCtLocEnvLoc (ic_env implic)) ])

{- *********************************************************************
*                                                                      *
        CtOrigin information
*                                                                      *
**********************************************************************-}

-- | "type" or "kind", for interpolating into messages.
levelString :: TypeOrKind -> String
levelString TypeLevel = "type"
levelString KindLevel = "kind"

pprArising :: CtLoc -> SDoc
-- Used for the main, top-level error message
-- We've done special processing for TypeEq, KindEq, givens
pprArising ct_loc
  | in_generated_code = empty  -- See Note ["Arising from" messages in generated code]
  | suppress_origin   = empty
  | otherwise         = pprCtOrigin orig
  where
    orig = ctLocOrigin ct_loc
    in_generated_code = ctLocEnvInGeneratedCode (ctLocEnv ct_loc)
    suppress_origin
      | isGivenOrigin orig = True
      | otherwise          = case orig of
          TypeEqOrigin {}         -> True -- We've done special processing
          KindEqOrigin {}         -> True -- for TypeEq, KindEq, givens
          AmbiguityCheckOrigin {} -> True -- The "In the ambiguity check" context
                                          -- is sufficient; more would be repetitive
          _ -> False

-- Add the "arising from..." part to a message
addArising :: CtLoc -> SDoc -> SDoc
addArising ct_loc msg = hang msg 2 (pprArising ct_loc)

pprWithArising :: [Ct] -> SDoc
-- Print something like
--   (Eq a) arising from a use of x at y
--   (Show a) arising from a use of p at q
-- Also return a location for the error message
-- Works for Wanted/Derived only
pprWithArising []
  = panic "pprWithArising"
pprWithArising (ct:cts)
  | null cts
  = addArising loc (pprTheta [ctPred ct])
  | otherwise
  = vcat (map ppr_one (ct:cts))
  where
    loc = ctLoc ct
    ppr_one ct' = hang (parens (pprType (ctPred ct')))
                     2 (pprCtLoc (ctLoc ct'))

{- Note ["Arising from" messages in generated code]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider code generated when we desugar code before typechecking;
see Note [Rebindable syntax and XXExprGhcRn].

In this code, constraints may be generated, but we don't want to say
"arising from a call of foo" if 'foo' doesn't appear in the users code.

We leave the actual CtOrigin untouched (partly because it is generated
in many, many places), but suppress the "Arising from" message for
constraints that originate in generated code.
-}

{- *********************************************************************
*                                                                      *
        SkolemInfo
*                                                                      *
**********************************************************************-}

-- | Tidy the payload of a 'SkolemInfo', preserving its unique.
tidySkolemInfo :: TidyEnv -> SkolemInfo -> SkolemInfo
tidySkolemInfo env (SkolemInfo u sk_anon) = SkolemInfo u (tidySkolemInfoAnon env sk_anon)

----------------
-- | Tidy the types stored inside a 'SkolemInfoAnon'; constructors with no
-- embedded types are returned unchanged.
tidySkolemInfoAnon :: TidyEnv -> SkolemInfoAnon -> SkolemInfoAnon
tidySkolemInfoAnon env (DerivSkol ty)         = DerivSkol (tidyType env ty)
tidySkolemInfoAnon env (SigSkol cx ty tv_prs) = tidySigSkol env cx ty tv_prs
tidySkolemInfoAnon env (InferSkol ids)        = InferSkol (mapSnd (tidyType env) ids)
tidySkolemInfoAnon env (UnifyForAllSkol ty)   = UnifyForAllSkol (tidyType env ty)
tidySkolemInfoAnon _   info                   = info

tidySigSkol :: TidyEnv -> UserTypeCtxt
            -> TcType -> [(Name,TcTyVar)] -> SkolemInfoAnon
-- We need to take special care when tidying SigSkol
-- See Note [SigSkol SkolemInfo] in "GHC.Tc.Types.Origin"
tidySigSkol env cx ty tv_prs
  = SigSkol cx (tidy_ty env ty) tv_prs'
  where
    tv_prs'  = mapSnd (tidyTyCoVarOcc env) tv_prs
    inst_env = mkNameEnv tv_prs'

    tidy_ty env (ForAllTy (Bndr tv vis) ty)
      = ForAllTy (Bndr tv' vis) (tidy_ty env' ty)
      where
        (env', tv') = tidy_tv_bndr env tv

    tidy_ty env ty@(FunTy { ft_mult = w, ft_arg = arg, ft_res = res })
      = -- Look under  c => t  and  t1 -> t2
        ty { ft_mult = tidy_ty env w
           , ft_arg  = tidyType env arg
           , ft_res  = tidy_ty env res }

    tidy_ty env ty = tidyType env ty

    tidy_tv_bndr :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
    tidy_tv_bndr env@(occ_env, subst) tv
      | Just tv' <- lookupNameEnv inst_env (tyVarName tv)
      = ((occ_env, extendVarEnv subst tv tv'), tv')
      | otherwise
      = tidyVarBndr env tv

-- | Describe where the given skolem variables were bound.
pprSkols :: SolverReportErrCtxt -> [(SkolemInfoAnon, [TcTyVar])] -> SDoc
pprSkols ctxt zonked_ty_vars
  = let tidy_ty_vars = map (bimap (tidySkolemInfoAnon (cec_tidy ctxt)) id) zonked_ty_vars
    in vcat (map pp_one tidy_ty_vars)
  where
    no_msg = text "No skolem info - we could not find the origin of the following variables" <+> ppr zonked_ty_vars
       $$ text "This should not happen, please report it as a bug following the instructions at:"
       $$ text "https://gitlab.haskell.org/ghc/ghc/wikis/report-a-bug"

    pp_one (UnkSkol cs, tvs)
      = vcat [ hang (pprQuotedList tvs)
                  2 (is_or_are tvs "a" "(rigid, skolem)")
             , nest 2 (text "of unknown origin")
             , nest 2 (text "bound at" <+> ppr (skolsSpan tvs))
             , no_msg
             , prettyCallStackDoc cs
             ]
    pp_one (RuntimeUnkSkol, tvs)
      = hang (pprQuotedList tvs)
           2 (is_or_are tvs "an" "unknown runtime")
    pp_one (skol_info, tvs)
      = vcat [ hang (pprQuotedList tvs)
                  2 (is_or_are tvs "a" "rigid" <+> text "bound by")
             , nest 2 (pprSkolInfo skol_info)
             , nest 2 (text "at" <+> ppr (skolsSpan tvs))
             ]

    is_or_are [_] article adjective = text "is" <+> text article <+> text adjective
                                      <+> text "type variable"
    is_or_are _   _       adjective = text "are" <+> text adjective
                                      <+> text "type variables"

-- | The combined source span of the given skolems.
-- NOTE(review): 'foldr1' presumes the list is non-empty — confirm callers
-- never pass @[]@.
skolsSpan :: [TcTyVar] -> SrcSpan
skolsSpan skol_tvs = foldr1 combineSrcSpans (map getSrcSpan skol_tvs)

{- *********************************************************************
*                                                                      *
        Utilities for expected/actual messages
*                                                                      *
**********************************************************************-}

-- | Build a supplementary expected/actual message, unless it would merely
-- repeat the types already shown in the main message.
mk_supplementary_ea_msg :: SolverReportErrCtxt -> TypeOrKind
                        -> Type -> Type -> CtOrigin
                        -> Either [ExpectedActualInfo] MismatchMsg
mk_supplementary_ea_msg ctxt level ty1 ty2 orig
  | TypeEqOrigin { uo_expected = exp, uo_actual = act } <- orig
  , not (ea_looks_same ty1 ty2 exp act)
  = mk_ea_msg ctxt Nothing level orig
  | otherwise
  = Left []

ea_looks_same :: Type -> Type -> Type -> Type -> Bool
-- True if the faulting types (ty1, ty2) look the same as
-- the expected/actual types (exp, act).
-- If so, we don't want to redundantly report the latter
ea_looks_same ty1 ty2 exp act
  = (act `looks_same` ty1 && exp `looks_same` ty2)
    || (exp `looks_same` ty1 && act `looks_same` ty2)
  where
    looks_same t1 t2 = t1 `pickyEqType` t2
                       || t1 `eqType` liftedTypeKind && t2 `eqType` liftedTypeKind
       -- pickyEqType is sensitive to synonyms, so only replies True
       -- when the types really look the same.  However,
       -- (TYPE 'LiftedRep) and Type both print the same way.

mk_ea_msg :: SolverReportErrCtxt -> Maybe ErrorItem -> TypeOrKind
          -> CtOrigin -> Either [ExpectedActualInfo] MismatchMsg
-- Constructs a "Couldn't match" message
-- The (Maybe ErrorItem) says whether this is the main top-level message (Just)
--     or a supplementary message (Nothing)
mk_ea_msg ctxt at_top level
  (TypeEqOrigin { uo_actual = act, uo_expected = exp, uo_thing = mb_thing })
  | Just thing <- mb_thing
  , KindLevel <- level
  = Right $ KindMismatch { kmismatch_what     = thing
                         , kmismatch_expected = exp
                         , kmismatch_actual   = act }
  | Just item <- at_top
  , let ea = EA $ if expanded_syns then Just ea_expanded else Nothing
        mismatch = mkBasicMismatchMsg ea item exp act
  = Right mismatch
  | otherwise
  = Left $
      if expanded_syns
      then [ea,ea_expanded]
      else [ea]
  where
    ea = ExpectedActual { ea_expected = exp, ea_actual = act }
    ea_expanded =
      ExpectedActualAfterTySynExpansion
        { ea_expanded_expected = expTy1
        , ea_expanded_actual   = expTy2 }
    -- Only include the synonym-expanded pair when expansion actually
    -- changed something and the user enabled it.
    expanded_syns = cec_expand_syns ctxt
                    && not (expTy1 `pickyEqType` exp && expTy2 `pickyEqType` act)
    (expTy1, expTy2) = expandSynonymsToMatch exp act
mk_ea_msg _ _ _ _ = Left []

{- Note [Expanding type synonyms to make types similar]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In type error messages, if -fprint-expanded-types is used, we want to expand
type synonyms to make expected and found types as similar as possible, but we
shouldn't expand types too much to make type messages even more verbose and
harder to understand.  The whole point here is to make the difference in
expected and found types clearer.

`expandSynonymsToMatch` does this, it takes two types, and expands type
synonyms only as much as necessary.  Given two types t1 and t2:

  * If they're already same, it just returns the types.

  * If they're in form `C1 t1_1 .. t1_n` and `C2 t2_1 .. t2_m` (C1 and C2 are
    type constructors), it expands C1 and C2 if they're different type
    synonyms.  Then it recursively does the same thing on expanded types.  If
    C1 and C2 are same, then it applies the same procedure to arguments of C1
    and arguments of C2 to make them as similar as possible.

    Most important thing here is to keep number of synonym expansions at
    minimum.  For example, if t1 is `T (T3, T5, Int)` and t2 is `T (T5, T3,
    Bool)` where T5 = T4, T4 = T3, ..., T1 = X, it returns `T (T3, T3, Int)`
    and `T (T3, T3, Bool)`.

  * Otherwise types don't have same shapes and so the difference is clearly
    visible.  It doesn't do any expansions and show these types.

Note that we only expand top-layer type synonyms.  Only when top-layer
constructors are the same we start expanding inner type synonyms.

Suppose top-layer type synonyms of t1 and t2 can expand N and M times,
respectively.  If their type-synonym-expanded forms will meet at some point
(i.e. will have same shapes according to `sameShapes` function), it's possible
to find where they meet in O(N+M) top-layer type synonym expansions and
O(min(N,M)) comparisons.  We first collect all the top-layer expansions of t1
and t2 in two lists, then drop the prefix of the longer list so that they have
same lengths.  Then we search through both lists in parallel, and return the
first pair of types that have same shapes.  Inner types of these two types with
same shapes are then expanded using the same algorithm.

In case they don't meet, we return the last pair of types in the lists, which
has top-layer type synonyms completely expanded.
(in this case the inner types are not expanded at all, as the current form
already shows the type error)
-}

-- | Expand type synonyms in given types only enough to make them as similar as
-- possible. Returned types are the same in terms of used type synonyms.
--
-- To expand all synonyms, see 'Type.expandTypeSynonyms'.
--
-- See `ExpandSynsFail` tests in tests testsuite/tests/typecheck/should_fail for
-- some examples of how this should work.
expandSynonymsToMatch :: Type -> Type -> (Type, Type)
expandSynonymsToMatch ty1 ty2 = (ty1_ret, ty2_ret)
  where
    (ty1_ret, ty2_ret) = go ty1 ty2

    -- Returns (type synonym expanded version of first type,
    --          type synonym expanded version of second type)
    go :: Type -> Type -> (Type, Type)
    go t1 t2
      | t1 `pickyEqType` t2 =
        -- Types are same, nothing to do
        (t1, t2)

    go (TyConApp tc1 tys1) (TyConApp tc2 tys2)
      | tc1 == tc2
      , tys1 `equalLength` tys2 =
        -- Type constructors are same. They may be synonyms, but we don't
        -- expand further. The lengths of tys1 and tys2 must be equal;
        -- for example, with type S a = a, we don't want
        -- to zip (S Monad Int) and (S Bool).
        let (tys1', tys2') =
              unzip (zipWithEqual "expandSynonymsToMatch" go tys1 tys2)
         in (TyConApp tc1 tys1', TyConApp tc2 tys2')

    go (AppTy t1_1 t1_2) (AppTy t2_1 t2_2) =
      let (t1_1', t2_1') = go t1_1 t2_1
          (t1_2', t2_2') = go t1_2 t2_2
       in (mkAppTy t1_1' t1_2', mkAppTy t2_1' t2_2')

    go ty1@(FunTy _ w1 t1_1 t1_2) ty2@(FunTy _ w2 t2_1 t2_2)
      | w1 `eqType` w2 =
      let (t1_1', t2_1') = go t1_1 t2_1
          (t1_2', t2_2') = go t1_2 t2_2
       in ( ty1 { ft_arg = t1_1', ft_res = t1_2' }
          , ty2 { ft_arg = t2_1', ft_res = t2_2' })

    go (ForAllTy b1 t1) (ForAllTy b2 t2) =
      -- NOTE: We may have a bug here, but we just can't reproduce it easily.
      -- See D1016 comments for details and our attempts at producing a test
      -- case. Short version: We probably need RnEnv2 to really get this right.
      let (t1', t2') = go t1 t2
       in (ForAllTy b1 t1', ForAllTy b2 t2')

    go (CastTy ty1 _) ty2 = go ty1 ty2
    go ty1 (CastTy ty2 _) = go ty1 ty2

    go t1 t2 =
      -- See Note [Expanding type synonyms to make types similar] for how this
      -- works
      let
        t1_exp_tys = t1 : tyExpansions t1
        t2_exp_tys = t2 : tyExpansions t2
        t1_exps    = length t1_exp_tys
        t2_exps    = length t2_exp_tys
        dif        = abs (t1_exps - t2_exps)
      in
        followExpansions $
          zipEqual "expandSynonymsToMatch.go"
            (if t1_exps > t2_exps then drop dif t1_exp_tys else t1_exp_tys)
            (if t2_exps > t1_exps then drop dif t2_exp_tys else t2_exp_tys)

    -- Expand the top layer type synonyms repeatedly, collect expansions in a
    -- list. The list does not include the original type.
    --
    -- Example, if you have:
    --
    --   type T10 = T9
    --   type T9  = T8
    --   ...
    --   type T0  = Int
    --
    -- `tyExpansions T10` returns [T9, T8, T7, ..., Int]
    --
    -- This only expands the top layer, so if you have:
    --
    --   type M a = Maybe a
    --
    -- `tyExpansions (M T10)` returns [Maybe T10] (T10 is not expanded)
    tyExpansions :: Type -> [Type]
    tyExpansions = unfoldr (\t -> (\x -> (x, x)) `fmap` coreView t)

    -- Drop the type pairs until types in a pair look alike (i.e. the outer
    -- constructors are the same).
    followExpansions :: [(Type, Type)] -> (Type, Type)
    followExpansions [] = pprPanic "followExpansions" empty
    followExpansions [(t1, t2)]
      | sameShapes t1 t2 = go t1 t2 -- expand subtrees
      | otherwise        = (t1, t2) -- the difference is already visible
    followExpansions ((t1, t2) : tss)
      -- Traverse subtrees when the outer shapes are the same
      | sameShapes t1 t2 = go t1 t2
      -- Otherwise follow the expansions until they look alike
      | otherwise = followExpansions tss

    sameShapes :: Type -> Type -> Bool
    sameShapes AppTy{}          AppTy{}          = True
    sameShapes (TyConApp tc1 _) (TyConApp tc2 _) = tc1 == tc2
    sameShapes (FunTy {})       (FunTy {})       = True
    sameShapes (ForAllTy {})    (ForAllTy {})    = True
    sameShapes (CastTy ty1 _)   ty2              = sameShapes ty1 ty2
    sameShapes ty1              (CastTy ty2 _)   = sameShapes ty1 ty2
    sameShapes _                _                = False

{- ************************************************************************
*                                                                         *
\subsection{Contexts for renaming errors}
*                                                                         *
************************************************************************
-}

-- | Prefix a doc-context description with "In".
inHsDocContext :: HsDocContext -> SDoc
inHsDocContext ctxt = text "In" <+> pprHsDocContext ctxt

-- | Describe the syntactic context in which a renaming error occurred.
pprHsDocContext :: HsDocContext -> SDoc
pprHsDocContext (GenericCtx doc)             = doc
pprHsDocContext (TypeSigCtx doc)             = text "the type signature for" <+> doc
pprHsDocContext (StandaloneKindSigCtx doc)   = text "the standalone kind signature for" <+> doc
pprHsDocContext PatCtx                       = text "a pattern type-signature"
pprHsDocContext SpecInstSigCtx               = text "a SPECIALISE instance pragma"
pprHsDocContext DefaultDeclCtx               = text "a `default' declaration"
pprHsDocContext DerivDeclCtx                 = text "a deriving declaration"
pprHsDocContext (RuleCtx name)               = text "the rewrite rule" <+> doubleQuotes (ftext name)
pprHsDocContext (TyDataCtx tycon)            = text "the data type declaration for" <+> quotes (ppr tycon)
pprHsDocContext (FamPatCtx tycon)            = text "a type pattern of family instance for" <+> quotes (ppr tycon)
pprHsDocContext (TySynCtx name)              = text "the declaration for type synonym" <+> quotes (ppr name)
pprHsDocContext
                (TyFamilyCtx name)           = text "the declaration for type family" <+> quotes (ppr name)
pprHsDocContext (ClassDeclCtx name)          = text "the declaration for class" <+> quotes (ppr name)
pprHsDocContext ExprWithTySigCtx             = text "an expression type signature"
pprHsDocContext TypBrCtx                     = text "a Template-Haskell quoted type"
pprHsDocContext HsTypeCtx                    = text "a type argument"
pprHsDocContext HsTypePatCtx                 = text "a type argument in a pattern"
pprHsDocContext GHCiCtx                      = text "GHCi input"
pprHsDocContext (SpliceTypeCtx hs_ty)        = text "the spliced type" <+> quotes (ppr hs_ty)
pprHsDocContext ClassInstanceCtx             = text "GHC.Tc.Gen.Splice.reifyInstances"
pprHsDocContext (ForeignDeclCtx name)        = text "the foreign declaration for" <+> quotes (ppr name)
pprHsDocContext (ConDeclCtx [name])          = text "the definition of data constructor" <+> quotes (ppr name)
pprHsDocContext (ConDeclCtx names)           = text "the definition of data constructors" <+> interpp'SP names

-- | Explain why converting a Template Haskell AST to a GHC AST failed.
pprConversionFailReason :: ConversionFailReason -> SDoc
pprConversionFailReason = \case
  IllegalOccName ctxt_ns occ ->
    text "Illegal" <+> pprNameSpace ctxt_ns
    <+> text "name:" <+> quotes (text occ)
  SumAltArityExceeded alt arity ->
    text "Sum alternative" <+> int alt
    <+> text "exceeds its arity," <+> int arity
  IllegalSumAlt alt ->
    vcat [ text "Illegal sum alternative:" <+> int alt
         , nest 2 $ text "Sum alternatives must start from 1" ]
  IllegalSumArity arity ->
    vcat [ text "Illegal sum arity:" <+> int arity
         , nest 2 $ text "Sums must have an arity of at least 2" ]
  MalformedType typeOrKind ty ->
    text "Malformed " <> text ty_str <+> text (show ty)
    where
      ty_str = case typeOrKind of
        TypeLevel -> "type"
        KindLevel -> "kind"
  IllegalLastStatement do_or_lc stmt ->
    vcat [ text "Illegal last statement of" <+> pprAHsDoFlavour do_or_lc <> colon
         , nest 2 $ ppr stmt
         , text "(It should be an expression.)" ]
  KindSigsOnlyAllowedOnGADTs ->
    text "Kind signatures are only allowed on GADTs"
  IllegalDeclaration declDescr bad_decls ->
    sep [ text "Illegal" <+> what <+> text "in" <+> descrDoc <> colon
        , nest 2 bads ]
    where
      (what, bads) = case bad_decls of
        IllegalDecls (NE.toList -> decls) ->
          (text "declaration" <> plural decls, vcat $ map ppr decls)
        IllegalFamDecls (NE.toList -> decls) ->
          ( text "family declaration" <> plural decls, vcat $ map ppr decls)
      descrDoc = text $ case declDescr of
        InstanceDecl  -> "an instance declaration"
        WhereClause   -> "a where clause"
        LetBinding    -> "a let expression"
        LetExpression -> "a let expression"
        ClssDecl      -> "a class declaration"
  CannotMixGADTConsWith98Cons ->
    text "Cannot mix GADT constructors with Haskell 98"
    <+> text "constructors"
  EmptyStmtListInDoBlock ->
    text "Empty stmt list in do-block"
  NonVarInInfixExpr ->
    text "Non-variable expression is not allowed in an infix expression"
  MultiWayIfWithoutAlts ->
    text "Multi-way if-expression with no alternatives"
  CasesExprWithoutAlts ->
    text "\\cases expression with no alternatives"
  ImplicitParamsWithOtherBinds ->
    text "Implicit parameters mixed with other bindings"
  InvalidCCallImpent from ->
    text (show from) <+> text "is not a valid ccall impent"
  RecGadtNoCons ->
    quotes (text "RecGadtC") <+> text "must have at least one constructor name"
  GadtNoCons ->
    quotes (text "GadtC") <+> text "must have at least one constructor name"
  InvalidTypeInstanceHeader tys ->
    text "Invalid type instance header:" <+> text (show tys)
  InvalidTyFamInstLHS lhs ->
    text "Invalid type family instance LHS:" <+> text (show lhs)
  InvalidImplicitParamBinding ->
    text "Implicit parameter binding only allowed in let or where"
  DefaultDataInstDecl adts ->
    (text "Default data instance declarations"
     <+> text "are not allowed:")
    $$ ppr adts
  FunBindLacksEquations nm ->
    text "Function binding for"
    <+> quotes (text (TH.pprint nm))
    <+> text "has no equations"

-- | Report a 'TcTyThing' used in the wrong syntactic role.
pprTyThingUsedWrong :: WrongThingSort -> TcTyThing -> Name -> SDoc
pprTyThingUsedWrong sort thing name =
  pprTcTyThingCategory thing <+> quotes (ppr name) <+>
  text "used as a" <+> pprWrongThingSort sort

-- | The human-readable name of a 'WrongThingSort'.
pprWrongThingSort :: WrongThingSort -> SDoc
pprWrongThingSort = text . \case
  WrongThingType    -> "type"
  WrongThingDataCon -> "data constructor"
  WrongThingPatSyn  -> "pattern synonym"
  WrongThingConLike -> "constructor-like thing"
  WrongThingClass   -> "class"
  WrongThingTyCon   -> "type constructor"
  WrongThingAxiom   -> "axiom"

-- | Describe what a Template Haskell stage check was checking.
pprStageCheckReason :: StageCheckReason -> SDoc
pprStageCheckReason = \case
  StageCheckInstance _ t ->
    text "instance for" <+> quotes (ppr t)
  StageCheckSplice t ->
    quotes (ppr t)

-- | Describe the context in which a type variable could not be inferred.
pprUninferrableTyVarCtx :: UninferrableTyVarCtx -> SDoc
pprUninferrableTyVarCtx = \case
  UninfTyCtx_ClassContext theta ->
    sep [ text "the class context:", pprTheta theta ]
  UninfTyCtx_DataContext theta ->
    sep [ text "the datatype context:", pprTheta theta ]
  UninfTyCtx_ProvidedContext theta ->
    sep [ text "the provided context:" , pprTheta theta ]
  UninfTyCtx_TyFamRhs rhs_ty ->
    sep [ text "the type family equation right-hand side:" , ppr rhs_ty ]
  UninfTyCtx_TySynRhs rhs_ty ->
    sep [ text "the type synonym right-hand side:" , ppr rhs_ty ]
  UninfTyCtx_Sig exp_kind full_hs_ty ->
    hang (text "the kind" <+> ppr exp_kind) 2
         (text "of the type signature:" <+> ppr full_hs_ty)

-- | Explain why a pattern synonym right-hand side is invalid.
pprPatSynInvalidRhsReason :: PatSynInvalidRhsReason -> SDoc
pprPatSynInvalidRhsReason = \case
  PatSynNotInvertible p ->
    text "Pattern" <+> quotes (ppr p) <+> text "is not invertible"
  PatSynUnboundVar var ->
    quotes (ppr var) <+> text "is not bound by the LHS of the pattern synonym"

-- | Explain why a strictness/unpack annotation on a field is bad.
pprBadFieldAnnotationReason :: BadFieldAnnotationReason -> SDoc
pprBadFieldAnnotationReason = \case
  LazyFieldsDisabled ->
    text "Lazy field annotations (~) are disabled"
  UnpackWithoutStrictness ->
    text "UNPACK pragma lacks '!'"
  BackpackUnpackAbstractType ->
    text "Ignoring unusable UNPACK pragma"

pprSuperclassCycleDetail :: SuperclassCycleDetail -> SDoc
pprSuperclassCycleDetail = \case
  SCD_HeadTyVar pred ->
    hang (text "one of whose superclass constraints is headed by a type variable:")
       2 (quotes (ppr pred))
  SCD_HeadTyFam pred ->
    hang (text "one of whose superclass constraints is headed by a
type family:") 2 (quotes (ppr pred)) SCD_Superclass cls -> text "one of whose superclasses is" <+> quotes (ppr cls) pprRoleValidationFailedReason :: Role -> RoleValidationFailedReason -> SDoc pprRoleValidationFailedReason role = \case TyVarRoleMismatch tv role' -> text "type variable" <+> quotes (ppr tv) <+> text "cannot have role" <+> ppr role <+> text "because it was assigned role" <+> ppr role' TyVarMissingInEnv tv -> text "type variable" <+> quotes (ppr tv) <+> text "missing in environment" BadCoercionRole co -> text "coercion" <+> ppr co <+> text "has bad role" <+> ppr role pprDisabledClassExtension :: Class -> DisabledClassExtension -> SDoc pprDisabledClassExtension cls = \case MultiParamDisabled n -> text howMany <+> text "parameters for class" <+> quotes (ppr cls) where howMany | n == 0 = "No" | otherwise = "Too many" FunDepsDisabled -> text "Fundeps in class" <+> quotes (ppr cls) ConstrainedClassMethodsDisabled sel_id pred -> vcat [ hang (text "Constraint" <+> quotes (ppr pred) <+> text "in the type of" <+> quotes (ppr sel_id)) 2 (text "constrains only the class type variables")] pprImportLookup :: ImportLookupReason -> SDoc pprImportLookup = \case ImportLookupBad k iface decl_spec ie _ps -> let pprImpDeclSpec :: ModIface -> ImpDeclSpec -> SDoc pprImpDeclSpec iface decl_spec = quotes (ppr (moduleName $ is_mod decl_spec)) <+> case mi_boot iface of IsBoot -> text "(hi-boot interface)" NotBoot -> empty withContext msgs = hang (text "In the import of" <+> pprImpDeclSpec iface decl_spec <> colon) 2 (vcat msgs) in case k of BadImportNotExported _ -> vcat [ text "Module" <+> pprImpDeclSpec iface decl_spec <+> text "does not export" <+> quotes (ppr ie) <> dot ] BadImportAvailVar -> withContext [ text "an item called" <+> quotes val <+> text "is exported, but it is not a type." 
] where val_occ = rdrNameOcc $ ieName ie val = parenSymOcc val_occ (ppr val_occ) BadImportAvailTyCon {} -> withContext [ text "an item called" <+> quotes tycon <+> text "is exported, but it is a type." ] where tycon_occ = rdrNameOcc $ ieName ie tycon = parenSymOcc tycon_occ (ppr tycon_occ) BadImportNotExportedSubordinates ns -> withContext [ text "an item called" <+> quotes sub <+> text "is exported, but it does not export any children" , text "(constructors, class methods or field names) called" <+> pprWithCommas (quotes . ppr) ns <> dot ] where sub_occ = rdrNameOcc $ ieName ie sub = parenSymOcc sub_occ (ppr sub_occ) BadImportAvailDataCon dataType_occ -> withContext [ text "an item called" <+> quotes datacon , text "is exported, but it is a data constructor of" , quotes dataType <> dot ] where datacon_occ = rdrNameOcc $ ieName ie datacon = parenSymOcc datacon_occ (ppr datacon_occ) dataType = parenSymOcc dataType_occ (ppr dataType_occ) ImportLookupQualified rdr -> hang (text "Illegal qualified name in import item:") 2 (ppr rdr) ImportLookupIllegal -> text "Illegal import item" ImportLookupAmbiguous rdr gres -> hang (text "Ambiguous name" <+> quotes (ppr rdr) <+> text "in import item. It could refer to:") 2 (vcat (map (ppr . 
greOccName) gres)) pprUnusedImport :: ImportDecl GhcRn -> UnusedImportReason -> SDoc pprUnusedImport decl = \case UnusedImportNone -> vcat [ pp_herald <+> quotes pp_mod <+> text "is redundant" , nest 2 (text "except perhaps to import instances from" <+> quotes pp_mod) , text "To import instances alone, use:" <+> text "import" <+> pp_mod <> parens empty ] UnusedImportSome sort_unused -> sep [ pp_herald <+> quotes (pprWithCommas pp_unused sort_unused) , text "from module" <+> quotes pp_mod <+> text "is redundant"] where pp_mod = ppr (unLoc (ideclName decl)) pp_herald = text "The" <+> pp_qual <+> text "import of" pp_qual | isImportDeclQualified (ideclQualified decl) = text "qualified" | otherwise = empty pp_unused = \case UnusedImportNameRegular n -> pprNameUnqualified n UnusedImportNameRecField par fld_occ -> case par of ParentIs p -> pprNameUnqualified p <> parens (ppr fld_occ) NoParent -> ppr fld_occ pprUnusedName :: OccName -> UnusedNameProv -> SDoc pprUnusedName name reason = sep [ msg <> colon , nest 2 $ pprNonVarNameSpace (occNameSpace name) <+> quotes (ppr name)] where msg = case reason of UnusedNameTopDecl -> defined UnusedNameImported mod -> text "Imported from" <+> quotes (ppr mod) <+> text "but not used" UnusedNameTypePattern -> defined <+> text "on the right hand side" UnusedNameMatch -> defined UnusedNameLocalBind -> defined defined = text "Defined but not used" -- When printing the name, take care to qualify it in the same -- way as the provenance reported by pprNameProvenance, namely -- the head of 'gre_imp'. Otherwise we get confusing reports like -- Ambiguous occurrence ‘null’ -- It could refer to either ‘T15487a.null’, -- imported from ‘Prelude’ at T15487.hs:1:8-13 -- or ... 
-- See #15487 pprAmbiguousGreName :: GlobalRdrEnv -> GlobalRdrElt -> SDoc pprAmbiguousGreName gre_env gre | IAmRecField fld_info <- greInfo gre = sep [ text "the field" <+> quotes (ppr occ) <+> parent_info fld_info <> comma , pprNameProvenance gre ] | otherwise = sep [ quotes (pp_qual <> dot <> ppr occ) <> comma , pprNameProvenance gre ] where occ = greOccName gre parent_info fld_info = case first_con of PatSynName ps -> text "of pattern synonym" <+> quotes (ppr ps) DataConName {} -> case greParent gre of ParentIs par -- For a data family, only reporting the family TyCon can be -- unhelpful (see T23301). So we give a bit of additional -- info in that case. | Just par_gre <- lookupGRE_Name gre_env par , IAmTyCon tc_flav <- greInfo par_gre , OpenFamilyFlavour IAmData _ <- tc_flav -> vcat [ ppr_cons , text "in a data family instance of" <+> quotes (ppr par) ] | otherwise -> text "of record" <+> quotes (ppr par) NoParent -> ppr_cons where cons :: [ConLikeName] cons = nonDetEltsUniqSet $ recFieldCons fld_info first_con :: ConLikeName first_con = head cons ppr_cons :: SDoc ppr_cons = hsep [ text "belonging to data constructor" , quotes (ppr $ nameOccName $ conLikeName_Name first_con) , if length cons > 1 then parens (text "among others") else empty ] pp_qual | gre_lcl gre = ppr (nameModule $ greName gre) | Just imp <- headMaybe $ gre_imp gre -- This 'imp' is the one that -- pprNameProvenance chooses , ImpDeclSpec { is_as = mod } <- is_decl imp = ppr mod | otherwise = pprPanic "addNameClassErrRn" (ppr gre) -- Invariant: either 'lcl' is True or 'iss' is non-empty pprNonCanonicalDefinition :: LHsSigType GhcRn -> NonCanonicalDefinition -> SDoc pprNonCanonicalDefinition inst_ty = \case NonCanonicalMonoid sub -> case sub of NonCanonical_Sappend -> msg1 "(<>)" "mappend" NonCanonical_Mappend -> msg2 "mappend" "(<>)" NonCanonicalMonad sub -> case sub of NonCanonical_Pure -> msg1 "pure" "return" NonCanonical_ThenA -> msg1 "(*>)" "(>>)" NonCanonical_Return -> msg2 "return" "pure" 
NonCanonical_ThenM -> msg2 "(>>)" "(*>)" where msg1 :: String -> String -> SDoc msg1 lhs rhs = vcat [ text "Noncanonical" <+> quotes (text (lhs ++ " = " ++ rhs)) <+> text "definition detected" , inst ] msg2 :: String -> String -> SDoc msg2 lhs rhs = vcat [ text "Noncanonical" <+> quotes (text lhs) <+> text "definition detected" , inst , quotes (text lhs) <+> text "will eventually be removed in favour of" <+> quotes (text rhs) ] inst = instDeclCtxt1 inst_ty -- stolen from GHC.Tc.TyCl.Instance instDeclCtxt1 :: LHsSigType GhcRn -> SDoc instDeclCtxt1 hs_inst_ty = inst_decl_ctxt (ppr (getLHsInstDeclHead hs_inst_ty)) inst_decl_ctxt :: SDoc -> SDoc inst_decl_ctxt doc = hang (text "in the instance declaration for") 2 (quotes doc <> text ".") suggestNonCanonicalDefinition :: NonCanonicalDefinition -> [GhcHint] suggestNonCanonicalDefinition reason = [action doc] where action = case reason of NonCanonicalMonoid sub -> case sub of NonCanonical_Sappend -> move sappendName mappendName NonCanonical_Mappend -> remove mappendName sappendName NonCanonicalMonad sub -> case sub of NonCanonical_Pure -> move pureAName returnMName NonCanonical_ThenA -> move thenAName thenMName NonCanonical_Return -> remove returnMName pureAName NonCanonical_ThenM -> remove thenMName thenAName move = SuggestMoveNonCanonicalDefinition remove = SuggestRemoveNonCanonicalDefinition doc = case reason of NonCanonicalMonoid _ -> doc_monoid NonCanonicalMonad _ -> doc_monad doc_monoid = "https://gitlab.haskell.org/ghc/ghc/-/wikis/proposal/semigroup-monoid" doc_monad = "https://gitlab.haskell.org/ghc/ghc/-/wikis/proposal/monad-of-no-return" suggestDefaultDeclaration :: TyCon -> [Type] -> [[Type]] -> [GhcHint] suggestDefaultDeclaration cls prefix seqs = [SuggestDefaultDeclaration cls $ supersequence (prefix : seqs)] where -- Not exactly the shortest possible supersequence, but it preserves -- the head sequence as the prefix of the result which is a requirement. 
supersequence :: [[Type]] -> [Type] supersequence [] = [] supersequence ([] : seqs) = supersequence seqs supersequence ((x : xs) : seqs) = x : supersequence (xs : (dropHead x <$> seqs)) dropHead x ys@(y : ys') | tcEqType x y = ys' | otherwise = ys dropHead _ [] = [] -------------------------------------------------------------------------------- -- hs-boot mismatch errors pprBootMismatch :: HsBootOrSig -> BootMismatch -> SDoc pprBootMismatch boot_or_sig = \case MissingBootThing nm err -> let def_or_exp = case err of MissingBootDefinition -> text "defined in" MissingBootExport -> text "exported by" in quotes (ppr nm) <+> text "is exported by the" <+> ppr_boot_or_sig <> comma <+> text "but not" <+> def_or_exp <+> text "the implementing module." MissingBootInstance boot_dfun -> hang (text "instance" <+> ppr (idType boot_dfun)) 2 (text "is defined in the" <+> ppr ppr_boot_or_sig <> comma <+> text "but not in the implementing module.") BadReexportedBootThing name name' -> withUserStyle alwaysQualify AllTheWay $ vcat [ text "The" <+> ppr_boot_or_sig <+> text "(re)exports" <+> quotes (ppr name) , text "but the implementing module exports a different identifier" <+> quotes (ppr name') ] BootMismatch boot_thing real_thing err -> vcat [ ppr real_thing <+> text "has conflicting definitions in the module" , text "and its" <+> ppr_boot_or_sig <> dot, text "Main module:" <+> real_doc , (case boot_or_sig of HsBoot -> text " Boot file:" Hsig -> text " Hsig file:") <+> boot_doc , pprBootMismatchWhat boot_or_sig err ] where to_doc = pprTyThingInContext $ showToHeader { ss_forall = case boot_or_sig of HsBoot -> ShowForAllMust Hsig -> ShowForAllWhen } real_doc = to_doc real_thing boot_doc = to_doc boot_thing where ppr_boot_or_sig = case boot_or_sig of HsBoot -> text "hs-boot file" Hsig -> text "hsig file" pprBootMismatchWhat :: HsBootOrSig -> BootMismatchWhat -> SDoc pprBootMismatchWhat boot_or_sig = \case BootMismatchedIdTypes {} -> text "The two types are different." 
BootMismatchedTyCons tc1 tc2 errs -> vcat $ map (pprBootTyConMismatch boot_or_sig tc1 tc2) (NE.toList errs) pprBootTyConMismatch :: HsBootOrSig -> TyCon -> TyCon -> BootTyConMismatch -> SDoc pprBootTyConMismatch boot_or_sig tc1 tc2 = \case TyConKindMismatch -> text "The types have different kinds." TyConRoleMismatch sub_type -> if sub_type then text "The roles are not compatible:" $$ text "Main module:" <+> ppr (tyConRoles tc1) $$ text " Hsig file:" <+> ppr (tyConRoles tc2) else text "The roles do not match." $$ if boot_or_sig == HsBoot then note $ "Roles on abstract types default to" <+> quotes "representational" <+> "in hs-boot files" else empty TyConSynonymMismatch {} -> empty -- nothing interesting to say TyConFlavourMismatch fam_flav1 fam_flav2 -> whenPprDebug $ text "Family flavours" <+> ppr fam_flav1 <+> text "and" <+> ppr fam_flav2 <+> text "do not match" TyConAxiomMismatch ax_errs -> pprBootListMismatches (text "Type family equations do not match:") pprTyConAxiomMismatch ax_errs TyConInjectivityMismatch {} -> text "Injectivity annotations do not match" TyConMismatchedClasses _ _ err -> pprBootClassMismatch boot_or_sig err TyConMismatchedData _rhs1 _rhs2 err -> pprBootDataMismatch err SynAbstractData err -> pprSynAbstractDataError err TyConsVeryDifferent -> empty -- should be obvious to the user what the problem is pprSynAbstractDataError :: SynAbstractDataError -> SDoc pprSynAbstractDataError = \case SynAbsDataTySynNotNullary -> text "Illegal parameterized type synonym in implementation of abstract data." 
SynAbstractDataInvalidRHS bad_sub_tys -> let msgs = mapMaybe pprInvalidAbstractSubTy (NE.toList bad_sub_tys) in case msgs of [] -> herald <> dot msg:[] -> hang (herald <> colon) 2 msg _ -> hang (herald <> colon) 2 (vcat $ map (<+> bullet) msgs) where herald = text "Illegal implementation of abstract data" pprInvalidAbstractSubTy = \case TyConApp tc _ -> assertPpr (isTypeFamilyTyCon tc) (ppr tc) $ Just $ text "Invalid type family" <+> quotes (ppr tc) <> dot ty@(ForAllTy {}) -> Just $ text "Invalid polymorphic type" <> colon <+> ppr ty <> dot ty@(FunTy af _ _ _) | not (af == FTF_T_T) -> Just $ text "Invalid qualified type" <> colon <+> ppr ty <> dot _ -> Nothing pprTyConAxiomMismatch :: BootListMismatch CoAxBranch BootAxiomBranchMismatch -> SDoc pprTyConAxiomMismatch = \case MismatchedLength -> text "The number of equations differs." MismatchedThing i br1 br2 err -> hang (text "The" <+> speakNth (i+1) <+> text "equations do not match.") 2 (pprCoAxBranchMismatch br1 br2 err) pprCoAxBranchMismatch :: CoAxBranch -> CoAxBranch -> BootAxiomBranchMismatch -> SDoc pprCoAxBranchMismatch _br1 _br2 err = text "The" <+> what <+> text "don't match." where what = case err of MismatchedAxiomBinders -> text "variables bound in the equation" MismatchedAxiomLHS -> text "equation left-hand sides" MismatchedAxiomRHS -> text "equation right-hand sides" pprBootListMismatches :: SDoc -- ^ herald -> (BootListMismatch item err -> SDoc) -> BootListMismatches item err -> SDoc pprBootListMismatches herald ppr_one errs = hang herald 2 msgs where msgs = case errs of err :| [] -> ppr_one err _ -> vcat $ map ((bullet <+>) . 
ppr_one) $ NE.toList errs pprBootClassMismatch :: HsBootOrSig -> BootClassMismatch -> SDoc pprBootClassMismatch boot_or_sig = \case MismatchedMethods errs -> pprBootListMismatches (text "The class methods do not match:") pprBootClassMethodListMismatch errs MismatchedATs at_errs -> pprBootListMismatches (text "The associated types do not match:") (pprATMismatch boot_or_sig) at_errs MismatchedFunDeps -> text "The functional dependencies do not match." MismatchedSuperclasses -> text "The superclass constraints do not match." MismatchedMinimalPragmas -> text "The MINIMAL pragmas are not compatible." pprATMismatch :: HsBootOrSig -> BootListMismatch ClassATItem BootATMismatch -> SDoc pprATMismatch boot_or_sig = \case MismatchedLength -> text "The number of associated type defaults differs." MismatchedThing i at1 at2 err -> pprATMismatchErr boot_or_sig i at1 at2 err pprATMismatchErr :: HsBootOrSig -> Int -> ClassATItem -> ClassATItem -> BootATMismatch -> SDoc pprATMismatchErr boot_or_sig i (ATI tc1 _) (ATI tc2 _) = \case MismatchedTyConAT err -> hang (text "The associated types differ:") 2 $ pprBootTyConMismatch boot_or_sig tc1 tc2 err MismatchedATDefaultType -> text "The types of the" <+> speakNth (i+1) <+> text "associated type default differ." pprBootClassMethodListMismatch :: BootListMismatch ClassOpItem BootMethodMismatch -> SDoc pprBootClassMethodListMismatch = \case MismatchedLength -> text "The number of class methods differs." MismatchedThing _ op1 op2 err -> pprBootClassMethodMismatch op1 op2 err pprBootClassMethodMismatch :: ClassOpItem -> ClassOpItem -> BootMethodMismatch -> SDoc pprBootClassMethodMismatch (op1, _) (op2, _) = \case MismatchedMethodNames -> text "The method names" <+> quotes pname1 <+> text "and" <+> quotes pname2 <+> text "differ." MismatchedMethodTypes {} -> text "The types of" <+> pname1 <+> text "are different." 
MismatchedDefaultMethods subtype_check -> if subtype_check then text "The default methods associated with" <+> pname1 <+> text "are not compatible." else text "The default methods associated with" <+> pname1 <+> text "are different." where nm1 = idName op1 nm2 = idName op2 pname1 = quotes (ppr nm1) pname2 = quotes (ppr nm2) pprBootDataMismatch :: BootDataMismatch -> SDoc pprBootDataMismatch = \case MismatchedNewtypeVsData -> text "Cannot match a" <+> quotes (text "data") <+> text "definition with a" <+> quotes (text "newtype") <+> text "definition." MismatchedConstructors dc_errs -> pprBootListMismatches (text "The constructors do not match:") pprBootDataConMismatch dc_errs MismatchedDatatypeContexts {} -> text "The datatype contexts do not match." pprBootDataConMismatch :: BootListMismatch DataCon BootDataConMismatch -> SDoc pprBootDataConMismatch = \case MismatchedLength -> text "The number of constructors differs." MismatchedThing _ dc1 dc2 err -> pprBootDataConMismatchErr dc1 dc2 err pprBootDataConMismatchErr :: DataCon -> DataCon -> BootDataConMismatch -> SDoc pprBootDataConMismatchErr dc1 dc2 = \case MismatchedDataConNames -> text "The names" <+> pname1 <+> text "and" <+> pname2 <+> text "differ." MismatchedDataConFixities -> text "The fixities of" <+> pname1 <+> text "differ." MismatchedDataConBangs -> text "The strictness annotations for" <+> pname1 <+> text "differ." MismatchedDataConFieldLabels -> text "The record label lists for" <+> pname1 <+> text "differ." MismatchedDataConTypes -> text "The types for" <+> pname1 <+> text "differ." 
where name1 = dataConName dc1 name2 = dataConName dc2 pname1 = quotes (ppr name1) pname2 = quotes (ppr name2) -------------------------------------------------------------------------------- -- Illegal instance errors pprIllegalInstance :: IllegalInstanceReason -> SDoc pprIllegalInstance = \case IllegalClassInstance head_ty reason -> pprIllegalClassInstanceReason head_ty reason IllegalFamilyInstance reason -> pprIllegalFamilyInstance reason IllegalFamilyApplicationInInstance inst_ty invis_arg tf_tc tf_args -> pprWithInvisibleBitsWhen invis_arg $ hang (text "Illegal type synonym family application" <+> quotes (ppr tf_ty) <+> text "in instance" <> colon) 2 (ppr inst_ty) where tf_ty = mkTyConApp tf_tc tf_args pprIllegalClassInstanceReason :: TypedThing -> IllegalClassInstanceReason -> SDoc pprIllegalClassInstanceReason head_ty = \case IllegalInstanceHead reason -> pprIllegalInstanceHeadReason head_ty reason IllegalHasFieldInstance has_field_err -> with_illegal_instance_header head_ty $ pprIllegalHasFieldInstance has_field_err IllegalSpecialClassInstance cls because_safeHaskell -> text "Class" <+> quotes (ppr $ className cls) <+> text "does not support user-specified instances" <> safeHaskell_msg where safeHaskell_msg | because_safeHaskell = text " when Safe Haskell is enabled." | otherwise = dot IllegalInstanceFailsCoverageCondition cls coverage_failure -> with_illegal_instance_header head_ty $ pprNotCovered cls coverage_failure pprIllegalInstanceHeadReason :: TypedThing -> IllegalInstanceHeadReason -> SDoc pprIllegalInstanceHeadReason head_ty = \case InstHeadTySynArgs -> with_illegal_instance_header head_ty $ text "All instance types must be of the form (T t1 ... tn)" $$ text "where T is not a synonym." InstHeadNonTyVarArgs -> with_illegal_instance_header head_ty $ vcat [ text "All instance types must be of the form (T a1 ... an)", text "where a1 ... 
an are *distinct type variables*,", text "and each type variable appears at most once in the instance head."] InstHeadMultiParam -> with_illegal_instance_header head_ty $ parens $ text "Only one type can be given in an instance head." InstHeadAbstractClass clas -> text "Cannot define instance for abstract class" <+> quotes (ppr (className clas)) InstHeadNonClass bad_head -> vcat [ text "Illegal" <+> what_illegal <> dot , text "Instance heads must be of the form" , nest 2 $ text "C ty_1 ... ty_n" , text "where" <+> quotes (char 'C') <+> text "is a class." ] where what_illegal = case bad_head of Just tc -> text "instance for" <+> ppr (tyConFlavour tc) <+> quotes (ppr $ tyConName tc) Nothing -> text "head of an instance declaration:" <+> quotes (ppr head_ty) with_illegal_instance_header :: TypedThing -> SDoc -> SDoc with_illegal_instance_header head_ty msg = hang (hang (text "Illegal instance declaration for") 2 (quotes (ppr head_ty)) <> colon) 2 msg pprIllegalHasFieldInstance :: IllegalHasFieldInstance -> SDoc pprIllegalHasFieldInstance = \case IllegalHasFieldInstanceNotATyCon -> text "Record data type must be specified." IllegalHasFieldInstanceFamilyTyCon -> text "Record data type may not be a data family." 
IllegalHasFieldInstanceTyConHasField tc lbl -> quotes (ppr tc) <+> text "already has a field" <+> quotes (ppr lbl) <> dot IllegalHasFieldInstanceTyConHasFields tc lbl -> sep [ ppr_tc <+> text "has fields, and the type" <+> quotes (ppr lbl) , text "could unify with one of the field labels of" <+> ppr_tc <> dot ] where ppr_tc = quotes (ppr tc) pprNotCovered :: Class -> CoverageProblem -> SDoc pprNotCovered clas CoverageProblem { not_covered_fundep = fd , not_covered_fundep_inst = (ls, rs) , not_covered_invis_vis_tvs = undetermined_tvs , not_covered_liberal = which_cc_failed } = pprWithInvisibleBitsWhen (isEmptyVarSet $ pSnd undetermined_tvs) $ vcat [ sep [ text "The" <+> ppWhen liberal (text "liberal") <+> text "coverage condition fails in class" <+> quotes (ppr clas) , nest 2 $ text "for functional dependency:" <+> quotes (pprFunDep fd) ] , sep [ text "Reason: lhs type" <> plural ls <+> pprQuotedList ls , nest 2 $ (if isSingleton ls then text "does not" else text "do not jointly") <+> text "determine rhs type" <> plural rs <+> pprQuotedList rs ] , text "Un-determined variable" <> pluralVarSet undet_set <> colon <+> pprVarSet undet_set (pprWithCommas ppr) ] where liberal = case which_cc_failed of FailedLICC -> True FailedICC {} -> False undet_set = fold undetermined_tvs illegalInstanceHints :: IllegalInstanceReason -> [GhcHint] illegalInstanceHints = \case IllegalClassInstance _ reason -> illegalClassInstanceHints reason IllegalFamilyInstance reason -> illegalFamilyInstanceHints reason IllegalFamilyApplicationInInstance {} -> noHints illegalInstanceReason :: IllegalInstanceReason -> DiagnosticReason illegalInstanceReason = \case IllegalClassInstance _ reason -> illegalClassInstanceReason reason IllegalFamilyInstance reason -> illegalFamilyInstanceReason reason IllegalFamilyApplicationInInstance {} -> ErrorWithoutFlag illegalClassInstanceHints :: IllegalClassInstanceReason -> [GhcHint] illegalClassInstanceHints = \case IllegalInstanceHead reason -> 
illegalInstanceHeadHints reason IllegalHasFieldInstance has_field_err -> illegalHasFieldInstanceHints has_field_err IllegalSpecialClassInstance {} -> noHints IllegalInstanceFailsCoverageCondition _ coverage_failure -> failedCoverageConditionHints coverage_failure illegalClassInstanceReason :: IllegalClassInstanceReason -> DiagnosticReason illegalClassInstanceReason = \case IllegalInstanceHead reason -> illegalInstanceHeadReason reason IllegalHasFieldInstance has_field_err -> illegalHasFieldInstanceReason has_field_err IllegalSpecialClassInstance {} -> ErrorWithoutFlag IllegalInstanceFailsCoverageCondition _ coverage_failure -> failedCoverageConditionReason coverage_failure illegalInstanceHeadHints :: IllegalInstanceHeadReason -> [GhcHint] illegalInstanceHeadHints = \case InstHeadTySynArgs -> [suggestExtension LangExt.TypeSynonymInstances] InstHeadNonTyVarArgs -> [suggestExtension LangExt.FlexibleInstances] InstHeadMultiParam -> [suggestExtension LangExt.MultiParamTypeClasses] InstHeadAbstractClass {} -> noHints InstHeadNonClass {} -> noHints illegalInstanceHeadReason :: IllegalInstanceHeadReason -> DiagnosticReason illegalInstanceHeadReason = \case -- These are serious InstHeadAbstractClass {} -> ErrorWithoutFlag InstHeadNonClass {} -> ErrorWithoutFlag -- These are less serious (enable an extension) InstHeadTySynArgs -> ErrorWithoutFlag InstHeadNonTyVarArgs -> ErrorWithoutFlag InstHeadMultiParam -> ErrorWithoutFlag illegalHasFieldInstanceHints :: IllegalHasFieldInstance -> [GhcHint] illegalHasFieldInstanceHints = \case IllegalHasFieldInstanceNotATyCon -> noHints IllegalHasFieldInstanceFamilyTyCon -> noHints IllegalHasFieldInstanceTyConHasField {} -> noHints IllegalHasFieldInstanceTyConHasFields {} -> noHints illegalHasFieldInstanceReason :: IllegalHasFieldInstance -> DiagnosticReason illegalHasFieldInstanceReason = \case IllegalHasFieldInstanceNotATyCon -> ErrorWithoutFlag IllegalHasFieldInstanceFamilyTyCon -> ErrorWithoutFlag IllegalHasFieldInstanceTyConHasField 
{} -> ErrorWithoutFlag IllegalHasFieldInstanceTyConHasFields {} -> ErrorWithoutFlag failedCoverageConditionHints :: CoverageProblem -> [GhcHint] failedCoverageConditionHints (CoverageProblem { not_covered_liberal = failed_cc }) = case failed_cc of FailedLICC -> noHints FailedICC { alsoFailedLICC = failed_licc } -> -- Turning on UndecidableInstances makes the check liberal, -- so if the liberal check passes, suggest enabling UndecidableInstances. if failed_licc then noHints else [suggestExtension LangExt.UndecidableInstances] failedCoverageConditionReason :: CoverageProblem -> DiagnosticReason failedCoverageConditionReason _ = ErrorWithoutFlag pprIllegalFamilyInstance :: IllegalFamilyInstanceReason -> SDoc pprIllegalFamilyInstance = \case InvalidAssoc reason -> pprInvalidAssoc reason NotAFamilyTyCon ty_or_data tc -> vcat [ text "Illegal family instance for" <+> quotes (ppr tc) , nest 2 $ parens (quotes (ppr tc) <+> text "is not a" <+> what) ] where what = ppr ty_or_data <+> text "family" NotAnOpenFamilyTyCon tc -> text "Illegal instance for closed family" <+> quotes (ppr tc) FamilyCategoryMismatch tc -> text "Wrong category of family instance; declaration was for a" <+> what <> dot where what = case tyConFlavour tc of OpenFamilyFlavour IAmData _ -> text "data family" _ -> text "type family" FamilyArityMismatch _ max_args -> text "Number of parameters must match family declaration; expected" <+> ppr max_args <> dot TyFamNameMismatch fam_tc_name eqn_tc_name -> hang (text "Mismatched type name in type family instance.") 2 (vcat [ text "Expected:" <+> ppr fam_tc_name , text " Actual:" <+> ppr eqn_tc_name ]) FamInstRHSOutOfScopeTyVars mb_dodgy (NE.toList -> tvs) -> hang (text "Out of scope type variable" <> plural tvs <+> pprWithCommas (quotes . 
ppr) tvs <+> text "in the RHS of a family instance.") 2 (text "All such variables must be bound on the LHS.") $$ mk_extra where -- mk_extra: #7536: give a decent error message for -- type T a = Int -- type instance F (T a) = a mk_extra = case mb_dodgy of Nothing -> empty Just (fam_tc, pats, dodgy_tvs) -> ppWhen (any (`elemVarSetByKey` dodgy_tvs) (fmap nameUnique tvs)) $ hang (text "The real LHS (expanding synonyms) is:") 2 (pprTypeApp fam_tc (map expandTypeSynonyms pats)) FamInstLHSUnusedBoundTyVars (NE.toList -> bad_qtvs) -> vcat [ not_bound_msg, not_used_msg, dodgy_msg ] where -- Filter to only keep user-written variables, -- unless none were user-written in which case we report all of them -- (as we need to report an error). filter_user tvs = map ifiqtv $ case filter ifiqtv_user_written tvs of { [] -> tvs ; qvs -> qvs } (not_bound, not_used, dodgy) = case foldr acc_tv ([], [], []) bad_qtvs of (nb, nu, d) -> (filter_user nb, filter_user nu, filter_user d) acc_tv tv (nb, nu, d) = case ifiqtv_reason tv of InvalidFamInstQTvNotUsedInRHS -> (nb, tv : nu, d) InvalidFamInstQTvNotBoundInPats -> (tv : nb, nu, d) InvalidFamInstQTvDodgy -> (nb, nu, tv : d) -- Error message for type variables not bound in LHS patterns. not_bound_msg | null not_bound = empty | otherwise = vcat [ text "The type variable" <> plural not_bound <+> pprQuotedList not_bound <+> isOrAre not_bound <+> text "bound by a forall," , text "but" <+> doOrDoes not_bound <+> text "not appear in any of the LHS patterns of the family instance." ] -- Error message for type variables bound by a forall but not used -- in the RHS. not_used_msg = if null not_used then empty else text "The type variable" <> plural not_used <+> pprQuotedList not_used <+> isOrAre not_used <+> text "bound by a forall," $$ text "but" <+> itOrThey not_used <+> isOrAre not_used <> text "n't used in the family instance." -- Error message for dodgy type variables. -- See Note [Dodgy binding sites in type family instances] in GHC.Tc.Validity. 
dodgy_msg | null dodgy = empty | otherwise = hang (text "Dodgy type variable" <> plural dodgy <+> pprQuotedList dodgy <+> text "in the LHS of a family instance:") 2 (text "the type variable" <> plural dodgy <+> pprQuotedList dodgy <+> text "syntactically appear" <> singular dodgy <+> text "in LHS patterns," $$ text "but" <+> itOrThey dodgy <+> doOrDoes dodgy <> text "n't appear in an injective position.") illegalFamilyInstanceHints :: IllegalFamilyInstanceReason -> [GhcHint] illegalFamilyInstanceHints = \case InvalidAssoc rea -> invalidAssocHints rea NotAFamilyTyCon {} -> noHints NotAnOpenFamilyTyCon {} -> noHints FamilyCategoryMismatch {} -> noHints FamilyArityMismatch {} -> noHints TyFamNameMismatch {} -> noHints FamInstRHSOutOfScopeTyVars {} -> noHints FamInstLHSUnusedBoundTyVars {} -> noHints illegalFamilyInstanceReason :: IllegalFamilyInstanceReason -> DiagnosticReason illegalFamilyInstanceReason = \case InvalidAssoc rea -> invalidAssocReason rea NotAFamilyTyCon {} -> ErrorWithoutFlag NotAnOpenFamilyTyCon {} -> ErrorWithoutFlag FamilyCategoryMismatch {} -> ErrorWithoutFlag FamilyArityMismatch {} -> ErrorWithoutFlag TyFamNameMismatch {} -> ErrorWithoutFlag FamInstRHSOutOfScopeTyVars {} -> ErrorWithoutFlag FamInstLHSUnusedBoundTyVars {} -> ErrorWithoutFlag pprInvalidAssoc :: InvalidAssoc -> SDoc pprInvalidAssoc = \case InvalidAssocInstance rea -> pprInvalidAssocInstance rea InvalidAssocDefault rea -> pprInvalidAssocDefault rea pprInvalidAssocInstance :: InvalidAssocInstance -> SDoc pprInvalidAssocInstance = \case AssocInstanceMissing name -> text "No explicit" <+> text "associated type" <+> text "or default declaration for" <+> quotes (ppr name) AssocInstanceNotInAClass fam_tc -> text "Associated type" <+> quotes (ppr fam_tc) <+> text "must be inside a class instance" AssocNotInThisClass cls fam_tc -> hsep [ text "Class", quotes (ppr cls) , text "does not have an associated type", quotes (ppr fam_tc) ] AssocNoClassTyVar cls fam_tc -> sep [ text "The associated 
type" <+> quotes (ppr fam_tc <+> hsep (map ppr (tyConTyVars fam_tc))) , text "mentions none of the type or kind variables of the class" <+> quotes (ppr cls <+> hsep (map ppr (classTyVars cls)))] AssocTyVarsDontMatch vis fam_tc exp_tys act_tys -> pprWithInvisibleBitsWhen (isInvisibleForAllTyFlag vis) $ vcat [ text "Type indexes must match class instance head" , text "Expected:" <+> pp exp_tys , text " Actual:" <+> pp act_tys ] where pp tys = pprIfaceTypeApp topPrec (toIfaceTyCon fam_tc) $ toIfaceTcArgs fam_tc tys pprInvalidAssocDefault :: InvalidAssocDefault -> SDoc pprInvalidAssocDefault = \case AssocDefaultNotAssoc cls tc -> hsep [ text "Class", quotes (ppr cls) , text "does not have an associated type", quotes (ppr tc) ] AssocMultipleDefaults name -> text "More than one default declaration for" <+> quotes (ppr name) AssocDefaultBadArgs fam_tc pat_tys bad_arg -> let (pat_vis, main_msg) = case bad_arg of AssocDefaultNonTyVarArg (pat_ty, pat_vis) -> (pat_vis, text "Illegal argument" <+> quotes (ppr pat_ty) <+> text "in:") AssocDefaultDuplicateTyVars dups -> let (pat_tv, pat_vis) = NE.head dups in (pat_vis, text "Illegal duplicate variable" <+> quotes (ppr pat_tv) <+> text "in:") in pprWithInvisibleBitsWhen (isInvisibleForAllTyFlag pat_vis) $ hang main_msg 2 (vcat [ppr_eqn, suggestion]) where ppr_eqn :: SDoc ppr_eqn = quotes (text "type" <+> ppr (mkTyConApp fam_tc pat_tys) <+> equals <+> text "...") suggestion :: SDoc suggestion = text "The arguments to" <+> quotes (ppr fam_tc) <+> text "must all be distinct type variables." 
invalidAssocHints :: InvalidAssoc -> [GhcHint] invalidAssocHints = \case InvalidAssocInstance rea -> invalidAssocInstanceHints rea InvalidAssocDefault rea -> invalidAssocDefaultHints rea invalidAssocInstanceHints :: InvalidAssocInstance -> [GhcHint] invalidAssocInstanceHints = \case AssocInstanceMissing {} -> noHints AssocInstanceNotInAClass {} -> noHints AssocNotInThisClass {} -> noHints AssocNoClassTyVar {} -> noHints AssocTyVarsDontMatch {} -> noHints invalidAssocDefaultHints :: InvalidAssocDefault -> [GhcHint] invalidAssocDefaultHints = \case AssocDefaultNotAssoc {} -> noHints AssocMultipleDefaults {} -> noHints AssocDefaultBadArgs _ _ bad -> assocDefaultBadArgHints bad assocDefaultBadArgHints :: AssocDefaultBadArgs -> [GhcHint] assocDefaultBadArgHints = \case AssocDefaultNonTyVarArg {} -> noHints AssocDefaultDuplicateTyVars {} -> noHints invalidAssocReason :: InvalidAssoc -> DiagnosticReason invalidAssocReason = \case InvalidAssocInstance rea -> invalidAssocInstanceReason rea InvalidAssocDefault rea -> invalidAssocDefaultReason rea invalidAssocInstanceReason :: InvalidAssocInstance -> DiagnosticReason invalidAssocInstanceReason = \case AssocInstanceMissing {} -> WarningWithFlag (Opt_WarnMissingMethods) AssocInstanceNotInAClass {} -> ErrorWithoutFlag AssocNotInThisClass {} -> ErrorWithoutFlag AssocNoClassTyVar {} -> ErrorWithoutFlag AssocTyVarsDontMatch {} -> ErrorWithoutFlag invalidAssocDefaultReason :: InvalidAssocDefault -> DiagnosticReason invalidAssocDefaultReason = \case AssocDefaultNotAssoc {} -> ErrorWithoutFlag AssocMultipleDefaults {} -> ErrorWithoutFlag AssocDefaultBadArgs _ _ rea -> assocDefaultBadArgReason rea assocDefaultBadArgReason :: AssocDefaultBadArgs -> DiagnosticReason assocDefaultBadArgReason = \case AssocDefaultNonTyVarArg {} -> ErrorWithoutFlag AssocDefaultDuplicateTyVars {} -> ErrorWithoutFlag -------------------------------------------------------------------------------- -- Template Haskell quotes and splices pprTHError :: THError -> 
DecoratedSDoc pprTHError = \case THSyntaxError err -> pprTHSyntaxError err THNameError err -> pprTHNameError err THReifyError err -> pprTHReifyError err TypedTHError err -> pprTypedTHError err THSpliceFailed rea -> pprSpliceFailReason rea AddTopDeclsError err -> pprAddTopDeclsError err IllegalStaticFormInSplice e -> mkSimpleDecorated $ sep [ text "static forms cannot be used in splices:" , nest 2 $ ppr e ] FailedToLookupThInstName th_type reason -> mkSimpleDecorated $ case reason of NoMatchesFound -> text "Couldn't find any instances of" <+> text (TH.pprint th_type) <+> text "to add documentation to" CouldNotDetermineInstance -> text "Couldn't work out what instance" <+> text (TH.pprint th_type) <+> text "is supposed to be" AddInvalidCorePlugin plugin -> mkSimpleDecorated $ hang (text "addCorePlugin: invalid plugin module" <+> quotes (text plugin) ) 2 (text "Plugins in the current package can't be specified.") AddDocToNonLocalDefn doc_loc -> mkSimpleDecorated $ text "Can't add documentation to" <+> ppr_loc doc_loc <> comma <+> text "as it isn't inside the current module." where ppr_loc (TH.DeclDoc n) = text $ TH.pprint n ppr_loc (TH.ArgDoc n _) = text $ TH.pprint n ppr_loc (TH.InstDoc t) = text $ TH.pprint t ppr_loc TH.ModuleDoc = text "the module header" ReportCustomQuasiError _ msg -> mkSimpleDecorated $ text msg pprTHSyntaxError :: THSyntaxError -> DecoratedSDoc pprTHSyntaxError = mkSimpleDecorated . \case IllegalTHQuotes expr -> text "Syntax error on" <+> ppr expr -- The error message context will say -- "In the Template Haskell quotation", so no need to repeat that here. BadImplicitSplice -> sep [ text "Parse error: module header, import declaration" , text "or top-level declaration expected." 
] -- The compiler should not mention TemplateHaskell, as the common case -- is that this is a simple beginner error, for example: -- -- module M where -- f :: Int -> Int; f x = x -- xyzzy -- g y = f y + 1 -- -- It's unlikely that 'xyzzy' above was intended to be a Template Haskell -- splice; instead it's probably something mistakenly left in the code. -- See #12146 for discussion. IllegalTHSplice -> text "Unexpected top-level splice." MismatchedSpliceType splice_type inner_splice_or_bracket -> inner <+> text "may not appear in" <+> outer <> dot where (inner, outer) = case inner_splice_or_bracket of IsSplice -> case splice_type of Typed -> (text "Typed splices" , text "untyped brackets") Untyped -> (text "Untyped splices", text "typed brackets") IsBracket -> case splice_type of Typed -> (text "Untyped brackets", text "typed splices") Untyped -> (text "Typed brackets" , text "untyped splices") NestedTHBrackets -> text "Template Haskell brackets cannot be nested" <+> text "(without intervening splices)" pprTHNameError :: THNameError -> DecoratedSDoc pprTHNameError = \case NonExactName name -> mkSimpleDecorated $ hang (text "The binder" <+> quotes (ppr name) <+> text "is not a NameU.") 2 (text "Probable cause: you used mkName instead of newName to generate a binding.") QuotedNameWrongStage quote -> mkSimpleDecorated $ sep [ text "Stage error: the non-top-level quoted name" <+> ppr quote , text "must be used at the same stage at which it is bound." ] pprTHReifyError :: THReifyError -> DecoratedSDoc pprTHReifyError = \case CannotReifyInstance ty -> mkSimpleDecorated $ hang (text "reifyInstances:" <+> quotes (ppr ty)) 2 (text "is not a class constraint or type family application") CannotReifyOutOfScopeThing th_name -> mkSimpleDecorated $ quotes (text (TH.pprint th_name)) <+> text "is not in scope at a reify" -- Ugh! 
Rather an indirect way to display the name CannotReifyThingNotInTypeEnv name -> mkSimpleDecorated $ quotes (ppr name) <+> text "is not in the type environment at a reify" NoRolesAssociatedWithThing thing -> mkSimpleDecorated $ text "No roles associated with" <+> (ppr thing) CannotRepresentType sort ty -> mkSimpleDecorated $ hsep [text "Can't represent" <+> sort_doc <+> text "in Template Haskell:", nest 2 (ppr ty)] where sort_doc = text $ case sort of LinearInvisibleArgument -> "linear invisible argument" CoercionsInTypes -> "coercions in types" pprTypedTHError :: TypedTHError -> DecoratedSDoc pprTypedTHError = \case SplicePolymorphicLocalVar ident -> mkSimpleDecorated $ text "Can't splice the polymorphic local variable" <+> quotes (ppr ident) TypedTHWithPolyType ty -> mkSimpleDecorated $ vcat [ text "Illegal polytype:" <+> ppr ty , text "The type of a Typed Template Haskell expression must" <+> text "not have any quantification." ] pprSpliceFailReason :: SpliceFailReason -> DecoratedSDoc pprSpliceFailReason = \case SpliceThrewException phase _exn exn_msg expr show_code -> mkSimpleDecorated $ vcat [ text "Exception when trying to" <+> text phaseStr <+> text "compile-time code:" , nest 2 (text exn_msg) , if show_code then text "Code:" <+> ppr expr else empty] where phaseStr = case phase of SplicePhase_Run -> "run" SplicePhase_CompileAndLink -> "compile and link" RunSpliceFailure err -> pprRunSpliceFailure Nothing err pprAddTopDeclsError :: AddTopDeclsError -> DecoratedSDoc pprAddTopDeclsError = \case InvalidTopDecl _decl -> mkSimpleDecorated $ sep [ text "Only function, value, annotation, and foreign import declarations" , text "may be added with" <+> quotes (text "addTopDecls") <> dot ] AddTopDeclsUnexpectedDeclarationSplice {} -> mkSimpleDecorated $ text "Declaration splices are not permitted" <+> text "inside top-level declarations added with" <+> quotes (text "addTopDecls") <> dot AddTopDeclsRunSpliceFailure err -> pprRunSpliceFailure (Just "addTopDecls") err 
pprRunSpliceFailure :: Maybe String -> RunSpliceFailReason -> DecoratedSDoc pprRunSpliceFailure mb_calling_fn (ConversionFail what reason) = mkSimpleDecorated . add_calling_fn . addSpliceInfo $ pprConversionFailReason reason where add_calling_fn rest = case mb_calling_fn of Just calling_fn -> hang (text "Error in a declaration passed to" <+> quotes (text calling_fn) <> colon) 2 rest Nothing -> rest addSpliceInfo = case what of ConvDec d -> addSliceInfo' "declaration" d ConvExp e -> addSliceInfo' "expression" e ConvPat p -> addSliceInfo' "pattern" p ConvType t -> addSliceInfo' "type" t addSliceInfo' what item reasonErr = reasonErr $$ descr where -- Show the item in pretty syntax normally, -- but with all its constructors if you say -dppr-debug descr = hang (text "When splicing a TH" <+> text what <> colon) 2 (getPprDebug $ \case True -> text (show item) False -> text (TH.pprint item)) thErrorReason :: THError -> DiagnosticReason thErrorReason = \case THSyntaxError err -> thSyntaxErrorReason err THNameError err -> thNameErrorReason err THReifyError err -> thReifyErrorReason err TypedTHError err -> typedTHErrorReason err THSpliceFailed rea -> spliceFailedReason rea AddTopDeclsError err -> addTopDeclsErrorReason err IllegalStaticFormInSplice {} -> ErrorWithoutFlag FailedToLookupThInstName {} -> ErrorWithoutFlag AddInvalidCorePlugin {} -> ErrorWithoutFlag AddDocToNonLocalDefn {} -> ErrorWithoutFlag ReportCustomQuasiError is_error _ -> if is_error then ErrorWithoutFlag else WarningWithoutFlag thSyntaxErrorReason :: THSyntaxError -> DiagnosticReason thSyntaxErrorReason = \case IllegalTHQuotes{} -> ErrorWithoutFlag BadImplicitSplice -> ErrorWithoutFlag IllegalTHSplice{} -> ErrorWithoutFlag NestedTHBrackets{} -> ErrorWithoutFlag MismatchedSpliceType{} -> ErrorWithoutFlag thNameErrorReason :: THNameError -> DiagnosticReason thNameErrorReason = \case NonExactName {} -> ErrorWithoutFlag QuotedNameWrongStage {} -> ErrorWithoutFlag thReifyErrorReason :: THReifyError -> 
DiagnosticReason thReifyErrorReason = \case CannotReifyInstance {} -> ErrorWithoutFlag CannotReifyOutOfScopeThing {} -> ErrorWithoutFlag CannotReifyThingNotInTypeEnv {} -> ErrorWithoutFlag NoRolesAssociatedWithThing {} -> ErrorWithoutFlag CannotRepresentType {} -> ErrorWithoutFlag typedTHErrorReason :: TypedTHError -> DiagnosticReason typedTHErrorReason = \case SplicePolymorphicLocalVar {} -> ErrorWithoutFlag TypedTHWithPolyType {} -> ErrorWithoutFlag spliceFailedReason :: SpliceFailReason -> DiagnosticReason spliceFailedReason = \case SpliceThrewException {} -> ErrorWithoutFlag RunSpliceFailure {} -> ErrorWithoutFlag addTopDeclsErrorReason :: AddTopDeclsError -> DiagnosticReason addTopDeclsErrorReason = \case InvalidTopDecl {} -> ErrorWithoutFlag AddTopDeclsUnexpectedDeclarationSplice {} -> ErrorWithoutFlag AddTopDeclsRunSpliceFailure {} -> ErrorWithoutFlag thErrorHints :: THError -> [GhcHint] thErrorHints = \case THSyntaxError err -> thSyntaxErrorHints err THNameError err -> thNameErrorHints err THReifyError err -> thReifyErrorHints err TypedTHError err -> typedTHErrorHints err THSpliceFailed rea -> spliceFailedHints rea AddTopDeclsError err -> addTopDeclsErrorHints err IllegalStaticFormInSplice {} -> noHints FailedToLookupThInstName {} -> noHints AddInvalidCorePlugin {} -> noHints AddDocToNonLocalDefn {} -> noHints ReportCustomQuasiError {} -> noHints thSyntaxErrorHints :: THSyntaxError -> [GhcHint] thSyntaxErrorHints = \case IllegalTHQuotes{} -> [suggestExtension LangExt.TemplateHaskellQuotes] BadImplicitSplice {} -> noHints -- NB: don't suggest TemplateHaskell -- see comments on BadImplicitSplice in pprTHSyntaxError IllegalTHSplice{} -> [suggestExtension LangExt.TemplateHaskell] NestedTHBrackets{} -> noHints MismatchedSpliceType{} -> noHints thNameErrorHints :: THNameError -> [GhcHint] thNameErrorHints = \case NonExactName {} -> noHints QuotedNameWrongStage {} -> noHints thReifyErrorHints :: THReifyError -> [GhcHint] thReifyErrorHints = \case 
CannotReifyInstance {} -> noHints CannotReifyOutOfScopeThing {} -> noHints CannotReifyThingNotInTypeEnv {} -> noHints NoRolesAssociatedWithThing {} -> noHints CannotRepresentType {} -> noHints typedTHErrorHints :: TypedTHError -> [GhcHint] typedTHErrorHints = \case SplicePolymorphicLocalVar {} -> noHints TypedTHWithPolyType {} -> noHints spliceFailedHints :: SpliceFailReason -> [GhcHint] spliceFailedHints = \case SpliceThrewException {} -> noHints RunSpliceFailure {} -> noHints addTopDeclsErrorHints :: AddTopDeclsError -> [GhcHint] addTopDeclsErrorHints = \case InvalidTopDecl {} -> noHints AddTopDeclsUnexpectedDeclarationSplice {} -> noHints AddTopDeclsRunSpliceFailure {} -> noHints -------------------------------------------------------------------------------- pprPatersonCondFailure :: PatersonCondFailure -> PatersonCondFailureContext -> Type -> Type -> SDoc pprPatersonCondFailure (PCF_TyVar tvs) InInstanceDecl lhs rhs = hang (occMsg tvs) 2 (sep [ text "in the constraint" <+> quotes (ppr lhs) , text "than in the instance head" <+> quotes (ppr rhs) ]) where occMsg tvs = text "Variable" <> plural tvs <+> quotes (pprWithCommas ppr tvs) <+> pp_occurs <+> text "more often" pp_occurs | isSingleton tvs = text "occurs" | otherwise = text "occur" pprPatersonCondFailure (PCF_TyVar tvs) InTyFamEquation lhs rhs = hang (occMsg tvs) 2 (sep [ text "in the type-family application" <+> quotes (ppr rhs) , text "than in the LHS of the family instance" <+> quotes (ppr lhs) ]) where occMsg tvs = text "Variable" <> plural tvs <+> quotes (pprWithCommas ppr tvs) <+> pp_occurs <+> text "more often" pp_occurs | isSingleton tvs = text "occurs" | otherwise = text "occur" pprPatersonCondFailure PCF_Size InInstanceDecl lhs rhs = hang (text "The constraint" <+> quotes (ppr lhs)) 2 (sep [ text "is no smaller than", pp_rhs ]) where pp_rhs = text "the instance head" <+> quotes (ppr rhs) pprPatersonCondFailure PCF_Size InTyFamEquation lhs rhs = hang (text "The type-family application" <+> quotes 
(ppr rhs)) 2 (sep [ text "is no smaller than", pp_lhs ]) where pp_lhs = text "the LHS of the family instance" <+> quotes (ppr lhs) pprPatersonCondFailure (PCF_TyFam tc) InInstanceDecl lhs _rhs = hang (text "Illegal use of type family" <+> quotes (ppr tc)) 2 (text "in the constraint" <+> quotes (ppr lhs)) pprPatersonCondFailure (PCF_TyFam tc) InTyFamEquation _lhs rhs = hang (text "Illegal nested use of type family" <+> quotes (ppr tc)) 2 (text "in the arguments of the type-family application" <+> quotes (ppr rhs)) -------------------------------------------------------------------------------- defaultTypesAndImport :: ClassDefaults -> SDoc defaultTypesAndImport ClassDefaults{cd_types, cd_module = Just cdm} = hang (parens $ pprWithCommas ppr cd_types) 2 (text "imported from" <+> ppr cdm) defaultTypesAndImport ClassDefaults{cd_types} = parens (pprWithCommas ppr cd_types) -------------------------------------------------------------------------------- pprZonkerMessage :: ZonkerMessage -> SDoc pprZonkerMessage = \case ZonkerCannotDefaultConcrete frr -> ppr (frr_context frr) $$ text "cannot be assigned a fixed runtime representation," <+> text "not even by defaulting." 
zonkerMessageHints :: ZonkerMessage -> [GhcHint] zonkerMessageHints = \case ZonkerCannotDefaultConcrete {} -> [SuggestAddTypeSignatures UnnamedBinding] zonkerMessageReason :: ZonkerMessage -> DiagnosticReason zonkerMessageReason = \case ZonkerCannotDefaultConcrete {} -> ErrorWithoutFlag -------------------------------------------------------------------------------- pprTypeSyntaxName :: TypeSyntax -> SDoc pprTypeSyntaxName TypeKeywordSyntax = "keyword" <+> quotes "type" pprTypeSyntaxName ForallTelescopeSyntax = "forall telescope" pprTypeSyntaxName ContextArrowSyntax = "context arrow (=>)" pprTypeSyntaxName FunctionArrowSyntax = "function type arrow (->)" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Types.hs0000644000000000000000000070567007346545000021207 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE TypeApplications #-} module GHC.Tc.Errors.Types ( -- * Main types TcRnMessage(..) , TcRnMessageOpts(..) , mkTcRnUnknownMessage , TcRnMessageDetailed(..) , TypeDataForbids(..) , ErrInfo(..) , FixedRuntimeRepProvenance(..) , pprFixedRuntimeRepProvenance , ShadowedNameProvenance(..) , RecordFieldPart(..) , IllegalNewtypeReason(..) , BadRecordUpdateReason(..) , InjectivityErrReason(..) , HasKinds(..) , hasKinds , SuggestUndecidableInstances(..) , suggestUndecidableInstances , SuggestUnliftedTypes(..) , DataSort(..), ppDataSort , AllowedDataResKind(..) , NotClosedReason(..) , SuggestPartialTypeSignatures(..) , suggestPartialTypeSignatures , DeriveInstanceErrReason(..) , UsingGeneralizedNewtypeDeriving(..) , usingGeneralizedNewtypeDeriving , DeriveAnyClassEnabled(..) , deriveAnyClassEnabled , DeriveInstanceBadConstructor(..) , HasWildcard(..) , hasWildcard , BadAnonWildcardContext(..) , SoleExtraConstraintWildcardAllowed(..) , DeriveGenericsErrReason(..) 
, HasAssociatedDataFamInsts(..) , hasAssociatedDataFamInsts , AssociatedTyLastVarInKind(..) , associatedTyLastVarInKind , AssociatedTyNotParamOverLastTyVar(..) , associatedTyNotParamOverLastTyVar , MissingSignature(..) , Exported(..) , HsDocContext(..) , FixedRuntimeRepErrorInfo(..) , TcRnNoDerivStratSpecifiedInfo(..) , ErrorItem(..), errorItemOrigin, errorItemEqRel, errorItemPred, errorItemCtLoc , SolverReport(..), SolverReportSupplementary(..) , SolverReportWithCtxt(..) , SolverReportErrCtxt(..) , getUserGivens, discardProvCtxtGivens , TcSolverReportMsg(..) , CannotUnifyVariableReason(..) , MismatchMsg(..) , MismatchEA(..) , mkPlainMismatchMsg, mkBasicMismatchMsg , WhenMatching(..) , ExpectedActualInfo(..) , TyVarInfo(..), SameOccInfo(..) , AmbiguityInfo(..) , CND_Extra(..) , FitsMbSuppressed(..) , ValidHoleFits(..), noValidHoleFits , HoleFitDispConfig(..) , RelevantBindings(..), pprRelevantBindings , PromotionErr(..), pprPECategory, peCategory , TermLevelUseErr(..), teCategory , NotInScopeError(..), mkTcRnNotInScope , ImportError(..) , HoleError(..) , CoercibleMsg(..) , PotentialInstances(..) , UnsupportedCallConvention(..) , ExpectedBackends , ArgOrResult(..) , MatchArgsContext(..), MatchArgBadMatches(..) , PragmaWarningInfo(..) , EmptyStatementGroupErrReason(..) , UnexpectedStatement(..) , DeclSort(..) , NonStandardGuards(..) , RuleLhsErrReason(..) , HsigShapeMismatchReason(..) , WrongThingSort(..) , StageCheckReason(..) , UninferrableTyVarCtx(..) , PatSynInvalidRhsReason(..) , BadFieldAnnotationReason(..) , SuperclassCycle(..) , SuperclassCycleDetail(..) , RoleValidationFailedReason(..) , DisabledClassExtension(..) , TyFamsDisabledReason(..) , TypeApplication(..) , HsTypeOrSigType(..) , HsTyVarBndrExistentialFlag(..) , TySynCycleTyCons , BadImportKind(..) , DodgyImportsReason (..) , ImportLookupReason (..) , UnusedImportReason (..) , UnusedImportName (..) , NestedForallsContextsIn(..) , UnusedNameProv(..) , NonCanonicalDefinition(..) , NonCanonical_Monoid(..) 
, NonCanonical_Monad(..) , TypeSyntax(..) , typeSyntaxExtension -- * Errors for hs-boot and signature files , BadBootDecls(..) , MissingBootThing(..), missingBootThing , BootMismatch(..) , BootMismatchWhat(..) , BootTyConMismatch(..) , BootAxiomBranchMismatch(..) , BootClassMismatch(..) , BootMethodMismatch(..) , BootATMismatch(..) , BootDataMismatch(..) , BootDataConMismatch(..) , SynAbstractDataError(..) , BootListMismatch(..), BootListMismatches -- * Class and family instance errors , IllegalInstanceReason(..) , IllegalClassInstanceReason(..) , IllegalInstanceHeadReason(..) , IllegalHasFieldInstance(..) , CoverageProblem(..), FailedCoverageCondition(..) , IllegalFamilyInstanceReason(..) , InvalidFamInstQTv(..), InvalidFamInstQTvReason(..) , InvalidAssoc(..), InvalidAssocInstance(..) , InvalidAssocDefault(..), AssocDefaultBadArgs(..) -- * Template Haskell errors , THError(..), THSyntaxError(..), THNameError(..) , THReifyError(..), TypedTHError(..) , SpliceFailReason(..), RunSpliceFailReason(..) , AddTopDeclsError(..) , ConversionFailReason(..) , UnrepresentableTypeDescr(..) , LookupTHInstNameErrReason(..) , SplicePhase(..) , THDeclDescriptor(..) , ThingBeingConverted(..) , IllegalDecls(..) -- * Zonker errors , ZonkerMessage(..) -- FFI Errors , IllegalForeignTypeReason(..) , TypeCannotBeMarshaledReason(..) 
) where import GHC.Prelude import GHC.Hs import GHC.Tc.Errors.Types.PromotionErr import GHC.Tc.Errors.Hole.FitTypes (HoleFit) import GHC.Tc.Types.Constraint import GHC.Tc.Types.Evidence (EvBindsVar) import GHC.Tc.Types.Origin ( CtOrigin (ProvCtxtOrigin), SkolemInfoAnon (SigSkol) , UserTypeCtxt (PatSynCtxt), TyVarBndrs, TypedThing , FixedRuntimeRepOrigin(..), InstanceWhat ) import GHC.Tc.Types.CtLoc( CtLoc, ctLocOrigin, SubGoalDepth ) import GHC.Tc.Types.Rank (Rank) import GHC.Tc.Types.TH import GHC.Tc.Types.BasicTypes import GHC.Tc.Utils.TcType (TcType, TcSigmaType, TcPredType, PatersonCondFailure, PatersonCondFailureContext) import GHC.Types.Basic import GHC.Types.Error import GHC.Types.Avail import GHC.Types.Hint (UntickedPromotedThing(..), AssumedDerivingStrategy(..)) import GHC.Types.ForeignCall (CLabelString) import GHC.Types.Id.Info ( RecSelParent(..) ) import GHC.Types.Name (NamedThing(..), Name, OccName, getSrcLoc, getSrcSpan) import qualified GHC.Types.Name.Occurrence as OccName import GHC.Types.Name.Reader import GHC.Types.Name.Env (NameEnv) import GHC.Types.SourceFile (HsBootOrSig(..)) import GHC.Types.SrcLoc import GHC.Types.TyThing (TyThing) import GHC.Types.Var (Id, TyCoVar, TyVar, TcTyVar, CoVar, Specificity) import GHC.Types.Var.Env (TidyEnv) import GHC.Types.Var.Set (TyVarSet, VarSet) import GHC.Types.DefaultEnv (ClassDefaults) import GHC.Unit.Types (Module) import GHC.Unit.State (UnitState) import GHC.Unit.Module.Warnings (WarningCategory, WarningTxt) import GHC.Unit.Module.ModIface (ModIface) import GHC.Core.Class (Class, ClassMinimalDef, ClassOpItem, ClassATItem) import GHC.Core.Coercion (Coercion) import GHC.Core.Coercion.Axiom (CoAxBranch) import GHC.Core.ConLike (ConLike) import GHC.Core.DataCon (DataCon, FieldLabel) import GHC.Core.FamInstEnv (FamInst) import GHC.Core.InstEnv (LookupInstanceErrReason, ClsInst, DFunId) import GHC.Core.PatSyn (PatSyn) import GHC.Core.Predicate (EqRel, predTypeEqRel) import GHC.Core.TyCon (TyCon, Role, 
FamTyConFlav, AlgTyConRhs) import GHC.Core.Type (Kind, Type, ThetaType, PredType, ErrorMsgType, ForAllTyFlag) import GHC.Driver.Backend (Backend) import GHC.Utils.Outputable import GHC.Utils.Misc (filterOut) import qualified GHC.LanguageExtensions as LangExt import GHC.Data.FastString (FastString) import GHC.Data.Pair import GHC.Exception.Type (SomeException) import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import qualified Data.List.NonEmpty as NE import Data.Typeable (Typeable) import qualified GHC.Internal.TH.Syntax as TH import Data.Map.Strict (Map) import GHC.Generics ( Generic ) import GHC.Iface.Errors.Types data TcRnMessageOpts = TcRnMessageOpts { tcOptsShowContext :: !Bool -- ^ Whether we show the error context or not , tcOptsIfaceOpts :: !IfaceMessageOpts } {- Note [Migrating TcM Messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As part of #18516, we are slowly migrating the diagnostic messages emitted and reported in the TcM from SDoc to TcRnMessage. Historically, GHC emitted some diagnostics in 3 pieces, i.e. there were lots of error-reporting functions that accepted 3 SDocs an input: one for the important part of the message, one for the context and one for any supplementary information. Consider the following: • Couldn't match expected type ‘Int’ with actual type ‘Char’ • In the expression: x4 In a stmt of a 'do' block: return (x2, x4) In the expression: Under the hood, the reporting functions in Tc.Utils.Monad were emitting "Couldn't match" as the important part, "In the expression" as the context and "In a stmt..In the expression" as the supplementary, with the context and supplementary usually smashed together so that the final message would be composed only by two SDoc (which would then be bulleted like in the example). 
In order for us to smooth out the migration to the new diagnostic infrastructure, we introduce the 'ErrInfo' and 'TcRnMessageDetailed' types, which serve exactly the purpose of bridging the two worlds together without breaking the external API or the existing format of messages reported by GHC. Using 'ErrInfo' and 'TcRnMessageDetailed' also allows us to move away from the SDoc-ridden diagnostic API inside Tc.Utils.Monad, enabling further refactorings. In the future, once the conversion will be complete and we will successfully eradicate any use of SDoc in the diagnostic reporting of GHC, we can surely revisit the usage and existence of these two types, which for now remain a "necessary evil". -} -- The majority of TcRn messages come with extra context about the error, -- and this newtype captures it. See Note [Migrating TcM Messages]. data ErrInfo = ErrInfo { errInfoContext :: !SDoc -- ^ Extra context associated to the error. , errInfoSupplementary :: !SDoc -- ^ Extra supplementary info associated to the error. } -- | 'TcRnMessageDetailed' is an \"internal\" type (used only inside -- 'GHC.Tc.Utils.Monad' that wraps a 'TcRnMessage' while also providing -- any extra info needed to correctly pretty-print this diagnostic later on. data TcRnMessageDetailed = TcRnMessageDetailed !ErrInfo -- ^ Extra info associated with the message !TcRnMessage deriving Generic mkTcRnUnknownMessage :: (Diagnostic a, Typeable a, DiagnosticOpts a ~ NoDiagnosticOpts) => a -> TcRnMessage mkTcRnUnknownMessage diag = TcRnUnknownMessage (mkSimpleUnknownDiagnostic diag) -- Please don't use this function inside the GHC codebase; -- it mainly exists for users of the GHC API, such as plugins. -- -- If you need to emit a new error message in the typechecker, -- you should add a new constructor to 'TcRnMessage' instead. -- | An error which might arise during typechecking/renaming. data TcRnMessage where {-| Simply wraps an unknown 'Diagnostic' message @a@. 
It can be used by plugins to provide custom diagnostic messages originated during typechecking/renaming. -} TcRnUnknownMessage :: (UnknownDiagnostic (DiagnosticOpts TcRnMessage)) -> TcRnMessage {-| Wrap an 'IfaceMessage' to a 'TcRnMessage' for when we attempt to load interface files during typechecking but encounter an error. -} TcRnInterfaceError :: !IfaceMessage -> TcRnMessage {-| TcRnMessageWithInfo is a constructor which is used when extra information is needed to be provided in order to qualify a diagnostic and where it was originated (and why). It carries an extra 'UnitState' which can be used to pretty-print some names and it wraps a 'TcRnMessageDetailed', which includes any extra context associated with this diagnostic. -} TcRnMessageWithInfo :: !UnitState -- ^ The 'UnitState' will allow us to pretty-print -- some diagnostics with more detail. -> !TcRnMessageDetailed -> TcRnMessage {-| TcRnWithHsDocContext annotates an error message with the context in which it originated. -} TcRnWithHsDocContext :: !HsDocContext -> !TcRnMessage -> TcRnMessage {-| TcRnSolverReport is the constructor used to report unsolved constraints after constraint solving, as well as other errors such as hole fit errors. See the documentation of t'TcSolverReportMsg' datatype for an overview of the different errors. -} TcRnSolverReport :: SolverReportWithCtxt -> DiagnosticReason -> TcRnMessage {-| TcRnSolverDepthError is an error that occurs when the constraint solver exceeds the maximum recursion depth. Example: class C a where { meth :: a } instance Cls [a] => Cls a where { meth = head . meth } t :: () t = meth Test cases: T7788 T8550 T9554 T15316A T17267{∅,a,b,c,e} T17458 ContextStack1 T22924b TcCoercibleFail -} TcRnSolverDepthError :: !Type -> !SubGoalDepth -> TcRnMessage {-| TcRnRedundantConstraints is a warning that is emitted when a binding has a user-written type signature which contains superfluous constraints. 
Example: f :: (Eq a, Ord a) => a -> a -> a f x y = (x < y) || x == y -- `Eq a` is superfluous: the `Ord a` constraint suffices. Test cases: T9939, T10632, T18036a, T20602, PluralS, T19296. -} TcRnRedundantConstraints :: [Id] -> (SkolemInfoAnon, Bool) -- ^ The contextual skolem info. -- The boolean controls whether we -- want to show it in the user message. -- (Nice to keep track of the info in either case, -- for other users of the GHC API.) -> TcRnMessage {-| TcRnInaccessibleCode is a warning that is emitted when the RHS of a pattern match is inaccessible, because the constraint solver has detected a contradiction. Example: data B a where { MkTrue :: B True; MkFalse :: B False } foo :: B False -> Bool foo MkFalse = False foo MkTrue = True -- Inaccessible: requires True ~ False Test cases: T7293, T7294, T15558, T17646, T18572, T18610, tcfail167. -} TcRnInaccessibleCode :: Implication -- ^ The implication containing a contradiction. -> SolverReportWithCtxt -- ^ The contradiction. -> TcRnMessage {-| TcRnInaccessibleCoAxBranch is a warning that is emitted when a closed type family has a branch which is inaccessible due to a more general, prior branch. Example: type family F a where F a = Int F Bool = Bool Test cases: T9085, T14066a, T9085, T6018, tc265, -} TcRnInaccessibleCoAxBranch :: TyCon -- ^ The type family's constructor -> CoAxBranch -- ^ The inaccessible branch -> TcRnMessage {-| A type which was expected to have a fixed runtime representation does not have a fixed runtime representation. Example: data D (a :: TYPE r) = MkD a Test cases: T11724, T18534, RepPolyPatSynArg, RepPolyPatSynUnliftedNewtype, RepPolyPatSynRes, T20423 -} TcRnTypeDoesNotHaveFixedRuntimeRep :: !Type -> !FixedRuntimeRepProvenance -> !ErrInfo -- Extra info accumulated in the TcM monad -> TcRnMessage {-| TcRnImplicitLift is a warning (controlled with -Wimplicit-lift) that occurs when a Template Haskell quote implicitly uses 'lift'. 
Example: warning1 :: Lift t => t -> Q Exp warning1 x = [| x |] Test cases: th/T17804 -} TcRnImplicitLift :: Name -> !ErrInfo -> TcRnMessage {-| TcRnUnusedPatternBinds is a warning (controlled with -Wunused-pattern-binds) that occurs if a pattern binding binds no variables at all, unless it is a lone wild-card pattern, or a banged pattern. Example: Just _ = rhs3 -- Warning: unused pattern binding (_, _) = rhs4 -- Warning: unused pattern binding _ = rhs3 -- No warning: lone wild-card pattern !() = rhs4 -- No warning: banged pattern; behaves like seq Test cases: rename/{T13646,T17c,T17e,T7085} -} TcRnUnusedPatternBinds :: HsBind GhcRn -> TcRnMessage {-| TcRnUnusedQuantifiedTypeVar is a warning that occurs if there are unused quantified type variables. Examples: f :: forall a. Int -> Char Test cases: rename/should_compile/ExplicitForAllRules1 rename/should_compile/T5331 -} TcRnUnusedQuantifiedTypeVar :: HsDocContext -> HsTyVarBndrExistentialFlag -- ^ tyVar binder. -> TcRnMessage {-| TcRnDodgyImports is a group of warnings (controlled with -Wdodgy-imports). See 'DodgyImportsReason' for the different warnings. -} TcRnDodgyImports :: !DodgyImportsReason -> TcRnMessage {-| TcRnDodgyExports is a warning (controlled by -Wdodgy-exports) that occurs when an export of the form 'T(..)' for a type constructor 'T' does not actually export anything beside 'T' itself. Example: module Foo ( T(..) -- Warning: T is a type synonym , A(..) -- Warning: A is a type family , C(..) -- Warning: C is a data family ) where type T = Int type family A :: * -> * data family C :: * -> * Test cases: warnings/should_compile/DodgyExports01 -} TcRnDodgyExports :: GlobalRdrElt -> TcRnMessage {-| TcRnMissingImportList is a warning (controlled by -Wmissing-import-lists) that occurs when an import declaration does not explicitly list all the names brought into scope. 
Test cases: rename/should_compile/T4489 -} TcRnMissingImportList :: IE GhcPs -> TcRnMessage {-| When a module marked trustworthy or unsafe (using -XTrustworthy or -XUnsafe) is compiled with a plugin, the TcRnUnsafeDueToPlugin warning (controlled by -Wunsafe) is used as the reason the module was inferred to be unsafe. This warning is not raised if the -fplugin-trustworthy flag is passed. Test cases: plugins/T19926 -} TcRnUnsafeDueToPlugin :: TcRnMessage {-| TcRnModMissingRealSrcSpan is an error that occurs when compiling a module that lacks an associated 'RealSrcSpan'. Test cases: None -} TcRnModMissingRealSrcSpan :: Module -> TcRnMessage {-| TcRnIdNotExportedFromModuleSig is an error pertaining to backpack that occurs when an identifier required by a signature is not exported by the module or signature that is being used as a substitution for that signature. Example(s): None Test cases: backpack/should_fail/bkpfail36 -} TcRnIdNotExportedFromModuleSig :: Name -> Module -> TcRnMessage {-| TcRnIdNotExportedFromLocalSig is an error pertaining to backpack that occurs when an identifier which is necessary for implementing a module signature is not exported from that signature. Example(s): None Test cases: backpack/should_fail/bkpfail30 backpack/should_fail/bkpfail31 backpack/should_fail/bkpfail34 -} TcRnIdNotExportedFromLocalSig :: Name -> TcRnMessage {-| TcRnShadowedName is a warning (controlled by -Wname-shadowing) that occurs whenever an inner-scope value has the same name as an outer-scope value, i.e. the inner value shadows the outer one. This can catch typographical errors that turn into hard-to-find bugs. The warning is suppressed for names beginning with an underscore. Examples(s): f = ... let f = id in ... f ... 
-- NOT OK, 'f' is shadowed f x = do { _ignore <- this; _ignore <- that; return (the other) } -- suppressed via underscore Test cases: typecheck/should_compile/T10971a rename/should_compile/rn039 rename/should_compile/rn064 rename/should_compile/T1972 rename/should_fail/T2723 rename/should_compile/T3262 driver/werror rename/should_fail/T22478d typecheck/should_fail/TyAppPat_ScopedTyVarConflict -} TcRnShadowedName :: OccName -> ShadowedNameProvenance -> TcRnMessage {-| TcRnInvalidWarningCategory is an error that occurs when a warning is declared with a category name that is not the special category "deprecations", and either does not begin with the prefix "x-" indicating a user-defined category, or contains characters not valid in category names. See Note [Warning categories] in GHC.Unit.Module.Warnings Examples(s): module M {-# WARNING in "invalid" "Oops" #-} where {-# WARNING in "x- spaces not allowed" foo "Oops" #-} Test cases: warnings/should_fail/WarningCategoryInvalid -} TcRnInvalidWarningCategory :: !WarningCategory -> TcRnMessage {-| TcRnDuplicateWarningDecls is an error that occurs whenever a warning is declared twice. Examples(s): {-# DEPRECATED foo "Don't use me" #-} {-# DEPRECATED foo "Don't use me" #-} foo :: Int foo = 2 Test cases: rename/should_fail/rnfail058 -} TcRnDuplicateWarningDecls :: !(LocatedN RdrName) -> !RdrName -> TcRnMessage {-| TcRnSimplifierTooManyIterations is an error that occurs whenever the constraint solver in the simplifier hits the iterations' limit. Examples(s): None. Test cases: None. -} TcRnSimplifierTooManyIterations :: Cts -> !IntWithInf -- ^ The limit. -> WantedConstraints -> TcRnMessage {-| TcRnIllegalPatSynDecl is an error that occurs whenever there is an illegal pattern synonym declaration. Examples(s): varWithLocalPatSyn x = case x of P -> () where pattern P = () -- not valid, it can't be local, it must be defined at top-level. 
Test cases: patsyn/should_fail/local -} TcRnIllegalPatSynDecl :: !(LIdP GhcPs) -> TcRnMessage {-| TcRnLinearPatSyn is an error that occurs whenever a pattern synonym signature uses a field that is not unrestricted. Example(s): None Test cases: linear/should_fail/LinearPatSyn2 -} TcRnLinearPatSyn :: !Type -> TcRnMessage {-| TcRnEmptyRecordUpdate is an error that occurs whenever a record is updated without specifying any field. Examples(s): $(deriveJSON defaultOptions{} ''Bad) -- not ok, no fields selected for update of defaultOptions Test cases: th/T12788 -} TcRnEmptyRecordUpdate :: TcRnMessage {-| TcRnIllegalFieldPunning is an error that occurs whenever field punning is used without the 'NamedFieldPuns' extension enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo Foo{a} = a -- Not ok, punning used without extension. Test cases: parser/should_fail/RecordDotSyntaxFail12 -} TcRnIllegalFieldPunning :: !(Located RdrName) -> TcRnMessage {-| TcRnIllegalWildcardsInRecord is an error that occurs whenever wildcards (..) are used in a record without the relevant extension being enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo Foo{..} = a -- Not ok, wildcards used without extension. Test cases: parser/should_fail/RecordWildCardsFail -} TcRnIllegalWildcardsInRecord :: !RecordFieldPart -> TcRnMessage {-| TcRnIllegalWildcardInType is an error that occurs when a wildcard appears in a type in a location in which wildcards aren't allowed. 
{-| TcRnIllegalImplicitTyVarInTypeArgument is an error raised when a type variable is implicitly quantified in a required type argument.
-} TcRnDuplicateFieldName :: !RecordFieldPart -> NE.NonEmpty RdrName -> TcRnMessage {-| TcRnIllegalViewPattern is an error that occurs whenever the ViewPatterns syntax is used but the ViewPatterns language extension is not enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo (a -> l) = l -- not OK, the 'ViewPattern' extension is not enabled. Test cases: parser/should_fail/ViewPatternsFail -} TcRnIllegalViewPattern :: !(Pat GhcPs) -> TcRnMessage {-| TcRnCharLiteralOutOfRange is an error that occurs whenever a character is out of range. Examples(s): None Test cases: None -} TcRnCharLiteralOutOfRange :: !Char -> TcRnMessage {-| TcRnNegativeNumTypeLiteral is an error that occurs whenever a type-level number literal is negative. type Neg = -1 Test cases: th/T8412 typecheck/should_fail/T8306 -} TcRnNegativeNumTypeLiteral :: HsTyLit GhcPs -> TcRnMessage {-| TcRnIllegalWildcardsInConstructor is an error that occurs whenever the record wildcards '..' are used inside a constructor without labeled fields. Examples(s): None Test cases: rename/should_fail/T9815.hs rename/should_fail/T9815b.hs rename/should_fail/T9815ghci.hs rename/should_fail/T9815bghci.hs -} TcRnIllegalWildcardsInConstructor :: !Name -> TcRnMessage {-| TcRnIgnoringAnnotations is a warning that occurs when the source code contains annotation pragmas but the platform in use does not support an external interpreter such as GHCi and therefore the annotations are ignored. Example(s): None Test cases: None -} TcRnIgnoringAnnotations :: [LAnnDecl GhcRn] -> TcRnMessage {-| TcRnAnnotationInSafeHaskell is an error that occurs if annotation pragmas are used in conjunction with Safe Haskell. Example(s): None Test cases: annotations/should_fail/T10826 -} TcRnAnnotationInSafeHaskell :: TcRnMessage {-| TcRnInvalidTypeApplication is an error that occurs when a visible type application is used with an expression that does not accept "specified" type arguments. Example(s): foo :: forall {a}. 
a -> a foo x = x bar :: () bar = let x = foo @Int 42 in () Test cases: overloadedrecflds/should_fail/overloadedlabelsfail03 typecheck/should_fail/ExplicitSpecificity1 typecheck/should_fail/ExplicitSpecificity10 typecheck/should_fail/ExplicitSpecificity2 typecheck/should_fail/T17173 typecheck/should_fail/VtaFail -} TcRnInvalidTypeApplication :: Type -> LHsWcType GhcRn -> TcRnMessage {-| TcRnTagToEnumMissingValArg is an error that occurs when the 'tagToEnum#' function is not applied to a single value argument. Example(s): tagToEnum# 1 2 Test cases: None -} TcRnTagToEnumMissingValArg :: TcRnMessage {-| TcRnTagToEnumUnspecifiedResTy is an error that occurs when the 'tagToEnum#' function is not given a concrete result type. Example(s): foo :: forall a. a foo = tagToEnum# 0# Test cases: typecheck/should_fail/tcfail164 -} TcRnTagToEnumUnspecifiedResTy :: Type -> TcRnMessage {-| TcRnTagToEnumResTyNotAnEnum is an error that occurs when the 'tagToEnum#' function is given a result type that is not an enumeration type. Example(s): foo :: Int -- not an enumeration TyCon foo = tagToEnum# 0# Test cases: typecheck/should_fail/tcfail164 -} TcRnTagToEnumResTyNotAnEnum :: Type -> TcRnMessage {-| TcRnTagToEnumResTyTypeData is an error that occurs when the 'tagToEnum#' function is given a result type that is headed by a @type data@ type, as the data constructors of a @type data@ do not exist at the term level. Example(s): type data Letter = A | B | C foo :: Letter foo = tagToEnum# 0# Test cases: type-data/should_fail/TDTagToEnum.hs -} TcRnTagToEnumResTyTypeData :: Type -> TcRnMessage {-| TcRnArrowIfThenElsePredDependsOnResultTy is an error that occurs when the predicate type of an ifThenElse expression in arrow notation depends on the type of the result. Example(s): None Test cases: None -} TcRnArrowIfThenElsePredDependsOnResultTy :: TcRnMessage {-| TcRnIllegalHsBootOrSigDecl is an error that occurs when an hs-boot file contains declarations that are not allowed, such as bindings. 
Examples: -- A.hs-boot f :: Int -> Int f x = 2 * x -- binding not allowed -- B.hs-boot type family F a where { F Int = Bool } -- type family equations not allowed -- C.hsig bar :: Int -> Int {-# RULES forall x. bar x = x #-} -- RULES not allowed Test cases: - bindings: T19781 - class instance body: none - type family instance: HsBootFam - splice: none - foreign declaration: none - default declaration: none - RULEs: none -} TcRnIllegalHsBootOrSigDecl :: !HsBootOrSig -> !BadBootDecls -> TcRnMessage {-| TcRnBootMismatch is a family of errors that occur when there is a mismatch between the hs-boot and hs files. Examples: -- A.hs-boot foo :: Int -> Bool data D = MkD -- A.hs foo :: Int -> Char foo = chr data D = MkD Int Test cases: - missing export: bkpcabal06, bkpfail{01,05,09,16,35}, rnfail{047,055} - missing definition: none - missing instance: T14075 - mismatch in exports: bkpfail{03,19} - conflicting definitions: bkpcabal02, bkpfail{04,06,07,10,12,133,14,15,17,22,23,25,26,27,41,42,45,47,50,52,53,54}, T19244{a,b}, T23344, ClosedFam3, rnfail055 -} TcRnBootMismatch :: !HsBootOrSig -> !BootMismatch -> TcRnMessage {-| TcRnRecursivePatternSynonym is an error that occurs when a pattern synonym is defined in terms of itself, either directly or indirectly. Example(s): pattern A = B pattern B = A Test cases: patsyn/should_fail/T16900 -} TcRnRecursivePatternSynonym :: LHsBinds GhcRn -> TcRnMessage {-| TcRnPartialTypeSigTyVarMismatch is an error that occurs when a partial type signature attempts to unify two different types. 
Example(s): f :: a -> b -> _ f x y = [x, y] Test cases: partial-sigs/should_fail/T14449 -} TcRnPartialTypeSigTyVarMismatch :: Name -- ^ first type variable -> Name -- ^ second type variable -> Name -- ^ function name -> LHsSigWcType GhcRn -> TcRnMessage {-| TcRnPartialTypeSigBadQuantifier is an error that occurs when a type variable being quantified over in the partial type signature of a function gets unified with a type that is free in that function's context. Example(s): foo :: Num a => a -> a foo xxx = g xxx where g :: forall b. Num b => _ -> b g y = xxx + y Test cases: partial-sig/should_fail/T14479 -} TcRnPartialTypeSigBadQuantifier :: Name -- ^ user-written name of type variable being quantified -> Name -- ^ function name -> Maybe Type -- ^ type the variable unified with, if known -> LHsSigWcType GhcRn -- ^ partial type signature -> TcRnMessage {-| TcRnMissingSignature is a warning that occurs when a top-level binding or a pattern synonym does not have a type signature. Controlled by the flags: -Wmissing-signatures -Wmissing-exported-signatures -Wmissing-pattern-synonym-signatures -Wmissing-exported-pattern-synonym-signatures -Wmissing-kind-signatures -Wmissing-poly-kind-signatures Test cases: T11077 (top-level bindings) T12484 (pattern synonyms) T19564 (kind signatures) -} TcRnMissingSignature :: MissingSignature -> Exported -> TcRnMessage {-| TcRnPolymorphicBinderMissingSig is a warning controlled by -Wmissing-local-signatures that occurs when a local polymorphic binding lacks a type signature. Example(s): id a = a Test cases: warnings/should_compile/T12574 -} TcRnPolymorphicBinderMissingSig :: Name -> Type -> TcRnMessage {-| TcRnOverloadedSig is an error that occurs when a binding group conflicts with the monomorphism restriction. Example(s): data T a = T a mono = ... where x :: Applicative f => f a T x = ... 
Test cases: typecheck/should_compile/T11339 -} TcRnOverloadedSig :: TcIdSig -> TcRnMessage {-| TcRnTupleConstraintInst is an error that occurs whenever an instance for a tuple constraint is specified. Examples(s): class C m a class D m a f :: (forall a. Eq a => (C m a, D m a)) => m a f = undefined Test cases: quantified-constraints/T15334 -} TcRnTupleConstraintInst :: !Class -> TcRnMessage {-| TcRnUserTypeError is an error that occurs due to a user's custom type error, which can be triggered by adding a `TypeError` constraint in a type signature or typeclass instance. Examples(s): f :: TypeError (Text "This is a type error") f = undefined Test cases: typecheck/should_fail/CustomTypeErrors02 typecheck/should_fail/CustomTypeErrors03 -} TcRnUserTypeError :: !Type -> TcRnMessage {-| TcRnConstraintInKind is an error that occurs whenever a constraint is specified in a kind. Examples(s): data Q :: Eq a => Type where {} Test cases: dependent/should_fail/T13895 polykinds/T16263 saks/should_fail/saks_fail004 typecheck/should_fail/T16059a typecheck/should_fail/T18714 -} TcRnConstraintInKind :: !Type -> TcRnMessage {-| TcRnUnboxedTupleTypeFuncArg is an error that occurs whenever an unboxed tuple or unboxed sum type is specified as a function argument, when the appropriate extension (`-XUnboxedTuples` or `-XUnboxedSums`) isn't enabled. Examples(s): -- T15073.hs import T15073a newtype Foo a = MkFoo a deriving P -- T15073a.hs class P a where p :: a -> (# a #) Test cases: deriving/should_fail/T15073.hs deriving/should_fail/T15073a.hs typecheck/should_fail/T16059d -} TcRnUnboxedTupleOrSumTypeFuncArg :: UnboxedTupleOrSum -- ^ whether this is an unboxed tuple or an unboxed sum -> !Type -> TcRnMessage {-| TcRnLinearFuncInKind is an error that occurs whenever a linear function is specified in a kind. 
Examples(s): data A :: * %1 -> * Test cases: linear/should_fail/LinearKind linear/should_fail/LinearKind2 linear/should_fail/LinearKind3 -} TcRnLinearFuncInKind :: !Type -> TcRnMessage {-| TcRnForAllEscapeError is an error that occurs whenever a quantified type's kind mentions quantified type variable. Examples(s): type T :: TYPE (BoxedRep l) data T = MkT Test cases: unlifted-datatypes/should_fail/UnlDataNullaryPoly -} TcRnForAllEscapeError :: !Type -> !Kind -> TcRnMessage {-| TcRnVDQInTermType is an error that occurs whenever a visible dependent quantification is specified in the type of a term. Examples(s): a = (undefined :: forall k -> k -> Type) @Int Test cases: dependent/should_fail/T15859 dependent/should_fail/T16326_Fail1 dependent/should_fail/T16326_Fail2 dependent/should_fail/T16326_Fail3 dependent/should_fail/T16326_Fail4 dependent/should_fail/T16326_Fail5 dependent/should_fail/T16326_Fail6 dependent/should_fail/T16326_Fail7 dependent/should_fail/T16326_Fail8 dependent/should_fail/T16326_Fail9 dependent/should_fail/T16326_Fail10 dependent/should_fail/T16326_Fail11 dependent/should_fail/T16326_Fail12 dependent/should_fail/T17687 dependent/should_fail/T18271 -} TcRnVDQInTermType :: !(Maybe Type) -> TcRnMessage {-| TcRnBadQuantPredHead is an error that occurs whenever a quantified predicate lacks a class or type variable head. Examples(s): class (forall a. A t a => A t [a]) => B t where type A t a :: Constraint Test cases: quantified-constraints/T16474 -} TcRnBadQuantPredHead :: !Type -> TcRnMessage {-| TcRnIllegalTupleConstraint is an error that occurs whenever an illegal tuple constraint is specified. Examples(s): g :: ((Show a, Num a), Eq a) => a -> a g = undefined Test cases: typecheck/should_fail/tcfail209a -} TcRnIllegalTupleConstraint :: !Type -> TcRnMessage {-| TcRnNonTypeVarArgInConstraint is an error that occurs whenever a non type-variable argument is specified in a constraint. 
Examples(s): data T instance Eq Int => Eq T Test cases: ghci/scripts/T13202 ghci/scripts/T13202a polykinds/T12055a typecheck/should_fail/T10351 typecheck/should_fail/T19187 typecheck/should_fail/T6022 typecheck/should_fail/T8883 -} TcRnNonTypeVarArgInConstraint :: !Type -> TcRnMessage {-| TcRnIllegalImplicitParam is an error that occurs whenever an illegal implicit parameter is specified. Examples(s): type Bla = ?x::Int data T = T instance Bla => Eq T Test cases: polykinds/T11466 typecheck/should_fail/T8912 typecheck/should_fail/tcfail041 typecheck/should_fail/tcfail211 typecheck/should_fail/tcrun045 -} TcRnIllegalImplicitParam :: !Type -> TcRnMessage {-| TcRnIllegalConstraintSynonymOfKind is an error that occurs whenever an illegal constraint synonym of kind is specified. Examples(s): type Showish = Show f :: (Showish a) => a -> a f = undefined Test cases: typecheck/should_fail/tcfail209 -} TcRnIllegalConstraintSynonymOfKind :: !Type -> TcRnMessage {-| TcRnOversaturatedVisibleKindArg is an error that occurs whenever an illegal oversaturated visible kind argument is specified. Examples(s): type family F2 :: forall (a :: Type). Type where F2 @a = Maybe a Test cases: typecheck/should_fail/T15793 typecheck/should_fail/T16255 -} TcRnOversaturatedVisibleKindArg :: !Type -> TcRnMessage {-| TcRnForAllRankErr is an error that occurs whenever an illegal ranked type is specified. 
Examples(s): foo :: (a,b) -> (a~b => t) -> (a,b) foo p x = p Test cases: - ghci/should_run/T15806 - indexed-types/should_fail/SimpleFail15 - typecheck/should_fail/T11355 - typecheck/should_fail/T12083a - typecheck/should_fail/T12083b - typecheck/should_fail/T16059c - typecheck/should_fail/T16059e - typecheck/should_fail/T17213 - typecheck/should_fail/T18939_Fail - typecheck/should_fail/T2538 - typecheck/should_fail/T5957 - typecheck/should_fail/T7019 - typecheck/should_fail/T7019a - typecheck/should_fail/T7809 - typecheck/should_fail/T9196 - typecheck/should_fail/tcfail127 - typecheck/should_fail/tcfail184 - typecheck/should_fail/tcfail196 - typecheck/should_fail/tcfail197 -} TcRnForAllRankErr :: !Rank -> !Type -> TcRnMessage {-| TcRnSimplifiableConstraint is a warning triggered by the occurrence of a simplifiable constraint in a context, when MonoLocalBinds is not enabled. Examples(s): simplifiableEq :: Eq (a, a) => a -> a -> Bool simplifiableEq = undefined Test cases: - indexed-types/should_compile/T15322 - partial-sigs/should_compile/SomethingShowable - typecheck/should_compile/T13526 -} TcRnSimplifiableConstraint :: !PredType -> !InstanceWhat -> TcRnMessage {-| TcRnArityMismatch is an error that occurs when a type constructor is supplied with fewer arguments than required. 
TcRnIllegalInstance is a collection of diagnostics that arise from an invalid class or family instance declaration.
Examples(s): None Test cases: warnings/should_compile/T9178 typecheck/should_compile/T4912 -} TcRnOrphanInstance :: Either ClsInst FamInst -> TcRnMessage {-| TcRnFunDepConflict is an error that occurs when there are functional dependencies conflicts between instance declarations. Examples(s): None Test cases: typecheck/should_fail/T2307 typecheck/should_fail/tcfail096 typecheck/should_fail/tcfail202 -} TcRnFunDepConflict :: !UnitState -> NE.NonEmpty ClsInst -> TcRnMessage {-| TcRnDupInstanceDecls is an error that occurs when there are duplicate instance declarations. Examples(s): class Foo a where foo :: a -> Int instance Foo Int where foo = id instance Foo Int where foo = const 42 Test cases: cabal/T12733/T12733 typecheck/should_fail/tcfail035 typecheck/should_fail/tcfail023 backpack/should_fail/bkpfail18 typecheck/should_fail/TcNullaryTCFail typecheck/should_fail/tcfail036 typecheck/should_fail/tcfail073 module/mod51 module/mod52 module/mod44 -} TcRnDupInstanceDecls :: !UnitState -> NE.NonEmpty ClsInst -> TcRnMessage {-| TcRnConflictingFamInstDecls is an error that occurs when there are conflicting family instance declarations. Examples(s): None. 
Test cases: indexed-types/should_fail/ExplicitForAllFams4b indexed-types/should_fail/NoGood indexed-types/should_fail/Over indexed-types/should_fail/OverDirectThisMod indexed-types/should_fail/OverIndirectThisMod indexed-types/should_fail/SimpleFail11a indexed-types/should_fail/SimpleFail11b indexed-types/should_fail/SimpleFail11c indexed-types/should_fail/SimpleFail11d indexed-types/should_fail/SimpleFail2a indexed-types/should_fail/SimpleFail2b indexed-types/should_fail/T13092/T13092 indexed-types/should_fail/T13092c/T13092c indexed-types/should_fail/T14179 indexed-types/should_fail/T2334A indexed-types/should_fail/T2677 indexed-types/should_fail/T3330b indexed-types/should_fail/T4246 indexed-types/should_fail/T7102a indexed-types/should_fail/T9371 polykinds/T7524 typecheck/should_fail/UnliftedNewtypesOverlap -} TcRnConflictingFamInstDecls :: NE.NonEmpty FamInst -> TcRnMessage {-| TcRnFamInstNotInjective is a collection of errors that arise from a type family equation violating the injectivity annotation. See 'InjectivityErrReason'. -} TcRnFamInstNotInjective :: InjectivityErrReason -- ^ the violation -> TyCon -- ^ the family 'TyCon' -> NE.NonEmpty CoAxBranch -- ^ the family equations -> TcRnMessage {-| TcRnBangOnUnliftedType is a warning (controlled by -Wredundant-strictness-flags) that occurs when a strictness annotation is applied to an unlifted type. Example(s): data T = MkT !Int# -- Strictness flag has no effect on unlifted types Test cases: typecheck/should_compile/T20187a typecheck/should_compile/T20187b -} TcRnBangOnUnliftedType :: !Type -> TcRnMessage {-| TcRnLazyBangOnUnliftedType is a warning (controlled by -Wredundant-strictness-flags) that occurs when a lazy annotation is applied to an unlifted type. 
Test cases: module/mod58
Test cases: patsyn/should_fail/export-ps-rec-sel
Example(s): None Test cases: module/MultiExport module/mod128 module/mod14 module/mod5 overloadedrecflds/should_fail/DuplicateExports patsyn/should_compile/T11959 -} TcRnDuplicateExport :: GlobalRdrElt -> IE GhcPs -> IE GhcPs -> TcRnMessage {-| TcRnExportedParentChildMismatch is an error that occurs when an export is bundled with a parent that it does not belong to Example(s): module Foo (T(a)) where data T a = True Test cases: module/T11970 module/T11970B module/mod17 module/mod3 overloadedrecflds/should_fail/NoParent -} TcRnExportedParentChildMismatch :: Name -- ^ parent -> TyThing -> Name -- ^ child -> [Name] -> TcRnMessage {-| TcRnConflictingExports is an error that occurs when different identifiers that have the same name are being exported by a module. Example(s): module Foo (Bar.f, module Baz) where import qualified Bar (f) import Baz (f) Test cases: module/mod131 module/mod142 module/mod143 module/mod144 module/mod145 module/mod146 module/mod150 module/mod155 overloadedrecflds/should_fail/T14953 overloadedrecflds/should_fail/overloadedrecfldsfail10 rename/should_fail/rnfail029 rename/should_fail/rnfail040 typecheck/should_fail/T16453E2 typecheck/should_fail/tcfail025 typecheck/should_fail/tcfail026 -} TcRnConflictingExports :: OccName -- ^ Occurrence name shared by both exports -> GlobalRdrElt -- ^ First export -> IE GhcPs -- ^ Export decl of first export -> GlobalRdrElt -- ^ Second export -> IE GhcPs -- ^ Export decl of second export -> TcRnMessage {-| TcRnDuplicateFieldExport is an error that occurs when a module exports multiple record fields with the same name, without enabling DuplicateRecordFields. Example: module M1 where data D1 = MkD1 { foo :: Int } module M2 where data D2 = MkD2 { foo :: Int } module M ( D1(..), D2(..) 
) where import M1 import M2 Test case: overloadedrecflds/should_fail/overloadedrecfldsfail10 -} TcRnDuplicateFieldExport :: (GlobalRdrElt, IE GhcPs) -> NE.NonEmpty (GlobalRdrElt, IE GhcPs) -> TcRnMessage {-| TcRnAmbiguousRecordUpdate is a warning, controlled by -Wambiguous-fields, which occurs when a user relies on the type-directed disambiguation mechanism to disambiguate a record update. This will not be supported by -XDuplicateRecordFields in future releases. Example(s): data Person = MkPerson { personId :: Int, name :: String } data Address = MkAddress { personId :: Int, address :: String } bad1 x = x { personId = 4 } :: Person -- ambiguous bad2 (x :: Person) = x { personId = 4 } -- ambiguous good x = (x :: Person) { personId = 4 } -- not ambiguous Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail06 -} TcRnAmbiguousRecordUpdate :: HsExpr GhcRn -- ^ Field update -> TyCon -- ^ Record type -> TcRnMessage {-| TcRnMissingFields is a warning controlled by -Wmissing-fields occurring when the initialisation of a record is missing one or more (lazy) fields. Example(s): data Rec = Rec { a :: Int, b :: String, c :: Bool } x = Rec { a = 1, b = "two" } -- missing field 'c' Test cases: deSugar/should_compile/T13870 deSugar/should_compile/ds041 patsyn/should_compile/T11283 rename/should_compile/T5334 rename/should_compile/T12229 rename/should_compile/T5892a warnings/should_fail/WerrorFail2 -} TcRnMissingFields :: ConLike -> [(FieldLabelString, TcType)] -> TcRnMessage {-| TcRnFieldUpdateInvalidType is an error occurring when an updated field's type mentions something that is outside the universally quantified variables of the data constructor, such as an existentially quantified type. Example(s): data X = forall a. 
MkX { f :: a } x = (MkX ()) { f = False } Test cases: patsyn/should_fail/records-exquant typecheck/should_fail/T3323 -} TcRnFieldUpdateInvalidType :: [(FieldLabelString,TcType)] -> TcRnMessage {-| TcRnMissingStrictFields is an error occurring when a record field marked as strict is omitted when constructing said record. Example(s): data R = R { strictField :: !Bool, nonStrict :: Int } x = R { nonStrict = 1 } Test cases: typecheck/should_fail/T18869 typecheck/should_fail/tcfail085 typecheck/should_fail/tcfail112 -} TcRnMissingStrictFields :: ConLike -> [(FieldLabelString, TcType)] -> TcRnMessage {-| TcRnAmbiguousFieldInUpdate is an error that occurs when a field in a record update clashes with another field or top-level function of the same name, and the user hasn't enabled -XDisambiguateRecordFields. Example: {-# LANGUAGE NoFieldSelectors #-} {-# LANGUAGE NoDisambiguateRecordFields #-} module M where data A = MkA { fld :: Int } fld :: Bool fld = False f r = r { fld = 3 } -} TcRnAmbiguousFieldInUpdate :: (GlobalRdrElt, GlobalRdrElt, [GlobalRdrElt]) -> TcRnMessage {-| TcRnBadRecordUpdate is an error when a regular (non-overloaded) record update cannot be pinned down to any one parent. The problem with the record update is stored in the 'BadRecordUpdateReason' field. 
Example(s): data R1 = R1 { x :: Int } data R2 = R2 { x :: Int } update r = r { x = 1 } -- ambiguous data R1 = R1 { x :: Int, y :: Int } data R2 = R2 { y :: Int, z :: Int } update r = r { x = 1, y = 2, z = 3 } -- no parent has all the fields Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail01 overloadedrecflds/should_fail/overloadedrecfldsfail01 overloadedrecflds/should_fail/overloadedrecfldsfail14 -} TcRnBadRecordUpdate :: [RdrName] -- ^ the fields of the record update -> BadRecordUpdateReason -- ^ the reason this record update was rejected -> TcRnMessage {-| TcRnStaticFormNotClosed is an error pertaining to terms that are marked static using the -XStaticPointers extension but which are not closed terms. Example(s): f x = static x Test cases: rename/should_fail/RnStaticPointersFail01 rename/should_fail/RnStaticPointersFail03 -} TcRnStaticFormNotClosed :: Name -> NotClosedReason -> TcRnMessage {-| TcRnUselessTypeable is a warning (controlled by -Wderiving-typeable) that occurs when trying to derive an instance of the 'Typeable' class. Deriving 'Typeable' is no longer necessary (hence the \"useless\") as all types automatically derive 'Typeable' in modern GHC versions. Example(s): None. Test cases: warnings/should_compile/DerivingTypeable -} TcRnUselessTypeable :: TcRnMessage {-| TcRnDerivingDefaults is a warning (controlled by -Wderiving-defaults) that occurs when both 'DeriveAnyClass' and 'GeneralizedNewtypeDeriving' are enabled, and therefore GHC defaults to 'DeriveAnyClass', which might not be what the user wants. Example(s): None. Test cases: typecheck/should_compile/T15839a deriving/should_compile/T16179 -} TcRnDerivingDefaults :: !Class -> TcRnMessage {-| TcRnNonUnaryTypeclassConstraint is an error that occurs when GHC encounters a non-unary constraint when trying to derive a typeclass. Example(s): class A deriving instance A data B deriving A -- We cannot derive A, is not unary (i.e. 'class A a'). 
Test cases: deriving/should_fail/T7959 deriving/should_fail/drvfail005 deriving/should_fail/drvfail009 deriving/should_fail/drvfail006 -} TcRnNonUnaryTypeclassConstraint :: !UserTypeCtxt -> !(LHsSigType GhcRn) -> TcRnMessage {-| TcRnPartialTypeSignatures is a warning (controlled by -Wpartial-type-signatures) that occurs when a wildcard '_' is found in place of a type in a signature or a type class derivation Example(s): foo :: _ -> Int foo = ... deriving instance _ => Eq (Foo a) Test cases: dependent/should_compile/T11241 dependent/should_compile/T15076 dependent/should_compile/T14880-2 typecheck/should_compile/T17024 typecheck/should_compile/T10072 partial-sigs/should_fail/TidyClash2 partial-sigs/should_fail/Defaulting1MROff partial-sigs/should_fail/WildcardsInPatternAndExprSig partial-sigs/should_fail/T10615 partial-sigs/should_fail/T14584a partial-sigs/should_fail/TidyClash partial-sigs/should_fail/T11122 partial-sigs/should_fail/T14584 partial-sigs/should_fail/T10045 partial-sigs/should_fail/PartialTypeSignaturesDisabled partial-sigs/should_fail/T10999 partial-sigs/should_fail/ExtraConstraintsWildcardInExpressionSignature partial-sigs/should_fail/ExtraConstraintsWildcardInPatternSplice partial-sigs/should_fail/WildcardInstantiations partial-sigs/should_run/T15415 partial-sigs/should_compile/T10463 partial-sigs/should_compile/T15039a partial-sigs/should_compile/T16728b partial-sigs/should_compile/T15039c partial-sigs/should_compile/T10438 partial-sigs/should_compile/SplicesUsed partial-sigs/should_compile/T18008 partial-sigs/should_compile/ExprSigLocal partial-sigs/should_compile/T11339a partial-sigs/should_compile/T11670 partial-sigs/should_compile/WarningWildcardInstantiations partial-sigs/should_compile/T16728 partial-sigs/should_compile/T12033 partial-sigs/should_compile/T15039b partial-sigs/should_compile/T10403 partial-sigs/should_compile/T11192 partial-sigs/should_compile/T16728a partial-sigs/should_compile/TypedSplice partial-sigs/should_compile/T15039d 
partial-sigs/should_compile/T11016 partial-sigs/should_compile/T13324_compile2 linear/should_fail/LinearPartialSig polykinds/T14265 polykinds/T14172 -} TcRnPartialTypeSignatures :: !SuggestPartialTypeSignatures -> !ThetaType -> TcRnMessage {-| TcRnCannotDeriveInstance is an error that occurs every time a typeclass instance can't be derived. The 'DeriveInstanceErrReason' will contain the specific reason this error arose. Example(s): None. Test cases: generics/T10604/T10604_no_PolyKinds deriving/should_fail/drvfail009 deriving/should_fail/drvfail-functor2 deriving/should_fail/T10598_fail3 deriving/should_fail/deriving-via-fail2 deriving/should_fail/deriving-via-fail deriving/should_fail/T16181 -} TcRnCannotDeriveInstance :: !Class -- ^ The typeclass we are trying to derive -- an instance for -> [Type] -- ^ The typeclass arguments, if any. -> !(Maybe (DerivStrategy GhcTc)) -- ^ The derivation strategy, if any. -> !UsingGeneralizedNewtypeDeriving -- ^ Is '-XGeneralizedNewtypeDeriving' enabled? -> !DeriveInstanceErrReason -- ^ The specific reason why we couldn't derive -- an instance for the class. -> TcRnMessage {-| TcRnLazyGADTPattern is an error that occurs when a user writes a nested GADT pattern match inside a lazy (~) pattern. Test case: gadt/lazypat -} TcRnLazyGADTPattern :: TcRnMessage {-| TcRnArrowProcGADTPattern is an error that occurs when a user writes a GADT pattern inside arrow proc notation. Test case: arrows/should_fail/arrowfail004. -} TcRnArrowProcGADTPattern :: TcRnMessage {-| TcRnCapturedTermName is a warning (controlled by -Wterm-variable-capture) that occurs when an implicitly quantified type variable's name is already used for a term. 
Example: a = 10 f :: a -> a Test cases: T22513a T22513b T22513c T22513d T22513e T22513f T22513g T22513h T22513i -} TcRnCapturedTermName :: RdrName -> Either [GlobalRdrElt] Name -> TcRnMessage {-| TcRnTypeEqualityOutOfScope is a warning (controlled by -Wtype-equality-out-of-scope) that occurs when the type equality (a ~ b) is not in scope. Test case: warnings/should_compile/T18862b -} TcRnTypeEqualityOutOfScope :: TcRnMessage {-| TcRnTypeEqualityRequiresOperators is a warning (controlled by -Wtype-equality-requires-operators) that occurs when the type equality (a ~ b) is used without the TypeOperators extension. Example: {-# LANGUAGE NoTypeOperators #-} f :: (a ~ b) => a -> b Test case: T18862a -} TcRnTypeEqualityRequiresOperators :: TcRnMessage {-| TcRnIllegalTypeOperator is an error that occurs when a type operator is used without the TypeOperators extension. Example: {-# LANGUAGE NoTypeOperators #-} f :: Vec a n -> Vec a m -> Vec a (n + m) Test case: T12811 -} TcRnIllegalTypeOperator :: !SDoc -> !RdrName -> TcRnMessage {-| TcRnIllegalTypeOperatorDecl is an error that occurs when a type or class operator is declared without the TypeOperators extension. See Note [Type and class operator definitions] Example: {-# LANGUAGE Haskell2010 #-} {-# LANGUAGE MultiParamTypeClasses #-} module T3265 where data a :+: b = Left a | Right b class a :*: b where {} Test cases: T3265, tcfail173 -} TcRnIllegalTypeOperatorDecl :: !RdrName -> TcRnMessage {-| TcRnGADTMonoLocalBinds is a warning controlled by -Wgadt-mono-local-binds that occurs when pattern matching on a GADT when -XMonoLocalBinds is off. Example(s): None Test cases: T20485, T20485a -} TcRnGADTMonoLocalBinds :: TcRnMessage {-| The TcRnNotInScope constructor is used for various not-in-scope errors. See 'NotInScopeError' for more details. 
-} TcRnNotInScope :: NotInScopeError -- ^ what the problem is -> RdrName -- ^ the name that is not in scope -> [ImportError] -- ^ import errors that are relevant -> [GhcHint] -- ^ hints, e.g. enable DataKinds to refer to a promoted data constructor -> TcRnMessage {-| TcRnTermNameInType is an error that occurs when a term-level identifier is used in a type. Example: import qualified Prelude bad :: Prelude.fst (Bool, Float) bad = False Test cases: T21605{c,d} -} TcRnTermNameInType :: RdrName -> [GhcHint] -> TcRnMessage {-| TcRnUntickedPromotedThing is a warning (controlled with -Wunticked-promoted-constructors) that is triggered by an unticked occurrence of a promoted data constructor. Examples: data A = MkA type family F (a :: A) where { F MkA = Bool } type B = [ Int, Bool ] Test cases: T9778, T19984. -} TcRnUntickedPromotedThing :: UntickedPromotedThing -> TcRnMessage {-| TcRnIllegalBuiltinSyntax is an error that occurs when built-in syntax appears in an unexpected location, e.g. as a data constructor or in a fixity declaration. Examples: infixl 5 : data P = (,) Test cases: rnfail042, T14907b, T15124, T15233. -} TcRnIllegalBuiltinSyntax :: SDoc -- ^ what kind of thing this is (a binding, fixity declaration, ...) -> RdrName -> TcRnMessage -- TODO: remove the SDoc argument. {-| TcRnWarnDefaulting is a warning (controlled by -Wtype-defaults) that is triggered whenever a Wanted typeclass constraint is solving through the defaulting of a type variable. Example: one = show 1 -- We get Wanteds Show a0, Num a0, and default a0 to Integer. Test cases: none (which are really specific to defaulting), but see e.g. tcfail204. -} TcRnWarnDefaulting :: [Ct] -- ^ Wanted constraints in which defaulting occurred -> Maybe TyVar -- ^ The type variable being defaulted -> Type -- ^ The default type -> TcRnMessage {-| TcRnIncorrectNameSpace is an error that occurs when a 'Name' is used in the incorrect 'NameSpace', e.g. 
a type constructor or class used in a term, or a term variable used in a type. Example: list2 = $( conE ''(:) `appE` litE (IntegerL 5) `appE` conE '[] ) -- ^^^^^ -- should use a single quotation tick, i.e. '(:) Test cases: T20884. -} TcRnIncorrectNameSpace :: Name -> Bool -- ^ whether the error is happening -- in a Template Haskell tick -- (so we should give a Template Haskell hint) -> TcRnMessage {-| TcRnForeignImportPrimExtNotSet is an error occurring when a foreign import is declared using the @prim@ calling convention without having turned on the -XGHCForeignImportPrim extension. Example(s): foreign import prim "foo" foo :: ByteArray# -> (# Int#, Int# #) Test cases: ffi/should_fail/T20116 -} TcRnForeignImportPrimExtNotSet :: ForeignImport GhcRn -> TcRnMessage {-| TcRnForeignImportPrimSafeAnn is an error declaring that the safe/unsafe annotation should not be used with @prim@ foreign imports. Example(s): foreign import prim unsafe "my_primop_cmm" :: ... Test cases: None -} TcRnForeignImportPrimSafeAnn :: ForeignImport GhcRn -> TcRnMessage {-| TcRnForeignFunctionImportAsValue is an error explaining that foreign @value@ imports cannot have function types. Example(s): foreign import capi "math.h value sqrt" f :: CInt -> CInt Test cases: ffi/should_fail/capi_value_function -} TcRnForeignFunctionImportAsValue :: ForeignImport GhcRn -> TcRnMessage {-| TcRnFunPtrImportWithoutAmpersand is a warning controlled by @-Wdodgy-foreign-imports@ that informs the user of a possible missing @&@ in the declaration of a foreign import with a 'FunPtr' return type. Example(s): foreign import ccall "f" f :: FunPtr (Int -> IO ()) Test cases: ffi/should_compile/T1357 -} TcRnFunPtrImportWithoutAmpersand :: ForeignImport GhcRn -> TcRnMessage {-| TcRnIllegalForeignDeclBackend is an error occurring when a foreign import declaration is not compatible with the code generation backend being used. 
Example(s): None Test cases: None -} TcRnIllegalForeignDeclBackend :: Either (ForeignExport GhcRn) (ForeignImport GhcRn) -> Backend -> ExpectedBackends -> TcRnMessage {-| TcRnUnsupportedCallConv informs the user that the calling convention specified for a foreign export declaration is not compatible with the target platform. It is a warning controlled by @-Wunsupported-calling-conventions@ in the case of @stdcall@ but is otherwise considered an error. Example(s): None Test cases: None -} TcRnUnsupportedCallConv :: Either (ForeignExport GhcRn) (ForeignImport GhcRn) -> UnsupportedCallConvention -> TcRnMessage {-| TcRnIllegalForeignType is an error for when a type appears in a foreign function signature that is not compatible with the FFI. Example(s): None Test cases: ffi/should_fail/T3066 ffi/should_fail/ccfail004 ffi/should_fail/T10461 ffi/should_fail/T7506 ffi/should_fail/T5664 safeHaskell/ghci/p6 safeHaskell/safeLanguage/SafeLang08 ffi/should_fail/T16702 linear/should_fail/LinearFFI ffi/should_fail/T7243 -} TcRnIllegalForeignType :: !(Maybe ArgOrResult) -> !IllegalForeignTypeReason -> TcRnMessage {-| TcRnInvalidCIdentifier indicates a C identifier that is not valid. Example(s): foreign import prim safe "not valid" cmm_test2 :: Int# -> Int# Test cases: th/T10638 -} TcRnInvalidCIdentifier :: !CLabelString -> TcRnMessage {-| TcRnExpectedValueId is an error occurring when something that is not a value identifier is used where one is expected. Example(s): none Test cases: none -} TcRnExpectedValueId :: !TcTyThing -> TcRnMessage {-| TcRnRecSelectorEscapedTyVar is an error indicating that a record field selector containing an existential type variable is used as a function rather than in a pattern match. Example(s): data Rec = forall a. 
Rec { field :: a } field (Rec True) Test cases: patsyn/should_fail/records-exquant typecheck/should_fail/T3176 -} TcRnRecSelectorEscapedTyVar :: !OccName -> TcRnMessage {-| TcRnPatSynNotBidirectional is an error for when a non-bidirectional pattern synonym is used as a constructor. Example(s): pattern Five :: Int pattern Five <- 5 five = Five Test cases: patsyn/should_fail/records-no-uni-update patsyn/should_fail/records-no-uni-update2 -} TcRnPatSynNotBidirectional :: !Name -> TcRnMessage {-| TcRnIllegalDerivingItem is an error for when something other than a type class appears in a deriving statement. Example(s): data X = X deriving Int Test cases: deriving/should_fail/T5922 -} TcRnIllegalDerivingItem :: !(LHsSigType GhcRn) -> TcRnMessage {-| TcRnIllegalDefaultClass is an error for when something other than a type class appears in a default declaration after the keyword. Example(s): default Integer (Int) Test cases: default/fail01 -} TcRnIllegalDefaultClass :: !(LHsSigType GhcRn) -> TcRnMessage {-| TcRnIllegalNamedDefault is an error for specifying an explicit default class name without @-XNamedDefaults@. Example(s): default Num (Integer) Test cases: default/fail02 -} TcRnIllegalNamedDefault :: !(LDefaultDecl GhcRn) -> TcRnMessage {-| TcRnUnexpectedAnnotation indicates the erroneous use of an annotation such as strictness, laziness, or unpacking. Example(s): data T = T { t :: Maybe {-# UNPACK #-} Int } data C = C { f :: !IntMap Int } Test cases: parser/should_fail/unpack_inside_type typecheck/should_fail/T7210 rename/should_fail/T22478b -} TcRnUnexpectedAnnotation :: !(HsType GhcRn) -> !HsBang -> TcRnMessage {-| TcRnIllegalRecordSyntax is an error indicating an illegal use of record syntax. 
Example(s): data T = T Int { field :: Int } Test cases: rename/should_fail/T7943 rename/should_fail/T9077 rename/should_fail/T22478b -} TcRnIllegalRecordSyntax :: Either (HsType GhcPs) (HsType GhcRn) -> TcRnMessage {-| TcRnInvalidVisibleKindArgument is an error for a kind application on a target type that cannot accept it. Example(s): bad :: Int @Type bad = 1 type Foo :: forall a {b}. a -> b -> b type Foo x y = y type Bar = Foo @Bool @Int True 42 Test cases: indexed-types/should_fail/T16356_Fail3 typecheck/should_fail/ExplicitSpecificity7 typecheck/should_fail/T12045b typecheck/should_fail/T12045c typecheck/should_fail/T15592a typecheck/should_fail/T15816 -} TcRnInvalidVisibleKindArgument :: !(LHsType GhcRn) -- ^ The visible kind argument -> !Type -- ^ Target of the kind application -> TcRnMessage {-| TcRnTooManyBinders is an error for a type constructor that is declared with more arguments than its kind specifies. Example(s): type T :: Type -> (Type -> Type) -> Type data T a (b :: Type -> Type) x1 (x2 :: Type -> Type) Test cases: saks/should_fail/saks_fail008 -} TcRnTooManyBinders :: !Kind -> ![LHsTyVarBndr (HsBndrVis GhcRn) GhcRn] -> TcRnMessage {-| TcRnDifferentNamesForTyVar is an error that indicates different names being used for the same type variable. Example(s): data SameKind :: k -> k -> * data Q (a :: k1) (b :: k2) c = MkQ (SameKind a b) Test cases: polykinds/T11203 polykinds/T11821a saks/should_fail/T20916 typecheck/should_fail/T17566b typecheck/should_fail/T17566c -} TcRnDifferentNamesForTyVar :: !Name -> !Name -> TcRnMessage {-| TcRnDisconnectedTyVar is an error for a data declaration that has a kind signature, where the implicitly-bound type variables can't be matched up unambiguously with the ones from the signature. See Note [Disconnected type variables] in GHC.Tc.Gen.HsType. 
Test cases: T24083 -} TcRnDisconnectedTyVar :: !Name -> TcRnMessage {-| TcRnInvalidReturnKind is an error for a data declaration that has a kind signature with an invalid result kind. Example(s): data family Foo :: Constraint Test cases: typecheck/should_fail/T14048b typecheck/should_fail/UnliftedNewtypesConstraintFamily typecheck/should_fail/T12729 typecheck/should_fail/T15883 typecheck/should_fail/T16829a typecheck/should_fail/T16829b typecheck/should_fail/UnliftedNewtypesNotEnabled typecheck/should_fail/tcfail079 -} TcRnInvalidReturnKind :: !DataSort -- ^ classification of thing being returned -> !AllowedDataResKind -- ^ allowed kind -> !Kind -- ^ the return kind -> !(Maybe SuggestUnliftedTypes) -- ^ suggested extension -> TcRnMessage {-| TcRnUnexpectedKindVar is an error that occurs when the user tries to use kind variables without -XPolyKinds. Example: f :: forall k a. Proxy (a :: k) Test cases: polykinds/BadKindVar polykinds/T14710 saks/should_fail/T16722 -} TcRnUnexpectedKindVar :: RdrName -> TcRnMessage {-| TcRnIllegalKind is used for various illegal kind errors, including: Example: type T :: forall k. Type -- without -XPolyKinds enabled Test cases: polykinds/T16762b -} TcRnIllegalKind :: HsTypeOrSigType GhcPs -- ^ The illegal kind -> Bool -- ^ Whether enabling -XPolyKinds should be suggested -> TcRnMessage {-| TcRnClassKindNotConstraint is an error for a type class that has a kind that is not equivalent to Constraint. Example(s): type C :: Type -> Type class C a Test cases: saks/should_fail/T16826 -} TcRnClassKindNotConstraint :: !Kind -> TcRnMessage {-| TcRnUnpromotableThing is an error that occurs when the user attempts to use the promoted version of something which is not promotable. 
Example(s): data T :: T -> * data X a where MkX :: Show a => a -> X a foo :: Proxy ('MkX 'True) foo = Proxy Test cases: dependent/should_fail/PromotedClass dependent/should_fail/T14845_fail1 dependent/should_fail/T14845_fail2 dependent/should_fail/T15215 dependent/should_fail/T13780c dependent/should_fail/T15245 polykinds/T5716 polykinds/T5716a polykinds/T6129 polykinds/T7433 patsyn/should_fail/T11265 patsyn/should_fail/T9161-1 patsyn/should_fail/T9161-2 dependent/should_fail/SelfDep polykinds/PolyKinds06 polykinds/PolyKinds07 polykinds/T13625 polykinds/T15116 polykinds/T15116a saks/should_fail/T16727a saks/should_fail/T16727b rename/should_fail/T12686 rename/should_fail/T16635a rename/should_fail/T16635b rename/should_fail/T16635c -} TcRnUnpromotableThing :: !Name -> !PromotionErr -> TcRnMessage {- | TcRnIllegalTermLevelUse is an error that occurs when the user attempts to use a type-level entity at the term-level. Examples: f x = Int -- illegal use of a type constructor g (Proxy :: Proxy a) = a -- illegal use of a type variable Note that the namespace cannot be used to determine if a name refers to a type-level entity: {-# LANGUAGE RequiredTypeArguments #-} bad :: forall (a :: k) -> k bad t = t The name `t` is assigned the `varName` namespace but stands for a type variable that cannot be used at the term level. Test cases: T18740a, T18740b, T23739_fail_ret, T23739_fail_case -} TcRnIllegalTermLevelUse :: !Name -> !TermLevelUseErr -> TcRnMessage {-| TcRnMatchesHaveDiffNumArgs is an error occurring when something has matches that have different numbers of arguments Example(s): foo x = True foo x y = False Test cases: rename/should_fail/rnfail045 typecheck/should_fail/T20768_fail -} TcRnMatchesHaveDiffNumArgs :: !HsMatchContextRn -- ^ Pattern match specifics -> !MatchArgBadMatches -> TcRnMessage {-| TcRnUnexpectedPatSigType is an error occurring when there is a type signature in a pattern without -XScopedTypeVariables extension Examples: f (a :: Bool) = ... 
Test case: rename/should_fail/T11663 -} TcRnUnexpectedPatSigType :: HsPatSigType GhcPs -> TcRnMessage {-| TcRnIllegalKindSignature is an error occurring when there is a kind signature without -XKindSignatures extension Examples: data Foo (a :: Nat) = .... Test case: parser/should_fail/readFail036 -} TcRnIllegalKindSignature :: HsType GhcPs -> TcRnMessage {-| TcRnDataKindsError is an error occurring when there is an illegal type or kind, probably required -XDataKinds and is used without the enabled extension. This error can occur in both the renamer and the typechecker. The field of type @'Either' ('HsType' 'GhcPs') 'Type'@ reflects this: this field will contain a 'Left' value if the error occurred in the renamer, and this field will contain a 'Right' value if the error occurred in the typechecker. Examples: type Foo = [Nat, Char] type Bar = [Int, String] Test cases: linear/should_fail/T18888 parser/should_fail/readFail001 polykinds/T7151 polykinds/T7433 rename/should_fail/T13568 rename/should_fail/T22478e th/TH_Promoted1Tuple typecheck/should_compile/tcfail094 typecheck/should_compile/T22141a typecheck/should_compile/T22141b typecheck/should_compile/T22141c typecheck/should_compile/T22141d typecheck/should_compile/T22141e typecheck/should_compile/T22141f typecheck/should_compile/T22141g typecheck/should_fail/T20873c typecheck/should_fail/T20873d -} TcRnDataKindsError :: TypeOrKind -> Either (HsType GhcPs) Type -> TcRnMessage {-| TcRnCannotBindScopedTyVarInPatSig is an error stating that scoped type variables cannot be used in pattern bindings. 
Example(s): let (x :: a) = 5 Test cases: typecheck/should_compile/tc141 -} TcRnCannotBindScopedTyVarInPatSig :: !(NE.NonEmpty (Name, TcTyVar)) -> TcRnMessage {-| TcRnCannotBindTyVarsInPatBind is an error for when type variables are introduced in a pattern binding Example(s): Just @a x = Just True Test cases: typecheck/should_fail/TyAppPat_PatternBinding typecheck/should_fail/TyAppPat_PatternBindingExistential -} TcRnCannotBindTyVarsInPatBind :: !(NE.NonEmpty (Name, TcTyVar)) -> TcRnMessage {-| TcRnTooManyTyArgsInConPattern is an error occurring when a constructor pattern has more than the expected number of type arguments Example(s): f (Just @Int @Bool x) = x Test cases: typecheck/should_fail/TyAppPat_TooMany typecheck/should_fail/T20443b -} TcRnTooManyTyArgsInConPattern :: !ConLike -> !Int -- ^ Expected number of args -> !Int -- ^ Actual number of args -> TcRnMessage {-| TcRnMultipleInlinePragmas is a warning signifying that multiple inline pragmas reference the same definition. Example(s): {-# INLINE foo #-} {-# INLINE foo #-} foo :: Bool -> Bool foo = id Test cases: none -} TcRnMultipleInlinePragmas :: !Id -- ^ Target of the pragmas -> !(LocatedA InlinePragma) -- ^ The first pragma -> !(NE.NonEmpty (LocatedA InlinePragma)) -- ^ Other pragmas -> TcRnMessage {-| TcRnUnexpectedPragmas is a warning that occurs when unexpected pragmas appear in the source. Example(s): Test cases: none -} TcRnUnexpectedPragmas :: !Id -> !(NE.NonEmpty (LSig GhcRn)) -> TcRnMessage {-| TcRnNonOverloadedSpecialisePragma is a warning for a specialise pragma being placed on a definition that is not overloaded. Example(s): {-# SPECIALISE foo :: Bool -> Bool #-} foo :: Bool -> Bool foo = id Test cases: simplCore/should_compile/T8537 typecheck/should_compile/T10504 -} TcRnNonOverloadedSpecialisePragma :: !(LIdP GhcRn) -> TcRnMessage {-| TcRnSpecialiseNotVisible is a warning that occurs when the subject of a SPECIALISE pragma has a definition that is not visible from the current module. 
Example(s): none Test cases: none -} TcRnSpecialiseNotVisible :: !Name -> TcRnMessage {-| TcRnPragmaWarning is a warning that can happen when usage of something is warned or deprecated by pragma. Test cases: DeprU T5281 T5867 rn050 rn066 (here is a warning, not deprecation) T3303 ExportWarnings1 ExportWarnings2 ExportWarnings3 ExportWarnings4 ExportWarnings5 ExportWarnings6 InstanceWarnings -} TcRnPragmaWarning :: { pragma_warning_info :: PragmaWarningInfo, pragma_warning_msg :: WarningTxt GhcRn } -> TcRnMessage {-| TcRnDifferentExportWarnings is an error that occurs when the warning messages for exports of a name differ between several export items. Test case: DifferentExportWarnings -} TcRnDifferentExportWarnings :: !Name -- ^ The name with different export warnings -> NE.NonEmpty SrcSpan -- ^ The locations of export list items that differ -- from the one at which the error is reported -> TcRnMessage {-| TcRnIncompleteExportWarnings is a warning (controlled by -Wincomplete-export-warnings) that occurs when some of the exports of a name do not have an export warning and some do Test case: ExportWarnings6 -} TcRnIncompleteExportWarnings :: !Name -- ^ The name that is exported -> NE.NonEmpty SrcSpan -- ^ The locations of export list items that are -- missing the export warning -> TcRnMessage {-| TcRnIllegalHsigDefaultMethods is an error that occurs when a binding for a class default method is provided in a Backpack signature file. Test case: bkpfail40 -} TcRnIllegalHsigDefaultMethods :: !Name -- ^ 'Name' of the class -> NE.NonEmpty (LHsBind GhcRn) -- ^ default methods -> TcRnMessage {-| TcRnHsigFixityMismatch is an error indicating that the fixity decl in a Backpack signature file differs from the one in the source file for the same operator. 
Test cases: bkpfail37, bkpfail38 -} TcRnHsigFixityMismatch :: !TyThing -- ^ The operator whose fixity is defined -> !Fixity -- ^ the fixity used in the source file -> !Fixity -- ^ the fixity used in the signature -> TcRnMessage {-| TcRnHsigShapeMismatch is a group of errors related to mismatches between backpack signatures. -} TcRnHsigShapeMismatch :: !HsigShapeMismatchReason -> TcRnMessage {-| TcRnHsigMissingModuleExport is an error indicating that a module doesn't export a name exported by its signature. Test cases: bkpfail01, bkpfail05, bkpfail09, bkpfail16, bkpfail35, bkpcabal06 -} TcRnHsigMissingModuleExport :: !OccName -- ^ The missing name -> !UnitState -- ^ The module's unit state -> !Module -- ^ The implementation module -> TcRnMessage {-| TcRnBadGenericMethod This test ensures that if you provide a "more specific" type signatures for the default method, you must also provide a binding. Example: {-# LANGUAGE DefaultSignatures #-} class C a where meth :: a default meth :: Num a => a meth = 0 Test case: typecheck/should_fail/MissingDefaultMethodBinding.hs -} TcRnBadGenericMethod :: !Name -- ^ 'Name' of the class -> !Name -- ^ Problematic method -> TcRnMessage {-| TcRnWarningMinimalDefIncomplete is a warning that one must specify which methods must be implemented by all instances. Example: class Cheater a where -- WARNING LINE cheater :: a {-# MINIMAL #-} -- warning! Test case: warnings/minimal/WarnMinimal.hs: -} TcRnWarningMinimalDefIncomplete :: ClassMinimalDef -> TcRnMessage {-| TcRnIllegalQuasiQuotes is an error that occurs when a quasi-quote is used without the QuasiQuotes extension. Example: foo = [myQuoter|x y z|] Test cases: none; the parser fails to parse this if QuasiQuotes is off. -} TcRnIllegalQuasiQuotes :: TcRnMessage {-| TcRnTHError is a family of errors involving Template Haskell. See 'THError'. 
-} TcRnTHError :: THError -> TcRnMessage {-| TcRnDefaultMethodForPragmaLacksBinding is an error that occurs when a default method pragma is missing an accompanying binding. Test cases: typecheck/should_fail/T5084.hs typecheck/should_fail/T2354.hs -} TcRnDefaultMethodForPragmaLacksBinding :: Id -- ^ method -> Sig GhcRn -- ^ the pragma -> TcRnMessage {-| TcRnIgnoreSpecialisePragmaOnDefMethod is a warning that occurs when a specialise pragma is put on a default method. Test cases: none -} TcRnIgnoreSpecialisePragmaOnDefMethod :: !Name -> TcRnMessage {-| TcRnBadMethodErr is an error that happens when one attempts to provide a method in a class instance, when the class doesn't have a method by that name. Test case: th/T12387 -} TcRnBadMethodErr :: { badMethodErrClassName :: !Name , badMethodErrMethodName :: !Name } -> TcRnMessage {-| TcRnIllegalNewtype is an error that occurs when a newtype: * Does not have exactly one field, or * is non-linear, or * is a GADT, or * has a context in its constructor's type, or * has existential type variables in its constructor's type, or * has strictness annotations. Test cases: gadt/T14719 indexed-types/should_fail/T14033 indexed-types/should_fail/T2334A linear/should_fail/LinearGADTNewtype parser/should_fail/readFail008 polykinds/T11459 typecheck/should_fail/T15523 typecheck/should_fail/T15796 typecheck/should_fail/T17955 typecheck/should_fail/T18891a typecheck/should_fail/T21447 typecheck/should_fail/tcfail156 -} TcRnIllegalNewtype :: DataCon -> Bool -- ^ True if linear types enabled -> IllegalNewtypeReason -> TcRnMessage {-| TcRnIllegalTypeData is an error that occurs when a @type data@ declaration occurs without the TypeOperators extension. See Note [Type data declarations] Test case: type-data/should_fail/TDNoPragma -} TcRnIllegalTypeData :: TcRnMessage {-| TcRnTypeDataForbids is an error that occurs when a @type data@ declaration contains @data@ declaration features that are forbidden in a @type data@ declaration. 
See Note [Type data declarations] Test cases: type-data/should_fail/TDDeriving type-data/should_fail/TDRecordsGADT type-data/should_fail/TDRecordsH98 type-data/should_fail/TDStrictnessGADT type-data/should_fail/TDStrictnessH98 -} TcRnTypeDataForbids :: !TypeDataForbids -> TcRnMessage {-| TcRnOrPatBindsVariables is an error that happens when an or-pattern binds term or type variables, e.g. (A @x; B y). Test case: testsuite/tests/typecheck/should_fail/Or3 -} TcRnOrPatBindsVariables :: NE.NonEmpty (IdP GhcRn) -- ^ List of binders -> TcRnMessage {- | TcRnUnsatisfiedMinimalDef is a warning that occurs when a class instance is missing methods that are required by the minimal definition. Example: class C a where foo :: a -> a instance C () -- | foo needs to be defined here Test cases: typecheck/prog001/typecheck.prog001 typecheck/should_compile/tc126 typecheck/should_compile/T7903 typecheck/should_compile/tc116 typecheck/should_compile/tc175 typecheck/should_compile/HasKey typecheck/should_compile/tc125 typecheck/should_compile/tc078 typecheck/should_compile/tc161 typecheck/should_fail/T5051 typecheck/should_compile/T21583 backpack/should_compile/bkp47 backpack/should_fail/bkpfail25 parser/should_compile/T2245 parser/should_compile/read014 indexed-types/should_compile/Class3 indexed-types/should_compile/Simple2 indexed-types/should_fail/T7862 deriving/should_compile/deriving-1935 deriving/should_compile/T9968a deriving/should_compile/drv003 deriving/should_compile/T4966 deriving/should_compile/T14094 perf/compiler/T15304 warnings/minimal/WarnMinimal simplCore/should_compile/simpl020 deSugar/should_compile/T14546d ghci/scripts/T5820 ghci/scripts/ghci019 -} TcRnUnsatisfiedMinimalDef :: ClassMinimalDef -> TcRnMessage {-| 'TcRnMisplacedInstSig' is an error that happens when a method in a class instance is given a type signature, but the user has not enabled the @InstanceSigs@ extension. 
Test case: module/mod45 -} TcRnMisplacedInstSig :: Name -> (LHsSigType GhcRn) -> TcRnMessage {-| TcRnNoRebindableSyntaxRecordDot is an error triggered by an overloaded record update without RebindableSyntax enabled. Example(s): Test cases: parser/should_fail/RecordDotSyntaxFail5 -} TcRnNoRebindableSyntaxRecordDot :: TcRnMessage {-| TcRnNoFieldPunsRecordDot is an error triggered by the use of record field puns in an overloaded record update without enabling NamedFieldPuns. Example(s): print $ a{ foo.bar.baz.quux } Test cases: parser/should_fail/RecordDotSyntaxFail12 -} TcRnNoFieldPunsRecordDot :: TcRnMessage {-| TcRnIllegalStaticExpression is an error thrown when user creates a static pointer via TemplateHaskell without enabling the StaticPointers extension. Example(s): Test cases: th/T14204 -} TcRnIllegalStaticExpression :: HsExpr GhcPs -> TcRnMessage {-| TcRnListComprehensionDuplicateBinding is an error triggered by duplicate let-bindings in a list comprehension. Example(s): [ () | let a = 13 | let a = 17 ] Test cases: typecheck/should_fail/tcfail092 -} TcRnListComprehensionDuplicateBinding :: Name -> TcRnMessage {-| TcRnEmptyStmtsGroup is an error triggered by an empty list of statements in a statement block. For more information, see 'EmptyStatementGroupErrReason' Example(s): [() | then ()] do proc () -> do Test cases: rename/should_fail/RnEmptyStatementGroup1 -} TcRnEmptyStmtsGroup:: EmptyStatementGroupErrReason -> TcRnMessage {-| TcRnLastStmtNotExpr is an error caused by the last statement in a statement block not being an expression. Example(s): do x <- pure () do let x = 5 Test cases: rename/should_fail/T6060 parser/should_fail/T3811g parser/should_fail/readFail028 -} TcRnLastStmtNotExpr :: HsStmtContextRn -> UnexpectedStatement -> TcRnMessage {-| TcRnUnexpectedStatementInContext is an error when a statement appears in an unexpected context (e.g. an arrow statement appears in a list comprehension). 
Example(s): Test cases: parser/should_fail/readFail042 parser/should_fail/readFail038 parser/should_fail/readFail043 -} TcRnUnexpectedStatementInContext :: HsStmtContextRn -> UnexpectedStatement -> Maybe LangExt.Extension -> TcRnMessage {-| TcRnIllegalTupleSection is an error triggered by usage of a tuple section without enabling the TupleSections extension. Example(s): (5,) Test cases: rename/should_fail/rnfail056 -} TcRnIllegalTupleSection :: TcRnMessage {-| TcRnIllegalImplicitParameterBindings is an error triggered by binding an implicit parameter in an mdo block. Example(s): mdo { let { ?x = 5 }; () } Test cases: rename/should_fail/RnImplicitBindInMdoNotation -} TcRnIllegalImplicitParameterBindings :: Either (HsLocalBindsLR GhcPs GhcPs) (HsLocalBindsLR GhcRn GhcPs) -> TcRnMessage {-| TcRnSectionWithoutParentheses is an error triggered by attempting to use an operator section without parentheses. Example(s): (`head` x, ()) Test cases: rename/should_fail/T2490 rename/should_fail/T5657 -} TcRnSectionWithoutParentheses :: HsExpr GhcPs -> TcRnMessage {-| TcRnBindingOfExistingName is an error triggered by an attempt to rebind built-in syntax, punned list or tuple syntax, or a name quoted via Template Haskell. Examples: data [] data (->) $(pure [ValD (VarP 'succ) (NormalB (ConE 'True)) []]) Test cases: rename/should_fail/T14907b rename/should_fail/T22839 rename/should_fail/rnfail042 th/T13968 -} TcRnBindingOfExistingName :: RdrName -> TcRnMessage {-| TcRnMultipleFixityDecls is an error triggered by multiple fixity declarations for the same operator. Example(s): infixr 6 $$ infixl 4 $$ Test cases: rename/should_fail/RnMultipleFixityFail -} TcRnMultipleFixityDecls :: SrcSpan -> RdrName -> TcRnMessage {-| TcRnIllegalPatternSynonymDecl is an error thrown when a user defines a pattern synonyms without enabling the PatternSynonyms extension. 
Example: pattern O :: Int pattern O = 0 Test cases: rename/should_fail/RnPatternSynonymFail -} TcRnIllegalPatternSynonymDecl :: TcRnMessage {-| TcRnIllegalClassBinding is an error triggered by a binding in a class or instance declaration of an illegal form. Examples: class ZeroOne a where zero :: a one :: a instance ZeroOne Int where (zero,one) = (0,1) class C a where pattern P = () Test cases: module/mod48 patsyn/should_fail/T9705-1 patsyn/should_fail/T9705-2 typecheck/should_fail/tcfail021 -} TcRnIllegalClassBinding :: DeclSort -> HsBindLR GhcPs GhcPs -> TcRnMessage {-| TcRnOrphanCompletePragma is an error triggered by a {-# COMPLETE #-} pragma which does not mention any data constructors or pattern synonyms defined in the current module. Test cases: patsyn/should_fail/T13349 -} TcRnOrphanCompletePragma :: TcRnMessage {-| TcRnEmptyCase is an error thrown when a user uses a case expression with an empty list of alternatives without enabling the EmptyCase extension. Example(s): case () of Test cases: rename/should_fail/RnEmptyCaseFail -} TcRnEmptyCase :: HsMatchContextRn -> TcRnMessage {-| TcRnNonStdGuards is a warning thrown when a user uses non-standard guards (e.g. patterns in guards) without enabling the PatternGuards extension. More realistically: the user has explicitly disabled PatternGuards, as it is enabled by default with `-XHaskell2010`. Example(s): f | 5 <- 2 + 3 = ... Test cases: rename/should_compile/rn049 -} TcRnNonStdGuards :: NonStandardGuards -> TcRnMessage {-| TcRnDuplicateSigDecl is an error triggered by two or more signatures for one entity. 
Examples: f :: Int -> Bool f :: Int -> Bool f _ = True g x = x {-# INLINE g #-} {-# NOINLINE g #-} pattern P = () {-# COMPLETE P #-} {-# COMPLETE P #-} Test cases: module/mod68 parser/should_fail/OpaqueParseFail4 patsyn/should_fail/T12165 rename/should_fail/rnfail048 rename/should_fail/T5589 rename/should_fail/T7338 rename/should_fail/T7338a -} TcRnDuplicateSigDecl :: NE.NonEmpty (LocatedN RdrName, Sig GhcPs) -> TcRnMessage {-| TcRnMisplacedSigDecl is an error triggered by the pragma application in the wrong context, like `MINIMAL` applied to a function or `SPECIALIZE` to an instance. Example: f x = x {-# MINIMAL f #-} Test cases: rename/should_fail/T18138 warnings/minimal/WarnMinimalFail1 -} TcRnMisplacedSigDecl :: Sig GhcRn -> TcRnMessage {-| TcRnUnexpectedDefaultSig is an error thrown when a user uses default signatures without enabling the DefaultSignatures extension. Example: class C a where m :: a default m :: Num a => a m = 0 Test cases: rename/should_fail/RnDefaultSigFail -} TcRnUnexpectedDefaultSig :: Sig GhcPs -> TcRnMessage {-| TcRnDuplicateMinimalSig is an error triggered by two or more minimal signatures for one type class. Example: class C where f :: () {-# MINIMAL f #-} {-# MINIMAL f #-} Test cases: rename/should_fail/RnMultipleMinimalPragmaFail -} TcRnDuplicateMinimalSig :: LSig GhcPs -> LSig GhcPs -> [LSig GhcPs] -> TcRnMessage {-| 'TcRnIllegalInvisTyVarBndr' is an error that occurs when invisible type variable binders in type declarations are used without enabling the @TypeAbstractions@ extension. Example: {-# LANGUAGE NoTypeAbstractions #-} -- extension disabled data T @k (a :: k) @(j :: Type) (b :: j) ^^ ^^^^^^^^^^^^ Test case: T22560_fail_ext -} TcRnIllegalInvisTyVarBndr :: !(LHsTyVarBndr (HsBndrVis GhcRn) GhcRn) -> TcRnMessage {-| 'TcRnIllegalWildcardTyVarBndr' is an error that occurs when a wildcard binder is used in a type declaration without enabling the @TypeAbstractions@ extension. 
Example: {-# LANGUAGE NoTypeAbstractions #-} -- extension disabled type Const a _ = a ^ Test case: T23501_fail_ext -} TcRnIllegalWildcardTyVarBndr :: !(LHsTyVarBndr (HsBndrVis GhcRn) GhcRn) -> TcRnMessage {-| 'TcRnInvalidInvisTyVarBndr' is an error that occurs when an invisible type variable binder has no corresponding @forall k.@ quantifier in the standalone kind signature. Example: type P :: forall a -> Type data P @a = MkP Test cases: T22560_fail_a T22560_fail_b -} TcRnInvalidInvisTyVarBndr :: !Name -> !(LHsTyVarBndr (HsBndrVis GhcRn) GhcRn) -> TcRnMessage {-| 'TcRnInvisBndrWithoutSig' is an error triggered by attempting to use an invisible type variable binder in a type declaration without a standalone kind signature or a complete user-supplied kind. Example: data T @k (a :: k) -- No CUSK, no SAKS Test case: T22560_fail_d -} TcRnInvisBndrWithoutSig :: !Name -> !(LHsTyVarBndr (HsBndrVis GhcRn) GhcRn) -> TcRnMessage {-| TcRnDeprecatedInvisTyArgInConPat is a warning that triggers on type applications in constructor patterns when the user has not enabled '-XTypeAbstractions' but instead has enabled both '-XScopedTypeVariables' and '-XTypeApplications'. This warning is a deprecation mechanism that is scheduled until GHC 9.12. -} TcRnDeprecatedInvisTyArgInConPat :: TcRnMessage {-| TcRnUnexpectedStandaloneDerivingDecl is an error thrown when a user uses standalone deriving without enabling the StandaloneDeriving extension. Example: deriving instance Eq Foo Test cases: rename/should_fail/RnUnexpectedStandaloneDeriving -} TcRnUnexpectedStandaloneDerivingDecl :: TcRnMessage {-| TcRnUnusedVariableInRuleDecl is an error triggered by forall'd variable in rewrite rule that does not appear on left-hand side Example: {-# RULES "rule" forall a. 
id = id #-} Test cases: rename/should_fail/ExplicitForAllRules2 -} TcRnUnusedVariableInRuleDecl :: FastString -> Name -> TcRnMessage {-| TcRnUnexpectedStandaloneKindSig is an error thrown when a user uses standalone kind signature without enabling the StandaloneKindSignatures extension. Example: type D :: Type data D = D Test cases: saks/should_fail/saks_fail001 -} TcRnUnexpectedStandaloneKindSig :: TcRnMessage {-| TcRnIllegalRuleLhs is an error triggered by malformed left-hand side of rewrite rule Examples: {-# RULES "test" forall x. f x = x #-} {-# RULES "test" forall x. case x of = x #-} Test cases: rename/should_fail/T15659 -} TcRnIllegalRuleLhs :: RuleLhsErrReason -> FastString -- Rule name -> LHsExpr GhcRn -- Full expression -> HsExpr GhcRn -- Bad expression -> TcRnMessage {-| TcRnDuplicateRoleAnnot is an error triggered by two or more role annotations for one type Example: data D a type role D phantom type role D phantom Test cases: roles/should_fail/Roles8 -} TcRnDuplicateRoleAnnot :: NE.NonEmpty (LRoleAnnotDecl GhcPs) -> TcRnMessage {-| TcRnDuplicateKindSig is an error triggered by two or more standalone kind signatures for one type Example: type D :: Type type D :: Type data D Test cases: saks/should_fail/saks_fail002 -} TcRnDuplicateKindSig :: NE.NonEmpty (LStandaloneKindSig GhcPs) -> TcRnMessage {-| TcRnIllegalDerivStrategy is an error thrown when a user uses deriving strategy without enabling the DerivingStrategies extension or uses deriving via without enabling the DerivingVia extension. Examples: data T = T deriving stock Eq data T = T deriving via Eq T Test cases: deriving/should_fail/deriving-via-fail3 deriving/should_fail/T10598_fail4 -} TcRnIllegalDerivStrategy :: DerivStrategy GhcPs -> TcRnMessage {-| TcRnIllegalMultipleDerivClauses is an error thrown when a user uses two or more deriving clauses without enabling the DerivingStrategies extension. 
Example: data T = T deriving Eq deriving Ord Test cases: deriving/should_fail/T10598_fail5 -} TcRnIllegalMultipleDerivClauses :: TcRnMessage {-| TcRnNoDerivStratSpecified is a warning implied by -Wmissing-deriving-strategies and triggered by deriving without mentioning a strategy. See 'TcRnNoDerivStratSpecifiedInfo' cases for examples. Test cases: deriving/should_compile/T15798a deriving/should_compile/T15798b deriving/should_compile/T15798c deriving/should_compile/T24955a deriving/should_compile/T24955b deriving/should_compile/T24955c -} TcRnNoDerivStratSpecified :: Bool -- ^ True if DerivingStrategies is enabled -> TcRnNoDerivStratSpecifiedInfo -> TcRnMessage {-| TcRnStupidThetaInGadt is an error triggered by data contexts in GADT-style data declaration Example: data (Eq a) => D a where MkD :: D Int Test cases: rename/should_fail/RnStupidThetaInGadt -} TcRnStupidThetaInGadt :: HsDocContext -> TcRnMessage {-| TcRnShadowedTyVarNameInFamResult is an error triggered by type variable in type family result that shadows type variable from left hand side Example: type family F a b c = b Test cases: ghci/scripts/T6018ghcirnfail rename/should_fail/T6018rnfail -} TcRnShadowedTyVarNameInFamResult :: IdP GhcPs -> TcRnMessage {-| TcRnIncorrectTyVarOnLhsOfInjCond is an error caused by a situation where the left-hand side of an injectivity condition of a type family is not a variable referring to the type family result. See Note [Renaming injectivity annotation] for more details. 
Example: type family F a = r | a -> a Test cases: ghci/scripts/T6018ghcirnfail rename/should_fail/T6018rnfail -} TcRnIncorrectTyVarOnLhsOfInjCond :: IdP GhcRn -- Expected -> LIdP GhcPs -- Actual -> TcRnMessage {-| TcRnUnknownTyVarsOnRhsOfInjCond is an error triggered by out-of-scope type variables on the right-hand side of a of an injectivity condition of a type family Example: type family F a = res | res -> b Test cases: ghci/scripts/T6018ghcirnfail rename/should_fail/T6018rnfail -} TcRnUnknownTyVarsOnRhsOfInjCond :: [Name] -> TcRnMessage {-| TcRnLookupInstance groups several errors emitted when looking up class instances. Test cases: none -} TcRnLookupInstance :: !Class -> ![Type] -> !LookupInstanceErrReason -> TcRnMessage {-| TcRnBadlyStaged is an error that occurs when a TH binding is used in an invalid stage. Test cases: T17820d -} TcRnBadlyStaged :: !StageCheckReason -- ^ The binding being spliced. -> !Int -- ^ The binding stage. -> !Int -- ^ The stage at which the binding is used. -> TcRnMessage {-| TcRnStageRestriction is an error that occurs when a top level splice refers to a local name. Test cases: T17820, T21547, T5795, qq00[1-4], annfail0{3,4,6,9} -} TcRnStageRestriction :: !StageCheckReason -- ^ The binding being spliced. -> TcRnMessage {-| TcRnBadlyStagedWarn is a warning that occurs when a TH type binding is used in an invalid stage. Controlled by flags: - Wbadly-staged-type Test cases: T23829_timely T23829_tardy T23829_hasty -} TcRnBadlyStagedType :: !Name -- ^ The type binding being spliced. -> !Int -- ^ The binding stage. -> !Int -- ^ The stage at which the binding is used. -> TcRnMessage {-| TcRnTyThingUsedWrong is an error that occurs when a thing is used where another thing was expected. Test cases: none -} TcRnTyThingUsedWrong :: !WrongThingSort -- ^ Expected thing. -> !TcTyThing -- ^ Thing used wrongly. -> !Name -- ^ Name of the thing used wrongly. 
-> TcRnMessage {-| TcRnCannotDefaultKindVar is an error that occurs when attempting to use unconstrained kind variables whose type isn't @Type@, without -XPolyKinds. Test cases: T11334b -} TcRnCannotDefaultKindVar :: !TyVar -- ^ The unconstrained variable. -> !Kind -- ^ Kind of the variable. -> TcRnMessage {-| TcRnUninferrableTyVar is an error that occurs when metavariables in a type could not be defaulted. Test cases: T17301, T17562, T17567, T17567StupidTheta, T15474, T21479 -} TcRnUninferrableTyVar :: ![TyCoVar] -- ^ The variables that could not be defaulted. -> !UninferrableTyVarCtx -- ^ Description of the surrounding context. -> TcRnMessage {-| TcRnSkolemEscape is an error that occurs when type variables from an outer scope is used in a context where they should be locally scoped. Test cases: T15076, T15076b, T14880-2, T15825, T14880, T15807, T16946, T14350, T14040A, T15795, T15795a, T14552 -} TcRnSkolemEscape :: ![TcTyVar] -- ^ The variables that would escape. -> !TcTyVar -- ^ The variable that is being quantified. -> !Type -- ^ The type in which they occur. -> TcRnMessage {-| TcRnPatSynEscapedCoercion is an error indicating that a coercion escaped from a pattern synonym into a type. See Note [Coercions that escape] in GHC.Tc.TyCl.PatSyn Test cases: T14507 -} TcRnPatSynEscapedCoercion :: !Id -- ^ The pattern-bound variable -> !(NE.NonEmpty CoVar) -- ^ The escaped coercions -> TcRnMessage {-| TcRnPatSynExistentialInResult is an error indicating that the result type of a pattern synonym mentions an existential type variable. Test cases: PatSynExistential -} TcRnPatSynExistentialInResult :: !Name -- ^ The name of the pattern synonym -> !TcSigmaType -- ^ The result type -> ![TyVar] -- ^ The escaped existential variables -> TcRnMessage {-| TcRnPatSynArityMismatch is an error indicating that the number of arguments in a pattern synonym's equation differs from the number of parameters in its signature. 
Test cases: PatSynArity -} TcRnPatSynArityMismatch :: !Name -- ^ The name of the pattern synonym -> !Arity -- ^ The number of equation arguments -> !Arity -- ^ The difference -> TcRnMessage {-| TcRnPatSynInvalidRhs is an error group indicating that the pattern on the right hand side of a pattern synonym is invalid. Test cases: unidir, T14112 -} TcRnPatSynInvalidRhs :: !Name -- ^ The name of the pattern synonym -> !(LPat GhcRn) -- ^ The pattern -> ![LIdP GhcRn] -- ^ The LHS args -> !PatSynInvalidRhsReason -- ^ The number of equation arguments -> TcRnMessage {-| TcRnZonkerMessage is collection of errors that occur when zonking, i.e. filling in metavariables with their final values. See 'ZonkerMessage' -} TcRnZonkerMessage :: ZonkerMessage -> TcRnMessage {-| TcRnTyFamDepsDisabled is an error indicating that a type family injectivity annotation was used without enabling the extension TypeFamilyDependencies. Test cases: T11381 -} TcRnTyFamDepsDisabled :: TcRnMessage {-| TcRnAbstractClosedTyFamDecl is an error indicating that an abstract closed type family was declared in a regular source file, while it is only allowed in hs-boot files. Test cases: ClosedFam4 -} TcRnAbstractClosedTyFamDecl :: TcRnMessage {-| TcRnPartialFieldSelector is a warning indicating that a record field was not defined for all constructors of a data type. Test cases: DRFPartialFields, T7169 -} TcRnPartialFieldSelector :: !FieldLabel -- ^ The selector -> TcRnMessage {-| TcRnHasFieldResolvedIncomplete is a warning triggered when a HasField constraint is resolved for a record field for which a `getField @"field"` application might not be successful. Currently, this means that the warning is triggered when the parent data type of that record field does not have that field in all its constructors. 
Example(s): data T = T1 | T2 {x :: Bool} f :: HasField t "x" Bool => t -> Bool f = getField @"x" g :: T -> Bool g = f Test cases: TcIncompleteRecSel -} TcRnHasFieldResolvedIncomplete :: !Name -> TcRnMessage {-| TcRnBadFieldAnnotation is an error/warning group indicating that a strictness/unpack related data type field annotation is invalid. -} TcRnBadFieldAnnotation :: !Int -- ^ The index of the field -> !DataCon -- ^ The constructor in which the field is defined -> !BadFieldAnnotationReason -- ^ The error specifics -> TcRnMessage {-| TcRnSuperclassCycle is an error indicating that a class has a superclass cycle. Test cases: mod40, tcfail027, tcfail213, tcfail216, tcfail217, T9415, T9739 -} TcRnSuperclassCycle :: !SuperclassCycle -- ^ The details of the cycle -> TcRnMessage {-| TcRnDefaultSigMismatch is an error indicating that a default method signature doesn't match the regular method signature. Test cases: T7437, T12918a, T12918b, T12151 -} TcRnDefaultSigMismatch :: !Id -- ^ The name of the method -> !Type -- ^ The type of the default signature -> TcRnMessage {-| TcRnTyFamsDisabled is an error indicating that a type family or instance was declared while the extension TypeFamilies was disabled. Test cases: TyFamsDisabled -} TcRnTyFamsDisabled :: !TyFamsDisabledReason -- ^ The name of the family or instance -> TcRnMessage {-| TcRnBadTyConTelescope is an error caused by an ill-scoped 'TyCon' kind, due to type variables being out of dependency order. Example: class C a (b :: Proxy a) (c :: Proxy b) where type T c a Test cases: BadTelescope{∅,3,4} T14066{f,g} T14887 T15591{b,c} T15743{c,d} T15764 T23252 -} TcRnBadTyConTelescope :: !TyCon -> TcRnMessage {-| TcRnTyFamResultDisabled is an error indicating that a result variable was used on a type family while the extension TypeFamilyDependencies was disabled. 
Test cases: T13571, T13571a -} TcRnTyFamResultDisabled :: !Name -- ^ The name of the type family -> !(LHsTyVarBndr () GhcRn) -- ^ Name of the result variable -> TcRnMessage {-| TcRnRoleValidationFailed is an error indicating that a variable was assigned an invalid role by the inference algorithm. This is only performed with -dcore-lint. -} TcRnRoleValidationFailed :: !Role -- ^ The validated role -> !RoleValidationFailedReason -- ^ The failure reason -> TcRnMessage {-| TcRnCommonFieldResultTypeMismatch is an error indicating that a sum type declares the same field name in multiple constructors, but the constructors' result types differ. Test cases: CommonFieldResultTypeMismatch -} TcRnCommonFieldResultTypeMismatch :: !DataCon -- ^ First constructor -> !DataCon -- ^ Second constructor -> !FieldLabelString -- ^ Field name -> TcRnMessage {-| TcRnCommonFieldTypeMismatch is an error indicating that a sum type declares the same field name in multiple constructors, but their types differ. Test cases: CommonFieldTypeMismatch -} TcRnCommonFieldTypeMismatch :: !DataCon -- ^ First constructor -> !DataCon -- ^ Second constructor -> !FieldLabelString -- ^ Field name -> TcRnMessage {-| TcRnClassExtensionDisabled is an error indicating that a class was declared with an extension feature while the extension was disabled. -} TcRnClassExtensionDisabled :: !Class -- ^ The class -> !DisabledClassExtension -- ^ The extension -> TcRnMessage {-| TcRnDataConParentTypeMismatch is an error indicating that a data constructor was declared with a type that doesn't match its type constructor (i.e. a GADT result type and its data name). Test cases: T7175, T13300, T14719, T18357, T18357b, gadt11, tcfail155, tcfail176 -} TcRnDataConParentTypeMismatch :: !DataCon -- ^ The data constructor -> !Type -- ^ The parent type -> TcRnMessage {-| TcRnGADTsDisabled is an error indicating that a GADT was declared while the extension GADTs was disabled. 
Test cases: ghci057, T9293 -} TcRnGADTsDisabled :: !Name -- ^ The name of the GADT -> TcRnMessage {-| TcRnExistentialQuantificationDisabled is an error indicating that a data constructor was declared with existential features while the extension ExistentialQuantification was disabled. Test cases: ghci057, T9293, gadtSyntaxFail001, gadtSyntaxFail002, gadtSyntaxFail003, prog006, rnfail053, T12083a -} TcRnExistentialQuantificationDisabled :: !DataCon -- ^ The constructor -> TcRnMessage {-| TcRnGADTDataContext is an error indicating that a GADT was declared with a data type context. This error is emitted in the tc, but it is also caught in the renamer. -} TcRnGADTDataContext :: !Name -- ^ The data type name -> TcRnMessage {-| TcRnMultipleConForNewtype is an error indicating that a newtype was declared with multiple constructors. This error is caught by the parser. -} TcRnMultipleConForNewtype :: !Name -- ^ The newtype name -> !Int -- ^ The number of constructors -> TcRnMessage {-| TcRnKindSignaturesDisabled is an error indicating that a kind signature was used in a data type declaration while the extension KindSignatures was disabled. Test cases: T20873c, readFail036 -} TcRnKindSignaturesDisabled :: !(Either (HsType GhcPs) (Name, HsType GhcRn)) -- ^ The data type name -> TcRnMessage {-| TcRnEmptyDataDeclsDisabled is an error indicating that a data type was declared with no constructors while the extension EmptyDataDecls was disabled. Test cases: readFail035 -} TcRnEmptyDataDeclsDisabled :: !Name -- ^ The data type name -> TcRnMessage {-| TcRnRoleMismatch is an error indicating that the role specified in an annotation differs from its inferred role. Test cases: T7253, Roles11 -} TcRnRoleMismatch :: !Name -- ^ The type variable -> !Role -- ^ The annotated role -> !Role -- ^ The inferred role -> TcRnMessage {-| TcRnRoleCountMismatch is an error indicating that the number of roles in an annotation doesn't match the number of type parameters. 
Test cases: Roles6 -} TcRnRoleCountMismatch :: !Int -- ^ The number of type variables -> !(LRoleAnnotDecl GhcRn) -- ^ The role annotation -> TcRnMessage {-| TcRnIllegalRoleAnnotation is an error indicating that a role annotation was attached to a decl that doesn't allow it. Test cases: Roles5 -} TcRnIllegalRoleAnnotation :: !(RoleAnnotDecl GhcRn) -- ^ The role annotation -> TcRnMessage {-| TcRnRoleAnnotationsDisabled is an error indicating that a role annotation was declared while the extension RoleAnnotations was disabled. Test cases: Roles5, TH_Roles1 -} TcRnRoleAnnotationsDisabled :: !TyCon -- ^ The annotated type -> TcRnMessage {-| TcRnIncoherentRoles is an error indicating that a role annotation for a class parameter was declared as not nominal. Test cases: T8773 -} TcRnIncoherentRoles :: !TyCon -- ^ The class tycon -> TcRnMessage {-| TcRnPrecedenceParsingError is an error caused by attempting to use operators with the same precedence in one infix expression. Example: eq :: (a ~ b ~ c) :~: () Test cases: module/mod61 parser/should_fail/readFail016 rename/should_fail/rnfail017 rename/should_fail/T9077 typecheck/should_fail/T18252a -} TcRnPrecedenceParsingError :: (OpName, Fixity) -- ^ first operator's name and fixity -> (OpName, Fixity) -- ^ second operator's name and fixity -> TcRnMessage {-| TcRnSectionPrecedenceError is an error caused by attempting to use an operator with higher precedence than the operand. Example: k = (-3 **) where (**) = const infixl 7 ** Test cases: overloadedrecflds/should_fail/T13132_duplicaterecflds parser/should_fail/readFail023 rename/should_fail/rnfail019 th/TH_unresolvedInfix2 -} TcRnSectionPrecedenceError :: (OpName, Fixity) -- ^ first operator's name and fixity -> (OpName, Fixity) -- ^ argument operator -> HsExpr GhcPs -- ^ Section -> TcRnMessage {-| TcRnTypeSynonymCycle is an error indicating that a cycle between type synonyms has occurred. 
Test cases: mod27, ghc-e-fail2, bkpfail29 -} TcRnTypeSynonymCycle :: !TySynCycleTyCons -- ^ The tycons involved in the cycle -> TcRnMessage {-| TcRnSelfImport is an error indicating that a module contains an import of itself. Test cases: T9032 -} TcRnSelfImport :: !ModuleName -- ^ The module -> TcRnMessage {-| TcRnNoExplicitImportList is a warning indicating that an import statement did not include an explicit import list. Test cases: T1789, T4489 -} TcRnNoExplicitImportList :: !ModuleName -- ^ The imported module -> TcRnMessage {-| TcRnSafeImportsDisabled is an error indicating that an import was declared using the @safe@ keyword while SafeHaskell wasn't active. Test cases: Mixed01 -} TcRnSafeImportsDisabled :: !ModuleName -- ^ The imported module -> TcRnMessage {-| TcRnDeprecatedModule is a warning indicating that an imported module is annotated with a warning or deprecation pragma. Test cases: DeprU -} TcRnDeprecatedModule :: !ModuleName -- ^ The imported module -> !(WarningTxt GhcRn) -- ^ The pragma data -> TcRnMessage {-| TcRnRedundantSourceImport is a warning indicating that a {-# SOURCE #-} import was used when there is no import cycle. Test cases: none -} TcRnRedundantSourceImport :: !ModuleName -- ^ The imported module -> TcRnMessage {-| TcRnImportLookup is a group of errors about bad imported names. -} TcRnImportLookup :: !ImportLookupReason -- ^ Details about the error -> TcRnMessage {-| TcRnUnusedImport is a group of errors about unused imports. -} TcRnUnusedImport :: !(ImportDecl GhcRn) -- ^ The import -> !UnusedImportReason -- ^ Details about the error -> TcRnMessage {-| TcRnDuplicateDecls is an error indicating that the same name was used for multiple declarations. 
Test cases: FieldSelectors, overloadedrecfldsfail03, T17965, NFSDuplicate, T9975a, TDMultiple01, mod19, mod38, mod21, mod66, mod20, TDPunning, mod18, mod22, TDMultiple02, T4127a, ghci048, T8932, rnfail015, rnfail010, rnfail011, rnfail013, rnfail002, rnfail003, rn_dup, rnfail009, T7164, rnfail043, TH_dupdecl, rnfail012 -} TcRnDuplicateDecls :: !OccName -- ^ The name of the declarations -> !(NE.NonEmpty Name) -- ^ The individual declarations -> TcRnMessage {-| TcRnPackageImportsDisabled is an error indicating that an import uses a package qualifier while the extension PackageImports was disabled. Test cases: PackageImportsDisabled -} TcRnPackageImportsDisabled :: TcRnMessage {-| TcRnIllegalDataCon is an error indicating that a data constructor was defined using a lowercase name, or a symbolic name in prefix position. Mostly caught by PsErrNotADataCon. Test cases: None -} TcRnIllegalDataCon :: !RdrName -- ^ The constructor name -> TcRnMessage {-| TcRnNestedForallsContexts is an error indicating that multiple foralls or contexts are nested/curried where this is not supported, like @∀ x. ∀ y.@ instead of @∀ x y.@. Test cases: T12087, T14320, T16114, T16394, T16427, T18191, T18240a, T18240b, T18455, T5951 -} TcRnNestedForallsContexts :: !NestedForallsContextsIn -> TcRnMessage {-| TcRnRedundantRecordWildcard is a warning indicating that a pattern uses a record wildcard even though all of the record's fields are bound explicitly. Test cases: T15957_Fail -} TcRnRedundantRecordWildcard :: TcRnMessage {-| TcRnUnusedRecordWildcard is a warning indicating that a pattern uses a record wildcard while none of the fields bound by it are used. Test cases: T15957_Fail -} TcRnUnusedRecordWildcard :: ![Name] -- ^ The names bound by the wildcard -> TcRnMessage {-| TcRnUnusedName is a warning indicating that a defined or imported name is not used in the module. 
Test cases: ds053, mc10, overloadedrecfldsfail05, overloadedrecfldsfail06, prog018, read014, rn040, rn041, rn047, rn063, T13839, T13839a, T13919, T17171b, T17a, T17b, T17d, T17e, T18470, T1972, t22391, t22391j, T2497, T3371, T3449, T7145b, T7336, TH_recover_warns, unused_haddock, WarningGroups, werror -} TcRnUnusedName :: !OccName -- ^ The unused name -> !UnusedNameProv -- ^ The provenance of the name -> TcRnMessage {-| TcRnQualifiedBinder is an error indicating that a qualified name was used in binding position. Test cases: mod62, rnfail021, rnfail034, rnfail039, rnfail046 -} TcRnQualifiedBinder :: !RdrName -- ^ The name used as a binder -> TcRnMessage {-| TcRnTypeApplicationsDisabled is an error indicating that a type application was used while the extension TypeApplications was disabled. Test cases: T12411, T12446, T15527, T16133, T18251c -} TcRnTypeApplicationsDisabled :: !TypeApplication -- ^ what kind of type application is it? -> TcRnMessage {-| TcRnInvalidRecordField is an error indicating that a record field was used that doesn't exist in a constructor. Test cases: T13644, T13847, T17469, T8448, T8570, tcfail083, tcfail084 -} TcRnInvalidRecordField :: !Name -- ^ The constructor name -> !FieldLabelString -- ^ The name of the field -> TcRnMessage {-| TcRnTupleTooLarge is an error indicating that the arity of a tuple exceeds mAX_TUPLE_SIZE. Test cases: T18723a, T18723b, T18723c, T6148a, T6148b, T6148c, T6148d -} TcRnTupleTooLarge :: !Int -- ^ The arity of the tuple -> TcRnMessage {-| TcRnCTupleTooLarge is an error indicating that the arity of a constraint tuple exceeds mAX_CTUPLE_SIZE. Test cases: T10451 -} TcRnCTupleTooLarge :: !Int -- ^ The arity of the constraint tuple -> TcRnMessage {-| TcRnIllegalInferredTyVars is an error indicating that some type variables were quantified as inferred (like @∀ {a}.@) in a place where this is not allowed, like in an instance declaration. 
Test cases: ExplicitSpecificity5, ExplicitSpecificity6, ExplicitSpecificity8, ExplicitSpecificity9 -} TcRnIllegalInferredTyVars :: !(NE.NonEmpty (HsTyVarBndr Specificity GhcPs)) -- ^ The offending type variables -> TcRnMessage {-| TcRnAmbiguousName is an error indicating that an unbound name might refer to multiple names in scope. Test cases: BootFldReexport, DRFUnused, duplicaterecfldsghci01, GHCiDRF, mod110, mod151, mod152, mod153, mod164, mod165, NoFieldSelectorsFail, overloadedrecfldsfail02, overloadedrecfldsfail04, overloadedrecfldsfail11, overloadedrecfldsfail12, overloadedrecfldsfail13, overloadedrecfldswasrunnowfail06, rnfail044, T11167_ambig, T11167_ambiguous_fixity, T13132_duplicaterecflds, T15487, T16745, T17420, T18999_NoDisambiguateRecordFields, T19397E1, T19397E2, T23010_fail, tcfail037 -} TcRnAmbiguousName :: !GlobalRdrEnv -> !RdrName -- ^ The name -> !(NE.NonEmpty GlobalRdrElt) -- ^ The possible matches -> TcRnMessage {-| TcRnBindingNameConflict is an error indicating that multiple local or top-level bindings have the same name. Test cases: dsrun006, mdofail002, mdofail003, mod23, mod24, qq006, rnfail001, rnfail004, SimpleFail6, T14114, T16110_Fail1, tcfail038, TH_spliceD1, T22478b, TyAppPat_NonlinearMultiAppPat, TyAppPat_NonlinearMultiPat, TyAppPat_NonlinearSinglePat, -} TcRnBindingNameConflict :: !RdrName -- ^ The conflicting name -> !(NE.NonEmpty SrcSpan) -- ^ The locations of the duplicates -> TcRnMessage {-| TcRnNonCanonicalDefinition is a warning indicating that an instance defines an implementation for a method that should not be defined in a way that deviates from its default implementation, for example because it has been scheduled to be absorbed into another method, like @pure@ making @return@ obsolete. 
Test cases: WCompatWarningsOn, WCompatWarningsOff, WCompatWarningsOnOff -} TcRnNonCanonicalDefinition :: !NonCanonicalDefinition -- ^ Specifics -> !(LHsSigType GhcRn) -- ^ The instance type -> TcRnMessage {-| TcRnImplicitImportOfPrelude is a warning, controlled by @Wimplicit-prelude@, that is triggered upon an implicit import of the @Prelude@ module. Example: {-# OPTIONS_GHC -fwarn-implicit-prelude #-} module M where {} Test case: rn055 -} TcRnImplicitImportOfPrelude :: TcRnMessage {-| TcRnMissingMain is an error that occurs when a Main module does not define a main function (named @main@ by default, but overridable with the @main-is@ command line flag). Example: module Main where {} Test cases: T414, T7765, readFail021, rnfail007, T13839b, T17171a, T16453E1, tcfail030, T19397E3, T19397E4 -} TcRnMissingMain :: !Bool -- ^ whether the module has an explicit export list -> !Module -> !OccName -- ^ the expected name of the main function -> TcRnMessage {-| TcRnGhciUnliftedBind is an error that occurs when a user attempts to bind an unlifted value in GHCi. Example (in GHCi): let a = (# 1#, 3# #) Test cases: T9140, T19035b -} TcRnGhciUnliftedBind :: !Id -> TcRnMessage {-| TcRnGhciMonadLookupFail is an error that occurs when the user sets the GHCi monad, using the GHC API 'setGHCiMonad' function, but GHC can't find which monad the user is referring to. Example: import GHC ( setGHCiMonad ) ... setGHCiMonad "NoSuchThing" Test cases: none -} TcRnGhciMonadLookupFail :: String -- ^ the textual name of the monad requested by the user -> Maybe [GlobalRdrElt] -- ^ lookup result -> TcRnMessage {-| TcRnMissingRoleAnnotation is a warning that occurs when type declaration doesn't have a role annotatiosn Controlled by flags: - Wmissing-role-annotations Test cases: T22702 -} TcRnMissingRoleAnnotation :: Name -> [Role] -> TcRnMessage {-| TcRnPatersonCondFailure is an error that occurs when an instance declaration fails to conform to the Paterson conditions. 
Which particular condition fails depends on the constructor of PatersonCondFailure See Note [Paterson conditions]. Test cases: T15231, tcfail157, T15316, T19187a, fd-loop, tcfail108, tcfail154, T15172, tcfail214 -} TcRnPatersonCondFailure :: PatersonCondFailure -- ^ the failed Paterson Condition -> PatersonCondFailureContext -> Type -- ^ the LHS -> Type -- ^ the RHS -> TcRnMessage {-| TcRnImplicitRhsQuantification is a warning that occurs when GHC implicitly quantifies over a type variable that occurs free on the RHS of the type declaration that is not mentioned on the LHS Example: type T = 'Nothing :: Maybe a Controlled by flags: - Wimplicit-rhs-quantification Test cases: T23510a T23510b -} TcRnImplicitRhsQuantification :: LocatedN RdrName -> TcRnMessage {-| TcRnIllformedTypePattern is an error raised when the pattern corresponding to a required type argument (visible forall) does not have a form that can be interpreted as a type pattern. Example: vfun :: forall (a :: k) -> () vfun !x = () -- ^^ -- bang-patterns not allowed as type patterns Test cases: T22326_fail_bang_pat -} TcRnIllformedTypePattern :: !(Pat GhcRn) -> TcRnMessage {-| TcRnIllegalTypePattern is an error raised when a pattern constructed with the @type@ keyword occurs in a position that does not correspond to a required type argument (visible forall). Example: case x of (type _) -> True -- the (type _) pattern is illegal here _ -> False Test cases: T22326_fail_ado T22326_fail_caseof -} TcRnIllegalTypePattern :: TcRnMessage {-| TcRnIllformedTypeArgument is an error raised when an argument that specifies a required type argument (instantiates a visible forall) does not have a form that can be interpreted as a type argument. 
Example: vfun :: forall (a :: k) -> () x = vfun (\_ -> _) -- ^^^^^^^^^ -- lambdas not allowed in type arguments Test cases: T22326_fail_lam_arg -} TcRnIllformedTypeArgument :: !(LHsExpr GhcRn) -> TcRnMessage {- TcRnIllegalTypeExpr is an error raised when an expression constructed with type syntax (@type@, @->@, @=>@, @forall@) occurs in a position that doesn't correspond to required type argument (visible forall). Examples: -- Not a function argument: xtop1 = type Int xtop2 = (Int -> Int) xtop3 = (forall a. a) xtop4 = ((Show Int, Eq Bool) => Unit) -- The function does not expect a type argument: xarg1 = length (type Int) xarg2 = show (Int -> Int) Test cases: T22326_fail_app T22326_fail_top T24159_type_syntax_tc_fail -} TcRnIllegalTypeExpr :: TypeSyntax -> TcRnMessage {-| TcRnInvalidDefaultedTyVar is an error raised when a defaulting plugin proposes to default a type variable that is not an unfilled metavariable Test cases: T23832_invalid -} TcRnInvalidDefaultedTyVar :: ![Ct] -- ^ The constraints passed to the plugin -> [(TcTyVar, Type)] -- ^ The plugin-proposed type variable defaults -> NE.NonEmpty TcTyVar -- ^ The invalid type variables of the proposal -> TcRnMessage {-| TcRnNamespacedWarningPragmaWithoutFlag is an error that occurs when a namespace specifier is used in {-# WARNING ... #-} or {-# DEPRECATED ... 
#-} pragmas without the -XExplicitNamespaces extension enabled Example: {-# LANGUAGE NoExplicitNamespaces #-} f = id {-# WARNING data f "some warning message" #-} Test cases: T24396c -} TcRnNamespacedWarningPragmaWithoutFlag :: WarnDecl GhcPs -> TcRnMessage {-| TcRnInvisPatWithNoForAll is an error raised when invisible type pattern is used without associated `forall` in types Examples: f :: Int f @t = 5 g :: [a -> a] g = [\ @t x -> x :: t] Test cases: T17694c T17594d -} TcRnInvisPatWithNoForAll :: HsTyPat GhcRn -> TcRnMessage {-| TcRnIllegalInvisibleTypePattern is an error raised when invisible type pattern is used without the TypeAbstractions extension enabled Example: {-# LANGUAGE NoTypeAbstractions #-} id :: a -> a id @t x = x Test cases: T17694b -} TcRnIllegalInvisibleTypePattern :: HsTyPat GhcPs -> TcRnMessage {-| TcRnNamespacedFixitySigWithoutFlag is an error that occurs when a namespace specifier is used in fixity signatures without the -XExplicitNamespaces extension enabled Example: {-# LANGUAGE NoExplicitNamespaces #-} f = const infixl 7 data `f` Test cases: T14032c -} TcRnNamespacedFixitySigWithoutFlag :: FixitySig GhcPs -> TcRnMessage {-| TcRnDefaultedExceptionContext is a warning that is triggered when the backward-compatibility logic solving for implicit ExceptionContext constraints fires. Test cases: DefaultExceptionContext -} TcRnDefaultedExceptionContext :: CtLoc -> TcRnMessage {-| TcRnOutOfArityTyVar is an error raised when the arity of a type synonym (as determined by the SAKS and the LHS) is insufficiently high to accommodate an implicit binding for a free variable that occurs in the outermost kind signature on the RHS of the said type synonym. Example: type SynBad :: forall k. k -> Type type SynBad = Proxy :: j -> Type Test cases: T24770a -} TcRnOutOfArityTyVar :: Name -- ^ Type synonym's name -> Name -- ^ Type variable's name -> TcRnMessage {- TcRnMisplacedInvisPat is an error raised when invisible @-pattern appears in invalid context (e.g. 
pattern in case of or in do-notation) or nested inside the pattern. Template Haskell seems to be the only source for this diagnostic. Examples: f (smth, $(invisP (varT (newName "blah")))) = ... g = do $(invisP (varT (newName "blah"))) <- aciton1 ... Test cases: T24557a T24557b T24557c T24557d -} TcRnMisplacedInvisPat :: HsTyPat GhcPs -> TcRnMessage {- TcRnUnexpectedTypeSyntaxInTerms is an error that occurs when type syntax is used in terms without -XRequiredTypeArguments extension enabled Examples: idVis (forall a. forall b -> (a ~ Int, b ~ Bool) => a -> b) Test cases: T24159_type_syntax_rn_fail -} TcRnUnexpectedTypeSyntaxInTerms :: TypeSyntax -> TcRnMessage deriving Generic ---- data ZonkerMessage where {-| ZonkerCannotDefaultConcrete is an error occurring when a concrete type variable cannot be defaulted. Test cases: T23153 -} ZonkerCannotDefaultConcrete :: !FixedRuntimeRepOrigin -> ZonkerMessage deriving Generic ---- -- | Things forbidden in @type data@ declarations. -- See Note [Type data declarations] data TypeDataForbids = TypeDataForbidsDatatypeContexts | TypeDataForbidsLabelledFields | TypeDataForbidsStrictnessAnnotations | TypeDataForbidsDerivingClauses deriving Generic instance Outputable TypeDataForbids where ppr TypeDataForbidsDatatypeContexts = text "Data type contexts" ppr TypeDataForbidsLabelledFields = text "Labelled fields" ppr TypeDataForbidsStrictnessAnnotations = text "Strictness flags" ppr TypeDataForbidsDerivingClauses = text "Deriving clauses" -- | Specifies which back ends can handle a requested foreign import or export type ExpectedBackends = [Backend] -- | Specifies which calling convention is unsupported on the current platform data UnsupportedCallConvention = StdCallConvUnsupported | PrimCallConvUnsupported | JavaScriptCallConvUnsupported deriving Eq -- | Whether the error pertains to a function argument or a result. data ArgOrResult = Arg | Result -- | Which parts of a record field are affected by a particular error or warning. 
-- | Which part of a record-field declaration/use a diagnostic refers to.
data RecordFieldPart
  = RecordFieldDecl !Name
  | RecordFieldConstructor !Name
  | RecordFieldPattern !Name
  | RecordFieldUpdate

-- | Why did we reject a record update?
data BadRecordUpdateReason
  -- | No constructor has all of the required fields.
  = NoConstructorHasAllFields
      { conflictingFields :: [FieldLabelString] }
  -- | There are several possible parents which have all of the required fields,
  -- and we weren't able to disambiguate in any way.
  | MultiplePossibleParents (RecSelParent, RecSelParent, [RecSelParent])
      -- ^ The possible parents (at least 2)
  -- | We used type-directed disambiguation, but this resulted in
  -- an invalid parent (the type-directed parent is not among the
  -- parents we computed from the field labels alone).
  | InvalidTyConParent TyCon (NE.NonEmpty RecSelParent)
  deriving Generic

-- | Where a shadowed name comes from
data ShadowedNameProvenance
  = ShadowedNameProvenanceLocal !SrcLoc
      -- ^ The shadowed name is local to the module
  | ShadowedNameProvenanceGlobal [GlobalRdrElt]
      -- ^ The shadowed name is global, typically imported from elsewhere.

-- | In what context did we require a type to have a fixed runtime representation?
--
-- Used by 'GHC.Tc.Utils.TcMType.checkTypeHasFixedRuntimeRep' for throwing
-- representation polymorphism errors when validity checking.
--
-- See Note [Representation polymorphism checking] in GHC.Tc.Utils.Concrete
data FixedRuntimeRepProvenance
  -- | Data constructor fields must have a fixed runtime representation.
  --
  -- Tests: T11734, T18534.
  = FixedRuntimeRepDataConField
  -- | Pattern synonym signature arguments must have a fixed runtime representation.
  --
  -- Test: RepPolyPatSynArg.
  | FixedRuntimeRepPatSynSigArg
  -- | Pattern synonym signature scrutinee must have a fixed runtime representation.
  --
  -- Test: RepPolyPatSynRes.
  | FixedRuntimeRepPatSynSigRes

-- | Render a 'FixedRuntimeRepProvenance' as user-facing message text.
pprFixedRuntimeRepProvenance :: FixedRuntimeRepProvenance -> SDoc
pprFixedRuntimeRepProvenance FixedRuntimeRepDataConField = text "data constructor field"
pprFixedRuntimeRepProvenance FixedRuntimeRepPatSynSigArg = text "pattern synonym argument"
pprFixedRuntimeRepProvenance FixedRuntimeRepPatSynSigRes = text "pattern synonym scrutinee"

-- | Why the particular illegal newtype error arose together with more
-- information, if any.
data IllegalNewtypeReason
  = DoesNotHaveSingleField !Int
  | IsNonLinear
  | IsGADT
  | HasConstructorContext
  | HasExistentialTyVar
  | HasStrictnessAnnotation
  deriving Generic

-- | Why the particular injectivity error arose together with more information,
-- if any.
data InjectivityErrReason
  = InjErrRhsBareTyVar [Type]
  | InjErrRhsCannotBeATypeFam
  | InjErrRhsOverlap
  | InjErrCannotInferFromRhs !TyVarSet !HasKinds !SuggestUndecidableInstances

-- | Whether a message should mention kinds as well as types.
data HasKinds
  = YesHasKinds
  | NoHasKinds
  deriving (Show, Eq)

-- | Convert a 'Bool' into the corresponding 'HasKinds' flag.
hasKinds :: Bool -> HasKinds
hasKinds True = YesHasKinds
hasKinds False = NoHasKinds

-- | Whether to suggest enabling @UndecidableInstances@ in a diagnostic.
-- (NB: the constructor names carry a historical misspelling, "Instaces",
-- kept for source compatibility.)
data SuggestUndecidableInstances
  = YesSuggestUndecidableInstaces
  | NoSuggestUndecidableInstaces
  deriving (Show, Eq)

-- | Convert a 'Bool' into the corresponding 'SuggestUndecidableInstances' flag.
suggestUndecidableInstances :: Bool -> SuggestUndecidableInstances
suggestUndecidableInstances True = YesSuggestUndecidableInstaces
suggestUndecidableInstances False = NoSuggestUndecidableInstaces

-- | Which "unlifted" extension to suggest in a diagnostic:
-- @UnliftedNewtypes@ or @UnliftedDatatypes@.
data SuggestUnliftedTypes
  = SuggestUnliftedNewtypes
  | SuggestUnliftedDatatypes

-- | A description of whether something is a
--
-- * @data@ or @newtype@ ('DataDeclSort')
--
-- * @data instance@ or @newtype instance@ ('DataInstanceSort')
--
-- * @data family@ ('DataFamilySort')
--
-- At present, this data type is only consumed by 'checkDataKindSig'.
data DataSort = DataDeclSort NewOrData | DataInstanceSort NewOrData | DataFamilySort ppDataSort :: DataSort -> SDoc ppDataSort data_sort = text $ case data_sort of DataDeclSort DataType -> "Data type" DataDeclSort NewType -> "Newtype" DataInstanceSort DataType -> "Data instance" DataInstanceSort NewType -> "Newtype instance" DataFamilySort -> "Data family" -- | Helper type used in 'checkDataKindSig'. -- -- Superficially similar to 'ContextKind', but it lacks 'AnyKind' -- and 'AnyBoxedKind', and instead of @'TheKind' liftedTypeKind@ -- provides 'LiftedKind', which is much simpler to match on and -- handle in 'isAllowedDataResKind'. data AllowedDataResKind = AnyTYPEKind | AnyBoxedKind | LiftedKind -- | A data type to describe why a variable is not closed. -- See Note [Not-closed error messages] in GHC.Tc.Gen.Expr data NotClosedReason = NotLetBoundReason | NotTypeClosed VarSet | NotClosed Name NotClosedReason data SuggestPartialTypeSignatures = YesSuggestPartialTypeSignatures | NoSuggestPartialTypeSignatures deriving (Show, Eq) suggestPartialTypeSignatures :: Bool -> SuggestPartialTypeSignatures suggestPartialTypeSignatures True = YesSuggestPartialTypeSignatures suggestPartialTypeSignatures False = NoSuggestPartialTypeSignatures data UsingGeneralizedNewtypeDeriving = YesGeneralizedNewtypeDeriving | NoGeneralizedNewtypeDeriving deriving Eq usingGeneralizedNewtypeDeriving :: Bool -> UsingGeneralizedNewtypeDeriving usingGeneralizedNewtypeDeriving True = YesGeneralizedNewtypeDeriving usingGeneralizedNewtypeDeriving False = NoGeneralizedNewtypeDeriving data DeriveAnyClassEnabled = YesDeriveAnyClassEnabled | NoDeriveAnyClassEnabled deriving Eq deriveAnyClassEnabled :: Bool -> DeriveAnyClassEnabled deriveAnyClassEnabled True = YesDeriveAnyClassEnabled deriveAnyClassEnabled False = NoDeriveAnyClassEnabled -- | Why a particular typeclass instance couldn't be derived. data DeriveInstanceErrReason = -- | The typeclass instance is not well-kinded. 
DerivErrNotWellKinded !TyCon -- ^ The type constructor that occurs in -- the typeclass instance declaration. !Kind -- ^ The typeclass kind. !Int -- ^ The number of typeclass arguments that GHC -- kept. See Note [tc_args and tycon arity] in -- GHC.Tc.Deriv. -- | Generic instances can only be derived using the stock strategy -- in Safe Haskell. | DerivErrSafeHaskellGenericInst | DerivErrDerivingViaWrongKind !Kind !Type !Kind | DerivErrNoEtaReduce !Type -- ^ The instance type -- | We cannot derive instances in boot files | DerivErrBootFileFound | DerivErrDataConsNotAllInScope !TyCon -- | We cannot use GND on non-newtype types | DerivErrGNDUsedOnData -- | We cannot derive instances of nullary classes | DerivErrNullaryClasses -- | Last arg must be newtype or data application | DerivErrLastArgMustBeApp | DerivErrNoFamilyInstance !TyCon [Type] | DerivErrNotStockDeriveable !DeriveAnyClassEnabled | DerivErrHasAssociatedDatatypes !HasAssociatedDataFamInsts !AssociatedTyLastVarInKind !AssociatedTyNotParamOverLastTyVar | DerivErrNewtypeNonDeriveableClass | DerivErrCannotEtaReduceEnough !Bool -- Is eta-reduction OK? | DerivErrOnlyAnyClassDeriveable !TyCon -- ^ Type constructor for which the instance -- is requested !DeriveAnyClassEnabled -- ^ Whether or not -XDeriveAnyClass is enabled -- already. -- | Stock deriving won't work, but perhaps DeriveAnyClass will. | DerivErrNotDeriveable !DeriveAnyClassEnabled -- | The given 'PredType' is not a class. | DerivErrNotAClass !PredType -- | The given (representation of the) 'TyCon' has no -- data constructors. | DerivErrNoConstructors !TyCon | DerivErrLangExtRequired !LangExt.Extension -- | GHC simply doesn't how to how derive the input 'Class' for the given -- 'Type'. | DerivErrDunnoHowToDeriveForType !Type -- | The given 'TyCon' must be an enumeration. -- See Note [Enumeration types] in GHC.Core.TyCon | DerivErrMustBeEnumType !TyCon -- | The given 'TyCon' must have /precisely/ one constructor. 
| DerivErrMustHaveExactlyOneConstructor !TyCon -- | The given data type must have some parameters. | DerivErrMustHaveSomeParameters !TyCon -- | The given data type must not have a class context. | DerivErrMustNotHaveClassContext !TyCon !ThetaType -- | We couldn't derive an instance for a particular data constructor -- for a variety of reasons. | DerivErrBadConstructor !(Maybe HasWildcard) [DeriveInstanceBadConstructor] -- | We couldn't derive a 'Generic' instance for the given type for a -- variety of reasons | DerivErrGenerics [DeriveGenericsErrReason] -- | We couldn't derive an instance either because the type was not an -- enum type or because it did have more than one constructor. | DerivErrEnumOrProduct !DeriveInstanceErrReason !DeriveInstanceErrReason deriving Generic data DeriveInstanceBadConstructor = -- | The given 'DataCon' must be truly polymorphic in the -- last argument of the data type. DerivErrBadConExistential !DataCon -- | The given 'DataCon' must not use the type variable in a function argument" | DerivErrBadConCovariant !DataCon -- | The given 'DataCon' must not contain function types | DerivErrBadConFunTypes !DataCon -- | The given 'DataCon' must use the type variable only -- as the last argument of a data type | DerivErrBadConWrongArg !DataCon -- | The given 'DataCon' is a GADT so we cannot directly -- derive an istance for it. | DerivErrBadConIsGADT !DataCon -- | The given 'DataCon' has existentials type vars in its type. | DerivErrBadConHasExistentials !DataCon -- | The given 'DataCon' has constraints in its type. | DerivErrBadConHasConstraints !DataCon -- | The given 'DataCon' has a higher-rank type. | DerivErrBadConHasHigherRankType !DataCon data DeriveGenericsErrReason = -- | The type must not have some datatype context. DerivErrGenericsMustNotHaveDatatypeContext !TyCon -- | The data constructor must not have exotic unlifted -- or polymorphic arguments. 
| DerivErrGenericsMustNotHaveExoticArgs !DataCon -- | The data constructor must be a vanilla constructor. | DerivErrGenericsMustBeVanillaDataCon !DataCon -- | The type must have some type parameters. -- check (d) from Note [Requirements for deriving Generic and Rep] -- in GHC.Tc.Deriv.Generics. | DerivErrGenericsMustHaveSomeTypeParams !TyCon -- | The data constructor must not have existential arguments. | DerivErrGenericsMustNotHaveExistentials !DataCon -- | The derivation applies a type to an argument involving -- the last parameter but the applied type is not of kind * -> *. | DerivErrGenericsWrongArgKind !DataCon data HasWildcard = YesHasWildcard | NoHasWildcard deriving Eq hasWildcard :: Bool -> HasWildcard hasWildcard True = YesHasWildcard hasWildcard False = NoHasWildcard -- | A context in which we don't allow anonymous wildcards. data BadAnonWildcardContext = WildcardNotLastInConstraint | ExtraConstraintWildcardNotAllowed SoleExtraConstraintWildcardAllowed | WildcardsNotAllowedAtAll -- See Note [Wildcard binders in disallowed contexts] in GHC.Hs.Type | WildcardBndrInForallTelescope | WildcardBndrInTyFamResultVar -- | Whether a sole extra-constraint wildcard is allowed, -- e.g. @_ => ..@ as opposed to @( .., _ ) => ..@. data SoleExtraConstraintWildcardAllowed = SoleExtraConstraintWildcardNotAllowed | SoleExtraConstraintWildcardAllowed -- | Why is a class instance head invalid? data IllegalInstanceHeadReason -- | An instance for an abstract class from an hs-boot or Backpack -- hsig file. -- -- Example: -- -- -- A.hs-boot -- module A where -- class C a -- -- -- B.hs -- module B where -- import {-# SOURCE #-} A -- instance C Int where -- -- -- A.hs -- module A where -- import B -- class C a where -- f :: a -- -- Test cases: typecheck/should_fail/T13068 = InstHeadAbstractClass !Class -- | An instance whose head is not a class. 
Example(s):
See Note [Oversaturated type family equations] in "GHC.Tc.Validity". Test cases: TyFamArity1, TyFamArity2, T11136, Overlap4, AssocTyDef05, AssocTyDef06, T14110 -} | FamilyArityMismatch !TyCon -- ^ The family tycon !Arity -- ^ The right number of parameters {-| A closed type family equation used a different name than the parent family. Example: type family F a where G Int = Bool Test cases: Overlap5, T15362, T16002, T20260, T11623 -} | TyFamNameMismatch !Name -- ^ The family name !Name -- ^ The name used in the equation -- | There are out-of-scope type variables in the right-hand side -- of an associated type or data family instance. -- -- Example: -- -- instance forall a. C Int where -- data instance D Int = MkD1 a -- -- Test cases: indexed-types/should_fail/T5515, polykinds/T9574, rename/should_fail/T18021 | FamInstRHSOutOfScopeTyVars !(Maybe (TyCon, [Type], TyVarSet)) -- ^ family 'TyCon', arguments, and set of "dodgy" type variables -- See Note [Dodgy binding sites in type family instances] -- in GHC.Tc.Validity !(NE.NonEmpty Name) -- ^ the out-of-scope type variables | FamInstLHSUnusedBoundTyVars !(NE.NonEmpty InvalidFamInstQTv) -- ^ the unused bound type variables | InvalidAssoc !InvalidAssoc deriving Generic -- | A quantified type variable in a type or data family equation that -- is either not bound in any LHS patterns or not used in the RHS (or both). data InvalidFamInstQTv = InvalidFamInstQTv { ifiqtv :: TcTyVar , ifiqtv_user_written :: Bool -- ^ Did the user write this type variable, or was introduced by GHC? -- For example: with @-XPolyKinds@, in @type instance forall a. F = ()@, -- we have a user-written @a@ but GHC introduces a kind variable @k@ -- as well. See #23734. , ifiqtv_reason :: InvalidFamInstQTvReason -- ^ For what reason was the quantified type variable invalid? } data InvalidFamInstQTvReason -- | A dodgy binder, i.e. a variable that syntactically appears in -- LHS patterns but only in non-injective positions. 
-- -- See Note [Dodgy binding sites in type family instances] -- in GHC.Tc.Validity. = InvalidFamInstQTvDodgy -- | A quantified type variable in a type or data family equation -- that is not bound in any LHS patterns. | InvalidFamInstQTvNotBoundInPats -- | A quantified type variable in a type or data family equation -- that is not used on the RHS. | InvalidFamInstQTvNotUsedInRHS -- The 'check_tvs' function in 'GHC.Tc.Validity.checkFamPatBinders' -- uses 'getSrcSpan', so this 'NamedThing' instance is convenient. instance NamedThing InvalidFamInstQTv where getName = getName . ifiqtv data InvalidAssoc -- | An invalid associated family instance. -- -- See t'InvalidAssocInstance'.Builder = InvalidAssocInstance !InvalidAssocInstance -- | An invalid associated family default declaration. -- -- See t'InvalidAssocDefault'. | InvalidAssocDefault !InvalidAssocDefault deriving Generic -- | The reason that an associated family instance was invalid. data InvalidAssocInstance -- | A class instance is missing its expected associated type/data instance. -- -- Test cases: deriving/should_compile/T14094 -- indexed-types/should_compile/Simple2 -- typecheck/should_compile/tc254 = AssocInstanceMissing !Name -- | A top-level instance for an associated family 'TyCon'. -- -- Example: -- -- class C a where { type T a } -- instance T Int = Bool -- -- Test case: indexed-types/should_fail/SimpleFail7 | AssocInstanceNotInAClass !TyCon -- | An associated type instance is provided for a class that doesn't have -- that associated type. -- -- Examples(s): -- $(do d <- instanceD (cxt []) (conT ''Eq `appT` conT ''Foo) -- [tySynInstD $ tySynEqn Nothing (conT ''Rep `appT` conT ''Foo) (conT ''Maybe)] -- return [d]) -- ======> -- instance Eq Foo where -- type Rep Foo = Maybe -- -- Test cases: th/T12387a | AssocNotInThisClass !Class !TyCon -- | An associated family instance does not mention any of the parent 'Class' -- 'TyVar's. 
-- -- Test cases: T2888, T9167, T12867 | AssocNoClassTyVar !Class !TyCon | AssocTyVarsDontMatch !ForAllTyFlag !TyCon -- ^ family 'TyCon' ![Type] -- ^ expected type arguments ![Type] -- ^ actual type arguments deriving Generic -- | The reason that an associated family default declaration was invalid. data InvalidAssocDefault -- | An associated family default declaration for something that isn't -- an associated family. = AssocDefaultNotAssoc !Name -- ^ 'Class' 'Name' !Name -- ^ 'TyCon' 'Name' -- | Multiple default declarations were given for an associated -- family instance. -- -- Test cases: none. | AssocMultipleDefaults !Name -- | Invalid arguments in an associated family instance. -- -- See t'AssocDefaultBadArgs'. | AssocDefaultBadArgs !TyCon ![Type] AssocDefaultBadArgs deriving Generic -- | Invalid arguments in an associated family instance declaration. data AssocDefaultBadArgs -- | An argument which isn't a type variable in an associated -- family instance default declaration. = AssocDefaultNonTyVarArg !(Type, ForAllTyFlag) -- | Duplicate occurrence of a type variable in an associated -- family instance default declaration. | AssocDefaultDuplicateTyVars !(NE.NonEmpty (TyCoVar, ForAllTyFlag)) deriving Generic -- | A type representing whether or not the input type has associated data family instances. data HasAssociatedDataFamInsts = YesHasAdfs | NoHasAdfs deriving Eq hasAssociatedDataFamInsts :: Bool -> HasAssociatedDataFamInsts hasAssociatedDataFamInsts True = YesHasAdfs hasAssociatedDataFamInsts False = NoHasAdfs -- | If 'YesAssocTyLastVarInKind', the associated type of a typeclass -- contains the last type variable of the class in a kind, which is not (yet) allowed -- by GHC. 
data AssociatedTyLastVarInKind
  = YesAssocTyLastVarInKind !TyCon -- ^ The associated type family of the class
  | NoAssocTyLastVarInKind
  deriving Eq

-- | Build the flag from the offending associated type family, if any
-- ('Just' means the last class variable occurs in a kind).
associatedTyLastVarInKind :: Maybe TyCon -> AssociatedTyLastVarInKind
associatedTyLastVarInKind (Just tc) = YesAssocTyLastVarInKind tc
associatedTyLastVarInKind Nothing = NoAssocTyLastVarInKind

-- | If 'YesAssociatedTyNotParamOverLastTyVar', the associated type of a
-- typeclass is not parameterized over the last type variable of the class
data AssociatedTyNotParamOverLastTyVar
  = YesAssociatedTyNotParamOverLastTyVar !TyCon -- ^ The associated type family of the class
  | NoAssociatedTyNotParamOverLastTyVar
  deriving Eq

-- | Build the flag from the offending associated type family, if any.
associatedTyNotParamOverLastTyVar :: Maybe TyCon -> AssociatedTyNotParamOverLastTyVar
associatedTyNotParamOverLastTyVar (Just tc) = YesAssociatedTyNotParamOverLastTyVar tc
associatedTyNotParamOverLastTyVar Nothing = NoAssociatedTyNotParamOverLastTyVar

-- | What kind of thing is missing a type signature?
--
-- Used for reporting @"missing signature"@ warnings, see
-- 'tcRnMissingSignature'.
data MissingSignature
  = MissingTopLevelBindingSig Name Type
  | MissingPatSynSig PatSyn
  | MissingTyConKindSig
      TyCon
      Bool -- ^ whether -XCUSKs is enabled

-- | Is the object we are dealing with exported or not?
--
-- Used for reporting @"missing signature"@ warnings, see
-- 'TcRnMissingSignature'.
data Exported
  = IsNotExported
  | IsExported
  deriving Eq

instance Outputable Exported where
  ppr IsNotExported = text "IsNotExported"
  ppr IsExported = text "IsExported"

-- | What declarations were not allowed in an hs-boot or hsig file?
data BadBootDecls
  = BootBindsPs      !(NE.NonEmpty (LHsBindLR GhcRn GhcPs))
  | BootBindsRn      !(NE.NonEmpty (LHsBindLR GhcRn GhcRn))
  | BootInstanceSigs !(NE.NonEmpty (LSig GhcRn))
  | BootFamInst      !TyCon
  | BootSpliceDecls  !(NE.NonEmpty (LocatedA (HsUntypedSplice GhcPs)))
  | BootForeignDecls !(NE.NonEmpty (LForeignDecl GhcRn))
  | BootDefaultDecls !(NE.NonEmpty (LDefaultDecl GhcRn))
  | BootRuleDecls    !(NE.NonEmpty (LRuleDecls GhcRn))

-- | A mismatch between an hs-boot or signature file and its implementing module.
data BootMismatch
  -- | Something defined or exported by an hs-boot or signature file
  -- is missing from the implementing module.
  = MissingBootThing !Name !MissingBootThing

  -- | A typeclass instance is declared in the hs-boot file but
  -- it is not present in the implementing module.
  | MissingBootInstance !DFunId -- ^ the boot instance 'DFunId'
    -- NB: we never trigger this for hsig files, as in that case we do
    -- a full round of constraint solving, and a missing instance gets reported
    -- as an unsolved Wanted constraint with a 'InstProvidedOrigin' 'CtOrigin'.
    -- See GHC.Tc.Utils.Backpack.check_inst.

  -- | A mismatch between an hsig file and its implementing module
  -- in the 'Name' that a particular re-export refers to.
  | BadReexportedBootThing !Name !Name

  -- | A mismatch between the declaration of something in the hs-boot or
  -- signature file and its implementation, e.g. a type mismatch or
  -- a type family implemented as a class.
  | BootMismatch
      !TyThing -- ^ boot thing
      !TyThing -- ^ real thing
      !BootMismatchWhat
  deriving Generic

-- | Something from the hs-boot or signature file is missing from the
-- implementing module.
data MissingBootThing
  -- | Something defined in the hs-boot or signature file is not defined in the
  -- implementing module.
  = MissingBootDefinition
  -- | Something exported by the hs-boot or signature file is not exported by the
  -- implementing module.
  | MissingBootExport
  deriving Generic

-- | Convenience wrapper: build the 'TcRnBootMismatch' diagnostic for a
-- missing definition or export.
missingBootThing :: HsBootOrSig -> Name -> MissingBootThing -> TcRnMessage
missingBootThing src nm thing = TcRnBootMismatch src (MissingBootThing nm thing)

-- | A mismatch of two 'TyThing's between an hs-boot or signature file
-- and its implementing module.
data BootMismatchWhat
  -- | The 'Id's have different types.
  = BootMismatchedIdTypes
      !Id -- ^ boot 'Id'
      !Id -- ^ real 'Id'
  -- | Two 'TyCon's aren't compatible.
  | BootMismatchedTyCons
      !TyCon -- ^ boot 'TyCon'
      !TyCon -- ^ real 'TyCon'
      !(NE.NonEmpty BootTyConMismatch)
  deriving Generic

-- | An error in the implementation of an abstract datatype using
-- a type synonym.
data SynAbstractDataError
  -- | The type synonym was not nullary.
  = SynAbsDataTySynNotNullary
  -- | The type synonym RHS contained invalid types, e.g.
  -- a type family or a forall.
  | SynAbstractDataInvalidRHS !(NE.NonEmpty Type)

-- | Mismatched implementation of a 'TyCon' in an hs-boot or signature file.
data BootTyConMismatch
  -- | The 'TyCon' kinds differ.
  = TyConKindMismatch
  -- | The 'TyCon' 'Role's aren't compatible.
  | TyConRoleMismatch !Bool -- ^ True <=> role subtype check
  -- | Two type synonyms have different RHSs.
  | TyConSynonymMismatch !Kind !Kind
  -- | The two 'TyCon's are of a different flavour, e.g. one is
  -- a data family and the other is a type family.
  | TyConFlavourMismatch !FamTyConFlav !FamTyConFlav
  -- | The equations of a type family don't match.
  | TyConAxiomMismatch !(BootListMismatches CoAxBranch BootAxiomBranchMismatch)
  -- | The type family injectivity annotations don't match.
  | TyConInjectivityMismatch
  -- | The 'TyCon's are both datatype 'TyCon's, but they have different 'DataCon's.
  | TyConMismatchedData !AlgTyConRhs !AlgTyConRhs !BootDataMismatch
  -- | The 'TyCon's are both 'Class' 'TyCon's, but the classes don't match.
  | TyConMismatchedClasses !Class !Class !BootClassMismatch
  -- | The 'TyCon's are something completely different.
  | TyConsVeryDifferent
  -- | An abstract 'TyCon' is implemented using a type synonym in an invalid
  -- manner. See 'SynAbstractDataError'.
  | SynAbstractData !SynAbstractDataError

-- | Utility datatype to record errors when checking compatibility
-- between two lists of things, e.g. class methods, associated types,
-- type family equations, etc.
data BootListMismatch item err
  -- | Different number of items.
  = MismatchedLength
  -- | The item at the given position in the list differs.
  | MismatchedThing !Int !item !item !err

type BootListMismatches item err = NE.NonEmpty (BootListMismatch item err)

data BootAxiomBranchMismatch
  -- | The quantified variables in an equation don't match.
  --
  -- Example: the quantification of @a@ in
  --
  -- @type family F a where { forall a. F a = Maybe a }@
  = MismatchedAxiomBinders
  -- | The LHSs of an equation don't match.
  | MismatchedAxiomLHS
  -- | The RHSs of an equation don't match.
  | MismatchedAxiomRHS

-- | A mismatch in a class, between its declaration in an hs-boot or signature
-- file, and its implementation in a source Haskell file.
data BootClassMismatch
  -- | The class methods don't match.
  = MismatchedMethods !(BootListMismatches ClassOpItem BootMethodMismatch)
  -- | The associated types don't match.
  | MismatchedATs !(BootListMismatches ClassATItem BootATMismatch)
  -- | The functional dependencies don't match.
  | MismatchedFunDeps
  -- | The superclasses don't match.
  | MismatchedSuperclasses
  -- | The @MINIMAL@ pragmas are not compatible.
  | MismatchedMinimalPragmas

-- | A mismatch in a class method, between its declaration in an hs-boot or signature
-- file, and its implementation in a source Haskell file.
data BootMethodMismatch
  -- | The class method names are different.
  = MismatchedMethodNames
  -- | The types of a class method are different.
  | MismatchedMethodTypes !Type !Type
  -- | The default method types are not compatible.
  | MismatchedDefaultMethods !Bool -- ^ True <=> subtype check

-- | A mismatch in an associated type of a class, between its declaration
-- in an hs-boot or signature file, and its implementation in a source Haskell file.
data BootATMismatch
  -- | Two associated types don't match.
  = MismatchedTyConAT !BootTyConMismatch
  -- | Two associated type defaults don't match.
  | MismatchedATDefaultType

-- | A mismatch in a datatype declaration, between an hs-boot file or signature
-- file and its implementing module.
data BootDataMismatch
  -- | A datatype is implemented as a newtype or vice-versa.
  = MismatchedNewtypeVsData
  -- | The constructors don't match.
  | MismatchedConstructors !(BootListMismatches DataCon BootDataConMismatch)
  -- | The datatype contexts differ.
  | MismatchedDatatypeContexts

-- | A mismatch in a data constructor, between its declaration in an hs-boot
-- file or signature file, and its implementation in a source Haskell module.
data BootDataConMismatch
  -- | The 'Name's of the 'DataCon's differ.
  = MismatchedDataConNames
  -- | The fixities of the 'DataCon's differ.
  | MismatchedDataConFixities
  -- | The strictness annotations of the 'DataCon's differ.
  | MismatchedDataConBangs
  -- | The 'DataCon's have different field labels.
  | MismatchedDataConFieldLabels
  -- | The 'DataCon's have incompatible types.
  | MismatchedDataConTypes

--------------------------------------------------------------------------------
--
--     Errors used in GHC.Tc.Errors
--
--------------------------------------------------------------------------------

{- Note [Error report]
~~~~~~~~~~~~~~~~~~~~~~
The idea is that error msgs are divided into three parts: the main msg, the
context block ("In the second argument of ..."), and the relevant bindings
block, which are displayed in that order, with a mark to divide them.  The
main msg ('report_important') varies depending on the error in question, but
context and relevant bindings are always the same, which should simplify
visual parsing.

See 'GHC.Tc.Errors.Types.SolverReport' and 'GHC.Tc.Errors.mkErrorReport'.
-}

-- | A collection of main error messages and supplementary information.
--
-- In practice, we will:
--   - display the important messages first,
--   - then the error context (e.g. by way of a call to 'GHC.Tc.Errors.mkErrorReport'),
--   - then the supplementary information (e.g. relevant bindings, valid hole fits),
--   - then the hints ("Possible fix: ...").
--
-- So this is mostly just a way of making sure that the error context appears
-- early on rather than at the end of the message.
--
-- See Note [Error report] for details.
data SolverReport
  = SolverReport
  { sr_important_msg :: SolverReportWithCtxt
  , sr_supplementary :: [SolverReportSupplementary]
  }

-- | Additional information to print in a 'SolverReport', after the
-- important messages and after the error context.
--
-- See Note [Error report].
data SolverReportSupplementary
  = SupplementaryBindings RelevantBindings
  | SupplementaryHoleFits ValidHoleFits
  | SupplementaryCts      [(PredType, RealSrcSpan)]

-- | A 'TcSolverReportMsg', together with context (e.g. enclosing implication constraints)
-- that are needed in order to report it.
data SolverReportWithCtxt
  = SolverReportWithCtxt
  { reportContext :: SolverReportErrCtxt
      -- ^ Context for what we wish to report.
      -- This can change as we enter implications, so is
      -- stored alongside the content.
  , reportContent :: TcSolverReportMsg
      -- ^ The content of the message to report.
  }
  deriving Generic

-- | Context needed when reporting a 'TcSolverReportMsg', such as
-- the enclosing implication constraints or whether we are deferring type errors.
data SolverReportErrCtxt
  = CEC
  { cec_encl :: [Implication]
      -- ^ Enclosing implications (innermost first)
      -- ic_skols and givens are tidied, rest are not

  , cec_tidy :: TidyEnv

  , cec_binds :: EvBindsVar
      -- ^ We make some errors (depending on cec_defer)
      -- into warnings, and emit evidence bindings
      -- into 'cec_binds' for unsolved constraints

  , cec_defer_type_errors :: DiagnosticReason
      -- ^ Whether to defer type errors until runtime

  -- We might throw a warning on an error when encountering a hole,
  -- depending on the type of hole (expression hole, type hole, out of scope hole).
  -- We store the reasons for reporting a diagnostic for each type of hole.
  , cec_expr_holes :: DiagnosticReason -- ^ Reason for reporting holes in expressions.
  , cec_type_holes :: DiagnosticReason -- ^ Reason for reporting holes in types.
  , cec_out_of_scope_holes :: DiagnosticReason -- ^ Reason for reporting out of scope holes.

  , cec_warn_redundant :: Bool -- ^ True <=> -Wredundant-constraints
  , cec_expand_syns    :: Bool -- ^ True <=> -fprint-expanded-synonyms

  , cec_suppress :: Bool
      -- ^ True <=> More important errors have occurred,
      -- so create bindings if need be, but
      -- don't issue any more errors/warnings
      -- See Note [Suppressing error messages]
  }

getUserGivens :: SolverReportErrCtxt -> [UserGiven]
-- One item for each enclosing implication
getUserGivens (CEC {cec_encl = implics}) = getUserGivensFromImplics implics

----------------------------------------------------------------------------
--
--   ErrorItem
--
----------------------------------------------------------------------------

-- | A predicate with its arising location; used to encapsulate a constraint
-- that will give rise to a diagnostic.
data ErrorItem
-- We could perhaps use Ct here (and indeed used to do exactly that), but
-- having a separate type to denote errors-in-formation gives us
-- a nice place to do pre-processing, such as calculating ei_suppress.
-- Perhaps some day, an ErrorItem could eventually evolve to contain
-- the error text (or some representation of it), so we can then have all
-- the errors together when deciding which to report.
  = EI { ei_pred     :: PredType         -- report about this
         -- The ei_pred field will never be an unboxed equality with
         -- a (casted) tyvar on the right; this is guaranteed by the solver
       , ei_evdest   :: Maybe TcEvDest   -- ^ for Wanteds, where to put the evidence
                                         -- for Givens, Nothing
       , ei_flavour  :: CtFlavour
       , ei_loc      :: CtLoc
       , ei_m_reason :: Maybe CtIrredReason -- if this ErrorItem was made from a
                                            -- CtIrred, this stores the reason
       , ei_suppress :: Bool -- Suppress because of Note [Wanteds rewrite Wanteds]
                             -- in GHC.Tc.Constraint
       }

instance Outputable ErrorItem where
  ppr (EI { ei_pred     = pred
          , ei_evdest   = m_evdest
          , ei_flavour  = flav
          , ei_suppress = supp })
    = pp_supp <+> ppr flav <+> pp_dest m_evdest <+> ppr pred
    where
      pp_dest Nothing   = empty
      pp_dest (Just ev) = ppr ev <+> dcolon

      pp_supp = if supp then text "suppress:" else empty

errorItemOrigin :: ErrorItem -> CtOrigin
errorItemOrigin = ctLocOrigin . ei_loc

errorItemEqRel :: ErrorItem -> EqRel
errorItemEqRel = predTypeEqRel . ei_pred

errorItemCtLoc :: ErrorItem -> CtLoc
errorItemCtLoc = ei_loc

errorItemPred :: ErrorItem -> PredType
errorItemPred = ei_pred

{- Note [discardProvCtxtGivens]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In most situations we call all enclosing implications "useful". There is one
exception, and that is when the constraint that causes the error is from the
"provided" context of a pattern synonym declaration:

  pattern Pat :: (Num a, Eq a) => Show a => a -> Maybe a
             --  required      => provided => type
  pattern Pat x <- (Just x, 4)

When checking the pattern RHS we must check that it does actually bind all
the claimed "provided" constraints; in this case, does the pattern (Just x, 4)
bind the (Show a) constraint.  Answer: no!

But the implication we generate for this will look like
   forall a. (Num a, Eq a) => [W] Show a
because when checking the pattern we must make the required constraints
available, since they are needed to match the pattern (in this case the
literal '4' needs (Num a, Eq a)).

BUT we don't want to suggest adding (Show a) to the "required" constraints
of the pattern synonym, thus:
  pattern Pat :: (Num a, Eq a, Show a) => Show a => a -> Maybe a
It would then typecheck but it's silly.  We want the /pattern/ to bind
the alleged "provided" constraints, Show a.

So we suppress that Implication in discardProvCtxtGivens.  It's painfully
ad-hoc but the truth is that adding it to the "required" constraints would
work.  Suppressing it solves two problems.  First, we never tell the user
that we could not deduce a "provided" constraint from the "required" context.
Second, we never give a possible fix that suggests to add a "provided"
constraint to the "required" context.

For example, without this distinction the above code gives a bad error
message (showing both problems):

  error: Could not deduce (Show a) ... from the context: (Eq a)
  ... Possible fix: add (Show a) to the context of the signature for pattern
  synonym `Pat' ...
-}

discardProvCtxtGivens :: CtOrigin -> [UserGiven] -> [UserGiven]
discardProvCtxtGivens orig givens  -- See Note [discardProvCtxtGivens]
  | ProvCtxtOrigin (PSB {psb_id = L _ name}) <- orig
  = filterOut (discard name) givens
  | otherwise
  = givens
  where
    discard n (Implic { ic_info = SigSkol (PatSynCtxt n') _ _ }) = n == n'
    discard _ _                                                  = False

-- | An error reported after constraint solving.
--   This is usually some sort of unsolved constraint error,
--   but we try to be specific about the precise problem we encountered.
data TcSolverReportMsg
  -- | Quantified variables appear out of dependency order.
  --
  -- Example:
  --
  --   forall (a :: k) k. ...
  --
  -- Test cases: BadTelescope2, T16418, T16247, T16726, T18451.
  = BadTelescope TyVarBndrs [TyCoVar]

  -- | We came across a custom type error and we have decided to report it.
  --
  -- Example:
  --
  --   type family F a where
  --     F a = TypeError (Text "error")
  --
  --   err :: F ()
  --   err = ()
  --
  -- Test cases: CustomTypeErrors0{1,2,3,4,5}, T12104.
  | UserTypeError ErrorMsgType -- ^ the message to report

  -- | Report a Wanted constraint of the form "Unsatisfiable msg".
  | UnsatisfiableError ErrorMsgType -- ^ the message to report

  -- | We want to report an out of scope variable or a typed hole.
  -- See 'HoleError'.
  | ReportHoleError Hole HoleError

  -- | Cannot unify a variable, because of a type mismatch.
  | CannotUnifyVariable
      { mismatchMsg       :: MismatchMsg
      , cannotUnifyReason :: CannotUnifyVariableReason
      }

  -- | A mismatch between two types.
  | Mismatch
     { mismatchMsg           :: MismatchMsg
     , mismatchTyVarInfo     :: Maybe TyVarInfo
     , mismatchAmbiguityInfo :: [AmbiguityInfo]
     , mismatchCoercibleInfo :: Maybe CoercibleMsg
     }

  -- | A violation of the representation-polymorphism invariants.
  --
  -- See 'FixedRuntimeRepErrorInfo' and 'FixedRuntimeRepContext' for more information.
  | FixedRuntimeRepError [FixedRuntimeRepErrorInfo]

  -- | An equality between two types is blocked on a kind equality
  -- between their kinds.
  --
  -- Test cases: none.
  | BlockedEquality ErrorItem
       -- These are for the "blocked" equalities, as described in
       -- Note [Equalities with incompatible kinds] in GHC.Tc.Solver.Equality,
       -- wrinkle (EIK2). There should always be another unsolved wanted around,
       -- which will ordinarily suppress this message. But this can still be printed out
       -- with -fdefer-type-errors (sigh), so we must produce a message.

  -- | Something was not applied to sufficiently many arguments.
  --
  --  Example:
  --
  --    instance Eq Maybe where {..}
  --
  -- Test case: T11563.
  | ExpectingMoreArguments Int TypedThing

  -- | Trying to use an unbound implicit parameter.
  --
  --  Example:
  --
  --    foo :: Int
  --    foo = ?param
  --
  -- Test case: tcfail130.
  | UnboundImplicitParams (NE.NonEmpty ErrorItem)

  -- | A constraint couldn't be solved because it contains
  -- ambiguous type variables.
  --
  --  Example:
  --
  --    class C a b where
  --      f :: (a,b)
  --
  --    x = fst f
  --
  -- Test case: T4921.
  | AmbiguityPreventsSolvingCt
      ErrorItem -- ^ always a class constraint
      ([TyVar], [TyVar]) -- ^ ambiguous kind and type variables, respectively

  -- | Could not solve a constraint; there were several unifying candidate instances
  -- but no matching instances. This is used to report as much useful information
  -- as possible about why we couldn't choose any instance, e.g. because of
  -- ambiguous type variables.
  | CannotResolveInstance
    { cannotResolve_item              :: ErrorItem
    , cannotResolve_unifiers          :: [ClsInst]
    , cannotResolve_candidates        :: [ClsInst]
    , cannotResolve_importErrors      :: [ImportError]
    , cannotResolve_suggestions       :: [GhcHint]
    , cannotResolve_relevant_bindings :: RelevantBindings
    }
    -- TODO: remove the fields of type [GhcHint] and RelevantBindings,
    -- in order to handle them uniformly with other diagnostic messages.

  -- | Could not solve a constraint using available instances
  -- because the instances overlap.
  --
  -- Test cases: tcfail118, tcfail121, tcfail218.
  | OverlappingInstances
    { overlappingInstances_item     :: ErrorItem
    , overlappingInstances_matches  :: NE.NonEmpty ClsInst
    , overlappingInstances_unifiers :: [ClsInst]
    }

  -- | Could not solve a constraint from instances because
  -- instances declared in a Safe module cannot overlap instances
  -- from other modules (with -XSafeHaskell).
  --
  -- Test cases: SH_Overlap{1,2,5,6,7,11}.
  | UnsafeOverlap
    { unsafeOverlap_item  :: ErrorItem
    , unsafeOverlap_match :: ClsInst
    , unsafeOverlapped    :: NE.NonEmpty ClsInst
    }
  deriving Generic

data MismatchMsg
  -- | Couldn't unify two types or kinds.
  --
  -- Example:
  --
  --   3 + 3# -- can't match a lifted type with an unlifted type
  --
  -- Test cases: T1396, T8263, ...
  = BasicMismatch
    { mismatch_ea           :: MismatchEA -- ^ Should this be phrased in terms of expected vs actual?
    , mismatch_item         :: ErrorItem  -- ^ The constraint in which the mismatch originated.
    , mismatch_ty1          :: Type       -- ^ First type (the expected type if mismatch_ea is True)
    , mismatch_ty2          :: Type       -- ^ Second type (the actual type if mismatch_ea is True)
    , mismatch_whenMatching :: Maybe WhenMatching
    , mismatch_mb_same_occ  :: Maybe SameOccInfo
    }

  -- | A type has an unexpected kind.
  --
  -- Test cases: T2994, T7609, ...
  | KindMismatch
    { kmismatch_what     :: TypedThing -- ^ What thing is 'kmismatch_actual' the kind of?
    , kmismatch_expected :: Type
    , kmismatch_actual   :: Type
    }
    -- TODO: combine with 'BasicMismatch'.

  -- | A mismatch between two types, which arose from a type equality.
  --
  -- Test cases: T1470, tcfail212.
  | TypeEqMismatch
    { teq_mismatch_item     :: ErrorItem
    , teq_mismatch_ty1      :: Type
    , teq_mismatch_ty2      :: Type
    , teq_mismatch_expected :: Type -- ^ The overall expected type
    , teq_mismatch_actual   :: Type -- ^ The overall actual type
    , teq_mismatch_what     :: Maybe TypedThing -- ^ What thing is 'teq_mismatch_actual' the kind of?
    , teq_mb_same_occ       :: Maybe SameOccInfo
    }
    -- TODO: combine with 'BasicMismatch'.

  -- | Couldn't solve some Wanted constraints using the Givens.
  -- Used for messages such as @"No instance for ..."@ and
  -- @"Could not deduce ... from"@.
  | CouldNotDeduce
    { cnd_user_givens :: [Implication]
      -- | The Wanted constraints we couldn't solve.
      --
      -- N.B.: the 'ErrorItem' at the head of the list has been tidied,
      -- perhaps not the others.
    , cnd_wanted      :: NE.NonEmpty ErrorItem
      -- | Some additional info consumed by 'mk_supplementary_ea_msg'.
    , cnd_extra       :: Maybe CND_Extra
    }
  deriving Generic

-- | Construct a basic mismatch message between two types.
--
-- See 'pprMismatchMsg' for how such a message is displayed to users.
mkBasicMismatchMsg :: MismatchEA -> ErrorItem -> Type -> Type -> MismatchMsg
mkBasicMismatchMsg ea item ty1 ty2
  = BasicMismatch
      { mismatch_ea           = ea
      , mismatch_item         = item
      , mismatch_ty1          = ty1
      , mismatch_ty2          = ty2
      , mismatch_whenMatching = Nothing
      , mismatch_mb_same_occ  = Nothing
      }

-- | Whether to use expected/actual in a type mismatch message.
data MismatchEA
  -- | Don't use expected/actual.
  = NoEA
  -- | Use expected/actual.
  | EA
    { mismatch_mbEA :: Maybe ExpectedActualInfo
       -- ^ Whether to also mention type synonym expansion.
    }

data CannotUnifyVariableReason
  -- | A type equality between a type variable and a polytype.
  --
  -- Test cases: T12427a, T2846b, T10194, ...
  = CannotUnifyWithPolytype ErrorItem TyVar Type (Maybe TyVarInfo)

  -- | An occurs check.
  | OccursCheck
    { occursCheckInterestingTyVars :: [TyVar]
    , occursCheckAmbiguityInfos    :: [AmbiguityInfo]
    }

  -- | A skolem type variable escapes its scope.
  --
  -- Example:
  --
  --   data Ex where { MkEx :: a -> Ex }
  --   foo (MkEx x) = x
  --
  -- Test cases: TypeSkolEscape, T11142.
  | SkolemEscape ErrorItem Implication [TyVar]

  -- | Can't unify the type variable with the other type
  -- due to the kind of type variable it is.
  --
  -- For example, trying to unify a 'SkolemTv' with the
  -- type Int, or with a 'TyVarTv'.
  | DifferentTyVars TyVarInfo
  | RepresentationalEq TyVarInfo (Maybe CoercibleMsg)
  deriving Generic

-- | Report a mismatch error without any extra
-- information.
mkPlainMismatchMsg :: MismatchMsg -> TcSolverReportMsg
mkPlainMismatchMsg msg
  = Mismatch
     { mismatchMsg           = msg
     , mismatchTyVarInfo     = Nothing
     , mismatchAmbiguityInfo = []
     , mismatchCoercibleInfo = Nothing
     }

-- | Additional information to be given in a 'CouldNotDeduce' message,
-- which is then passed on to 'mk_supplementary_ea_msg'.
data CND_Extra = CND_Extra TypeOrKind Type Type

-- | A cue to print out information about type variables,
-- e.g. where they were bound, when there is a mismatch @tv1 ~ ty2@.
data TyVarInfo =
  TyVarInfo { thisTyVar              :: TyVar
            , thisTyVarIsUntouchable :: Maybe Implication
            , otherTy                :: Maybe TyVar }

-- | Add some information to disambiguate errors in which
-- two 'Names' would otherwise appear to be identical.
--
-- See Note [Disambiguating (X ~ X) errors].
data SameOccInfo
  = SameOcc
    { sameOcc_same_pkg :: Bool -- ^ Whether the two 'Name's also came from the same package.
    , sameOcc_lhs :: Name
    , sameOcc_rhs :: Name }

-- | Add some information about ambiguity
data AmbiguityInfo
  -- | Some type variables remained ambiguous: print them to the user.
  = Ambiguity
    { lead_with_ambig_msg :: Bool -- ^ True  <=> start the message with "Ambiguous type variable ..."
                                  --   False <=> create a message of the form "The type variable is ambiguous."
    , ambig_tyvars :: ([TyVar], [TyVar]) -- ^ Ambiguous kind and type variables, respectively.
                                         --   Guaranteed to not both be empty.
    }
  -- | Remind the user that a particular type family is not injective.
  | NonInjectiveTyFam TyCon

-- | Expected/actual information.
data ExpectedActualInfo
  -- | Display the expected and actual types.
  = ExpectedActual
    { ea_expected, ea_actual :: Type }

  -- | Display the expected and actual types, after expanding type synonyms.
  | ExpectedActualAfterTySynExpansion
    { ea_expanded_expected, ea_expanded_actual :: Type }

-- | Explain how a kind equality originated.
data WhenMatching
  = WhenMatching TcType TcType CtOrigin (Maybe TypeOrKind)
  deriving Generic

data BadImportKind
  -- | Module does not export...
  = BadImportNotExported [GhcHint] -- ^ suggestions for what might have been meant
  -- | Missing @type@ keyword when importing a type.
  -- e.g. `import TypeLits( (+) )`, where TypeLits exports a /type/ (+), not a /term/ (+)
  -- Then we want to suggest using `import TypeLits( type (+) )`
  | BadImportAvailTyCon Bool -- ^ is ExplicitNamespaces enabled?
  -- | Trying to import a data constructor directly, e.g.
  -- @import Data.Maybe (Just)@ instead of @import Data.Maybe (Maybe(Just))@
  | BadImportAvailDataCon OccName
  -- | The parent does not export the given children.
  | BadImportNotExportedSubordinates [OccName]
  -- | Incorrect @type@ keyword when importing something which isn't a type.
  | BadImportAvailVar
  deriving Generic

-- | Some form of @"not in scope"@ error. See also the 'OutOfScopeHole'
-- constructor of 'HoleError'.
data NotInScopeError
  -- | A run-of-the-mill @"not in scope"@ error.
  = NotInScope
  -- | Like 'NotInScope', but when we know we are looking for a
  -- record field.
  | NotARecordField
  -- | An exact 'Name' was not in scope.
  --
  -- This usually indicates a problem with a Template Haskell splice.
  --
  -- Test cases: T5971, T18263.
  | NoExactName Name
  -- The same exact 'Name' occurs in multiple name-spaces.
  --
  -- This usually indicates a problem with a Template Haskell splice.
  --
  -- Test case: T7241.
  | SameName [GlobalRdrElt] -- ^ always at least 2 elements
  -- A type signature, fixity declaration, pragma, standalone kind signature...
  -- is missing an associated binding.
  | MissingBinding SDoc [GhcHint]
    -- TODO: remove the SDoc argument.
  -- | Couldn't find a top-level binding.
  --
  -- Happens when specifying an annotation for something that
  -- is not in scope.
  --
  -- Test cases: annfail01, annfail02, annfail11.
  | NoTopLevelBinding
  -- | A class doesn't have a method with this name,
  -- or, a class doesn't have an associated type with this name,
  -- or, a record doesn't have a record field with this name.
  | UnknownSubordinate SDoc
  -- | A name is not in scope during type checking but passed the renamer.
  --
  -- Test cases:
  --   none
  | NotInScopeTc (NameEnv TcTyThing)
  deriving Generic

-- | Create a @"not in scope"@ error message for the given 'RdrName'.
mkTcRnNotInScope :: RdrName -> NotInScopeError -> TcRnMessage
mkTcRnNotInScope rdr err = TcRnNotInScope err rdr [] noHints

-- | Configuration for pretty-printing valid hole fits.
data HoleFitDispConfig =
  HFDC { showWrap, showWrapVars, showType
       , showProv, showMatches :: Bool }

-- | Report an error involving a 'Hole'.
--
-- This could be an out of scope data constructor or variable,
-- a typed hole, or a wildcard in a type.
data HoleError
  -- | Report an out-of-scope data constructor or variable
  -- masquerading as an expression hole.
  --
  -- See Note [Insoluble holes] in GHC.Tc.Types.Constraint.
  -- See 'NotInScopeError' for other not-in-scope errors.
  --
  -- Test cases: T9177a.
  = OutOfScopeHole [ImportError] [GhcHint]
  -- | Report a typed hole, or wildcard, with additional information.
  | HoleError HoleSort
              [TcTyVar] -- Other type variables which get computed on the way.
              [(SkolemInfoAnon, [TcTyVar])] -- Zonked and grouped skolems for the type of the hole.

-- | A message that aims to explain why two types couldn't be seen
-- to be representationally equal.
data CoercibleMsg
  -- | Not knowing the role of a type constructor prevents us from
  -- concluding that two types are representationally equal.
  --
  -- Example:
  --
  --   foo :: Applicative m => m (Sum Int)
  --   foo = coerce (pure $ 1 :: Int)
  --
  -- We don't know what role `m` has, so we can't coerce `m Int` to `m (Sum Int)`.
  --
  -- Test cases: T8984, TcCoercibleFail.
  = UnknownRoles Type
  -- | The fact that a 'TyCon' is abstract prevents us from decomposing
  -- a 'TyConApp' and deducing that two types are representationally equal.
  --
  -- Test cases: none.
  | TyConIsAbstract TyCon
  -- | We can't unwrap a newtype whose constructor is not in scope.
  --
  -- Example:
  --
  --   import Data.Ord (Down) -- NB: not importing the constructor
  --   foo :: Int -> Down Int
  --   foo = coerce
  --
  -- Test cases: TcCoercibleFail.
  | OutOfScopeNewtypeConstructor TyCon DataCon

-- | Explain a problem with an import.
data ImportError
  -- | Couldn't find a module with the requested name.
  = MissingModule ModuleName
  -- | The imported modules don't export what we're looking for.
  | ModulesDoNotExport (NE.NonEmpty Module) OccName

-- | This datatype collates instances that match or unify,
-- in order to report an error message for an unsolved typeclass constraint.
data PotentialInstances
  = PotentialInstances
  { matches  :: [ClsInst]
  , unifiers :: [ClsInst]
  }

-- | A collection of valid hole fits or refinement fits,
-- in which some fits might have been suppressed.
data FitsMbSuppressed
  = Fits
  { fits           :: [HoleFit]
  , fitsSuppressed :: Bool -- ^ Whether we have suppressed any fits because there were too many.
  }

-- | A collection of hole fits and refinement fits.
data ValidHoleFits
  = ValidHoleFits
  { holeFits       :: FitsMbSuppressed
  , refinementFits :: FitsMbSuppressed
  }

noValidHoleFits :: ValidHoleFits
noValidHoleFits = ValidHoleFits (Fits [] False) (Fits [] False)

data RelevantBindings
  = RelevantBindings
  { relevantBindingNamesAndTys :: [(Name, Type)]
  , ranOutOfFuel               :: Bool -- ^ Whether we ran out of fuel generating the bindings.
  }

-- | Display some relevant bindings.
pprRelevantBindings :: RelevantBindings -> SDoc
-- This function should be in "GHC.Tc.Errors.Ppr",
-- but it's here for the moment as it's needed in "GHC.Tc.Errors".
pprRelevantBindings (RelevantBindings bds ran_out_of_fuel) =
  ppUnless (null rel_bds) $
    hang (text "Relevant bindings include")
       2 (vcat (map ppr_binding rel_bds) $$ ppWhen ran_out_of_fuel discardMsg)
  where
    ppr_binding (nm, tidy_ty) =
      sep [ pprPrefixOcc nm <+> dcolon <+> ppr tidy_ty
          , nest 2 (parens (text "bound at"
               <+> ppr (getSrcLoc nm)))]
    rel_bds = filter (not . isGeneratedSrcSpan . getSrcSpan . fst) bds

discardMsg :: SDoc
discardMsg = text "(Some bindings suppressed;" <+>
             text "use -fmax-relevant-binds=N or -fno-max-relevant-binds)"

-- | Stores the information to be reported in a representation-polymorphism
-- error message.
data FixedRuntimeRepErrorInfo
  = FRR_Info
  { frr_info_origin :: FixedRuntimeRepOrigin
      -- ^ What is the original type we checked for
      -- representation-polymorphism, and what specific
      -- check did we perform?
  , frr_info_not_concrete :: Maybe (TcTyVar, TcType)
      -- ^ Which non-concrete type did we try to
      -- unify this concrete type variable with?
  }

{-
************************************************************************
*                                                                      *
\subsection{Contexts for renaming errors}
*                                                                      *
************************************************************************
-}

-- AZ:TODO: Change these all to be Name instead of RdrName.
--          Merge TcType.UserTypeContext in to it.
data HsDocContext
  = TypeSigCtx SDoc
  | StandaloneKindSigCtx SDoc
  | PatCtx
  | SpecInstSigCtx
  | DefaultDeclCtx
  | ForeignDeclCtx (LocatedN RdrName)
  | DerivDeclCtx
  | RuleCtx FastString
  | TyDataCtx (LocatedN RdrName)
  | TySynCtx (LocatedN RdrName)
  | TyFamilyCtx (LocatedN RdrName)
  | FamPatCtx (LocatedN RdrName)    -- The patterns of a type/data family instance
  | ConDeclCtx [LocatedN Name]
  | ClassDeclCtx (LocatedN RdrName)
  | ExprWithTySigCtx
  | TypBrCtx
  | HsTypeCtx
  | HsTypePatCtx
  | GHCiCtx
  | SpliceTypeCtx (LHsType GhcPs)
  | ClassInstanceCtx
  | GenericCtx SDoc

-- | Context for a mismatch in the number of arguments
data MatchArgsContext
  = EquationArgs
      !Name -- ^ Name of the function
  | PatternArgs
      !HsMatchContextRn -- ^ Pattern match specifics

-- | The information necessary to report mismatched
-- numbers of arguments in a match group.
data MatchArgBadMatches where
  MatchArgMatches
    :: { matchArgFirstMatch :: LocatedA (Match GhcRn body)
       , matchArgBadMatches :: NE.NonEmpty (LocatedA (Match GhcRn body)) }
    -> MatchArgBadMatches

data PragmaWarningInfo
  = PragmaWarningName
    { pwarn_occname :: OccName
    , pwarn_impmod  :: ModuleName
    , pwarn_declmod :: ModuleName }
  | PragmaWarningExport
    { pwarn_occname :: OccName
    , pwarn_impmod  :: ModuleName }
  | PragmaWarningInstance
    { pwarn_dfunid :: DFunId
    , pwarn_ctorig :: CtOrigin }
  | PragmaWarningDefault
    { pwarn_class  :: TyCon
    , pwarn_impmod :: ModuleName }

-- | The context for an "empty statement group" error.
data EmptyStatementGroupErrReason
  = EmptyStmtsGroupInParallelComp
    -- ^ Empty statement group in a parallel list comprehension
  | EmptyStmtsGroupInTransformListComp
    -- ^ Empty statement group in a transform list comprehension
    --
    -- Example:
    --   [() | then ()]
  | EmptyStmtsGroupInDoNotation HsDoFlavour
    -- ^ Empty statement group in do notation
    --
    -- Example:
    --   do
  | EmptyStmtsGroupInArrowNotation
    -- ^ Empty statement group in arrow notation
    --
    -- Example:
    --   proc () -> do
  deriving (Generic)

-- | An existential wrapper around @'StmtLR' GhcPs GhcPs body@.
data UnexpectedStatement where
  UnexpectedStatement
    :: Outputable (StmtLR GhcPs GhcPs body)
    => StmtLR GhcPs GhcPs body
    -> UnexpectedStatement

data DeclSort = ClassDeclSort | InstanceDeclSort

data NonStandardGuards where
  NonStandardGuards
    :: (Outputable body, Anno (Stmt GhcRn body) ~ SrcSpanAnnA)
    => [LStmtLR GhcRn GhcRn body]
    -> NonStandardGuards

data RuleLhsErrReason
  = UnboundVariable RdrName NotInScopeError
  | IllegalExpression

data HsigShapeMismatchReason =
  {-| HsigShapeSortMismatch is an error indicating that an item in the export
      list of a signature doesn't match the item of the same name in another
      signature when merging the two – one is a type while the other is a
      plain identifier.

     Test cases:
       none
  -}
  HsigShapeSortMismatch !AvailInfo !AvailInfo
  |
  {-| HsigShapeNotUnifiable is an error indicating that a name in the export
      list of a signature cannot be unified with a name of the same name in
      another signature when merging the two.

     Test cases:
       bkpfail20, bkpfail21
  -}
  HsigShapeNotUnifiable !Name !Name !Bool
  deriving (Generic)

data WrongThingSort
  = WrongThingType
  | WrongThingDataCon
  | WrongThingPatSyn
  | WrongThingConLike
  | WrongThingClass
  | WrongThingTyCon
  | WrongThingAxiom

data StageCheckReason
  = StageCheckInstance !InstanceWhat !PredType
  | StageCheckSplice !Name

data UninferrableTyVarCtx
  = UninfTyCtx_ClassContext [TcType]
  | UninfTyCtx_DataContext [TcType]
  | UninfTyCtx_ProvidedContext [TcType]
  | UninfTyCtx_TyFamRhs TcType
  | UninfTyCtx_TySynRhs TcType
  | UninfTyCtx_Sig TcType (LHsSigType GhcRn)

data PatSynInvalidRhsReason
  = PatSynNotInvertible !(Pat GhcRn)
  | PatSynUnboundVar !Name
  deriving (Generic)

data BadFieldAnnotationReason where
  {-| A lazy data type field annotation (~) was used without enabling the
      extension StrictData.

     Test cases:
       LazyFieldsDisabled
  -}
  LazyFieldsDisabled :: BadFieldAnnotationReason
  {-| An UNPACK pragma was applied to a field without strictness annotation (!).

     Test cases:
       T14761a, T7562
  -}
  UnpackWithoutStrictness :: BadFieldAnnotationReason
  {-| An UNPACK pragma was applied to an abstract type in an indefinite package
      in Backpack.

     Test cases:
       unpack_sums_5, T3966, T7050
  -}
  BackpackUnpackAbstractType :: BadFieldAnnotationReason
  deriving (Generic)

data SuperclassCycle =
  MkSuperclassCycle { cls :: Class, definite :: Bool, reasons :: [SuperclassCycleDetail] }

data SuperclassCycleDetail
  = SCD_HeadTyVar !PredType
  | SCD_HeadTyFam !PredType
  | SCD_Superclass !Class

data RoleValidationFailedReason
  = TyVarRoleMismatch !TyVar !Role
  | TyVarMissingInEnv !TyVar
  | BadCoercionRole !Coercion
  deriving (Generic)

data DisabledClassExtension where
  {-| MultiParamTypeClasses is required.

     Test cases:
       readFail037, TcNoNullaryTC
  -}
  MultiParamDisabled :: !Int -- ^ The arity
                     -> DisabledClassExtension
  {-| FunctionalDependencies is required.

     Test cases:
       readFail041
  -}
  FunDepsDisabled :: DisabledClassExtension
  {-| ConstrainedClassMethods is required.

     Test cases:
       mod39, tcfail150
  -}
  ConstrainedClassMethodsDisabled :: !Id
                                  -> !TcPredType
                                  -> DisabledClassExtension
  deriving (Generic)

data TyFamsDisabledReason
  = TyFamsDisabledFamily !Name
  | TyFamsDisabledInstance !TyCon
  deriving (Generic)

data TypeApplication
  = TypeApplication !(HsType GhcPs) !TypeOrKind
  | TypeApplicationInPattern !(HsConPatTyArg GhcPs)
  deriving Generic

-- | Either `HsType p` or `HsSigType p`.
--
-- Used for reporting errors in `TcRnIllegalKind`.
data HsTypeOrSigType p
  = HsType    (HsType p)
  | HsSigType (HsSigType p)

instance OutputableBndrId p => Outputable (HsTypeOrSigType (GhcPass p)) where
  ppr (HsType ty)        = ppr ty
  ppr (HsSigType sig_ty) = ppr sig_ty

-- | A wrapper around HsTyVarBndr.
-- Used for reporting errors in `TcRnUnusedQuantifiedTypeVar`.
data HsTyVarBndrExistentialFlag = forall flag. OutputableBndrFlag flag 'Renamed =>
  HsTyVarBndrExistentialFlag (HsTyVarBndr flag GhcRn)

instance Outputable HsTyVarBndrExistentialFlag where
  ppr (HsTyVarBndrExistentialFlag hsTyVarBndr) = ppr hsTyVarBndr

type TySynCycleTyCons =
  [Either TyCon (LTyClDecl GhcRn)]

-- | Different types of warnings for dodgy imports.
data DodgyImportsReason =
  {-| An import of the form 'T(..)' or 'f(..)' does not actually import anything
      beside 'T'/'f' itself.

     Test cases:
       DodgyImports
  -}
  DodgyImportsEmptyParent !GlobalRdrElt
  |
  {-| A 'hiding' clause contains something that would be reported as an error in
      a regular import, but is relaxed to a warning.

     Test cases:
       DodgyImports_hiding
  -}
  DodgyImportsHiding !ImportLookupReason
  deriving (Generic)

-- | Different types of errors for import lookup.
data ImportLookupReason where
  {-| An item in an import statement is not exported by the corresponding
      module.

     Test cases:
       T21826, recomp001, retc001, mod79, mod80, mod81, mod91, T6007,
       T7167, T9006, T11071, T9905fail2, T5385, T10668
  -}
  ImportLookupBad :: BadImportKind
                  -> ModIface
                  -> ImpDeclSpec
                  -> IE GhcPs
                  -> Bool -- ^ whether @-XPatternSynonyms@ was enabled
                  -> ImportLookupReason
  {-| A name is specified with a qualifying module.

     Test cases:
       T3792
  -}
  ImportLookupQualified :: !RdrName -- ^ The name extracted from the import item
                        -> ImportLookupReason
  {-| Something completely unexpected is in an import list, like @module Foo@.

     Test cases:
       ImportLookupIllegal
  -}
  ImportLookupIllegal :: ImportLookupReason
  {-| An item in an import list matches multiple names exported from that
      module.

     Test cases:
       None
  -}
  ImportLookupAmbiguous :: !RdrName -- ^ The name extracted from the import item
                        -> ![GlobalRdrElt] -- ^ The potential matches
                        -> ImportLookupReason
  deriving (Generic)

-- | Distinguish record fields from other names for pretty-printing.
data UnusedImportName where
  UnusedImportNameRecField :: !Parent -> !OccName -> UnusedImportName
  UnusedImportNameRegular  :: !Name -> UnusedImportName

-- | Different types of errors for unused imports.
data UnusedImportReason where
  {-| No names in the import list are used in the module.

     Test cases:
       overloadedrecfldsfail06, T10890_2, t22391, t22391j, T1074, prog018,
       mod177, rn046, rn037, T5211
  -}
  UnusedImportNone :: UnusedImportReason
  {-| A set of names in the import list are not used in the module.

     Test cases:
       overloadedrecfldsfail06, T17324, mod176, T11970A, rn046, T14881,
       T7454, T8149, T13064
  -}
  UnusedImportSome :: ![UnusedImportName] -- ^ The unused names
                   -> UnusedImportReason
  deriving (Generic)

-- | Different places in which a nested foralls/contexts error might occur.
data NestedForallsContextsIn -- | Nested forall in @SPECIALISE instance@ = NFC_Specialize -- | Nested forall in @deriving via@ (via-type) | NFC_ViaType -- | Nested forall in the type of a GADT constructor | NFC_GadtConSig -- | Nested forall in an instance head | NFC_InstanceHead -- | Nested forall in a standalone deriving instance head | NFC_StandaloneDerivedInstanceHead -- | Nested forall in deriving class type | NFC_DerivedClassType -- | Provenance of an unused name. data UnusedNameProv = UnusedNameTopDecl | UnusedNameImported !ModuleName | UnusedNameTypePattern | UnusedNameMatch | UnusedNameLocalBind -- | Different reasons for TcRnNonCanonicalDefinition. data NonCanonicalDefinition = -- | Related to @(<>)@ and @mappend@. NonCanonicalMonoid NonCanonical_Monoid | -- | Related to @(*>)@/@(>>)@ and @pure@/@return@. NonCanonicalMonad NonCanonical_Monad deriving (Generic) -- | Possible cases for the -Wnoncanonical-monoid-instances. data NonCanonical_Monoid = -- | @(<>) = mappend@ was defined. NonCanonical_Sappend | -- | @mappend@ was defined as something other than @(<>)@. NonCanonical_Mappend -- | Possible cases for the -Wnoncanonical-monad-instances. data NonCanonical_Monad = -- | @pure = return@ was defined. NonCanonical_Pure | -- | @(*>) = (>>)@ was defined. NonCanonical_ThenA | -- | @return@ was defined as something other than @pure@. NonCanonical_Return | -- | @(>>)@ was defined as something other than @(*>)@. NonCanonical_ThenM -- | Why was an instance declaration rejected? data IllegalInstanceReason = IllegalClassInstance !TypedThing -- ^ the instance head type !IllegalClassInstanceReason -- ^ the problem with the instance head | IllegalFamilyInstance !IllegalFamilyInstanceReason | IllegalFamilyApplicationInInstance !Type -- ^ the instance head type !Bool -- ^ is this an invisible argument? !TyCon -- ^ type family ![Type] -- ^ type family argument deriving Generic -- | Why was a class instance declaration rejected? 
data IllegalClassInstanceReason -- | An illegal type at the head of the instance. -- -- See t'IllegalInstanceHeadReason'. = IllegalInstanceHead !IllegalInstanceHeadReason -- | An illegal HasField instance. See t'IllegalHasFieldInstance'. | IllegalHasFieldInstance !IllegalHasFieldInstance -- | An illegal instance for a built-in typeclass such as -- 'Coercible', 'Typeable', or 'KnownNat', outside of a signature file. -- -- Test cases: deriving/should_fail/T9687 -- deriving/should_fail/T14916 -- polykinds/T8132 -- typecheck/should_fail/TcCoercibleFail2 -- typecheck/should_fail/T12837 -- typecheck/should_fail/T14390 | IllegalSpecialClassInstance !Class !Bool -- ^ Whether the error is due to Safe Haskell being enabled -- | The instance failed the coverage condition, i.e. the functional -- dependencies were not respected. -- -- Example: -- -- class C a b | a -> b where {..} -- instance C a b where {..} -- -- Test cases: T9106, T10570, T2247, T12803, tcfail170. | IllegalInstanceFailsCoverageCondition !Class !CoverageProblem deriving Generic -- | Why was a HasField instance declaration rejected? data IllegalHasFieldInstance -- | HasField instance for a type not headed by a TyCon. -- -- Example: -- -- instance HasField a where {..} -- -- Test case: hasfieldfail03 = IllegalHasFieldInstanceNotATyCon -- | HasField instance for a data family. -- -- Example: -- -- data family D a -- data instance D Int = MkDInt Char -- -- instance HasField "fld" (D Int) where {..} -- -- Test case: hasfieldfail03 | IllegalHasFieldInstanceFamilyTyCon -- | HasField instance for a type that already has that field. -- -- Example -- -- data T = MkT { quux :: Int } -- instance HasField "quux" T Int where {..} -- -- Test case: hasfieldfail03 | IllegalHasFieldInstanceTyConHasField !TyCon !FieldLabelString -- | HasField instance for a type that already has fields, when the -- field label could potentially unify with those fields. 
-- -- Example: -- -- data T = MkInt { quux :: Int } -- instance forall (fld :: Symbol). HasField fld T Int where {..} -- -- Test case: hasfieldfail03 | IllegalHasFieldInstanceTyConHasFields !TyCon !Type -- ^ the label type in the instance head deriving Generic -- | Description of an instance coverage condition failure. data CoverageProblem = CoverageProblem { not_covered_fundep :: ([TyVar], [TyVar]) , not_covered_fundep_inst :: ([Type], [Type]) , not_covered_invis_vis_tvs :: Pair VarSet , not_covered_liberal :: FailedCoverageCondition } -- | Which instance coverage condition failed? Was it the liberal -- coverage condition? data FailedCoverageCondition -- | Failed the instance coverage condition (ICC) = FailedICC { alsoFailedLICC :: !Bool -- ^ Whether the instance also failed the LICC } -- | Failed the liberal instance coverage condition (LICC) | FailedLICC -------------------------------------------------------------------------------- -- Template Haskell errors data THError -- | A syntax error with Template Haskel quotes & splices. -- See t'THSyntaxError'. = THSyntaxError !THSyntaxError -- | An error in Template Haskell involving 'Name's. -- See t'THNameError'. | THNameError !THNameError -- | An error in Template Haskell reification. See t'THReifyError'. | THReifyError !THReifyError -- | An error due to typing restrictions in Typed Template Haskell. -- See t'TypedTHError'. | TypedTHError !TypedTHError -- | An error occurred when trying to run a splice in Template Haskell. -- See 'SpliceFailReason'. | THSpliceFailed !SpliceFailReason -- | An error involving the 'addTopDecls' functionality. See t'AddTopDeclsError'. | AddTopDeclsError !AddTopDeclsError {-| IllegalStaticFormInSplice is an error when a user attempts to define a static pointer in a Template Haskell splice. Example(s): Test cases: th/TH_StaticPointers02 -} | IllegalStaticFormInSplice !(HsExpr GhcPs) {-| FailedToLookupThInstName is a Template Haskell error that occurrs when looking up an instance fails. 
Example(s): Test cases: showIface/should_fail/THPutDocNonExistent -} | FailedToLookupThInstName !TH.Type !LookupTHInstNameErrReason {-| AddInvalidCorePlugin is a Template Haskell error indicating that a Core plugin being added has an invalid module due to being in the current package. Example(s): Test cases: -} | AddInvalidCorePlugin !String -- ^ Module name {-| AddDocToNonLocalDefn is a Template Haskell error for documentation being added to a definition which is not in the current module. Example(s): Test cases: showIface/should_fail/THPutDocExternal -} | AddDocToNonLocalDefn !TH.DocLoc {-| ReportCustomQuasiError is an error or warning thrown using 'qReport' from the 'Quasi' instance of 'TcM'. Example(s): Test cases: -} | ReportCustomQuasiError !Bool -- ^ True => Error, False => Warning !String -- ^ Error body deriving Generic -- | An error involving Template Haskell quotes or splices, e.g. nested -- quotation brackets or the use of an untyped bracket inside a typed splice. data THSyntaxError = {-| IllegalTHQuotes is an error that occurs when a Template Haskell quote is used without the TemplateHaskell extension enabled. Test case: T18251e -} IllegalTHQuotes !(HsExpr GhcPs) {-| IllegalTHSplice is an error that occurs when a Template Haskell splice occurs without having enabled the TemplateHaskell extension. Test cases: bkpfail01, bkpfail05, bkpfail09, bkpfail16, bkpfail35, bkpcabal06 -} | IllegalTHSplice {-| NestedTHBrackets is an error that occurs when Template Haskell brackets are nested without any intervening splices. Example: foo = [| [| 'x' |] |] Test cases: TH_NestedSplicesFail{5,6,7,8} -} | NestedTHBrackets {-| MismatchedSpliceType is an error that happens when a typed bracket or splice is used inside a typed splice/bracket, or the other way around. 
Examples: f1 = [| $$x |] f2 = [|| $y ||] f3 = $$( [| 'x' |] ) f4 = $( [|| 'y' ||] ) Test cases: TH_NestedSplicesFail{1,2,3,4} -} | MismatchedSpliceType SpliceType -- ^ type of the splice SpliceOrBracket -- ^ what's nested inside {-| BadImplicitSplice is an error thrown when a user uses top-level implicit TH-splice without enabling the TemplateHaskell extension. Example: pure [] -- on top-level Test cases: ghci/prog019/prog019 ghci/scripts/T1914 ghci/scripts/T6106 rename/should_fail/T4042 rename/should_fail/T12146 -} | BadImplicitSplice deriving Generic data THNameError {-| NonExactName is a Template Haskell error that occurs when the user attempts to define a binder with a 'RdrName' that is not an exact 'Name'. Example(s): Test cases: -} = NonExactName !RdrName {-| QuotedNameWrongStage is an error that can happen when a (non-top-level) Name is used at a different Template Haskell stage than the stage at which it is bound. Test cases: T16976z -} | QuotedNameWrongStage !(HsQuote GhcPs) deriving Generic data THReifyError = {-| CannotReifyInstance is a Template Haskell error for when an instance being reified via `reifyInstances` is not a class constraint or type family application. Example(s): Test cases: -} CannotReifyInstance !Type {-| CannotReifyOutOfScopeThing is a Template Haskell error indicating that the given name is not in scope and therefore cannot be reified. Example(s): Test cases: th/T16976f -} | CannotReifyOutOfScopeThing !TH.Name {-| CannotReifyThingNotInTypeEnv is a Template Haskell error occurring when the given name is not in the type environment and therefore cannot be reified. Example(s): Test cases: -} | CannotReifyThingNotInTypeEnv !Name {-| NoRolesAssociatedWithName is a Template Haskell error for when the user tries to reify the roles of a given name but it is not something that has roles associated with it. 
Example(s): Test cases: -} | NoRolesAssociatedWithThing !TcTyThing {-| CannotRepresentThing is a Template Haskell error indicating that a type cannot be reified because it does not have a representation in Template Haskell. Example(s): Test cases: -} | CannotRepresentType !UnrepresentableTypeDescr !Type deriving Generic data AddTopDeclsError = {-| InvalidTopDecl is a Template Haskell error occurring when one of the 'Dec's passed to 'addTopDecls' is not a function, value, annotation, or foreign import declaration. Example(s): Test cases: -} InvalidTopDecl !(HsDecl GhcPs) {-| UnexpectedDeclarationSplice is an error that occurs when a Template Haskell splice appears inside top-level declarations added with 'addTopDecls'. Example(s): none Test cases: none -} | AddTopDeclsUnexpectedDeclarationSplice | AddTopDeclsRunSpliceFailure !RunSpliceFailReason deriving Generic data TypedTHError = {-| SplicePolymorphicLocalVar is the error that occurs when the expression inside typed Template Haskell brackets is a polymorphic local variable. Example(s): x = \(y :: forall a. a -> a) -> [|| y ||] Test cases: quotes/T10384 -} SplicePolymorphicLocalVar !Id {-| TypedTHWithPolyType is an error that signifies the illegal use of a polytype in a typed Template Haskell expression. Example(s): bad :: (forall a. a -> a) -> () bad = $$( [|| \_ -> () ||] ) Test cases: th/T11452 -} | TypedTHWithPolyType !TcType deriving Generic data SpliceFailReason = {-| SpliceThrewException is an error that occurs when running a Template Haskell splice throws an exception. 
Example(s): Test cases: annotations/should_fail/annfail12 perf/compiler/MultiLayerModulesTH_Make perf/compiler/MultiLayerModulesTH_OneShot th/T10796b th/T19470 th/T19709d th/T5358 th/T5976 th/T7276a th/T8987 th/TH_exn1 th/TH_exn2 th/TH_runIO -} SpliceThrewException !SplicePhase !SomeException !String -- ^ Result of showing the exception (cannot be done safely outside IO) !(LHsExpr GhcTc) !Bool -- True <=> Print the expression {-| RunSpliceFailure is an error indicating that a Template Haskell splice failed to be converted into a valid expression. Example(s): Test cases: th/T10828a th/T10828b th/T12478_4 th/T15270A th/T15270B th/T16895a th/T16895b th/T16895c th/T16895d th/T16895e th/T18740d th/T2597b th/T2674 th/T3395 th/T7484 th/T7667a th/TH_implicitParamsErr1 th/TH_implicitParamsErr2 th/TH_implicitParamsErr3 th/TH_invalid_add_top_decl -} | RunSpliceFailure !RunSpliceFailReason deriving Generic data RunSpliceFailReason = ConversionFail !ThingBeingConverted !ConversionFailReason deriving Generic -- | Identifies the TH splice attempting to be converted data ThingBeingConverted = ConvDec !TH.Dec | ConvExp !TH.Exp | ConvPat !TH.Pat | ConvType !TH.Type -- | The reason a TH splice could not be converted to a Haskell expression data ConversionFailReason = IllegalOccName !OccName.NameSpace !String | SumAltArityExceeded !TH.SumAlt !TH.SumArity | IllegalSumAlt !TH.SumAlt | IllegalSumArity !TH.SumArity | MalformedType !TypeOrKind !TH.Type | IllegalLastStatement !HsDoFlavour !(LStmt GhcPs (LHsExpr GhcPs)) | KindSigsOnlyAllowedOnGADTs | IllegalDeclaration !THDeclDescriptor !IllegalDecls | CannotMixGADTConsWith98Cons | EmptyStmtListInDoBlock | NonVarInInfixExpr | MultiWayIfWithoutAlts | CasesExprWithoutAlts | ImplicitParamsWithOtherBinds | InvalidCCallImpent !String -- ^ Source | RecGadtNoCons | GadtNoCons | InvalidTypeInstanceHeader !TH.Type | InvalidTyFamInstLHS !TH.Type | InvalidImplicitParamBinding | DefaultDataInstDecl ![LDataFamInstDecl GhcPs] | FunBindLacksEquations 
!TH.Name deriving Generic data IllegalDecls = IllegalDecls !(NE.NonEmpty (LHsDecl GhcPs)) | IllegalFamDecls !(NE.NonEmpty (LFamilyDecl GhcPs)) -- | Label for a TH declaration data THDeclDescriptor = InstanceDecl | WhereClause | LetBinding | LetExpression | ClssDecl -- | The phase in which an exception was encountered when dealing with a TH splice data SplicePhase = SplicePhase_Run | SplicePhase_CompileAndLink data LookupTHInstNameErrReason = NoMatchesFound | CouldNotDetermineInstance data UnrepresentableTypeDescr = LinearInvisibleArgument | CoercionsInTypes -- FFI error types data IllegalForeignTypeReason = TypeCannotBeMarshaled !Type TypeCannotBeMarshaledReason | ForeignDynNotPtr !Type -- ^ Expected type !Type -- ^ Actual type | SafeHaskellMustBeInIO | IOResultExpected | UnexpectedNestedForall | LinearTypesNotAllowed | OneArgExpected | AtLeastOneArgExpected deriving Generic -- | Reason why a type cannot be marshalled through the FFI. data TypeCannotBeMarshaledReason = NotADataType | NewtypeDataConNotInScope !TyCon ![Type] | UnliftedFFITypesNeeded | NotABoxedMarshalableTyCon | ForeignLabelNotAPtr | NotSimpleUnliftedType | NotBoxedKindAny deriving Generic data TcRnNoDerivStratSpecifiedInfo where {-| 'TcRnNoDerivStratSpecified TcRnNoDerivingClauseStrategySpecified' is a warning implied by -Wmissing-deriving-strategies and triggered by a deriving clause without a specified deriving strategy. 
Example: newtype T = T Int deriving (Eq, Ord, Show) Here we would suggest fixing the deriving clause to: deriving stock (Show) deriving newtype (Eq, Ord) Test cases: deriving/should_compile/T15798a deriving/should_compile/T15798c deriving/should_compile/T24955a deriving/should_compile/T24955b -} TcRnNoDerivingClauseStrategySpecified :: Map AssumedDerivingStrategy [LHsSigType GhcRn] -> TcRnNoDerivStratSpecifiedInfo {-| 'TcRnNoDerivStratSpecified TcRnNoStandaloneDerivingStrategySpecified' is a warning implied by -Wmissing-deriving-strategies and triggered by a standalone deriving declaration without a specified deriving strategy. Example: data T a = T a deriving instance Show a => Show (T a) Here we would suggest fixing the instance to: deriving stock instance Show a => Show (T a) Test cases: deriving/should_compile/T15798b deriving/should_compile/T24955c -} TcRnNoStandaloneDerivingStrategySpecified :: AssumedDerivingStrategy -> LHsSigWcType GhcRn -- ^ The instance signature (e.g @Show a => Show (T a)@) -> TcRnNoDerivStratSpecifiedInfo -- | Label for syntax that may occur in terms (expressions) only as part of a -- required type argument. data TypeSyntax = TypeKeywordSyntax -- ^ @type t@ | ContextArrowSyntax -- ^ @ctx => t@ | FunctionArrowSyntax -- ^ @t1 -> t2@ | ForallTelescopeSyntax -- ^ @forall tvs. 
t@ deriving Generic typeSyntaxExtension :: TypeSyntax -> LangExt.Extension typeSyntaxExtension TypeKeywordSyntax = LangExt.ExplicitNamespaces typeSyntaxExtension ContextArrowSyntax = LangExt.RequiredTypeArguments typeSyntaxExtension FunctionArrowSyntax = LangExt.RequiredTypeArguments typeSyntaxExtension ForallTelescopeSyntax = LangExt.RequiredTypeArguments ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Types/0000755000000000000000000000000007346545000020634 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Errors/Types/PromotionErr.hs0000644000000000000000000001066507346545000023637 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.Tc.Errors.Types.PromotionErr ( PromotionErr(..) , pprPECategory , peCategory , TermLevelUseErr(..) , teCategory ) where import GHC.Prelude import GHC.Core.Type (ThetaType) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Generics (Generic) data PromotionErr = TyConPE -- TyCon used in a kind before we are ready -- data T :: T -> * where ... | ClassPE -- Ditto Class | FamDataConPE -- Data constructor for a data family -- See Note [AFamDataCon: not promoting data family constructors] -- in GHC.Tc.Utils.Env. 
| ConstrainedDataConPE ThetaType -- Data constructor with a context -- See Note [No constraints in kinds] in GHC.Tc.Validity | PatSynPE -- Pattern synonyms -- See Note [Don't promote pattern synonyms] in GHC.Tc.Utils.Env | RecDataConPE -- Data constructor in a recursive loop -- See Note [Recursion and promoting data constructors] in GHC.Tc.TyCl | TermVariablePE -- See Note [Demotion of unqualified variables] in GHC.Rename.Env | TypeVariablePE -- See Note [Type variable scoping errors during typechecking] deriving (Generic) instance Outputable PromotionErr where ppr ClassPE = text "ClassPE" ppr TyConPE = text "TyConPE" ppr PatSynPE = text "PatSynPE" ppr FamDataConPE = text "FamDataConPE" ppr (ConstrainedDataConPE theta) = text "ConstrainedDataConPE" <+> parens (ppr theta) ppr RecDataConPE = text "RecDataConPE" ppr TermVariablePE = text "TermVariablePE" ppr TypeVariablePE = text "TypeVariablePE" pprPECategory :: PromotionErr -> SDoc pprPECategory = text . capitalise . peCategory peCategory :: PromotionErr -> String peCategory ClassPE = "class" peCategory TyConPE = "type constructor" peCategory PatSynPE = "pattern synonym" peCategory FamDataConPE = "data constructor" peCategory ConstrainedDataConPE{} = "data constructor" peCategory RecDataConPE = "data constructor" peCategory TermVariablePE = "term variable" peCategory TypeVariablePE = "type variable" -- The opposite of a promotion error (a demotion error, in a sense). data TermLevelUseErr = TyConTE -- Type constructor used at the term level, e.g. x = Int | ClassTE -- Class used at the term level, e.g. x = Functor | TyVarTE -- Type variable used at the term level, e.g. 
f (Proxy :: Proxy a) = a deriving (Generic) teCategory :: TermLevelUseErr -> String teCategory ClassTE = "class" teCategory TyConTE = "type constructor" teCategory TyVarTE = "type variable" {- Note [Type variable scoping errors during typechecking] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the scoping of the type variable `a` in the following term-level example: -- f :: [forall b . Either b ()] f = [Right @a @() () :: forall a. Either a ()] Here `@a` in the type application and `a` in the type signature refer to the same type variable. Indeed, this term elaborates to the following Core: f = [(\@a -> Right @a @() ()) :: forall a . Either a ()] But how does this work with types? Suppose we have: type F = '[Right @a @() () :: forall a. Either a ()] To be consistent with the term-level language, we would have to elaborate this using a big lambda: type F = '[(/\ a . Right @a @() ()) :: forall a. Either a ()] Core has no such construct, so this is not a valid type. Conclusion: Even with -XExtendedForAllScope, the forall'd variables of a kind signature on a type cannot scope over the type. In implementation terms, to get a helpful error message we do this: * The renamer treats the type variable as bound by the forall (so it doesn't just say "out of scope"); see the `HsKindSig` case of GHC.Rename.HsType.rnHsTyKi. * The typechecker adds the forall-bound type variables to the type environent, but bound to `APromotionErr TypeVariablePE`; see the call to `tcAddKindSigPlaceholders` in the `HsKindSig` case of `GHC.Tc.Gen.HsType.tc_infer_hs_type`. * The occurrence site of a type variable then complains when it finds `APromotionErr`; see `GHC.Tc.Gen.HsType.tcTyVar`. 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Solver/0000755000000000000000000000000007346545000017526 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Solver/InertSet.hs0000644000000000000000000026524507346545000021635 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Tc.Solver.InertSet ( -- * The work list WorkList(..), isEmptyWorkList, emptyWorkList, extendWorkListNonEq, extendWorkListCt, extendWorkListCts, extendWorkListCtList, extendWorkListEq, extendWorkListEqs, appendWorkList, extendWorkListImplic, workListSize, selectWorkItem, -- * The inert set InertSet(..), InertCans(..), emptyInert, noMatchableGivenDicts, noGivenNewtypeReprEqs, updGivenEqs, mightEqualLater, prohibitedSuperClassSolve, -- * Inert equalities InertEqs, foldTyEqs, delEq, findEq, partitionInertEqs, partitionFunEqs, foldFunEqs, addEqToCans, -- * Inert Dicts updDicts, delDict, addDict, filterDicts, partitionDicts, addSolvedDict, -- * Inert Irreds InertIrreds, delIrred, addIrreds, addIrred, foldIrreds, findMatchingIrreds, updIrreds, addIrredToCans, -- * Kick-out KickOutSpec(..), kickOutRewritableLHS, -- * Cycle breaker vars CycleBreakerVarStack, pushCycleBreakerVarStack, addCycleBreakerBindings, forAllCycleBreakerBindings_, -- * Solving one from another InteractResult(..), solveOneFromTheOther ) where import GHC.Prelude import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin import GHC.Tc.Types.CtLoc( CtLoc, ctLocOrigin, ctLocSpan, ctLocLevel ) import GHC.Tc.Solver.Types import GHC.Tc.Utils.TcType import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Basic( SwapFlag(..) 
) import GHC.Core.Reduction import GHC.Core.Predicate import GHC.Core.TyCo.FVs import qualified GHC.Core.TyCo.Rep as Rep import GHC.Core.Class( Class ) import GHC.Core.TyCon import GHC.Core.Class( classTyCon ) import GHC.Core.Unify import GHC.Utils.Misc ( partitionWith ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.Maybe import GHC.Data.Bag import Data.List.NonEmpty ( NonEmpty(..), (<|) ) import qualified Data.List.NonEmpty as NE import Data.Function ( on ) import Control.Monad ( forM_ ) {- ************************************************************************ * * * Worklists * * Canonical and non-canonical constraints that the simplifier has to * * work on. Including their simplification depths. * * * * * ************************************************************************ Note [WorkList priorities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A WorkList contains canonical and non-canonical items (of all flavours). Notice that each Ct now has a simplification depth. We may consider using this depth for prioritization as well in the future. As a simple form of priority queue, our worklist separates out * equalities (wl_eqs); see Note [Prioritise equalities] * all the rest (wl_rest) Note [Prioritise equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's very important to process equalities over class constraints: * (Efficiency) The general reason to do so is that if we process a class constraint first, we may end up putting it into the inert set and then kicking it out later. That's extra work compared to just doing the equality first. * (Avoiding fundep iteration) As #14723 showed, it's possible to get non-termination if we - Emit the fundep equalities for a class constraint, generating some fresh unification variables. 
- That leads to some unification - Which kicks out the class constraint - Which isn't solved (because there are still some more equalities in the work-list), but generates yet more fundeps Solution: prioritise equalities over class constraints * (Class equalities) We need to prioritise equalities even if they are hidden inside a class constraint; see Note [Prioritise class equalities] * (Kick-out) We want to apply this priority scheme to kicked-out constraints too (see the call to extendWorkListCt in kick_out_rewritable) E.g. a CIrredCan can be a hetero-kinded (t1 ~ t2), which may become homo-kinded when kicked out, and hence we want to prioritise it. Further refinements: * Among the equalities we prioritise ones with an empty rewriter set; see Note [Wanteds rewrite Wanteds] in GHC.Tc.Types.Constraint, wrinkle (W1). * Among equalities with an empty rewriter set, we prioritise nominal equalities. * They have more rewriting power, so doing them first is better. * Prioritising them ameliorates the incompleteness of newtype solving: see (Ex2) in Note [Decomposing newtype equalities] in GHC.Tc.Solver.Equality. Note [Prioritise class equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We prioritise equalities in the solver (see selectWorkItem). But class constraints like (a ~ b) and (a ~~ b) are actually equalities too; see Note [The equality types story] in GHC.Builtin.Types.Prim. Failing to prioritise these is inefficient (more kick-outs etc). But, worse, it can prevent us spotting a "recursive knot" among Wanted constraints. See comment:10 of #12734 for a worked-out example. So we arrange to put these particular class constraints in the wl_eqs. NB: since we do not currently apply the substitution to the inert_solved_dicts, the knot-tying still seems a bit fragile. But this makes it better. 
Note [Residual implications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The wl_implics in the WorkList are the residual implication
constraints that are generated while solving or canonicalising the
current worklist.  Specifically, when canonicalising
   (forall a. t1 ~ forall a. t2)
from which we get the implication
   (forall a. t1 ~ t2)
See GHC.Tc.Solver.Monad.deferTcSForAllEq
-}

-- See Note [WorkList priorities]
-- The WorkList is the solver's queue of constraints still to be processed.
-- It is kept partitioned so that selectWorkItem can dequeue items in
-- priority order: wl_eqs_N first, then wl_eqs_X, then wl_rw_eqs, then wl_rest.
data WorkList
  = WL { wl_eqs_N :: [Ct]  -- /Nominal/ equalities (s ~#N t), (s ~ t), (s ~~ t)
                           -- with empty rewriter set

       , wl_eqs_X :: [Ct]  -- CEqCan, CDictCan, CIrredCan
                           -- with empty rewriter set
            -- All other equalities: contains both equality constraints and
            -- their class-level variants (a~b) and (a~~b);
            -- See Note [Prioritise equalities]
            -- See Note [Prioritise class equalities]

       , wl_rw_eqs :: [Ct]  -- Like wl_eqs, but ones that have a non-empty
                            -- rewriter set; or, more precisely, did when
                            -- added to the WorkList
            -- We prioritise wl_eqs over wl_rw_eqs;
            -- see Note [Prioritise Wanteds with empty RewriterSet]
            -- in GHC.Tc.Types.Constraint for more details.

       , wl_rest :: [Ct]    -- Non-equality constraints

       , wl_implics :: Bag Implication  -- See Note [Residual implications]
       }

-- | Combine two worklists field-wise, preserving the relative order of
-- the items in each field.
appendWorkList :: WorkList -> WorkList -> WorkList
appendWorkList
    (WL { wl_eqs_N = eqs1_N, wl_eqs_X = eqs1_X, wl_rw_eqs = rw_eqs1
        , wl_rest = rest1, wl_implics = implics1 })
    (WL { wl_eqs_N = eqs2_N, wl_eqs_X = eqs2_X, wl_rw_eqs = rw_eqs2
        , wl_rest = rest2, wl_implics = implics2 })
  = WL { wl_eqs_N   = eqs1_N ++ eqs2_N
       , wl_eqs_X   = eqs1_X ++ eqs2_X
       , wl_rw_eqs  = rw_eqs1 ++ rw_eqs2
       , wl_rest    = rest1 ++ rest2
       , wl_implics = implics1 `unionBags` implics2 }

-- | Number of constraints in the worklist.
-- NB: residual implications (wl_implics) are not counted.
workListSize :: WorkList -> Int
workListSize (WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X
                 , wl_rw_eqs = rw_eqs, wl_rest = rest })
  = length eqs_N + length eqs_X + length rw_eqs + length rest

-- | Add a single equality constraint, routing it to the field that matches
-- its rewriter set and (for empty rewriter sets) its equality relation.
extendWorkListEq :: RewriterSet -> Ct -> WorkList -> WorkList
extendWorkListEq rewriters ct
    wl@(WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X, wl_rw_eqs = rw_eqs })
  | isEmptyRewriterSet rewriters  -- A wanted that has not been rewritten
    -- isEmptyRewriterSet: see Note [Prioritise Wanteds with empty RewriterSet]
    -- in GHC.Tc.Types.Constraint
  = if isNomEqPred (ctPred ct)
    then wl { wl_eqs_N = ct : eqs_N }
    else wl { wl_eqs_X = ct : eqs_X }
  | otherwise
  = wl { wl_rw_eqs = ct : rw_eqs }

extendWorkListEqs :: RewriterSet -> Bag Ct -> WorkList -> WorkList
-- Add [eq1,...,eqn] to the work-list
-- They all have the same rewriter set
-- The constraints will be solved in left-to-right order:
--   see Note [Work-list ordering] in GHC.Tc.Solver.Equality
--
-- Precondition: new_eqs is non-empty
extendWorkListEqs rewriters new_eqs
    wl@(WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X, wl_rw_eqs = rw_eqs })
  | isEmptyRewriterSet rewriters
    -- isEmptyRewriterSet: see Note [Prioritise Wanteds with empty RewriterSet]
    -- in GHC.Tc.Types.Constraint
  = case partitionBag is_nominal new_eqs of
      (new_eqs_N, new_eqs_X)
        | isEmptyBag new_eqs_N -> wl { wl_eqs_X = new_eqs_X `push_on_front` eqs_X }
        | isEmptyBag new_eqs_X -> wl { wl_eqs_N = new_eqs_N `push_on_front` eqs_N }
        | otherwise            -> wl { wl_eqs_N = new_eqs_N `push_on_front` eqs_N
                                     , wl_eqs_X = new_eqs_X `push_on_front` eqs_X }
        -- These isEmptyBag tests are just trying
        -- to avoid creating unnecessary thunks
  | otherwise
  = wl { wl_rw_eqs = new_eqs `push_on_front` rw_eqs }
  where
    push_on_front :: Bag Ct -> [Ct] -> [Ct]
    -- push_on_front puts the new equalities on the front of the queue
    push_on_front new_eqs eqs = foldr (:) eqs new_eqs

    is_nominal ct = isNomEqPred (ctPred ct)

extendWorkListNonEq :: Ct -> WorkList -> WorkList
-- Extension by non equality
extendWorkListNonEq ct wl = wl { wl_rest = ct : wl_rest wl }

-- | Add a residual implication; see Note [Residual implications].
extendWorkListImplic :: Implication -> WorkList -> WorkList
extendWorkListImplic implic wl = wl { wl_implics = implic `consBag` wl_implics wl }

extendWorkListCt :: Ct -> WorkList -> WorkList
-- Agnostic about what kind of constraint
extendWorkListCt ct wl
  = case classifyPredType (ctEvPred ev) of
      EqPred {}
        -> extendWorkListEq rewriters ct wl

      ClassPred cls _  -- See Note [Prioritise class equalities]
        | isEqualityClass cls
        -> extendWorkListEq rewriters ct wl

      _ -> extendWorkListNonEq ct wl
  where
    ev        = ctEvidence ct
    rewriters = ctEvRewriters ev

extendWorkListCtList :: [Ct] -> WorkList -> WorkList
extendWorkListCtList cts wl = foldr extendWorkListCt wl cts

extendWorkListCts :: Cts -> WorkList -> WorkList
extendWorkListCts cts wl = foldr extendWorkListCt wl cts

isEmptyWorkList :: WorkList -> Bool
-- True <=> selectWorkItem would return Nothing and there are no implications.
-- NB: we must include wl_rw_eqs in the test.  Omitting it would make a
--     worklist that holds only rewritten equalities look empty, which is
--     inconsistent with both workListSize and selectWorkItem.
isEmptyWorkList (WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X, wl_rw_eqs = rw_eqs
                    , wl_rest = rest, wl_implics = implics })
  = null eqs_N && null eqs_X && null rw_eqs && null rest && isEmptyBag implics

emptyWorkList :: WorkList
emptyWorkList = WL { wl_eqs_N = [], wl_eqs_X = []
                   , wl_rw_eqs = [], wl_rest = [], wl_implics = emptyBag }

selectWorkItem :: WorkList -> Maybe (Ct, WorkList)
-- Pick the next constraint to process, in priority order.
-- See Note [Prioritise equalities]
selectWorkItem wl@(WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X
                      , wl_rw_eqs = rw_eqs, wl_rest = rest })
  | ct:cts <- eqs_N  = Just (ct, wl { wl_eqs_N  = cts })
  | ct:cts <- eqs_X  = Just (ct, wl { wl_eqs_X  = cts })
  | ct:cts <- rw_eqs = Just (ct, wl { wl_rw_eqs = cts })
  | ct:cts <- rest   = Just (ct, wl { wl_rest   = cts })
  | otherwise        = Nothing

-- Pretty printing
instance Outputable WorkList where
  ppr (WL { wl_eqs_N = eqs_N, wl_eqs_X = eqs_X, wl_rw_eqs = rw_eqs
          , wl_rest = rest, wl_implics = implics })
   = text "WL" <+> (braces $
     vcat [ ppUnless (null eqs_N) $
            text "Eqs_N =" <+> vcat (map ppr eqs_N)
          , ppUnless (null eqs_X) $
            text "Eqs_X =" <+> vcat (map ppr eqs_X)
          , ppUnless (null rw_eqs) $
            text "RwEqs =" <+> vcat (map ppr rw_eqs)
          , ppUnless (null rest) $
            text "Non-eqs =" <+> vcat (map ppr rest)
          , ppUnless (isEmptyBag implics) $
            ifPprDebug (text "Implics =" <+> vcat (map ppr (bagToList implics)))
                       (text "(Implics omitted)")
          ])

{- *********************************************************************
*                                                                      *
                InertSet: the inert set
*                                                                      *
*                                                                      *
********************************************************************* -}

type CycleBreakerVarStack
  = NonEmpty (Bag (TcTyVar, TcType))
    -- ^ a stack of (CycleBreakerTv, original family applications) lists
    -- first element in the stack corresponds to current implication;
    --   later elements correspond to outer implications
    -- used to undo the cycle-breaking needed to handle
    -- Note [Type equality cycles] in GHC.Tc.Solver.Equality
    -- Why store the outer implications? For the use in mightEqualLater (only)
    --
    -- Why NonEmpty? So there is always a top element to add to

data InertSet
  = IS { inert_cans :: InertCans
              -- Canonical Given, Wanted
              -- Sometimes called "the inert set"

       , inert_cycle_breakers :: CycleBreakerVarStack

       , inert_famapp_cache :: FunEqMap Reduction
              -- Just a hash-cons cache for use when reducing family applications
              -- only
              --
              -- If    F tys :-> (co, rhs, flav),
              -- then  co :: F tys ~N rhs
              -- all evidence is from instances or Givens; no coercion holes here
              -- (We have no way of "kicking out" from the cache, so putting
              --  wanteds here means we can end up solving a Wanted with itself. Bad)

       , inert_solved_dicts :: DictMap DictCt
              -- All Wanteds, of form (C t1 ..
tn)
              -- Always a dictionary solved by an instance decl; never an
              -- implicit parameter
              -- See Note [Solved dictionaries]
              -- and Note [Do not add superclasses of solved dictionaries]
       }

instance Outputable InertSet where
  ppr (IS { inert_cans = ics
          , inert_solved_dicts = solved_dicts })
      = vcat [ ppr ics
             , ppUnless (null dicts) $
               text "Solved dicts =" <+> vcat (map ppr dicts)
             ]
      where
        dicts = bagToList (dictsToBag solved_dicts)

-- | An 'InertCans' with no constraints at all; the Given-equality level
-- starts at the top level and "has Given equalities" starts False.
emptyInertCans :: InertCans
emptyInertCans
  = IC { inert_eqs          = emptyTyEqs
       , inert_funeqs       = emptyFunEqs
       , inert_given_eq_lvl = topTcLevel
       , inert_given_eqs    = False
       , inert_dicts        = emptyDictMap
       , inert_safehask     = emptyDictMap
       , inert_insts        = []
       , inert_irreds       = emptyBag }

-- | An empty 'InertSet': empty canonical constraints, a one-element
-- (empty) cycle-breaker stack, an empty family-application cache, and
-- no solved dictionaries.
emptyInert :: InertSet
emptyInert
  = IS { inert_cans           = emptyInertCans
       , inert_cycle_breakers = emptyBag :| []
       , inert_famapp_cache   = emptyFunEqs
       , inert_solved_dicts   = emptyDictMap }

{- Note [Solved dictionaries]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we apply a top-level instance declaration, we add the "solved"
dictionary to the inert_solved_dicts.  In general, we use it to avoid
creating a new EvVar when we have a new goal that we have solved in
the past.

But in particular, we can use it to create *recursive* dictionaries.
The simplest, degenerate case is
    instance C [a] => C [a] where ...
If we have
    [W] d1 :: C [x]
then we can apply the instance to get
    d1 = $dfCList d
    [W] d2 :: C [x]
Now 'd1' goes in inert_solved_dicts, and we can solve d2 directly from d1.
    d1 = $dfCList d
    d2 = d1

See Note [Example of recursive dictionaries]

VERY IMPORTANT INVARIANT:

 (Solved Dictionary Invariant)
    Every member of the inert_solved_dicts is the result
    of applying an instance declaration that "takes a step"

    An instance "takes a step" if it has the form
        dfunDList d1 d2 = MkD (...) (...) (...)
    That is, the dfun is lazy in its arguments, and guarantees to
    immediately return a dictionary constructor.  NB: all dictionary
    data constructors are lazy in their arguments.
This property is crucial to ensure that all dictionaries are non-bottom, which in turn ensures that the whole "recursive dictionary" idea works at all, even if we get something like rec { d = dfunDList d dx } See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance. Reason: - All instances, except two exceptions listed below, "take a step" in the above sense - Exception 1: local quantified constraints have no such guarantee; indeed, adding a "solved dictionary" when applying a quantified constraint led to the ability to define unsafeCoerce in #17267. - Exception 2: the magic built-in instance for (~) has no such guarantee. It behaves as if we had class (a ~# b) => (a ~ b) where {} instance (a ~# b) => (a ~ b) where {} The "dfun" for the instance is strict in the coercion. Anyway there's no point in recording a "solved dict" for (t1 ~ t2); it's not going to allow a recursive dictionary to be constructed. Ditto (~~) and Coercible. THEREFORE we only add a "solved dictionary" - when applying an instance declaration - subject to Exceptions 1 and 2 above In implementation terms - GHC.Tc.Solver.Monad.addSolvedDict adds a new solved dictionary, conditional on the kind of instance - It is only called when applying an instance decl, in GHC.Tc.Solver.Dict.tryInstances - ClsInst.InstanceWhat says what kind of instance was used to solve the constraint. In particular * LocalInstance identifies quantified constraints * BuiltinEqInstance identifies the strange built-in instances for equality. - ClsInst.instanceReturnsDictCon says which kind of instance guarantees to return a dictionary constructor Other notes about solved dictionaries * See also Note [Do not add superclasses of solved dictionaries] * The inert_solved_dicts field is not rewritten by equalities, so it may get out of date. * The inert_solved_dicts are all Wanteds, never givens * We only cache dictionaries from top-level instances, not from local quantified constraints. 
Reason: if we cached the latter we'd need to purge the cache when bringing new quantified constraints into scope, because quantified constraints "shadow" top-level instances. Note [Do not add superclasses of solved dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every member of inert_solved_dicts is the result of applying a dictionary function, NOT of applying superclass selection to anything. Consider class Ord a => C a where instance Ord [a] => C [a] where ... Suppose we are trying to solve [G] d1 : Ord a [W] d2 : C [a] Then we'll use the instance decl to give [G] d1 : Ord a Solved: d2 : C [a] = $dfCList d3 [W] d3 : Ord [a] We must not add d4 : Ord [a] to the 'solved' set (by taking the superclass of d2), otherwise we'll use it to solve d3, without ever using d1, which would be a catastrophe. Solution: when extending the solved dictionaries, do not add superclasses. That's why each element of the inert_solved_dicts is the result of applying a dictionary function. Note [Example of recursive dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- Example 1 data D r = ZeroD | SuccD (r (D r)); instance (Eq (r (D r))) => Eq (D r) where ZeroD == ZeroD = True (SuccD a) == (SuccD b) = a == b _ == _ = False; equalDC :: D [] -> D [] -> Bool; equalDC = (==); We need to prove (Eq (D [])). 
Here's how we go: [W] d1 : Eq (D []) By instance decl of Eq (D r): [W] d2 : Eq [D []] where d1 = dfEqD d2 By instance decl of Eq [a]: [W] d3 : Eq (D []) where d2 = dfEqList d3 d1 = dfEqD d2 Now this wanted can interact with our "solved" d1 to get: d3 = d1 -- Example 2: This code arises in the context of "Scrap Your Boilerplate with Class" class Sat a class Data ctx a instance Sat (ctx Char) => Data ctx Char -- dfunData1 instance (Sat (ctx [a]), Data ctx a) => Data ctx [a] -- dfunData2 class Data Maybe a => Foo a instance Foo t => Sat (Maybe t) -- dfunSat instance Data Maybe a => Foo a -- dfunFoo1 instance Foo a => Foo [a] -- dfunFoo2 instance Foo [Char] -- dfunFoo3 Consider generating the superclasses of the instance declaration instance Foo a => Foo [a] So our problem is this [G] d0 : Foo t [W] d1 : Data Maybe [t] -- Desired superclass We may add the given in the inert set, along with its superclasses Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 WorkList [W] d1 : Data Maybe [t] Solve d1 using instance dfunData2; d1 := dfunData2 d2 d3 Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] WorkList: [W] d2 : Sat (Maybe [t]) [W] d3 : Data Maybe t Now, we may simplify d2 using dfunSat; d2 := dfunSat d4 Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) WorkList: [W] d3 : Data Maybe t [W] d4 : Foo [t] Now, we can just solve d3 from d01; d3 := d01 Inert [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) WorkList [W] d4 : Foo [t] Now, solve d4 using dfunFoo2; d4 := dfunFoo2 d5 Inert [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) d4 : Foo [t] WorkList: [W] d5 : Foo t Now, d5 can be solved! 
d5 := d0 Result d1 := dfunData2 d2 d3 d2 := dfunSat d4 d3 := d01 d4 := dfunFoo2 d5 d5 := d0 -} {- ********************************************************************* * * InertCans: the canonical inerts * * * * ********************************************************************* -} {- Note [Tracking Given equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For reasons described in (UNTOUCHABLE) in GHC.Tc.Utils.Unify Note [Unification preconditions], we can't unify alpha[2] ~ Int under a level-4 implication if there are any Given equalities bound by the implications at level 3 or 4. To that end, the InertCans tracks inert_given_eq_lvl :: TcLevel -- The TcLevel of the innermost implication that has a Given -- equality of the sort that make a unification variable untouchable -- (see Note [Unification preconditions] in GHC.Tc.Utils.Unify). We update inert_given_eq_lvl whenever we add a Given to the inert set, in updGivenEqs. Then a unification variable alpha[n] is untouchable iff n < inert_given_eq_lvl that is, if the unification variable was born outside an enclosing Given equality. Exactly which constraints should trigger (UNTOUCHABLE), and hence should update inert_given_eq_lvl? (TGE1) We do /not/ need to worry about let-bound skolems, such as forall[2] a. a ~ [b] => blah See Note [Let-bound skolems] and the isOuterTyVar tests in `updGivenEqs` (TGE2) However, solely to support better error messages (see Note [HasGivenEqs] in GHC.Tc.Types.Constraint) we also track these "local" equalities in the boolean inert_given_eqs field. This field is used only subsequently (see `getHasGivenEqs`), to set the ic_given_eqs field to LocalGivenEqs. (TGE3) Consider an implication forall[2]. beta[1] => alpha[1] ~ Int where beta is a unification variable that has already been unified to () in an outer scope. Then alpha[1] is perfectly touchable and we can unify alpha := Int.
So when deciding whether the givens contain an equality, we should canonicalise first, rather than just looking at the /original/ givens (#8644). (TGE4) However, we must take account of *potential* equalities. Consider the same example again, but this time we have /not/ yet unified beta: forall[2] beta[1] => ...blah... Because beta might turn into an equality, updGivenEqs conservatively treats it as a potential equality, and updates inert_give_eq_lvl (TGE5) We should not look at the equality relation involved (nominal vs representational), because representational equalities can still imply nominal ones. For example, if (G a ~R G b) and G's argument's role is nominal, then we can deduce a ~N b. Historical note: prior to #24938 we also ignored Given equalities that did not mention an "outer" type variable. But that is wrong, as #24938 showed. Another example is immortalised in test LocalGivenEqs2 data T where MkT :: F a ~ G b => a -> b -> T f (MkT _ _) = True We should not infer the type for `f`; let-bound-skolems does not apply. Note [Let-bound skolems] ~~~~~~~~~~~~~~~~~~~~~~~~ If * the inert set contains a canonical Given CEqCan (a ~ ty) and * 'a' is a skolem bound in this very implication, then: a) The Given is pretty much a let-binding, like f :: (a ~ b->c) => a -> a Here the equality constraint is like saying let a = b->c in ... It is not adding any new, local equality information, and hence can be ignored by has_given_eqs b) 'a' will have been completely substituted out in the inert set, so we can safely discard it. For an example, see #9211. The actual test is in `isLetBoundSkolemCt` Wrinkles: (LBS1) See GHC.Tc.Utils.Unify Note [Deeper level on the left] for how we ensure that the correct variable is on the left of the equality when both are tyvars. (LBS2) We also want this to work for forall a. [G] F b ~ a (CEqCt with TyFamLHS) Here the Given will have a TyFamLHS, with the skolem-bound tyvar on the RHS. See tests T24938a, and LocalGivenEqs. 
(LBS3) Happily (LBS2) also makes cycle-breakers work. Suppose we have forall a. [G] (F a) Int ~ a where F has arity 1, and `a` is the locally-bound skolem. Then, as Note [Type equality cycles] explains, we split into [G] F a ~ cbv, [G] cbv Int ~ a where `cbv` is the cycle breaker variable. But cbv has the same level as `a`, so `isOuterTyVar` (called in `isLetBoundSkolemCt`) will return False. This actually matters occasionally: see test LocalGivenEqs. You might wonder whether the skolem really needs to be bound "in the very same implication" as the equality constraint. Consider this (c.f. #15009): data S a where MkS :: (a ~ Int) => S a g :: forall a. S a -> a -> blah g x y = let h = \z. ( z :: Int , case x of MkS -> [y,z]) in ... From the type signature for `g`, we get `y::a` . Then when we encounter the `\z`, we'll assign `z :: alpha[1]`, say. Next, from the body of the lambda we'll get [W] alpha[1] ~ Int -- From z::Int [W] forall[2]. (a ~ Int) => [W] alpha[1] ~ a -- From [y,z] Now, unify alpha := a. Now we are stuck with an unsolved alpha~Int! So we must treat alpha as untouchable under the forall[2] implication. Possible future improvements. The current test just looks to see whether one side of an equality is a locally-bound skolem. But actually we could, in theory, do better: if one side (or both sides, actually) of an equality ineluctably mentions a local skolem, then the equality cannot possibly impact types outside of the implication (because doing to would cause those types to be ill-scoped). The problem is the "ineluctably": this means that no expansion, other solving, etc., could possibly get rid of the variable. This is hard, perhaps impossible, to know for sure, especially when we think about type family interactions. (And it's a user-visible property so we don't want it to be hard to predict.) So we keep the existing check, looking for one lone variable, because we're sure that variable isn't going anywhere. 
Note [Detailed InertCans Invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The InertCans represents a collection of constraints with the following properties: * All canonical * No two dictionaries with the same head * No two CIrreds with the same type * Family equations inert wrt top-level family axioms * Dictionaries have no matching top-level instance * Given family or dictionary constraints don't mention touchable unification variables * Non-CEqCan constraints are fully rewritten with respect to the CEqCan equalities (modulo eqCanRewrite of course; eg a wanted cannot rewrite a given) * CEqCan equalities: see Note [inert_eqs: the inert equalities] Also see documentation in Constraint.Ct for a list of invariants Note [inert_eqs: the inert equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Our main invariant: the EqCts in inert_eqs should be a terminating generalised substitution -------------- Definition [Can-rewrite relation] -------------- A "can-rewrite" relation between flavours, written f1 >= f2, is a binary relation with the following properties (R1) >= is transitive (R2) If f1 >= f, and f2 >= f, then either f1 >= f2 or f2 >= f1 (See Note [Why R2?].) Lemma (L0). If f1 >= f then f1 >= f1 Proof. 
By property (R2), with f1=f2 --------- Definition [Generalised substitution] --------------- A "generalised substitution" S is a set of triples (lhs -f-> t), where - lhs is a type variable or an exactly-saturated type family application (that is, lhs is a CanEqLHS) - t is a type - f is a flavour such that (WF1) if (lhs1 -f1-> t1) in S (lhs2 -f2-> t2) in S then (f1 >= f2) implies that lhs1 does not appear within lhs2 (WF2) if (lhs -f-> t) is in S, then t /= lhs (WF3) No LHS in S is rewritable in an RHS in S, in the argument of a type family application (F ty1..tyn) where F heads a LHS in S --------- Definition [Applying a generalised substitution] ---------- If S is a generalised substitution S(f,lhs) = rhs, if (lhs -fs-> rhs) in S, and fs >= f S(f,T t1..tn) = T S(f1,t1)..S(fn,tn) S(f,t1 t2) = S(f,t1) S(f_N,t2) S(f,t) = t Here f1..fn are obtained from f and T using the roles of T, and f_N is the nominal version of f. See Note [Flavours with roles]. Notation: repeated application. S^0(f,t) = t S^(n+1)(f,t) = S(f, S^n(t)) S*(f,t) is the result of applying S until you reach a fixpoint --------- Definition [Terminating generalised substitution] --------- A generalised substitution S is *terminating* iff (IG1) for every f,t, there is an n such that S^n(f,t) = S^(n+1)(f,t) By (IG1) we define S*(f,t) to be the result of exahaustively applying S(f,_) to t. --------- End of definitions ------------------------------------ Rationale for (WF1)-(WF3) ------------------------- * (WF1) guarantees that S is well-defined /as a function/; see Theorem (S is a function) Theorem (S is a function): S(f,t0) is well defined as a function. Proof: Suppose (lhs -f1-> t1) and (lhs -f2-> t2) are both in S, and f1 >= f and f2 >= f Then by (R2) f1 >= f2 or f2 >= f1, which contradicts (WF1) Note: this argument isn't quite right. WF1 ensures that lhs1 does not appear inside lhs2, and that guarantees confluence. But I can't quite see how to make that argument precise. * (WF2) is a bit trivial. 
It means that if S is terminating, so that S^(n+1)(f,t) = S^n(f,t), then there is no LHS of S in S^n(f,t). We never get a silly infinite sequence a -> a -> a -> a .... which is technically a fixed point but would still go on for ever. * (WF3) is need for the termination proof. Note that termination is not the same as idempotence. To apply S to a type, you may have to apply it recursively. But termination does guarantee that this recursive use will terminate. Note [Why R2?] ~~~~~~~~~~~~~~ R2 states that, if we have f1 >= f and f2 >= f, then either f1 >= f2 or f2 >= f1. If we do not have R2, we will easily fall into a loop. To see why, imagine we have f1 >= f, f2 >= f, and that's it. Then, let our inert set S = {a -f1-> b, b -f2-> a}. Computing S(f,a) does not terminate. And yet, we have a hard time noticing an occurs-check problem when building S, as the two equalities cannot rewrite one another. R2 actually restricts our ability to accept user-written programs. See Note [Avoiding rewriting cycles] in GHC.Tc.Types.Constraint for an example. Note [Rewritable] ~~~~~~~~~~~~~~~~~ Definition. A CanEqLHS lhs is *rewritable* in a type t if the lhs tree appears as a subtree within t without traversing any of the following components of t: * coercions (whether they appear in casts CastTy or as arguments CoercionTy) * kinds of variable occurrences The check for rewritability *does* look in kinds of the bound variable of a ForAllTy. The reason for this definition is that the rewriter does not rewrite in coercions or variables' kinds. In turn, the rewriter does not need to rewrite there because those places are never used for controlling the behaviour of the solver: these places are not used in matching instances or in decomposing equalities. This definition is used by the anyRewritableXXX family of functions and is meant to model the actual behaviour in GHC.Tc.Solver.Rewrite. 
Goal: If lhs is not rewritable in t, then t is a fixpoint of the generalised substitution containing only {lhs -f*-> t'}, where f* is a flavour such that f* >= f for all f. Wrinkles * Taking roles into account: we must consider a rewrite at a given role. That is, a rewrite arises from some equality, and that equality has a role associated with it. As we traverse a type, we track what role we are allowed to rewrite with. For example, suppose we have an inert [G] b ~R# Int. Then b is rewritable in Maybe b but not in F b, where F is a type function. This role-aware logic is present in both the anyRewritableXXX functions and in the rewriter. See also Note [anyRewritableTyVar must be role-aware] in GHC.Tc.Utils.TcType. * There is one exception to the claim that non-rewritable parts of the tree do not affect the solver: we sometimes do an occurs-check to decide e.g. how to orient an equality. (See the comments on GHC.Tc.Solver.Equality.canEqTyVarFunEq.) Accordingly, the presence of a variable in a kind or coercion just might influence the solver. Here is an example: type family Const x y where Const x y = x AxConst :: forall x y. Const x y ~# x alpha :: Const Type Nat [W] alpha ~ Int |> (sym (AxConst Type alpha) ;; AxConst Type alpha ;; sym (AxConst Type Nat)) The cast is clearly ludicrous (it ties together a cast and its symmetric version), but we can't quite rule it out. (See (EQ1) from Note [Respecting definitional equality] in GHC.Core.TyCo.Rep to see why we need the Const Type Nat bit.) And yet this cast will (quite rightly) prevent alpha from unifying with the RHS. I (Richard E) don't have an example of where this problem can arise from a Haskell program, but we don't have an air-tight argument for why the definition of *rewritable* given here is correct. 
Note [Extending the inert equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Main Theorem [Stability under extension] GIVEN a "work item" [lhs_w -fw-> rhs_w] and a terminating generalised substitution S, SUCH THAT (T1) S(fw,lhs_w) = lhs_w -- LHS of work-item is a fixpoint of S(fw,_) (T2) S(fw,rhs_w) = rhs_w -- RHS of work-item is a fixpoint of S(fw,_) (T3) lhs_w not in rhs_w -- No occurs check in the work item -- If lhs is a type family application, we require only that -- lhs is not *rewritable* in rhs_w. See Note [Rewritable] and -- Note [EqCt occurs check] in GHC.Tc.Types.Constraint. (T4) no [lhs_s -fs-> rhs_s] in S meets [The KickOut Criteria] (i.e. we already kicked any such items out!) THEN the extended substitution T = S+(lhs_w -fw-> rhs_w) is a terminating generalised substitution How do we establish these conditions? * (T1) and (T2) are guaranteed by exhaustively rewriting the work-item with S(fw,_). * (T3) is guaranteed by an occurs-check on the work item. This is done during canonicalisation, in checkTypeEq; invariant (TyEq:OC) of CEqCan. See also Note [EqCt occurs check] in GHC.Tc.Types.Constraint. * (T4) is established by GHC.Tc.Solver.Monad.kickOutRewritable. If the inert set contains a triple that meets the KickOut Criteria, we kick it out and add it to the work list for later re-examination. See Note [The KickOut Criteria] Theorem: T (defined in "THEN" above) is a generalised substitution; that is, it satisfies (WF1)-(WF3) Proof: (WF1) Suppose we are adding [lhs_w -fw-> rhs_w], and [lhs_s -fs-> rhs_s] is in S. Then: - by (T1) if fs>=fw, lhs_s does not occur within lhs_w. - by (KK1) if fw>=fs, lhs_w is not rewritable in lhs_s, or we'd have kicked out the stable constraint. 
(WF2) is directly guaranteed by (T3) (WF3) No lhs_s in S is rewritable in rhs_w at all, because of (T2) And (KK2) guarantees that lhs_w is not rewritable under a type family in rhs_s Note [The KickOut Criteria] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Kicking out is a Bad Thing: * It means we have to re-process a constraint. The less we kick out, the better. * In the limit, kicking can lead to non-termination: imagine that we /always/ kick out the entire inert set! * Because (mid 2024) we don't support sharing in constraints, excessive kicking out can lead to exponentially big constraints (#24984). So we seek to do as little kicking out as possible. For example, consider this, which happens a lot: Inert: g1: a ~ Maybe b Work: g2: b ~ Int We do /not/ kick out g1 when adding g2. The new substitution S' = {g1,g2} is still /terminating/ but it is not /idempotent/. To apply S' to, say, (Tree a), we may need to apply it twice: Tree a --> Tree (Maybe b) --> Tree (Maybe Int) Here are the KickOut Criteria: When adding [lhs_w -fw-> rhs_w] to a well-formed terminating substitution S, element [lhs_s -fs-> rhs_s] in S meets the KickOut Criteria if: (KK0) fw >= fs AND any of (KK1), (KK2) or (KK3) hold * (KK1: satisfy WF1) `lhs_w` is rewritable in `lhs_s`. * (KK2: termination) `lhs_w` is rewritable in `rhs_s` in these positions: If not(fs>=fw) then (KK2a) anywhere else (KK2b) look only in the argument of type family applications, whose type family heads some LHS in `S` * (KK3: completeness) If not(fs >= fw) -- If fs can rewrite fw, kick-out is redundant/harmful * (KK3a) If the role of `fs` is Nominal: kick out if `rhs_s = lhs_w` * (KK3b) If the role of `fs` is Representational: kick out if `rhs_s` is of form `(lhs_w t1 .. tn)` Rationale * (KK0) kick out only if `fw` can rewrite `fs`. Reason: suppose we kick out (lhs1 -fs-> s), and add (lhs -fw-> t) to the inert set. The latter can't rewrite the former, so the kick-out achieved nothing * (KK1) `lhs_w` is rewritable in `lhs_s`.
Reason: needed to guarantee (WF1). See Theorem: T is well formed * (KK2) see Note [KK2: termination of the extended substitution] * (KK3) see Note [KK3: completeness of solving] The above story is a bit vague wrt roles, but the code is not. See Note [Flavours with roles] Note [KK2: termination of the extended substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Proving termination of the extended substitution T is surprisingly tricky. * Reason for (KK2a). Consider Work: [G] b ~ a Inert: [W] a ~ b If we don't kick out the inert, then we get a loop on e.g. [W] a ~ Int. But if both were Wanted we really should not kick out (the substitution does not have to be idempotent). So we only look everywhere for the `lhs_w` if not (fs>=fw), that is the inert item cannot rewrite the work item. So in the above example we will kick out; but if both were Wanted we won't. * Reason for (KK2b). Consider the case where (fs >= fw) Work: [G] a ~ Int Inert: [G] F Int ~ F a If we just added the work item, the substitution would loop on type (F Int). So we must kick out the inert item, even though (fs>=fw). (KK2b) does this by looking for lhs_w under type family applications in rhs_s. (KK2b) makes kick-out less aggressive by looking only under type-family applications, in the case where (fs >= fw), and that made a /huge/ difference to #24944. Tricky examples in: #19042, #17672, #24984. The last (#24984) is particular subtle: Inert: [W] g1: F a0 ~ F a1 [W] g2: F a2 ~ F a1 [W] g3: F a3 ~ F a1 Now we add [W] g4: F a1 ~ F a7. Should we kick out g1,g2,g3? No! The substitution doesn't need to be idempotent, merely terminating. And in #24984 it turned out that we kept adding one new constraint and kicking out all the previous inert ones (and that rewriting led to exponentially big constraints due to lack of contraint sharing.) So we only want to look /under/ type family applications. The proof is hard. We start by ignoring flavours. 
Suppose that: * We are adding [lhs_w -fw-> rhs_w] to a well-formed, terminating substitution S. * None of the constraints in S meet the KickOut Criteria. * Define T = S+[lhs_w -fw-> rhs_w] * `f` is an arbitrary flavour Lemma 1: for any lhs_s in S, T*(f,lhs_s) terminates. Proof. * We know that r1 = S*(f,lhs_s) terminates. * Moreover, we know there are no occurrences of lhs_w under a type family (which is the head of a LHS) in r1 (KK2)+(WF3). We need (WF3) because you might wonder what if rhs_s is (F a), and [a --> lhs_w] was in S. But (WF3) prevents that. * Define r2 = r1{rhs_w/lhs_w}. We know that rhs_w has no occurrences of any lhs in S, nor of lhs_w. * Since any occurrence of lhs_w does not occur under a type family, the substitution won't make any F t1..tn ~ s in S match. * So r2 is a fixed point of T. Lemma 2: T*(f,lhs_w) teminates. Proof: no occurrences of any LHS in rhs_w. Theorem. For any type r, T*(r) terminates. Proof: 1. Consider any sub-term of r, which is a LHS of T. - Rewrite it with T*; this terminates (Lemma 1). - Do this simultaneously to all sub-terms that match a LHS of T, yielding r1. 2. Could this new r1 have a sub-term that is an LHS of T? Yes, but only if r has a sub-term F w, and w rewrote in Step 1 to w' and F w' matches a LHS in T. 3. Very well: apply step 1 again, but note that /doing so consumes one of the family applications in the original r/. 4. After Step 1 either we have reached a fixed point, or we repeat Step 1 consuming at least one family application of r. 5. There are only a finite number of family applications in r, so this process terminates. Example: Inert set: gs : F Int ~ b Work item: gw : b ~ Int F (F (F b)) --[gw]--> F (F (F Int)) --[gs]--> F (F b) --[gw]--> F (F Int) --[gs]--> F b --[gw]--> F Int --[gs]--> b --[gw]--> Int Notice that each iteration of Step 1 strips off one of the layers of F, all of which were in the original r. 
The argument is even more tricky when flavours are involved, and we have not fleshed it out in detail. Note [KK3: completeness of solving] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (KK3) is not necessary for the extended substitution to be terminating. In fact (KK0) could be made stronger by saying ... then (not (fw >= fs) or not (fs >= fs)) But it's not enough for S to be /terminating/; we also want /completeness/. That is, we want to be able to solve all soluble wanted equalities. Suppose we have work-item b -G-> a inert-item a -W-> b Assuming (G >= W) but not (W >= W), this fulfills all the conditions, so we could extend the inerts, thus: inert-items b -G-> a a -W-> b But if we kicked-out the inert item, we'd get work-item a -W-> b inert-item b -G-> a Then rewrite the work-item gives us (a -W-> a), which is soluble via Refl. So we add one more clause (KK3) to the kick-out criteria: * (KK3: completeness) If not(fs >= fw) (KK3a) * (KK3b) If the role of `fs` is Nominal: kick out if `rhs_s = lhs_w` * (KK3c) If the role of `fs` is Representational: kick out if `rhs_s` is of form `(lhs_w t1 .. tn)` Wrinkles: * (KK3a) All this can only happen if the work-item can rewrite the inert one, /but not vice versa/; that is not(fs >= fw). It is useless to kick out if (fs >= fw) because then the work-item is already fully rewritten by the inert item. And too much kick-out is positively harmful. (Historical example #14363.) * (KK3b) addresses the main example above for KK3. Another way to understand (KK3b) is that we treat an inert item a -f-> b in the same way as b -f-> a So if we kick out one, we should kick out the other. The orientation is somewhat accidental. * (KK3c) When considering roles, we also need the second clause (KK3b). Consider work-item c -G/N-> a inert-item a -W/R-> b c The work-item doesn't get rewritten by the inert, because (>=) doesn't hold. But we don't kick out the inert item because not (W/R >= W/R). So we just add the work item.
But then, consider if we hit the following: work-item b -G/N-> Id inert-items a -W/R-> b c c -G/N-> a where newtype Id x = Id x For similar reasons, if we only had (KK3a), we wouldn't kick the representational inert out. And then, we'd miss solving the inert, which now reduced to reflexivity. The solution here is to kick out representational inerts whenever the lhs of a work item is "exposed", where exposed means being at the head of the top-level application chain (lhs t1 .. tn). See head_is_new_lhs. This is encoded in (KK3c)). Note [Flavours with roles] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The system described in Note [inert_eqs: the inert equalities] discusses an abstract set of flavours. In GHC, flavours have two components: the flavour proper, taken from {Wanted, Given} and the equality relation (often called role), taken from {NomEq, ReprEq}. When substituting w.r.t. the inert set, as described in Note [inert_eqs: the inert equalities], we must be careful to respect all components of a flavour. For example, if we have inert set: a -G/R-> Int b -G/R-> Bool type role T nominal representational and we wish to compute S(W/R, T a b), the correct answer is T a Bool, NOT T Int Bool. The reason is that T's first parameter has a nominal role, and thus rewriting a to Int in T a b is wrong. Indeed, this non-congruence of substitution means that the proof in Note [inert_eqs: the inert equalities] may need to be revisited, but we don't think that the end conclusion is wrong. -} data InertCans -- See Note [Detailed InertCans Invariants] for more = IC { inert_eqs :: InertEqs -- See Note [inert_eqs: the inert equalities] -- All EqCt with a TyVarLHS; index is the LHS tyvar -- Domain = skolems and untouchables; a touchable would be unified , inert_funeqs :: InertFunEqs -- All EqCt with a TyFamLHS; index is the whole family head type. 
-- LHS is fully rewritten (modulo eqCanRewrite constraints) -- wrt inert_eqs -- Can include both [G] and [W] , inert_dicts :: DictMap DictCt -- Dictionaries only -- All fully rewritten (modulo flavour constraints) -- wrt inert_eqs , inert_insts :: [QCInst] , inert_safehask :: DictMap DictCt -- Failed dictionary resolution due to Safe Haskell overlapping -- instances restriction. We keep this separate from inert_dicts -- as it doesn't cause compilation failure, just safe inference -- failure. -- -- ^ See Note [Safe Haskell Overlapping Instances Implementation] -- in GHC.Tc.Solver , inert_irreds :: InertIrreds -- Irreducible predicates that cannot be made canonical, -- and which don't interact with others (e.g. (c a)) -- and insoluble predicates (e.g. Int ~ Bool, or a ~ [a]) , inert_given_eq_lvl :: TcLevel -- The TcLevel of the innermost implication that has a Given -- equality of the sort that make a unification variable untouchable -- (see Note [Unification preconditions] in GHC.Tc.Utils.Unify). -- See Note [Tracking Given equalities] , inert_given_eqs :: Bool -- True <=> The inert Givens *at this level* (tcl_tclvl) -- could includes at least one equality /other than/ a -- let-bound skolem equality. 
-- Reason: report these givens when reporting a failed equality -- See Note [Tracking Given equalities] } type InertEqs = DTyVarEnv EqualCtList type InertFunEqs = FunEqMap EqualCtList type InertIrreds = Bag IrredCt instance Outputable InertCans where ppr (IC { inert_eqs = eqs , inert_funeqs = funeqs , inert_dicts = dicts , inert_safehask = safehask , inert_irreds = irreds , inert_given_eq_lvl = ge_lvl , inert_given_eqs = given_eqs , inert_insts = insts }) = braces $ vcat [ ppUnless (isEmptyDVarEnv eqs) $ text "Equalities =" <+> pprBag (foldTyEqs consBag eqs emptyBag) , ppUnless (isEmptyTcAppMap funeqs) $ text "Type-function equalities =" <+> pprBag (foldFunEqs consBag funeqs emptyBag) , ppUnless (isEmptyTcAppMap dicts) $ text "Dictionaries =" <+> pprBag (dictsToBag dicts) , ppUnless (isEmptyTcAppMap safehask) $ text "Safe Haskell unsafe overlap =" <+> pprBag (dictsToBag safehask) , ppUnless (isEmptyBag irreds) $ text "Irreds =" <+> pprBag irreds , ppUnless (null insts) $ text "Given instances =" <+> vcat (map ppr insts) , text "Innermost given equalities =" <+> ppr ge_lvl , text "Given eqs at this level =" <+> ppr given_eqs ] {- ********************************************************************* * * Inert equalities * * ********************************************************************* -} emptyTyEqs :: InertEqs emptyTyEqs = emptyDVarEnv addEqToCans :: TcLevel -> EqCt -> InertCans -> InertCans addEqToCans tc_lvl eq_ct@(EqCt { eq_lhs = lhs }) ics@(IC { inert_funeqs = funeqs, inert_eqs = eqs }) = updGivenEqs tc_lvl (CEqCan eq_ct) $ case lhs of TyFamLHS tc tys -> ics { inert_funeqs = addCanFunEq funeqs tc tys eq_ct } TyVarLHS tv -> ics { inert_eqs = addTyEq eqs tv eq_ct } addTyEq :: InertEqs -> TcTyVar -> EqCt -> InertEqs addTyEq old_eqs tv ct = extendDVarEnv_C add_eq old_eqs tv [ct] where add_eq old_eqs _ = addToEqualCtList ct old_eqs foldTyEqs :: (EqCt -> b -> b) -> InertEqs -> b -> b foldTyEqs k eqs z = foldDVarEnv (\cts z -> foldr k z cts) z eqs findTyEqs :: 
InertCans -> TyVar -> [EqCt] findTyEqs icans tv = concat @Maybe (lookupDVarEnv (inert_eqs icans) tv) delEq :: EqCt -> InertCans -> InertCans delEq (EqCt { eq_lhs = lhs, eq_rhs = rhs }) ic = case lhs of TyVarLHS tv -> ic { inert_eqs = alterDVarEnv upd (inert_eqs ic) tv } TyFamLHS tf args -> ic { inert_funeqs = alterTcApp (inert_funeqs ic) tf args upd } where isThisOne :: EqCt -> Bool isThisOne (EqCt { eq_rhs = t1 }) = tcEqTypeNoKindCheck rhs t1 upd :: Maybe EqualCtList -> Maybe EqualCtList upd (Just eq_ct_list) = filterEqualCtList (not . isThisOne) eq_ct_list upd Nothing = Nothing findEq :: InertCans -> CanEqLHS -> [EqCt] findEq icans (TyVarLHS tv) = findTyEqs icans tv findEq icans (TyFamLHS fun_tc fun_args) = concat @Maybe (findFunEq (inert_funeqs icans) fun_tc fun_args) {-# INLINE partition_eqs_container #-} partition_eqs_container :: forall container . container -- empty container -> (forall b. (EqCt -> b -> b) -> container -> b -> b) -- folder -> (EqCt -> container -> container) -- extender -> (EqCt -> Bool) -> container -> ([EqCt], container) partition_eqs_container empty_container fold_container extend_container pred orig_inerts = fold_container folder orig_inerts ([], empty_container) where folder :: EqCt -> ([EqCt], container) -> ([EqCt], container) folder eq_ct (acc_true, acc_false) | pred eq_ct = (eq_ct : acc_true, acc_false) | otherwise = (acc_true, extend_container eq_ct acc_false) partitionInertEqs :: (EqCt -> Bool) -- EqCt will always have a TyVarLHS -> InertEqs -> ([EqCt], InertEqs) partitionInertEqs = partition_eqs_container emptyTyEqs foldTyEqs addInertEqs addInertEqs :: EqCt -> InertEqs -> InertEqs -- Precondition: CanEqLHS is a TyVarLHS addInertEqs eq_ct@(EqCt { eq_lhs = TyVarLHS tv }) eqs = addTyEq eqs tv eq_ct addInertEqs other _ = pprPanic "extendInertEqs" (ppr other) ------------------------ addCanFunEq :: InertFunEqs -> TyCon -> [TcType] -> EqCt -> InertFunEqs addCanFunEq old_eqs fun_tc fun_args ct = alterTcApp old_eqs fun_tc fun_args upd 
where upd (Just old_equal_ct_list) = Just $ addToEqualCtList ct old_equal_ct_list upd Nothing = Just [ct] foldFunEqs :: (EqCt -> b -> b) -> FunEqMap EqualCtList -> b -> b foldFunEqs k fun_eqs z = foldTcAppMap (\eqs z -> foldr k z eqs) fun_eqs z partitionFunEqs :: (EqCt -> Bool) -- EqCt will have a TyFamLHS -> InertFunEqs -> ([EqCt], InertFunEqs) partitionFunEqs = partition_eqs_container emptyFunEqs foldFunEqs addFunEqs addFunEqs :: EqCt -> InertFunEqs -> InertFunEqs -- Precondition: EqCt is a TyFamLHS addFunEqs eq_ct@(EqCt { eq_lhs = TyFamLHS tc args }) fun_eqs = addCanFunEq fun_eqs tc args eq_ct addFunEqs other _ = pprPanic "extendFunEqs" (ppr other) {- ********************************************************************* * * Inert Dicts * * ********************************************************************* -} updDicts :: (DictMap DictCt -> DictMap DictCt) -> InertCans -> InertCans updDicts upd ics = ics { inert_dicts = upd (inert_dicts ics) } delDict :: DictCt -> DictMap a -> DictMap a delDict (DictCt { di_cls = cls, di_tys = tys }) m = delTcApp m (classTyCon cls) tys addDict :: DictCt -> DictMap DictCt -> DictMap DictCt addDict item@(DictCt { di_cls = cls, di_tys = tys }) dm = insertTcApp dm (classTyCon cls) tys item addSolvedDict :: DictCt -> DictMap DictCt -> DictMap DictCt addSolvedDict item@(DictCt { di_cls = cls, di_tys = tys }) dm = insertTcApp dm (classTyCon cls) tys item filterDicts :: (DictCt -> Bool) -> DictMap DictCt -> DictMap DictCt filterDicts f m = filterTcAppMap f m partitionDicts :: (DictCt -> Bool) -> DictMap DictCt -> (Bag DictCt, DictMap DictCt) partitionDicts f m = foldTcAppMap k m (emptyBag, emptyDictMap) where k ct (yeses, noes) | f ct = (ct `consBag` yeses, noes) | otherwise = (yeses, addDict ct noes) {- ********************************************************************* * * Inert Irreds * * ********************************************************************* -} addIrredToCans :: TcLevel -> IrredCt -> InertCans -> InertCans 
addIrredToCans tc_lvl irred ics = updGivenEqs tc_lvl (CIrredCan irred) $ updIrreds (addIrred irred) ics addIrreds :: [IrredCt] -> InertIrreds -> InertIrreds addIrreds extras irreds | null extras = irreds | otherwise = irreds `unionBags` listToBag extras addIrred :: IrredCt -> InertIrreds -> InertIrreds addIrred extra irreds = irreds `snocBag` extra updIrreds :: (InertIrreds -> InertIrreds) -> InertCans -> InertCans updIrreds upd ics = ics { inert_irreds = upd (inert_irreds ics) } delIrred :: IrredCt -> InertCans -> InertCans -- Remove a particular (Given) Irred, on the instructions of a plugin -- For some reason this is done vis the evidence Id, not the type -- Compare delEq. I have not idea why delIrred (IrredCt { ir_ev = ev }) ics = updIrreds (filterBag keep) ics where ev_id = ctEvEvId ev keep (IrredCt { ir_ev = ev' }) = ev_id /= ctEvEvId ev' foldIrreds :: (IrredCt -> b -> b) -> InertIrreds -> b -> b foldIrreds k irreds z = foldr k z irreds findMatchingIrreds :: InertIrreds -> CtEvidence -> (Bag (IrredCt, SwapFlag), InertIrreds) findMatchingIrreds irreds ev | EqPred eq_rel1 lty1 rty1 <- classifyPredType pred -- See Note [Solving irreducible equalities] = partitionBagWith (match_eq eq_rel1 lty1 rty1) irreds | otherwise = partitionBagWith match_non_eq irreds where pred = ctEvPred ev match_non_eq irred | irredCtPred irred `tcEqType` pred = Left (irred, NotSwapped) | otherwise = Right irred match_eq eq_rel1 lty1 rty1 irred | EqPred eq_rel2 lty2 rty2 <- classifyPredType (irredCtPred irred) , eq_rel1 == eq_rel2 , Just swap <- match_eq_help lty1 rty1 lty2 rty2 = Left (irred, swap) | otherwise = Right irred match_eq_help lty1 rty1 lty2 rty2 | lty1 `tcEqType` lty2, rty1 `tcEqType` rty2 = Just NotSwapped | lty1 `tcEqType` rty2, rty1 `tcEqType` lty2 = Just IsSwapped | otherwise = Nothing {- Note [Solving irreducible equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (#14333) [G] a b ~R# c d [W] c d ~R# a b Clearly we should be able to solve this! 
Even though the constraints are not decomposable. We solve this when looking up the work-item in the irreducible constraints to look for an identical one. When doing this lookup, findMatchingIrreds spots the equality case, and matches either way around. It has to return a swap-flag so we can generate evidence that is the right way round too. -} {- ********************************************************************* * * Adding to and removing from the inert set * * * * ********************************************************************* -} updGivenEqs :: TcLevel -> Ct -> InertCans -> InertCans -- Set the inert_given_eq_level to the current level (tclvl) -- if the constraint is a given equality that should prevent -- filling in an outer unification variable. -- See Note [Tracking Given equalities] -- -- Precondition: Ct is either CEqCan or CIrredCan updGivenEqs tclvl ct inerts | not (isGivenCt ct) = inerts -- See Note [Let-bound skolems] | isLetBoundSkolemCt tclvl ct = inerts { inert_given_eqs = True } -- At this point we are left with a constraint that either -- is an equality (F a ~ ty), or /might/ be, like (c a) | otherwise = inerts { inert_given_eq_lvl = tclvl , inert_given_eqs = True } isLetBoundSkolemCt :: TcLevel -> Ct -> Bool -- See Note [Let-bound skolems] isLetBoundSkolemCt tclvl (CEqCan (EqCt { eq_lhs = lhs, eq_rhs = rhs })) = case lhs of TyVarLHS tv -> not (isOuterTyVar tclvl tv) TyFamLHS {} -> case getTyVar_maybe rhs of Just tv -> not (isOuterTyVar tclvl tv) Nothing -> False isLetBoundSkolemCt _ _ = False data KickOutSpec -- See Note [KickOutSpec] = KOAfterUnify TcTyVarSet -- We have unified these tyvars | KOAfterAdding CanEqLHS -- We are adding to the inert set a canonical equality -- constraint with this LHS instance Outputable KickOutSpec where ppr (KOAfterUnify tvs) = text "KOAfterUnify" <> ppr tvs ppr (KOAfterAdding lhs) = text "KOAfterAdding" <> parens (ppr lhs) {- Note [KickOutSpec] ~~~~~~~~~~~~~~~~~~~~~~ KickOutSpec explains why we are kicking 
out. Important property: KOAfterAdding (TyVarLHS tv) should behave exactly like KOAfterUnifying (unitVarSet tv) The main reasons for treating the two separately are * More efficient in the single-tyvar case * The code is far more perspicuous -} data WhereToLook = LookEverywhere | LookOnlyUnderFamApps deriving( Eq ) kickOutRewritableLHS :: KickOutSpec -> CtFlavourRole -> InertCans -> (Cts, InertCans) -- See Note [kickOutRewritable] kickOutRewritableLHS ko_spec new_fr@(_, new_role) ics@(IC { inert_eqs = tv_eqs , inert_dicts = dictmap , inert_funeqs = funeqmap , inert_irreds = irreds , inert_insts = old_insts }) = (kicked_out, inert_cans_in) where -- inert_safehask stays unchanged; is that right? inert_cans_in = ics { inert_eqs = tv_eqs_in , inert_dicts = dicts_in , inert_funeqs = feqs_in , inert_irreds = irs_in , inert_insts = insts_in } kicked_out :: Cts kicked_out = (fmap CDictCan dicts_out `andCts` fmap CIrredCan irs_out) `extendCtsList` insts_out `extendCtsList` map CEqCan tv_eqs_out `extendCtsList` map CEqCan feqs_out (tv_eqs_out, tv_eqs_in) = partitionInertEqs kick_out_eq tv_eqs (feqs_out, feqs_in) = partitionFunEqs kick_out_eq funeqmap (dicts_out, dicts_in) = partitionDicts kick_out_dict dictmap (irs_out, irs_in) = partitionBag kick_out_irred irreds -- Kick out even insolubles: See Note [Rewrite insolubles] -- Of course we must kick out irreducibles like (c a), in case -- we can rewrite 'c' to something more useful -- Kick-out for inert instances -- See Note [Quantified constraints] in GHC.Tc.Solver.Solve insts_out :: [Ct] insts_in :: [QCInst] (insts_out, insts_in) | fr_may_rewrite (Given, NomEq) -- All the insts are Givens = partitionWith kick_out_qci old_insts | otherwise = ([], old_insts) kick_out_qci qci | let ev = qci_ev qci , fr_can_rewrite_ty LookEverywhere NomEq (ctEvPred (qci_ev qci)) = Left (mkNonCanonical ev) | otherwise = Right qci fr_tv_can_rewrite_ty :: WhereToLook -> (TyVar -> Bool) -> EqRel -> Type -> Bool fr_tv_can_rewrite_ty where_to_look 
check_tv role ty = anyRewritableTyVar role can_rewrite ty where can_rewrite :: UnderFam -> EqRel -> TyVar -> Bool can_rewrite is_under_famapp old_role tv = (where_to_look == LookEverywhere || is_under_famapp) && new_role `eqCanRewrite` old_role && check_tv tv fr_tf_can_rewrite_ty :: WhereToLook -> TyCon -> [TcType] -> EqRel -> Type -> Bool fr_tf_can_rewrite_ty where_to_look new_tf new_tf_args role ty = anyRewritableTyFamApp role can_rewrite ty where can_rewrite :: UnderFam -> EqRel -> TyCon -> [TcType] -> Bool can_rewrite is_under_famapp old_role old_tf old_tf_args = (where_to_look == LookEverywhere || is_under_famapp) && new_role `eqCanRewrite` old_role && tcEqTyConApps new_tf new_tf_args old_tf old_tf_args -- it's possible for old_tf_args to have too many. This is fine; -- we'll only check what we need to. fr_can_rewrite_ty :: WhereToLook -> EqRel -> Type -> Bool -- UnderFam = True <=> look only under type-family applications fr_can_rewrite_ty uf = case ko_spec of -- See Note [KickOutSpec] KOAfterUnify tvs -> fr_tv_can_rewrite_ty uf (`elemVarSet` tvs) KOAfterAdding (TyVarLHS tv) -> fr_tv_can_rewrite_ty uf (== tv) KOAfterAdding (TyFamLHS tf tf_args) -> fr_tf_can_rewrite_ty uf tf tf_args fr_may_rewrite :: CtFlavourRole -> Bool fr_may_rewrite fs = new_fr `eqCanRewriteFR` fs -- Can the new item rewrite the inert item? 
kick_out_dict :: DictCt -> Bool -- Kick it out if the new CEqCan can rewrite the inert one -- See Note [kickOutRewritable] kick_out_dict (DictCt { di_tys = tys, di_ev = ev }) = fr_may_rewrite (ctEvFlavour ev, NomEq) && any (fr_can_rewrite_ty LookEverywhere NomEq) tys kick_out_irred :: IrredCt -> Bool kick_out_irred (IrredCt { ir_ev = ev }) = fr_may_rewrite (ctEvFlavour ev, eq_rel) && fr_can_rewrite_ty LookEverywhere eq_rel pred where pred = ctEvPred ev eq_rel = predTypeEqRel pred -- Implements criteria K1-K3 in Note [Extending the inert equalities] kick_out_eq :: EqCt -> Bool kick_out_eq (EqCt { eq_lhs = lhs, eq_rhs = rhs_ty , eq_ev = ev, eq_eq_rel = eq_rel }) -- (KK0) Keep it in the inert set if the new thing can't rewrite it | not (fr_may_rewrite fs) = False -- Below here (fr_may_rewrite fs) is True -- (KK1) | fr_can_rewrite_ty LookEverywhere eq_rel (canEqLHSType lhs) = True -- (KK1) -- The above check redundantly checks the role & flavour, -- but it's very convenient -- (KK2) | let where_to_look | fs_can_rewrite_fr = LookOnlyUnderFamApps | otherwise = LookEverywhere , fr_can_rewrite_ty where_to_look eq_rel rhs_ty = True -- (KK3) | not fs_can_rewrite_fr -- (KK3a) , case eq_rel of NomEq -> is_new_lhs rhs_ty -- (KK3b) ReprEq -> head_is_new_lhs rhs_ty -- (KK3c) = True | otherwise = False where fs_can_rewrite_fr = fs `eqCanRewriteFR` new_fr fs = (ctEvFlavour ev, eq_rel) is_new_lhs :: Type -> Bool is_new_lhs = case ko_spec of -- See Note [KickOutSpec] KOAfterUnify tvs -> is_tyvar_ty_for tvs KOAfterAdding lhs -> (`eqType` canEqLHSType lhs) is_tyvar_ty_for :: TcTyVarSet -> Type -> Bool -- True if the type is equal to one of the tyvars is_tyvar_ty_for tvs ty = case getTyVar_maybe ty of Nothing -> False Just tv -> tv `elemVarSet` tvs head_is_new_lhs :: Type -> Bool head_is_new_lhs = case ko_spec of -- See Note [KickOutSpec] KOAfterUnify tvs -> tv_at_head (`elemVarSet` tvs) KOAfterAdding (TyVarLHS tv) -> tv_at_head (== tv) KOAfterAdding (TyFamLHS tf tf_args) -> fam_at_head 
tf tf_args tv_at_head :: (TyVar -> Bool) -> Type -> Bool tv_at_head is_tv = go where go (Rep.TyVarTy tv) = is_tv tv go (Rep.AppTy fun _) = go fun go (Rep.CastTy ty _) = go ty go (Rep.TyConApp {}) = False go (Rep.LitTy {}) = False go (Rep.ForAllTy {}) = False go (Rep.FunTy {}) = False go (Rep.CoercionTy {}) = False fam_at_head :: TyCon -> [Type] -> Type -> Bool fam_at_head fun_tc fun_args = go where go (Rep.TyVarTy {}) = False go (Rep.AppTy {}) = False -- no TyConApp to the left of an AppTy go (Rep.CastTy ty _) = go ty go (Rep.TyConApp tc args) = tcEqTyConApps fun_tc fun_args tc args go (Rep.LitTy {}) = False go (Rep.ForAllTy {}) = False go (Rep.FunTy {}) = False go (Rep.CoercionTy {}) = False {- Note [kickOutRewritable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [inert_eqs: the inert equalities]. When we add a new inert equality (lhs ~N ty) to the inert set, we must kick out any inert items that could be rewritten by the new equality, to maintain the inert-set invariants. - We want to kick out an existing inert constraint if a) the new constraint can rewrite the inert one b) 'lhs' is free in the inert constraint (so that it *will*) rewrite it if we kick it out. For (b) we use anyRewritableCanLHS, which examines the types /and kinds/ that are directly visible in the type. Hence we will have exposed all the rewriting we care about to make the most precise kinds visible for matching classes etc. No need to kick out constraints that mention type variables whose kinds contain this LHS! - We don't kick out constraints from inert_solved_dicts, and inert_solved_funeqs optimistically. But when we lookup we have to take the substitution into account NB: we could in principle avoid kick-out: a) When unifying a meta-tyvar from an outer level, because then the entire implication will be iterated; see Note [The Unification Level Flag] in GHC.Tc.Solver.Monad. b) For Givens, after a unification. 
By (GivenInv) in GHC.Tc.Utils.TcType Note [TcLevel invariants], a Given can't include a meta-tyvar from its own level, so it falls under (a). Of course, we must still kick out Givens when adding a new non-unification Given. But kicking out more vigorously may lead to earlier unification and fewer iterations, so we don't take advantage of these possibilities. Note [Rewrite insolubles] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have an insoluble alpha ~ [alpha], which is insoluble because an occurs check. And then we unify alpha := [Int]. Then we really want to rewrite the insoluble to [Int] ~ [[Int]]. Now it can be decomposed. Otherwise we end up with a "Can't match [Int] ~ [[Int]]" which is true, but a bit confusing because the outer type constructors match. Hence: * In the main simplifier loops in GHC.Tc.Solver (solveWanteds, simpl_loop), we feed the insolubles in solveSimpleWanteds, so that they get rewritten (albeit not solved). * We kick insolubles out of the inert set, if they can be rewritten (see GHC.Tc.Solver.Monad.kick_out_rewritable) * We rewrite those insolubles in GHC.Tc.Solver.Equality See Note [Make sure that insolubles are fully rewritten] in GHC.Tc.Solver.Equality -} {- ********************************************************************* * * Queries * * * * ********************************************************************* -} isOuterTyVar :: TcLevel -> TyCoVar -> Bool -- True of a type variable that comes from a -- shallower level than the ambient level (tclvl) isOuterTyVar tclvl tv | isTyVar tv = assertPpr (not (isTouchableMetaTyVar tclvl tv)) (ppr tv <+> ppr tclvl) $ tclvl `strictlyDeeperThan` tcTyVarLevel tv -- ASSERT: we are dealing with Givens here, and invariant (GivenInv) from -- Note Note [TcLevel invariants] in GHC.Tc.Utils.TcType ensures that there can't -- be a touchable meta tyvar. 
If this wasn't true, you might worry that, -- at level 3, a meta-tv alpha[3] gets unified with skolem b[2], and thereby -- becomes "outer" even though its level numbers says it isn't. | otherwise = False -- Coercion variables; doesn't much matter noGivenNewtypeReprEqs :: TyCon -> InertSet -> Bool -- True <=> there is no Irred looking like (N tys1 ~ N tys2) -- See Note [Decomposing newtype equalities] (EX2) in GHC.Tc.Solver.Equality -- This is the only call site. noGivenNewtypeReprEqs tc inerts = not (anyBag might_help (inert_irreds (inert_cans inerts))) where might_help irred = case classifyPredType (ctEvPred (irredCtEvidence irred)) of EqPred ReprEq t1 t2 | Just (tc1,_) <- tcSplitTyConApp_maybe t1 , tc == tc1 , Just (tc2,_) <- tcSplitTyConApp_maybe t2 , tc == tc2 -> True _ -> False -- | Returns True iff there are no Given constraints that might, -- potentially, match the given class constraint. This is used when checking to see if a -- Given might overlap with an instance. See Note [Instance and Given overlap] -- in GHC.Tc.Solver.Dict noMatchableGivenDicts :: InertSet -> CtLoc -> Class -> [TcType] -> Bool noMatchableGivenDicts inerts@(IS { inert_cans = inert_cans }) loc_w clas tys = not $ anyBag matchable_given $ findDictsByClass (inert_dicts inert_cans) clas where pred_w = mkClassPred clas tys matchable_given :: DictCt -> Bool matchable_given (DictCt { di_ev = ev }) | CtGiven { ctev_loc = loc_g, ctev_pred = pred_g } <- ev = isJust $ mightEqualLater inerts pred_g loc_g pred_w loc_w | otherwise = False mightEqualLater :: InertSet -> TcPredType -> CtLoc -> TcPredType -> CtLoc -> Maybe Subst -- See Note [What might equal later?] 
-- Used to implement logic in Note [Instance and Given overlap] in GHC.Tc.Solver.Dict mightEqualLater inert_set given_pred given_loc wanted_pred wanted_loc | prohibitedSuperClassSolve given_loc wanted_loc = Nothing | otherwise = case tcUnifyTysFG bind_fun [flattened_given] [flattened_wanted] of Unifiable subst -> Just subst MaybeApart reason subst | MARInfinite <- reason -- see Example 7 in the Note. -> Nothing | otherwise -> Just subst SurelyApart -> Nothing where in_scope = mkInScopeSet $ tyCoVarsOfTypes [given_pred, wanted_pred] -- NB: flatten both at the same time, so that we can share mappings -- from type family applications to variables, and also to guarantee -- that the fresh variables are really fresh between the given and -- the wanted. Flattening both at the same time is needed to get -- Example 10 from the Note. ([flattened_given, flattened_wanted], var_mapping) = flattenTysX in_scope [given_pred, wanted_pred] bind_fun :: BindFun bind_fun tv rhs_ty | isMetaTyVar tv , can_unify tv (metaTyVarInfo tv) rhs_ty -- this checks for CycleBreakerTvs and TyVarTvs; forgetting -- the latter was #19106. 
= BindMe -- See Examples 4, 5, and 6 from the Note | Just (_fam_tc, fam_args) <- lookupVarEnv var_mapping tv , anyFreeVarsOfTypes mentions_meta_ty_var fam_args = BindMe | otherwise = Apart -- True for TauTv and TyVarTv (and RuntimeUnkTv) meta-tyvars -- (as they can be unified) -- and also for CycleBreakerTvs that mentions meta-tyvars mentions_meta_ty_var :: TyVar -> Bool mentions_meta_ty_var tv | isMetaTyVar tv = case metaTyVarInfo tv of -- See Examples 8 and 9 in the Note CycleBreakerTv -> anyFreeVarsOfType mentions_meta_ty_var (lookupCycleBreakerVar tv inert_set) _ -> True | otherwise = False -- Like checkTopShape, but allows cbv variables to unify can_unify :: TcTyVar -> MetaInfo -> Type -> Bool can_unify _lhs_tv TyVarTv rhs_ty -- see Example 3 from the Note | Just rhs_tv <- getTyVar_maybe rhs_ty = case tcTyVarDetails rhs_tv of MetaTv { mtv_info = TyVarTv } -> True MetaTv {} -> False -- Could unify with anything SkolemTv {} -> True RuntimeUnk -> True | otherwise -- not a var on the RHS = False can_unify lhs_tv _other _rhs_ty = mentions_meta_ty_var lhs_tv -- | Is it (potentially) loopy to use the first @ct1@ to solve @ct2@? -- -- Necessary (but not sufficient) conditions for this function to return @True@: -- -- - @ct1@ and @ct2@ both arise from superclass expansion, -- - @ct1@ is a Given and @ct2@ is a Wanted. -- -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance, (sc2). prohibitedSuperClassSolve :: CtLoc -- ^ is it loopy to use this one ... -> CtLoc -- ^ ... to solve this one? -> Bool -- ^ True ==> don't solve it prohibitedSuperClassSolve given_loc wanted_loc | GivenSCOrigin _ _ blocked <- ctLocOrigin given_loc , blocked , ScOrigin _ NakedSc <- ctLocOrigin wanted_loc = True -- Prohibited if the Wanted is a superclass -- and the Given has come via a superclass selection from -- a predicate bigger than the head | otherwise = False {- Note [What might equal later?] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We must determine whether a Given might later equal a Wanted. We definitely need to account for the possibility that any metavariable might be arbitrarily instantiated. Yet we do *not* want to allow skolems in to be instantiated, as we've already rewritten with respect to any Givens. (We're solving a Wanted here, and so all Givens have already been processed.) This is best understood by example. 1. C alpha ~? C Int That given certainly might match later. 2. C a ~? C Int No. No new givens are going to arise that will get the `a` to rewrite to Int. 3. C alpha[tv] ~? C Int That alpha[tv] is a TyVarTv, unifiable only with other type variables. It cannot equal later. 4. C (F alpha) ~? C Int Sure -- that can equal later, if we learn something useful about alpha. 5. C (F alpha[tv]) ~? C Int This, too, might equal later. Perhaps we have [G] F b ~ Int elsewhere. Or maybe we have C (F alpha[tv] beta[tv]), these unify with each other, and F x x = Int. Remember: returning True doesn't commit ourselves to anything. 6. C (F a) ~? C a No, this won't match later. If we could rewrite (F a) or a, we would have by now. But see also Red Herring below. 7. C (Maybe alpha) ~? C alpha We say this cannot equal later, because it would require alpha := Maybe (Maybe (Maybe ...)). While such a type can be contrived, we choose not to worry about it. See Note [Infinitary substitution in lookup] in GHC.Core.InstEnv. Getting this wrong let to #19107, tested in typecheck/should_compile/T19107. 8. C cbv ~? C Int where cbv = F a The cbv is a cycle-breaker var which stands for F a. See Note [Type equality cycles] in GHC.Tc.Solver.Equality This is just like case 6, and we say "no". Saying "no" here is essential in getting the parser to type-check, with its use of DisambECP. 9. C cbv ~? C Int where cbv = F alpha Here, we might indeed equal later. Distinguishing between this case and Example 8 is why we need the InertSet in mightEqualLater. 10. C (F alpha, Int) ~? 
C (Bool, F alpha) This cannot equal later, because F a would have to equal both Bool and Int. To deal with type family applications, we use the Core flattener. See Note [Flattening type-family applications when matching instances] in GHC.Core.Unify. The Core flattener replaces all type family applications with fresh variables. The next question: should we allow these fresh variables in the domain of a unifying substitution? A type family application that mentions only skolems (example 6) is settled: any skolems would have been rewritten w.r.t. Givens by now. These type family applications match only themselves. A type family application that mentions metavariables, on the other hand, can match anything. So, if the original type family application contains a metavariable, we use BindMe to tell the unifier to allow it in the substitution. On the other hand, a type family application with only skolems is considered rigid. This treatment fixes #18910 and is tested in typecheck/should_compile/InstanceGivenOverlap{,2} Red Herring ~~~~~~~~~~~ In #21208, we have this scenario: instance forall b. C b [G] C a[sk] [W] C (F a[sk]) What should we do with that wanted? According to the logic above, the Given cannot match later (this is example 6), and so we use the global instance. But wait, you say: What if we learn later (say by a future type instance F a = a) that F a unifies with a? That looks like the Given might really match later! This mechanism described in this Note is *not* about this kind of situation, however. It is all asking whether a Given might match the Wanted *in this run of the solver*. It is *not* about whether a variable might be instantiated so that the Given matches, or whether a type instance introduced in a downstream module might make the Given match. The reason we care about what might match later is only about avoiding order-dependence. That is, we don't want to commit to a course of action that depends on seeing constraints in a certain order. 
But an instantiation of a variable and a later type instance don't introduce order dependency in this way, and so mightMatchLater is right to ignore these possibilities. Here is an example, with no type families, that is perhaps clearer: instance forall b. C (Maybe b) [G] C (Maybe Int) [W] C (Maybe a) What to do? We *might* say that the Given could match later and should thus block us from using the global instance. But we don't do this. Instead, we rely on class coherence to say that choosing the global instance is just fine, even if later we call a function with (a := Int). After all, in this run of the solver, [G] C (Maybe Int) will definitely never match [W] C (Maybe a). (Recall that we process Givens before Wanteds, so there is no [G] a ~ Int hanging about unseen.) Interestingly, in the first case (from #21208), the behavior changed between GHC 8.10.7 and GHC 9.2, with the latter behaving correctly and the former reporting overlapping instances. Test case: typecheck/should_compile/T21208. -} {- ********************************************************************* * * Cycle breakers * * ********************************************************************* -} -- | Return the type family application a CycleBreakerTv maps to. lookupCycleBreakerVar :: TcTyVar -- ^ cbv, must be a CycleBreakerTv -> InertSet -> TcType -- ^ type family application the cbv maps to lookupCycleBreakerVar cbv (IS { inert_cycle_breakers = cbvs_stack }) -- This function looks at every environment in the stack. This is necessary -- to avoid #20231. This function (and its one usage site) is the only reason -- that we store a stack instead of just the top environment. | Just tyfam_app <- assert (isCycleBreakerTyVar cbv) $ firstJusts (NE.map (lookupBag cbv) cbvs_stack) = tyfam_app | otherwise = pprPanic "lookupCycleBreakerVar found an unbound cycle breaker" (ppr cbv $$ ppr cbvs_stack) -- | Push a fresh environment onto the cycle-breaker var stack. Useful -- when entering a nested implication. 
pushCycleBreakerVarStack :: CycleBreakerVarStack -> CycleBreakerVarStack pushCycleBreakerVarStack = (emptyBag <|) -- | Add a new cycle-breaker binding to the top environment on the stack. addCycleBreakerBindings :: Bag (TcTyVar, Type) -- ^ (cbv,expansion) pairs -> InertSet -> InertSet addCycleBreakerBindings prs ics = assertPpr (all (isCycleBreakerTyVar . fst) prs) (ppr prs) $ ics { inert_cycle_breakers = add_to (inert_cycle_breakers ics) } where add_to (top_env :| rest_envs) = (prs `unionBags` top_env) :| rest_envs -- | Perform a monadic operation on all pairs in the top environment -- in the stack. forAllCycleBreakerBindings_ :: Monad m => CycleBreakerVarStack -> (TcTyVar -> TcType -> m ()) -> m () forAllCycleBreakerBindings_ (top_env :| _rest_envs) action = forM_ top_env (uncurry action) {-# INLINABLE forAllCycleBreakerBindings_ #-} -- to allow SPECIALISE later {- ********************************************************************* * * Solving one from another * * ********************************************************************* -} data InteractResult = KeepInert -- Keep the inert item, and solve the work item from it -- (if the latter is Wanted; just discard it if not) | KeepWork -- Keep the work item, and solve the inert item from it instance Outputable InteractResult where ppr KeepInert = text "keep inert" ppr KeepWork = text "keep work-item" solveOneFromTheOther :: Ct -- Inert (Dict or Irred) -> Ct -- WorkItem (same predicate as inert) -> InteractResult -- Precondition: -- * inert and work item represent evidence for the /same/ predicate -- * Both are CDictCan or CIrredCan -- -- We can always solve one from the other: even if both are wanted, -- although we don't rewrite wanteds with wanteds, we can combine -- two wanteds into one by solving one from the other solveOneFromTheOther ct_i ct_w | CtWanted { ctev_loc = loc_w } <- ev_w , prohibitedSuperClassSolve loc_i loc_w -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance = -- Inert must 
be Given KeepWork | CtWanted {} <- ev_w = -- Inert is Given or Wanted case ev_i of CtGiven {} -> KeepInert -- work is Wanted; inert is Given: easy choice. CtWanted {} -- Both are Wanted -- If only one has no pending superclasses, use it -- Otherwise we can get infinite superclass expansion (#22516) -- in silly cases like class C T b => C a b where ... | not is_psc_i, is_psc_w -> KeepInert | is_psc_i, not is_psc_w -> KeepWork -- If only one is a WantedSuperclassOrigin (arising from expanding -- a Wanted class constraint), keep the other: wanted superclasses -- may be unexpected by users | not is_wsc_orig_i, is_wsc_orig_w -> KeepInert | is_wsc_orig_i, not is_wsc_orig_w -> KeepWork -- otherwise, just choose the lower span -- reason: if we have something like (abs 1) (where the -- Num constraint cannot be satisfied), it's better to -- get an error about abs than about 1. -- This test might become more elaborate if we see an -- opportunity to improve the error messages | ((<) `on` ctLocSpan) loc_i loc_w -> KeepInert | otherwise -> KeepWork -- From here on the work-item is Given | CtWanted { ctev_loc = loc_i } <- ev_i , prohibitedSuperClassSolve loc_w loc_i = KeepInert -- Just discard the un-usable Given -- This never actually happens because -- Givens get processed first | CtWanted {} <- ev_i = KeepWork -- From here on both are Given -- See Note [Replacement vs keeping] | lvl_i `sameDepthAs` lvl_w = same_level_strategy | otherwise -- Both are Given, levels differ = different_level_strategy where ev_i = ctEvidence ct_i ev_w = ctEvidence ct_w pred = ctEvPred ev_i loc_i = ctEvLoc ev_i loc_w = ctEvLoc ev_w orig_i = ctLocOrigin loc_i orig_w = ctLocOrigin loc_w lvl_i = ctLocLevel loc_i lvl_w = ctLocLevel loc_w is_psc_w = isPendingScDict ct_w is_psc_i = isPendingScDict ct_i is_wsc_orig_i = isWantedSuperclassOrigin orig_i is_wsc_orig_w = isWantedSuperclassOrigin orig_w different_level_strategy -- Both Given | isIPLikePred pred = if lvl_w `strictlyDeeperThan` lvl_i then KeepWork 
else KeepInert | otherwise = if lvl_w `strictlyDeeperThan` lvl_i then KeepInert else KeepWork -- See Note [Replacement vs keeping] part (1) -- For the isIPLikePred case see Note [Shadowing of implicit parameters] -- in GHC.Tc.Solver.Dict same_level_strategy -- Both Given = case (orig_i, orig_w) of (GivenSCOrigin _ depth_i blocked_i, GivenSCOrigin _ depth_w blocked_w) | blocked_i, not blocked_w -> KeepWork -- Case 2(a) from | not blocked_i, blocked_w -> KeepInert -- Note [Replacement vs keeping] -- Both blocked or both not blocked | depth_w < depth_i -> KeepWork -- Case 2(c) from | otherwise -> KeepInert -- Note [Replacement vs keeping] (GivenSCOrigin {}, _) -> KeepWork -- Case 2(b) from Note [Replacement vs keeping] _ -> KeepInert -- Case 2(d) from Note [Replacement vs keeping] {- Note [Replacement vs keeping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we have two Given constraints both of type (C tys), say, which should we keep? More subtle than you might think! This is all implemented in solveOneFromTheOther. 1) Constraints come from different levels (different_level_strategy) - For implicit parameters we want to keep the innermost (deepest) one, so that it overrides the outer one. See Note [Shadowing of implicit parameters] in GHC.Tc.Solver.Dict - For everything else, we want to keep the outermost one. Reason: that makes it more likely that the inner one will turn out to be unused, and can be reported as redundant. See Note [Tracking redundant constraints] in GHC.Tc.Solver. It transpires that using the outermost one is responsible for an 8% performance improvement in nofib cryptarithm2, compared to just rolling the dice. I didn't investigate why. 2) Constraints coming from the same level (i.e. same implication) (a) If both are GivenSCOrigin, choose the one that is unblocked if possible according to Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance. (b) Prefer constraints that are not superclass selections. 
Example: f :: (Eq a, Ord a) => a -> Bool f x = x == x Eager superclass expansion gives us two [G] Eq a constraints. We want to keep the one from the user-written Eq a, not the superclass selection. This means we report the Ord a as redundant with -Wredundant-constraints, not the Eq a. Getting this wrong was #20602. See also Note [Tracking redundant constraints] in GHC.Tc.Solver. (c) If both are GivenSCOrigin, chooose the one with the shallower superclass-selection depth, in the hope of identifying more correct redundant constraints. This is really a generalization of point (b), because the superclass depth of a non-superclass constraint is 0. (If the levels differ, we definitely won't have both with GivenSCOrigin.) (d) Finally, when there is still a choice, use KeepInert rather than KeepWork, for two reasons: - to avoid unnecessary munging of the inert set. - to cut off superclass loops; see Note [Superclass loops] in GHC.Tc.Solver.Dict Doing the level-check for implicit parameters, rather than making the work item always override, is important. Consider data T a where { T1 :: (?x::Int) => T Int; T2 :: T a } f :: (?x::a) => T a -> Int f T1 = ?x f T2 = 3 We have a [G] (?x::a) in the inert set, and at the pattern match on T1 we add two new givens in the work-list: [G] (?x::Int) [G] (a ~ Int) Now consider these steps - process a~Int, kicking out (?x::a) - process (?x::Int), the inner given, adding to inert set - process (?x::a), the outer given, overriding the inner given Wrong! The level-check ensures that the inner implicit parameter wins. (Actually I think that the order in which the work-list is processed means that this chain of events won't happen, but that's very fragile.) 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Solver/Types.hs0000644000000000000000000002277607346545000021204 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GADTs #-} -- | Utility types used within the constraint solver module GHC.Tc.Solver.Types ( -- Inert CDictCans DictMap, emptyDictMap, findDictsByTyConKey, findDictsByClass, foldDicts, findDict, dictsToBag, FunEqMap, emptyFunEqs, findFunEq, insertFunEq, findFunEqsByTyCon, TcAppMap, emptyTcAppMap, isEmptyTcAppMap, insertTcApp, alterTcApp, filterTcAppMap, tcAppMapToBag, foldTcAppMap, delTcApp, EqualCtList, filterEqualCtList, addToEqualCtList ) where import GHC.Prelude import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin import GHC.Tc.Types.CtLoc( CtLoc, ctLocOrigin ) import GHC.Tc.Utils.TcType import GHC.Types.Unique import GHC.Types.Unique.DFM import GHC.Core.Class import GHC.Core.Map.Type import GHC.Core.Predicate import GHC.Core.TyCon import GHC.Core.TyCon.Env import GHC.Data.Bag import GHC.Data.Maybe import GHC.Data.TrieMap import GHC.Utils.Constants import GHC.Utils.Outputable import GHC.Utils.Panic {- ********************************************************************* * * TcAppMap * * ************************************************************************ Note [Use loose types in inert set] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Whenever we are looking up an inert dictionary (CDictCan) or function equality (CEqCan), we use a TcAppMap, which uses the Unique of the class/type family tycon and then a trie which maps the arguments. This trie does *not* need to match the kinds of the arguments; this Note explains why. Consider the types ty0 = (T ty1 ty2 ty3 ty4) and ty0' = (T ty1' ty2' ty3' ty4'), where ty4 and ty4' have different kinds. Let's further assume that both types ty0 and ty0' are well-typed. 
Because the kind of T is closed, it must be that one of the ty1..ty3 does not match ty1'..ty3' (and that the kind of the fourth argument to T is dependent on whichever one changed). Since we are matching all arguments, during the inert-set lookup, we know that ty1..ty3 do indeed match ty1'..ty3'. Therefore, the kind of ty4 and ty4' must match, too -- without ever looking at it. Accordingly, we use LooseTypeMap, which skips the kind check when looking up a type. I (Richard E) believe this is just an optimization, and that looking at kinds would be harmless. -} type TcAppMap a = DTyConEnv (ListMap LooseTypeMap a) -- Indexed by tycon then the arg types, using "loose" matching, where -- we don't require kind equality. This allows, for example, (a |> co) -- to match (a). -- See Note [Use loose types in inert set] -- Used for types and classes; hence UniqDFM -- See Note [foldTM determinism] in GHC.Data.TrieMap for why we use DTyConEnv here isEmptyTcAppMap :: TcAppMap a -> Bool isEmptyTcAppMap m = isEmptyDTyConEnv m emptyTcAppMap :: TcAppMap a emptyTcAppMap = emptyDTyConEnv findTcApp :: TcAppMap a -> TyCon -> [Type] -> Maybe a findTcApp m tc tys = do { tys_map <- lookupDTyConEnv m tc ; lookupTM tys tys_map } delTcApp :: TcAppMap a -> TyCon -> [Type] -> TcAppMap a delTcApp m tc tys = adjustDTyConEnv (deleteTM tys) m tc insertTcApp :: TcAppMap a -> TyCon -> [Type] -> a -> TcAppMap a insertTcApp m tc tys ct = alterDTyConEnv alter_tm m tc where alter_tm mb_tm = Just (insertTM tys ct (mb_tm `orElse` emptyTM)) alterTcApp :: forall a. TcAppMap a -> TyCon -> [Type] -> XT a -> TcAppMap a alterTcApp m tc tys upd = alterDTyConEnv alter_tm m tc where alter_tm :: Maybe (ListMap LooseTypeMap a) -> Maybe (ListMap LooseTypeMap a) alter_tm m_elt = Just (alterTM tys upd (m_elt `orElse` emptyTM)) filterTcAppMap :: forall a. 
(a -> Bool) -> TcAppMap a -> TcAppMap a filterTcAppMap f m = mapMaybeDTyConEnv one_tycon m where one_tycon :: ListMap LooseTypeMap a -> Maybe (ListMap LooseTypeMap a) one_tycon tm | isEmptyTM filtered_tm = Nothing | otherwise = Just filtered_tm where filtered_tm = filterTM f tm tcAppMapToBag :: TcAppMap a -> Bag a tcAppMapToBag m = foldTcAppMap consBag m emptyBag foldTcAppMap :: (a -> b -> b) -> TcAppMap a -> b -> b foldTcAppMap k m z = foldDTyConEnv (foldTM k) z m {- ********************************************************************* * * DictMap * * ********************************************************************* -} type DictMap a = TcAppMap a emptyDictMap :: DictMap a emptyDictMap = emptyTcAppMap findDict :: DictMap a -> CtLoc -> Class -> [Type] -> Maybe a findDict m loc cls tys | Just {} <- isCallStackPred cls tys , isPushCallStackOrigin (ctLocOrigin loc) = Nothing -- See Note [Solving CallStack constraints] | otherwise = findTcApp m (classTyCon cls) tys findDictsByClass :: DictMap a -> Class -> Bag a findDictsByClass m cls = findDictsByTyConKey m (getUnique $ classTyCon cls) findDictsByTyConKey :: DictMap a -> Unique -> Bag a findDictsByTyConKey m tc | Just tm <- lookupUDFM_Directly m tc = foldTM consBag tm emptyBag | otherwise = emptyBag dictsToBag :: DictMap a -> Bag a dictsToBag = tcAppMapToBag foldDicts :: (a -> b -> b) -> DictMap a -> b -> b foldDicts = foldTcAppMap {- Note [Solving CallStack constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [Overview of implicit CallStacks] in GHc.Tc.Types.Evidence. Suppose f :: HasCallStack => blah. Then * Each call to 'f' gives rise to [W] s1 :: IP "callStack" CallStack -- CtOrigin = OccurrenceOf f with a CtOrigin that says "OccurrenceOf f". 
Remember that HasCallStack is just shorthand for IP "callStack" CallStack See Note [Overview of implicit CallStacks] in GHC.Tc.Types.Evidence * We cannonicalise such constraints, in GHC.Tc.Solver.Dict.canDictNC, by pushing the call-site info on the stack, and changing the CtOrigin to record that has been done. Bind: s1 = pushCallStack s2 [W] s2 :: IP "callStack" CallStack -- CtOrigin = IPOccOrigin * Then, and only then, we can solve the constraint from an enclosing Given. So we must be careful /not/ to solve 's1' from the Givens. Again, we ensure this by arranging that findDict always misses when looking up such constraints. -} {- ********************************************************************* * * FunEqMap * * ********************************************************************* -} type FunEqMap a = TcAppMap a -- A map whose key is a (TyCon, [Type]) pair emptyFunEqs :: TcAppMap a emptyFunEqs = emptyTcAppMap findFunEq :: FunEqMap a -> TyCon -> [Type] -> Maybe a findFunEq m tc tys = findTcApp m tc tys findFunEqsByTyCon :: FunEqMap a -> TyCon -> [a] -- Get inert function equation constraints that have the given tycon -- in their head. Not that the constraints remain in the inert set. -- We use this to check for wanted interactions with built-in type-function -- constructors. findFunEqsByTyCon m tc | Just tm <- lookupDTyConEnv m tc = foldTM (:) tm [] | otherwise = [] insertFunEq :: FunEqMap a -> TyCon -> [Type] -> a -> FunEqMap a insertFunEq m tc tys val = insertTcApp m tc tys val {- ********************************************************************* * * EqualCtList * * ********************************************************************* -} {- Note [EqualCtList invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * All are equalities * All these equalities have the same LHS * No element of the list can rewrite any other Accordingly, this list is either empty, contains one element, or contains a Given representational equality and a Wanted nominal one. 
-} type EqualCtList = [EqCt] -- See Note [EqualCtList invariants] addToEqualCtList :: EqCt -> EqualCtList -> EqualCtList -- See Note [EqualCtList invariants] addToEqualCtList ct old_eqs | debugIsOn = case ct of EqCt { eq_lhs = TyVarLHS tv } -> assert (all (shares_lhs tv) old_eqs) $ assertPpr (null bad_prs) (vcat [ text "bad_prs" <+> ppr bad_prs , text "ct:old_eqs" <+> ppr (ct : old_eqs) ]) $ (ct : old_eqs) _ -> pprPanic "addToEqualCtList not CEqCan" (ppr ct) | otherwise = ct : old_eqs where shares_lhs tv (EqCt { eq_lhs = TyVarLHS old_tv }) = tv == old_tv shares_lhs _ _ = False bad_prs = filter is_bad_pair (distinctPairs (ct : old_eqs)) is_bad_pair :: (EqCt, EqCt) -> Bool is_bad_pair (ct1,ct2) = eqCtFlavourRole ct1 `eqCanRewriteFR` eqCtFlavourRole ct2 distinctPairs :: [a] -> [(a,a)] -- distinctPairs [x1,...xn] is the list of all pairs [ ...(xi, xj)...] -- where i /= j -- NB: does not return pairs (xi,xi), which would be stupid in the -- context of addToEqualCtList (#22645) distinctPairs [] = [] distinctPairs (x:xs) = concatMap (\y -> [(x,y),(y,x)]) xs ++ distinctPairs xs -- returns Nothing when the new list is empty, to keep the environments smaller filterEqualCtList :: (EqCt -> Bool) -> EqualCtList -> Maybe EqualCtList filterEqualCtList pred cts | null new_list = Nothing | otherwise = Just new_list where new_list = filter pred cts ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/0000755000000000000000000000000007346545000016254 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types.hs0000644000000000000000000013445607346545000017731 0ustar0000000000000000 {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE PatternSynonyms #-} {- (c) The University of Glasgow 2006-2012 (c) The GRASP Project, Glasgow University, 1992-2002 -} -- | Various types used during typechecking. 
-- -- Please see "GHC.Tc.Utils.Monad" as well for operations on these types. You probably -- want to import it, instead of this module. -- -- All the monads exported here are built on top of the same IOEnv monad. The -- monad functions like a Reader monad in the way it passes the environment -- around. This is done to allow the environment to be manipulated in a stack -- like fashion when entering expressions... etc. -- -- For state that is global and should be returned at the end (e.g not part -- of the stack mechanism), you should use a TcRef (= IORef) to store them. module GHC.Tc.Types( TcRnIf, TcRn, TcM, RnM, IfM, IfL, IfG, -- The monad is opaque outside this module TcRef, -- The environment types Env(..), TcGblEnv(..), TcLclEnv(..), modifyLclCtxt, TcLclCtxt(..), setLclEnvTcLevel, getLclEnvTcLevel, setLclEnvLoc, getLclEnvLoc, lclEnvInGeneratedCode, IfGblEnv(..), IfLclEnv(..), tcVisibleOrphanMods, RewriteEnv(..), -- Frontend types (shouldn't really be here) FrontendResult(..), -- Renamer types ErrCtxt, ImportAvails(..), emptyImportAvails, plusImportAvails, ImportUserSpec(..), ImpUserList(..), mkModDeps, -- Typechecker types TcTypeEnv, TcBinderStack, TcBinder(..), TcTyThing(..), tcTyThingTyCon_maybe, PromotionErr(..), IdBindingInfo(..), ClosedTypeId, RhsNames, IsGroupClosed(..), SelfBootInfo(..), bootExports, tcTyThingCategory, pprTcTyThingCategory, peCategory, pprPECategory, CompleteMatch, CompleteMatches, -- Template Haskell ThStage(..), SpliceType(..), SpliceOrBracket(..), PendingStuff(..), topStage, topAnnStage, topSpliceStage, ThLevel, impLevel, outerLevel, thLevel, ForeignSrcLang(..), THDocs, DocLoc(..), ThBindEnv, -- Arrows ArrowCtxt(..), -- TcSigInfo TcSigFun, TcSigInfo(..), TcIdSig(..), TcCompleteSig(..), TcPartialSig(..), TcPatSynSig(..), TcIdSigInst(..), isPartialSig, hasCompleteSig, tcSigInfoName, tcIdSigLoc, completeSigPolyId_maybe, -- Misc other types TcId, NameShape(..), removeBindingShadowing, getPlatform, -- Constraint solver plugins 
TcPlugin(..), TcPluginSolveResult(TcPluginContradiction, TcPluginOk, ..), TcPluginRewriteResult(..), TcPluginSolver, TcPluginRewriter, TcPluginM(runTcPluginM), unsafeTcPluginTcM, -- Defaulting plugin DefaultingPlugin(..), DefaultingProposal(..), FillDefaulting, -- Role annotations RoleAnnotEnv, emptyRoleAnnotEnv, mkRoleAnnotEnv, lookupRoleAnnot, getRoleAnnots, -- Linting lintGblEnv, -- Diagnostics TcRnMessage ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Env import GHC.Driver.Env.KnotVars import GHC.Driver.Config.Core.Lint import GHC.Driver.DynFlags import {-# SOURCE #-} GHC.Driver.Hooks import GHC.Linker.Types import GHC.Hs import GHC.Tc.Utils.TcType import GHC.Tc.Types.Constraint import GHC.Tc.Types.CtLoc( CtLoc ) import GHC.Tc.Types.Evidence import GHC.Tc.Types.TH import GHC.Tc.Types.TcRef import GHC.Tc.Types.LclEnv import GHC.Tc.Types.BasicTypes import GHC.Tc.Types.ErrCtxt import {-# SOURCE #-} GHC.Tc.Errors.Hole.Plugin ( HoleFitPlugin ) import GHC.Tc.Errors.Types import GHC.Core.Reduction ( Reduction(..) 
) import GHC.Core.Type import GHC.Core.TyCon ( TyCon ) import GHC.Core.PatSyn ( PatSyn ) import GHC.Core.Lint ( lintAxioms ) import GHC.Core.InstEnv import GHC.Core.FamInstEnv import GHC.Core.Predicate import GHC.Types.DefaultEnv ( DefaultEnv ) import GHC.Types.Fixity.Env import GHC.Types.Annotations import GHC.Types.CompleteMatch import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Types.Avail import GHC.Types.Var import GHC.Types.TypeEnv import GHC.Types.SourceFile import GHC.Types.SrcLoc import GHC.Types.Unique.FM import GHC.Types.Basic import GHC.Types.CostCentre.State import GHC.Types.HpcInfo import GHC.Data.IOEnv import GHC.Data.Bag import GHC.Data.List.SetOps import GHC.Unit import GHC.Unit.Module.Warnings import GHC.Unit.Module.Deps import GHC.Unit.Module.ModDetails import GHC.Utils.Error import GHC.Utils.Outputable import GHC.Utils.Fingerprint import GHC.Utils.Panic import GHC.Utils.Logger import GHC.Builtin.Names ( isUnboundName ) import GHCi.Message import GHCi.RemoteTypes import Data.Set ( Set ) import qualified Data.Set as S import qualified Data.Map as M import Data.Dynamic ( Dynamic ) import Data.Map ( Map ) import Data.Typeable ( TypeRep ) import Data.Maybe ( mapMaybe ) -- | The import specification as written by the user, including -- the list of explicitly imported names. Used in 'ModIface' to -- allow GHCi to reconstruct the top level environment on demand. -- -- This is distinct from 'ImportSpec' because we don't want to store -- the list of explicitly imported names along with each 'GRE' -- -- We don't want to store the entire GlobalRdrEnv for modules that -- are imported without explicit export lists, as these may grow -- to be very large. However, GlobalRdrEnvs which are the result -- of explicit import lists are typically quite small. -- -- Why do we not store something like (Maybe (ImportListInterpretation, [IE GhcPs]) in such a case? 
-- Because we don't want to store source syntax including annotations in -- interface files. data ImportUserSpec = ImpUserSpec { ius_decl :: !ImpDeclSpec , ius_imports :: !ImpUserList } data ImpUserList = ImpUserAll -- ^ no user import list | ImpUserExplicit !GlobalRdrEnv | ImpUserEverythingBut !NameSet -- | A 'NameShape' is a substitution on 'Name's that can be used -- to refine the identities of a hole while we are renaming interfaces -- (see "GHC.Iface.Rename"). Specifically, a 'NameShape' for -- 'ns_module_name' @A@, defines a mapping from @{A.T}@ -- (for some 'OccName' @T@) to some arbitrary other 'Name'. -- -- The most intriguing thing about a 'NameShape', however, is -- how it's constructed. A 'NameShape' is *implied* by the -- exported 'AvailInfo's of the implementor of an interface: -- if an implementor of signature @\@ exports @M.T@, you implicitly -- define a substitution from @{H.T}@ to @M.T@. So a 'NameShape' -- is computed from the list of 'AvailInfo's that are exported -- by the implementation of a module, or successively merged -- together by the export lists of signatures which are joining -- together. -- -- It's not the most obvious way to go about doing this, but it -- does seem to work! -- -- NB: Can't boot this and put it in NameShape because then we -- start pulling in too many DynFlags things. 
data NameShape = NameShape { ns_mod_name :: ModuleName, ns_exports :: [AvailInfo], ns_map :: OccEnv Name } {- ************************************************************************ * * Standard monad definition for TcRn All the combinators for the monad can be found in GHC.Tc.Utils.Monad * * ************************************************************************ The monad itself has to be defined here, because it is mentioned by ErrCtxt -} type TcRnIf a b = IOEnv (Env a b) type TcRn = TcRnIf TcGblEnv TcLclEnv -- Type inference type IfM lcl = TcRnIf IfGblEnv lcl -- Iface stuff type IfG = IfM () -- Top level type IfL = IfM IfLclEnv -- Nested -- TcRn is the type-checking and renaming monad: the main monad that -- most type-checking takes place in. The global environment is -- 'TcGblEnv', which tracks all of the top-level type-checking -- information we've accumulated while checking a module, while the -- local environment is 'TcLclEnv', which tracks local information as -- we move inside expressions. -- | Historical "renaming monad" (now it's just 'TcRn'). type RnM = TcRn -- | Historical "type-checking monad" (now it's just 'TcRn'). type TcM = TcRn -- We 'stack' these envs through the Reader like monad infrastructure -- as we move into an expression (although the change is focused in -- the lcl type). 
data Env gbl lcl = Env { env_top :: !HscEnv, -- Top-level stuff that never changes -- Includes all info about imported things -- BangPattern is to fix leak, see #15111 env_ut :: {-# UNPACK #-} !Char, -- Tag for Uniques env_gbl :: gbl, -- Info about things defined at the top level -- of the module being compiled env_lcl :: lcl -- Nested stuff; changes as we go into } instance ContainsDynFlags (Env gbl lcl) where extractDynFlags env = hsc_dflags (env_top env) instance ContainsHooks (Env gbl lcl) where extractHooks env = hsc_hooks (env_top env) instance ContainsLogger (Env gbl lcl) where extractLogger env = hsc_logger (env_top env) instance ContainsModule gbl => ContainsModule (Env gbl lcl) where extractModule env = extractModule (env_gbl env) {- ************************************************************************ * * * RewriteEnv * The rewriting environment * * ************************************************************************ -} -- | A 'RewriteEnv' carries the necessary context for performing rewrites -- (i.e. type family reductions and following filled-in metavariables) -- in the solver. data RewriteEnv = RE { re_loc :: !CtLoc -- ^ In which context are we rewriting? -- -- Type-checking plugins might want to use this location information -- when emitting new Wanted constraints when rewriting type family -- applications. This ensures that such Wanted constraints will, -- when unsolved, give rise to error messages with the -- correct source location. -- Within GHC, we use this field to keep track of reduction depth. -- See Note [Rewriter CtLoc] in GHC.Tc.Solver.Rewrite. , re_flavour :: !CtFlavour , re_eq_rel :: !EqRel -- ^ At what role are we rewriting? -- See Note [Rewriter EqRels] in GHC.Tc.Solver.Rewrite , re_rewriters :: !(TcRef RewriterSet) -- ^ See Note [Wanteds rewrite Wanteds] } -- RewriteEnv is mostly used in @GHC.Tc.Solver.Rewrite@, but it is defined -- here so that it can also be passed to rewriting plugins. 
-- See the 'tcPluginRewrite' field of 'TcPlugin'. {- ************************************************************************ * * The interface environments Used when dealing with IfaceDecls * * ************************************************************************ -} data IfGblEnv = IfGblEnv { -- Some information about where this environment came from; -- useful for debugging. if_doc :: SDoc, -- The type environment for the module being compiled, -- in case the interface refers back to it via a reference that -- was originally a hi-boot file. -- We need the module name so we can test when it's appropriate -- to look in this env. -- See Note [Tying the knot] in GHC.IfaceToCore if_rec_types :: (KnotVars (IfG TypeEnv)) -- Allows a read effect, so it can be in a mutable -- variable; c.f. handling the external package type env -- Nothing => interactive stuff, no loops possible } data IfLclEnv = IfLclEnv { -- The module for the current IfaceDecl -- So if we see f = \x -> x -- it means M.f = \x -> x, where M is the if_mod -- NB: This is a semantic module, see -- Note [Identity versus semantic module] if_mod :: !Module, -- Whether or not the IfaceDecl came from a boot -- file or not; we'll use this to choose between -- NoUnfolding and BootUnfolding if_boot :: IsBootInterface, -- The field is used only for error reporting -- if (say) there's a Lint error in it if_loc :: SDoc, -- Where the interface came from: -- .hi file, or GHCi state, or ext core -- plus which bit is currently being examined if_nsubst :: Maybe NameShape, -- This field is used to make sure "implicit" declarations -- (anything that cannot be exported in mi_exports) get -- wired up correctly in typecheckIfacesForMerging. Most -- of the time it's @Nothing@. See Note [Resolving never-exported Names] -- in GHC.IfaceToCore. 
if_implicits_env :: Maybe TypeEnv, if_tv_env :: FastStringEnv TyVar, -- Nested tyvar bindings if_id_env :: FastStringEnv Id -- Nested id binding } {- ************************************************************************ * * Global typechecker environment * * ************************************************************************ -} -- | 'FrontendResult' describes the result of running the frontend of a Haskell -- module. Currently one always gets a 'FrontendTypecheck', since running the -- frontend involves typechecking a program. hs-sig merges are not handled here. -- -- This data type really should be in GHC.Driver.Env, but it needs -- to have a TcGblEnv which is only defined here. data FrontendResult = FrontendTypecheck TcGblEnv -- Note [Identity versus semantic module] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- When typechecking an hsig file, it is convenient to keep track -- of two different "this module" identifiers: -- -- - The IDENTITY module is simply thisPackage + the module -- name; i.e. it uniquely *identifies* the interface file -- we're compiling. For example, p[A=]:A is an -- identity module identifying the requirement named A -- from library p. -- -- - The SEMANTIC module, which is the actual module that -- this signature is intended to represent (e.g. if -- we have a identity module p[A=base:Data.IORef]:A, -- then the semantic module is base:Data.IORef) -- -- Which one should you use? -- -- - In the desugarer and later phases of compilation, -- identity and semantic modules coincide, since we never compile -- signatures (we just generate blank object files for -- hsig files.) -- -- A corollary of this is that the following invariant holds at any point -- past desugaring, -- -- if I have a Module, this_mod, in hand representing the module -- currently being compiled, -- then moduleUnit this_mod == thisPackage dflags -- -- - For any code involving Names, we want semantic modules. 
-- See lookupIfaceTop in GHC.Iface.Env, mkIface and addFingerprints -- in GHC.Iface.{Make,Recomp}, and tcLookupGlobal in GHC.Tc.Utils.Env -- -- - When reading interfaces, we want the identity module to -- identify the specific interface we want (such interfaces -- should never be loaded into the EPS). However, if a -- hole module is requested, we look for A.hi -- in the home library we are compiling. (See GHC.Iface.Load.) -- Similarly, in GHC.Rename.Names we check for self-imports using -- identity modules, to allow signatures to import their implementor. -- -- - For recompilation avoidance, you want the identity module, -- since that will actually say the specific interface you -- want to track (and recompile if it changes) -- | 'TcGblEnv' describes the top-level of the module at the -- point at which the typechecker is finished work. -- It is this structure that is handed on to the desugarer -- For state that needs to be updated during the typechecking -- phase and returned at end, use a 'TcRef' (= 'IORef'). data TcGblEnv = TcGblEnv { tcg_mod :: Module, -- ^ Module being compiled tcg_semantic_mod :: Module, -- ^ If a signature, the backing module -- See also Note [Identity versus semantic module] tcg_src :: HscSource, -- ^ What kind of module (regular Haskell, hs-boot, hsig) tcg_rdr_env :: GlobalRdrEnv, -- ^ Top level envt; used during renaming tcg_default :: DefaultEnv, -- ^ All class defaults in scope in the module tcg_default_exports :: DefaultEnv, -- ^ All class defaults exported from the module tcg_fix_env :: FixityEnv, -- ^ Just for things in this module tcg_type_env :: TypeEnv, -- ^ Global type env for the module we are compiling now. All -- TyCons and Classes (for this module) end up in here right away, -- along with their derived constructors, selectors. 
-- -- (Ids defined in this module start in the local envt, though they -- move to the global envt during zonking) -- -- NB: for what "things in this module" means, see -- Note [The interactive package] in "GHC.Runtime.Context" tcg_type_env_var :: KnotVars (IORef TypeEnv), -- Used only to initialise the interface-file -- typechecker in initIfaceTcRn, so that it can see stuff -- bound in this module when dealing with hi-boot recursions -- Updated at intervals (e.g. after dealing with types and classes) tcg_inst_env :: !InstEnv, -- ^ Instance envt for all /home-package/ modules; -- Includes the dfuns in tcg_insts -- NB. BangPattern is to fix a leak, see #15111 tcg_fam_inst_env :: !FamInstEnv, -- ^ Ditto for family instances -- NB. BangPattern is to fix a leak, see #15111 tcg_ann_env :: AnnEnv, -- ^ And for annotations -- Now a bunch of things about this module that are simply -- accumulated, but never consulted until the end. -- Nevertheless, it's convenient to accumulate them along -- with the rest of the info from this module. tcg_exports :: [AvailInfo], -- ^ What is exported tcg_imports :: ImportAvails, -- ^ Information about what was imported from where, including -- things bound in this module. Also store Safe Haskell info -- here about transitive trusted package requirements. -- -- There are not many uses of this field, so you can grep for -- all them. -- -- The ImportAvails records information about the following -- things: -- -- 1. All of the modules you directly imported (tcRnImports) -- 2. The orphans (only!) of all imported modules in a GHCi -- session (runTcInteractive) -- 3. The module that instantiated a signature -- 4. 
Each of the signatures that merged in -- -- It is used in the following ways: -- - imp_orphs is used to determine what orphan modules should be -- visible in the context (tcVisibleOrphanMods) -- - imp_finsts is used to determine what family instances should -- be visible (tcExtendLocalFamInstEnv) -- - To resolve the meaning of the export list of a module -- (tcRnExports) -- - imp_mods is used to compute usage info (mkIfaceTc, deSugar) -- - imp_trust_own_pkg is used for Safe Haskell in interfaces -- (mkIfaceTc, as well as in "GHC.Driver.Main") -- - To create the Dependencies field in interface (mkDependencies) -- This field tracks the user-written imports of a module, so they can be -- recorded in an interface file in order to reconstruct the top-level environment -- if necessary for GHCi. tcg_import_decls :: ![ImportUserSpec], -- These three fields track unused bindings and imports -- See Note [Tracking unused binding and imports] tcg_dus :: DefUses, tcg_used_gres :: TcRef [GlobalRdrElt], -- ^ INVARIANT: all these GREs were imported; that is, -- they all have a non-empty gre_imp field. tcg_keep :: TcRef NameSet, tcg_th_used :: TcRef Bool, -- ^ @True@ \<=> Template Haskell syntax used. -- -- We need this so that we can generate a dependency on the -- Template Haskell package, because the desugarer is going -- to emit loads of references to TH symbols. The reference -- is implicit rather than explicit, so we have to zap a -- mutable variable. tcg_th_splice_used :: TcRef Bool, -- ^ @True@ \<=> A Template Haskell splice was used. -- -- Splices disable recompilation avoidance (see #481) tcg_th_needed_deps :: TcRef ([Linkable], PkgsLoaded), -- ^ The set of runtime dependencies required by this module -- See Note [Object File Dependencies] tcg_dfun_n :: TcRef OccSet, -- ^ Allows us to choose unique DFun names. 
tcg_zany_n :: TcRef Integer, -- ^ A source of unique identities for ZonkAny instances -- See Note [Any types] in GHC.Builtin.Types, wrinkle (Any4) tcg_merged :: [(Module, Fingerprint)], -- ^ The requirements we merged with; we always have to recompile -- if any of these changed. -- The next fields accumulate the payload of the module -- The binds, rules and foreign-decl fields are collected -- initially in un-zonked form and are finally zonked in tcRnSrcDecls tcg_rn_exports :: Maybe [(LIE GhcRn, Avails)], -- Nothing <=> no explicit export list -- Is always Nothing if we don't want to retain renamed -- exports. -- If present contains each renamed export list item -- together with its exported names. tcg_rn_imports :: [LImportDecl GhcRn], -- Keep the renamed imports regardless. They are not -- voluminous and are needed if you want to report unused imports tcg_rn_decls :: Maybe (HsGroup GhcRn), -- ^ Renamed decls, maybe. @Nothing@ \<=> Don't retain renamed -- decls. tcg_dependent_files :: TcRef [FilePath], -- ^ dependencies from addDependentFile tcg_th_topdecls :: TcRef [LHsDecl GhcPs], -- ^ Top-level declarations from addTopDecls tcg_th_foreign_files :: TcRef [(ForeignSrcLang, FilePath)], -- ^ Foreign files emitted from TH. tcg_th_topnames :: TcRef NameSet, -- ^ Exact names bound in top-level declarations in tcg_th_topdecls tcg_th_modfinalizers :: TcRef [(TcLclEnv, ThModFinalizers)], -- ^ Template Haskell module finalizers. -- -- They can use particular local environments. tcg_th_coreplugins :: TcRef [String], -- ^ Core plugins added by Template Haskell code. tcg_th_state :: TcRef (Map TypeRep Dynamic), tcg_th_remote_state :: TcRef (Maybe (ForeignRef (IORef QState))), -- ^ Template Haskell state tcg_th_docs :: TcRef THDocs, -- ^ Docs added in Template Haskell via @putDoc@. tcg_ev_binds :: Bag EvBind, -- Top-level evidence bindings -- Things defined in this module, or (in GHCi) -- in the declarations for a single GHCi command. 
-- For the latter, see Note [The interactive package] in -- GHC.Runtime.Context tcg_tr_module :: Maybe Id, -- Id for $trModule :: GHC.Unit.Module -- for which every module has a top-level defn -- except in GHCi in which case we have Nothing tcg_binds :: LHsBinds GhcTc, -- Value bindings in this module tcg_sigs :: NameSet, -- ...Top-level names that *lack* a signature tcg_imp_specs :: [LTcSpecPrag], -- ...SPECIALISE prags for imported Ids tcg_warns :: (Warnings GhcRn), -- ...Warnings and deprecations tcg_anns :: [Annotation], -- ...Annotations tcg_tcs :: [TyCon], -- ...TyCons and Classes tcg_ksigs :: NameSet, -- ...Top-level TyCon names that *lack* a signature tcg_insts :: [ClsInst], -- ...Instances tcg_fam_insts :: [FamInst], -- ...Family instances tcg_rules :: [LRuleDecl GhcTc], -- ...Rules tcg_fords :: [LForeignDecl GhcTc], -- ...Foreign import & exports tcg_patsyns :: [PatSyn], -- ...Pattern synonyms tcg_hdr_info :: (Maybe (LHsDoc GhcRn), Maybe (XRec GhcRn ModuleName)), -- ^ Maybe Haddock header docs and Maybe located module name tcg_hpc :: !AnyHpcUsage, -- ^ @True@ if any part of the -- prog uses hpc instrumentation. -- NB. BangPattern is to fix a leak, see #15111 tcg_self_boot :: SelfBootInfo, -- ^ Whether this module has a -- corresponding hi-boot file tcg_main :: Maybe Name, -- ^ The Name of the main -- function, if this module is -- the main module. tcg_safe_infer :: TcRef Bool, -- ^ Has the typechecker inferred this module as -XSafe (Safe Haskell)? -- See Note [Safe Haskell Overlapping Instances Implementation], -- although this is used for more than just that failure case. tcg_safe_infer_reasons :: TcRef (Messages TcRnMessage), -- ^ Unreported reasons why tcg_safe_infer is False. -- INVARIANT: If this Messages is non-empty, then tcg_safe_infer is False. 
-- It may be that tcg_safe_infer is False but this is empty, if no reasons -- are supplied (#19714), or if those reasons have already been -- reported by GHC.Driver.Main.markUnsafeInfer tcg_tc_plugin_solvers :: [TcPluginSolver], -- ^ A list of user-defined type-checking plugins for constraint solving. tcg_tc_plugin_rewriters :: UniqFM TyCon [TcPluginRewriter], -- ^ A collection of all the user-defined type-checking plugins for rewriting -- type family applications, collated by their type family 'TyCon's. tcg_defaulting_plugins :: [FillDefaulting], -- ^ A list of user-defined plugins for type defaulting plugins. tcg_hf_plugins :: [HoleFitPlugin], -- ^ A list of user-defined plugins for hole fit suggestions. tcg_top_loc :: RealSrcSpan, -- ^ The RealSrcSpan this module came from tcg_static_wc :: TcRef WantedConstraints, -- ^ Wanted constraints of static forms. -- See Note [Constraints in static forms]. tcg_complete_matches :: !CompleteMatches, -- ^ Tracking indices for cost centre annotations tcg_cc_st :: TcRef CostCentreState, tcg_next_wrapper_num :: TcRef (ModuleEnv Int) -- ^ See Note [Generating fresh names for FFI wrappers] } -- NB: topModIdentity, not topModSemantic! -- Definition sites of orphan identities will be identity modules, not semantic -- modules. -- Note [Constraints in static forms] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- When a static form produces constraints like -- -- f :: StaticPtr (Bool -> String) -- f = static show -- -- we collect them in tcg_static_wc and resolve them at the end -- of type checking. They need to be resolved separately because -- we don't want to resolve them in the context of the enclosing -- expression. 
Consider -- -- g :: Show a => StaticPtr (a -> String) -- g = static show -- -- If the @Show a0@ constraint that the body of the static form produces was -- resolved in the context of the enclosing expression, then the body of the -- static form wouldn't be closed because the Show dictionary would come from -- g's context instead of coming from the top level. tcVisibleOrphanMods :: TcGblEnv -> ModuleSet tcVisibleOrphanMods tcg_env = mkModuleSet (tcg_mod tcg_env : imp_orphs (tcg_imports tcg_env)) instance ContainsModule TcGblEnv where extractModule env = tcg_semantic_mod env data SelfBootInfo = NoSelfBoot -- No corresponding hi-boot file | SelfBoot { sb_mds :: ModDetails } -- There was a hi-boot file bootExports :: SelfBootInfo -> NameSet bootExports boot = case boot of NoSelfBoot -> emptyNameSet SelfBoot { sb_mds = mds} -> let exports = md_exports mds in availsToNameSet exports {- Note [Tracking unused binding and imports] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We gather three sorts of usage information * tcg_dus :: DefUses (defs/uses) Records what is defined in this module and what is used. Records *defined* Names (local, top-level) and *used* Names (local or imported) Used (a) to report "defined but not used" (see GHC.Rename.Names.reportUnusedNames) (b) to generate version-tracking usage info in interface files (see GHC.Iface.Make.mkUsedNames) This usage info is mainly gathered by the renamer's gathering of free-variables * tcg_used_gres :: TcRef [GlobalRdrElt] Records occurrences of imported entities. Used only to report unused import declarations Records each *occurrence* an *imported* (not locally-defined) entity. The occurrence is recorded by keeping a GlobalRdrElt for it. These is not the GRE that is in the GlobalRdrEnv; rather it is recorded *after* the filtering done by pickGREs. So it reflect /how that occurrence is in scope/. See Note [GRE filtering] in RdrName. 
* tcg_keep :: TcRef NameSet Records names of the type constructors, data constructors, and Ids that are used by the constraint solver. The typechecker may use find that some imported or locally-defined things are used, even though they do not appear to be mentioned in the source code: (a) The to/from functions for generic data types (b) Top-level variables appearing free in the RHS of an orphan rule (c) Top-level variables appearing free in a TH bracket See Note [Keeping things alive for Template Haskell] in GHC.Rename.Splice (d) The data constructor of a newtype that is used to solve a Coercible instance (e.g. #10347). Example module T10347 (N, mkN) where import Data.Coerce newtype N a = MkN Int mkN :: Int -> N a mkN = coerce Then we wish to record `MkN` as used, since it is (morally) used to perform the coercion in `mkN`. To do so, the Coercible solver updates tcg_keep's TcRef whenever it encounters a use of `coerce` that crosses newtype boundaries. (e) Record fields that are used to solve HasField constraints (see Note [Unused name reporting and HasField] in GHC.Tc.Instance.Class) The tcg_keep field is used in two distinct ways: * Desugar.addExportFlagsAndRules. Where things like (a-c) are locally defined, we should give them an Exported flag, so that the simplifier does not discard them as dead code, and so that they are exposed in the interface file (but not to export to the user). * GHC.Rename.Names.reportUnusedNames. Where newtype data constructors like (d) are imported, we don't want to report them as unused. -} {- Note [Given Insts] ~~~~~~~~~~~~~~~~~~ Because of GADTs, we have to pass inwards the Insts provided by type signatures and existential contexts. Consider data T a where { T1 :: b -> b -> T [b] } f :: Eq a => T a -> Bool f (T1 x y) = [x]==[y] The constructor T1 binds an existential variable 'b', and we need Eq [b]. Well, we have it, because Eq a refines to Eq [b], but we can only spot that if we pass it inwards. 
-} -- fixes #12177 -- Builds up a list of bindings whose OccName has not been seen before -- i.e., If ys = removeBindingShadowing xs -- then -- - ys is obtained from xs by deleting some elements -- - ys has no duplicate OccNames -- - The first duplicated OccName in xs is retained in ys -- Overloaded so that it can be used for both GlobalRdrElt in typed-hole -- substitutions and TcBinder when looking for relevant bindings. removeBindingShadowing :: HasOccName a => [a] -> [a] removeBindingShadowing bindings = reverse $ fst $ foldl (\(bindingAcc, seenNames) binding -> if occName binding `elemOccSet` seenNames -- if we've seen it then (bindingAcc, seenNames) -- skip it else (binding:bindingAcc, extendOccSet seenNames (occName binding))) ([], emptyOccSet) bindings -- | Get target platform getPlatform :: TcRnIf a b Platform getPlatform = targetPlatform <$> getDynFlags --------------------------- -- Arrow-notation context --------------------------- {- Note [Escaping the arrow scope] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In arrow notation, a variable bound by a proc (or enclosed let/kappa) is not in scope to the left of an arrow tail (-<) or the head of (|..|). For example proc x -> (e1 -< e2) Here, x is not in scope in e1, but it is in scope in e2. This can get a bit complicated: let x = 3 in proc y -> (proc z -> e1) -< e2 Here, x and z are in scope in e1, but y is not. We implement this by recording the environment when passing a proc (using newArrowScope), and returning to that (using escapeArrowScope) on the left of -< and the head of (|..|). All this can be dealt with by the *renamer*. But the type checker needs to be involved too. Example (arrowfail001) class Foo a where foo :: a -> () data Bar = forall a. Foo a => Bar a get :: Bar -> () get = proc x -> case x of Bar a -> foo -< a Here the call of 'foo' gives rise to a (Foo a) constraint that should not be captured by the pattern match on 'Bar'. Rather it should join the constraints from further out. 
So we must capture the constraint bag from further out in the ArrowCtxt that we push inwards. -} {- ************************************************************************ * * Operations over ImportAvails * * ************************************************************************ -} mkModDeps :: Set (UnitId, ModuleNameWithIsBoot) -> InstalledModuleEnv ModuleNameWithIsBoot mkModDeps deps = S.foldl' add emptyInstalledModuleEnv deps where add env (uid, elt) = extendInstalledModuleEnv env (mkModule uid (gwib_mod elt)) elt plusModDeps :: InstalledModuleEnv ModuleNameWithIsBoot -> InstalledModuleEnv ModuleNameWithIsBoot -> InstalledModuleEnv ModuleNameWithIsBoot plusModDeps = plusInstalledModuleEnv plus_mod_dep where plus_mod_dep r1@(GWIB { gwib_mod = m1, gwib_isBoot = boot1 }) r2@(GWIB {gwib_mod = m2, gwib_isBoot = boot2}) | assertPpr (m1 == m2) ((ppr m1 <+> ppr m2) $$ (ppr (boot1 == IsBoot) <+> ppr (boot2 == IsBoot))) boot1 == IsBoot = r2 | otherwise = r1 -- If either side can "see" a non-hi-boot interface, use that -- Reusing existing tuples saves 10% of allocations on test -- perf/compiler/MultiLayerModules emptyImportAvails :: ImportAvails emptyImportAvails = ImportAvails { imp_mods = M.empty, imp_direct_dep_mods = emptyInstalledModuleEnv, imp_dep_direct_pkgs = S.empty, imp_sig_mods = [], imp_trust_pkgs = S.empty, imp_trust_own_pkg = False, imp_boot_mods = emptyInstalledModuleEnv, imp_orphs = [], imp_finsts = [] } -- | Union two ImportAvails -- -- This function is a key part of Import handling, basically -- for each import we create a separate ImportAvails structure -- and then union them all together with this function. 
plusImportAvails :: ImportAvails -> ImportAvails -> ImportAvails plusImportAvails (ImportAvails { imp_mods = mods1, imp_direct_dep_mods = ddmods1, imp_dep_direct_pkgs = ddpkgs1, imp_boot_mods = srs1, imp_sig_mods = sig_mods1, imp_trust_pkgs = tpkgs1, imp_trust_own_pkg = tself1, imp_orphs = orphs1, imp_finsts = finsts1 }) (ImportAvails { imp_mods = mods2, imp_direct_dep_mods = ddmods2, imp_dep_direct_pkgs = ddpkgs2, imp_boot_mods = srcs2, imp_sig_mods = sig_mods2, imp_trust_pkgs = tpkgs2, imp_trust_own_pkg = tself2, imp_orphs = orphs2, imp_finsts = finsts2 }) = ImportAvails { imp_mods = M.unionWith (++) mods1 mods2, imp_direct_dep_mods = ddmods1 `plusModDeps` ddmods2, imp_dep_direct_pkgs = ddpkgs1 `S.union` ddpkgs2, imp_trust_pkgs = tpkgs1 `S.union` tpkgs2, imp_trust_own_pkg = tself1 || tself2, imp_boot_mods = srs1 `plusModDeps` srcs2, imp_sig_mods = unionListsOrd sig_mods1 sig_mods2, imp_orphs = unionListsOrd orphs1 orphs2, imp_finsts = unionListsOrd finsts1 finsts2 } {- Constraint Solver Plugins ------------------------- -} -- | The @solve@ function of a type-checking plugin takes in Given -- and Wanted constraints, and should return a 'TcPluginSolveResult' -- indicating which Wanted constraints it could solve, or whether any are -- insoluble. type TcPluginSolver = EvBindsVar -> [Ct] -- ^ Givens -> [Ct] -- ^ Wanteds -> TcPluginM TcPluginSolveResult -- | For rewriting type family applications, a type-checking plugin provides -- a function of this type for each type family 'TyCon'. -- -- The function is provided with the current set of Given constraints, together -- with the arguments to the type family. -- The type family application will always be fully saturated. type TcPluginRewriter = RewriteEnv -- ^ Rewriter environment -> [Ct] -- ^ Givens -> [TcType] -- ^ type family arguments -> TcPluginM TcPluginRewriteResult -- | 'TcPluginM' is the monad in which type-checking plugins operate. 
newtype TcPluginM a = TcPluginM { runTcPluginM :: TcM a } deriving newtype (Functor, Applicative, Monad, MonadFail) -- | This function provides an escape for direct access to -- the 'TcM` monad. It should not be used lightly, and -- the provided 'TcPluginM' API should be favoured instead. unsafeTcPluginTcM :: TcM a -> TcPluginM a unsafeTcPluginTcM = TcPluginM data TcPlugin = forall s. TcPlugin { tcPluginInit :: TcPluginM s -- ^ Initialize plugin, when entering type-checker. , tcPluginSolve :: s -> TcPluginSolver -- ^ Solve some constraints. -- -- This function will be invoked at two points in the constraint solving -- process: once to simplify Given constraints, and once to solve -- Wanted constraints. In the first case (and only in the first case), -- no Wanted constraints will be passed to the plugin. -- -- The plugin can either return a contradiction, -- or specify that it has solved some constraints (with evidence), -- and possibly emit additional constraints. These returned constraints -- must be Givens in the first case, and Wanteds in the second. -- -- Use @ \\ _ _ _ _ -> pure $ TcPluginOk [] [] @ if your plugin -- does not provide this functionality. , tcPluginRewrite :: s -> UniqFM TyCon TcPluginRewriter -- ^ Rewrite saturated type family applications. -- -- The plugin is expected to supply a mapping from type family names to -- rewriting functions. For each type family 'TyCon', the plugin should -- provide a function which takes in the given constraints and arguments -- of a saturated type family application, and return a possible rewriting. -- See 'TcPluginRewriter' for the expected shape of such a function. -- -- Use @ \\ _ -> emptyUFM @ if your plugin does not provide this functionality. , tcPluginStop :: s -> TcPluginM () -- ^ Clean up after the plugin, when exiting the type-checker. } -- | The plugin found a contradiction. -- The returned constraints are removed from the inert set, -- and recorded as insoluble. 
-- -- The returned list of constraints should never be empty. pattern TcPluginContradiction :: [Ct] -> TcPluginSolveResult pattern TcPluginContradiction insols = TcPluginSolveResult { tcPluginInsolubleCts = insols , tcPluginSolvedCts = [] , tcPluginNewCts = [] } -- | The plugin has not found any contradictions, -- -- The first field is for constraints that were solved. -- The second field contains new work, that should be processed by -- the constraint solver. pattern TcPluginOk :: [(EvTerm, Ct)] -> [Ct] -> TcPluginSolveResult pattern TcPluginOk solved new = TcPluginSolveResult { tcPluginInsolubleCts = [] , tcPluginSolvedCts = solved , tcPluginNewCts = new } -- | Result of running a solver plugin. data TcPluginSolveResult = TcPluginSolveResult { -- | Insoluble constraints found by the plugin. -- -- These constraints will be added to the inert set, -- and reported as insoluble to the user. tcPluginInsolubleCts :: [Ct] -- | Solved constraints, together with their evidence. -- -- These are removed from the inert set, and the -- evidence for them is recorded. , tcPluginSolvedCts :: [(EvTerm, Ct)] -- | New constraints that the plugin wishes to emit. -- -- These will be added to the work list. , tcPluginNewCts :: [Ct] } data TcPluginRewriteResult = -- | The plugin does not rewrite the type family application. TcPluginNoRewrite -- | The plugin rewrites the type family application -- providing a rewriting together with evidence: a 'Reduction', -- which contains the rewritten type together with a 'Coercion' -- whose right-hand-side type is the rewritten type. -- -- The plugin can also emit additional Wanted constraints. | TcPluginRewriteTo { tcPluginReduction :: !Reduction , tcRewriterNewWanteds :: [Ct] } -- | A collection of candidate default types for sets of type variables. data DefaultingProposal = DefaultingProposal { deProposals :: [[(TcTyVar, Type)]] -- ^ The type variable assignments to try. 
, deProposalCts :: [Ct] -- ^ The constraints against which defaults are checked. } instance Outputable DefaultingProposal where ppr p = text "DefaultingProposal" <+> ppr (deProposals p) <+> ppr (deProposalCts p) type FillDefaulting = WantedConstraints -- Zonked constraints containing the unfilled metavariables that -- can be defaulted. See wrinkle (DP1) of Note [Defaulting plugins] -- in GHC.Tc.Solver -> TcPluginM [DefaultingProposal] -- | A plugin for controlling defaulting. data DefaultingPlugin = forall s. DefaultingPlugin { dePluginInit :: TcPluginM s -- ^ Initialize plugin, when entering type-checker. , dePluginRun :: s -> FillDefaulting -- ^ Default some types , dePluginStop :: s -> TcPluginM () -- ^ Clean up after the plugin, when exiting the type-checker. } {- ********************************************************************* * * Role annotations * * ********************************************************************* -} type RoleAnnotEnv = NameEnv (LRoleAnnotDecl GhcRn) mkRoleAnnotEnv :: [LRoleAnnotDecl GhcRn] -> RoleAnnotEnv mkRoleAnnotEnv role_annot_decls = mkNameEnv [ (name, ra_decl) | ra_decl <- role_annot_decls , let name = roleAnnotDeclName (unLoc ra_decl) , not (isUnboundName name) ] -- Some of the role annots will be unbound; -- we don't wish to include these emptyRoleAnnotEnv :: RoleAnnotEnv emptyRoleAnnotEnv = emptyNameEnv lookupRoleAnnot :: RoleAnnotEnv -> Name -> Maybe (LRoleAnnotDecl GhcRn) lookupRoleAnnot = lookupNameEnv getRoleAnnots :: [Name] -> RoleAnnotEnv -> [LRoleAnnotDecl GhcRn] getRoleAnnots bndrs role_env = mapMaybe (lookupRoleAnnot role_env) bndrs {- ********************************************************************* * * Linting a TcGblEnv * * ********************************************************************* -} -- | Check the 'TcGblEnv' for consistency. Currently, only checks -- axioms, but should check other aspects, too. 
lintGblEnv :: Logger -> DynFlags -> TcGblEnv -> TcM () lintGblEnv logger dflags tcg_env = -- TODO empty list means no extra in scope from GHCi, is this correct? liftIO $ lintAxioms logger (initLintConfig dflags []) (text "TcGblEnv axioms") axioms where axioms = typeEnvCoAxioms (tcg_type_env tcg_env) -- | This is a mirror of Template Haskell's DocLoc, but the TH names are -- resolved to GHC names. data DocLoc = DeclDoc Name | ArgDoc Name Int | InstDoc Name | ModuleDoc deriving (Eq, Ord) -- | The current collection of docs that Template Haskell has built up via -- putDoc. type THDocs = Map DocLoc (HsDoc GhcRn) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/0000755000000000000000000000000007346545000017360 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/BasicTypes.hs0000644000000000000000000004745407346545000022000 0ustar0000000000000000module GHC.Tc.Types.BasicTypes ( -- * TcBinder TcBinderStack , TcId , TcBinder(..) -- * Signatures , TcSigFun, TcSigInfo(..), TcIdSig(..) , TcCompleteSig(..), TcPartialSig(..), TcPatSynSig(..) , TcIdSigInst(..) , isPartialSig, hasCompleteSig , tcSigInfoName, tcIdSigLoc, completeSigPolyId_maybe -- * TcTyThing , TcTyThing(..) , IdBindingInfo(..) , IsGroupClosed(..) 
, RhsNames , ClosedTypeId , tcTyThingCategory , tcTyThingTyCon_maybe , pprTcTyThingCategory ) where import GHC.Prelude import GHC.Tc.Types.Origin( UserTypeCtxt ) import GHC.Tc.Utils.TcType import GHC.Types.Id import GHC.Types.Basic import GHC.Types.Var import GHC.Types.SrcLoc import GHC.Types.Name import GHC.Types.TyThing import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Hs.Extension ( GhcRn ) import Language.Haskell.Syntax.Type ( LHsSigWcType ) import GHC.Tc.Errors.Types.PromotionErr (PromotionErr, peCategory) import GHC.Core.TyCon ( TyCon, tyConKind ) import GHC.Utils.Outputable import GHC.Utils.Misc --------------------------- -- The TcBinderStack --------------------------- type TcBinderStack = [TcBinder] type TcId = Id -- This is a stack of locally-bound ids and tyvars, -- innermost on top -- Used only in error reporting (relevantBindings in TcError), -- and in tidying -- We can't use the tcl_env type environment, because it doesn't -- keep track of the nesting order data TcBinder = TcIdBndr TcId TopLevelFlag -- Tells whether the binding is syntactically top-level -- (The monomorphic Ids for a recursive group count -- as not-top-level for this purpose.) | TcIdBndr_ExpType -- Variant that allows the type to be specified as -- an ExpType Name ExpType TopLevelFlag | TcTvBndr -- e.g. 
case x of P (y::a) -> blah Name -- We bind the lexical name "a" to the type of y, TyVar -- which might be an utterly different (perhaps -- existential) tyvar instance Outputable TcBinder where ppr (TcIdBndr id top_lvl) = ppr id <> brackets (ppr top_lvl) ppr (TcIdBndr_ExpType id _ top_lvl) = ppr id <> brackets (ppr top_lvl) ppr (TcTvBndr name tv) = ppr name <+> ppr tv instance HasOccName TcBinder where occName (TcIdBndr id _) = occName (idName id) occName (TcIdBndr_ExpType name _ _) = occName name occName (TcTvBndr name _) = occName name {- ********************************************************************* * * Type signatures * * ********************************************************************* -} -- These data types need to be here only because -- GHC.Tc.Solver uses them, and GHC.Tc.Solver is fairly -- low down in the module hierarchy type TcSigFun = Name -> Maybe TcSigInfo -- TcSigInfo is simply the range of TcSigFun data TcSigInfo = TcIdSig TcIdSig | TcPatSynSig TcPatSynSig -- For a pattern synonym -- See Note [Complete and partial type signatures] data TcIdSig -- For an Id = TcCompleteSig TcCompleteSig | TcPartialSig TcPartialSig data TcCompleteSig -- A complete signature with no wildcards, -- so the complete polymorphic type is known. = CSig { sig_bndr :: TcId -- The polymorphic Id with that type , sig_ctxt :: UserTypeCtxt -- In the case of type-class default methods, -- the Name in the FunSigCtxt is not the same -- as the TcId; the former is 'op', while the -- latter is '$dmop' or some such , sig_loc :: SrcSpan -- Location of the type signature } data TcPartialSig -- A partial type signature (i.e. includes one or more -- wildcards). 
In this case it doesn't make sense to give -- the polymorphic Id, because we are going to /infer/ its -- type, so we can't make the polymorphic Id ab-initio = PSig { psig_name :: Name -- Name of the function; used when report wildcards , psig_hs_ty :: LHsSigWcType GhcRn -- The original partial signature in -- HsSyn form , psig_ctxt :: UserTypeCtxt , psig_loc :: SrcSpan -- Location of the type signature } data TcPatSynSig = PatSig { patsig_name :: Name, patsig_implicit_bndrs :: [InvisTVBinder], -- Implicitly-bound kind vars (Inferred) and -- implicitly-bound type vars (Specified) -- See Note [The pattern-synonym signature splitting rule] in GHC.Tc.TyCl.PatSyn patsig_univ_bndrs :: [InvisTVBinder], -- Bound by explicit user forall patsig_req :: TcThetaType, patsig_ex_bndrs :: [InvisTVBinder], -- Bound by explicit user forall patsig_prov :: TcThetaType, patsig_body_ty :: TcSigmaType } {- Note [Complete and partial type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A type signature is partial when it contains one or more wildcards (= type holes). The wildcard can either be: * A (type) wildcard occurring in sig_theta or sig_tau. These are stored in sig_wcs. f :: Bool -> _ g :: Eq _a => _a -> _a -> Bool * Or an extra-constraints wildcard, stored in sig_cts: h :: (Num a, _) => a -> a A type signature is a complete type signature when there are no wildcards in the type signature, i.e. iff sig_wcs is empty and sig_extra_cts is Nothing. -} data TcIdSigInst = TISI { sig_inst_sig :: TcIdSig , sig_inst_skols :: [(Name, InvisTVBinder)] -- Instantiated type and kind variables, TyVarTvs -- The Name is the Name that the renamer chose; -- but the TcTyVar may come from instantiating -- the type and hence have a different unique. 
-- No need to keep track of whether they are truly lexically -- scoped because the renamer has named them uniquely -- See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig -- -- NB: The order of sig_inst_skols is irrelevant -- for a CompleteSig, but for a PartialSig see -- Note [Quantified variables in partial type signatures] , sig_inst_theta :: TcThetaType -- Instantiated theta. In the case of a -- PartialSig, sig_theta does not include -- the extra-constraints wildcard , sig_inst_tau :: TcSigmaType -- Instantiated tau -- See Note [sig_inst_tau may be polymorphic] -- Relevant for partial signature only , sig_inst_wcs :: [(Name, TcTyVar)] -- Like sig_inst_skols, but for /named/ wildcards (_a etc). -- The named wildcards scope over the binding, and hence -- their Names may appear in type signatures in the binding , sig_inst_wcx :: Maybe TcType -- Extra-constraints wildcard to fill in, if any -- If this exists, it is surely of the form (meta_tv |> co) -- (where the co might be reflexive). This is filled in -- only from the return value of GHC.Tc.Gen.HsType.tcAnonWildCardOcc } {- Note [sig_inst_tau may be polymorphic] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that "sig_inst_tau" might actually be a polymorphic type, if the original function had a signature like forall a. Eq a => forall b. Ord b => .... But that's ok: tcFunBindMatches (called by tcRhs) can deal with that It happens, too! See Note [Polymorphic methods] in GHC.Tc.TyCl.Class. Note [Quantified variables in partial type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f :: forall a b. _ -> a -> _ -> b f (x,y) p q = q Then we expect f's final type to be f :: forall {x,y}. forall a b. (x,y) -> a -> b -> b Note that x,y are Inferred, and can't be use for visible type application (VTA). But a,b are Specified, and remain Specified in the final type, so we can use VTA for them. (Exception: if it turns out that a's kind mentions b we need to reorder them with scopedSort.) 
The sig_inst_skols of the TISI from a partial signature records that original order, and is used to get the variables of f's final type in the correct order. Note [Wildcards in partial signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The wildcards in psig_wcs may stand for a type mentioning the universally-quantified tyvars of psig_ty E.g. f :: forall a. _ -> a f x = x We get sig_inst_skols = [a] sig_inst_tau = _22 -> a sig_inst_wcs = [_22] and _22 in the end is unified with the type 'a' Moreover the kind of a wildcard in sig_inst_wcs may mention the universally-quantified tyvars sig_inst_skols e.g. f :: t a -> t _ Here we get sig_inst_skols = [k:*, (t::k ->*), (a::k)] sig_inst_tau = t a -> t _22 sig_inst_wcs = [ _22::k ] -} instance Outputable TcSigInfo where ppr (TcIdSig sig) = ppr sig ppr (TcPatSynSig sig) = ppr sig instance Outputable TcIdSig where ppr (TcCompleteSig sig) = ppr sig ppr (TcPartialSig sig) = ppr sig instance Outputable TcCompleteSig where ppr (CSig { sig_bndr = bndr }) = ppr bndr <+> dcolon <+> ppr (idType bndr) instance Outputable TcPartialSig where ppr (PSig { psig_name = name, psig_hs_ty = hs_ty }) = text "[partial signature]" <+> ppr name <+> dcolon <+> ppr hs_ty instance Outputable TcPatSynSig where ppr (PatSig { patsig_name = name}) = ppr name instance Outputable TcIdSigInst where ppr (TISI { sig_inst_sig = sig, sig_inst_skols = skols , sig_inst_theta = theta, sig_inst_tau = tau }) = hang (ppr sig) 2 (vcat [ ppr skols, ppr theta <+> darrow <+> ppr tau ]) isPartialSig :: TcIdSigInst -> Bool isPartialSig (TISI { sig_inst_sig = TcPartialSig {} }) = True isPartialSig _ = False -- | No signature or a partial signature hasCompleteSig :: TcSigFun -> Name -> Bool hasCompleteSig sig_fn name = case sig_fn name of Just (TcIdSig (TcCompleteSig {})) -> True _ -> False tcSigInfoName :: TcSigInfo -> Name tcSigInfoName (TcIdSig (TcCompleteSig sig)) = idName (sig_bndr sig) tcSigInfoName (TcIdSig (TcPartialSig sig)) = psig_name sig tcSigInfoName 
(TcPatSynSig sig) = patsig_name sig tcIdSigLoc :: TcIdSig -> SrcSpan tcIdSigLoc (TcCompleteSig sig) = sig_loc sig tcIdSigLoc (TcPartialSig sig) = psig_loc sig completeSigPolyId_maybe :: TcSigInfo -> Maybe TcId completeSigPolyId_maybe (TcIdSig (TcCompleteSig sig)) = Just (sig_bndr sig) completeSigPolyId_maybe _ = Nothing {- ********************************************************************* * * TcTyThing * * ********************************************************************* -} -- | A typecheckable thing available in a local context. Could be -- 'AGlobal' 'TyThing', but also lexically scoped variables, etc. -- See "GHC.Tc.Utils.Env" for how to retrieve a 'TyThing' given a 'Name'. data TcTyThing = AGlobal TyThing -- Used only in the return type of a lookup | ATcId -- Ids defined in this module; may not be fully zonked { tct_id :: Id , tct_info :: IdBindingInfo -- See Note [Meaning of IdBindingInfo] } | ATyVar Name TcTyVar -- See Note [Type variables in the type environment] | ATcTyCon TyCon -- Used temporarily, during kind checking, for the -- tycons and classes in this recursive group -- The TyCon is always a TcTyCon. Its kind -- can be a mono-kind or a poly-kind; in TcTyClsDcls see -- Note [Type checking recursive type and class declarations] | APromotionErr PromotionErr -- | Matches on either a global 'TyCon' or a 'TcTyCon'. 
tcTyThingTyCon_maybe :: TcTyThing -> Maybe TyCon tcTyThingTyCon_maybe (AGlobal (ATyCon tc)) = Just tc tcTyThingTyCon_maybe (ATcTyCon tc_tc) = Just tc_tc tcTyThingTyCon_maybe _ = Nothing instance Outputable TcTyThing where -- Debugging only ppr (AGlobal g) = ppr g ppr elt@(ATcId {}) = text "Identifier" <> brackets (ppr (tct_id elt) <> dcolon <> ppr (varType (tct_id elt)) <> comma <+> ppr (tct_info elt)) ppr (ATyVar n tv) = text "Type variable" <+> quotes (ppr n) <+> equals <+> ppr tv <+> dcolon <+> ppr (varType tv) ppr (ATcTyCon tc) = text "ATcTyCon" <+> ppr tc <+> dcolon <+> ppr (tyConKind tc) ppr (APromotionErr err) = text "APromotionErr" <+> ppr err -- | IdBindingInfo describes how an Id is bound. -- -- It is used for the following purposes: -- a) for static forms in 'GHC.Tc.Gen.Expr.checkClosedInStaticForm' and -- b) to figure out when a nested binding can be generalised, -- in 'GHC.Tc.Gen.Bind.decideGeneralisationPlan'. -- data IdBindingInfo -- See Note [Meaning of IdBindingInfo] = NotLetBound | ClosedLet | NonClosedLet RhsNames -- Used for (static e) checks only ClosedTypeId -- Used for generalisation checks -- and for (static e) checks -- | IsGroupClosed describes a group of mutually-recursive bindings data IsGroupClosed = IsGroupClosed (NameEnv RhsNames) -- Free var info for the RHS of each binding in the group -- Used only for (static e) checks ClosedTypeId -- True <=> all the free vars of the group are -- imported or ClosedLet or -- NonClosedLet with ClosedTypeId=True. -- In particular, no tyvars, no NotLetBound type RhsNames = NameSet -- Names of variables, mentioned on the RHS of -- a definition, that are not Global or ClosedLet type ClosedTypeId = Bool -- See Note [Meaning of IdBindingInfo] {- Note [Meaning of IdBindingInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NotLetBound means that the Id is not let-bound (e.g. 
it is bound in a lambda-abstraction or in a case pattern) ClosedLet means that - The Id is let-bound, - Any free term variables are also Global or ClosedLet - Its type has no free variables (NB: a top-level binding subject to the MR might have free vars in its type) These ClosedLets can definitely be floated to top level; and we may need to do so for static forms. Property: ClosedLet is equivalent to NonClosedLet emptyNameSet True (NonClosedLet (fvs::RhsNames) (cl::ClosedTypeId)) means that - The Id is let-bound - The fvs::RhsNames contains the free names of the RHS, excluding Global and ClosedLet ones. - For the ClosedTypeId field see Note [Bindings with closed types: ClosedTypeId] For (static e) to be valid, we need for every 'x' free in 'e', that x's binding is floatable to the top level. Specifically: * x's RhsNames must be empty * x's type has no free variables See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable". This test is made in GHC.Tc.Gen.Expr.checkClosedInStaticForm. Actually knowing x's RhsNames (rather than just its emptiness or otherwise) is just so we can produce better error messages Note [Bindings with closed types: ClosedTypeId] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = let g ys = map not ys in ... Can we generalise 'g' under the OutsideIn algorithm? Yes, because all g's free variables are top-level; that is they themselves have no free type variables, and it is the type variables in the environment that makes things tricky for OutsideIn generalisation. Here's the invariant: If an Id has ClosedTypeId=True (in its IdBindingInfo), then the Id's type is /definitely/ closed (has no free type variables). Specifically, a) The Id's actual type is closed (has no free tyvars) b) Either the Id has a (closed) user-supplied type signature or all its free variables are Global/ClosedLet or NonClosedLet with ClosedTypeId=True. In particular, none are NotLetBound. Why is (b) needed? Consider \x. 
(x :: Int, let y = x+1 in ...) Initially x::alpha. If we happen to typecheck the 'let' before the (x::Int), y's type will have a free tyvar; but if the other way round it won't. So we treat any let-bound variable with a free non-let-bound variable as not ClosedTypeId, regardless of what the free vars of its type actually are. But if it has a signature, all is well: \x. ...(let { y::Int; y = x+1 } in let { v = y+2 } in ...)... Here the signature on 'v' makes 'y' a ClosedTypeId, so we can generalise 'v'. Note that: * A top-level binding may not have ClosedTypeId=True, if it suffers from the MR * A nested binding may be closed (eg 'g' in the example we started with). Indeed, that's the point; whether a function is defined at top level or nested is orthogonal to the question of whether or not it is closed. * A binding may be non-closed because it mentions a lexically scoped *type variable* Eg f :: forall a. blah f x = let g y = ...(y::a)... Under OutsideIn we are free to generalise an Id all of whose free variables have ClosedTypeId=True (or imported). This is an extension compared to the JFP paper on OutsideIn, which used "top-level" as a proxy for "closed". (It's not a good proxy anyway -- the MR can make a top-level binding with a free type variable.) Note [Type variables in the type environment] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The type environment has a binding for each lexically-scoped type variable that is in scope. For example f :: forall a. a -> a f x = (x :: a) g1 :: [a] -> a g1 (ys :: [b]) = head ys :: b g2 :: [Int] -> Int g2 (ys :: [c]) = head ys :: c * The forall'd variable 'a' in the signature scopes over f's RHS. * The pattern-bound type variable 'b' in 'g1' scopes over g1's RHS; note that it is bound to a skolem 'a' which is not itself lexically in scope. * The pattern-bound type variable 'c' in 'g2' is bound to Int; that is, pattern-bound type variables can stand for arbitrary types. 
(see GHC proposal #128 "Allow ScopedTypeVariables to refer to types" https://github.com/ghc-proposals/ghc-proposals/pull/128, and the paper "Type variables in patterns", Haskell Symposium 2018. This is implemented by the constructor ATyVar Name TcTyVar in the type environment. * The Name is the name of the original, lexically scoped type variable * The TcTyVar is sometimes a skolem (like in 'f'), and sometimes a unification variable (like in 'g1', 'g2'). We never zonk the type environment so in the latter case it always stays as a unification variable, although that variable may be later unified with a type (such as Int in 'g2'). -} instance Outputable IdBindingInfo where ppr NotLetBound = text "NotLetBound" ppr ClosedLet = text "TopLevelLet" ppr (NonClosedLet fvs closed_type) = text "TopLevelLet" <+> ppr fvs <+> ppr closed_type -------------- pprTcTyThingCategory :: TcTyThing -> SDoc pprTcTyThingCategory = text . capitalise . tcTyThingCategory tcTyThingCategory :: TcTyThing -> String tcTyThingCategory (AGlobal thing) = tyThingCategory thing tcTyThingCategory (ATyVar {}) = "type variable" tcTyThingCategory (ATcId {}) = "local identifier" tcTyThingCategory (ATcTyCon {}) = "local tycon" tcTyThingCategory (APromotionErr pe) = peCategory pe ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/Constraint.hs0000644000000000000000000030076207346545000022050 0ustar0000000000000000 {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE TypeApplications #-} -- | This module defines types and simple operations over constraints, as used -- in the type-checker and constraint solver. 
module GHC.Tc.Types.Constraint ( -- Constraints Xi, Ct(..), Cts, singleCt, listToCts, ctsElts, consCts, snocCts, extendCtsList, isEmptyCts, emptyCts, andCts, ctsPreds, isPendingScDictCt, isPendingScDict, pendingScDict_maybe, superClassesMightHelp, getPendingWantedScs, isWantedCt, isGivenCt, isTopLevelUserTypeError, containsUserTypeError, getUserTypeErrorMsg, isUnsatisfiableCt_maybe, ctEvidence, updCtEvidence, ctLoc, ctPred, ctFlavour, ctEqRel, ctOrigin, ctRewriters, ctEvId, wantedEvId_maybe, mkTcEqPredLikeEv, mkNonCanonical, mkGivens, tyCoVarsOfCt, tyCoVarsOfCts, tyCoVarsOfCtList, tyCoVarsOfCtsList, boundOccNamesOfWC, -- Particular forms of constraint EqCt(..), eqCtEvidence, eqCtLHS, DictCt(..), dictCtEvidence, dictCtPred, IrredCt(..), irredCtEvidence, mkIrredCt, ctIrredCt, irredCtPred, -- QCInst QCInst(..), pendingScInst_maybe, ExpansionFuel, doNotExpand, consumeFuel, pendingFuel, assertFuelPrecondition, assertFuelPreconditionStrict, CtIrredReason(..), isInsolubleReason, CheckTyEqResult, CheckTyEqProblem, cteProblem, cterClearOccursCheck, cteOK, cteImpredicative, cteTypeFamily, cteCoercionHole, cteInsolubleOccurs, cteSolubleOccurs, cterSetOccursCheckSoluble, cteConcrete, cteSkolemEscape, impredicativeProblem, insolubleOccursProblem, solubleOccursProblem, cterHasNoProblem, cterHasProblem, cterHasOnlyProblem, cterHasOnlyProblems, cterRemoveProblem, cterHasOccursCheck, cterFromKind, CanEqLHS(..), canEqLHS_maybe, canTyFamEqLHS_maybe, canEqLHSKind, canEqLHSType, eqCanEqLHS, Hole(..), HoleSort(..), isOutOfScopeHole, DelayedError(..), NotConcreteError(..), NotConcreteReason(..), WantedConstraints(..), insolubleWC, emptyWC, isEmptyWC, isSolvedWC, andWC, unionsWC, mkSimpleWC, mkImplicWC, addInsols, dropMisleading, addSimples, addImplics, addHoles, addNotConcreteError, addDelayedErrors, tyCoVarsOfWC, tyCoVarsOfWCList, insolubleWantedCt, insolubleCt, insolubleIrredCt, insolubleImplic, nonDefaultableTyVarsOfWC, Implication(..), implicationPrototype, checkTelescopeSkol, 
ImplicStatus(..), isInsolubleStatus, isSolvedStatus, UserGiven, getUserGivensFromImplics, HasGivenEqs(..), checkImplicationInvariants, -- CtLocEnv CtLocEnv(..), setCtLocEnvLoc, setCtLocEnvLvl, getCtLocEnvLoc, getCtLocEnvLvl, ctLocEnvInGeneratedCode, -- CtEvidence CtEvidence(..), TcEvDest(..), isWanted, isGiven, ctEvPred, ctEvLoc, ctEvOrigin, ctEvEqRel, ctEvExpr, ctEvTerm, ctEvCoercion, ctEvEvId, ctEvRewriters, ctEvUnique, tcEvDestUnique, ctEvRewriteRole, ctEvRewriteEqRel, setCtEvPredType, setCtEvLoc, tyCoVarsOfCtEvList, tyCoVarsOfCtEv, tyCoVarsOfCtEvsList, -- RewriterSet RewriterSet(..), emptyRewriterSet, isEmptyRewriterSet, -- exported concretely only for zonkRewriterSet addRewriter, unitRewriterSet, unionRewriterSet, rewriterSetFromCts, wrapType, CtFlavour(..), ctEvFlavour, CtFlavourRole, ctEvFlavourRole, ctFlavourRole, eqCtFlavourRole, eqCanRewrite, eqCanRewriteFR, -- Pretty printing pprEvVarTheta, pprEvVars, pprEvVarWithType, ) where import GHC.Prelude import GHC.Core.Predicate import GHC.Core.Type import GHC.Core.Coercion import GHC.Core.Class import GHC.Core.TyCon import GHC.Types.Name import GHC.Types.Var import GHC.Tc.Utils.TcType import GHC.Tc.Types.Evidence import GHC.Tc.Types.Origin import GHC.Tc.Types.CtLoc import GHC.Core import GHC.Core.TyCo.Ppr import GHC.Utils.FV import GHC.Types.Var.Set import GHC.Builtin.Names import GHC.Types.Unique.Set import GHC.Utils.Outputable import GHC.Data.Bag import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import GHC.Types.Name.Reader import Data.Coerce import qualified Data.Semigroup as S import Control.Monad ( msum, when ) import Data.Maybe ( mapMaybe, isJust ) import Data.List.NonEmpty ( NonEmpty ) -- these are for CheckTyEqResult import Data.Word ( Word8 ) import Data.List ( intersperse ) {- ************************************************************************ * * * Canonical constraints * * * * These are the constraints the low-level simplifier works with * * * 
************************************************************************ -} -- | A 'Xi'-type is one that has been fully rewritten with respect -- to the inert set; that is, it has been rewritten by the algorithm -- in GHC.Tc.Solver.Rewrite. (Historical note: 'Xi', for years and years, -- meant that a type was type-family-free. It does *not* mean this -- any more.) type Xi = TcType type Cts = Bag Ct -- | Says how many layers of superclasses can we expand. -- Invariant: ExpansionFuel should always be >= 0 -- see Note [Expanding Recursive Superclasses and ExpansionFuel] type ExpansionFuel = Int -- | Do not expand superclasses any further doNotExpand :: ExpansionFuel doNotExpand = 0 -- | Consumes one unit of fuel. -- Precondition: fuel > 0 consumeFuel :: ExpansionFuel -> ExpansionFuel consumeFuel fuel = assertFuelPreconditionStrict fuel $ fuel - 1 -- | Returns True if we have any fuel left for superclass expansion pendingFuel :: ExpansionFuel -> Bool pendingFuel n = n > 0 insufficientFuelError :: SDoc insufficientFuelError = text "Superclass expansion fuel should be > 0" -- | asserts if fuel is non-negative assertFuelPrecondition :: ExpansionFuel -> a -> a {-# INLINE assertFuelPrecondition #-} assertFuelPrecondition fuel = assertPpr (fuel >= 0) insufficientFuelError -- | asserts if fuel is strictly greater than 0 assertFuelPreconditionStrict :: ExpansionFuel -> a -> a {-# INLINE assertFuelPreconditionStrict #-} assertFuelPreconditionStrict fuel = assertPpr (pendingFuel fuel) insufficientFuelError data Ct = CDictCan DictCt | CIrredCan IrredCt -- A "irreducible" constraint (non-canonical) | CEqCan EqCt -- A canonical equality constraint | CQuantCan QCInst -- A quantified constraint | CNonCanonical CtEvidence -- See Note [NonCanonical Semantics] in GHC.Tc.Solver.Monad --------------- DictCt -------------- data DictCt -- e.g. Num ty = DictCt { di_ev :: CtEvidence -- See Note [Ct/evidence invariant] , di_cls :: Class , di_tys :: [Xi] -- di_tys are rewritten w.r.t. 
inerts, so Xi , di_pend_sc :: ExpansionFuel -- See Note [The superclass story] in GHC.Tc.Solver.Dict -- See Note [Expanding Recursive Superclasses and ExpansionFuel] in GHC.Tc.Solver -- Invariants: di_pend_sc > 0 <=> -- (a) di_cls has superclasses -- (b) those superclasses are not yet explored } dictCtEvidence :: DictCt -> CtEvidence dictCtEvidence = di_ev dictCtPred :: DictCt -> TcPredType dictCtPred (DictCt { di_cls = cls, di_tys = tys }) = mkClassPred cls tys instance Outputable DictCt where ppr dict = ppr (CDictCan dict) --------------- EqCt -------------- {- Note [Canonical equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An EqCt is a canonical equality constraint, one that can live in the inert set, and that can be used to rewrite other constrtaints. It satisfies these invariants: * (TyEq:OC) lhs does not occur in rhs (occurs check) Note [EqCt occurs check] * (TyEq:F) rhs has no foralls (this avoids substituting a forall for the tyvar in other types) * (TyEq:K) typeKind lhs `tcEqKind` typeKind rhs; Note [Ct kind invariant] * (TyEq:N) If the equality is representational, rhs is not headed by a saturated application of a newtype TyCon. See GHC.Tc.Solver.Equality Note [No top-level newtypes on RHS of representational equalities]. (Applies only when constructor of newtype is in scope.) * (TyEq:U) An EqCt is not immediately unifiable. If we can unify a:=ty, we will not form an EqCt (a ~ ty). * (TyEq:CH) rhs does not mention any coercion holes that resulted from fixing up a hetero-kinded equality. See Note [Equalities with incompatible kinds] in GHC.Tc.Solver.Equality, wrinkle (EIK2) These invariants ensure that the EqCts in inert_eqs constitute a terminating generalised substitution. See Note [inert_eqs: the inert equalities] in GHC.Tc.Solver.InertSet for what these words mean! Note [EqCt occurs check] ~~~~~~~~~~~~~~~~~~~~~~~~~~ A CEqCan relates a CanEqLHS (a type variable or type family applications) on its left to an arbitrary type on its right. 
It is used for rewriting. Because it is used for rewriting, it would be disastrous if the RHS were to mention the LHS: this would cause a loop in rewriting. We thus perform an occurs-check. There is, of course, some subtlety: * For type variables, the occurs-check looks deeply including kinds of type variables. This is because a CEqCan over a meta-variable is also used to inform unification, in GHC.Tc.Solver.Monad.checkTouchableTyVarEq. If the LHS appears anywhere in the RHS, at all, unification will create an infinite structure which is bad. * For type family applications, the occurs-check is shallow; it looks only in places where we might rewrite. (Specifically, it does not look in kinds or coercions.) An occurrence of the LHS in, say, an RHS coercion is OK, as we do not rewrite in coercions. No loop to be found. You might also worry about the possibility that a type family application LHS doesn't exactly appear in the RHS, but something that reduces to the LHS does. Yet that can't happen: the RHS is already inert, with all type family redexes reduced. So a simple syntactic check is just fine. The occurs check is performed in GHC.Tc.Utils.Unify.checkTyEqRhs and forms condition T3 in Note [Extending the inert equalities] in GHC.Tc.Solver.InertSet. -} data EqCt -- An equality constraint; see Note [Canonical equalities] = EqCt { -- CanEqLHS ~ rhs eq_ev :: CtEvidence, -- See Note [Ct/evidence invariant] eq_lhs :: CanEqLHS, eq_rhs :: Xi, -- See invariants above eq_eq_rel :: EqRel -- INVARIANT: cc_eq_rel = ctEvEqRel cc_ev } -- | A 'CanEqLHS' is a type that can appear on the left of a canonical -- equality: a type variable or /exactly-saturated/ type family application. 
data CanEqLHS = TyVarLHS TcTyVar | TyFamLHS TyCon -- ^ TyCon of the family [Xi] -- ^ Arguments, /exactly saturating/ the family instance Outputable CanEqLHS where ppr (TyVarLHS tv) = ppr tv ppr (TyFamLHS fam_tc fam_args) = ppr (mkTyConApp fam_tc fam_args) eqCtEvidence :: EqCt -> CtEvidence eqCtEvidence = eq_ev eqCtLHS :: EqCt -> CanEqLHS eqCtLHS = eq_lhs --------------- IrredCt -------------- data IrredCt -- These stand for yet-unusable predicates -- See Note [CIrredCan constraints] = IrredCt { ir_ev :: CtEvidence -- See Note [Ct/evidence invariant] , ir_reason :: CtIrredReason } mkIrredCt :: CtIrredReason -> CtEvidence -> Ct mkIrredCt reason ev = CIrredCan (IrredCt { ir_ev = ev, ir_reason = reason }) irredCtEvidence :: IrredCt -> CtEvidence irredCtEvidence = ir_ev irredCtPred :: IrredCt -> PredType irredCtPred = ctEvPred . irredCtEvidence ctIrredCt :: CtIrredReason -> Ct -> IrredCt ctIrredCt _ (CIrredCan ir) = ir ctIrredCt reason ct = IrredCt { ir_ev = ctEvidence ct , ir_reason = reason } instance Outputable IrredCt where ppr irred = ppr (CIrredCan irred) --------------- QCInst -------------- data QCInst -- A much simplified version of ClsInst -- See Note [Quantified constraints] in GHC.Tc.Solver.Solve = QCI { qci_ev :: CtEvidence -- Always of type forall tvs. 
context => ty -- Always Given , qci_tvs :: [TcTyVar] -- The tvs , qci_pred :: TcPredType -- The ty , qci_pend_sc :: ExpansionFuel -- Invariants: qci_pend_sc > 0 => -- (a) qci_pred is a ClassPred -- (b) this class has superclass(es), and -- (c) the superclass(es) are not explored yet -- Same as di_pend_sc flag in DictCt -- See Note [Expanding Recursive Superclasses and ExpansionFuel] in GHC.Tc.Solver } instance Outputable QCInst where ppr (QCI { qci_ev = ev }) = ppr ev ------------------------------------------------------------------------------ -- -- Holes and other delayed errors -- ------------------------------------------------------------------------------ -- | A delayed error, to be reported after constraint solving, in order to benefit -- from deferred unifications. data DelayedError = DE_Hole Hole -- ^ A hole (in a type or in a term). -- -- See Note [Holes]. | DE_NotConcrete NotConcreteError -- ^ A type could not be ensured to be concrete. -- -- See Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete. instance Outputable DelayedError where ppr (DE_Hole hole) = ppr hole ppr (DE_NotConcrete err) = ppr err -- | A hole stores the information needed to report diagnostics -- about holes in terms (unbound identifiers or underscores) or -- in types (also called wildcards, as used in partial type -- signatures). See Note [Holes]. data Hole = Hole { hole_sort :: HoleSort -- ^ What flavour of hole is this? , hole_occ :: RdrName -- ^ The name of this hole , hole_ty :: TcType -- ^ Type to be printed to the user -- For expression holes: type of expr -- For type holes: the missing type , hole_loc :: CtLoc -- ^ Where hole was written } -- For the hole_loc, we usually only want the TcLclEnv stored within. -- Except when we rewrite, where we need a whole location. And this -- might get reported to the user if reducing type families in a -- hole type loops. -- | Used to indicate which sort of hole we have. 
data HoleSort = ExprHole HoleExprRef -- ^ Either an out-of-scope variable or a "true" hole in an -- expression (TypedHoles). -- The HoleExprRef says where to write the -- the erroring expression for -fdefer-type-errors. | TypeHole -- ^ A hole in a type (PartialTypeSignatures) | ConstraintHole -- ^ A hole in a constraint, like @f :: (_, Eq a) => ... -- Differentiated from TypeHole because a ConstraintHole -- is simplified differently. See -- Note [Do not simplify ConstraintHoles] in GHC.Tc.Solver. instance Outputable Hole where ppr (Hole { hole_sort = ExprHole ref , hole_occ = occ , hole_ty = ty }) = parens $ (braces $ ppr occ <> colon <> ppr ref) <+> dcolon <+> ppr ty ppr (Hole { hole_sort = _other , hole_occ = occ , hole_ty = ty }) = braces $ ppr occ <> colon <> ppr ty instance Outputable HoleSort where ppr (ExprHole ref) = text "ExprHole:" <+> ppr ref ppr TypeHole = text "TypeHole" ppr ConstraintHole = text "ConstraintHole" -- | Why did we require that a certain type be concrete? data NotConcreteError -- | Concreteness was required by a representation-polymorphism -- check. -- -- See Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete. = NCE_FRR { nce_loc :: CtLoc -- ^ Where did this check take place? , nce_frr_origin :: FixedRuntimeRepOrigin -- ^ Which representation-polymorphism check did we perform? , nce_reasons :: NonEmpty NotConcreteReason -- ^ Why did the check fail? } -- | Why did we decide that a type was not concrete? data NotConcreteReason -- | The type contains a 'TyConApp' of a non-concrete 'TyCon'. -- -- See Note [Concrete types] in GHC.Tc.Utils.Concrete. = NonConcreteTyCon TyCon [TcType] -- | The type contains a type variable that could not be made -- concrete (e.g. a skolem type variable). | NonConcretisableTyVar TyVar -- | The type contains a cast. | ContainsCast TcType TcCoercionN -- | The type contains a forall. | ContainsForall ForAllTyBinder TcType -- | The type contains a 'CoercionTy'. 
| ContainsCoercionTy TcCoercion instance Outputable NotConcreteError where ppr (NCE_FRR { nce_frr_origin = frr_orig }) = text "NCE_FRR" <+> parens (ppr (frr_type frr_orig)) ------------ -- | Used to indicate extra information about why a CIrredCan is irreducible data CtIrredReason = IrredShapeReason -- ^ This constraint has a non-canonical shape (e.g. @c Int@, for a variable @c@) | NonCanonicalReason CheckTyEqResult -- ^ An equality where some invariant other than (TyEq:H) of 'CEqCan' is not satisfied; -- the 'CheckTyEqResult' states exactly why | ReprEqReason -- ^ An equality that cannot be decomposed because it is representational. -- Example: @a b ~R# Int@. -- These might still be solved later. -- INVARIANT: The constraint is a representational equality constraint | ShapeMismatchReason -- ^ A nominal equality that relates two wholly different types, -- like @Int ~# Bool@ or @a b ~# 3@. -- INVARIANT: The constraint is a nominal equality constraint | AbstractTyConReason -- ^ An equality like @T a b c ~ Q d e@ where either @T@ or @Q@ -- is an abstract type constructor. See Note [Skolem abstract data] -- in GHC.Core.TyCon. -- INVARIANT: The constraint is an equality constraint between two TyConApps | PluginReason -- ^ A typechecker plugin returned this in the pluginBadCts field -- of TcPluginProgress instance Outputable CtIrredReason where ppr IrredShapeReason = text "(irred)" ppr (NonCanonicalReason cter) = ppr cter ppr ReprEqReason = text "(repr)" ppr ShapeMismatchReason = text "(shape)" ppr AbstractTyConReason = text "(abstc)" ppr PluginReason = text "(plugin)" -- | Are we sure that more solving will never solve this constraint? 
isInsolubleReason :: CtIrredReason -> Bool isInsolubleReason IrredShapeReason = False isInsolubleReason (NonCanonicalReason cter) = cterIsInsoluble cter isInsolubleReason ReprEqReason = False isInsolubleReason ShapeMismatchReason = True isInsolubleReason AbstractTyConReason = True isInsolubleReason PluginReason = True ------------------------------------------------------------------------------ -- -- CheckTyEqResult, defined here because it is stored in a CtIrredReason -- ------------------------------------------------------------------------------ -- | A /set/ of problems in checking the validity of a type equality. -- See 'checkTypeEq'. newtype CheckTyEqResult = CTER Word8 -- | No problems in checking the validity of a type equality. cteOK :: CheckTyEqResult cteOK = CTER zeroBits -- | Check whether a 'CheckTyEqResult' is marked successful. cterHasNoProblem :: CheckTyEqResult -> Bool cterHasNoProblem (CTER 0) = True cterHasNoProblem _ = False -- | An /individual/ problem that might be logged in a 'CheckTyEqResult' newtype CheckTyEqProblem = CTEP Word8 cteImpredicative, cteTypeFamily, cteInsolubleOccurs, cteSolubleOccurs, cteCoercionHole, cteConcrete, cteSkolemEscape :: CheckTyEqProblem cteImpredicative = CTEP (bit 0) -- Forall or (=>) encountered cteTypeFamily = CTEP (bit 1) -- Type family encountered cteInsolubleOccurs = CTEP (bit 2) -- Occurs-check cteSolubleOccurs = CTEP (bit 3) -- Occurs-check under a type function, or in a coercion, -- or in a representational equality; see -- See Note [Occurs check and representational equality] -- cteSolubleOccurs must be one bit to the left of cteInsolubleOccurs -- See also Note [Insoluble mis-match] in GHC.Tc.Errors cteCoercionHole = CTEP (bit 4) -- Coercion hole encountered cteConcrete = CTEP (bit 5) -- Type variable that can't be made concrete -- e.g. alpha[conc] ~ Maybe beta[tv] cteSkolemEscape = CTEP (bit 6) -- Skolem escape e.g. 
alpha[2] ~ b[sk,4] cteProblem :: CheckTyEqProblem -> CheckTyEqResult cteProblem (CTEP mask) = CTER mask impredicativeProblem, insolubleOccursProblem, solubleOccursProblem :: CheckTyEqResult impredicativeProblem = cteProblem cteImpredicative insolubleOccursProblem = cteProblem cteInsolubleOccurs solubleOccursProblem = cteProblem cteSolubleOccurs occurs_mask :: Word8 occurs_mask = insoluble_mask .|. soluble_mask where CTEP insoluble_mask = cteInsolubleOccurs CTEP soluble_mask = cteSolubleOccurs -- | Check whether a 'CheckTyEqResult' has a 'CheckTyEqProblem' cterHasProblem :: CheckTyEqResult -> CheckTyEqProblem -> Bool CTER bits `cterHasProblem` CTEP mask = (bits .&. mask) /= 0 -- | Check whether a 'CheckTyEqResult' has one 'CheckTyEqProblem' and no other cterHasOnlyProblem :: CheckTyEqResult -> CheckTyEqProblem -> Bool CTER bits `cterHasOnlyProblem` CTEP mask = bits == mask cterHasOnlyProblems :: CheckTyEqResult -> CheckTyEqResult -> Bool CTER bits `cterHasOnlyProblems` CTER mask = (bits .&. complement mask) == 0 cterRemoveProblem :: CheckTyEqResult -> CheckTyEqProblem -> CheckTyEqResult cterRemoveProblem (CTER bits) (CTEP mask) = CTER (bits .&. complement mask) cterHasOccursCheck :: CheckTyEqResult -> Bool cterHasOccursCheck (CTER bits) = (bits .&. occurs_mask) /= 0 cterClearOccursCheck :: CheckTyEqResult -> CheckTyEqResult cterClearOccursCheck (CTER bits) = CTER (bits .&. complement occurs_mask) -- | Mark a 'CheckTyEqResult' as not having an insoluble occurs-check: any occurs -- check under a type family or in a representation equality is soluble. cterSetOccursCheckSoluble :: CheckTyEqResult -> CheckTyEqResult cterSetOccursCheckSoluble (CTER bits) = CTER $ ((bits .&. insoluble_mask) `shift` 1) .|. (bits .&. complement insoluble_mask) where CTEP insoluble_mask = cteInsolubleOccurs -- | Retain only information about occurs-check failures, because only that -- matters after recurring into a kind. 
cterFromKind :: CheckTyEqResult -> CheckTyEqResult cterFromKind (CTER bits) = CTER (bits .&. occurs_mask) cterIsInsoluble :: CheckTyEqResult -> Bool cterIsInsoluble (CTER bits) = (bits .&. mask) /= 0 where mask = impredicative_mask .|. insoluble_occurs_mask CTEP impredicative_mask = cteImpredicative CTEP insoluble_occurs_mask = cteInsolubleOccurs instance Semigroup CheckTyEqResult where CTER bits1 <> CTER bits2 = CTER (bits1 .|. bits2) instance Monoid CheckTyEqResult where mempty = cteOK instance Eq CheckTyEqProblem where (CTEP b1) == (CTEP b2) = b1==b2 instance Outputable CheckTyEqProblem where ppr prob@(CTEP bits) = case lookup prob allBits of Just s -> text s Nothing -> text "unknown:" <+> ppr bits instance Outputable CheckTyEqResult where ppr cter | cterHasNoProblem cter = text "cteOK" | otherwise = braces $ fcat $ intersperse vbar $ [ text str | (bitmask, str) <- allBits , cter `cterHasProblem` bitmask ] allBits :: [(CheckTyEqProblem, String)] allBits = [ (cteImpredicative, "cteImpredicative") , (cteTypeFamily, "cteTypeFamily") , (cteInsolubleOccurs, "cteInsolubleOccurs") , (cteSolubleOccurs, "cteSolubleOccurs") , (cteConcrete, "cteConcrete") , (cteSkolemEscape, "cteSkolemEscape") , (cteCoercionHole, "cteCoercionHole") ] {- Note [CIrredCan constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CIrredCan constraints are used for constraints that are "stuck" - we can't solve them (yet) - we can't use them to solve other constraints - but they may become soluble if we substitute for some of the type variables in the constraint Example 1: (c Int), where c :: * -> Constraint. We can't do anything with this yet, but if later c := Num, *then* we can solve it Example 2: a ~ b, where a :: *, b :: k, where k is a kind variable We don't want to use this to substitute 'b' for 'a', in case 'k' is subsequently unified with (say) *->*, because then we'd have ill-kinded types floating about. Rather we want to defer using the equality altogether until 'k' get resolved. 
Note [Ct/evidence invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If ct :: Ct, then extra fields of 'ct' cache precisely the ctev_pred field of (cc_ev ct), and is fully rewritten wrt the substitution. Eg for DictCt, ctev_pred (di_ev ct) = (di_cls ct) (di_tys ct) This holds by construction; look at the unique place where DictCt is built (in GHC.Tc.Solver.Dict.canDictNC). Note [Ct kind invariant] ~~~~~~~~~~~~~~~~~~~~~~~~ CEqCan requires that the kind of the lhs matches the kind of the rhs. This is necessary because these constraints are used for substitutions during solving. If the kinds differed, then the substitution would take a well-kinded type to an ill-kinded one. Note [Holes] ~~~~~~~~~~~~ This Note explains how GHC tracks *holes*. A hole represents one of two conditions: - A missing bit of an expression. Example: foo x = x + _ - A missing bit of a type. Example: bar :: Int -> _ What these have in common is that both cause GHC to emit a diagnostic to the user describing the bit that is left out. When a hole is encountered, a new entry of type Hole is added to the ambient WantedConstraints. The type (hole_ty) of the hole is then simplified during solving (with respect to any Givens in surrounding implications). It is reported with all the other errors in GHC.Tc.Errors. For expression holes, the user has the option of deferring errors until runtime with -fdefer-type-errors. In this case, the hole actually has evidence: this evidence is an erroring expression that prints an error and crashes at runtime. The ExprHole variant of holes stores an IORef EvTerm that will contain this evidence; during constraint generation, this IORef was stored in the HsUnboundVar extension field by the type checker. The desugarer simply dereferences to get the CoreExpr. Prior to fixing #17812, we used to invent an Id to hold the erroring expression, and then bind it during type-checking. But this does not support representation-polymorphic out-of-scope identifiers. 
See typecheck/should_compile/T17812. We thus use the mutable-CoreExpr
approach described above.

You might think that the type in the HoleExprRef is the same as the type
of the hole. However, because the hole type (hole_ty) is rewritten with
respect to givens, this might not be the case. That is, the hole_ty is
always (~) to the type of the HoleExprRef, but they might not be `eqType`.
We need the type of the generated evidence to match what is expected in
the context of the hole, and so we must store these types separately.

Type-level holes have no evidence at all.
-}

mkNonCanonical :: CtEvidence -> Ct
mkNonCanonical ev = CNonCanonical ev

mkGivens :: CtLoc -> [EvId] -> [Ct]
-- Wrap each Given evidence Id as a non-canonical Given constraint at 'loc'
mkGivens loc ev_ids
  = map mk ev_ids
  where
    mk ev_id = mkNonCanonical (CtGiven { ctev_evar = ev_id
                                       , ctev_pred = evVarPred ev_id
                                       , ctev_loc  = loc })

ctEvidence :: Ct -> CtEvidence
-- Extract the evidence from any constructor of Ct
ctEvidence (CQuantCan (QCI { qci_ev = ev }))      = ev
ctEvidence (CEqCan (EqCt { eq_ev = ev }))         = ev
ctEvidence (CIrredCan (IrredCt { ir_ev = ev }))   = ev
ctEvidence (CNonCanonical ev)                     = ev
ctEvidence (CDictCan (DictCt { di_ev = ev }))     = ev

updCtEvidence :: (CtEvidence -> CtEvidence) -> Ct -> Ct
-- Apply 'upd' to the evidence, preserving the constructor and other fields
updCtEvidence upd ct
  = case ct of
      CQuantCan qci@(QCI { qci_ev = ev })    -> CQuantCan (qci { qci_ev = upd ev })
      CEqCan eq@(EqCt { eq_ev = ev })        -> CEqCan (eq { eq_ev = upd ev })
      CIrredCan ir@(IrredCt { ir_ev = ev })  -> CIrredCan (ir { ir_ev = upd ev })
      CNonCanonical ev                       -> CNonCanonical (upd ev)
      CDictCan di@(DictCt { di_ev = ev })    -> CDictCan (di { di_ev = upd ev })

ctLoc :: Ct -> CtLoc
ctLoc = ctEvLoc . ctEvidence

ctOrigin :: Ct -> CtOrigin
ctOrigin = ctLocOrigin . ctLoc

ctPred :: Ct -> PredType
-- See Note [Ct/evidence invariant]
ctPred ct = ctEvPred (ctEvidence ct)

ctRewriters :: Ct -> RewriterSet
ctRewriters = ctEvRewriters . ctEvidence

ctEvId :: HasDebugCallStack => Ct -> EvVar
-- The evidence Id for this Ct
ctEvId ct = ctEvEvId (ctEvidence ct)

-- | Returns the evidence 'Id' for the argument 'Ct'
-- when this 'Ct' is a 'Wanted'.
--
-- Returns 'Nothing' otherwise.
wantedEvId_maybe :: Ct -> Maybe EvVar
wantedEvId_maybe ct
  = case ctEvidence ct of
      -- Removed a vestigial "| otherwise" guard on this alternative;
      -- it was always True, so behavior is unchanged
      ctev@(CtWanted {}) -> Just $ ctEvEvId ctev
      CtGiven {}         -> Nothing

-- | Makes a new equality predicate with the same role as the given
-- evidence.
mkTcEqPredLikeEv :: CtEvidence -> TcType -> TcType -> TcType
mkTcEqPredLikeEv ev
  = case predTypeEqRel pred of
      NomEq  -> mkPrimEqPred
      ReprEq -> mkReprPrimEqPred
  where
    pred = ctEvPred ev

-- | Get the flavour of the given 'Ct'
ctFlavour :: Ct -> CtFlavour
ctFlavour = ctEvFlavour . ctEvidence

-- | Get the equality relation for the given 'Ct'
ctEqRel :: Ct -> EqRel
ctEqRel = ctEvEqRel . ctEvidence

instance Outputable Ct where
  ppr ct = ppr (ctEvidence ct) <+> parens pp_sort
    where
      pp_sort = case ct of
        CEqCan {}        -> text "CEqCan"
        CNonCanonical {} -> text "CNonCanonical"
        CDictCan (DictCt { di_pend_sc = psc })
          | psc > 0   -> text "CDictCan" <> parens (text "psc" <+> ppr psc)
          | otherwise -> text "CDictCan"
        CIrredCan (IrredCt { ir_reason = reason })
          -> text "CIrredCan" <> ppr reason
        CQuantCan (QCI { qci_pend_sc = psc })
          | psc > 0   -> text "CQuantCan" <> parens (text "psc" <+> ppr psc)
          | otherwise -> text "CQuantCan"

instance Outputable EqCt where
  ppr (EqCt { eq_ev = ev }) = ppr ev

-----------------------------------
-- | Is a type a canonical LHS? That is, is it a tyvar or an exactly-saturated
-- type family application?
-- Does not look through type synonyms.
canEqLHS_maybe :: Xi -> Maybe CanEqLHS
canEqLHS_maybe xi
  | Just tv <- getTyVar_maybe xi
  = Just $ TyVarLHS tv
  | otherwise
  = canTyFamEqLHS_maybe xi

canTyFamEqLHS_maybe :: Xi -> Maybe CanEqLHS
-- Succeeds only for an exactly-saturated type-family application
canTyFamEqLHS_maybe xi
  | Just (tc, args) <- tcSplitTyConApp_maybe xi
  , isTypeFamilyTyCon tc
  , args `lengthIs` tyConArity tc
  = Just $ TyFamLHS tc args
  | otherwise
  = Nothing

-- | Convert a 'CanEqLHS' back into a 'Type'
canEqLHSType :: CanEqLHS -> TcType
canEqLHSType (TyVarLHS tv)               = mkTyVarTy tv
canEqLHSType (TyFamLHS fam_tc fam_args)  = mkTyConApp fam_tc fam_args

-- | Retrieve the kind of a 'CanEqLHS'
canEqLHSKind :: CanEqLHS -> TcKind
canEqLHSKind (TyVarLHS tv)               = tyVarKind tv
canEqLHSKind (TyFamLHS fam_tc fam_args)  = piResultTys (tyConKind fam_tc) fam_args

-- | Are two 'CanEqLHS's equal?
eqCanEqLHS :: CanEqLHS -> CanEqLHS -> Bool
eqCanEqLHS (TyVarLHS tv1) (TyVarLHS tv2) = tv1 == tv2
eqCanEqLHS (TyFamLHS fam_tc1 fam_args1) (TyFamLHS fam_tc2 fam_args2)
  = tcEqTyConApps fam_tc1 fam_args1 fam_tc2 fam_args2
eqCanEqLHS _ _ = False

{-
************************************************************************
*                                                                      *
           Simple functions over evidence variables
*                                                                      *
************************************************************************
-}

---------------- Getting bound tyvars -------------------------
boundOccNamesOfWC :: WantedConstraints -> [OccName]
-- Return the OccNames of skolem-bound type variables
-- We could recurse into types, and get the forall-bound ones too,
-- but I'm going wait until that is needed
-- See Note [tidyAvoiding] in GHC.Core.TyCo.Tidy
boundOccNamesOfWC wc
  = bagToList (go_wc wc)
  where
    go_wc (WC { wc_impl = implics }) = concatMapBag go_implic implics
    go_implic (Implic { ic_skols = tvs, ic_wanted = wc })
      = listToBag (map getOccName tvs) `unionBags` go_wc wc

---------------- Getting free tyvars -------------------------

-- | Returns free variables of constraints as a non-deterministic set
tyCoVarsOfCt :: Ct -> TcTyCoVarSet
tyCoVarsOfCt = fvVarSet . tyCoFVsOfCt

-- | Returns free variables of constraints as a non-deterministic set
tyCoVarsOfCtEv :: CtEvidence -> TcTyCoVarSet
tyCoVarsOfCtEv = fvVarSet . tyCoFVsOfCtEv

-- | Returns free variables of constraints as a deterministically ordered
-- list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtList :: Ct -> [TcTyCoVar]
tyCoVarsOfCtList = fvVarList . tyCoFVsOfCt

-- | Returns free variables of constraints as a deterministically ordered
-- list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtEvList :: CtEvidence -> [TcTyCoVar]
tyCoVarsOfCtEvList = fvVarList . tyCoFVsOfType . ctEvPred

-- | Returns free variables of constraints as a composable FV computation.
-- See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfCt :: Ct -> FV
tyCoFVsOfCt ct = tyCoFVsOfType (ctPred ct)
  -- This must consult only the ctPred, so that it gets *tidied* fvs if the
  -- constraint has been tidied. Tidying a constraint does not tidy the
  -- fields of the Ct, only the predicate in the CtEvidence.

-- | Returns free variables of constraints as a composable FV computation.
-- See Note [Deterministic FV] in GHC.Utils.FV.
tyCoFVsOfCtEv :: CtEvidence -> FV
tyCoFVsOfCtEv ct = tyCoFVsOfType (ctEvPred ct)

-- | Returns free variables of a bag of constraints as a non-deterministic
-- set. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfCts :: Cts -> TcTyCoVarSet
tyCoVarsOfCts = fvVarSet . tyCoFVsOfCts

-- | Returns free variables of a bag of constraints as a deterministically
-- ordered list. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfCtsList :: Cts -> [TcTyCoVar]
tyCoVarsOfCtsList = fvVarList . tyCoFVsOfCts

-- | Returns free variables of a bag of constraints as a deterministically
-- ordered list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtEvsList :: [CtEvidence] -> [TcTyCoVar]
tyCoVarsOfCtEvsList = fvVarList . tyCoFVsOfCtEvs

-- | Returns free variables of a bag of constraints as a composable FV
-- computation. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfCts :: Cts -> FV
tyCoFVsOfCts = foldr (unionFV . tyCoFVsOfCt) emptyFV

-- | Returns free variables of a bag of constraints as a composable FV
-- computation. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoFVsOfCtEvs :: [CtEvidence] -> FV
tyCoFVsOfCtEvs = foldr (unionFV . tyCoFVsOfCtEv) emptyFV

-- | Returns free variables of WantedConstraints as a non-deterministic
-- set. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfWC :: WantedConstraints -> TyCoVarSet
-- Only called on *zonked* things
tyCoVarsOfWC = fvVarSet . tyCoFVsOfWC

-- | Returns free variables of WantedConstraints as a deterministically
-- ordered list. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfWCList :: WantedConstraints -> [TyCoVar]
-- Only called on *zonked* things
tyCoVarsOfWCList = fvVarList . tyCoFVsOfWC

-- | Returns free variables of WantedConstraints as a composable FV
-- computation. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfWC :: WantedConstraints -> FV
-- Only called on *zonked* things
tyCoFVsOfWC (WC { wc_simple = simple, wc_impl = implic, wc_errors = errors })
  = tyCoFVsOfCts simple `unionFV`
    tyCoFVsOfBag tyCoFVsOfImplic implic `unionFV`
    tyCoFVsOfBag tyCoFVsOfDelayedError errors

-- | Returns free variables of Implication as a composable FV computation.
-- See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfImplic :: Implication -> FV
-- Only called on *zonked* things
tyCoFVsOfImplic (Implic { ic_skols = skols
                        , ic_given = givens
                        , ic_wanted = wanted })
  | isEmptyWC wanted
  = emptyFV
  | otherwise
  = tyCoFVsVarBndrs skols  $
    tyCoFVsVarBndrs givens $
    tyCoFVsOfWC wanted

tyCoFVsOfDelayedError :: DelayedError -> FV
tyCoFVsOfDelayedError (DE_Hole hole)      = tyCoFVsOfHole hole
tyCoFVsOfDelayedError (DE_NotConcrete {}) = emptyFV

tyCoFVsOfHole :: Hole -> FV
tyCoFVsOfHole (Hole { hole_ty = ty }) = tyCoFVsOfType ty

tyCoFVsOfBag :: (a -> FV) -> Bag a -> FV
tyCoFVsOfBag tvs_of = foldr (unionFV . tvs_of) emptyFV

{-
************************************************************************
*                                                                      *
            CtEvidence
         The "flavor" of a canonical constraint
*                                                                      *
************************************************************************
-}

isWantedCt :: Ct -> Bool
isWantedCt = isWanted . ctEvidence

isGivenCt :: Ct -> Bool
isGivenCt = isGiven . ctEvidence

{- Note [Custom type errors in constraints]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When GHC reports a type-error about an unsolved-constraint, we check
to see if the constraint contains any custom-type errors, and if so
we report them.  Here are some examples of constraints containing type
errors:

TypeError msg           -- The actual constraint is a type error

TypError msg ~ Int      -- Some type was supposed to be Int, but ended up
                        -- being a type error instead

Eq (TypeError msg)      -- A class constraint is stuck due to a type error

F (TypeError msg) ~ a   -- A type function failed to evaluate due to a type err

It is also possible to have constraints where the type error is nested deeper,
for example see #11990, and also:

Eq (F (TypeError msg))  -- Here the type error is nested under a type-function
                        -- call, which failed to evaluate because of it,
                        -- and so the `Eq` constraint was unsolved.
                        -- This may happen when one function calls another
                        -- and the called function produced a custom type error.
-}

-- | A constraint is considered to be a custom type error, if it contains
-- custom type errors anywhere in it.
-- See Note [Custom type errors in constraints]
getUserTypeErrorMsg :: PredType -> Maybe ErrorMsgType
getUserTypeErrorMsg pred
  = msum $ userTypeError_maybe pred
         : map getUserTypeErrorMsg (subTys pred)
  where
    -- Richard thinks this function is very broken. What is subTys
    -- supposed to be doing? Why are exactly-saturated tyconapps special?
    -- What stops this from accidentally ripping apart a call to TypeError?
    subTys t = case splitAppTys t of
                 (t,[]) ->
                   case splitTyConApp_maybe t of
                     Nothing     -> []
                     Just (_,ts) -> ts
                 (t,ts) -> t : ts

-- | Is this an user error message type, i.e. either the form @TypeError err@ or
-- @Unsatisfiable err@?
isTopLevelUserTypeError :: PredType -> Bool
isTopLevelUserTypeError pred
  = isJust (userTypeError_maybe pred) || isJust (isUnsatisfiableCt_maybe pred)

-- | Does this constraint contain an user error message?
--
-- That is, the type is either of the form @Unsatisfiable err@, or it contains
-- a type of the form @TypeError msg@, either at the top level or nested inside
-- the type.
containsUserTypeError :: PredType -> Bool
containsUserTypeError pred
  = isJust (getUserTypeErrorMsg pred) || isJust (isUnsatisfiableCt_maybe pred)

-- | Is this type an unsatisfiable constraint?
-- If so, return the error message.
isUnsatisfiableCt_maybe :: Type -> Maybe ErrorMsgType
isUnsatisfiableCt_maybe t
  | Just (tc, [msg]) <- splitTyConApp_maybe t
  , tc `hasKey` unsatisfiableClassNameKey
  = Just msg
  | otherwise
  = Nothing

isPendingScDict :: Ct -> Bool
isPendingScDict (CDictCan dict_ct) = isPendingScDictCt dict_ct
isPendingScDict _                  = False

isPendingScDictCt :: DictCt -> Bool
-- Says whether this is a CDictCan with di_pend_sc has positive fuel;
-- i.e. pending un-expanded superclasses
isPendingScDictCt (DictCt { di_pend_sc = f }) = pendingFuel f

pendingScDict_maybe :: Ct -> Maybe Ct
-- Says whether this is a CDictCan with di_pend_sc has fuel left,
-- AND if so exhausts the fuel so that they are not expanded again
pendingScDict_maybe (CDictCan dict@(DictCt { di_pend_sc = f }))
  | pendingFuel f = Just (CDictCan (dict { di_pend_sc = doNotExpand }))
  | otherwise     = Nothing
pendingScDict_maybe _ = Nothing

pendingScInst_maybe :: QCInst -> Maybe QCInst
-- Same as isPendingScDict, but for QCInsts
pendingScInst_maybe qci@(QCI { qci_pend_sc = f })
  | pendingFuel f = Just (qci { qci_pend_sc = doNotExpand })
  | otherwise     = Nothing

superClassesMightHelp :: WantedConstraints -> Bool
-- ^ True if taking superclasses of givens, or of wanteds (to perhaps
-- expose more equalities or functional dependencies) might help to
-- solve this constraint.  See Note [When superclasses help]
superClassesMightHelp (WC { wc_simple = simples, wc_impl = implics })
  = anyBag might_help_ct simples || anyBag might_help_implic implics
  where
    might_help_implic ic
       | IC_Unsolved <- ic_status ic = superClassesMightHelp (ic_wanted ic)
       | otherwise                   = False

    might_help_ct ct = not (is_ip ct)

    is_ip (CDictCan (DictCt { di_cls = cls })) = isIPClass cls
    is_ip _                                    = False

getPendingWantedScs :: Cts -> ([Ct], Cts)
-- in the return values [Ct] has original fuel while Cts has fuel exhausted
getPendingWantedScs simples
  = mapAccumBagL get [] simples
  where
    get acc ct | Just ct_exhausted <- pendingScDict_maybe ct
               = (ct:acc, ct_exhausted)
               | otherwise
               = (acc, ct)

{- Note [When superclasses help]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First read Note [The superclass story] in GHC.Tc.Solver.Dict

We expand superclasses and iterate only if there is at unsolved wanted
for which expansion of superclasses (e.g. from given constraints)
might actually help. The function superClassesMightHelp tells if
doing this superclass expansion might help solve this constraint.
Note that

* We look inside implications; maybe it'll help to expand the Givens
  at level 2 to help solve an unsolved Wanted buried inside an
  implication.  E.g.
      forall a. Ord a => forall b. [W] Eq a

* We say "no" for implicit parameters.
  we have [W] ?x::ty, expanding superclasses won't help:
  - Superclasses can't be implicit parameters
  - If we have a [G] ?x:ty2, then we'll have another unsolved
    [W] ty ~ ty2 (from the functional dependency)
    which will trigger superclass expansion.

  It's a bit of a special case, but it's easy to do.  The runtime cost
  is low because the unsolved set is usually empty anyway (errors
  aside), and the first non-implicit-parameter will terminate the search.

  The special case is worth it (#11480, comment:2) because it
  applies to CallStack constraints, which aren't type errors. If we have
     f :: (C a) => blah
     f x = ...undefined...
  we'll get a CallStack constraint.  If that's the only unsolved
  constraint it'll eventually be solved by defaulting.  So we don't
  want to emit warnings about hitting the simplifier's iteration
  limit.  A CallStack constraint really isn't an unsolved
  constraint; it can always be solved by defaulting.
-}

singleCt :: Ct -> Cts
singleCt = unitBag

andCts :: Cts -> Cts -> Cts
andCts = unionBags

listToCts :: [Ct] -> Cts
listToCts = listToBag

ctsElts :: Cts -> [Ct]
ctsElts = bagToList

consCts :: Ct -> Cts -> Cts
consCts = consBag

snocCts :: Cts -> Ct -> Cts
snocCts = snocBag

extendCtsList :: Cts -> [Ct] -> Cts
extendCtsList cts xs | null xs   = cts
                     | otherwise = cts `unionBags` listToBag xs

emptyCts :: Cts
emptyCts = emptyBag

isEmptyCts :: Cts -> Bool
isEmptyCts = isEmptyBag

-- The predicate of every constraint in the bag
ctsPreds :: Cts -> [PredType]
ctsPreds cts = foldr ((:) . ctPred) [] cts

{-
************************************************************************
*                                                                      *
                Wanted constraints
*                                                                      *
************************************************************************
-}

data WantedConstraints
  = WC { wc_simple :: Cts              -- Unsolved constraints, all wanted
       , wc_impl   :: Bag Implication
       , wc_errors :: Bag DelayedError
       }

emptyWC :: WantedConstraints
emptyWC = WC { wc_simple = emptyBag
             , wc_impl   = emptyBag
             , wc_errors = emptyBag }

mkSimpleWC :: [CtEvidence] -> WantedConstraints
mkSimpleWC cts
  = emptyWC { wc_simple = listToBag (map mkNonCanonical cts) }

mkImplicWC :: Bag Implication -> WantedConstraints
mkImplicWC implic
  = emptyWC { wc_impl = implic }

isEmptyWC :: WantedConstraints -> Bool
isEmptyWC (WC { wc_simple = f, wc_impl = i, wc_errors = errors })
  = isEmptyBag f && isEmptyBag i && isEmptyBag errors

-- | Checks whether a the given wanted constraints are solved, i.e.
-- that there are no simple constraints left and all the implications
-- are solved.
isSolvedWC :: WantedConstraints -> Bool
isSolvedWC WC {wc_simple = wc_simple, wc_impl = wc_impl, wc_errors = errors}
  = isEmptyBag wc_simple
    && allBag (isSolvedStatus . ic_status) wc_impl
    && isEmptyBag errors

andWC :: WantedConstraints -> WantedConstraints -> WantedConstraints
andWC (WC { wc_simple = f1, wc_impl = i1, wc_errors = e1 })
      (WC { wc_simple = f2, wc_impl = i2, wc_errors = e2 })
  = WC { wc_simple = f1 `unionBags` f2
       , wc_impl   = i1 `unionBags` i2
       , wc_errors = e1 `unionBags` e2 }

unionsWC :: [WantedConstraints] -> WantedConstraints
unionsWC = foldr andWC emptyWC

addSimples :: WantedConstraints -> Bag Ct -> WantedConstraints
addSimples wc cts
  = wc { wc_simple = wc_simple wc `unionBags` cts }
    -- Consider: Put the new constraints at the front, so they get solved first

addImplics :: WantedConstraints -> Bag Implication -> WantedConstraints
addImplics wc implic = wc { wc_impl = wc_impl wc `unionBags` implic }

addInsols :: WantedConstraints -> Bag IrredCt -> WantedConstraints
addInsols wc insols
  = wc { wc_simple = wc_simple wc `unionBags` fmap CIrredCan insols }

addHoles :: WantedConstraints -> Bag Hole -> WantedConstraints
addHoles wc holes
  = wc { wc_errors = mapBag DE_Hole holes `unionBags` wc_errors wc }

addNotConcreteError :: WantedConstraints -> NotConcreteError -> WantedConstraints
addNotConcreteError wc err
  = wc { wc_errors = unitBag (DE_NotConcrete err) `unionBags` wc_errors wc }

addDelayedErrors :: WantedConstraints -> Bag DelayedError -> WantedConstraints
addDelayedErrors wc errs
  = wc { wc_errors = errs `unionBags` wc_errors wc }

dropMisleading :: WantedConstraints -> WantedConstraints
-- Drop misleading constraints; really just class constraints
-- See Note [Constraints and errors] in GHC.Tc.Utils.Monad
--   for why this function is so strange, treating the 'simples'
--   and the implications differently.  Sigh.
dropMisleading (WC { wc_simple = simples, wc_impl = implics, wc_errors = errors })
  = WC { wc_simple = filterBag insolubleWantedCt simples
       , wc_impl   = mapBag drop_implic implics
       , wc_errors = filterBag keep_delayed_error errors }
  where
    drop_implic implic
      = implic { ic_wanted = drop_wanted (ic_wanted implic) }
    drop_wanted (WC { wc_simple = simples, wc_impl = implics, wc_errors = errors })
      = WC { wc_simple = filterBag keep_ct simples
           , wc_impl   = mapBag drop_implic implics
           , wc_errors = filterBag keep_delayed_error errors }

    keep_ct ct = case classifyPredType (ctPred ct) of
                    ClassPred {} -> False
                    _ -> True

    keep_delayed_error (DE_Hole hole)      = isOutOfScopeHole hole
    keep_delayed_error (DE_NotConcrete {}) = True

isSolvedStatus :: ImplicStatus -> Bool
isSolvedStatus (IC_Solved {}) = True
isSolvedStatus _              = False

isInsolubleStatus :: ImplicStatus -> Bool
isInsolubleStatus IC_Insoluble    = True
isInsolubleStatus IC_BadTelescope = True
isInsolubleStatus _               = False

insolubleImplic :: Implication -> Bool
insolubleImplic ic = isInsolubleStatus (ic_status ic)

-- | Gather all the type variables from 'WantedConstraints'
-- that it would be unhelpful to default. For the moment,
-- these are only 'ConcreteTv' metavariables participating
-- in a nominal equality whose other side is not concrete;
-- it's usually better to report those as errors instead of
-- defaulting.
nonDefaultableTyVarsOfWC :: WantedConstraints -> TyCoVarSet
-- Currently used in simplifyTop and in tcRule.
-- TODO: should we also use this in decideQuantifiedTyVars, kindGeneralize{All,Some}?
nonDefaultableTyVarsOfWC (WC { wc_simple = simples, wc_impl = implics, wc_errors = errs })
  = concatMapBag non_defaultable_tvs_of_ct simples
      `unionVarSet` concatMapBag (nonDefaultableTyVarsOfWC . ic_wanted) implics
      `unionVarSet` concatMapBag non_defaultable_tvs_of_err errs
  where
    concatMapBag :: (a -> TyVarSet) -> Bag a -> TyCoVarSet
    concatMapBag f = foldr (\ r acc -> f r `unionVarSet` acc) emptyVarSet

    -- Don't default ConcreteTv metavariables involved
    -- in an equality with something non-concrete: it's usually
    -- better to report the unsolved Wanted.
    --
    -- Example: alpha[conc] ~# rr[sk].
    non_defaultable_tvs_of_ct :: Ct -> TyCoVarSet
    non_defaultable_tvs_of_ct ct =
      -- NB: using classifyPredType instead of inspecting the Ct
      -- so that we deal uniformly with CNonCanonical (which come up in tcRule),
      -- CEqCan (unsolved but potentially soluble, e.g. @alpha[conc] ~# RR@)
      -- and CIrredCan.
      case classifyPredType $ ctPred ct of
        EqPred NomEq lhs rhs
          | Just tv <- getTyVar_maybe lhs
          , isConcreteTyVar tv
          , not (isConcreteType rhs)
          -> unitVarSet tv
          | Just tv <- getTyVar_maybe rhs
          , isConcreteTyVar tv
          , not (isConcreteType lhs)
          -> unitVarSet tv
        _ -> emptyVarSet

    -- Make sure to apply the same logic as above to delayed errors.
    non_defaultable_tvs_of_err (DE_NotConcrete err)
      = case err of
          NCE_FRR { nce_frr_origin = frr } -> tyCoVarsOfType (frr_type frr)
    non_defaultable_tvs_of_err (DE_Hole {}) = emptyVarSet

insolubleWC :: WantedConstraints -> Bool
insolubleWC (WC { wc_impl = implics, wc_simple = simples, wc_errors = errors })
  =  anyBag insolubleWantedCt simples
     -- insolubleWantedCt: wanteds only: see Note [Given insolubles]
  || anyBag insolubleImplic implics
  || anyBag is_insoluble errors
  where
    is_insoluble (DE_Hole hole)      = isOutOfScopeHole hole -- See Note [Insoluble holes]
    is_insoluble (DE_NotConcrete {}) = True

insolubleWantedCt :: Ct -> Bool
-- Definitely insoluble, in particular /excluding/ type-hole constraints
-- Namely:
--   a) an insoluble constraint as per 'insolubleIrredCt', i.e. either
--        - an insoluble equality constraint (e.g.
--          Int ~ Bool), or
--        - a custom type error constraint, TypeError msg :: Constraint
--   b) that does not arise from a Given or a Wanted/Wanted fundep interaction
-- See Note [Insoluble Wanteds]
insolubleWantedCt ct
  | CIrredCan ir_ct <- ct
       -- CIrredCan: see (IW1) in Note [Insoluble Wanteds]
  , IrredCt { ir_ev = ev } <- ir_ct
  , CtWanted { ctev_loc = loc, ctev_rewriters = rewriters } <- ev
       -- It's a Wanted
  , insolubleIrredCt ir_ct
       -- It's insoluble
  , isEmptyRewriterSet rewriters
       -- It has no rewriters; see (IW2) in Note [Insoluble Wanteds]
  , not (isGivenLoc loc)
       -- isGivenLoc: see (IW3) in Note [Insoluble Wanteds]
  , not (isWantedWantedFunDepOrigin (ctLocOrigin loc))
       -- origin check: see (IW4) in Note [Insoluble Wanteds]
  = True
  | otherwise
  = False

-- | Returns True of constraints that are definitely insoluble,
--   as well as TypeError constraints.
-- Can return 'True' for Given constraints, unlike 'insolubleWantedCt'.
--
-- The function is tuned for application /after/ constraint solving
--       i.e. assuming canonicalisation has been done
-- That's why it looks only for IrredCt; all insoluble constraints
-- are put into CIrredCan
insolubleCt :: Ct -> Bool
insolubleCt (CIrredCan ir_ct) = insolubleIrredCt ir_ct
insolubleCt _                 = False

insolubleIrredCt :: IrredCt -> Bool
-- Returns True of Irred constraints that are /definitely/ insoluble
--
-- This function is critical for accurate pattern-match overlap warnings.
-- See Note [Pattern match warnings with insoluble Givens] in GHC.Tc.Solver
--
-- Note that this does not traverse through the constraint to find
-- nested custom type errors: it only detects @TypeError msg :: Constraint@,
-- and not e.g. @Eq (TypeError msg)@.
insolubleIrredCt (IrredCt { ir_ev = ev, ir_reason = reason })
  =  isInsolubleReason reason
  || isTopLevelUserTypeError (ctEvPred ev)
  -- NB: 'isTopLevelUserTypeError' detects constraints of the form "TypeError msg"
  -- and "Unsatisfiable msg". It deliberately does not detect TypeError
  -- nested in a type (e.g.
  -- it does not use "containsUserTypeError"), as that
  -- would be too eager: the TypeError might appear inside a type family
  -- application which might later reduce, but we only want to return 'True'
  -- for constraints that are definitely insoluble.
  --
  -- For example: Num (F Int (TypeError "msg")), where F is a type family.
  --
  -- Test case: T11503, with the 'Assert' type family:
  --
  -- > type Assert :: Bool -> Constraint -> Constraint
  -- > type family Assert check errMsg where
  -- >   Assert 'True _errMsg = ()
  -- >   Assert _check errMsg = errMsg

-- | Does this hole represent an "out of scope" error?
-- See Note [Insoluble holes]
isOutOfScopeHole :: Hole -> Bool
isOutOfScopeHole (Hole { hole_occ = occ })
  = not (startsWithUnderscore (occName occ))

instance Outputable WantedConstraints where
  ppr (WC {wc_simple = s, wc_impl = i, wc_errors = e})
   = text "WC" <+> braces (vcat
        [ ppr_bag (text "wc_simple") s
        , ppr_bag (text "wc_impl") i
        , ppr_bag (text "wc_errors") e ])

ppr_bag :: Outputable a => SDoc -> Bag a -> SDoc
-- Print "<doc> = <contents>" only when the bag is non-empty
ppr_bag doc bag
 | isEmptyBag bag = empty
 | otherwise      = hang (doc <+> equals)
                       2 (foldr (($$) . ppr) empty bag)

{- Note [Given insolubles]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider (#14325, comment:)
    class (a~b) => C a b
    foo :: C a c => a -> c
    foo x = x

    hm3 :: C (f b) b => b -> f b
    hm3 x = foo x

In the RHS of hm3, from the [G] C (f b) b we get the insoluble
[G] f b ~# b.  Then we also get an unsolved [W] C b (f b).
Residual implication looks like
    forall b. C (f b) b => [G] f b ~# b
                           [W] C f (f b)

We do /not/ want to set the implication status to IC_Insoluble,
because that'll suppress reports of [W] C b (f b).  But we may not
report the insoluble [G] f b ~# b either (see Note [Given errors]
in GHC.Tc.Errors), so we may fail to report anything at all!  Yikes.

Bottom line: insolubleWC (called in GHC.Tc.Solver.setImplicationStatus)
should ignore givens even if they are insoluble.
Note [Insoluble Wanteds] ~~~~~~~~~~~~~~~~~~~~~~~~ insolubleWantedCt returns True of a Wanted constraint that definitely can't be solved. But not quite all such constraints; see wrinkles. (IW1) insolubleWantedCt is tuned for application /after/ constraint solving i.e. assuming canonicalisation has been done. That's why it looks only for IrredCt; all insoluble constraints are put into CIrredCan (IW2) We only treat it as insoluble if it has an empty rewriter set. (See Note [Wanteds rewrite Wanteds].) Otherwise #25325 happens: a Wanted constraint A that is /not/ insoluble rewrites some other Wanted constraint B, so B has A in its rewriter set. Now B looks insoluble. The danger is that we'll suppress reporting B because of its empty rewriter set; and suppress reporting A because there is an insoluble B lying around. (This suppression happens in GHC.Tc.Errors.mkErrorItem.) Solution: don't treat B as insoluble. (IW3) If the Wanted arises from a Given (how can that happen?), don't treat it as a Wanted insoluble (obviously). (IW4) If the Wanted came from a Wanted/Wanted fundep interaction, don't treat the constraint as insoluble. See Note [Suppressing confusing errors] in GHC.Tc.Errors Note [Insoluble holes] ~~~~~~~~~~~~~~~~~~~~~~ Hole constraints that ARE NOT treated as truly insoluble: a) type holes, arising from PartialTypeSignatures, b) "true" expression holes arising from TypedHoles An "expression hole" or "type hole" isn't really an error at all; it's a report saying "_ :: Int" here. But an out-of-scope variable masquerading as expression holes IS treated as truly insoluble, so that it trumps other errors during error reporting. Yuk! 
************************************************************************ * * Implication constraints * * ************************************************************************ -} data Implication = Implic { -- Invariants for a tree of implications: -- see TcType Note [TcLevel invariants] ic_tclvl :: TcLevel, -- TcLevel of unification variables -- allocated /inside/ this implication ic_info :: SkolemInfoAnon, -- See Note [Skolems in an implication] -- See Note [Shadowing in a constraint] ic_skols :: [TcTyVar], -- Introduced skolems; always skolem TcTyVars -- Their level numbers should be precisely ic_tclvl -- Their SkolemInfo should be precisely ic_info (almost) -- See Note [Implication invariants] ic_given :: [EvVar], -- Given evidence variables -- (order does not matter) -- See Invariant (GivenInv) in GHC.Tc.Utils.TcType ic_given_eqs :: HasGivenEqs, -- Are there Given equalities here? ic_warn_inaccessible :: Bool, -- True <=> we should report inaccessible code -- Note [Avoid -Winaccessible-code when deriving] -- in GHC.Tc.TyCl.Instance ic_env :: !CtLocEnv, -- Records the context at the time of creation. -- -- This provides all the information needed about -- the context to report the source of errors linked -- to this implication. ic_wanted :: WantedConstraints, -- The wanteds -- See Invariant (WantedInf) in GHC.Tc.Utils.TcType ic_binds :: EvBindsVar, -- Points to the place to fill in the -- abstraction and bindings. -- The ic_need fields keep track of which Given evidence -- is used by this implication or its children -- NB: including stuff used by nested implications that have since -- been discarded -- See Note [Needed evidence variables] -- and (RC2) in Note [Tracking redundant constraints]a ic_need_inner :: VarSet, -- Includes all used Given evidence ic_need_outer :: VarSet, -- Includes only the free Given evidence -- i.e. 
ic_need_inner after deleting -- (a) givens (b) binders of ic_binds ic_status :: ImplicStatus } implicationPrototype :: CtLocEnv -> Implication implicationPrototype ct_loc_env = Implic { -- These fields must be initialised ic_tclvl = panic "newImplic:tclvl" , ic_binds = panic "newImplic:binds" , ic_info = panic "newImplic:info" , ic_warn_inaccessible = panic "newImplic:warn_inaccessible" , ic_env = ct_loc_env -- The rest have sensible default values , ic_skols = [] , ic_given = [] , ic_wanted = emptyWC , ic_given_eqs = MaybeGivenEqs , ic_status = IC_Unsolved , ic_need_inner = emptyVarSet , ic_need_outer = emptyVarSet } data ImplicStatus = IC_Solved -- All wanteds in the tree are solved, all the way down { ics_dead :: [EvVar] } -- Subset of ic_given that are not needed -- See Note [Tracking redundant constraints] in GHC.Tc.Solver | IC_Insoluble -- At least one insoluble Wanted constraint in the tree | IC_BadTelescope -- Solved, but the skolems in the telescope are out of -- dependency order. See Note [Checking telescopes] | IC_Unsolved -- Neither of the above; might go either way data HasGivenEqs -- See Note [HasGivenEqs] = NoGivenEqs -- Definitely no given equalities, -- except by Note [Let-bound skolems] in GHC.Tc.Solver.InertSet | LocalGivenEqs -- Might have Given equalities, but only ones that affect only -- local skolems e.g. forall a b. (a ~ F b) => ... | MaybeGivenEqs -- Might have any kind of Given equalities; no floating out -- is possible. deriving Eq type UserGiven = Implication getUserGivensFromImplics :: [Implication] -> [UserGiven] getUserGivensFromImplics implics = reverse (filterOut (null . ic_given) implics) {- Note [HasGivenEqs] ~~~~~~~~~~~~~~~~~~~~~ The GivenEqs data type describes the Given constraints of an implication constraint: * NoGivenEqs: definitely no Given equalities, except perhaps let-bound skolems which don't count: see Note [Let-bound skolems] in GHC.Tc.Solver.InertSet Examples: forall a. Eq a => ... forall a. (Show a, Num a) => ... 
            forall a. a ~ Either Int Bool => ...  -- Let-bound skolem

* LocalGivenEqs: definitely no Given equalities that would affect principal
  types.  But may have equalities that affect only skolems of this implication
  (and hence do not affect principal types)
  Examples: forall a. F a ~ Int => ...
            forall a b. F a ~ G b => ...

* MaybeGivenEqs: may have Given equalities that would affect principal types
  Examples: forall. (a ~ b) => ...
            forall a. F a ~ b => ...
            forall a. c a => ...       -- The 'c' might be instantiated to (b ~)
            forall a. C a b => ....
               where class x~y => C a b
               so there is an equality in the superclass of a Given

The HasGivenEqs classifications affect two things:

* Suppressing redundant givens during error reporting; see
  GHC.Tc.Errors Note [Suppress redundant givens during error reporting]

* Floating in approximateWC.

Specifically, here's how it goes:

                 Stops floating    |   Suppresses Givens in errors
                 in approximateWC  |
                 -----------------------------------------------
 NoGivenEqs         NO             |         YES
 LocalGivenEqs      NO             |         NO
 MaybeGivenEqs      YES            |         NO
-}

instance Outputable Implication where
  ppr (Implic { ic_tclvl = tclvl, ic_skols = skols
              , ic_given = given, ic_given_eqs = given_eqs
              , ic_wanted = wanted, ic_status = status
              , ic_binds = binds
              , ic_need_inner = need_in, ic_need_outer = need_out
              , ic_info = info })
   = hang (text "Implic" <+> lbrace)
        2 (sep [ text "TcLevel =" <+> ppr tclvl
               , text "Skolems =" <+> pprTyVars skols
               , text "Given-eqs =" <+> ppr given_eqs
               , text "Status =" <+> ppr status
               , hang (text "Given =")  2 (pprEvVars given)
               , hang (text "Wanted =") 2 (ppr wanted)
               , text "Binds =" <+> ppr binds
               , whenPprDebug (text "Needed inner =" <+> ppr need_in)
               , whenPprDebug (text "Needed outer =" <+> ppr need_out)
               , pprSkolInfo info ] <+> rbrace)

instance Outputable ImplicStatus where
  ppr IC_Insoluble    = text "Insoluble"
  ppr IC_BadTelescope = text "Bad telescope"
  ppr IC_Unsolved     = text "Unsolved"
  ppr (IC_Solved { ics_dead = dead })
    = text "Solved" <+> (braces (text "Dead givens =" <+> ppr dead))

-- | True for implications born from a user-written 'forall'
-- (ic_info is ForAllSkol); only these need the telescope check.
checkTelescopeSkol :: SkolemInfoAnon -> Bool
-- See Note [Checking telescopes]
checkTelescopeSkol (ForAllSkol {}) = True
checkTelescopeSkol _               = False

instance Outputable HasGivenEqs where
  ppr NoGivenEqs    = text "NoGivenEqs"
  ppr LocalGivenEqs = text "LocalGivenEqs"
  ppr MaybeGivenEqs = text "MaybeGivenEqs"

-- NoGivenEqs is the identity; MaybeGivenEqs dominates.
-- Used in GHC.Tc.Solver.Monad.getHasGivenEqs
instance Semigroup HasGivenEqs where
  NoGivenEqs <> other = other
  other <> NoGivenEqs = other
  MaybeGivenEqs <> _other = MaybeGivenEqs
  _other <> MaybeGivenEqs = MaybeGivenEqs
  LocalGivenEqs <> LocalGivenEqs = LocalGivenEqs

-- Used in GHC.Tc.Solver.Monad.getHasGivenEqs
instance Monoid HasGivenEqs where
  mempty = NoGivenEqs

{- Note [Checking telescopes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When kind-checking a /user-written/ type, we might have a "bad telescope"
like this one:
  data SameKind :: forall k. k -> k -> Type
  type Foo :: forall a k (b :: k). SameKind a b -> Type

The kind of 'a' mentions 'k' which is bound after 'a'.  Oops.

One approach to doing this would be to bring each of a, k, and b into
scope, one at a time, creating a separate implication constraint for
each one, and bumping the TcLevel. This would work, because the kind
of, say, a would be untouchable when k is in scope (and the constraint
couldn't float out because k blocks it). However, it leads to terrible
error messages, complaining about skolem escape. While it is indeed
a problem of skolem escape, we can do better.

Instead, our approach is to bring the block of variables into scope
all at once, creating one implication constraint for the lot:

* We make a single implication constraint when kind-checking
  the 'forall' in Foo's kind, something like
    forall a k (b::k). { wanted constraints }

* Having solved {wanted}, before discarding the now-solved implication,
  the constraint solver checks the dependency order of the skolem
  variables (ic_skols). This is done in setImplicationStatus.
* This check is only necessary if the implication was born from a
  'forall' in a user-written signature (the HsForAllTy case in
  GHC.Tc.Gen.HsType).  If, say, it comes from checking a pattern match
  that binds existentials, where the type of the data constructor is
  known to be valid (in tcConPat), no need for the check.

  So the check is done /if and only if/ ic_info is ForAllSkol.

* If ic_info is (ForAllSkol dt dvs), the dvs::SDoc displays the
  original, user-written type variables.

* Be careful /NOT/ to discard an implication with a ForAllSkol
  ic_info, even if ic_wanted is empty.  We must give the
  constraint solver a chance to make that bad-telescope test!  Hence
  the extra guard in emitResidualTvConstraint; see #16247

* Don't mix up inferred and explicit variables in the same implication
  constraint.  E.g.
      foo :: forall a kx (b :: kx). SameKind a b
  We want an implication
      Implic { ic_skol = [(a::kx), kx, (b::kx)], ... }
  but GHC will attempt to quantify over kx, since it is free in (a::kx),
  and it's hopelessly confusing to report an error about quantified
  variables   kx (a::kx) kx (b::kx).
  Instead, the outer quantification over kx should be in a separate
  implication. TL;DR: an explicit forall should generate an implication
  quantified only over those explicitly quantified variables.

Note [Needed evidence variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ic_need_evs field holds the free vars of ic_binds, and all the
ic_binds in nested implications.

  * Main purpose: if one of the ic_givens is not mentioned in here, it
    is redundant.

  * solveImplication may drop an implication altogether if it has no
    remaining 'wanteds'. But we still track the free vars of its
    evidence binds, even though it has now disappeared.

Note [Shadowing in a constraint]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We assume NO SHADOWING in a constraint.
Specifically
 * The unification variables are all implicitly quantified at top
   level, and are all unique
 * The skolem variables bound in ic_skols are all fresh when the
   implication is created.
So we can safely substitute. For example, if we have
   forall a.  a~Int => ...(forall b. ...a...)...
we can push the (a~Int) constraint inwards in the "givens" without
worrying that 'b' might clash.

Note [Skolems in an implication]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The skolems in an implication are used:

* When considering floating a constraint outside the implication in
  GHC.Tc.Solver.floatEqualities or GHC.Tc.Solver.approximateImplications
  For this, we can treat ic_skols as a set.

* When checking that a /user-specified/ forall (ic_info = ForAllSkol tvs)
  has its variables in the correct order; see Note [Checking telescopes].
  Only for these implications does ic_skols need to be a list.

Nota bene: Although ic_skols is a list, it is not necessarily
in dependency order:
- In the ic_info=ForAllSkol case, the user might have written them
  in the wrong order
- In the case of a type signature like
      f :: [a] -> [b]
  the renamer gathers the implicit "outer" forall'd variables {a,b}, but
  does not know what order to put them in.  The type checker can sort them
  into dependency order, but only after solving all the kind constraints;
  and to do that it's convenient to create the Implication!

So we accept that ic_skols may be out of order.  Think of it as a set or
(in the case of ic_info=ForAllSkol, a list in user-specified, and possibly
wrong, order).

Note [Insoluble constraints]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of the errors that we get during canonicalization are best
reported when all constraints have been simplified as much as possible.
For instance, assume that during simplification the following constraints arise: [Wanted] F alpha ~ uf1 [Wanted] beta ~ uf1 beta When canonicalizing the wanted (beta ~ uf1 beta), if we eagerly fail we will simply see a message: 'Can't construct the infinite type beta ~ uf1 beta' and the user has no idea what the uf1 variable is. Instead our plan is that we will NOT fail immediately, but: (1) Record the "frozen" error in the ic_insols field (2) Isolate the offending constraint from the rest of the inerts (3) Keep on simplifying/canonicalizing At the end, we will hopefully have substituted uf1 := F alpha, and we will be able to report a more informative error: 'Can't construct the infinite type beta ~ F alpha beta' ************************************************************************ * * Invariant checking (debug only) * * ************************************************************************ Note [Implication invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The skolems of an implication have the following invariants, which are checked by checkImplicationInvariants: a) They are all SkolemTv TcTyVars; no TyVars, no unification variables b) Their TcLevel matches the ic_lvl for the implication c) Their SkolemInfo matches the implication. Actually (c) is not quite true. Consider data T a = forall b. MkT a b In tcConDecl for MkT we'll create an implication with ic_info of DataConSkol; but the type variable 'a' will have a SkolemInfo of TyConSkol. So we allow the tyvar to have a SkolemInfo of TyConFlav if the implication SkolemInfo is DataConSkol. 
-}

-- | Debug-only sanity check of the skolem invariants of an 'Implication':
-- every ic_skols entry must be a SkolemTv TcTyVar whose level and
-- SkolemInfo agree with the implication's ic_tclvl and ic_info.
-- See Note [Implication invariants].  A no-op unless debugIsOn.
checkImplicationInvariants, check_implic :: (HasCallStack, Applicative m) => Implication -> m ()
{-# INLINE checkImplicationInvariants #-}
-- Nothing => OK, Just doc => doc gives info
checkImplicationInvariants implic = when debugIsOn (check_implic implic)

check_implic implic@(Implic { ic_tclvl = lvl
                            , ic_info = skol_info
                            , ic_skols = skols })
  | null bads = pure ()
  | otherwise = massertPpr False (vcat [ text "checkImplicationInvariants failure"
                                       , nest 2 (vcat bads)
                                       , ppr implic ])
  where
    bads = mapMaybe check skols

    check :: TcTyVar -> Maybe SDoc
    check tv | not (isTcTyVar tv) = Just (ppr tv <+> text "is not a TcTyVar")
             | otherwise          = check_details tv (tcTyVarDetails tv)

    check_details :: TcTyVar -> TcTyVarDetails -> Maybe SDoc
    check_details tv (SkolemTv tv_skol_info tv_lvl _)
      | not (tv_lvl `sameDepthAs` lvl)
      = Just (vcat [ ppr tv <+> text "has level" <+> ppr tv_lvl
                   , text "ic_lvl" <+> ppr lvl ])
      | not (skol_info `checkSkolInfoAnon` skol_info_anon)
      = Just (vcat [ ppr tv <+> text "has skol info" <+> ppr skol_info_anon
                   , text "ic_info" <+> ppr skol_info ])
      | otherwise
      = Nothing
      where
        skol_info_anon = getSkolemInfo tv_skol_info
    check_details tv details = Just (ppr tv <+> text "is not a SkolemTv" <+> ppr details)

-- | Do the implication's SkolemInfo and a skolem variable's SkolemInfo
-- agree?  Mostly structural equality, plus a few special cases where
-- the two legitimately differ (see the comments on each case below).
checkSkolInfoAnon :: SkolemInfoAnon   -- From the implication
                  -> SkolemInfoAnon   -- From the type variable
                  -> Bool             -- True <=> ok
-- Used only for debug-checking; checkImplicationInvariants
-- So it doesn't matter much if it's incomplete
checkSkolInfoAnon sk1 sk2 = go sk1 sk2
  where
    go (SigSkol c1 t1 s1)   (SigSkol c2 t2 s2)   = c1==c2 && t1 `tcEqType` t2 && s1==s2
    go (SigTypeSkol cx1)    (SigTypeSkol cx2)    = cx1==cx2
    go (ForAllSkol _)       (ForAllSkol _)       = True
    go (IPSkol ips1)        (IPSkol ips2)        = ips1 == ips2
    go (DerivSkol pred1)    (DerivSkol pred2)    = pred1 `tcEqType` pred2
    go (TyConSkol f1 n1)    (TyConSkol f2 n2)    = f1==f2 && n1==n2
    go (DataConSkol n1)     (DataConSkol n2)     = n1==n2
    go (InstSkol {})        (InstSkol {})        = True
    go FamInstSkol          FamInstSkol          = True
    go BracketSkol          BracketSkol          = True
    go (RuleSkol n1)        (RuleSkol n2)        = n1==n2
    go (PatSkol c1 _)       (PatSkol c2 _)       = getName c1 == getName c2
         -- Too tedious to compare the HsMatchContexts
    go (InferSkol ids1)     (InferSkol ids2)     = equalLength ids1 ids2
                                                   && and (zipWith eq_pr ids1 ids2)
    go (UnifyForAllSkol t1) (UnifyForAllSkol t2) = t1 `tcEqType` t2
    go ReifySkol            ReifySkol            = True
    go RuntimeUnkSkol       RuntimeUnkSkol       = True
    go ArrowReboundIfSkol   ArrowReboundIfSkol   = True
    go (UnkSkol _)          (UnkSkol _)          = True

    -------- Three slightly strange special cases --------
    go (DataConSkol _)      (TyConSkol f _)      = h98_data_decl f
    -- In the H98 declaration data T a = forall b. MkT a b
    -- in tcConDecl for MkT we'll have a SkolemInfo in the implication of
    -- DataConSkol, but the type variable 'a' will have a SkolemInfo of TyConSkol

    go (DataConSkol _)      FamInstSkol          = True
    -- In data/newtype instance T a = MkT (a -> a),
    -- in tcConDecl for MkT we'll have a SkolemInfo in the implication of
    -- DataConSkol, but 'a' will have SkolemInfo of FamInstSkol

    go FamInstSkol          (InstSkol {})        = True
    -- In instance C (T a) where { type F (T a) b = ... }
    -- we have 'a' with SkolemInfo InstSkol, but we make an implication with
    -- SkolemInfo of FamInstSkol. Very like the ConDecl/TyConSkol case

    go (ForAllSkol _)       _                    = True
    -- Telescope tests: we need a ForAllSkol to force the telescope
    -- test, but the skolems might come from (say) a family instance decl
    --    type instance forall a. F [a] = a->a

    go (SigTypeSkol DerivClauseCtxt) (TyConSkol f _) = h98_data_decl f
    -- e.g.   newtype T a = MkT ...
    --        deriving blah
    -- We use the skolems from T (TyConSkol) when typechecking
    -- the deriving clauses (SigTypeSkol DerivClauseCtxt)

    go _ _ = False

    eq_pr :: (Name,TcType) -> (Name,TcType) -> Bool
    eq_pr (i1,_) (i2,_) = i1==i2 -- Types may be differently zonked

    -- Is this flavour a Haskell-98 data/newtype declaration?
    h98_data_decl DataTypeFlavour = True
    h98_data_decl NewtypeFlavour  = True
    h98_data_decl _               = False

{- *********************************************************************
*                                                                      *
            Pretty printing
*                                                                      *
********************************************************************* -}

pprEvVars :: [EvVar] -> SDoc    -- Print with their types
pprEvVars ev_vars = vcat (map pprEvVarWithType ev_vars)

pprEvVarTheta :: [EvVar] -> SDoc
pprEvVarTheta ev_vars = pprTheta (map evVarPred ev_vars)

pprEvVarWithType :: EvVar -> SDoc
pprEvVarWithType v = ppr v <+> dcolon <+> pprType (evVarPred v)

-- | Wrap a type in the skolems and givens of an implication:
-- wrapType ty skols givens = forall skols. givens => ty
wrapType :: Type -> [TyVar] -> [PredType] -> Type
wrapType ty skols givens = mkSpecForAllTys skols $ mkPhiTy givens ty

{-
************************************************************************
*                                                                      *
            CtEvidence
*                                                                      *
************************************************************************

Note [CtEvidence invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `ctev_pred` field of a `CtEvidence` is just a cache for the type of the
evidence.  More precisely:
* For Givens, `ctev_pred` = `varType ctev_evar`
* For Wanteds, `ctev_pred` = `evDestType ctev_dest`
where
  evDestType :: TcEvDest -> TcType
  evDestType (EvVarDest evVar)       = varType evVar
  evDestType (HoleDest coercionHole) = varType (coHoleCoVar coercionHole)

The invariant is maintained by `setCtEvPredType`, the only function that
updates the `ctev_pred` field of a `CtEvidence`.

Why is the invariant important? Because when the evidence is a coercion, it
may be used in (CastTy ty co); and then we may call `typeKind` on that type
(e.g. in the kind-check of `eqType`); and expect to see a fully zonked kind.
(This came up in test T13333, in the MR that fixed #20641, namely !6942.)
Historical Note [Evidence field of CtEvidence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the past we tried leaving the `ctev_evar`/`ctev_dest` field of a
constraint untouched (and hence un-zonked) on the grounds that it is
never looked at.  But in fact it is: the evidence can become part of a
type (via `CastTy ty kco`) and we may later ask the kind of that type
and expect a zonked result.  (For example, in the kind-check
of `eqType`.)

The safest thing is simply to keep `ctev_evar`/`ctev_dest` in sync
with `ctev_pred`, as stated in `Note [CtEvidence invariants]`.

Note [Bind new Givens immediately]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For Givens we make new EvVars and bind them immediately. Two main reasons:
  * Gain sharing.  E.g. suppose we start with g :: C a b, where
       class D a => C a b
       class (E a, F a) => D a
    If we generate all g's superclasses as separate EvTerms we might
    get    selD1 (selC1 g) :: E a
           selD2 (selC1 g) :: F a
           selC1 g :: D a
    which we could do more economically as:
           g1 :: D a = selC1 g
           g2 :: E a = selD1 g1
           g3 :: F a = selD2 g1

  * For *coercion* evidence we *must* bind each given:
        class (a~b) => C a b where ....
        f :: C a b => ....
    Then in f's Givens we have g:(C a b) and the superclass sc(g,0):a~b.
    But that superclass selector can't (yet) appear in a coercion
    (see evTermCoercion), so the easy thing is to bind it to an Id.

So a Given has EvVar inside it rather than (as previously) an EvTerm.

Note [The rewrite-role of a constraint]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The rewrite-role of a constraint says what can rewrite that constraint:

* If the rewrite-role = Nominal, only a nominal equality can rewrite it

* If the rewrite-role = Representational, either a nominal or representational
  equality can rewrite it.

Notice that the constraint may itself not be an equality at all.
For example, the rewrite-role of (Eq [a]) is Nominal; only nominal equalities
can rewrite it.
-}

-- | A place for type-checking evidence to go after it is generated.
--
--   - Wanted equalities use HoleDest,
--   - other Wanteds use EvVarDest.
data TcEvDest
  = EvVarDest EvVar         -- ^ bind this var to the evidence
              -- EvVarDest is always used for non-type-equalities
              -- e.g. class constraints

  | HoleDest  CoercionHole  -- ^ fill in this hole with the evidence
              -- HoleDest is always used for type-equalities
              -- See Note [Coercion holes] in GHC.Core.TyCo.Rep

-- | Evidence for a constraint: either a Given (we have the evidence)
-- or a Wanted (we must produce it).  The ctev_pred field caches the
-- predicate type; see Note [CtEvidence invariants].
data CtEvidence
  = CtGiven    -- Truly given, not depending on subgoals
      { ctev_pred :: TcPredType      -- See Note [Ct/evidence invariant]
      , ctev_evar :: EvVar           -- See Note [CtEvidence invariants]
      , ctev_loc  :: CtLoc }

  | CtWanted   -- Wanted goal
      { ctev_pred :: TcPredType      -- See Note [Ct/evidence invariant]
      , ctev_dest :: TcEvDest        -- See Note [CtEvidence invariants]
      , ctev_loc  :: CtLoc
      , ctev_rewriters :: RewriterSet }  -- See Note [Wanteds rewrite Wanteds]

ctEvPred :: CtEvidence -> TcPredType
-- The predicate of a flavor
ctEvPred = ctev_pred

ctEvLoc :: CtEvidence -> CtLoc
ctEvLoc = ctev_loc

ctEvOrigin :: CtEvidence -> CtOrigin
ctEvOrigin = ctLocOrigin . ctEvLoc

-- | Get the equality relation relevant for a 'CtEvidence'
ctEvEqRel :: HasDebugCallStack => CtEvidence -> EqRel
ctEvEqRel = predTypeEqRel . ctEvPred

-- | Get the rewrite-role relevant for a 'CtEvidence'
-- See Note [The rewrite-role of a constraint]
ctEvRewriteRole :: HasDebugCallStack => CtEvidence -> Role
ctEvRewriteRole = eqRelRole . ctEvRewriteEqRel

ctEvRewriteEqRel :: CtEvidence -> EqRel
-- ^ Return the rewrite-role of an arbitrary CtEvidence
-- See Note [The rewrite-role of a constraint]
-- We return ReprEq for (a ~R# b) and NomEq for all other preds
ctEvRewriteEqRel = predTypeEqRel . ctEvPred

ctEvTerm :: CtEvidence -> EvTerm
ctEvTerm ev = EvExpr (ctEvExpr ev)

-- | Extract the set of rewriters from a 'CtEvidence'
-- See Note [Wanteds rewrite Wanteds]
-- If the provided CtEvidence is not for a Wanted, just
-- return an empty set.
ctEvRewriters :: CtEvidence -> RewriterSet
ctEvRewriters (CtWanted { ctev_rewriters = rewriters }) = rewriters
ctEvRewriters (CtGiven {})                              = emptyRewriterSet

-- | The evidence expression: a coercion for equality Wanteds
-- (which always use HoleDest), an evidence Id otherwise.
ctEvExpr :: HasDebugCallStack => CtEvidence -> EvExpr
ctEvExpr ev@(CtWanted { ctev_dest = HoleDest _ })
            = Coercion $ ctEvCoercion ev
ctEvExpr ev = evId (ctEvEvId ev)

ctEvCoercion :: HasDebugCallStack => CtEvidence -> TcCoercion
ctEvCoercion (CtGiven { ctev_evar = ev_id })
  = mkCoVarCo ev_id
ctEvCoercion (CtWanted { ctev_dest = dest })
  | HoleDest hole <- dest
  = -- ctEvCoercion is only called on type equalities
    -- and they always have HoleDests
    mkHoleCo hole
ctEvCoercion ev
  = pprPanic "ctEvCoercion" (ppr ev)

ctEvEvId :: CtEvidence -> EvVar
ctEvEvId (CtWanted { ctev_dest = EvVarDest ev }) = ev
ctEvEvId (CtWanted { ctev_dest = HoleDest h })   = coHoleCoVar h
ctEvEvId (CtGiven  { ctev_evar = ev })           = ev

ctEvUnique :: CtEvidence -> Unique
ctEvUnique (CtGiven { ctev_evar = ev })    = varUnique ev
ctEvUnique (CtWanted { ctev_dest = dest }) = tcEvDestUnique dest

tcEvDestUnique :: TcEvDest -> Unique
tcEvDestUnique (EvVarDest ev_var) = varUnique ev_var
tcEvDestUnique (HoleDest co_hole) = varUnique (coHoleCoVar co_hole)

setCtEvLoc :: CtEvidence -> CtLoc -> CtEvidence
setCtEvLoc ctev loc = ctev { ctev_loc = loc }

-- | Set the type of CtEvidence.
--
-- This function ensures that the invariants on 'CtEvidence' hold, by updating
-- the evidence and the ctev_pred in sync with each other.
-- See Note [CtEvidence invariants].
setCtEvPredType :: HasDebugCallStack => CtEvidence -> Type -> CtEvidence
setCtEvPredType old_ctev@(CtGiven { ctev_evar = ev }) new_pred
  = old_ctev { ctev_pred = new_pred
             , ctev_evar = setVarType ev new_pred }
setCtEvPredType old_ctev@(CtWanted { ctev_dest = dest }) new_pred
  = old_ctev { ctev_pred = new_pred
             , ctev_dest = new_dest }
  where
    -- Keep the destination's type in sync with the new predicate
    new_dest = case dest of
      EvVarDest ev -> EvVarDest (setVarType ev new_pred)
      HoleDest h   -> HoleDest  (setCoHoleType h new_pred)

instance Outputable TcEvDest where
  ppr (HoleDest h)   = text "hole" <> ppr h
  ppr (EvVarDest ev) = ppr ev

instance Outputable CtEvidence where
  ppr ev = ppr (ctEvFlavour ev)
           <+> pp_ev <+> braces (ppr (ctl_depth (ctEvLoc ev)) <> pp_rewriters)
                   -- Show the sub-goal depth too
           <> dcolon <+> ppr (ctEvPred ev)
    where
      pp_ev = case ev of
             CtGiven { ctev_evar = v } -> ppr v
             CtWanted {ctev_dest = d } -> ppr d

      rewriters = ctEvRewriters ev
      pp_rewriters | isEmptyRewriterSet rewriters = empty
                   | otherwise                    = semi <> ppr rewriters

isWanted :: CtEvidence -> Bool
isWanted (CtWanted {}) = True
isWanted _ = False

isGiven :: CtEvidence -> Bool
isGiven (CtGiven {})  = True
isGiven _ = False

{- *********************************************************************
*                                                                      *
                  RewriterSet
*                                                                      *
********************************************************************* -}

-- | Stores a set of CoercionHoles that have been used to rewrite a constraint.
-- See Note [Wanteds rewrite Wanteds].
newtype RewriterSet = RewriterSet (UniqSet CoercionHole)
  deriving newtype (Outputable, Semigroup, Monoid)

emptyRewriterSet :: RewriterSet
emptyRewriterSet = RewriterSet emptyUniqSet

unitRewriterSet :: CoercionHole -> RewriterSet
unitRewriterSet = coerce (unitUniqSet @CoercionHole)

unionRewriterSet :: RewriterSet -> RewriterSet -> RewriterSet
unionRewriterSet = coerce (unionUniqSets @CoercionHole)

isEmptyRewriterSet :: RewriterSet -> Bool
isEmptyRewriterSet = coerce (isEmptyUniqSet @CoercionHole)

addRewriter :: RewriterSet -> CoercionHole -> RewriterSet
addRewriter = coerce (addOneToUniqSet @CoercionHole)

rewriterSetFromCts :: Bag Ct -> RewriterSet
-- Take a bag of Wanted equalities, and collect them as a RewriterSet
-- Only Wanteds with a HoleDest contribute; everything else is skipped.
rewriterSetFromCts cts
  = foldr add emptyRewriterSet cts
  where
    add ct rw_set = case ctEvidence ct of
       CtWanted { ctev_dest = HoleDest hole } -> rw_set `addRewriter` hole
       _                                      -> rw_set

{- *********************************************************************
*                                                                      *
                       CtFlavour
*                                                                      *
********************************************************************* -}

data CtFlavour
  = Given     -- we have evidence
  | Wanted    -- we want evidence
  deriving Eq

instance Outputable CtFlavour where
  ppr Given  = text "[G]"
  ppr Wanted = text "[W]"

ctEvFlavour :: CtEvidence -> CtFlavour
ctEvFlavour (CtWanted {}) = Wanted
ctEvFlavour (CtGiven {})  = Given

-- | Whether or not one 'Ct' can rewrite another is determined by its
-- flavour and its equality relation.  See also
-- Note [Flavours with roles] in GHC.Tc.Solver.InertSet
type CtFlavourRole = (CtFlavour, EqRel)

-- | Extract the flavour, role, and boxity from a 'CtEvidence'
ctEvFlavourRole :: HasDebugCallStack => CtEvidence -> CtFlavourRole
ctEvFlavourRole ev = (ctEvFlavour ev, ctEvRewriteEqRel ev)

-- | Extract the flavour and role from a 'Ct'
eqCtFlavourRole :: EqCt -> CtFlavourRole
eqCtFlavourRole (EqCt { eq_ev = ev, eq_eq_rel = eq_rel })
  = (ctEvFlavour ev, eq_rel)

-- Dictionary constraints always use nominal rewriting
dictCtFlavourRole :: DictCt -> CtFlavourRole
dictCtFlavourRole (DictCt { di_ev = ev })
  = (ctEvFlavour ev, NomEq)

-- | Extract the flavour and role from a 'Ct'
ctFlavourRole :: HasDebugCallStack => Ct -> CtFlavourRole
-- Uses short-cuts for the Role field, for special cases
ctFlavourRole (CDictCan di_ct) = dictCtFlavourRole di_ct
ctFlavourRole (CEqCan eq_ct)   = eqCtFlavourRole eq_ct
ctFlavourRole ct               = ctEvFlavourRole (ctEvidence ct)

{- Note [eqCanRewrite]
~~~~~~~~~~~~~~~~~~~~~~
(eqCanRewrite ct1 ct2) holds if the constraint ct1 (a CEqCan of form
lhs ~ ty) can be used to rewrite ct2.  It must satisfy the properties of
a can-rewrite relation, see Definition [Can-rewrite relation] in
GHC.Tc.Solver.Monad.

With the solver handling Coercible constraints like equality constraints,
the rewrite conditions must take role into account, never allowing
a representational equality to rewrite a nominal one.

Note [Wanteds rewrite Wanteds]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Should one Wanted constraint be allowed to rewrite another?

This example (along with #8450) suggests not:
   f :: a -> Bool
   f x = ( [x,'c'], [x,True] ) `seq` True
Here we get
  [W] a ~ Char
  [W] a ~ Bool
but we do not want to complain about Bool ~ Char!
This example suggests yes (indexed-types/should_fail/T4093a): type family Foo a f :: (Foo e ~ Maybe e) => Foo e In the ambiguity check, we get [G] g1 :: Foo e ~ Maybe e [W] w1 :: Foo alpha ~ Foo e [W] w2 :: Foo alpha ~ Maybe alpha w1 gets rewritten by the Given to become [W] w3 :: Foo alpha ~ Maybe e Now, the only way to make progress is to allow Wanteds to rewrite Wanteds. Rewriting w3 with w2 gives us [W] w4 :: Maybe alpha ~ Maybe e which will soon get us to alpha := e and thence to victory. TL;DR we want equality saturation. We thus want Wanteds to rewrite Wanteds in order to accept more programs, but we don't want Wanteds to rewrite Wanteds because doing so can create inscrutable error messages. To solve this dilemma: * We allow Wanteds to rewrite Wanteds, but... * Each Wanted tracks the set of Wanteds it has been rewritten by, in its RewriterSet, stored in the ctev_rewriters field of the CtWanted constructor of CtEvidence. (Only Wanteds have RewriterSets.) * In error reporting, we simply suppress any errors that have been rewritten by /unsolved/ wanteds. This suppression happens in GHC.Tc.Errors.mkErrorItem, which uses GHC.Tc.Zonk.Type.zonkRewriterSet to look through any filled coercion holes. The idea is that we wish to report the "root cause" -- the error that rewrote all the others. * We prioritise Wanteds that have an empty RewriterSet: see Note [Prioritise Wanteds with empty RewriterSet]. Let's continue our first example above: inert: [W] w1 :: a ~ Char work: [W] w2 :: a ~ Bool Because Wanteds can rewrite Wanteds, w1 will rewrite w2, yielding inert: [W] w1 :: a ~ Char [W] w2 {w1}:: Char ~ Bool The {w1} in the second line of output is the RewriterSet of w1. A RewriterSet is just a set of unfilled CoercionHoles. This is sufficient because only equalities (evidenced by coercion holes) are used for rewriting; other (dictionary) constraints cannot ever rewrite. The rewriter (in e.g. 
GHC.Tc.Solver.Rewrite.rewrite) tracks and returns a RewriterSet,
consisting of the evidence (a CoercionHole) for any Wanted equalities
used in rewriting.  Then GHC.Tc.Solver.Solve.rewriteEvidence and
GHC.Tc.Solver.Equality.rewriteEqEvidence add this RewriterSet to the
rewritten constraint's rewriter set.

Note [Prioritise Wanteds with empty RewriterSet]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When extending the WorkList, in GHC.Tc.Solver.InertSet.extendWorkListEq,
we prioritise constraints that have no rewriters. Here's why.

Consider this, which came up in T22793:
   inert: {}
   work list: [W] co_ayf : awq ~ awo
   work item: [W] co_ayb : awq ~ awp

 ==> {just put work item in inert set}
   inert: co_ayb : awq ~ awp
   work list: {}
   work: [W] co_ayf : awq ~ awo

 ==> {rewrite ayf with co_ayb}
   work list: {}
   inert: co_ayb : awq ~ awp
          co_aym{co_ayb} : awp ~ awo
      ^ rewritten by ayb

 ----- start again in simplify_loop in Solver.hs -----

   inert: {}
   work list: [W] co_ayb : awq ~ awp
   work: co_aym{co_ayb} : awp ~ awo

 ==> {add to inert set}
   inert: co_aym{co_ayb} : awp ~ awo
   work list: {}
   work: co_ayb : awq ~ awp

 ==> {rewrite co_ayb}
   inert: co_aym{co_ayb} : awp ~ awo
          co_ayp{co_aym} : awq ~ awo
   work list: {}

Now both wanteds have been rewritten by the other! This happened because
in our simplify_loop iteration, we happened to start with co_aym.

All would have been well if we'd started with the (not-rewritten) co_ayb
and gotten it into the inert set.

With that in mind, we /prioritise/ the work-list to put constraints with no
rewriters first. This prioritisation is done in GHC.Tc.Solver.InertSet.extendWorkListEq,
and extendWorkListEqs.

Wrinkles

(WRW1) Before checking for an empty RewriterSet, we zonk the RewriterSet,
  because some of those CoercionHoles may have been filled in since we last
  looked: see GHC.Tc.Solver.Monad.emitWork.

(WRW2) Despite the prioritisation, it is hard to be /certain/ that we can't end up
  in a situation where all of the Wanteds have rewritten each other.
In order to report /some/ error in this case, we simply report all the Wanteds. The user will get a perhaps-confusing error message, but they've written a confusing program! (T22707 and T22793 were close, but they do not exhibit this behaviour.) So belt and braces: see the `suppress` stuff in GHC.Tc.Errors.mkErrorItem. Note [Avoiding rewriting cycles] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [inert_eqs: the inert equalities] in GHC.Tc.Solver.InertSet describes the can-rewrite relation among CtFlavour/Role pairs, saying which constraints can rewrite which other constraints. It puts forth (R2): (R2) If f1 >= f, and f2 >= f, then either f1 >= f2 or f2 >= f1 The naive can-rewrite relation says that (Given, Representational) can rewrite (Wanted, Representational) and that (Wanted, Nominal) can rewrite (Wanted, Representational), but neither of (Given, Representational) and (Wanted, Nominal) can rewrite the other. This would violate (R2). See also Note [Why R2?] in GHC.Tc.Solver.InertSet. To keep R2, we do not allow (Wanted, Nominal) to rewrite (Wanted, Representational). This can, in theory, bite, in this scenario: type family F a data T a type role T nominal [G] F a ~N T a [W] F alpha ~N T alpha [W] F alpha ~R T a As written, this makes no progress, and GHC errors. But, if we allowed W/N to rewrite W/R, the first W could rewrite the second: [G] F a ~N T a [W] F alpha ~N T alpha [W] T alpha ~R T a Now we decompose the second W to get [W] alpha ~N a noting the role annotation on T. This causes (alpha := a), and then everything else unlocks. What to do? We could "decompose" nominal equalities into nominal-only ("NO") equalities and representational ones, where a NO equality rewrites only nominals. That is, when considering whether [W] F alpha ~N T alpha should rewrite [W] F alpha ~R T a, we could require splitting the first W into [W] F alpha ~NO T alpha, [W] F alpha ~R T alpha. Then, we use the R half of the split to rewrite the second W, and off we go. 
This splitting would allow the split-off R equality to be rewritten by other equalities, thus avoiding the problem in Note [Why R2?] in GHC.Tc.Solver.InertSet. However, note that I said that this bites in theory. That's because no known program actually gives rise to this scenario. A direct encoding ends up starting with [G] F a ~ T a [W] F alpha ~ T alpha [W] Coercible (F alpha) (T a) where ~ and Coercible denote lifted class constraints. The ~s quickly reduce to ~N: good. But the Coercible constraint gets rewritten to [W] Coercible (T alpha) (T a) by the first Wanted. This is because Coercible is a class, and arguments in class constraints use *nominal* rewriting, not the representational rewriting that is restricted due to (R2). Note that reordering the code doesn't help, because equalities (including lifted ones) are prioritized over Coercible. Thus, I (Richard E.) see no way to write a program that is rejected because of this infelicity. I have not proved it impossible, exactly, but my usual tricks have not yielded results. In the olden days, when we had Derived constraints, this Note was all about G/R and D/N both rewriting D/R. Back then, the code in typecheck/should_compile/T19665 really did get rejected. But now, according to the rewriting of the Coercible constraint, the program is accepted. -} eqCanRewrite :: EqRel -> EqRel -> Bool eqCanRewrite NomEq _ = True eqCanRewrite ReprEq ReprEq = True eqCanRewrite ReprEq NomEq = False eqCanRewriteFR :: CtFlavourRole -> CtFlavourRole -> Bool -- Can fr1 actually rewrite fr2? -- Very important function! 
-- See Note [eqCanRewrite] -- See Note [Wanteds rewrite Wanteds] -- See Note [Avoiding rewriting cycles] eqCanRewriteFR (Given, r1) (_, r2) = eqCanRewrite r1 r2 eqCanRewriteFR (Wanted, NomEq) (Wanted, ReprEq) = False eqCanRewriteFR (Wanted, r1) (Wanted, r2) = eqCanRewrite r1 r2 eqCanRewriteFR (Wanted, _) (Given, _) = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/CtLoc.hs0000644000000000000000000002201507346545000020720 0ustar0000000000000000module GHC.Tc.Types.CtLoc ( -- * CtLoc CtLoc(..), ctLocSpan, ctLocEnv, ctLocLevel, ctLocOrigin, ctLocTypeOrKind_maybe, toInvisibleLoc, ctLocDepth, bumpCtLocDepth, isGivenLoc, mkGivenLoc, mkKindEqLoc, setCtLocOrigin, updateCtLocOrigin, setCtLocEnv, setCtLocSpan, pprCtLoc, adjustCtLoc, adjustCtLocTyConBinder, -- * CtLocEnv CtLocEnv(..), getCtLocEnvLoc, setCtLocEnvLoc, setCtLocRealLoc, getCtLocEnvLvl, setCtLocEnvLvl, ctLocEnvInGeneratedCode, -- * SubGoalDepth SubGoalDepth, initialSubGoalDepth, maxSubGoalDepth, bumpSubGoalDepth, subGoalDepthExceeded ) where import GHC.Prelude import GHC.Tc.Types.BasicTypes import GHC.Tc.Types.ErrCtxt import GHC.Tc.Types.Origin import GHC.Tc.Utils.TcType import GHC.Types.SrcLoc import GHC.Types.Name.Reader import GHC.Types.Basic( IntWithInf, mkIntWithInf, TypeOrKind(..) ) import GHC.Core.TyCon( TyConBinder, isVisibleTyConBinder, isNamedTyConBinder ) import GHC.Utils.Outputable {- ********************************************************************* * * SubGoalDepth * * ********************************************************************* -} {- Note [SubGoalDepth] ~~~~~~~~~~~~~~~~~~~~~~ The 'SubGoalDepth' takes care of stopping the constraint solver from looping. The counter starts at zero and increases. It includes dictionary constraints, equality simplification, and type family reduction. (Why combine these? Because it's actually quite easy to mistake one for another, in sufficiently involved scenarios, like ConstraintKinds.) The flag -freduction-depth=n fixes the maximum level. 
* The counter includes the depth of type class instance declarations. Example: [W] d{7} : Eq [Int] That is d's dictionary-constraint depth is 7. If we use the instance $dfEqList :: Eq a => Eq [a] to simplify it, we get d{7} = $dfEqList d'{8} where d'{8} : Eq Int, and d' has depth 8. For civilised (decidable) instance declarations, each increase of depth removes a type constructor from the type, so the depth never gets big; i.e. is bounded by the structural depth of the type. * The counter also increments when resolving equalities involving type functions. Example: Assume we have a wanted at depth 7: [W] d{7} : F () ~ a If there is a type function equation "F () = Int", this would be rewritten to [W] d{8} : Int ~ a and remembered as having depth 8. Again, without UndecidableInstances, this counter is bounded, but without it can resolve things ad infinitum. Hence there is a maximum level. * Lastly, every time an equality is rewritten, the counter increases. Again, rewriting an equality constraint normally makes progress, but it's possible the "progress" is just the reduction of an infinitely-reducing type family. Hence we need to track the rewrites. When compiling a program requires a greater depth, then GHC recommends turning off this check entirely by setting -freduction-depth=0. This is because the exact number that works is highly variable, and is likely to change even between minor releases. Because this check is solely to prevent infinite compilation times, it seems safe to disable it when a user has ascertained that their program doesn't loop at the type level. 
-} -- | See Note [SubGoalDepth] newtype SubGoalDepth = SubGoalDepth Int deriving (Eq, Ord, Outputable) initialSubGoalDepth :: SubGoalDepth initialSubGoalDepth = SubGoalDepth 0 bumpSubGoalDepth :: SubGoalDepth -> SubGoalDepth bumpSubGoalDepth (SubGoalDepth n) = SubGoalDepth (n + 1) maxSubGoalDepth :: SubGoalDepth -> SubGoalDepth -> SubGoalDepth maxSubGoalDepth (SubGoalDepth n) (SubGoalDepth m) = SubGoalDepth (n `max` m) subGoalDepthExceeded :: IntWithInf -> SubGoalDepth -> Bool subGoalDepthExceeded reductionDepth (SubGoalDepth d) = mkIntWithInf d > reductionDepth {- ********************************************************************* * * CtLoc * * ************************************************************************ The 'CtLoc' gives information about where a constraint came from. This is important for decent error message reporting because dictionaries don't appear in the original source code. -} data CtLoc = CtLoc { ctl_origin :: CtOrigin , ctl_env :: CtLocEnv -- Everything we need to know about -- the context this Ct arose in. 
, ctl_t_or_k :: Maybe TypeOrKind -- OK if we're not sure , ctl_depth :: !SubGoalDepth } mkKindEqLoc :: TcType -> TcType -- original *types* being compared -> CtLoc -> CtLoc mkKindEqLoc s1 s2 ctloc | CtLoc { ctl_t_or_k = t_or_k, ctl_origin = origin } <- ctloc = ctloc { ctl_origin = KindEqOrigin s1 s2 origin t_or_k , ctl_t_or_k = Just KindLevel } adjustCtLocTyConBinder :: TyConBinder -> CtLoc -> CtLoc -- Adjust the CtLoc when decomposing a type constructor adjustCtLocTyConBinder tc_bndr loc = adjustCtLoc is_vis is_kind loc where is_vis = isVisibleTyConBinder tc_bndr is_kind = isNamedTyConBinder tc_bndr adjustCtLoc :: Bool -- True <=> A visible argument -> Bool -- True <=> A kind argument -> CtLoc -> CtLoc -- Adjust the CtLoc when decomposing a type constructor, application, etc adjustCtLoc is_vis is_kind loc = loc2 where loc1 | is_kind = toKindLoc loc | otherwise = loc loc2 | is_vis = loc1 | otherwise = toInvisibleLoc loc1 -- | Take a CtLoc and moves it to the kind level toKindLoc :: CtLoc -> CtLoc toKindLoc loc = loc { ctl_t_or_k = Just KindLevel } toInvisibleLoc :: CtLoc -> CtLoc toInvisibleLoc loc = updateCtLocOrigin loc toInvisibleOrigin mkGivenLoc :: TcLevel -> SkolemInfoAnon -> CtLocEnv -> CtLoc mkGivenLoc tclvl skol_info env = CtLoc { ctl_origin = GivenOrigin skol_info , ctl_env = setCtLocEnvLvl env tclvl , ctl_t_or_k = Nothing -- this only matters for error msgs , ctl_depth = initialSubGoalDepth } ctLocEnv :: CtLoc -> CtLocEnv ctLocEnv = ctl_env ctLocLevel :: CtLoc -> TcLevel ctLocLevel loc = getCtLocEnvLvl (ctLocEnv loc) ctLocDepth :: CtLoc -> SubGoalDepth ctLocDepth = ctl_depth ctLocOrigin :: CtLoc -> CtOrigin ctLocOrigin = ctl_origin ctLocSpan :: CtLoc -> RealSrcSpan ctLocSpan (CtLoc { ctl_env = lcl}) = getCtLocEnvLoc lcl ctLocTypeOrKind_maybe :: CtLoc -> Maybe TypeOrKind ctLocTypeOrKind_maybe = ctl_t_or_k setCtLocSpan :: CtLoc -> RealSrcSpan -> CtLoc setCtLocSpan ctl@(CtLoc { ctl_env = lcl }) loc = setCtLocEnv ctl (setCtLocRealLoc lcl loc) bumpCtLocDepth 
:: CtLoc -> CtLoc bumpCtLocDepth loc@(CtLoc { ctl_depth = d }) = loc { ctl_depth = bumpSubGoalDepth d } setCtLocOrigin :: CtLoc -> CtOrigin -> CtLoc setCtLocOrigin ctl orig = ctl { ctl_origin = orig } updateCtLocOrigin :: CtLoc -> (CtOrigin -> CtOrigin) -> CtLoc updateCtLocOrigin ctl@(CtLoc { ctl_origin = orig }) upd = ctl { ctl_origin = upd orig } setCtLocEnv :: CtLoc -> CtLocEnv -> CtLoc setCtLocEnv ctl env = ctl { ctl_env = env } isGivenLoc :: CtLoc -> Bool isGivenLoc loc = isGivenOrigin (ctLocOrigin loc) pprCtLoc :: CtLoc -> SDoc -- "arising from ... at ..." -- Not an instance of Outputable because of the "arising from" prefix pprCtLoc (CtLoc { ctl_origin = o, ctl_env = lcl}) = sep [ pprCtOrigin o , text "at" <+> ppr (getCtLocEnvLoc lcl)] {- ********************************************************************* * * CtLocEnv * * ********************************************************************* -} -- | Local typechecker environment for a constraint. -- -- Used to restore the environment of a constraint -- when reporting errors, see `setCtLocM`. -- -- See also 'TcLclCtxt'. 
data CtLocEnv = CtLocEnv { ctl_ctxt :: ![ErrCtxt] , ctl_loc :: !RealSrcSpan , ctl_bndrs :: !TcBinderStack , ctl_tclvl :: !TcLevel , ctl_in_gen_code :: !Bool , ctl_rdr :: !LocalRdrEnv } getCtLocEnvLoc :: CtLocEnv -> RealSrcSpan getCtLocEnvLoc = ctl_loc getCtLocEnvLvl :: CtLocEnv -> TcLevel getCtLocEnvLvl = ctl_tclvl setCtLocEnvLvl :: CtLocEnv -> TcLevel -> CtLocEnv setCtLocEnvLvl env lvl = env { ctl_tclvl = lvl } setCtLocRealLoc :: CtLocEnv -> RealSrcSpan -> CtLocEnv setCtLocRealLoc env ss = env { ctl_loc = ss } setCtLocEnvLoc :: CtLocEnv -> SrcSpan -> CtLocEnv -- See Note [Error contexts in generated code] -- for the ctl_in_gen_code manipulation setCtLocEnvLoc env (RealSrcSpan loc _) = env { ctl_loc = loc, ctl_in_gen_code = False } setCtLocEnvLoc env loc@(UnhelpfulSpan _) | isGeneratedSrcSpan loc = env { ctl_in_gen_code = True } | otherwise = env ctLocEnvInGeneratedCode :: CtLocEnv -> Bool ctLocEnvInGeneratedCode = ctl_in_gen_code ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/ErrCtxt.hs0000644000000000000000000000112307346545000021304 0ustar0000000000000000module GHC.Tc.Types.ErrCtxt where import GHC.Prelude import GHC.Types.Var.Env import GHC.Tc.Zonk.Monad (ZonkM) import GHC.Utils.Outputable -- | Additional context to include in an error message, e.g. -- "In the type signature ...", "In the ambiguity check for ...", etc. 
type ErrCtxt = (Bool, TidyEnv -> ZonkM (TidyEnv, SDoc)) -- Monadic so that we have a chance -- to deal with bound type variables just before error -- message construction -- Bool: True <=> this is a landmark context; do not -- discard it when trimming for display ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/Evidence.hs0000644000000000000000000011402007346545000021434 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE LambdaCase #-} module GHC.Tc.Types.Evidence ( -- * HsWrapper HsWrapper(..), (<.>), mkWpTyApps, mkWpEvApps, mkWpEvVarApps, mkWpTyLams, mkWpForAllCast, mkWpEvLams, mkWpLet, mkWpFun, mkWpCastN, mkWpCastR, mkWpEta, collectHsWrapBinders, idHsWrapper, isIdHsWrapper, pprHsWrapper, hsWrapDictBinders, -- * Evidence bindings TcEvBinds(..), EvBindsVar(..), EvBindMap(..), emptyEvBindMap, extendEvBinds, lookupEvBind, evBindMapBinds, foldEvBindMap, nonDetStrictFoldEvBindMap, filterEvBindMap, isEmptyEvBindMap, evBindMapToVarSet, varSetMinusEvBindMap, EvBindInfo(..), EvBind(..), emptyTcEvBinds, isEmptyTcEvBinds, mkGivenEvBind, mkWantedEvBind, evBindVar, isCoEvBindsVar, -- * EvTerm (already a CoreExpr) EvTerm(..), EvExpr, evId, evCoercion, evCast, evDFunApp, evDataConApp, evSelector, mkEvCast, evVarsOfTerm, mkEvScSelectors, evTypeable, findNeededEvVars, evTermCoercion, evTermCoercion_maybe, EvCallStack(..), EvTypeable(..), -- * HoleExprRef HoleExprRef(..), -- * TcCoercion TcCoercion, TcCoercionR, TcCoercionN, TcCoercionP, CoercionHole, TcMCoercion, TcMCoercionN, TcMCoercionR, MultiplicityCheckCoercions, Role(..), LeftOrRight(..), pickLR, maybeSymCo, unwrapIP, wrapIP, -- * QuoteWrapper QuoteWrapper(..), applyQuoteWrapper, quoteWrapperTyVarTy ) where import GHC.Prelude import GHC.Types.Unique.DFM import GHC.Types.Unique.FM import GHC.Types.Var import GHC.Types.Id( idScaledType ) import GHC.Core.Coercion.Axiom import GHC.Core.Coercion import GHC.Core.Ppr () -- Instance OutputableBndr TyVar import 
GHC.Tc.Utils.TcType import GHC.Core.Type import GHC.Core.TyCon import GHC.Core.DataCon ( DataCon, dataConWrapId ) import GHC.Builtin.Names import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Core.Predicate import GHC.Types.Basic import GHC.Core import GHC.Core.Class (Class, classSCSelId ) import GHC.Core.FVs ( exprSomeFreeVars ) import GHC.Core.InstEnv ( CanonicalEvidence(..) ) import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Outputable import GHC.Data.Bag import GHC.Data.FastString import qualified Data.Data as Data import GHC.Types.SrcLoc import Data.IORef( IORef ) import GHC.Types.Unique.Set import GHC.Core.Multiplicity import qualified Data.Semigroup as S {- Note [TcCoercions] ~~~~~~~~~~~~~~~~~~ | TcCoercions are a hack used by the typechecker. Normally, Coercions have free variables of type (a ~# b): we call these CoVars. However, the type checker passes around equality evidence (boxed up) at type (a ~ b). An TcCoercion is simply a Coercion whose free variables have may be either boxed or unboxed. After we are done with typechecking the desugarer finds the boxed free variables, unboxes them, and creates a resulting real Coercion with kosher free variables. -} type TcCoercion = Coercion type TcCoercionN = CoercionN -- A Nominal coercion ~N type TcCoercionR = CoercionR -- A Representational coercion ~R type TcCoercionP = CoercionP -- a phantom coercion type TcMCoercion = MCoercion type TcMCoercionN = MCoercionN -- nominal type TcMCoercionR = MCoercionR -- representational type MultiplicityCheckCoercions = [TcCoercion] -- Coercions which must all be reflexivity after zonking. -- See Note [Coercions returned from tcSubMult] in GHC.Tc.Utils.Unify. 
-- | If a 'SwapFlag' is 'IsSwapped', flip the orientation of a coercion maybeSymCo :: SwapFlag -> TcCoercion -> TcCoercion maybeSymCo IsSwapped co = mkSymCo co maybeSymCo NotSwapped co = co {- %************************************************************************ %* * HsWrapper * * ************************************************************************ -} -- We write wrap :: t1 ~> t2 -- if wrap[ e::t1 ] :: t2 data HsWrapper = WpHole -- The identity coercion | WpCompose HsWrapper HsWrapper -- (wrap1 `WpCompose` wrap2)[e] = wrap1[ wrap2[ e ]] -- -- Hence (\a. []) `WpCompose` (\b. []) = (\a b. []) -- But ([] a) `WpCompose` ([] b) = ([] b a) -- -- If wrap1 :: t2 ~> t3 -- wrap2 :: t1 ~> t2 --- Then (wrap1 `WpCompose` wrap2) :: t1 ~> t3 | WpFun HsWrapper HsWrapper (Scaled TcTypeFRR) -- (WpFun wrap1 wrap2 (w, t1))[e] = \(x:_w exp_arg). wrap2[ e wrap1[x] ] -- So note that if e :: act_arg -> act_res -- wrap1 :: exp_arg ~> act_arg -- wrap2 :: act_res ~> exp_res -- then WpFun wrap1 wrap2 : (act_arg -> arg_res) ~> (exp_arg -> exp_res) -- This isn't the same as for mkFunCo, but it has to be this way -- because we can't use 'sym' to flip around these HsWrappers -- The TcType is the "from" type of the first wrapper; -- it always a Type, not a Constraint -- -- NB: a WpFun is always for a (->) function arrow -- -- Use 'mkWpFun' to construct such a wrapper. | WpCast TcCoercionR -- A cast: [] `cast` co -- Guaranteed not the identity coercion -- At role Representational -- Evidence abstraction and application -- (both dictionaries and coercions) -- Both WpEvLam and WpEvApp abstract and apply values -- of kind CONSTRAINT rep | WpEvLam EvVar -- \d. [] the 'd' is an evidence variable | WpEvApp EvTerm -- [] d the 'd' is evidence for a constraint -- Kind and Type abstraction and application | WpTyLam TyVar -- \a. 
[] the 'a' is a type/kind variable (not coercion var) | WpTyApp KindOrType -- [] t the 't' is a type (not coercion) | WpLet TcEvBinds -- Non-empty (or possibly non-empty) evidence bindings, -- so that the identity coercion is always exactly WpHole | WpMultCoercion Coercion -- Require that a Coercion be reflexive; otherwise, -- error in the desugarer. See GHC.Tc.Utils.Unify -- Note [Coercions returned from tcSubMult] deriving Data.Data -- | The Semigroup instance is a bit fishy, since @WpCompose@, as a data -- constructor, is "syntactic" and not associative. Concretely, if @a@, @b@, -- and @c@ aren't @WpHole@: -- -- > (a <> b) <> c ?= a <> (b <> c) -- -- ==> -- -- > (a `WpCompose` b) `WpCompose` c /= @ a `WpCompose` (b `WpCompose` c) -- -- However these two associations are are "semantically equal" in the sense -- that they produce equal functions when passed to -- @GHC.HsToCore.Binds.dsHsWrapper@. instance S.Semigroup HsWrapper where (<>) = (<.>) instance Monoid HsWrapper where mempty = WpHole (<.>) :: HsWrapper -> HsWrapper -> HsWrapper WpHole <.> c = c c <.> WpHole = c c1 <.> c2 = c1 `WpCompose` c2 -- | Smart constructor to create a 'WpFun' 'HsWrapper'. -- -- PRECONDITION: the "from" type of the first wrapper must have a syntactically -- fixed RuntimeRep (see Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete). mkWpFun :: HsWrapper -> HsWrapper -> Scaled TcTypeFRR -- ^ the "from" type of the first wrapper -- MUST have a fixed RuntimeRep -> TcType -- ^ Either "from" type or "to" type of the second wrapper -- (used only when the second wrapper is the identity) -> HsWrapper -- NB: we can't check that the argument type has a fixed RuntimeRep with an assertion, -- because of [Wrinkle: Typed Template Haskell] in Note [hasFixedRuntimeRep] -- in GHC.Tc.Utils.Concrete. 
mkWpFun WpHole WpHole _ _ = WpHole mkWpFun WpHole (WpCast co2) (Scaled w t1) _ = WpCast (mk_wp_fun_co w (mkRepReflCo t1) co2) mkWpFun (WpCast co1) WpHole (Scaled w _) t2 = WpCast (mk_wp_fun_co w (mkSymCo co1) (mkRepReflCo t2)) mkWpFun (WpCast co1) (WpCast co2) (Scaled w _) _ = WpCast (mk_wp_fun_co w (mkSymCo co1) co2) mkWpFun co1 co2 t1 _ = WpFun co1 co2 t1 mkWpEta :: [Id] -> HsWrapper -> HsWrapper -- (mkWpEta [x1, x2] wrap) [e] -- = \x1. \x2. wrap[e x1 x2] -- Just generates a bunch of WpFuns mkWpEta xs wrap = foldr eta_one wrap xs where eta_one x wrap = WpFun idHsWrapper wrap (idScaledType x) mk_wp_fun_co :: Mult -> TcCoercionR -> TcCoercionR -> TcCoercionR mk_wp_fun_co mult arg_co res_co = mkNakedFunCo Representational FTF_T_T (multToCo mult) arg_co res_co -- FTF_T_T: WpFun is always (->) mkWpCastR :: TcCoercionR -> HsWrapper mkWpCastR co | isReflCo co = WpHole | otherwise = assertPpr (coercionRole co == Representational) (ppr co) $ WpCast co mkWpCastN :: TcCoercionN -> HsWrapper mkWpCastN co | isReflCo co = WpHole | otherwise = assertPpr (coercionRole co == Nominal) (ppr co) $ WpCast (mkSubCo co) -- The mkTcSubCo converts Nominal to Representational mkWpTyApps :: [Type] -> HsWrapper mkWpTyApps tys = mk_co_app_fn WpTyApp tys mkWpEvApps :: [EvTerm] -> HsWrapper mkWpEvApps args = mk_co_app_fn WpEvApp args mkWpEvVarApps :: [EvVar] -> HsWrapper mkWpEvVarApps vs = mk_co_app_fn WpEvApp (map (EvExpr . evId) vs) mkWpTyLams :: [TyVar] -> HsWrapper mkWpTyLams ids = mk_co_lam_fn WpTyLam ids -- mkWpForAllCast [tv{vis}] constructs a cast -- forall tv. res ~R# forall tv{vis} res`. -- See Note [Required foralls in Core] in GHC.Core.TyCo.Rep -- -- It's a no-op if all binders are invisible; -- but in that case we refrain from calling it. 
mkWpForAllCast :: [ForAllTyBinder] -> Type -> HsWrapper mkWpForAllCast bndrs res_ty = mkWpCastR (go bndrs) where go [] = mkRepReflCo res_ty go (Bndr tv vis : bs) = mkForAllCo tv coreTyLamForAllTyFlag vis kind_co (go bs) where kind_co = mkNomReflCo (varType tv) mkWpEvLams :: [Var] -> HsWrapper mkWpEvLams ids = mk_co_lam_fn WpEvLam ids mkWpLet :: TcEvBinds -> HsWrapper -- This no-op is a quite a common case mkWpLet (EvBinds b) | isEmptyBag b = WpHole mkWpLet ev_binds = WpLet ev_binds mk_co_lam_fn :: (a -> HsWrapper) -> [a] -> HsWrapper mk_co_lam_fn f as = foldr (\x wrap -> f x <.> wrap) WpHole as mk_co_app_fn :: (a -> HsWrapper) -> [a] -> HsWrapper -- For applications, the *first* argument must -- come *last* in the composition sequence mk_co_app_fn f as = foldr (\x wrap -> wrap <.> f x) WpHole as idHsWrapper :: HsWrapper idHsWrapper = WpHole isIdHsWrapper :: HsWrapper -> Bool isIdHsWrapper WpHole = True isIdHsWrapper _ = False hsWrapDictBinders :: HsWrapper -> Bag DictId -- ^ Identifies the /lambda-bound/ dictionaries of an 'HsWrapper'. This is used -- (only) to allow the pattern-match overlap checker to know what Given -- dictionaries are in scope. -- -- We specifically do not collect dictionaries bound in a 'WpLet'. These are -- either superclasses of lambda-bound ones, or (extremely numerous) results of -- binding Wanted dictionaries. We definitely don't want all those cluttering -- up the Given dictionaries for pattern-match overlap checking! 
hsWrapDictBinders wrap = go wrap where go (WpEvLam dict_id) = unitBag dict_id go (w1 `WpCompose` w2) = go w1 `unionBags` go w2 go (WpFun _ w _) = go w go WpHole = emptyBag go (WpCast {}) = emptyBag go (WpEvApp {}) = emptyBag go (WpTyLam {}) = emptyBag go (WpTyApp {}) = emptyBag go (WpLet {}) = emptyBag go (WpMultCoercion {}) = emptyBag collectHsWrapBinders :: HsWrapper -> ([Var], HsWrapper) -- Collect the outer lambda binders of a HsWrapper, -- stopping as soon as you get to a non-lambda binder collectHsWrapBinders wrap = go wrap [] where -- go w ws = collectHsWrapBinders (w <.> w1 <.> ... <.> wn) go :: HsWrapper -> [HsWrapper] -> ([Var], HsWrapper) go (WpEvLam v) wraps = add_lam v (gos wraps) go (WpTyLam v) wraps = add_lam v (gos wraps) go (WpCompose w1 w2) wraps = go w1 (w2:wraps) go wrap wraps = ([], foldl' (<.>) wrap wraps) gos [] = ([], WpHole) gos (w:ws) = go w ws add_lam v (vs,w) = (v:vs, w) {- ************************************************************************ * * Evidence bindings * * ************************************************************************ -} data TcEvBinds = TcEvBinds -- Mutable evidence bindings EvBindsVar -- Mutable because they are updated "later" -- when an implication constraint is solved | EvBinds -- Immutable after zonking (Bag EvBind) data EvBindsVar = EvBindsVar { ebv_uniq :: Unique, -- The Unique is for debug printing only ebv_binds :: IORef EvBindMap, -- The main payload: the value-level evidence bindings -- (dictionaries etc) -- Some Given, some Wanted ebv_tcvs :: IORef CoVarSet -- The free Given coercion vars needed by Wanted coercions that -- are solved by filling in their HoleDest in-place. 
Since they -- don't appear in ebv_binds, we keep track of their free -- variables so that we can report unused given constraints -- See Note [Tracking redundant constraints] in GHC.Tc.Solver } | CoEvBindsVar { -- See Note [Coercion evidence only] -- See above for comments on ebv_uniq, ebv_tcvs ebv_uniq :: Unique, ebv_tcvs :: IORef CoVarSet } instance Data.Data TcEvBinds where -- Placeholder; we can't travers into TcEvBinds toConstr _ = abstractConstr "TcEvBinds" gunfold _ _ = error "gunfold" dataTypeOf _ = Data.mkNoRepType "TcEvBinds" {- Note [Coercion evidence only] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Class constraints etc give rise to /term/ bindings for evidence, and we have nowhere to put term bindings in /types/. So in some places we use CoEvBindsVar (see newCoTcEvBinds) to signal that no term-level evidence bindings are allowed. Notably (): - Places in types where we are solving kind constraints (all of which are equalities); see solveEqualities - When unifying forall-types -} isCoEvBindsVar :: EvBindsVar -> Bool isCoEvBindsVar (CoEvBindsVar {}) = True isCoEvBindsVar (EvBindsVar {}) = False ----------------- newtype EvBindMap = EvBindMap { ev_bind_varenv :: DVarEnv EvBind } -- Map from evidence variables to evidence terms -- We use @DVarEnv@ here to get deterministic ordering when we -- turn it into a Bag. -- If we don't do that, when we generate let bindings for -- dictionaries in dsTcEvBinds they will be generated in random -- order. -- -- For example: -- -- let $dEq = GHC.Classes.$fEqInt in -- let $$dNum = GHC.Num.$fNumInt in ... -- -- vs -- -- let $dNum = GHC.Num.$fNumInt in -- let $dEq = GHC.Classes.$fEqInt in ... -- -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why -- @UniqFM@ can lead to nondeterministic order. 
emptyEvBindMap :: EvBindMap emptyEvBindMap = EvBindMap { ev_bind_varenv = emptyDVarEnv } extendEvBinds :: EvBindMap -> EvBind -> EvBindMap extendEvBinds bs ev_bind = EvBindMap { ev_bind_varenv = extendDVarEnv (ev_bind_varenv bs) (eb_lhs ev_bind) ev_bind } isEmptyEvBindMap :: EvBindMap -> Bool isEmptyEvBindMap (EvBindMap m) = isEmptyDVarEnv m lookupEvBind :: EvBindMap -> EvVar -> Maybe EvBind lookupEvBind bs = lookupDVarEnv (ev_bind_varenv bs) evBindMapBinds :: EvBindMap -> Bag EvBind evBindMapBinds = foldEvBindMap consBag emptyBag foldEvBindMap :: (EvBind -> a -> a) -> a -> EvBindMap -> a foldEvBindMap k z bs = foldDVarEnv k z (ev_bind_varenv bs) -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldEvBindMap :: (EvBind -> a -> a) -> a -> EvBindMap -> a nonDetStrictFoldEvBindMap k z bs = nonDetStrictFoldDVarEnv k z (ev_bind_varenv bs) filterEvBindMap :: (EvBind -> Bool) -> EvBindMap -> EvBindMap filterEvBindMap k (EvBindMap { ev_bind_varenv = env }) = EvBindMap { ev_bind_varenv = filterDVarEnv k env } evBindMapToVarSet :: EvBindMap -> VarSet evBindMapToVarSet (EvBindMap dve) = unsafeUFMToUniqSet (mapUFM evBindVar (udfmToUfm dve)) varSetMinusEvBindMap :: VarSet -> EvBindMap -> VarSet varSetMinusEvBindMap vs (EvBindMap dve) = vs `uniqSetMinusUDFM` dve instance Outputable EvBindMap where ppr (EvBindMap m) = ppr m data EvBindInfo = EvBindGiven { -- See Note [Tracking redundant constraints] in GHC.Tc.Solver } | EvBindWanted { ebi_canonical :: CanonicalEvidence -- See Note [Desugaring non-canonical evidence] } ----------------- -- All evidence is bound by EvBinds; no side effects data EvBind = EvBind { eb_lhs :: EvVar , eb_rhs :: EvTerm , eb_info :: EvBindInfo } evBindVar :: EvBind -> EvVar evBindVar = eb_lhs mkWantedEvBind :: EvVar -> CanonicalEvidence -> EvTerm -> EvBind mkWantedEvBind ev c tm = EvBind { eb_info = EvBindWanted c, eb_lhs = ev, eb_rhs 
= tm } -- EvTypeable are never given, so we can work with EvExpr here instead of EvTerm mkGivenEvBind :: EvVar -> EvTerm -> EvBind mkGivenEvBind ev tm = EvBind { eb_info = EvBindGiven, eb_lhs = ev, eb_rhs = tm } -- An EvTerm is, conceptually, a CoreExpr that implements the constraint. -- Unfortunately, we cannot just do -- type EvTerm = CoreExpr -- Because of staging problems issues around EvTypeable data EvTerm = EvExpr EvExpr | EvTypeable Type EvTypeable -- Dictionary for (Typeable ty) | EvFun -- /\as \ds. let binds in v { et_tvs :: [TyVar] , et_given :: [EvVar] , et_binds :: TcEvBinds -- This field is why we need an EvFun -- constructor, and can't just use EvExpr , et_body :: EvVar } deriving Data.Data type EvExpr = CoreExpr -- An EvTerm is (usually) constructed by any of the constructors here -- and those more complicated ones who were moved to module GHC.Tc.Types.EvTerm -- | Any sort of evidence Id, including coercions evId :: EvId -> EvExpr evId = Var -- coercion bindings -- See Note [Coercion evidence terms] evCoercion :: TcCoercion -> EvTerm evCoercion co = EvExpr (Coercion co) -- | d |> co evCast :: EvExpr -> TcCoercion -> EvTerm evCast et tc | isReflCo tc = EvExpr et | otherwise = EvExpr (Cast et tc) -- Dictionary instance application evDFunApp :: DFunId -> [Type] -> [EvExpr] -> EvTerm evDFunApp df tys ets = EvExpr $ Var df `mkTyApps` tys `mkApps` ets evDataConApp :: DataCon -> [Type] -> [EvExpr] -> EvTerm evDataConApp dc tys ets = evDFunApp (dataConWrapId dc) tys ets -- Selector id plus the types at which it -- should be instantiated, used for HasField -- dictionaries; see Note [HasField instances] -- in TcInterface evSelector :: Id -> [Type] -> [EvExpr] -> EvExpr evSelector sel_id tys tms = Var sel_id `mkTyApps` tys `mkApps` tms -- Dictionary for (Typeable ty) evTypeable :: Type -> EvTypeable -> EvTerm evTypeable = EvTypeable -- | Instructions on how to make a 'Typeable' dictionary. 
-- See Note [Typeable evidence terms] data EvTypeable = EvTypeableTyCon TyCon [EvTerm] -- ^ Dictionary for @Typeable T@ where @T@ is a type constructor with all of -- its kind variables saturated. The @[EvTerm]@ is @Typeable@ evidence for -- the applied kinds.. | EvTypeableTyApp EvTerm EvTerm -- ^ Dictionary for @Typeable (s t)@, -- given a dictionaries for @s@ and @t@. | EvTypeableTrFun EvTerm EvTerm EvTerm -- ^ Dictionary for @Typeable (s % w -> t)@, -- given a dictionaries for @w@, @s@, and @t@. | EvTypeableTyLit EvTerm -- ^ Dictionary for a type literal, -- e.g. @Typeable "foo"@ or @Typeable 3@ -- The 'EvTerm' is evidence of, e.g., @KnownNat 3@ -- (see #10348) deriving Data.Data -- | Evidence for @CallStack@ implicit parameters. data EvCallStack -- See Note [Overview of implicit CallStacks] = EvCsEmpty | EvCsPushCall FastString -- Usually the name of the function being called -- but can also be "the literal 42" -- or "an if-then-else expression", etc RealSrcSpan -- Location of the call EvExpr -- Rest of the stack -- ^ @EvCsPushCall origin loc stk@ represents a call from @origin@, -- occurring at @loc@, in a calling context @stk@. 
deriving Data.Data {- ************************************************************************ * * Evidence for holes * * ************************************************************************ -} -- | Where to store evidence for expression holes -- See Note [Holes] in GHC.Tc.Types.Constraint data HoleExprRef = HER (IORef EvTerm) -- ^ where to write the erroring expression TcType -- ^ expected type of that expression Unique -- ^ for debug output only instance Outputable HoleExprRef where ppr (HER _ _ u) = ppr u instance Data.Data HoleExprRef where -- Placeholder; we can't traverse into HoleExprRef toConstr _ = abstractConstr "HoleExprRef" gunfold _ _ = error "gunfold" dataTypeOf _ = Data.mkNoRepType "HoleExprRef" {- Note [Typeable evidence terms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The EvTypeable data type looks isomorphic to Type, but the EvTerms inside can be EvIds. Eg f :: forall a. Typeable a => a -> TypeRep f x = typeRep (undefined :: Proxy [a]) Here for the (Typeable [a]) dictionary passed to typeRep we make evidence dl :: Typeable [a] = EvTypeable [a] (EvTypeableTyApp (EvTypeableTyCon []) (EvId d)) where d :: Typeable a is the lambda-bound dictionary passed into f. Note [Coercion evidence terms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A "coercion evidence term" takes one of these forms co_tm ::= EvId v where v :: t1 ~# t2 | EvCoercion co | EvCast co_tm co We do quite often need to get a TcCoercion from an EvTerm; see 'evTermCoercion'. INVARIANT: The evidence for any constraint with type (t1 ~# t2) is a coercion evidence term. Consider for example [G] d :: F Int a If we have ax7 a :: F Int a ~ (a ~ Bool) then we do NOT generate the constraint [G] (d |> ax7 a) :: a ~ Bool because that does not satisfy the invariant (d is not a coercion variable). 
Instead we make a binding g1 :: a~Bool = g |> ax7 a and the constraint [G] g1 :: a~Bool See #7238 and Note [Bind new Givens immediately] in GHC.Tc.Types.Constraint Note [EvBinds/EvTerm] ~~~~~~~~~~~~~~~~~~~~~ How evidence is created and updated. Bindings for dictionaries, and coercions and implicit parameters are carried around in TcEvBinds which during constraint generation and simplification is always of the form (TcEvBinds ref). After constraint simplification is finished it will be transformed to t an (EvBinds ev_bag). Evidence for coercions *SHOULD* be filled in using the TcEvBinds However, all EvVars that correspond to *wanted* coercion terms in an EvBind must be mutable variables so that they can be readily inlined (by zonking) after constraint simplification is finished. Conclusion: a new wanted coercion variable should be made mutable. [Notice though that evidence variables that bind coercion terms from super classes will be "given" and hence rigid] Note [Overview of implicit CallStacks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (See https://gitlab.haskell.org/ghc/ghc/wikis/explicit-call-stack/implicit-locations) The goal of CallStack evidence terms is to reify locations in the program source as runtime values, without any support from the RTS. We accomplish this by assigning a special meaning to constraints of type GHC.Stack.Types.HasCallStack, an alias type HasCallStack = (?callStack :: CallStack) Implicit parameters of type GHC.Stack.Types.CallStack (the name is not important) are solved in three steps: 1. Explicit, user-written occurrences of `?stk :: CallStack` which have IPOccOrigin, are solved directly from the given IP, just like a regular IP; see GHC.Tc.Solver.Dict.tryInertDicts. For example, the occurrence of `?stk` in error :: (?stk :: CallStack) => String -> a error s = raise (ErrorCall (s ++ prettyCallStack ?stk)) will be solved for the `?stk` in `error`s context as before. 2. 
In a function call, instead of simply passing the given IP, we first append the current call-site to it. For example, consider a call to the callstack-aware `error` above. foo :: (?stk :: CallStack) => a foo = error "undefined!" Here we want to take the given `?stk` and append the current call-site, before passing it to `error`. In essence, we want to rewrite `foo "undefined!"` to let ?stk = pushCallStack ?stk in foo "undefined!" We achieve this as follows: * At a call of foo :: (?stk :: CallStack) => blah we emit a Wanted [W] d1 : IP "stk" CallStack with CtOrigin = OccurrenceOf "foo" * We /solve/ this constraint, in GHC.Tc.Solver.Dict.canDictNC by emitting a NEW Wanted [W] d2 :: IP "stk" CallStack with CtOrigin = IPOccOrigin and solve d1 = EvCsPushCall "foo" (EvId d1) * The new Wanted, for `d2` will be solved per rule (1), ie as a regular IP. 3. We use the predicate isPushCallStackOrigin to identify whether we want to do (1) solve directly, or (2) push and then solve directly. Key point (see #19918): the CtOrigin where we want to push an item on the call stack can include IfThenElseOrigin etc, when RebindableSyntax is involved. See the defn of fun_orig in GHC.Tc.Gen.App.tcInstFun; it is this CtOrigin that is pinned on the constraints generated by functions in the "expansion" for rebindable syntax. c.f. GHC.Rename.Expr Note [Handling overloaded and rebindable constructs] 4. We default any insoluble CallStacks to the empty CallStack. Suppose `undefined` did not request a CallStack, ie undefinedNoStk :: a undefinedNoStk = error "undefined!" Under the usual IP rules, the new wanted from rule (2) would be insoluble as there's no given IP from which to solve it, so we would get an "unbound implicit parameter" error. 
We don't ever want to emit an insoluble CallStack IP, so we add a defaulting pass to default any remaining wanted CallStacks to the empty CallStack with the evidence term EvCsEmpty (see GHC.Tc.Solver.simplifyTopWanteds and GHC.Tc.Solver.defaultCallStacks) This provides a lightweight mechanism for building up call-stacks explicitly, but is notably limited by the fact that the stack will stop at the first function whose type does not include a CallStack IP. For example, using the above definition of `undefined`: head :: [a] -> a head [] = undefined head (x:_) = x g = head [] the resulting CallStack will include the call to `undefined` in `head` and the call to `error` in `undefined`, but *not* the call to `head` in `g`, because `head` did not explicitly request a CallStack. Important Details: - GHC should NEVER report an insoluble CallStack constraint. - GHC should NEVER infer a CallStack constraint unless one was requested with a partial type signature (See GHC.Tc.Solver..pickQuantifiablePreds). - A CallStack (defined in GHC.Stack.Types) is a [(String, SrcLoc)], where the String is the name of the binder that is used at the SrcLoc. SrcLoc is also defined in GHC.Stack.Types and contains the package/module/file name, as well as the full source-span. Both CallStack and SrcLoc are kept abstract so only GHC can construct new values. - We will automatically solve any wanted CallStack regardless of the name of the IP, i.e. f = show (?stk :: CallStack) g = show (?loc :: CallStack) are both valid. However, we will only push new SrcLocs onto existing CallStacks when the IP names match, e.g. in head :: (?loc :: CallStack) => [a] -> a head [] = error (show (?stk :: CallStack)) the printed CallStack will NOT include head's call-site. This reflects the standard scoping rules of implicit-parameters. - An EvCallStack term desugars to a CoreExpr of type `IP "some str" CallStack`. 
The desugarer will need to unwrap the IP newtype before pushing a new call-site onto a given stack (See GHC.HsToCore.Binds.dsEvCallStack) - When we emit a new wanted CallStack from rule (2) we set its origin to `IPOccOrigin ip_name` instead of the original `OccurrenceOf func` (see GHC.Tc.Solver.Dict.tryInertDicts). This is a bit shady, but is how we ensure that the new wanted is solved like a regular IP. -} mkEvCast :: EvExpr -> TcCoercion -> EvTerm mkEvCast ev lco | assertPpr (coercionRole lco == Representational) (vcat [text "Coercion of wrong role passed to mkEvCast:", ppr ev, ppr lco]) $ isReflCo lco = EvExpr ev | otherwise = evCast ev lco mkEvScSelectors -- Assume class (..., D ty, ...) => C a b :: Class -> [TcType] -- C ty1 ty2 -> [(TcPredType, -- D ty[ty1/a,ty2/b] EvExpr) -- :: C ty1 ty2 -> D ty[ty1/a,ty2/b] ] mkEvScSelectors cls tys = zipWith mk_pr (immSuperClasses cls tys) [0..] where mk_pr pred i = (pred, Var sc_sel_id `mkTyApps` tys) where sc_sel_id = classSCSelId cls i -- Zero-indexed emptyTcEvBinds :: TcEvBinds emptyTcEvBinds = EvBinds emptyBag isEmptyTcEvBinds :: TcEvBinds -> Bool isEmptyTcEvBinds (EvBinds b) = isEmptyBag b isEmptyTcEvBinds (TcEvBinds {}) = panic "isEmptyTcEvBinds" evTermCoercion_maybe :: EvTerm -> Maybe TcCoercion -- Applied only to EvTerms of type (s~t) -- See Note [Coercion evidence terms] evTermCoercion_maybe ev_term | EvExpr e <- ev_term = go e | otherwise = Nothing where go :: EvExpr -> Maybe TcCoercion go (Var v) = return (mkCoVarCo v) go (Coercion co) = return co go (Cast tm co) = do { co' <- go tm ; return (mkCoCast co' co) } go _ = Nothing evTermCoercion :: EvTerm -> TcCoercion evTermCoercion tm = case evTermCoercion_maybe tm of Just co -> co Nothing -> pprPanic "evTermCoercion" (ppr tm) {- ********************************************************************* * * Free variables * * ********************************************************************* -} findNeededEvVars :: EvBindMap -> VarSet -> VarSet -- Find all the Given 
evidence needed by seeds, -- looking transitively through binds findNeededEvVars ev_binds seeds = transCloVarSet also_needs seeds where also_needs :: VarSet -> VarSet also_needs needs = nonDetStrictFoldUniqSet add emptyVarSet needs -- It's OK to use a non-deterministic fold here because we immediately -- forget about the ordering by creating a set add :: Var -> VarSet -> VarSet add v needs | Just ev_bind <- lookupEvBind ev_binds v , EvBind { eb_info = EvBindGiven, eb_rhs = rhs } <- ev_bind = evVarsOfTerm rhs `unionVarSet` needs | otherwise = needs evVarsOfTerm :: EvTerm -> VarSet evVarsOfTerm (EvExpr e) = exprSomeFreeVars isEvVar e evVarsOfTerm (EvTypeable _ ev) = evVarsOfTypeable ev evVarsOfTerm (EvFun {}) = emptyVarSet -- See Note [Free vars of EvFun] evVarsOfTerms :: [EvTerm] -> VarSet evVarsOfTerms = mapUnionVarSet evVarsOfTerm evVarsOfTypeable :: EvTypeable -> VarSet evVarsOfTypeable ev = case ev of EvTypeableTyCon _ e -> mapUnionVarSet evVarsOfTerm e EvTypeableTyApp e1 e2 -> evVarsOfTerms [e1,e2] EvTypeableTrFun em e1 e2 -> evVarsOfTerms [em,e1,e2] EvTypeableTyLit e -> evVarsOfTerm e {- Note [Free vars of EvFun] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Finding the free vars of an EvFun is made tricky by the fact the bindings et_binds may be a mutable variable. Fortunately, we can just squeeze by. Here's how. * evVarsOfTerm is used only by GHC.Tc.Solver.neededEvVars. * Each EvBindsVar in an et_binds field of an EvFun is /also/ in the ic_binds field of an Implication * So we can track usage via the processing for that implication, (see Note [Tracking redundant constraints] in GHC.Tc.Solver). We can ignore usage from the EvFun altogether. 
************************************************************************ * * Pretty printing * * ************************************************************************ -} instance Outputable HsWrapper where ppr co_fn = pprHsWrapper co_fn (no_parens (text "<>")) pprHsWrapper :: HsWrapper -> (Bool -> SDoc) -> SDoc -- With -fprint-typechecker-elaboration, print the wrapper -- otherwise just print what's inside -- The pp_thing_inside function takes Bool to say whether -- it's in a position that needs parens for a non-atomic thing pprHsWrapper wrap pp_thing_inside = sdocOption sdocPrintTypecheckerElaboration $ \case True -> help pp_thing_inside wrap False False -> pp_thing_inside False where help :: (Bool -> SDoc) -> HsWrapper -> Bool -> SDoc -- True <=> appears in function application position -- False <=> appears as body of let or lambda help it WpHole = it help it (WpCompose f1 f2) = help (help it f2) f1 help it (WpFun f1 f2 (Scaled w t1)) = add_parens $ text "\\(x" <> dcolon <> brackets (ppr w) <> ppr t1 <> text ")." 
<+> help (\_ -> it True <+> help (\_ -> text "x") f1 True) f2 False help it (WpCast co) = add_parens $ sep [it False, nest 2 (text "|>" <+> pprParendCo co)] help it (WpEvApp id) = no_parens $ sep [it True, nest 2 (ppr id)] help it (WpTyApp ty) = no_parens $ sep [it True, text "@" <> pprParendType ty] help it (WpEvLam id) = add_parens $ sep [ text "\\" <> pprLamBndr id <> dot, it False] help it (WpTyLam tv) = add_parens $ sep [text "/\\" <> pprLamBndr tv <> dot, it False] help it (WpLet binds) = add_parens $ sep [text "let" <+> braces (ppr binds), it False] help it (WpMultCoercion co) = add_parens $ sep [it False, nest 2 (text "" <+> pprParendCo co)] pprLamBndr :: Id -> SDoc pprLamBndr v = pprBndr LambdaBind v add_parens, no_parens :: SDoc -> Bool -> SDoc add_parens d True = parens d add_parens d False = d no_parens d _ = d instance Outputable TcEvBinds where ppr (TcEvBinds v) = ppr v ppr (EvBinds bs) = text "EvBinds" <> braces (vcat (map ppr (bagToList bs))) instance Outputable EvBindsVar where ppr (EvBindsVar { ebv_uniq = u }) = text "EvBindsVar" <> angleBrackets (ppr u) ppr (CoEvBindsVar { ebv_uniq = u }) = text "CoEvBindsVar" <> angleBrackets (ppr u) instance Uniquable EvBindsVar where getUnique = ebv_uniq instance Outputable EvBind where ppr (EvBind { eb_lhs = v, eb_rhs = e, eb_info = info }) = sep [ pp_gw <+> ppr v , nest 2 $ equals <+> ppr e ] -- We cheat a bit and pretend EqVars are CoVars for the purposes of pretty printing where pp_gw = brackets $ case info of EvBindGiven{} -> char 'G' EvBindWanted{} -> char 'W' instance Outputable EvTerm where ppr (EvExpr e) = ppr e ppr (EvTypeable ty ev) = ppr ev <+> dcolon <+> text "Typeable" <+> ppr ty ppr (EvFun { et_tvs = tvs, et_given = gs, et_binds = bs, et_body = w }) = hang (text "\\" <+> sep (map pprLamBndr (tvs ++ gs)) <+> arrow) 2 (ppr bs $$ ppr w) -- Not very pretty instance Outputable EvCallStack where ppr EvCsEmpty = text "[]" ppr (EvCsPushCall orig loc tm) = ppr (orig,loc) <+> text ":" <+> ppr tm instance 
Outputable EvTypeable where ppr (EvTypeableTyCon ts _) = text "TyCon" <+> ppr ts ppr (EvTypeableTyApp t1 t2) = parens (ppr t1 <+> ppr t2) ppr (EvTypeableTyLit t1) = text "TyLit" <> ppr t1 ppr (EvTypeableTrFun tm t1 t2) = parens (ppr t1 <+> arr <+> ppr t2) where arr = pprArrowWithMultiplicity visArgTypeLike (Right (ppr tm)) ---------------------------------------------------------------------- -- Helper functions for dealing with IP newtype-dictionaries ---------------------------------------------------------------------- -- | Create a 'Coercion' that unwraps an implicit-parameter -- dictionary to expose the underlying value. -- We expect the 'Type' to have the form `IP sym ty`, -- and return a 'Coercion' `co :: IP sym ty ~ ty` unwrapIP :: Type -> CoercionR unwrapIP ty = case unwrapNewTyCon_maybe tc of Just (_,_,ax) -> mkUnbranchedAxInstCo Representational ax tys [] Nothing -> pprPanic "unwrapIP" $ text "The dictionary for" <+> quotes (ppr tc) <+> text "is not a newtype!" where (tc, tys) = splitTyConApp ty -- | Create a 'Coercion' that wraps a value in an implicit-parameter -- dictionary. See 'unwrapIP'. wrapIP :: Type -> CoercionR wrapIP ty = mkSymCo (unwrapIP ty) ---------------------------------------------------------------------- -- A datatype used to pass information when desugaring quotations ---------------------------------------------------------------------- -- We have to pass a `EvVar` and `Type` into `dsBracket` so that the -- correct evidence and types are applied to all the TH combinators. -- This data type bundles them up together with some convenience methods. -- -- The EvVar is evidence for `Quote m` -- The Type is a metavariable for `m` -- data QuoteWrapper = QuoteWrapper EvVar Type deriving Data.Data quoteWrapperTyVarTy :: QuoteWrapper -> Type quoteWrapperTyVarTy (QuoteWrapper _ t) = t -- | Convert the QuoteWrapper into a normal HsWrapper which can be used to -- apply its contents. 
applyQuoteWrapper :: QuoteWrapper -> HsWrapper applyQuoteWrapper (QuoteWrapper ev_var m_var) = mkWpEvVarApps [ev_var] <.> mkWpTyApps [m_var] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/LclEnv.hs0000644000000000000000000002055207346545000021103 0ustar0000000000000000module GHC.Tc.Types.LclEnv ( TcLclEnv(..) , TcLclCtxt(..) , modifyLclCtxt , getLclEnvArrowCtxt , getLclEnvThBndrs , getLclEnvTypeEnv , getLclEnvBinderStack , getLclEnvErrCtxt , getLclEnvLoc , getLclEnvRdrEnv , getLclEnvTcLevel , getLclEnvThStage , setLclEnvTcLevel , setLclEnvLoc , setLclEnvRdrEnv , setLclEnvBinderStack , setLclEnvErrCtxt , setLclEnvThStage , setLclEnvTypeEnv , modifyLclEnvTcLevel , lclEnvInGeneratedCode , addLclEnvErrCtxt , ArrowCtxt(..) , ThBindEnv , TcTypeEnv ) where import GHC.Prelude import GHC.Tc.Utils.TcType ( TcLevel ) import GHC.Tc.Errors.Types ( TcRnMessage ) import GHC.Core.UsageEnv ( UsageEnv ) import GHC.Types.Name.Reader ( LocalRdrEnv ) import GHC.Types.Name.Env ( NameEnv ) import GHC.Types.SrcLoc ( RealSrcSpan ) import GHC.Types.Basic ( TopLevelFlag ) import GHC.Types.Error ( Messages ) import GHC.Tc.Types.BasicTypes import GHC.Tc.Types.TH import GHC.Tc.Types.TcRef import GHC.Tc.Types.ErrCtxt import GHC.Tc.Types.Constraint ( WantedConstraints ) {- ************************************************************************ * * The local typechecker environment * * ************************************************************************ Note [The Global-Env/Local-Env story] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During type checking, we keep in the tcg_type_env * All types and classes * All Ids derived from types and classes (constructors, selectors) At the end of type checking, we zonk the local bindings, and as we do so we add to the tcg_type_env * Locally defined top-level Ids Why? Because they are now Ids not TcIds. 
This final GlobalEnv is a) fed back (via the knot) to typechecking the unfoldings of interface signatures b) used in the ModDetails of this module -} data TcLclEnv -- Changes as we move inside an expression -- Discarded after typecheck/rename; not passed on to desugarer = TcLclEnv { -- The part that we sometimes restore using `restoreLclEnv`. tcl_lcl_ctxt :: !TcLclCtxt, -- These are exactly the parts of TcLclEnv which are not set by `restoreLclEnv`. tcl_usage :: TcRef UsageEnv, -- Required multiplicity of bindings is accumulated here. tcl_lie :: TcRef WantedConstraints, -- Place to accumulate type constraints tcl_errs :: TcRef (Messages TcRnMessage) -- Place to accumulate diagnostics } data TcLclCtxt = TcLclCtxt { tcl_loc :: RealSrcSpan, -- Source span tcl_ctxt :: [ErrCtxt], -- Error context, innermost on top tcl_in_gen_code :: Bool, -- See Note [Rebindable syntax and XXExprGhcRn] tcl_tclvl :: TcLevel, tcl_bndrs :: TcBinderStack, -- Used for reporting relevant bindings, -- and for tidying type tcl_rdr :: LocalRdrEnv, -- Local name envt -- Maintained during renaming, of course, but also during -- type checking, solely so that when renaming a Template-Haskell -- splice we have the right environment for the renamer. 
-- -- Does *not* include global name envt; may shadow it -- Includes both ordinary variables and type variables; -- they are kept distinct because tyvar have a different -- occurrence constructor (Name.TvOcc) -- We still need the unsullied global name env so that -- we can look up record field names tcl_th_ctxt :: ThStage, -- Template Haskell context tcl_th_bndrs :: ThBindEnv, -- and binder info -- The ThBindEnv records the TH binding level of in-scope Names -- defined in this module (not imported) -- We can't put this info in the TypeEnv because it's needed -- (and extended) in the renamer, for untyped splices tcl_arrow_ctxt :: ArrowCtxt, -- Arrow-notation context tcl_env :: TcTypeEnv -- The local type environment: -- Ids and TyVars defined in this module } getLclEnvThStage :: TcLclEnv -> ThStage getLclEnvThStage = tcl_th_ctxt . tcl_lcl_ctxt setLclEnvThStage :: ThStage -> TcLclEnv -> TcLclEnv setLclEnvThStage s = modifyLclCtxt (\env -> env { tcl_th_ctxt = s }) getLclEnvThBndrs :: TcLclEnv -> ThBindEnv getLclEnvThBndrs = tcl_th_bndrs . tcl_lcl_ctxt getLclEnvArrowCtxt :: TcLclEnv -> ArrowCtxt getLclEnvArrowCtxt = tcl_arrow_ctxt . tcl_lcl_ctxt getLclEnvTypeEnv :: TcLclEnv -> TcTypeEnv getLclEnvTypeEnv = tcl_env . tcl_lcl_ctxt setLclEnvTypeEnv :: TcTypeEnv -> TcLclEnv -> TcLclEnv setLclEnvTypeEnv ty_env = modifyLclCtxt (\env -> env { tcl_env = ty_env}) setLclEnvTcLevel :: TcLevel -> TcLclEnv -> TcLclEnv setLclEnvTcLevel lvl = modifyLclCtxt (\env -> env {tcl_tclvl = lvl }) modifyLclEnvTcLevel :: (TcLevel -> TcLevel) -> TcLclEnv -> TcLclEnv modifyLclEnvTcLevel f = modifyLclCtxt (\env -> env { tcl_tclvl = f (tcl_tclvl env)}) getLclEnvTcLevel :: TcLclEnv -> TcLevel getLclEnvTcLevel = tcl_tclvl . tcl_lcl_ctxt setLclEnvLoc :: RealSrcSpan -> TcLclEnv -> TcLclEnv setLclEnvLoc loc = modifyLclCtxt (\lenv -> lenv { tcl_loc = loc }) getLclEnvLoc :: TcLclEnv -> RealSrcSpan getLclEnvLoc = tcl_loc . tcl_lcl_ctxt getLclEnvErrCtxt :: TcLclEnv -> [ErrCtxt] getLclEnvErrCtxt = tcl_ctxt . 
tcl_lcl_ctxt setLclEnvErrCtxt :: [ErrCtxt] -> TcLclEnv -> TcLclEnv setLclEnvErrCtxt ctxt = modifyLclCtxt (\env -> env { tcl_ctxt = ctxt }) addLclEnvErrCtxt :: ErrCtxt -> TcLclEnv -> TcLclEnv addLclEnvErrCtxt ctxt = modifyLclCtxt (\env -> env { tcl_ctxt = ctxt : (tcl_ctxt env) }) lclEnvInGeneratedCode :: TcLclEnv -> Bool lclEnvInGeneratedCode = tcl_in_gen_code . tcl_lcl_ctxt getLclEnvBinderStack :: TcLclEnv -> TcBinderStack getLclEnvBinderStack = tcl_bndrs . tcl_lcl_ctxt setLclEnvBinderStack :: TcBinderStack -> TcLclEnv -> TcLclEnv setLclEnvBinderStack stack = modifyLclCtxt (\env -> env { tcl_bndrs = stack }) getLclEnvRdrEnv :: TcLclEnv -> LocalRdrEnv getLclEnvRdrEnv = tcl_rdr . tcl_lcl_ctxt setLclEnvRdrEnv :: LocalRdrEnv -> TcLclEnv -> TcLclEnv setLclEnvRdrEnv rdr_env = modifyLclCtxt (\env -> env { tcl_rdr = rdr_env }) modifyLclCtxt :: (TcLclCtxt -> TcLclCtxt) -> TcLclEnv -> TcLclEnv modifyLclCtxt upd env = let !res = upd (tcl_lcl_ctxt env) in env { tcl_lcl_ctxt = res } type TcTypeEnv = NameEnv TcTyThing type ThBindEnv = NameEnv (TopLevelFlag, ThLevel) -- Domain = all Ids bound in this module (ie not imported) -- The TopLevelFlag tells if the binding is syntactically top level. -- We need to know this, because the cross-stage persistence story allows -- cross-stage at arbitrary types if the Id is bound at top level. -- -- Nota bene: a ThLevel of 'outerLevel' is *not* the same as being -- bound at top level! See Note [Template Haskell levels] in GHC.Tc.Gen.Splice --------------------------- -- Arrow-notation context --------------------------- {- Note [Escaping the arrow scope] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In arrow notation, a variable bound by a proc (or enclosed let/kappa) is not in scope to the left of an arrow tail (-<) or the head of (|..|). For example proc x -> (e1 -< e2) Here, x is not in scope in e1, but it is in scope in e2. This can get a bit complicated: let x = 3 in proc y -> (proc z -> e1) -< e2 Here, x and z are in scope in e1, but y is not. 
We implement this by recording the environment when passing a proc (using newArrowScope), and returning to that (using escapeArrowScope) on the left of -< and the head of (|..|). All this can be dealt with by the *renamer*. But the type checker needs to be involved too. Example (arrowfail001) class Foo a where foo :: a -> () data Bar = forall a. Foo a => Bar a get :: Bar -> () get = proc x -> case x of Bar a -> foo -< a Here the call of 'foo' gives rise to a (Foo a) constraint that should not be captured by the pattern match on 'Bar'. Rather it should join the constraints from further out. So we must capture the constraint bag from further out in the ArrowCtxt that we push inwards. -} data ArrowCtxt -- Note [Escaping the arrow scope] = NoArrowCtxt | ArrowCtxt LocalRdrEnv (TcRef WantedConstraints) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/LclEnv.hs-boot0000644000000000000000000000022107346545000022033 0ustar0000000000000000module GHC.Tc.Types.LclEnv where -- See W1 of Note [Tracking dependencies on primitives] in GHC.Internal.Base import GHC.Base () data TcLclEnv ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/Origin.hs0000644000000000000000000020421507346545000021147 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeFamilies #-} -- | Describes the provenance of types as they flow through the type-checker. -- The datatypes here are mainly used for error message generation. 
module GHC.Tc.Types.Origin ( -- * UserTypeCtxt UserTypeCtxt(..), pprUserTypeCtxt, isSigMaybe, ReportRedundantConstraints(..), reportRedundantConstraints, redundantConstraintsSpan, -- * SkolemInfo SkolemInfo(..), SkolemInfoAnon(..), mkSkolemInfo, getSkolemInfo, pprSigSkolInfo, pprSkolInfo, unkSkol, unkSkolAnon, mkClsInstSkol, -- * CtOrigin CtOrigin(..), exprCtOrigin, lexprCtOrigin, matchesCtOrigin, grhssCtOrigin, isVisibleOrigin, toInvisibleOrigin, pprCtOrigin, isGivenOrigin, isWantedWantedFunDepOrigin, isWantedSuperclassOrigin, ClsInstOrQC(..), NakedScFlag(..), NonLinearPatternReason(..), TypedThing(..), TyVarBndrs(..), -- * CallStack isPushCallStackOrigin, callStackOriginFS, -- * FixedRuntimeRep origin FixedRuntimeRepOrigin(..), FixedRuntimeRepContext(..), pprFixedRuntimeRepContext, StmtOrigin(..), ArgPos(..), mkFRRUnboxedTuple, mkFRRUnboxedSum, -- ** FixedRuntimeRep origin for rep-poly 'Id's RepPolyId(..), Polarity(..), Position(..), -- ** Arrow command FixedRuntimeRep origin FRRArrowContext(..), pprFRRArrowContext, -- ** ExpectedFunTy FixedRuntimeRepOrigin ExpectedFunTyOrigin(..), pprExpectedFunTyOrigin, pprExpectedFunTyHerald, -- * InstanceWhat InstanceWhat(..), SafeOverlapping ) where import GHC.Prelude import GHC.Tc.Utils.TcType import GHC.Hs import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.Class import GHC.Core.InstEnv import GHC.Core.PatSyn import GHC.Core.Multiplicity ( scaledThing ) import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Stack import GHC.Utils.Monad import GHC.Utils.Misc( HasDebugCallStack ) import GHC.Types.Unique import GHC.Types.Unique.Supply import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import qualified Data.Kind as Hs {- 
********************************************************************* * * UserTypeCtxt * * ********************************************************************* -} ------------------------------------- -- | UserTypeCtxt describes the origin of the polymorphic type -- in the places where we need an expression to have that type data UserTypeCtxt = FunSigCtxt -- Function type signature, when checking the type -- Also used for types in SPECIALISE pragmas Name -- Name of the function ReportRedundantConstraints -- See Note [Tracking redundant constraints] in GHC.Tc.Solver -- This field is usually 'WantRCC', but 'NoRCC' for -- * Record selectors (not important here) -- * Class and instance methods. Here the code may legitimately -- be more polymorphic than the signature generated from the -- class declaration -- * Functions whose type signature has hidden the constraints -- behind a type synonym. E.g. -- type Foo = forall a. Eq a => a -> a -- id :: Foo -- id x = x -- Here we can't give a good location for the redundant constraints -- (see lhsSigWcTypeContextSpan), so we don't report redundant -- constraints at all. It's not clear that this a good choice; -- perhaps we should report, just with a less informative SrcSpan. -- c.f. #16154 | InfSigCtxt Name -- Inferred type for function | ExprSigCtxt -- Expression type signature ReportRedundantConstraints | KindSigCtxt -- Kind signature | StandaloneKindSigCtxt -- Standalone kind signature Name -- Name of the type/class | TypeAppCtxt -- Visible type application | ConArgCtxt Name -- Data constructor argument | TySynCtxt Name -- RHS of a type synonym decl | PatSynCtxt Name -- Type sig for a pattern synonym | PatSigCtxt -- Type sig in pattern -- eg f (x::t) = ... -- or (x::t, y) = e | RuleSigCtxt FastString Name -- LHS of a RULE forall -- RULE "foo" forall (x :: a -> a). f (Just x) = ... 
| ForSigCtxt Name -- Foreign import or export signature | DefaultDeclCtxt -- Class or types in a default declaration | InstDeclCtxt Bool -- An instance declaration -- True: stand-alone deriving -- False: vanilla instance declaration | SpecInstCtxt -- SPECIALISE instance pragma | GenSigCtxt -- Higher-rank or impredicative situations -- e.g. (f e) where f has a higher-rank type -- We might want to elaborate this | GhciCtxt Bool -- GHCi command :kind -- The Bool indicates if we are checking the outermost -- type application. -- See Note [Unsaturated type synonyms in GHCi] in -- GHC.Tc.Validity. | ClassSCCtxt Name -- Superclasses of a class | SigmaCtxt -- Theta part of a normal for-all type -- f :: => a -> a | DataTyCtxt Name -- The "stupid theta" part of a data decl -- data => T a = MkT a | DerivClauseCtxt -- A 'deriving' clause | TyVarBndrKindCtxt Name -- The kind of a type variable being bound | DataKindCtxt Name -- The kind of a data/newtype (instance) | TySynKindCtxt Name -- The kind of the RHS of a type synonym | TyFamResKindCtxt Name -- The result kind of a type family deriving( Eq ) -- Just for checkSkolInfoAnon -- | Report Redundant Constraints. data ReportRedundantConstraints = NoRRC -- ^ Don't report redundant constraints | WantRRC SrcSpan -- ^ Report redundant constraints -- The SrcSpan is for the constraints -- E.g. 
f :: (Eq a, Ord b) => blah -- The span is for the (Eq a, Ord b) -- We need to record the span here because we have -- long since discarded the HsType in favour of a Type deriving( Eq ) -- Just for checkSkolInfoAnon reportRedundantConstraints :: ReportRedundantConstraints -> Bool reportRedundantConstraints NoRRC = False reportRedundantConstraints (WantRRC {}) = True redundantConstraintsSpan :: UserTypeCtxt -> SrcSpan redundantConstraintsSpan (FunSigCtxt _ (WantRRC span)) = span redundantConstraintsSpan (ExprSigCtxt (WantRRC span)) = span redundantConstraintsSpan _ = noSrcSpan {- -- Notes re TySynCtxt -- We allow type synonyms that aren't types; e.g. type List = [] -- -- If the RHS mentions tyvars that aren't in scope, we'll -- quantify over them: -- e.g. type T = a->a -- will become type T = forall a. a->a -- -- With gla-exts that's right, but for H98 we should complain. -} pprUserTypeCtxt :: UserTypeCtxt -> SDoc pprUserTypeCtxt (FunSigCtxt n _) = text "the type signature for" <+> quotes (ppr n) pprUserTypeCtxt (InfSigCtxt n) = text "the inferred type for" <+> quotes (ppr n) pprUserTypeCtxt (RuleSigCtxt _ n) = text "the type signature for" <+> quotes (ppr n) pprUserTypeCtxt (ExprSigCtxt _) = text "an expression type signature" pprUserTypeCtxt KindSigCtxt = text "a kind signature" pprUserTypeCtxt (StandaloneKindSigCtxt n) = text "a standalone kind signature for" <+> quotes (ppr n) pprUserTypeCtxt TypeAppCtxt = text "a type argument" pprUserTypeCtxt (ConArgCtxt c) = text "the type of the constructor" <+> quotes (ppr c) pprUserTypeCtxt (TySynCtxt c) = text "the RHS of the type synonym" <+> quotes (ppr c) pprUserTypeCtxt PatSigCtxt = text "a pattern type signature" pprUserTypeCtxt (ForSigCtxt n) = text "the foreign declaration for" <+> quotes (ppr n) pprUserTypeCtxt DefaultDeclCtxt = text "a `default' declaration" pprUserTypeCtxt (InstDeclCtxt False) = text "an instance declaration" pprUserTypeCtxt (InstDeclCtxt True) = text "a stand-alone deriving instance declaration" 
pprUserTypeCtxt SpecInstCtxt = text "a SPECIALISE instance pragma" pprUserTypeCtxt GenSigCtxt = text "a type expected by the context" pprUserTypeCtxt (GhciCtxt {}) = text "a type in a GHCi command" pprUserTypeCtxt (ClassSCCtxt c) = text "the super-classes of class" <+> quotes (ppr c) pprUserTypeCtxt SigmaCtxt = text "the context of a polymorphic type" pprUserTypeCtxt (DataTyCtxt tc) = text "the context of the data type declaration for" <+> quotes (ppr tc) pprUserTypeCtxt (PatSynCtxt n) = text "the signature for pattern synonym" <+> quotes (ppr n) pprUserTypeCtxt (DerivClauseCtxt) = text "a `deriving' clause" pprUserTypeCtxt (TyVarBndrKindCtxt n) = text "the kind annotation on the type variable" <+> quotes (ppr n) pprUserTypeCtxt (DataKindCtxt n) = text "the kind annotation on the declaration for" <+> quotes (ppr n) pprUserTypeCtxt (TySynKindCtxt n) = text "the kind annotation on the declaration for" <+> quotes (ppr n) pprUserTypeCtxt (TyFamResKindCtxt n) = text "the result kind for" <+> quotes (ppr n) isSigMaybe :: UserTypeCtxt -> Maybe Name isSigMaybe (FunSigCtxt n _) = Just n isSigMaybe (ConArgCtxt n) = Just n isSigMaybe (ForSigCtxt n) = Just n isSigMaybe (PatSynCtxt n) = Just n isSigMaybe _ = Nothing {- ************************************************************************ * * SkolemInfo * * ************************************************************************ -} -- | 'SkolemInfo' stores the origin of a skolem type variable, -- so that we can display this information to the user in case of a type error. -- -- The 'Unique' field allows us to report all skolem type variables bound in the -- same place in a single report. 
data SkolemInfo = SkolemInfo Unique -- ^ The Unique is used to common up skolem variables bound -- at the same location (only used in pprSkols) SkolemInfoAnon -- ^ The information about the origin of the skolem type variable instance Uniquable SkolemInfo where getUnique (SkolemInfo u _) = u -- | 'SkolemInfoAnon' stores the origin of a skolem type variable (e.g. bound by -- a user-written forall, the header of a data declaration, a deriving clause, ...). -- -- This information is displayed when reporting an error message, such as -- -- @"Couldn't match 'k' with 'l'"@ -- -- This allows us to explain where the type variable came from. -- -- When several skolem type variables are bound at once, prefer using 'SkolemInfo', -- which stores a 'Unique' which allows these type variables to be reported data SkolemInfoAnon = SigSkol -- A skolem that is created by instantiating -- a programmer-supplied type signature -- Location of the binding site is on the TyVar -- See Note [SigSkol SkolemInfo] UserTypeCtxt -- What sort of signature TcType -- Original type signature (before skolemisation) [(Name,TcTyVar)] -- Maps the original name of the skolemised tyvar -- to its instantiated version | SigTypeSkol UserTypeCtxt -- like SigSkol, but when we're kind-checking the *type* -- hence, we have less info | ForAllSkol -- Bound by a user-written "forall". TyVarBndrs -- Shows just the binders, used when reporting a bad telescope -- See Note [Checking telescopes] in GHC.Tc.Types.Constraint | DerivSkol Type -- Bound by a 'deriving' clause; -- the type is the instance we are trying to derive | InstSkol -- Bound at an instance decl, or quantified constraint ClsInstOrQC -- Whether class instance or quantified constraint PatersonSize -- Head has the given PatersonSize | FamInstSkol -- Bound at a family instance decl | PatSkol -- An existential type variable bound by a pattern for ConLike -- a data constructor with an existential type. HsMatchContextRn -- e.g. data T = forall a. 
Eq a => MkT a -- f (MkT x) = ... -- The pattern MkT x will allocate an existential type -- variable for 'a'. | IPSkol [HsIPName] -- Binding site of an implicit parameter | RuleSkol RuleName -- The LHS of a RULE | InferSkol [(Name,TcType)] -- We have inferred a type for these (mutually recursive) -- polymorphic Ids, and are now checking that their RHS -- constraints are satisfied. | BracketSkol -- Template Haskell bracket | UnifyForAllSkol -- We are unifying two for-all types TcType -- The instantiated type *inside* the forall | TyConSkol (TyConFlavour TyCon) Name -- bound in a type declaration of the given flavour | DataConSkol Name -- bound as an existential in a Haskell98 datacon decl or -- as any variable in a GADT datacon decl | ReifySkol -- Bound during Template Haskell reification | RuntimeUnkSkol -- Runtime skolem from the GHCi debugger #14628 | ArrowReboundIfSkol -- Bound by the expected type of the rebound arrow ifThenElse command. | UnkSkol CallStack -- | Use this when you can't specify a helpful origin for -- some skolem type variable. -- -- We're hoping to be able to get rid of this entirely, but for the moment -- it's still needed. unkSkol :: HasDebugCallStack => SkolemInfo unkSkol = SkolemInfo (mkUniqueGrimily 0) unkSkolAnon unkSkolAnon :: HasDebugCallStack => SkolemInfoAnon unkSkolAnon = UnkSkol callStack -- | Wrap up the origin of a skolem type variable with a new 'Unique', -- so that we can common up skolem type variables whose 'SkolemInfo' -- shares a certain 'Unique'. mkSkolemInfo :: MonadIO m => SkolemInfoAnon -> m SkolemInfo mkSkolemInfo sk_anon = do u <- liftIO $! 
uniqFromTag 's' return (SkolemInfo u sk_anon) getSkolemInfo :: SkolemInfo -> SkolemInfoAnon getSkolemInfo (SkolemInfo _ skol_anon) = skol_anon mkClsInstSkol :: Class -> [Type] -> SkolemInfoAnon mkClsInstSkol cls tys = InstSkol IsClsInst (pSizeClassPred cls tys) instance Outputable SkolemInfo where ppr (SkolemInfo _ sk_info ) = ppr sk_info instance Outputable SkolemInfoAnon where ppr = pprSkolInfo pprSkolInfo :: SkolemInfoAnon -> SDoc -- Complete the sentence "is a rigid type variable bound by..." pprSkolInfo (SigSkol cx ty _) = pprSigSkolInfo cx ty pprSkolInfo (SigTypeSkol cx) = pprUserTypeCtxt cx pprSkolInfo (ForAllSkol tvs) = text "an explicit forall" <+> ppr tvs pprSkolInfo (IPSkol ips) = text "the implicit-parameter binding" <> plural ips <+> text "for" <+> pprWithCommas ppr ips pprSkolInfo (DerivSkol pred) = text "the deriving clause for" <+> quotes (ppr pred) pprSkolInfo (InstSkol IsClsInst sz) = vcat [ text "the instance declaration" , whenPprDebug (braces (ppr sz)) ] pprSkolInfo (InstSkol (IsQC {}) sz) = vcat [ text "a quantified context" , whenPprDebug (braces (ppr sz)) ] pprSkolInfo FamInstSkol = text "a family instance declaration" pprSkolInfo BracketSkol = text "a Template Haskell bracket" pprSkolInfo (RuleSkol name) = text "the RULE" <+> pprRuleName name pprSkolInfo (PatSkol cl mc) = sep [ pprPatSkolInfo cl , text "in" <+> pprMatchContext mc ] pprSkolInfo (InferSkol ids) = hang (text "the inferred type" <> plural ids <+> text "of") 2 (vcat [ ppr name <+> dcolon <+> ppr ty | (name,ty) <- ids ]) pprSkolInfo (UnifyForAllSkol ty) = text "the type" <+> ppr ty pprSkolInfo (TyConSkol flav name) = text "the" <+> ppr flav <+> text "declaration for" <+> quotes (ppr name) pprSkolInfo (DataConSkol name) = text "the type signature for" <+> quotes (ppr name) pprSkolInfo ReifySkol = text "the type being reified" pprSkolInfo RuntimeUnkSkol = text "Unknown type from GHCi runtime" pprSkolInfo ArrowReboundIfSkol = text "the expected type of a rebound if-then-else 
command" -- unkSkol -- For type variables the others are dealt with by pprSkolTvBinding. -- For Insts, these cases should not happen pprSkolInfo (UnkSkol cs) = text "UnkSkol (please report this as a bug)" $$ prettyCallStackDoc cs pprSigSkolInfo :: UserTypeCtxt -> TcType -> SDoc -- The type is already tidied pprSigSkolInfo ctxt ty = case ctxt of FunSigCtxt f _ -> vcat [ text "the type signature for:" , nest 2 (pprPrefixOcc f <+> dcolon <+> ppr ty) ] PatSynCtxt {} -> pprUserTypeCtxt ctxt -- See Note [Skolem info for pattern synonyms] _ -> vcat [ pprUserTypeCtxt ctxt <> colon , nest 2 (ppr ty) ] pprPatSkolInfo :: ConLike -> SDoc pprPatSkolInfo (RealDataCon dc) = sdocOption sdocLinearTypes (\show_linear_types -> sep [ text "a pattern with constructor:" , nest 2 $ ppr dc <+> dcolon <+> pprType (dataConDisplayType show_linear_types dc) <> comma ]) -- pprType prints forall's regardless of -fprint-explicit-foralls -- which is what we want here, since we might be saying -- type variable 't' is bound by ... pprPatSkolInfo (PatSynCon ps) = sep [ text "a pattern with pattern synonym:" , nest 2 $ ppr ps <+> dcolon <+> pprPatSynType ps <> comma ] {- Note [Skolem info for pattern synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For pattern synonym SkolemInfo we have SigSkol (PatSynCtxt p) ty _ but the type 'ty' is not very helpful. The full pattern-synonym type has the provided and required pieces, which it is inconvenient to record and display here. So we simply don't display the type at all, contenting ourselves with just the name of the pattern synonym, which is fine. We could do more, but it doesn't seem worth it. Note [SigSkol SkolemInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we skolemise a type f :: forall a. Eq a => forall b. b -> a Then we'll instantiate [a :-> a', b :-> b'], and with the instantiated a' -> b' -> a. But when, in an error message, we report that "b is a rigid type variable bound by the type signature for f", we want to show the foralls in the right place. 
So we proceed as follows: * In SigSkol we record - the original signature forall a. a -> forall b. b -> a - the instantiation mapping [a :-> a', b :-> b'] * Then when tidying in GHC.Tc.Utils.TcMType.tidySkolemInfo, we first tidy a' to whatever it tidies to, say a''; and then we walk over the type replacing the binder a by the tidied version a'', to give forall a''. Eq a'' => forall b''. b'' -> a'' We need to do this under (=>) arrows and (->), to match what skolemisation does. * Typically a'' will have a nice pretty name like "a", but the point is that the foral-bound variables of the signature we report line up with the instantiated skolems lying around in other types. -} {- ********************************************************************* * * CtOrigin * * ************************************************************************ -} -- | Some thing which has a type. -- -- This datatype is used when we want to report to the user -- that something has an unexpected type. data TypedThing = HsTypeRnThing (HsType GhcRn) | TypeThing Type | HsExprRnThing (HsExpr GhcRn) | HsExprTcThing (HsExpr GhcTc) | NameThing Name -- | Some kind of type variable binder. -- -- Used for reporting errors, in 'SkolemInfo' and 'TcSolverReportMsg'. data TyVarBndrs = forall flag. OutputableBndrFlag flag 'Renamed => HsTyVarBndrsRn [HsTyVarBndr flag GhcRn] instance Outputable TypedThing where ppr (HsTypeRnThing ty) = ppr ty ppr (TypeThing ty) = ppr ty ppr (HsExprRnThing expr) = ppr expr ppr (HsExprTcThing expr) = ppr expr ppr (NameThing name) = ppr name instance Outputable TyVarBndrs where ppr (HsTyVarBndrsRn bndrs) = fsep (map ppr bndrs) data CtOrigin = -- | A given constraint from a user-written type signature. The -- 'SkolemInfo' inside gives more information. GivenOrigin SkolemInfoAnon -- | 'GivenSCOrigin' is used for a Given constraint obtained by superclass selection -- from the context of an instance declaration. E.g. -- instance @(Foo a, Bar a) => C [a]@ where ... 
-- When typechecking the instance decl itself, including producing evidence -- for the superclasses of @C@, the superclasses of @(Foo a)@ and @(Bar a)@ will -- have 'GivenSCOrigin' origin. | GivenSCOrigin SkolemInfoAnon -- ^ Just like GivenOrigin ScDepth -- ^ The number of superclass selections necessary to -- get this constraint; see Note [Replacement vs keeping] -- in GHC.Tc.Solver.Dict Bool -- ^ True => "blocked": cannot use this to solve naked superclass Wanteds -- i.e. ones with (ScOrigin _ NakedSc) -- False => can use this to solve all Wanted constraints -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance ----------- Below here, all are Origins for Wanted constraints ------------ | OccurrenceOf Name -- Occurrence of an overloaded identifier | OccurrenceOfRecSel RdrName -- Occurrence of a record selector | AppOrigin -- An application of some kind | SpecPragOrigin UserTypeCtxt -- Specialisation pragma for -- function or instance | TypeEqOrigin { uo_actual :: TcType , uo_expected :: TcType , uo_thing :: Maybe TypedThing -- ^ The thing that has type "actual" , uo_visible :: Bool -- ^ Is at least one of the three elements above visible? -- (Errors from the polymorphic subsumption check are considered -- visible.) Only used for prioritizing error messages. 
} | KindEqOrigin TcType TcType -- A kind equality arising from unifying these two types CtOrigin -- originally arising from this (Maybe TypeOrKind) -- the level of the eq this arises from | IPOccOrigin HsIPName -- Occurrence of an implicit parameter | OverLabelOrigin FastString -- Occurrence of an overloaded label | LiteralOrigin (HsOverLit GhcRn) -- Occurrence of a literal | NegateOrigin -- Occurrence of syntactic negation | ArithSeqOrigin (ArithSeqInfo GhcRn) -- [x..], [x..y] etc | AssocFamPatOrigin -- When matching the patterns of an associated -- family instance with that of its parent class -- IMPORTANT: These constraints will never cause errors; -- See Note [Constraints to ignore] in GHC.Tc.Errors | SectionOrigin | HasFieldOrigin FastString | TupleOrigin -- (..,..) | ExprSigOrigin -- e :: ty | PatSigOrigin -- p :: ty | PatOrigin -- Instantiating a polytyped pattern at a constructor | ProvCtxtOrigin -- The "provided" context of a pattern synonym signature (PatSynBind GhcRn GhcRn) -- Information about the pattern synonym, in -- particular the name and the right-hand side | RecordUpdOrigin | ViewPatOrigin -- | 'ScOrigin' is used only for the Wanted constraints for the -- superclasses of an instance declaration. | ScOrigin ClsInstOrQC -- Whether class instance or quantified constraint NakedScFlag | DerivClauseOrigin -- Typechecking a deriving clause (as opposed to -- standalone deriving). | DerivOriginDC DataCon Int Bool -- Checking constraints arising from this data con and field index. The -- Bool argument in DerivOriginDC and DerivOriginCoerce is True if -- standalong deriving (with a wildcard constraint) is being used. This -- is used to inform error messages on how to recommended fixes (e.g., if -- the argument is True, then don't recommend "use standalone deriving", -- but rather "fill in the wildcard constraint yourself"). 
-- See Note [Inferring the instance context] in GHC.Tc.Deriv.Infer | DerivOriginCoerce Id Type Type Bool -- DerivOriginCoerce id ty1 ty2: Trying to coerce class method `id` from -- `ty1` to `ty2`. | StandAloneDerivOrigin -- Typechecking stand-alone deriving. Useful for -- constraints coming from a wildcard constraint, -- e.g., deriving instance _ => Eq (Foo a) -- See Note [Inferring the instance context] -- in GHC.Tc.Deriv.Infer | DefaultOrigin -- Typechecking a default decl | DoOrigin -- Arising from a do expression | DoPatOrigin (LPat GhcRn) -- Arising from a failable pattern in -- a do expression | MCompOrigin -- Arising from a monad comprehension | MCompPatOrigin (LPat GhcRn) -- Arising from a failable pattern in a -- monad comprehension | ProcOrigin -- Arising from a proc expression | ArrowCmdOrigin -- Arising from an arrow command | AnnOrigin -- An annotation | FunDepOrigin1 -- A functional dependency from combining PredType CtOrigin RealSrcSpan -- This constraint arising from ... PredType CtOrigin RealSrcSpan -- and this constraint arising from ... | FunDepOrigin2 -- A functional dependency from combining PredType CtOrigin -- This constraint arising from ... PredType SrcSpan -- and this top-level instance -- We only need a CtOrigin on the first, because the location -- is pinned on the entire error message | InjTFOrigin1 -- injective type family equation combining PredType CtOrigin RealSrcSpan -- This constraint arising from ... PredType CtOrigin RealSrcSpan -- and this constraint arising from ... 
| ExprHoleOrigin (Maybe RdrName) -- from an expression hole | TypeHoleOrigin OccName -- from a type hole (partial type signature) | PatCheckOrigin -- normalisation of a type during pattern-match checking | ListOrigin -- An overloaded list | IfThenElseOrigin -- An if-then-else expression | BracketOrigin -- An overloaded quotation bracket | StaticOrigin -- A static form | ImpedanceMatching Id -- See Note [Impedance matching] in GHC.Tc.Gen.Bind | Shouldn'tHappenOrigin String -- The user should never see this one -- | Testing whether the constraint associated with an instance declaration -- in a signature file is satisfied upon instantiation. -- -- Test cases: backpack/should_fail/bkpfail{11,43}.bkp | InstProvidedOrigin Module -- ^ Module in which the instance was declared ClsInst -- ^ The declared typeclass instance | NonLinearPatternOrigin NonLinearPatternReason (LPat GhcRn) | OmittedFieldOrigin (Maybe FieldLabel) | UsageEnvironmentOf Name | CycleBreakerOrigin CtOrigin -- origin of the original constraint -- See Detail (7) of Note [Type equality cycles] in GHC.Tc.Solver.Equality | FRROrigin FixedRuntimeRepOrigin | WantedSuperclassOrigin PredType CtOrigin -- From expanding out the superclasses of a Wanted; the PredType -- is the subclass predicate, and the origin -- of the original Wanted is the CtOrigin | InstanceSigOrigin -- from the sub-type check of an InstanceSig Name -- the method name Type -- the instance-sig type Type -- the instantiated type of the method | AmbiguityCheckOrigin UserTypeCtxt data NonLinearPatternReason = LazyPatternReason | GeneralisedPatternReason | PatternSynonymReason | ViewPatternReason | OtherPatternReason -- | The number of superclass selections needed to get this Given. -- If @d :: C ty@ has @ScDepth=2@, then the evidence @d@ will look -- like @sc_sel (sc_sel dg)@, where @dg@ is a Given. 
type ScDepth = Int data ClsInstOrQC = IsClsInst | IsQC CtOrigin data NakedScFlag = NakedSc | NotNakedSc -- The NakedScFlag affects only GHC.Tc.Solver.InertSet.prohibitedSuperClassSolve -- * For the original superclass constraints we use (ScOrigin _ NakedSc) -- * But after using an instance declaration we use (ScOrigin _ NotNakedSc) -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance instance Outputable NakedScFlag where ppr NakedSc = text "NakedSc" ppr NotNakedSc = text "NotNakedSc" -- An origin is visible if the place where the constraint arises is manifest -- in user code. Currently, all origins are visible except for invisible -- TypeEqOrigins. This is used when choosing which error of -- several to report isVisibleOrigin :: CtOrigin -> Bool isVisibleOrigin (TypeEqOrigin { uo_visible = vis }) = vis isVisibleOrigin (KindEqOrigin _ _ sub_orig _) = isVisibleOrigin sub_orig isVisibleOrigin _ = True -- Converts a visible origin to an invisible one, if possible. Currently, -- this works only for TypeEqOrigin toInvisibleOrigin :: CtOrigin -> CtOrigin toInvisibleOrigin orig@(TypeEqOrigin {}) = orig { uo_visible = False } toInvisibleOrigin orig = orig isGivenOrigin :: CtOrigin -> Bool isGivenOrigin (GivenOrigin {}) = True isGivenOrigin (GivenSCOrigin {}) = True isGivenOrigin (CycleBreakerOrigin o) = isGivenOrigin o isGivenOrigin _ = False -- See Note [Suppressing confusing errors] in GHC.Tc.Errors isWantedWantedFunDepOrigin :: CtOrigin -> Bool isWantedWantedFunDepOrigin (FunDepOrigin1 _ orig1 _ _ orig2 _) = not (isGivenOrigin orig1) && not (isGivenOrigin orig2) isWantedWantedFunDepOrigin (InjTFOrigin1 _ orig1 _ _ orig2 _) = not (isGivenOrigin orig1) && not (isGivenOrigin orig2) isWantedWantedFunDepOrigin _ = False -- | Did a constraint arise from expanding a Wanted constraint -- to look at superclasses? 
isWantedSuperclassOrigin :: CtOrigin -> Bool isWantedSuperclassOrigin (WantedSuperclassOrigin {}) = True isWantedSuperclassOrigin _ = False instance Outputable CtOrigin where ppr = pprCtOrigin ctoHerald :: SDoc ctoHerald = text "arising from" -- | Extract a suitable CtOrigin from a HsExpr lexprCtOrigin :: LHsExpr GhcRn -> CtOrigin lexprCtOrigin (L _ e) = exprCtOrigin e exprCtOrigin :: HsExpr GhcRn -> CtOrigin exprCtOrigin (HsVar _ (L _ name)) = OccurrenceOf name exprCtOrigin (HsGetField _ _ (L _ f)) = HasFieldOrigin (field_label $ unLoc $ dfoLabel f) exprCtOrigin (HsUnboundVar {}) = Shouldn'tHappenOrigin "unbound variable" exprCtOrigin (HsOverLabel _ l) = OverLabelOrigin l exprCtOrigin (ExplicitList {}) = ListOrigin exprCtOrigin (HsIPVar _ ip) = IPOccOrigin ip exprCtOrigin (HsOverLit _ lit) = LiteralOrigin lit exprCtOrigin (HsLit {}) = Shouldn'tHappenOrigin "concrete literal" exprCtOrigin (HsLam _ _ ms) = matchesCtOrigin ms exprCtOrigin (HsApp _ e1 _) = lexprCtOrigin e1 exprCtOrigin (HsAppType _ e1 _) = lexprCtOrigin e1 exprCtOrigin (OpApp _ _ op _) = lexprCtOrigin op exprCtOrigin (NegApp _ e _) = lexprCtOrigin e exprCtOrigin (HsPar _ e) = lexprCtOrigin e exprCtOrigin (HsProjection _ _) = SectionOrigin exprCtOrigin (SectionL _ _ _) = SectionOrigin exprCtOrigin (SectionR _ _ _) = SectionOrigin exprCtOrigin (ExplicitTuple {}) = Shouldn'tHappenOrigin "explicit tuple" exprCtOrigin ExplicitSum{} = Shouldn'tHappenOrigin "explicit sum" exprCtOrigin (HsCase _ _ matches) = matchesCtOrigin matches exprCtOrigin (HsIf {}) = IfThenElseOrigin exprCtOrigin (HsMultiIf _ rhs) = lGRHSCtOrigin rhs exprCtOrigin (HsLet _ _ e) = lexprCtOrigin e exprCtOrigin (HsDo {}) = DoOrigin exprCtOrigin (RecordCon {}) = Shouldn'tHappenOrigin "record construction" exprCtOrigin (RecordUpd {}) = RecordUpdOrigin exprCtOrigin (ExprWithTySig {}) = ExprSigOrigin exprCtOrigin (ArithSeq {}) = Shouldn'tHappenOrigin "arithmetic sequence" exprCtOrigin (HsPragE _ _ e) = lexprCtOrigin e exprCtOrigin 
(HsTypedBracket {}) = Shouldn'tHappenOrigin "TH typed bracket" exprCtOrigin (HsUntypedBracket {}) = Shouldn'tHappenOrigin "TH untyped bracket" exprCtOrigin (HsTypedSplice {}) = Shouldn'tHappenOrigin "TH typed splice" exprCtOrigin (HsUntypedSplice {}) = Shouldn'tHappenOrigin "TH untyped splice" exprCtOrigin (HsProc {}) = Shouldn'tHappenOrigin "proc" exprCtOrigin (HsStatic {}) = Shouldn'tHappenOrigin "static expression" exprCtOrigin (HsEmbTy {}) = Shouldn'tHappenOrigin "type expression" exprCtOrigin (HsForAll {}) = Shouldn'tHappenOrigin "forall telescope" -- See Note [Types in terms] exprCtOrigin (HsQual {}) = Shouldn'tHappenOrigin "constraint context" -- See Note [Types in terms] exprCtOrigin (HsFunArr {}) = Shouldn'tHappenOrigin "function arrow" -- See Note [Types in terms] exprCtOrigin (XExpr (ExpandedThingRn thing _)) | OrigExpr a <- thing = exprCtOrigin a | OrigStmt _ <- thing = DoOrigin | OrigPat p <- thing = DoPatOrigin p exprCtOrigin (XExpr (PopErrCtxt {})) = Shouldn'tHappenOrigin "PopErrCtxt" exprCtOrigin (XExpr (HsRecSelRn f)) = OccurrenceOfRecSel (foExt f) -- | Extract a suitable CtOrigin from a MatchGroup matchesCtOrigin :: MatchGroup GhcRn (LHsExpr GhcRn) -> CtOrigin matchesCtOrigin (MG { mg_alts = alts }) | L _ [L _ match] <- alts , Match { m_grhss = grhss } <- match = grhssCtOrigin grhss | otherwise = Shouldn'tHappenOrigin "multi-way match" -- | Extract a suitable CtOrigin from guarded RHSs grhssCtOrigin :: GRHSs GhcRn (LHsExpr GhcRn) -> CtOrigin grhssCtOrigin (GRHSs { grhssGRHSs = lgrhss }) = lGRHSCtOrigin lgrhss -- | Extract a suitable CtOrigin from a list of guarded RHSs lGRHSCtOrigin :: [LGRHS GhcRn (LHsExpr GhcRn)] -> CtOrigin lGRHSCtOrigin [L _ (GRHS _ _ (L _ e))] = exprCtOrigin e lGRHSCtOrigin _ = Shouldn'tHappenOrigin "multi-way GRHS" pprCtOrigin :: CtOrigin -> SDoc -- "arising from ..." 
pprCtOrigin (GivenOrigin sk) = ctoHerald <+> ppr sk pprCtOrigin (GivenSCOrigin sk d blk) = vcat [ ctoHerald <+> pprSkolInfo sk , whenPprDebug (braces (text "given-sc:" <+> ppr d <> comma <> ppr blk)) ] pprCtOrigin (SpecPragOrigin ctxt) = case ctxt of FunSigCtxt n _ -> text "for" <+> quotes (ppr n) SpecInstCtxt -> text "a SPECIALISE INSTANCE pragma" _ -> text "a SPECIALISE pragma" -- Never happens I think pprCtOrigin (FunDepOrigin1 pred1 orig1 loc1 pred2 orig2 loc2) = hang (ctoHerald <+> text "a functional dependency between constraints:") 2 (vcat [ hang (quotes (ppr pred1)) 2 (pprCtOrigin orig1 <+> text "at" <+> ppr loc1) , hang (quotes (ppr pred2)) 2 (pprCtOrigin orig2 <+> text "at" <+> ppr loc2) ]) pprCtOrigin (FunDepOrigin2 pred1 orig1 pred2 loc2) = hang (ctoHerald <+> text "a functional dependency between:") 2 (vcat [ hang (text "constraint" <+> quotes (ppr pred1)) 2 (pprCtOrigin orig1 ) , hang (text "instance" <+> quotes (ppr pred2)) 2 (text "at" <+> ppr loc2) ]) pprCtOrigin (InjTFOrigin1 pred1 orig1 loc1 pred2 orig2 loc2) = hang (ctoHerald <+> text "reasoning about an injective type family using constraints:") 2 (vcat [ hang (quotes (ppr pred1)) 2 (pprCtOrigin orig1 <+> text "at" <+> ppr loc1) , hang (quotes (ppr pred2)) 2 (pprCtOrigin orig2 <+> text "at" <+> ppr loc2) ]) pprCtOrigin AssocFamPatOrigin = text "when matching a family LHS with its class instance head" pprCtOrigin (TypeEqOrigin { uo_actual = t1, uo_expected = t2, uo_visible = vis }) = hang (ctoHerald <+> text "a type equality" <> whenPprDebug (brackets (ppr vis))) 2 (sep [ppr t1, char '~', ppr t2]) pprCtOrigin (KindEqOrigin t1 t2 _ _) = hang (ctoHerald <+> text "a kind equality arising from") 2 (sep [ppr t1, char '~', ppr t2]) pprCtOrigin (DerivOriginDC dc n _) = hang (ctoHerald <+> text "the" <+> speakNth n <+> text "field of" <+> quotes (ppr dc)) 2 (parens (text "type" <+> quotes (ppr (scaledThing ty)))) where ty = dataConOrigArgTys dc !! 
(n-1) pprCtOrigin (DerivOriginCoerce meth ty1 ty2 _) = hang (ctoHerald <+> text "the coercion of the method" <+> quotes (ppr meth)) 2 (sep [ text "from type" <+> quotes (ppr ty1) , nest 2 $ text "to type" <+> quotes (ppr ty2) ]) pprCtOrigin (DoPatOrigin pat) = ctoHerald <+> text "a do statement" $$ text "with the failable pattern" <+> quotes (ppr pat) pprCtOrigin (MCompPatOrigin pat) = ctoHerald <+> hsep [ text "the failable pattern" , quotes (ppr pat) , text "in a statement in a monad comprehension" ] pprCtOrigin (Shouldn'tHappenOrigin note) = vcat [ text "<< This should not appear in error messages. If you see this" , text "in an error message, please report a bug mentioning" <+> quotes (text note) <+> text "at" , text "https://gitlab.haskell.org/ghc/ghc/wikis/report-a-bug >>" ] pprCtOrigin (ProvCtxtOrigin PSB{ psb_id = (L _ name) }) = hang (ctoHerald <+> text "the \"provided\" constraints claimed by") 2 (text "the signature of" <+> quotes (ppr name)) pprCtOrigin (InstProvidedOrigin mod cls_inst) = vcat [ text "arising when attempting to show that" , ppr cls_inst , text "is provided by" <+> quotes (ppr mod)] pprCtOrigin (ImpedanceMatching x) = vcat [ text "arising when matching required constraints" , text "in a group involving" <+> quotes (ppr x)] pprCtOrigin (CycleBreakerOrigin orig) = pprCtOrigin orig pprCtOrigin (WantedSuperclassOrigin subclass_pred subclass_orig) = sep [ ctoHerald <+> text "a superclass required to satisfy" <+> quotes (ppr subclass_pred) <> comma , pprCtOrigin subclass_orig ] pprCtOrigin (InstanceSigOrigin method_name sig_type orig_method_type) = vcat [ ctoHerald <+> text "the check that an instance signature is more general" , text "than the type of the method (instantiated for this instance)" , hang (text "instance signature:") 2 (ppr method_name <+> dcolon <+> ppr sig_type) , hang (text "instantiated method type:") 2 (ppr orig_method_type) ] pprCtOrigin (AmbiguityCheckOrigin ctxt) = ctoHerald <+> text "a type ambiguity check for" $$ 
pprUserTypeCtxt ctxt pprCtOrigin (ScOrigin IsClsInst nkd) = vcat [ ctoHerald <+> text "the superclasses of an instance declaration" , whenPprDebug (braces (text "sc-origin:" <> ppr nkd)) ] pprCtOrigin (ScOrigin (IsQC orig) nkd) = vcat [ ctoHerald <+> text "the head of a quantified constraint" , whenPprDebug (braces (text "sc-origin:" <> ppr nkd)) , pprCtOrigin orig ] pprCtOrigin (NonLinearPatternOrigin reason pat) = hang (ctoHerald <+> text "a non-linear pattern" <+> quotes (ppr pat)) 2 (pprNonLinearPatternReason reason) pprCtOrigin simple_origin = ctoHerald <+> pprCtO simple_origin -- | Short one-liners pprCtO :: HasDebugCallStack => CtOrigin -> SDoc pprCtO (OccurrenceOf name) = hsep [text "a use of", quotes (ppr name)] pprCtO (OccurrenceOfRecSel name) = hsep [text "a use of", quotes (ppr name)] pprCtO AppOrigin = text "an application" pprCtO (IPOccOrigin name) = hsep [text "a use of implicit parameter", quotes (ppr name)] pprCtO (OverLabelOrigin l) = hsep [text "the overloaded label" ,quotes (char '#' <> ppr l)] pprCtO RecordUpdOrigin = text "a record update" pprCtO ExprSigOrigin = text "an expression type signature" pprCtO PatSigOrigin = text "a pattern type signature" pprCtO PatOrigin = text "a pattern" pprCtO ViewPatOrigin = text "a view pattern" pprCtO (LiteralOrigin lit) = hsep [text "the literal", quotes (ppr lit)] pprCtO (ArithSeqOrigin seq) = hsep [text "the arithmetic sequence", quotes (ppr seq)] pprCtO SectionOrigin = text "an operator section" pprCtO (HasFieldOrigin f) = hsep [text "selecting the field", quotes (ppr f)] pprCtO AssocFamPatOrigin = text "the LHS of a family instance" pprCtO TupleOrigin = text "a tuple" pprCtO NegateOrigin = text "a use of syntactic negation" pprCtO (ScOrigin IsClsInst _) = text "the superclasses of an instance declaration" pprCtO (ScOrigin (IsQC {}) _) = text "the head of a quantified constraint" pprCtO DerivClauseOrigin = text "the 'deriving' clause of a data type declaration" pprCtO StandAloneDerivOrigin = text "a 
'deriving' declaration" pprCtO DefaultOrigin = text "a 'default' declaration" pprCtO DoOrigin = text "a do statement" pprCtO MCompOrigin = text "a statement in a monad comprehension" pprCtO ProcOrigin = text "a proc expression" pprCtO ArrowCmdOrigin = text "an arrow command" pprCtO AnnOrigin = text "an annotation" pprCtO (ExprHoleOrigin Nothing) = text "an expression hole" pprCtO (ExprHoleOrigin (Just occ)) = text "a use of" <+> quotes (ppr occ) pprCtO (TypeHoleOrigin occ) = text "a use of wildcard" <+> quotes (ppr occ) pprCtO PatCheckOrigin = text "a pattern-match completeness check" pprCtO ListOrigin = text "an overloaded list" pprCtO IfThenElseOrigin = text "an if-then-else expression" pprCtO StaticOrigin = text "a static form" pprCtO (UsageEnvironmentOf x) = hsep [text "multiplicity of", quotes (ppr x)] pprCtO (OmittedFieldOrigin Nothing) = text "an omitted anonymous field" pprCtO (OmittedFieldOrigin (Just fl)) = hsep [text "omitted field" <+> quotes (ppr fl)] pprCtO BracketOrigin = text "a quotation bracket" -- These ones are handled by pprCtOrigin, but we nevertheless sometimes -- get here via callStackOriginFS, when doing ambiguity checks -- A bit silly, but no great harm pprCtO (GivenOrigin {}) = text "a given constraint" pprCtO (GivenSCOrigin {}) = text "the superclass of a given constraint" pprCtO (SpecPragOrigin {}) = text "a SPECIALISE pragma" pprCtO (FunDepOrigin1 {}) = text "a functional dependency" pprCtO (FunDepOrigin2 {}) = text "a functional dependency" pprCtO (InjTFOrigin1 {}) = text "an injective type family" pprCtO (TypeEqOrigin {}) = text "a type equality" pprCtO (KindEqOrigin {}) = text "a kind equality" pprCtO (DerivOriginDC {}) = text "a deriving clause" pprCtO (DerivOriginCoerce {}) = text "a derived method" pprCtO (DoPatOrigin {}) = text "a do statement" pprCtO (MCompPatOrigin {}) = text "a monad comprehension pattern" pprCtO (Shouldn'tHappenOrigin note) = text note pprCtO (ProvCtxtOrigin {}) = text "a provided constraint" pprCtO 
(InstProvidedOrigin {}) = text "a provided constraint" pprCtO (CycleBreakerOrigin orig) = pprCtO orig pprCtO (FRROrigin {}) = text "a representation-polymorphism check" pprCtO (WantedSuperclassOrigin {}) = text "a superclass constraint" pprCtO (InstanceSigOrigin {}) = text "a type signature in an instance" pprCtO (AmbiguityCheckOrigin {}) = text "a type ambiguity check" pprCtO (ImpedanceMatching {}) = text "combining required constraints" pprCtO (NonLinearPatternOrigin _ pat) = hsep [text "a non-linear pattern" <+> quotes (ppr pat)] pprNonLinearPatternReason :: HasDebugCallStack => NonLinearPatternReason -> SDoc pprNonLinearPatternReason LazyPatternReason = parens (text "non-variable lazy pattern aren't linear") pprNonLinearPatternReason GeneralisedPatternReason = parens (text "non-variable pattern bindings that have been generalised aren't linear") pprNonLinearPatternReason PatternSynonymReason = parens (text "pattern synonyms aren't linear") pprNonLinearPatternReason ViewPatternReason = parens (text "view patterns aren't linear") pprNonLinearPatternReason OtherPatternReason = empty {- ********************************************************************* * * CallStacks and CtOrigin See Note [Overview of implicit CallStacks] in GHC.Tc.Types.Evidence * * ********************************************************************* -} isPushCallStackOrigin :: CtOrigin -> Bool -- Do we want to solve this IP constraint directly (return False) -- or push the call site (return True) -- See Note [Overview of implicit CallStacks] in GHc.Tc.Types.Evidence isPushCallStackOrigin (IPOccOrigin {}) = False isPushCallStackOrigin _ = True callStackOriginFS :: CtOrigin -> FastString -- This is the string that appears in the CallStack callStackOriginFS (OccurrenceOf fun) = occNameFS (getOccName fun) callStackOriginFS orig = mkFastString (showSDocUnsafe (pprCtO orig)) {- ************************************************************************ * * Checking for representation polymorphism * * 
************************************************************************ Note [Reporting representation-polymorphism errors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As explained in Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete, to check that (ty :: ki) has a fixed runtime representation, we emit an equality constraint of the form ki ~# concrete_tv where concrete_tv is a concrete metavariable. In this situation, we attach a 'FixedRuntimeRepOrigin' to both the equality and the concrete type variable. The 'FixedRuntimeRepOrigin' consists of two pieces of information: - the type 'ty' on which we performed the representation-polymorphism check, - a 'FixedRuntimeRepContext' which explains why we needed to perform a check (e.g. because 'ty' was the kind of a function argument, or of a bound variable in a lambda abstraction, ...). This information gets passed along as we make progress on solving the constraint, and if we end up with an unsolved constraint we can report an informative error message to the user using the 'FixedRuntimeRepOrigin'. The error reporting goes through two different paths: - constraints whose 'CtOrigin' contains a 'FixedRuntimeRepOrigin' are reported using 'mkFRRErr' in 'reportWanteds', - equality constraints in which one side is a concrete metavariable and the other side is not concrete are reported using 'mkTyVarEqErr'. In this case, we pass on the type variable and the non-concrete type for error reporting, using the 'frr_info_not_concrete' field. This is why we have the 'FixedRuntimeRepErrorInfo' datatype: so that we can optionally include this extra message about an unsolved equality between a concrete type variable and a non-concrete type. -} -- | The context for a representation-polymorphism check. -- -- For example, when typechecking @ \ (a :: k) -> ...@, -- we are checking the type @a@ because it's the type of -- a term variable bound in a lambda, so we use 'FRRBinder'. 
data FixedRuntimeRepOrigin = FixedRuntimeRepOrigin { frr_type :: Type -- ^ What type are we checking? -- For example, @a[tau]@ in @a[tau] :: TYPE rr[tau]@. , frr_context :: FixedRuntimeRepContext -- ^ What context requires a fixed runtime representation? } instance Outputable FixedRuntimeRepOrigin where ppr (FixedRuntimeRepOrigin { frr_type = ty, frr_context = cxt }) = text "FrOrigin" <> braces (vcat [ text "frr_type:" <+> ppr ty , text "frr_context:" <+> ppr cxt ]) -- | The context in which a representation-polymorphism check was performed. -- -- Does not include the type on which the check was performed; see -- 'FixedRuntimeRepOrigin' for that. data FixedRuntimeRepContext -- | Record fields in record construction must have a fixed runtime -- representation. = FRRRecordCon !RdrName !(HsExpr GhcTc) -- | Record fields in record updates must have a fixed runtime representation. -- -- Test case: RepPolyRecordUpdate. | FRRRecordUpdate !Name !(HsExpr GhcRn) -- | Variable binders must have a fixed runtime representation. -- -- Test cases: LevPolyLet, RepPolyPatBind. | FRRBinder !Name -- | Types appearing in negative position in the type of a -- representation-polymorphic 'Id' must have a fixed runtime representation. -- -- This includes: -- -- - arguments, -- -- Test cases: RepPolyMagic, RepPolyRightSection, RepPolyWrappedVar, -- T14561b, T17817. -- -- - continuation result types, such as in 'catch#', 'keepAlive#' -- and 'control0#'. -- -- Test case: T21906. | FRRRepPolyId !Name !RepPolyId !(Position Neg) -- | A partial application of the constructor of a representation-polymorphic -- unlifted newtype in which the argument type does not have a fixed -- runtime representation. -- -- Test cases: UnliftedNewtypesLevityBinder, UnliftedNewtypesCoerceFail. | FRRRepPolyUnliftedNewtype !DataCon -- | Pattern binds must have a fixed runtime representation. -- -- Test case: RepPolyInferPatBind. | FRRPatBind -- | Pattern synonym arguments must have a fixed runtime representation. 
-- -- Test case: RepPolyInferPatSyn. | FRRPatSynArg -- | The type of the scrutinee in a case statement must have a -- fixed runtime representation. -- -- Test cases: RepPolyCase{1,2}. | FRRCase -- | An instantiation of a newtype/data constructor pattern in which -- an argument type does not have a fixed runtime representation. -- -- Test case: T20363. | FRRDataConPatArg !DataCon !Int -- | The 'RuntimeRep' arguments to unboxed tuples must be concrete 'RuntimeRep's. -- -- Test case: RepPolyTuple. | FRRUnboxedTuple !Int -- | Tuple sections must have a fixed runtime representation. -- -- Test case: RepPolyTupleSection. | FRRUnboxedTupleSection !Int -- | The 'RuntimeRep' arguments to unboxed sums must be concrete 'RuntimeRep's. -- -- Test cases: RepPolySum. | FRRUnboxedSum !(Maybe Int) -- | The body of a @do@ expression or a monad comprehension must -- have a fixed runtime representation. -- -- Test cases: RepPolyDoBody{1,2}, RepPolyMcBody. | FRRBodyStmt !StmtOrigin !Int -- | Arguments to a guard in a monad comprehension must have -- a fixed runtime representation. -- -- Test case: RepPolyMcGuard. | FRRBodyStmtGuard -- | Arguments to `(>>=)` arising from a @do@ expression -- or a monad comprehension must have a fixed runtime representation. -- -- Test cases: RepPolyDoBind, RepPolyMcBind. | FRRBindStmt !StmtOrigin -- | A value bound by a pattern guard must have a fixed runtime representation. -- -- Test cases: none. | FRRBindStmtGuard -- | A representation-polymorphism check arising from arrow notation. -- -- See 'FRRArrowContext' for more details. | FRRArrow !FRRArrowContext -- | A representation-polymorphic check arising from a call -- to 'matchExpectedFunTys' or 'matchActualFunTy'. -- -- See 'ExpectedFunTyOrigin' for more details. | FRRExpectedFunTy !ExpectedFunTyOrigin !Int -- ^ argument position (1-indexed) -- | The description of a representation-polymorphic 'Id'. data RepPolyId -- | A representation-polymorphic 'PrimOp'. 
= RepPolyPrimOp -- | An unboxed tuple constructor. | RepPolyTuple -- | An unboxed sum constructor. | RepPolySum -- | An unspecified representation-polymorphic function, -- e.g. a pseudo-op such as 'coerce'. | RepPolyFunction -- | A synonym for 'FRRUnboxedTuple' exposed in the hs-boot file -- for "GHC.Tc.Types.Origin". mkFRRUnboxedTuple :: Int -> FixedRuntimeRepContext mkFRRUnboxedTuple = FRRUnboxedTuple -- | A synonym for 'FRRUnboxedSum' exposed in the hs-boot file -- for "GHC.Tc.Types.Origin". mkFRRUnboxedSum :: Maybe Int -> FixedRuntimeRepContext mkFRRUnboxedSum = FRRUnboxedSum -- | Print the context for a @FixedRuntimeRep@ representation-polymorphism check. -- -- Note that this function does not include the specific 'RuntimeRep' -- which is not fixed. That information is stored in 'FixedRuntimeRepOrigin' -- and is reported separately. pprFixedRuntimeRepContext :: FixedRuntimeRepContext -> SDoc pprFixedRuntimeRepContext (FRRRecordCon lbl _arg) = sep [ text "The field", quotes (ppr lbl) , text "of the record constructor" ] pprFixedRuntimeRepContext (FRRRecordUpdate lbl _arg) = sep [ text "The record update at field" , quotes (ppr lbl) ] pprFixedRuntimeRepContext (FRRBinder binder) = sep [ text "The binder" , quotes (ppr binder) ] pprFixedRuntimeRepContext (FRRRepPolyId nm id what) = pprFRRRepPolyId id nm what pprFixedRuntimeRepContext FRRPatBind = text "The pattern binding" pprFixedRuntimeRepContext FRRPatSynArg = text "The pattern synonym argument pattern" pprFixedRuntimeRepContext FRRCase = text "The scrutinee of the case statement" pprFixedRuntimeRepContext (FRRDataConPatArg con i) = text "The" <+> what where what :: SDoc what | isNewDataCon con = text "newtype constructor pattern" | otherwise = text "data constructor pattern in" <+> speakNth i <+> text "position" pprFixedRuntimeRepContext (FRRRepPolyUnliftedNewtype dc) = vcat [ text "Unsaturated use of a representation-polymorphic unlifted newtype." 
, text "The argument of the newtype constructor" <+> quotes (ppr dc) ] pprFixedRuntimeRepContext (FRRUnboxedTuple i) = text "The" <+> speakNth i <+> text "component of the unboxed tuple" pprFixedRuntimeRepContext (FRRUnboxedTupleSection i) = text "The" <+> speakNth i <+> text "component of the unboxed tuple section" pprFixedRuntimeRepContext (FRRUnboxedSum Nothing) = text "The unboxed sum" pprFixedRuntimeRepContext (FRRUnboxedSum (Just i)) = text "The" <+> speakNth i <+> text "component of the unboxed sum" pprFixedRuntimeRepContext (FRRBodyStmt stmtOrig i) = vcat [ text "The" <+> speakNth i <+> text "argument to (>>)" <> comma , text "arising from the" <+> ppr stmtOrig <> comma ] pprFixedRuntimeRepContext FRRBodyStmtGuard = vcat [ text "The argument to" <+> quotes (text "guard") <> comma , text "arising from the" <+> ppr MonadComprehension <> comma ] pprFixedRuntimeRepContext (FRRBindStmt stmtOrig) = vcat [ text "The first argument to (>>=)" <> comma , text "arising from the" <+> ppr stmtOrig <> comma ] pprFixedRuntimeRepContext FRRBindStmtGuard = sep [ text "The body of the bind statement" ] pprFixedRuntimeRepContext (FRRArrow arrowContext) = pprFRRArrowContext arrowContext pprFixedRuntimeRepContext (FRRExpectedFunTy funTyOrig arg_pos) = pprExpectedFunTyOrigin funTyOrig arg_pos instance Outputable FixedRuntimeRepContext where ppr = pprFixedRuntimeRepContext -- | Are we in a @do@ expression or a monad comprehension? -- -- This datatype is only used to report this context to the user in error messages. data StmtOrigin = MonadComprehension | DoNotation instance Outputable StmtOrigin where ppr MonadComprehension = text "monad comprehension" ppr DoNotation = quotes ( text "do" ) <+> text "statement" -- | The position of an argument (to be reported in an error message). data ArgPos = ArgPosInvis -- ^ Invisible argument: don't report its position to the user. | ArgPosVis !Int -- ^ Visible argument in i-th position. 
{- ********************************************************************* * * FixedRuntimeRep: representation-polymorphic Ids * * ********************************************************************* -} data Polarity = Pos | Neg type FlipPolarity :: Polarity -> Polarity type family FlipPolarity p where FlipPolarity Pos = Neg FlipPolarity Neg = Pos -- | A position in which a type variable appears in a type; -- in particular, whether it appears in a positive or a negative position. type Position :: Polarity -> Hs.Type data Position p where -- | In the @i@-th argument of a function arrow Argument :: Int -> Position (FlipPolarity p) -> Position p -- | In the result of a function arrow Result :: Position p -> Position p -- | At the top level of a type Top :: Position Pos pprFRRRepPolyId :: RepPolyId -> Name -> Position Neg -> SDoc pprFRRRepPolyId id nm (Argument i pos) = text "The" <+> what <+> speakNth i <+> text "argument of" <+> pprRepPolyId id nm where what = case pos of Top -> empty Result {} -> text "return type of the" _ -> text "nested return type inside the" pprFRRRepPolyId id nm (Result {}) = text "The result of" <+> pprRepPolyId id nm pprRepPolyId :: RepPolyId -> Name -> SDoc pprRepPolyId id nm = id_desc <+> quotes (ppr nm) where id_desc = case id of RepPolyPrimOp {} -> text "the primop" RepPolySum {} -> text "the unboxed sum constructor" RepPolyTuple {} -> text "the unboxed tuple constructor" RepPolyFunction {} -> empty {- ********************************************************************* * * FixedRuntimeRep: arrows * * ********************************************************************* -} -- | While typechecking arrow notation, in which context -- did a representation polymorphism check arise? -- -- See 'FixedRuntimeRepContext' for more general origins of -- representation polymorphism checks. data FRRArrowContext -- | The result of an arrow command does not have a fixed runtime representation. -- -- Test case: RepPolyArrowCmd. 
= ArrowCmdResTy !(HsCmd GhcRn) -- | The argument to an arrow in an arrow command application does not have -- a fixed runtime representation. -- -- Test cases: none. | ArrowCmdApp !(HsCmd GhcRn) !(HsExpr GhcRn) -- | A function in an arrow application does not have -- a fixed runtime representation. -- -- Test cases: none. | ArrowCmdArrApp !(HsExpr GhcRn) !(HsExpr GhcRn) !HsArrAppType -- | The scrutinee type in an arrow command case statement does not have a -- fixed runtime representation. -- -- Test cases: none. | ArrowCmdCase -- | The overall type of an arrow proc expression does not have -- a fixed runtime representation. -- -- Test case: RepPolyArrowFun. | ArrowFun !(HsExpr GhcRn) pprFRRArrowContext :: FRRArrowContext -> SDoc pprFRRArrowContext (ArrowCmdResTy cmd) = vcat [ hang (text "The arrow command") 2 (quotes (ppr cmd)) ] pprFRRArrowContext (ArrowCmdApp fun arg) = vcat [ text "The argument in the arrow command application of" , nest 2 (quotes (ppr fun)) , text "to" , nest 2 (quotes (ppr arg)) ] pprFRRArrowContext (ArrowCmdArrApp fun arg ho_app) = vcat [ text "The function in the" <+> pprHsArrType ho_app <+> text "of" , nest 2 (quotes (ppr fun)) , text "to" , nest 2 (quotes (ppr arg)) ] pprFRRArrowContext ArrowCmdCase = text "The scrutinee of the arrow case command" pprFRRArrowContext (ArrowFun fun) = vcat [ text "The return type of the arrow function" , nest 2 (quotes (ppr fun)) ] instance Outputable FRRArrowContext where ppr = pprFRRArrowContext {- ********************************************************************* * * FixedRuntimeRep: ExpectedFunTy origin * * ********************************************************************* -} -- | In what context are we calling 'matchExpectedFunTys' -- or 'matchActualFunTy'? -- -- Used for two things: -- -- 1. Reporting error messages which explain that a function has been -- given an unexpected number of arguments. -- Uses 'pprExpectedFunTyHerald'. 
-- See Note [Herald for matchExpectedFunTys] in GHC.Tc.Utils.Unify. -- -- 2. Reporting representation-polymorphism errors when a function argument -- doesn't have a fixed RuntimeRep as per Note [Fixed RuntimeRep] -- in GHC.Tc.Utils.Concrete. -- Uses 'pprExpectedFunTyOrigin'. -- See 'FixedRuntimeRepContext' for the situations in which -- representation-polymorphism checks are performed. data ExpectedFunTyOrigin -- | A rebindable syntax operator is expected to have a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyDoBind, RepPolyDoBody{1,2}, RepPolyMc{Bind,Body,Guard}, RepPolyNPlusK = forall (p :: Pass) . (OutputableBndrId p) => ExpectedFunTySyntaxOp !CtOrigin !(HsExpr (GhcPass p)) -- ^ rebindable syntax operator -- | A view pattern must have a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyBinder | ExpectedFunTyViewPat !(HsExpr GhcRn) -- ^ function used in the view pattern -- | Need to be able to extract an argument type from a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyApp | forall (p :: Pass) . Outputable (HsExpr (GhcPass p)) => ExpectedFunTyArg !TypedThing -- ^ function !(HsExpr (GhcPass p)) -- ^ argument -- | Ensure that a function defined by equations indeed has a function type -- with the appropriate number of arguments. -- -- Test cases for representation-polymorphism checks: -- RepPolyBinder, RepPolyRecordPattern, RepPolyWildcardPattern | ExpectedFunTyMatches !TypedThing -- ^ name of the function !(MatchGroup GhcRn (LHsExpr GhcRn)) -- ^ equations -- | Ensure that a lambda abstraction has a function type. 
-- -- Test cases for representation-polymorphism checks: -- RepPolyLambda, RepPolyMatch | ExpectedFunTyLam HsLamVariant !(HsExpr GhcRn) -- ^ the entire lambda-case expression pprExpectedFunTyOrigin :: ExpectedFunTyOrigin -> Int -- ^ argument position (starting at 1) -> SDoc pprExpectedFunTyOrigin funTy_origin i = case funTy_origin of ExpectedFunTySyntaxOp orig op -> vcat [ sep [ the_arg_of , text "the rebindable syntax operator" , quotes (ppr op) ] , nest 2 (ppr orig) ] ExpectedFunTyViewPat expr -> vcat [ the_arg_of <+> text "the view pattern" , nest 2 (ppr expr) ] ExpectedFunTyArg fun arg -> sep [ text "The argument" , quotes (ppr arg) , text "of" , quotes (ppr fun) ] ExpectedFunTyMatches fun (MG { mg_alts = L _ alts }) | null alts -> the_arg_of <+> quotes (ppr fun) | otherwise -> text "The" <+> speakNth i <+> text "pattern in the equation" <> plural alts <+> text "for" <+> quotes (ppr fun) ExpectedFunTyLam lam_variant _ -> binder_of $ lamCaseKeyword lam_variant where the_arg_of :: SDoc the_arg_of = text "The" <+> speakNth i <+> text "argument of" binder_of :: SDoc -> SDoc binder_of what = text "The binder of the" <+> what <+> text "expression" pprExpectedFunTyHerald :: ExpectedFunTyOrigin -> SDoc pprExpectedFunTyHerald (ExpectedFunTySyntaxOp {}) = text "This rebindable syntax expects a function with" pprExpectedFunTyHerald (ExpectedFunTyViewPat {}) = text "A view pattern expression expects" pprExpectedFunTyHerald (ExpectedFunTyArg fun _) = sep [ text "The function" <+> quotes (ppr fun) , text "is applied to" ] pprExpectedFunTyHerald (ExpectedFunTyMatches fun (MG { mg_alts = L _ alts })) = text "The equation" <> plural alts <+> text "for" <+> quotes (ppr fun) <+> hasOrHave alts pprExpectedFunTyHerald (ExpectedFunTyLam lam_variant expr) = sep [ text "The" <+> lamCaseKeyword lam_variant <+> text "expression" <+> quotes (pprSetDepth (PartWay 1) (ppr expr)) -- The pprSetDepth makes the lambda abstraction print briefly , text "has" ] {- 
******************************************************************* * * InstanceWhat * * **********************************************************************-} -- | Indicates if Instance met the Safe Haskell overlapping instances safety -- check. -- -- See Note [Safe Haskell Overlapping Instances] in GHC.Tc.Solver -- See Note [Safe Haskell Overlapping Instances Implementation] in GHC.Tc.Solver type SafeOverlapping = Bool data InstanceWhat -- How did we solve this constraint? = BuiltinEqInstance -- Built-in solver for (t1 ~ t2), (t1 ~~ t2), Coercible t1 t2 -- See GHC.Tc.Solver.InertSet Note [Solved dictionaries] | BuiltinTypeableInstance TyCon -- Built-in solver for Typeable (T t1 .. tn) -- See Note [Well-staged instance evidence] | BuiltinInstance -- Built-in solver for (C t1 .. tn) where C is -- KnownNat, .. etc (classes with no top-level evidence) | LocalInstance -- Solved by a quantified constraint -- See GHC.Tc.Solver.InertSet Note [Solved dictionaries] | TopLevInstance -- Solved by a top-level instance decl { iw_dfun_id :: DFunId , iw_safe_over :: SafeOverlapping , iw_warn :: Maybe (WarningTxt GhcRn) } -- See Note [Implementation of deprecated instances] -- in GHC.Tc.Solver.Dict instance Outputable InstanceWhat where ppr BuiltinInstance = text "a built-in instance" ppr BuiltinTypeableInstance {} = text "a built-in typeable instance" ppr BuiltinEqInstance = text "a built-in equality instance" ppr LocalInstance = text "a locally-quantified instance" ppr (TopLevInstance { iw_dfun_id = dfun }) = hang (text "instance" <+> pprSigmaType (idType dfun)) 2 (text "--" <+> pprDefinedAt (idName dfun)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/Origin.hs-boot0000644000000000000000000000100007346545000022073 0ustar0000000000000000module GHC.Tc.Types.Origin where import GHC.Prelude.Basic ( Int, Maybe ) import GHC.Utils.Misc ( HasDebugCallStack ) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type ) data SkolemInfoAnon data SkolemInfo data FixedRuntimeRepContext data 
FixedRuntimeRepOrigin = FixedRuntimeRepOrigin { frr_type :: Type , frr_context :: FixedRuntimeRepContext } mkFRRUnboxedTuple :: Int -> FixedRuntimeRepContext mkFRRUnboxedSum :: Maybe Int -> FixedRuntimeRepContext unkSkol :: HasDebugCallStack => SkolemInfo ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/Rank.hs0000644000000000000000000000267707346545000020623 0ustar0000000000000000module GHC.Tc.Types.Rank (Rank(..)) where import GHC.Base (Bool) import GHC.Utils.Outputable (Outputable, (<+>), parens, ppr, text) {- Note [Higher rank types] ~~~~~~~~~~~~~~~~~~~~~~~~ Technically Int -> forall a. a->a is still a rank-1 type, but it's not Haskell 98 (#5957). So the validity checker allow a forall after an arrow only if we allow it before -- that is, with Rank2Types or RankNTypes -} data Rank = ArbitraryRank -- Any rank ok | LimitedRank -- Note [Higher rank types] Bool -- Forall ok at top Rank -- Use for function arguments -- Monotypes that could be a polytype through an extension | MonoTypeRankZero -- RankNTypes | MonoTypeTyConArg -- ImpredicativeTypes | MonoTypeSynArg -- LiberalTypeSynonyms | MonoTypeConstraint -- QuantifiedConstraints -- | MustBeMonoType -- Monotype regardless of flags instance Outputable Rank where ppr ArbitraryRank = text "ArbitraryRank" ppr (LimitedRank top_forall_ok r) = text "LimitedRank" <+> ppr top_forall_ok <+> parens (ppr r) ppr MonoTypeRankZero = text "MonoTypeRankZero" ppr MonoTypeTyConArg = text "MonoTypeTyConArg" ppr MonoTypeSynArg = text "MonoTypeSynArg" ppr MonoTypeConstraint = text "MonoTypeConstraint" ppr MustBeMonoType = text "MustBeMonoType" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/TH.hs0000644000000000000000000001163207346545000020232 0ustar0000000000000000module GHC.Tc.Types.TH ( SpliceType(..) , SpliceOrBracket(..) , ThStage(..) , PendingStuff(..) 
, ThLevel , topStage , topAnnStage , topSpliceStage , thLevel , impLevel , outerLevel ) where import GHCi.RemoteTypes import qualified GHC.Internal.TH.Syntax as TH import GHC.Tc.Types.Evidence import GHC.Utils.Outputable import GHC.Prelude import GHC.Tc.Types.TcRef import GHC.Tc.Types.Constraint import GHC.Hs.Expr ( PendingTcSplice, PendingRnSplice ) --------------------------- -- Template Haskell stages and levels --------------------------- data SpliceType = Typed | Untyped data SpliceOrBracket = IsSplice | IsBracket data ThStage -- See Note [Template Haskell state diagram] -- and Note [Template Haskell levels] in GHC.Tc.Gen.Splice -- Start at: Comp -- At bracket: wrap current stage in Brack -- At splice: currently Brack: return to previous stage -- currently Comp/Splice: compile and run = Splice SpliceType -- Inside a top-level splice -- This code will be run *at compile time*; -- the result replaces the splice -- Binding level = 0 | RunSplice (TcRef [ForeignRef (TH.Q ())]) -- Set when running a splice, i.e. NOT when renaming or typechecking the -- Haskell code for the splice. See Note [RunSplice ThLevel]. -- -- Contains a list of mod finalizers collected while executing the splice. -- -- 'addModFinalizer' inserts finalizers here, and from here they are taken -- to construct an @HsSpliced@ annotation for untyped splices. See Note -- [Delaying modFinalizers in untyped splices] in GHC.Rename.Splice. -- -- For typed splices, the typechecker takes finalizers from here and -- inserts them in the list of finalizers in the global environment. -- -- See Note [Collecting modFinalizers in typed splices] in "GHC.Tc.Gen.Splice". 
| Comp -- Ordinary Haskell code -- Binding level = 1 | Brack -- Inside brackets ThStage -- Enclosing stage PendingStuff data PendingStuff = RnPendingUntyped -- Renaming the inside of an *untyped* bracket (TcRef [PendingRnSplice]) -- Pending splices in here | RnPendingTyped -- Renaming the inside of a *typed* bracket | TcPending -- Typechecking the inside of a typed bracket (TcRef [PendingTcSplice]) -- Accumulate pending splices here (TcRef WantedConstraints) -- and type constraints here QuoteWrapper -- A type variable and evidence variable -- for the overall monad of -- the bracket. Splices are checked -- against this monad. The evidence -- variable is used for desugaring -- `lift`. topStage, topAnnStage, topSpliceStage :: ThStage topStage = Comp topAnnStage = Splice Untyped topSpliceStage = Splice Untyped instance Outputable ThStage where ppr (Splice _) = text "Splice" ppr (RunSplice _) = text "RunSplice" ppr Comp = text "Comp" ppr (Brack s _) = text "Brack" <> parens (ppr s) type ThLevel = Int -- NB: see Note [Template Haskell levels] in GHC.Tc.Gen.Splice -- Incremented when going inside a bracket, -- decremented when going inside a splice -- NB: ThLevel is one greater than the 'n' in Fig 2 of the -- original "Template meta-programming for Haskell" paper impLevel, outerLevel :: ThLevel impLevel = 0 -- Imported things; they can be used inside a top level splice outerLevel = 1 -- Things defined outside brackets thLevel :: ThStage -> ThLevel thLevel (Splice _) = 0 thLevel Comp = 1 thLevel (Brack s _) = thLevel s + 1 thLevel (RunSplice _) = 0 -- previously: panic "thLevel: called when running a splice" -- See Note [RunSplice ThLevel]. {- Note [RunSplice ThLevel] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'RunSplice' stage is set when executing a splice, and only when running a splice. In particular it is not set when the splice is renamed or typechecked. However, this is not true. 
`reifyInstances` for example does rename the given type, and these types may contain variables (#9262 allow free variables in reifyInstances). Therefore here we assume that thLevel (RunSplice _) = 0. Proper fix would probably require renaming argument `reifyInstances` separately prior to evaluation of the overall splice. 'RunSplice' is needed to provide a reference where 'addModFinalizer' can insert the finalizer (see Note [Delaying modFinalizers in untyped splices]), and 'addModFinalizer' runs when doing Q things. Therefore, It doesn't make sense to set 'RunSplice' when renaming or typechecking the splice, where 'Splice', 'Brack' or 'Comp' are used instead. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Types/TcRef.hs0000644000000000000000000000221607346545000020720 0ustar0000000000000000module GHC.Tc.Types.TcRef (TcRef, newTcRef, readTcRef, writeTcRef, updTcRef, updTcRefM) where import GHC.Prelude import Control.Monad.IO.Class import Data.IORef -- | Type alias for 'IORef'; the convention is we'll use this for mutable -- bits of data in the typechecker which are updated during typechecking and -- returned at the end. type TcRef a = IORef a -- The following functions are all marked INLINE so that we -- don't end up passing a Monad or MonadIO dictionary. 
newTcRef :: MonadIO m => a -> m (TcRef a) newTcRef = \ a -> liftIO $ newIORef a {-# INLINE newTcRef #-} readTcRef :: MonadIO m => TcRef a -> m a readTcRef = \ ref -> liftIO $ readIORef ref {-# INLINE readTcRef #-} writeTcRef :: MonadIO m => TcRef a -> a -> m () writeTcRef = \ ref a -> liftIO $ writeIORef ref a {-# INLINE writeTcRef #-} updTcRef :: MonadIO m => TcRef a -> (a -> a) -> m () updTcRef = \ ref fn -> liftIO $ modifyIORef' ref fn {-# INLINE updTcRef #-} updTcRefM :: MonadIO m => TcRef a -> (a -> m a) -> m () updTcRefM ref upd = do { contents <- readTcRef ref ; !new_contents <- upd contents ; writeTcRef ref new_contents } {-# INLINE updTcRefM #-}ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Utils/0000755000000000000000000000000007346545000017354 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Utils/TcType.hs0000644000000000000000000027022307346545000021126 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE MultiWayIf #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Types used in the typechecker -- -- This module provides the Type interface for front-end parts of the -- compiler. These parts -- -- * treat "source types" as opaque: -- newtypes, and predicates are meaningful. 
-- * look through usage types -- module GHC.Tc.Utils.TcType ( -------------------------------- -- Types TcType, TcSigmaType, TcTypeFRR, TcSigmaTypeFRR, TcRhoType, TcTauType, TcPredType, TcThetaType, TcTyVar, TcTyVarSet, TcDTyVarSet, TcTyCoVarSet, TcDTyCoVarSet, TcKind, TcCoVar, TcTyCoVar, TcTyVarBinder, TcInvisTVBinder, TcReqTVBinder, TcTyCon, MonoTcTyCon, PolyTcTyCon, TcTyConBinder, KnotTied, ExpType(..), ExpKind, InferResult(..), ExpTypeFRR, ExpSigmaType, ExpSigmaTypeFRR, ExpRhoType, mkCheckExpType, checkingExpType_maybe, checkingExpType, ExpPatType(..), mkCheckExpFunPatTy, mkInvisExpPatType, isVisibleExpPatType, isExpFunPatType, SyntaxOpType(..), synKnownType, mkSynFunTys, -------------------------------- -- TcLevel TcLevel(..), topTcLevel, pushTcLevel, isTopTcLevel, strictlyDeeperThan, deeperThanOrSame, sameDepthAs, tcTypeLevel, tcTyVarLevel, maxTcLevel, minTcLevel, -------------------------------- -- MetaDetails TcTyVarDetails(..), pprTcTyVarDetails, vanillaSkolemTvUnk, MetaDetails(Flexi, Indirect), MetaInfo(..), skolemSkolInfo, isImmutableTyVar, isSkolemTyVar, isMetaTyVar, isMetaTyVarTy, isTyVarTy, tcIsTcTyVar, isTyVarTyVar, isOverlappableTyVar, isTyConableTyVar, ConcreteTvOrigin(..), isConcreteTyVar_maybe, isConcreteTyVar, isConcreteTyVarTy, isConcreteTyVarTy_maybe, isConcreteInfo, ConcreteTyVars, noConcreteTyVars, isAmbiguousTyVar, isCycleBreakerTyVar, metaTyVarRef, metaTyVarInfo, isFlexi, isIndirect, isRuntimeUnkSkol, isQLInstTyVar, isRuntimeUnkTyVar, metaTyVarTcLevel, setMetaTyVarTcLevel, metaTyVarTcLevel_maybe, isTouchableMetaTyVar, isPromotableMetaTyVar, findDupTyVarTvs, mkTyVarNamePairs, -------------------------------- -- Builders mkInfSigmaTy, mkSpecSigmaTy, mkSigmaTy, mkPhiTy, tcMkPhiTy, tcMkDFunSigmaTy, tcMkDFunPhiTy, -------------------------------- -- Splitters getTyVar, getTyVar_maybe, getCastedTyVar_maybe, tcSplitForAllTyVarBinder_maybe, tcSplitForAllTyVarsReqTVBindersN, tcSplitForAllTyVars, tcSplitForAllInvisTyVars, tcSplitSomeForAllTyVars, 
tcSplitForAllReqTVBinders, tcSplitForAllInvisTVBinders, tcSplitPiTys, tcSplitPiTy_maybe, tcSplitForAllTyVarBinders, tcSplitPhiTy, tcSplitPredFunTy_maybe, tcSplitFunTy_maybe, tcSplitFunTys, tcFunArgTy, tcFunResultTy, tcFunResultTyN, tcSplitFunTysN, tcSplitTyConApp, tcSplitTyConApp_maybe, tcTyConAppTyCon, tcTyConAppTyCon_maybe, tcTyConAppArgs, tcSplitAppTy_maybe, tcSplitAppTy, tcSplitAppTys, tcSplitAppTyNoView_maybe, tcSplitSigmaTy, tcSplitSigmaTyBndrs, tcSplitNestedSigmaTys, tcSplitIOType_maybe, --------------------------------- -- Predicates. -- Again, newtypes are opaque isSigmaTy, isRhoTy, isRhoExpTy, isOverloadedTy, isFloatingPrimTy, isDoubleTy, isFloatTy, isIntTy, isWordTy, isStringTy, isIntegerTy, isNaturalTy, isBoolTy, isUnitTy, isCharTy, isTauTy, isTauTyCon, tcIsTyVarTy, isPredTy, isTyVarClassPred, checkValidClsArgs, hasTyVarHead, isRigidTy, anyTy_maybe, -- Re-exported from GHC.Core.TyCo.Compare -- mainly just for back-compat reasons eqType, eqTypes, nonDetCmpType, eqTypeX, pickyEqType, tcEqType, tcEqKind, tcEqTypeNoKindCheck, mayLookIdentical, tcEqTyConApps, eqForAllVis, eqVarBndrs, --------------------------------- -- Misc type manipulators deNoteType, getDFunTyKey, evVarPred, ambigTkvsOfTy, --------------------------------- -- Predicate types mkMinimalBySCs, transSuperClasses, pickCapturedPreds, immSuperClasses, boxEqPred, isImprovementPred, -- * Finding type instances tcTyFamInsts, tcTyFamInstsAndVis, tcTyConAppTyFamInstsAndVis, isTyFamFree, -- * Finding "exact" (non-dead) type variables exactTyCoVarsOfType, exactTyCoVarsOfTypes, anyRewritableTyVar, anyRewritableTyFamApp, UnderFam, --------------------------------- -- Patersons sizes PatersonSize(..), PatersonCondFailure(..), PatersonCondFailureContext(..), ltPatersonSize, pSizeZero, pSizeOne, pSizeType, pSizeTypeX, pSizeTypes, pSizeClassPred, pSizeClassPredX, pSizeTyConApp, noMoreTyVars, allDistinctTyVars, TypeSize, sizeType, sizeTypes, scopedSort, isTerminatingClass, isStuckTypeFamily, 
-------------------------------- -- Reexported from Kind Kind, liftedTypeKind, constraintKind, isLiftedTypeKind, isUnliftedTypeKind, isTYPEorCONSTRAINT, -------------------------------- -- Reexported from Type Type, PredType, ThetaType, PiTyBinder, ForAllTyFlag(..), FunTyFlag(..), mkForAllTy, mkForAllTys, mkInvisForAllTys, mkTyCoInvForAllTys, mkSpecForAllTys, mkTyCoInvForAllTy, mkInfForAllTy, mkInfForAllTys, mkVisFunTy, mkVisFunTyMany, mkVisFunTysMany, mkScaledFunTys, mkInvisFunTy, mkInvisFunTys, mkTyConApp, mkAppTy, mkAppTys, mkTyConTy, mkTyVarTy, mkTyVarTys, mkTyCoVarTy, mkTyCoVarTys, isClassPred, isEqPrimPred, isIPLikePred, isEqPred, isEqualityClass, mkClassPred, tcSplitQuantPredTy, tcSplitDFunTy, tcSplitDFunHead, tcSplitMethodTy, isRuntimeRepVar, isFixedRuntimeRepKind, isVisiblePiTyBinder, isInvisiblePiTyBinder, -- Type substitutions Subst(..), -- Representation visible to a few friends TvSubstEnv, emptySubst, mkEmptySubst, zipTvSubst, mkTvSubstPrs, notElemSubst, unionSubst, getTvSubstEnv, getSubstInScope, extendSubstInScope, extendSubstInScopeList, extendSubstInScopeSet, extendTvSubstAndInScope, Type.lookupTyVar, Type.extendTCvSubst, Type.substTyVarBndr, Type.extendTvSubst, isInScope, mkTCvSubst, mkTvSubst, zipTyEnv, zipCoEnv, Type.substTy, substTys, substScaledTys, substTyWith, substTyWithCoVars, substTyAddInScope, substTyUnchecked, substTysUnchecked, substScaledTyUnchecked, substThetaUnchecked, substTyWithUnchecked, substCoUnchecked, substCoWithUnchecked, substTheta, isUnliftedType, isUnboxedTupleType, isPrimitiveType, coreView, tyCoVarsOfType, tyCoVarsOfTypes, closeOverKinds, tyCoFVsOfType, tyCoFVsOfTypes, tyCoVarsOfTypeDSet, tyCoVarsOfTypesDSet, closeOverKindsDSet, tyCoVarsOfTypeList, tyCoVarsOfTypesList, noFreeVarsOfType, -------------------------------- pprKind, pprParendKind, pprSigmaType, pprType, pprParendType, pprTypeApp, pprTheta, pprParendTheta, pprThetaArrowTy, pprClassPred, pprTCvBndr, pprTCvBndrs, --------------------------------- -- argument 
visibility tyConVisibilities, isNextTyConArgVisible, isNextArgVisible ) where -- friends: import GHC.Prelude import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst ( mkTvSubst, substTyWithCoVars ) import GHC.Core.TyCo.Compare import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr import GHC.Core.Class import GHC.Types.Var import GHC.Types.Var.Set import GHC.Core.Coercion import GHC.Core.Type as Type import GHC.Core.Predicate import GHC.Core.TyCon import {-# SOURCE #-} GHC.Tc.Types.Origin ( SkolemInfo, unkSkol , FixedRuntimeRepOrigin, FixedRuntimeRepContext ) -- others: import GHC.Types.Name as Name -- We use this to make dictionaries for type literals. -- Perhaps there's a better way to do this? import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Builtin.Names import GHC.Builtin.Types ( coercibleClass, eqClass, heqClass, unitTyConKey , listTyCon, constraintKind ) import GHC.Types.Basic import GHC.Utils.Misc import GHC.Data.Maybe import GHC.Data.List.SetOps ( getNth, findDupsEq ) import GHC.Utils.Outputable import GHC.Utils.Panic import Data.IORef ( IORef ) import Data.List.NonEmpty( NonEmpty(..) ) import Data.List ( partition, nub, (\\) ) {- ************************************************************************ * * Types * * ************************************************************************ The type checker divides the generic Type world into the following more structured beasts: sigma ::= forall tyvars. phi -- A sigma type is a qualified type -- -- Note that even if 'tyvars' is empty, theta -- may not be: e.g. (?x::Int) => Int -- Note that 'sigma' is in prenex form: -- all the foralls are at the front. -- A 'phi' type has no foralls to the right of -- an arrow phi :: theta => rho rho ::= sigma -> rho | tau -- A 'tau' type has no quantification anywhere -- Note that the args of a type constructor must be taus tau ::= tyvar | tycon tau_1 .. 
tau_n | tau_1 tau_2 | tau_1 -> tau_2 -- In all cases, a (saturated) type synonym application is legal, -- provided it expands to the required form. Note [TcTyVars and TyVars in the typechecker] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The typechecker uses a lot of type variables with special properties, notably being a unification variable with a mutable reference. These use the 'TcTyVar' variant of Var.Var. Note, though, that a /bound/ type variable can (and probably should) be a TyVar. E.g forall a. a -> a Here 'a' is really just a deBruijn-number; it certainly does not have a significant TcLevel (as every TcTyVar does). So a forall-bound type variable should be TyVars; and hence a TyVar can appear free in a TcType. The type checker and constraint solver can also encounter /free/ type variables that use the 'TyVar' variant of Var.Var, for a couple of reasons: - When typechecking a class decl, say class C (a :: k) where foo :: T a -> Int We have first kind-check the header; fix k and (a:k) to be TyVars, bring 'k' and 'a' into scope, and kind check the signature for 'foo'. In doing so we call solveEqualities to solve any kind equalities in foo's signature. So the solver may see free occurrences of 'k'. See calls to tcExtendTyVarEnv for other places that ordinary TyVars are bought into scope, and hence may show up in the types and kinds generated by GHC.Tc.Gen.HsType. - The pattern-match overlap checker calls the constraint solver, long after TcTyVars have been zonked away It's convenient to simply treat these TyVars as skolem constants, which of course they are. We give them a level number of "outermost", so they behave as global constants. Specifically: * Var.tcTyVarDetails succeeds on a TyVar, returning vanillaSkolemTv, as well as on a TcTyVar. * tcIsTcTyVar returns True for both TyVar and TcTyVar variants of Var.Var. The "tc" prefix means "a type variable that can be encountered by the typechecker". 
This is a bit of a change from an earlier era when we remorselessly insisted on real TcTyVars in the type checker. But that seems unnecessary (for skolems, TyVars are fine) and it's now very hard to guarantee, with the advent of kind equalities. Note [Coercion variables in free variable lists] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are several places in the GHC codebase where functions like tyCoVarsOfType, tyCoVarsOfCt, et al. are used to compute the free type variables of a type. The "Co" part of these functions' names shouldn't be dismissed, as it is entirely possible that they will include coercion variables in addition to type variables! As a result, there are some places in GHC.Tc.Utils.TcType where we must take care to check that a variable is a _type_ variable (using isTyVar) before calling tcTyVarDetails--a partial function that is not defined for coercion variables--on the variable. Failing to do so led to GHC #12785. -} -- See Note [TcTyVars and TyVars in the typechecker] type TcCoVar = CoVar -- Used only during type inference type TcType = Type -- A TcType can have mutable type variables type TcTyCoVar = Var -- Either a TcTyVar or a CoVar -- | A type which has a syntactically fixed RuntimeRep as per -- Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. type TcTypeFRR = TcType -- TODO: consider making this a newtype. type TcTyVarBinder = TyVarBinder type TcInvisTVBinder = InvisTVBinder type TcReqTVBinder = ReqTVBinder -- See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in GHC.Tc.TyCl type TcTyCon = TyCon type MonoTcTyCon = TcTyCon type PolyTcTyCon = TcTyCon type TcTyConBinder = TyConBinder -- With skolem TcTyVars -- These types do not have boxy type variables in them type TcPredType = PredType type TcThetaType = ThetaType type TcSigmaType = TcType -- | A 'TcSigmaTypeFRR' is a 'TcSigmaType' which has a syntactically -- fixed 'RuntimeRep' in the sense of Note [Fixed RuntimeRep] -- in GHC.Tc.Utils.Concrete. 
--
-- In particular, this means that:
--
--  - 'GHC.Types.RepType.typePrimRep' does not panic,
--  - 'GHC.Core.typeLevity_maybe' does not return 'Nothing'.
--
-- This property is important in functions such as 'matchExpectedFunTys', where
-- we want to provide argument types which have a known runtime representation.
-- See Note [Return arguments with a fixed RuntimeRep].
type TcSigmaTypeFRR = TcSigmaType
    -- TODO: consider making this a newtype.

type TcRhoType = TcType  -- Note [TcRhoType]
type TcTauType = TcType
type TcKind    = Kind

type TcTyVarSet    = TyVarSet
type TcTyCoVarSet  = TyCoVarSet
type TcDTyVarSet   = DTyVarSet
type TcDTyCoVarSet = DTyCoVarSet

{- *********************************************************************
*                                                                      *
          ExpType: an "expected type" in the type checker
*                                                                      *
********************************************************************* -}

-- | An expected type to check against during type-checking.
-- See Note [ExpType] in "GHC.Tc.Utils.TcMType", where you'll also find manipulators.
data ExpType = Check TcType
             | Infer !InferResult

data InferResult
  = IR { ir_uniq :: Unique
         -- ^ This 'Unique' is for debugging only

       , ir_lvl  :: TcLevel
         -- ^ See Note [TcLevel of ExpType] in GHC.Tc.Utils.TcMType

       , ir_frr  :: Maybe FixedRuntimeRepContext
         -- ^ See Note [FixedRuntimeRep context in ExpType] in GHC.Tc.Utils.TcMType

       , ir_ref  :: IORef (Maybe TcType) }
         -- ^ The type that fills in this hole should be a @Type@,
         -- that is, its kind should be @TYPE rr@ for some @rr :: RuntimeRep@.
         --
         -- Additionally, if the 'ir_frr' field is @Just frr_orig@ then
         -- @rr@ must be concrete, in the sense of Note [Concrete types]
         -- in GHC.Tc.Utils.Concrete.

type ExpSigmaType = ExpType

-- | An 'ExpType' which has a fixed RuntimeRep.
--
-- For a 'Check' 'ExpType', the stored 'TcType' must have
-- a fixed RuntimeRep. For an 'Infer' 'ExpType', the 'ir_frr'
-- field must be of the form @Just frr_orig@.
type ExpTypeFRR = ExpType

-- | Like 'TcSigmaTypeFRR', but for an expected type.
--
-- See 'ExpTypeFRR'.
type ExpSigmaTypeFRR = ExpTypeFRR
    -- TODO: consider making this a newtype.

type ExpRhoType = ExpType
     -- Invariant: if -XDeepSubsumption is on,
     --            and we are checking (i.e. the ExpRhoType is (Check rho)),
     --            then the `rho` is deeply skolemised

-- | Like 'ExpType', but on kind level
type ExpKind = ExpType

instance Outputable ExpType where
  ppr (Check ty) = text "Check" <> braces (ppr ty)
  ppr (Infer ir) = ppr ir

instance Outputable InferResult where
  ppr (IR { ir_uniq = u, ir_lvl = lvl, ir_frr = mb_frr })
    = text "Infer" <> mb_frr_text <> braces (ppr u <> comma <> ppr lvl)
    where
      mb_frr_text = case mb_frr of
        Just _  -> text "FRR"
        Nothing -> empty

-- | Make an 'ExpType' suitable for checking.
mkCheckExpType :: TcType -> ExpType
mkCheckExpType = Check

-- | Returns the expected type when in checking mode.
checkingExpType_maybe :: ExpType -> Maybe TcType
checkingExpType_maybe (Check ty) = Just ty
checkingExpType_maybe (Infer {}) = Nothing

-- | Returns the expected type when in checking mode.
--   Panics if in inference mode.
checkingExpType :: ExpType -> TcType
checkingExpType (Check ty)    = ty
checkingExpType et@(Infer {}) = pprPanic "checkingExpType" (ppr et)

-- Expected type of a pattern in a lambda or a function left-hand side.
data ExpPatType
  = ExpFunPatTy    (Scaled ExpSigmaTypeFRR)  -- the type A of a function A -> B
  | ExpForAllPatTy ForAllTyBinder            -- the binder (a::A) of  forall (a::A) -> B
                                             --                   or  forall (a :: A). B

mkCheckExpFunPatTy :: Scaled TcType -> ExpPatType
mkCheckExpFunPatTy (Scaled mult ty) = ExpFunPatTy (Scaled mult (mkCheckExpType ty))

mkInvisExpPatType :: InvisTyBinder -> ExpPatType
mkInvisExpPatType (Bndr tv spec) = ExpForAllPatTy (Bndr tv (Invisible spec))

isVisibleExpPatType :: ExpPatType -> Bool
isVisibleExpPatType (ExpForAllPatTy (Bndr _ vis)) = isVisibleForAllTyFlag vis
isVisibleExpPatType (ExpFunPatTy {})              = True

isExpFunPatType :: ExpPatType -> Bool
isExpFunPatType ExpFunPatTy{}    = True
isExpFunPatType ExpForAllPatTy{} = False

instance Outputable ExpPatType where
  ppr (ExpFunPatTy t)     = ppr t
  ppr (ExpForAllPatTy tv) = text "forall" <+> ppr tv

{- *********************************************************************
*                                                                      *
                SyntaxOpType
*                                                                      *
********************************************************************* -}

-- | What to expect for an argument to a rebindable-syntax operator.
-- Quite like 'Type', but allows for holes to be filled in by tcSyntaxOp.
-- The callback called from tcSyntaxOp gets a list of types; the meaning
-- of these types is determined by a left-to-right depth-first traversal
-- of the 'SyntaxOpType' tree. So if you pass in
--
-- > SynAny `SynFun` (SynList `SynFun` SynType Int) `SynFun` SynAny
--
-- you'll get three types back: one for the first 'SynAny', the /element/
-- type of the list, and one for the last 'SynAny'. You don't get anything
-- for the 'SynType', because you've said positively that it should be an
-- Int, and so it shall be.
--
-- You'll also get three multiplicities back: one for each function arrow. See
-- also Note [Linear types] in Multiplicity.
--
-- This is defined here to avoid defining it in "GHC.Tc.Gen.Expr" boot file.
data SyntaxOpType
  = SynAny     -- ^ Any type
  | SynRho     -- ^ A rho type, skolemised or instantiated as appropriate
  | SynList    -- ^ A list type. You get back the element type of the list
  | SynFun SyntaxOpType SyntaxOpType
               -- ^ A function.
  | SynType ExpType   -- ^ A known type.
infixr 0 `SynFun`

-- | Like 'SynType' but accepts a regular TcType
synKnownType :: TcType -> SyntaxOpType
synKnownType = SynType . mkCheckExpType

-- | Like 'mkFunTys' but for 'SyntaxOpType'
mkSynFunTys :: [SyntaxOpType] -> ExpType -> SyntaxOpType
mkSynFunTys arg_tys res_ty = foldr SynFun (SynType res_ty) arg_tys

{- Note [TcRhoType]
~~~~~~~~~~~~~~~~
A TcRhoType has no foralls or contexts at the top
  NO     forall a. a ->  Int
  NO     Eq a => a -> a
  YES    a -> a
  YES    (forall a. a->a) -> Int
  YES    Int -> forall a. a -> Int

************************************************************************
*                                                                      *
        TyVarDetails, MetaDetails, MetaInfo
*                                                                      *
************************************************************************

TyVarDetails gives extra info about type variables, used during type
checking.  It's attached to mutable type variables only.
It's knot-tied back to "GHC.Types.Var".  There is no reason in principle
why "GHC.Types.Var" shouldn't actually have the definition, but it "belongs" here.

Note [TyVars and TcTyVars during type checking]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Var type has constructors TyVar and TcTyVar.  They are used as follows:

* TcTyVar: used /only/ during type checking.  Should never appear
  afterwards.  May contain a mutable field, in the MetaTv case.

* TyVar: is never seen by the constraint solver, except locally
  inside a type like (forall a. [a] ->[a]), where 'a' is a TyVar.
  We instantiate these with TcTyVars before exposing the type
  to the constraint solver.

I have swithered about the latter invariant, excluding TyVars from the
constraint solver.  It's not strictly essential, and indeed
(historically but still there) Var.tcTyVarDetails returns
vanillaSkolemTv for a TyVar.

But ultimately I want to separate Type from TcType, and in that case
we would need to enforce the separation.

Note [Keeping SkolemInfo inside a SkolemTv]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A SkolemTv contains a SkolemInfo, which describes the binding side of that
TcTyVar.  This is very convenient to a consumer of a SkolemTv, but it is
a bit awkward for the /producer/.  Why?  Because sometimes we can't produce
the SkolemInfo until we have the TcTyVars!

Example: in `GHC.Tc.Utils.Unify.tcSkolemise` we create SkolemTvs whose
`SkolemInfo` is `SigSkol`, whose arguments in turn mention the newly-created
SkolemTvs.  So we use a RecursiveDo idiom, like this:

  rec { (wrap, tv_prs, given, rho_ty) <- skolemise skol_info expected_ty
      ; skol_info <- mkSkolemInfo (SigSkol ctxt expected_ty tv_prs) }

Note that the `skol_info` can't be created until we have the `tv_prs` returned
by `skolemise`.  Note also that `skolemise` had better be lazy in `skol_info`.

All uses of this idiom should be flagged with a reference to this Note.
-}

-- A TyVarDetails is inside a TyVar
-- See Note [TyVars and TcTyVars during type checking]
data TcTyVarDetails
  = SkolemTv            -- A skolem
       SkolemInfo       -- See Note [Keeping SkolemInfo inside a SkolemTv]
       TcLevel          -- Level of the implication that binds it
                        -- See GHC.Tc.Utils.Unify Note [Deeper level on the left] for
                        --     how this level number is used
       Bool             -- True <=> this skolem type variable can be overlapped
                        --          when looking up instances
                        -- See Note [Binding when looking up instances] in GHC.Core.InstEnv

  | RuntimeUnk          -- Stands for an as-yet-unknown type in the GHCi
                        -- interactive context

  | MetaTv { mtv_info  :: MetaInfo
           , mtv_ref   :: IORef MetaDetails
           , mtv_tclvl :: TcLevel }  -- See Note [TcLevel invariants]

vanillaSkolemTvUnk :: HasDebugCallStack => TcTyVarDetails
vanillaSkolemTvUnk = SkolemTv unkSkol topTcLevel False

instance Outputable TcTyVarDetails where
  ppr = pprTcTyVarDetails

pprTcTyVarDetails :: TcTyVarDetails -> SDoc
-- For debugging
pprTcTyVarDetails (RuntimeUnk {})          = text "rt"
pprTcTyVarDetails (SkolemTv _sk lvl True)  = text "ssk" <> colon <> ppr lvl
pprTcTyVarDetails (SkolemTv _sk lvl False) = text "sk" <> colon <> ppr lvl
pprTcTyVarDetails (MetaTv { mtv_info = info, mtv_tclvl = tclvl })
  = ppr info <> colon <> ppr tclvl
-----------------------------
data MetaDetails
  = Flexi  -- Flexi type variables unify to become Indirects
  | Indirect TcType

-- | What restrictions are on this metavariable around unification?
-- These are checked in GHC.Tc.Utils.Unify.checkTopShape
data MetaInfo
   = TauTv         -- ^ This MetaTv is an ordinary unification variable
                   -- A TauTv is always filled in with a tau-type, which
                   -- never contains any ForAlls.

   | TyVarTv       -- ^ A variant of TauTv, except that it should not be
                   --   unified with a type, only with a type variable
                   -- See Note [TyVarTv] in GHC.Tc.Utils.TcMType

   | RuntimeUnkTv  -- ^ A unification variable used in the GHCi debugger.
                   -- It /is/ allowed to unify with a polytype, unlike TauTv

   | CycleBreakerTv  -- Used to fix occurs-check problems in Givens
                     -- See Note [Type equality cycles] in
                     -- GHC.Tc.Solver.Equality

   | ConcreteTv ConcreteTvOrigin
       -- ^ A unification variable that can only be unified
       -- with a concrete type, in the sense of
       -- Note [Concrete types] in GHC.Tc.Utils.Concrete.
       -- See Note [ConcreteTv] in GHC.Tc.Utils.Concrete.
       -- See also Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete
       -- for an overview of how this works in context.

instance Outputable MetaDetails where
  ppr Flexi         = text "Flexi"
  ppr (Indirect ty) = text "Indirect" <+> ppr ty

instance Outputable MetaInfo where
  ppr TauTv           = text "tau"
  ppr TyVarTv         = text "tyv"
  ppr RuntimeUnkTv    = text "rutv"
  ppr CycleBreakerTv  = text "cbv"
  ppr (ConcreteTv {}) = text "conc"

-- | What caused us to create a 'ConcreteTv' metavariable?
-- See Note [ConcreteTv] in GHC.Tc.Utils.Concrete.
data ConcreteTvOrigin
   -- | A 'ConcreteTv' used to enforce the representation-polymorphism invariants.
   --
   -- See 'FixedRuntimeRepOrigin' for more information.
  = ConcreteFRR FixedRuntimeRepOrigin

-- | A mapping from skolem type variable 'Name' to concreteness information,
--
-- See Note [Representation-polymorphism checking built-ins] in GHC.Tc.Utils.Concrete.
type ConcreteTyVars = NameEnv ConcreteTvOrigin

-- | The 'Id' has no outer forall'd type variables which must be instantiated
-- to concrete types.
noConcreteTyVars :: ConcreteTyVars
noConcreteTyVars = emptyNameEnv

{- *********************************************************************
*                                                                      *
                Untouchable type variables
*                                                                      *
********************************************************************* -}

data TcLevel = TcLevel {-# UNPACK #-} !Int
             | QLInstVar
  -- See Note [TcLevel invariants] for what this Int is
  -- See also Note [TcLevel assignment]
  -- See also Note [The QLInstVar TcLevel]

{-
Note [TcLevel invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~
* Each unification variable (MetaTv)
  and skolem (SkolemTv)
  and each Implication
  has a level number (of type TcLevel)

* INVARIANT (KindInv) Given a type variable (tv::ki) at level L,
                      the free vars of `ki` all have level <= L

* INVARIANTS.  In a tree of Implications,

    (ImplicInv) The level number (ic_tclvl) of an Implication is
                STRICTLY GREATER THAN that of its parent

    (SkolInv)   The level number of the skolems (ic_skols) of an
                Implication is equal to the level of the implication
                itself (ic_tclvl)

    (GivenInv)  The level number of a unification variable appearing
                in the 'ic_given' of an implication I should be
                STRICTLY LESS THAN the ic_tclvl of I
                See Note [GivenInv]

    (WantedInv) The level number of a unification variable appearing
                in the 'ic_wanted' of an implication I should be
                LESS THAN OR EQUAL TO the ic_tclvl of I
                See Note [WantedInv]

The level of a MetaTyVar also governs its untouchability.  See
Note [Unification preconditions] in GHC.Tc.Utils.Unify.

-- See also Note [The QLInstVar TcLevel]

Note [TcLevel assignment]
~~~~~~~~~~~~~~~~~~~~~~~~~
We arrange the TcLevels like this

   0           Top level
   1           First-level implication constraints
   2           Second-level implication constraints
   ...etc...
   QLInstVar   The level for QuickLook instantiation variables
               See Note [The QLInstVar TcLevel]

Note [The QLInstVar TcLevel]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
QuickLook instantiation variables are identified by having a TcLevel
of QLInstVar.  See Note [Quick Look overview] in GHC.Tc.Gen.App.

The QLInstVar level behaves like infinity: it is greater than any other
TcLevel.  See `strictlyDeeperThan` and friends in this module.  That
ensures that we never unify an ordinary unification variable with a QL
instantiation variable, e.g.
     alpha[tau:3] := Maybe beta[tau:qlinstvar]
(This is an immediate consequence of our general rule that we never
unify a variable with a type mentioning deeper variables; the skolem
escape check.)

QL instantiation variables are eventually turned into ordinary unification
variables; see (QL3) in Note [Quick Look overview].

Note [GivenInv]
~~~~~~~~~~~~~~~
Invariant (GivenInv) is not essential, but it is easy to guarantee, and
it is a useful extra piece of structure.  It ensures that the Givens of
an implication don't change because of unifications /at the same level/
caused by Wanteds.  (Wanteds can also cause unifications at an outer
level, but that will iterate the entire implication; see GHC.Tc.Solver.Monad
Note [The Unification Level Flag].)

Givens can certainly contain meta-tyvars from /outer/ levels.  E.g.
   data T a where
     MkT :: Eq a => a -> MkT a

   f x = case x of MkT y -> y && True

Then we'll infer (x :: T alpha[1]).  The Givens from the implication
arising from the pattern match will look like this:
   forall[2] . Eq alpha[1] => (alpha[1] ~ Bool)

But if we unify alpha (which in this case we will), we'll iterate
the entire implication via Note [The Unification Level Flag] in
GHC.Tc.Solver.Monad.  That isn't true of unifications at the /ambient/
level.

It would be entirely possible to weaken (GivenInv), to LESS THAN OR
EQUAL TO, but we'd need to think carefully about
  - kick-out for Givens
  - GHC.Tc.Solver.Monad.isOuterTyVar
But in fact (GivenInv) is automatically true, so we're adhering to
it for now.  See #18929.

* If a tyvar tv has level n, then the levels of all variables free
  in tv's kind are <= n.  Consequence: if tv is untouchable, so are
  all variables in tv's kind.

Note [WantedInv]
~~~~~~~~~~~~~~~~
Why is WantedInv important?  Consider this implication, where
the constraint (C alpha[3]) disobeys WantedInv:

   forall[2] a. blah => (C alpha[3])
                        (forall[3] b. alpha[3] ~ b)

We can unify alpha:=b in the inner implication, because 'alpha' is
touchable; but then 'b' has escaped its scope into the outer implication.
-}

maxTcLevel :: TcLevel -> TcLevel -> TcLevel
maxTcLevel (TcLevel a) (TcLevel b)
  | a > b     = TcLevel a
  | otherwise = TcLevel b
maxTcLevel _ _ = QLInstVar

minTcLevel :: TcLevel -> TcLevel -> TcLevel
minTcLevel tcla@(TcLevel a) tclb@(TcLevel b)
  | a < b     = tcla
  | otherwise = tclb
minTcLevel tcla@(TcLevel {}) QLInstVar         = tcla
minTcLevel QLInstVar         tclb@(TcLevel {}) = tclb
minTcLevel QLInstVar         QLInstVar         = QLInstVar

topTcLevel :: TcLevel
-- See Note [TcLevel assignment]
topTcLevel = TcLevel 0   -- 0 = outermost level

isTopTcLevel :: TcLevel -> Bool
isTopTcLevel (TcLevel 0) = True
isTopTcLevel _           = False

pushTcLevel :: TcLevel -> TcLevel
-- See Note [TcLevel assignment]
pushTcLevel (TcLevel us) = TcLevel (us + 1)
pushTcLevel QLInstVar    = QLInstVar

strictlyDeeperThan :: TcLevel -> TcLevel -> Bool
-- See Note [The QLInstVar TcLevel]
strictlyDeeperThan (TcLevel tv_tclvl) (TcLevel ctxt_tclvl)
  = tv_tclvl > ctxt_tclvl
strictlyDeeperThan QLInstVar (TcLevel {}) = True
strictlyDeeperThan _ _ = False

deeperThanOrSame :: TcLevel -> TcLevel -> Bool
-- See Note [The QLInstVar TcLevel]
deeperThanOrSame (TcLevel tv_tclvl) (TcLevel ctxt_tclvl)
  = tv_tclvl >= ctxt_tclvl
deeperThanOrSame (TcLevel {}) QLInstVar = False
deeperThanOrSame QLInstVar   _          = True

sameDepthAs :: TcLevel -> TcLevel -> Bool
sameDepthAs (TcLevel ctxt_tclvl) (TcLevel tv_tclvl)
  = ctxt_tclvl == tv_tclvl   -- NB: invariant ctxt_tclvl >= tv_tclvl
                             --     So <= would be equivalent
sameDepthAs QLInstVar QLInstVar = True
sameDepthAs _ _ = False

checkTcLevelInvariant :: TcLevel -> TcLevel -> Bool
-- Checks (WantedInv) from Note [TcLevel invariants]
checkTcLevelInvariant ctxt_tclvl tv_tclvl
  = ctxt_tclvl `deeperThanOrSame` tv_tclvl

-- Returns topTcLevel for non-TcTyVars
tcTyVarLevel :: TcTyVar -> TcLevel
tcTyVarLevel tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_tclvl = tv_lvl } -> tv_lvl
      SkolemTv _ tv_lvl _           -> tv_lvl
      RuntimeUnk                    -> topTcLevel

tcTypeLevel :: TcType -> TcLevel
-- Max level of any free var of the type
tcTypeLevel ty
  = nonDetStrictFoldDVarSet add topTcLevel (tyCoVarsOfTypeDSet ty)
    -- It's safe to use a non-deterministic fold because `maxTcLevel` is
    -- commutative.
  where
    add v lvl
      | isTcTyVar v = lvl `maxTcLevel` tcTyVarLevel v
      | otherwise   = lvl

instance Outputable TcLevel where
  ppr (TcLevel n) = ppr n
  ppr QLInstVar   = text "qlinst"

{- *********************************************************************
*                                                                      *
    Finding type family instances
*                                                                      *
************************************************************************ -}

-- | Finds outermost type-family applications occurring in a type,
-- after expanding synonyms.  In the list (F, tys) that is returned
-- we guarantee that tys matches F's arity.  For example, given
--    type family F a :: * -> *    (arity 1)
-- calling tcTyFamInsts on (Maybe (F Int Bool) will return
--     (F, [Int]), not (F, [Int,Bool])
--
-- This is important for its use in deciding termination of type
-- instances (see #11581).  E.g.
--    type instance G [Int] = ...(F Int \)...
-- we don't need to take \ into account when asking if
-- the calls on the RHS are smaller than the LHS
tcTyFamInsts :: Type -> [(TyCon, [Type])]
tcTyFamInsts = map (\(_,b,c) -> (b,c)) . tcTyFamInstsAndVis

-- | Like 'tcTyFamInsts', except that the output records whether the
-- type family and its arguments occur as an /invisible/ argument in
-- some type application. This information is useful because it helps GHC know
-- when to turn on @-fprint-explicit-kinds@ during error reporting so that
-- users can actually see the type family being mentioned.
--
-- As an example, consider:
--
-- @
-- class C a
-- data T (a :: k)
-- type family F a :: k
-- instance C (T @(F Int) (F Bool))
-- @
--
-- There are two occurrences of the type family `F` in that `C` instance, so
-- @'tcTyFamInstsAndVis' (C (T \@(F Int) (F Bool)))@ will return:
--
-- @
-- [ ('True', F, [Int])
-- , ('False', F, [Bool]) ]
-- @
--
-- @F Int@ is paired with 'True' since it appears as an /invisible/ argument
-- to @C@, whereas @F Bool@ is paired with 'False' since it appears as a
-- /visible/ argument to @C@.
--
-- See also Note [Showing invisible bits of types in error messages]
-- in "GHC.Tc.Errors.Ppr".
tcTyFamInstsAndVis :: Type -> [(Bool, TyCon, [Type])]
tcTyFamInstsAndVis = tcTyFamInstsAndVisX False

tcTyFamInstsAndVisX
  :: Bool -- ^ Is this an invisible argument to some type application?
  -> Type -> [(Bool, TyCon, [Type])]
tcTyFamInstsAndVisX = go
  where
    go is_invis_arg ty
      | Just exp_ty <- coreView ty     = go is_invis_arg exp_ty
    go _ (TyVarTy _)                   = []
    go is_invis_arg (TyConApp tc tys)
      | isTypeFamilyTyCon tc
      = [(is_invis_arg, tc, take (tyConArity tc) tys)]
      | otherwise
      = tcTyConAppTyFamInstsAndVisX is_invis_arg tc tys
    go _            (LitTy {})         = []
    go is_invis_arg (ForAllTy bndr ty) = go is_invis_arg (binderType bndr)
                                         ++ go is_invis_arg ty
    go is_invis_arg (FunTy _ w ty1 ty2) = go is_invis_arg w
                                          ++ go is_invis_arg ty1
                                          ++ go is_invis_arg ty2
    go is_invis_arg ty@(AppTy _ _) =
      let (ty_head, ty_args) = splitAppTys ty
          ty_arg_flags       = appTyForAllTyFlags ty_head ty_args
      in go is_invis_arg ty_head
         ++ concat (zipWith (\flag -> go (isInvisibleForAllTyFlag flag))
                            ty_arg_flags ty_args)
    go is_invis_arg (CastTy ty _)      = go is_invis_arg ty
    go _            (CoercionTy _)     = [] -- don't count tyfams in coercions,
                                            -- as they never get normalized,
                                            -- anyway

-- | In an application of a 'TyCon' to some arguments, find the outermost
-- occurrences of type family applications within the arguments. This function
-- will not consider the 'TyCon' itself when checking for type family
-- applications.
--
-- See 'tcTyFamInstsAndVis' for more details on how this works (as this
-- function is called inside of 'tcTyFamInstsAndVis').
tcTyConAppTyFamInstsAndVis :: TyCon -> [Type] -> [(Bool, TyCon, [Type])]
tcTyConAppTyFamInstsAndVis = tcTyConAppTyFamInstsAndVisX False

tcTyConAppTyFamInstsAndVisX
  :: Bool -- ^ Is this an invisible argument to some type application?
  -> TyCon -> [Type] -> [(Bool, TyCon, [Type])]
tcTyConAppTyFamInstsAndVisX is_invis_arg tc tys =
  let (invis_tys, vis_tys) = partitionInvisibleTypes tc tys
  in concat $ map (tcTyFamInstsAndVisX True)         invis_tys
           ++ map (tcTyFamInstsAndVisX is_invis_arg) vis_tys

isTyFamFree :: Type -> Bool
-- ^ Check that a type does not contain any type family applications.
isTyFamFree = null . tcTyFamInsts

type UnderFam = Bool   -- True <=> we are in the argument of a type family application

any_rewritable :: EqRel    -- Ambient role
               -> (UnderFam -> EqRel -> TcTyVar -> Bool)           -- Check tyvar
               -> (UnderFam -> EqRel -> TyCon -> [TcType] -> Bool) -- Check type family application
               -> TcType -> Bool
-- Checks every tyvar and tyconapp (not including FunTys) within a type,
-- ORing the results of the predicates above together
-- Do not look inside casts and coercions
-- See Note [anyRewritableTyVar must be role-aware]
--
-- This looks like it should use foldTyCo, but that function is
-- role-agnostic, and this one must be role-aware.  We could make
-- foldTyCon role-aware, but that may slow down more common usages.
--
-- See Note [Rewritable] in GHC.Tc.Solver.InertSet for a specification for this function.
{-# INLINE any_rewritable #-} -- this allows specialization of predicates
any_rewritable role tv_pred tc_pred ty
  = go False emptyVarSet role ty
  where
    go_tv uf bvs rl tv | tv `elemVarSet` bvs = False
                       | otherwise           = tv_pred uf rl tv

    go :: UnderFam -> VarSet -> EqRel -> TcType -> Bool
    go under_fam bvs rl (TyConApp tc tys)
      -- Expand synonyms, unless (a) we are at Nominal role and (b) the synonym
      -- is type-family-free; then it suffices just to look at the args
      | isTypeSynonymTyCon tc
      , case rl of { NomEq -> not (isFamFreeTyCon tc); ReprEq -> True }
      , Just ty' <- expandSynTyConApp_maybe tc tys
      = go under_fam bvs rl ty'

      -- Check if we are going under a type family application
      | case rl of
           NomEq  -> isTypeFamilyTyCon tc
           ReprEq -> isFamilyTyCon tc
      = if | tc_pred under_fam rl tc tys -> True
           | otherwise                   -> go_fam under_fam (tyConArity tc) bvs tys

      | otherwise
      = go_tc under_fam bvs rl tc tys

    go uf bvs rl (TyVarTy tv)    = go_tv uf bvs rl tv
    go _  _   _  (LitTy {})      = False
    go uf bvs rl (AppTy fun arg) = go uf bvs rl fun || go uf bvs NomEq arg
    go uf bvs rl (FunTy _ w arg res)
      = go uf bvs NomEq arg_rep || go uf bvs NomEq res_rep ||
        go uf bvs rl arg || go uf bvs rl res || go uf bvs NomEq w
      where arg_rep = getRuntimeRep arg -- forgetting these causes #17024
            res_rep = getRuntimeRep res
anyRewritableTyFamApp role check_tyconapp
  = any_rewritable role (\ _ _ _ -> False) check_tyconapp

{- Note [anyRewritableTyVar must be role-aware]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
anyRewritableTyVar is used during kick-out from the inert set,
to decide if, given a new equality (a ~ ty), we should kick out
a constraint C.  Rather than gather free variables and see if 'a'
is among them, we instead pass in a predicate; this is just efficiency.

Moreover, consider
  work item:   [G] a ~R f b
  inert item:  [G] b ~R f a
We use anyRewritableTyVar to decide whether to kick out the inert item,
on the grounds that the work item might rewrite it.  Well, 'a' is certainly
free in [G] b ~R f a.  But because the role of a type variable ('f' in
this case) is nominal, the work item can't actually rewrite the inert item.
Moreover, if we were to kick out the inert item the exact same situation
would re-occur and we end up with an infinite loop in which each kicks
out the other (#14363).
-}

{- *********************************************************************
*                                                                      *
          The "exact" free variables of a type
*                                                                      *
********************************************************************* -}

{- Note [Silly type synonym]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
  type T a = Int
What are the free tyvars of (T x)?  Empty, of course!

exactTyCoVarsOfType is used by the type checker to figure out exactly
which type variables are mentioned in a type.  It only matters
occasionally -- see the calls to exactTyCoVarsOfType.

We place this function here in GHC.Tc.Utils.TcType, not in GHC.Core.TyCo.FVs,
because we want to "see" coreView (efficiency issue only).
-}

exactTyCoVarsOfType  :: Type   -> TyCoVarSet
exactTyCoVarsOfTypes :: [Type] -> TyCoVarSet
-- Find the free type variables (of any kind)
-- but *expand* type synonyms.  See Note [Silly type synonym] above.
exactTyCoVarsOfType  ty  = runTyCoVars (exact_ty ty)
exactTyCoVarsOfTypes tys = runTyCoVars (exact_tys tys)

exact_ty  :: Type   -> Endo TyCoVarSet
exact_tys :: [Type] -> Endo TyCoVarSet
(exact_ty, exact_tys, _, _) = foldTyCo exactTcvFolder emptyVarSet

exactTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet)
exactTcvFolder = deepTcvFolder { tcf_view = coreView }
                 -- This is the key line

{-
************************************************************************
*                                                                      *
                Predicates
*                                                                      *
************************************************************************
-}

tcIsTcTyVar :: TcTyVar -> Bool
-- See Note [TcTyVars and TyVars in the typechecker]
tcIsTcTyVar tv = isTyVar tv

isPromotableMetaTyVar :: TcTyVar -> Bool
-- True if this is a meta-tyvar that can be
-- promoted to an outer level
isPromotableMetaTyVar tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  , MetaTv { mtv_info = info } <- tcTyVarDetails tv
  = isTouchableInfo info   -- Can't promote cycle breakers
  | otherwise
  = False

isTouchableMetaTyVar :: TcLevel -> TcTyVar -> Bool
isTouchableMetaTyVar ctxt_tclvl tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  , MetaTv { mtv_tclvl = tv_tclvl, mtv_info = info } <- tcTyVarDetails tv
  , isTouchableInfo info
  = assertPpr (checkTcLevelInvariant ctxt_tclvl tv_tclvl)
              (ppr tv $$ ppr tv_tclvl $$ ppr ctxt_tclvl) $
    tv_tclvl `sameDepthAs` ctxt_tclvl
  | otherwise = False

isImmutableTyVar :: TyVar -> Bool
isImmutableTyVar tv = isSkolemTyVar tv

isTyConableTyVar, isSkolemTyVar, isOverlappableTyVar,
  isMetaTyVar, isAmbiguousTyVar, isCycleBreakerTyVar :: TcTyVar -> Bool

isTyConableTyVar tv
        -- True of a meta-type variable that can be filled in
        -- with a type constructor application; in particular,
        -- not a TyVarTv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  = case tcTyVarDetails tv of
        MetaTv { mtv_info = TyVarTv } -> False
        _                             -> True
  | otherwise = True

isSkolemTyVar tv
  = assertPpr (tcIsTcTyVar tv) (ppr tv) $
    case tcTyVarDetails tv of
        MetaTv {} -> False
        _other    -> True

skolemSkolInfo :: TcTyVar -> SkolemInfo
skolemSkolInfo tv = assert (isSkolemTyVar tv) $
                    case tcTyVarDetails tv of
                      SkolemTv skol_info _ _ -> skol_info
                      RuntimeUnk -> panic "RuntimeUnk"
                      MetaTv {} -> panic "skolemSkolInfo"

isOverlappableTyVar tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  = case tcTyVarDetails tv of
        SkolemTv _ _ overlappable -> overlappable
        _                         -> False
  | otherwise = False

isMetaTyVar tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  = case tcTyVarDetails tv of
        MetaTv {} -> True
        _         -> False
  | otherwise = False

-- isAmbiguousTyVar is used only when reporting type errors
-- It picks out variables that are unbound, namely meta
-- type variables and the RuntimeUnk variables created by
-- GHC.Runtime.Heap.Inspect.zonkRTTIType.  These are "ambiguous" in
-- the sense that they stand for an as-yet-unknown type
isAmbiguousTyVar tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  = case tcTyVarDetails tv of
        MetaTv {}     -> True
        RuntimeUnk {} -> True
        _             -> False
  | otherwise = False

-- | True of a QuickLook instantiation variable (mtv_tclvl = QLInstVar);
-- see Note [The QLInstVar TcLevel]
isQLInstTyVar :: TcTyVar -> Bool
isQLInstTyVar tv = case tcTyVarDetails tv of
                      MetaTv { mtv_tclvl = QLInstVar } -> True
                      _                                -> False

-- | True of the GHCi debugger's 'RuntimeUnkTv' metavariables
isRuntimeUnkTyVar :: TcTyVar -> Bool
isRuntimeUnkTyVar tv = case tcTyVarDetails tv of
                         MetaTv { mtv_info = RuntimeUnkTv } -> True
                         _                                  -> False

isCycleBreakerTyVar tv
  | isTyVar tv -- See Note [Coercion variables in free variable lists]
  , MetaTv { mtv_info = CycleBreakerTv } <- tcTyVarDetails tv
  = True
  | otherwise
  = False

-- | Is this type variable a concrete type variable, i.e.
-- it is a metavariable with 'ConcreteTv' 'MetaInfo'?
--
-- Returns the 'ConcreteTvOrigin' stored in the type variable
-- if so, or 'Nothing' otherwise.
isConcreteTyVar_maybe :: TcTyVar -> Maybe ConcreteTvOrigin
isConcreteTyVar_maybe tv
  | isTcTyVar tv
  , MetaTv { mtv_info = ConcreteTv conc_orig } <- tcTyVarDetails tv
  = Just conc_orig
  | otherwise
  = Nothing

isConcreteInfo :: MetaInfo -> Bool
isConcreteInfo (ConcreteTv {}) = True
isConcreteInfo _               = False

-- | Is this type variable a concrete type variable, i.e.
-- it is a metavariable with 'ConcreteTv' 'MetaInfo'?
isConcreteTyVar :: TcTyVar -> Bool
isConcreteTyVar = isJust . isConcreteTyVar_maybe

-- | Is this type concrete type variable, i.e.
-- a metavariable with 'ConcreteTv' 'MetaInfo'?
isConcreteTyVarTy :: TcType -> Bool
isConcreteTyVarTy = isJust . isConcreteTyVarTy_maybe

-- | Is this type a concrete type variable? If so, return
-- the associated 'TcTyVar' and 'ConcreteTvOrigin'.
isConcreteTyVarTy_maybe :: TcType -> Maybe (TcTyVar, ConcreteTvOrigin)
isConcreteTyVarTy_maybe (TyVarTy tv) = (tv, ) <$> isConcreteTyVar_maybe tv
isConcreteTyVarTy_maybe _            = Nothing

isMetaTyVarTy :: TcType -> Bool
isMetaTyVarTy (TyVarTy tv) = isMetaTyVar tv
isMetaTyVarTy _            = False

metaTyVarInfo :: TcTyVar -> MetaInfo
metaTyVarInfo tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_info = info } -> info
      _ -> pprPanic "metaTyVarInfo" (ppr tv)

isTouchableInfo :: MetaInfo -> Bool
isTouchableInfo info
  | CycleBreakerTv <- info = False
  | otherwise              = True

metaTyVarTcLevel :: TcTyVar -> TcLevel
metaTyVarTcLevel tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_tclvl = tclvl } -> tclvl
      _ -> pprPanic "metaTyVarTcLevel" (ppr tv)

metaTyVarTcLevel_maybe :: TcTyVar -> Maybe TcLevel
metaTyVarTcLevel_maybe tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_tclvl = tclvl } -> Just tclvl
      _                            -> Nothing

metaTyVarRef :: TyVar -> IORef MetaDetails
metaTyVarRef tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_ref = ref } -> ref
      _ -> pprPanic "metaTyVarRef" (ppr tv)

setMetaTyVarTcLevel :: TcTyVar -> TcLevel -> TcTyVar
setMetaTyVarTcLevel tv tclvl
  = case tcTyVarDetails tv of
      details@(MetaTv {}) -> setTcTyVarDetails tv (details { mtv_tclvl = tclvl })
      _ -> pprPanic "metaTyVarTcLevel" (ppr tv)

isTyVarTyVar :: Var -> Bool
isTyVarTyVar tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_info = TyVarTv } -> True
      _                             -> False

isFlexi, isIndirect :: MetaDetails -> Bool
isFlexi Flexi = True
isFlexi _     = False

isIndirect (Indirect _) = True
isIndirect _            = False

isRuntimeUnkSkol :: TyVar -> Bool
-- Called only in GHC.Tc.Errors; see Note [Runtime skolems] there
isRuntimeUnkSkol x
  | RuntimeUnk <- tcTyVarDetails x = True
  | otherwise                      = False

mkTyVarNamePairs :: [TyVar] -> [(Name,TyVar)]
-- Just pair each TyVar with its own name
mkTyVarNamePairs tvs = [(tyVarName tv, tv) | tv <- tvs]

findDupTyVarTvs :: [(Name,TcTyVar)] -> [(Name,Name)]
-- If we have [...(x1,tv)...(x2,tv)...]
-- return (x1,x2) in the result list
findDupTyVarTvs prs
  = concatMap mk_result_prs $
    findDupsEq eq_snd prs
  where
    eq_snd (_,tv1) (_,tv2) = tv1 == tv2
    mk_result_prs ((n1,_) :| xs) = map (\(n2,_) -> (n1,n2)) xs

-- | Returns the (kind, type) variables in a type that are
-- as-yet-unknown: metavariables and RuntimeUnks
ambigTkvsOfTy :: TcType -> ([Var],[Var])
ambigTkvsOfTy ty
  = partition (`elemVarSet` dep_tkv_set) ambig_tkvs
  where
    tkvs        = tyCoVarsOfTypeList ty
    ambig_tkvs  = filter isAmbiguousTyVar tkvs
    dep_tkv_set = tyCoVarsOfTypes (map tyVarKind tkvs)

{-
************************************************************************
*                                                                      *
                Tau, sigma and rho
*                                                                      *
************************************************************************
-}

-- | Make a sigma ty where all type variables are 'Inferred'. That is,
-- they cannot be used with visible type application.
mkInfSigmaTy :: HasDebugCallStack => [TyCoVar] -> [PredType] -> Type -> Type
mkInfSigmaTy tyvars theta ty = mkSigmaTy (mkForAllTyBinders Inferred tyvars) theta ty

-- | Make a sigma ty where all type variables are "specified". That is,
-- they can be used with visible type application
mkSpecSigmaTy :: HasDebugCallStack => [TyVar] -> [PredType] -> Type -> Type
mkSpecSigmaTy tyvars preds ty = mkSigmaTy (mkForAllTyBinders Specified tyvars) preds ty

mkSigmaTy :: HasDebugCallStack => [ForAllTyBinder] -> [PredType] -> Type -> Type
-- Result is TypeLike
mkSigmaTy bndrs theta tau = mkForAllTys bndrs (mkPhiTy theta tau)

tcMkDFunSigmaTy :: [TyVar] -> ThetaType -> Type -> Type
tcMkDFunSigmaTy tvs theta res_ty
 = mkForAllTys (mkForAllTyBinders Specified tvs) $
   tcMkDFunPhiTy theta res_ty

mkPhiTy :: HasDebugCallStack => [PredType] -> Type -> Type
-- Result type is TypeLike
mkPhiTy = mkInvisFunTys

tcMkPhiTy :: HasDebugCallStack => [PredType] -> Type -> Type
-- Like mkPhiTy, but with no assertion checks; it is called
-- by the type checker and the result kind may not be zonked yet
-- But the result kind is TypeLike
tcMkPhiTy tys ty = foldr (tcMkInvisFunTy TypeLike) ty tys

tcMkDFunPhiTy :: HasDebugCallStack => [PredType] -> Type -> Type
-- Just like tcMkPhiTy, but result type is ConstraintLike
tcMkDFunPhiTy preds res = foldr (tcMkInvisFunTy ConstraintLike) res preds

---------------
getDFunTyKey :: Type -> OccName -- Get some string from a type, to be used to
                                -- construct a dictionary function name
getDFunTyKey ty | Just ty' <- coreView ty = getDFunTyKey ty'
getDFunTyKey (TyVarTy tv)           = getOccName tv
getDFunTyKey (TyConApp tc _)        = getOccName tc
getDFunTyKey (LitTy x)              = getDFunTyLitKey x
getDFunTyKey (AppTy fun _)          = getDFunTyKey fun
getDFunTyKey (FunTy { ft_af = af }) = getOccName (funTyFlagTyCon af)
getDFunTyKey (ForAllTy _ t)         = getDFunTyKey t
getDFunTyKey (CastTy ty _)          = getDFunTyKey ty
getDFunTyKey t@(CoercionTy _)       = pprPanic "getDFunTyKey" (ppr t)

getDFunTyLitKey :: TyLit -> OccName
getDFunTyLitKey (NumTyLit n)  = mkOccName Name.varName (show n)
getDFunTyLitKey (StrTyLit n)  = mkOccName Name.varName (show n)  -- hm
getDFunTyLitKey (CharTyLit n) = mkOccName Name.varName (show n)

{-
************************************************************************
*                                                                      *
                Expanding and splitting
*                                                                      *
************************************************************************
-}

-- | Splits a forall type into a list of 'PiTyVarBinder's and the inner type.
-- Always succeeds, even if it returns an empty list.
tcSplitPiTys :: Type -> ([PiTyVarBinder], Type)
tcSplitPiTys ty
  = assert (all isTyBinder (fst sty)) sty  -- No CoVar binders here
  where sty = splitPiTys ty

-- | Splits a type into a PiTyVarBinder and a body, if possible.
tcSplitPiTy_maybe :: Type -> Maybe (PiTyVarBinder, Type)
tcSplitPiTy_maybe ty
  = assert (isMaybeTyBinder sty) sty  -- No CoVar binders here
  where
    sty = splitPiTy_maybe ty
    isMaybeTyBinder (Just (t,_)) = isTyBinder t
    isMaybeTyBinder _            = True

tcSplitForAllTyVarBinder_maybe :: Type -> Maybe (TyVarBinder, Type)
tcSplitForAllTyVarBinder_maybe ty | Just ty' <- coreView ty = tcSplitForAllTyVarBinder_maybe ty'
tcSplitForAllTyVarBinder_maybe (ForAllTy tv ty) = assert (isTyVarBinder tv ) Just (tv, ty)
tcSplitForAllTyVarBinder_maybe _                = Nothing

-- | Like 'tcSplitPiTys', but splits off only named binders,
-- returning just the tyvars.
tcSplitForAllTyVars :: Type -> ([TyVar], Type)
tcSplitForAllTyVars ty
  = assert (all isTyVar (fst sty)) sty
  where sty = splitForAllTyCoVars ty

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Invisible'
-- type variable binders.
tcSplitForAllInvisTyVars :: Type -> ([TyVar], Type)
tcSplitForAllInvisTyVars ty = tcSplitSomeForAllTyVars isInvisibleForAllTyFlag ty

-- | Like 'tcSplitForAllTyVars', but only splits a 'ForAllTy' if @argf_pred argf@
-- is 'True', where @argf@ is the visibility of the @ForAllTy@'s binder and
-- @argf_pred@ is a predicate over visibilities provided as an argument to this
-- function.
tcSplitSomeForAllTyVars :: (ForAllTyFlag -> Bool) -> Type -> ([TyVar], Type)
tcSplitSomeForAllTyVars argf_pred ty
  = split ty ty []
  where
    -- 'split' carries the original (un-expanded) type alongside the one being
    -- decomposed: coreView is used only to look through synonyms while hunting
    -- for more binders, and the original form is returned when we stop.
    -- Tyvars are accumulated in reverse and righted at the end.
    split _ (ForAllTy (Bndr tv argf) ty) tvs
      | argf_pred argf = split ty ty (tv:tvs)
    split orig_ty ty tvs
      | Just ty' <- coreView ty = split orig_ty ty' tvs
    split orig_ty _ tvs = (reverse tvs, orig_ty)

tcSplitForAllTyVarsReqTVBindersN :: Arity -> Type -> (Arity, [ForAllTyBinder], Type)
-- Split off at most N /required/ (aka visible) binders, plus any invisible ones
-- in the way, /and/ any trailing invisible ones
tcSplitForAllTyVarsReqTVBindersN n_req ty
  = split n_req ty ty []
  where
    -- Returns the number of required binders still unsatisfied (first
    -- component), so callers can tell whether all n_req were found.
    split n_req _orig_ty (ForAllTy b@(Bndr _ argf) ty) bs
      | isVisibleForAllTyFlag argf, n_req > 0 = split (n_req - 1) ty ty (b:bs)
      | otherwise                             = split n_req       ty ty (b:bs)
        -- Invisible binders are collected without decrementing n_req
    split n_req orig_ty ty bs
      | Just ty' <- coreView ty = split n_req orig_ty ty' bs
    split n_req orig_ty _ty bs = (n_req, reverse bs, orig_ty)

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Required' type
-- variable binders. All split tyvars are annotated with '()'.
tcSplitForAllReqTVBinders :: Type -> ([TcReqTVBinder], Type)
tcSplitForAllReqTVBinders ty = assert (all isTyVarBinder (fst sty) ) sty
  where sty = splitForAllReqTyBinders ty
        -- assert: no CoVar binders expected in typechecker-facing types

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Invisible' type
-- variable binders. All split tyvars are annotated with their 'Specificity'.
tcSplitForAllInvisTVBinders :: Type -> ([TcInvisTVBinder], Type)
tcSplitForAllInvisTVBinders ty = assert (all (isTyVar . binderVar) (fst sty)) sty
  where sty = splitForAllInvisTyBinders ty

-- | Like 'tcSplitForAllTyVars', but splits off only named binders.
tcSplitForAllTyVarBinders :: Type -> ([TyVarBinder], Type) tcSplitForAllTyVarBinders ty = assert (all isTyVarBinder (fst sty)) sty where sty = splitForAllForAllTyBinders ty tcSplitPredFunTy_maybe :: Type -> Maybe (PredType, Type) -- Split off the first predicate argument from a type tcSplitPredFunTy_maybe ty | Just ty' <- coreView ty = tcSplitPredFunTy_maybe ty' tcSplitPredFunTy_maybe (FunTy { ft_af = af, ft_arg = arg, ft_res = res }) | isInvisibleFunArg af = Just (arg, res) tcSplitPredFunTy_maybe _ = Nothing tcSplitPhiTy :: Type -> (ThetaType, Type) tcSplitPhiTy ty = split ty [] where split ty ts = case tcSplitPredFunTy_maybe ty of Just (pred, ty) -> split ty (pred:ts) Nothing -> (reverse ts, ty) -- | Split a sigma type into its parts. This only splits /invisible/ type -- variable binders, as these are the only forms of binder that the typechecker -- will implicitly instantiate. tcSplitSigmaTy :: Type -> ([TyVar], ThetaType, Type) tcSplitSigmaTy ty = case tcSplitForAllInvisTyVars ty of (tvs, rho) -> case tcSplitPhiTy rho of (theta, tau) -> (tvs, theta, tau) tcSplitSigmaTyBndrs :: Type -> ([TcInvisTVBinder], ThetaType, Type) tcSplitSigmaTyBndrs ty = case tcSplitForAllInvisTVBinders ty of (tvs, rho) -> case tcSplitPhiTy rho of (theta, tau) -> (tvs, theta, tau) -- | Split a sigma type into its parts, going underneath as many arrows -- and foralls as possible. See Note [tcSplitNestedSigmaTys] tcSplitNestedSigmaTys :: Type -> ([TyVar], ThetaType, Type) -- See Note [tcSplitNestedSigmaTys] -- NB: This is basically a pure version of deeplyInstantiate (from Unify) that -- doesn't compute an HsWrapper. tcSplitNestedSigmaTys ty -- If there's a forall, split it apart and try splitting the rho type -- underneath it. 
| (arg_tys, body_ty) <- tcSplitFunTys ty , (tvs1, theta1, rho1) <- tcSplitSigmaTy body_ty , not (null tvs1 && null theta1) = let (tvs2, theta2, rho2) = tcSplitNestedSigmaTys rho1 in (tvs1 ++ tvs2, theta1 ++ theta2, mkScaledFunTys arg_tys rho2) -- If there's no forall, we're done. | otherwise = ([], [], ty) {- Note [tcSplitNestedSigmaTys] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ tcSplitNestedSigmaTys splits out all the /nested/ foralls and constraints, including under function arrows. E.g. given this type synonym: type Traversal s t a b = forall f. Applicative f => (a -> f b) -> s -> f t then tcSplitNestedSigmaTys (forall s t a b. C s t a b => Int -> Traversal s t a b) will return ( [s,t,a,b,f] , [C s t a b, Applicative f] , Int -> (a -> f b) -> s -> f t)@. This function is used in these places: * Improving error messages in GHC.Tc.Gen.Head.addFunResCtxt * Validity checking for default methods: GHC.Tc.TyCl.checkValidClass * A couple of calls in the GHCi debugger: GHC.Runtime.Heap.Inspect In other words, just in validity checking and error messages; hence no wrappers or evidence generation. Notice that tcSplitNestedSigmaTys even looks under function arrows; doing so is the Right Thing even with simple subsumption, not just with deep subsumption. -} ----------------------- tcTyConAppTyCon :: Type -> TyCon tcTyConAppTyCon ty = case tcTyConAppTyCon_maybe ty of Just tc -> tc Nothing -> pprPanic "tcTyConAppTyCon" (pprType ty) -- | Like 'tcRepSplitTyConApp_maybe', but only returns the 'TyCon'. 
tcTyConAppTyCon_maybe :: Type -> Maybe TyCon
-- NB: the coreView clause must come first, so synonyms are looked through
tcTyConAppTyCon_maybe ty | Just ty' <- coreView ty = tcTyConAppTyCon_maybe ty'
tcTyConAppTyCon_maybe (TyConApp tc _) = Just tc
tcTyConAppTyCon_maybe (FunTy { ft_af = af }) = Just (funTyFlagTyCon af)
  -- Function types are TyConApps in disguise; recover the arrow TyCon
tcTyConAppTyCon_maybe _ = Nothing

tcTyConAppArgs :: Type -> [Type]
-- Panics (rather than returning Nothing) if the type is not a TyConApp
tcTyConAppArgs ty = case tcSplitTyConApp_maybe ty of
                      Just (_, args) -> args
                      Nothing        -> pprPanic "tcTyConAppArgs" (pprType ty)

-----------------------
tcSplitFunTys :: Type -> ([Scaled Type], Type)
-- Split off all the /visible/ argument arrows; always succeeds
tcSplitFunTys ty = case tcSplitFunTy_maybe ty of
                     Nothing        -> ([], ty)
                     Just (arg,res) -> (arg:args, res')
                        where
                          (args,res') = tcSplitFunTys res

tcSplitFunTy_maybe :: Type -> Maybe (Scaled Type, Type)
-- Only splits function (->) and (-=>), not (=>) or (==>)
tcSplitFunTy_maybe ty
  | Just ty' <- coreView ty = tcSplitFunTy_maybe ty'
tcSplitFunTy_maybe (FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res })
  | isVisibleFunArg af = Just (Scaled w arg, res)
tcSplitFunTy_maybe _ = Nothing
  -- Note the isVisibleFunArg guard
  -- Consider     (?x::Int) => Bool
  -- We don't want to treat this as a function type!
  -- A concrete example is test tc230:
  --      f :: () -> (?p :: ()) => () -> ()
  --
  --      g = f () ()

tcSplitFunTysN :: Arity                        -- n: Number of desired args
               -> TcRhoType
               -> Either Arity                 -- Number of missing arrows
                         ([Scaled TcSigmaType] -- Arg types (always N types)
                         , TcSigmaType)        -- The rest of the type
-- ^ Split off exactly the specified number argument types
-- Returns
--  (Left m) if there are 'm' missing arrows in the type
--  (Right (tys,res)) if the type looks like t1 -> ... -> tn -> res
tcSplitFunTysN n ty
 | n == 0
 = Right ([], ty)
 | Just (arg,res) <- tcSplitFunTy_maybe ty
 = case tcSplitFunTysN (n-1) res of
     Left m            -> Left m
     Right (args,body) -> Right (arg:args, body)
 | otherwise
 = Left n

tcSplitFunTy :: Type -> (Scaled Type, Type)
-- Partial: panics if the type has no visible arrow
tcSplitFunTy  ty = expectJust "tcSplitFunTy" (tcSplitFunTy_maybe ty)

tcFunArgTy :: Type -> Scaled Type
-- First visible argument type; partial, like tcSplitFunTy
tcFunArgTy    ty = fst (tcSplitFunTy ty)

tcFunResultTy :: Type -> Type
-- Result after one visible arrow; partial, like tcSplitFunTy
tcFunResultTy ty = snd (tcSplitFunTy ty)

-- | Strips off n *visible* arguments and returns the resulting type
tcFunResultTyN :: HasDebugCallStack => Arity -> Type -> Type
tcFunResultTyN n ty
  | Right (_, res_ty) <- tcSplitFunTysN n ty
  = res_ty
  | otherwise
  = pprPanic "tcFunResultTyN" (ppr n <+> ppr ty)

-----------------------
tcSplitAppTy_maybe :: Type -> Maybe (Type, Type)
tcSplitAppTy_maybe ty | Just ty' <- coreView ty = tcSplitAppTy_maybe ty'
tcSplitAppTy_maybe ty = tcSplitAppTyNoView_maybe ty

tcSplitAppTy :: Type -> (Type, Type)
-- Partial version of tcSplitAppTy_maybe; panics if the type is not an AppTy
tcSplitAppTy ty = case tcSplitAppTy_maybe ty of
                    Just stuff -> stuff
                    Nothing    -> pprPanic "tcSplitAppTy" (pprType ty)

tcSplitAppTys :: Type -> (Type, [Type])
-- Split off all top-level type applications, e.g. (f a b) ==> (f, [a,b])
tcSplitAppTys ty
  = go ty []
  where
    go ty args = case tcSplitAppTy_maybe ty of
                   Just (ty', arg) -> go ty' (arg:args)
                   Nothing         -> (ty,args)

-----------------------
tcIsTyVarTy :: Type -> Bool
tcIsTyVarTy ty | Just ty' <- coreView ty = tcIsTyVarTy ty'
tcIsTyVarTy (CastTy ty _) = tcIsTyVarTy ty  -- look through casts, as
                                            -- this is only used for
                                            -- e.g., FlexibleContexts
tcIsTyVarTy (TyVarTy _)   = True
tcIsTyVarTy _             = False

-----------------------
tcSplitQuantPredTy :: Type -> ([TyVar], [Type], PredType)
-- Split up the type of a quantified predicate
--    forall tys, theta => head
-- NB splitFunTys, not tcSplitFunTys;
-- the latter specifically stops at PredTy arguments,
-- and we don't want to do that here
tcSplitQuantPredTy ty
  = case tcSplitForAllInvisTyVars ty of { (tvs, rho)    ->
    case splitFunTys rho             of { (theta, head) ->
    (tvs, map scaledThing theta, head) }}
tcSplitDFunTy :: Type -> ([TyVar], [Type], Class, [Type])
-- Split the type of a dictionary function
-- We don't use tcSplitSigmaTy, because a DFun may (with NDP)
-- have non-Pred arguments, as in tcSplitQuantPredTy above
tcSplitDFunTy ty
  = case tcSplitQuantPredTy ty of { (tvs, theta, head)  ->
    case tcSplitDFunHead head  of { (clas, tys)         ->
    (tvs, theta, clas, tys) }}

tcSplitDFunHead :: Type -> (Class, [Type])
tcSplitDFunHead = getClassPredTys

tcSplitMethodTy :: Type -> ([TyVar], PredType, Type)
-- A class method (selector) always has a type like
--   forall as. C as => blah
-- So if the class looks like
--   class C a where
--     op :: forall b. (Eq a, Ix b) => a -> b
-- the class method type looks like
--   op :: forall a. C a => forall b. (Eq a, Ix b) => a -> b
--
-- tcSplitMethodTy just peels off the outer forall and
-- that first predicate
tcSplitMethodTy ty
  | (sel_tyvars,sel_rho) <- tcSplitForAllInvisTyVars ty
  , Just (first_pred, local_meth_ty) <- tcSplitPredFunTy_maybe sel_rho
  = (sel_tyvars, first_pred, local_meth_ty)
  | otherwise
  = pprPanic "tcSplitMethodTy" (ppr ty)

{- *********************************************************************
*                                                                      *
            Predicate types
*                                                                      *
************************************************************************

Deconstructors and tests on predicate types

Note [Kind polymorphic type classes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    class C f where...   -- C :: forall k. k -> Constraint
    g :: forall (f::*). C f => f -> f

Here the (C f) in the signature is really (C * f), and we
don't want to complain that the * isn't a type variable!
-}

isTyVarClassPred :: PredType -> Bool
-- True if the predicate is a class applied only to type variables
isTyVarClassPred ty = case getClassPredTys_maybe ty of
    Just (_, tys) -> all isTyVarTy tys
    _             -> False

-------------------------
checkValidClsArgs :: Bool -> Class -> [KindOrType] -> Bool
-- If the Bool is True (flexible contexts), return True (i.e. ok)
-- Otherwise, check that the type (not kind) args are all headed by a tyvar
--   E.g. (Eq a) accepted, (Eq (f a)) accepted, but (Eq Int) rejected
-- This function is here in GHC.Tc.Utils.TcType, rather than in GHC.Tc.Validity,
-- because it is called from GHC.Tc.Solver, which itself is imported by GHC.Tc.Validity
checkValidClsArgs flexible_contexts cls kts
  | flexible_contexts = True
  | otherwise         = all hasTyVarHead tys
  where
    -- Only the visible (type, not kind) arguments are subject to the check;
    -- see Note [Kind polymorphic type classes] above
    tys = filterOutInvisibleTypes (classTyCon cls) kts

hasTyVarHead :: Type -> Bool
-- Returns true of (a t1 .. tn), where 'a' is a type variable
hasTyVarHead ty                 -- Haskell 98 allows predicates of form
 | tcIsTyVarTy ty = True        --      C (a ty1 .. tyn)
 | otherwise                    -- where a is a type variable
 = case tcSplitAppTy_maybe ty of
      Just (ty, _) -> hasTyVarHead ty
      Nothing      -> False

evVarPred :: EvVar -> PredType
-- The predicate an evidence variable provides evidence for is simply its type
evVarPred var = varType var
  -- Historical note: I used to have an ASSERT here,
  -- checking (isEvVarType (varType var)).  But with something like
  --   f :: c => _ -> _
  -- we end up with (c :: kappa), and (kappa ~ Constraint).  Until
  -- we solve and zonk (which there is no particular reason to do for
  -- partial signatures), (isEvVarType kappa) will return False. But
  -- nothing is wrong.  So I just removed the ASSERT.
--------------------------- boxEqPred :: EqRel -> Type -> Type -> Maybe (Class, [Type]) -- Given (t1 ~# t2) or (t1 ~R# t2) return the boxed version -- (t1 ~ t2) or (t1 `Coercible` t2) boxEqPred eq_rel ty1 ty2 = case eq_rel of NomEq | homo_kind -> Just (eqClass, [k1, ty1, ty2]) | otherwise -> Just (heqClass, [k1, k2, ty1, ty2]) ReprEq | homo_kind -> Just (coercibleClass, [k1, ty1, ty2]) | otherwise -> Nothing -- Sigh: we do not have heterogeneous Coercible -- so we can't abstract over it -- Nothing fundamental: we could add it where k1 = typeKind ty1 k2 = typeKind ty2 homo_kind = k1 `tcEqType` k2 pickCapturedPreds :: TyVarSet -- Quantifying over these -> TcThetaType -- Proposed constraints to quantify -> TcThetaType -- A subset that we can actually quantify -- A simpler version of pickQuantifiablePreds, used to winnow down -- the inferred constraints of a group of bindings, into those for -- one particular identifier pickCapturedPreds qtvs theta = filter captured theta where captured pred = isIPLikePred pred || (tyCoVarsOfType pred `intersectsVarSet` qtvs) -- Superclasses type PredWithSCs a = (PredType, [PredType], a) mkMinimalBySCs :: forall a. (a -> PredType) -> [a] -> [a] -- Remove predicates that -- -- - are the same as another predicate -- -- - can be deduced from another by superclasses, -- -- - are a reflexive equality (e.g * ~ *) -- (see Note [Remove redundant provided dicts] in GHC.Tc.TyCl.PatSyn) -- -- The result is a subset of the input. 
-- The 'a' is just paired up with the PredType; -- typically it might be a dictionary Id mkMinimalBySCs get_pred xs = go preds_with_scs [] where preds_with_scs :: [PredWithSCs a] preds_with_scs = [ (pred, implicants pred, x) | x <- xs , let pred = get_pred x ] go :: [PredWithSCs a] -- Work list -> [PredWithSCs a] -- Accumulating result -> [a] go [] min_preds = reverse (map thdOf3 min_preds) -- The 'reverse' isn't strictly necessary, but it -- means that the results are returned in the same -- order as the input, which is generally saner go (work_item@(p,_,_) : work_list) min_preds | EqPred _ t1 t2 <- classifyPredType p , t1 `tcEqType` t2 -- See GHC.Tc.TyCl.PatSyn -- Note [Remove redundant provided dicts] = go work_list min_preds | p `in_cloud` work_list || p `in_cloud` min_preds -- Why look at work-list too? Suppose work_item is Eq a, -- and work-list contains Ord a = go work_list min_preds | otherwise = go work_list (work_item : min_preds) in_cloud :: PredType -> [PredWithSCs a] -> Bool in_cloud p ps = or [ p `tcEqType` p' | (_, scs, _) <- ps, p' <- scs ] implicants pred = pred : eq_extras pred ++ transSuperClasses pred -- Combine (a ~ b) and (b ~ a); no need to have both in one context -- These can arise when dealing with partial type signatures (e.g. 
T14715) eq_extras pred = case classifyPredType pred of EqPred r t1 t2 -> [mkPrimEqPredRole (eqRelRole r) t2 t1] ClassPred cls [k1,k2,t1,t2] | cls `hasKey` heqTyConKey -> [mkClassPred cls [k2, k1, t2, t1]] ClassPred cls [k,t1,t2] | cls `hasKey` eqTyConKey -> [mkClassPred cls [k, t2, t1]] _ -> [] transSuperClasses :: PredType -> [PredType] -- (transSuperClasses p) returns (p's superclasses) not including p -- Stop if you encounter the same class again -- See Note [Expanding superclasses] transSuperClasses p = go emptyNameSet p where go :: NameSet -> PredType -> [PredType] go rec_clss p | ClassPred cls tys <- classifyPredType p , let cls_nm = className cls , not (cls_nm `elemNameSet` rec_clss) , let rec_clss' | isCTupleClass cls = rec_clss | otherwise = rec_clss `extendNameSet` cls_nm = [ p' | sc <- immSuperClasses cls tys , p' <- sc : go rec_clss' sc ] | otherwise = [] immSuperClasses :: Class -> [Type] -> [PredType] immSuperClasses cls tys = substTheta (zipTvSubst tyvars tys) sc_theta where (tyvars,sc_theta,_,_) = classBigSig cls isImprovementPred :: PredType -> Bool -- Either it's an equality, or has some functional dependency isImprovementPred ty = case classifyPredType ty of EqPred NomEq t1 t2 -> not (t1 `tcEqType` t2) EqPred ReprEq _ _ -> False ClassPred cls _ -> classHasFds cls IrredPred {} -> True -- Might have equalities after reduction? ForAllPred {} -> False {- Note [Expanding superclasses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we expand superclasses, we use the following algorithm: transSuperClasses( C tys ) returns the transitive superclasses of (C tys), not including C itself For example class C a b => D a b class D b a => C a b Then transSuperClasses( Ord ty ) = [Eq ty] transSuperClasses( C ta tb ) = [D tb ta, C tb ta] Notice that in the recursive-superclass case we include C again at the end of the chain. One could exclude C in this case, but the code is more awkward and there seems no good reason to do so. (However C.f. 
GHC.Tc.Solver.Dict.mk_strict_superclasses, which /does/ appear to do so.) The algorithm is expand( so_far, pred ): 1. If pred is not a class constraint, return empty set Otherwise pred = C ts 2. If C is in so_far, return empty set (breaks loops) 3. Find the immediate superclasses constraints of (C ts) 4. For each such sc_pred, return (sc_pred : expand( so_far+C, D ss ) Notice that * With normal Haskell-98 classes, the loop-detector will never bite, so we'll get all the superclasses. * We need the loop-breaker in case we have UndecidableSuperClasses on * Since there is only a finite number of distinct classes, expansion must terminate. * The loop breaking is a bit conservative. Notably, a tuple class could contain many times without threatening termination: (Eq a, (Ord a, Ix a)) And this is try of any class that we can statically guarantee as non-recursive (in some sense). For now, we just make a special case for tuples. Something better would be cool. See also GHC.Tc.TyCl.Utils.checkClassCycles. ************************************************************************ * * Classifying types * * ************************************************************************ -} isSigmaTy :: TcType -> Bool -- isSigmaTy returns true of any type with /invisible/ quantifiers at the top: -- forall a. 
blah -- Eq a => blah -- ?x::Int => blah -- But not -- forall a -> blah isSigmaTy (ForAllTy (Bndr _ af) _) = isInvisibleForAllTyFlag af isSigmaTy (FunTy { ft_af = af }) = isInvisibleFunArg af isSigmaTy ty | Just ty' <- coreView ty = isSigmaTy ty' isSigmaTy _ = False isRhoTy :: TcType -> Bool -- True of TcRhoTypes; see Note [TcRhoType] isRhoTy ty = not (isSigmaTy ty) -- | Like 'isRhoTy', but also says 'True' for 'Infer' types isRhoExpTy :: ExpType -> Bool isRhoExpTy (Check ty) = isRhoTy ty isRhoExpTy (Infer {}) = True isOverloadedTy :: Type -> Bool -- Yes for a type of a function that might require evidence-passing -- Used by bindLocalMethods and for -fprof-late-overloaded isOverloadedTy ty | Just ty' <- coreView ty = isOverloadedTy ty' isOverloadedTy (ForAllTy _ ty) = isOverloadedTy ty isOverloadedTy (FunTy { ft_af = af }) = isInvisibleFunArg af isOverloadedTy _ = False isFloatTy, isDoubleTy, isFloatPrimTy, isDoublePrimTy, isIntegerTy, isNaturalTy, isIntTy, isWordTy, isBoolTy, isUnitTy, isCharTy :: Type -> Bool isFloatTy = is_tc floatTyConKey isDoubleTy = is_tc doubleTyConKey isFloatPrimTy = is_tc floatPrimTyConKey isDoublePrimTy = is_tc doublePrimTyConKey isIntegerTy = is_tc integerTyConKey isNaturalTy = is_tc naturalTyConKey isIntTy = is_tc intTyConKey isWordTy = is_tc wordTyConKey isBoolTy = is_tc boolTyConKey isUnitTy = is_tc unitTyConKey isCharTy = is_tc charTyConKey -- | Check whether the type is of the form @Any :: k@, -- returning the kind @k@. anyTy_maybe :: Type -> Maybe Kind anyTy_maybe ty | Just (tc, [k]) <- splitTyConApp_maybe ty , getUnique tc == anyTyConKey = Just k | otherwise = Nothing -- | Is the type inhabited by machine floating-point numbers? -- -- Used to check that we don't use floating-point literal patterns -- in Core. -- -- See #9238 and Note [Rules for floating-point comparisons] -- in GHC.Core.Opt.ConstantFold. isFloatingPrimTy :: Type -> Bool isFloatingPrimTy ty = isFloatPrimTy ty || isDoublePrimTy ty -- | Is a type 'String'? 
isStringTy :: Type -> Bool isStringTy ty = case tcSplitTyConApp_maybe ty of Just (tc, [arg_ty]) -> tc == listTyCon && isCharTy arg_ty _ -> False is_tc :: Unique -> Type -> Bool -- Newtypes are opaque to this is_tc uniq ty = case tcSplitTyConApp_maybe ty of Just (tc, _) -> uniq == getUnique tc Nothing -> False isRigidTy :: TcType -> Bool isRigidTy ty | Just (tc,_) <- tcSplitTyConApp_maybe ty = isGenerativeTyCon tc Nominal | Just {} <- tcSplitAppTy_maybe ty = True | isForAllTy ty = True | otherwise = False {- ************************************************************************ * * Misc * * ************************************************************************ Note [Visible type application] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC implements a generalisation of the algorithm described in the "Visible Type Application" paper (available from http://www.cis.upenn.edu/~sweirich/publications.html). A key part of that algorithm is to distinguish user-specified variables from inferred variables. For example, the following should typecheck: f :: forall a b. a -> b -> b f = const id g = const id x = f @Int @Bool 5 False y = g 5 @Bool False The idea is that we wish to allow visible type application when we are instantiating a specified, fixed variable. In practice, specified, fixed variables are either written in a type signature (or annotation), OR are imported from another module. (We could do better here, for example by doing SCC analysis on parts of a module and considering any type from outside one's SCC to be fully specified, but this is very confusing to users. The simple rule above is much more straightforward and predictable.) So, both of f's quantified variables are specified and may be instantiated. But g has no type signature, so only id's variable is specified (because id is imported). We write the type of g as forall {a}. a -> forall b. b -> b. Note that the a is in braces, meaning it cannot be instantiated with visible type application. Tracking specified vs. 
inferred variables is done conveniently by a field in PiTyVarBinder. -} deNoteType :: Type -> Type -- Remove all *outermost* type synonyms and other notes deNoteType ty | Just ty' <- coreView ty = deNoteType ty' deNoteType ty = ty {- Find the free tycons and classes of a type. This is used in the front end of the compiler. -} {- ************************************************************************ * * External types * * ************************************************************************ The compiler's foreign function interface supports the passing of a restricted set of types as arguments and results (the restricting factor being the ) -} tcSplitIOType_maybe :: Type -> Maybe (TyCon, Type) -- (tcSplitIOType_maybe t) returns Just (IO,t') -- if t = IO t' -- returns Nothing otherwise tcSplitIOType_maybe ty = case tcSplitTyConApp_maybe ty of Just (io_tycon, [io_res_ty]) | io_tycon `hasKey` ioTyConKey -> Just (io_tycon, io_res_ty) _ -> Nothing {- ************************************************************************ * * Visiblities * * ************************************************************************ -} -- | For every arg a tycon can take, the returned list says True if the argument -- is taken visibly, and False otherwise. Ends with an infinite tail of Trues to -- allow for oversaturation. tyConVisibilities :: TyCon -> [Bool] tyConVisibilities tc = tc_binder_viss ++ tc_return_kind_viss ++ repeat True where tc_binder_viss = map isVisibleTyConBinder (tyConBinders tc) tc_return_kind_viss = map isVisiblePiTyBinder (fst $ tcSplitPiTys (tyConResKind tc)) -- | If the tycon is applied to the types, is the next argument visible? isNextTyConArgVisible :: TyCon -> [Type] -> Bool isNextTyConArgVisible tc tys = tyConVisibilities tc `getNth` length tys -- | Should this type be applied to a visible argument? -- E.g. (s t): is `t` a visible argument of `s`? 
isNextArgVisible :: TcType -> Bool isNextArgVisible ty | Just (bndr, _) <- tcSplitPiTy_maybe (typeKind ty) = isVisiblePiTyBinder bndr | otherwise = True -- this second case might happen if, say, we have an unzonked TauTv. -- But TauTvs can't range over types that take invisible arguments {- ************************************************************************ * * Paterson sizes * * ************************************************************************ -} {- Note [The PatersonSize of a type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The PatersonSize of type is something we can compare, with `ltPatersonSize`, to determine if the Paterson conditions are satisfied for an instance declaration. See Note [Paterson conditions] in GHC.Tc.Validity. There are some wrinkles (PS1) Once we get into an implicit parameter or equality we can't get back to a class constraint, so it's safe to say "size 0". See #4200. We do this with isTerminatingClass Note [Invisible arguments and termination] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When checking the ​Paterson conditions for termination an instance declaration, we check for the number of "constructors and variables" in the instance head and constraints. Question: Do we look at * All the arguments, visible or invisible? * Just the visible arguments? I think both will ensure termination, provided we are consistent. Currently we are /not/ consistent, which is really a bug. It's described in #15177, which contains a number of examples. The suspicious bits are the calls to filterOutInvisibleTypes. See also #11833. Note [Stuck type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~ A type-family application generally has infinite size (PS_TyFam); see (PC3) in Note [Paterson conditions] in GHC.Tc.Validity. But a couple of built-in type families have no axioms, and can never expand into anything else. They are: * (TypeError "stuff"). E.g. 
consider type family F a where F Int = Bool F Bool = Char F _ = TypeError "Bad" We don't want to complain about possible non-termination of F, in GHC.Tc.Validity.checkFamInstRhs. cf indexed-types/should_fail/T13271 * (Any @k). For now we treat them as being size zero, but (#22696) I think we should actually treat them as big (like any other ype family) because we don't want to abstract over them in e.g. validDerivPred. The type-family termination test, in GHC.Tc.Validity.checkFamInstRhs, already has a separate call to isStuckTypeFamily, so the `F` above will still be accepted. -} -- | Why did the Paterson conditions fail; that is, why -- was the context P not Paterson-smaller than the head H? -- -- See Note [Paterson conditions] in GHC.Tc.Validity. data PatersonCondFailure -- | Some type variables occur more often in P than in H. -- See (PC1) in Note [Paterson conditions] in GHC.Tc.Validity. = PCF_TyVar [TyVar] -- ^ the type variables which appear more often in the context -- | P is not smaller in size than H. -- See (PC2) in Note [Paterson conditions] in GHC.Tc.Validity. | PCF_Size -- | P contains a type family. -- See (PC3) in Note [Paterson conditions] in GHC.Tc.Validity. | PCF_TyFam TyCon -- ^ the type constructor of the type family -- | Indicates whether a Paterson condition failure occurred in an instance declaration or a type family equation. -- Useful for differentiating context in error messages. data PatersonCondFailureContext = InInstanceDecl | InTyFamEquation -------------------------------------- -- | The Paterson size of a given type, in the sense of -- Note [Paterson conditions] in GHC.Tc.Validity -- -- - after expanding synonyms, -- - ignoring coercions (as they are not user written). data PatersonSize -- | The type mentions a type family, so the size could be anything. = PS_TyFam TyCon -- | The type does not mention a type family. 
| PS_Vanilla { ps_tvs :: [TyVar] -- ^ free tyvars, including repetitions; , ps_size :: Int -- ^ number of type constructors and variables } -- ToDo: ignore invisible arguments? See Note [Invisible arguments and termination] instance Outputable PatersonSize where ppr (PS_TyFam tc) = text "PS_TyFam" <+> ppr tc ppr (PS_Vanilla { ps_tvs = tvs, ps_size = size }) = text "PS_Vanilla" <> braces (sep [ text "ps_tvs =" <+> ppr tvs <> comma , text "ps_size =" <+> int size ]) pSizeZero, pSizeOne :: PatersonSize pSizeZero = PS_Vanilla { ps_tvs = [], ps_size = 0 } pSizeOne = PS_Vanilla { ps_tvs = [], ps_size = 1 } -- | @ltPatersonSize ps1 ps2@ returns: -- -- - @Nothing@ iff @ps1@ is definitely strictly smaller than @ps2@, -- - @Just ps_fail@ otherwise; @ps_fail@ says what went wrong. ltPatersonSize :: PatersonSize -> PatersonSize -> Maybe PatersonCondFailure ltPatersonSize (PS_Vanilla { ps_tvs = tvs1, ps_size = s1 }) (PS_Vanilla { ps_tvs = tvs2, ps_size = s2 }) | s1 >= s2 = Just PCF_Size | bad_tvs@(_:_) <- noMoreTyVars tvs1 tvs2 = Just (PCF_TyVar bad_tvs) | otherwise = Nothing -- OK! ltPatersonSize (PS_TyFam tc) _ = Just (PCF_TyFam tc) ltPatersonSize _ (PS_TyFam tc) = Just (PCF_TyFam tc) -- NB: this last equation is never taken when checking instances, because -- type families are disallowed in instance heads. -- -- However, this function is also used in the logic for solving superclass -- constraints (see Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance), -- in which case we might well hit this case (see e.g. T23171). noMoreTyVars :: [TyVar] -- Free vars (with repetitions) of the constraint C -> [TyVar] -- Free vars (with repetitions) of the head H -> [TyVar] -- TyVars that appear more often in C than H; -- no repetitions in this list noMoreTyVars tvs head_tvs = nub (tvs \\ head_tvs) -- The (\\) is list difference; e.g. 
-- [a,b,a,a] \\ [a,a] = [b,a] -- So we are counting repetitions addPSize :: PatersonSize -> PatersonSize -> PatersonSize addPSize ps1@(PS_TyFam {}) _ = ps1 addPSize _ ps2@(PS_TyFam {}) = ps2 addPSize (PS_Vanilla { ps_tvs = tvs1, ps_size = s1 }) (PS_Vanilla { ps_tvs = tvs2, ps_size = s2 }) = PS_Vanilla { ps_tvs = tvs1 ++ tvs2, ps_size = s1 + s2 } -- (++) is not very performant, but the types -- are user-written and never large pSizeType :: Type -> PatersonSize pSizeType = pSizeTypeX emptyVarSet pSizeTypes :: [Type] -> PatersonSize pSizeTypes = pSizeTypesX emptyVarSet pSizeZero -- Paterson size of a type, retaining repetitions, and expanding synonyms -- This ignores coercions, as coercions aren't user-written pSizeTypeX :: VarSet -> Type -> PatersonSize pSizeTypeX bvs ty | Just exp_ty <- coreView ty = pSizeTypeX bvs exp_ty pSizeTypeX bvs (TyVarTy tv) | tv `elemVarSet` bvs = pSizeOne | otherwise = PS_Vanilla { ps_tvs = [tv], ps_size = 1 } pSizeTypeX _ (LitTy {}) = pSizeOne pSizeTypeX bvs (TyConApp tc tys) = pSizeTyConAppX bvs tc tys pSizeTypeX bvs (AppTy fun arg) = pSizeTypeX bvs fun `addPSize` pSizeTypeX bvs arg pSizeTypeX bvs (FunTy _ w arg res) = pSizeTypeX bvs w `addPSize` pSizeTypeX bvs arg `addPSize` pSizeTypeX bvs res pSizeTypeX bvs (ForAllTy (Bndr tv _) ty) = pSizeTypeX bvs (tyVarKind tv) `addPSize` pSizeTypeX (bvs `extendVarSet` tv) ty pSizeTypeX bvs (CastTy ty _) = pSizeTypeX bvs ty pSizeTypeX _ (CoercionTy {}) = pSizeOne pSizeTypesX :: VarSet -> PatersonSize -> [Type] -> PatersonSize pSizeTypesX bvs sz tys = foldr (addPSize . pSizeTypeX bvs) sz tys pSizeTyConApp :: TyCon -> [Type] -> PatersonSize pSizeTyConApp = pSizeTyConAppX emptyVarSet pSizeTyConAppX :: VarSet -> TyCon -> [Type] -> PatersonSize -- Open question: do we count all args, or just the visible ones? 
-- See Note [Invisible arguments and termination] pSizeTyConAppX bvs tc tys | isTypeFamilyTyCon tc = pSizeTyFamApp tc | otherwise = pSizeTypesX bvs pSizeOne tys pSizeTyFamApp :: TyCon -> PatersonSize -- See Note [Stuck type families] pSizeTyFamApp tc | isStuckTypeFamily tc = pSizeZero | otherwise = PS_TyFam tc pSizeClassPred :: Class -> [Type] -> PatersonSize pSizeClassPred = pSizeClassPredX emptyVarSet pSizeClassPredX :: VarSet -> Class -> [Type] -> PatersonSize pSizeClassPredX bvs cls tys | isTerminatingClass cls -- See (PS1) in Note [The PatersonSize of a type] = pSizeZero | otherwise = pSizeTypesX bvs pSizeOne $ filterOutInvisibleTypes (classTyCon cls) tys -- filterOutInvisibleTypes Yuk! See Note [Invisible arguments and termination] isStuckTypeFamily :: TyCon -> Bool -- See Note [Stuck type families] isStuckTypeFamily tc = tc `hasKey` errorMessageTypeErrorFamKey || tc `hasKey` anyTyConKey -- | When this says "True", ignore this class constraint during -- a termination check -- See (PS1) in Note [The PatersonSize of a type] isTerminatingClass :: Class -> Bool isTerminatingClass cls = isIPClass cls -- Implicit parameter constraints always terminate because -- there are no instances for them --- they are only solved -- by "local instances" in expressions || isEqualityClass cls || cls `hasKey` typeableClassKey -- Typeable constraints are bigger than they appear due -- to kind polymorphism, but we can never get instance divergence this way || cls `hasKey` unsatisfiableClassNameKey allDistinctTyVars :: TyVarSet -> [KindOrType] -> Bool -- (allDistinctTyVars tvs tys) returns True if tys are -- a) all tyvars -- b) all distinct -- c) disjoint from tvs allDistinctTyVars _ [] = True allDistinctTyVars tkvs (ty : tys) = case getTyVar_maybe ty of Nothing -> False Just tv | tv `elemVarSet` tkvs -> False | otherwise -> allDistinctTyVars (tkvs `extendVarSet` tv) tys ----------------------- type TypeSize = IntWithInf sizeType :: Type -> TypeSize -- Size of a type: the number of 
variables and constructors sizeType ty = toTypeSize (pSizeType ty) sizeTypes :: [Type] -> TypeSize sizeTypes tys = toTypeSize (foldr (addPSize . pSizeType) pSizeZero tys) toTypeSize :: PatersonSize -> TypeSize toTypeSize (PS_TyFam {}) = infinity toTypeSize (PS_Vanilla { ps_size = size }) = mkIntWithInf size ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Utils/TcType.hs-boot0000644000000000000000000000125107346545000022060 0ustar0000000000000000module GHC.Tc.Utils.TcType where import GHC.Utils.Outputable( SDoc ) import GHC.Utils.Misc( HasDebugCallStack ) import GHC.Prelude ( Bool ) import {-# SOURCE #-} GHC.Types.Var ( TcTyVar ) import {-# SOURCE #-} GHC.Tc.Types.Origin ( FixedRuntimeRepOrigin ) import GHC.Types.Name.Env ( NameEnv ) data MetaDetails data TcTyVarDetails pprTcTyVarDetails :: TcTyVarDetails -> SDoc vanillaSkolemTvUnk :: HasDebugCallStack => TcTyVarDetails isMetaTyVar :: TcTyVar -> Bool isTyConableTyVar :: TcTyVar -> Bool type ConcreteTyVars = NameEnv ConcreteTvOrigin data ConcreteTvOrigin = ConcreteFRR FixedRuntimeRepOrigin isConcreteTyVar :: TcTyVar -> Bool noConcreteTyVars :: ConcreteTyVars ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Zonk/0000755000000000000000000000000007346545000017175 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Tc/Zonk/Monad.hs0000644000000000000000000000672607346545000020602 0ustar0000000000000000{-# LANGUAGE DerivingVia #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE RankNTypes #-} -- | The 'ZonkM' monad, a stripped down 'TcM', used when zonking within -- the typechecker in "GHC.Tc.Zonk.TcType". -- -- See Note [Module structure for zonking] in GHC.Tc.Zonk.Type. 
module GHC.Tc.Zonk.Monad ( -- * The 'ZonkM' monad, a stripped down 'TcM' for zonking ZonkM(ZonkM,runZonkM) , ZonkGblEnv(..), getZonkGblEnv, getZonkTcLevel -- ** Logging within 'ZonkM' , traceZonk ) where import GHC.Prelude import GHC.Driver.Flags ( DumpFlag(Opt_D_dump_tc_trace) ) import GHC.Types.SrcLoc ( SrcSpan ) import GHC.Tc.Types.BasicTypes ( TcBinderStack ) import GHC.Tc.Utils.TcType ( TcLevel ) import GHC.Utils.Logger import GHC.Utils.Outputable import Control.Monad ( when ) import Control.Monad.IO.Class ( MonadIO(..) ) import GHC.Exts ( oneShot ) -------------------------------------------------------------------------------- -- | Information needed by the 'ZonkM' monad, which is a slimmed down version -- of 'TcM' with just enough information for zonking. data ZonkGblEnv = ZonkGblEnv { zge_logger :: Logger -- needed for traceZonk , zge_name_ppr_ctx :: NamePprCtx -- '' , zge_src_span :: SrcSpan -- needed for skolemiseUnboundMetaTyVar , zge_tc_level :: TcLevel -- '' , zge_binder_stack :: TcBinderStack -- needed for tcInitTidyEnv } -- | A stripped down version of 'TcM' which is sufficient for zonking types. newtype ZonkM a = ZonkM' { runZonkM :: ZonkGblEnv -> IO a } {- NB: we write the following instances by hand: -- deriving (Functor, Applicative, Monad, MonadIO) -- via ReaderT ZonkGblEnv IO See Note [Instances for ZonkT] in GHC.Tc.Zonk.Env for the reasoning: - oneShot annotations, - strictness annotations to enable worker-wrapper. -} {-# COMPLETE ZonkM #-} pattern ZonkM :: forall a. 
(ZonkGblEnv -> IO a) -> ZonkM a pattern ZonkM m <- ZonkM' m where ZonkM m = ZonkM' (oneShot m) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad instance Functor ZonkM where fmap f (ZonkM g) = ZonkM $ \ !env -> fmap f (g env) a <$ ZonkM g = ZonkM $ \ !env -> a <$ g env {-# INLINE fmap #-} {-# INLINE (<$) #-} instance Applicative ZonkM where pure a = ZonkM (\ !_ -> pure a) ZonkM f <*> ZonkM x = ZonkM (\ !env -> f env <*> x env ) ZonkM m *> f = ZonkM (\ !env -> m env *> runZonkM f env) {-# INLINE pure #-} {-# INLINE (<*>) #-} {-# INLINE (*>) #-} instance Monad ZonkM where ZonkM m >>= f = ZonkM (\ !env -> do { r <- m env ; runZonkM (f r) env }) (>>) = (*>) {-# INLINE (>>=) #-} {-# INLINE (>>) #-} instance MonadIO ZonkM where liftIO f = ZonkM (\ !_ -> f) {-# INLINE liftIO #-} getZonkGblEnv :: ZonkM ZonkGblEnv getZonkGblEnv = ZonkM return {-# INLINE getZonkGblEnv #-} getZonkTcLevel :: ZonkM TcLevel getZonkTcLevel = ZonkM (\env -> return (zge_tc_level env)) -- | Same as 'traceTc', but for the 'ZonkM' monad. traceZonk :: String -> SDoc -> ZonkM () traceZonk herald doc = ZonkM $ \ ( ZonkGblEnv { zge_logger = !logger, zge_name_ppr_ctx = ppr_ctx }) -> do { let sty = mkDumpStyle ppr_ctx flag = Opt_D_dump_tc_trace title = "" msg = hang (text herald) 2 doc ; when (logHasDumpFlag logger flag) $ logDumpFile logger sty flag title FormatText msg } {-# INLINE traceZonk #-} -- see Note [INLINE conditional tracing utilities] in GHC.Tc.Utils.Monad ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/0000755000000000000000000000000007346545000017012 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Annotations.hs0000644000000000000000000001177207346545000021653 0ustar0000000000000000-- | -- Support for source code annotation feature of GHC. That is the ANN pragma. 
-- -- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -- {-# LANGUAGE DeriveFunctor #-} module GHC.Types.Annotations ( -- * Main Annotation data types Annotation(..), AnnPayload, AnnTarget(..), CoreAnnTarget, -- * AnnEnv for collecting and querying Annotations AnnEnv, mkAnnEnv, extendAnnEnvList, plusAnnEnv, emptyAnnEnv, findAnns, findAnnsByTypeRep, deserializeAnns ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Unit.Module ( Module ) import GHC.Unit.Module.Env import GHC.Types.Name.Env import GHC.Types.Name import GHC.Utils.Outputable import GHC.Serialized import Control.Monad import Data.Maybe import Data.Typeable import Data.Word ( Word8 ) -- | Represents an annotation after it has been sufficiently desugared from -- it's initial form of 'GHC.Hs.Decls.AnnDecl' data Annotation = Annotation { ann_target :: CoreAnnTarget, -- ^ The target of the annotation ann_value :: AnnPayload } type AnnPayload = Serialized -- ^ The "payload" of an annotation -- allows recovery of its value at a given type, -- and can be persisted to an interface file -- | An annotation target data AnnTarget name = NamedTarget name -- ^ We are annotating something with a name: -- a type or identifier | ModuleTarget Module -- ^ We are annotating a particular module deriving (Functor) -- | The kind of annotation target found in the middle end of the compiler type CoreAnnTarget = AnnTarget Name instance Outputable name => Outputable (AnnTarget name) where ppr (NamedTarget nm) = text "Named target" <+> ppr nm ppr (ModuleTarget mod) = text "Module target" <+> ppr mod instance Binary name => Binary (AnnTarget name) where put_ bh (NamedTarget a) = do putByte bh 0 put_ bh a put_ bh (ModuleTarget a) = do putByte bh 1 put_ bh a get bh = do h <- getByte bh case h of 0 -> liftM NamedTarget $ get bh _ -> liftM ModuleTarget $ get bh instance Outputable Annotation where ppr ann = ppr (ann_target ann) -- | A collection of annotations data AnnEnv = 
MkAnnEnv { ann_mod_env :: !(ModuleEnv [AnnPayload]) , ann_name_env :: !(NameEnv [AnnPayload]) } -- | An empty annotation environment. emptyAnnEnv :: AnnEnv emptyAnnEnv = MkAnnEnv emptyModuleEnv emptyNameEnv -- | Construct a new annotation environment that contains the list of -- annotations provided. mkAnnEnv :: [Annotation] -> AnnEnv mkAnnEnv = extendAnnEnvList emptyAnnEnv -- | Add the given annotation to the environment. extendAnnEnvList :: AnnEnv -> [Annotation] -> AnnEnv extendAnnEnvList env = foldl' extendAnnEnv env extendAnnEnv :: AnnEnv -> Annotation -> AnnEnv extendAnnEnv (MkAnnEnv mod_env name_env) (Annotation tgt payload) = case tgt of NamedTarget name -> MkAnnEnv mod_env (extendNameEnv_C (++) name_env name [payload]) ModuleTarget mod -> MkAnnEnv (extendModuleEnvWith (++) mod_env mod [payload]) name_env -- | Union two annotation environments. plusAnnEnv :: AnnEnv -> AnnEnv -> AnnEnv plusAnnEnv a b = MkAnnEnv { ann_mod_env = plusModuleEnv_C (++) (ann_mod_env a) (ann_mod_env b) , ann_name_env = plusNameEnv_C (++) (ann_name_env a) (ann_name_env b) } -- | Find the annotations attached to the given target as 'Typeable' -- values of your choice. If no deserializer is specified, -- only transient annotations will be returned. findAnns :: Typeable a => ([Word8] -> a) -> AnnEnv -> CoreAnnTarget -> [a] findAnns deserialize env = mapMaybe (fromSerialized deserialize) . findAnnPayloads env -- | Find the annotations attached to the given target as 'Typeable' -- values of your choice. If no deserializer is specified, -- only transient annotations will be returned. findAnnsByTypeRep :: AnnEnv -> CoreAnnTarget -> TypeRep -> [[Word8]] findAnnsByTypeRep env target tyrep = [ ws | Serialized tyrep' ws <- findAnnPayloads env target , tyrep' == tyrep ] -- | Find payloads for the given 'CoreAnnTarget' in an 'AnnEnv'. 
findAnnPayloads :: AnnEnv -> CoreAnnTarget -> [AnnPayload] findAnnPayloads env target = case target of ModuleTarget mod -> lookupWithDefaultModuleEnv (ann_mod_env env) [] mod NamedTarget name -> fromMaybe [] $ lookupNameEnv (ann_name_env env) name -- | Deserialize all annotations of a given type. This happens lazily, that is -- no deserialization will take place until the [a] is actually demanded and -- the [a] can also be empty (the UniqFM is not filtered). deserializeAnns :: Typeable a => ([Word8] -> a) -> AnnEnv -> (ModuleEnv [a], NameEnv [a]) deserializeAnns deserialize env = ( mapModuleEnv deserAnns (ann_mod_env env) , mapNameEnv deserAnns (ann_name_env env) ) where deserAnns = mapMaybe (fromSerialized deserialize) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Avail.hs0000644000000000000000000002023507346545000020404 0ustar0000000000000000 {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE PatternSynonyms #-} -- -- (c) The University of Glasgow -- module GHC.Types.Avail ( Avails, AvailInfo(..), availsToNameSet, availsToNameEnv, availExportsDecl, availName, availNames, availSubordinateNames, stableAvailCmp, plusAvail, trimAvail, filterAvail, filterAvails, nubAvails, sortAvails, DetOrdAvails(DetOrdAvails, DefinitelyDeterministicAvails) ) where import GHC.Prelude import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Utils.Binary import GHC.Data.List.SetOps import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import Control.DeepSeq import Data.Data ( Data ) import Data.Functor.Classes ( liftCompare ) import Data.List ( find, sortBy ) import qualified Data.Semigroup as S -- ----------------------------------------------------------------------------- -- The AvailInfo type -- | Records what things are \"available\", i.e. 
in scope data AvailInfo -- | An ordinary identifier in scope, or a field label without a parent type -- (see Note [Representing pattern synonym fields in AvailInfo]). = Avail Name -- | A type or class in scope -- -- The __AvailTC Invariant__: If the type or class is itself to be in scope, -- it must be /first/ in this list. Thus, typically: -- -- > AvailTC Eq [Eq, ==, \/=] | AvailTC Name -- ^ The name of the type or class [Name] -- ^ The available pieces of type or class deriving Data -- | A collection of 'AvailInfo' - several things that are \"available\" type Avails = [AvailInfo] -- | Occurrences of Avails in interface files must be deterministically ordered -- to guarantee interface file determinism. -- -- We guarantee a deterministic order by either using the order explicitly -- given by the user (e.g. in an explicit constructor export list) or instead -- by sorting the avails with 'sortAvails'. newtype DetOrdAvails = DefinitelyDeterministicAvails Avails deriving newtype (Binary, Outputable, NFData) -- | It's always safe to match on 'DetOrdAvails' pattern DetOrdAvails :: Avails -> DetOrdAvails pattern DetOrdAvails x <- DefinitelyDeterministicAvails x {-# COMPLETE DetOrdAvails #-} {- Note [Representing pattern synonym fields in AvailInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Record pattern synonym fields cannot be represented using AvailTC like fields of normal record types, because they do not always have a parent type constructor. So we represent them using the Avail constructor. 
Thus under -XDuplicateRecordFields -XPatternSynoynms, the declaration pattern MkFoo{f} = Bar f gives rise to the AvailInfo Avail MkFoo, Avail f However, if `f` is bundled with a type constructor `T` by using `T(MkFoo,f)` in an export list, then whenever `f` is imported the parent will be `T`, represented as AvailTC T [ T, MkFoo, f ] -} -- | Compare lexicographically stableAvailCmp :: AvailInfo -> AvailInfo -> Ordering stableAvailCmp (Avail c1) (Avail c2) = c1 `stableNameCmp` c2 stableAvailCmp (Avail {}) (AvailTC {}) = LT stableAvailCmp (AvailTC n ns) (AvailTC m ms) = stableNameCmp n m S.<> liftCompare stableNameCmp ns ms stableAvailCmp (AvailTC {}) (Avail {}) = GT -- ----------------------------------------------------------------------------- -- Operations on AvailInfo availsToNameSet :: [AvailInfo] -> NameSet availsToNameSet avails = foldr add emptyNameSet avails where add avail set = extendNameSetList set (availNames avail) availsToNameEnv :: [AvailInfo] -> NameEnv AvailInfo availsToNameEnv avails = foldr add emptyNameEnv avails where add avail env = extendNameEnvList env (zip (availNames avail) (repeat avail)) -- | Does this 'AvailInfo' export the parent decl? This depends on the -- invariant that the parent is first if it appears at all. availExportsDecl :: AvailInfo -> Bool availExportsDecl (AvailTC ty_name names) | n : _ <- names = ty_name == n | otherwise = False availExportsDecl _ = True -- | Just the main name made available, i.e. not the available pieces -- of type or class brought into scope by the 'AvailInfo' availName :: AvailInfo -> Name availName (Avail n) = n availName (AvailTC n _) = n -- | Names and fields made available by the availability information. availNames :: AvailInfo -> [Name] availNames (Avail c) = [c] availNames (AvailTC _ cs) = cs -- | Names and fields made available by the availability information, other than -- the main decl itself. 
availSubordinateNames :: AvailInfo -> [Name] availSubordinateNames (Avail {}) = [] availSubordinateNames avail@(AvailTC _ ns) | availExportsDecl avail = tail ns | otherwise = ns -- | Sort 'Avails'/'AvailInfo's sortAvails :: Avails -> DetOrdAvails sortAvails = DefinitelyDeterministicAvails . sortBy stableAvailCmp . map sort_subs where sort_subs :: AvailInfo -> AvailInfo sort_subs (Avail n) = Avail n sort_subs (AvailTC n []) = AvailTC n [] sort_subs (AvailTC n (m:ms)) | n == m = AvailTC n (m:sortBy stableNameCmp ms) | otherwise = AvailTC n (sortBy stableNameCmp (m:ms)) -- Maintain the AvailTC Invariant -- ----------------------------------------------------------------------------- -- Utility plusAvail :: AvailInfo -> AvailInfo -> AvailInfo plusAvail a1 a2 | debugIsOn && availName a1 /= availName a2 = pprPanic "GHC.Rename.Env.plusAvail names differ" (hsep [ppr a1,ppr a2]) plusAvail a1@(Avail {}) (Avail {}) = a1 plusAvail (AvailTC _ []) a2@(AvailTC {}) = a2 plusAvail a1@(AvailTC {}) (AvailTC _ []) = a1 plusAvail (AvailTC n1 (s1:ss1)) (AvailTC n2 (s2:ss2)) = case (n1 == s1, n2 == s2) of -- Maintain invariant the parent is first (True,True) -> AvailTC n1 (s1 : (ss1 `unionListsOrd` ss2)) (True,False) -> AvailTC n1 (s1 : (ss1 `unionListsOrd` (s2:ss2))) (False,True) -> AvailTC n1 (s2 : ((s1:ss1) `unionListsOrd` ss2)) (False,False) -> AvailTC n1 ((s1:ss1) `unionListsOrd` (s2:ss2)) plusAvail a1 a2 = pprPanic "GHC.Rename.Env.plusAvail" (hsep [ppr a1,ppr a2]) -- | trims an 'AvailInfo' to keep only a single name trimAvail :: AvailInfo -> Name -> AvailInfo trimAvail avail@(Avail {}) _ = avail trimAvail avail@(AvailTC n ns) m = case find (== m) ns of Just c -> AvailTC n [c] Nothing -> pprPanic "trimAvail" (hsep [ppr avail, ppr m]) -- | filters 'AvailInfo's by the given predicate filterAvails :: (Name -> Bool) -> [AvailInfo] -> [AvailInfo] filterAvails keep avails = foldr (filterAvail keep) [] avails -- | filters an 'AvailInfo' by the given predicate filterAvail :: (Name -> Bool) 
-> AvailInfo -> [AvailInfo] -> [AvailInfo] filterAvail keep ie rest = case ie of Avail c | keep c -> ie : rest | otherwise -> rest AvailTC tc cs -> let cs' = filter keep cs in if null cs' then rest else AvailTC tc cs' : rest -- | Combines 'AvailInfo's from the same family -- 'avails' may have several items with the same availName -- E.g import Ix( Ix(..), index ) -- will give Ix(Ix,index,range) and Ix(index) -- We want to combine these; plusAvail does that nubAvails :: [AvailInfo] -> [AvailInfo] nubAvails avails = eltsDNameEnv (foldl' add emptyDNameEnv avails) where add env avail = extendDNameEnv_C plusAvail env (availName avail) avail -- ----------------------------------------------------------------------------- -- Printing instance Outputable AvailInfo where ppr = pprAvail pprAvail :: AvailInfo -> SDoc pprAvail (Avail n) = ppr n pprAvail (AvailTC n ns) = ppr n <> braces (pprWithCommas ppr ns) instance Binary AvailInfo where put_ bh (Avail aa) = do putByte bh 0 put_ bh aa put_ bh (AvailTC ab ac) = do putByte bh 1 put_ bh ab put_ bh ac get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (Avail aa) _ -> do ab <- get bh ac <- get bh return (AvailTC ab ac) instance NFData AvailInfo where rnf (Avail n) = rnf n rnf (AvailTC a b) = rnf a `seq` rnf b ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Basic.hs0000644000000000000000000025027107346545000020376 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1997-1998 \section[BasicTypes]{Miscellaneous types} This module defines a miscellaneously collection of very simple types that \begin{itemize} \item have no other obvious home \item don't depend on any other complicated types \item are used in more than one "part" of the compiler \end{itemize} -} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable PromotionFlag, Binary PromotionFlag, Outputable Boxity, Binay Boxity {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE 
MultiParamTypeClasses #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} module GHC.Types.Basic ( LeftOrRight(..), pickLR, ConTag, ConTagZ, fIRST_TAG, Arity, VisArity, RepArity, JoinArity, FullArgCount, JoinPointHood(..), isJoinPoint, Alignment, mkAlignment, alignmentOf, alignmentBytes, PromotionFlag(..), isPromoted, FunctionOrData(..), RecFlag(..), isRec, isNonRec, boolToRecFlag, Origin(..), isGenerated, DoPmc(..), requiresPMC, GenReason(..), isDoExpansionGenerated, doExpansionFlavour, doExpansionOrigin, RuleName, pprRuleName, TopLevelFlag(..), isTopLevel, isNotTopLevel, OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe, hasOverlappingFlag, hasOverlappableFlag, hasIncoherentFlag, hasNonCanonicalFlag, Boxity(..), isBoxed, CbvMark(..), isMarkedCbv, PprPrec(..), topPrec, sigPrec, opPrec, funPrec, starPrec, appPrec, maxPrec, maybeParen, TupleSort(..), tupleSortBoxity, boxityTupleSort, tupleParens, UnboxedTupleOrSum(..), unboxedTupleOrSumExtension, sumParens, pprAlternative, -- ** The OneShotInfo type OneShotInfo(..), noOneShotInfo, hasNoOneShotInfo, isOneShotInfo, bestOneShot, worstOneShot, OccInfo(..), noOccInfo, seqOccInfo, zapFragileOcc, isOneOcc, isDeadOcc, isStrongLoopBreaker, isWeakLoopBreaker, isManyOccs, isNoOccInfo, strongLoopBreaker, weakLoopBreaker, InsideLam(..), BranchCount, oneBranch, InterestingCxt(..), TailCallInfo(..), tailCallInfo, zapOccTailCallInfo, isAlwaysTailCalled, EP(..), DefMethSpec(..), SwapFlag(..), flipSwap, unSwap, notSwapped, isSwapped, pickSwap, CompilerPhase(..), PhaseNum, beginPhase, nextPhase, laterPhase, Activation(..), isActive, competesWith, isNeverActive, isAlwaysActive, activeInFinalPhase, activateAfterInitial, activateDuringFinal, activeAfter, RuleMatchInfo(..), isConLike, isFunLike, InlineSpec(..), noUserInlineSpec, InlinePragma(..), defaultInlinePragma, alwaysInlinePragma, neverInlinePragma, dfunInlinePragma, isDefaultInlinePragma, isInlinePragma, isInlinablePragma, 
isNoInlinePragma, isOpaquePragma, isAnyInlinePragma, alwaysInlineConLikePragma, inlinePragmaSource, inlinePragmaName, inlineSpecSource, inlinePragmaSpec, inlinePragmaSat, inlinePragmaActivation, inlinePragmaRuleMatchInfo, setInlinePragmaActivation, setInlinePragmaRuleMatchInfo, pprInline, pprInlineDebug, UnfoldingSource(..), isStableSource, isStableUserSource, isStableSystemSource, isCompulsorySource, SuccessFlag(..), succeeded, failed, successIf, IntWithInf, infinity, treatZeroAsInf, subWithInf, mkIntWithInf, intGtLimit, TypeOrKind(..), isTypeLevel, isKindLevel, Levity(..), mightBeLifted, mightBeUnlifted, TypeOrConstraint(..), TyConFlavour(..), TypeOrData(..), tyConFlavourAssoc_maybe, NonStandardDefaultingStrategy(..), DefaultingStrategy(..), defaultNonStandardTyVars, ForeignSrcLang (..) ) where import GHC.Prelude import GHC.ForeignSrcLang import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Binary import GHC.Types.SourceText import qualified GHC.LanguageExtensions as LangExt import {-# SOURCE #-} Language.Haskell.Syntax.Type (PromotionFlag(..), isPromoted) import Language.Haskell.Syntax.Basic (Boxity(..), isBoxed, ConTag) import {-# SOURCE #-} Language.Haskell.Syntax.Expr (HsDoFlavour) import Control.DeepSeq ( NFData(..) 
) import Data.Data import Data.Maybe import qualified Data.Semigroup as Semi {- ************************************************************************ * * Binary choice * * ********************************************************************* -} data LeftOrRight = CLeft | CRight deriving( Eq, Data, Ord ) pickLR :: LeftOrRight -> (a,a) -> a pickLR CLeft (l,_) = l pickLR CRight (_,r) = r instance Outputable LeftOrRight where ppr CLeft = text "Left" ppr CRight = text "Right" instance Binary LeftOrRight where put_ bh CLeft = putByte bh 0 put_ bh CRight = putByte bh 1 get bh = do { h <- getByte bh ; case h of 0 -> return CLeft _ -> return CRight } {- ************************************************************************ * * \subsection[Arity]{Arity} * * ************************************************************************ -} -- | The number of value arguments that can be applied to a value before it does -- "real work". So: -- fib 100 has arity 0 -- \x -> fib x has arity 1 -- See also Note [Definition of arity] in "GHC.Core.Opt.Arity" type Arity = Int -- | Syntactic (visibility) arity, i.e. the number of visible arguments. -- See Note [Visibility and arity] type VisArity = Int -- | Representation Arity -- -- The number of represented arguments that can be applied to a value before it does -- "real work". So: -- fib 100 has representation arity 0 -- \x -> fib x has representation arity 1 -- \(# x, y #) -> fib (x + y) has representation arity 2 type RepArity = Int -- | The number of arguments that a join point takes. Unlike the arity of a -- function, this is a purely syntactic property and is fixed when the join -- point is created (or converted from a value). Both type and value arguments -- are counted. type JoinArity = Int -- | FullArgCount is the number of type or value arguments in an application, -- or the number of type or value binders in a lambda. Note: it includes -- both type and value arguments! 
type FullArgCount = Int {- Note [Visibility and arity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Arity is the number of arguments that a function expects. In a curried language like Haskell, there is more than one way to count those arguments. * `Arity` is the classic notion of arity, concerned with evalution, so it counts the number of /value/ arguments that need to be supplied before evaluation can take place, as described in notes Note [Definition of arity] in GHC.Core.Opt.Arity Note [Arity and function types] in GHC.Types.Id.Info Examples: Int has arity == 0 Int -> Int has arity <= 1 Int -> Bool -> Int has arity <= 2 We write (<=) rather than (==) as sometimes evaluation can occur before all value arguments are supplied, depending on the actual function definition. This evaluation-focused notion of arity ignores type arguments, so: forall a. a has arity == 0 forall a. a -> a has arity <= 1 forall a b. a -> b -> a has arity <= 2 This is true regardless of ForAllTyFlag, so the arity is also unaffected by (forall {a}. ty) or (forall a -> ty). Class dictionaries count towards the arity, as they are passed at runtime forall a. (Num a) => a has arity <= 1 forall a. (Num a) => a -> a has arity <= 2 forall a b. (Num a, Ord b) => a -> b -> a has arity <= 4 * `VisArity` is the syntactic notion of arity. It is the number of /visible/ arguments, i.e. arguments that occur visibly in the source code. In a function call `f x y z`, we can confidently say that f's vis-arity >= 3, simply because we see three arguments [x,y,z]. We write (>=) rather than (==) as this could be a partial application. At definition sites, we can acquire an underapproximation of vis-arity by counting the patterns on the LHS, e.g. `f a b = rhs` has vis-arity >= 2. The actual vis-arity can be higher if there is a lambda on the RHS, e.g. `f a b = \c -> rhs`. 
If we look at the types, we can observe the following * function arrows (a -> b) add to the vis-arity * visible foralls (forall a -> b) add to the vis-arity * constraint arrows (a => b) do not affect the vis-arity * invisible foralls (forall a. b) do not affect the vis-arity This means that ForAllTyFlag matters for VisArity (in contrast to Arity), while the type/value distinction is unimportant (again in contrast to Arity). Examples: Int -- vis-arity == 0 (no args) Int -> Int -- vis-arity == 1 (1 funarg) forall a. a -> a -- vis-arity == 1 (1 funarg) forall a. Num a => a -> a -- vis-arity == 1 (1 funarg) forall a -> Num a => a -- vis-arity == 1 (1 req tyarg, 0 funargs) forall a -> a -> a -- vis-arity == 2 (1 req tyarg, 1 funarg) Int -> forall a -> Int -- vis-arity == 2 (1 funarg, 1 req tyarg) Wrinkle: with TypeApplications and TypeAbstractions, it is possible to visibly bind and pass invisible arguments, e.g. `f @a x = ...` or `f @Int 42`. Those @-prefixed arguments are ignored for the purposes of vis-arity. -} {- ************************************************************************ * * Constructor tags * * ************************************************************************ -} -- | A *zero-indexed* constructor tag type ConTagZ = Int fIRST_TAG :: ConTag -- ^ Tags are allocated from here for real constructors -- or for superclass selectors fIRST_TAG = 1 {- ************************************************************************ * * \subsection[Alignment]{Alignment} * * ************************************************************************ -} -- | A power-of-two alignment newtype Alignment = Alignment { alignmentBytes :: Int } deriving (Eq, Ord) -- Builds an alignment, throws on non power of 2 input. This is not -- ideal, but convenient for internal use and better then silently -- passing incorrect data. 
mkAlignment :: Int -> Alignment
mkAlignment n
  | n == 1 = Alignment 1
  | n == 2 = Alignment 2
  | n == 4 = Alignment 4
  | n == 8 = Alignment 8
  | n == 16 = Alignment 16
  | n == 32 = Alignment 32
  | n == 64 = Alignment 64
  | n == 128 = Alignment 128
  | n == 256 = Alignment 256
  | n == 512 = Alignment 512
  | otherwise = panic "mkAlignment: received either a non power of 2 argument or > 512"

-- Calculates an alignment of a number. x is aligned at N bytes means
-- the remainder from x / N is zero. Currently, interested in N <= 8,
-- but can be expanded to N <= 16 or N <= 32 if used within SSE or AVX
-- context.
alignmentOf :: Int -> Alignment
alignmentOf x = case x .&. 7 of
  0 -> Alignment 8
  4 -> Alignment 4
  2 -> Alignment 2
  _ -> Alignment 1

instance Outputable Alignment where
  ppr (Alignment m) = ppr m

instance OutputableP env Alignment where
  pdoc _ = ppr

{-
************************************************************************
*                                                                      *
                One-shot information
*                                                                      *
************************************************************************
-}

{- Note [OneShotInfo overview]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lambda-bound Ids (and only lambda-bound Ids) may be decorated with
one-shot info.  The idea is that if we see
    (\x{one-shot}. e)
it means that this lambda will only be applied once.  In particular
that means we can float redexes under the lambda without losing
work.  For example, consider
    let t = expensive in
    (\x{one-shot}. case t of { True -> ...; False -> ... })

Because it's a one-shot lambda, we can safely inline t, giving
    (\x{one_shot}. case of { True -> ...; False -> ... })

Moving parts:

* Usage analysis, performed as part of demand-analysis, finds
  out whether functions call their argument once.  Consider
     f g x = Just (case g x of { ... })

  Here 'f' is lazy in 'g', but it guarantees to call it no
  more than once.  So g will get a C(1,U) usage demand.

* Occurrence analysis propagates this usage information
  (in the demand signature of a function) to its calls.
  Example, given 'f' above
     f (\x.e) blah

  Since f's demand signature says it has a C(1,U) usage demand on its
  first argument, the occurrence analyser sets the \x to be one-shot.
  This is done via the occ_one_shots field of OccEnv.

* Float-in and float-out take account of one-shot-ness

* Occurrence analysis doesn't set "inside-lam" for occurrences inside
  a one-shot lambda

Other notes

* A one-shot lambda can use its argument many times.  To elaborate
  the example above
      let t = expensive in
      (\x{one-shot}. case t of { True -> x+x; False -> x*x })

  Here the '\x' is one-shot, which justifies inlining 't',
  but x is used many times. That's absolutely fine.

* It's entirely possible to have
      (\x{one-shot}. \y{many-shot}. e)

  For example
      let t = expensive
          g = \x -> let v = x+t in
              \y -> x + v
      in map (g 5) xs

  Here the `\x` is a one-shot binder: `g` is applied to one argument
  exactly once.  And because the `\x` is one-shot, it would be fine to
  float that `let t = expensive` binding inside the `\x`.

  But the `\y` is most definitely not one-shot!
-}

-- | If the 'Id' is a lambda-bound variable then it may have lambda-bound
-- variable info. Sometimes we know whether the lambda binding this variable
-- is a "one-shot" lambda; that is, whether it is applied at most once.
--
-- This information may be useful in optimisation, as computations may
-- safely be floated inside such a lambda without risk of duplicating
-- work.
--
-- See also Note [OneShotInfo overview] above.
data OneShotInfo
  = NoOneShotInfo -- ^ No information
  | OneShotLam    -- ^ The lambda is applied at most once.
  deriving (Eq)

-- | It is always safe to assume that an 'Id' has no lambda-bound
-- variable information
noOneShotInfo :: OneShotInfo
noOneShotInfo = NoOneShotInfo

isOneShotInfo, hasNoOneShotInfo :: OneShotInfo -> Bool
isOneShotInfo OneShotLam = True
isOneShotInfo _ = False

hasNoOneShotInfo NoOneShotInfo = True
hasNoOneShotInfo _ = False

worstOneShot, bestOneShot :: OneShotInfo -> OneShotInfo -> OneShotInfo
worstOneShot NoOneShotInfo _ = NoOneShotInfo
worstOneShot OneShotLam os = os

bestOneShot NoOneShotInfo os = os
bestOneShot OneShotLam _ = OneShotLam

pprOneShotInfo :: OneShotInfo -> SDoc
pprOneShotInfo NoOneShotInfo = text "NoOS"
pprOneShotInfo OneShotLam = text "OneShot"

instance Outputable OneShotInfo where
  ppr = pprOneShotInfo

{-
************************************************************************
*                                                                      *
                Swap flag
*                                                                      *
************************************************************************
-}

data SwapFlag
  = NotSwapped -- Args are: actual,   expected
  | IsSwapped  -- Args are: expected, actual
  deriving( Eq )

instance Outputable SwapFlag where
  ppr IsSwapped = text "Is-swapped"
  ppr NotSwapped = text "Not-swapped"

flipSwap :: SwapFlag -> SwapFlag
flipSwap IsSwapped = NotSwapped
flipSwap NotSwapped = IsSwapped

isSwapped :: SwapFlag -> Bool
isSwapped IsSwapped = True
isSwapped NotSwapped = False

notSwapped :: SwapFlag -> Bool
notSwapped NotSwapped = True
notSwapped IsSwapped = False

pickSwap :: SwapFlag -> a -> a -> a
pickSwap NotSwapped a _ = a
pickSwap IsSwapped _ b = b

unSwap :: SwapFlag -> (a->a->b) -> a -> a -> b
unSwap NotSwapped f a b = f a b
unSwap IsSwapped f a b = f b a

{- *********************************************************************
*                                                                      *
                Promotion flag
*                                                                      *
********************************************************************* -}

instance Outputable PromotionFlag where
  ppr NotPromoted = text "NotPromoted"
  ppr IsPromoted = text "IsPromoted"

instance Binary PromotionFlag where
  put_ bh NotPromoted = putByte bh 0
  put_ bh IsPromoted = putByte bh 1

  get bh = do
    n <- getByte bh
    case n of
      0 -> return NotPromoted
      1 -> return IsPromoted
      -- FIX: the message previously read "Binary(IsPromoted): fail)" with a
      -- stray unbalanced ')'.
      _ -> fail "Binary(IsPromoted): fail"

{-
************************************************************************
*                                                                      *
\subsection[FunctionOrData]{FunctionOrData}
*                                                                      *
************************************************************************
-}

data FunctionOrData = IsFunction | IsData
  deriving (Eq, Ord, Data)

instance Outputable FunctionOrData where
  ppr IsFunction = text "(function)"
  ppr IsData = text "(data)"

instance Binary FunctionOrData where
  put_ bh IsFunction = putByte bh 0
  put_ bh IsData = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return IsFunction
      1 -> return IsData
      _ -> panic "Binary FunctionOrData"

{-
************************************************************************
*                                                                      *
                Rules
*                                                                      *
************************************************************************
-}

type RuleName = FastString

pprRuleName :: RuleName -> SDoc
pprRuleName rn = doubleQuotes (ftext rn)

{-
************************************************************************
*                                                                      *
\subsection[Top-level/local]{Top-level/not-top level flag}
*                                                                      *
************************************************************************
-}

data TopLevelFlag
  = TopLevel
  | NotTopLevel
  deriving Data

isTopLevel, isNotTopLevel :: TopLevelFlag -> Bool

isNotTopLevel NotTopLevel = True
isNotTopLevel TopLevel = False

isTopLevel TopLevel = True
isTopLevel NotTopLevel = False

instance Outputable TopLevelFlag where
  -- NOTE(review): the extracted text showed `text ""` for both cases; the
  -- angle-bracketed payloads were evidently stripped as HTML-like tags.
  -- Restored to match upstream GHC.
  ppr TopLevel    = text "<TopLevel>"
  ppr NotTopLevel = text "<NotTopLevel>"

{-
************************************************************************
*                                                                      *
                Boxity flag
*                                                                      *
************************************************************************
-}

instance Outputable Boxity where
  ppr Boxed = text "Boxed"
  ppr Unboxed = text "Unboxed"

instance Binary Boxity where -- implemented via isBoxed-isomorphism to Bool
  put_ bh = put_ bh . isBoxed
  get bh = do
    b <- get bh
    pure $ if b then Boxed else Unboxed

{-
************************************************************************
*                                                                      *
                Call by value flag
*                                                                      *
************************************************************************
-}

-- | Should an argument be passed evaluated *and* tagged.
data CbvMark = MarkedCbv | NotMarkedCbv
  deriving Eq

instance Outputable CbvMark where
  ppr MarkedCbv = text "!"
  ppr NotMarkedCbv = text "~"

instance Binary CbvMark where
  put_ bh NotMarkedCbv = putByte bh 0
  put_ bh MarkedCbv = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return NotMarkedCbv
      1 -> return MarkedCbv
      _ -> panic "Invalid binary format"

isMarkedCbv :: CbvMark -> Bool
isMarkedCbv MarkedCbv = True
isMarkedCbv NotMarkedCbv = False

{-
************************************************************************
*                                                                      *
                Recursive/Non-Recursive flag
*                                                                      *
************************************************************************
-}

-- | Recursivity Flag
data RecFlag = Recursive
             | NonRecursive
             deriving( Eq, Data )

isRec :: RecFlag -> Bool
isRec Recursive = True
isRec NonRecursive = False

isNonRec :: RecFlag -> Bool
isNonRec Recursive = False
isNonRec NonRecursive = True

boolToRecFlag :: Bool -> RecFlag
boolToRecFlag True = Recursive
boolToRecFlag False = NonRecursive

instance Outputable RecFlag where
  ppr Recursive = text "Recursive"
  ppr NonRecursive = text "NonRecursive"

instance Binary RecFlag where
  put_ bh Recursive = putByte bh 0
  put_ bh NonRecursive = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return Recursive
      _ -> return NonRecursive

{-
************************************************************************
*                                                                      *
                Code origin
*                                                                      *
************************************************************************
-}

-- | Was this piece of code user-written or generated by the compiler?
--
-- See Note [Generated code and pattern-match checking].
data Origin = FromSource
            | Generated GenReason DoPmc
            deriving( Eq, Data )

isGenerated :: Origin -> Bool
isGenerated Generated{} = True
isGenerated FromSource = False

-- | This metadata stores the information as to why was the piece of code generated
-- It is useful for generating the right error context
-- See Part 3 in Note [Expanding HsDo with XXExprGhcRn] in `GHC.Tc.Gen.Do`
data GenReason = DoExpansion HsDoFlavour
               | OtherExpansion
               deriving (Eq, Data)

instance Outputable GenReason where
  ppr DoExpansion{} = text "DoExpansion"
  ppr OtherExpansion = text "OtherExpansion"

doExpansionFlavour :: Origin -> Maybe HsDoFlavour
doExpansionFlavour (Generated (DoExpansion f) _) = Just f
doExpansionFlavour _ = Nothing

-- See Part 3 in Note [Expanding HsDo with XXExprGhcRn] in `GHC.Tc.Gen.Do`
isDoExpansionGenerated :: Origin -> Bool
isDoExpansionGenerated = isJust . doExpansionFlavour

-- See Part 3 in Note [Expanding HsDo with XXExprGhcRn] in `GHC.Tc.Gen.Do`
doExpansionOrigin :: HsDoFlavour -> Origin
doExpansionOrigin f = Generated (DoExpansion f) DoPmc
  -- It is important that we perform PMC
  -- on the expressions generated by do statements
  -- to get the right pattern match checker warnings
  -- See `GHC.HsToCore.Pmc.pmcMatches`

instance Outputable Origin where
  ppr FromSource = text "FromSource"
  ppr (Generated reason pmc) = text "Generated" <+> ppr reason <+> ppr pmc

-- | Whether to run pattern-match checks in generated code.
--
-- See Note [Generated code and pattern-match checking].
data DoPmc = SkipPmc | DoPmc
  deriving( Eq, Data )

instance Outputable DoPmc where
  ppr SkipPmc = text "SkipPmc"
  ppr DoPmc = text "DoPmc"

-- | Does this 'Origin' require us to run pattern-match checking,
-- or should we skip these checks?
--
-- See Note [Generated code and pattern-match checking].
requiresPMC :: Origin -> Bool
requiresPMC (Generated _ SkipPmc) = False
requiresPMC _ = True

{- Note [Generated code and pattern-match checking]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some parts of the compiler generate code that is then typechecked. For example:

  - the XXExprGhcRn mechanism described in
    Note [Rebindable syntax and XXExprGhcRn] in GHC.Hs.Expr,
  - the deriving mechanism.

It is usually the case that we want to avoid generating error messages that
refer to generated code. The way this is handled is that we mark certain parts
of the AST as being generated (using the Origin datatype); this is then used
to set the tcl_in_gen_code flag in TcLclEnv, as explained in
Note [Error contexts in generated code] in GHC.Tc.Utils.Monad.

Being in generated code is usually taken to mean we should also skip doing
pattern-match checking, but not always. For example, when desugaring a record
update (as described in Note [Record Updates] in GHC.Tc.Gen.Expr), we still
want to do pattern-match checking, in order to report incomplete record
updates (failing to do so led to #23250).

So, for a 'Generated' 'Origin', we keep track of whether we should do
pattern-match checks; see the calls of the requiresPMC function
(e.g. isMatchContextPmChecked and needToRunPmCheck in GHC.HsToCore.Pmc.Utils).
-}

{-
************************************************************************
*                                                                      *
                Instance overlap flag
*                                                                      *
************************************************************************
-}

-- | The semantics allowed for overlapping instances for a particular
-- instance. See Note [Safe Haskell isSafeOverlap] in GHC.Core.InstEnv for an
-- explanation of the `isSafeOverlap` field.
--
data OverlapFlag = OverlapFlag
  { overlapMode :: OverlapMode
  , isSafeOverlap :: Bool
  } deriving (Eq, Data)

setOverlapModeMaybe :: OverlapFlag -> Maybe OverlapMode -> OverlapFlag
setOverlapModeMaybe f Nothing = f
setOverlapModeMaybe f (Just m) = f { overlapMode = m }

hasIncoherentFlag :: OverlapMode -> Bool
hasIncoherentFlag mode =
  case mode of
    Incoherent _ -> True
    NonCanonical _ -> True
    _ -> False

hasOverlappableFlag :: OverlapMode -> Bool
hasOverlappableFlag mode =
  case mode of
    Overlappable _ -> True
    Overlaps _ -> True
    Incoherent _ -> True
    NonCanonical _ -> True
    _ -> False

hasOverlappingFlag :: OverlapMode -> Bool
hasOverlappingFlag mode =
  case mode of
    Overlapping _ -> True
    Overlaps _ -> True
    Incoherent _ -> True
    NonCanonical _ -> True
    _ -> False

hasNonCanonicalFlag :: OverlapMode -> Bool
hasNonCanonicalFlag = \case
  NonCanonical{} -> True
  _ -> False

data OverlapMode -- See Note [Rules for instance lookup] in GHC.Core.InstEnv
  = NoOverlap SourceText
    -- See Note [Pragma source text]
    -- ^ This instance must not overlap another `NoOverlap` instance.
    -- However, it may be overlapped by `Overlapping` instances,
    -- and it may overlap `Overlappable` instances.

  | Overlappable SourceText
    -- See Note [Pragma source text]
    -- ^ Silently ignore this instance if you find a
    -- more specific one that matches the constraint
    -- you are trying to resolve
    --
    -- Example: constraint (Foo [Int])
    --   instance                      Foo [Int]
    --   instance {-# OVERLAPPABLE #-} Foo [a]
    --
    -- Since the second instance has the Overlappable flag,
    -- the first instance will be chosen (otherwise
    -- its ambiguous which to choose)

  | Overlapping SourceText
    -- See Note [Pragma source text]
    -- ^ Silently ignore any more general instances that may be
    --   used to solve the constraint.
    --
    -- Example: constraint (Foo [Int])
    --   instance {-# OVERLAPPING #-} Foo [Int]
    --   instance                     Foo [a]
    --
    -- Since the first instance has the Overlapping flag,
    -- the second---more general---instance will be ignored (otherwise
    -- it is ambiguous which to choose)

  | Overlaps SourceText
    -- See Note [Pragma source text]
    -- ^ Equivalent to having both `Overlapping` and `Overlappable` flags.

  | Incoherent SourceText
    -- See Note [Pragma source text]
    -- ^ Behave like Overlappable and Overlapping, and in addition pick
    -- an arbitrary one if there are multiple matching candidates, and
    -- don't worry about later instantiation
    --
    -- Example: constraint (Foo [b])
    --   instance {-# INCOHERENT #-} Foo [Int]
    --   instance                    Foo [a]
    -- Without the Incoherent flag, we'd complain that
    -- instantiating 'b' would change which instance
    -- was chosen. See also Note [Incoherent instances] in "GHC.Core.InstEnv"

  | NonCanonical SourceText
    -- ^ Behave like Incoherent, but the instance choice is observable
    -- by the program behaviour. See Note [Coherence and specialisation: overview].
    --
    -- We don't have surface syntax for the distinction between
    -- Incoherent and NonCanonical instances; instead, the flag
    -- `-f{no-}specialise-incoherents` (on by default) controls
    -- whether `INCOHERENT` instances are regarded as Incoherent or
    -- NonCanonical.

  deriving (Eq, Data)

instance Outputable OverlapFlag where
  ppr flag = ppr (overlapMode flag) <+> pprSafeOverlap (isSafeOverlap flag)

instance Outputable OverlapMode where
  ppr (NoOverlap _) = empty
  ppr (Overlappable _) = text "[overlappable]"
  ppr (Overlapping _) = text "[overlapping]"
  ppr (Overlaps _) = text "[overlap ok]"
  ppr (Incoherent _) = text "[incoherent]"
  ppr (NonCanonical _) = text "[noncanonical]"

instance Binary OverlapMode where
  put_ bh (NoOverlap s) = putByte bh 0 >> put_ bh s
  put_ bh (Overlaps s) = putByte bh 1 >> put_ bh s
  put_ bh (Incoherent s) = putByte bh 2 >> put_ bh s
  put_ bh (Overlapping s) = putByte bh 3 >> put_ bh s
  put_ bh (Overlappable s) = putByte bh 4 >> put_ bh s
  put_ bh (NonCanonical s) = putByte bh 5 >> put_ bh s
  get bh = do
    h <- getByte bh
    case h of
      0 -> (get bh) >>= \s -> return $ NoOverlap s
      1 -> (get bh) >>= \s -> return $ Overlaps s
      2 -> (get bh) >>= \s -> return $ Incoherent s
      3 -> (get bh) >>= \s -> return $ Overlapping s
      4 -> (get bh) >>= \s -> return $ Overlappable s
      5 -> (get bh) >>= \s -> return $ NonCanonical s
      _ -> panic ("get OverlapMode" ++ show h)

instance Binary OverlapFlag where
  put_ bh flag = do
    put_ bh (overlapMode flag)
    put_ bh (isSafeOverlap flag)
  get bh = do
    h <- get bh
    b <- get bh
    return OverlapFlag { overlapMode = h, isSafeOverlap = b }

pprSafeOverlap :: Bool -> SDoc
pprSafeOverlap True = text "[safe]"
pprSafeOverlap False = empty

{-
************************************************************************
*                                                                      *
                Precedence
*                                                                      *
************************************************************************
-}

-- | A general-purpose pretty-printing precedence type.
newtype PprPrec = PprPrec Int deriving (Eq, Ord, Show)
-- See Note [Precedence in types]

topPrec, sigPrec, funPrec, opPrec, starPrec, appPrec, maxPrec :: PprPrec
topPrec  = PprPrec 0 -- No parens
sigPrec  = PprPrec 1 -- Explicit type signatures
funPrec  = PprPrec 2 -- Function args; no parens for constructor apps
                     -- See [Type operator precedence] for why both
                     -- funPrec and opPrec exist.
opPrec   = PprPrec 2 -- Infix operator
starPrec = PprPrec 3 -- Star syntax for the type of types, i.e. the * in (* -> *)
                     -- See Note [Star kind precedence]
appPrec  = PprPrec 4 -- Constructor args; no parens for atomic
maxPrec  = appPrec   -- Maximum precedence

maybeParen :: PprPrec -> PprPrec -> SDoc -> SDoc
maybeParen ctxt_prec inner_prec pretty
  | ctxt_prec < inner_prec = pretty
  | otherwise = parens pretty

{- Note [Precedence in types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Many pretty-printing functions have type
    ppr_ty :: PprPrec -> Type -> SDoc

The PprPrec gives the binding strength of the context.  For example, in
   T ty1 ty2

we will pretty-print 'ty1' and 'ty2' with the call
  (ppr_ty appPrec ty)
to indicate that the context is that of an argument of a TyConApp.

We use this consistently for Type and HsType.

Note [Type operator precedence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We don't keep the fixity of type operators in the operator. So the
pretty printer follows the following precedence order:

   TyConPrec         Type constructor application
   TyOpPrec/FunPrec  Operator application and function arrow

We have funPrec and opPrec to represent the precedence of function
arrow and type operators respectively, but currently we implement
funPrec == opPrec, so that we don't distinguish the two. Reason:
it's hard to parse a type like
    a ~ b => c * d -> e - f

By treating opPrec = funPrec we end up with more parens
    (a ~ b) => (c * d) -> (e - f)

But the two are different constructors of PprPrec so we could make
(->) bind more or less tightly if we wanted.

Note [Star kind precedence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We parenthesize the (*) kind to avoid two issues:

1. Printing invalid or incorrect code.
   For example, instead of  type F @(*) x = x
   GHC used to print        type F @*   x = x
   However, (@*) is a type operator, not a kind application.

2. Printing kinds that are correct but hard to read.
   Should  Either * Int  be read as  Either (*) Int
   or as   (*) Either Int  ?
   This depends on whether -XStarIsType is enabled, but it would be
   easier if we didn't have to check for the flag when reading the code.

At the same time, we cannot parenthesize (*) blindly.
Consider this Haskell98 kind:          ((* -> *) -> *) -> *
With parentheses, it is less readable: (((*) -> (*)) -> (*)) -> (*)

The solution is to assign a special precedence to (*), 'starPrec', which is
higher than 'funPrec' but lower than 'appPrec':

   F * * *   becomes  F (*) (*) (*)
   F A * B   becomes  F A (*) B
   Proxy *   becomes  Proxy (*)
   a * -> *  becomes  a (*) -> *
-}

{-
************************************************************************
*                                                                      *
                Tuples
*                                                                      *
************************************************************************
-}

data TupleSort
  = BoxedTuple
  | UnboxedTuple
  | ConstraintTuple
  deriving( Eq, Data, Ord )

instance Outputable TupleSort where
  ppr ts = text $
    case ts of
      BoxedTuple -> "BoxedTuple"
      UnboxedTuple -> "UnboxedTuple"
      ConstraintTuple -> "ConstraintTuple"

instance Binary TupleSort where
  put_ bh BoxedTuple = putByte bh 0
  put_ bh UnboxedTuple = putByte bh 1
  put_ bh ConstraintTuple = putByte bh 2
  get bh = do
    h <- getByte bh
    case h of
      0 -> return BoxedTuple
      1 -> return UnboxedTuple
      _ -> return ConstraintTuple

tupleSortBoxity :: TupleSort -> Boxity
tupleSortBoxity BoxedTuple = Boxed
tupleSortBoxity UnboxedTuple = Unboxed
tupleSortBoxity ConstraintTuple = Boxed

boxityTupleSort :: Boxity -> TupleSort
boxityTupleSort Boxed = BoxedTuple
boxityTupleSort Unboxed = UnboxedTuple

tupleParens :: TupleSort -> SDoc -> SDoc
tupleParens BoxedTuple p = parens p
tupleParens UnboxedTuple p = text "(#" <+> p <+> text "#)"
tupleParens ConstraintTuple p -- In debug-style write (% Eq a, Ord b %)
  = ifPprDebug (text "(%" <+> p <+> text "%)") (parens p)

{-
************************************************************************
*                                                                      *
                Sums
*                                                                      *
************************************************************************
-}

sumParens :: SDoc -> SDoc
sumParens p = text "(#" <+> p <+> text "#)"

-- | Pretty print an alternative in an unboxed sum e.g. "| a | |".
pprAlternative :: (a -> SDoc) -- ^ The pretty printing function to use
               -> a           -- ^ The things to be pretty printed
               -> ConTag      -- ^ Alternative (one-based)
               -> Arity       -- ^ Arity
               -> SDoc        -- ^ 'SDoc' where the alternative has been pretty
                              -- printed and finally packed into a paragraph.
pprAlternative pp x alt arity =
  fsep (replicate (alt - 1) vbar ++ [pp x] ++ replicate (arity - alt) vbar)

-- | Are we dealing with an unboxed tuple or an unboxed sum?
--
-- Used when validity checking, see 'check_ubx_tuple_or_sum'.
data UnboxedTupleOrSum
  = UnboxedTupleType
  | UnboxedSumType
  deriving Eq

instance Outputable UnboxedTupleOrSum where
  ppr UnboxedTupleType = text "UnboxedTupleType"
  ppr UnboxedSumType = text "UnboxedSumType"

unboxedTupleOrSumExtension :: UnboxedTupleOrSum -> LangExt.Extension
unboxedTupleOrSumExtension UnboxedTupleType = LangExt.UnboxedTuples
unboxedTupleOrSumExtension UnboxedSumType = LangExt.UnboxedSums

{-
************************************************************************
*                                                                      *
\subsection[Generic]{Generic flag}
*                                                                      *
************************************************************************

This is the "Embedding-Projection pair" datatype, it contains two pieces of
code (normally either RenamedExpr's or Id's). If we have a such a pair
(EP from to), the idea is that 'from' and 'to' represents functions of type

        from :: T -> Tring
        to   :: Tring -> T

And we should have

        to (from x) = x

T and Tring are arbitrary, but typically T is the 'main' type while
Tring is the 'representation' type.
(This just helps us remember whether to use 'from' or 'to'.)
-}

-- | Embedding Projection pair
data EP a = EP { fromEP :: a,   -- :: T -> Tring
                 toEP   :: a }  -- :: Tring -> T

{- Embedding-projection pairs are used in several places:

First of all, each type constructor has an EP associated with it, the
code in EP converts (datatype T) from T to Tring and back again.

Secondly, when we are filling in Generic methods (in the typechecker,
tcMethodBinds), we are constructing bimaps by induction on the structure
of the type of the method signature.

************************************************************************
*                                                                      *
\subsection{Occurrence information}
*                                                                      *
************************************************************************

Note [OccInfo]
~~~~~~~~~~~~~
The OccInfo data type is used exclusively by the simplifier, but it appears in
a SubstResult, which is currently defined in GHC.Types.Var.Env, which is
pretty near the base of the module hierarchy.  So it seemed simpler to put the
defn of OccInfo here, safely at the bottom.

Note that `OneOcc` doesn't mean that it occurs /syntactically/ only once; it
means that it is /used/ only once. It might occur syntactically many times.
For example, in (case x of A -> y; B -> y; C -> True),
* `y` is used only once
* but it occurs syntactically twice
-}

-- | identifier Occurrence Information
data OccInfo -- See Note [OccInfo]
  = ManyOccs { occ_tail :: !TailCallInfo }
    -- ^ There are many occurrences, or unknown occurrences

  | IAmDead
    -- ^ Marks unused variables. Sometimes useful for
    -- lambda and case-bound variables.

  | OneOcc { occ_in_lam :: !InsideLam
           , occ_n_br :: {-# UNPACK #-} !BranchCount
           , occ_int_cxt :: !InterestingCxt
           , occ_tail :: !TailCallInfo }
    -- ^ Occurs exactly once (per branch), not inside a rule

  -- | This identifier breaks a loop of mutually recursive functions. The field
  -- marks whether it is only a loop breaker due to a reference in a rule
  | IAmALoopBreaker { occ_rules_only :: !RulesOnly
                    , occ_tail :: !TailCallInfo }
    -- Note [LoopBreaker OccInfo]

  deriving (Eq)

type RulesOnly = Bool

type BranchCount = Int
-- For OneOcc, the BranchCount says how many syntactic occurrences there are
-- At the moment we really only check for 1 or >1, but in principle
--   we could pay attention to how *many* occurrences there are
--   (notably in postInlineUnconditionally).
-- But meanwhile, Ints are very efficiently represented.

oneBranch :: BranchCount
oneBranch = 1

{- Note [LoopBreaker OccInfo]
~~~~~~~~~~~~~~~~~~~~~~~~~~
   IAmALoopBreaker True  <=> A "weak" or rules-only loop breaker
                             Do not preInlineUnconditionally

   IAmALoopBreaker False <=> A "strong" loop breaker
                             Do not inline at all

See OccurAnal Note [Weak loop breakers]
-}

noOccInfo :: OccInfo
noOccInfo = ManyOccs { occ_tail = NoTailCallInfo }

isNoOccInfo :: OccInfo -> Bool
isNoOccInfo ManyOccs { occ_tail = NoTailCallInfo } = True
isNoOccInfo _ = False

isManyOccs :: OccInfo -> Bool
isManyOccs ManyOccs{} = True
isManyOccs _ = False

seqOccInfo :: OccInfo -> ()
seqOccInfo occ = occ `seq` ()

-----------------
-- | Interesting Context
data InterestingCxt
  = IsInteresting
    -- ^ Function: is applied
    --   Data value: scrutinised by a case with at least one non-DEFAULT branch
  | NotInteresting
  deriving (Eq)

-- | If there is any 'interesting' identifier occurrence, then the
-- aggregated occurrence info of that identifier is considered interesting.
instance Semi.Semigroup InterestingCxt where
  NotInteresting <> x = x
  IsInteresting <> _ = IsInteresting

instance Monoid InterestingCxt where
  mempty = NotInteresting
  mappend = (Semi.<>)

-----------------
-- | Inside Lambda
data InsideLam
  = IsInsideLam
    -- ^ Occurs inside a non-linear lambda
    -- Substituting a redex for this occurrence is
    -- dangerous because it might duplicate work.
  | NotInsideLam
  deriving (Eq)

-- | If any occurrence of an identifier is inside a lambda, then the
-- occurrence info of that identifier marks it as occurring inside a lambda
instance Semi.Semigroup InsideLam where
  NotInsideLam <> x = x
  IsInsideLam <> _ = IsInsideLam

instance Monoid InsideLam where
  mempty = NotInsideLam
  mappend = (Semi.<>)

-----------------
data TailCallInfo
  = AlwaysTailCalled {-# UNPACK #-} !JoinArity -- See Note [TailCallInfo]
  | NoTailCallInfo
  deriving (Eq)

tailCallInfo :: OccInfo -> TailCallInfo
tailCallInfo IAmDead = NoTailCallInfo
tailCallInfo other = occ_tail other

zapOccTailCallInfo :: OccInfo -> OccInfo
zapOccTailCallInfo IAmDead = IAmDead
zapOccTailCallInfo occ = occ { occ_tail = NoTailCallInfo }

isAlwaysTailCalled :: OccInfo -> Bool
isAlwaysTailCalled occ = case tailCallInfo occ of
  AlwaysTailCalled{} -> True
  NoTailCallInfo -> False

instance Outputable TailCallInfo where
  ppr (AlwaysTailCalled ar) = sep [ text "Tail", int ar ]
  ppr _ = empty

-----------------
strongLoopBreaker, weakLoopBreaker :: OccInfo
strongLoopBreaker = IAmALoopBreaker False NoTailCallInfo
weakLoopBreaker = IAmALoopBreaker True NoTailCallInfo

isWeakLoopBreaker :: OccInfo -> Bool
isWeakLoopBreaker (IAmALoopBreaker{}) = True
isWeakLoopBreaker _ = False

isStrongLoopBreaker :: OccInfo -> Bool
isStrongLoopBreaker (IAmALoopBreaker { occ_rules_only = False }) = True
  -- Loop-breaker that breaks a non-rule cycle
isStrongLoopBreaker _ = False

isDeadOcc :: OccInfo -> Bool
isDeadOcc IAmDead = True
isDeadOcc _ = False

isOneOcc :: OccInfo -> Bool
isOneOcc (OneOcc {}) = True
isOneOcc _ = False

zapFragileOcc :: OccInfo -> OccInfo
-- Keep only the most robust data: deadness, loop-breaker-hood
zapFragileOcc (OneOcc {}) = noOccInfo
zapFragileOcc occ = zapOccTailCallInfo occ

instance Outputable OccInfo where
  -- only used for debugging; never parsed.  KSW 1999-07
  ppr (ManyOccs tails) = pprShortTailCallInfo tails
  ppr IAmDead = text "Dead"
  ppr (IAmALoopBreaker rule_only tails)
    = text "LoopBreaker" <> pp_ro <> pprShortTailCallInfo tails
    where
      pp_ro | rule_only = char '!'
            | otherwise = empty
  ppr (OneOcc inside_lam one_branch int_cxt tail_info)
    = text "Once" <> pp_lam inside_lam <> ppr one_branch <> pp_args int_cxt <> pp_tail
    where
      pp_lam IsInsideLam = char 'L'
      pp_lam NotInsideLam = empty
      pp_args IsInteresting = char '!'
      pp_args NotInteresting = empty
      pp_tail = pprShortTailCallInfo tail_info

pprShortTailCallInfo :: TailCallInfo -> SDoc
pprShortTailCallInfo (AlwaysTailCalled ar) = char 'T' <> brackets (int ar)
pprShortTailCallInfo NoTailCallInfo = empty

{- Note [TailCallInfo]
~~~~~~~~~~~~~~~~~~~
The occurrence analyser determines what can be made into a join point, but it
doesn't change the binder into a JoinId because then it would be inconsistent
with the occurrences. Thus it's left to the simplifier (or to simpleOptExpr)
to change the IdDetails.

The AlwaysTailCalled marker actually means slightly more than simply that the
function is always tail-called. See Note [Invariants on join points].

This info is quite fragile and should not be relied upon unless the occurrence
analyser has *just* run. Use 'Id.idJoinPointHood' for the permanent state of
the join-point-hood of a binder; a join id itself will not be marked
AlwaysTailCalled.

Note that there is a 'TailCallInfo' on a 'ManyOccs' value. One might expect
that being tail-called would mean that the variable could only appear once per
branch (thus getting a `OneOcc { }` occurrence info), but a join point can
also be invoked from other join points, not just from case branches:

  let j1 x = ...
      j2 y = ... j1 z {- tail call -} ...
  in case w of
       A -> j1 v
       B -> j2 u
       C -> j2 q

Here both 'j1' and 'j2' will get marked AlwaysTailCalled, but j1 will get
ManyOccs and j2 will get `OneOcc { occ_n_br = 2 }`.
************************************************************************
*                                                                      *
                Default method specification
*                                                                      *
************************************************************************

The DefMethSpec enumeration just indicates what sort of default method
is used for a class. It is generated from source code, and present in
interface files; it is converted to Class.DefMethInfo before being put in
a Class object.
-}

-- | Default Method Specification
data DefMethSpec ty
  = VanillaDM    -- Default method given with polymorphic code
  | GenericDM ty -- Default method given with code of this type

instance Outputable (DefMethSpec ty) where
  ppr VanillaDM = text "{- Has default method -}"
  ppr (GenericDM {}) = text "{- Has generic default method -}"

{-
************************************************************************
*                                                                      *
\subsection{Success flag}
*                                                                      *
************************************************************************
-}

data SuccessFlag = Succeeded | Failed

instance Semigroup SuccessFlag where
  Failed <> _ = Failed
  _ <> Failed = Failed
  _ <> _ = Succeeded

instance Outputable SuccessFlag where
  ppr Succeeded = text "Succeeded"
  ppr Failed = text "Failed"

successIf :: Bool -> SuccessFlag
successIf True = Succeeded
successIf False = Failed

succeeded, failed :: SuccessFlag -> Bool
succeeded Succeeded = True
succeeded Failed = False

failed Succeeded = False
failed Failed = True

{-
************************************************************************
*                                                                      *
\subsection{Activation}
*                                                                      *
************************************************************************

When a rule or inlining is active

Note [Compiler phases]
~~~~~~~~~~~~~~~~~~~~~~
The CompilerPhase says which phase the simplifier is running in:

* InitialPhase: before all user-visible phases

* Phase 2,1,0: user-visible phases; the phase number
  controls rule ordering and inlining.

* FinalPhase: used for all subsequent simplifier
  runs. By delaying inlining of wrappers to FinalPhase we can
  ensure that RULE have a good chance to fire. See
  Note [Wrapper activation] in GHC.Core.Opt.WorkWrap

  NB: FinalPhase is run repeatedly, not just once.

  NB: users don't have access to InitialPhase or FinalPhase.
  They write {-# INLINE[n] f #-}, meaning (Phase n)

The phase sequencing is done by GHC.Opt.Simplify.Driver
-}

-- | Phase Number
type PhaseNum = Int -- Compilation phase
                    -- Phases decrease towards zero
                    -- Zero is the last phase

data CompilerPhase
  = InitialPhase   -- The first phase -- number = infinity!
  | Phase PhaseNum -- User-specifiable phases
  | FinalPhase     -- The last phase -- number = -infinity!
  deriving Eq

instance Outputable CompilerPhase where
  ppr (Phase n) = int n
  ppr InitialPhase = text "InitialPhase"
  ppr FinalPhase = text "FinalPhase"

-- See Note [Pragma source text]
data Activation
  = AlwaysActive
  | ActiveBefore SourceText PhaseNum -- Active only *strictly before* this phase
  | ActiveAfter SourceText PhaseNum  -- Active in this phase and later
  | FinalActive                      -- Active in final phase only
  | NeverActive
  deriving( Eq, Data )
  -- Eq used in comparing rules in GHC.Hs.Decls

beginPhase :: Activation -> CompilerPhase
-- First phase in which the Activation is active
-- or FinalPhase if it is never active
beginPhase AlwaysActive = InitialPhase
beginPhase (ActiveBefore {}) = InitialPhase
beginPhase (ActiveAfter _ n) = Phase n
beginPhase FinalActive = FinalPhase
beginPhase NeverActive = FinalPhase

activeAfter :: CompilerPhase -> Activation
-- (activeAfter p) makes an Activation that is active in phase p and after
-- Invariant: beginPhase (activeAfter p) = p
activeAfter InitialPhase = AlwaysActive
activeAfter (Phase n) = ActiveAfter NoSourceText n
activeAfter FinalPhase = FinalActive

nextPhase :: CompilerPhase -> CompilerPhase
-- Tells you the next phase after this one
-- Currently we have just phases [2,1,0,FinalPhase,FinalPhase,...]
-- Where FinalPhase means GHC's internal simplification steps
-- after all rules have run
nextPhase InitialPhase = Phase 2
nextPhase (Phase 0) = FinalPhase
nextPhase (Phase n) = Phase (n-1)
nextPhase FinalPhase = FinalPhase

laterPhase :: CompilerPhase -> CompilerPhase -> CompilerPhase
-- Returns the later of two phases
laterPhase (Phase n1) (Phase n2) = Phase (n1 `min` n2)
laterPhase InitialPhase p2 = p2
laterPhase FinalPhase _ = FinalPhase
laterPhase p1 InitialPhase = p1
laterPhase _ FinalPhase = FinalPhase

activateAfterInitial :: Activation
-- Active in the first phase after the initial phase
activateAfterInitial = activeAfter (nextPhase InitialPhase)

activateDuringFinal :: Activation
-- Active in the final simplification phase (which is repeated)
activateDuringFinal = FinalActive

isActive :: CompilerPhase -> Activation -> Bool
isActive InitialPhase act = activeInInitialPhase act
isActive (Phase p) act = activeInPhase p act
isActive FinalPhase act = activeInFinalPhase act

activeInInitialPhase :: Activation -> Bool
activeInInitialPhase AlwaysActive = True
activeInInitialPhase (ActiveBefore {}) = True
activeInInitialPhase _ = False

activeInPhase :: PhaseNum -> Activation -> Bool
activeInPhase _ AlwaysActive = True
activeInPhase _ NeverActive = False
activeInPhase _ FinalActive = False
activeInPhase p (ActiveAfter _ n) = p <= n
activeInPhase p (ActiveBefore _ n) = p > n

activeInFinalPhase :: Activation -> Bool
activeInFinalPhase AlwaysActive = True
activeInFinalPhase FinalActive = True
activeInFinalPhase (ActiveAfter {}) = True
activeInFinalPhase _ = False

isNeverActive, isAlwaysActive :: Activation -> Bool
isNeverActive NeverActive = True
isNeverActive _ = False

isAlwaysActive AlwaysActive = True
isAlwaysActive _ = False

competesWith :: Activation -> Activation -> Bool
-- See Note [Competing activations]
competesWith AlwaysActive _ = True

competesWith NeverActive _ = False
competesWith _ NeverActive = False

competesWith FinalActive FinalActive = True
competesWith FinalActive _ = False

competesWith (ActiveBefore {}) AlwaysActive = True
competesWith (ActiveBefore {}) FinalActive = False
competesWith (ActiveBefore {}) (ActiveBefore {}) = True
competesWith (ActiveBefore _ a) (ActiveAfter _ b) = a < b

competesWith (ActiveAfter {}) AlwaysActive = False
competesWith (ActiveAfter {}) FinalActive = True
competesWith (ActiveAfter {}) (ActiveBefore {}) = False
competesWith (ActiveAfter _ a) (ActiveAfter _ b) = a >= b

{- Note [Competing activations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes a RULE and an inlining may compete, or two RULES.
See Note [Rules and inlining/other rules] in GHC.HsToCore.

We say that act1 "competes with" act2 iff
  act1 is active in the phase when act2 *becomes* active
NB: remember that phases count *down*: 2, 1, 0!

It's too conservative to ensure that the two are never simultaneously
active.  For example, a rule might be always active, and an inlining
might switch on in phase 2.  We could switch off the rule, but it does
no harm.
-}

{- *********************************************************************
*                                                                      *
                InlinePragma, InlineSpec, RuleMatchInfo
*                                                                      *
********************************************************************* -}

data InlinePragma -- Note [InlinePragma]
  = InlinePragma
      { inl_src :: SourceText -- See Note [Pragma source text]
      , inl_inline :: InlineSpec -- See Note [inl_inline and inl_act]

      , inl_sat :: Maybe Arity -- Just n <=> Inline only when applied to n
                               --            explicit (non-type, non-dictionary) args
                               --   That is, inl_sat describes the number of *source-code*
                               --   arguments the thing must be applied to.  We add on the
                               --   number of implicit, dictionary arguments when making
                               --   the Unfolding, and don't look at inl_sat further

      , inl_act :: Activation -- Says during which phases inlining is allowed
                              -- See Note [inl_inline and inl_act]

      , inl_rule :: RuleMatchInfo -- Should the function be treated like a constructor?
      } deriving( Eq, Data )

-- | Rule Match Information
data RuleMatchInfo
  = ConLike -- See Note [CONLIKE pragma]
  | FunLike
  deriving( Eq, Data, Show )
  -- Show needed for GHC.Parser.Lexer

-- | Inline Specification
data InlineSpec -- What the user's INLINE pragma looked like
  = Inline SourceText    -- User wrote INLINE
  | Inlinable SourceText -- User wrote INLINABLE
  | NoInline SourceText  -- User wrote NOINLINE
  | Opaque SourceText    -- User wrote OPAQUE
  -- Each of the above keywords is accompanied with
  -- a string of type SourceText written by the user
  | NoUserInlinePrag -- User did not write any of INLINE/INLINABLE/NOINLINE
                     -- e.g. in `defaultInlinePragma` or when created by CSE
  deriving( Eq, Data, Show )
  -- Show needed for GHC.Parser.Lexer

{- Note [InlinePragma]
~~~~~~~~~~~~~~~~~~~~~~
This data type mirrors what you can write in an INLINE or NOINLINE pragma in
the source program.

If you write nothing at all, you get defaultInlinePragma:
   inl_inline = NoUserInlinePrag
   inl_act    = AlwaysActive
   inl_rule   = FunLike

It's not possible to get that combination by *writing* something, so
if an Id has defaultInlinePragma it means the user didn't specify anything.

If inl_inline = Inline or Inlineable, then the Id should have a stable
unfolding.

If you want to know where InlinePragmas take effect: Look in
GHC.HsToCore.Binds.makeCorePair

Note [inl_inline and inl_act]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* inl_inline says what the user wrote: did they say INLINE, NOINLINE,
  INLINABLE, OPAQUE, or nothing at all

* inl_act says in what phases the unfolding is active or inactive
  E.g  If you write INLINE[1]    then inl_act will be set to ActiveAfter 1
       If you write NOINLINE[1]  then inl_act will be set to ActiveBefore 1
       If you write NOINLINE[~1] then inl_act will be set to ActiveAfter 1
  So note that inl_act does not say what pragma you wrote: it just
  expresses its consequences

* inl_act just says when the unfolding is active; it doesn't say what
  to inline.
If you say INLINE f, then f's inl_act will be AlwaysActive, but in addition f will get a "stable unfolding" with UnfoldingGuidance that tells the inliner to be pretty eager about it. Note [CONLIKE pragma] ~~~~~~~~~~~~~~~~~~~~~ The ConLike constructor of a RuleMatchInfo is aimed at the following. Consider first {-# RULE "r/cons" forall a as. r (a:as) = f (a+1) #-} g b bs = let x = b:bs in ..x...x...(r x)... Now, the rule applies to the (r x) term, because GHC "looks through" the definition of 'x' to see that it is (b:bs). Now consider {-# RULE "r/f" forall v. r (f v) = f (v+1) #-} g v = let x = f v in ..x...x...(r x)... Normally the (r x) would *not* match the rule, because GHC would be scared about duplicating the redex (f v), so it does not "look through" the bindings. However the CONLIKE modifier says to treat 'f' like a constructor in this situation, and "look through" the unfolding for x. So (r x) fires, yielding (f (v+1)). This is all controlled with a user-visible pragma: {-# NOINLINE CONLIKE [1] f #-} The main effects of CONLIKE are: - The occurrence analyser (OccAnal) and simplifier (Simplify) treat CONLIKE thing like constructors, by ANF-ing them - New function GHC.Core.Utils.exprIsExpandable is like exprIsCheap, but additionally spots applications of CONLIKE functions - A CoreUnfolding has a field that caches exprIsExpandable - The rule matcher consults this field. See Note [Expanding variables] in GHC.Core.Rules. Note [OPAQUE pragma] ~~~~~~~~~~~~~~~~~~~~ Suppose a function `f` is marked {-# OPAQUE f #-}. Then every call of `f` should remain a call of `f` throughout optimisation; it should not be turned into a call of a name-mangled variant of `f` (e.g by worker/wrapper). 
The motivation for the OPAQUE pragma is discussed in GHC proposal 0415: https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0415-opaque-pragma.rst Basically it boils down to the desire of GHC API users and GHC RULE writers for calls to certain binders to be left completely untouched by GHCs optimisations. What this entails at the time of writing, is that for every binder annotated with the OPAQUE pragma we: * Do not do worker/wrapper via cast W/W: See the guard in GHC.Core.Opt.Simplify.tryCastWorkerWrapper * Do not any worker/wrapper after demand/CPR analysis. To that end add a guard in GHC.Core.Opt.WorkWrap.tryWW to disable worker/wrapper * It is important that the demand signature and CPR signature do not lie, else clients of the function will believe that it has the CPR property etc. But it won't, because we've disabled worker/wrapper. To avoid the signatures lying: * Strip boxity information from the demand signature in GHC.Core.Opt.DmdAnal.finaliseArgBoxities See Note [The OPAQUE pragma and avoiding the reboxing of arguments] * Strip CPR information from the CPR signature in GHC.Core.Opt.CprAnal.cprAnalBind See Note [The OPAQUE pragma and avoiding the reboxing of results] * Do create specialised versions of the function in * Specialise: see GHC.Core.Opt.Specialise.specCalls * SpecConstr: see GHC.Core.Opt.SpecConstr.specialise Both are accomplished easily: these passes already skip NOINLINE functions with NeverActive activation, and an OPAQUE function is also NeverActive. At the moment of writing, the major difference between the NOINLINE pragma and the OPAQUE pragma is that binders annoted with the NOINLINE pragma _are_ W/W transformed (see also Note [Worker/wrapper for NOINLINE functions]) where binders annoted with the OPAQUE pragma are _not_ W/W transformed. Future "name-mangling" optimisations should respect the OPAQUE pragma and update the list of moving parts referenced in this note. 
-} isConLike :: RuleMatchInfo -> Bool isConLike ConLike = True isConLike _ = False isFunLike :: RuleMatchInfo -> Bool isFunLike FunLike = True isFunLike _ = False noUserInlineSpec :: InlineSpec -> Bool noUserInlineSpec NoUserInlinePrag = True noUserInlineSpec _ = False defaultInlinePragma, alwaysInlinePragma, neverInlinePragma, dfunInlinePragma :: InlinePragma defaultInlinePragma = InlinePragma { inl_src = SourceText $ fsLit "{-# INLINE" , inl_act = AlwaysActive , inl_rule = FunLike , inl_inline = NoUserInlinePrag , inl_sat = Nothing } alwaysInlinePragma = defaultInlinePragma { inl_inline = Inline (inlinePragmaSource defaultInlinePragma) } neverInlinePragma = defaultInlinePragma { inl_act = NeverActive } alwaysInlineConLikePragma :: InlinePragma alwaysInlineConLikePragma = alwaysInlinePragma { inl_rule = ConLike } inlinePragmaSpec :: InlinePragma -> InlineSpec inlinePragmaSpec = inl_inline inlinePragmaSource :: InlinePragma -> SourceText inlinePragmaSource prag = case inl_inline prag of Inline x -> x Inlinable y -> y NoInline z -> z Opaque q -> q NoUserInlinePrag -> NoSourceText inlineSpecSource :: InlineSpec -> SourceText inlineSpecSource spec = case spec of Inline x -> x Inlinable y -> y NoInline z -> z Opaque q -> q NoUserInlinePrag -> NoSourceText -- A DFun has an always-active inline activation so that -- exprIsConApp_maybe can "see" its unfolding -- (However, its actual Unfolding is a DFunUnfolding, which is -- never inlined other than via exprIsConApp_maybe.) 
dfunInlinePragma = defaultInlinePragma { inl_act = AlwaysActive , inl_rule = ConLike } isDefaultInlinePragma :: InlinePragma -> Bool isDefaultInlinePragma (InlinePragma { inl_act = activation , inl_rule = match_info , inl_inline = inline }) = noUserInlineSpec inline && isAlwaysActive activation && isFunLike match_info isInlinePragma :: InlinePragma -> Bool isInlinePragma prag = case inl_inline prag of Inline _ -> True _ -> False isInlinablePragma :: InlinePragma -> Bool isInlinablePragma prag = case inl_inline prag of Inlinable _ -> True _ -> False isNoInlinePragma :: InlinePragma -> Bool isNoInlinePragma prag = case inl_inline prag of NoInline _ -> True _ -> False isAnyInlinePragma :: InlinePragma -> Bool -- INLINE or INLINABLE isAnyInlinePragma prag = case inl_inline prag of Inline _ -> True Inlinable _ -> True _ -> False isOpaquePragma :: InlinePragma -> Bool isOpaquePragma prag = case inl_inline prag of Opaque _ -> True _ -> False inlinePragmaSat :: InlinePragma -> Maybe Arity inlinePragmaSat = inl_sat inlinePragmaActivation :: InlinePragma -> Activation inlinePragmaActivation (InlinePragma { inl_act = activation }) = activation inlinePragmaRuleMatchInfo :: InlinePragma -> RuleMatchInfo inlinePragmaRuleMatchInfo (InlinePragma { inl_rule = info }) = info setInlinePragmaActivation :: InlinePragma -> Activation -> InlinePragma setInlinePragmaActivation prag activation = prag { inl_act = activation } setInlinePragmaRuleMatchInfo :: InlinePragma -> RuleMatchInfo -> InlinePragma setInlinePragmaRuleMatchInfo prag info = prag { inl_rule = info } instance Outputable Activation where ppr AlwaysActive = empty ppr NeverActive = brackets (text "~") ppr (ActiveBefore _ n) = brackets (char '~' <> int n) ppr (ActiveAfter _ n) = brackets (int n) ppr FinalActive = text "[final]" instance Binary Activation where put_ bh NeverActive = putByte bh 0 put_ bh FinalActive = putByte bh 1 put_ bh AlwaysActive = putByte bh 2 put_ bh (ActiveBefore src aa) = do putByte bh 3 put_ bh src put_ 
bh aa put_ bh (ActiveAfter src ab) = do putByte bh 4 put_ bh src put_ bh ab get bh = do h <- getByte bh case h of 0 -> return NeverActive 1 -> return FinalActive 2 -> return AlwaysActive 3 -> do src <- get bh aa <- get bh return (ActiveBefore src aa) _ -> do src <- get bh ab <- get bh return (ActiveAfter src ab) instance Outputable RuleMatchInfo where ppr ConLike = text "CONLIKE" ppr FunLike = text "FUNLIKE" instance Binary RuleMatchInfo where put_ bh FunLike = putByte bh 0 put_ bh ConLike = putByte bh 1 get bh = do h <- getByte bh if h == 1 then return ConLike else return FunLike instance Outputable InlineSpec where ppr (Inline src) = text "INLINE" <+> pprWithSourceText src empty ppr (NoInline src) = text "NOINLINE" <+> pprWithSourceText src empty ppr (Inlinable src) = text "INLINABLE" <+> pprWithSourceText src empty ppr (Opaque src) = text "OPAQUE" <+> pprWithSourceText src empty ppr NoUserInlinePrag = empty instance Binary InlineSpec where put_ bh NoUserInlinePrag = putByte bh 0 put_ bh (Inline s) = do putByte bh 1 put_ bh s put_ bh (Inlinable s) = do putByte bh 2 put_ bh s put_ bh (NoInline s) = do putByte bh 3 put_ bh s put_ bh (Opaque s) = do putByte bh 4 put_ bh s get bh = do h <- getByte bh case h of 0 -> return NoUserInlinePrag 1 -> do s <- get bh return (Inline s) 2 -> do s <- get bh return (Inlinable s) 3 -> do s <- get bh return (NoInline s) _ -> do s <- get bh return (Opaque s) instance Outputable InlinePragma where ppr = pprInline instance Binary InlinePragma where put_ bh (InlinePragma s a b c d) = do put_ bh s put_ bh a put_ bh b put_ bh c put_ bh d get bh = do s <- get bh a <- get bh b <- get bh c <- get bh d <- get bh return (InlinePragma s a b c d) -- | Outputs string for pragma name for any of INLINE/INLINABLE/NOINLINE. This -- differs from the Outputable instance for the InlineSpec type where the pragma -- name string as well as the accompanying SourceText (if any) is printed. 
inlinePragmaName :: InlineSpec -> SDoc inlinePragmaName (Inline _) = text "INLINE" inlinePragmaName (Inlinable _) = text "INLINABLE" inlinePragmaName (NoInline _) = text "NOINLINE" inlinePragmaName (Opaque _) = text "OPAQUE" inlinePragmaName NoUserInlinePrag = empty -- | Pretty-print without displaying the user-specified 'InlineSpec'. pprInline :: InlinePragma -> SDoc pprInline = pprInline' True -- | Pretty-print including the user-specified 'InlineSpec'. pprInlineDebug :: InlinePragma -> SDoc pprInlineDebug = pprInline' False pprInline' :: Bool -- True <=> do not display the inl_inline field -> InlinePragma -> SDoc pprInline' emptyInline (InlinePragma { inl_inline = inline, inl_act = activation, inl_rule = info, inl_sat = mb_arity }) = pp_inl inline <> pp_act inline activation <+> pp_sat <+> pp_info where pp_inl x = if emptyInline then empty else inlinePragmaName x pp_act Inline {} AlwaysActive = empty pp_act NoInline {} NeverActive = empty pp_act Opaque {} NeverActive = empty pp_act _ act = ppr act pp_sat | Just ar <- mb_arity = parens (text "sat-args=" <> int ar) | otherwise = empty pp_info | isFunLike info = empty | otherwise = ppr info {- ********************************************************************* * * UnfoldingSource * * ********************************************************************* -} data UnfoldingSource = -- See also Note [Historical note: unfoldings for wrappers] VanillaSrc -- The current rhs of the function -- Replace uf_tmpl each time around -- See Note [Stable unfoldings] in GHC.Core | StableUserSrc -- From a user-specified INLINE or INLINABLE pragma | StableSystemSrc -- From a wrapper, or system-generated unfolding | CompulsorySrc -- Something that *has* no binding, so you *must* inline it -- Only a few primop-like things have this property -- (see "GHC.Types.Id.Make", calls to mkCompulsoryUnfolding). -- Inline absolutely always, however boring the context. 
isStableUserSource :: UnfoldingSource -> Bool isStableUserSource StableUserSrc = True isStableUserSource _ = False isStableSystemSource :: UnfoldingSource -> Bool isStableSystemSource StableSystemSrc = True isStableSystemSource _ = False isCompulsorySource :: UnfoldingSource -> Bool isCompulsorySource CompulsorySrc = True isCompulsorySource _ = False isStableSource :: UnfoldingSource -> Bool isStableSource CompulsorySrc = True isStableSource StableSystemSrc = True isStableSource StableUserSrc = True isStableSource VanillaSrc = False instance Binary UnfoldingSource where put_ bh CompulsorySrc = putByte bh 0 put_ bh StableUserSrc = putByte bh 1 put_ bh StableSystemSrc = putByte bh 2 put_ bh VanillaSrc = putByte bh 3 get bh = do h <- getByte bh case h of 0 -> return CompulsorySrc 1 -> return StableUserSrc 2 -> return StableSystemSrc _ -> return VanillaSrc instance Outputable UnfoldingSource where ppr CompulsorySrc = text "Compulsory" ppr StableUserSrc = text "StableUser" ppr StableSystemSrc = text "StableSystem" ppr VanillaSrc = text "" {- ************************************************************************ * * IntWithInf * * ************************************************************************ Represents an integer or positive infinity -} -- | An integer or infinity data IntWithInf = Int {-# UNPACK #-} !Int | Infinity deriving Eq -- | A representation of infinity infinity :: IntWithInf infinity = Infinity instance Ord IntWithInf where compare Infinity Infinity = EQ compare (Int _) Infinity = LT compare Infinity (Int _) = GT compare (Int a) (Int b) = a `compare` b instance Outputable IntWithInf where ppr Infinity = char '∞' ppr (Int n) = int n instance Num IntWithInf where (+) = plusWithInf (*) = mulWithInf abs Infinity = Infinity abs (Int n) = Int (abs n) signum Infinity = Int 1 signum (Int n) = Int (signum n) fromInteger = Int . 
fromInteger (-) = panic "subtracting IntWithInfs" intGtLimit :: Int -> IntWithInf -> Bool intGtLimit _ Infinity = False intGtLimit n (Int m) = n > m -- | Add two 'IntWithInf's plusWithInf :: IntWithInf -> IntWithInf -> IntWithInf plusWithInf Infinity _ = Infinity plusWithInf _ Infinity = Infinity plusWithInf (Int a) (Int b) = Int (a + b) -- | Multiply two 'IntWithInf's mulWithInf :: IntWithInf -> IntWithInf -> IntWithInf mulWithInf Infinity _ = Infinity mulWithInf _ Infinity = Infinity mulWithInf (Int a) (Int b) = Int (a * b) -- | Subtract an 'Int' from an 'IntWithInf' subWithInf :: IntWithInf -> Int -> IntWithInf subWithInf Infinity _ = Infinity subWithInf (Int a) b = Int (a - b) -- | Turn a positive number into an 'IntWithInf', where 0 represents infinity treatZeroAsInf :: Int -> IntWithInf treatZeroAsInf 0 = Infinity treatZeroAsInf n = Int n -- | Inject any integer into an 'IntWithInf' mkIntWithInf :: Int -> IntWithInf mkIntWithInf = Int {- ********************************************************************* * * Types vs Kinds * * ********************************************************************* -} -- | Flag to see whether we're type-checking terms or kind-checking types data TypeOrKind = TypeLevel | KindLevel deriving Eq instance Outputable TypeOrKind where ppr TypeLevel = text "TypeLevel" ppr KindLevel = text "KindLevel" isTypeLevel :: TypeOrKind -> Bool isTypeLevel TypeLevel = True isTypeLevel KindLevel = False isKindLevel :: TypeOrKind -> Bool isKindLevel TypeLevel = False isKindLevel KindLevel = True {- ********************************************************************* * * Levity and TypeOrConstraint * * ********************************************************************* -} {- The types `Levity` and `TypeOrConstraint` are internal to GHC. They have the same shape as the eponymous types in the library ghc-prim:GHC.Types but they aren't the same types -- after all, they are defined in a different module. 
-} data Levity = Lifted | Unlifted deriving (Data,Eq,Ord,Show) instance Outputable Levity where ppr Lifted = text "Lifted" ppr Unlifted = text "Unlifted" instance Binary Levity where put_ bh = \case Lifted -> putByte bh 0 Unlifted -> putByte bh 1 get bh = getByte bh >>= \case 0 -> pure Lifted _ -> pure Unlifted mightBeLifted :: Maybe Levity -> Bool mightBeLifted (Just Unlifted) = False mightBeLifted _ = True mightBeUnlifted :: Maybe Levity -> Bool mightBeUnlifted (Just Lifted) = False mightBeUnlifted _ = True data TypeOrConstraint = TypeLike | ConstraintLike deriving( Eq, Ord, Data ) {- ********************************************************************* * * TyConFlavour * * ********************************************************************* -} -- | Paints a picture of what a 'TyCon' represents, in broad strokes. -- This is used towards more informative error messages. data TyConFlavour tc = ClassFlavour | TupleFlavour Boxity | SumFlavour | DataTypeFlavour | NewtypeFlavour | AbstractTypeFlavour | OpenFamilyFlavour TypeOrData (Maybe tc) -- Just tc <=> (tc == associated class) | ClosedTypeFamilyFlavour | TypeSynonymFlavour | BuiltInTypeFlavour -- ^ e.g., the @(->)@ 'TyCon'. | PromotedDataConFlavour deriving (Eq, Data, Functor) instance Outputable (TyConFlavour tc) where ppr = text . 
go where go ClassFlavour = "class" go (TupleFlavour boxed) | isBoxed boxed = "tuple" | otherwise = "unboxed tuple" go SumFlavour = "unboxed sum" go DataTypeFlavour = "data type" go NewtypeFlavour = "newtype" go AbstractTypeFlavour = "abstract type" go (OpenFamilyFlavour type_or_data mb_par) = assoc ++ t_or_d ++ " family" where assoc = if isJust mb_par then "associated " else "" t_or_d = case type_or_data of { IAmType -> "type"; IAmData -> "data" } go ClosedTypeFamilyFlavour = "type family" go TypeSynonymFlavour = "type synonym" go BuiltInTypeFlavour = "built-in type" go PromotedDataConFlavour = "promoted data constructor" instance NFData tc => NFData (TyConFlavour tc) where rnf ClassFlavour = () rnf (TupleFlavour !_) = () rnf SumFlavour = () rnf DataTypeFlavour = () rnf NewtypeFlavour = () rnf AbstractTypeFlavour = () rnf (OpenFamilyFlavour !_ mb_tc) = rnf mb_tc rnf ClosedTypeFamilyFlavour = () rnf TypeSynonymFlavour = () rnf BuiltInTypeFlavour = () rnf PromotedDataConFlavour = () -- | Get the enclosing class TyCon (if there is one) for the given TyConFlavour tyConFlavourAssoc_maybe :: TyConFlavour tc -> Maybe tc tyConFlavourAssoc_maybe (OpenFamilyFlavour _ mb_parent) = mb_parent tyConFlavourAssoc_maybe _ = Nothing -- | Whether something is a type or a data declaration, -- e.g. a type family or a data family. data TypeOrData = IAmData | IAmType deriving (Eq, Data) instance Outputable TypeOrData where ppr IAmData = text "data" ppr IAmType = text "type" {- ********************************************************************* * * Defaulting options * * ********************************************************************* -} {- Note [Type variable defaulting options] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is an overview of the current type variable defaulting mechanisms, in the order in which they happen. 
GHC.Tc.Utils.TcMType.defaultTyVar This is a built-in defaulting mechanism for the following type variables: (1) kind variables with -XNoPolyKinds, (2) type variables of kind 'RuntimeRep' default to 'LiftedRep', of kind 'Levity' to 'Lifted', and of kind 'Multiplicity' to 'Many'. It is used in many situations: - inferring a type (e.g. a declaration with no type signature or a partial type signature), in 'GHC.Tc.Solver.simplifyInfer', - simplifying top-level constraints in 'GHC.Tc.Solver.simplifyTop', - kind checking a CUSK in 'GHC.Tc.Gen.kcCheckDeclHeader_cusk', - 'GHC.Tc.TyCl.generaliseTcTyCon', - type checking type family and data family instances, in 'GHC.Tc.TyCl.tcTyFamInstEqnGuts' and 'GHC.Tc.TyCl.Instance.tcDataFamInstHeader' respectively, - type-checking rules in 'GHC.Tc.Gen.tcRule', - kind generalisation in 'GHC.Tc.Gen.HsType.kindGeneralizeSome' and 'GHC.Tc.Gen.HsType.kindGeneralizeAll'. Different situations call for a different defaulting strategy, so 'defaultTyVar' takes a strategy parameter which determines which type variables to default. Currently, this strategy is set as follows: - Kind variables: - with -XNoPolyKinds, these must be defaulted. This includes kind variables of kind 'RuntimeRep', 'Levity' and 'Multiplicity'. Test case: T20584. - with -XPolyKinds, behave as if they were type variables (see below). - Type variables of kind 'RuntimeRep', 'Levity' or 'Multiplicity' - in type and data families instances, these are not defaulted. Test case: T17536. - otherwise: default variables of these three kinds. This ensures that in a program such as foo :: forall a. a -> a foo x = x we continue to infer `a :: Type`. Note that the strategy is set in two steps: callers of 'defaultTyVars' only specify whether to default type variables of "non-standard" kinds (that is, of kinds 'RuntimeRep'/'Levity'/'Multiplicity'). 
Then 'defaultTyVars' determines which variables are type variables and which are kind variables, and if the user has asked for -XNoPolyKinds we default the kind variables. GHC.Tc.Solver.defaultTyVarTcS This is a built-in defaulting mechanism that happens after the constraint solver has run, in 'GHC.Tc.Solver.simplifyTopWanteds'. It only defaults type (and kind) variables of kind 'RuntimeRep', 'Levity', 'Multiplicity'. It is not configurable, neither by options nor by the user. GHC.Tc.Solver.applyDefaultingRules This is typeclass defaulting, and includes defaulting plugins. It happens right after 'defaultTyVarTcS' in 'GHC.Tc.Solver.simplifyTopWanteds'. It is user configurable, using default declarations (/plugins). GHC.Iface.Type.defaultIfaceTyVarsOfKind This is a built-in defaulting mechanism that only applies when pretty-printing. It defaults 'RuntimeRep'/'Levity' variables unless -fprint-explicit-runtime-reps is enabled, and 'Multiplicity' variables unless -XLinearTypes is enabled. -} -- | Specify whether to default type variables of kind 'RuntimeRep'/'Levity'/'Multiplicity'. data NonStandardDefaultingStrategy -- | Default type variables of the given kinds: -- -- - default 'RuntimeRep' variables to 'LiftedRep' -- - default 'Levity' variables to 'Lifted' -- - default 'Multiplicity' variables to 'Many' = DefaultNonStandardTyVars -- | Try not to default type variables of the kinds 'RuntimeRep'/'Levity'/'Multiplicity'. -- -- Note that these might get defaulted anyway, if they are kind variables -- and `-XNoPolyKinds` is enabled. | TryNotToDefaultNonStandardTyVars -- | Specify whether to default kind variables, and type variables -- of kind 'RuntimeRep'/'Levity'/'Multiplicity'. data DefaultingStrategy -- | Default kind variables: -- -- - default kind variables of kind 'Type' to 'Type', -- - default 'RuntimeRep'/'Levity'/'Multiplicity' kind variables -- to 'LiftedRep'/'Lifted'/'Many', respectively. 
-- -- When this strategy is used, it means that we have determined that -- the variables we are considering defaulting are all kind variables. -- -- Usually, we pass this option when -XNoPolyKinds is enabled. = DefaultKindVars -- | Default (or don't default) non-standard variables, of kinds -- 'RuntimeRep', 'Levity' and 'Multiplicity'. | NonStandardDefaulting NonStandardDefaultingStrategy defaultNonStandardTyVars :: DefaultingStrategy -> Bool defaultNonStandardTyVars DefaultKindVars = True defaultNonStandardTyVars (NonStandardDefaulting DefaultNonStandardTyVars) = True defaultNonStandardTyVars (NonStandardDefaulting TryNotToDefaultNonStandardTyVars) = False instance Outputable NonStandardDefaultingStrategy where ppr DefaultNonStandardTyVars = text "DefaultOnlyNonStandardTyVars" ppr TryNotToDefaultNonStandardTyVars = text "TryNotToDefaultNonStandardTyVars" instance Outputable DefaultingStrategy where ppr DefaultKindVars = text "DefaultKindVars" ppr (NonStandardDefaulting ns) = text "NonStandardDefaulting" <+> ppr ns ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Breakpoint.hs0000644000000000000000000000347007346545000021450 0ustar0000000000000000-- | Breakpoint related types module GHC.Types.Breakpoint ( BreakpointId (..) , InternalBreakpointId (..) , toBreakpointId ) where import GHC.Prelude import GHC.Unit.Module -- | Breakpoint identifier. 
-- -- See Note [Breakpoint identifiers] data BreakpointId = BreakpointId { bi_tick_mod :: !Module -- ^ Breakpoint tick module , bi_tick_index :: !Int -- ^ Breakpoint tick index } -- | Internal breakpoint identifier -- -- See Note [Breakpoint identifiers] data InternalBreakpointId = InternalBreakpointId { ibi_tick_mod :: !Module -- ^ Breakpoint tick module , ibi_tick_index :: !Int -- ^ Breakpoint tick index , ibi_info_mod :: !Module -- ^ Breakpoint info module , ibi_info_index :: !Int -- ^ Breakpoint info index } toBreakpointId :: InternalBreakpointId -> BreakpointId toBreakpointId ibi = BreakpointId { bi_tick_mod = ibi_tick_mod ibi , bi_tick_index = ibi_tick_index ibi } -- Note [Breakpoint identifiers] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- Before optimization a breakpoint is identified uniquely with a tick module -- and a tick index. See BreakpointId. A tick module contains an array, indexed -- with the tick indexes, which indicates breakpoint status. -- -- When we generate ByteCode, we collect information for every breakpoint at -- their *occurrence sites* (see CgBreakInfo in GHC.ByteCode.Types) and these info -- are stored in the ModIface of the occurrence module. Because of inlining, we -- can't reuse the tick index to uniquely identify an occurrence; because of -- cross-module inlining, we can't assume that the occurrence module is the same -- as the tick module (#24712). -- -- So every breakpoint occurrence gets assigned a module-unique *info index* and -- we store it alongside the occurrence module (*info module*) in the -- InternalBreakpointId datatype. ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/CompleteMatch.hs0000644000000000000000000000464207346545000022101 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE UndecidableInstances #-} -- | COMPLETE signature module GHC.Types.CompleteMatch ( CompleteMatchX(..) 
, CompleteMatch, CompleteMatches , DsCompleteMatch, DsCompleteMatches , mkCompleteMatch, vanillaCompleteMatch , completeMatchAppliesAtType ) where import GHC.Prelude import GHC.Core.TyCo.Rep import GHC.Types.Unique import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.Type ( splitTyConApp_maybe ) import GHC.Types.Name ( Name ) import GHC.Types.Unique.DSet import GHC.Utils.Outputable type CompleteMatch = CompleteMatchX Name type DsCompleteMatch = CompleteMatchX ConLike type CompleteMatches = [CompleteMatch] type DsCompleteMatches = [DsCompleteMatch] -- | A list of conlikes which represents a complete pattern match. -- These arise from @COMPLETE@ signatures. -- See also Note [Implementation of COMPLETE pragmas]. data CompleteMatchX con = CompleteMatch { cmConLikes :: UniqDSet con -- ^ The set of constructor names , cmResultTyCon :: Maybe Name -- ^ The optional, concrete result TyCon name the set applies to } deriving Eq mkCompleteMatch :: UniqDSet con -> Maybe Name -> CompleteMatchX con mkCompleteMatch nms mb_tc = CompleteMatch { cmConLikes = nms, cmResultTyCon = mb_tc } vanillaCompleteMatch :: UniqDSet con -> CompleteMatchX con vanillaCompleteMatch nms = mkCompleteMatch nms Nothing instance Outputable con => Outputable (CompleteMatchX con) where ppr (CompleteMatch cls mty) = case mty of Nothing -> ppr cls Just ty -> ppr cls <> text "@" <> parens (ppr ty) -- | Does this 'COMPLETE' set apply at this type? -- -- See the part about "result type constructors" in -- Note [Implementation of COMPLETE pragmas] in GHC.HsToCore.Pmc.Solver. 
completeMatchAppliesAtType :: Type -> CompleteMatchX con -> Bool completeMatchAppliesAtType ty cm = all @Maybe ty_matches (getUnique <$> cmResultTyCon cm) where ty_matches :: Unique -> Bool ty_matches sig_tc | Just (tc, _arg_tys) <- splitTyConApp_maybe ty , tc `hasKey` sig_tc || sig_tc `is_family_ty_con_of` tc -- #24326: sig_tc might be the data Family TyCon of the representation -- TyCon tc -- this CompleteMatch still applies = True | otherwise = False fam_tc `is_family_ty_con_of` repr_tc = case fst <$> tyConFamInst_maybe repr_tc of Just tc -> tc `hasKey` fam_tc Nothing -> False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/CostCentre.hs0000644000000000000000000003433607346545000021430 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.CostCentre ( -- All abstract except to friend: ParseIface.y CostCentre(..), CcName, CCFlavour, mkCafFlavour, mkExprCCFlavour, mkDeclCCFlavour, mkHpcCCFlavour, mkLateCCFlavour, mkCallerCCFlavour, getAllCAFsCC, pprCostCentre, CostCentreStack, pprCostCentreStack, CollectedCCs, emptyCollectedCCs, collectCC, currentCCS, dontCareCCS, isCurrentCCS, maybeSingletonCCS, mkUserCC, mkAutoCC, mkAllCafsCC, mkSingletonCCS, isCafCCS, isCafCC, isSccCountCC, sccAbleCC, ccFromThisModule, pprCostCentreCore, costCentreUserName, costCentreUserNameFS, costCentreSrcSpan, cmpCostCentre -- used for removing dups in a list ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Types.Var import GHC.Types.Name import GHC.Unit.Module import GHC.Types.Unique import GHC.Utils.Outputable import GHC.Types.SrcLoc import GHC.Data.FastString import GHC.Types.CostCentre.State import Data.Data ----------------------------------------------------------------------------- -- Cost Centres -- | A Cost Centre is a single @{-# SCC #-}@ annotation. 
data CostCentre = NormalCC { cc_flavour :: CCFlavour, -- ^ Two cost centres may have the same name and -- module but different SrcSpans, so we need a way to -- distinguish them easily and give them different -- object-code labels. So every CostCentre has an -- associated flavour that indicates how it was -- generated, and flavours that allow multiple instances -- of the same name and module have a deterministic 0-based -- index. cc_name :: CcName, -- ^ Name of the cost centre itself cc_mod :: Module, -- ^ Name of module defining this CC. cc_loc :: SrcSpan } | AllCafsCC { cc_mod :: Module, -- Name of module defining this CC. cc_loc :: SrcSpan } deriving Data type CcName = FastString data IndexedCCFlavour = ExprCC -- ^ Explicitly annotated expression | DeclCC -- ^ Explicitly annotated declaration | HpcCC -- ^ Generated by HPC for coverage | LateCC -- ^ Annotated by the one of the prof-last* passes. | CallerCC -- ^ Annotated by the one of the prof-last* passes. deriving (Eq,Ord,Data,Enum) -- | The flavour of a cost centre. -- -- Index fields represent 0-based indices giving source-code ordering of -- centres with the same module, name, and flavour. 
data CCFlavour = CafCC -- ^ Auto-generated top-level thunk, they all go into the same bucket | IndexedCC !IndexedCCFlavour !CostCentreIndex -- ^ Explicitly annotated expression deriving (Eq, Ord, Data) -- Construct a CC flavour mkCafFlavour :: CCFlavour mkCafFlavour = CafCC mkExprCCFlavour :: CostCentreIndex -> CCFlavour mkExprCCFlavour idx = IndexedCC ExprCC idx mkDeclCCFlavour :: CostCentreIndex -> CCFlavour mkDeclCCFlavour idx = IndexedCC DeclCC idx mkHpcCCFlavour :: CostCentreIndex -> CCFlavour mkHpcCCFlavour idx = IndexedCC HpcCC idx mkLateCCFlavour :: CostCentreIndex -> CCFlavour mkLateCCFlavour idx = IndexedCC LateCC idx mkCallerCCFlavour :: CostCentreIndex -> CCFlavour mkCallerCCFlavour idx = IndexedCC CallerCC idx -- | Extract the index from a flavour flavourIndex :: CCFlavour -> Int flavourIndex CafCC = 0 flavourIndex (IndexedCC _flav x) = unCostCentreIndex x instance Eq CostCentre where c1 == c2 = case c1 `cmpCostCentre` c2 of { EQ -> True; _ -> False } instance Ord CostCentre where compare = cmpCostCentre cmpCostCentre :: CostCentre -> CostCentre -> Ordering cmpCostCentre (AllCafsCC {cc_mod = m1}) (AllCafsCC {cc_mod = m2}) = m1 `compare` m2 cmpCostCentre NormalCC {cc_flavour = f1, cc_mod = m1, cc_name = n1} NormalCC {cc_flavour = f2, cc_mod = m2, cc_name = n2} -- first key is module name, then centre name, then flavour = mconcat [ m1 `compare` m2 , n1 `lexicalCompareFS` n2 -- compare lexically to avoid non-determinism , f1 `compare` f2 ] cmpCostCentre other_1 other_2 = let tag1 = tag_CC other_1 tag2 = tag_CC other_2 in if tag1 < tag2 then LT else GT where tag_CC :: CostCentre -> Int tag_CC (NormalCC {}) = 0 tag_CC (AllCafsCC {}) = 1 ----------------------------------------------------------------------------- -- Predicates on CostCentre isCafCC :: CostCentre -> Bool isCafCC (AllCafsCC {}) = True isCafCC (NormalCC {cc_flavour = CafCC}) = True isCafCC _ = False -- | Is this a cost-centre which records scc counts isSccCountCC :: CostCentre -> Bool 
isSccCountCC cc | isCafCC cc = False | otherwise = True -- | Is this a cost-centre which can be sccd ? sccAbleCC :: CostCentre -> Bool sccAbleCC cc | isCafCC cc = False | otherwise = True ccFromThisModule :: CostCentre -> Module -> Bool ccFromThisModule cc m = cc_mod cc == m ----------------------------------------------------------------------------- -- Building cost centres mkUserCC :: FastString -> Module -> SrcSpan -> CCFlavour -> CostCentre mkUserCC cc_name mod loc flavour = NormalCC { cc_name = cc_name, cc_mod = mod, cc_loc = loc, cc_flavour = flavour } mkAutoCC :: Id -> Module -> CostCentre mkAutoCC id mod = NormalCC { cc_name = str, cc_mod = mod, cc_loc = nameSrcSpan (getName id), cc_flavour = CafCC } where name = getName id -- beware: only external names are guaranteed to have unique -- Occnames. If the name is not external, we must append its -- Unique. -- See bug #249, tests prof001, prof002, also #2411 str | isExternalName name = occNameFS (getOccName id) | otherwise = concatFS [occNameFS (getOccName id), fsLit "_", mkFastString (show (getUnique name))] mkAllCafsCC :: Module -> SrcSpan -> CostCentre mkAllCafsCC m loc = AllCafsCC { cc_mod = m, cc_loc = loc } ----------------------------------------------------------------------------- -- Cost Centre Stacks -- | A Cost Centre Stack is something that can be attached to a closure. -- This is either: -- -- * the current cost centre stack (CCCS) -- * a pre-defined cost centre stack (there are several -- pre-defined CCSs, see below). data CostCentreStack = CurrentCCS -- Pinned on a let(rec)-bound -- thunk/function/constructor, this says that the -- cost centre to be attached to the object, when it -- is allocated, is whatever is in the -- current-cost-centre-stack register. | DontCareCCS -- We need a CCS to stick in static closures -- (for data), but we *don't* expect them to -- accumulate any costs. But we still need -- the placeholder. This CCS is it. 
| SingletonCCS CostCentre deriving (Eq, Ord) -- needed for Ord on CLabel -- synonym for triple which describes the cost centre info in the generated -- code for a module. type CollectedCCs = ( [CostCentre] -- local cost-centres that need to be decl'd , [CostCentreStack] -- pre-defined "singleton" cost centre stacks ) emptyCollectedCCs :: CollectedCCs emptyCollectedCCs = ([], []) collectCC :: CostCentre -> CostCentreStack -> CollectedCCs -> CollectedCCs collectCC cc ccs (c, cs) = (cc : c, ccs : cs) currentCCS, dontCareCCS :: CostCentreStack currentCCS = CurrentCCS dontCareCCS = DontCareCCS ----------------------------------------------------------------------------- -- Predicates on Cost-Centre Stacks isCurrentCCS :: CostCentreStack -> Bool isCurrentCCS CurrentCCS = True isCurrentCCS _ = False isCafCCS :: CostCentreStack -> Bool isCafCCS (SingletonCCS cc) = isCafCC cc isCafCCS _ = False maybeSingletonCCS :: CostCentreStack -> Maybe CostCentre maybeSingletonCCS (SingletonCCS cc) = Just cc maybeSingletonCCS _ = Nothing mkSingletonCCS :: CostCentre -> CostCentreStack mkSingletonCCS cc = SingletonCCS cc ----------------------------------------------------------------------------- -- Printing Cost Centre Stacks. -- The outputable instance for CostCentreStack prints the CCS as a C -- expression. 
instance Outputable CostCentreStack where ppr = pprCostCentreStack pprCostCentreStack :: IsLine doc => CostCentreStack -> doc pprCostCentreStack CurrentCCS = text "CCCS" pprCostCentreStack DontCareCCS = text "CCS_DONT_CARE" pprCostCentreStack (SingletonCCS cc) = pprCostCentre cc <> text "_ccs" {-# SPECIALISE pprCostCentreStack :: CostCentreStack -> SDoc #-} {-# SPECIALISE pprCostCentreStack :: CostCentreStack -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable ----------------------------------------------------------------------------- -- Printing Cost Centres -- -- There are several different ways in which we might want to print a -- cost centre: -- -- - the name of the cost centre, for profiling output (a C string) -- - the label, i.e. C label for cost centre in .hc file. -- - the debugging name, for output in -ddump things -- - the interface name, for printing in _scc_ exprs in iface files. -- -- The last 3 are derived from costCentreStr below. The first is given -- by costCentreName. instance Outputable CostCentre where ppr = pprCostCentre pprCostCentre :: IsLine doc => CostCentre -> doc pprCostCentre cc = docWithStyle (ppCostCentreLbl cc) (\_ -> ftext (costCentreUserNameFS cc)) {-# SPECIALISE pprCostCentre :: CostCentre -> SDoc #-} {-# SPECIALISE pprCostCentre :: CostCentre -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- Printing in Core pprCostCentreCore :: CostCentre -> SDoc pprCostCentreCore (AllCafsCC {cc_mod = m}) = text "__sccC" <+> braces (ppr m) pprCostCentreCore (NormalCC {cc_flavour = flavour, cc_name = n, cc_mod = m, cc_loc = loc}) = text "__scc" <+> braces (hsep [ ppr m <> char '.' 
<> ftext n, pprFlavourCore flavour, whenPprDebug (ppr loc) ]) -- ^ Print a flavour in Core pprFlavourCore :: CCFlavour -> SDoc pprFlavourCore CafCC = text "__C" pprFlavourCore f = pprIdxCore $ flavourIndex f -- ^ Print a flavour's index in Core pprIdxCore :: Int -> SDoc pprIdxCore 0 = empty pprIdxCore idx = whenPprDebug $ ppr idx -- Printing as a C label ppCostCentreLbl :: IsLine doc => CostCentre -> doc ppCostCentreLbl (AllCafsCC {cc_mod = m}) = pprModule m <> text "_CAFs_cc" ppCostCentreLbl (NormalCC {cc_flavour = f, cc_name = n, cc_mod = m}) = pprModule m <> char '_' <> ztext (zEncodeFS n) <> char '_' <> ppFlavourLblComponent f <> text "_cc" {-# SPECIALISE ppCostCentreLbl :: CostCentre -> SDoc #-} {-# SPECIALISE ppCostCentreLbl :: CostCentre -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- ^ Print the flavour component of a C label ppFlavourLblComponent :: IsLine doc => CCFlavour -> doc ppFlavourLblComponent CafCC = text "CAF" ppFlavourLblComponent (IndexedCC flav i) = case flav of ExprCC -> text "EXPR" <> ppIdxLblComponent i DeclCC -> text "DECL" <> ppIdxLblComponent i HpcCC -> text "HPC" <> ppIdxLblComponent i LateCC -> text "LATECC" <> ppIdxLblComponent i CallerCC -> text "CALLERCC" <> ppIdxLblComponent i {-# SPECIALISE ppFlavourLblComponent :: CCFlavour -> SDoc #-} {-# SPECIALISE ppFlavourLblComponent :: CCFlavour -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- ^ Print the flavour index component of a C label ppIdxLblComponent :: IsLine doc => CostCentreIndex -> doc ppIdxLblComponent n = case unCostCentreIndex n of 0 -> empty n -> int n {-# SPECIALISE ppIdxLblComponent :: CostCentreIndex -> SDoc #-} {-# SPECIALISE ppIdxLblComponent :: CostCentreIndex -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- This is the name to go in the user-displayed string, -- recorded in the cost centre declaration costCentreUserName :: CostCentre -> String costCentreUserName = unpackFS . 
costCentreUserNameFS costCentreUserNameFS :: CostCentre -> FastString costCentreUserNameFS (AllCafsCC {}) = mkFastString "CAF" costCentreUserNameFS (NormalCC {cc_name = name, cc_flavour = is_caf}) = case is_caf of CafCC -> mkFastString "CAF:" `appendFS` name _ -> name costCentreSrcSpan :: CostCentre -> SrcSpan costCentreSrcSpan = cc_loc instance Binary CCFlavour where put_ bh CafCC = putByte bh 0 put_ bh (IndexedCC flav i) = do putByte bh 1 let !flav_index = fromEnum flav put_ bh flav_index put_ bh i get bh = do h <- getByte bh case h of 0 -> return CafCC _ -> do IndexedCC <$> (toEnum <$> get bh) <*> get bh instance Binary CostCentre where put_ bh (NormalCC aa ab ac _ad) = do putByte bh 0 put_ bh aa put_ bh ab put_ bh ac put_ bh (AllCafsCC ae _af) = do putByte bh 1 put_ bh ae get bh = do h <- getByte bh case h of 0 -> do aa <- get bh ab <- get bh ac <- get bh return (NormalCC aa ab ac noSrcSpan) _ -> do ae <- get bh return (AllCafsCC ae noSrcSpan) -- We ignore the SrcSpans in CostCentres when we serialise them, -- and set the SrcSpans to noSrcSpan when deserialising. This is -- ok, because we only need the SrcSpan when declaring the -- CostCentre in the original module, it is not used by importing -- modules. 
getAllCAFsCC :: Module -> (CostCentre, CostCentreStack) getAllCAFsCC this_mod = let span = mkGeneralSrcSpan (mkFastString "") -- XXX do better all_cafs_cc = mkAllCafsCC this_mod span all_cafs_ccs = mkSingletonCCS all_cafs_cc in (all_cafs_cc, all_cafs_ccs) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/CostCentre/0000755000000000000000000000000007346545000021063 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/CostCentre/State.hs0000644000000000000000000000223207346545000022476 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Types.CostCentre.State ( CostCentreState , newCostCentreState , CostCentreIndex , unCostCentreIndex , getCCIndex ) where import GHC.Prelude import GHC.Data.FastString import GHC.Data.FastString.Env import Data.Data import GHC.Utils.Binary -- | Per-module state for tracking cost centre indices. -- -- See documentation of 'GHC.Types.CostCentre.cc_flavour' for more details. newtype CostCentreState = CostCentreState (FastStringEnv Int) -- | Initialize cost centre state. newCostCentreState :: CostCentreState newCostCentreState = CostCentreState emptyFsEnv -- | An index into a given cost centre module,name,flavour set newtype CostCentreIndex = CostCentreIndex { unCostCentreIndex :: Int } deriving (Eq, Ord, Data, Binary) -- | Get a new index for a given cost centre name. getCCIndex :: FastString -> CostCentreState -> (CostCentreIndex, CostCentreState) getCCIndex nm (CostCentreState m) = (CostCentreIndex idx, CostCentreState m') where m_idx = lookupFsEnv m nm idx = maybe 0 id m_idx m' = extendFsEnv m nm (idx + 1) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Cpr.hs0000644000000000000000000001717007346545000020100 0ustar0000000000000000{-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE PatternSynonyms #-} -- | Types for the Constructed Product Result lattice. 
-- "GHC.Core.Opt.CprAnal" and "GHC.Core.Opt.WorkWrap.Utils" -- are its primary customers via 'GHC.Types.Id.idCprSig'. module GHC.Types.Cpr ( Cpr (ConCpr), topCpr, botCpr, flatConCpr, asConCpr, CprType (..), topCprType, botCprType, flatConCprType, lubCprType, applyCprTy, abstractCprTy, trimCprTy, UnpackConFieldsResult (..), unpackConFieldsCpr, CprSig (..), topCprSig, isTopCprSig, mkCprSigForArity, mkCprSig, seqCprSig, prependArgsCprSig ) where import GHC.Prelude import GHC.Core.DataCon import GHC.Types.Basic import GHC.Utils.Binary import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic -- -- * Cpr -- data Cpr = BotCpr | ConCpr_ !ConTag ![Cpr] -- ^ The number of field Cprs equals 'dataConRepArity'. -- If all of them are top, better use 'FlatConCpr', as ensured by the pattern -- synonym 'ConCpr'. | FlatConCpr !ConTag -- ^ @FlatConCpr tag@ is an efficient encoding for @'ConCpr_' tag [TopCpr..]@. -- Purely for compiler perf. Can be constructed with 'ConCpr'. | TopCpr deriving Eq pattern ConCpr :: ConTag -> [Cpr] -> Cpr pattern ConCpr t cs <- ConCpr_ t cs where ConCpr t cs | all (== TopCpr) cs = FlatConCpr t | otherwise = ConCpr_ t cs {-# COMPLETE BotCpr, TopCpr, FlatConCpr, ConCpr #-} viewConTag :: Cpr -> Maybe ConTag viewConTag (FlatConCpr t) = Just t viewConTag (ConCpr t _) = Just t viewConTag _ = Nothing {-# INLINE viewConTag #-} lubCpr :: Cpr -> Cpr -> Cpr lubCpr BotCpr cpr = cpr lubCpr cpr BotCpr = cpr lubCpr (FlatConCpr t1) (viewConTag -> Just t2) | t1 == t2 = FlatConCpr t1 lubCpr (viewConTag -> Just t1) (FlatConCpr t2) | t1 == t2 = FlatConCpr t2 lubCpr (ConCpr t1 cs1) (ConCpr t2 cs2) | t1 == t2 = ConCpr t1 (lubFieldCprs cs1 cs2) lubCpr _ _ = TopCpr lubFieldCprs :: [Cpr] -> [Cpr] -> [Cpr] lubFieldCprs as bs | as `equalLength` bs = zipWith lubCpr as bs | otherwise = [] topCpr :: Cpr topCpr = TopCpr botCpr :: Cpr botCpr = BotCpr flatConCpr :: ConTag -> Cpr flatConCpr t = FlatConCpr t trimCpr :: Cpr -> Cpr trimCpr BotCpr = botCpr trimCpr _ = topCpr 
asConCpr :: Cpr -> Maybe (ConTag, [Cpr]) asConCpr (ConCpr t cs) = Just (t, cs) asConCpr (FlatConCpr t) = Just (t, []) asConCpr TopCpr = Nothing asConCpr BotCpr = Nothing seqCpr :: Cpr -> () seqCpr (ConCpr _ cs) = foldr (seq . seqCpr) () cs seqCpr _ = () -- -- * CprType -- -- | The abstract domain \(A_t\) from the original 'CPR for Haskell' paper. data CprType = CprType { ct_arty :: !Arity -- ^ Number of value arguments the denoted expression -- eats before returning the 'ct_cpr' , ct_cpr :: !Cpr -- ^ 'Cpr' eventually unleashed when applied to -- 'ct_arty' arguments } instance Eq CprType where a == b = ct_cpr a == ct_cpr b && (ct_arty a == ct_arty b || ct_cpr a == topCpr) topCprType :: CprType topCprType = CprType 0 topCpr botCprType :: CprType botCprType = CprType 0 botCpr flatConCprType :: ConTag -> CprType flatConCprType con_tag = CprType { ct_arty = 0, ct_cpr = flatConCpr con_tag } lubCprType :: CprType -> CprType -> CprType lubCprType ty1@(CprType n1 cpr1) ty2@(CprType n2 cpr2) -- The arity of bottom CPR types can be extended arbitrarily. | cpr1 == botCpr && n1 <= n2 = ty2 | cpr2 == botCpr && n2 <= n1 = ty1 -- There might be non-bottom CPR types with mismatching arities. -- Consider test DmdAnalGADTs. We want to return top in these cases. | n1 == n2 = CprType n1 (lubCpr cpr1 cpr2) | otherwise = topCprType applyCprTy :: CprType -> Arity -> CprType applyCprTy (CprType n res) k | n >= k = CprType (n-k) res | res == botCpr = botCprType | otherwise = topCprType abstractCprTy :: CprType -> CprType abstractCprTy (CprType n res) | res == topCpr = topCprType | otherwise = CprType (n+1) res trimCprTy :: CprType -> CprType trimCprTy (CprType arty res) = CprType arty (trimCpr res) -- | The result of 'unpackConFieldsCpr'. data UnpackConFieldsResult = AllFieldsSame !Cpr | ForeachField ![Cpr] -- | Unpacks a 'ConCpr'-shaped 'Cpr' and returns the field 'Cpr's wrapped in a -- 'ForeachField'. 
Otherwise, it returns 'AllFieldsSame' with the appropriate -- 'Cpr' to assume for each field. -- -- The use of 'UnpackConFieldsResult' allows O(1) space for the common, -- non-'ConCpr' case. unpackConFieldsCpr :: DataCon -> Cpr -> UnpackConFieldsResult unpackConFieldsCpr dc (ConCpr t cs) | t == dataConTag dc, cs `lengthIs` dataConRepArity dc = ForeachField cs unpackConFieldsCpr _ BotCpr = AllFieldsSame BotCpr unpackConFieldsCpr _ _ = AllFieldsSame TopCpr {-# INLINE unpackConFieldsCpr #-} seqCprTy :: CprType -> () seqCprTy (CprType _ cpr) = seqCpr cpr -- | The arity of the wrapped 'CprType' is the arity at which it is safe -- to unleash. See Note [Understanding DmdType and DmdSig] in "GHC.Types.Demand" newtype CprSig = CprSig { getCprSig :: CprType } deriving (Eq, Binary) -- | Turns a 'CprType' computed for the particular 'Arity' into a 'CprSig' -- unleashable at that arity. See Note [Understanding DmdType and DmdSig] in -- "GHC.Types.Demand" mkCprSigForArity :: Arity -> CprType -> CprSig mkCprSigForArity arty ty@(CprType n _) | arty /= n = topCprSig -- Trim on arity mismatch | otherwise = CprSig ty topCprSig :: CprSig topCprSig = CprSig topCprType isTopCprSig :: CprSig -> Bool isTopCprSig (CprSig ty) = ct_cpr ty == topCpr mkCprSig :: Arity -> Cpr -> CprSig mkCprSig arty cpr = CprSig (CprType arty cpr) seqCprSig :: CprSig -> () seqCprSig (CprSig ty) = seqCprTy ty prependArgsCprSig :: Arity -> CprSig -> CprSig -- ^ Add extra value args to CprSig prependArgsCprSig n_extra cpr_sig@(CprSig (CprType arity cpr)) | n_extra == 0 = cpr_sig | otherwise = assertPpr (n_extra > 0) (ppr n_extra) $ CprSig (CprType (arity + n_extra) cpr) -- | BNF: -- -- > cpr ::= '' -- TopCpr -- > | n -- FlatConCpr n -- > | n '(' cpr1 ',' cpr2 ',' ... ')' -- ConCpr n [cpr1,cpr2,...] -- > | 'b' -- BotCpr -- -- Examples: -- * `f x = f x` has result CPR `b` -- * `1(1,)` is a valid (nested) 'Cpr' denotation for `(I# 42#, f 42)`. 
instance Outputable Cpr where ppr TopCpr = empty ppr (FlatConCpr n) = int n ppr (ConCpr n cs) = int n <> parens (pprWithCommas ppr cs) ppr BotCpr = char 'b' -- | BNF: -- -- > cpr_ty ::= cpr -- short form if arty == 0 -- > | '\' arty '.' cpr -- if arty > 0 -- -- Examples: -- * `f x y z = f x y z` has denotation `\3.b` -- * `g !x = (x+1, x+2)` has denotation `\1.1(1,1)`. instance Outputable CprType where ppr (CprType arty res) | 0 <- arty = ppr res | otherwise = char '\\' <> ppr arty <> char '.' <> ppr res -- | Only print the CPR result instance Outputable CprSig where ppr (CprSig ty) = ppr (ct_cpr ty) instance Binary Cpr where put_ bh TopCpr = putByte bh 0 put_ bh BotCpr = putByte bh 1 put_ bh (FlatConCpr n) = putByte bh 2 *> put_ bh n put_ bh (ConCpr n cs) = putByte bh 3 *> put_ bh n *> put_ bh cs get bh = do h <- getByte bh case h of 0 -> return TopCpr 1 -> return BotCpr 2 -> FlatConCpr <$> get bh 3 -> ConCpr <$> get bh <*> get bh _ -> pprPanic "Binary Cpr: Invalid tag" (int (fromIntegral h)) instance Binary CprType where put_ bh (CprType arty cpr) = put_ bh arty *> put_ bh cpr get bh = CprType <$> get bh <*> get bh ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/DefaultEnv.hs0000644000000000000000000000500407346545000021402 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.DefaultEnv ( ClassDefaults (..) 
, DefaultEnv , emptyDefaultEnv , isEmptyDefaultEnv , defaultEnv , unitDefaultEnv , lookupDefaultEnv , filterDefaultEnv , defaultList , plusDefaultEnv ) where import GHC.Prelude import GHC.Core.TyCon (TyCon(tyConName)) import GHC.Core.TyCon.Env (TyConEnv, emptyTyConEnv, isEmptyTyConEnv, mkTyConEnvWith, unitTyConEnv, filterTyConEnv, nonDetTyConEnvElts, plusTyConEnv) import GHC.Hs.Extension (GhcRn) import GHC.Tc.Utils.TcType (Type) import GHC.Types.Name (Name, nameUnique, stableNameCmp) import GHC.Types.Unique.FM (lookupUFM_Directly) import GHC.Unit.Module.Warnings (WarningTxt) import GHC.Unit.Types (Module) import GHC.Utils.Outputable import Data.Data (Data) import Data.List (sortBy) import Data.Function (on) -- See Note [Named default declarations] in GHC.Tc.Gen.Default -- | Default environment mapping class @TyCon@s to their default type lists type DefaultEnv = TyConEnv ClassDefaults data ClassDefaults = ClassDefaults { cd_class :: !TyCon -- ^ always a class constructor , cd_types :: [Type] , cd_module :: Maybe Module -- ^ @Nothing@ for built-in, -- @Just@ the current module or the module whence the default was imported -- see Note [Default exports] in GHC.Tc.Gen.Export , cd_warn :: Maybe (WarningTxt GhcRn) -- ^ Warning emitted when the default is used } deriving Data instance Outputable ClassDefaults where ppr ClassDefaults {cd_class = cls, cd_types = tys} = text "default" <+> ppr cls <+> parens (interpp'SP tys) emptyDefaultEnv :: DefaultEnv emptyDefaultEnv = emptyTyConEnv isEmptyDefaultEnv :: DefaultEnv -> Bool isEmptyDefaultEnv = isEmptyTyConEnv unitDefaultEnv :: ClassDefaults -> DefaultEnv unitDefaultEnv d = unitTyConEnv (cd_class d) d defaultEnv :: [ClassDefaults] -> DefaultEnv defaultEnv = mkTyConEnvWith cd_class defaultList :: DefaultEnv -> [ClassDefaults] defaultList = sortBy (stableNameCmp `on` (tyConName . cd_class)) . 
nonDetTyConEnvElts -- sortBy recovers determinism lookupDefaultEnv :: DefaultEnv -> Name -> Maybe ClassDefaults lookupDefaultEnv env = lookupUFM_Directly env . nameUnique filterDefaultEnv :: (ClassDefaults -> Bool) -> DefaultEnv -> DefaultEnv filterDefaultEnv = filterTyConEnv plusDefaultEnv :: DefaultEnv -> DefaultEnv -> DefaultEnv plusDefaultEnv = plusTyConEnv ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Demand.hs0000644000000000000000000035262507346545000020553 0ustar0000000000000000 {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE BinaryLiterals #-} {-# LANGUAGE PatternSynonyms #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | A language to express the evaluation context of an expression as a -- 'Demand' and track how an expression evaluates free variables and arguments -- in turn as a 'DmdType'. -- -- Lays out the abstract domain for "GHC.Core.Opt.DmdAnal". module GHC.Types.Demand ( -- * Demands Boxity(..), Card(C_00, C_01, C_0N, C_10, C_11, C_1N), CardNonAbs, CardNonOnce, Demand(AbsDmd, BotDmd, (:*)), SubDemand(Prod, Poly), mkProd, viewProd, -- ** Algebra absDmd, topDmd, botDmd, seqDmd, topSubDmd, -- *** Least upper bound lubCard, lubDmd, lubSubDmd, -- *** Greatest lower bound glbCard, -- *** Plus plusCard, plusDmd, plusSubDmd, -- *** Multiply multCard, multDmd, multSubDmd, -- ** Predicates on @Card@inalities and @Demand@s isAbs, isAtMostOnce, isStrict, isAbsDmd, isAtMostOnceDmd, isStrUsedDmd, isStrictDmd, isTopDmd, isWeakDmd, onlyBoxedArguments, -- ** Special demands evalDmd, -- *** Demands used in PrimOp signatures lazyApply1Dmd, lazyApply2Dmd, strictOnceApply1Dmd, strictManyApply1Dmd, -- ** Other @Demand@ operations oneifyCard, oneifyDmd, strictifyDmd, strictifyDictDmd, lazifyDmd, floatifyDmd, peelCallDmd, peelManyCalls, mkCalledOnceDmd, mkCalledOnceDmds, strictCallArity, mkWorkerDemand, subDemandIfEvaluated, -- ** Extracting one-shot information callCards, argOneShots, argsOneShots, 
saturatedByOneShots, -- ** Manipulating Boxity of a Demand unboxDeeplyDmd, -- * Divergence Divergence(..), topDiv, botDiv, exnDiv, lubDivergence, isDeadEndDiv, -- * Demand environments DmdEnv(..), addVarDmdEnv, mkTermDmdEnv, nopDmdEnv, plusDmdEnv, plusDmdEnvs, multDmdEnv, reuseEnv, -- * Demand types DmdType(..), dmdTypeDepth, -- ** Algebra nopDmdType, botDmdType, lubDmdType, plusDmdType, multDmdType, discardArgDmds, -- ** Other operations peelFV, findIdDemand, addDemand, splitDmdTy, deferAfterPreciseException, -- * Demand signatures DmdSig(..), mkDmdSigForArity, mkClosedDmdSig, mkVanillaDmdSig, splitDmdSig, dmdSigDmdEnv, hasDemandEnvSig, nopSig, botSig, isNopSig, isBottomingSig, isDeadEndSig, isDeadEndAppSig, trimBoxityDmdSig, transferArgBoxityDmdSig, -- ** Handling arity adjustments prependArgsDmdSig, etaConvertDmdSig, -- * Demand transformers from demand signatures DmdTransformer, dmdTransformSig, dmdTransformDataConSig, dmdTransformDictSelSig, -- * Trim to a type shape TypeShape(..), trimToType, trimBoxity, -- * @seq@ing stuff seqDemand, seqDemandList, seqDmdType, seqDmdSig, -- * Zapping usage information zapUsageDemand, zapDmdEnvSig, zapUsedOnceDemand, zapUsedOnceSig ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique.FM import GHC.Types.Basic import GHC.Data.Maybe ( orElse ) import GHC.Core.Type ( Type, isTerminatingType ) import GHC.Core.DataCon ( splitDataProductType_maybe, StrictnessMark, isMarkedStrict ) import GHC.Core.Multiplicity ( scaledThing ) import GHC.Utils.Binary import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Coerce (coerce) import Data.Function {- ************************************************************************ * * Boxity: Whether the box of something is used * * ************************************************************************ -} {- Note [Strictness and Unboxing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If an argument is used strictly by the function body, 
we may use use call-by-value instead of call-by-need for that argument. What's more, we may unbox an argument that is used strictly, discarding the box at the call site. This can reduce allocations of the program drastically if the box really isn't needed in the function body. Here's an example: ``` even :: Int -> Bool even (I# 0) = True even (I# 1) = False even (I# n) = even (I# (n -# 2)) ``` All three code paths of 'even' are (a) strict in the argument, and (b) immediately discard the boxed 'Int'. Now if we have a call site like `even (I# 42)`, then it would be terrible to allocate the 'I#' box for the argument only to tear it apart immediately in the body of 'even'! Hence, worker/wrapper will allocate a wrapper for 'even' that not only uses call-by-value for the argument (e.g., `case I# 42 of b { $weven b }`), but also *unboxes* the argument, resulting in ``` even :: Int -> Bool even (I# n) = $weven n $weven :: Int# -> Bool $weven 0 = True $weven 1 = False $weven n = $weven (n -# 2) ``` And now the box in `even (I# 42)` will cancel away after inlining the wrapper. As far as the permission to unbox is concerned, *evaluatedness* of the argument is the important trait. Unboxing implies eager evaluation of an argument and we don't want to change the termination properties of the function. One way to ensure that is to unbox strict arguments only, but strictness is only a sufficient condition for evaluatedness. See Note [Unboxing evaluated arguments] in "GHC.Core.Opt.DmdAnal", where we manage to unbox *strict fields* of unboxed arguments that the function is not actually strict in, simply by realising that those fields have to be evaluated. Note [Boxity analysis] ~~~~~~~~~~~~~~~~~~~~~~ Alas, we don't want to unbox *every* strict argument (as Note [Strictness and Unboxing] might suggest). Here's an example (from T19871): ``` data Huge = H Bool Bool ... Bool ann :: Huge -> (Bool, Huge) ann h@(Huge True _ ... 
_) = (False, h) ann h = (True, h) ``` Unboxing 'h' yields ``` $wann :: Bool -> Bool -> ... -> Bool -> (Bool, Huge) $wann True b2 ... bn = (False, Huge True b2 ... bn) $wann b1 b2 ... bn = (True, Huge b1 b2 ... bn) ``` The pair constructor really needs its fields boxed. But '$wann' doesn't get passed 'h' anymore, only its components! Ergo it has to reallocate the 'Huge' box, in a process called "reboxing". After w/w, call sites like `case ... of Just h -> ann h` pay for the allocation of the additional box. In earlier versions of GHC we simply accepted that reboxing would sometimes happen, but we found some cases where it made a big difference: #19407, for example. We therefore perform a simple syntactic boxity analysis that piggy-backs on demand analysis in order to determine whether the box of a strict argument is always discarded in the function body, in which case we can pass it unboxed without risking regressions such as in 'ann' above. But as soon as one use needs the box, we want Boxed to win over any Unboxed uses. The demand signature (cf. Note [Demand notation]) will say whether it uses its arguments boxed or unboxed. Indeed it does so for every sub-component of the argument demand. Here's an example: ``` f :: (Int, Int) -> Bool f (a, b) = even (a + b) -- demand signature: <1!P(1!L,1!L)> ``` The '!' indicates places where we want to unbox, the lack thereof indicates the box is used by the function. Boxity flags are part of the 'Poly' and 'Prod' 'SubDemand's, see Note [Why Boxity in SubDemand and not in Demand?]. The given demand signature says "Unbox the pair and then nestedly unbox its two fields". By contrast, the demand signature of 'ann' above would look like <1P(1L,L,...,L)>, lacking any '!'. A demand signature like <1P(1!L)> -- Boxed outside but Unboxed in the field -- doesn't make a lot of sense, as we can never unbox the field without unboxing the containing record. 
See Note [Finalising boxity for demand signatures] in "GHC.Core.Opt.DmdAnal" for how we avoid to spread this and other kinds of misinformed boxities. Due to various practical reasons, Boxity Analysis is not conservative at times. Here are reasons for too much optimism: * Note [Function body boxity and call sites] is an observation about when it is beneficial to unbox a parameter that is returned from a function. Note [Unboxed demand on function bodies returning small products] derives a heuristic from the former Note, pretending that all call sites of a function need returned small products Unboxed. * Note [Boxity for bottoming functions] in DmdAnal makes all bottoming functions unbox their arguments, incurring reboxing in code paths that will diverge anyway. In turn we get more unboxing in hot code paths. Boxity analysis fixes a number of issues: #19871, #19407, #4267, #16859, #18907, #13331 Note [Function body boxity and call sites] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (from T5949) ``` f n p = case n of 0 -> p :: (a, b) _ -> f (n-1) p -- Worker/wrapper split if we decide to unbox: $wf n x y = case n of 0 -> (# x, y #) _ -> $wf (n-1) x y f n (x,y) = case $wf n x y of (# r, s #) -> (r,s) ``` When is it better to /not/ to unbox 'p'? That depends on the callers of 'f'! If all call sites 1. Wouldn't need to allocate fresh boxes for 'p', and 2. Needed the result pair of 'f' boxed Only then we'd see an increase in allocation resulting from unboxing. But as soon as only one of (1) or (2) holds, it really doesn't matter if 'f' unboxes 'p' (and its result, it's important that CPR follows suit). For example ``` res = ... case f m (field t) of (r1,r2) -> ... -- (1) holds arg = ... [ f m (x,y) ] ... -- (2) holds ``` Because one of the boxes in the call site can cancel away: ``` res = ... case field1 t of (x1,x2) -> case field2 t of (y1,y2) -> case $wf x1 x2 y1 y2 of (#r1,r2#) -> ... arg = ... [ case $wf x1 x2 y1 y2 of (#r1,r2#) -> (r1,r2) ] ... 
``` And when call sites neither have arg boxes (1) nor need the result boxed (2), then hesitating to unbox means /more/ allocation in the call site because of the need for fresh argument boxes. Summary: If call sites that satisfy both (1) and (2) occur more often than call sites that satisfy neither condition, then it's best /not/ to unbox 'p'. Note [Unboxed demand on function bodies returning small products] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Boxity analysis] achieves its biggest wins when we avoid reboxing huge records. But when we return small products from a function, we often get faster programs by pretending that the caller unboxes the result. Long version: Observation: Big record arguments (e.g., DynFlags) tend to be modified much less frequently than small records (e.g., Int). Result: Big records tend to be passed around boxed (unmodified) much more frequently than small records. Consequence: The larger the record, the more likely conditions (1) and (2) from Note [Function body boxity and call sites] are met, in which case unboxing returned parameters leads to reboxing. So we put an Unboxed demand on function bodies returning small products and a Boxed demand on the others. What is regarded a small product is controlled by the -fdmd-unbox-width flag. This also manages to unbox functions like ``` sum z [] = z sum (I# n) ((I# x):xs) = sum (I# (n +# x)) xs ``` where we can unbox 'z' on the grounds that it's but a small box anyway. That in turn means that the I# allocation in the recursive call site can cancel away and we get a non-allocating loop, nice and tight. Note that this is the typical case in "Observation" above: A small box is unboxed, modified, the result reboxed for the recursive call. Originally, this came up in binary-trees' check' function and #4267 which (similarly) features a strict fold over a tree. 
We'd also regress in join004 and join007 if we didn't assume an optimistic
Unboxed demand on the function body.
T17932 features a (non-recursive) function that returns a large record, e.g.,
```
flags (Options f x) = <huge> `seq` f
```
and here we won't unbox 'f' because it has 5 fields (which is larger than the
default -fdmd-unbox-width threshold).

Why not focus on putting Unboxed demands on *all recursive* function?
Then we'd unbox
```
flags 0 (Options f x) = <huge> `seq` f
flags n o             = flags (n-1) o
```
and that seems hardly useful.
(NB: Similar to 'f' from Note [Preserving Boxity of results is rarely a win],
but there we only had 2 fields.)

What about the Boxity of *fields* of a small, returned box? Consider
```
sumIO :: Int -> Int -> IO Int
sumIO 0 !z = return z -- What DmdAnal sees: sumIO 0 z s = z `seq` (# s, z #)
sumIO n !z = sumIO (n-1) (z+n)
```
We really want 'z' to unbox here. Yet its use in the returned unboxed pair
is fundamentally a Boxed one! CPR would manage to unbox it, but DmdAnal runs
before that. There is an Unboxed use in the recursive call to 'go' though.
But 'IO Int' returns a small product, and 'Int' is a small product itself.
So we'll put the RHS of 'sumIO' under sub-demand '!P(L,L!P(L))', indicating
that *if* we evaluate 'z', we don't need the box later on. And indeed the bang
will evaluate `z`, so we conclude with a total demand of `1!P(L)` on `z` and
unbox it.

Unlike for recursive functions, where we can often speed up the loop by
unboxing at the cost of a bit of reboxing in the base case, the wins for
non-recursive functions quickly turn into losses when unboxing too deeply.
That happens in T11545, T18109 and T18174. Therefore, we deeply unbox
recursive function bodies but only shallowly unbox non-recursive function
bodies (governed by the max_depth variable).

The implementation is in 'GHC.Core.Opt.DmdAnal.unboxWhenSmall'. It is quite
vital, guarding for regressions in test cases like #2387, #3586, #16040, #5075
and #19871.
Note that this is fundamentally working around a phase problem, namely that the
results of boxity analysis depend on CPR analysis (and vice versa, of course).

Note [unboxedWins]
~~~~~~~~~~~~~~~~~~
We used to use '_unboxedWins' below in 'lubBoxity', which was too optimistic.

While it worked around some shortcomings of the phase separation between Boxity
analysis and CPR analysis, it was a gross hack which caused regressions itself
that needed all kinds of fixes and workarounds. Examples (from #21119):

  * As #20767 says, L and B were no longer top and bottom of our lattice
  * In #20746 we unboxed huge Handle types that were never needed boxed in the
    first place. See Note [deferAfterPreciseException].
  * It also caused unboxing of huge records where we better shouldn't, for
    example in T19871.absent.
  * It became impossible to work with when implementing !7599, mostly due to
    the chaos that results from #20767.

Conclusion: We should use 'boxedWins' in 'lubBoxity', #21119.
Fortunately, we could come up with a number of better mechanisms to make up for
the sometimes huge regressions that would have otherwise incurred:

1. A beefed up Note [Unboxed demand on function bodies returning small products]
   that works recursively fixes most regressions. It's a bit unsound, but
   pretty well-behaved.
2. We saw bottoming functions spoil boxity in some less severe cases and
   countered that with Note [Boxity for bottoming functions].
-}

-- | Boxed wins: the result is 'Unboxed' only when both arguments are.
-- This is the conservative choice for 'lubBoxity'; see Note [unboxedWins]
-- for why the optimistic alternative was abandoned.
boxedWins :: Boxity -> Boxity -> Boxity
boxedWins Unboxed Unboxed = Unboxed
boxedWins _       !_      = Boxed

-- | The (too optimistic) dual of 'boxedWins': 'Unboxed' unless both arguments
-- are 'Boxed'. Unused (hence the underscore); kept for reference.
_unboxedWins :: Boxity -> Boxity -> Boxity -- See Note [unboxedWins]
_unboxedWins Boxed Boxed = Boxed
_unboxedWins _     !_    = Unboxed

lubBoxity :: Boxity -> Boxity -> Boxity
-- See Note [Boxity analysis] for the lattice.
lubBoxity = boxedWins

{-
************************************************************************
*                                                                      *
           Card: Combining Strictness and Usage
*                                                                      *
************************************************************************
-}

{- Note [Evaluation cardinalities]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The demand analyser uses an (abstraction of) /evaluation cardinality/ of type
Card, to specify how many times a term is evaluated. A Card C_lu
represents an /interval/ of possible cardinalities [l..u], meaning

* Evaluated /at least/ 'l' times (strictness).
  Hence 'l' is either 0 (lazy)
                   or 1 (strict)

* Evaluated /at most/ 'u' times (usage).
  Hence 'u' is either 0 (not used at all),
                   or 1 (used at most once)
                   or n (no information)

Intervals describe sets, so the underlying lattice is the powerset lattice.

Usually l<=u, but we also have C_10, the interval [1,0], the empty interval,
denoting the empty set. This is the bottom element of the lattice.

See Note [Demand notation] for the notation we use for each of the constructors.

Note [Bit vector representation for Card]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While the 6 inhabitants of Card admit an efficient representation as an
enumeration, implementing operations such as lubCard, plusCard and multCard
leads to unreasonably bloated code.
This was the old defn for lubCard, for example:

  -- Handle C_10 (bot)
  lubCard C_10 n    = n    -- bot
  lubCard n    C_10 = n    -- bot
  -- Handle C_0N (top)
  lubCard C_0N _    = C_0N -- top
  lubCard _    C_0N = C_0N -- top
  -- Handle C_11
  lubCard C_00 C_11 = C_01 -- {0} ∪ {1} = {0,1}
  lubCard C_11 C_00 = C_01 -- {0} ∪ {1} = {0,1}
  lubCard C_11 n    = n    -- {1} is a subset of all other intervals
  lubCard n    C_11 = n    -- {1} is a subset of all other intervals
  -- Handle C_1N
  lubCard C_1N C_1N = C_1N -- reflexivity
  lubCard _    C_1N = C_0N -- {0} ∪ {1,n} = top
  lubCard C_1N _    = C_0N -- {0} ∪ {1,n} = top
  -- Handle C_01
  lubCard C_01 _    = C_01 -- {0} ∪ {0,1} = {0,1}
  lubCard _    C_01 = C_01 -- {0} ∪ {0,1} = {0,1}
  -- Handle C_00
  lubCard C_00 C_00 = C_00 -- reflexivity

There's a much more compact way to encode these operations if Card is
represented not as distinctly denoted intervals, but as the subset of the set
of all cardinalities {0,1,n} instead. We represent such a subset as a bit
vector of length 3 (which fits in an Int). That's actually pretty common for
such powerset lattices.
There's one bit per denoted cardinality that is set iff that cardinality is
part of the denoted set, with n being the most significant bit (index 2) and
0 being represented by the least significant bit (index 0).

How does that help? Well, for one, lubCard just becomes

  lubCard (Card a) (Card b) = Card (a .|. b)

The other operations, 'plusCard' and 'multCard', become significantly more
tricky, but immensely more compact. It's all straight-line code with a few bit
twiddling instructions now!

Note [Algebraic specification for plusCard and multCard]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The representation change in Note [Bit vector representation for Card] admits
very dense definitions of 'plusCard' and 'multCard' in terms of bit twiddling,
but the connection to the algebraic operations they implement is lost.
It's helpful to have a written specification here of what 'plusCard' and
'multCard' should compute.

  * plusCard: a@[l1,u1] + b@[l2,u2] = r@[l1+l2,u1+u2].
      - In terms of sets, 0 ∈ r iff 0 ∈ a and 0 ∈ b.
        Examples: set in C_00 + C_00, C_01 + C_0N, but not in C_10 + C_00
      - In terms of sets, 1 ∈ r iff 1 ∈ a or 1 ∈ b.
        Examples: set in C_01 + C_00, C_0N + C_0N, but not in C_10 + C_00
      - In terms of sets, n ∈ r iff n ∈ a or n ∈ b, or (1 ∈ a and 1 ∈ b),
        so not unlike add with carry.
        Examples: set in C_01 + C_01, C_01 + C_0N, but not in C_10 + C_01
      - Handy special cases:
          o 'plusCard C_10' bumps up the strictness of its argument, just like
            'lubCard C_00' lazifies it, without touching upper bounds.
            See also 'strictifyCard'
          o Similarly, 'plusCard C_0N' discards usage information
            (incl. absence) but leaves strictness alone.

  * multCard: a@[l1,u1] * b@[l2,u2] = r@[l1*l2,u1*u2].
      - In terms of sets, 0 ∈ r iff 0 ∈ a or 0 ∈ b.
        Examples: set in C_00 * C_10, C_01 * C_1N, but not in C_10 * C_1N
      - In terms of sets, 1 ∈ r iff 1 ∈ a and 1 ∈ b.
        Examples: set in C_01 * C_01, C_01 * C_1N, but not in C_11 * C_10
      - In terms of sets, n ∈ r iff 1 ∈ r and (n ∈ a or n ∈ b).
        Examples: set in C_1N * C_01, C_1N * C_0N, but not in C_10 * C_1N
      - Handy special cases:
          o 'multCard C_1N c' is the same as 'plusCard c c' and drops used-once
            info. But unlike 'plusCard C_0N', it leaves absence and strictness.
          o 'multCard C_01' drops strictness info, like 'lubCard C_00'.
          o 'multCard C_0N' does both; it discards all strictness and used-once
            info and retains only absence info.
-}

-- | Describes an interval of /evaluation cardinalities/.
-- See Note [Evaluation cardinalities]
-- See Note [Bit vector representation for Card]
newtype Card = Card Int
  deriving Eq

-- | A subtype of 'Card' for which the upper bound is never 0 (no 'C_00' or
-- 'C_10'). The only four inhabitants are 'C_01', 'C_0N', 'C_11', 'C_1N'.
-- Membership can be tested with 'isCardNonAbs'.
-- See 'D' and 'Call' for use sites and explanation.
type CardNonAbs = Card

-- | A subtype of 'Card' for which the upper bound is never 1 (no 'C_01' or
-- 'C_11'). The only four inhabitants are 'C_00', 'C_0N', 'C_10', 'C_1N'.
-- Membership can be tested with 'isCardNonOnce'.
-- See 'Poly' for use sites and explanation.
type CardNonOnce = Card

-- The bit vector is indexed by cardinality: bit 0 <=> "evaluated 0 times",
-- bit 1 <=> "evaluated once", bit 2 <=> "evaluated many times".
-- See Note [Bit vector representation for Card].

-- | Absent, {0}. Pretty-printed as A.
pattern C_00 :: Card
pattern C_00 = Card 0b001
-- | Bottom, {}. Pretty-printed as B.
pattern C_10 :: Card
pattern C_10 = Card 0b000
-- | Strict and used once, {1}. Pretty-printed as 1.
pattern C_11 :: Card
pattern C_11 = Card 0b010
-- | Used at most once, {0,1}. Pretty-printed as M.
pattern C_01 :: Card
pattern C_01 = Card 0b011
-- | Strict and used (possibly) many times, {1,n}. Pretty-printed as S.
pattern C_1N :: Card
pattern C_1N = Card 0b110
-- | Every possible cardinality; the top element, {0,1,n}. Pretty-printed as L.
pattern C_0N :: Card
pattern C_0N = Card 0b111

{-# COMPLETE C_00, C_01, C_0N, C_10, C_11, C_1N :: Card #-}

-- Bottom and top of the Card lattice; see Note [Evaluation cardinalities].
_botCard, topCard :: Card
_botCard = C_10
topCard = C_0N

-- | True <=> lower bound is 1.
isStrict :: Card -> Bool
-- See Note [Bit vector representation for Card]
isStrict (Card c) = c .&. 0b001 == 0 -- simply check 0 bit is not set

-- | True <=> upper bound is 0.
isAbs :: Card -> Bool
-- See Note [Bit vector representation for Card]
isAbs (Card c) = c .&. 0b110 == 0 -- simply check 1 and n bit are not set

-- | True <=> upper bound is 1.
isAtMostOnce :: Card -> Bool
-- See Note [Bit vector representation for Card]
isAtMostOnce (Card c) = c .&. 0b100 == 0 -- simply check n bit is not set

-- | Is this a 'CardNonAbs'?
isCardNonAbs :: Card -> Bool
isCardNonAbs = not . isAbs

-- | Is this a 'CardNonOnce'?
isCardNonOnce :: Card -> Bool
isCardNonOnce n = isAbs n || not (isAtMostOnce n)

-- | Intersect with [0,1].
oneifyCard :: Card -> Card
oneifyCard = glbCard C_01

-- | Intersect with [1,n]. The same as @'plusCard' 'C_10'@.
strictifyCard :: Card -> Card
strictifyCard = glbCard C_1N

-- | Denotes '∪' on 'Card'.
lubCard :: Card -> Card -> Card
-- See Note [Bit vector representation for Card]
lubCard (Card a) (Card b) = Card (a .|. b) -- main point of the bit-vector encoding!

-- | Denotes '∩' on 'Card'.
glbCard :: Card -> Card -> Card
-- See Note [Bit vector representation for Card]
glbCard (Card a) (Card b) = Card (a .&. b)

-- | Denotes '+' on lower and upper bounds of 'Card'.
plusCard :: Card -> Card -> Card
-- See Note [Algebraic specification for plusCard and multCard]
plusCard (Card a) (Card b)
  = Card (bit0 .|. bit1 .|. bitN)
  where
    -- 0 ∈ r iff 0 ∈ a and 0 ∈ b
    bit0 = (a .&. b) .&. 0b001
    -- 1 ∈ r iff 1 ∈ a or 1 ∈ b
    bit1 = (a .|. b) .&. 0b010
    -- n ∈ r iff n ∈ a or n ∈ b, or (1 ∈ a and 1 ∈ b); the shiftL is the "carry"
    bitN = ((a .|. b) .|. shiftL (a .&. b) 1) .&. 0b100

-- | Denotes '*' on lower and upper bounds of 'Card'.
multCard :: Card -> Card -> Card
-- See Note [Algebraic specification for plusCard and multCard]
multCard (Card a) (Card b)
  = Card (bit0 .|. bit1 .|. bitN)
  where
    -- 0 ∈ r iff 0 ∈ a or 0 ∈ b
    bit0 = (a .|. b) .&. 0b001
    -- 1 ∈ r iff 1 ∈ a and 1 ∈ b
    bit1 = (a .&. b) .&. 0b010
    -- n ∈ r iff 1 ∈ r and (n ∈ a or n ∈ b)
    bitN = (a .|. b) .&. shiftL bit1 1 .&. 0b100

{-
************************************************************************
*                                                                      *
           Demand: Evaluation contexts
*                                                                      *
************************************************************************
-}

-- | A demand describes
--
--   * How many times a variable is evaluated, via a 'Card'inality, and
--   * How deep its value was evaluated in turn, via a 'SubDemand'.
--
-- See also Note [Demand notation]
-- and Note [Demand examples].
--
-- This data type is quite similar to `'Scaled' 'SubDemand'`, but it's scaled
-- by 'Card', which is an /interval/ on 'Multiplicity', the upper bound of
-- which could be used to infer uniqueness types. Also we treat 'AbsDmd' and
-- 'BotDmd' specially, as the concept of a 'SubDemand' doesn't apply when there
-- isn't any evaluation at all. If you don't care, simply use '(:*)'.
data Demand
  = BotDmd
    -- ^ A bottoming demand, produced by a diverging function ('C_10'), hence
    -- there is no 'SubDemand' that describes how it was evaluated.

  | AbsDmd
    -- ^ An absent demand: Evaluated exactly 0 times ('C_00'), hence there is
    -- no 'SubDemand' that describes how it was evaluated.

  | D !CardNonAbs !SubDemand
    -- ^ Don't use this internal data constructor; use '(:*)' instead.
    -- Since BotDmd deals with 'C_10' and AbsDmd deals with 'C_00', the
    -- cardinality component is CardNonAbs
  deriving Eq

-- | Only meant to be used in the pattern synonym below!
-- Expands both 'AbsDmd' and 'BotDmd' with 'botSubDmd' as the sub-demand;
-- see Note [SubDemand denotes at least one evaluation] for why that is the
-- right choice.
viewDmdPair :: Demand -> (Card, SubDemand)
viewDmdPair BotDmd   = (C_10, botSubDmd)
viewDmdPair AbsDmd   = (C_00, botSubDmd)
viewDmdPair (D n sd) = (n, sd)

-- | @c :* sd@ is a demand that says \"evaluated @c@ times, and any trace in
-- which it is evaluated will evaluate at least as deep as @sd@\".
--
-- Matching on this pattern synonym is a complete match.
-- If the matched demand was 'AbsDmd', it will match as @C_00 :* botSubDmd@.
-- If the matched demand was 'BotDmd', it will match as @C_10 :* botSubDmd@.
-- The builder of this pattern synonym simply /discards/ the 'SubDemand' if the
-- 'Card' was absent and returns 'AbsDmd' or 'BotDmd' instead. It will assert
-- that the discarded sub-demand was 'botSubDmd' in both cases.
--
-- Call sites should consider whether they really want to look at the
-- 'SubDemand' of an absent demand and match on 'AbsDmd' and/or 'BotDmd'
-- otherwise. Really, any other 'SubDemand' would be allowed and
-- might work better, depending on context.
pattern (:*) :: HasDebugCallStack => Card -> SubDemand -> Demand
pattern n :* sd <- (viewDmdPair -> (n, sd)) where
  C_10 :* sd = BotDmd & assertPpr (sd == botSubDmd) (text "B /=" <+> ppr sd)
  C_00 :* sd = AbsDmd & assertPpr (sd == botSubDmd) (text "A /=" <+> ppr sd)
  n    :* sd = D n sd & assertPpr (isCardNonAbs n) (ppr n $$ ppr sd)
{-# COMPLETE (:*) #-}

-- | A sub-demand describes an /evaluation context/ (in the sense of an
-- operational semantics), e.g. how deep the denoted thing is going to be
-- evaluated. See 'Demand' for examples.
--
-- See Note [SubDemand denotes at least one evaluation] for a more detailed
-- description of what a sub-demand means.
--
-- See Note [Demand notation] for the extensively used short-hand notation.
-- See also Note [Why Boxity in SubDemand and not in Demand?].
data SubDemand
  = Poly !Boxity !CardNonOnce
  -- ^ Polymorphic demand, the denoted thing is evaluated arbitrarily deep,
  -- with the specified cardinality at every level. The 'Boxity' applies only
  -- to the outer evaluation context as well as all inner evaluation context.
  -- See Note [Boxity in Poly] for why we want it to carry 'Boxity'.
  -- Expands to 'Call' via 'viewCall' and to 'Prod' via 'viewProd'.
  --
  -- @Poly b n@ is semantically equivalent to @Prod b [n :* Poly b n, ...]
  -- or @Call n (Poly Boxed n)@. 'viewCall' and 'viewProd' do these rewrites.
  --
  -- In Note [Demand notation]: @L  === P(L,L,...)@  and @L  === C(L)@,
  --                            @B  === P(B,B,...)@  and @B  === C(B)@,
  --                            @!A === !P(A,A,...)@ and @!A === C(A)@,
  --                            and so on.
  --
  -- We'll only see 'Poly' with 'C_10' (B), 'C_00' (A), 'C_0N' (L) and sometimes
  -- 'C_1N' (S) through 'plusSubDmd', never 'C_01' (M) or 'C_11' (1) (grep the
  -- source code). Hence 'CardNonOnce', which is closed under 'lub' and 'plus'.
  --
  -- Why doesn't this constructor simply carry a 'Demand' instead of its fields?
  -- See Note [Call SubDemand vs. evaluation Demand].
  | Call !CardNonAbs !SubDemand
  -- ^ @Call n sd@ describes the evaluation context of @n@ function
  -- applications (with one argument), where the result of each call is
  -- evaluated according to @sd@.
  -- @sd@ describes program traces in which the denoted thing was called at all,
  -- see Note [SubDemand denotes at least one evaluation].
  -- That Note also explains why it doesn't make sense for @n@ to be absent,
  -- hence we forbid it with 'CardNonAbs'. Absent call demands can still be
  -- expressed with 'Poly'.
  -- Used only for values of function type. Use the smart constructor 'mkCall'
  -- whenever possible!
  | Prod !Boxity ![Demand]
  -- ^ @Prod b ds@ describes the evaluation context of a case scrutinisation
  -- on an expression of product type, where the product components are
  -- evaluated according to @ds@. The 'Boxity' @b@ says whether or not the box
  -- of the product was used.

-- | We have to respect Poly rewrites through 'viewCall' and 'viewProd'.
instance Eq SubDemand where
  d1 == d2 = case d1 of
    Prod b1 ds1
      | Just (b2, ds2) <- viewProd (length ds1) d2 -> b1 == b2 && ds1 == ds2
    Call n1 sd1
      | Just (n2, sd2) <- viewCall d2 -> n1 == n2 && sd1 == sd2
    Poly b1 n1
      | Poly b2 n2 <- d2 -> b1 == b2 && n1 == n2
    _ -> False -- failed guards above fall through to here

topSubDmd, botSubDmd, seqSubDmd :: SubDemand
topSubDmd = Poly Boxed   C_0N
botSubDmd = Poly Unboxed C_10
seqSubDmd = Poly Unboxed C_00

-- | The uniform field demand when viewing a 'Poly' as a 'Prod', as in
-- 'viewProd'.
polyFieldDmd :: Boxity -> CardNonOnce -> Demand
polyFieldDmd _     C_00 = AbsDmd
polyFieldDmd _     C_10 = BotDmd
polyFieldDmd Boxed C_0N = topDmd
polyFieldDmd b     n    = n :* Poly b n & assertPpr (isCardNonOnce n) (ppr n)

-- | A smart constructor for 'Prod', applying rewrite rules along the semantic
-- equality @Prod b [n :* Poly Boxed n, ...] === Poly b n@, simplifying to
-- 'Poly' 'SubDemand's when possible. Examples:
--
--   * Rewrites @P(L,L)@ (e.g., arguments @Boxed@, @[L,L]@) to @L@
--   * Rewrites @!P(L!L,L!L)@ (e.g., arguments @Unboxed@, @[L!L,L!L]@) to @!L@
--   * Does not rewrite @P(1L)@, @P(L!L)@, @!P(L)@ or @P(L,A)@
--
mkProd :: Boxity -> [Demand] -> SubDemand
mkProd b ds
  | all (== AbsDmd) ds = Poly b C_00
  | all (== BotDmd) ds = Poly b C_10
  | dmd@(n :* Poly b2 m):_ <- ds
  , n == m          -- don't rewrite P(SL)  to S
  , b == b2         -- don't rewrite P(S!S) to !S
  , all (== dmd) ds -- don't rewrite P(L,A) to L
  = Poly b n
  | otherwise = Prod b ds

-- | @viewProd n sd@ interprets @sd@ as a 'Prod' of arity @n@, expanding 'Poly'
-- demands as necessary.
viewProd :: Arity -> SubDemand -> Maybe (Boxity, [Demand])
-- It's quite important that this function is optimised well;
-- it is used by lubSubDmd and plusSubDmd.
viewProd n (Prod b ds)
  | ds `lengthIs` n = Just (b, ds)
-- Note the strict application to replicate: This makes sure we don't allocate
-- a thunk for it, inlines it and lets case-of-case fire at call sites.
viewProd n (Poly b card)
  | let !ds = replicate n $! polyFieldDmd b card
  = Just (b, ds)
viewProd _ _
  = Nothing
{-# INLINE viewProd #-} -- we want to fuse away the replicate and the allocation
                        -- for Arity. Otherwise, #18304 bites us.

-- | A smart constructor for 'Call', applying rewrite rules along the semantic
-- equality @Call C_0N (Poly C_0N) === Poly C_0N@, simplifying to 'Poly'
-- 'SubDemand's when possible.
mkCall :: CardNonAbs -> SubDemand -> SubDemand
--mkCall C_1N sd@(Poly Boxed C_1N) = sd -- NO! #21085 strikes. See Note [mkCall and plusSubDmd]
mkCall C_0N sd@(Poly Boxed C_0N) = sd
mkCall n    sd                   = assertPpr (isCardNonAbs n) (ppr n $$ ppr sd) $
                                   Call n sd

-- | @viewCall sd@ interprets @sd@ as a 'Call', expanding 'Poly' subdemands as
-- necessary.
viewCall :: SubDemand -> Maybe (Card, SubDemand)
viewCall (Call n sd) = Just (n :: Card, sd)
viewCall (Poly _ n)
  -- Expand an absent Poly with 'botSubDmd' as the result sub-demand;
  -- see Note [SubDemand denotes at least one evaluation].
  | isAbs n   = Just (n :: Card, botSubDmd)
  | otherwise = Just (n :: Card, Poly Boxed n)
viewCall _ = Nothing

topDmd, absDmd, botDmd, seqDmd :: Demand
topDmd = C_0N :* topSubDmd
absDmd = AbsDmd
botDmd = BotDmd
seqDmd = C_11 :* seqSubDmd

-- | Sets 'Boxity' to 'Unboxed' for non-'Call' sub-demands and recurses into 'Prod'.
unboxDeeplySubDmd :: SubDemand -> SubDemand
unboxDeeplySubDmd (Poly _ n)  = Poly Unboxed n
unboxDeeplySubDmd (Prod _ ds) = mkProd Unboxed (strictMap unboxDeeplyDmd ds)
unboxDeeplySubDmd call@Call{} = call

-- | Sets 'Boxity' to 'Unboxed' for the 'Demand', recursing into 'Prod's.
-- Don't recurse into lazy arguments; see GHC.Core.Opt.DmdAnal
-- Note [No lazy, Unboxed demands in demand signature]
unboxDeeplyDmd :: Demand -> Demand
unboxDeeplyDmd AbsDmd = AbsDmd
unboxDeeplyDmd BotDmd = BotDmd
unboxDeeplyDmd dmd@(D n sd)
  | isStrict n = D n (unboxDeeplySubDmd sd)
  | otherwise  = dmd

-- | Scale a 'Demand' by a 'Card': @multDmd n (m :* sd)@ evaluates
-- @multCard n m@ times, each time as deep as @sd@ prescribes.
multDmd :: Card -> Demand -> Demand
multDmd C_11 dmd = dmd -- An optimisation
-- The following four lines make sure that we rewrite to AbsDmd and BotDmd
-- whenever the leading cardinality is absent (C_00 or C_10).
-- Otherwise it may happen that the SubDemand is not 'botSubDmd', triggering
-- the assertion in `:*`.
-- Example: `multDmd B 1L = BA`, so with an inner `seqSubDmd`. Our lattice
-- allows us to always rewrite this to proper BotDmd and we maintain the
-- invariant that this is indeed the case.
multDmd C_00 _   = AbsDmd
multDmd _ AbsDmd = AbsDmd
multDmd C_10 (D n _) = if isStrict n then BotDmd else AbsDmd
multDmd n    BotDmd  = if isStrict n then BotDmd else AbsDmd
-- See Note [SubDemand denotes at least one evaluation] for the strictifyCard
multDmd n (D m sd) = multCard n m :* multSubDmd (strictifyCard n) sd

multSubDmd :: Card -> SubDemand -> SubDemand
multSubDmd C_11 sd = sd -- An optimisation, for when sd is a deep Prod
-- The following three equations don't have an impact on Demands, only on
-- Boxity. They are needed so that we don't trigger the assertions in `:*`
-- when called from `multDmd`.
multSubDmd C_00 _          = seqSubDmd -- Otherwise `multSubDmd A L == A /= !A`
multSubDmd C_10 (Poly _ n) = if isStrict n then botSubDmd else seqSubDmd -- Otherwise `multSubDmd B L == B /= !B`
multSubDmd C_10 (Call n _) = if isStrict n then botSubDmd else seqSubDmd -- Otherwise we'd call `mkCall` with absent cardinality
multSubDmd n (Poly b m)   = Poly b (multCard n m)
multSubDmd n (Call n' sd) = mkCall (multCard n n') sd
multSubDmd n (Prod b ds)  = mkProd b (strictMap (multDmd n) ds)

-- | Lazify the sub-demand (via 'multSubDmd' with a lazy card) when the given
-- cardinality is lazy. Used by 'plusDmd'; see the comment there and
-- Note [SubDemand denotes at least one evaluation].
lazifyIfStrict :: Card -> SubDemand -> SubDemand
lazifyIfStrict n sd = multSubDmd (glbCard C_01 n) sd

-- | Denotes '∪' on 'Demand'.
lubDmd :: Demand -> Demand -> Demand
lubDmd BotDmd dmd2   = dmd2
lubDmd dmd1   BotDmd = dmd1
lubDmd (n1 :* sd1) (n2 :* sd2) =
  -- pprTraceWith "lubDmd" (\it -> ppr (n1:*sd1) $$ ppr (n2:*sd2) $$ ppr it) $
  lubCard n1 n2 :* lubSubDmd sd1 sd2

lubSubDmd :: SubDemand -> SubDemand -> SubDemand
-- Shortcuts for neutral and absorbing elements.
-- Below we assume that Boxed always wins.
lubSubDmd (Poly Unboxed C_10)  sd                   = sd
lubSubDmd sd                   (Poly Unboxed C_10)  = sd
lubSubDmd sd@(Poly Boxed C_0N) _                    = sd
lubSubDmd _                    sd@(Poly Boxed C_0N) = sd
-- Handle Prod
lubSubDmd (Prod b1 ds1) (Poly b2 n2)
  | let !d = polyFieldDmd b2 n2
  = mkProd (lubBoxity b1 b2) (strictMap (lubDmd d) ds1)
lubSubDmd (Prod b1 ds1) (Prod b2 ds2)
  | equalLength ds1 ds2
  = mkProd (lubBoxity b1 b2) (strictZipWith lubDmd ds1 ds2)
-- Handle Call
lubSubDmd (Call n1 sd1) (viewCall -> Just (n2, sd2)) =
  mkCall (lubCard n1 n2) (lubSubDmd sd1 sd2)
-- Handle Poly
lubSubDmd (Poly b1 n1) (Poly b2 n2) = Poly (lubBoxity b1 b2) (lubCard n1 n2)
-- Other Poly case by commutativity
lubSubDmd sd1@Poly{} sd2 = lubSubDmd sd2 sd1
-- Otherwise (Call `lub` Prod) return Top
lubSubDmd _ _ = topSubDmd

-- | Denotes '+' on 'Demand'.
plusDmd :: Demand -> Demand -> Demand
plusDmd AbsDmd dmd2   = dmd2
plusDmd dmd1   AbsDmd = dmd1
plusDmd (n1 :* sd1) (n2 :* sd2) =
  -- pprTraceWith "plusDmd" (\it -> ppr (n1:*sd1) $$ ppr (n2:*sd2) $$ ppr it) $
  -- Why lazify? See Note [SubDemand denotes at least one evaluation]
  -- and also Note [Unrealised opportunity in plusDmd] which applies when both
  -- n1 and n2 are lazy already
  plusCard n1 n2 :* plusSubDmd (lazifyIfStrict n1 sd1) (lazifyIfStrict n2 sd2)

plusSubDmd :: SubDemand -> SubDemand -> SubDemand
-- Shortcuts for neutral and absorbing elements.
-- Below we assume that Boxed always wins.
plusSubDmd (Poly Unboxed C_00)  sd                   = sd
plusSubDmd sd                   (Poly Unboxed C_00)  = sd
plusSubDmd sd@(Poly Boxed C_1N) _                    = sd
plusSubDmd _                    sd@(Poly Boxed C_1N) = sd
-- Handle Prod
plusSubDmd (Prod b1 ds1) (Poly b2 n2)
  | let !d = polyFieldDmd b2 n2
  = mkProd (lubBoxity b1 b2) (strictMap (plusDmd d) ds1)
plusSubDmd (Prod b1 ds1) (Prod b2 ds2)
  | equalLength ds1 ds2
  = mkProd (lubBoxity b1 b2) (strictZipWith plusDmd ds1 ds2)
-- Handle Call
-- NOTE(review): 'lubSubDmd' (not 'plusSubDmd') on the call results looks
-- deliberate — cf. the Note [mkCall and plusSubDmd] referenced in 'mkCall' —
-- confirm before "fixing".
plusSubDmd (Call n1 sd1) (viewCall -> Just (n2, sd2)) =
  mkCall (plusCard n1 n2) (lubSubDmd sd1 sd2)
-- Handle Poly
plusSubDmd (Poly b1 n1) (Poly b2 n2) = Poly (lubBoxity b1 b2) (plusCard n1 n2)
-- Other Poly case by commutativity
plusSubDmd sd1@Poly{} sd2 = plusSubDmd sd2 sd1
-- Otherwise (Call `plus` Prod) return Top
plusSubDmd _ _ = topSubDmd

-- | Used to suppress pretty-printing of an uninformative demand
isTopDmd :: Demand -> Bool
isTopDmd dmd = dmd == topDmd

isAbsDmd :: Demand -> Bool
isAbsDmd (n :* _) = isAbs n

-- | Contrast with 'isStrUsedDmd'. See Note [Strict demands]
isStrictDmd :: Demand -> Bool
isStrictDmd (n :* _) = isStrict n

-- | Not absent and used strictly. See Note [Strict demands]
isStrUsedDmd :: Demand -> Bool
isStrUsedDmd (n :* _) = isStrict n && not (isAbs n)

-- | Is the value used at most once?
isAtMostOnceDmd :: Demand -> Bool
isAtMostOnceDmd (n :* _) = isAtMostOnce n

-- | We try to avoid tracking weak free variable demands in strictness
-- signatures for analysis performance reasons.
-- See Note [Lazy and unleashable free variables] in "GHC.Core.Opt.DmdAnal".
isWeakDmd :: Demand -> Bool
isWeakDmd dmd@(n :* _) = not (isStrict n) && is_plus_idem_dmd dmd
  where
    -- @is_plus_idem_* thing@ checks whether @thing `plus` thing = thing@,
    -- e.g. if @thing@ is idempotent wrt. to @plus@.
    -- is_plus_idem_card n = plusCard n n == n
    is_plus_idem_card = isCardNonOnce
    -- is_plus_idem_dmd dmd = plusDmd dmd dmd == dmd
    is_plus_idem_dmd AbsDmd    = True
    is_plus_idem_dmd BotDmd    = True
    is_plus_idem_dmd (n :* sd) = is_plus_idem_card n && is_plus_idem_sub_dmd sd
    -- is_plus_idem_sub_dmd sd = plusSubDmd sd sd == sd
    is_plus_idem_sub_dmd (Poly _ n)  = assert (isCardNonOnce n) True
    is_plus_idem_sub_dmd (Prod _ ds) = all is_plus_idem_dmd ds
    is_plus_idem_sub_dmd (Call n _)  = is_plus_idem_card n

evalDmd :: Demand
evalDmd = C_1N :* topSubDmd

-- | First argument of 'GHC.Exts.maskAsyncExceptions#': @1C(1,L)@.
-- Called exactly once.
strictOnceApply1Dmd :: Demand
strictOnceApply1Dmd = C_11 :* mkCall C_11 topSubDmd

-- | First argument of 'GHC.Exts.atomically#': @SC(S,L)@.
-- Called at least once, possibly many times.
strictManyApply1Dmd :: Demand
strictManyApply1Dmd = C_1N :* mkCall C_1N topSubDmd

-- | First argument of catch#: @MC(1,L)@.
-- Evaluates its arg lazily, but then applies it exactly once to one argument.
lazyApply1Dmd :: Demand
lazyApply1Dmd = C_01 :* mkCall C_11 topSubDmd

-- | Second argument of catch#: @MC(1,C(1,L))@.
-- Evaluates its arg lazily, but then applies it exactly once to two arguments.
lazyApply2Dmd :: Demand
lazyApply2Dmd = C_01 :* mkCall C_11 (mkCall C_11 topSubDmd)

-- | Make a 'Demand' evaluated at-most-once.
oneifyDmd :: Demand -> Demand
oneifyDmd AbsDmd    = AbsDmd
oneifyDmd BotDmd    = BotDmd
oneifyDmd (n :* sd) = oneifyCard n :* sd

-- | Make a 'Demand' evaluated at-least-once (e.g. strict).
strictifyDmd :: Demand -> Demand
strictifyDmd = plusDmd seqDmd

-- | If the argument is a guaranteed-terminating type
-- (i.e. a non-newtype dictionary) give it strict demand.
-- This is sound because terminating types can't be bottom:
-- See GHC.Core Note [NON-BOTTOM-DICTS invariant]
-- Also split the product type & demand and recur in order to similarly
-- strictify the argument's contained used non-newtype superclass dictionaries.
-- We use the demand as our recursive measure to guarantee termination.
strictifyDictDmd :: Type -> Demand -> Demand
strictifyDictDmd ty (n :* Prod b ds)
  | not (isAbs n)
  , Just field_tys <- as_non_newtype_dict ty
  = C_1N :* mkProd b (zipWith strictifyDictDmd field_tys ds)
      -- main idea: ensure it's strict
  where
    -- Return a TyCon and a list of field types if the given
    -- type is a non-newtype dictionary type
    as_non_newtype_dict ty
      | isTerminatingType ty
      , Just (_tc, _arg_tys, _data_con, field_tys) <- splitDataProductType_maybe ty
      = Just (map scaledThing field_tys)
      | otherwise
      = Nothing
strictifyDictDmd _ dmd = dmd

-- | Make a 'Demand' lazy.
lazifyDmd :: Demand -> Demand
lazifyDmd = multDmd C_01

-- | Adjust the demand on a binding that may float outwards
-- See Note [Floatifying demand info when floating]
floatifyDmd :: Demand -> Demand
floatifyDmd = multDmd C_0N

-- | Wraps the 'SubDemand' with a one-shot call demand: @d@ -> @C(1,d)@.
mkCalledOnceDmd :: SubDemand -> SubDemand
mkCalledOnceDmd sd = mkCall C_11 sd

-- | @mkCalledOnceDmds n d@ returns @C(1,C1...C(1,d))@ where there are @n@ @C1@'s.
mkCalledOnceDmds :: Arity -> SubDemand -> SubDemand
mkCalledOnceDmds arity sd = iterate mkCalledOnceDmd sd !! arity

-- | Peels one call level from the sub-demand, and also returns how many
-- times we entered the lambda body.
peelCallDmd :: SubDemand -> (Card, SubDemand)
peelCallDmd sd = viewCall sd `orElse` (topCard, topSubDmd)

-- Peels multiple nestings of 'Call' sub-demands and also returns
-- whether it was unsaturated in the form of a 'Card'inality, denoting
-- how many times the lambda body was entered.
-- See Note [Demands from unsaturated function calls].
peelManyCalls :: Arity -> SubDemand -> (Card, SubDemand)
peelManyCalls k sd = go k C_11 sd
  where
    -- Accumulate the product of the peeled call cardinalities in n.
    go 0 !n !sd                        = (n, sd)
    go k !n (viewCall -> Just (m, sd)) = go (k-1) (n `multCard` m) sd
    go _ _  _                          = (topCard, topSubDmd)
{-# INLINE peelManyCalls #-} -- so that the pair cancels away in a `fst _` context

-- | Count the number of nested /strict/ 'Call' levels of the sub-demand.
strictCallArity :: SubDemand -> Arity
strictCallArity sd = go 0 sd
  where
    go n (Call card sd) | isStrict card = go (n+1) sd
    go n _                              = n

-- | Extract the 'SubDemand' of a 'Demand'.
-- PRECONDITION: The SubDemand must be used in a context where the expression
-- denoted by the Demand is under evaluation.
subDemandIfEvaluated :: Demand -> SubDemand
subDemandIfEvaluated (_ :* sd) = sd

-- See Note [Demand on the worker] in GHC.Core.Opt.WorkWrap
mkWorkerDemand :: Int -> Demand
mkWorkerDemand n = C_01 :* go n
  where
    go 0 = topSubDmd
    go n = mkCall C_01 $ go (n-1)

argsOneShots :: DmdSig -> Arity -> [[OneShotInfo]]
-- ^ See Note [Computing one-shot info]
argsOneShots (DmdSig (DmdType _ arg_ds)) n_val_args
  | unsaturated_call = []
  | otherwise        = go arg_ds
  where
    unsaturated_call = arg_ds `lengthExceeds` n_val_args

    go []               = []
    go (arg_d : arg_ds) = argOneShots arg_d `cons` go arg_ds

    -- Avoid list tail like [ [], [], [] ]
    cons [] [] = []
    cons a  as = a:as

argOneShots :: Demand          -- ^ depending on saturation
            -> [OneShotInfo]
-- ^ See Note [Computing one-shot info]
argOneShots AbsDmd    = [] -- This defn conflicts with 'saturatedByOneShots',
argOneShots BotDmd    = [] -- according to which we should return
                           -- @repeat OneShotLam@ here...
argOneShots (_ :* sd) = map go (callCards sd)
  where
    go n | isAtMostOnce n = OneShotLam
         | otherwise      = NoOneShotInfo

-- | See Note [Computing one-shot info]
callCards :: SubDemand -> [Card]
callCards (Call n sd) = n : callCards sd
callCards (Poly _ _n) = [] -- n is never C_01 or C_11 so we may as well stop here
callCards Prod{}      = []

-- |
-- @saturatedByOneShots n C(M,C(M,...)) = True@
--   <=>
-- There are at least n nested C(M,..) calls.
-- See Note [Demand on the worker] in GHC.Core.Opt.WorkWrap
saturatedByOneShots :: Int -> Demand -> Bool
saturatedByOneShots _ AbsDmd    = True
saturatedByOneShots _ BotDmd    = True
saturatedByOneShots n (_ :* sd) = isAtMostOnce $ fst $ peelManyCalls n sd

{- Note [Strict demands]
~~~~~~~~~~~~~~~~~~~~~~~~
'isStrUsedDmd' returns true only of demands that are both strict and used.
In particular, it is False for (i.e. strict and not used, cardinality C_10),
which can and does arise in, say (#7319)
   f x = raise#
Then 'x' is not used, so f gets strictness -> .
Now the w/w generates
   fx = let x = absentError "unused" in raise
At this point we really don't want to convert to
   fx = case absentError "unused" of x -> raise
Since the program is going to diverge, this swaps one error for another, but
it's really a bad idea to *ever* evaluate an absent argument. In #7319 we get
  T7319.exe: Oops!  Entered absent arg w_s1Hd{v} [lid] [base:GHC.Base.String{tc 36u}]

Note [SubDemand denotes at least one evaluation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider a demand `n :* sd` on a binding `let x = e in <body>`.
(Similarly, a call sub-demand `Cn(sd)` on a lambda `\_. e`).
While `n` describes how *often* `x` had been evaluated in <body>, the
sub-demand `sd` describes how *deep* `e` has been evaluated, under the
following

  PREMISE: *for all program traces where `x` had been evaluated at all*

That is, `sd` disregards all program traces where `x` had not been evaluated,
because it can't describe the depth of an evaluation that never happened.
NB: The Premise only makes a difference for lower bounds/strictness.
Upper bounds/usage are unaffected by adding or leaving out evaluations that
never happen.

The Premise comes into play when we have lazy Demands.
For example, if `x` was demanded with `LP(SL,A)`, so perhaps the full expression was let x = (e1, e2) in (x `seq` fun y `seq` case x of (a,b) -> a, True) then `x` will be evaluated lazily, but in any trace in which `x` is evaluated, the pair in its RHS will ultimately be evaluated deeply with sub-demand `P(SL,A)`. That means that `e1` is ultimately evaluated strictly, even though evaluation of the field does not directly follow the eval of `x` due to the intermittent call `fun y`. How does the additional strictness help? The long version is the list of examples at the end of this Note (as procured in #21081 and #18903). The short version is * We get to take advantage of call-by-value/let-to-case in more situations, as for e1 above. See example "More let-to-case" below. * Note [Eta reduction based on evaluation context] applies in more situations. See example "More eta reduction" below. * We get to unbox more results, see example "More CPR" below. It seems like we don't give up anything in return. Indeed that is the case: * If we dropped the Premise, then a lazy `n` in `nP(m..)` would always force `m` to be lazy, too. That is quite redundant! It seems wasteful not to use the lower bound of `m` for something more useful. So indeed we give up on nothing in return for some nice wins. * Even if `n` is absent (so the Premise does hold for no trace whatsoever), it's pretty easy to describe how `e` was evaluated. Answer: 'botSubDmd'. We use it when expanding 'Absent' and 'Bottom' demands in 'viewDmdPair' as well as when expanding absent 'Poly's to 'Call' sub-demands in 'viewCall'. Of course, we now have to maintain the Premise when we unpack and rebuild Demands. For strict demands, we know that the Premise indeed always holds for any program trace abstracted over, whereas we have to be careful for lazy demands. In particular, when doing `plusDmd` we have to *lazify* the nested SubDemand if the outer cardinality is lazy. 
E.g., LP(SL) + SP(L) = (L+S)P((M*SL)+L) = SP(L+L) = SP(L) Multiplying with `M`/`C_01` is the "lazify" part here and is implemented in `lazifyIfStrict`. Example proving that point: d2 :: d2 x y = y `seq` (case x of (a,b) -> a, True) -- What is the demand on x in (d2 x x)? NOT SP(SL)!! We used to apply the same reasoning to Call SubDemands `Cn(sd)` in `plusSubDmd`, but that led to #21717, because different calls return different heap objects. See Note [Call SubDemand vs. evaluation Demand]. There are a couple more examples that improve in T21081. Here is a selection of those examples demonstrating the usefulness of The Premise: * "More let-to-case" (from testcase T21081): ```hs f :: (Bool, Bool) -> (Bool, Bool) f pr = (case pr of (a,b) -> a /= b, True) g :: Int -> (Bool, Bool) g x = let y = let z = odd x in (z,z) in f y ``` Although `f` is lazy in `pr`, we could case-bind `z` because it is always evaluated when `y` is evaluated. So we give `pr` demand `LP(SL,SL)` (most likely with better upper bounds/usage) and demand analysis then infers a strict demand for `z`. * "More eta reduction" (from testcase T21081): ```hs myfoldl :: (a -> b -> a) -> a -> [b] -> a myfoldl f z [] = z myfoldl f !z (x:xs) = myfoldl (\a b -> f a b) (f z x) xs ``` Here, we can give `f` a demand of `LC(S,C(1,L))` (instead of the lazier `LC(L,C(1,L))`) which says "Whenever `f` is evaluated (lazily), it is also called with two arguments". And Note [Eta reduction based on evaluation context] means we can rewrite `\a b -> f a b` to `f` in the call site of `myfoldl`. Nice! * "More CPR" (from testcase T18903): ```hs h :: Int -> Int h m = let g :: Int -> (Int,Int) g 1 = (m, 0) g n = (2 * n, 2 `div` n) {-# NOINLINE g #-} in case m of 1 -> 0 2 -> snd (g m) _ -> uncurry (+) (g m) ``` We want to give `g` the demand `MC(1,P(MP(L),1P(L)))`, so we see that in each call site of `g`, we are strict in the second component of the returned pair. 
That in turn means that Nested CPR can unbox the result of the division even though it might throw. Note [Unrealised opportunity in plusDmd] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Recall the lazification of SubDemands happening in `plusDmd` as described in Note [SubDemand denotes at least one evaluation]. We *could* do better when both Demands are lazy already. Example (fun 1, fun 2) Both args put Demand SC(S,L) on `fun`. The lazy pair arg context lazifies this to LC(S,L), and it would be reasonable to report this Demand on `fun` for the entire pair expression; after all, `fun` is called whenever it is evaluated. But our definition of `plusDmd` will compute LC(S,L) + LC(S,L) = (L+L)(M*C(S,L) + M*C(S,L)) = L(C(L,L)) = L Which is clearly less precise. Doing better here could mean to `lub` when both demands are lazy, e.g., LC(S,L) + LC(S,L) = (L+L)(C(S,L) ⊔ C(S,L)) = L(C(S,L)) Indeed that's what we did at one point between 9.4 and 9.6 after !7599, but it means that we need a function `lubPlusSubDmd` that lubs on lower bounds but plus'es upper bounds, implying maintenance challenges and complicated explanations. Plus, NoFib says that this special case doesn't bring all that much (geom. mean +0.0% counted instructions), so we don't bother anymore. Note [Call SubDemand vs. evaluation Demand] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although both evaluation Demands and Call SubDemands carry a (Card,SubDemand) pair, their interpretation is quite different. Example: f x = fst x * snd x -- f :: , because 1P(1L,A)+1P(A,1L) = SP(1L,1L) g x = fst (x 1) * snd (x 2) -- g :: , because 1C(1,P(1L,A))+1C(1,P(A,1L)) = SC(S,P(ML,ML)) The point about this example is that both demands have P(A,1L)/P(1L,A) as sub-expressions, but when these sub-demands occur 1. under an evaluation demand, we combine with `plusSubDmd` 2. whereas under a Call sub-demand, we combine with `lubSubDmd` And thus (1) yields a stricter demand on the pair components than (2). 
In #21717 we saw that we really need lub in (2), because otherwise we make an unsound prediction in `g (\n -> if n == 1 then (1,1) else (bot,2))`; we'd say that the `bot` expression is always evaluated, when it clearly is not. Operationally, every call to `g` gives back a potentially distinct, heap-allocated pair with potentially different contents, and we must `lubSubDmd` over all such calls to approximate how any of those pairs might be used. That is in stark contrast to f's argument `x`: Operationally, every eval of `x` must yield the same pair and `f` evaluates both components of that pair. The theorem "every eval of `x` returns the same heap object" is a very strong MUST-alias property and we capitalise on that by using `plusSubDmd` in (1). And indeed we *must* use `plusSubDmd` in (1) for sound upper bounds in an analysis that assumes call-by-need (as opposed to the weaker call-by-name) for let bindings. Consider h x = fst x * fst x -- h :: And the expression `let a=1; p=(a,a)} in h p`. Here, *although* the RHS of `p` is only evaluated once under call-by-need, `a` is still evaluated twice. If we had used `lubSubDmd`, we'd see SP(1L,A) and the 1L unsoundly says "exactly once". If the analysis had assumed call-by-name, it would be sound to say "a is used once in p": p is used multiple times and hence so would a, as if p was a function. So using `plusSubDmd` does not only yield better strictness, it is also "holding up the other end of the bargain" of the call-by-need assumption for upper bounds. (To SG's knowledge, the distinction between call-by-name and call-by-need does not matter for strictness analysis/lower bounds, thus it would be sound to use `lubSubDmd` all the time there.) Note [mkCall and plusSubDmd] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We never rewrite a strict, non-absent Call sub-demand like C(S,S) to a polymorphic sub-demand like S, otherwise #21085 strikes. 
Consider the following inequality (would also hold for M and 1 instead of L and S,
but we forbid such Polys):

  L+S = S = C(S,S) < C(S,L) = C(L,L)+C(S,S)

Note that L=C(L,L). If we also had S=C(S,S), we'd be in trouble: Now
`plusSubDmd` would no longer maintain the equality relation on sub-demands,
much less monotonicity. Bad!

Clearly, `n <= Cn(n)` is unproblematic, as is `n >= Cn(n)` for any `n`
except 1 and S. But `C(S,S) >= S` would mean trouble, because then we'd get
the problematic `C(S,S) = S`. We have just established that `S < C(S,S)`!
As such, the rewrite C(S,S) to S is anti-monotone and we forbid it, first
and foremost in `mkCall` (which is the only place that rewrites Cn(n) to n).
Crisis and #21085 averted!

Note [Computing one-shot info]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider a call
    f (\pqr. e1) (\xyz. e2) e3
where f has usage signature
Then argsOneShots returns a [[OneShotInfo]] of
    [[OneShot,NoOneShotInfo,OneShot],  [OneShot]]
The occurrence analyser propagates this one-shot info to the
binders \pqr and \xyz;
see Note [Sources of one-shot information] in GHC.Core.Opt.OccurAnal.

Note [Boxity in Poly]
~~~~~~~~~~~~~~~~~~~~~
To support Note [Boxity analysis], it makes sense that 'Prod' carries a
'Boxity'. But why does 'Poly' have to carry a 'Boxity', too? Shouldn't all
'Poly's be 'Boxed'? Couldn't we simply use 'Prod Unboxed' when we need to
express an unboxing demand?

'botSubDmd' (B) needs to be the bottom of the lattice, so it needs to be an
Unboxed demand (and deeply, at that). Similarly, 'seqSubDmd' (A) is an Unboxed
demand. So why not say that Polys with absent cardinalities have Unboxed
boxity? That doesn't work, because we also need the boxed equivalents.
Here's an example for A (function 'absent' in T19871): ``` f _ True = 1 f a False = a `seq` 2 -- demand on a: MA, the A is short for `Poly Boxed C_00` g a = a `seq` f a True -- demand on a: SA, which is `Poly Boxed C_00` h True p = g p -- SA on p (inherited from g) h False p@(x,y) = x+y -- S!P(1!L,1!L) on p ``` If A is treated as Unboxed, we get reboxing in the call site to 'g'. So we obviously would need a Boxed variant of A. Rather than introducing a lot of special cases, we just carry the Boxity in 'Poly'. Plus, we could most likely find examples like the above for any other cardinality. Note [Why Boxity in SubDemand and not in Demand?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In #19871, we started out by storing 'Boxity' in 'SubDemand', in the 'Prod' constructor only. But then we found that we weren't able to express the unboxing 'seqSubDmd', because that one really is a `Poly C_00` sub-demand. We then tried to store the Boxity in 'Demand' instead, for these reasons: 1. The whole boxity-of-seq business comes to a satisfying conclusion 2. Putting Boxity in the SubDemand is weird to begin with, because it describes the box and not its fields, just as the evaluation cardinality of a Demand describes how often the box is used. It makes more sense that Card and Boxity travel together. Also the alternative would have been to store Boxity with Poly, which is even weirder and more redundant. But then we regressed in T7837 (grep #19871 for boring specifics), which needed to transfer an ambient unboxed *demand* on a dictionary selector to its argument dictionary, via a 'Call' sub-demand `C(1,sd)`, as Note [Demand transformer for a dictionary selector] explains. Annoyingly, the boxity info has to be stored in the *sub-demand* `sd`! There's no demand to store the boxity in. So we bit the bullet and now we store Boxity in 'SubDemand', both in 'Prod' *and* 'Poly'. See also Note [Boxity in Poly]. 
Note [Demand transformer for data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the expression (x,y) with sub-demand P(SL,A). What is the demand on x,y? Obviously `x` is used strictly, and `y` not at all. So we want to decompose a product demand, and feed its components demands into the arguments. That is the job of dmdTransformDataConSig. More precisely, * it gets the demand on the data constructor itself; in the above example that is C(1,C(1,P(SL,A))) * it returns the demands on the arguments; in the above example that is [SL, A] Nasty wrinkle. Consider this code (#22475 has more realistic examples but assume this is what the demand analyser sees) data T = MkT !Int Bool get :: T -> Bool get (MkT _ b) = b foo = let v::Int = I# 7 t::T = MkT v True in get t Now `v` is unused by `get`, /but/ we can't give `v` an Absent demand, else we'll drop the binding and replace it with an error thunk. Then the code generator (more specifically GHC.Stg.InferTags.Rewrite) will add an extra eval of MkT's argument to give foo = let v::Int = error "absent" t::T = case v of v' -> MkT v' True in get t Boo! Because of this extra eval (added in STG-land), the truth is that `MkT` may (or may not) evaluate its arguments (as established in #21497). Hence the use of `bump` in dmdTransformDataConSig, which adds in a `C_01` eval. The `C_01` says "may or may not evaluate" which is absolutely faithful to what InferTags.Rewrite does. In particular it is very important /not/ to make that a `C_11` eval, see Note [Data-con worker strictness]. -} {- ********************************************************************* * * Divergence: Whether evaluation surely diverges * * ********************************************************************* -} -- | 'Divergence' characterises whether something surely diverges. -- Models a subset lattice of the following exhaustive set of divergence -- results: -- -- [n] nontermination (e.g. 
loops) -- [i] throws imprecise exception -- [p] throws precise exception -- [c] converges (reduces to WHNF). -- -- The different lattice elements correspond to different subsets, indicated by -- juxtaposition of indicators (e.g. __nc__ definitely doesn't throw an -- exception, and may or may not reduce to WHNF). -- -- @ -- Dunno (nipc) -- | -- ExnOrDiv (nip) -- | -- Diverges (ni) -- @ -- -- As you can see, we don't distinguish __n__ and __i__. -- See Note [Precise exceptions and strictness analysis] for why __p__ is so -- special compared to __i__. data Divergence = Diverges -- ^ Definitely throws an imprecise exception or diverges. | ExnOrDiv -- ^ Definitely throws a *precise* exception, an imprecise -- exception or diverges. Never converges, hence 'isDeadEndDiv'! -- See scenario 1 in Note [Precise exceptions and strictness analysis]. | Dunno -- ^ Might diverge, throw any kind of exception or converge. deriving Eq lubDivergence :: Divergence -> Divergence -> Divergence lubDivergence Diverges div = div lubDivergence div Diverges = div lubDivergence ExnOrDiv ExnOrDiv = ExnOrDiv lubDivergence _ _ = Dunno -- This needs to commute with defaultFvDmd, i.e. -- defaultFvDmd (r1 `lubDivergence` r2) = defaultFvDmd r1 `lubDmd` defaultFvDmd r2 -- (See Note [Default demand on free variables and arguments] for why) -- | See Note [Asymmetry of plusDmdType], which concludes that 'plusDivergence' -- needs to be symmetric. -- Strictly speaking, we should have @plusDivergence Dunno Diverges = ExnOrDiv@. -- But that regresses in too many places (every infinite loop, basically) to be -- worth it and is only relevant in higher-order scenarios -- (e.g. Divergence of @f (throwIO blah)@). -- So 'plusDivergence' currently is 'glbDivergence', really. 
plusDivergence :: Divergence -> Divergence -> Divergence
-- Sequential combination of two 'Divergence's. Effectively a glb on the
-- lattice: 'Diverges' wins outright, two 'Dunno's stay 'Dunno', and any
-- other combination can still throw, hence 'ExnOrDiv'.
plusDivergence lhs rhs = case (lhs, rhs) of
  (Diverges, _     ) -> Diverges
  (_,        Diverges) -> Diverges
  (Dunno,    Dunno ) -> Dunno
  _                  -> ExnOrDiv

-- | In a non-strict scenario, we might not force the Divergence, in which case
-- we might converge, hence Dunno.
multDivergence :: Card -> Divergence -> Divergence
multDivergence card div
  | isStrict card = div
  | otherwise     = Dunno

topDiv, exnDiv, botDiv :: Divergence
topDiv = Dunno     -- ^ Top of the lattice: may do anything.
exnDiv = ExnOrDiv  -- ^ Surely throws or diverges; may throw precisely.
botDiv = Diverges  -- ^ Bottom: surely diverges or throws imprecisely.

-- | True if the 'Divergence' indicates that evaluation will not return.
-- See Note [Dead ends].
isDeadEndDiv :: Divergence -> Bool
isDeadEndDiv div = case div of
  Diverges -> True
  ExnOrDiv -> True
  Dunno    -> False

-- See Notes [Default demand on free variables and arguments]
-- and Scenario 1 in [Precise exceptions and strictness analysis]
defaultFvDmd :: Divergence -> Demand
defaultFvDmd div = case div of
  Dunno    -> absDmd
  ExnOrDiv -> absDmd -- This is the whole point of ExnOrDiv!
  Diverges -> botDmd -- Diverges

defaultArgDmd :: Divergence -> Demand
-- TopRes and BotRes are polymorphic, so that
--      BotRes === (Bot -> BotRes) === ...
--      TopRes === (Top -> TopRes) === ...
-- This function makes that concrete
-- Also see Note [Default demand on free variables and arguments]
defaultArgDmd div = case div of
  Dunno    -> topDmd
  -- NB: not botDmd! We don't want to mask the precise exception by forcing the
  -- argument. But it is still absent.
  ExnOrDiv -> absDmd
  Diverges -> botDmd

{- Note [Precise vs imprecise exceptions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An exception is considered to be /precise/ when it is thrown by the 'raiseIO#'
primop. It follows that all other primops (such as 'raise#' or
division-by-zero) throw /imprecise/ exceptions. Note that the actual type of
the exception thrown doesn't have any impact!
GHC undertakes some effort not to apply an optimisation that would mask a /precise/ exception with some other source of nontermination, such as genuine divergence or an imprecise exception, so that the user can reliably intercept the precise exception with a catch handler before and after optimisations. See also the wiki page on precise exceptions: https://gitlab.haskell.org/ghc/ghc/wikis/exceptions/precise-exceptions Section 5 of "Tackling the awkward squad" talks about semantic concerns. Imprecise exceptions are actually more interesting than precise ones (which are fairly standard) from the perspective of semantics. See the paper "A Semantics for Imprecise Exceptions" for more details. Note [Dead ends] ~~~~~~~~~~~~~~~~ We call an expression that either diverges or throws a precise or imprecise exception a "dead end". We used to call such an expression just "bottoming", but with the measures we take to preserve precise exception semantics (see Note [Precise exceptions and strictness analysis]), that is no longer accurate: 'exnDiv' is no longer the bottom of the Divergence lattice. Yet externally to demand analysis, we mostly care about being able to drop dead code etc., which is all due to the property that such an expression never returns, hence we consider throwing a precise exception to be a dead end. See also 'isDeadEndDiv'. Note [Precise exceptions and strictness analysis] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have to take care to preserve precise exception semantics in strictness analysis (#17676). There are two scenarios that need careful treatment. The fixes were discussed at https://gitlab.haskell.org/ghc/ghc/wikis/fixing-precise-exceptions Recall that raiseIO# raises a *precise* exception, in contrast to raise# which raises an *imprecise* exception. See Note [Precise vs imprecise exceptions]. 
Scenario 1: Precise exceptions in case alternatives --------------------------------------------------- Unlike raise# (which returns botDiv), we want raiseIO# to return exnDiv. Here's why. Consider this example from #13380 (similarly #17676): f x y | x>0 = raiseIO# Exc | y>0 = return 1 | otherwise = return 2 Is 'f' strict in 'y'? One might be tempted to say yes! But that plays fast and loose with the precise exception; after optimisation, (f 42 (error "boom")) turns from throwing the precise Exc to throwing the imprecise user error "boom". So, the defaultFvDmd of raiseIO# should be lazy (topDmd), which can be achieved by giving it divergence exnDiv. See Note [Default demand on free variables and arguments]. Why don't we just give it topDiv instead of introducing exnDiv? Because then the simplifier will fail to discard raiseIO#'s continuation in case raiseIO# x s of { (# s', r #) -> } which we'd like to optimise to case raiseIO# x s of {} Hence we came up with exnDiv. The default FV demand of exnDiv is lazy (and its default arg dmd is absent), but otherwise (in terms of 'isDeadEndDiv') it behaves exactly as botDiv, so that dead code elimination works as expected. This is tracked by T13380b. Scenario 2: Precise exceptions in case scrutinees ------------------------------------------------- Consider (more complete examples in #148, #1592, testcase strun003) case foo x s of { (# s', r #) -> y } Is this strict in 'y'? Often not! If @foo x s@ might throw a precise exception (ultimately via raiseIO#), then we must not force 'y', which may fail to terminate or throw an imprecise exception, until we have performed @foo x s@. So we have to 'deferAfterPreciseException' (which 'lub's with 'exnDmdType' to model the exceptional control flow) when @foo x s@ may throw a precise exception. Motivated by T13380{d,e,f}. See Note [Which scrutinees may throw precise exceptions] in "GHC.Core.Opt.DmdAnal". 
We have to be careful not to discard dead-end Divergence from case
alternatives, though (#18086):

  m = putStrLn "foo" >> error "bar"

'm' should still have 'exnDiv', which is why it is not sufficient to lub with
'nopDmdType' (which has 'topDiv') in 'deferAfterPreciseException'.

Historical Note: This used to be called the "IO hack". But that term is rather
a bad fit because
1. It's easily confused with the "State hack", which also affects IO.
2. Neither "IO" nor "hack" is a good description of what goes on here, which
   is deferring strictness results after possibly throwing a precise
   exception. The "hack" is probably not having to defer when we can prove
   that the expression may not throw a precise exception (increasing precision
   of the analysis), but that's just a favourable guess.

Note [Exceptions and strictness]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to be smart about catching exceptions, but we aren't anymore.
See #14998 for the way it's resolved at the moment.

Here's a historic breakdown:

Apparently, exception handling prim-ops didn't use to have any special
strictness signatures, thus defaulting to nopSig, which assumes they use their
arguments lazily. Joachim was the first to realise that we could provide richer
information. Thus, in 0558911f91c (Dec 13), he added signatures to
primops.txt.pp indicating that functions like `catch#` and `catchRetry#` call
their argument, which is useful information for usage analysis. Still with a
'Lazy' strictness demand (i.e. 'lazyApply1Dmd'), though, and the world was fine.

In 7c0fff4 (July 15), Simon argued that giving `catch#` et al. a
'strictApply1Dmd' leads to substantial performance gains. That was at the cost
of correctness, as #10712 proved. So, back to 'lazyApply1Dmd' in
28638dfe79e (Dec 15).

Motivated to reproduce the gains of 7c0fff4 without the breakage of #10712,
Ben opened #11222.
Simon made the demand analyser "understand catch" in 9915b656 (Jan 16) by adding a new 'catchArgDmd', which basically said to call its argument strictly, but also swallow any thrown exceptions in 'multDivergence'. This was realized by extending the 'Str' constructor of 'ArgStr' with a 'ExnStr' field, indicating that it catches the exception, and adding a 'ThrowsExn' constructor to the 'Divergence' lattice as an element between 'Dunno' and 'Diverges'. Then along came #11555 and finally #13330, so we had to revert to 'lazyApply1Dmd' again in 701256df88c (Mar 17). This left the other variants like 'catchRetry#' having 'catchArgDmd', which is where #14998 picked up. Item 1 was concerned with measuring the impact of also making `catchRetry#` and `catchSTM#` have 'lazyApply1Dmd'. The result was that there was none. We removed the last usages of 'catchArgDmd' in 00b8ecb7 (Apr 18). There was a lot of dead code resulting from that change, that we removed in ef6b283 (Jan 19): We got rid of 'ThrowsExn' and 'ExnStr' again and removed any code that was dealing with the peculiarities. Where did the speed-ups vanish to? In #14998, item 3 established that turning 'catch#' strict in its first argument didn't bring back any of the alleged performance benefits. Item 2 of that ticket finally found out that it was entirely due to 'catchException's new (since #11555) definition, which was simply catchException !io handler = catch io handler While 'catchException' is arguably the saner semantics for 'catch', it is an internal helper function in "GHC.IO". Its use in "GHC.IO.Handle.Internals.do_operation" made for the huge allocation differences: Remove the bang and you find the regressions we originally wanted to avoid with 'catchArgDmd'. See also #exceptions_and_strictness# in "GHC.IO". 
So history keeps telling us that the only possibly correct strictness annotation for the first argument of 'catch#' is 'lazyApply1Dmd', because 'catch#' really is not strict in its argument: Just try this in GHCi :set -XScopedTypeVariables import Control.Exception catch undefined (\(_ :: SomeException) -> putStrLn "you'll see this") Any analysis that assumes otherwise will be broken in some way or another (beyond `-fno-pedantic-bottoms`). But then #13380 and #17676 suggest (in Mar 20) that we need to re-introduce a subtly different variant of `ThrowsExn` (which we call `ExnOrDiv` now) that is only used by `raiseIO#` in order to preserve precise exceptions by strictness analysis, while not impacting the ability to eliminate dead code. See Note [Precise exceptions and strictness analysis]. Note [Default demand on free variables and arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Free variables not mentioned in the environment of a 'DmdType' are demanded according to the demand type's Divergence: * In a Diverges (botDiv) context, that demand is botDmd (strict and absent). * In all other contexts, the demand is absDmd (lazy and absent). This is recorded in 'defaultFvDmd'. Similarly, we can eta-expand demand types to get demands on excess arguments not accounted for in the type, by consulting 'defaultArgDmd': * In a Diverges (botDiv) context, that demand is again botDmd. * In a ExnOrDiv (exnDiv) context, that demand is absDmd: We surely diverge before evaluating the excess argument, but don't want to eagerly evaluate it (cf. Note [Precise exceptions and strictness analysis]). * In a Dunno context (topDiv), the demand is topDmd, because it's perfectly possible to enter the additional lambda and evaluate it in unforeseen ways (so, not absent). Note [Bottom CPR iff Dead-Ending Divergence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both CPR analysis and Demand analysis handle recursive functions by doing fixed-point iteration. 
To find the *least* (e.g., most informative) fixed-point, iteration starts with the bottom element of the semantic domain. Diverging functions generally have the bottom element as their least fixed-point. One might think that CPR analysis and Demand analysis then agree in when a function gets a bottom denotation. E.g., whenever it has 'botCpr', it should also have 'botDiv'. But that is not the case, because strictness analysis has to be careful around precise exceptions, see Note [Precise vs imprecise exceptions]. So Demand analysis gives some diverging functions 'exnDiv' (which is *not* the bottom element) when the CPR signature says 'botCpr', and that's OK. Here's an example (from #18086) where that is the case: ioTest :: IO () ioTest = do putStrLn "hi" undefined However, one can loosely say that we give a function 'botCpr' whenever its 'Divergence' is 'exnDiv' or 'botDiv', i.e., dead-ending. But that's just a consequence of fixed-point iteration, it's not important that they agree. ************************************************************************ * * Demand environments and types * * ************************************************************************ -} -- Subject to Note [Default demand on free variables and arguments] -- | Captures the result of an evaluation of an expression, by -- -- * Listing how the free variables of that expression have been evaluated -- ('de_fvs') -- * Saying whether or not evaluation would surely diverge ('de_div') -- -- See Note [Demand env Equality]. 
-- | Demands on free variables plus 'Divergence' info.
-- Free variables absent from 'de_fvs' implicitly carry
-- @defaultFvDmd de_div@; see
-- Note [Default demand on free variables and arguments].
data DmdEnv = DE
  { de_fvs :: !(VarEnv Demand)  -- ^ Demands on the free variables seen so far
  , de_div :: !Divergence       -- ^ Whether evaluation surely diverges
  }

instance Eq DmdEnv where
  DE fv1 div1 == DE fv2 div2
    = div1 == div2 && canonicalise div1 fv1 == canonicalise div2 fv2
    where
      -- Drop entries equal to the default demand implied by the Divergence,
      -- so that explicitly- and implicitly-defaulted envs compare equal.
      canonicalise div fv = filterUFM (/= defaultFvDmd div) fv

mkEmptyDmdEnv :: Divergence -> DmdEnv
mkEmptyDmdEnv div = DE emptyVarEnv div

-- | Build a potentially terminating 'DmdEnv' from a finite map that says what
-- has been evaluated so far
mkTermDmdEnv :: VarEnv Demand -> DmdEnv
mkTermDmdEnv fvs = DE fvs topDiv

nopDmdEnv :: DmdEnv
nopDmdEnv = mkEmptyDmdEnv topDiv

botDmdEnv :: DmdEnv
botDmdEnv = mkEmptyDmdEnv botDiv

exnDmdEnv :: DmdEnv
exnDmdEnv = mkEmptyDmdEnv exnDiv

-- | Least upper bound of two 'DmdEnv's, pointwise on demands and on the
-- 'Divergence'.
lubDmdEnv :: DmdEnv -> DmdEnv -> DmdEnv
lubDmdEnv (DE fv1 d1) (DE fv2 d2) = DE lub_fv lub_div
  where
    -- See Note [Demand env Equality]
    lub_fv  = plusVarEnv_CD lubDmd fv1 (defaultFvDmd d1) fv2 (defaultFvDmd d2)
    lub_div = lubDivergence d1 d2

-- | Add an additional demand on @id@, combining ('plusDmd') with any demand
-- already recorded (or the default demand if none is).
addVarDmdEnv :: DmdEnv -> Id -> Demand -> DmdEnv
addVarDmdEnv env@(DE fvs div) id dmd
  = DE (extendVarEnv fvs id (dmd `plusDmd` lookupDmdEnv env id)) div

plusDmdEnv :: DmdEnv -> DmdEnv -> DmdEnv
plusDmdEnv (DE fv1 d1) (DE fv2 d2)
  -- In contrast to Note [Asymmetry of plusDmdType], this function is symmetric.
  | isEmptyVarEnv fv2, defaultFvDmd d2 == absDmd
  = DE fv1 (d1 `plusDivergence` d2) -- a very common case that is much more efficient
  | isEmptyVarEnv fv1, defaultFvDmd d1 == absDmd
  = DE fv2 (d1 `plusDivergence` d2) -- another very common case that is much more efficient
  | otherwise
  = DE (plusVarEnv_CD plusDmd fv1 (defaultFvDmd d1) fv2 (defaultFvDmd d2))
       (d1 `plusDivergence` d2)

-- | 'DmdEnv' is a monoid via 'plusDmdEnv' and 'nopDmdEnv'; this is its 'msum'
plusDmdEnvs :: [DmdEnv] -> DmdEnv
plusDmdEnvs []   = nopDmdEnv
plusDmdEnvs pdas = foldl1' plusDmdEnv pdas

multDmdEnv :: Card -> DmdEnv -> DmdEnv
multDmdEnv C_11 env          = env        -- identity: multiply by one
multDmdEnv C_00 _            = nopDmdEnv  -- annihilator: multiply by zero
multDmdEnv n    (DE fvs div) = DE (mapVarEnv (multDmd n) fvs) (multDivergence n div)

reuseEnv :: DmdEnv -> DmdEnv
reuseEnv = multDmdEnv C_1N

lookupDmdEnv :: DmdEnv -> Id -> Demand
-- See Note [Default demand on free variables and arguments]
lookupDmdEnv (DE fv div) id = lookupVarEnv fv id `orElse` defaultFvDmd div

delDmdEnv :: DmdEnv -> Id -> DmdEnv
delDmdEnv (DE fv div) id = DE (fv `delVarEnv` id) div

-- | Characterises how an expression
--
--    * Evaluates its free variables ('dt_env') including divergence info
--    * Evaluates its arguments ('dt_args')
--
data DmdType
  = DmdType
  { dt_env  :: !DmdEnv     -- ^ Demands on free variables.
                           -- See Note [Demand type Divergence]
  , dt_args :: ![Demand]   -- ^ Demand on arguments
  }

-- | See Note [Demand env Equality].
instance Eq DmdType where
  DmdType env1 ds1 == DmdType env2 ds2
    = ds1 == ds2 -- cheap checks first
      && env1 == env2

-- | Compute the least upper bound of two 'DmdType's elicited /by the same
-- incoming demand/!
lubDmdType :: DmdType -> DmdType -> DmdType
lubDmdType d1 d2 = DmdType lub_fv lub_ds
  where
    -- Eta-expand both types to a common depth first, so that argument
    -- demands can be zipped pointwise.
    n = max (dmdTypeDepth d1) (dmdTypeDepth d2)
    (DmdType fv1 ds1) = etaExpandDmdType n d1
    (DmdType fv2 ds2) = etaExpandDmdType n d2
    lub_ds = zipWithEqual "lubDmdType" lubDmd ds1 ds2
    lub_fv = lubDmdEnv fv1 fv2

-- | Forget the argument demands, keeping only the free-variable env.
discardArgDmds :: DmdType -> DmdEnv
discardArgDmds (DmdType fv _) = fv

plusDmdType :: DmdType -> DmdEnv -> DmdType
plusDmdType (DmdType fv ds) fv'
  -- See Note [Asymmetry of plusDmdType]
  -- 'DmdEnv' forms a (monoidal) action on 'DmdType' via this operation.
  = DmdType (plusDmdEnv fv fv') ds

botDmdType :: DmdType
botDmdType = DmdType botDmdEnv []

-- | The demand type of doing nothing (lazy, absent, no Divergence
-- information). Note that it is ''not'' the top of the lattice (which would be
-- "may use everything"), so it is (no longer) called topDmdType.
nopDmdType :: DmdType
nopDmdType = DmdType nopDmdEnv []

-- | The demand type of an unspecified expression that is guaranteed to
-- throw a (precise or imprecise) exception or diverge.
exnDmdType :: DmdType
exnDmdType = DmdType exnDmdEnv []

-- | The number of argument demands, i.e. the arity at which the type applies.
dmdTypeDepth :: DmdType -> Arity
dmdTypeDepth = length . dt_args

-- | This makes sure we can use the demand type with n arguments after eta
-- expansion, where n must not be lower than the demand types depth.
-- It appends the argument list with the correct 'defaultArgDmd'.
etaExpandDmdType :: Arity -> DmdType -> DmdType
etaExpandDmdType n d@DmdType{dt_args = ds, dt_env = env}
  | n == depth = d
  | n  > depth = d{dt_args = inc_ds}
  | otherwise  = pprPanic "etaExpandDmdType: arity decrease" (ppr n $$ ppr d)
  where depth = length ds
        -- Arity increase:
        --  * Demands on FVs are still valid
        --  * Demands on args also valid, plus we can extend with defaultArgDmd
        --    as appropriate for the given Divergence
        --  * Divergence is still valid:
        --    - A dead end after 2 arguments stays a dead end after 3 arguments
        --    - The remaining case is Dunno, which is already topDiv
        inc_ds = take n (ds ++ repeat (defaultArgDmd (de_div env)))

-- | A conservative approximation for a given 'DmdType' in case of an arity
-- decrease. Currently, it's just nopDmdType.
decreaseArityDmdType :: DmdType -> DmdType
decreaseArityDmdType _ = nopDmdType

splitDmdTy :: DmdType -> (Demand, DmdType)
-- Split off one function argument
-- We already have a suitable demand on all
-- free vars, so no need to add more!
splitDmdTy ty@DmdType{dt_args=dmd:args} = (dmd, ty{dt_args=args})
splitDmdTy ty@DmdType{dt_env=env}       = (defaultArgDmd (de_div env), ty)

-- | Multiply a whole 'DmdType' (free vars and args) by a 'Card'inality.
multDmdType :: Card -> DmdType -> DmdType
multDmdType C_11 dmd_ty = dmd_ty -- a vital optimisation for T25196
multDmdType n    (DmdType fv args)
  = -- pprTrace "multDmdType" (ppr n $$ ppr fv $$ ppr (multDmdEnv n fv)) $
    DmdType (multDmdEnv n fv)
            (strictMap (multDmd n) args)

-- | Look up and remove the demand on the given variable, returning the
-- remaining 'DmdType' and that demand.
peelFV :: DmdType -> Var -> (DmdType, Demand)
peelFV (DmdType fv ds) id = -- pprTrace "rfv" (ppr id <+> ppr dmd $$ ppr fv)
                            (DmdType fv' ds, dmd)
  where
  -- Force these arguments so that old `Env` is not retained.
  !fv' = fv `delDmdEnv` id
  !dmd = lookupDmdEnv fv id

-- | Push an extra argument demand onto the front of the argument list.
addDemand :: Demand -> DmdType -> DmdType
addDemand dmd (DmdType fv ds) = DmdType fv (dmd:ds)

-- | Look up the demand on a free variable (default demand if absent).
findIdDemand :: DmdType -> Var -> Demand
findIdDemand (DmdType fv _) id = lookupDmdEnv fv id

-- | When e is evaluated after executing an IO action that may throw a precise
-- exception, we act as if there is an additional control flow path that is
-- taken if e throws a precise exception. The demand type of this control flow
-- path
--   * is lazy and absent ('topDmd') and boxed in all free variables and arguments
--   * has 'exnDiv' 'Divergence' result
-- See Note [Precise exceptions and strictness analysis]
--
-- So we can simply take a variant of 'nopDmdType', 'exnDmdType'.
-- Why not 'nopDmdType'? Because then the result of 'e' can never be 'exnDiv'!
-- That means failure to drop dead-ends, see #18086.
deferAfterPreciseException :: DmdType -> DmdType
deferAfterPreciseException = lubDmdType exnDmdType

{- Note [deferAfterPreciseException]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The big picture is in Note [Precise exceptions and strictness analysis]
The idea is that we want to treat
   case <I/O operation> of (# s', r #) -> rhs
as if it was
   case <I/O operation> of
      Just (# s', r #) -> rhs
      Nothing          -> error
That is, the I/O operation might throw an exception, so that 'rhs' never
gets reached. For example, we don't want to be strict in the strict free
variables of 'rhs'.
So we have the simple definition
  deferAfterPreciseException = lubDmdType (DmdType emptyDmdEnv [] exnDiv)
Historically, when we had `lubBoxity = _unboxedWins` (see Note [unboxedWins]),
we had a more complicated definition for deferAfterPreciseException to make
sure it preserved boxity in its argument. That was needed for code like
   case <I/O operation> of
     (# s', r) -> f x
which uses `x` *boxed*. If we `lub`bed it with `(DmdType emptyDmdEnv [] exnDiv)`
we'd get an *unboxed* demand on `x` (because we let Unboxed win), which led to
#20746. Nowadays with `lubBoxity = boxedWins` we don't need the complicated
definition.

Note [Demand type Divergence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In contrast to DmdSigs, DmdTypes are elicited under a specific incoming demand.
This is described in detail in Note [Understanding DmdType and DmdSig].
Here, we'll focus on what that means for a DmdType's Divergence in a
higher-order scenario.

Consider
  err x y = x `seq` y `seq` error (show x)
this has a strictness signature of
  <1L><1L>b
meaning that we don't know what happens when we call err in weaker contexts
than C(1,C(1,L)), like @err `seq` ()@ (1A) and @err 1 `seq` ()@ (C(S,A)). We
may not unleash the botDiv, hence assume topDiv. Of course, in
@err 1 2 `seq` ()@ the incoming demand C(S,C(S,A)) is strong enough and we see
that the expression diverges.

Now consider a function
  f g = g 1 2
with signature <C(1,C(1,L))>, and the expression
  f err `seq` ()
now f puts a strictness demand of C(1,C(1,L)) onto its argument, which is
unleashed on err via the App rule. In contrast to weaker head strictness, this
demand is strong enough to unleash err's signature and hence we see that the
whole expression diverges!

Note [Demand env Equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~
What is the difference between the Demand env {x->A} and {}?
Answer: There is none! They have the exact same semantics, because any var
that is not mentioned in 'de_fvs' implicitly has demand 'defaultFvDmd', based
on the divergence of the demand env 'de_div'.
Similarly, b{x->B, y->A} is the same as b{y->A}, because the default FV demand
of BotDiv is B. But neither is equal to b{}, because y has demand B in the
latter, not A as before.

The Eq instance of DmdEnv must reflect that, otherwise we can get into
monotonicity issues during fixed-point iteration ({x->A} /= {} /= {x->A} /=
...). It does so by filtering out any default FV demands prior to comparing
'de_fvs'.
Note that 'lubDmdEnv' maintains this kind of equality by using
'plusVarEnv_CD', involving 'defaultFvDmd' for any entries present in one
'de_fvs' but not the other.

Note [Asymmetry of plusDmdType]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'plus' for DmdTypes is *asymmetrical*, because there can only be one type
contributing argument demands! For example, given (e1 e2), we get a DmdType
dt1 for e1, use its arg demand to analyse e2 giving dt2, and then do
(dt1 `plusType` dt2). Similarly with
  case e of { p -> rhs }
we get dt_scrut from the scrutinee and dt_rhs from the RHS, and then compute
(dt_rhs `plusType` dt_scrut). We
 1. combine the information on the free variables,
 2. take the demand on arguments from the first argument
 3. combine the termination results, as in plusDivergence.
Since we don't use argument demands of the second argument anyway, 'plus's
second argument is just a 'PlusDmdType'.

But note that the argument demand types are not guaranteed to be observed in
left to right order. For example, analysis of a case expression will pass the
demand type for the alts as the left argument and the type for the scrutinee
as the right argument. Also, it is not at all clear if there is such an order;
consider the LetUp case, where the RHS might be forced at any point while
evaluating the let body. Therefore, it is crucial that 'plusDivergence' is
symmetric!

Note [Demands from unsaturated function calls]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider a demand transformer d1 -> d2 -> r for f.
If a sufficiently detailed demand is fed into this transformer,
e.g arising from "f x1 x2" in a strict, use-once context,
then d1 and d2 is precisely the demand unleashed onto x1 and x2 (similar for
the free variable environment) and furthermore the result information r is the
one we want to use.

An anonymous lambda is also an unsaturated function all (needs one argument,
none given), so this applies to that case as well.

But the demand fed into f might be less than C(1,C(1,L)). Then we have to
'multDmdType' the announced demand type. Examples:
 * Not strict enough, e.g. C(1,C(1,L)):
   - We have to multiply all argument and free variable demands with C_01,
     zapping strictness.
   - We have to multiply divergence with C_01. If r says that f Diverges for
     sure, then this holds when the demand guarantees that two arguments are
     going to be passed. If the demand is lower, we may just as well converge.
     If we were tracking definite convergence, than that would still hold under
     a weaker demand than expected by the demand transformer.
 * Used more than once, e.g. C(S,C(1,L)):
   - Multiply with C_1N. Even if f puts a used-once demand on any of its
     argument or free variables, if we call f multiple times, we may evaluate
     this argument or free variable multiple times.

In dmdTransformSig, we call peelManyCalls to find out the 'Card'inality with
which we have to multiply and then call multDmdType with that.

Similarly, dmdTransformDictSelSig and dmdAnal, when analyzing a Lambda, use
peelCallDmd, which peels only one level, but also returns the demand put on
the body of the function.
-}

{-
************************************************************************
*                                                                      *
                     Demand signatures
*                                                                      *
************************************************************************

Note [DmdSig: demand signatures, and demand-sig arity]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See also
 * Note [Demand signatures semantically]
 * Note [Understanding DmdType and DmdSig]

In a let-bound Id we record its demand signature.
In principle, this demand signature is a demand transformer, mapping
a demand on the Id into a DmdType, which gives
        a) the free vars of the Id's value
        b) the Id's arguments
        c) an indication of the result of applying
           the Id to its arguments

However, in fact we store in the Id an extremely emasculated demand
transformer, namely a single DmdType
(Nevertheless we dignify DmdSig as a distinct type.)

The DmdSig for an Id is a semantic thing. Suppose a function `f` has a DmdSig
of
  DmdSig (DmdType (fv_dmds,res) [d1..dn])
Here `n` is called the "demand-sig arity" of the DmdSig. The signature means:
  * If you apply `f` to n arguments (the demand-sig-arity)
  * then you can unleash demands d1..dn on the arguments
  * and demands fv_dmds on the free variables.

Also see Note [Demand type Divergence] for the meaning of a Divergence in a
demand signature.

If `f` is applied to fewer value arguments than its demand-sig arity, it means
that the demand on the function at a call site is weaker than the vanilla call
demand, used for signature inference. Therefore we place a top demand on all
arguments.

For example, the demand transformer described by the demand signature
        DmdSig (DmdType {x -> <1L>} <A><1P(L,L)>)
says that when the function is applied to two arguments, it unleashes demand
1L on the free var x, A on the first arg, and 1P(L,L) on the second.

If this same function is applied to one arg, all we can say is that it uses x
with 1L, and its arg with demand 1P(L,L).

Note [Demand signatures semantically]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demand analysis interprets expressions in the abstract domain of demand
transformers. Given a (sub-)demand that denotes the evaluation context, the
abstract transformer of an expression gives us back a demand type denoting
how other things (like arguments and free vars) were used when the expression
was evaluated. Here's an example:

  f x y =
    if x + expensive
      then \z -> z + y * ...
      else \z -> z * ...

The abstract transformer (let's call it F_e) of the if expression (let's
call it e) would transform an incoming (undersaturated!) head sub-demand A
into a demand type like {x-><1L>,y-><L>}. In pictures:

     SubDemand ---F_e---> DmdType
     <A>                  {x-><1L>,y-><L>}

Let's assume that the demand transformers we compute for an expression are
correct wrt. to some concrete semantics for Core. How do demand signatures fit
in? They are strange beasts, given that they come with strict rules when it's
sound to unleash them. Fortunately, we can formalise the rules with Galois
connections. Consider f's strictness signature, {}<1L>. It's a single-point
approximation of the actual abstract transformer of f's RHS for arity 2. So,
what happens is that we abstract *once more* from the abstract domain we
already are in, replacing the incoming Demand by a simple lattice with two
elements denoting incoming arity: A_2 = {<2, >=2} (where '<2' is the top
element and >=2 the bottom element). Here's the diagram:

        A_2 -----f_f----> DmdType
         ^                   |
         | alpha             | gamma
         |                   v
  SubDemand ----F_f----> DmdType

With
  alpha(C(1,C(1,_))) = >=2
  alpha(_)           = <2
  gamma(ty)          = ty
and F_f being the abstract transformer of f's RHS and f_f being the abstracted
abstract transformer computable from our demand signature simply by

  f_f(>=2) = {}<1L>
  f_f(<2)  = multDmdType C_0N {}<1L>

where multDmdType makes a proper top element out of the given demand type.

In practice, the A_n domain is not just a simple Bool, but a Card, which is
exactly the Card with which we have to multDmdType. The Card for arity n is
computed by calling @peelManyCalls n@, which corresponds to alpha above.

Note [Understanding DmdType and DmdSig]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demand types are sound approximations of an expression's semantics relative
to the incoming demand we put the expression under. Consider the following
expression:

    \x y -> x `seq` (y, 2*x)

Here is a table with demand types resulting from different incoming demands
we put that expression under. Note the monotonicity; a stronger incoming
demand yields a more precise demand type:

    incoming sub-demand   |  demand type
    --------------------------------------
    P(A)                  |  <L><L>{}
    C(1,C(1,P(L)))        |  <1P(L)><L>{}
    C(1,C(1,1P(1P(L),A))) |  <1P(A)><A>{}

Note that in the first example, the depth of the demand type was *higher*
than the arity of the incoming call demand due to the anonymous lambda.
The converse is also possible and happens when we unleash demand signatures.
In @f x y@, the incoming call demand on f has arity 2. But if all we have is
a demand signature with depth 1 for @f@ (which we can safely unleash, see
below), the demand type of @f@ under a call demand of arity 2 has a *lower*
depth of 1.

So: Demand types are elicited by putting an expression under an incoming
(call) demand, the arity of which can be lower or higher than the depth of
the resulting demand type.
In contrast, a demand signature summarises a function's semantics *without*
immediately specifying the incoming demand it was produced under.

Despite StrSig being a newtype wrapper around DmdType, it actually encodes
two things:

  * The threshold (i.e., minimum arity) to unleash the signature
  * A demand type that is sound to unleash when the minimum arity requirement
    is met.

Here comes the subtle part: The threshold is encoded in the demand-sig arity!
So in mkDmdSigForArity we make sure to trim the list of argument demands to
the given threshold arity. Call sites will make sure that this corresponds to
the arity of the call demand that elicited the wrapped demand type.
See also Note [DmdSig: demand signatures, and demand-sig arity]
-}

-- | The depth of the wrapped 'DmdType' encodes the arity at which it is safe
-- to unleash. Better construct this through 'mkDmdSigForArity'.
-- See Note [Understanding DmdType and DmdSig]
newtype DmdSig
  = DmdSig DmdType
  deriving Eq

-- | Turns a 'DmdType' computed for the particular 'Arity' into a 'DmdSig'
-- unleashable at that arity. See Note [Understanding DmdType and DmdSig].
mkDmdSigForArity :: Arity -> DmdType -> DmdSig
mkDmdSigForArity threshold_arity dmd_ty@(DmdType fvs args)
  | threshold_arity < dmdTypeDepth dmd_ty
  -- Trim the arg demands to the threshold; the Divergence is no longer
  -- sound at the lower arity, so reset it to topDiv.
  = DmdSig $ DmdType (fvs { de_div = topDiv }) (take threshold_arity args)
  | otherwise
  = DmdSig (etaExpandDmdType threshold_arity dmd_ty)

-- | A signature with no info on free variables, only arg demands and
-- 'Divergence'.
mkClosedDmdSig :: [Demand] -> Divergence -> DmdSig
mkClosedDmdSig ds div = mkDmdSigForArity (length ds) (DmdType (mkEmptyDmdEnv div) ds)

-- | A closed signature with 'topDmd' on all @ar@ arguments.
mkVanillaDmdSig :: Arity -> Divergence -> DmdSig
mkVanillaDmdSig ar div = mkClosedDmdSig (replicate ar topDmd) div

splitDmdSig :: DmdSig -> ([Demand], Divergence)
splitDmdSig (DmdSig (DmdType env dmds)) = (dmds, de_div env)

dmdSigDmdEnv :: DmdSig -> DmdEnv
dmdSigDmdEnv (DmdSig (DmdType env _)) = env

-- | Does the signature record any free-variable demands?
hasDemandEnvSig :: DmdSig -> Bool
hasDemandEnvSig = not . isEmptyVarEnv . de_fvs . dmdSigDmdEnv

botSig :: DmdSig
botSig = DmdSig botDmdType

nopSig :: DmdSig
nopSig = DmdSig nopDmdType

isNopSig :: DmdSig -> Bool
isNopSig (DmdSig ty) = ty == nopDmdType

-- | True if the signature diverges or throws an exception in a saturated call.
-- See Note [Dead ends].
isDeadEndSig :: DmdSig -> Bool
isDeadEndSig (DmdSig (DmdType env _)) = isDeadEndDiv (de_div env)

-- | True if the signature diverges or throws an imprecise exception in a saturated call.
-- NB: In contrast to 'isDeadEndSig' this returns False for 'exnDiv'.
-- See Note [Dead ends]
-- and Note [Precise vs imprecise exceptions].
isBottomingSig :: DmdSig -> Bool
isBottomingSig (DmdSig (DmdType env _)) = de_div env == botDiv

-- | True when the signature indicates all arguments are boxed
onlyBoxedArguments :: DmdSig -> Bool
onlyBoxedArguments (DmdSig (DmdType _ dmds)) = all demandIsBoxed dmds
  where
    demandIsBoxed BotDmd    = True
    demandIsBoxed AbsDmd    = True
    demandIsBoxed (_ :* sd) = subDemandIsboxed sd

    subDemandIsboxed (Poly Unboxed _) = False
    subDemandIsboxed (Poly _ _)       = True
    subDemandIsboxed (Call _ sd)      = subDemandIsboxed sd
    subDemandIsboxed (Prod Unboxed _) = False
    subDemandIsboxed (Prod _ ds)      = all demandIsBoxed ds

-- | Returns true if an application to n value args would diverge or throw an
-- exception.
--
-- If a function having 'botDiv' is applied to a less number of arguments than
-- its syntactic arity, we cannot say for sure that it is going to diverge.
-- Hence this function conservatively returns False in that case.
-- See Note [Dead ends].
isDeadEndAppSig :: DmdSig -> Int -> Bool
isDeadEndAppSig (DmdSig (DmdType env ds)) n
  = isDeadEndDiv (de_div env) && not (lengthExceeds ds n)

-- | Drop boxity from every free-variable demand. See 'trimBoxity'.
trimBoxityDmdEnv :: DmdEnv -> DmdEnv
trimBoxityDmdEnv (DE fvs div) = DE (mapVarEnv trimBoxity fvs) div

-- | Drop boxity from free-variable and argument demands alike.
trimBoxityDmdType :: DmdType -> DmdType
trimBoxityDmdType (DmdType env ds) =
  DmdType (trimBoxityDmdEnv env) (map trimBoxity ds)

trimBoxityDmdSig :: DmdSig -> DmdSig
trimBoxityDmdSig = coerce trimBoxityDmdType

-- | Transfers the boxity of the left arg to the demand structure of the right
-- arg. This only makes sense if applied to new and old demands of the same
-- value.
transferBoxity :: Demand -> Demand -> Demand
transferBoxity from to = go_dmd from to
  where
    go_dmd (from_n :* from_sd) to_dmd@(to_n :* to_sd)
      | isAbs from_n || isAbs to_n = to_dmd
      | otherwise = case (from_sd, to_sd) of
          (Poly from_b _, Poly _ to_c) -> to_n :* Poly from_b to_c
          (_, Prod _ to_ds)
            | Just (from_b, from_ds) <- viewProd (length to_ds) from_sd
            -> to_n :* mkProd from_b (strictZipWith go_dmd from_ds to_ds)
          (Prod from_b from_ds, _)
            | Just (_, to_ds) <- viewProd (length from_ds) to_sd
            -> to_n :* mkProd from_b (strictZipWith go_dmd from_ds to_ds)
          _ -> trimBoxity to_dmd

-- | Transfer argument boxity from the first 'DmdType' to the second,
-- falling back to 'trimBoxityDmdType' when the arg lists don't line up.
transferArgBoxityDmdType :: DmdType -> DmdType -> DmdType
transferArgBoxityDmdType _from@(DmdType _ from_ds) to@(DmdType to_env to_ds)
  | equalLength from_ds to_ds
  = -- pprTraceWith "transfer" (\r -> ppr _from $$ ppr to $$ ppr r) $
    DmdType to_env -- Only arg boxity! See Note [Don't change boxity without worker/wrapper]
            (zipWith transferBoxity from_ds to_ds)
  | otherwise
  = trimBoxityDmdType to

transferArgBoxityDmdSig :: DmdSig -> DmdSig -> DmdSig
transferArgBoxityDmdSig = coerce transferArgBoxityDmdType

prependArgsDmdSig :: Int -> DmdSig -> DmdSig
-- ^ Add extra ('topDmd') arguments to a strictness signature.
-- In contrast to 'etaConvertDmdSig', this /prepends/ additional argument
-- demands. This is used by FloatOut.
prependArgsDmdSig new_args sig@(DmdSig dmd_ty@(DmdType env dmds))
  | new_args == 0        = sig
  | dmd_ty == nopDmdType = sig
  | otherwise            = DmdSig (DmdType env dmds')
  where
    dmds' = assertPpr (new_args > 0) (ppr new_args) $
            replicate new_args topDmd ++ dmds

etaConvertDmdSig :: Arity -> DmdSig -> DmdSig
-- ^ We are expanding (\x y. e) to (\x y z. e z) or reducing from the latter to
-- the former (when the Simplifier identifies a new join points, for example).
-- In contrast to 'prependArgsDmdSig', this /appends/ extra arg demands if
-- necessary.
-- This works by looking at the 'DmdType' (which was produced under a call
-- demand for the old arity) and trying to transfer as many facts as we can to
-- the call demand of new arity.
-- An arity increase (resulting in a stronger incoming demand) can retain much
-- of the info, while an arity decrease (a weakening of the incoming demand)
-- must fall back to a conservative default.
etaConvertDmdSig arity (DmdSig dmd_ty)
  | arity < dmdTypeDepth dmd_ty = DmdSig $ decreaseArityDmdType dmd_ty
  | otherwise                   = DmdSig $ etaExpandDmdType arity dmd_ty

{-
************************************************************************
*                                                                      *
                     Demand transformers
*                                                                      *
************************************************************************
-}

-- | A /demand transformer/ is a monotone function from an incoming evaluation
-- context ('SubDemand') to a 'DmdType', describing how the denoted thing
-- (i.e. expression, function) uses its arguments and free variables, and
-- whether it diverges.
--
-- See Note [Understanding DmdType and DmdSig]
-- and Note [DmdSig: demand signatures, and demand-sig arity]
type DmdTransformer = SubDemand -> DmdType

-- | Extrapolate a demand signature ('DmdSig') into a 'DmdTransformer'.
--
-- Given a function's 'DmdSig' and a 'SubDemand' for the evaluation context,
-- return how the function evaluates its free variables and arguments.
dmdTransformSig :: DmdSig -> DmdTransformer
dmdTransformSig (DmdSig dmd_ty@(DmdType _ arg_ds)) sd
  = multDmdType (fst $ peelManyCalls (length arg_ds) sd) dmd_ty
    -- see Note [Demands from unsaturated function calls]
    -- and Note [DmdSig: demand signatures, and demand-sig arity]

-- | A special 'DmdTransformer' for data constructors that feeds product
-- demands into the constructor arguments.
dmdTransformDataConSig :: [StrictnessMark] -> DmdTransformer
-- See Note [Demand transformer for data constructors]
dmdTransformDataConSig str_marks sd = case viewProd arity body_sd of
  Just (_, dmds) -> mk_body_ty n dmds
  Nothing        -> nopDmdType
  where
    arity = length str_marks
    (n, body_sd) = peelManyCalls arity sd
    mk_body_ty n dmds = DmdType nopDmdEnv (zipWith (bump n) str_marks dmds)
    -- Strict fields get an extra 'seq'-like demand on top of the field demand.
    bump n str dmd | isMarkedStrict str = multDmd n (plusDmd str_field_dmd dmd)
                   | otherwise          = multDmd n dmd
    str_field_dmd = C_01 :* seqSubDmd -- Why not C_11? See Note [Data-con worker strictness]

-- | A special 'DmdTransformer' for dictionary selectors that feeds the demand
-- on the result into the indicated dictionary component (if saturated).
-- See Note [Demand transformer for a dictionary selector].
dmdTransformDictSelSig :: DmdSig -> DmdTransformer
-- NB: This currently doesn't handle newtype dictionaries.
-- It should simply apply call_sd directly to the dictionary, I suppose.
dmdTransformDictSelSig (DmdSig (DmdType _ [_ :* prod])) call_sd
  | (n, sd') <- peelCallDmd call_sd
  , Prod _ sig_ds <- prod
  = multDmdType n $
    DmdType nopDmdEnv [C_11 :* mkProd Unboxed (map (enhance sd') sig_ds)]
  | otherwise
  = nopDmdType -- See Note [Demand transformer for a dictionary selector]
  where
    enhance _  AbsDmd   = AbsDmd
    enhance _  BotDmd   = BotDmd
    enhance sd _dmd_var = C_11 :* sd -- This is the one!
                                     -- C_11, because we multiply with n above
dmdTransformDictSelSig sig sd = pprPanic "dmdTransformDictSelSig: no args" (ppr sig $$ ppr sd)

{-
Note [Demand transformer for a dictionary selector]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have a superclass selector 'sc_sel' and a class method selector
'op_sel', and a function that uses both, like this

-- Strictness sig: 1P(1,A)
sc_sel (x,y) = x

-- Strictness sig: 1P(A,1)
op_sel (p,q)= q

f d v = op_sel (sc_sel d) v

What do we learn about the demand on 'd'? Alas, we see only the demand from
'sc_sel', namely '1P(1,A)'. We /don't/ see that 'd' really has a nested demand
'1P(1P(A,1C(1,1)),A)'. On the other hand, if we inlined the two selectors we'd
have

f d x = case d of (x,_) ->
        case x of (_,q) ->
        q v

If we analyse that, we'll get a richer, nested demand on 'd'.

We want to behave /as if/ we'd inlined 'op_sel' and 'sc_sel'. We can do this
easily by building a richer demand transformer for dictionary selectors than
is expressible by a regular demand signature.
And that is what 'dmdTransformDictSelSig' does: it transforms the demand on
the result to a demand on the (single) argument.

How does it do that? If we evaluate (op dict-expr) under demand 'd', then we
can push the demand 'd' into the appropriate field of the dictionary. What
*is* the appropriate field? We just look at the strictness signature of the
class op, which will be something like: P(AAA1AAAAA). Then replace the '1'
(or any other non-absent demand, really) by the demand 'd'. The '1' acts as
if it was a demand variable, the whole signature really means
`\d. P(AAAdAAAAA)` for any incoming demand 'd'.

For single-method classes, which are represented by newtypes the signature of
'op' won't look like P(...), so matching on Prod will fail. That's fine: if we
are doing strictness analysis we are also doing inlining, so we'll have
inlined 'op' into a cast. So we can bale out in a conservative way, returning
nopDmdType. SG: Although we then probably want to apply the eval demand 'd'
directly to 'op' rather than turning it into 'topSubDmd'...

It is (just.. #8329) possible to be running strictness analysis *without*
having inlined class ops from single-method classes. Suppose you are using
ghc --make; and the first module has a local -O0 flag. So you may load a class
without interface pragmas, ie (currently) without an unfolding for the class
ops. Now if a subsequent module in the --make sweep has a local -O flag you
might do strictness analysis, but there is no inlining for the class op.
This is weird, so I'm not worried about whether this optimises brilliantly;
but it should not fall over.
-}

-- | Forget all free-variable demands, keeping only the 'Divergence'.
zapDmdEnv :: DmdEnv -> DmdEnv
zapDmdEnv (DE _ div) = mkEmptyDmdEnv div

-- | Remove the demand environment from the signature.
zapDmdEnvSig :: DmdSig -> DmdSig
zapDmdEnvSig (DmdSig (DmdType env ds)) = DmdSig (DmdType (zapDmdEnv env) ds)

zapUsageDemand :: Demand -> Demand
-- Remove the usage info, but not the strictness info, from the demand
zapUsageDemand = kill_usage $ KillFlags
    { kf_abs         = True
    , kf_used_once   = True
    , kf_called_once = True
    }

-- | Remove all `C_01 :*` info (but not `CM` sub-demands) from the demand
zapUsedOnceDemand :: Demand -> Demand
zapUsedOnceDemand = kill_usage $ KillFlags
    { kf_abs         = False
    , kf_used_once   = True
    , kf_called_once = False
    }

-- | Remove all `C_01 :*` info (but not `CM` sub-demands) from the strictness
--   signature
zapUsedOnceSig :: DmdSig -> DmdSig
zapUsedOnceSig (DmdSig (DmdType env ds))
    = DmdSig (DmdType env (map zapUsedOnceDemand ds))

-- | Which kinds of usage info 'kill_usage' should erase.
data KillFlags = KillFlags
    { kf_abs         :: Bool
    , kf_used_once   :: Bool
    , kf_called_once :: Bool
    }

kill_usage_card :: KillFlags -> Card -> Card
kill_usage_card kfs C_00 | kf_abs kfs       = C_0N
kill_usage_card kfs C_10 | kf_abs kfs       = C_1N
kill_usage_card kfs C_01 | kf_used_once kfs = C_0N
kill_usage_card kfs C_11 | kf_used_once kfs = C_1N
kill_usage_card _   n                       = n

kill_usage :: KillFlags -> Demand -> Demand
kill_usage _   AbsDmd    = AbsDmd
kill_usage _   BotDmd    = BotDmd
kill_usage kfs (n :* sd) = kill_usage_card kfs n :* kill_usage_sd kfs sd

kill_usage_sd :: KillFlags -> SubDemand -> SubDemand
kill_usage_sd kfs (Call n sd)
  | kf_called_once kfs        = mkCall (lubCard C_1N n) (kill_usage_sd kfs sd)
  | otherwise                 = mkCall n                (kill_usage_sd kfs sd)
kill_usage_sd kfs (Prod b ds) = mkProd b (map (kill_usage kfs) ds)
kill_usage_sd _   sd          = sd

{- *********************************************************************
*                                                                      *
               TypeShape and demand trimming
*                                                                      *
********************************************************************* -}
data TypeShape -- See Note [Trimming a demand to a type]
               --     in GHC.Core.Opt.DmdAnal
  = TsFun TypeShape
  | TsProd [TypeShape]
  | TsUnk

trimToType :: Demand -> TypeShape -> Demand
-- See Note [Trimming a demand to a type] in GHC.Core.Opt.DmdAnal
trimToType AbsDmd    _  = AbsDmd
trimToType BotDmd    _  = BotDmd
trimToType (n :* sd) ts
  = n :* go sd ts
  where
    go (Prod b ds) (TsProd tss)
      | equalLength ds tss    = mkProd b (zipWith trimToType ds tss)
    go (Call n sd) (TsFun ts) = mkCall n (go sd ts)
    go sd@Poly{}   _          = sd
    go _           _          = topSubDmd

-- | Drop all boxity
trimBoxity :: Demand -> Demand
trimBoxity AbsDmd    = AbsDmd
trimBoxity BotDmd    = BotDmd
trimBoxity (n :* sd) = n :* go sd
  where
    go (Poly _ n)  = Poly Boxed n
    go (Prod _ ds) = mkProd Boxed (map trimBoxity ds)
    go (Call n sd) = mkCall n $ go sd

{-
************************************************************************
*                                                                      *
                     'seq'ing demands
*                                                                      *
************************************************************************
-}

-- Deep-evaluation helpers, used to avoid space leaks from lazily built
-- demand structures.

seqDemand :: Demand -> ()
seqDemand AbsDmd    = ()
seqDemand BotDmd    = ()
seqDemand (_ :* sd) = seqSubDemand sd

seqSubDemand :: SubDemand -> ()
seqSubDemand (Prod _ ds) = seqDemandList ds
seqSubDemand (Call _ sd) = seqSubDemand sd
seqSubDemand (Poly _ _)  = ()

seqDemandList :: [Demand] -> ()
seqDemandList = foldr (seq . seqDemand) ()

seqDmdType :: DmdType -> ()
seqDmdType (DmdType env ds) =
  seqDmdEnv env `seq` seqDemandList ds `seq` ()

seqDmdEnv :: DmdEnv -> ()
seqDmdEnv (DE fvs _) = seqEltsUFM seqDemand fvs

seqDmdSig :: DmdSig -> ()
seqDmdSig (DmdSig ty) = seqDmdType ty

{-
************************************************************************
*                                                                      *
                     Outputable and Binary instances
*                                                                      *
************************************************************************
-}

-- Just for debugging purposes.
instance Show Card where
  show C_00 = "C_00"
  show C_01 = "C_01"
  show C_0N = "C_0N"
  show C_10 = "C_10"
  show C_11 = "C_11"
  show C_1N = "C_1N"

{- Note [Demand notation]
~~~~~~~~~~~~~~~~~~~~~~~~~
This Note should be kept up to date with the documentation of `-fstrictness`
in the user's guide.

For pretty-printing demands, we use quite a compact notation with some
abbreviations. Here's the BNF:

  card ::= B                        {}
         | A                        {0}
         | M                        {0,1}
         | L                        {0,1,n}
         | 1                        {1}
         | S                        {1,n}

  box  ::= !                        Unboxed
         |                          Boxed

  d    ::= card sd                  The :* constructor, just juxtaposition
         | card                     abbreviation: Same as "card card"

  sd   ::= box card                 @Poly box card@
         | box P(d,d,..)            @Prod box [d1,d2,..]@
         | Ccard(sd)                @Call card sd@

So, L can denote a 'Card', polymorphic 'SubDemand' or polymorphic 'Demand',
but it's always clear from context which "overload" is meant. It's like
return-type inference of e.g. 'read'.

An example of the demand syntax is 1!P(1!L,A), the demand of fst's argument.
See Note [Demand examples] for more examples and their semantics.

This is the syntax for demand signatures:

  div ::=              topDiv
        | x            exnDiv
        | b            botDiv

  sig ::= {x->dx,y->dy,z->dz...}<d1><d2><d3>...<dn>div
                ^              ^   ^   ^      ^   ^
                |              |   |   |      |   |
                |              \---+---+------/   |
                |                  |              |
          demand on free      demand on       divergence
            variables         arguments       information
        (omitted if empty)                  (omitted if no
                                             information)

Note [Demand examples]
~~~~~~~~~~~~~~~~~~~~~~
Here are some examples of the demand notation, specified in
Note [Demand notation], in action. In each case we give the demand on the
variable `x`.

Demand on x   Example             Explanation
------------------------------------------------------------------------
1!A           seq x y             Evaluates `x` exactly once (`1`), but not
                                  any deeper (`A`), and discards the box
                                  (`!`).
S!A           seq x (seq x y)     Twice the previous demand; hence eval'd
                                  more than once (`S` for strict).
1!P(1!L,A)    fst x               Evaluates pair `x` exactly once, first
                                  component exactly once. No info beyond
                                  that (`L`). Second component is absent.
                                  Discards boxes (`!`).
1P(1L,A)      opq_fst x           Like fst, but all boxes are retained.
SP(1!L,A)     opq_seq x (fst x)   Two evals of x but exactly one of its
                                  first component. Box of x retained, but
                                  box of first component discarded.
1!C(1,L)      x $ 3               Evals x exactly once ( 1 ) and calls it
                                  exactly once ( C(1,_) ). No info on how
                                  the result is evaluated ( L ).
MC(M,L)       maybe y x           Evals x at most once ( M ) and calls it
                                  at most once ( C(M,_) ). No info on how
                                  the result is evaluated ( L ).
LP(SL,A)      map (+ fst x)       Evals x lazily and multiple times ( L ),
                                  but when it is evaluated, the first
                                  component is evaluated (strictly) as
                                  well.

In the examples above, `opq_fst` is an opaque wrapper around `fst`, i.e.

  opq_fst = fst
  {-# OPAQUE opq_fst #-}

Similarly for `seq`. The effect of an OPAQUE pragma is that it discards any
boxity flags in the demand signature, as described in Note [OPAQUE pragma].
-}

-- | See Note [Demand notation]
-- Current syntax was discussed in #19016.
instance Outputable Card where
  ppr C_00 = char 'A' -- "Absent"
  ppr C_01 = char 'M' -- "Maybe"
  ppr C_0N = char 'L' -- "Lazy"
  ppr C_11 = char '1' -- "exactly 1"
  ppr C_1N = char 'S' -- "Strict"
  ppr C_10 = char 'B' -- "Bottom"

-- | See Note [Demand notation]
instance Outputable Demand where
  ppr AbsDmd                    = char 'A'
  ppr BotDmd                    = char 'B'
  ppr (C_0N :* Poly Boxed C_0N) = char 'L' -- Print LL as just L
  ppr (C_1N :* Poly Boxed C_1N) = char 'S' -- Ditto SS
  ppr (n :* sd)                 = ppr n <> ppr sd

-- | See Note [Demand notation]
instance Outputable SubDemand where
  ppr (Poly b n)  = pp_boxity b <> ppr n
  ppr (Call n sd) = char 'C' <> parens (ppr n <> comma <> ppr sd)
  ppr (Prod b ds) = pp_boxity b <> char 'P' <> parens (fields ds)
    where
      fields []     = empty
      fields [x]    = ppr x
      fields (x:xs) = ppr x <> char ',' <> fields xs

pp_boxity :: Boxity -> SDoc
pp_boxity Unboxed = char '!'
pp_boxity _ = empty instance Outputable Divergence where ppr Diverges = char 'b' -- for (b)ottom ppr ExnOrDiv = char 'x' -- for e(x)ception ppr Dunno = empty instance Outputable DmdEnv where ppr (DE fvs div) = ppr div <> if null fv_elts then empty else braces (fsep (map pp_elt fv_elts)) where pp_elt (uniq, dmd) = ppr uniq <> text "->" <> ppr dmd fv_elts = nonDetUFMToList fvs -- It's OK to use nonDetUFMToList here because we only do it for -- pretty printing instance Outputable DmdType where ppr (DmdType fv ds) = hcat (map (angleBrackets . ppr) ds) <> ppr fv instance Outputable DmdSig where ppr (DmdSig ty) = ppr ty instance Outputable TypeShape where ppr TsUnk = text "TsUnk" ppr (TsFun ts) = text "TsFun" <> parens (ppr ts) ppr (TsProd tss) = parens (hsep $ punctuate comma $ map ppr tss) instance Binary Card where put_ bh C_00 = putByte bh 0 put_ bh C_01 = putByte bh 1 put_ bh C_0N = putByte bh 2 put_ bh C_11 = putByte bh 3 put_ bh C_1N = putByte bh 4 put_ bh C_10 = putByte bh 5 get bh = do h <- getByte bh case h of 0 -> return C_00 1 -> return C_01 2 -> return C_0N 3 -> return C_11 4 -> return C_1N 5 -> return C_10 _ -> pprPanic "Binary:Card" (ppr (fromIntegral h :: Int)) instance Binary Demand where put_ bh (n :* sd) = put_ bh n *> case n of C_00 -> return () C_10 -> return () _ -> put_ bh sd get bh = get bh >>= \n -> case n of C_00 -> return AbsDmd C_10 -> return BotDmd _ -> (n :*) <$> get bh instance Binary SubDemand where put_ bh (Poly b sd) = putByte bh 0 *> put_ bh b *> put_ bh sd put_ bh (Call n sd) = putByte bh 1 *> put_ bh n *> put_ bh sd put_ bh (Prod b ds) = putByte bh 2 *> put_ bh b *> put_ bh ds get bh = do h <- getByte bh case h of 0 -> Poly <$> get bh <*> get bh 1 -> mkCall <$> get bh <*> get bh 2 -> Prod <$> get bh <*> get bh _ -> pprPanic "Binary:SubDemand" (ppr (fromIntegral h :: Int)) instance Binary Divergence where put_ bh Dunno = putByte bh 0 put_ bh ExnOrDiv = putByte bh 1 put_ bh Diverges = putByte bh 2 get bh = do h <- getByte bh case h of 0 
-> return Dunno 1 -> return ExnOrDiv 2 -> return Diverges _ -> pprPanic "Binary:Divergence" (ppr (fromIntegral h :: Int)) instance Binary DmdEnv where -- Ignore VarEnv when spitting out the DmdType put_ bh (DE _ d) = put_ bh d get bh = DE emptyVarEnv <$> get bh instance Binary DmdType where put_ bh (DmdType fv ds) = put_ bh fv *> put_ bh ds get bh = DmdType <$> get bh <*> get bh instance Binary DmdSig where put_ bh (DmdSig aa) = put_ bh aa get bh = DmdSig <$> get bh ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Error.hs0000644000000000000000000011107707346545000020446 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE PatternSynonyms #-} module GHC.Types.Error ( -- * Messages Messages , mkMessages , getMessages , emptyMessages , isEmptyMessages , singleMessage , addMessage , unionMessages , unionManyMessages , filterMessages , MsgEnvelope (..) -- * Classifying Messages , MessageClass (..) , Severity (..) , Diagnostic (..) , UnknownDiagnostic (..) , mkSimpleUnknownDiagnostic , mkUnknownDiagnostic , embedUnknownDiagnostic , DiagnosticMessage (..) , DiagnosticReason (WarningWithFlag, ..) , ResolvedDiagnosticReason(..) , DiagnosticHint (..) , mkPlainDiagnostic , mkPlainError , mkDecoratedDiagnostic , mkDecoratedError , pprDiagnostic , HasDefaultDiagnosticOpts(..) , defaultDiagnosticOpts , NoDiagnosticOpts(..) -- * Hints and refactoring actions , GhcHint (..) , AvailableBindings(..) , LanguageExtensionHint(..) 
, suggestExtension , suggestExtensionWithInfo , suggestExtensions , suggestExtensionsWithInfo , suggestAnyExtension , suggestAnyExtensionWithInfo , useExtensionInOrderTo , noHints -- * Rendering Messages , SDoc , DecoratedSDoc (unDecorated) , mkDecorated, mkSimpleDecorated , unionDecoratedSDoc , mapDecoratedSDoc , pprMessageBag , mkLocMessage , mkLocMessageWarningGroups , getCaretDiagnostic -- * Queries , isIntrinsicErrorMessage , isExtrinsicErrorMessage , isWarningMessage , getErrorMessages , getWarningMessages , partitionMessages , errorsFound , errorsOrFatalWarningsFound -- * Diagnostic codes , DiagnosticCode(..) ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Data.Bag import GHC.IO (catchException) import GHC.Utils.Outputable as Outputable import qualified GHC.Utils.Ppr.Colour as Col import GHC.Types.SrcLoc as SrcLoc import GHC.Types.Hint import GHC.Data.FastString (unpackFS) import GHC.Data.StringBuffer (atLine, hGetStringBuffer, len, lexemeToString) import GHC.Types.Hint.Ppr () -- Outputable instance import GHC.Unit.Module.Warnings (WarningCategory(..)) import GHC.Utils.Json import GHC.Utils.Panic import GHC.Version (cProjectVersion) import Data.Bifunctor import Data.Foldable ( fold, toList ) import Data.List.NonEmpty ( NonEmpty (..) ) import qualified Data.List.NonEmpty as NE import Data.List ( intercalate ) import Data.Maybe ( maybeToList ) import Data.Typeable ( Typeable ) import Numeric.Natural ( Natural ) import Text.Printf ( printf ) {- Note [Messages] ~~~~~~~~~~~~~~~~~~ We represent the 'Messages' as a single bag of warnings and errors. The reason behind that is that there is a fluid relationship between errors and warnings and we want to be able to promote or demote errors and warnings based on certain flags (e.g. -Werror, -fdefer-type-errors or -XPartialTypeSignatures). More specifically, every diagnostic has a 'DiagnosticReason', but a warning 'DiagnosticReason' might be associated with 'SevError', in the case of -Werror. 
We rely on the 'Severity' to distinguish between a warning and an error. 'WarningMessages' and 'ErrorMessages' are for now simple type aliases to retain backward compatibility, but in future iterations these can be either parameterised over an 'e' message type (to make type signatures a bit more declarative) or removed altogether. -} -- | A collection of messages emitted by GHC during error reporting. A -- diagnostic message is typically a warning or an error. See Note [Messages]. -- -- /INVARIANT/: All the messages in this collection must be relevant, i.e. -- their 'Severity' should /not/ be 'SevIgnore'. The smart constructor -- 'mkMessages' will filter out any message which 'Severity' is 'SevIgnore'. newtype Messages e = Messages { getMessages :: Bag (MsgEnvelope e) } deriving newtype (Semigroup, Monoid) deriving stock (Functor, Foldable, Traversable) emptyMessages :: Messages e emptyMessages = Messages emptyBag mkMessages :: Bag (MsgEnvelope e) -> Messages e mkMessages = Messages . filterBag interesting where interesting :: MsgEnvelope e -> Bool interesting = (/=) SevIgnore . errMsgSeverity isEmptyMessages :: Messages e -> Bool isEmptyMessages (Messages msgs) = isEmptyBag msgs singleMessage :: MsgEnvelope e -> Messages e singleMessage e = addMessage e emptyMessages instance Diagnostic e => Outputable (Messages e) where ppr msgs = braces (vcat (map ppr_one (bagToList (getMessages msgs)))) where ppr_one :: MsgEnvelope e -> SDoc ppr_one envelope = vcat [ text "Resolved:" <+> ppr (errMsgReason envelope), pprDiagnostic (errMsgDiagnostic envelope) ] instance Diagnostic e => ToJson (Messages e) where json msgs = JSArray . toList $ json <$> getMessages msgs {- Note [Discarding Messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Discarding a 'SevIgnore' message from 'addMessage' and 'unionMessages' is just an optimisation, as GHC would /also/ suppress any diagnostic which severity is 'SevIgnore' before printing the message: See for example 'putLogMsg' and 'defaultLogAction'. 
-} -- | Adds a 'Message' to the input collection of messages. -- See Note [Discarding Messages]. addMessage :: MsgEnvelope e -> Messages e -> Messages e addMessage x (Messages xs) | SevIgnore <- errMsgSeverity x = Messages xs | otherwise = Messages (x `consBag` xs) -- | Joins two collections of messages together. -- See Note [Discarding Messages]. unionMessages :: Messages e -> Messages e -> Messages e unionMessages (Messages msgs1) (Messages msgs2) = Messages (msgs1 `unionBags` msgs2) -- | Joins many 'Messages's together unionManyMessages :: Foldable f => f (Messages e) -> Messages e unionManyMessages = fold filterMessages :: (MsgEnvelope e -> Bool) -> Messages e -> Messages e filterMessages f (Messages msgs) = Messages (filterBag f msgs) -- | A 'DecoratedSDoc' is isomorphic to a '[SDoc]' but it carries the -- invariant that the input '[SDoc]' needs to be rendered /decorated/ into its -- final form, where the typical case would be adding bullets between each -- elements of the list. The type of decoration depends on the formatting -- function used, but in practice GHC uses the 'formatBulleted'. newtype DecoratedSDoc = Decorated { unDecorated :: [SDoc] } -- | Creates a new 'DecoratedSDoc' out of a list of 'SDoc'. mkDecorated :: [SDoc] -> DecoratedSDoc mkDecorated = Decorated -- | Creates a new 'DecoratedSDoc' out of a single 'SDoc' mkSimpleDecorated :: SDoc -> DecoratedSDoc mkSimpleDecorated doc = Decorated [doc] -- | Joins two 'DecoratedSDoc' together. The resulting 'DecoratedSDoc' -- will have a number of entries which is the sum of the lengths of -- the input. unionDecoratedSDoc :: DecoratedSDoc -> DecoratedSDoc -> DecoratedSDoc unionDecoratedSDoc (Decorated s1) (Decorated s2) = Decorated (s1 `mappend` s2) -- | Apply a transformation function to all elements of a 'DecoratedSDoc'. 
mapDecoratedSDoc :: (SDoc -> SDoc) -> DecoratedSDoc -> DecoratedSDoc mapDecoratedSDoc f (Decorated s1) = Decorated (map f s1) class HasDefaultDiagnosticOpts opts where defaultOpts :: opts defaultDiagnosticOpts :: forall opts . HasDefaultDiagnosticOpts (DiagnosticOpts opts) => DiagnosticOpts opts defaultDiagnosticOpts = defaultOpts @(DiagnosticOpts opts) -- | A class identifying a diagnostic. -- Dictionary.com defines a diagnostic as: -- -- \"a message output by a computer diagnosing an error in a computer program, -- computer system, or component device\". -- -- A 'Diagnostic' carries the /actual/ description of the message (which, in -- GHC's case, it can be an error or a warning) and the /reason/ why such -- message was generated in the first place. class (HasDefaultDiagnosticOpts (DiagnosticOpts a)) => Diagnostic a where -- | Type of configuration options for the diagnostic. type DiagnosticOpts a -- | Extract the error message text from a 'Diagnostic'. diagnosticMessage :: DiagnosticOpts a -> a -> DecoratedSDoc -- | Extract the reason for this diagnostic. For warnings, -- a 'DiagnosticReason' includes the warning flag. diagnosticReason :: a -> DiagnosticReason -- | Extract any hints a user might use to repair their -- code to avoid this diagnostic. diagnosticHints :: a -> [GhcHint] -- | Get the 'DiagnosticCode' associated with this 'Diagnostic'. -- This can return 'Nothing' for at least two reasons: -- -- 1. The message might be from a plugin that does not supply codes. -- 2. The message might not yet have been assigned a code. See the -- 'Diagnostic' instance for 'DiagnosticMessage'. -- -- Ideally, case (2) would not happen, but because -- some errors in GHC still use the old system of just writing the -- error message in-place (instead of using a dedicated error type -- and constructor), we do not have error codes for all errors. -- #18516 tracks our progress toward this goal. 
diagnosticCode :: a -> Maybe DiagnosticCode -- | An existential wrapper around an unknown diagnostic. data UnknownDiagnostic opts where UnknownDiagnostic :: (Diagnostic a, Typeable a) => (opts -> DiagnosticOpts a) -- Inject the options of the outer context -- into the options for the wrapped diagnostic. -> a -> UnknownDiagnostic opts instance HasDefaultDiagnosticOpts opts => Diagnostic (UnknownDiagnostic opts) where type DiagnosticOpts (UnknownDiagnostic opts) = opts diagnosticMessage opts (UnknownDiagnostic f diag) = diagnosticMessage (f opts) diag diagnosticReason (UnknownDiagnostic _ diag) = diagnosticReason diag diagnosticHints (UnknownDiagnostic _ diag) = diagnosticHints diag diagnosticCode (UnknownDiagnostic _ diag) = diagnosticCode diag -- A fallback 'DiagnosticOpts' which can be used when there are no options -- for a particular diagnostic. data NoDiagnosticOpts = NoDiagnosticOpts instance HasDefaultDiagnosticOpts NoDiagnosticOpts where defaultOpts = NoDiagnosticOpts -- | Make a "simple" unknown diagnostic which doesn't have any configuration options. mkSimpleUnknownDiagnostic :: (Diagnostic a, Typeable a, DiagnosticOpts a ~ NoDiagnosticOpts) => a -> UnknownDiagnostic b mkSimpleUnknownDiagnostic = UnknownDiagnostic (const NoDiagnosticOpts) -- | Make an unknown diagnostic which uses the same options as the context it will be embedded into. mkUnknownDiagnostic :: (Typeable a, Diagnostic a) => a -> UnknownDiagnostic (DiagnosticOpts a) mkUnknownDiagnostic = UnknownDiagnostic id -- | Embed a more complicated diagnostic which requires a potentially different options type. embedUnknownDiagnostic :: (Diagnostic a, Typeable a) => (opts -> DiagnosticOpts a) -> a -> UnknownDiagnostic opts embedUnknownDiagnostic = UnknownDiagnostic -------------------------------------------------------------------------------- pprDiagnostic :: forall e . 
Diagnostic e => e -> SDoc pprDiagnostic e = vcat [ ppr (diagnosticReason e) , nest 2 (vcat (unDecorated (diagnosticMessage opts e))) ] where opts = defaultDiagnosticOpts @e -- | A generic 'Hint' message, to be used with 'DiagnosticMessage'. data DiagnosticHint = DiagnosticHint !SDoc instance Outputable DiagnosticHint where ppr (DiagnosticHint msg) = msg -- | A generic 'Diagnostic' message, without any further classification or -- provenance: By looking at a 'DiagnosticMessage' we don't know neither -- /where/ it was generated nor how to interpret its payload (as it's just a -- structured document). All we can do is to print it out and look at its -- 'DiagnosticReason'. data DiagnosticMessage = DiagnosticMessage { diagMessage :: !DecoratedSDoc , diagReason :: !DiagnosticReason , diagHints :: [GhcHint] } instance Diagnostic DiagnosticMessage where type DiagnosticOpts DiagnosticMessage = NoDiagnosticOpts diagnosticMessage _ = diagMessage diagnosticReason = diagReason diagnosticHints = diagHints diagnosticCode _ = Nothing -- | Helper function to use when no hints can be provided. Currently this function -- can be used to construct plain 'DiagnosticMessage' and add hints to them, but -- once #18516 will be fully executed, the main usage of this function would be in -- the implementation of the 'diagnosticHints' typeclass method, to report the fact -- that a particular 'Diagnostic' has no hints. 
noHints :: [GhcHint] noHints = mempty mkPlainDiagnostic :: DiagnosticReason -> [GhcHint] -> SDoc -> DiagnosticMessage mkPlainDiagnostic rea hints doc = DiagnosticMessage (mkSimpleDecorated doc) rea hints -- | Create an error 'DiagnosticMessage' holding just a single 'SDoc' mkPlainError :: [GhcHint] -> SDoc -> DiagnosticMessage mkPlainError hints doc = DiagnosticMessage (mkSimpleDecorated doc) ErrorWithoutFlag hints -- | Create a 'DiagnosticMessage' from a list of bulleted SDocs and a 'DiagnosticReason' mkDecoratedDiagnostic :: DiagnosticReason -> [GhcHint] -> [SDoc] -> DiagnosticMessage mkDecoratedDiagnostic rea hints docs = DiagnosticMessage (mkDecorated docs) rea hints -- | Create an error 'DiagnosticMessage' from a list of bulleted SDocs mkDecoratedError :: [GhcHint] -> [SDoc] -> DiagnosticMessage mkDecoratedError hints docs = DiagnosticMessage (mkDecorated docs) ErrorWithoutFlag hints -- | The reason /why/ a 'Diagnostic' was emitted in the first place. -- Diagnostic messages are born within GHC with a very precise reason, which -- can be completely statically-computed (i.e. this is an error or a warning -- no matter what), or influenced by the specific state of the 'DynFlags' at -- the moment of the creation of a new 'Diagnostic'. For example, a parsing -- error is /always/ going to be an error, whereas a 'WarningWithoutFlag -- Opt_WarnUnusedImports' might turn into an error due to '-Werror' or -- '-Werror=warn-unused-imports'. Interpreting a 'DiagnosticReason' together -- with its associated 'Severity' gives us the full picture. data DiagnosticReason = WarningWithoutFlag -- ^ Born as a warning. | WarningWithFlags !(NE.NonEmpty WarningFlag) -- ^ Warning was enabled with the flag. | WarningWithCategory !WarningCategory -- ^ Warning was enabled with a custom category. | ErrorWithoutFlag -- ^ Born as an error. 
deriving (Eq, Show) -- | Like a 'DiagnosticReason', but resolved against a specific set of `DynFlags` to -- work out which warning flag actually enabled this warning. newtype ResolvedDiagnosticReason = ResolvedDiagnosticReason { resolvedDiagnosticReason :: DiagnosticReason } -- | The single warning case 'DiagnosticReason' is very common. pattern WarningWithFlag :: WarningFlag -> DiagnosticReason pattern WarningWithFlag w = WarningWithFlags (w :| []) {- Note [Warnings controlled by multiple flags] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diagnostics that started life as flag-controlled warnings have a 'diagnosticReason' of 'WarningWithFlags', giving the flags that control the warning. Usually there is only one flag, but in a few cases multiple flags apply. Where there are more than one, they are listed highest-priority first. For example, the same exported binding may give rise to a warning if either `-Wmissing-signatures` or `-Wmissing-exported-signatures` is enabled. Here `-Wmissing-signatures` has higher priority, because we want to mention it if before are enabled. See `missingSignatureWarningFlags` for the specific logic in this case. When reporting such a warning to the user, it is important to mention the correct flag (e.g. `-Wmissing-signatures` if it is enabled, or `-Wmissing-exported-signatures` if only the latter is enabled). Thus `diag_reason_severity` filters the `DiagnosticReason` based on the currently active `DiagOpts`. For a `WarningWithFlags` it returns only the flags that are enabled; it leaves other `DiagnosticReason`s unchanged. This is then wrapped in a `ResolvedDiagnosticReason` newtype which records that this filtering has taken place. If we have `-Wmissing-signatures -Werror=missing-exported-signatures` we want the error to mention `-Werror=missing-exported-signatures` (even though `-Wmissing-signatures` would normally take precedence). Thus if there are any fatal warnings, `diag_reason_severity` returns those alone. 
The `MsgEnvelope` stores the filtered `ResolvedDiagnosticReason` listing only the relevant flags for subsequent display. Side note: we do not treat `-Wmissing-signatures` as a warning group that includes `-Wmissing-exported-signatures`, because (a) this would require us to provide a flag for the complement, and (b) currently, in `-Wmissing-exported-signatures -Wno-missing-signatures`, the latter option does not switch off the former. -} instance Outputable DiagnosticReason where ppr = \case WarningWithoutFlag -> text "WarningWithoutFlag" WarningWithFlags wf -> text ("WarningWithFlags " ++ show wf) WarningWithCategory cat -> text "WarningWithCategory" <+> ppr cat ErrorWithoutFlag -> text "ErrorWithoutFlag" instance Outputable ResolvedDiagnosticReason where ppr = ppr . resolvedDiagnosticReason -- | An envelope for GHC's facts about a running program, parameterised over the -- /domain-specific/ (i.e. parsing, typecheck-renaming, etc) diagnostics. -- -- To say things differently, GHC emits /diagnostics/ about the running -- program, each of which is wrapped into a 'MsgEnvelope' that carries -- specific information like where the error happened, etc. Finally, multiple -- 'MsgEnvelope's are aggregated into 'Messages' that are returned to the -- user. data MsgEnvelope e = MsgEnvelope { errMsgSpan :: SrcSpan -- ^ The SrcSpan is used for sorting errors into line-number order , errMsgContext :: NamePprCtx , errMsgDiagnostic :: e , errMsgSeverity :: Severity , errMsgReason :: ResolvedDiagnosticReason -- ^ The actual reason caused this message -- -- See Note [Warnings controlled by multiple flags] } deriving (Functor, Foldable, Traversable) -- | The class for a diagnostic message. The main purpose is to classify a -- message within GHC, to distinguish it from a debug/dump message vs a proper -- diagnostic, for which we include a 'DiagnosticReason'. 
data MessageClass = MCOutput | MCFatal | MCInteractive | MCDump -- ^ Log message intended for compiler developers -- No file\/line\/column stuff | MCInfo -- ^ Log messages intended for end users. -- No file\/line\/column stuff. | MCDiagnostic Severity ResolvedDiagnosticReason (Maybe DiagnosticCode) -- ^ Diagnostics from the compiler. This constructor is very powerful as -- it allows the construction of a 'MessageClass' with a completely -- arbitrary permutation of 'Severity' and 'DiagnosticReason'. As such, -- users are encouraged to use the 'mkMCDiagnostic' smart constructor -- instead. Use this constructor directly only if you need to construct -- and manipulate diagnostic messages directly, for example inside -- 'GHC.Utils.Error'. In all the other circumstances, /especially/ when -- emitting compiler diagnostics, use the smart constructor. -- -- The @Maybe 'DiagnosticCode'@ field carries a code (if available) for -- this diagnostic. If you are creating a message not tied to any -- error-message type, then use Nothing. In the long run, this really -- should always have a 'DiagnosticCode'. See Note [Diagnostic codes]. {- Note [Suppressing Messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'SevIgnore' constructor is used to generate messages for diagnostics which are meant to be suppressed and not reported to the user: the classic example are warnings for which the user didn't enable the corresponding 'WarningFlag', so GHC shouldn't print them. A different approach would be to extend the zoo of 'mkMsgEnvelope' functions to return a 'Maybe (MsgEnvelope e)', so that we won't need to even create the message to begin with. 
Both approaches have been evaluated, but we settled on the "SevIgnore one" for a number of reasons: * It's less invasive to deal with; * It plays slightly better with deferred diagnostics (see 'GHC.Tc.Errors') as for those we need to be able to /always/ produce a message (so that is reported at runtime); * It gives us more freedom: we can still decide to drop a 'SevIgnore' message at leisure, or we can decide to keep it around until the last moment. Maybe in the future we would need to turn a 'SevIgnore' into something else, for example to "unsuppress" diagnostics if a flag is set: with this approach, we have more leeway to accommodate new features. -} -- | Used to describe warnings and errors -- o The message has a file\/line\/column heading, -- plus "warning:" or "error:", -- added by mkLocMessage -- o With 'SevIgnore' the message is suppressed -- o Output is intended for end users data Severity = SevIgnore -- ^ Ignore this message, for example in -- case of suppression of warnings users -- don't want to see. 
See Note [Suppressing Messages] | SevWarning | SevError deriving (Eq, Ord, Show) instance Outputable Severity where ppr = \case SevIgnore -> text "SevIgnore" SevWarning -> text "SevWarning" SevError -> text "SevError" instance ToJson Severity where json SevIgnore = JSString "Ignore" json SevWarning = JSString "Warning" json SevError = JSString "Error" instance ToJson MessageClass where json MCOutput = JSString "MCOutput" json MCFatal = JSString "MCFatal" json MCInteractive = JSString "MCInteractive" json MCDump = JSString "MCDump" json MCInfo = JSString "MCInfo" json (MCDiagnostic sev reason code) = JSString $ renderWithContext defaultSDocContext (ppr $ text "MCDiagnostic" <+> ppr sev <+> ppr reason <+> ppr code) instance ToJson DiagnosticCode where json c = JSInt (fromIntegral (diagnosticCodeNumber c)) {- Note [Diagnostic Message JSON Schema] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The below instance of ToJson must conform to the JSON schema specified in docs/users_guide/diagnostics-as-json-schema-1_1.json. When the schema is altered, please bump the version. If the content is altered in a backwards compatible way, update the minor version (e.g. 1.3 ~> 1.4). If the content is breaking, update the major version (e.g. 1.3 ~> 2.0). When updating the schema, replace the above file and name it appropriately with the version appended, and change the documentation of the -fdiagnostics-as-json flag to reflect the new schema. To learn more about JSON schemas, check out the below link: https://json-schema.org -} schemaVersion :: String schemaVersion = "1.1" -- See Note [Diagnostic Message JSON Schema] before editing! 
instance Diagnostic e => ToJson (MsgEnvelope e) where json m = JSObject $ [ ("version", JSString schemaVersion), ("ghcVersion", JSString $ "ghc-" ++ cProjectVersion), ("span", json $ errMsgSpan m), ("severity", json $ errMsgSeverity m), ("code", maybe JSNull json (diagnosticCode diag)), ("message", JSArray $ map renderToJSString diagMsg), ("hints", JSArray $ map (renderToJSString . ppr) (diagnosticHints diag) ) ] ++ [ ("reason", reasonJson) | reasonJson <- maybeToList $ usefulReasonJson_maybe (errMsgReason m) ] where diag = errMsgDiagnostic m opts = defaultDiagnosticOpts @e style = mkErrStyle (errMsgContext m) ctx = defaultSDocContext {sdocStyle = style } diagMsg = filter (not . isEmpty ctx) (unDecorated (diagnosticMessage (opts) diag)) renderToJSString :: SDoc -> JsonDoc renderToJSString = JSString . (renderWithContext ctx) usefulReasonJson_maybe :: ResolvedDiagnosticReason -> Maybe JsonDoc usefulReasonJson_maybe (ResolvedDiagnosticReason rea) = case rea of WarningWithoutFlag -> Nothing ErrorWithoutFlag -> Nothing WarningWithFlags flags -> Just $ JSObject [ ("flags", JSArray $ map (JSString . NE.head . warnFlagNames) (NE.toList flags)) ] WarningWithCategory (WarningCategory cat) -> Just $ JSObject [ ("category", JSString $ unpackFS cat) ] instance Show (MsgEnvelope DiagnosticMessage) where show = showMsgEnvelope -- | Shows an 'MsgEnvelope'. Only use this for debugging. showMsgEnvelope :: forall a . Diagnostic a => MsgEnvelope a -> String showMsgEnvelope err = renderWithContext defaultSDocContext (vcat (unDecorated . (diagnosticMessage (defaultDiagnosticOpts @a)) $ errMsgDiagnostic err)) pprMessageBag :: Bag SDoc -> SDoc pprMessageBag msgs = vcat (punctuate blankLine (bagToList msgs)) mkLocMessage :: MessageClass -- ^ What kind of message? -> SrcSpan -- ^ location -> SDoc -- ^ message -> SDoc mkLocMessage = mkLocMessageWarningGroups True -- | Make an error message with location info, specifying whether to show -- warning groups (if applicable). 
mkLocMessageWarningGroups :: Bool -- ^ Print warning groups (if applicable)? -> MessageClass -- ^ What kind of message? -> SrcSpan -- ^ location -> SDoc -- ^ message -> SDoc -- Always print the location, even if it is unhelpful. Error messages -- are supposed to be in a standard format, and one without a location -- would look strange. Better to say explicitly "". mkLocMessageWarningGroups show_warn_groups msg_class locn msg = sdocOption sdocColScheme $ \col_scheme -> let locn' = sdocOption sdocErrorSpans $ \case True -> ppr locn False -> ppr (srcSpanStart locn) msg_colour = getMessageClassColour msg_class col_scheme col = coloured msg_colour . text msg_title = coloured msg_colour $ case msg_class of MCDiagnostic SevError _ _ -> text "error" MCDiagnostic SevWarning _ _ -> text "warning" MCFatal -> text "fatal" _ -> empty warning_flag_doc = case msg_class of MCDiagnostic sev reason _code | Just msg <- flag_msg sev (resolvedDiagnosticReason reason) -> brackets msg _ -> empty ppr_with_hyperlink code = -- this is a bit hacky, but we assume that if the terminal supports colors -- then it should also support links sdocOption (\ ctx -> sdocPrintErrIndexLinks ctx) $ \ use_hyperlinks -> if use_hyperlinks then ppr $ LinkedDiagCode code else ppr code code_doc = case msg_class of MCDiagnostic _ _ (Just code) -> brackets (ppr_with_hyperlink code) _ -> empty flag_msg :: Severity -> DiagnosticReason -> Maybe SDoc flag_msg SevIgnore _ = Nothing -- The above can happen when displaying an error message -- in a log file, e.g. with -ddump-tc-trace. It should not -- happen otherwise, though. 
flag_msg SevError WarningWithoutFlag = Just (col "-Werror") flag_msg SevError (WarningWithFlags (wflag :| _)) = let name = NE.head (warnFlagNames wflag) in Just $ col ("-W" ++ name) <+> warn_flag_grp (smallestWarningGroups wflag) <> comma <+> col ("Werror=" ++ name) flag_msg SevError (WarningWithCategory cat) = Just $ coloured msg_colour (text "-W" <> ppr cat) <+> warn_flag_grp smallestWarningGroupsForCategory <> comma <+> coloured msg_colour (text "-Werror=" <> ppr cat) flag_msg SevError ErrorWithoutFlag = Nothing flag_msg SevWarning WarningWithoutFlag = Nothing flag_msg SevWarning (WarningWithFlags (wflag :| _)) = let name = NE.head (warnFlagNames wflag) in Just (col ("-W" ++ name) <+> warn_flag_grp (smallestWarningGroups wflag)) flag_msg SevWarning (WarningWithCategory cat) = Just (coloured msg_colour (text "-W" <> ppr cat) <+> warn_flag_grp smallestWarningGroupsForCategory) flag_msg SevWarning ErrorWithoutFlag = pprPanic "SevWarning with ErrorWithoutFlag" $ vcat [ text "locn:" <+> ppr locn , text "msg:" <+> ppr msg ] warn_flag_grp groups | show_warn_groups, not (null groups) = text $ "(in " ++ intercalate ", " (map (("-W"++) . 
warningGroupName) groups) ++ ")" | otherwise = empty -- Add prefixes, like Foo.hs:34: warning: -- header = locn' <> colon <+> msg_title <> colon <+> code_doc <+> warning_flag_doc in coloured (Col.sMessage col_scheme) (hang (coloured (Col.sHeader col_scheme) header) 4 msg) getMessageClassColour :: MessageClass -> Col.Scheme -> Col.PprColour getMessageClassColour (MCDiagnostic SevError _reason _code) = Col.sError getMessageClassColour (MCDiagnostic SevWarning _reason _code) = Col.sWarning getMessageClassColour MCFatal = Col.sFatal getMessageClassColour _ = const mempty getCaretDiagnostic :: MessageClass -> SrcSpan -> IO SDoc getCaretDiagnostic _ (UnhelpfulSpan _) = pure empty getCaretDiagnostic msg_class (RealSrcSpan span _) = caretDiagnostic <$> getSrcLine (srcSpanFile span) row where getSrcLine fn i = getLine i (unpackFS fn) `catchException` \(_ :: IOError) -> pure Nothing getLine i fn = do -- StringBuffer has advantages over readFile: -- (a) no lazy IO, otherwise IO exceptions may occur in pure code -- (b) always UTF-8, rather than some system-dependent encoding -- (Haskell source code must be UTF-8 anyway) content <- hGetStringBuffer fn case atLine i content of Just at_line -> pure $ case lines (fix <$> lexemeToString at_line (len at_line)) of srcLine : _ -> Just srcLine _ -> Nothing _ -> pure Nothing -- allow user to visibly see that their code is incorrectly encoded -- (StringBuffer.nextChar uses \0 to represent undecodable characters) fix '\0' = '\xfffd' fix c = c row = srcSpanStartLine span rowStr = show row multiline = row /= srcSpanEndLine span caretDiagnostic Nothing = empty caretDiagnostic (Just srcLineWithNewline) = sdocOption sdocColScheme$ \col_scheme -> let sevColour = getMessageClassColour msg_class col_scheme marginColour = Col.sMargin col_scheme in coloured marginColour (text marginSpace) <> text ("\n") <> coloured marginColour (text marginRow) <> text (" " ++ srcLinePre) <> coloured sevColour (text srcLineSpan) <> text (srcLinePost ++ "\n") <> 
coloured marginColour (text marginSpace) <> coloured sevColour (text (" " ++ caretLine)) where -- expand tabs in a device-independent manner #13664 expandTabs tabWidth i s = case s of "" -> "" '\t' : cs -> replicate effectiveWidth ' ' ++ expandTabs tabWidth (i + effectiveWidth) cs c : cs -> c : expandTabs tabWidth (i + 1) cs where effectiveWidth = tabWidth - i `mod` tabWidth srcLine = filter (/= '\n') (expandTabs 8 0 srcLineWithNewline) start = srcSpanStartCol span - 1 end | multiline = length srcLine | otherwise = srcSpanEndCol span - 1 width = max 1 (end - start) marginWidth = length rowStr marginSpace = replicate marginWidth ' ' ++ " |" marginRow = rowStr ++ " |" (srcLinePre, srcLineRest) = splitAt start srcLine (srcLineSpan, srcLinePost) = splitAt width srcLineRest caretEllipsis | multiline = "..." | otherwise = "" caretLine = replicate start ' ' ++ replicate width '^' ++ caretEllipsis -- -- Queries -- {- Note [Intrinsic And Extrinsic Failures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We distinguish between /intrinsic/ and /extrinsic/ failures. We classify in the former category those diagnostics which are /essentially/ failures, and their nature can't be changed. This is the case for 'ErrorWithoutFlag'. We classify as /extrinsic/ all those diagnostics (like fatal warnings) which are born as warnings but which are still failures under particular 'DynFlags' settings. It's important to be aware of such logic distinction, because when we are inside the typechecker or the desugarer, we are interested about intrinsic errors, and to bail out as soon as we find one of them. Conversely, if we find an /extrinsic/ one, for example because a particular 'WarningFlag' makes a warning into an error, we /don't/ want to bail out, that's still not the right time to do so: Rather, we want to first collect all the diagnostics, and later classify and report them appropriately (in the driver). -} -- | Returns 'True' if this is, intrinsically, a failure. 
See -- Note [Intrinsic And Extrinsic Failures]. isIntrinsicErrorMessage :: Diagnostic e => MsgEnvelope e -> Bool isIntrinsicErrorMessage = (==) ErrorWithoutFlag . resolvedDiagnosticReason . errMsgReason isWarningMessage :: Diagnostic e => MsgEnvelope e -> Bool isWarningMessage = not . isIntrinsicErrorMessage -- | Are there any hard errors here? -Werror warnings are /not/ detected. If -- you want to check for -Werror warnings, use 'errorsOrFatalWarningsFound'. errorsFound :: Diagnostic e => Messages e -> Bool errorsFound (Messages msgs) = any isIntrinsicErrorMessage msgs -- | Returns 'True' if the envelope contains a message that will stop -- compilation: either an intrinsic error or a fatal (-Werror) warning isExtrinsicErrorMessage :: MsgEnvelope e -> Bool isExtrinsicErrorMessage = (==) SevError . errMsgSeverity -- | Are there any errors or -Werror warnings here? errorsOrFatalWarningsFound :: Messages e -> Bool errorsOrFatalWarningsFound (Messages msgs) = any isExtrinsicErrorMessage msgs getWarningMessages :: Diagnostic e => Messages e -> Bag (MsgEnvelope e) getWarningMessages (Messages xs) = fst $ partitionBag isWarningMessage xs getErrorMessages :: Diagnostic e => Messages e -> Bag (MsgEnvelope e) getErrorMessages (Messages xs) = fst $ partitionBag isIntrinsicErrorMessage xs -- | Partitions the 'Messages' and returns a tuple which first element are the -- warnings, and the second the errors. partitionMessages :: Diagnostic e => Messages e -> (Messages e, Messages e) partitionMessages (Messages xs) = bimap Messages Messages (partitionBag isWarningMessage xs) ---------------------------------------------------------------- -- -- -- Definition of diagnostic codes -- -- -- ---------------------------------------------------------------- -- | A diagnostic code is a namespaced numeric identifier -- unique to the given diagnostic (error or warning). -- -- All diagnostic codes defined within GHC are given the -- GHC namespace. 
-- -- See Note [Diagnostic codes] in GHC.Types.Error.Codes. data DiagnosticCode = DiagnosticCode { diagnosticCodeNameSpace :: String -- ^ diagnostic code prefix (e.g. "GHC") , diagnosticCodeNumber :: Natural -- ^ the actual diagnostic code } deriving ( Eq, Ord ) instance Show DiagnosticCode where show (DiagnosticCode prefix c) = prefix ++ "-" ++ printf "%05d" c -- pad the numeric code to have at least 5 digits instance Outputable DiagnosticCode where ppr code = text (show code) -- | A newtype that is a witness to the `-fprint-error-index-links` flag. It -- alters the @Outputable@ instance to emit @DiagnosticCode@ as ANSI hyperlinks -- to the HF error index newtype LinkedDiagCode = LinkedDiagCode DiagnosticCode instance Outputable LinkedDiagCode where ppr (LinkedDiagCode d@DiagnosticCode{}) = linkEscapeCode d -- | Wrap the link in terminal escape codes specified by OSC 8. linkEscapeCode :: DiagnosticCode -> SDoc linkEscapeCode d = text "\ESC]8;;" <> hfErrorLink d -- make the actual link <> text "\ESC\\" <> ppr d <> text "\ESC]8;;\ESC\\" -- the rest is the visible text -- | create a link to the HF error index given an error code. 
hfErrorLink :: DiagnosticCode -> SDoc hfErrorLink errorCode = text "https://errors.haskell.org/messages/" <> ppr errorCode ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Error/0000755000000000000000000000000007346545000020103 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Error/Codes.hs0000644000000000000000000023537307346545000021511 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- | Defines diagnostic codes for the diagnostics emitted by GHC. -- -- A diagnostic code is a numeric unique identifier for a diagnostic. -- See Note [Diagnostic codes]. module GHC.Types.Error.Codes ( GhcDiagnosticCode, constructorCode, constructorCodes ) where import GHC.Prelude import GHC.Types.Error ( DiagnosticCode(..), UnknownDiagnostic (..), diagnosticCode, NoDiagnosticOpts ) import GHC.Hs.Extension ( GhcRn ) import GHC.Core.InstEnv (LookupInstanceErrReason) import GHC.Iface.Errors.Types import GHC.Driver.Errors.Types ( DriverMessage, GhcMessageOpts, DriverMessageOpts ) import GHC.Parser.Errors.Types ( PsMessage, PsHeaderMessage ) import GHC.HsToCore.Errors.Types ( DsMessage ) import GHC.Tc.Errors.Types import GHC.Unit.Module.Warnings ( WarningTxt ) import GHC.Utils.Panic.Plain import Data.Kind ( Type, Constraint ) import GHC.Exts ( proxy# ) import GHC.Generics import GHC.TypeLits ( Symbol, KnownSymbol, symbolVal' , TypeError, ErrorMessage(..) 
) import GHC.TypeNats ( Nat, KnownNat, natVal' ) import Data.Map.Strict ( Map ) import qualified Data.Map.Strict as Map {- Note [Diagnostic codes] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Every time a new diagnostic (error or warning) is introduced to GHC, it is assigned a new numeric code, which has never been used before. To ensure uniqueness across GHC versions, we proceed as follows: - all diagnostic codes are defined in a single module, GHC.Types.Error.Codes. - uniqueness of diagnostic codes is ensured by the use of an injective type family, GhcDiagnosticCode, - a diagnostic code never gets deleted from the GhcDiagnosticCode type family in GHC.Types.Error.Codes, even if it is no longer used. Older versions of GHC might still display the code, and we don't want that old code to get confused with the error code of a different, new, error message. [Instructions for adding a new diagnostic code] After adding a constructor to a diagnostic datatype, such as PsMessage, TcRnMessage, DsMessage or DriverMessage, you can add corresponding diagnostic codes as follows: a. To give a single diagnostic code to the constructor, simply add a type family equation to GHC.Error.Codes.GhcDiagnosticCode, e.g.: GhcDiagnosticCode "MyNewErrorConstructor" = 12345 You can obtain new randomly-generated error codes by using https://www.random.org/integers/?num=10&min=1&max=99999&col=1&base=10&format=plain You will get a type error if you try to use an error code that is already used by another constructor. b. If you instead require more granular diagnostic codes, add a type family equation to GHC.Error.Codes.ConRecursInto, specifying which argument to recur into to obtain an diagnostic code. For example, the 'TcRnCannotDeriveInstance' constructor is associated with several diagnostic codes, depending on the value of the argument of type 'DeriveInstanceErrReason'. 
This is achieved as follows: - The equation ConRecursInto "TcRnCannotDeriveInstance" = 'Just DeriveInstanceErrReason says to recur into the argument of type 'DeriveInstanceErrReason' to get a diagnostic code. - The equations GhcDiagnosticCode "DerivErrNotWellKinded" = 62016 GhcDiagnosticCode "DerivErrSafeHaskellGenericInst" = 07214 GhcDiagnosticCode "DerivErrDerivingViaWrongKind" = 63174 ... give the diagnostic codes for the various constructors of DeriveInstanceErrReason. These are added following the procedure in (a). Never remove a return value from the 'GhcDiagnosticCode' type family! Outdated error messages must still be tracked to ensure uniqueness of diagnostic codes across GHC versions. -} {- ********************************************************************* * * The GhcDiagnosticCode type family * * ********************************************************************* -} -- | This function obtain a diagnostic code by looking up the constructor -- name using generics, and using the 'GhcDiagnosticCode' type family. constructorCode :: (Generic diag, GDiagnosticCode (Rep diag)) => diag -> Maybe DiagnosticCode constructorCode diag = gdiagnosticCode (from diag) -- | This function computes all diagnostic codes that occur inside a given -- type using generics and the 'GhcDiagnosticCode' type family. -- -- For example, if @T = MkT1 | MkT2@, @GhcDiagnosticCode \"MkT1\" = 123@ and -- @GhcDiagnosticCode \"MkT2\" = 456@, then we will get -- > constructorCodes @T = fromList [ (123, \"MkT1\"), (456, \"MkT2\") ] constructorCodes :: forall diag. (Generic diag, GDiagnosticCodes '[diag] (Rep diag)) => Map DiagnosticCode String constructorCodes = gdiagnosticCodes @'[diag] @(Rep diag) -- See Note [diagnosticCodes: don't recur into already-seen types] -- for the @'[diag] type argument. -- | Type family computing the numeric diagnostic code for a given error message constructor. -- -- Its injectivity annotation ensures uniqueness of error codes. 
-- -- Never remove a return value from this type family! Outdated error messages must still -- be tracked here to ensure uniqueness of diagnostic codes across GHC versions. -- -- See Note [Diagnostic codes] in GHC.Types.Error. type GhcDiagnosticCode :: Symbol -> Nat type family GhcDiagnosticCode c = n | n -> c where -- Desugarer diagnostic codes GhcDiagnosticCode "DsEmptyEnumeration" = 10190 GhcDiagnosticCode "DsIdentitiesFound" = 04214 GhcDiagnosticCode "DsOverflowedLiterals" = 97441 GhcDiagnosticCode "DsRedundantBangPatterns" = 38520 GhcDiagnosticCode "DsOverlappingPatterns" = 53633 GhcDiagnosticCode "DsInaccessibleRhs" = 94210 GhcDiagnosticCode "DsMaxPmCheckModelsReached" = 61505 GhcDiagnosticCode "DsNonExhaustivePatterns" = 62161 GhcDiagnosticCode "DsTopLevelBindsNotAllowed" = 48099 GhcDiagnosticCode "DsUselessSpecialiseForClassMethodSelector" = 93315 GhcDiagnosticCode "DsUselessSpecialiseForNoInlineFunction" = 38524 GhcDiagnosticCode "DsMultiplicityCoercionsNotSupported" = 59840 GhcDiagnosticCode "DsOrphanRule" = 58181 GhcDiagnosticCode "DsRuleLhsTooComplicated" = 69441 GhcDiagnosticCode "DsRuleIgnoredDueToConstructor" = 00828 GhcDiagnosticCode "DsRuleBindersNotBound" = 40548 GhcDiagnosticCode "DsLazyPatCantBindVarsOfUnliftedType" = 17879 GhcDiagnosticCode "DsNotYetHandledByTH" = 65904 GhcDiagnosticCode "DsAggregatedViewExpressions" = 19551 GhcDiagnosticCode "DsUnbangedStrictPatterns" = 21030 GhcDiagnosticCode "DsCannotMixPolyAndUnliftedBindings" = 20036 GhcDiagnosticCode "DsWrongDoBind" = 08838 GhcDiagnosticCode "DsUnusedDoBind" = 81995 GhcDiagnosticCode "DsRecBindsNotAllowedForUnliftedTys" = 20185 GhcDiagnosticCode "DsRuleMightInlineFirst" = 95396 GhcDiagnosticCode "DsAnotherRuleMightFireFirst" = 87502 GhcDiagnosticCode "DsIncompleteRecordSelector" = 17335 -- Parser diagnostic codes GhcDiagnosticCode "PsErrParseLanguagePragma" = 68686 GhcDiagnosticCode "PsErrUnsupportedExt" = 46537 GhcDiagnosticCode "PsErrParseOptionsPragma" = 24342 GhcDiagnosticCode 
"PsErrUnknownOptionsPragma" = 04924 GhcDiagnosticCode "PsWarnBidirectionalFormatChars" = 03272 GhcDiagnosticCode "PsWarnTab" = 94817 GhcDiagnosticCode "PsWarnTransitionalLayout" = 93617 GhcDiagnosticCode "PsWarnOperatorWhitespaceExtConflict" = 47082 GhcDiagnosticCode "PsWarnOperatorWhitespace" = 40798 GhcDiagnosticCode "PsWarnHaddockInvalidPos" = 94458 GhcDiagnosticCode "PsWarnHaddockIgnoreMulti" = 05641 GhcDiagnosticCode "PsWarnStarBinder" = 21887 GhcDiagnosticCode "PsWarnStarIsType" = 39567 GhcDiagnosticCode "PsWarnUnrecognisedPragma" = 42044 GhcDiagnosticCode "PsWarnMisplacedPragma" = 28007 GhcDiagnosticCode "PsWarnImportPreQualified" = 07924 GhcDiagnosticCode "PsWarnViewPatternSignatures" = 00834 GhcDiagnosticCode "PsErrLexer" = 21231 GhcDiagnosticCode "PsErrCmmLexer" = 75725 GhcDiagnosticCode "PsErrCmmParser" = 09848 GhcDiagnosticCode "PsErrParse" = 58481 GhcDiagnosticCode "PsErrTypeAppWithoutSpace" = 84077 GhcDiagnosticCode "PsErrLazyPatWithoutSpace" = 27207 GhcDiagnosticCode "PsErrBangPatWithoutSpace" = 95644 GhcDiagnosticCode "PsErrInvalidInfixHole" = 45106 GhcDiagnosticCode "PsErrExpectedHyphen" = 44524 GhcDiagnosticCode "PsErrSpaceInSCC" = 76176 GhcDiagnosticCode "PsErrEmptyDoubleQuotes" = 11861 GhcDiagnosticCode "PsErrLambdaCase" = 51179 GhcDiagnosticCode "PsErrEmptyLambda" = 71614 GhcDiagnosticCode "PsErrLinearFunction" = 31574 GhcDiagnosticCode "PsErrMultiWayIf" = 28985 GhcDiagnosticCode "PsErrOverloadedRecordUpdateNotEnabled" = 82135 GhcDiagnosticCode "PsErrNumUnderscores" = 62330 GhcDiagnosticCode "PsErrIllegalBangPattern" = 79767 GhcDiagnosticCode "PsErrOverloadedRecordDotInvalid" = 26832 GhcDiagnosticCode "PsErrIllegalPatSynExport" = 89515 GhcDiagnosticCode "PsErrOverloadedRecordUpdateNoQualifiedFields" = 94863 GhcDiagnosticCode "PsErrExplicitForall" = 25955 GhcDiagnosticCode "PsErrIllegalQualifiedDo" = 40280 GhcDiagnosticCode "PsErrQualifiedDoInCmd" = 54089 GhcDiagnosticCode "PsErrRecordSyntaxInPatSynDecl" = 28021 GhcDiagnosticCode 
"PsErrEmptyWhereInPatSynDecl" = 13248 GhcDiagnosticCode "PsErrInvalidWhereBindInPatSynDecl" = 24737 GhcDiagnosticCode "PsErrNoSingleWhereBindInPatSynDecl" = 65536 GhcDiagnosticCode "PsErrDeclSpliceNotAtTopLevel" = 08451 GhcDiagnosticCode "PsErrMultipleNamesInStandaloneKindSignature" = 42569 GhcDiagnosticCode "PsErrIllegalExplicitNamespace" = 47007 GhcDiagnosticCode "PsErrUnallowedPragma" = 85314 GhcDiagnosticCode "PsErrImportPostQualified" = 87491 GhcDiagnosticCode "PsErrImportQualifiedTwice" = 05661 GhcDiagnosticCode "PsErrIllegalImportBundleForm" = 81284 GhcDiagnosticCode "PsErrInvalidRuleActivationMarker" = 50396 GhcDiagnosticCode "PsErrMissingBlock" = 16849 GhcDiagnosticCode "PsErrUnsupportedBoxedSumExpr" = 09550 GhcDiagnosticCode "PsErrUnsupportedBoxedSumPat" = 16863 GhcDiagnosticCode "PsErrUnexpectedQualifiedConstructor" = 73413 GhcDiagnosticCode "PsErrTupleSectionInPat" = 09646 GhcDiagnosticCode "PsErrOpFewArgs" = 24180 GhcDiagnosticCode "PsErrVarForTyCon" = 18208 GhcDiagnosticCode "PsErrMalformedEntityString" = 26204 GhcDiagnosticCode "PsErrDotsInRecordUpdate" = 70712 GhcDiagnosticCode "PsErrInvalidDataCon" = 46574 GhcDiagnosticCode "PsErrInvalidInfixDataCon" = 30670 GhcDiagnosticCode "PsErrIllegalPromotionQuoteDataCon" = 80236 GhcDiagnosticCode "PsErrUnpackDataCon" = 40845 GhcDiagnosticCode "PsErrUnexpectedKindAppInDataCon" = 83653 GhcDiagnosticCode "PsErrInvalidRecordCon" = 08195 GhcDiagnosticCode "PsErrIllegalUnboxedStringInPat" = 69925 GhcDiagnosticCode "PsErrIllegalUnboxedFloatingLitInPat" = 76595 GhcDiagnosticCode "PsErrDoNotationInPat" = 06446 GhcDiagnosticCode "PsErrIfThenElseInPat" = 45696 GhcDiagnosticCode "PsErrLambdaCaseInPat" = Outdated 07636 GhcDiagnosticCode "PsErrCaseInPat" = 53786 GhcDiagnosticCode "PsErrLetInPat" = 78892 GhcDiagnosticCode "PsErrLambdaInPat" = 00482 GhcDiagnosticCode "PsErrArrowExprInPat" = 04584 GhcDiagnosticCode "PsErrArrowCmdInPat" = 98980 GhcDiagnosticCode "PsErrArrowCmdInExpr" = 66043 GhcDiagnosticCode 
"PsErrViewPatInExpr" = Outdated 66228 GhcDiagnosticCode "PsErrOrPatInExpr" = 66718 GhcDiagnosticCode "PsErrLambdaCmdInFunAppCmd" = 12178 GhcDiagnosticCode "PsErrCaseCmdInFunAppCmd" = 92971 GhcDiagnosticCode "PsErrLambdaCaseCmdInFunAppCmd" = Outdated 47171 GhcDiagnosticCode "PsErrIfCmdInFunAppCmd" = 97005 GhcDiagnosticCode "PsErrLetCmdInFunAppCmd" = 70526 GhcDiagnosticCode "PsErrDoCmdInFunAppCmd" = 77808 GhcDiagnosticCode "PsErrDoInFunAppExpr" = 52095 GhcDiagnosticCode "PsErrMDoInFunAppExpr" = 67630 GhcDiagnosticCode "PsErrLambdaInFunAppExpr" = 06074 GhcDiagnosticCode "PsErrCaseInFunAppExpr" = 25037 GhcDiagnosticCode "PsErrLambdaCaseInFunAppExpr" = Outdated 77182 GhcDiagnosticCode "PsErrLetInFunAppExpr" = 90355 GhcDiagnosticCode "PsErrIfInFunAppExpr" = 01239 GhcDiagnosticCode "PsErrProcInFunAppExpr" = 04807 GhcDiagnosticCode "PsErrMalformedTyOrClDecl" = 47568 GhcDiagnosticCode "PsErrIllegalWhereInDataDecl" = 36952 GhcDiagnosticCode "PsErrIllegalDataTypeContext" = 87429 GhcDiagnosticCode "PsErrPrimStringInvalidChar" = 43080 GhcDiagnosticCode "PsErrSuffixAT" = 33856 GhcDiagnosticCode "PsErrPrecedenceOutOfRange" = 25078 GhcDiagnosticCode "PsErrSemiColonsInCondExpr" = 75254 GhcDiagnosticCode "PsErrSemiColonsInCondCmd" = 18910 GhcDiagnosticCode "PsErrAtInPatPos" = 08382 GhcDiagnosticCode "PsErrParseErrorOnInput" = 66418 GhcDiagnosticCode "PsErrMalformedDecl" = 85316 GhcDiagnosticCode "PsErrNotADataCon" = 25742 GhcDiagnosticCode "PsErrInferredTypeVarNotAllowed" = 57342 GhcDiagnosticCode "PsErrIllegalTraditionalRecordSyntax" = 65719 GhcDiagnosticCode "PsErrParseErrorInCmd" = 03790 GhcDiagnosticCode "PsErrInPat" = 07626 GhcDiagnosticCode "PsErrIllegalRoleName" = 09009 GhcDiagnosticCode "PsErrInvalidTypeSignature" = 94426 GhcDiagnosticCode "PsErrUnexpectedTypeInDecl" = 77878 GhcDiagnosticCode "PsErrInvalidPackageName" = 21926 GhcDiagnosticCode "PsErrParseRightOpSectionInPat" = 72516 GhcDiagnosticCode "PsErrIllegalGadtRecordMultiplicity" = 37475 GhcDiagnosticCode 
"PsErrInvalidCApiImport" = 72744 GhcDiagnosticCode "PsErrMultipleConForNewtype" = 05380 GhcDiagnosticCode "PsErrUnicodeCharLooksLike" = 31623 GhcDiagnosticCode "PsErrInvalidPun" = 52943 GhcDiagnosticCode "PsErrIllegalOrPat" = 29847 GhcDiagnosticCode "PsErrTypeSyntaxInPat" = 32181 -- Driver diagnostic codes GhcDiagnosticCode "DriverMissingHomeModules" = 32850 GhcDiagnosticCode "DriverUnknownHiddenModules" = 38189 GhcDiagnosticCode "DriverUnknownReexportedModules" = 68286 GhcDiagnosticCode "DriverUnusedPackages" = 42258 GhcDiagnosticCode "DriverUnnecessarySourceImports" = 88907 GhcDiagnosticCode "DriverDuplicatedModuleDeclaration" = 29235 GhcDiagnosticCode "DriverModuleNotFound" = 82272 GhcDiagnosticCode "DriverFileModuleNameMismatch" = 28623 GhcDiagnosticCode "DriverUnexpectedSignature" = 66004 GhcDiagnosticCode "DriverFileNotFound" = 49196 GhcDiagnosticCode "DriverStaticPointersNotSupported" = 77799 GhcDiagnosticCode "DriverBackpackModuleNotFound" = 19971 GhcDiagnosticCode "DriverUserDefinedRuleIgnored" = 56147 GhcDiagnosticCode "DriverMixedSafetyImport" = 70172 GhcDiagnosticCode "DriverCannotLoadInterfaceFile" = 37141 GhcDiagnosticCode "DriverInferredSafeModule" = 58656 GhcDiagnosticCode "DriverMarkedTrustworthyButInferredSafe" = 19244 GhcDiagnosticCode "DriverInferredSafeImport" = 82658 GhcDiagnosticCode "DriverCannotImportUnsafeModule" = 44360 GhcDiagnosticCode "DriverMissingSafeHaskellMode" = 29747 GhcDiagnosticCode "DriverPackageNotTrusted" = 08674 GhcDiagnosticCode "DriverCannotImportFromUntrustedPackage" = 75165 GhcDiagnosticCode "DriverRedirectedNoMain" = 95379 GhcDiagnosticCode "DriverHomePackagesNotClosed" = 03271 GhcDiagnosticCode "DriverInconsistentDynFlags" = 74335 GhcDiagnosticCode "DriverSafeHaskellIgnoredExtension" = 98887 GhcDiagnosticCode "DriverPackageTrustIgnored" = 83552 GhcDiagnosticCode "DriverUnrecognisedFlag" = 93741 GhcDiagnosticCode "DriverDeprecatedFlag" = 53692 GhcDiagnosticCode "DriverModuleGraphCycle" = 92213 GhcDiagnosticCode 
"DriverInstantiationNodeInDependencyGeneration" = 74284 GhcDiagnosticCode "DriverNoConfiguredLLVMToolchain" = 66599 -- Constraint solver diagnostic codes GhcDiagnosticCode "BadTelescope" = 97739 GhcDiagnosticCode "UserTypeError" = 64725 GhcDiagnosticCode "UnsatisfiableError" = 22250 GhcDiagnosticCode "ReportHoleError" = 88464 GhcDiagnosticCode "FixedRuntimeRepError" = 55287 GhcDiagnosticCode "BlockedEquality" = 06200 GhcDiagnosticCode "ExpectingMoreArguments" = 81325 GhcDiagnosticCode "UnboundImplicitParams" = 91416 GhcDiagnosticCode "AmbiguityPreventsSolvingCt" = 78125 GhcDiagnosticCode "CannotResolveInstance" = 39999 GhcDiagnosticCode "OverlappingInstances" = 43085 GhcDiagnosticCode "UnsafeOverlap" = 36705 -- Type mismatch errors GhcDiagnosticCode "BasicMismatch" = 18872 GhcDiagnosticCode "KindMismatch" = 89223 GhcDiagnosticCode "TypeEqMismatch" = 83865 GhcDiagnosticCode "CouldNotDeduce" = 05617 -- Variable unification errors GhcDiagnosticCode "CannotUnifyWithPolytype" = 91028 GhcDiagnosticCode "OccursCheck" = 27958 GhcDiagnosticCode "SkolemEscape" = 46956 GhcDiagnosticCode "DifferentTyVars" = 25897 GhcDiagnosticCode "RepresentationalEq" = 10283 -- Typechecker/renamer diagnostic codes GhcDiagnosticCode "TcRnSolverDepthError" = 40404 GhcDiagnosticCode "TcRnRedundantConstraints" = 30606 GhcDiagnosticCode "TcRnInaccessibleCode" = 40564 GhcDiagnosticCode "TcRnInaccessibleCoAxBranch" = 28129 GhcDiagnosticCode "TcRnTypeDoesNotHaveFixedRuntimeRep" = 18478 GhcDiagnosticCode "TcRnImplicitLift" = 00846 GhcDiagnosticCode "TcRnUnusedPatternBinds" = 61367 GhcDiagnosticCode "TcRnDodgyExports" = 75356 GhcDiagnosticCode "TcRnMissingImportList" = 77037 GhcDiagnosticCode "TcRnUnsafeDueToPlugin" = 01687 GhcDiagnosticCode "TcRnModMissingRealSrcSpan" = 84170 GhcDiagnosticCode "TcRnIdNotExportedFromModuleSig" = 44188 GhcDiagnosticCode "TcRnIdNotExportedFromLocalSig" = 50058 GhcDiagnosticCode "TcRnShadowedName" = 63397 GhcDiagnosticCode "TcRnInvalidWarningCategory" = 53573 
GhcDiagnosticCode "TcRnDuplicateWarningDecls" = 00711 GhcDiagnosticCode "TcRnSimplifierTooManyIterations" = 95822 GhcDiagnosticCode "TcRnIllegalPatSynDecl" = 82077 GhcDiagnosticCode "TcRnLinearPatSyn" = 15172 GhcDiagnosticCode "TcRnEmptyRecordUpdate" = 20825 GhcDiagnosticCode "TcRnIllegalFieldPunning" = 44287 GhcDiagnosticCode "TcRnIllegalWildcardsInRecord" = 37132 GhcDiagnosticCode "TcRnIllegalWildcardInType" = 65507 GhcDiagnosticCode "TcRnIllegalNamedWildcardInTypeArgument" = 93411 GhcDiagnosticCode "TcRnIllegalImplicitTyVarInTypeArgument" = 80557 GhcDiagnosticCode "TcRnDuplicateFieldName" = 85524 GhcDiagnosticCode "TcRnIllegalViewPattern" = 22406 GhcDiagnosticCode "TcRnCharLiteralOutOfRange" = 17268 GhcDiagnosticCode "TcRnIllegalWildcardsInConstructor" = 47217 GhcDiagnosticCode "TcRnIgnoringAnnotations" = 66649 GhcDiagnosticCode "TcRnAnnotationInSafeHaskell" = 68934 GhcDiagnosticCode "TcRnInvalidTypeApplication" = 95781 GhcDiagnosticCode "TcRnTagToEnumMissingValArg" = 36495 GhcDiagnosticCode "TcRnTagToEnumUnspecifiedResTy" = 08522 GhcDiagnosticCode "TcRnTagToEnumResTyNotAnEnum" = 49356 GhcDiagnosticCode "TcRnTagToEnumResTyTypeData" = 96189 GhcDiagnosticCode "TcRnArrowIfThenElsePredDependsOnResultTy" = 55868 GhcDiagnosticCode "TcRnIllegalHsBootOrSigDecl" = 58195 GhcDiagnosticCode "TcRnRecursivePatternSynonym" = 72489 GhcDiagnosticCode "TcRnPartialTypeSigTyVarMismatch" = 88793 GhcDiagnosticCode "TcRnPartialTypeSigBadQuantifier" = 94185 GhcDiagnosticCode "TcRnMissingSignature" = 38417 GhcDiagnosticCode "TcRnPolymorphicBinderMissingSig" = 64414 GhcDiagnosticCode "TcRnOverloadedSig" = 16675 GhcDiagnosticCode "TcRnTupleConstraintInst" = 69012 GhcDiagnosticCode "TcRnUserTypeError" = 47403 GhcDiagnosticCode "TcRnConstraintInKind" = 01259 GhcDiagnosticCode "TcRnUnboxedTupleOrSumTypeFuncArg" = 19590 GhcDiagnosticCode "TcRnLinearFuncInKind" = 13218 GhcDiagnosticCode "TcRnForAllEscapeError" = 31147 GhcDiagnosticCode "TcRnVDQInTermType" = 51580 GhcDiagnosticCode 
"TcRnBadQuantPredHead" = 02550 GhcDiagnosticCode "TcRnIllegalTupleConstraint" = 77539 GhcDiagnosticCode "TcRnNonTypeVarArgInConstraint" = 80003 GhcDiagnosticCode "TcRnIllegalImplicitParam" = 75863 GhcDiagnosticCode "TcRnIllegalConstraintSynonymOfKind" = 75844 GhcDiagnosticCode "TcRnOversaturatedVisibleKindArg" = 45474 GhcDiagnosticCode "TcRnForAllRankErr" = 91510 GhcDiagnosticCode "TcRnMonomorphicBindings" = 55524 GhcDiagnosticCode "TcRnOrphanInstance" = 90177 GhcDiagnosticCode "TcRnFunDepConflict" = 46208 GhcDiagnosticCode "TcRnDupInstanceDecls" = 59692 GhcDiagnosticCode "TcRnConflictingFamInstDecls" = 34447 GhcDiagnosticCode "TcRnFamInstNotInjective" = 05175 GhcDiagnosticCode "TcRnBangOnUnliftedType" = 55666 GhcDiagnosticCode "TcRnLazyBangOnUnliftedType" = 71444 GhcDiagnosticCode "TcRnPatSynBundledWithNonDataCon" = 66775 GhcDiagnosticCode "TcRnPatSynBundledWithWrongType" = 66025 GhcDiagnosticCode "TcRnDupeModuleExport" = 51876 GhcDiagnosticCode "TcRnExportedModNotImported" = 90973 GhcDiagnosticCode "TcRnNullExportedModule" = 64649 GhcDiagnosticCode "TcRnMissingExportList" = 85401 GhcDiagnosticCode "TcRnExportHiddenComponents" = 94558 GhcDiagnosticCode "TcRnExportHiddenDefault" = 74775 GhcDiagnosticCode "TcRnDuplicateExport" = 47854 GhcDiagnosticCode "TcRnExportedParentChildMismatch" = 88993 GhcDiagnosticCode "TcRnConflictingExports" = 69158 GhcDiagnosticCode "TcRnDuplicateFieldExport" = 97219 GhcDiagnosticCode "TcRnAmbiguousFieldInUpdate" = 56428 GhcDiagnosticCode "TcRnAmbiguousRecordUpdate" = 02256 GhcDiagnosticCode "TcRnMissingFields" = 20125 GhcDiagnosticCode "TcRnFieldUpdateInvalidType" = 63055 GhcDiagnosticCode "TcRnMissingStrictFields" = 95909 GhcDiagnosticCode "TcRnStaticFormNotClosed" = 88431 GhcDiagnosticCode "TcRnIllegalStaticExpression" = 23800 GhcDiagnosticCode "TcRnUselessTypeable" = 90584 GhcDiagnosticCode "TcRnDerivingDefaults" = 20042 GhcDiagnosticCode "TcRnNonUnaryTypeclassConstraint" = 73993 GhcDiagnosticCode "TcRnPartialTypeSignatures" = 60661 
GhcDiagnosticCode "TcRnLazyGADTPattern" = 87005 GhcDiagnosticCode "TcRnArrowProcGADTPattern" = 64525 GhcDiagnosticCode "TcRnTypeEqualityOutOfScope" = 12003 GhcDiagnosticCode "TcRnTypeEqualityRequiresOperators" = 58520 GhcDiagnosticCode "TcRnIllegalTypeOperator" = 62547 GhcDiagnosticCode "TcRnGADTMonoLocalBinds" = 58008 GhcDiagnosticCode "TcRnIncorrectNameSpace" = 31891 GhcDiagnosticCode "TcRnNoRebindableSyntaxRecordDot" = 65945 GhcDiagnosticCode "TcRnNoFieldPunsRecordDot" = 57365 GhcDiagnosticCode "TcRnListComprehensionDuplicateBinding" = 81232 GhcDiagnosticCode "TcRnLastStmtNotExpr" = 55814 GhcDiagnosticCode "TcRnUnexpectedStatementInContext" = 42026 GhcDiagnosticCode "TcRnSectionWithoutParentheses" = 95880 GhcDiagnosticCode "TcRnIllegalImplicitParameterBindings" = 50730 GhcDiagnosticCode "TcRnIllegalTupleSection" = 59155 GhcDiagnosticCode "TcRnTermNameInType" = 37479 GhcDiagnosticCode "TcRnUnexpectedKindVar" = 12875 GhcDiagnosticCode "TcRnNegativeNumTypeLiteral" = 93632 GhcDiagnosticCode "TcRnUnusedQuantifiedTypeVar" = 54180 GhcDiagnosticCode "TcRnMissingRoleAnnotation" = 65490 GhcDiagnosticCode "TcRnUntickedPromotedThing" = 49957 GhcDiagnosticCode "TcRnIllegalBuiltinSyntax" = 39716 GhcDiagnosticCode "TcRnForeignImportPrimExtNotSet" = 49692 GhcDiagnosticCode "TcRnForeignImportPrimSafeAnn" = 26133 GhcDiagnosticCode "TcRnForeignFunctionImportAsValue" = 76251 GhcDiagnosticCode "TcRnFunPtrImportWithoutAmpersand" = 57989 GhcDiagnosticCode "TcRnIllegalForeignDeclBackend" = 03355 GhcDiagnosticCode "TcRnUnsupportedCallConv" = 01245 GhcDiagnosticCode "TcRnInvalidCIdentifier" = 95774 GhcDiagnosticCode "TcRnExpectedValueId" = 01570 GhcDiagnosticCode "TcRnRecSelectorEscapedTyVar" = 55876 GhcDiagnosticCode "TcRnPatSynNotBidirectional" = 16444 GhcDiagnosticCode "TcRnIllegalDerivingItem" = 11913 GhcDiagnosticCode "TcRnUnexpectedAnnotation" = 18932 GhcDiagnosticCode "TcRnIllegalRecordSyntax" = 89246 GhcDiagnosticCode "TcRnInvalidVisibleKindArgument" = 20967 GhcDiagnosticCode 
"TcRnTooManyBinders" = 05989 GhcDiagnosticCode "TcRnDifferentNamesForTyVar" = 17370 GhcDiagnosticCode "TcRnDisconnectedTyVar" = 59738 GhcDiagnosticCode "TcRnInvalidReturnKind" = 55233 GhcDiagnosticCode "TcRnClassKindNotConstraint" = 80768 GhcDiagnosticCode "TcRnMatchesHaveDiffNumArgs" = 91938 GhcDiagnosticCode "TcRnCannotBindScopedTyVarInPatSig" = 46131 GhcDiagnosticCode "TcRnCannotBindTyVarsInPatBind" = 48361 GhcDiagnosticCode "TcRnTooManyTyArgsInConPattern" = 01629 GhcDiagnosticCode "TcRnMultipleInlinePragmas" = 96665 GhcDiagnosticCode "TcRnUnexpectedPragmas" = 88293 GhcDiagnosticCode "TcRnNonOverloadedSpecialisePragma" = 35827 GhcDiagnosticCode "TcRnSpecialiseNotVisible" = 85337 GhcDiagnosticCode "TcRnDifferentExportWarnings" = 92878 GhcDiagnosticCode "TcRnIncompleteExportWarnings" = 94721 GhcDiagnosticCode "TcRnIllegalTypeOperatorDecl" = 50649 GhcDiagnosticCode "TcRnOrPatBindsVariables" = 81303 GhcDiagnosticCode "TcRnIllegalKind" = 64861 GhcDiagnosticCode "TcRnUnexpectedPatSigType" = 74097 GhcDiagnosticCode "TcRnIllegalKindSignature" = 91382 GhcDiagnosticCode "TcRnDataKindsError" = 68567 GhcDiagnosticCode "TcRnIllegalHsigDefaultMethods" = 93006 GhcDiagnosticCode "TcRnHsigFixityMismatch" = 93007 GhcDiagnosticCode "TcRnHsigMissingModuleExport" = 93011 GhcDiagnosticCode "TcRnBadGenericMethod" = 59794 GhcDiagnosticCode "TcRnWarningMinimalDefIncomplete" = 13511 GhcDiagnosticCode "TcRnDefaultMethodForPragmaLacksBinding" = 28587 GhcDiagnosticCode "TcRnIgnoreSpecialisePragmaOnDefMethod" = 72520 GhcDiagnosticCode "TcRnBadMethodErr" = 46284 GhcDiagnosticCode "TcRnIllegalTypeData" = 15013 GhcDiagnosticCode "TcRnTypeDataForbids" = 67297 GhcDiagnosticCode "TcRnUnsatisfiedMinimalDef" = 06201 GhcDiagnosticCode "TcRnMisplacedInstSig" = 06202 GhcDiagnosticCode "TcRnCapturedTermName" = 54201 GhcDiagnosticCode "TcRnBindingOfExistingName" = 58805 GhcDiagnosticCode "TcRnMultipleFixityDecls" = 50419 GhcDiagnosticCode "TcRnIllegalPatternSynonymDecl" = 41507 GhcDiagnosticCode 
"TcRnIllegalClassBinding" = 69248 GhcDiagnosticCode "TcRnOrphanCompletePragma" = 93961 GhcDiagnosticCode "TcRnEmptyCase" = 48010 GhcDiagnosticCode "TcRnNonStdGuards" = 59119 GhcDiagnosticCode "TcRnDuplicateSigDecl" = 31744 GhcDiagnosticCode "TcRnMisplacedSigDecl" = 87866 GhcDiagnosticCode "TcRnUnexpectedDefaultSig" = 40700 GhcDiagnosticCode "TcRnDuplicateMinimalSig" = 85346 GhcDiagnosticCode "TcRnLoopySuperclassSolve" = Outdated 36038 GhcDiagnosticCode "TcRnUnexpectedStandaloneDerivingDecl" = 95159 GhcDiagnosticCode "TcRnUnusedVariableInRuleDecl" = 65669 GhcDiagnosticCode "TcRnUnexpectedStandaloneKindSig" = 45906 GhcDiagnosticCode "TcRnIllegalRuleLhs" = 63294 GhcDiagnosticCode "TcRnDuplicateRoleAnnot" = 97170 GhcDiagnosticCode "TcRnDuplicateKindSig" = 43371 GhcDiagnosticCode "TcRnIllegalDerivStrategy" = 87139 GhcDiagnosticCode "TcRnIllegalMultipleDerivClauses" = 30281 GhcDiagnosticCode "TcRnNoDerivStratSpecified" = 55631 GhcDiagnosticCode "TcRnStupidThetaInGadt" = 18403 GhcDiagnosticCode "TcRnShadowedTyVarNameInFamResult" = 99412 GhcDiagnosticCode "TcRnIncorrectTyVarOnLhsOfInjCond" = 88333 GhcDiagnosticCode "TcRnUnknownTyVarsOnRhsOfInjCond" = 48254 GhcDiagnosticCode "TcRnBadlyStaged" = 28914 GhcDiagnosticCode "TcRnBadlyStagedType" = 86357 GhcDiagnosticCode "TcRnStageRestriction" = 18157 GhcDiagnosticCode "TcRnTyThingUsedWrong" = 10969 GhcDiagnosticCode "TcRnCannotDefaultKindVar" = 79924 GhcDiagnosticCode "TcRnUninferrableTyVar" = 16220 GhcDiagnosticCode "TcRnSkolemEscape" = 71451 GhcDiagnosticCode "TcRnPatSynEscapedCoercion" = 88986 GhcDiagnosticCode "TcRnPatSynExistentialInResult" = 33973 GhcDiagnosticCode "TcRnPatSynArityMismatch" = 18365 GhcDiagnosticCode "TcRnTyFamDepsDisabled" = 43991 GhcDiagnosticCode "TcRnAbstractClosedTyFamDecl" = 60012 GhcDiagnosticCode "TcRnPartialFieldSelector" = 82712 GhcDiagnosticCode "TcRnHasFieldResolvedIncomplete" = 86894 GhcDiagnosticCode "TcRnSuperclassCycle" = 29210 GhcDiagnosticCode "TcRnDefaultSigMismatch" = 72771 
GhcDiagnosticCode "TcRnTyFamResultDisabled" = 44012 GhcDiagnosticCode "TcRnCommonFieldResultTypeMismatch" = 31004 GhcDiagnosticCode "TcRnCommonFieldTypeMismatch" = 91827 GhcDiagnosticCode "TcRnDataConParentTypeMismatch" = 45219 GhcDiagnosticCode "TcRnGADTsDisabled" = 23894 GhcDiagnosticCode "TcRnExistentialQuantificationDisabled" = 25709 GhcDiagnosticCode "TcRnGADTDataContext" = 61072 GhcDiagnosticCode "TcRnMultipleConForNewtype" = 16409 GhcDiagnosticCode "TcRnKindSignaturesDisabled" = 49378 GhcDiagnosticCode "TcRnEmptyDataDeclsDisabled" = 32478 GhcDiagnosticCode "TcRnRoleMismatch" = 29178 GhcDiagnosticCode "TcRnRoleCountMismatch" = 54298 GhcDiagnosticCode "TcRnIllegalRoleAnnotation" = 77192 GhcDiagnosticCode "TcRnRoleAnnotationsDisabled" = 17779 GhcDiagnosticCode "TcRnIncoherentRoles" = 18273 GhcDiagnosticCode "TcRnTypeSynonymCycle" = 97522 GhcDiagnosticCode "TcRnSelfImport" = 43281 GhcDiagnosticCode "TcRnNoExplicitImportList" = 16029 GhcDiagnosticCode "TcRnSafeImportsDisabled" = 26971 GhcDiagnosticCode "TcRnDeprecatedModule" = 15328 GhcDiagnosticCode "TcRnCompatUnqualifiedImport" = Outdated 82347 GhcDiagnosticCode "TcRnRedundantSourceImport" = 54478 GhcDiagnosticCode "TcRnDuplicateDecls" = 29916 GhcDiagnosticCode "TcRnPackageImportsDisabled" = 10032 GhcDiagnosticCode "TcRnIllegalDataCon" = 78448 GhcDiagnosticCode "TcRnNestedForallsContexts" = 71492 GhcDiagnosticCode "TcRnRedundantRecordWildcard" = 15932 GhcDiagnosticCode "TcRnUnusedRecordWildcard" = 83475 GhcDiagnosticCode "TcRnUnusedName" = 40910 GhcDiagnosticCode "TcRnQualifiedBinder" = 28329 GhcDiagnosticCode "TcRnInvalidRecordField" = 53822 GhcDiagnosticCode "TcRnTupleTooLarge" = 94803 GhcDiagnosticCode "TcRnCTupleTooLarge" = 89347 GhcDiagnosticCode "TcRnIllegalInferredTyVars" = 54832 GhcDiagnosticCode "TcRnAmbiguousName" = 87543 GhcDiagnosticCode "TcRnBindingNameConflict" = 10498 GhcDiagnosticCode "NonCanonicalMonoid" = 50928 GhcDiagnosticCode "NonCanonicalMonad" = 22705 GhcDiagnosticCode 
"TcRnDefaultedExceptionContext" = 46235 GhcDiagnosticCode "TcRnImplicitImportOfPrelude" = 20540 GhcDiagnosticCode "TcRnMissingMain" = 67120 GhcDiagnosticCode "TcRnGhciUnliftedBind" = 17999 GhcDiagnosticCode "TcRnGhciMonadLookupFail" = 44990 GhcDiagnosticCode "TcRnArityMismatch" = 27346 GhcDiagnosticCode "TcRnSimplifiableConstraint" = 62412 GhcDiagnosticCode "TcRnIllegalQuasiQuotes" = 77343 GhcDiagnosticCode "TcRnImplicitRhsQuantification" = 16382 GhcDiagnosticCode "TcRnBadTyConTelescope" = 87279 GhcDiagnosticCode "TcRnPatersonCondFailure" = 22979 GhcDiagnosticCode "TcRnDeprecatedInvisTyArgInConPat" = 69797 GhcDiagnosticCode "TcRnInvalidDefaultedTyVar" = 45625 GhcDiagnosticCode "TcRnIllegalTermLevelUse" = 01928 GhcDiagnosticCode "TcRnNamespacedWarningPragmaWithoutFlag" = 14995 GhcDiagnosticCode "TcRnInvisPatWithNoForAll" = 14964 GhcDiagnosticCode "TcRnIllegalInvisibleTypePattern" = 78249 GhcDiagnosticCode "TcRnNamespacedFixitySigWithoutFlag" = 78534 GhcDiagnosticCode "TcRnOutOfArityTyVar" = 84925 GhcDiagnosticCode "TcRnMisplacedInvisPat" = 11983 GhcDiagnosticCode "TcRnIllformedTypePattern" = 88754 GhcDiagnosticCode "TcRnIllegalTypePattern" = 70206 GhcDiagnosticCode "TcRnIllformedTypeArgument" = 29092 GhcDiagnosticCode "TcRnIllegalTypeExpr" = 35499 GhcDiagnosticCode "TcRnUnexpectedTypeSyntaxInTerms" = 31244 -- TcRnTypeApplicationsDisabled GhcDiagnosticCode "TypeApplication" = 23482 GhcDiagnosticCode "TypeApplicationInPattern" = 17916 -- PatSynInvalidRhsReason GhcDiagnosticCode "PatSynNotInvertible" = 69317 GhcDiagnosticCode "PatSynUnboundVar" = 28572 -- TcRnBadFieldAnnotation/BadFieldAnnotationReason GhcDiagnosticCode "LazyFieldsDisabled" = 81601 GhcDiagnosticCode "UnpackWithoutStrictness" = 10107 GhcDiagnosticCode "BackpackUnpackAbstractType" = 40091 -- TcRnRoleValidationFailed/RoleInferenceFailedReason GhcDiagnosticCode "TyVarRoleMismatch" = 22221 GhcDiagnosticCode "TyVarMissingInEnv" = 99991 GhcDiagnosticCode "BadCoercionRole" = 92834 -- 
TcRnClassExtensionDisabled/DisabledClassExtension GhcDiagnosticCode "MultiParamDisabled" = 28349 GhcDiagnosticCode "FunDepsDisabled" = 15708 GhcDiagnosticCode "ConstrainedClassMethodsDisabled" = 25079 -- TcRnTyFamsDisabled/TyFamsDisabledReason GhcDiagnosticCode "TyFamsDisabledFamily" = 39191 GhcDiagnosticCode "TyFamsDisabledInstance" = 06206 GhcDiagnosticCode "TcRnPrecedenceParsingError" = 88747 GhcDiagnosticCode "TcRnSectionPrecedenceError" = 46878 -- HsigShapeMismatchReason GhcDiagnosticCode "HsigShapeSortMismatch" = 93008 GhcDiagnosticCode "HsigShapeNotUnifiable" = 93009 -- Invisible binders GhcDiagnosticCode "TcRnIllegalInvisTyVarBndr" = 58589 GhcDiagnosticCode "TcRnIllegalWildcardTyVarBndr" = 12211 GhcDiagnosticCode "TcRnInvalidInvisTyVarBndr" = 57916 GhcDiagnosticCode "TcRnInvisBndrWithoutSig" = 92337 -- IllegalNewtypeReason GhcDiagnosticCode "DoesNotHaveSingleField" = 23517 GhcDiagnosticCode "IsNonLinear" = 38291 GhcDiagnosticCode "IsGADT" = 89498 GhcDiagnosticCode "HasConstructorContext" = 17440 GhcDiagnosticCode "HasExistentialTyVar" = 07525 GhcDiagnosticCode "HasStrictnessAnnotation" = 04049 -- TcRnBadRecordUpdate GhcDiagnosticCode "NoConstructorHasAllFields" = 14392 GhcDiagnosticCode "MultiplePossibleParents" = 99339 GhcDiagnosticCode "InvalidTyConParent" = 33238 -- BadImport GhcDiagnosticCode "BadImportNotExported" = 61689 GhcDiagnosticCode "BadImportAvailDataCon" = 35373 GhcDiagnosticCode "BadImportNotExportedSubordinates" = 10237 GhcDiagnosticCode "BadImportAvailTyCon" = 56449 GhcDiagnosticCode "BadImportAvailVar" = 12112 -- TcRnPragmaWarning GhcDiagnosticCode "WarningTxt" = 63394 GhcDiagnosticCode "DeprecatedTxt" = 68441 -- TcRnRunSliceFailure/ConversionFail GhcDiagnosticCode "IllegalOccName" = 55017 GhcDiagnosticCode "SumAltArityExceeded" = 68444 GhcDiagnosticCode "IllegalSumAlt" = 63966 GhcDiagnosticCode "IllegalSumArity" = 97721 GhcDiagnosticCode "MalformedType" = 28709 GhcDiagnosticCode "IllegalLastStatement" = 47373 GhcDiagnosticCode 
"KindSigsOnlyAllowedOnGADTs" = 40746 GhcDiagnosticCode "IllegalDeclaration" = 23882 GhcDiagnosticCode "CannotMixGADTConsWith98Cons" = 24104 GhcDiagnosticCode "EmptyStmtListInDoBlock" = 34949 GhcDiagnosticCode "NonVarInInfixExpr" = 99831 GhcDiagnosticCode "MultiWayIfWithoutAlts" = 63930 GhcDiagnosticCode "CasesExprWithoutAlts" = 91745 GhcDiagnosticCode "ImplicitParamsWithOtherBinds" = 42974 GhcDiagnosticCode "InvalidCCallImpent" = 60220 GhcDiagnosticCode "RecGadtNoCons" = 18816 GhcDiagnosticCode "GadtNoCons" = 38140 GhcDiagnosticCode "InvalidTypeInstanceHeader" = 37056 GhcDiagnosticCode "InvalidTyFamInstLHS" = 78486 GhcDiagnosticCode "InvalidImplicitParamBinding" = 51603 GhcDiagnosticCode "DefaultDataInstDecl" = 39639 GhcDiagnosticCode "FunBindLacksEquations" = 52078 -- TcRnDodgyImports/DodgyImportsReason GhcDiagnosticCode "DodgyImportsEmptyParent" = 99623 -- TcRnImportLookup/ImportLookupReason GhcDiagnosticCode "ImportLookupQualified" = 48795 GhcDiagnosticCode "ImportLookupIllegal" = 14752 GhcDiagnosticCode "ImportLookupAmbiguous" = 92057 -- TcRnUnusedImport/UnusedImportReason GhcDiagnosticCode "UnusedImportNone" = 66111 GhcDiagnosticCode "UnusedImportSome" = 38856 -- TcRnIllegalInstance GhcDiagnosticCode "IllegalFamilyApplicationInInstance" = 73138 -- TcRnIllegalClassInstance/IllegalClassInstanceReason GhcDiagnosticCode "IllegalSpecialClassInstance" = 97044 GhcDiagnosticCode "IllegalInstanceFailsCoverageCondition" = 21572 -- IllegalInstanceHead GhcDiagnosticCode "InstHeadAbstractClass" = 51758 GhcDiagnosticCode "InstHeadNonClass" = 53946 GhcDiagnosticCode "InstHeadTySynArgs" = 93557 GhcDiagnosticCode "InstHeadNonTyVarArgs" = 48406 GhcDiagnosticCode "InstHeadMultiParam" = 91901 -- IllegalHasFieldInstance GhcDiagnosticCode "IllegalHasFieldInstanceNotATyCon" = 88994 GhcDiagnosticCode "IllegalHasFieldInstanceFamilyTyCon" = 70743 GhcDiagnosticCode "IllegalHasFieldInstanceTyConHasFields" = 43406 GhcDiagnosticCode "IllegalHasFieldInstanceTyConHasField" = 30836 -- 
TcRnIllegalFamilyInstance/IllegalFamilyInstanceReason GhcDiagnosticCode "NotAFamilyTyCon" = 06204 GhcDiagnosticCode "NotAnOpenFamilyTyCon" = 06207 GhcDiagnosticCode "FamilyCategoryMismatch" = 52347 GhcDiagnosticCode "FamilyArityMismatch" = 12985 GhcDiagnosticCode "TyFamNameMismatch" = 88221 GhcDiagnosticCode "FamInstRHSOutOfScopeTyVars" = 53634 GhcDiagnosticCode "FamInstLHSUnusedBoundTyVars" = 30337 -- InvalidAssocInstance GhcDiagnosticCode "AssocInstanceMissing" = 08585 GhcDiagnosticCode "AssocInstanceNotInAClass" = 06205 GhcDiagnosticCode "AssocNotInThisClass" = 38351 GhcDiagnosticCode "AssocNoClassTyVar" = 55912 GhcDiagnosticCode "AssocTyVarsDontMatch" = 95424 -- InvalidAssocDefault GhcDiagnosticCode "AssocDefaultNotAssoc" = 78822 GhcDiagnosticCode "AssocMultipleDefaults" = 59128 -- AssocDefaultBadArgs GhcDiagnosticCode "AssocDefaultNonTyVarArg" = 41522 GhcDiagnosticCode "AssocDefaultDuplicateTyVars" = 48178 -- Diagnostic codes for the foreign function interface GhcDiagnosticCode "NotADataType" = 31136 GhcDiagnosticCode "NewtypeDataConNotInScope" = 72317 GhcDiagnosticCode "UnliftedFFITypesNeeded" = 10964 GhcDiagnosticCode "NotABoxedMarshalableTyCon" = 89401 GhcDiagnosticCode "ForeignLabelNotAPtr" = 26070 GhcDiagnosticCode "NotSimpleUnliftedType" = 43510 GhcDiagnosticCode "NotBoxedKindAny" = 64097 GhcDiagnosticCode "ForeignDynNotPtr" = 27555 GhcDiagnosticCode "SafeHaskellMustBeInIO" = 57638 GhcDiagnosticCode "IOResultExpected" = 41843 GhcDiagnosticCode "UnexpectedNestedForall" = 92994 GhcDiagnosticCode "LinearTypesNotAllowed" = 57396 GhcDiagnosticCode "OneArgExpected" = 91490 GhcDiagnosticCode "AtLeastOneArgExpected" = 07641 -- Interface errors GhcDiagnosticCode "BadSourceImport" = 64852 GhcDiagnosticCode "HomeModError" = 58427 GhcDiagnosticCode "DynamicHashMismatchError" = 54709 GhcDiagnosticCode "CouldntFindInFiles" = 94559 GhcDiagnosticCode "GenericMissing" = 87110 GhcDiagnosticCode "MissingPackageFiles" = 22211 GhcDiagnosticCode "MissingPackageWayFiles" = 
88719 GhcDiagnosticCode "ModuleSuggestion" = 61948 GhcDiagnosticCode "MultiplePackages" = 45102 GhcDiagnosticCode "NoUnitIdMatching" = 51294 GhcDiagnosticCode "NotAModule" = 35235 GhcDiagnosticCode "Can'tFindNameInInterface" = 83249 GhcDiagnosticCode "CircularImport" = 75429 GhcDiagnosticCode "HiModuleNameMismatchWarn" = 53693 GhcDiagnosticCode "ExceptionOccurred" = 47808 -- Out of scope errors GhcDiagnosticCode "NotInScope" = 76037 GhcDiagnosticCode "NotARecordField" = 22385 GhcDiagnosticCode "NoExactName" = 97784 GhcDiagnosticCode "SameName" = 81573 GhcDiagnosticCode "MissingBinding" = 44432 GhcDiagnosticCode "NoTopLevelBinding" = 10173 GhcDiagnosticCode "UnknownSubordinate" = 54721 GhcDiagnosticCode "NotInScopeTc" = 76329 -- Diagnostic codes for deriving GhcDiagnosticCode "DerivErrNotWellKinded" = 62016 GhcDiagnosticCode "DerivErrSafeHaskellGenericInst" = 07214 GhcDiagnosticCode "DerivErrDerivingViaWrongKind" = 63174 GhcDiagnosticCode "DerivErrNoEtaReduce" = 38996 GhcDiagnosticCode "DerivErrBootFileFound" = 30903 GhcDiagnosticCode "DerivErrDataConsNotAllInScope" = 54540 GhcDiagnosticCode "DerivErrGNDUsedOnData" = 10333 GhcDiagnosticCode "DerivErrNullaryClasses" = 04956 GhcDiagnosticCode "DerivErrLastArgMustBeApp" = 28323 GhcDiagnosticCode "DerivErrNoFamilyInstance" = 82614 GhcDiagnosticCode "DerivErrNotStockDeriveable" = 00158 GhcDiagnosticCode "DerivErrHasAssociatedDatatypes" = 34611 GhcDiagnosticCode "DerivErrNewtypeNonDeriveableClass" = 82023 GhcDiagnosticCode "DerivErrCannotEtaReduceEnough" = 26557 GhcDiagnosticCode "DerivErrOnlyAnyClassDeriveable" = 23244 GhcDiagnosticCode "DerivErrNotDeriveable" = 38178 GhcDiagnosticCode "DerivErrNotAClass" = 63388 GhcDiagnosticCode "DerivErrNoConstructors" = 64560 GhcDiagnosticCode "DerivErrLangExtRequired" = 86639 GhcDiagnosticCode "DerivErrDunnoHowToDeriveForType" = 48959 GhcDiagnosticCode "DerivErrMustBeEnumType" = 30750 GhcDiagnosticCode "DerivErrMustHaveExactlyOneConstructor" = 37542 GhcDiagnosticCode 
"DerivErrMustHaveSomeParameters" = 45539 GhcDiagnosticCode "DerivErrMustNotHaveClassContext" = 16588 GhcDiagnosticCode "DerivErrBadConstructor" = 16437 GhcDiagnosticCode "DerivErrGenerics" = 30367 GhcDiagnosticCode "DerivErrEnumOrProduct" = 58291 -- Diagnostic codes for instance lookup GhcDiagnosticCode "LookupInstErrNotExact" = 10372 GhcDiagnosticCode "LookupInstErrFlexiVar" = 10373 GhcDiagnosticCode "LookupInstErrNotFound" = 10374 -- Diagnostic codes for default declarations and type defaulting GhcDiagnosticCode "TcRnMultipleDefaultDeclarations" = 99565 GhcDiagnosticCode "TcRnIllegalDefaultClass" = 26555 GhcDiagnosticCode "TcRnIllegalNamedDefault" = 55756 GhcDiagnosticCode "TcRnBadDefaultType" = 88933 GhcDiagnosticCode "TcRnWarnDefaulting" = 18042 GhcDiagnosticCode "TcRnWarnClashingDefaultImports" = 77007 -- TcRnEmptyStmtsGroupError/EmptyStatementGroupErrReason GhcDiagnosticCode "EmptyStmtsGroupInParallelComp" = 41242 GhcDiagnosticCode "EmptyStmtsGroupInTransformListComp" = 92693 GhcDiagnosticCode "EmptyStmtsGroupInDoNotation" = 82311 GhcDiagnosticCode "EmptyStmtsGroupInArrowNotation" = 19442 -- HsBoot and Hsig errors GhcDiagnosticCode "MissingBootDefinition" = 63610 GhcDiagnosticCode "MissingBootExport" = 91999 GhcDiagnosticCode "MissingBootInstance" = 79857 GhcDiagnosticCode "BadReexportedBootThing" = 12424 GhcDiagnosticCode "BootMismatchedIdTypes" = 11890 GhcDiagnosticCode "BootMismatchedTyCons" = 15843 -- TH errors GhcDiagnosticCode "TypedTHWithPolyType" = 94642 GhcDiagnosticCode "SplicePolymorphicLocalVar" = 06568 GhcDiagnosticCode "SpliceThrewException" = 87897 GhcDiagnosticCode "InvalidTopDecl" = 52886 GhcDiagnosticCode "NonExactName" = 77923 GhcDiagnosticCode "AddInvalidCorePlugin" = 86463 GhcDiagnosticCode "AddDocToNonLocalDefn" = 67760 GhcDiagnosticCode "FailedToLookupThInstName" = 49530 GhcDiagnosticCode "CannotReifyInstance" = 30384 GhcDiagnosticCode "CannotReifyOutOfScopeThing" = 24922 GhcDiagnosticCode "CannotReifyThingNotInTypeEnv" = 79890 
GhcDiagnosticCode "NoRolesAssociatedWithThing" = 65923 GhcDiagnosticCode "CannotRepresentType" = 75721 GhcDiagnosticCode "ReportCustomQuasiError" = 39584 GhcDiagnosticCode "MismatchedSpliceType" = 45108 GhcDiagnosticCode "IllegalTHQuotes" = 62558 GhcDiagnosticCode "IllegalTHSplice" = 26759 GhcDiagnosticCode "NestedTHBrackets" = 59185 GhcDiagnosticCode "AddTopDeclsUnexpectedDeclarationSplice" = 17599 GhcDiagnosticCode "BadImplicitSplice" = 25277 GhcDiagnosticCode "QuotedNameWrongStage" = 57695 GhcDiagnosticCode "IllegalStaticFormInSplice" = 12219 -- Zonker messages GhcDiagnosticCode "ZonkerCannotDefaultConcrete" = 52083 -- Promotion errors GhcDiagnosticCode "ClassPE" = 86934 GhcDiagnosticCode "TyConPE" = 85413 GhcDiagnosticCode "PatSynPE" = 70349 GhcDiagnosticCode "FamDataConPE" = 64578 GhcDiagnosticCode "ConstrainedDataConPE" = 28374 GhcDiagnosticCode "RecDataConPE" = 56753 GhcDiagnosticCode "TermVariablePE" = 45510 GhcDiagnosticCode "TypeVariablePE" = 47557 -- To generate new random numbers: -- https://www.random.org/integers/?num=10&min=1&max=99999&col=1&base=10&format=plain -- -- NB: never remove a return value from this type family! -- We need to ensure uniquess of diagnostic codes across GHC versions, -- and this includes outdated diagnostic codes for errors that GHC -- no longer reports. These are collected below. 
GhcDiagnosticCode "TcRnIllegalInstanceHeadDecl" = Outdated 12222 GhcDiagnosticCode "TcRnNoClassInstHead" = Outdated 56538 -- The above two are subsumed by InstHeadNonClass [GHC-53946] GhcDiagnosticCode "TcRnNameByTemplateHaskellQuote" = Outdated 40027 GhcDiagnosticCode "TcRnIllegalBindingOfBuiltIn" = Outdated 69639 GhcDiagnosticCode "TcRnMixedSelectors" = Outdated 40887 GhcDiagnosticCode "TcRnBadBootFamInstDecl" = Outdated 06203 GhcDiagnosticCode "TcRnBindInBootFile" = Outdated 11247 GhcDiagnosticCode "TcRnUnexpectedTypeSplice" = Outdated 39180 GhcDiagnosticCode "PsErrUnexpectedTypeAppInDecl" = Outdated 45054 GhcDiagnosticCode "TcRnUnpromotableThing" = Outdated 88634 GhcDiagnosticCode "UntouchableVariable" = Outdated 34699 GhcDiagnosticCode "TcRnBindVarAlreadyInScope" = Outdated 69710 GhcDiagnosticCode "TcRnBindMultipleVariables" = Outdated 92957 GhcDiagnosticCode "TcRnHsigNoIface" = Outdated 93010 GhcDiagnosticCode "TcRnInterfaceLookupError" = Outdated 52243 GhcDiagnosticCode "TcRnForallIdentifier" = Outdated 64088 -- | Use this type synonym to mark a diagnostic code as outdated. -- -- The presence of this type synonym is used by the 'codes' test to determine -- which diagnostic codes to check for testsuite coverage. type Outdated a = a {- ********************************************************************* * * Recurring into an argument * * ********************************************************************* -} -- | Some constructors of diagnostic datatypes don't have -- corresponding error codes, because we recur inside them. -- -- For example, we don't have an error code for the -- 'TcRnCannotDeriveInstance' constructor of 'TcRnMessage', -- because we recur into the 'DeriveInstanceErrReason' to obtain -- an error code. -- -- This type family keeps track of such constructors. 
type ConRecursInto :: Symbol -> Maybe Type type family ConRecursInto con where ---------------------------------- -- Constructors of GhcMessage ConRecursInto "GhcDriverMessage" = 'Just DriverMessage ConRecursInto "GhcPsMessage" = 'Just PsMessage ConRecursInto "GhcTcRnMessage" = 'Just TcRnMessage ConRecursInto "GhcDsMessage" = 'Just DsMessage ConRecursInto "GhcUnknownMessage" = 'Just (UnknownDiagnostic GhcMessageOpts) ---------------------------------- -- Constructors of DriverMessage ConRecursInto "DriverUnknownMessage" = 'Just (UnknownDiagnostic DriverMessageOpts) ConRecursInto "DriverPsHeaderMessage" = 'Just PsMessage ConRecursInto "DriverInterfaceError" = 'Just IfaceMessage ConRecursInto "CantFindErr" = 'Just CantFindInstalled ConRecursInto "CantFindInstalledErr" = 'Just CantFindInstalled ConRecursInto "CantFindInstalled" = 'Just CantFindInstalledReason ConRecursInto "BadIfaceFile" = 'Just ReadInterfaceError ConRecursInto "FailedToLoadDynamicInterface" = 'Just ReadInterfaceError ---------------------------------- -- Constructors of PsMessage ConRecursInto "PsUnknownMessage" = 'Just (UnknownDiagnostic NoDiagnosticOpts) ConRecursInto "PsHeaderMessage" = 'Just PsHeaderMessage ---------------------------------- -- Constructors of TcRnMessage ConRecursInto "TcRnUnknownMessage" = 'Just (UnknownDiagnostic TcRnMessageOpts) -- Recur into TcRnMessageWithInfo to get the underlying TcRnMessage ConRecursInto "TcRnMessageWithInfo" = 'Just TcRnMessageDetailed ConRecursInto "TcRnMessageDetailed" = 'Just TcRnMessage ConRecursInto "TcRnWithHsDocContext" = 'Just TcRnMessage ConRecursInto "TcRnCannotDeriveInstance" = 'Just DeriveInstanceErrReason ConRecursInto "TcRnLookupInstance" = 'Just LookupInstanceErrReason ConRecursInto "TcRnPragmaWarning" = 'Just (WarningTxt GhcRn) ConRecursInto "TcRnNotInScope" = 'Just NotInScopeError ConRecursInto "TcRnIllegalNewtype" = 'Just IllegalNewtypeReason ConRecursInto "TcRnHsigShapeMismatch" = 'Just HsigShapeMismatchReason ConRecursInto 
"TcRnPatSynInvalidRhs" = 'Just PatSynInvalidRhsReason ConRecursInto "TcRnBadRecordUpdate" = 'Just BadRecordUpdateReason ConRecursInto "TcRnBadFieldAnnotation" = 'Just BadFieldAnnotationReason ConRecursInto "TcRnRoleValidationFailed" = 'Just RoleValidationFailedReason ConRecursInto "TcRnClassExtensionDisabled" = 'Just DisabledClassExtension ConRecursInto "TcRnTyFamsDisabled" = 'Just TyFamsDisabledReason ConRecursInto "TcRnDodgyImports" = 'Just DodgyImportsReason ConRecursInto "DodgyImportsHiding" = 'Just ImportLookupReason ConRecursInto "TcRnImportLookup" = 'Just ImportLookupReason ConRecursInto "TcRnUnusedImport" = 'Just UnusedImportReason ConRecursInto "TcRnNonCanonicalDefinition" = 'Just NonCanonicalDefinition ConRecursInto "TcRnIllegalInstance" = 'Just IllegalInstanceReason ConRecursInto "TcRnTypeApplicationsDisabled" = 'Just TypeApplication -- Illegal instance reasons ConRecursInto "IllegalClassInstance" = 'Just IllegalClassInstanceReason ConRecursInto "IllegalFamilyInstance" = 'Just IllegalFamilyInstanceReason -- Illegal class instance reasons ConRecursInto "IllegalInstanceHead" = 'Just IllegalInstanceHeadReason ConRecursInto "IllegalHasFieldInstance" = 'Just IllegalHasFieldInstance -- Illegal family instance reasons ConRecursInto "InvalidAssoc" = 'Just InvalidAssoc ConRecursInto "InvalidAssocInstance" = 'Just InvalidAssocInstance ConRecursInto "InvalidAssocDefault" = 'Just InvalidAssocDefault ConRecursInto "AssocDefaultBadArgs" = 'Just AssocDefaultBadArgs -- -- TH errors ConRecursInto "TcRnTHError" = 'Just THError ConRecursInto "THSyntaxError" = 'Just THSyntaxError ConRecursInto "THNameError" = 'Just THNameError ConRecursInto "THReifyError" = 'Just THReifyError ConRecursInto "TypedTHError" = 'Just TypedTHError ConRecursInto "THSpliceFailed" = 'Just SpliceFailReason ConRecursInto "RunSpliceFailure" = 'Just RunSpliceFailReason ConRecursInto "ConversionFail" = 'Just ConversionFailReason ConRecursInto "AddTopDeclsError" = 'Just AddTopDeclsError ConRecursInto 
"AddTopDeclsRunSpliceFailure" = 'Just RunSpliceFailReason -- Interface file errors ConRecursInto "TcRnInterfaceError" = 'Just IfaceMessage ConRecursInto "Can'tFindInterface" = 'Just MissingInterfaceError -- HsBoot and Hsig errors ConRecursInto "TcRnBootMismatch" = 'Just BootMismatch ConRecursInto "MissingBootThing" = 'Just MissingBootThing ConRecursInto "BootMismatch" = 'Just BootMismatchWhat -- Zonker errors ConRecursInto "TcRnZonkerMessage" = 'Just ZonkerMessage ------------------ -- FFI errors ConRecursInto "TcRnIllegalForeignType" = 'Just IllegalForeignTypeReason -- IllegalForeignTypeReason: recur into TypeCannotBeMarshaled for the reason ConRecursInto "TypeCannotBeMarshaled" = 'Just TypeCannotBeMarshaledReason ------------------ -- Solver reports -- Recur inside TcRnSolverReport to get the underlying TcSolverReportMsg ConRecursInto "TcRnSolverReport" = 'Just SolverReportWithCtxt ConRecursInto "SolverReportWithCtxt" = 'Just TcSolverReportMsg ConRecursInto "TcReportWithInfo" = 'Just TcSolverReportMsg -- Recur inside CannotUnifyVariable to get the underlying reason ConRecursInto "CannotUnifyVariable" = 'Just CannotUnifyVariableReason -- Recur inside Mismatch to get the underlying reason ConRecursInto "Mismatch" = 'Just MismatchMsg -- Recur inside empty statements groups to get the underlying statements block ConRecursInto "TcRnEmptyStmtsGroup" = 'Just EmptyStatementGroupErrReason ---------------------------------- -- Constructors of DsMessage ConRecursInto "DsUnknownMessage" = 'Just (UnknownDiagnostic NoDiagnosticOpts) ---------------------------------- -- Constructors of ImportLookupBad ConRecursInto "ImportLookupBad" = 'Just BadImportKind ConRecursInto "TcRnUnpromotableThing" = 'Just PromotionErr ---------------------------------- -- Any other constructors: don't recur, instead directly -- use the constructor name for the error code. 
ConRecursInto _ = 'Nothing {- ********************************************************************* * * Generics machinery * * ********************************************************************* -} {- Note [Diagnostic codes using generics] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diagnostic codes are specified at the type-level using the injective type family 'GhcDiagnosticCode'. This ensures uniqueness of diagnostic codes, giving quick feedback (in the form of a type error). Using this type family, we need to obtain corresponding value-level functions, e.g. diagnosticCode :: TcRnMessage -> DiagnosticCode diagnosticCode diag = case diag of TcRnInaccessibleCode {} -> ghcDiagnosticCode 40564 TcRnTypeDoesNotHaveFixedRuntimeRep {} -> ghcDiagnosticCode 18478 TcRnCannotDeriveInstance _ _ _ _ reason -> case reason of DerivErrNotWellKinded {} -> ghcDiagnosticCode 62016 DerivErrNotAClass {} -> ghcDiagnosticCode 63388 ... ... For some constructors, such as 'TcRnInaccessibleCode', we directly get a diagnostic code, using the 'GhcDiagnosticCode' type family. For other constructors, such as 'TcRnCannotDeriveInstance', we instead recur into an argument (in this case 'DeriveInstanceErrReason') to obtain a diagnostic code. To achieve this, we use a variant of the 'typed' lens from 'generic-lens' (we only need a getter, not a setter): - Using GHC.Generics, we obtain the type-level structure of diagnostic types, as sums of products, with extra metadata. - The 'ConRecursInto' type family declares when we should recur into an argument of the constructor instead of using the constructor name itself for the diagnostic code. - To decide whether to recur, in the generic representation, we must look at all factors of a product to see if there is a type we should recur into. We look at the left branch first, and decide whether to recur into it using the HasTypeQ type family. - The two different behaviours are controlled by two main instances (*) and (**). 
- (*) recurses into a subtype, when we have a type family equation such as: ConRecursInto "TcRnCannotDeriveInstance" = 'Just DeriveInstanceErrReason In this case, for the constructor 'TcRnCannotDeriveInstance', we recur into the type 'DeriveInstanceErrReason'. The overlapping instance (ERR1) provides an error message in case a constructor does not have the type specified by the 'ConRecursInto' type family. - (**) directly uses the constructor name, by using the 'GhcDiagnosticCode' type family. The 'KnownConstructor' context (ERR2) on the instance provides a custom error message in case of a missing diagnostic code, which points GHC contributors to the documentation explaining how to add diagnostic codes for their diagnostics. -} -- | Use the generic representation of a type to retrieve the -- diagnostic code, using the 'GhcDiagnosticCode' type family. -- -- See Note [Diagnostic codes using generics] in GHC.Types.Error.Codes. type GDiagnosticCode :: (Type -> Type) -> Constraint class GDiagnosticCode f where gdiagnosticCode :: f a -> Maybe DiagnosticCode -- | Use the generic representation of a type to retrieve the collection -- of all diagnostic codes it can give rise to. 
type GDiagnosticCodes :: [Type] -> (Type -> Type) -> Constraint class GDiagnosticCodes seen f where gdiagnosticCodes :: Map DiagnosticCode String type ConstructorCode :: Symbol -> (Type -> Type) -> Maybe Type -> Constraint class ConstructorCode con f recur where gconstructorCode :: f a -> Maybe DiagnosticCode type ConstructorCodes :: Symbol -> (Type -> Type) -> [Type] -> Maybe Type -> Constraint class ConstructorCodes con f seen recur where gconstructorCodes :: Map DiagnosticCode String instance (KnownConstructor con, KnownSymbol con) => ConstructorCode con f 'Nothing where gconstructorCode _ = Just $ DiagnosticCode "GHC" $ natVal' @(GhcDiagnosticCode con) proxy# instance (KnownConstructor con, KnownSymbol con) => ConstructorCodes con f seen 'Nothing where gconstructorCodes = Map.singleton (DiagnosticCode "GHC" $ natVal' @(GhcDiagnosticCode con) proxy#) (symbolVal' @con proxy#) -- If we recur into the 'UnknownDiagnostic' existential datatype, -- unwrap the existential and obtain the error code. instance {-# OVERLAPPING #-} ( ConRecursInto con ~ 'Just (UnknownDiagnostic opts) , HasType (UnknownDiagnostic opts) con f ) => ConstructorCode con f ('Just (UnknownDiagnostic opts)) where gconstructorCode diag = case getType @(UnknownDiagnostic opts) @con @f diag of UnknownDiagnostic _ diag -> diagnosticCode diag instance {-# OVERLAPPING #-} ( ConRecursInto con ~ 'Just (UnknownDiagnostic opts) ) => ConstructorCodes con f seen ('Just (UnknownDiagnostic opts)) where gconstructorCodes = Map.empty -- (*) Recursive instance: Recur into the given type. 
instance ( ConRecursInto con ~ 'Just ty, HasType ty con f , Generic ty, GDiagnosticCode (Rep ty) ) => ConstructorCode con f ('Just ty) where gconstructorCode diag = gdiagnosticCode (from $ getType @ty @con @f diag) instance ( ConRecursInto con ~ 'Just ty, HasType ty con f , Generic ty, GDiagnosticCodes (Insert ty seen) (Rep ty) , Seen seen ty ) => ConstructorCodes con f seen ('Just ty) where gconstructorCodes = -- See Note [diagnosticCodes: don't recur into already-seen types] if wasSeen @seen @ty then Map.empty else gdiagnosticCodes @(Insert ty seen) @(Rep ty) -- (**) Constructor instance: handle constructors directly. -- -- Obtain the code from the 'GhcDiagnosticCode' -- type family, applied to the name of the constructor. instance (ConstructorCode con f recur, recur ~ ConRecursInto con, KnownSymbol con) => GDiagnosticCode (M1 i ('MetaCons con x y) f) where gdiagnosticCode (M1 x) = gconstructorCode @con @f @recur x instance (ConstructorCodes con f seen recur, recur ~ ConRecursInto con, KnownSymbol con) => GDiagnosticCodes seen (M1 i ('MetaCons con x y) f) where gdiagnosticCodes = gconstructorCodes @con @f @seen @recur -- Handle sum types (the diagnostic types are sums of constructors). instance (GDiagnosticCode f, GDiagnosticCode g) => GDiagnosticCode (f :+: g) where gdiagnosticCode (L1 x) = gdiagnosticCode @f x gdiagnosticCode (R1 y) = gdiagnosticCode @g y instance (GDiagnosticCodes seen f, GDiagnosticCodes seen g) => GDiagnosticCodes seen (f :+: g) where gdiagnosticCodes = Map.union (gdiagnosticCodes @seen @f) (gdiagnosticCodes @seen @g) -- Discard metadata we don't need. instance GDiagnosticCode f => GDiagnosticCode (M1 i ('MetaData nm mod pkg nt) f) where gdiagnosticCode (M1 x) = gdiagnosticCode @f x instance GDiagnosticCodes seen f => GDiagnosticCodes seen (M1 i ('MetaData nm mod pkg nt) f) where gdiagnosticCodes = gdiagnosticCodes @seen @f -- | Decide whether to pick the left or right branch -- when deciding how to recurse into a product. 
type family HasTypeQ (ty :: Type) f :: Maybe Type where HasTypeQ typ (M1 _ _ (K1 _ typ)) = 'Just typ HasTypeQ typ (M1 _ _ x) = HasTypeQ typ x HasTypeQ typ (l :*: r) = Alt (HasTypeQ typ l) (HasTypeQ typ r) HasTypeQ typ (l :+: r) = Both (HasTypeQ typ l) (HasTypeQ typ r) HasTypeQ typ (K1 _ _) = 'Nothing HasTypeQ typ U1 = 'Nothing HasTypeQ typ V1 = 'Nothing type family Both (m1 :: Maybe a) (m2 :: Maybe a) :: Maybe a where Both ('Just a) ('Just a) = 'Just a type family Alt (m1 :: Maybe a) (m2 :: Maybe a) :: Maybe a where Alt ('Just a) _ = 'Just a Alt _ b = b type HasType :: Type -> Symbol -> (Type -> Type) -> Constraint class HasType ty orig f where getType :: f a -> ty instance HasType ty orig (M1 i s (K1 x ty)) where getType (M1 (K1 x)) = x instance HasTypeProd ty (HasTypeQ ty f) orig f g => HasType ty orig (f :*: g) where getType = getTypeProd @ty @(HasTypeQ ty f) @orig -- The lr parameter tells us whether to pick the left or right -- branch in a product, and is computed using 'HasTypeQ'. -- -- If it's @Just l@, then we have found the type in the left branch, -- so use that. Otherwise, look in the right branch. class HasTypeProd ty lr orig f g where getTypeProd :: (f :*: g) a -> ty -- Pick the left branch. instance HasType ty orig f => HasTypeProd ty ('Just l) orig f g where getTypeProd (x :*: _) = getType @ty @orig @f x -- Pick the right branch. instance HasType ty orig g => HasTypeProd ty 'Nothing orig f g where getTypeProd (_ :*: y) = getType @ty @orig @g y {- Note [diagnosticCodes: don't recur into already-seen types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When traversing through the Generic representation of a datatype to compute all of the corresponding error codes, we need to keep track of types we have already seen in order to avoid a runtime loop. For example, TcRnMessage is defined recursively in terms of itself: data TcRnMessage where ... 
TcRnMessageWithInfo :: !UnitState -> !TcRnMessageDetailed -- contains a TcRnMessage -> TcRnMessage If we naively computed the collection of error codes, we would get a computation of the form diagnosticCodes @TcRnMessage = ... `Map.union` constructorCodes "TcRnMessageWithInfo" constructorCodes "TcRnMessageWithInfo" = diagnosticCodes @TcRnMessage This would cause an infinite loop. We thus keep track of a list of types we have already encountered, and when we recur into a type we have already encountered, we simply skip taking that union (see (*)). Note that 'constructorCodes' starts by marking the initial type itself as "seen", which precisely avoids the loop above when calling 'constructorCodes @TcRnMessage'. -} type Seen :: [Type] -> Type -> Constraint class Seen seen ty where wasSeen :: Bool instance Seen '[] ty where wasSeen = False instance {-# OVERLAPPING #-} Seen (ty ': tys) ty where wasSeen = True instance Seen tys ty => Seen (ty' ': tys) ty where wasSeen = wasSeen @tys @ty type Insert :: Type -> [Type] -> [Type] type family Insert ty tys where Insert ty '[] = '[ty] Insert ty (ty ': tys) = ty ': tys Insert ty (ty' ': tys) = ty' ': Insert ty tys {- ********************************************************************* * * Custom type errors for diagnostic codes * * ********************************************************************* -} -- (ERR1) Improve error messages for recurring into an argument. instance {-# OVERLAPPABLE #-} TypeError ( 'Text "The constructor '" ':<>: 'Text orig ':<>: 'Text "'" ':$$: 'Text "does not have any argument of type '" ':<>: 'ShowType ty ':<>: 'Text "'." ':$$: 'Text "" ':$$: 'Text "This is likely due to an incorrect type family equation:" ':$$: 'Text " ConRecursInto \"" ':<>: 'Text orig ':<>: 'Text "\" = " ':<>: 'ShowType ty ) => HasType ty orig f where getType = panic "getType: unreachable" -- (ERR2) Improve error messages for missing 'GhcDiagnosticCode' equations. 
type KnownConstructor :: Symbol -> Constraint type family KnownConstructor con where KnownConstructor con = KnownNatOrErr ( TypeError ( 'Text "Missing diagnostic code for constructor " ':<>: 'Text "'" ':<>: 'Text con ':<>: 'Text "'." ':$$: 'Text "" ':$$: 'Text "Note [Diagnostic codes] in GHC.Types.Error.Codes" ':$$: 'Text "contains instructions for adding a new diagnostic code." ) ) (GhcDiagnosticCode con) type KnownNatOrErr :: Constraint -> Nat -> Constraint type KnownNatOrErr err n = (Assert err n, KnownNat n) -- Detecting a stuck type family using a data family. -- See https://blog.csongor.co.uk/report-stuck-families/. type Assert :: Constraint -> k -> Constraint type family Assert err n where Assert _ Dummy = Dummy Assert _ n = () data family Dummy :: k ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/FieldLabel.hs0000644000000000000000000001216207346545000021333 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable FieldLabelString {- % % (c) Adam Gundry 2013-2015 % Note [FieldLabel] ~~~~~~~~~~~~~~~~~ This module defines the representation of FieldLabels as stored in TyCons. As well as a selector name, these have some extra structure to support the DuplicateRecordFields and NoFieldSelectors extensions. In the normal case (with NoDuplicateRecordFields and FieldSelectors), a datatype like data T = MkT { foo :: Int } has FieldLabel { flHasDuplicateRecordFields = NoDuplicateRecordFields , flHasFieldSelector = FieldSelectors , flSelector = foo }. If DuplicateRecordFields is enabled, however, the same declaration instead gives FieldLabel { flHasDuplicateRecordFields = DuplicateRecordFields , flHasFieldSelector = FieldSelectors , flSelector = foo }. 
We need to keep track of whether FieldSelectors or DuplicateRecordFields were enabled when a record field was defined, as they affect name resolution and shadowing of record fields, as explained in Note [NoFieldSelectors] in GHC.Types.Name.Reader and Note [Reporting duplicate local declarations] in GHC.Rename.Names. -} module GHC.Types.FieldLabel ( FieldLabelEnv , FieldLabel(..), flLabel , DuplicateRecordFields(..) , FieldSelectors(..) , flIsOverloaded ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Name import GHC.Data.FastString.Env import GHC.Types.Unique (Uniquable(..)) import GHC.Utils.Outputable import GHC.Utils.Binary import Language.Haskell.Syntax.Basic (FieldLabelString(..)) import Control.DeepSeq import Data.Bool import Data.Data -- | A map from labels to all the auxiliary information type FieldLabelEnv = DFastStringEnv FieldLabel -- | Fields in an algebraic record type; see Note [FieldLabel]. data FieldLabel = FieldLabel { flHasDuplicateRecordFields :: DuplicateRecordFields, -- ^ Was @DuplicateRecordFields@ on in the defining module for this datatype? flHasFieldSelector :: FieldSelectors, -- ^ Was @FieldSelectors@ enabled in the defining module for this datatype? -- See Note [NoFieldSelectors] in GHC.Rename.Env flSelector :: Name -- ^ The 'Name' of the selector function, which uniquely identifies -- the field label. } deriving (Data, Eq) -- | User-visible label of a field. flLabel :: FieldLabel -> FieldLabelString flLabel = FieldLabelString . occNameFS . nameOccName . flSelector instance HasOccName FieldLabel where occName = nameOccName . 
flSelector instance Outputable FieldLabel where ppr fl = ppr (flLabel fl) <> whenPprDebug (braces (ppr (flSelector fl)) <> ppr (flHasDuplicateRecordFields fl) <> ppr (flHasFieldSelector fl)) instance Outputable FieldLabelString where ppr (FieldLabelString l) = ppr l instance Uniquable FieldLabelString where getUnique (FieldLabelString fs) = getUnique fs -- | Flag to indicate whether the DuplicateRecordFields extension is enabled. data DuplicateRecordFields = DuplicateRecordFields -- ^ Fields may be duplicated in a single module | NoDuplicateRecordFields -- ^ Fields must be unique within a module (the default) deriving (Show, Eq, Data) instance Binary DuplicateRecordFields where put_ bh f = put_ bh (f == DuplicateRecordFields) get bh = bool NoDuplicateRecordFields DuplicateRecordFields <$> get bh instance Outputable DuplicateRecordFields where ppr DuplicateRecordFields = text "+dup" ppr NoDuplicateRecordFields = text "-dup" instance NFData DuplicateRecordFields where rnf DuplicateRecordFields = () rnf NoDuplicateRecordFields = () -- | Flag to indicate whether the FieldSelectors extension is enabled. data FieldSelectors = FieldSelectors -- ^ Selector functions are available (the default) | NoFieldSelectors -- ^ Selector functions are not available deriving (Show, Eq, Data) instance Binary FieldSelectors where put_ bh f = put_ bh (f == FieldSelectors) get bh = bool NoFieldSelectors FieldSelectors <$> get bh instance Outputable FieldSelectors where ppr FieldSelectors = text "+sel" ppr NoFieldSelectors = text "-sel" instance NFData FieldSelectors where rnf FieldSelectors = () rnf NoFieldSelectors = () -- | We need the @Binary Name@ constraint here even though there is an instance -- defined in "GHC.Types.Name", because the we have a SOURCE import, so the -- instance is not in scope. And the instance cannot be added to Name.hs-boot -- because "GHC.Utils.Binary" itself depends on "GHC.Types.Name". 
instance Binary Name => Binary FieldLabel where put_ bh (FieldLabel aa ab ac) = do put_ bh aa put_ bh ab put_ bh ac get bh = do aa <- get bh ab <- get bh ac <- get bh return (FieldLabel aa ab ac) flIsOverloaded :: FieldLabel -> Bool flIsOverloaded fl = flHasDuplicateRecordFields fl == DuplicateRecordFields || flHasFieldSelector fl == NoFieldSelectors ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Fixity.hs0000644000000000000000000000313607346545000020625 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# OPTIONS_GHC -Wno-dodgy-exports #-} -- For re-export of GHC.Hs.Basic instances -- | Fixity module GHC.Types.Fixity ( Fixity (..) , FixityDirection (..) , LexicalFixity (..) , maxPrecedence , minPrecedence , defaultFixity , negateFixity , funTyFixity , compareFixity , module GHC.Hs.Basic ) where import GHC.Prelude import Language.Haskell.Syntax.Basic (LexicalFixity(..), FixityDirection(..), Fixity(..) ) import GHC.Hs.Basic () -- For instances only ------------------------ maxPrecedence, minPrecedence :: Int maxPrecedence = 9 minPrecedence = 0 defaultFixity :: Fixity defaultFixity = Fixity maxPrecedence InfixL negateFixity, funTyFixity :: Fixity -- Wired-in fixities negateFixity = Fixity 6 InfixL -- Fixity of unary negate funTyFixity = Fixity (-1) InfixR -- Fixity of '->', see #15235 {- Consider \begin{verbatim} a `op1` b `op2` c \end{verbatim} @(compareFixity op1 op2)@ tells which way to arrange application, or whether there's an error. 
-} compareFixity :: Fixity -> Fixity -> (Bool, -- Error please Bool) -- Associate to the right: a op1 (b op2 c) compareFixity (Fixity prec1 dir1) (Fixity prec2 dir2) = case prec1 `compare` prec2 of GT -> left LT -> right EQ -> case (dir1, dir2) of (InfixR, InfixR) -> right (InfixL, InfixL) -> left _ -> error_please where right = (False, True) left = (False, False) error_please = (True, False) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Fixity/0000755000000000000000000000000007346545000020266 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Fixity/Env.hs0000644000000000000000000000221407346545000021351 0ustar0000000000000000module GHC.Types.Fixity.Env ( FixityEnv , FixItem (..) , emptyFixityEnv , lookupFixity , mkIfaceFixCache , emptyIfaceFixCache ) where import GHC.Prelude import GHC.Types.Fixity import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable -- | Fixity environment mapping names to their fixities type FixityEnv = NameEnv FixItem -- | Fixity information for an 'Name'. 
We keep the OccName in the range -- so that we can generate an interface from it data FixItem = FixItem OccName Fixity instance Outputable FixItem where ppr (FixItem occ fix) = ppr fix <+> ppr occ emptyFixityEnv :: FixityEnv emptyFixityEnv = emptyNameEnv lookupFixity :: FixityEnv -> Name -> Fixity lookupFixity env n = case lookupNameEnv env n of Just (FixItem _ fix) -> fix Nothing -> defaultFixity -- | Creates cached lookup for the 'mi_fix_fn' field of 'ModIface' mkIfaceFixCache :: [(OccName, Fixity)] -> OccName -> Maybe Fixity mkIfaceFixCache pairs = \n -> lookupOccEnv env n where env = mkOccEnv pairs emptyIfaceFixCache :: OccName -> Maybe Fixity emptyIfaceFixCache _ = Nothing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/ForeignCall.hs0000644000000000000000000002647407346545000021550 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Foreign]{Foreign calls} -} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.ForeignCall ( ForeignCall(..), isSafeForeignCall, Safety(..), playSafe, playInterruptible, CExportSpec(..), CLabelString, isCLabelString, pprCLabelString, CCallSpec(..), CCallTarget(..), isDynamicTarget, CCallConv(..), defaultCCallConv, ccallConvAttribute, Header(..), CType(..), ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Unit.Module import GHC.Types.SourceText ( SourceText, pprWithSourceText ) import Data.Char import Data.Data {- ************************************************************************ * * \subsubsection{Data types} * * ************************************************************************ -} newtype ForeignCall = CCall CCallSpec deriving Eq isSafeForeignCall :: ForeignCall -> Bool isSafeForeignCall (CCall (CCallSpec _ _ safe)) = playSafe safe -- We may need more clues to distinguish foreign calls -- but this simple printer will do for now instance Outputable ForeignCall where ppr (CCall cc) = ppr 
cc data Safety = PlaySafe -- ^ Might invoke Haskell GC, or do a call back, or -- switch threads, etc. So make sure things are -- tidy before the call. Additionally, in the threaded -- RTS we arrange for the external call to be executed -- by a separate OS thread, i.e., _concurrently_ to the -- execution of other Haskell threads. | PlayInterruptible -- ^ Like PlaySafe, but additionally -- the worker thread running this foreign call may -- be unceremoniously killed, so it must be scheduled -- on an unbound thread. | PlayRisky -- ^ None of the above can happen; the call will return -- without interacting with the runtime system at all. -- Specifically: -- -- * No GC -- * No call backs -- * No blocking -- * No precise exceptions -- deriving ( Eq, Show, Data, Enum ) -- Show used just for Show Lex.Token, I think instance Outputable Safety where ppr PlaySafe = text "safe" ppr PlayInterruptible = text "interruptible" ppr PlayRisky = text "unsafe" playSafe :: Safety -> Bool playSafe PlaySafe = True playSafe PlayInterruptible = True playSafe PlayRisky = False playInterruptible :: Safety -> Bool playInterruptible PlayInterruptible = True playInterruptible _ = False {- ************************************************************************ * * \subsubsection{Calling C} * * ************************************************************************ -} data CExportSpec = CExportStatic -- foreign export ccall foo :: ty SourceText -- of the CLabelString. -- See Note [Pragma source text] in "GHC.Types.SourceText" CLabelString -- C Name of exported function CCallConv deriving Data data CCallSpec = CCallSpec CCallTarget -- What to call CCallConv -- Calling convention to use. Safety deriving( Eq ) -- The call target: -- | How to call a particular function in C-land. data CCallTarget -- An "unboxed" ccall# to named function in a particular package. = StaticTarget SourceText -- of the CLabelString. 
-- See Note [Pragma source text] in "GHC.Types.SourceText" CLabelString -- C-land name of label. (Maybe Unit) -- What package the function is in. -- If Nothing, then it's taken to be in the current package. -- Note: This information is only used for PrimCalls on Windows. -- See CLabel.labelDynamic and CoreToStg.coreToStgApp -- for the difference in representation between PrimCalls -- and ForeignCalls. If the CCallTarget is representing -- a regular ForeignCall then it's safe to set this to Nothing. -- The first argument of the import is the name of a function pointer (an Addr#). -- Used when importing a label as "foreign import ccall "dynamic" ..." Bool -- True => really a function -- False => a value; only -- allowed in CAPI imports | DynamicTarget deriving( Eq, Data ) isDynamicTarget :: CCallTarget -> Bool isDynamicTarget DynamicTarget = True isDynamicTarget _ = False {- Stuff to do with calling convention: ccall: Caller allocates parameters, *and* deallocates them. See: http://www.programmersheaven.com/2/Calling-conventions -} -- any changes here should be replicated in the Callconv type in template haskell data CCallConv = CCallConv | CApiConv | StdCallConv | PrimCallConv | JavaScriptCallConv deriving (Eq, Data, Enum) instance Outputable CCallConv where ppr StdCallConv = text "stdcall" ppr CCallConv = text "ccall" ppr CApiConv = text "capi" ppr PrimCallConv = text "prim" ppr JavaScriptCallConv = text "javascript" defaultCCallConv :: CCallConv defaultCCallConv = CCallConv {- Generate the gcc attribute corresponding to the given calling convention (used by PprAbsC): -} ccallConvAttribute :: CCallConv -> SDoc ccallConvAttribute StdCallConv = panic "ccallConvAttribute StdCallConv" ccallConvAttribute CCallConv = empty ccallConvAttribute CApiConv = empty ccallConvAttribute (PrimCallConv {}) = panic "ccallConvAttribute PrimCallConv" ccallConvAttribute JavaScriptCallConv = empty type CLabelString = FastString -- A C label, completely unencoded pprCLabelString :: 
CLabelString -> SDoc pprCLabelString lbl = ftext lbl isCLabelString :: CLabelString -> Bool -- Checks to see if this is a valid C label isCLabelString lbl = all ok (unpackFS lbl) where ok c = isAlphaNum c || c == '_' || c == '.' || c == '@' -- The '.' appears in e.g. "foo.so" in the -- module part of a ExtName. Maybe it should be separate -- Printing into C files: instance Outputable CExportSpec where ppr (CExportStatic _ str _) = pprCLabelString str instance Outputable CCallSpec where ppr (CCallSpec fun cconv safety) = hcat [ whenPprDebug callconv, ppr_fun fun, text " ::" ] where callconv = text "{-" <> ppr cconv <> text "-}" gc_suf | playSafe safety = text "_safe" | otherwise = text "_unsafe" ppr_fun (StaticTarget st lbl mPkgId isFun) = (if isFun then text "__ffi_static_ccall" else text "__ffi_static_ccall_value") <> gc_suf <+> (case mPkgId of Nothing -> empty Just pkgId -> ppr pkgId) <> text ":" <> ppr lbl <+> (pprWithSourceText st empty) ppr_fun DynamicTarget = text "__ffi_dyn_ccall" <> gc_suf <+> text "\"\"" -- The filename for a C header file -- See Note [Pragma source text] in "GHC.Types.SourceText" data Header = Header SourceText FastString deriving (Eq, Data) instance Outputable Header where ppr (Header st h) = pprWithSourceText st (doubleQuotes $ ppr h) -- | A C type, used in CAPI FFI calls data CType = CType SourceText -- See Note [Pragma source text] in "GHC.Types.SourceText" (Maybe Header) -- header to include for this type (SourceText,FastString) -- the type itself deriving (Eq, Data) instance Outputable CType where ppr (CType stp mh (stct,ct)) = pprWithSourceText stp (text "{-# CTYPE") <+> hDoc <+> pprWithSourceText stct (doubleQuotes (ftext ct)) <+> text "#-}" where hDoc = case mh of Nothing -> empty Just h -> ppr h {- ************************************************************************ * * \subsubsection{Misc} * * ************************************************************************ -} instance Binary ForeignCall where put_ bh (CCall aa) = 
put_ bh aa get bh = do aa <- get bh; return (CCall aa) instance Binary Safety where put_ bh PlaySafe = putByte bh 0 put_ bh PlayInterruptible = putByte bh 1 put_ bh PlayRisky = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return PlaySafe 1 -> return PlayInterruptible _ -> return PlayRisky instance Binary CExportSpec where put_ bh (CExportStatic ss aa ab) = do put_ bh ss put_ bh aa put_ bh ab get bh = do ss <- get bh aa <- get bh ab <- get bh return (CExportStatic ss aa ab) instance Binary CCallSpec where put_ bh (CCallSpec aa ab ac) = do put_ bh aa put_ bh ab put_ bh ac get bh = do aa <- get bh ab <- get bh ac <- get bh return (CCallSpec aa ab ac) instance Binary CCallTarget where put_ bh (StaticTarget ss aa ab ac) = do putByte bh 0 put_ bh ss put_ bh aa put_ bh ab put_ bh ac put_ bh DynamicTarget = putByte bh 1 get bh = do h <- getByte bh case h of 0 -> do ss <- get bh aa <- get bh ab <- get bh ac <- get bh return (StaticTarget ss aa ab ac) _ -> return DynamicTarget instance Binary CCallConv where put_ bh CCallConv = putByte bh 0 put_ bh StdCallConv = putByte bh 1 put_ bh PrimCallConv = putByte bh 2 put_ bh CApiConv = putByte bh 3 put_ bh JavaScriptCallConv = putByte bh 4 get bh = do h <- getByte bh case h of 0 -> return CCallConv 1 -> return StdCallConv 2 -> return PrimCallConv 3 -> return CApiConv _ -> return JavaScriptCallConv instance Binary CType where put_ bh (CType s mh fs) = do put_ bh s put_ bh mh put_ bh fs get bh = do s <- get bh mh <- get bh fs <- get bh return (CType s mh fs) instance Binary Header where put_ bh (Header s h) = put_ bh s >> put_ bh h get bh = do s <- get bh h <- get bh return (Header s h) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/ForeignStubs.hs0000644000000000000000000000546107346545000021766 0ustar0000000000000000-- | Foreign export stubs {-# LANGUAGE DerivingVia #-} {-# LANGUAGE TypeApplications #-} module GHC.Types.ForeignStubs ( ForeignStubs (..) , CHeader(..) , CStub(..) 
, initializerCStub , finalizerCStub , appendStubC ) where import {-# SOURCE #-} GHC.Cmm.CLabel import GHC.Platform import GHC.Utils.Outputable import Data.List ((++)) import Data.Monoid import Data.Semigroup import Data.Coerce data CStub = CStub { getCStub :: SDoc , getInitializers :: [CLabel] -- ^ Initializers to be run at startup -- See Note [Initializers and finalizers in Cmm] in -- "GHC.Cmm.InitFini". , getFinalizers :: [CLabel] -- ^ Finalizers to be run at shutdown } emptyCStub :: CStub emptyCStub = CStub empty [] [] instance Monoid CStub where mempty = emptyCStub instance Semigroup CStub where CStub a0 b0 c0 <> CStub a1 b1 c1 = CStub (a0 $$ a1) (b0 ++ b1) (c0 ++ c1) functionCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub functionCStub platform clbl declarations body = CStub body' [] [] where body' = vcat [ declarations , hsep [text "void", pprCLabel platform clbl, text "(void)"] , braces body ] -- | @initializerCStub fn_nm decls body@ is a 'CStub' containing C initializer -- function (e.g. an entry of the @.init_array@ section) named -- @fn_nm@ with the given body and the given set of declarations. initializerCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub initializerCStub platform clbl declarations body = functionCStub platform clbl declarations body `mappend` CStub empty [clbl] [] -- | @finalizerCStub fn_nm decls body@ is a 'CStub' containing C finalizer -- function (e.g. an entry of the @.fini_array@ section) named -- @fn_nm@ with the given body and the given set of declarations. 
finalizerCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub finalizerCStub platform clbl declarations body = functionCStub platform clbl declarations body `mappend` CStub empty [] [clbl] newtype CHeader = CHeader { getCHeader :: SDoc } instance Monoid CHeader where mempty = CHeader empty mconcat = coerce (vcat @SDoc) instance Semigroup CHeader where (<>) = coerce (($$) @SDoc) -- | Foreign export stubs data ForeignStubs = NoStubs -- ^ We don't have any stubs | ForeignStubs CHeader CStub -- ^ There are some stubs. Parameters: -- -- 1) Header file prototypes for -- "foreign exported" functions -- -- 2) C stubs to use when calling -- "foreign exported" functions appendStubC :: ForeignStubs -> CStub -> ForeignStubs appendStubC NoStubs c_code = ForeignStubs mempty c_code appendStubC (ForeignStubs h c) c_code = ForeignStubs h (c `mappend` c_code) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/GREInfo.hs0000644000000000000000000003175207346545000020607 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DerivingStrategies #-} -- | Renamer-level information about 'Name's. -- -- Renamer equivalent of 'TyThing'. module GHC.Types.GREInfo where import GHC.Prelude import GHC.Types.Basic import GHC.Types.FieldLabel import GHC.Types.Name import GHC.Types.Unique import GHC.Types.Unique.Set import GHC.Utils.Outputable import GHC.Utils.Panic import Control.DeepSeq ( NFData(..), deepseq ) import Data.Data ( Data ) import Data.List.NonEmpty ( NonEmpty ) import qualified Data.List.NonEmpty as NonEmpty {-********************************************************************** * * GREInfo * * ************************************************************************ Note [GREInfo] ~~~~~~~~~~~~~~ In the renamer, we sometimes need a bit more information about a 'Name', e.g. whether it is a type constructor, class, data constructor, record field, etc. 
For example, when typechecking record construction, the renamer needs to look up the fields of the data constructor being used (see e.g. GHC.Rename.Pat.rnHsRecFields). Extra information also allows us to provide better error messages when a fatal error occurs in the renamer, as it allows us to distinguish classes, type families, type synonyms, etc. For imported Names, we have access to the full type information in the form of a TyThing (although see Note [Retrieving the GREInfo from interfaces]). However, for Names in the module currently being renamed, we don't yet have full information. Instead of using TyThing, we use the GREInfo type, and this information gets affixed to each element in the GlobalRdrEnv. This allows us to treat imported and local Names in a consistent manner: always look at the GREInfo. Note [Retrieving the GREInfo from interfaces] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we have a TyThing, we can easily compute the corresponding GREInfo: this is done in GHC.Types.TyThing.tyThingGREInfo. However, one often needs to produce GlobalRdrElts (and thus their GREInfos) directly after loading interface files, before they are typechecked. For example: - GHC.Tc.Module.tcRnModuleTcRnM first calls tcRnImports, which starts off calling rnImports which transitively calls filterImports. That function is responsible for coughing up GlobalRdrElts (and their GREInfos) obtained from interfaces, but we will only typecheck the interfaces after we have finished processing the imports (see e.g. the logic at the start of tcRnImports which sets eps_is_boot, which decides whether we should look in the boot or non-boot interface for any particular module). - GHC.Tc.Utils.Backpack.mergeSignatures first loads the relevant signature interfaces to merge them, but only later on does it typecheck them. 
In both of these examples, what's important is that we **lazily** produce the GREInfo: it should only be consulted once the interfaces have been typechecked, which will add the necessary information to the type-level environment. In particular, the respective functions 'filterImports' and 'mergeSignatures' should NOT force the gre_info field. We delay the loading of interfaces by making the gre_info field of 'GlobalRdrElt' a thunk which, when forced, loads the interface, looks up the 'Name' in the type environment to get its associated TyThing, and computes the GREInfo from that. See 'GHC.Rename.Env.lookupGREInfo'. A possible alternative design would be to change the AvailInfo datatype to also store GREInfo. We currently don't do that, as this would mean that every time an interface re-exports something it has to also provide its GREInfo, which could lead to bloat. Note [Forcing GREInfo] ~~~~~~~~~~~~~~~~~~~~~~ The GREInfo field of a GlobalRdrElt needs to be lazy, as explained in Note [Retrieving the GREInfo from interfaces]. For imported things, this field is usually a thunk which looks up the GREInfo in a type environment (see GHC.Rename.Env.lookupGREInfo). We thus need to be careful not to introduce space leaks: such thunks could end up retaining old type environments, which would violate invariant (5) of Note [GHC Heap Invariants] in GHC.Driver.Make. This can happen, for example, when reloading in GHCi (see e.g. test T15369, which can trigger the ghci leak check if we're not careful). A naive approach is to simply deeply force the whole GlobalRdrEnv. However, forcing the GREInfo thunks can force the loading of interface files which we otherwise might not need to load, so it leads to wasted work. Instead, whenever we are about to store the GlobalRdrEnv somewhere (such as in ModDetails), we dehydrate it by stripping away the GREInfo field, turning it into (). 
See 'forceGlobalRdrEnv' and its cousin 'hydrateGlobalRdrEnv', as well as Note [IfGlobalRdrEnv] in GHC.Types.Name.Reader. Search for references to this note in the code for illustration. -} -- | Information about a 'Name' that is pertinent to the renamer. -- -- See Note [GREInfo] data GREInfo -- | No particular information... e.g. a function = Vanilla -- | An unbound GRE... could be anything | UnboundGRE -- | 'TyCon' | IAmTyCon !(TyConFlavour Name) -- | 'ConLike' | IAmConLike !ConInfo -- ^ The constructor fields. -- See Note [Local constructor info in the renamer]. -- | Record field | IAmRecField !RecFieldInfo deriving Data instance NFData GREInfo where rnf Vanilla = () rnf UnboundGRE = () rnf (IAmTyCon tc) = rnf tc rnf (IAmConLike info) = rnf info rnf (IAmRecField info) = rnf info plusGREInfo :: GREInfo -> GREInfo -> GREInfo plusGREInfo Vanilla Vanilla = Vanilla plusGREInfo UnboundGRE UnboundGRE = UnboundGRE plusGREInfo (IAmTyCon {}) info2@(IAmTyCon {}) = info2 plusGREInfo (IAmConLike {}) info2@(IAmConLike {}) = info2 plusGREInfo (IAmRecField {}) info2@(IAmRecField {}) = info2 plusGREInfo info1 info2 = pprPanic "plusInfo" $ vcat [ text "info1:" <+> ppr info1 , text "info2:" <+> ppr info2 ] instance Outputable GREInfo where ppr Vanilla = text "Vanilla" ppr UnboundGRE = text "UnboundGRE" ppr (IAmTyCon flav) = text "TyCon" <+> ppr flav ppr (IAmConLike info) = text "ConLike" <+> ppr info ppr (IAmRecField info) = text "RecField" <+> ppr info {-********************************************************************** * * Constructor info * * ************************************************************************ Note [Local constructor info in the renamer] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As explained in Note [GREInfo], information pertinent to the renamer is stored using the GREInfo datatype. What information do we need about constructors? 
Consider the following example: data T = T1 { x, y :: Int } | T2 { x :: Int } | T3 | T4 Int Bool We need to know: * The fields of the data constructor, so that - We can complain if you say `T1 { v = 3 }`, where `v` is not a field of `T1` See the following call stack * GHC.Rename.Expr.rnExpr (RecordCon case) * GHC.Rename.Pat.rnHsRecFields * GHC.Rename.Env.lookupRecFieldOcc - Ditto if you pattern match on `T1 { v = x }`. See the following call stack * GHC.Rename.Pat.rnHsRecPatsAndThen * GHC.Rename.Pat.rnHsRecFields * GHC.Rename.Env.lookupRecFieldOcc - We can fill in the dots if you say `T1 {..}` in construction or pattern matching See GHC.Rename.Pat.rnHsRecFields.rn_dotdot This information is stored in ConFieldInfo. * Whether the constructor is nullary. We need to know this to accept `T2 {..}`, and `T3 {..}`, but reject `T4 {..}`, in both construction and pattern matching. See GHC.Rename.Pat.rnHsRecFields.rn_dotdot and Note [Nullary constructors and empty record wildcards] This information is stored in ConFieldInfo. * Whether the constructor is a data constructor or a pattern synonym, and, if it is a data constructor, what are the other data constructors of the parent type. This is used for computing irrefutability of pattern matches when deciding how to desugar do blocks (whether to use a fail operation). See GHC.Hs.Pat.isIrrefutableHsPat. This information is stored in ConLikeInfo. Note [Nullary constructors and empty record wildcards] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A nullary constructor is one with no arguments. For example, both `data T = MkT` and `data T = MkT {}` are nullary. For consistency and TH convenience, it was agreed that a `{..}` match or usage on nullary constructors would be accepted. This is done as as per https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0496-empty-record-wildcards.rst -} -- | Information known to the renamer about a data constructor or pattern synonym. 
-- -- See Note [Local constructor info in the renamer]. data ConInfo = ConInfo { conLikeInfo :: !ConLikeInfo , conFieldInfo :: !ConFieldInfo } deriving stock Eq deriving Data -- | Whether a constructor is a data constructor or a pattern synonym. -- -- See Note [Local constructor info in the renamer]. data ConLikeInfo = ConIsData { conLikeDataCons :: [Name] -- ^ All the 'DataCon's of the parent 'TyCon', -- including the 'ConLike' itself. -- -- Used in 'GHC.Hs.Pat.isIrrefutableHsPat'. } | ConIsPatSyn deriving stock Eq deriving Data instance NFData ConInfo where rnf (ConInfo a b) = rnf a `seq` rnf b instance NFData ConLikeInfo where rnf (ConIsData a) = rnf a rnf ConIsPatSyn = () -- | Information about the record fields of a constructor. -- -- See Note [Local constructor info in the renamer] data ConFieldInfo = ConHasRecordFields (NonEmpty FieldLabel) | ConHasPositionalArgs | ConIsNullary deriving stock Eq deriving Data instance NFData ConFieldInfo where rnf ConIsNullary = () rnf ConHasPositionalArgs = () rnf (ConHasRecordFields flds) = rnf flds mkConInfo :: ConLikeInfo -> Arity -> [FieldLabel] -> ConInfo mkConInfo con_ty n flds = ConInfo { conLikeInfo = con_ty , conFieldInfo = mkConFieldInfo n flds } mkConFieldInfo :: Arity -> [FieldLabel] -> ConFieldInfo mkConFieldInfo 0 _ = ConIsNullary mkConFieldInfo _ fields = maybe ConHasPositionalArgs ConHasRecordFields $ NonEmpty.nonEmpty fields conInfoFields :: ConInfo -> [FieldLabel] conInfoFields = conFieldInfoFields . 
conFieldInfo conFieldInfoFields :: ConFieldInfo -> [FieldLabel] conFieldInfoFields (ConHasRecordFields fields) = NonEmpty.toList fields conFieldInfoFields ConHasPositionalArgs = [] conFieldInfoFields ConIsNullary = [] instance Outputable ConInfo where ppr (ConInfo { conLikeInfo = con_ty, conFieldInfo = fld_info }) = text "ConInfo" <+> braces (text "con_ty:" <+> ppr con_ty <> comma <+> text "fields:" <+> ppr fld_info) instance Outputable ConLikeInfo where ppr (ConIsData cons) = text "ConIsData" <+> parens (ppr cons) ppr ConIsPatSyn = text "ConIsPatSyn" instance Outputable ConFieldInfo where ppr ConIsNullary = text "ConIsNullary" ppr ConHasPositionalArgs = text "ConHasPositionalArgs" ppr (ConHasRecordFields fieldLabels) = text "ConHasRecordFields" <+> braces (ppr fieldLabels) -- | The 'Name' of a 'ConLike'. -- -- Useful when we are in the renamer and don't yet have a full 'DataCon' or -- 'PatSyn' to hand. data ConLikeName = DataConName { conLikeName_Name :: !Name } | PatSynName { conLikeName_Name :: !Name } deriving (Eq, Data) instance Outputable ConLikeName where ppr = ppr . conLikeName_Name instance OutputableBndr ConLikeName where pprInfixOcc con = pprInfixName (conLikeName_Name con) pprPrefixOcc con = pprPrefixName (conLikeName_Name con) instance Uniquable ConLikeName where getUnique = getUnique . conLikeName_Name instance NFData ConLikeName where rnf = rnf . conLikeName_Name {-********************************************************************** * * Record field info * * **********************************************************************-} data RecFieldInfo = RecFieldInfo { recFieldLabel :: !FieldLabel , recFieldCons :: !(UniqSet ConLikeName) -- ^ The constructors which have this field label. -- Always non-empty. -- -- NB: these constructors will always share a single parent, -- as the field label disambiguates between parents in the presence -- of duplicate record fields. 
} deriving (Eq, Data) instance NFData RecFieldInfo where rnf (RecFieldInfo lbl cons) = rnf lbl `seq` nonDetStrictFoldUniqSet deepseq () cons instance Outputable RecFieldInfo where ppr (RecFieldInfo { recFieldLabel = fl, recFieldCons = cons }) = text "RecFieldInfo" <+> braces (text "recFieldLabel:" <+> ppr fl <> comma <+> text "recFieldCons:" <+> pprWithCommas ppr (nonDetEltsUniqSet cons)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Hint.hs0000644000000000000000000006214107346545000020254 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.Types.Hint ( GhcHint(..) , AvailableBindings(..) , InstantiationSuggestion(..) , LanguageExtensionHint(..) , ImportSuggestion(..) , HowInScope(..) , SimilarName(..) , StarIsType(..) , UntickedPromotedThing(..) , AssumedDerivingStrategy(..) , pprUntickedConstructor, isBareSymbol , suggestExtension , suggestExtensionWithInfo , suggestExtensions , suggestExtensionsWithInfo , suggestAnyExtension , suggestAnyExtensionWithInfo , useExtensionInOrderTo , noStarIsTypeHints ) where import Language.Haskell.Syntax.Expr (LHsExpr) import Language.Haskell.Syntax (LPat, LIdP, LHsSigType, LHsSigWcType) import GHC.Prelude import qualified Data.List.NonEmpty as NE import qualified GHC.LanguageExtensions as LangExt import GHC.Unit.Module (ModuleName, Module) import GHC.Unit.Module.Imported (ImportedModsVal) import GHC.Hs.Extension (GhcTc, GhcRn) import GHC.Core.Coercion import GHC.Core.FamInstEnv (FamFlavor) import GHC.Core.TyCon (TyCon) import GHC.Core.Type (Type) import GHC.Types.Fixity (LexicalFixity(..)) import GHC.Types.Name (Name, NameSpace, OccName (occNameFS), isSymOcc, nameOccName) import GHC.Types.Name.Reader (RdrName (Unqual), ImpDeclSpec) import GHC.Types.SrcLoc (SrcSpan) import GHC.Types.Basic (Activation, RuleName) import GHC.Parser.Errors.Basic import GHC.Utils.Outputable import GHC.Data.FastString (fsLit, FastString) import Data.Typeable ( Typeable ) import Data.Map.Strict (Map) -- | The bindings we have 
available in scope when -- suggesting an explicit type signature. data AvailableBindings = NamedBindings (NE.NonEmpty Name) | UnnamedBinding -- ^ An unknown binding (i.e. too complicated to turn into a 'Name') data LanguageExtensionHint = -- | Suggest to enable the input extension. This is the hint that -- GHC emits if this is not a \"known\" fix, i.e. this is GHC giving -- its best guess on what extension might be necessary to make a -- certain program compile. For example, GHC might suggests to -- enable 'BlockArguments' when the user simply formatted incorrectly -- the input program, so GHC here is trying to be as helpful as -- possible. -- If the input 'SDoc' is not empty, it will contain some extra -- information about the why the extension is required, but -- it's totally irrelevant/redundant for IDEs and other tools. SuggestSingleExtension !SDoc !LangExt.Extension -- | Suggest to enable the input extensions. The list -- is to be intended as /disjunctive/ i.e. the user is -- suggested to enable /any/ of the extensions listed. If -- the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestAnyExtension !SDoc [LangExt.Extension] -- | Suggest to enable the input extensions. The list -- is to be intended as /conjunctive/ i.e. the user is -- suggested to enable /all/ the extensions listed. If -- the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestExtensions !SDoc [LangExt.Extension] -- | Suggest to enable the input extension in order to fix -- a certain problem. This is the suggestion that GHC emits when -- is more-or-less clear \"what's going on\". 
For example, if -- both 'DeriveAnyClass' and 'GeneralizedNewtypeDeriving' are -- turned on, the right thing to do is to enabled 'DerivingStrategies', -- so in contrast to 'SuggestSingleExtension' GHC will be a bit more -- \"imperative\" (i.e. \"Use X Y Z in order to ... \"). -- If the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestExtensionInOrderTo !SDoc !LangExt.Extension -- | Suggests a single extension without extra user info. suggestExtension :: LangExt.Extension -> GhcHint suggestExtension ext = SuggestExtension (SuggestSingleExtension empty ext) -- | Like 'suggestExtension' but allows supplying extra info for the user. suggestExtensionWithInfo :: SDoc -> LangExt.Extension -> GhcHint suggestExtensionWithInfo extraInfo ext = SuggestExtension (SuggestSingleExtension extraInfo ext) -- | Suggests to enable /every/ extension in the list. suggestExtensions :: [LangExt.Extension] -> GhcHint suggestExtensions exts = SuggestExtension (SuggestExtensions empty exts) -- | Like 'suggestExtensions' but allows supplying extra info for the user. suggestExtensionsWithInfo :: SDoc -> [LangExt.Extension] -> GhcHint suggestExtensionsWithInfo extraInfo exts = SuggestExtension (SuggestExtensions extraInfo exts) -- | Suggests to enable /any/ extension in the list. suggestAnyExtension :: [LangExt.Extension] -> GhcHint suggestAnyExtension exts = SuggestExtension (SuggestAnyExtension empty exts) -- | Like 'suggestAnyExtension' but allows supplying extra info for the user. suggestAnyExtensionWithInfo :: SDoc -> [LangExt.Extension] -> GhcHint suggestAnyExtensionWithInfo extraInfo exts = SuggestExtension (SuggestAnyExtension extraInfo exts) useExtensionInOrderTo :: SDoc -> LangExt.Extension -> GhcHint useExtensionInOrderTo extraInfo ext = SuggestExtension (SuggestExtensionInOrderTo extraInfo ext) -- | A type for hints emitted by GHC. 
-- A /hint/ suggests a possible way to deal with a particular warning or error. data GhcHint = {-| An \"unknown\" hint. This type constructor allows arbitrary -- hints to be embedded. The typical use case would be GHC plugins -- willing to emit hints alongside their custom diagnostics. -} forall a. (Outputable a, Typeable a) => UnknownHint a {-| Suggests adding a particular language extension. GHC will do its best trying to guess when the user is using the syntax of a particular language extension without having the relevant extension enabled. Example: If the user uses the keyword \"mdo\" (and we are in a monadic block), but the relevant extension is not enabled, GHC will emit a 'SuggestExtension RecursiveDo'. Test case(s): parser/should_fail/T12429, parser/should_fail/T8501c, parser/should_fail/T18251e, ... (and many more) -} | SuggestExtension !LanguageExtensionHint {-| Suggests possible corrections of a misspelled pragma. Its argument represents all applicable suggestions. Example: {-# LNGUAGE BangPatterns #-} Test case(s): parser/should_compile/T21589 -} | SuggestCorrectPragmaName ![String] {-| Suggests that a monadic code block is probably missing a \"do\" keyword. Example: main = putStrLn "hello" putStrLn "world" Test case(s): parser/should_fail/T8501a, parser/should_fail/readFail007, parser/should_fail/InfixAppPatErr, parser/should_fail/T984 -} | SuggestMissingDo {-| Suggests that a \"let\" expression is needed in a \"do\" block. Test cases: None (that explicitly test this particular hint is emitted). -} | SuggestLetInDo {-| Suggests to add an \".hsig\" signature file to the Cabal manifest. Triggered by: 'GHC.Driver.Errors.Types.DriverUnexpectedSignature', if Cabal is being used. Example: See comment of 'DriverUnexpectedSignature'. Test case(s): driver/T12955 -} | SuggestAddSignatureCabalFile !ModuleName {-| Suggests to explicitly list the instantiations for the signatures in the GHC invocation command. 
Triggered by: 'GHC.Driver.Errors.Types.DriverUnexpectedSignature', if Cabal is /not/ being used. Example: See comment of 'DriverUnexpectedSignature'. Test case(s): driver/T12955 -} | SuggestSignatureInstantiations !ModuleName [InstantiationSuggestion] {-| Suggests to use spaces instead of tabs. Triggered by: 'GHC.Parser.Errors.Types.PsWarnTab'. Examples: None Test Case(s): None -} | SuggestUseSpaces {-| Suggests adding a whitespace after the given symbol. Examples: None Test Case(s): parser/should_compile/T18834a.hs -} | SuggestUseWhitespaceAfter !OperatorWhitespaceSymbol {-| Suggests adding a whitespace around the given operator symbol, as it might be repurposed as special syntax by a future language extension. The second parameter is how such operator occurred, if in a prefix, suffix or tight infix position. Triggered by: 'GHC.Parser.Errors.Types.PsWarnOperatorWhitespace'. Example: h a b = a+b -- not OK, no spaces around '+'. Test Case(s): parser/should_compile/T18834b.hs -} | SuggestUseWhitespaceAround !String !OperatorWhitespaceOccurrence {-| Suggests wrapping an expression in parentheses Examples: None Test Case(s): None -} | SuggestParentheses {-| Suggests to increase the -fmax-pmcheck-models limit for the pattern match checker. Triggered by: 'GHC.HsToCore.Errors.Types.DsMaxPmCheckModelsReached' Test case(s): pmcheck/should_compile/TooManyDeltas pmcheck/should_compile/TooManyDeltas pmcheck/should_compile/T11822 -} | SuggestIncreaseMaxPmCheckModels {-| Suggests adding a type signature, typically to resolve ambiguity or help GHC inferring types. -} | SuggestAddTypeSignatures AvailableBindings {-| Suggests to explicitly discard the result of a monadic action by binding the result to the '_' wilcard. Example: main = do _ <- getCurrentTime -} | SuggestBindToWildcard !(LHsExpr GhcTc) | SuggestAddInlineOrNoInlinePragma !Var !Activation | SuggestAddPhaseToCompetingRule !RuleName {-| Suggests adding an identifier to the export list of a signature. 
-} | SuggestAddToHSigExportList !Name !(Maybe Module) {-| Suggests increasing the limit for the number of iterations in the simplifier. -} | SuggestIncreaseSimplifierIterations {-| Suggests to explicitly import 'Type' from the 'Data.Kind' module, because using "*" to mean 'Data.Kind.Type' relies on the StarIsType extension, which will become deprecated in the future. Triggered by: 'GHC.Parser.Errors.Types.PsWarnStarIsType' Example: None Test case(s): wcompat-warnings/WCompatWarningsOn.hs -} | SuggestUseTypeFromDataKind (Maybe RdrName) {-| Suggests placing the 'qualified' keyword /after/ the module name. Triggered by: 'GHC.Parser.Errors.Types.PsWarnImportPreQualified' Example: None Test case(s): module/mod184.hs -} | SuggestQualifiedAfterModuleName {-| Suggests using TemplateHaskell quotation syntax. Triggered by: 'GHC.Parser.Errors.Types.PsErrEmptyDoubleQuotes' only if TemplateHaskell is enabled. Example: None Test case(s): parser/should_fail/T13450TH.hs -} | SuggestThQuotationSyntax {-| Suggests alternative roles in case we found an illegal one. Triggered by: 'GHC.Parser.Errors.Types.PsErrIllegalRoleName' Example: None Test case(s): roles/should_fail/Roles7.hs -} | SuggestRoles [Role] {-| Suggests qualifying the '*' operator in modules where StarIsType is enabled. Triggered by: 'GHC.Parser.Errors.Types.PsWarnStarBinder' Test case(s): warnings/should_compile/StarBinder.hs -} | SuggestQualifyStarOperator {-| Suggests that for a type signature 'M.x :: ...' the qualifier should be omitted in order to be accepted by GHC. Triggered by: 'GHC.Parser.Errors.Types.PsErrInvalidTypeSignature' Test case(s): module/mod98 -} | SuggestTypeSignatureRemoveQualifier {-| Suggests to move an orphan instance (for a typeclass or a type or data family), or to newtype-wrap it. 
Triggered by: 'GHC.Tc.Errors.Types.TcRnOrphanInstance' Test cases(s): warnings/should_compile/T9178 typecheck/should_compile/T4912 indexed-types/should_compile/T22717_fam_orph -} | SuggestFixOrphanInst { isFamilyInstance :: Maybe FamFlavor } -- ^ Whether this is a family instance (of the given 'FamFlavor'), -- or a class instance ('Nothing'). {-| Suggests to use a standalone deriving declaration when GHC can't derive a typeclass instance in a trivial way. Triggered by: 'GHC.Tc.Errors.Types.DerivBadErrConstructor' Test cases(s): typecheck/should_fail/tcfail086 -} | SuggestAddStandaloneDerivation {-| Suggests to add a standalone kind signature when GHC can't perform kind inference. Triggered by: 'GHC.Tc.Errors.Types.TcRnInvisBndrWithoutSig' Test case(s): typecheck/should_fail/T22560_fail_d -} | SuggestAddStandaloneKindSignature Name {-| Suggests the user to fill in the wildcard constraint to disambiguate which constraint that is. Example: deriving instance _ => Eq (Foo f a) Triggered by: 'GHC.Tc.Errors.Types.DerivBadErrConstructor' Test cases(s): partial-sigs/should_fail/T13324_fail2 -} | SuggestFillInWildcardConstraint {-| Suggests to use the appropriate Template Haskell tick: a single tick for a term-level 'NameSpace', or a double tick for a type-level 'NameSpace'. Triggered by: 'GHC.Tc.Errors.Types.TcRnIncorrectNameSpace'. -} | SuggestAppropriateTHTick NameSpace {-| Suggests enabling -ddump-splices to help debug an issue when a 'Name' is not in scope or is used in multiple different namespaces (e.g. both as a data constructor and a type constructor). Concomitant with 'NoExactName' or 'SameName' errors, see e.g. "GHC.Rename.Env.lookupExactOcc_either". Test cases: T5971, T7241, T13937. -} | SuggestDumpSlices {-| Suggests adding a tick to refer to something which has been promoted to the type level, e.g. a data constructor. Test cases: T9778, T19984. -} | SuggestAddTick UntickedPromotedThing {-| Something is split off from its corresponding declaration. 
For example, a datatype is given a role declaration in a different module. Test cases: T495, T8485, T2713, T5533. -} | SuggestMoveToDeclarationSite -- TODO: remove the SDoc argument. SDoc -- ^ fixity declaration, role annotation, type signature, ... RdrName -- ^ the 'RdrName' for the declaration site {-| Suggest a similar name that the user might have meant, e.g. suggest 'traverse' when the user has written @travrese@. Test case: mod73. -} | SuggestSimilarNames RdrName (NE.NonEmpty SimilarName) {-| Remind the user that the field selector has been suppressed because of -XNoFieldSelectors. Test cases: NFSSuppressed, records-nofieldselectors. -} | RemindFieldSelectorSuppressed { suppressed_selector :: RdrName , suppressed_parents :: [Name] } {-| Suggest importing from a module, removing a @hiding@ clause, or explain to the user that we couldn't find a module with the given 'ModuleName'. Test cases: mod28, mod36, mod87, mod114, ... -} | ImportSuggestion OccName ImportSuggestion {-| Found a pragma in the body of a module, suggest placing it in the header. -} | SuggestPlacePragmaInHeader {-| Suggest using pattern matching syntax for a non-bidirectional pattern synonym Test cases: patsyn/should_fail/record-exquant typecheck/should_fail/T3176 -} | SuggestPatternMatchingSyntax {-| Suggest tips for making a definition visible for the purpose of writing a SPECIALISE pragma for it in a different module. Test cases: none -} | SuggestSpecialiseVisibilityHints Name {-| Suggest renaming implicitly quantified type variable in case it captures a term's name. -} | SuggestRenameTypeVariable | SuggestExplicitBidiPatSyn Name (LPat GhcRn) [LIdP GhcRn] {-| Suggest enabling one of the SafeHaskell modes Safe, Unsafe or Trustworthy. -} | SuggestSafeHaskell {-| Suggest removing a record wildcard from a pattern when it doesn't bind anything useful. 
-} | SuggestRemoveRecordWildcard {-| Suggest moving a method implementation to a different instance to its superclass that defines the canonical version of the method. -} | SuggestMoveNonCanonicalDefinition Name -- ^ move the implementation from this method Name -- ^ ... to this method String -- ^ Documentation URL {-| Suggest to increase the solver maximum reduction depth -} | SuggestIncreaseReductionDepth {-| Suggest removing a method implementation when a superclass defines the canonical version of that method. -} | SuggestRemoveNonCanonicalDefinition Name -- ^ method with non-canonical implementation Name -- ^ possible other method to use as the RHS instead String -- ^ Documentation URL {-| Suggest eta-reducing a type synonym used in the implementation of abstract data. -} | SuggestEtaReduceAbsDataTySyn TyCon {-| Remind the user that there is no field of a type and name in the record, constructors are in the usual order $x$, $r$, $a$ -} | RemindRecordMissingField FastString Type Type {-| Suggest binding the type variable on the LHS of the type declaration -} | SuggestBindTyVarOnLhs RdrName {-| Suggest using an anonymous wildcard instead of a named wildcard -} | SuggestAnonymousWildcard {-| Suggest explicitly quantifying a type variable instead of relying on implicit quantification -} | SuggestExplicitQuantification RdrName {-| Suggest binding explicitly; e.g data T @k (a :: F k) = .... -} | SuggestBindTyVarExplicitly Name {-| Suggest a default declaration; e.g @default Cls (Ty1, Ty2)@ -} | SuggestDefaultDeclaration TyCon [Type] {-| Suggest using explicit deriving strategies for a deriving clause. Triggered by: 'GHC.Tc.Errors.Types.TcRnNoDerivingClauseStrategySpecified'. See comment of 'TcRnNoDerivingClauseStrategySpecified' for context. -} | SuggestExplicitDerivingClauseStrategies (Map AssumedDerivingStrategy [LHsSigType GhcRn]) -- ^ Those deriving clauses that we assumed a particular strategy for. 
{-| Suggest using an explicit deriving strategy for a standalone deriving instance. Triggered by: 'GHC.Tc.Errors.Types.TcRnNoStandaloneDerivingStrategySpecified'. See comment of 'TcRnNoStandaloneDerivingStrategySpecified' for context. -} | SuggestExplicitStandaloneDerivingStrategy AssumedDerivingStrategy -- ^ The deriving strategy we assumed (LHsSigWcType GhcRn) -- ^ The instance signature (e.g 'Show a => Show (T a)') {-| Suggest add parens to pattern `e -> p :: t` -} | SuggestParenthesizePatternRHS -- | The deriving strategy that was assumed when not explicitly listed in the -- source. This is used solely by the missing-deriving-strategies warning. -- There's no `Via` case because we never assume that. data AssumedDerivingStrategy = AssumedStockStrategy | AssumedAnyclassStrategy | AssumedNewtypeStrategy deriving (Eq, Ord) instance Outputable AssumedDerivingStrategy where ppr AssumedStockStrategy = text "stock" ppr AssumedAnyclassStrategy = text "anyclass" ppr AssumedNewtypeStrategy = text "newtype" -- | An 'InstantiationSuggestion' for a '.hsig' file. This is generated -- by GHC in case of a 'DriverUnexpectedSignature' and suggests a way -- to instantiate a particular signature, where the first argument is -- the signature name and the second is the module where the signature -- was defined. -- Example: -- -- src/MyStr.hsig:2:11: error: -- Unexpected signature: ‘MyStr’ -- (Try passing -instantiated-with="MyStr=" -- replacing as necessary.) data InstantiationSuggestion = InstantiationSuggestion !ModuleName !Module -- | Suggest how to fix an import. data ImportSuggestion -- | Some module exports what we want, but we aren't explicitly importing it. = CouldImportFrom (NE.NonEmpty (Module, ImportedModsVal)) -- | Some module exports what we want, but we are explicitly hiding it. | CouldUnhideFrom (NE.NonEmpty (Module, ImportedModsVal)) -- | The module exports what we want, but it isn't a type. 
| CouldRemoveTypeKeyword ModuleName -- | The module exports what we want, but it's a type and we have @ExplicitNamespaces@ on. | CouldAddTypeKeyword ModuleName -- | Suggest importing a data constructor to bring it into scope | ImportDataCon -- | Where to suggest importing the 'DataCon' from. -- -- The 'Bool' tracks whether to suggest using an import of the form -- @import (pattern Foo)@, depending on whether @-XPatternSynonyms@ -- was enabled. { ies_suggest_import_from :: Maybe (ModuleName, Bool) -- | The 'OccName' of the parent of the data constructor. , ies_parent :: OccName } -- | Explain how something is in scope. data HowInScope -- | It was locally bound at this particular source location. = LocallyBoundAt SrcSpan -- | It was imported by this particular import declaration. | ImportedBy ImpDeclSpec data SimilarName = SimilarName Name | SimilarRdrName RdrName (Maybe HowInScope) -- | Something is promoted to the type-level without a promotion tick. data UntickedPromotedThing = UntickedConstructor LexicalFixity Name | UntickedExplicitList pprUntickedConstructor :: LexicalFixity -> Name -> SDoc pprUntickedConstructor fixity nm = case fixity of Prefix -> pprPrefixVar is_op ppr_nm -- e.g. (:) and '(:) Infix -> pprInfixVar is_op ppr_nm -- e.g. `Con` and '`Con` where ppr_nm = ppr nm is_op = isSymOcc (nameOccName nm) -- | Whether a constructor name is printed out as a bare symbol, e.g. @:@. -- -- True for symbolic names in infix position. -- -- Used for pretty-printing. isBareSymbol :: LexicalFixity -> Name -> Bool isBareSymbol fixity nm | isSymOcc (nameOccName nm) , Infix <- fixity = True | otherwise = False -------------------------------------------------------------------------------- -- | Whether '*' is a synonym for 'Data.Kind.Type'. data StarIsType = StarIsNotType | StarIsType -- | Display info about the treatment of '*' under NoStarIsType. 
-- -- With StarIsType, three properties of '*' hold: -- -- (a) it is not an infix operator -- (b) it is always in scope -- (c) it is a synonym for Data.Kind.Type -- -- However, the user might not know that they are working on a module with -- NoStarIsType and write code that still assumes (a), (b), and (c), which -- actually do not hold in that module. -- -- Violation of (a) shows up in the parser. For instance, in the following -- examples, we have '*' not applied to enough arguments: -- -- data A :: * -- data F :: * -> * -- -- Violation of (b) or (c) show up in the renamer and the typechecker -- respectively. For instance: -- -- type K = Either * Bool -- -- This will parse differently depending on whether StarIsType is enabled, -- but it will parse nonetheless. With NoStarIsType it is parsed as a type -- operator, thus we have ((*) Either Bool). Now there are two cases to -- consider: -- -- 1. There is no definition of (*) in scope. In this case the renamer will -- fail to look it up. This is a violation of assumption (b). -- -- 2. There is a definition of the (*) type operator in scope (for example -- coming from GHC.TypeNats). In this case the user will get a kind -- mismatch error. This is a violation of assumption (c). -- -- The user might unknowingly be working on a module with NoStarIsType -- or use '*' as 'Data.Kind.Type' out of habit. So it is important to give a -- hint whenever an assumption about '*' is violated. Unfortunately, it is -- somewhat difficult to deal with (c), so we limit ourselves to (a) and (b). -- -- 'noStarIsTypeHints' returns appropriate hints to the user depending on the -- extensions enabled in the module and the name that triggered the error. -- That is, if we have NoStarIsType and the error is related to '*' or its -- Unicode variant, we will suggest using 'Data.Kind.Type'; otherwise we won't -- suggest anything. 
noStarIsTypeHints :: StarIsType -> RdrName -> [GhcHint] noStarIsTypeHints is_star_type rdr_name -- One might ask: if can use `sdocOption sdocStarIsType` here, why bother to -- take star_is_type as input? Why not refactor? -- -- The reason is that `sdocOption sdocStarIsType` would indicate that -- StarIsType is enabled in the module that tries to load the problematic -- definition, not in the module that is being loaded. -- -- So if we have 'data T :: *' in a module with NoStarIsType, then the hint -- must be displayed even if we load this definition from a module (or GHCi) -- with StarIsType enabled! -- | isUnqualStar , StarIsNotType <- is_star_type = [SuggestUseTypeFromDataKind (Just rdr_name)] | otherwise = [] where -- Does rdr_name look like the user might have meant the '*' kind by it? -- We focus on unqualified stars specifically, because qualified stars are -- treated as type operators even under StarIsType. isUnqualStar | Unqual occName <- rdr_name = let fs = occNameFS occName in fs == fsLit "*" || fs == fsLit "★" | otherwise = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Hint/0000755000000000000000000000000007346545000017714 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Hint/Ppr.hs0000644000000000000000000004247207346545000021022 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# OPTIONS_GHC -Wno-orphans #-} {- instance Outputable GhcHint -} module GHC.Types.Hint.Ppr ( perhapsAsPat -- also, and more interesting: instance Outputable GhcHint ) where import GHC.Prelude import GHC.Parser.Errors.Basic import GHC.Types.Hint import GHC.Core.FamInstEnv (FamFlavor(..)) import GHC.Core.TyCon import GHC.Core.TyCo.Rep ( mkVisFunTyMany ) import GHC.Hs.Expr () -- instance Outputable import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Reader (RdrName,ImpDeclSpec (..), rdrNameOcc, rdrNameSpace) import GHC.Types.SrcLoc (SrcSpan(..), srcSpanStartLine) import GHC.Unit.Module.Imported 
(ImportedModsVal(..)) import GHC.Unit.Types import GHC.Utils.Outputable import GHC.Driver.Flags import qualified Data.List.NonEmpty as NE import qualified Data.Map.Strict as Map import qualified GHC.LanguageExtensions as LangExt instance Outputable GhcHint where ppr = \case UnknownHint m -> ppr m SuggestExtension extHint -> case extHint of SuggestSingleExtension extraUserInfo ext -> ("Perhaps you intended to use" <+> extension_with_implied ext) $$ extraUserInfo SuggestAnyExtension extraUserInfo exts -> (enable "any" <+> unquotedListWith "or" (map implied exts)) $$ extraUserInfo SuggestExtensions extraUserInfo exts -> (enable "all" <+> unquotedListWith "and" (map implied exts)) $$ extraUserInfo SuggestExtensionInOrderTo extraUserInfo ext -> ("Use" <+> extension_with_implied ext) $$ extraUserInfo where extension_with_implied ext = "the" <+> quotes (ppr ext) <+> "extension" <+> pprImpliedExtensions ext implied ext = quotes (ppr ext) <+> pprImpliedExtensions ext enable any_or_all = "Enable" <+> any_or_all <+> "of the following extensions" <> colon SuggestCorrectPragmaName suggestions -> text "Perhaps you meant" <+> quotedListWithOr (map text suggestions) SuggestMissingDo -> text "Possibly caused by a missing 'do'?" SuggestLetInDo -> text "Perhaps you need a 'let' in a 'do' block?" $$ text "e.g. 'let x = 5' instead of 'x = 5'" SuggestAddSignatureCabalFile pi_mod_name -> text "Try adding" <+> quotes (ppr pi_mod_name) <+> text "to the" <+> quotes (text "signatures") <+> text "field in your Cabal file." SuggestSignatureInstantiations pi_mod_name suggestions -> let suggested_instantiated_with = hcat (punctuate comma $ [ ppr k <> text "=" <> ppr v | InstantiationSuggestion k v <- suggestions ]) in text "Try passing -instantiated-with=\"" <> suggested_instantiated_with <> text "\"" $$ text "replacing <" <> ppr pi_mod_name <> text "> as necessary." SuggestUseSpaces -> text "Please use spaces instead." 
SuggestUseWhitespaceAfter sym -> text "Add whitespace after the" <+> quotes (pprOperatorWhitespaceSymbol sym) <> char '.' SuggestUseWhitespaceAround sym _occurrence -> text "Add whitespace around" <+> quotes (text sym) <> char '.' SuggestParentheses -> text "Use parentheses." SuggestIncreaseMaxPmCheckModels -> text "Increase the limit or resolve the warnings to suppress this message." SuggestAddTypeSignatures bindings -> case bindings of -- This might happen when we have bindings which are /too complicated/, -- see for example 'DsCannotMixPolyAndUnliftedBindings' in 'GHC.HsToCore.Errors.Types'. -- In this case, we emit a generic message. UnnamedBinding -> text "Add a type signature." NamedBindings (x NE.:| xs) -> let nameList = case xs of [] -> quotes . ppr $ x _ -> pprWithCommas (quotes . ppr) xs <+> text "and" <+> quotes (ppr x) in hsep [ text "Consider giving" , nameList , text "a type signature"] SuggestBindToWildcard rhs -> hang (text "Suppress this warning by saying") 2 (quotes $ text "_ <-" <+> ppr rhs) SuggestAddInlineOrNoInlinePragma lhs_id rule_act -> vcat [ text "Add an INLINE[n] or NOINLINE[n] pragma for" <+> quotes (ppr lhs_id) , whenPprDebug (ppr (idInlineActivation lhs_id) $$ ppr rule_act) ] SuggestAddPhaseToCompetingRule bad_rule -> vcat [ text "Add phase [n] or [~n] to the competing rule" , whenPprDebug (ppr bad_rule) ] SuggestIncreaseSimplifierIterations -> text "Set limit with -fconstraint-solver-iterations=n; n=0 for no limit" SuggestUseTypeFromDataKind mb_rdr_name -> text "Use" <+> quotes (text "Type") <+> text "from" <+> quotes (text "Data.Kind") <+> text "instead." $$ maybe empty (\rdr_name -> text "NB: with NoStarIsType, " <> quotes (ppr rdr_name) <+> text "is treated as a regular type operator.") mb_rdr_name SuggestQualifiedAfterModuleName -> text "Place" <+> quotes (text "qualified") <+> text "after the module name." 
SuggestThQuotationSyntax -> vcat [ text "Perhaps you intended to use quotation syntax of TemplateHaskell," , text "but the type variable or constructor is missing" ] SuggestRoles nearby -> case nearby of [] -> empty [r] -> text "Perhaps you meant" <+> quotes (ppr r) -- will this last case ever happen?? _ -> hang (text "Perhaps you meant one of these:") 2 (pprWithCommas (quotes . ppr) nearby) SuggestQualifyStarOperator -> text "To use (or export) this operator in" <+> text "modules with StarIsType," $$ text " including the definition module, you must qualify it." SuggestTypeSignatureRemoveQualifier -> text "Perhaps you meant to omit the qualifier" SuggestAddToHSigExportList _name mb_mod -> let header = text "Try adding it to the export list of" in case mb_mod of Nothing -> header <+> text "the hsig file." Just mod -> header <+> ppr (moduleName mod) <> text "'s hsig file." SuggestFixOrphanInst { isFamilyInstance = mbFamFlavor } -> vcat [ text "Move the instance declaration to the module of the" <+> what <+> text "or of the type, or" , text "wrap the type with a newtype and declare the instance on the new type." ] where what = case mbFamFlavor of Nothing -> text "class" Just SynFamilyInst -> text "type family" Just (DataFamilyInst {}) -> text "data family" SuggestAddStandaloneDerivation -> text "Use a standalone deriving declaration instead" SuggestAddStandaloneKindSignature name -> text "Add a standalone kind signature for" <+> quotes (ppr name) SuggestFillInWildcardConstraint -> text "Fill in the wildcard constraint yourself" SuggestAppropriateTHTick ns -> text "Perhaps use a" <+> how_many <+> text "tick" where how_many | isValNameSpace ns = text "single" | otherwise = text "double" SuggestDumpSlices -> vcat [ text "If you bound a unique Template Haskell name (NameU)" , text "perhaps via newName," , text "then -ddump-splices might be useful." 
] SuggestAddTick (UntickedConstructor fixity name) -> hsep [ text "Use" , char '\'' <> con , text "instead of" , con <> mb_dot ] where con = pprUntickedConstructor fixity name mb_dot | isBareSymbol fixity name -- A final dot can be confusing for a symbol without parens, e.g. -- -- * Use ': instead of :. = empty | otherwise = dot SuggestAddTick UntickedExplicitList -> text "Add a promotion tick, e.g." <+> text "'[x,y,z]" <> dot SuggestMoveToDeclarationSite what rdr_name -> text "Move the" <+> what <+> text "to the declaration site of" <+> quotes (ppr rdr_name) <> dot SuggestSimilarNames tried_rdr_name similar_names -> case similar_names of n NE.:| [] -> text "Perhaps use" <+> pp_item n _ -> sep [ text "Perhaps use one of these:" , nest 2 (pprWithCommas pp_item $ NE.toList similar_names) ] where tried_ns = occNameSpace $ rdrNameOcc tried_rdr_name pp_item = pprSimilarName tried_ns RemindFieldSelectorSuppressed rdr_name parents -> text "Notice that" <+> quotes (ppr rdr_name) <+> text "is a field selector" <+> whose $$ text "that has been suppressed by NoFieldSelectors." where -- parents may be empty if this is a pattern synonym field without a selector whose | null parents = empty | otherwise = text "belonging to the type" <> plural parents <+> pprQuotedList parents ImportSuggestion occ_name import_suggestion -> pprImportSuggestion occ_name import_suggestion SuggestPlacePragmaInHeader -> text "Perhaps you meant to place it in the module header?" $$ text "The module header is the section at the top of the file, before the" <+> quotes (text "module") <+> text "keyword" SuggestPatternMatchingSyntax -> text "Use pattern-matching syntax instead" SuggestSpecialiseVisibilityHints name -> text "Make sure" <+> ppr mod <+> text "is compiled with -O and that" <+> quotes (ppr name) <+> text "has an INLINABLE pragma" where mod = nameModule name SuggestRenameTypeVariable -> text "Consider renaming the type variable." 
SuggestExplicitBidiPatSyn name pat args -> hang (text "Instead use an explicitly bidirectional" <+> text "pattern synonym, e.g.") 2 (hang (text "pattern" <+> pp_name <+> pp_args <+> larrow <+> ppr pat <+> text "where") 2 (pp_name <+> pp_args <+> equals <+> text "...")) where pp_name = ppr name pp_args = hsep (map ppr args) SuggestSafeHaskell -> text "Enable Safe Haskell through either Safe, Trustworthy or Unsafe." SuggestRemoveRecordWildcard -> text "Omit the" <+> quotes (text "..") SuggestIncreaseReductionDepth -> vcat [ text "Use -freduction-depth=0 to disable this check" , text "(any upper bound you could choose might fail unpredictably with" , text " minor updates to GHC, so disabling the check is recommended if" , text " you're sure that type checking should terminate)" ] SuggestMoveNonCanonicalDefinition lhs rhs refURL -> text "Move definition from" <+> quotes (pprPrefixUnqual rhs) <+> text "to" <+> quotes (pprPrefixUnqual lhs) $$ text "See also:" <+> text refURL SuggestRemoveNonCanonicalDefinition lhs rhs refURL -> text "Either remove definition for" <+> quotes (pprPrefixUnqual lhs) <+> text "(recommended)" <+> text "or define as" <+> quotes (pprPrefixUnqual lhs <+> text "=" <+> pprPrefixUnqual rhs) $$ text "See also:" <+> text refURL SuggestEtaReduceAbsDataTySyn tc -> text "If possible, eta-reduce the type synonym" <+> ppr_tc <+> text "so that it is nullary." 
where ppr_tc = quotes (ppr $ tyConName tc) RemindRecordMissingField x r a -> text "NB: There is no field selector" <+> ppr_sel <+> text "in scope for record type" <+> ppr_r where ppr_sel = quotes (ftext x <+> dcolon <+> ppr_arr_r_a) ppr_arr_r_a = ppr $ mkVisFunTyMany r a ppr_r = quotes $ ppr r SuggestBindTyVarOnLhs tv -> text "Bind" <+> quotes (ppr tv) <+> text "on the LHS of the type declaration" SuggestAnonymousWildcard -> text "Use an anonymous wildcard" <+> quotes (text "_") SuggestExplicitQuantification tv -> hsep [ text "Use an explicit", quotes (text "forall") , text "to quantify over", quotes (ppr tv) ] SuggestBindTyVarExplicitly tv -> text "bind" <+> quotes (ppr tv) <+> text "explicitly with" <+> quotes (char '@' <> ppr tv) SuggestDefaultDeclaration cls tys -> hang (text "Consider declaring") 2 (text "default" <+> ppr cls <+> parens (pprWithCommas ppr tys)) SuggestExplicitDerivingClauseStrategies assumed_derivings -> hang (text "Use explicit deriving strategies:") 2 (vcat $ map pp_derivings (Map.toList assumed_derivings)) where pp_derivings (strat, preds) = hsep [text "deriving", ppr strat, parens (pprWithCommas ppr preds)] SuggestExplicitStandaloneDerivingStrategy strat deriv_sig -> hang (text "Use an explicit deriving strategy:") 2 (hsep [text "deriving", ppr strat, text "instance", ppr deriv_sig]) SuggestParenthesizePatternRHS -> text "Parenthesize the RHS of the view pattern" perhapsAsPat :: SDoc perhapsAsPat = text "Perhaps you meant an as-pattern, which must not be surrounded by whitespace" -- | Pretty-print an 'ImportSuggestion'. 
pprImportSuggestion :: OccName -> ImportSuggestion -> SDoc pprImportSuggestion occ_name (CouldImportFrom mods) | (mod, imv) NE.:| [] <- mods = fsep [ text "Add" , quotes (ppr occ_name) , text "to the import list" , text "in the import of" , quotes (ppr mod) , parens (text "at" <+> ppr (imv_span imv)) <> dot ] | otherwise = fsep [ text "Add" , quotes (ppr occ_name) , text "to one of these import lists:" ] $$ nest 2 (vcat [ quotes (ppr mod) <+> parens (text "at" <+> ppr (imv_span imv)) | (mod,imv) <- NE.toList mods ]) pprImportSuggestion occ_name (CouldUnhideFrom mods) | (mod, imv) NE.:| [] <- mods = fsep [ text "Remove" , quotes (ppr occ_name) , text "from the explicit hiding list" , text "in the import of" , quotes (ppr mod) , parens (text "at" <+> ppr (imv_span imv)) <> dot ] | otherwise = fsep [ text "Remove" , quotes (ppr occ_name) , text "from the hiding clauses" , text "in one of these imports:" ] $$ nest 2 (vcat [ quotes (ppr mod) <+> parens (text "at" <+> ppr (imv_span imv)) | (mod,imv) <- NE.toList mods ]) pprImportSuggestion occ_name (CouldAddTypeKeyword mod) = vcat [ text "Add the" <+> quotes (text "type") <+> text "keyword to the import statement:" , nest 2 $ text "import" <+> ppr mod <+> parens_sp (text "type" <+> pprPrefixOcc occ_name) ] where parens_sp d = parens (space <> d <> space) pprImportSuggestion occ_name (CouldRemoveTypeKeyword mod) = vcat [ text "Remove the" <+> quotes (text "type") <+> text "keyword from the import statement:" , nest 2 $ text "import" <+> ppr mod <+> parens_sp (pprPrefixOcc occ_name) ] where parens_sp d = parens (space <> d <> space) pprImportSuggestion dc_occ (ImportDataCon Nothing parent_occ) = text "Import the data constructor" <+> quotes (ppr dc_occ) <+> text "of" <+> quotes (ppr parent_occ) pprImportSuggestion dc_occ (ImportDataCon (Just (mod, patsyns_enabled)) parent_occ) = vcat $ [ text "Use" , nest 2 $ text "import" <+> ppr mod <+> parens_sp (pprPrefixOcc parent_occ <> parens_sp (pprPrefixOcc dc_occ)) , text "or" , 
nest 2 $ text "import" <+> ppr mod <+> parens_sp (pprPrefixOcc parent_occ <> text "(..)") ] ++ if patsyns_enabled then [ text "or" , nest 2 $ text "import" <+> ppr mod <+> parens_sp (text "pattern" <+> pprPrefixOcc dc_occ) ] else [] where parens_sp d = parens (space <> d <> space) -- | Pretty-print a 'SimilarName'. pprSimilarName :: NameSpace -> SimilarName -> SDoc pprSimilarName _ (SimilarName name) = quotes (ppr name) <+> parens (pprDefinedAt name) pprSimilarName tried_ns (SimilarRdrName rdr_name how_in_scope) = pp_ns rdr_name <+> quotes (ppr rdr_name) <+> loc where loc = case how_in_scope of Nothing -> empty Just scope -> case scope of LocallyBoundAt loc -> case loc of UnhelpfulSpan l -> parens (ppr l) RealSrcSpan l _ -> parens (text "line" <+> int (srcSpanStartLine l)) ImportedBy is -> parens (text "imported from" <+> ppr (moduleName $ is_mod is)) pp_ns :: RdrName -> SDoc pp_ns rdr | ns /= tried_ns = pprNameSpace ns | otherwise = empty where ns = rdrNameSpace rdr pprImpliedExtensions :: LangExt.Extension -> SDoc pprImpliedExtensions extension = case implied of [] -> empty xs -> parens $ "implied by" <+> unquotedListWith "and" xs where implied = map (quotes . ppr) . filter (\ext -> extensionDeprecation ext == ExtensionNotDeprecated) . map (\(impl, _, _) -> impl) . filter (\(_, t, orig) -> orig == extension && t == turnOn) $ impliedXFlags pprPrefixUnqual :: Name -> SDoc pprPrefixUnqual name = pprPrefixOcc (getOccName name) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/HpcInfo.hs0000644000000000000000000000154407346545000020700 0ustar0000000000000000-- | Haskell Program Coverage (HPC) support module GHC.Types.HpcInfo ( HpcInfo (..) , AnyHpcUsage , emptyHpcInfo , isHpcUsed ) where import GHC.Prelude -- | Information about a modules use of Haskell Program Coverage data HpcInfo = HpcInfo { hpcInfoTickCount :: Int , hpcInfoHash :: Int } | NoHpcInfo { hpcUsed :: AnyHpcUsage -- ^ Is hpc used anywhere on the module \*tree\*? 
} -- | This is used to signal if one of my imports used HPC instrumentation -- even if there is no module-local HPC usage type AnyHpcUsage = Bool emptyHpcInfo :: AnyHpcUsage -> HpcInfo emptyHpcInfo = NoHpcInfo -- | Find out if HPC is used by this module or any of the modules -- it depends upon isHpcUsed :: HpcInfo -> AnyHpcUsage isHpcUsed (HpcInfo {}) = True isHpcUsed (NoHpcInfo { hpcUsed = used }) = used ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/IPE.hs0000644000000000000000000000362607346545000017772 0ustar0000000000000000module GHC.Types.IPE ( DCMap, ClosureMap, InfoTableProvMap(..), emptyInfoTableProvMap, IpeSourceLocation ) where import GHC.Prelude import GHC.Types.Name import GHC.Data.FastString import GHC.Types.SrcLoc import GHC.Core.DataCon import GHC.Types.Unique.DFM import GHC.Core.Type import Data.List.NonEmpty import GHC.Cmm.CLabel (CLabel) import qualified Data.Map.Strict as Map -- | Position and information about an info table. -- For return frames these are the contents of a 'CoreSyn.SourceNote'. type IpeSourceLocation = (RealSrcSpan, LexicalFastString) -- | A map from a 'Name' to the best approximate source position that -- name arose from. type ClosureMap = UniqDFM Name -- The binding (Name, (Type, Maybe IpeSourceLocation)) -- The best approximate source position. -- (rendered type, source position, source note -- label) -- | A map storing all the different uses of a specific data constructor and the -- approximate source position that usage arose from. -- The 'Int' is an incrementing identifier which distinguishes each usage -- of a constructor in a module. It is paired with the source position -- the constructor was used at, if possible and a string which names -- the source location. This is the same information as is the payload -- for the 'GHC.Core.SourceNote' constructor. 
type DCMap = UniqDFM DataCon (DataCon, NonEmpty (Int, Maybe IpeSourceLocation)) type InfoTableToSourceLocationMap = Map.Map CLabel (Maybe IpeSourceLocation) data InfoTableProvMap = InfoTableProvMap { provDC :: DCMap , provClosure :: ClosureMap , provInfoTables :: InfoTableToSourceLocationMap } emptyInfoTableProvMap :: InfoTableProvMap emptyInfoTableProvMap = InfoTableProvMap emptyUDFM emptyUDFM Map.empty ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id.hs0000644000000000000000000012006507346545000017706 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Id]{@Ids@: Value and constructor identifiers} -} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id' represents names that not only have a 'GHC.Types.Name.Name' but also a -- 'GHC.Core.TyCo.Rep.Type' and some additional details (a 'GHC.Types.Id.Info.IdInfo' and -- one of LocalIdDetails or GlobalIdDetails) that are added, -- modified and inspected by various compiler passes. 
These 'GHC.Types.Var.Var' names -- may either be global or local, see "GHC.Types.Var#globalvslocal" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Id ( -- * The main types Var, Id, isId, -- * In and Out variants InVar, InId, OutVar, OutId, -- ** Simple construction mkGlobalId, mkVanillaGlobal, mkVanillaGlobalWithInfo, mkLocalId, mkLocalCoVar, mkLocalIdOrCoVar, mkLocalIdWithInfo, mkExportedLocalId, mkExportedVanillaId, mkSysLocal, mkSysLocalM, mkSysLocalOrCoVar, mkSysLocalOrCoVarM, mkUserLocal, mkUserLocalOrCoVar, mkTemplateLocals, mkTemplateLocalsNum, mkTemplateLocal, mkScaledTemplateLocal, mkWorkerId, -- ** Taking an Id apart idName, idType, idMult, idScaledType, idUnique, idInfo, idDetails, recordSelectorTyCon, recordSelectorTyCon_maybe, -- ** Modifying an Id setIdName, setIdUnique, GHC.Types.Id.setIdType, setIdMult, updateIdTypeButNotMult, updateIdTypeAndMult, updateIdTypeAndMultM, setIdExported, setIdNotExported, globaliseId, localiseId, setIdInfo, lazySetIdInfo, modifyIdInfo, maybeModifyIdInfo, zapLamIdInfo, floatifyIdDemandInfo, zapIdUsageInfo, zapIdUsageEnvInfo, zapIdUsedOnceInfo, zapIdTailCallInfo, zapFragileIdInfo, zapIdDmdSig, zapStableUnfolding, transferPolyIdInfo, scaleIdBy, scaleVarBy, -- ** Predicates on Ids isImplicitId, isDeadBinder, isStrictId, isExportedId, isLocalId, isGlobalId, isRecordSelector, isNaughtyRecordSelector, isPatSynRecordSelector, isDataConRecordSelector, isClassOpId, isClassOpId_maybe, isDFunId, isPrimOpId, isPrimOpId_maybe, isFCallId, isFCallId_maybe, isDataConWorkId, isDataConWorkId_maybe, isDataConWrapId, isDataConWrapId_maybe, isDataConId, isDataConId_maybe, idDataCon, isConLikeId, isWorkerLikeId, isDeadEndId, idIsFrom, hasNoBinding, -- ** Join variables JoinId, JoinPointHood, isJoinId, idJoinPointHood, idJoinArity, asJoinId, asJoinId_maybe, zapJoinId, -- ** Inline pragma stuff idInlinePragma, setInlinePragma, modifyInlinePragma, idInlineActivation, setInlineActivation, idRuleMatchInfo, -- ** 
One-shot lambdas setOneShotLambda, clearOneShotLambda, updOneShotInfo, setIdOneShotInfo, -- ** Reading 'IdInfo' fields idArity, idCallArity, idFunRepArity, idSpecialisation, idCoreRules, idHasRules, idCafInfo, idLFInfo_maybe, idOneShotInfo, idOccInfo, IdUnfoldingFun, idUnfolding, realIdUnfolding, alwaysActiveUnfoldingFun, whenActiveUnfoldingFun, noUnfoldingFun, -- ** Writing 'IdInfo' fields setIdUnfolding, zapIdUnfolding, setCaseBndrEvald, setIdArity, setIdCallArity, setIdSpecialisation, setIdCafInfo, setIdOccInfo, zapIdOccInfo, setIdLFInfo, setIdDemandInfo, setIdDmdSig, setIdCprSig, setIdCbvMarks, idCbvMarks_maybe, idCbvMarkArity, asWorkerLikeId, asNonWorkerLikeId, idDemandInfo, idDmdSig, idCprSig, idTagSig_maybe, setIdTagSig ) where import GHC.Prelude import GHC.Core ( CoreRule, isStableUnfolding, evaldUnfolding , isCompulsoryUnfolding, Unfolding( NoUnfolding ) , IdUnfoldingFun, isEvaldUnfolding, hasSomeUnfolding, noUnfolding ) import GHC.Types.Id.Info import GHC.Types.Basic -- Imported and re-exported import GHC.Types.Var( Id, CoVar, JoinId, InId, InVar, OutId, OutVar, idInfo, idDetails, setIdDetails, globaliseId, isId, isLocalId, isGlobalId, isExportedId, setIdMult, updateIdTypeAndMult, updateIdTypeButNotMult, updateIdTypeAndMultM) import qualified GHC.Types.Var as Var import GHC.Core.Type import GHC.Core.Predicate( isCoVarType ) import GHC.Core.DataCon import GHC.Core.Class import GHC.Core.Multiplicity import GHC.Types.RepType import GHC.Types.Demand import GHC.Types.Cpr import GHC.Types.Name import GHC.Types.ForeignCall import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Stg.InferTags.TagSig import GHC.Unit.Module import {-# SOURCE #-} GHC.Builtin.PrimOps (PrimOp) import GHC.Builtin.Uniques (mkBuiltinUnique) import GHC.Types.Unique.Supply import GHC.Data.Maybe import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic -- infixl so you can say (id `set` a `set` b) infixl 1 `setIdUnfolding`, `setIdArity`, 
`setIdCallArity`, `setIdOccInfo`, `setIdOneShotInfo`, `setIdSpecialisation`, `setInlinePragma`, `setInlineActivation`, `idCafInfo`, `setIdDemandInfo`, `setIdDmdSig`, `setIdCprSig`, `asJoinId`, `asJoinId_maybe`, `setIdCbvMarks` {- ************************************************************************ * * \subsection{Basic Id manipulation} * * ************************************************************************ -} idName :: Id -> Name idName = Var.varName idUnique :: Id -> Unique idUnique = Var.varUnique idType :: Id -> Kind idType = Var.varType idMult :: Id -> Mult idMult = Var.varMult idScaledType :: Id -> Scaled Type idScaledType id = Scaled (idMult id) (idType id) scaleIdBy :: Mult -> Id -> Id scaleIdBy m id = setIdMult id (m `mkMultMul` idMult id) -- | Like 'scaleIdBy', but skips non-Ids. Useful for scaling -- a mixed list of ids and tyvars. scaleVarBy :: Mult -> Var -> Var scaleVarBy m id | isId id = scaleIdBy m id | otherwise = id setIdName :: Id -> Name -> Id setIdName = Var.setVarName setIdUnique :: Id -> Unique -> Id setIdUnique = Var.setVarUnique -- | Not only does this set the 'Id' 'Type', it also evaluates the type to try and -- reduce space usage setIdType :: Id -> Type -> Id setIdType id ty = seqType ty `seq` Var.setVarType id ty setIdExported :: Id -> Id setIdExported = Var.setIdExported setIdNotExported :: Id -> Id setIdNotExported = Var.setIdNotExported localiseId :: Id -> Id -- Make an Id with the same unique and type as the -- incoming Id, but with an *Internal* Name and *LocalId* flavour localiseId id | assert (isId id) $ isLocalId id && isInternalName name = id | otherwise = Var.mkLocalVar (idDetails id) (localiseName name) (Var.varMult id) (idType id) (idInfo id) where name = idName id lazySetIdInfo :: Id -> IdInfo -> Id lazySetIdInfo = Var.lazySetIdInfo setIdInfo :: Id -> IdInfo -> Id setIdInfo id info = info `seq` (lazySetIdInfo id info) -- Try to avoid space leaks by seq'ing modifyIdInfo :: HasDebugCallStack => (IdInfo -> IdInfo) -> Id 
-> Id modifyIdInfo fn id = setIdInfo id (fn (idInfo id)) -- maybeModifyIdInfo tries to avoid unnecessary thrashing maybeModifyIdInfo :: Maybe IdInfo -> Id -> Id maybeModifyIdInfo (Just new_info) id = lazySetIdInfo id new_info maybeModifyIdInfo Nothing id = id -- maybeModifyIdInfo tries to avoid unnecessary thrashing maybeModifyIdDetails :: Maybe IdDetails -> Id -> Id maybeModifyIdDetails (Just new_details) id = setIdDetails id new_details maybeModifyIdDetails Nothing id = id {- ************************************************************************ * * \subsection{Simple Id construction} * * ************************************************************************ Absolutely all Ids are made by mkId. It is just like Var.mkId, but in addition it pins free-tyvar-info onto the Id's type, where it can easily be found. Note [Free type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~ At one time we cached the free type variables of the type of an Id at the root of the type in a TyNote. The idea was to avoid repeating the free-type-variable calculation. But it turned out to slow down the compiler overall. I don't quite know why; perhaps finding free type variables of an Id isn't all that common whereas applying a substitution (which changes the free type variables) is more common. Anyway, we removed it in March 2008. -} -- | For an explanation of global vs. 
local 'Id's, see "GHC.Types.Var.Var#globalvslocal" mkGlobalId :: IdDetails -> Name -> Type -> IdInfo -> Id mkGlobalId = Var.mkGlobalVar -- | Make a global 'Id' without any extra information at all mkVanillaGlobal :: HasDebugCallStack => Name -> Type -> Id mkVanillaGlobal name ty = mkVanillaGlobalWithInfo name ty vanillaIdInfo -- | Make a global 'Id' with no global information but some generic 'IdInfo' mkVanillaGlobalWithInfo :: HasDebugCallStack => Name -> Type -> IdInfo -> Id mkVanillaGlobalWithInfo nm = assertPpr (not $ isFieldNameSpace $ nameNameSpace nm) (text "mkVanillaGlobalWithInfo called on record field:" <+> ppr nm) $ mkGlobalId VanillaId nm -- | For an explanation of global vs. local 'Id's, see "GHC.Types.Var#globalvslocal" mkLocalId :: HasDebugCallStack => Name -> Mult -> Type -> Id mkLocalId name w ty = mkLocalIdWithInfo name w (assert (not (isCoVarType ty)) ty) vanillaIdInfo -- | Make a local CoVar mkLocalCoVar :: HasDebugCallStack => Name -> Type -> CoVar mkLocalCoVar name ty = assert (isCoVarType ty) $ Var.mkLocalVar CoVarId name ManyTy ty vanillaIdInfo -- | Like 'mkLocalId', but checks the type to see if it should make a covar mkLocalIdOrCoVar :: HasDebugCallStack => Name -> Mult -> Type -> Id mkLocalIdOrCoVar name w ty -- We should assert (eqType w Many) in the isCoVarType case. -- However, currently this assertion does not hold. -- In tests with -fdefer-type-errors, such as T14584a, -- we create a linear 'case' where the scrutinee is a coercion -- (see castBottomExpr). This problem is covered by #17291. | isCoVarType ty = mkLocalCoVar name ty | otherwise = mkLocalId name w ty -- proper ids only; no covars! mkLocalIdWithInfo :: HasDebugCallStack => Name -> Mult -> Type -> IdInfo -> Id mkLocalIdWithInfo name w ty info = Var.mkLocalVar VanillaId name w (assert (not (isCoVarType ty)) ty) info -- Note [Free type variables] -- | Create a local 'Id' that is marked as exported. -- This prevents things attached to it from being removed as dead code. 
-- See Note [Exported LocalIds] mkExportedLocalId :: IdDetails -> Name -> Type -> Id mkExportedLocalId details name ty = Var.mkExportedLocalVar details name ty vanillaIdInfo -- Note [Free type variables] mkExportedVanillaId :: Name -> Type -> Id mkExportedVanillaId name ty = assertPpr (not $ isFieldNameSpace $ nameNameSpace name) (text "mkExportedVanillaId called on record field:" <+> ppr name) $ Var.mkExportedLocalVar VanillaId name ty vanillaIdInfo -- Note [Free type variables] -- | Create a system local 'Id'. These are local 'Id's (see "Var#globalvslocal") -- that are created by the compiler out of thin air mkSysLocal :: FastString -> Unique -> Mult -> Type -> Id mkSysLocal fs uniq w ty = assert (not (isCoVarType ty)) $ mkLocalId (mkSystemVarName uniq fs) w ty -- | Like 'mkSysLocal', but checks to see if we have a covar type mkSysLocalOrCoVar :: FastString -> Unique -> Mult -> Type -> Id mkSysLocalOrCoVar fs uniq w ty = mkLocalIdOrCoVar (mkSystemVarName uniq fs) w ty mkSysLocalM :: MonadUnique m => FastString -> Mult -> Type -> m Id mkSysLocalM fs w ty = getUniqueM >>= (\uniq -> return (mkSysLocal fs uniq w ty)) mkSysLocalOrCoVarM :: MonadUnique m => FastString -> Mult -> Type -> m Id mkSysLocalOrCoVarM fs w ty = getUniqueM >>= (\uniq -> return (mkSysLocalOrCoVar fs uniq w ty)) -- | Create a user local 'Id'. These are local 'Id's (see "GHC.Types.Var#globalvslocal") with a name and location that the user might recognize mkUserLocal :: OccName -> Unique -> Mult -> Type -> SrcSpan -> Id mkUserLocal occ uniq w ty loc = assert (not (isCoVarType ty)) $ mkLocalId (mkInternalName uniq occ loc) w ty -- | Like 'mkUserLocal', but checks if we have a coercion type mkUserLocalOrCoVar :: OccName -> Unique -> Mult -> Type -> SrcSpan -> Id mkUserLocalOrCoVar occ uniq w ty loc = mkLocalIdOrCoVar (mkInternalName uniq occ loc) w ty {- Make some local @Ids@ for a template @CoreExpr@. 
These have bogus @Uniques@, but that's OK because the templates are supposed to be instantiated before use. -} -- | Workers get local names. "CoreTidy" will externalise these if necessary mkWorkerId :: Unique -> Id -> Type -> Id mkWorkerId uniq unwrkr ty = mkLocalId (mkDerivedInternalName mkWorkerOcc uniq (getName unwrkr)) ManyTy ty -- | Create a /template local/: a family of system local 'Id's in bijection with @Int@s, typically used in unfoldings mkTemplateLocal :: Int -> Type -> Id mkTemplateLocal i ty = mkScaledTemplateLocal i (unrestricted ty) mkScaledTemplateLocal :: Int -> Scaled Type -> Id mkScaledTemplateLocal i (Scaled w ty) = mkSysLocalOrCoVar (fsLit "v") (mkBuiltinUnique i) w ty -- "OrCoVar" since this is used in a superclass selector, -- and "~" and "~~" have coercion "superclasses". -- | Create a template local for a series of types mkTemplateLocals :: [Type] -> [Id] mkTemplateLocals = mkTemplateLocalsNum 1 -- | Create a template local for a series of type, but start from a specified template local mkTemplateLocalsNum :: Int -> [Type] -> [Id] mkTemplateLocalsNum n tys = zipWith mkTemplateLocal [n..] tys {- Note [Exported LocalIds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use mkExportedLocalId for things like - Dictionary functions (DFunId) - Wrapper and matcher Ids for pattern synonyms - Default methods for classes - Pattern-synonym matcher and builder Ids - etc They marked as "exported" in the sense that they should be kept alive even if apparently unused in other bindings, and not dropped as dead code by the occurrence analyser. (But "exported" here does not mean "brought into lexical scope by an import declaration". Indeed these things are always internal Ids that the user never sees.) It's very important that they are *LocalIds*, not GlobalIds, for lots of reasons: * We want to treat them as free variables for the purpose of dependency analysis (e.g. GHC.Core.FVs.exprFreeVars). 
* Look them up in the current substitution when we come across occurrences of them (in Subst.lookupIdSubst). Lacking this we can get an out-of-date unfolding, which can in turn make the simplifier go into an infinite loop (#9857) * Ensure that for dfuns that the specialiser does not float dict uses above their defns, which would prevent good simplifications happening. * The strictness analyser treats a occurrence of a GlobalId as imported and assumes it contains strictness in its IdInfo, which isn't true if the thing is bound in the same module as the occurrence. In CoreTidy we must make all these LocalIds into GlobalIds, so that in importing modules (in --make mode) we treat them as properly global. That is what is happening in, say tidy_insts in GHC.Iface.Tidy. ************************************************************************ * * \subsection{Special Ids} * * ************************************************************************ -} -- | If the 'Id' is that for a record selector, extract the 'sel_tycon'. Panic otherwise. 
recordSelectorTyCon :: Id -> RecSelParent recordSelectorTyCon id = case recordSelectorTyCon_maybe id of Just parent -> parent _ -> panic "recordSelectorTyCon" recordSelectorTyCon_maybe :: Id -> Maybe RecSelParent recordSelectorTyCon_maybe id = case Var.idDetails id of RecSelId { sel_tycon = parent } -> Just parent _ -> Nothing isRecordSelector :: Id -> Bool isNaughtyRecordSelector :: Id -> Bool isPatSynRecordSelector :: Id -> Bool isDataConRecordSelector :: Id -> Bool isPrimOpId :: Id -> Bool isFCallId :: Id -> Bool isDataConWorkId :: Id -> Bool isDataConWrapId :: Id -> Bool isDFunId :: Id -> Bool isClassOpId :: Id -> Bool isClassOpId_maybe :: Id -> Maybe Class isPrimOpId_maybe :: Id -> Maybe PrimOp isFCallId_maybe :: Id -> Maybe ForeignCall isDataConWorkId_maybe :: Id -> Maybe DataCon isDataConWrapId_maybe :: Id -> Maybe DataCon isRecordSelector id = case Var.idDetails id of RecSelId {} -> True _ -> False isDataConRecordSelector id = case Var.idDetails id of RecSelId {sel_tycon = RecSelData _} -> True _ -> False isPatSynRecordSelector id = case Var.idDetails id of RecSelId {sel_tycon = RecSelPatSyn _} -> True _ -> False isNaughtyRecordSelector id = case Var.idDetails id of RecSelId { sel_naughty = n } -> n _ -> False isClassOpId id = case Var.idDetails id of ClassOpId {} -> True _other -> False isClassOpId_maybe id = case Var.idDetails id of ClassOpId cls _ -> Just cls _other -> Nothing isPrimOpId id = case Var.idDetails id of PrimOpId {} -> True _ -> False isDFunId id = case Var.idDetails id of DFunId {} -> True _ -> False isPrimOpId_maybe id = case Var.idDetails id of PrimOpId op _ -> Just op _ -> Nothing isFCallId id = case Var.idDetails id of FCallId _ -> True _ -> False isFCallId_maybe id = case Var.idDetails id of FCallId call -> Just call _ -> Nothing isDataConWorkId id = case Var.idDetails id of DataConWorkId _ -> True _ -> False isDataConWorkId_maybe id = case Var.idDetails id of DataConWorkId con -> Just con _ -> Nothing isDataConWrapId id = case 
Var.idDetails id of DataConWrapId _ -> True _ -> False isDataConWrapId_maybe id = case Var.idDetails id of DataConWrapId con -> Just con _ -> Nothing isDataConId_maybe :: Id -> Maybe DataCon isDataConId_maybe id = case Var.idDetails id of DataConWorkId con -> Just con DataConWrapId con -> Just con _ -> Nothing isDataConId :: Id -> Bool isDataConId id = case Var.idDetails id of DataConWorkId {} -> True DataConWrapId {} -> True _ -> False -- | An Id for which we might require all callers to pass strict arguments properly tagged + evaluated. -- -- See Note [CBV Function Ids] isWorkerLikeId :: Id -> Bool isWorkerLikeId id = case Var.idDetails id of WorkerLikeId _ -> True JoinId _ Just{} -> True _ -> False isJoinId :: Var -> Bool -- It is convenient in GHC.Core.Opt.SetLevels.lvlMFE to apply isJoinId -- to the free vars of an expression, so it's convenient -- if it returns False for type variables isJoinId id | isId id = case Var.idDetails id of JoinId {} -> True _ -> False | otherwise = False -- | Doesn't return strictness marks idJoinPointHood :: Var -> JoinPointHood idJoinPointHood id | isId id = case Var.idDetails id of JoinId arity _marks -> JoinPoint arity _ -> NotJoinPoint | otherwise = NotJoinPoint idDataCon :: Id -> DataCon -- ^ Get from either the worker or the wrapper 'Id' to the 'DataCon'. Currently used only in the desugarer. -- -- INVARIANT: @idDataCon (dataConWrapId d) = d@: remember, 'dataConWrapId' can return either the wrapper or the worker idDataCon id = isDataConId_maybe id `orElse` pprPanic "idDataCon" (ppr id) hasNoBinding :: Id -> Bool -- ^ Returns @True@ of an 'Id' which may not have a -- binding, even though it is defined in this module. -- Data constructor workers used to be things of this kind, but they aren't any -- more. Instead, we inject a binding for them at the CorePrep stage. 
The -- exception to this is unboxed tuples and sums datacons, which definitely have -- no binding hasNoBinding id = case Var.idDetails id of -- TEMPORARILY make all primops hasNoBinding, to avoid #20155 -- The goal is to understand #20155 and revert to the commented out version PrimOpId _ _ -> True -- See Note [Eta expanding primops] in GHC.Builtin.PrimOps -- PrimOpId _ lev_poly -> lev_poly -- TEMPORARILY commented out FCallId _ -> True DataConWorkId dc -> isUnboxedTupleDataCon dc || isUnboxedSumDataCon dc _ -> isCompulsoryUnfolding (realIdUnfolding id) -- Note: this function must be very careful not to force -- any of the fields that aren't the 'uf_src' field of -- the 'Unfolding' of the 'Id'. This is because these fields are computed -- in terms of the 'uf_tmpl' field, which is not available -- until we have finished Core Lint for the unfolding, which calls 'hasNoBinding' -- in 'checkCanEtaExpand'. -- -- In particular, calling 'idUnfolding' rather than 'realIdUnfolding' here can -- force the 'uf_tmpl' field, because 'trimUnfolding' forces the 'uf_is_value' field, -- and this field is usually computed in terms of the 'uf_tmpl' field, -- so we will force that as well. -- -- See Note [Lazily checking Unfoldings] in GHC.IfaceToCore. isImplicitId :: Id -> Bool -- ^ 'isImplicitId' tells whether an 'Id's info is implied by other -- declarations, so we don't need to put its signature in an interface -- file, even if it's mentioned in some other interface unfolding. isImplicitId id = case Var.idDetails id of FCallId {} -> True ClassOpId {} -> True PrimOpId {} -> True DataConWorkId {} -> True DataConWrapId {} -> True -- These are implied by their type or class decl; -- remember that all type and class decls appear in the interface file. 
-- The dfun id is not an implicit Id; it must *not* be omitted, because -- it carries version info for the instance decl _ -> False idIsFrom :: Module -> Id -> Bool idIsFrom mod id = nameIsLocalOrFrom mod (idName id) isDeadBinder :: Id -> Bool isDeadBinder bndr | isId bndr = isDeadOcc (idOccInfo bndr) | otherwise = False -- TyVars count as not dead {- ************************************************************************ * * Join variables * * ************************************************************************ -} idJoinArity :: JoinId -> JoinArity idJoinArity id = case idJoinPointHood id of JoinPoint ar -> ar NotJoinPoint -> pprPanic "idJoinArity" (ppr id) asJoinId :: Id -> JoinArity -> JoinId asJoinId id arity = warnPprTrace (not (isLocalId id)) "global id being marked as join var" (ppr id) $ warnPprTrace (not (is_vanilla_or_join id)) "asJoinId" (ppr id <+> pprIdDetails (idDetails id)) $ id `setIdDetails` JoinId arity (idCbvMarks_maybe id) where is_vanilla_or_join id = case Var.idDetails id of VanillaId -> True -- Can workers become join ids? Yes! WorkerLikeId {} -> pprTraceDebug "asJoinId (call by value function)" (ppr id) True JoinId {} -> True _ -> False zapJoinId :: Id -> Id -- May be a regular id already zapJoinId jid | isJoinId jid = zapIdTailCallInfo (newIdDetails `seq` jid `setIdDetails` newIdDetails) -- Core Lint may complain if still marked -- as AlwaysTailCalled | otherwise = jid where newIdDetails = case idDetails jid of -- We treat join points as CBV functions. Even after they are floated out. -- See Note [Use CBV semantics only for join points and workers] JoinId _ (Just marks) -> WorkerLikeId marks JoinId _ Nothing -> WorkerLikeId [] _ -> panic "zapJoinId: newIdDetails can only be used if Id was a join Id." 
asJoinId_maybe :: Id -> JoinPointHood -> Id asJoinId_maybe id (JoinPoint arity) = asJoinId id arity asJoinId_maybe id NotJoinPoint = zapJoinId id {- ************************************************************************ * * \subsection{IdInfo stuff} * * ************************************************************************ -} --------------------------------- -- ARITY idArity :: Id -> Arity idArity id = arityInfo (idInfo id) setIdArity :: Id -> Arity -> Id setIdArity id arity = modifyIdInfo (`setArityInfo` arity) id idCallArity :: Id -> Arity idCallArity id = callArityInfo (idInfo id) setIdCallArity :: Id -> Arity -> Id setIdCallArity id arity = modifyIdInfo (`setCallArityInfo` arity) id -- | This function counts all arguments post-unarisation, which includes -- arguments with no runtime representation -- see Note [Unarisation and arity] idFunRepArity :: Id -> RepArity idFunRepArity x = countFunRepArgs (idArity x) (idType x) -- | Returns true if an application to n args diverges or throws an exception -- See Note [Dead ends] in "GHC.Types.Demand". isDeadEndId :: Var -> Bool isDeadEndId v | isId v = isDeadEndSig (idDmdSig v) | otherwise = False -- | Accesses the 'Id''s 'dmdSigInfo'. idDmdSig :: Id -> DmdSig idDmdSig id = dmdSigInfo (idInfo id) setIdDmdSig :: Id -> DmdSig -> Id setIdDmdSig id sig = modifyIdInfo (`setDmdSigInfo` sig) id idCprSig :: Id -> CprSig idCprSig id = cprSigInfo (idInfo id) setIdCprSig :: Id -> CprSig -> Id setIdCprSig id sig = modifyIdInfo (\info -> setCprSigInfo info sig) id zapIdDmdSig :: Id -> Id zapIdDmdSig id = modifyIdInfo (`setDmdSigInfo` nopSig) id -- | `isStrictId` says whether either -- (a) the 'Id' has a strict demand placed on it or -- (b) definitely has a \"strict type\", such that it can always be -- evaluated strictly (i.e an unlifted type) -- We need to check (b) as well as (a), because when the demand for the -- given `id` hasn't been computed yet but `id` has a strict -- type, we still want `isStrictId id` to be `True`. 
-- Returns False if the type is levity polymorphic; False is always safe. isStrictId :: Id -> Bool isStrictId id | assertPpr (isId id) (text "isStrictId: not an id: " <+> ppr id) $ isJoinId id = False | otherwise = isStrictType (idType id) || isStrUsedDmd (idDemandInfo id) -- Take the best of both strictnesses - old and new idTagSig_maybe :: Id -> Maybe TagSig idTagSig_maybe = tagSig . idInfo --------------------------------- -- UNFOLDING -- | Returns the 'Id's unfolding, but does not expose the unfolding of a strong -- loop breaker. See 'unfoldingInfo'. -- -- If you really want the unfolding of a strong loopbreaker, call 'realIdUnfolding'. idUnfolding :: IdUnfoldingFun idUnfolding id = unfoldingInfo (idInfo id) noUnfoldingFun :: IdUnfoldingFun noUnfoldingFun _id = noUnfolding -- | Returns an unfolding only if -- (a) not a strong loop breaker and -- (b) always active alwaysActiveUnfoldingFun :: IdUnfoldingFun alwaysActiveUnfoldingFun id | isAlwaysActive (idInlineActivation id) = idUnfolding id | otherwise = noUnfolding -- | Returns an unfolding only if -- (a) not a strong loop breaker and -- (b) active in according to is_active whenActiveUnfoldingFun :: (Activation -> Bool) -> IdUnfoldingFun whenActiveUnfoldingFun is_active id | is_active (idInlineActivation id) = idUnfolding id | otherwise = NoUnfolding realIdUnfolding :: Id -> Unfolding -- ^ Expose the unfolding if there is one, including for loop breakers realIdUnfolding id = realUnfoldingInfo (idInfo id) setIdUnfolding :: Id -> Unfolding -> Id setIdUnfolding id unfolding = modifyIdInfo (`setUnfoldingInfo` unfolding) id idDemandInfo :: Id -> Demand idDemandInfo id = demandInfo (idInfo id) setIdDemandInfo :: Id -> Demand -> Id setIdDemandInfo id dmd = modifyIdInfo (`setDemandInfo` dmd) id setIdTagSig :: Id -> TagSig -> Id setIdTagSig id sig = modifyIdInfo (`setTagSig` sig) id -- | If all marks are NotMarkedStrict we just set nothing. 
setIdCbvMarks :: Id -> [CbvMark] -> Id setIdCbvMarks id marks | not (any isMarkedCbv marks) = id | otherwise = -- pprTrace "setMarks:" (ppr id <> text ":" <> ppr marks) $ case idDetails id of -- good ol (likely worker) function VanillaId -> id `setIdDetails` (WorkerLikeId trimmedMarks) JoinId arity _ -> id `setIdDetails` (JoinId arity (Just trimmedMarks)) -- Updating an existing call by value function. WorkerLikeId _ -> id `setIdDetails` (WorkerLikeId trimmedMarks) -- Do nothing for these RecSelId{} -> id DFunId{} -> id _ -> pprTrace "setIdCbvMarks: Unable to set cbv marks for" (ppr id $$ text "marks:" <> ppr marks $$ text "idDetails:" <> ppr (idDetails id)) id where -- (Currently) no point in passing args beyond the arity unlifted. -- We would have to eta expand all call sites to (length marks). -- Perhaps that's sensible but for now be conservative. -- Similarly we don't need any lazy marks at the end of the list. -- This way the length of the list is always exactly number of arguments -- that must be visible to CodeGen. See See Note [CBV Function Ids] -- for more details. trimmedMarks = dropWhileEndLE (not . isMarkedCbv) $ take (idArity id) marks idCbvMarks_maybe :: Id -> Maybe [CbvMark] idCbvMarks_maybe id = case idDetails id of WorkerLikeId marks -> Just marks JoinId _arity marks -> marks _ -> Nothing -- Id must be called with at least this arity in order to allow arguments to -- be passed unlifted. idCbvMarkArity :: Id -> Arity idCbvMarkArity fn = maybe 0 length (idCbvMarks_maybe fn) -- | Remove any cbv marks on arguments from a given Id. asNonWorkerLikeId :: Id -> Id asNonWorkerLikeId id = let details = case idDetails id of WorkerLikeId{} -> Just $ VanillaId JoinId arity Just{} -> Just $ JoinId arity Nothing _ -> Nothing in maybeModifyIdDetails details id -- | Turn this id into a WorkerLikeId if possible. 
asWorkerLikeId :: Id -> Id asWorkerLikeId id = let details = case idDetails id of WorkerLikeId{} -> Nothing JoinId _arity Just{} -> Nothing JoinId arity Nothing -> Just (JoinId arity (Just [])) VanillaId -> Just $ WorkerLikeId [] _ -> Nothing in maybeModifyIdDetails details id setCaseBndrEvald :: StrictnessMark -> Id -> Id -- Used for variables bound by a case expressions, both the case-binder -- itself, and any pattern-bound variables that are argument of a -- strict constructor. It just marks the variable as already-evaluated, -- so that (for example) a subsequent 'seq' can be dropped setCaseBndrEvald str id | isMarkedStrict str = id `setIdUnfolding` evaldUnfolding | otherwise = id -- | Similar to trimUnfolding, but also removes evaldness info. zapIdUnfolding :: Id -> Id zapIdUnfolding v | isId v, hasSomeUnfolding (idUnfolding v) = setIdUnfolding v noUnfolding | otherwise = v --------------------------------- -- SPECIALISATION -- See Note [Specialisations and RULES in IdInfo] in GHC.Types.Id.Info idSpecialisation :: Id -> RuleInfo idSpecialisation id = ruleInfo (idInfo id) idCoreRules :: Id -> [CoreRule] idCoreRules id = ruleInfoRules (idSpecialisation id) idHasRules :: Id -> Bool idHasRules id = not (isEmptyRuleInfo (idSpecialisation id)) setIdSpecialisation :: Id -> RuleInfo -> Id setIdSpecialisation id spec_info = modifyIdInfo (`setRuleInfo` spec_info) id --------------------------------- -- CAF INFO idCafInfo :: Id -> CafInfo idCafInfo id = cafInfo (idInfo id) setIdCafInfo :: Id -> CafInfo -> Id setIdCafInfo id caf_info = modifyIdInfo (`setCafInfo` caf_info) id --------------------------------- -- Lambda form info idLFInfo_maybe :: Id -> Maybe LambdaFormInfo idLFInfo_maybe = lfInfo . 
idInfo setIdLFInfo :: Id -> LambdaFormInfo -> Id setIdLFInfo id lf = modifyIdInfo (`setLFInfo` lf) id --------------------------------- -- Occurrence INFO idOccInfo :: Id -> OccInfo idOccInfo id = occInfo (idInfo id) setIdOccInfo :: Id -> OccInfo -> Id setIdOccInfo id occ_info = modifyIdInfo (`setOccInfo` occ_info) id zapIdOccInfo :: Id -> Id zapIdOccInfo b = b `setIdOccInfo` noOccInfo {- --------------------------------- -- INLINING The inline pragma tells us to be very keen to inline this Id, but it's still OK not to if optimisation is switched off. -} idInlinePragma :: Id -> InlinePragma idInlinePragma id = inlinePragInfo (idInfo id) setInlinePragma :: Id -> InlinePragma -> Id setInlinePragma id prag = modifyIdInfo (`setInlinePragInfo` prag) id modifyInlinePragma :: Id -> (InlinePragma -> InlinePragma) -> Id modifyInlinePragma id fn = modifyIdInfo (\info -> info `setInlinePragInfo` (fn (inlinePragInfo info))) id idInlineActivation :: Id -> Activation idInlineActivation id = inlinePragmaActivation (idInlinePragma id) setInlineActivation :: Id -> Activation -> Id setInlineActivation id act = modifyInlinePragma id (\prag -> setInlinePragmaActivation prag act) idRuleMatchInfo :: Id -> RuleMatchInfo idRuleMatchInfo id = inlinePragmaRuleMatchInfo (idInlinePragma id) isConLikeId :: Id -> Bool isConLikeId id = isConLike (idRuleMatchInfo id) {- --------------------------------- -- ONE-SHOT LAMBDAS -} idOneShotInfo :: Id -> OneShotInfo idOneShotInfo id = oneShotInfo (idInfo id) setOneShotLambda :: Id -> Id setOneShotLambda id = modifyIdInfo (`setOneShotInfo` OneShotLam) id clearOneShotLambda :: Id -> Id clearOneShotLambda id = modifyIdInfo (`setOneShotInfo` NoOneShotInfo) id setIdOneShotInfo :: Id -> OneShotInfo -> Id setIdOneShotInfo id one_shot = modifyIdInfo (`setOneShotInfo` one_shot) id updOneShotInfo :: Id -> OneShotInfo -> Id -- Combine the info in the Id with new info updOneShotInfo id one_shot | OneShotLam <- one_shot , NoOneShotInfo <- idOneShotInfo id = 
setIdOneShotInfo id OneShotLam | otherwise = id -- The OneShotLambda functions simply fiddle with the IdInfo flag -- But watch out: this may change the type of something else -- f = \x -> e -- If we change the one-shot-ness of x, f's type changes -- Replaces the id info if the zapper returns @Just idinfo@ zapInfo :: (IdInfo -> Maybe IdInfo) -> Id -> Id zapInfo zapper id = maybeModifyIdInfo (zapper (idInfo id)) id zapLamIdInfo :: Id -> Id zapLamIdInfo = zapInfo zapLamInfo zapFragileIdInfo :: Id -> Id zapFragileIdInfo = zapInfo zapFragileInfo floatifyIdDemandInfo :: Id -> Id -- See Note [Floatifying demand info when floating] in GHC.Core.Opt.SetLevels floatifyIdDemandInfo = zapInfo floatifyDemandInfo zapIdUsageInfo :: Id -> Id zapIdUsageInfo = zapInfo zapUsageInfo zapIdUsageEnvInfo :: Id -> Id zapIdUsageEnvInfo = zapInfo zapUsageEnvInfo zapIdUsedOnceInfo :: Id -> Id zapIdUsedOnceInfo = zapInfo zapUsedOnceInfo zapIdTailCallInfo :: Id -> Id zapIdTailCallInfo = zapInfo zapTailCallInfo zapStableUnfolding :: Id -> Id zapStableUnfolding id | isStableUnfolding (realIdUnfolding id) = setIdUnfolding id NoUnfolding | otherwise = id {- Note [transferPolyIdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~ This transfer is used in three places: FloatOut (long-distance let-floating) GHC.Core.Opt.Simplify.Utils.abstractFloats (short-distance let-floating) StgLiftLams (selectively lambda-lift local functions to top-level) Consider the short-distance let-floating: f = /\a. let g = rhs in ... Then if we float thus g' = /\a. rhs f = /\a. ...[g' a/g].... we *do not* want to lose g's * strictness information * arity * inline pragma (though that is bit more debatable) * occurrence info Mostly this is just an optimisation, but it's *vital* to transfer the occurrence info. Consider NonRec { f = /\a. let Rec { g* = ..g.. } in ... } where the '*' means 'LoopBreaker'. Then if we float we must get Rec { g'* = /\a. ...(g' a)... } NonRec { f = /\a. ...[g' a/g]....} where g' is also marked as LoopBreaker. 
If not, terrible things can happen if we re-simplify the binding (and the Simplifier does sometimes simplify a term twice); see #4345. It's not so simple to retain * worker info * rules so we simply discard those. Sooner or later this may bite us. If we abstract wrt one or more *value* binders, we must modify the arity and strictness info before transferring it. E.g. f = \x. e --> g' = \y. \x. e + substitute (g' y) for g Notice that g' has an arity one more than the original g -} transferPolyIdInfo :: Id -- Original Id -> [Var] -- Abstract wrt these variables -> Id -- New Id -> Id transferPolyIdInfo old_id abstract_wrt new_id = modifyIdInfo transfer new_id `setIdCbvMarks` new_cbv_marks where arity_increase = count isId abstract_wrt -- Arity increases by the -- number of value binders old_info = idInfo old_id old_arity = arityInfo old_info old_inline_prag = inlinePragInfo old_info old_occ_info = occInfo old_info new_arity = old_arity + arity_increase new_occ_info = zapOccTailCallInfo old_occ_info old_strictness = dmdSigInfo old_info new_strictness = prependArgsDmdSig arity_increase old_strictness old_cpr = cprSigInfo old_info new_cpr = prependArgsCprSig arity_increase old_cpr old_cbv_marks = fromMaybe (replicate old_arity NotMarkedCbv) (idCbvMarks_maybe old_id) abstr_cbv_marks = mapMaybe getMark abstract_wrt new_cbv_marks = abstr_cbv_marks ++ old_cbv_marks getMark v | not (isId v) = Nothing | isId v , isEvaldUnfolding (idUnfolding v) , mightBeLiftedType (idType v) = Just MarkedCbv | otherwise = Just NotMarkedCbv transfer new_info = new_info `setArityInfo` new_arity `setInlinePragInfo` old_inline_prag `setOccInfo` new_occ_info `setDmdSigInfo` new_strictness `setCprSigInfo` new_cpr ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id.hs-boot0000644000000000000000000000017407346545000020645 0ustar0000000000000000module GHC.Types.Id where import {-# SOURCE #-} GHC.Types.Name import {-# SOURCE #-} GHC.Types.Var idName :: Id -> Name 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id/0000755000000000000000000000000007346545000017346 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id/Info.hs0000644000000000000000000010740507346545000020604 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 \section[IdInfo]{@IdInfos@: Non-essential information about @Ids@} (And a pretty good illustration of quite a few things wrong with Haskell. [WDP 94/11]) -} {-# LANGUAGE BinaryLiterals #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Types.Id.Info ( -- * The IdDetails type IdDetails(..), pprIdDetails, coVarDetails, isCoVarDetails, JoinArity, isJoinIdDetails_maybe, RecSelParent(..), recSelParentName, recSelFirstConName, recSelParentCons, idDetailsConcreteTvs, -- * The IdInfo type IdInfo, -- Abstract vanillaIdInfo, noCafIdInfo, -- ** The OneShotInfo type OneShotInfo(..), oneShotInfo, noOneShotInfo, hasNoOneShotInfo, setOneShotInfo, -- ** Zapping various forms of Info zapLamInfo, zapFragileInfo, lazifyDemandInfo, floatifyDemandInfo, zapUsageInfo, zapUsageEnvInfo, zapUsedOnceInfo, zapTailCallInfo, zapCallArityInfo, trimUnfolding, -- ** The ArityInfo type ArityInfo, unknownArity, arityInfo, setArityInfo, ppArityInfo, callArityInfo, setCallArityInfo, -- ** Demand and strictness Info dmdSigInfo, setDmdSigInfo, cprSigInfo, setCprSigInfo, demandInfo, setDemandInfo, pprStrictness, -- ** Unfolding Info realUnfoldingInfo, unfoldingInfo, setUnfoldingInfo, hasInlineUnfolding, -- ** The InlinePragInfo type InlinePragInfo, inlinePragInfo, setInlinePragInfo, -- ** The OccInfo type OccInfo(..), isDeadOcc, isStrongLoopBreaker, isWeakLoopBreaker, occInfo, setOccInfo, InsideLam(..), BranchCount, TailCallInfo(..), tailCallInfo, isAlwaysTailCalled, -- ** The RuleInfo type 
RuleInfo(..), emptyRuleInfo, isEmptyRuleInfo, ruleInfoFreeVars, ruleInfoRules, setRuleInfoHead, ruleInfo, setRuleInfo, tagSigInfo, -- ** The CAFInfo type CafInfo(..), ppCafInfo, mayHaveCafRefs, cafInfo, setCafInfo, -- ** The LambdaFormInfo type LambdaFormInfo, lfInfo, setLFInfo, setTagSig, tagSig, -- ** Tick-box Info TickBoxOp(..), TickBoxId, ) where import GHC.Prelude import GHC.Core import GHC.Core.Class import {-# SOURCE #-} GHC.Builtin.PrimOps (PrimOp) import GHC.Types.Name import GHC.Types.Var.Set import GHC.Types.Basic import GHC.Core.DataCon import GHC.Core.TyCon import GHC.Core.Type (mkTyConApp) import GHC.Core.PatSyn import GHC.Core.ConLike import GHC.Types.ForeignCall import GHC.Unit.Module import GHC.Types.Demand import GHC.Types.Cpr import {-# SOURCE #-} GHC.Tc.Utils.TcType ( ConcreteTyVars, noConcreteTyVars ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Stg.InferTags.TagSig import GHC.StgToCmm.Types (LambdaFormInfo) import Data.Data ( Data ) import Data.Word -- infixl so you can say (id `set` a `set` b) infixl 1 `setRuleInfo`, `setArityInfo`, `setInlinePragInfo`, `setUnfoldingInfo`, `setOneShotInfo`, `setOccInfo`, `setCafInfo`, `setDmdSigInfo`, `setCprSigInfo`, `setDemandInfo`, `setLFInfo` {- ************************************************************************ * * IdDetails * * ************************************************************************ -} -- | Identifier Details -- -- The 'IdDetails' of an 'Id' give stable, and necessary, -- information about the Id. data IdDetails = VanillaId -- | The 'Id' for a record selector | RecSelId { sel_tycon :: RecSelParent , sel_fieldLabel :: FieldLabel , sel_naughty :: Bool -- True <=> a "naughty" selector which can't actually exist, for example @x@ in: -- data T = forall a. 
MkT { x :: a } -- See Note [Naughty record selectors] in GHC.Tc.TyCl , sel_cons :: ([ConLike], [ConLike]) -- If record selector is not defined for all constructors -- of a parent type, this is the pair of lists of constructors that -- it is and is not defined for. Otherwise, it's Nothing. -- Cached here based on the RecSelParent. } -- See Note [Detecting incomplete record selectors] in GHC.HsToCore.Pmc | DataConWorkId DataCon -- ^ The 'Id' is for a data constructor /worker/ | DataConWrapId DataCon -- ^ The 'Id' is for a data constructor /wrapper/ -- [the only reasons we need to know is so that -- a) to support isImplicitId -- b) when desugaring a RecordCon we can get -- from the Id back to the data con] | ClassOpId -- ^ The 'Id' is a superclass selector or class operation Class -- for this class Bool -- True <=> given a non-bottom dictionary, the class op will -- definitely return a non-bottom result -- and Note [exprOkForSpeculation and type classes] -- in GHC.Core.Utils -- | A representation-polymorphic pseudo-op. | RepPolyId { id_concrete_tvs :: ConcreteTyVars } -- ^ Which type variables of this representation-polymorphic 'Id -- should be instantiated to concrete type variables? -- -- See Note [Representation-polymorphism checking built-ins] -- in GHC.Tc.Utils.Concrete. -- | The 'Id' is for a primitive operator. | PrimOpId { id_primop :: PrimOp , id_concrete_tvs :: ConcreteTyVars } -- ^ Which type variables of this primop should be instantiated -- to concrete type variables? -- -- Only ever non-empty when the PrimOp has representation-polymorphic -- type variables. -- -- See Note [Representation-polymorphism checking built-ins] -- in GHC.Tc.Utils.Concrete. | FCallId ForeignCall -- ^ The 'Id' is for a foreign call. -- Type will be simple: no type families, newtypes, etc | TickBoxOpId TickBoxOp -- ^ The 'Id' is for a HPC tick box (both traditional and binary) | DFunId Bool -- ^ A dictionary function. 
-- Bool = True <=> the class has only one method, so may be -- implemented with a newtype, so it might be bad -- to be strict on this dictionary | CoVarId -- ^ A coercion variable -- This only covers /un-lifted/ coercions, of type -- (t1 ~# t2) or (t1 ~R# t2), not their lifted variants | JoinId JoinArity (Maybe [CbvMark]) -- ^ An 'Id' for a join point taking n arguments -- Note [Join points] in "GHC.Core" -- Can also work as a WorkerLikeId if given `CbvMark`s. -- See Note [CBV Function Ids] -- The [CbvMark] is always empty (and ignored) until after Tidy. | WorkerLikeId [CbvMark] -- ^ An 'Id' for a worker like function, which might expect some arguments to be -- passed both evaluated and tagged. -- Worker like functions are create by W/W and SpecConstr and we can expect that they -- aren't used unapplied. -- See Note [CBV Function Ids] -- See Note [Tag Inference] -- The [CbvMark] is always empty (and ignored) until after Tidy for ids from the current -- module. idDetailsConcreteTvs :: IdDetails -> ConcreteTyVars idDetailsConcreteTvs = \ case PrimOpId _ conc_tvs -> conc_tvs RepPolyId conc_tvs -> conc_tvs DataConWorkId dc -> dataConConcreteTyVars dc DataConWrapId dc -> dataConConcreteTyVars dc _ -> noConcreteTyVars {- Note [CBV Function Ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A WorkerLikeId essentially allows us to constrain the calling convention for the given Id. Each such Id carries with it a list of CbvMarks with each element representing a value argument. Arguments who have a matching `MarkedCbv` entry in the list need to be passed evaluated+*properly tagged*. CallByValueFunIds give us additional expressiveness which we use to improve runtime. This is all part of the TagInference work. See also Note [Tag Inference]. They allows us to express the fact that an argument is not only evaluated to WHNF once we entered it's RHS but also that an lifted argument is already *properly tagged* once we jump into the RHS. This means when e.g. 
branching on such an argument the RHS doesn't needed to perform an eval check to ensure the argument isn't an indirection. All seqs on such an argument in the functions body become no-ops as well. The invariants around the arguments of call by value function like Ids are then: * In any call `(f e1 .. en)`, if `f`'s i'th argument is marked `MarkedCbv`, then the caller must ensure that the i'th argument * points directly to the value (and hence is certainly evaluated before the call) * is a properly tagged pointer to that value * The following functions (and only these functions) have `CbvMarks`: * Any `WorkerLikeId` * Some `JoinId` bindings. This works analogous to the Strict Field Invariant. See also Note [Strict Field Invariant]. To make this work what we do is: * During W/W and SpecConstr any worker/specialized binding we introduce is marked as a worker binding by `asWorkerLikeId`. * W/W and SpecConstr further set OtherCon[] unfoldings on arguments which represent contents of a strict fields. * During Tidy we look at all bindings. For any callByValueLike Id and join point we mark arguments as cbv if they Are strict. We don't do so for regular bindings. See Note [Use CBV semantics only for join points and workers] for why. We might have made some ids rhs *more* strict in order to make their arguments be passed CBV. See Note [Call-by-value for worker args] for why. * During CorePrep calls to CallByValueFunIds are eta expanded. * During Stg CodeGen: * When we see a call to a callByValueLike Id: * We check if all arguments marked to be passed unlifted are already tagged. * If they aren't we will wrap the call in case expressions which will evaluate+tag these arguments before jumping to the function. * During Cmm codeGen: * When generating code for the RHS of a StrictWorker binding we omit tag checks when using arguments marked as tagged. 
We only use this for workers and specialized versions of SpecConstr But we also check other functions during tidy and potentially turn some of them into call by value functions and mark some of their arguments as call-by-value by looking at argument unfoldings. NB: I choose to put the information into a new Id constructor since these are loaded at all optimization levels. This makes it trivial to ensure the additional calling convention demands are available at all call sites. Putting it into IdInfo would require us at the very least to always decode the IdInfo just to decide if we need to throw it away or not after. Note [Use CBV semantics only for join points and workers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A function with cbv-semantics requires arguments to be visible and if no arguments are visible requires us to eta-expand it's call site. That is for a binding with three cbv arguments like `w[WorkerLikeId[!,!,!]]` we would need to eta expand undersaturated occurrences like `map w xs` into `map (\x1 x2 x3 -> w x1 x2 x3) xs. In experiments it turned out that the code size increase of doing so can outweigh the performance benefits of doing so. So we only do this for join points, workers and specialized functions (from SpecConstr). Join points are naturally always called saturated so this problem can't occur for them. For workers and specialized functions there are also always at least some applied arguments as we won't inline the wrapper/apply their rule if there are unapplied occurrences like `map f xs`. -} -- | Parent of a record selector function. -- -- Either the parent 'TyCon' or 'PatSyn' depending -- on the origin of the record selector. -- -- For a data family, this is the /instance/ 'TyCon', -- **not** the family 'TyCon'. data RecSelParent -- | Parent of a data constructor record field. -- -- For a data family, this is the /instance/ 'TyCon'. = RecSelData TyCon -- | Parent of a pattern synonym record field: -- the 'PatSyn' itself. 
| RecSelPatSyn PatSyn deriving (Eq, Data) recSelParentName :: RecSelParent -> Name recSelParentName (RecSelData tc) = tyConName tc recSelParentName (RecSelPatSyn ps) = patSynName ps recSelFirstConName :: RecSelParent -> Name recSelFirstConName (RecSelData tc) = dataConName $ head $ tyConDataCons tc recSelFirstConName (RecSelPatSyn ps) = patSynName ps recSelParentCons :: RecSelParent -> [ConLike] recSelParentCons (RecSelData tc) | isAlgTyCon tc = map RealDataCon $ visibleDataCons $ algTyConRhs tc | otherwise = [] recSelParentCons (RecSelPatSyn ps) = [PatSynCon ps] instance Outputable RecSelParent where ppr p = case p of RecSelData tc | Just (parent_tc, tys) <- tyConFamInst_maybe tc -> ppr (mkTyConApp parent_tc tys) | otherwise -> ppr tc RecSelPatSyn ps -> ppr ps -- | Just a synonym for 'CoVarId'. Written separately so it can be -- exported in the hs-boot file. coVarDetails :: IdDetails coVarDetails = CoVarId -- | Check if an 'IdDetails' says 'CoVarId'. isCoVarDetails :: IdDetails -> Bool isCoVarDetails CoVarId = True isCoVarDetails _ = False isJoinIdDetails_maybe :: IdDetails -> Maybe (JoinArity, (Maybe [CbvMark])) isJoinIdDetails_maybe (JoinId join_arity marks) = Just (join_arity, marks) isJoinIdDetails_maybe _ = Nothing instance Outputable IdDetails where ppr = pprIdDetails pprIdDetails :: IdDetails -> SDoc pprIdDetails VanillaId = empty pprIdDetails other = brackets (pp other) where pp VanillaId = panic "pprIdDetails" pp (WorkerLikeId dmds) = text "StrictWorker" <> parens (ppr dmds) pp (DataConWorkId _) = text "DataCon" pp (DataConWrapId _) = text "DataConWrapper" pp (ClassOpId {}) = text "ClassOp" pp (RepPolyId {}) = text "RepPolyId" pp (PrimOpId {}) = text "PrimOp" pp (FCallId _) = text "ForeignCall" pp (TickBoxOpId _) = text "TickBoxOp" pp (DFunId nt) = text "DFunId" <> ppWhen nt (text "(nt)") pp (RecSelId { sel_naughty = is_naughty }) = brackets $ text "RecSel" <> ppWhen is_naughty (text "(naughty)") pp CoVarId = text "CoVarId" pp (JoinId arity marks) = text 
"JoinId" <> parens (int arity) <> parens (ppr marks) {- ************************************************************************ * * \subsection{The main IdInfo type} * * ************************************************************************ -} -- | Identifier Information -- -- An 'IdInfo' gives /optional/ information about an 'Id'. If -- present it never lies, but it may not be present, in which case there -- is always a conservative assumption which can be made. -- -- Two 'Id's may have different info even though they have the same -- 'Unique' (and are hence the same 'Id'); for example, one might lack -- the properties attached to the other. -- -- Most of the 'IdInfo' gives information about the value, or definition, of -- the 'Id', independent of its usage. Exceptions to this -- are 'demandInfo', 'occInfo', 'oneShotInfo' and 'callArityInfo'. -- -- Performance note: when we update 'IdInfo', we have to reallocate this -- entire record, so it is a good idea not to let this data structure get -- too big. data IdInfo = IdInfo { ruleInfo :: RuleInfo, -- ^ Specialisations of the 'Id's function which exist. -- See Note [Specialisations and RULES in IdInfo] realUnfoldingInfo :: Unfolding, -- ^ The 'Id's unfolding inlinePragInfo :: InlinePragma, -- ^ Any inline pragma attached to the 'Id' occInfo :: OccInfo, -- ^ How the 'Id' occurs in the program dmdSigInfo :: DmdSig, -- ^ A strictness signature. Describes how a function uses its arguments -- See Note [idArity varies independently of dmdTypeDepth] -- in GHC.Core.Opt.DmdAnal cprSigInfo :: CprSig, -- ^ Information on whether the function will ultimately return a -- freshly allocated constructor. demandInfo :: Demand, -- ^ ID demand information bitfield :: {-# UNPACK #-} !BitField, -- ^ Bitfield packs CafInfo, OneShotInfo, arity info, and -- call arity info in one 64-bit word. Packing these fields reduces size -- of `IdInfo` from 12 words to 7 words and reduces residency by almost -- 4% in some programs. 
See #17497 and associated MR. -- -- See documentation of the getters for what these packed fields mean. lfInfo :: !(Maybe LambdaFormInfo), -- ^ If lfInfo = Just info, then the `info` is guaranteed /correct/. -- If lfInfo = Nothing, then we do not have a `LambdaFormInfo` for this Id, -- so (for imported Ids) we make a conservative version. -- See Note [The LFInfo of Imported Ids] in GHC.StgToCmm.Closure -- For locally-defined Ids other than DataCons, the `lfInfo` field is always Nothing. -- See also Note [LFInfo of DataCon workers and wrappers] -- See documentation of the getters for what these packed fields mean. tagSig :: !(Maybe TagSig) } -- | Encodes arities, OneShotInfo, CafInfo. -- From least-significant to most-significant bits: -- -- - Bit 0 (1): OneShotInfo -- - Bit 1 (1): CafInfo -- - Bit 2 (1): unused -- - Bits 3-32(30): Call Arity info -- - Bits 33-62(30): Arity info -- newtype BitField = BitField Word64 emptyBitField :: BitField emptyBitField = BitField 0 bitfieldGetOneShotInfo :: BitField -> OneShotInfo bitfieldGetOneShotInfo (BitField bits) = if testBit bits 0 then OneShotLam else NoOneShotInfo bitfieldGetCafInfo :: BitField -> CafInfo bitfieldGetCafInfo (BitField bits) = if testBit bits 1 then NoCafRefs else MayHaveCafRefs bitfieldGetCallArityInfo :: BitField -> ArityInfo bitfieldGetCallArityInfo (BitField bits) = fromIntegral (bits `shiftR` 3) .&. 
((1 `shiftL` 30) - 1) bitfieldGetArityInfo :: BitField -> ArityInfo bitfieldGetArityInfo (BitField bits) = fromIntegral (bits `shiftR` 33) bitfieldSetOneShotInfo :: OneShotInfo -> BitField -> BitField bitfieldSetOneShotInfo info (BitField bits) = case info of NoOneShotInfo -> BitField (clearBit bits 0) OneShotLam -> BitField (setBit bits 0) bitfieldSetCafInfo :: CafInfo -> BitField -> BitField bitfieldSetCafInfo info (BitField bits) = case info of MayHaveCafRefs -> BitField (clearBit bits 1) NoCafRefs -> BitField (setBit bits 1) bitfieldSetCallArityInfo :: ArityInfo -> BitField -> BitField bitfieldSetCallArityInfo info bf@(BitField bits) = assert (info < 2^(30 :: Int) - 1) $ bitfieldSetArityInfo (bitfieldGetArityInfo bf) $ BitField ((fromIntegral info `shiftL` 3) .|. (bits .&. 0b111)) bitfieldSetArityInfo :: ArityInfo -> BitField -> BitField bitfieldSetArityInfo info (BitField bits) = assert (info < 2^(30 :: Int) - 1) $ BitField ((fromIntegral info `shiftL` 33) .|. (bits .&. ((1 `shiftL` 33) - 1))) -- Getters -- | Info about a lambda-bound variable, if the 'Id' is one oneShotInfo :: IdInfo -> OneShotInfo oneShotInfo = bitfieldGetOneShotInfo . bitfield -- | 'Id' arity, as computed by "GHC.Core.Opt.Arity". Specifies how many arguments -- this 'Id' has to be applied to before it does any meaningful work. arityInfo :: IdInfo -> ArityInfo arityInfo = bitfieldGetArityInfo . bitfield -- | 'Id' CAF info cafInfo :: IdInfo -> CafInfo cafInfo = bitfieldGetCafInfo . bitfield -- | How this is called. This is the number of arguments to which a binding can -- be eta-expanded without losing any sharing. n <=> all calls have at least n -- arguments callArityInfo :: IdInfo -> ArityInfo callArityInfo = bitfieldGetCallArityInfo . 
bitfield tagSigInfo :: IdInfo -> Maybe TagSig tagSigInfo = tagSig -- Setters setRuleInfo :: IdInfo -> RuleInfo -> IdInfo setRuleInfo info sp = sp `seq` info { ruleInfo = sp } setInlinePragInfo :: IdInfo -> InlinePragma -> IdInfo setInlinePragInfo info pr = pr `seq` info { inlinePragInfo = pr } setOccInfo :: IdInfo -> OccInfo -> IdInfo setOccInfo info oc = oc `seq` info { occInfo = oc } -- Try to avoid space leaks by seq'ing -- | Essentially returns the 'realUnfoldingInfo' field, but does not expose the -- unfolding of a strong loop breaker. -- -- This is the right thing to call if you plan to decide whether an unfolding -- will inline. unfoldingInfo :: IdInfo -> Unfolding unfoldingInfo info | isStrongLoopBreaker (occInfo info) = trimUnfolding $ realUnfoldingInfo info | otherwise = realUnfoldingInfo info setUnfoldingInfo :: IdInfo -> Unfolding -> IdInfo setUnfoldingInfo info uf = -- We don't seq the unfolding, as we generate intermediate -- unfoldings which are just thrown away, so evaluating them is a -- waste of time. 
-- seqUnfolding uf `seq` info { realUnfoldingInfo = uf } hasInlineUnfolding :: IdInfo -> Bool -- ^ True of a /non-loop-breaker/ Id that has a /stable/ unfolding that is -- (a) always inlined; that is, with an `UnfWhen` guidance, or -- (b) a DFunUnfolding which never needs to be inlined hasInlineUnfolding info = isInlineUnfolding (unfoldingInfo info) setArityInfo :: IdInfo -> ArityInfo -> IdInfo setArityInfo info ar = info { bitfield = bitfieldSetArityInfo ar (bitfield info) } setCallArityInfo :: IdInfo -> ArityInfo -> IdInfo setCallArityInfo info ar = info { bitfield = bitfieldSetCallArityInfo ar (bitfield info) } setCafInfo :: IdInfo -> CafInfo -> IdInfo setCafInfo info caf = info { bitfield = bitfieldSetCafInfo caf (bitfield info) } setLFInfo :: IdInfo -> LambdaFormInfo -> IdInfo setLFInfo info lf = info { lfInfo = Just lf } setTagSig :: IdInfo -> TagSig -> IdInfo setTagSig info sig = info { tagSig = Just sig } setOneShotInfo :: IdInfo -> OneShotInfo -> IdInfo setOneShotInfo info lb = info { bitfield = bitfieldSetOneShotInfo lb (bitfield info) } setDemandInfo :: IdInfo -> Demand -> IdInfo setDemandInfo info dd = dd `seq` info { demandInfo = dd } setDmdSigInfo :: IdInfo -> DmdSig -> IdInfo setDmdSigInfo info dd = dd `seq` info { dmdSigInfo = dd } setCprSigInfo :: IdInfo -> CprSig -> IdInfo setCprSigInfo info cpr = cpr `seq` info { cprSigInfo = cpr } -- | Basic 'IdInfo' that carries no useful information whatsoever vanillaIdInfo :: IdInfo vanillaIdInfo = IdInfo { ruleInfo = emptyRuleInfo, realUnfoldingInfo = noUnfolding, inlinePragInfo = defaultInlinePragma, occInfo = noOccInfo, demandInfo = topDmd, dmdSigInfo = nopSig, cprSigInfo = topCprSig, bitfield = bitfieldSetCafInfo vanillaCafInfo $ bitfieldSetArityInfo unknownArity $ bitfieldSetCallArityInfo unknownArity $ bitfieldSetOneShotInfo NoOneShotInfo $ emptyBitField, lfInfo = Nothing, tagSig = Nothing } -- | More informative 'IdInfo' we can use when we know the 'Id' has no CAF references noCafIdInfo :: IdInfo 
noCafIdInfo = vanillaIdInfo `setCafInfo` NoCafRefs -- Used for built-in type Ids in GHC.Types.Id.Make. {- ************************************************************************ * * \subsection[arity-IdInfo]{Arity info about an @Id@} * * ************************************************************************ For locally-defined Ids, the code generator maintains its own notion of their arities; so it should not be asking... (but other things besides the code-generator need arity info!) Note [Arity and function types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The arity of an 'Id' must never exceed the number of arguments that can be read off from the 'Id's type, possibly after expanding newtypes. Examples: f1 :: forall a. a -> a idArity f1 <= 1: only one value argument, of type 'a' f2 :: forall a. Show a => Int -> a idArity f2 <= 2: two value arguments, of types 'Show a' and 'Int'. newtype Id a = MkId a f3 :: forall b. Id (Int -> b) idArity f3 <= 1: there is one value argument, of type 'Int', hidden under the newtype. newtype RecFun = MkRecFun (Int -> RecFun) f4 :: RecFun no constraint on the arity of f4: we can unwrap as many layers of the newtype as we want, to get arbitrarily many arguments of type 'Int'. -} -- | Arity Information -- -- An 'ArityInfo' of @n@ tells us that partial application of this -- 'Id' to up to @n-1@ value arguments does essentially no work. -- -- That is not necessarily the same as saying that it has @n@ leading -- lambdas, because coerces may get in the way. -- -- The arity might increase later in the compilation process, if -- an extra lambda floats up to the binding site. -- -- /Invariant:/ the 'Arity' of an 'Id' must never exceed the number of -- value arguments that appear in the type of the 'Id'. -- See Note [Arity and function types]. 
type ArityInfo = Arity -- | It is always safe to assume that an 'Id' has an arity of 0 unknownArity :: Arity unknownArity = 0 ppArityInfo :: Int -> SDoc ppArityInfo 0 = empty ppArityInfo n = hsep [text "Arity", int n] {- ************************************************************************ * * \subsection{Inline-pragma information} * * ************************************************************************ -} -- | Inline Pragma Information -- -- Tells when the inlining is active. -- When it is active the thing may be inlined, depending on how -- big it is. -- -- If there was an @INLINE@ pragma, then as a separate matter, the -- RHS will have been made to look small with a Core inline 'Note' -- -- The default 'InlinePragInfo' is 'AlwaysActive', so the info serves -- entirely as a way to inhibit inlining until we want it type InlinePragInfo = InlinePragma {- ************************************************************************ * * Strictness * * ************************************************************************ -} pprStrictness :: DmdSig -> SDoc pprStrictness sig = ppr sig {- ************************************************************************ * * RuleInfo * * ************************************************************************ Note [Specialisations and RULES in IdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally speaking, a GlobalId has an *empty* RuleInfo. All their RULES are contained in the globally-built rule-base. In principle, one could attach the to M.f the RULES for M.f that are defined in M. But we don't do that for instance declarations and so we just treat them all uniformly. The EXCEPTION is PrimOpIds, which do have rules in their IdInfo. That is just for convenience really. However, LocalIds may have non-empty RuleInfo. 
We treat them differently because: a) they might be nested, in which case a global table won't work b) the RULE might mention free variables, which we use to keep things alive In GHC.Iface.Tidy, when the LocalId becomes a GlobalId, its RULES are stripped off and put in the global list. -} -- | Rule Information -- -- Records the specializations of this 'Id' that we know about -- in the form of rewrite 'CoreRule's that target them data RuleInfo = RuleInfo [CoreRule] DVarSet -- Locally-defined free vars of *both* LHS and RHS -- of rules. I don't think it needs to include the -- ru_fn though. -- Note [Rule dependency info] in "GHC.Core.Opt.OccurAnal" -- | Assume that no specializations exist: always safe emptyRuleInfo :: RuleInfo emptyRuleInfo = RuleInfo [] emptyDVarSet isEmptyRuleInfo :: RuleInfo -> Bool isEmptyRuleInfo (RuleInfo rs _) = null rs -- | Retrieve the locally-defined free variables of both the left and -- right hand sides of the specialization rules ruleInfoFreeVars :: RuleInfo -> DVarSet ruleInfoFreeVars (RuleInfo _ fvs) = fvs ruleInfoRules :: RuleInfo -> [CoreRule] ruleInfoRules (RuleInfo rules _) = rules -- | Change the name of the function the rule is keyed on all of the 'CoreRule's setRuleInfoHead :: Name -> RuleInfo -> RuleInfo setRuleInfoHead fn (RuleInfo rules fvs) = RuleInfo (map (setRuleIdName fn) rules) fvs {- ************************************************************************ * * \subsection[CG-IdInfo]{Code generator-related information} * * ************************************************************************ -} -- CafInfo is used to build Static Reference Tables (see simplStg/SRT.hs). -- | Constant applicative form Information -- -- Records whether an 'Id' makes Constant Applicative Form references data CafInfo = MayHaveCafRefs -- ^ Indicates that the 'Id' is for either: -- -- 1. A function or static constructor -- that refers to one or more CAFs, or -- -- 2. 
A real live CAF | NoCafRefs -- ^ A function or static constructor -- that refers to no CAFs. deriving (Eq, Ord) -- | Assumes that the 'Id' has CAF references: definitely safe vanillaCafInfo :: CafInfo vanillaCafInfo = MayHaveCafRefs mayHaveCafRefs :: CafInfo -> Bool mayHaveCafRefs MayHaveCafRefs = True mayHaveCafRefs _ = False instance Outputable CafInfo where ppr = ppCafInfo ppCafInfo :: CafInfo -> SDoc ppCafInfo NoCafRefs = text "NoCafRefs" ppCafInfo MayHaveCafRefs = empty {- ************************************************************************ * * \subsection{Bulk operations on IdInfo} * * ************************************************************************ -} -- | This is used to remove information on lambda binders that we have -- setup as part of a lambda group, assuming they will be applied all at once, -- but turn out to be part of an unsaturated lambda as in e.g: -- -- > (\x1. \x2. e) arg1 zapLamInfo :: IdInfo -> Maybe IdInfo zapLamInfo info@(IdInfo {occInfo = occ, demandInfo = demand}) | is_safe_occ occ && is_safe_dmd demand = Nothing | otherwise = Just (info {occInfo = safe_occ, demandInfo = topDmd}) where -- The "unsafe" occ info is the ones that say I'm not in a lambda -- because that might not be true for an unsaturated lambda is_safe_occ occ | isAlwaysTailCalled occ = False is_safe_occ (OneOcc { occ_in_lam = NotInsideLam }) = False is_safe_occ _other = True safe_occ = case occ of OneOcc{} -> occ { occ_in_lam = IsInsideLam , occ_tail = NoTailCallInfo } IAmALoopBreaker{} -> occ { occ_tail = NoTailCallInfo } _other -> occ is_safe_dmd dmd = not (isStrUsedDmd dmd) -- | Lazify (remove the top-level demand, only) the demand in `IdInfo` -- Keep nested demands; see Note [Floatifying demand info when floating] -- in GHC.Core.Opt.SetLevels lazifyDemandInfo :: IdInfo -> Maybe IdInfo lazifyDemandInfo info@(IdInfo { demandInfo = dmd }) = Just (info {demandInfo = lazifyDmd dmd }) -- | Floatify the demand in `IdInfo` -- But keep /nested/ demands; see Note 
[Floatifying demand info when floating] -- in GHC.Core.Opt.SetLevels floatifyDemandInfo :: IdInfo -> Maybe IdInfo floatifyDemandInfo info@(IdInfo { demandInfo = dmd }) = Just (info {demandInfo = floatifyDmd dmd }) -- | Remove usage (but not strictness) info on the `IdInfo` zapUsageInfo :: IdInfo -> Maybe IdInfo zapUsageInfo info = Just (info {demandInfo = zapUsageDemand (demandInfo info)}) -- | Remove usage environment info from the strictness signature on the 'IdInfo' zapUsageEnvInfo :: IdInfo -> Maybe IdInfo zapUsageEnvInfo info | hasDemandEnvSig (dmdSigInfo info) = Just (info {dmdSigInfo = zapDmdEnvSig (dmdSigInfo info)}) | otherwise = Nothing zapUsedOnceInfo :: IdInfo -> Maybe IdInfo zapUsedOnceInfo info = Just $ info { dmdSigInfo = zapUsedOnceSig (dmdSigInfo info) , demandInfo = zapUsedOnceDemand (demandInfo info) } zapFragileInfo :: IdInfo -> Maybe IdInfo -- ^ Zap info that depends on free variables zapFragileInfo info@(IdInfo { occInfo = occ, realUnfoldingInfo = unf }) = new_unf `seq` -- The unfolding field is not (currently) strict, so we -- force it here to avoid a (zapFragileUnfolding unf) thunk -- which might leak space Just (info `setRuleInfo` emptyRuleInfo `setUnfoldingInfo` new_unf `setOccInfo` zapFragileOcc occ) where new_unf = zapFragileUnfolding unf zapFragileUnfolding :: Unfolding -> Unfolding -- ^ Zaps any core unfolding, but /preserves/ evaluated-ness, -- i.e. an unfolding of OtherCon zapFragileUnfolding unf -- N.B. isEvaldUnfolding catches *both* OtherCon [] *and* core unfoldings -- representing values. 
| isEvaldUnfolding unf = evaldUnfolding | otherwise = noUnfolding trimUnfolding :: Unfolding -> Unfolding -- Squash all unfolding info, preserving only evaluated-ness trimUnfolding unf | isEvaldUnfolding unf = evaldUnfolding | otherwise = noUnfolding zapTailCallInfo :: IdInfo -> Maybe IdInfo zapTailCallInfo info = case occInfo info of occ | isAlwaysTailCalled occ -> Just (info `setOccInfo` safe_occ) | otherwise -> Nothing where safe_occ = occ { occ_tail = NoTailCallInfo } zapCallArityInfo :: IdInfo -> IdInfo zapCallArityInfo info = setCallArityInfo info 0 {- ************************************************************************ * * \subsection{TickBoxOp} * * ************************************************************************ -} type TickBoxId = Int -- | Tick box for Hpc-style coverage data TickBoxOp = TickBox Module {-# UNPACK #-} !TickBoxId instance Outputable TickBoxOp where ppr (TickBox mod n) = text "tick" <+> ppr (mod,n) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id/Info.hs-boot0000644000000000000000000000034307346545000021536 0ustar0000000000000000module GHC.Types.Id.Info where import GHC.Prelude import GHC.Utils.Outputable data IdInfo data IdDetails vanillaIdInfo :: IdInfo coVarDetails :: IdDetails isCoVarDetails :: IdDetails -> Bool pprIdDetails :: IdDetails -> SDoc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id/Make.hs0000644000000000000000000030374107346545000020567 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1998 This module contains definitions for the IdInfo for things that have a standard form, namely: - data constructors - record selectors - method and superclass selectors - primitive operations -} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {-# LANGUAGE DataKinds #-} module GHC.Types.Id.Make ( mkDictFunId, mkDictSelId, mkDictSelRhs, mkFCallId, unwrapNewTypeBody, wrapFamInstBody, DataConBoxer(..), vanillaDataConBoxer, mkDataConRep, mkDataConWorkId, DataConBangOpts 
(..), BangOpts (..), unboxedUnitExpr, -- And some particular Ids; see below for why they are wired in wiredInIds, ghcPrimIds, realWorldPrimId, voidPrimId, voidArgId, nullAddrId, seqId, lazyId, lazyIdKey, coercionTokenId, coerceId, proxyHashId, nospecId, nospecIdName, noinlineId, noinlineIdName, noinlineConstraintId, noinlineConstraintIdName, coerceName, leftSectionName, rightSectionName, pcRepPolyId, mkRepPolyIdConcreteTyVars, ) where import GHC.Prelude import GHC.Builtin.Types.Prim import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Core import GHC.Core.Opt.Arity( typeOneShot ) import GHC.Core.Type import GHC.Core.Multiplicity import GHC.Core.TyCo.Rep import GHC.Core.FamInstEnv import GHC.Core.Coercion import GHC.Core.Reduction import GHC.Core.Make import GHC.Core.FVs ( mkRuleInfo ) import GHC.Core.Utils ( exprType, mkCast, mkDefaultCase, coreAltsType ) import GHC.Core.Unfold.Make import GHC.Core.SimpleOpt import GHC.Core.TyCon import GHC.Core.Class import GHC.Core.DataCon import GHC.Types.Literal import GHC.Types.SourceText import GHC.Types.RepType ( countFunRepArgs, typePrimRep ) import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.ForeignCall import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand import GHC.Types.Cpr import GHC.Types.Unique.Supply import GHC.Types.Basic hiding ( SuccessFlag(..) 
) import GHC.Types.Var (VarBndr(Bndr), visArgConstraintLike, tyVarName) import GHC.Tc.Types.Origin import GHC.Tc.Utils.TcType as TcType import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Data.List.SetOps import Data.List ( zipWith4 ) -- A bit of a shame we must import these here import GHC.StgToCmm.Types (LambdaFormInfo(..)) import GHC.Runtime.Heap.Layout (ArgDescr(ArgUnknown)) {- ************************************************************************ * * \subsection{Wired in Ids} * * ************************************************************************ Note [Wired-in Ids] ~~~~~~~~~~~~~~~~~~~ A "wired-in" Id can be referred to directly in GHC (e.g. 'voidPrimId') rather than by looking it up its name in some environment or fetching it from an interface file. There are several reasons why an Id might appear in the wiredInIds: * ghcPrimIds: see Note [ghcPrimIds (aka pseudoops)] * magicIds: see Note [magicIds] * errorIds, defined in GHC.Core.Make. These error functions (e.g. rUNTIME_ERROR_ID) are wired in because the desugarer generates code that mentions them directly In all cases except ghcPrimIds, there is a definition site in a library module, which may be called (e.g. in higher order situations); but the wired-in version means that the details are never read from that module's interface file; instead, the full definition is right here. Note [ghcPrimIds (aka pseudoops)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ghcPrimIds * Are exported from GHC.Prim (see ghcPrimExports, used in ghcPrimInterface) See Note [GHC.Prim] in primops.txt.pp for the remaining items in GHC.Prim. * Can't be defined in Haskell, and hence no Haskell binding site, but have perfectly reasonable unfoldings in Core * Either have a CompulsoryUnfolding (hence always inlined), or of an EvaldUnfolding and void representation (e.g. 
realWorldPrimId) * Are (or should be) defined in primops.txt.pp as 'pseudoop' Reason: that's how we generate documentation for them Note [magicIds] ~~~~~~~~~~~~~~~ The magicIds * Are exported from GHC.Magic * Can be defined in Haskell (and are, in ghc-prim:GHC/Magic.hs). This definition at least generates Haddock documentation for them. * May or may not have a CompulsoryUnfolding. * But have some special behaviour that can't be done via an unfolding from an interface file. * May have IdInfo that differs from what would be imported from GHC.Magic.hi. For example, 'lazy' gets a lazy strictness signature, per Note [lazyId magic]. The two remaining identifiers in GHC.Magic, runRW# and inline, are not listed in magicIds: they have special behavior but they can be known-key and not wired-in. Similarly for GHC.Internal.IO.seq# and GHC.Internal.Exts.considerAccessible. runRW#: see Note [Simplification of runRW#] in Prep, runRW# code in Simplifier, Note [Linting of runRW#]. seq#: see Note [seq# magic] inline: see Note [inlineId magic] considerAccessible: see Note [considerAccessible] -} wiredInIds :: [Id] wiredInIds = magicIds ++ ghcPrimIds ++ errorIds -- Defined in GHC.Core.Make magicIds :: [Id] -- See Note [magicIds] magicIds = [lazyId, oneShotId, noinlineId, noinlineConstraintId, nospecId] ghcPrimIds :: [Id] -- See Note [ghcPrimIds (aka pseudoops)] ghcPrimIds = [ realWorldPrimId , voidPrimId , nullAddrId , seqId , coerceId , proxyHashId , leftSectionId , rightSectionId ] {- ************************************************************************ * * \subsection{Data constructors} * * ************************************************************************ The wrapper for a constructor is an ordinary top-level binding that evaluates any strict args, unboxes any args that are going to be flattened, and calls the worker. 
We're going to build a constructor that looks like: data (Data a, C b) => T a b = T1 !a !Int b T1 = /\ a b -> \d1::Data a, d2::C b -> \p q r -> case p of { p -> case q of { q -> Con T1 [a,b] [p,q,r]}} Notice that * d2 is thrown away --- a context in a data decl is used to make sure one *could* construct dictionaries at the site the constructor is used, but the dictionary isn't actually used. * We have to check that we can construct Data dictionaries for the types a and Int. Once we've done that we can throw d1 away too. * We use (case p of q -> ...) to evaluate p, rather than "seq" because all that matters is that the arguments are evaluated. "seq" is very careful to preserve evaluation order, which we don't need to be here. You might think that we could simply give constructors some strictness info, like PrimOps, and let CoreToStg do the let-to-case transformation. But we don't do that because in the case of primops and functions strictness is a *property* not a *requirement*. In the case of constructors we need to do something active to evaluate the argument. Making an explicit case expression allows the simplifier to eliminate it in the (common) case where the constructor arg is already evaluated. Note [Wrappers for data instance tycons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the case of data instances, the wrapper also applies the coercion turning the representation type into the family instance type to cast the result of the wrapper. For example, consider the declarations data family Map k :: * -> * data instance Map (a, b) v = MapPair (Map a (Pair b v)) The tycon to which the datacon MapPair belongs gets a unique internal name of the form :R123Map, and we call it the representation tycon. In contrast, Map is the family tycon (accessible via tyConFamInst_maybe). A coercion allows you to move between representation and family type. 
It is accessible from :R123Map via tyConFamilyCoercion_maybe and has kind Co123Map a b v :: {Map (a, b) v ~ :R123Map a b v} The wrapper and worker of MapPair get the types -- Wrapper $WMapPair :: forall a b v. Map a (Map a b v) -> Map (a, b) v $WMapPair a b v = MapPair a b v `cast` sym (Co123Map a b v) -- Worker MapPair :: forall a b v. Map a (Map a b v) -> :R123Map a b v This coercion is conditionally applied by wrapFamInstBody. It's a bit more complicated if the data instance is a GADT as well! data instance T [a] where T1 :: forall b. b -> T [Maybe b] Hence we translate to -- Wrapper $WT1 :: forall b. b -> T [Maybe b] $WT1 b v = T1 (Maybe b) b (Maybe b) v `cast` sym (Co7T (Maybe b)) -- Worker T1 :: forall c b. (c ~ Maybe b) => b -> :R7T c -- Coercion from family type to representation type Co7T a :: T [a] ~ :R7T a Newtype instances through an additional wrinkle into the mix. Consider the following example (adapted from #15318, comment:2): data family T a newtype instance T [a] = MkT [a] Within the newtype instance, there are three distinct types at play: 1. The newtype's underlying type, [a]. 2. The instance's representation type, TList a (where TList is the representation tycon). 3. The family type, T [a]. We need two coercions in order to cast from (1) to (3): (a) A newtype coercion axiom: axiom coTList a :: TList a ~ [a] (Where TList is the representation tycon of the newtype instance.) (b) A data family instance coercion axiom: axiom coT a :: T [a] ~ TList a When we translate the newtype instance to Core, we obtain: -- Wrapper $WMkT :: forall a. [a] -> T [a] $WMkT a x = MkT a x |> Sym (coT a) -- Worker MkT :: forall a. [a] -> TList [a] MkT a x = x |> Sym (coTList a) Unlike for data instances, the worker for a newtype instance is actually an executable function which expands to a cast, but otherwise, the general strategy is essentially the same as for data instances. 
Also note that we have a wrapper, which is unusual for a newtype, but we make GHC produce one anyway for symmetry with the way data instances are handled. Note [Newtype datacons] ~~~~~~~~~~~~~~~~~~~~~~~ The "data constructor" for a newtype should have no existentials. It's not quite a "vanilla" data constructor, because the newtype arising from class C a => D a looks like newtype T:D a = C:D (C a) so the data constructor for T:C has a single argument, namely the predicate (C a). That ends up in the dcOtherTheta for the data con, which makes it not vanilla. So the assert just tests for existentials. The rest is checked by having a singleton arg_tys. Note [Newtype workers] ~~~~~~~~~~~~~~~~~~~~~~ A newtype does not really have a worker. Instead, newtype constructors just unfold into a cast. But we need *something* for, say, MkAge to refer to. So, we do this: * The Id used as the newtype worker will have a compulsory unfolding to a cast. See Note [Compulsory newtype unfolding] * This Id is labeled as a DataConWrapId. We don't want to use a DataConWorkId, as those have special treatment in the back end. * There is no top-level binding, because the compulsory unfolding means that it will be inlined (to a cast) at every call site. We probably should have a NewtypeWorkId, but these Ids disappear as soon as we desugar anyway, so it seems a step too far. Note [Compulsory newtype unfolding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Newtype wrappers, just like workers, have compulsory unfoldings. This is needed so that two optimizations involving newtypes have the same effect whether a wrapper is present or not: (1) Case-of-known constructor. See Note [beta-reduction in exprIsConApp_maybe]. (2) Matching against the map/coerce RULE. Suppose we have the RULE {-# RULE "map/coerce" map coerce = ... #-} As described in Note [Getting the map/coerce RULE to work], the occurrence of 'coerce' is transformed into: {-# RULE "map/coerce" forall (c :: T1 ~R# T2). map ((\v -> v) `cast` c) = ... 
#-} We'd like 'map Age' to match the LHS. For this to happen, Age must be unfolded, otherwise we'll be stuck. This is tested in T16208. It also allows for the possibility of representation-polymorphic newtypes with wrappers (with -XUnliftedNewtypes): newtype N (a :: TYPE r) = MkN a With -XUnliftedNewtypes, this is allowed -- even though MkN is representation- polymorphic. It's OK because MkN evaporates in the compiled code, becoming just a cast. That is, it has a compulsory unfolding. As long as its argument is not representation-polymorphic (which it can't be, according to Note [Representation polymorphism invariants] in GHC.Core), and it's saturated, no representation-polymorphic code ends up in the code generator. The saturation condition is effectively checked in GHC.Tc.Gen.Head.rejectRepPolyNewtypes. However, if we make a *wrapper* for a newtype, we get into trouble. In that case, we generate a forbidden representation-polymorphic binding, and we must then ensure that it is always instantiated at a representation-monomorphic type. The solution is simple, though: just make the newtype wrappers as ephemeral as the newtype workers. In other words, give the wrappers compulsory unfoldings and no bindings. The compulsory unfolding is given in wrap_unf in mkDataConRep, and the lack of a binding happens in GHC.Iface.Tidy.getTyConImplicitBinds, where we say that a newtype has no implicit bindings. Note [Records and linear types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All the fields, in a record constructor, are linear, because there is no syntax to specify the type of record field. There will be (see the proposal https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0111-linear-types.rst#records-and-projections ), but it isn't implemented yet. 
Projections of records can't be linear: data Foo = MkFoo { a :: A, b :: B } If we had a :: Foo %1 -> A We could write bad :: A %1 -> B %1 -> A bad x y = a (MkFoo { a=x, b=y }) There is an exception: if `b` (more generally all the fields besides `a`) is unrestricted, then is perfectly possible to have a linear projection. Such a linear projection has as simple definition. data Bar = MkBar { c :: C, d % Many :: D } c :: Bar %1 -> C c MkBar{ c=x, d=_} = x The `% Many` syntax, for records, does not exist yet. But there is one important special case which already happens: when there is a single field (usually a newtype). newtype Baz = MkBaz { unbaz :: E } unbaz could be linear. And, in fact, it is linear in the proposal design. However, this hasn't been implemented yet. ************************************************************************ * * \subsection{Dictionary selectors} * * ************************************************************************ Selecting a field for a dictionary. If there is just one field, then there's nothing to do. Dictionary selectors may get nested forall-types. Thus: class Foo a where op :: forall b. Ord b => a -> b -> b Then the top-level type for op is op :: forall a. Foo a => forall b. Ord b => a -> b -> b Note [Type classes and linear types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constraints, in particular type classes, don't have attached linearity information. Implicitly, they are all unrestricted. See the linear types proposal, https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0111-linear-types.rst . When translating to core `C => ...` is always translated to an unrestricted arrow `C % Many -> ...`. Therefore there is no loss of generality if we make all selectors unrestricted. 
-} mkDictSelId :: Name -- Name of one of the *value* selectors -- (dictionary superclass or method) -> Class -> Id mkDictSelId name clas = mkGlobalId (ClassOpId clas terminating) name sel_ty info where tycon = classTyCon clas sel_names = map idName (classAllSelIds clas) new_tycon = isNewTyCon tycon [data_con] = tyConDataCons tycon tyvars = dataConUserTyVarBinders data_con n_ty_args = length tyvars arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses val_index = assoc "MkId.mkDictSelId" (sel_names `zip` [0..]) name pred_ty = mkClassPred clas (mkTyVarTys (binderVars tyvars)) res_ty = scaledThing (getNth arg_tys val_index) sel_ty = mkInvisForAllTys tyvars $ mkFunctionType ManyTy pred_ty res_ty -- See Note [Type classes and linear types] terminating = isTerminatingType res_ty || definitelyUnliftedType res_ty -- If the field is unlifted, it can't be bottom -- Ditto if it's a terminating type base_info = noCafIdInfo `setArityInfo` 1 `setDmdSigInfo` strict_sig `setCprSigInfo` topCprSig info | new_tycon = base_info `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkInlineUnfoldingWithArity defaultSimpleOpts StableSystemSrc 1 (mkDictSelRhs clas val_index) -- See Note [Single-method classes] in GHC.Tc.TyCl.Instance -- for why alwaysInlinePragma | otherwise = base_info `setRuleInfo` mkRuleInfo [rule] `setInlinePragInfo` neverInlinePragma `setUnfoldingInfo` mkInlineUnfoldingWithArity defaultSimpleOpts StableSystemSrc 1 (mkDictSelRhs clas val_index) -- Add a magic BuiltinRule, but no unfolding -- so that the rule is always available to fire. 
-- See Note [ClassOp/DFun selection] in GHC.Tc.TyCl.Instance -- This is the built-in rule that goes -- op (dfT d1 d2) ---> opT d1 d2 rule = BuiltinRule { ru_name = fsLit "Class op " `appendFS` occNameFS (getOccName name) , ru_fn = name , ru_nargs = n_ty_args + 1 , ru_try = dictSelRule val_index n_ty_args } -- The strictness signature is of the form U(AAAVAAAA) -> T -- where the V depends on which item we are selecting -- It's worth giving one, so that absence info etc is generated -- even if the selector isn't inlined strict_sig = mkClosedDmdSig [arg_dmd] topDiv arg_dmd | new_tycon = evalDmd | otherwise = C_1N :* mkProd Unboxed dict_field_dmds where -- The evalDmd below is just a placeholder and will be replaced in -- GHC.Types.Demand.dmdTransformDictSel dict_field_dmds = [ if name == sel_name then evalDmd else absDmd | sel_name <- sel_names ] mkDictSelRhs :: Class -> Int -- 0-indexed selector among (superclasses ++ methods) -> CoreExpr mkDictSelRhs clas val_index = mkLams tyvars (Lam dict_id rhs_body) where tycon = classTyCon clas new_tycon = isNewTyCon tycon [data_con] = tyConDataCons tycon tyvars = dataConUnivTyVars data_con arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses the_arg_id = getNth arg_ids val_index pred = mkClassPred clas (mkTyVarTys tyvars) dict_id = mkTemplateLocal 1 pred arg_ids = mkTemplateLocalsNum 2 (map scaledThing arg_tys) rhs_body | new_tycon = unwrapNewTypeBody tycon (mkTyVarTys tyvars) (Var dict_id) | otherwise = mkSingleAltCase (Var dict_id) dict_id (DataAlt data_con) arg_ids (varToCoreExpr the_arg_id) -- varToCoreExpr needed for equality superclass selectors -- sel a b d = case x of { MkC _ (g:a~b) _ -> CO g } dictSelRule :: Int -> Arity -> RuleFun -- Tries to persuade the argument to look like a constructor -- application, using exprIsConApp_maybe, and then selects -- from it -- sel_i t1..tk (D t1..tk op1 ... 
opm) = opi -- dictSelRule val_index n_ty_args _ id_unf _ args | (dict_arg : _) <- drop n_ty_args args , Just (_, floats, _, _, con_args) <- exprIsConApp_maybe id_unf dict_arg = Just (wrapFloats floats $ getNth con_args val_index) | otherwise = Nothing {- ************************************************************************ * * Data constructors * * ************************************************************************ -} mkDataConWorkId :: Name -> DataCon -> Id mkDataConWorkId wkr_name data_con | isNewTyCon tycon = mkGlobalId (DataConWrapId data_con) wkr_name wkr_ty nt_work_info -- See Note [Newtype workers] | otherwise = mkGlobalId (DataConWorkId data_con) wkr_name wkr_ty alg_wkr_info where tycon = dataConTyCon data_con -- The representation TyCon wkr_ty = dataConRepType data_con ----------- Workers for data types -------------- alg_wkr_info = noCafIdInfo `setArityInfo` wkr_arity `setInlinePragInfo` wkr_inline_prag `setUnfoldingInfo` evaldUnfolding -- Record that it's evaluated, -- even if arity = 0 `setLFInfo` wkr_lf_info -- No strictness: see Note [Data-con worker strictness] in GHC.Core.DataCon wkr_inline_prag = defaultInlinePragma { inl_rule = ConLike } wkr_arity = dataConRepArity data_con -- See Note [LFInfo of DataCon workers and wrappers] wkr_lf_info | wkr_arity == 0 = LFCon data_con | otherwise = LFReEntrant TopLevel (countFunRepArgs wkr_arity wkr_ty) True ArgUnknown -- LFInfo stores post-unarisation arity ----------- Workers for newtypes -------------- univ_tvs = dataConUnivTyVars data_con ex_tcvs = dataConExTyCoVars data_con arg_tys = dataConRepArgTys data_con -- Should be same as dataConOrigArgTys nt_work_info = noCafIdInfo -- The NoCaf-ness is set by noCafIdInfo `setArityInfo` 1 -- Arity 1 `setInlinePragInfo` dataConWrapperInlinePragma `setUnfoldingInfo` newtype_unf -- See W1 in Note [LFInfo of DataCon workers and wrappers] `setLFInfo` (panic "mkDataConWorkId: we shouldn't look at LFInfo for newtype worker ids") id_arg1 = mkScaledTemplateLocal 1 
(head arg_tys) res_ty_args = mkTyCoVarTys univ_tvs newtype_unf = assertPpr (null ex_tcvs && isSingleton arg_tys) (ppr data_con) -- Note [Newtype datacons] mkCompulsoryUnfolding $ mkLams univ_tvs $ Lam id_arg1 $ wrapNewTypeBody tycon res_ty_args (Var id_arg1) {- Note [LFInfo of DataCon workers and wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As noted in Note [The LFInfo of Imported Ids] in GHC.StgToCmm.Closure, it's crucial that saturated data con applications are given an LFInfo of `LFCon`. Since for data constructors we never serialise the worker and the wrapper (only the data type declaration), we never serialise their lambda form info either. Therefore, when making data constructors workers and wrappers, we construct a correct `LFInfo` for them right away, and put it it in the `lfInfo` field of the worker/wrapper Id, ensuring that: The `lfInfo` field of a DataCon worker or wrapper is always populated with the correct LFInfo. How do we construct a /correct/ LFInfo for workers and wrappers? (Remember: `LFCon` means "a saturated constructor application") (1) Data constructor workers and wrappers with arity > 0 are unambiguously functions and should be given `LFReEntrant`, regardless of the runtime relevance of the arguments. - For example, `Just :: a -> Maybe a` is given `LFReEntrant`, and `HNil :: (a ~# '[]) -> HList a` is given `LFReEntrant` too. (2) A datacon /worker/ with zero arity is trivially fully saturated -- it takes no arguments whatsoever (not even zero-width args), so it is given `LFCon`. (3) Perhaps surprisingly, a datacon /wrapper/ can be an `LFCon`. See Wrinkle (W1) below. A datacon /wrapper/ with zero arity must be a fully saturated application of the worker to zero-width arguments only (which are dropped after unarisation), and therefore is also given `LFCon`. 
For example, consider the following data constructors: data T1 a where TCon1 :: {-# UNPACK #-} !(a :~: True) -> T1 a data T2 a where TCon2 :: {-# UNPACK #-} !() -> T2 a data T3 a where TCon3 :: T3 '[] `TCon1`'s wrapper has a lifted argument, which is non-zero-width, while the worker has an unlifted equality argument, which is zero-width. `TCon2`'s wrapper has a lifted argument, which is non-zero-width, while the worker has no arguments. Wrinkle (W1). Perhaps surprisingly, it is possible for the /wrapper/ to be an `LFCon` even though the /worker/ is not. Consider `T3` above. Here is the Core representation of the worker and wrapper: $WTCon3 :: T3 '[] -- Wrapper $WTCon3 = TCon3 @[] -- A saturated constructor application: LFCon TCon3 :: forall (a :: * -> *). (a ~# []) => T a -- Worker TCon3 = /\a. \(co :: a~#[]). TCon3 co -- A function: LFReEntrant For `TCon1`, both the wrapper and worker will be given `LFReEntrant` since they both have arity == 1. For `TCon2`, the wrapper will be given `LFReEntrant` since it has arity == 1 while the worker is `LFCon` since its arity == 0 For `TCon3`, the wrapper will be given `LFCon` since its arity == 0 and the worker `LFReEntrant` since its arity == 1 One might think we could give *workers* with only zero-width-args the `LFCon` LambdaFormInfo, e.g. give `LFCon` to the worker of `TCon1` and `TCon3`. However, these workers are unambiguously functions -- which makes `LFReEntrant`, the LambdaFormInfo we give them, correct. See also the discussion in #23158. Wrinkles: (W1) Why do we panic when generating `LFInfo` for newtype workers and wrappers? We don't generate code for newtype workers/wrappers, so we should never have to look at their LFInfo (and in general we can't; they may be representation-polymorphic). See also the Note [Imported unlifted nullary datacon wrappers must have correct LFInfo] in GHC.StgToCmm.Types. 
------------------------------------------------- -- Data constructor representation -- -- This is where we decide how to wrap/unwrap the -- constructor fields -- -------------------------------------------------- -} type Unboxer = Var -> UniqSM ([Var], CoreExpr -> CoreExpr) -- Unbox: bind rep vars by decomposing src var data Boxer = UnitBox | Boxer (Subst -> UniqSM ([Var], CoreExpr)) -- Box: build src arg using these rep vars -- | Data Constructor Boxer newtype DataConBoxer = DCB ([Type] -> [Var] -> UniqSM ([Var], [CoreBind])) -- Bind these src-level vars, returning the -- rep-level vars to bind in the pattern vanillaDataConBoxer :: DataConBoxer -- No transformation on arguments needed vanillaDataConBoxer = DCB (\_tys args -> return (args, [])) {- Note [Inline partially-applied constructor wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow the wrapper to inline when partially applied to avoid boxing values unnecessarily. For example, consider data Foo a = Foo !Int a instance Traversable Foo where traverse f (Foo i a) = Foo i <$> f a This desugars to traverse f foo = case foo of Foo i# a -> let i = I# i# in map ($WFoo i) (f a) If the wrapper `$WFoo` is not inlined, we get a fruitless reboxing of `i`. But if we inline the wrapper, we get map (\a. case i of I# i# a -> Foo i# a) (f a) and now case-of-known-constructor eliminates the redundant allocation. -} data DataConBangOpts = FixedBangOpts [HsImplBang] -- ^ Used for imported data constructors -- See Note [Bangs on imported data constructors] | SrcBangOpts !BangOpts data BangOpts = BangOpts { bang_opt_strict_data :: !Bool -- ^ Strict fields by default , bang_opt_unbox_disable :: !Bool -- ^ Disable automatic field unboxing (e.g. 
if we aren't optimising) , bang_opt_unbox_strict :: !Bool -- ^ Unbox strict fields , bang_opt_unbox_small :: !Bool -- ^ Unbox small strict fields } mkDataConRep :: DataConBangOpts -> FamInstEnvs -> Name -> DataCon -> UniqSM DataConRep mkDataConRep dc_bang_opts fam_envs wrap_name data_con | not wrapper_reqd = return NoDataConRep | otherwise = do { wrap_args <- mapM (newLocal (fsLit "conrep")) wrap_arg_tys ; wrap_body <- mk_rep_app (dropList stupid_theta wrap_args `zip` dropList eq_spec unboxers) initial_wrap_app -- Drop the stupid theta arguments, as per -- Note [Instantiating stupid theta] in GHC.Core.DataCon. ; let wrap_id = mkGlobalId (DataConWrapId data_con) wrap_name wrap_ty wrap_info wrap_info = noCafIdInfo `setArityInfo` wrap_arity -- It's important to specify the arity, so that partial -- applications are treated as values `setInlinePragInfo` wrap_prag `setUnfoldingInfo` wrap_unf `setDmdSigInfo` wrap_sig -- We need to get the CAF info right here because GHC.Iface.Tidy -- does not tidy the IdInfo of implicit bindings (like the wrapper) -- so it not make sure that the CAF info is sane `setLFInfo` wrap_lf_info -- The signature is purely for passes like the Simplifier, not for -- DmdAnal itself; see Note [DmdAnal for DataCon wrappers]. wrap_sig = mkClosedDmdSig wrap_arg_dmds topDiv -- See Note [LFInfo of DataCon workers and wrappers] wrap_lf_info | wrap_arity == 0 = LFCon data_con -- See W1 in Note [LFInfo of DataCon workers and wrappers] | isNewTyCon tycon = panic "mkDataConRep: we shouldn't look at LFInfo for newtype wrapper ids" | otherwise = LFReEntrant TopLevel (countFunRepArgs wrap_arity wrap_ty) True ArgUnknown -- LFInfo stores post-unarisation arity wrap_arg_dmds = replicate (length theta) topDmd ++ map mk_dmd arg_ibangs -- Don't forget the dictionary arguments when building -- the strictness signature (#14290). 
mk_dmd str | isBanged str = evalDmd | otherwise = topDmd wrap_prag = dataConWrapperInlinePragma `setInlinePragmaActivation` activateDuringFinal -- See Note [Activation for data constructor wrappers] -- The wrapper will usually be inlined (see wrap_unf), so its -- strictness and CPR info is usually irrelevant. But this is -- not always the case; GHC may choose not to inline it. In -- particular, the wrapper constructor is not inlined inside -- an INLINE rhs or when it is not applied to any arguments. -- See Note [Inline partially-applied constructor wrappers] -- Passing Nothing here allows the wrapper to inline when -- unsaturated. wrap_unf | isNewTyCon tycon = mkCompulsoryUnfolding wrap_rhs -- See Note [Compulsory newtype unfolding] | otherwise = mkDataConUnfolding wrap_rhs wrap_rhs = mkLams wrap_tvs $ mkLams wrap_args $ wrapFamInstBody tycon res_ty_args $ wrap_body ; return (DCR { dcr_wrap_id = wrap_id , dcr_boxer = mk_boxer boxers , dcr_arg_tys = rep_tys , dcr_stricts = rep_strs -- For newtypes, dcr_bangs is always [HsLazy]. -- See Note [HsImplBangs for newtypes]. 
, dcr_bangs = arg_ibangs }) } where (univ_tvs, ex_tvs, eq_spec, theta, orig_arg_tys, _orig_res_ty) = dataConFullSig data_con stupid_theta = dataConStupidTheta data_con wrap_tvs = dataConUserTyVars data_con res_ty_args = dataConResRepTyArgs data_con tycon = dataConTyCon data_con -- The representation TyCon (not family) wrap_ty = dataConWrapperType data_con ev_tys = eqSpecPreds eq_spec ++ theta all_arg_tys = map unrestricted ev_tys ++ orig_arg_tys ev_ibangs = map (const HsLazy) ev_tys orig_bangs = dataConSrcBangs data_con wrap_arg_tys = (map unrestricted $ stupid_theta ++ theta) ++ orig_arg_tys wrap_arity = count isCoVar ex_tvs + length wrap_arg_tys -- The wrap_args are the arguments *other than* the eq_spec -- Because we are going to apply the eq_spec args manually in the -- wrapper new_tycon = isNewTyCon tycon arg_ibangs | new_tycon = map (const HsLazy) orig_arg_tys -- See Note [HsImplBangs for newtypes] -- orig_arg_tys should be a singleton, but -- if a user declared a wrong newtype we -- detect this later (see test T2334A) | otherwise = case dc_bang_opts of SrcBangOpts bang_opts -> zipWith (dataConSrcToImplBang bang_opts fam_envs) orig_arg_tys orig_bangs FixedBangOpts bangs -> bangs (rep_tys_w_strs, wrappers) = unzip (zipWith dataConArgRep all_arg_tys (ev_ibangs ++ arg_ibangs)) (unboxers, boxers) = unzip wrappers (rep_tys, rep_strs) = unzip (concat rep_tys_w_strs) -- This is True if the data constructor or class dictionary constructor -- needs a wrapper. This wrapper is injected into the program later in the -- CoreTidy pass. See Note [Injecting implicit bindings] in GHC.Iface.Tidy, -- along with the accompanying implementation in getTyConImplicitBinds. wrapper_reqd | isTypeDataTyCon tycon -- `type data` declarations never have data-constructor wrappers -- Their data constructors only live at the type level, in the -- form of PromotedDataCon, and therefore do not need wrappers. -- See wrinkle (W0) in Note [Type data declarations] in GHC.Rename.Module. 
= False | otherwise = (not new_tycon -- (Most) newtypes have only a worker, with the exception -- of some newtypes written with GADT syntax. -- See dataConUserTyVarsNeedWrapper below. && (any isBanged (ev_ibangs ++ arg_ibangs))) -- Some forcing/unboxing (includes eq_spec) || isFamInstTyCon tycon -- Cast result || dataConUserTyVarsNeedWrapper data_con -- If the data type was written with GADT syntax and -- orders the type variables differently from what the -- worker expects, it needs a data con wrapper to reorder -- the type variables. -- See Note [Data con wrappers and GADT syntax]. -- -- NB: All GADTs return true from this function, but there -- is one exception that we must check below. || not (null stupid_theta) -- If the data constructor has a datatype context, -- we need a wrapper in order to drop the stupid arguments. -- See Note [Instantiating stupid theta] in GHC.Core.DataCon. initial_wrap_app = Var (dataConWorkId data_con) `mkTyApps` res_ty_args `mkVarApps` ex_tvs `mkCoApps` map (mkReflCo Nominal . 
eqSpecType) eq_spec mk_boxer :: [Boxer] -> DataConBoxer mk_boxer boxers = DCB (\ ty_args src_vars -> do { let (ex_vars, term_vars) = splitAtList ex_tvs src_vars subst1 = zipTvSubst univ_tvs ty_args subst2 = foldl2 extendTvSubstWithClone subst1 ex_tvs ex_vars ; (rep_ids, binds) <- go subst2 boxers term_vars ; return (ex_vars ++ rep_ids, binds) } ) go _ [] src_vars = assertPpr (null src_vars) (ppr data_con) $ return ([], []) go subst (UnitBox : boxers) (src_var : src_vars) = do { (rep_ids2, binds) <- go subst boxers src_vars ; return (src_var : rep_ids2, binds) } go subst (Boxer boxer : boxers) (src_var : src_vars) = do { (rep_ids1, arg) <- boxer subst ; (rep_ids2, binds) <- go subst boxers src_vars ; return (rep_ids1 ++ rep_ids2, NonRec src_var arg : binds) } go _ (_:_) [] = pprPanic "mk_boxer" (ppr data_con) mk_rep_app :: [(Id,Unboxer)] -> CoreExpr -> UniqSM CoreExpr mk_rep_app [] con_app = return con_app mk_rep_app ((wrap_arg, unboxer) : prs) con_app = do { (rep_ids, unbox_fn) <- unboxer wrap_arg ; expr <- mk_rep_app prs (mkVarApps con_app rep_ids) ; return (unbox_fn expr) } dataConWrapperInlinePragma :: InlinePragma -- See Note [DataCon wrappers are conlike] dataConWrapperInlinePragma = alwaysInlineConLikePragma {- Note [Activation for data constructor wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Activation on a data constructor wrapper allows it to inline only in FinalPhase. This way rules have a chance to fire if they mention a data constructor on the left RULE "foo" f (K a b) = ... Since the LHS of rules are simplified with InitialPhase, we won't inline the wrapper on the LHS either. On the other hand, this means that exprIsConApp_maybe must be able to deal with wrappers so that case-of-constructor is not delayed; see Note [exprIsConApp_maybe on data constructors with wrappers] for details. 
It used to activate in phases 2 (afterInitial) and later, but it makes it awkward to write a RULE[1] with a constructor on the left: it would work if a constructor has no wrapper, but whether a constructor has a wrapper depends, for instance, on the order of type argument of that constructors. Therefore changing the order of type argument could make previously working RULEs fail. See also https://gitlab.haskell.org/ghc/ghc/issues/15840 . Note [DataCon wrappers are conlike] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DataCon workers are clearly ConLike --- they are the “Con” in “ConLike”, after all --- but what about DataCon wrappers? Should they be marked ConLike, too? Yes, absolutely! As described in Note [CONLIKE pragma] in GHC.Types.Basic, isConLike influences GHC.Core.Utils.exprIsExpandable, which is used by both RULE matching and the case-of-known-constructor optimization. It’s crucial that both of those things can see applications of DataCon wrappers: * User-defined RULEs match on wrappers, not workers, so we might need to look through an unfolding built from a DataCon wrapper to determine if a RULE matches. * Likewise, if we have something like let x = $WC a b in ... case x of { C y z -> e } ... we still want to apply case-of-known-constructor. Therefore, it’s important that we consider DataCon wrappers conlike. This is especially true now that we don’t inline DataCon wrappers until the final simplifier phase; see Note [Activation for data constructor wrappers]. For further reading, see: * Note [Conlike is interesting] in GHC.Core.Op.Simplify.Utils * Note [Lone variables] in GHC.Core.Unfold * Note [exprIsConApp_maybe on data constructors with wrappers] in GHC.Core.SimpleOpt * #18012 Note [Bangs on imported data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We pass Maybe [HsImplBang] to mkDataConRep to make use of HsImplBangs from imported modules. 
- Nothing <=> use HsSrcBangs - Just bangs <=> use HsImplBangs For imported types we can't work it all out from the HsSrcBangs, because we want to be very sure to follow what the original module (where the data type was declared) decided, and that depends on what flags were enabled when it was compiled. So we record the decisions in the interface file. The HsImplBangs passed are in 1-1 correspondence with the dataConOrigArgTys of the DataCon. Note [Data con wrappers and unlifted types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT !Int# We certainly do not want to make a wrapper $WMkT x = case x of y { DEFAULT -> MkT y } For a start, it's still to generate a no-op. But worse, since wrappers are currently injected at TidyCore, we don't even optimise it away! So the stupid case expression stays there. This actually happened for the Integer data type (see #1600 comment:66)! Note [Data con wrappers and GADT syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider these two very similar data types: data T1 a b = MkT1 b data T2 a b where MkT2 :: forall b a. b -> T2 a b Despite their similar appearance, T2 will have a data con wrapper but T1 will not. What sets them apart? The types of their constructors, which are: MkT1 :: forall a b. b -> T1 a b MkT2 :: forall b a. b -> T2 a b MkT2's use of GADT syntax allows it to permute the order in which `a` and `b` would normally appear. See Note [DataCon user type variable binders] in GHC.Core.DataCon for further discussion on this topic. The worker data cons for T1 and T2, however, both have types such that `a` is expected to come before `b` as arguments. Because MkT2 permutes this order, it needs a data con wrapper to swizzle around the type variables to be in the order the worker expects. A somewhat surprising consequence of this is that *newtypes* can have data con wrappers! After all, a newtype can also be written with GADT syntax: newtype T3 a b where MkT3 :: forall b a. 
b -> T3 a b Again, this needs a wrapper data con to reorder the type variables. It does mean that this newtype constructor requires another level of indirection when being called, but the inliner should make swift work of that. Note [HsImplBangs for newtypes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most of the time, we use the dataConSrctoImplBang function to decide what strictness/unpackedness to use for the fields of a data type constructor. But there is an exception to this rule: newtype constructors. You might not think that newtypes would pose a challenge, since newtypes are seemingly forbidden from having strictness annotations in the first place. But consider this (from #16141): {-# LANGUAGE StrictData #-} {-# OPTIONS_GHC -O #-} newtype T a b where MkT :: forall b a. Int -> T a b Because StrictData (plus optimization) is enabled, invoking dataConSrcToImplBang would sneak in and unpack the field of type Int to Int#! This would be disastrous, since the wrapper for `MkT` uses a coercion involving Int, not Int#. Bottom line: dataConSrcToImplBang should never be invoked for newtypes. In the case of a newtype constructor, we simply hardcode its dcr_bangs field to [HsLazy]. -} ------------------------- -- | Conjure a fresh local binder. newLocal :: FastString -- ^ a string which will form part of the 'Var'\'s name -> Scaled Type -- ^ the type of the 'Var' -> UniqSM Var newLocal name_stem (Scaled w ty) = mkSysLocalOrCoVarM name_stem w ty -- We should not have "OrCoVar" here, this is a bug (#17545) -- | Unpack/Strictness decisions from source module. -- -- This function should only ever be invoked for data constructor fields, and -- never on the field of a newtype constructor. -- See @Note [HsImplBangs for newtypes]@. 
dataConSrcToImplBang :: BangOpts -> FamInstEnvs -> Scaled Type -> HsSrcBang -> HsImplBang dataConSrcToImplBang bang_opts fam_envs arg_ty (HsSrcBang ann (HsBang unpk NoSrcStrict)) | bang_opt_strict_data bang_opts -- StrictData => strict field = dataConSrcToImplBang bang_opts fam_envs arg_ty (mkHsSrcBang ann unpk SrcStrict) | otherwise -- no StrictData => lazy field = HsLazy dataConSrcToImplBang _ _ _ (HsSrcBang _ (HsBang _ SrcLazy)) = HsLazy dataConSrcToImplBang bang_opts fam_envs arg_ty (HsSrcBang _ (HsBang unpk_prag SrcStrict)) | isUnliftedType (scaledThing arg_ty) -- NB: non-newtype data constructors can't have representation-polymorphic fields -- so this is OK. = HsLazy -- For !Int#, say, use HsLazy -- See Note [Data con wrappers and unlifted types] | let mb_co = topNormaliseType_maybe fam_envs (scaledThing arg_ty) -- Unwrap type families and newtypes arg_ty' = case mb_co of { Just redn -> scaledSet arg_ty (reductionReducedType redn) ; Nothing -> arg_ty } , shouldUnpackArgTy bang_opts unpk_prag fam_envs arg_ty' = if bang_opt_unbox_disable bang_opts then HsStrict True -- Not unpacking because of -O0 -- See Note [Detecting useless UNPACK pragmas] in GHC.Core.DataCon else case mb_co of Nothing -> HsUnpack Nothing Just redn -> HsUnpack (Just $ reductionCoercion redn) | otherwise -- Record the strict-but-no-unpack decision = HsStrict False -- | Wrappers/Workers and representation following Unpack/Strictness -- decisions dataConArgRep :: Scaled Type -> HsImplBang -> ([(Scaled Type,StrictnessMark)] -- Rep types ,(Unboxer,Boxer)) dataConArgRep arg_ty HsLazy = ([(arg_ty, NotMarkedStrict)], (unitUnboxer, unitBoxer)) dataConArgRep arg_ty (HsStrict _) = ([(arg_ty, MarkedStrict)], (seqUnboxer, unitBoxer)) dataConArgRep arg_ty (HsUnpack Nothing) = dataConArgUnpack arg_ty dataConArgRep (Scaled w _) (HsUnpack (Just co)) | let co_rep_ty = coercionRKind co , (rep_tys, wrappers) <- dataConArgUnpack (Scaled w co_rep_ty) = (rep_tys, wrapCo co co_rep_ty wrappers) 
------------------------- wrapCo :: Coercion -> Type -> (Unboxer, Boxer) -> (Unboxer, Boxer) wrapCo co rep_ty (unbox_rep, box_rep) -- co :: arg_ty ~ rep_ty = (unboxer, boxer) where unboxer arg_id = do { rep_id <- newLocal (fsLit "cowrap_unbx") (Scaled (idMult arg_id) rep_ty) ; (rep_ids, rep_fn) <- unbox_rep rep_id ; let co_bind = NonRec rep_id (Var arg_id `Cast` co) ; return (rep_ids, Let co_bind . rep_fn) } boxer = Boxer $ \ subst -> do { (rep_ids, rep_expr) <- case box_rep of UnitBox -> do { rep_id <- newLocal (fsLit "cowrap_bx") (linear $ TcType.substTy subst rep_ty) ; return ([rep_id], Var rep_id) } Boxer boxer -> boxer subst ; let sco = substCoUnchecked subst co ; return (rep_ids, rep_expr `Cast` mkSymCo sco) } ------------------------ seqUnboxer :: Unboxer seqUnboxer v = return ([v], mkDefaultCase (Var v) v) unitUnboxer :: Unboxer unitUnboxer v = return ([v], \e -> e) unitBoxer :: Boxer unitBoxer = UnitBox ------------------------- {- Note [UNPACK for sum types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have a data type D, for example: data D = D1 [Int] [Bool] | D2 and another data type which unpacks a field of type D: data U a = MkU {-# UNPACK #-} !D {-# UNPACK #-} !(a,a) {-# UNPACK #-} !D Then the wrapper and worker for MkU have these types -- Wrapper $WMkU :: D -> (a,a) -> D -> U a -- Worker MkU :: (# (# [Int],[Bool] #) | (# #) #) -> a -> a -> (# (# [Int],[Bool] #) | (# #) #) -> U a For each unpacked /sum/-type argument, the worker gets one argument. But for each unpacked /product/-type argument, the worker gets N arguments (here two). Why treat them differently? See Note [Why sums and products are treated differently]. The wrapper $WMkU looks like this: $WMkU :: D -> (a,a) -> D -> U a $WMkU x1 y x2 = case (case x1 of { D1 a b -> (# (# a,b #) | #) D2 -> (# | (# #) #) }) of { x1_ubx -> case y of { (y1, y2) -> case (case x2 of { D1 a b -> (# (# a,b #) | #) D2 -> (# | (# #) #) }) of { x2_ubx -> MkU x1_ubx y1 y2 x2_ubx Notice the nested case needed for sums. 
This different treatment for sums and product is implemented in dataConArgUnpackSum and dataConArgUnpackProduct respectively. Note [Why sums and products are treated differently] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Can we handle sums like products, with each wrapper argument occupying multiple argument slots in the worker? No: for a sum type the number of argument slots varies, and that's exactly what unboxed sums are designed for. Can we handle products like sums, with each wrapper argument occupying exactly one argument slot (and unboxed tuple) in the worker? Yes, we could. For example data P = MkP {-# UNPACK #-} !Q data Q = MkQ {-# NOUNPACK #-} !Int {-# NOUNPACK #-} Int Currently could unpack P thus, taking two slots in the worker $WMkP :: Q -> P $WMkP x = case x of { MkQ a b -> MkP a b } MkP :: Int -> Int -> P -- Worker We could instead do this (uniformly with sums) $WMkP1 :: Q -> P $WMkP1 x = case (case x of { MkQ a b -> (# a, b #) }) of ubx_x MkP1 ubx_x MkP1 :: (# Int, Int #) -> P -- Worker The representation of MkP and MkP1 would be identical (a constructor with two fields). BUT, with MkP (as with every data constructor) we record its argument strictness as a bit-vector, actually [StrictnessMark] MkP strictness: SL This information is used in Core to record which fields are sure to be evaluated. (Look for calls to dataConRepStrictness.) E.g. in Core case v of MkP x y -> ........ Alas, with MkP1 this information is hidden by the unboxed pair, In Core there will be an auxiliary case expression to take apart the pair: case v of MkP1 xy -> case xy of (# x,y #) -> ... And now we have no easy way to know that x is evaluated in the "...". Fixing this might be possible, but it'd be tricky. So we avoid the problem entirely by treating sums and products differently here. 
-} dataConArgUnpack :: Scaled Type -> ( [(Scaled Type, StrictnessMark)] -- Rep types , (Unboxer, Boxer) ) dataConArgUnpack scaledTy@(Scaled _ arg_ty) | Just (tc, tc_args) <- splitTyConApp_maybe arg_ty = assert (not (isNewTyCon tc)) $ case tyConDataCons tc of [con] -> dataConArgUnpackProduct scaledTy tc_args con cons -> dataConArgUnpackSum scaledTy tc_args cons | otherwise = pprPanic "dataConArgUnpack" (ppr arg_ty) -- An interface file specified Unpacked, but we couldn't unpack it dataConArgUnpackProduct :: Scaled Type -> [Type] -> DataCon -> ( [(Scaled Type, StrictnessMark)] -- Rep types , (Unboxer, Boxer) ) dataConArgUnpackProduct (Scaled arg_mult _) tc_args con = assert (null (dataConExTyCoVars con)) $ -- Note [Unpacking GADTs and existentials] let rep_tys = map (scaleScaled arg_mult) $ dataConInstArgTys con tc_args in ( rep_tys `zip` dataConRepStrictness con , ( \ arg_id -> do { rep_ids <- mapM (newLocal (fsLit "unbx")) rep_tys ; let r_mult = idMult arg_id ; let rep_ids' = map (scaleIdBy r_mult) rep_ids ; let unbox_fn body = mkSingleAltCase (Var arg_id) arg_id (DataAlt con) rep_ids' body ; return (rep_ids, unbox_fn) } , Boxer $ \ subst -> do { rep_ids <- mapM (newLocal (fsLit "bx") . TcType.substScaledTyUnchecked subst) rep_tys ; return (rep_ids, Var (dataConWorkId con) `mkTyApps` (substTysUnchecked subst tc_args) `mkVarApps` rep_ids ) } ) ) dataConArgUnpackSum :: Scaled Type -> [Type] -> [DataCon] -> ( [(Scaled Type, StrictnessMark)] -- Rep types , (Unboxer, Boxer) ) dataConArgUnpackSum (Scaled arg_mult arg_ty) tc_args cons = ( [ (sum_ty, MarkedStrict) ] -- The idea: Unpacked variant will -- be one field only, and the type of the -- field will be an unboxed sum. , ( unboxer, boxer ) ) where !ubx_sum_arity = length cons src_tys = map (\con -> map scaledThing $ dataConInstArgTys con tc_args) cons sum_alt_tys = map mkUbxSumAltTy src_tys sum_ty_unscaled = mkSumTy sum_alt_tys sum_ty = Scaled arg_mult sum_ty_unscaled newLocal' fs = newLocal fs . 
Scaled arg_mult -- See Note [UNPACK for sum types] unboxer :: Unboxer unboxer arg_id = do con_arg_binders <- mapM (mapM (newLocal' (fsLit "unbx"))) src_tys ubx_sum_bndr <- newLocal (fsLit "unbx") sum_ty let mk_ubx_sum_alt :: Int -> DataCon -> [Var] -> CoreAlt mk_ubx_sum_alt alt con [bndr] = Alt (DataAlt con) [bndr] (mkCoreUnboxedSum ubx_sum_arity alt sum_alt_tys (Var bndr)) mk_ubx_sum_alt alt con bndrs = let tuple = mkCoreUnboxedTuple (map Var bndrs) in Alt (DataAlt con) bndrs (mkCoreUnboxedSum ubx_sum_arity alt sum_alt_tys tuple ) ubx_sum :: CoreExpr ubx_sum = let alts = zipWith3 mk_ubx_sum_alt [ 1 .. ] cons con_arg_binders in Case (Var arg_id) arg_id (coreAltsType alts) alts unbox_fn :: CoreExpr -> CoreExpr unbox_fn body = mkSingleAltCase ubx_sum ubx_sum_bndr DEFAULT [] body return ([ubx_sum_bndr], unbox_fn) boxer :: Boxer boxer = Boxer $ \ subst -> do unboxed_field_id <- newLocal' (fsLit "bx") (TcType.substTy subst sum_ty_unscaled) tuple_bndrs <- mapM (newLocal' (fsLit "bx") . TcType.substTy subst) sum_alt_tys let tc_args' = substTys subst tc_args arg_ty' = substTy subst arg_ty con_arg_binders <- mapM (mapM (newLocal' (fsLit "bx")) . map (TcType.substTy subst)) src_tys let mk_sum_alt :: Int -> DataCon -> Var -> [Var] -> CoreAlt mk_sum_alt alt con _ [datacon_bndr] = ( Alt (DataAlt (sumDataCon alt ubx_sum_arity)) [datacon_bndr] (Var (dataConWorkId con) `mkTyApps` tc_args' `mkVarApps` [datacon_bndr] )) mk_sum_alt alt con tuple_bndr datacon_bndrs = ( Alt (DataAlt (sumDataCon alt ubx_sum_arity)) [tuple_bndr] ( Case (Var tuple_bndr) tuple_bndr arg_ty' [ Alt (DataAlt (tupleDataCon Unboxed (length datacon_bndrs))) datacon_bndrs (Var (dataConWorkId con) `mkTyApps` tc_args' `mkVarApps` datacon_bndrs ) ] )) return ( [unboxed_field_id], Case (Var unboxed_field_id) unboxed_field_id arg_ty' (zipWith4 mk_sum_alt [ 1 .. 
] cons tuple_bndrs con_arg_binders) ) -- | Every alternative of an unboxed sum has exactly one field, and we use -- unboxed tuples when we need more than one field. This generates an unboxed -- tuple when necessary, to be used in unboxed sum alts. mkUbxSumAltTy :: [Type] -> Type mkUbxSumAltTy [ty] = ty mkUbxSumAltTy tys = mkTupleTy Unboxed tys shouldUnpackArgTy :: BangOpts -> SrcUnpackedness -> FamInstEnvs -> Scaled Type -> Bool -- True if we ought to unpack the UNPACK the argument type -- See Note [Recursive unboxing] -- We look "deeply" inside rather than relying on the DataCons -- we encounter on the way, because otherwise we might well -- end up relying on ourselves! shouldUnpackArgTy bang_opts prag fam_envs arg_ty | Just data_cons <- unpackable_type_datacons (scaledThing arg_ty) , all ok_con data_cons -- Returns True only if we can't get a -- loop involving these data cons , should_unpack prag arg_ty data_cons -- ...hence the call to dataConArgUnpack in -- should_unpack won't loop -- See Wrinkle (W1b) of Note [Recursive unboxing] for this loopy stuff = True | otherwise = False where ok_con :: DataCon -> Bool -- True <=> OK to unpack ok_con top_con -- False <=> not safe = ok_args emptyNameSet top_con where top_con_name = getName top_con ok_args dcs con = all (ok_arg dcs) $ (dataConOrigArgTys con `zip` dataConSrcBangs con) -- NB: dataConSrcBangs gives the *user* request; -- We'd get a black hole if we used dataConImplBangs ok_arg :: NameSet -> (Scaled Type, HsSrcBang) -> Bool ok_arg dcs (Scaled _ ty, HsSrcBang _ (HsBang unpack_prag str_prag)) | strict_field str_prag , Just data_cons <- unpackable_type_datacons (topNormaliseType fam_envs ty) , should_unpack_conservative unpack_prag data_cons -- Wrinkle (W3) = all (ok_rec_con dcs) data_cons -- of Note [Recursive unboxing] | otherwise = True -- NB True here, in contrast to False at top level -- See Note [Recursive unboxing] -- * Do not look at the HsImplBangs to `con`; see Wrinkle (W1a) -- * For the "at the root" 
comments see Wrinkle (W2) ok_rec_con dcs con | dc_name == top_con_name = False -- Recursion at the root | dc_name `elemNameSet` dcs = True -- Not at the root | otherwise = ok_args (dcs `extendNameSet` dc_name) con where dc_name = getName con strict_field :: SrcStrictness -> Bool -- True <=> strict field strict_field NoSrcStrict = bang_opt_strict_data bang_opts strict_field SrcStrict = True strict_field SrcLazy = False -- Determine whether we ought to unpack a field, -- based on user annotations if present. -- A conservative version of should_unpack that doesn't look at how -- many fields the field would unpack to... because that leads to a loop. -- "Conservative" = err on the side of saying "yes". should_unpack_conservative :: SrcUnpackedness -> [DataCon] -> Bool should_unpack_conservative SrcNoUnpack _ = False -- {-# NOUNPACK #-} should_unpack_conservative SrcUnpack _ = True -- {-# NOUNPACK #-} should_unpack_conservative NoSrcUnpack dcs = not (is_sum dcs) -- is_sum: we never unpack sums without a pragma; otherwise be conservative -- Determine whether we ought to unpack a field, -- based on user annotations if present, and heuristics if not. should_unpack :: SrcUnpackedness -> Scaled Type -> [DataCon] -> Bool should_unpack prag arg_ty data_cons = case prag of SrcNoUnpack -> False -- {-# NOUNPACK #-} SrcUnpack -> True -- {-# UNPACK #-} NoSrcUnpack -- No explicit unpack pragma, so use heuristics | is_sum data_cons -> False -- Don't unpack sum types automatically, but they can -- be unpacked with an explicit source UNPACK. | otherwise -- Wrinkle (W4) of Note [Recursive unboxing] -> bang_opt_unbox_strict bang_opts || (bang_opt_unbox_small bang_opts && is_small_rep) -- See Note [Unpack one-wide fields] where (rep_tys, _) = dataConArgUnpack arg_ty -- Takes in the list of reps used to represent the dataCon after it's unpacked -- and tells us if they can fit into 8 bytes. See Note [Unpack one-wide fields] is_small_rep = let -- Neccesary to look through unboxed tuples. 
prim_reps = concatMap (typePrimRep . scaledThing . fst) $ rep_tys -- And then get the actual size of the unpacked constructor. rep_size = sum $ map primRepSizeW64_B prim_reps in rep_size <= 8 is_sum :: [DataCon] -> Bool -- We never unpack sum types automatically -- (Product types, we do. Empty types are weeded out by unpackable_type_datacons.) is_sum (_:_:_) = True is_sum _ = False -- Given a type already assumed to have been normalized by topNormaliseType, -- unpackable_type_datacons ty = Just datacons -- iff ty is of the form -- T ty1 .. tyn -- and T is an algebraic data type (not newtype), in which no data -- constructors have existentials, and datacons is the list of data -- constructors of T. unpackable_type_datacons :: Type -> Maybe [DataCon] unpackable_type_datacons ty | Just (tc, _) <- splitTyConApp_maybe ty , not (isNewTyCon tc) -- Even though `ty` has been normalised, it could still -- be a /recursive/ newtype, so we must check for that , Just cons <- tyConDataCons_maybe tc , not (null cons) -- Don't upack nullary sums; no need. -- They already take zero bits , all (null . dataConExTyCoVars) cons = Just cons -- See Note [Unpacking GADTs and existentials] | otherwise = Nothing {- Note [Unpacking GADTs and existentials] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is nothing stopping us unpacking a data type with equality components, like data Equal a b where Equal :: Equal a a And it'd be fine to unpack a product type with existential components too, but that would require a bit more plumbing, so currently we don't. So for now we require: null (dataConExTyCoVars data_con) See #14978 Note [Unpack one-wide fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The flag UnboxSmallStrictFields ensures that any field that can (safely) be unboxed to a word-sized unboxed field, should be so unboxed. 
For example: data A = A Int# newtype B = B A data C = C !B data D = D !C data E = E !() data F = F !D data G = G !F !F All of these should have an Int# as their representation, except G which should have two Int#s. However data T = T !(S Int) data S = S !a Here we can represent T with an Int#. Special care has to be taken to make sure we don't mistake fields with unboxed tuple/sum rep or very large reps. See #22309 For consistency we unpack anything that fits into 8 bytes on a 64-bit platform, even when compiling for 32bit platforms. This way unpacking decisions will be the same for 32bit and 64bit systems. To do so we use primRepSizeW64_B instead of primRepSizeB. See also the tests in test case T22309. Note [Recursive unboxing] ~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data R = MkR {-# UNPACK #-} !S Int data S = MkS {-# UNPACK #-} !Int The representation arguments of MkR are the *representation* arguments of S (plus Int); the rep args of MkS are Int#. This is all fine. But be careful not to try to unbox this! data T = MkT {-# UNPACK #-} !T Int Because then we'd get an infinite number of arguments. Note that it's the *argument* type that matters. This is fine: data S = MkS S !Int because Int is non-recursive. Wrinkles: (W1a) We have to be careful that the compiler doesn't go into a loop! First, we must not look at the HsImplBang decisions of data constructors in the same mutually recursive group. E.g. data S = MkS {-# UNPACK #-} !T Int data T = MkT {-# UNPACK #-} !S Int Each of S and T must decide /independently/ whether to unpack and they had better not both say yes. So they must both say no. (We could detect when we leave the group, and /then/ we can rely on HsImplBangs; but that requires more plumbing.) (W1b) Here is another way the compiler might go into a loop (test T23307b): data T = MkT !S Int data S = MkS !T Suppose we call `shouldUnpackArgTy` on the !S arg of `T`.
      In `should_unpack` we ask if the number of fields that `MkS` unpacks
      to is small enough (via rep_tys `lengthAtMost` 1).  But how many
      fields /does/ `MkS` unpack to?  Well it depends on the unpacking
      decision we make for `MkS`, which in turn depends on `MkT`, which we
      are busy deciding. Black holes beckon.

      So we /first/ call `ok_con` on `MkS` (and `ok_con` is conservative;
      see `should_unpack_conservative`), and only /then/ call
      `should_unpack`.  Tricky!

(W2) As #23307 shows, we /do/ want to unpack the second arg of the Yes
     data constructor in this example, despite the recursion in List:
       data Stream a    = Cons a !(Stream a)
       data Unconsed a  = Unconsed a !(Stream a)
       data MUnconsed a = No | Yes {-# UNPACK #-} !(Unconsed a)
     When looking at
       {-# UNPACK #-} (Unconsed a)
     we can take Unconsed apart, but then get into a loop with Stream.
     That's fine: we can still take Unconsed apart. It's only if we have
     a loop /at the root/ that we must not unpack.

(W3) Moreover (W2) can apply even if there is a recursive loop:
       data List a = Nil | Cons {-# UNPACK #-} !(Unconsed a)
       data Unconsed a = Unconsed a !(List a)
     Here there is mutual recursion between `Unconsed` and `List`; and yet
     we can unpack the field of `Cons` because we will not unpack the
     second field of `Unconsed`: we never unpack a sum type without an
     explicit pragma (see should_unpack).

(W4) Consider
        data T = MkT !Wombat
        data Wombat = MkW {-# UNPACK #-} !S Int
        data S = MkS {-# NOUNPACK #-} !Wombat Int
     Suppose we are deciding whether to unpack the first field of MkT, by
     calling (shouldUnpackArgTy Wombat). Then we'll try to unpack the !S
     field of MkW, and be stopped by the {-# NOUNPACK #-}, and all is
     fine; we can unpack MkT.

     If that NOUNPACK had been a UNPACK, though, we'd get a loop, and
     would decide not to unpack the Wombat field of MkT.

     But what if there was no pragma in `data S`?
Then we /still/ decide not to unpack the Wombat field of MkT (at least when auto-unpacking is on), because we don't know for sure which decision will be taken for the Wombat field of MkS. TL;DR when there is no pragma, behave as if there was a UNPACK, at least when auto-unpacking is on. See `should_unpack` in `shouldUnpackArgTy`. ************************************************************************ * * Wrapping and unwrapping newtypes and type families * * ************************************************************************ -} wrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr -- The wrapper for the data constructor for a newtype looks like this: -- newtype T a = MkT (a,Int) -- MkT :: forall a. (a,Int) -> T a -- MkT = /\a. \(x:(a,Int)). x `cast` sym (CoT a) -- where CoT is the coercion TyCon associated with the newtype -- -- The call (wrapNewTypeBody T [a] e) returns the -- body of the wrapper, namely -- e `cast` (CoT [a]) -- -- If a coercion constructor is provided in the newtype, then we use -- it, otherwise the wrap/unwrap are both no-ops wrapNewTypeBody tycon args result_expr = assert (isNewTyCon tycon) $ mkCast result_expr (mkSymCo co) where co = mkUnbranchedAxInstCo Representational (newTyConCo tycon) args [] -- When unwrapping, we do *not* apply any family coercion, because this will -- be done via a CoPat by the type checker. We have to do it this way as -- computing the right type arguments for the coercion requires more than just -- a splitting operation (cf, GHC.Tc.Gen.Pat.tcConPat). 
unwrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr unwrapNewTypeBody tycon args result_expr = assert (isNewTyCon tycon) $ mkCast result_expr (mkUnbranchedAxInstCo Representational (newTyConCo tycon) args []) -- If the type constructor is a representation type of a data instance, wrap -- the expression into a cast adjusting the expression type, which is an -- instance of the representation type, to the corresponding instance of the -- family instance type. -- See Note [Wrappers for data instance tycons] wrapFamInstBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr wrapFamInstBody tycon args body | Just co_con <- tyConFamilyCoercion_maybe tycon = mkCast body (mkSymCo (mkUnbranchedAxInstCo Representational co_con args [])) | otherwise = body {- ************************************************************************ * * * Foreign calls * * ************************************************************************ -} -- For each ccall we manufacture a separate CCallOpId, giving it -- a fresh unique, a type that is correct for this particular ccall, -- and a CCall structure that gives the correct details about calling -- convention etc. -- -- The *name* of this Id is a local name whose OccName gives the full -- details of the ccall, type and all. This means that the interface -- file reader can reconstruct a suitable Id mkFCallId :: Unique -> ForeignCall -> Type -> Id mkFCallId uniq fcall ty = assert (noFreeVarsOfType ty) $ -- A CCallOpId should have no free type variables; -- when doing substitutions won't substitute over it mkGlobalId (FCallId fcall) name ty info where occ_str = renderWithContext defaultSDocContext (braces (ppr fcall <+> ppr ty)) -- The "occurrence name" of a ccall is the full info about the -- ccall; it is encoded, but may have embedded spaces etc! 
name = mkFCallName uniq (mkFastString occ_str) info = noCafIdInfo `setArityInfo` arity `setDmdSigInfo` strict_sig `setCprSigInfo` topCprSig (bndrs, _) = tcSplitPiTys ty arity = count isAnonPiTyBinder bndrs strict_sig = mkVanillaDmdSig arity topDiv -- the call does not claim to be strict in its arguments, since they -- may be lifted (foreign import prim) and the called code doesn't -- necessarily force them. See #11076. {- ************************************************************************ * * \subsection{DictFuns and default methods} * * ************************************************************************ Note [Dict funs and default methods] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dict funs and default methods are *not* ImplicitIds. Their definition involves user-written code, so we can't figure out their strictness etc based on fixed info, as we can for constructors and record selectors (say). NB: See also Note [Exported LocalIds] in GHC.Types.Id -} mkDictFunId :: Name -- Name to use for the dict fun; -> [TyVar] -> ThetaType -> Class -> [Type] -> Id -- Implements the DFun Superclass Invariant (see GHC.Tc.TyCl.Instance) -- See Note [Dict funs and default methods] mkDictFunId dfun_name tvs theta clas tys = mkExportedLocalId (DFunId is_nt) dfun_name dfun_ty where is_nt = isNewTyCon (classTyCon clas) dfun_ty = TcType.tcMkDFunSigmaTy tvs theta (mkClassPred clas tys) {- ************************************************************************ * * \subsection{Un-definable} * * ************************************************************************ These Ids can't be defined in Haskell. They could be defined in unfoldings in the wired-in GHC.Prim interface file, but we'd have to ensure that they were definitely, definitely inlined, because there is no curried identifier for them. That's what mkCompulsoryUnfolding does. Alternatively, we could add the definitions to mi_decls of ghcPrimIface but it's not clear if this would be simpler. 
coercionToken# is not listed in ghcPrimIds, since its type uses (~#) which is not supposed to be used in expressions (GHC throws an assertion failure when trying.) -} nullAddrName, seqName, realWorldName, voidPrimIdName, coercionTokenName, coerceName, proxyName, leftSectionName, rightSectionName :: Name nullAddrName = mkWiredInIdName gHC_PRIM (fsLit "nullAddr#") nullAddrIdKey nullAddrId seqName = mkWiredInIdName gHC_PRIM (fsLit "seq") seqIdKey seqId realWorldName = mkWiredInIdName gHC_PRIM (fsLit "realWorld#") realWorldPrimIdKey realWorldPrimId voidPrimIdName = mkWiredInIdName gHC_PRIM (fsLit "void#") voidPrimIdKey voidPrimId coercionTokenName = mkWiredInIdName gHC_PRIM (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId coerceName = mkWiredInIdName gHC_PRIM (fsLit "coerce") coerceKey coerceId proxyName = mkWiredInIdName gHC_PRIM (fsLit "proxy#") proxyHashKey proxyHashId leftSectionName = mkWiredInIdName gHC_PRIM (fsLit "leftSection") leftSectionKey leftSectionId rightSectionName = mkWiredInIdName gHC_PRIM (fsLit "rightSection") rightSectionKey rightSectionId -- Names listed in magicIds; see Note [magicIds] lazyIdName, oneShotName, nospecIdName :: Name lazyIdName = mkWiredInIdName gHC_MAGIC (fsLit "lazy") lazyIdKey lazyId oneShotName = mkWiredInIdName gHC_MAGIC (fsLit "oneShot") oneShotKey oneShotId nospecIdName = mkWiredInIdName gHC_MAGIC (fsLit "nospec") nospecIdKey nospecId ------------------------------------------------ proxyHashId :: Id proxyHashId = pcMiscPrelId proxyName ty (noCafIdInfo `setUnfoldingInfo` evaldUnfolding) -- Note [evaldUnfoldings] where -- proxy# :: forall {k} (a:k). Proxy# k a -- -- The visibility of the `k` binder is Inferred to match the type of the -- Proxy data constructor (#16293). 
[kv,tv] = mkTemplateKiTyVar liftedTypeKind (\x -> [x]) kv_ty = mkTyVarTy kv tv_ty = mkTyVarTy tv ty = mkInfForAllTy kv $ mkSpecForAllTy tv $ mkProxyPrimTy kv_ty tv_ty ------------------------------------------------ nullAddrId :: Id -- nullAddr# :: Addr# -- The reason it is here is because we don't provide -- a way to write this literal in Haskell. nullAddrId = pcMiscPrelId nullAddrName addrPrimTy info where info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkCompulsoryUnfolding (Lit nullAddrLit) ------------------------------------------------ seqId :: Id -- See Note [seqId magic] seqId = pcRepPolyId seqName ty concs info where info = noCafIdInfo `setInlinePragInfo` inline_prag `setUnfoldingInfo` mkCompulsoryUnfolding rhs `setArityInfo` arity inline_prag = alwaysInlinePragma `setInlinePragmaActivation` ActiveAfter NoSourceText 0 -- Make 'seq' not inline-always, so that simpleOptExpr -- (see GHC.Core.Subst.simple_app) won't inline 'seq' on the -- LHS of rules. That way we can have rules for 'seq'; -- see Note [seqId magic] -- seq :: forall (r :: RuntimeRep) a (b :: TYPE r). 
a -> b -> b ty = mkInfForAllTy runtimeRep2TyVar $ mkSpecForAllTys [alphaTyVar, openBetaTyVar] $ mkVisFunTyMany alphaTy (mkVisFunTyMany openBetaTy openBetaTy) [x,y] = mkTemplateLocals [alphaTy, openBetaTy] rhs = mkLams ([runtimeRep2TyVar, alphaTyVar, openBetaTyVar, x, y]) $ Case (Var x) x openBetaTy [Alt DEFAULT [] (Var y)] concs = mkRepPolyIdConcreteTyVars [ ((openBetaTy, Argument 2 Top), runtimeRep2TyVar)] arity = 2 ------------------------------------------------ lazyId :: Id -- See Note [lazyId magic] lazyId = pcMiscPrelId lazyIdName ty info where info = noCafIdInfo ty = mkSpecForAllTys [alphaTyVar] (mkVisFunTyMany alphaTy alphaTy) ------------------------------------------------ noinlineIdName, noinlineConstraintIdName :: Name noinlineIdName = mkWiredInIdName gHC_MAGIC (fsLit "noinline") noinlineIdKey noinlineId noinlineConstraintIdName = mkWiredInIdName gHC_MAGIC (fsLit "noinlineConstraint") noinlineConstraintIdKey noinlineConstraintId noinlineId :: Id -- See Note [noinlineId magic] noinlineId = pcMiscPrelId noinlineIdName ty info where info = noCafIdInfo ty = mkSpecForAllTys [alphaTyVar] $ mkVisFunTyMany alphaTy alphaTy noinlineConstraintId :: Id -- See Note [noinlineId magic] noinlineConstraintId = pcMiscPrelId noinlineConstraintIdName ty info where info = noCafIdInfo ty = mkSpecForAllTys [alphaConstraintTyVar] $ mkFunTy visArgConstraintLike ManyTy alphaTy alphaConstraintTy ------------------------------------------------ nospecId :: Id -- See Note [nospecId magic] nospecId = pcMiscPrelId nospecIdName ty info where info = noCafIdInfo ty = mkSpecForAllTys [alphaTyVar] (mkVisFunTyMany alphaTy alphaTy) oneShotId :: Id -- See Note [oneShot magic] oneShotId = pcRepPolyId oneShotName ty concs info where info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkCompulsoryUnfolding rhs `setArityInfo` arity -- oneShot :: forall {r1 r2} (a :: TYPE r1) (b :: TYPE r2). 
(a -> b) -> (a -> b) ty = mkInfForAllTys [ runtimeRep1TyVar, runtimeRep2TyVar ] $ mkSpecForAllTys [ openAlphaTyVar, openBetaTyVar ] $ mkVisFunTyMany fun_ty fun_ty fun_ty = mkVisFunTyMany openAlphaTy openBetaTy [body, x] = mkTemplateLocals [fun_ty, openAlphaTy] x' = setOneShotLambda x -- Here is the magic bit! rhs = mkLams [ runtimeRep1TyVar, runtimeRep2TyVar , openAlphaTyVar, openBetaTyVar , body, x'] $ Var body `App` Var x' arity = 2 concs = mkRepPolyIdConcreteTyVars [((openAlphaTy, Argument 2 Top), runtimeRep1TyVar)] ---------------------------------------------------------------------- {- Note [Wired-in Ids for rebindable syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions leftSectionId, rightSectionId are wired in here ONLY because they are used in a representation-polymorphic way by the rebindable syntax mechanism. See GHC.Rename.Expr Note [Handling overloaded and rebindable constructs]. Alas, we can't currently give Haskell definitions for representation-polymorphic functions. They have Compulsory unfoldings, so that the representation polymorphism does not linger for long. -} -- See Note [Left and right sections] in GHC.Rename.Expr -- See Note [Wired-in Ids for rebindable syntax] -- leftSection :: forall r1 r2 n (a::TYPE r1) (b::TYPE r2). 
-- (a %n-> b) -> a %n-> b -- leftSection f x = f x -- Important that it is eta-expanded, so that (leftSection undefined `seq` ()) -- is () and not undefined -- Important that is is multiplicity-polymorphic (test linear/should_compile/OldList) leftSectionId :: Id leftSectionId = pcRepPolyId leftSectionName ty concs info where info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkCompulsoryUnfolding rhs `setArityInfo` arity ty = mkInfForAllTys [runtimeRep1TyVar,runtimeRep2TyVar, multiplicityTyVar1] $ mkSpecForAllTys [openAlphaTyVar, openBetaTyVar] $ exprType body [f,x] = mkTemplateLocals [mkVisFunTy mult openAlphaTy openBetaTy, openAlphaTy] mult = mkTyVarTy multiplicityTyVar1 :: Mult xmult = setIdMult x mult rhs = mkLams [ runtimeRep1TyVar, runtimeRep2TyVar, multiplicityTyVar1 , openAlphaTyVar, openBetaTyVar ] body body = mkLams [f,xmult] $ App (Var f) (Var xmult) arity = 2 concs = mkRepPolyIdConcreteTyVars [((openAlphaTy, Argument 2 Top), runtimeRep1TyVar)] -- See Note [Left and right sections] in GHC.Rename.Expr -- See Note [Wired-in Ids for rebindable syntax] -- rightSection :: forall r1 r2 r3 n1 n2 (a::TYPE r1) (b::TYPE r2) (c::TYPE r3). 
-- (a %n1 -> b %n2-> c) -> b %n2-> a %n1-> c -- rightSection f y x = f x y -- Again, multiplicity polymorphism is important rightSectionId :: Id rightSectionId = pcRepPolyId rightSectionName ty concs info where info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkCompulsoryUnfolding rhs `setArityInfo` arity ty = mkInfForAllTys [runtimeRep1TyVar,runtimeRep2TyVar,runtimeRep3TyVar , multiplicityTyVar1, multiplicityTyVar2 ] $ mkSpecForAllTys [openAlphaTyVar, openBetaTyVar, openGammaTyVar ] $ exprType body mult1 = mkTyVarTy multiplicityTyVar1 mult2 = mkTyVarTy multiplicityTyVar2 [f,x,y] = mkTemplateLocals [ mkScaledFunTys [ Scaled mult1 openAlphaTy , Scaled mult2 openBetaTy ] openGammaTy , openAlphaTy, openBetaTy ] xmult = setIdMult x mult1 ymult = setIdMult y mult2 rhs = mkLams [ runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar , multiplicityTyVar1, multiplicityTyVar2 , openAlphaTyVar, openBetaTyVar, openGammaTyVar ] body body = mkLams [f,ymult,xmult] $ mkVarApps (Var f) [xmult,ymult] arity = 3 concs = mkRepPolyIdConcreteTyVars [ ((openAlphaTy, Argument 3 Top), runtimeRep1TyVar) , ((openBetaTy , Argument 2 Top), runtimeRep2TyVar)] -------------------------------------------------------------------------------- coerceId :: Id coerceId = pcRepPolyId coerceName ty concs info where info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma `setUnfoldingInfo` mkCompulsoryUnfolding rhs `setArityInfo` 2 eqRTy = mkTyConApp coercibleTyCon [ tYPE_r, a, b ] eqRPrimTy = mkTyConApp eqReprPrimTyCon [ tYPE_r, tYPE_r, a, b ] ty = mkInvisForAllTys [ Bndr rv InferredSpec , Bndr av SpecifiedSpec , Bndr bv SpecifiedSpec ] $ mkInvisFunTy eqRTy $ mkVisFunTyMany a b bndrs@[rv,av,bv] = mkTemplateKiTyVar runtimeRepTy (\r -> [mkTYPEapp r, mkTYPEapp r]) [r, a, b] = mkTyVarTys bndrs tYPE_r = mkTYPEapp r [eqR,x,eq] = mkTemplateLocals [eqRTy, a, eqRPrimTy] rhs = mkLams (bndrs ++ [eqR, x]) $ mkWildCase (Var eqR) (unrestricted eqRTy) b $ [Alt (DataAlt coercibleDataCon) 
[eq] (Cast (Var x) (mkCoVarCo eq))] concs = mkRepPolyIdConcreteTyVars [((mkTyVarTy av, Argument 1 Top), rv)] {- Note [seqId magic] ~~~~~~~~~~~~~~~~~~ 'GHC.Prim.seq' is special in several ways. a) Its fixity is set in GHC.Iface.Load.ghcPrimIface b) It has quite a bit of desugaring magic. See GHC.HsToCore.Utils Note [Desugaring seq] (1) and (2) and (3) c) There is some special rule handing: Note [User-defined RULES for seq] Historical note: In GHC.Tc.Gen.Expr we used to need a special typing rule for 'seq', to handle calls whose second argument had an unboxed type, e.g. x `seq` 3# However, with representation polymorphism we can now give seq the type seq :: forall (r :: RuntimeRep) a (b :: TYPE r). a -> b -> b which handles this case without special treatment in the typechecker. Note [User-defined RULES for seq] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Roman found situations where he had case (f n) of _ -> e where he knew that f (which was strict in n) would terminate if n did. Notice that the result of (f n) is discarded. So it makes sense to transform to case n of _ -> e Rather than attempt some general analysis to support this, I've added enough support that you can do this using a rewrite rule: RULE "f/seq" forall n. seq (f n) = seq n You write that rule. When GHC sees a case expression that discards its result, it mentally transforms it to a call to 'seq' and looks for a RULE. (This is done in GHC.Core.Opt.Simplify.trySeqRules.) As usual, the correctness of the rule is up to you. VERY IMPORTANT: to make this work, we give the RULE an arity of 1, not 2. If we wrote RULE "f/seq" forall n e. seq (f n) e = seq n e with rule arity 2, then two bad things would happen: - The magical desugaring done in Note [seqId magic] item (b) for saturated application of 'seq' would turn the LHS into a case expression! - The code in GHC.Core.Opt.Simplify.rebuildCase would need to actually supply the value argument, which turns out to be awkward. 
See also: Note [User-defined RULES for seq] in GHC.Core.Opt.Simplify. Note [lazyId magic] ~~~~~~~~~~~~~~~~~~~ lazy :: forall a. a -> a 'lazy' is used to make sure that a sub-expression, and its free variables, are truly used call-by-need, with no code motion. Key examples: * pseq: pseq a b = a `seq` lazy b We want to make sure that the free vars of 'b' are not evaluated before 'a', even though the expression is plainly strict in 'b'. * catch: catch a b = catch# (lazy a) b Again, it's clear that 'a' will be evaluated strictly (and indeed applied to a state token) but we want to make sure that any exceptions arising from the evaluation of 'a' are caught by the catch (see #11555). Implementing 'lazy' is a bit tricky: * It must not have a strictness signature: by being a built-in Id, all the info about lazyId comes from here, not from GHC.Magic.hi. This is important, because the strictness analyser will spot it as strict! * It must not have an unfolding: it gets "inlined" by a HACK in CorePrep. It's very important to do this inlining *after* unfoldings are exposed in the interface file. Otherwise, the unfolding for (say) pseq in the interface file will not mention 'lazy', so if we inline 'pseq' we'll totally miss the very thing that 'lazy' was there for in the first place. See #3259 for a real world example. * Suppose CorePrep sees (catch# (lazy e) b). At all costs we must avoid using call by value here: case e of r -> catch# r b Avoiding that is the whole point of 'lazy'. So in CorePrep (which generate the 'case' expression for a call-by-value call) we must spot the 'lazy' on the arg (in CorePrep.cpeApp), and build a 'let' instead. * lazyId is defined in GHC.Base, so we don't *have* to inline it. If it appears un-applied, we'll end up just calling it. Note [noinlineId magic] ~~~~~~~~~~~~~~~~~~~~~~~ 'noinline' is used to make sure that a function f is never inlined, e.g., as in 'noinline f x'. 
We won't inline f because we never inline lone variables (see Note
[Lone variables] in GHC.Core.Unfold).

You might think that we could implement noinline like this:
   {-# NOINLINE #-}
   noinline :: forall a. a -> a
   noinline x = x

But actually we give 'noinline' a wired-in name for three distinct reasons:

1. We don't want to leave a (useless) call to noinline in the final program,
   to be executed at runtime. So we have a little bit of magic to optimize
   away 'noinline' after we are done running the simplifier.
   This is done in GHC.CoreToStg.Prep.cpeApp.

2. 'noinline' sometimes gets inserted automatically when we serialize an
   expression to the interface format, in GHC.CoreToIface.toIfaceVar.
   See Note [Inlining and hs-boot files] in GHC.CoreToIface

3. Given foo :: Eq a => [a] -> Bool, the expression
     noinline foo x xs
   where x::Int, will naturally desugar to
      noinline @Int (foo @Int dEqInt) x xs
   But now it's entirely possible that (foo @Int dEqInt) will inline foo,
   since 'foo' is no longer a lone variable -- see #18995

   Solution: in the desugarer, rewrite
      noinline (f x y)  ==>  noinline f x y
   This is done in GHC.HsToCore.Utils.mkCoreAppDs.
   This is only needed for noinlineId, not noInlineConstraintId (wrinkle
   (W1) below), because the latter never shows up in user code.

Wrinkles

(W1) Sometimes case (2) above needs to apply `noinline` to a type of kind
     Constraint; e.g.
                    noinline @(Eq Int) $dfEqInt
     We don't have type-or-kind polymorphism, so we simply have two
     `noinline` Ids, namely `noinlineId` and `noinlineConstraintId`.

(W2) Note that noinline as currently implemented can hide some
     simplifications since it hides strictness from the demand analyser.
     Specifically, the demand analyser will treat 'noinline f x' as lazy in
     'x', even if the demand signature of 'f' specifies that it is strict
     in its argument. We considered fixing this by adding a special case
     to the demand analyser to address #16588.
However, the special case seemed like a large and expensive hammer to address a rare case and consequently we rather opted to use a more minimal solution. Note [nospecId magic] ~~~~~~~~~~~~~~~~~~~~~ The 'nospec' magic Id is used to ensure to make a value opaque to the typeclass specialiser. In CorePrep, we inline 'nospec', turning (nospec e) into e. Note that this happens *after* unfoldings are exposed in the interface file. This is crucial: otherwise, we could import an unfolding in which 'nospec' has been inlined (= erased), and we would lose the benefit. 'nospec' is used: * In the implementation of 'withDict': we insert 'nospec' so that the typeclass specialiser doesn't assume any two evidence terms of the same type are equal. See Note [withDict] in GHC.Tc.Instance.Class, and see test case T21575b for an example. * To defeat the specialiser when we have incoherent instances. See Note [Coherence and specialisation: overview] in GHC.Core.InstEnv. Note [seq# magic] ~~~~~~~~~~~~~~~~~ The purpose of the magic Id (See Note [magicIds]) seq# :: forall a s . a -> State# s -> (# State# s, a #) is to elevate evaluation of its argument `a` into an observable side effect. This implies that GHC's optimisations must preserve the evaluation "exactly here", in the state thread. The main use of seq# is to implement `evaluate` evaluate :: a -> IO a evaluate a = IO $ \s -> seq# a s Its (NOINLINE) definition in GHC.Magic is simply seq# a s = let !a' = lazy a in (# s, a' #) Things to note (SEQ1) It must be NOINLINE, because otherwise the eval !a' would be decoupled from the state token s, and GHC's optimisations, in particular strictness analysis, would happily move the eval around. However, we *do* inline saturated applications of seq# in CorePrep, where evaluation order is fixed; see the implementation notes below. This is one reason why we need seq# to be known-key. 
(SEQ2) The use of `lazy` ensures that strictness analysis does not see the
   eval that takes place, so the final demand signature is <L>, not <1L>.
   This is important for a definition like
     foo x y = evaluate y >> evaluate x
   Although both y and x are ultimately evaluated, the user made it clear
   they want to evaluate y *before* x.
   But if strictness analysis sees the evals, it infers foo as strict in
   both parameters. This strictness would be exploited in the backend by
   picking a call-by-value calling convention for foo, one that would
   evaluate x *before* y. Nononono!

   Because the definition of seq# uses `lazy`, it must live in a different
   module (GHC.Internal.IO); otherwise strictness analysis uses its own
   strictness signature for the definition of `lazy` instead of the one we
   wire in.

(SEQ3) Why does seq# return the value? Consider
      let x = e in
      case seq# x s of (# _, x' #) ->
      ... x' ...
      case x' of __DEFAULT -> ...
   Here, we could simply use x instead of x', but doing so would introduce
   an unnecessary indirection and tag check at runtime; also we can attach
   an evaldUnfolding to x' to discard any subsequent evals such as the
   `case x' of __DEFAULT`.

(SEQ4) T15226 demonstrates that we want to discard ok-for-discard seq#s.
   That is, simplify `case seq# <ok-to-discard> s of (# s', _ #) -> rhs[s']`
   to `rhs[s]`. You might wonder whether the Simplifier could do this. But
   see the excellent example in #24334 (immortalised as test T24334) for
   why it should be done in CorePrep.

Implementing seq#.  The compiler has magic for `seq#` in

- GHC.CoreToStg.Prep.cpeRhsE: Implement (SEQ4).

- Simplify.addEvals records evaluated-ness for the result (cf. (SEQ3)); see
  Note [Adding evaluatedness info to pattern-bound variables]
  in GHC.Core.Opt.Simplify.Iteration

- GHC.Core.Opt.DmdAnal.exprMayThrowPreciseException: Historically, seq#
  used to be a primop, and the majority of primops should return False in
  exprMayThrowPreciseException, so we do the same for seq# for back compat.
- GHC.CoreToStg.Prep: Inline saturated applications to a Case, e.g., seq# (f 13) s ==> case f 13 of sat of __DEFAULT -> (# s, sat #) This is implemented in `cpeApp`, not unlike Note [runRW magic]. We are only inlining seq#, leaving opportunities for case-of-known-con behind that are easily picked up by Unarise: case seq# f 13 s of (# s', r #) -> rhs ==> {Prep} case f 13 of sat of __DEFAULT -> case (# s, sat #) of (# s', r #) -> rhs ==> {Unarise} case f 13 of sat of __DEFAULT -> rhs[s/s',sat/r] Note that CorePrep really allocates a CaseBound FloatingBind for `f 13`. That's OK, because the telescope of Floats always stays in the same order and won't be floated out of binders, so all guarantees of evaluation order provided by seq# are upheld. Note [oneShot magic] ~~~~~~~~~~~~~~~~~~~~ In the context of making left-folds fuse somewhat okish (see ticket #7994 and Note [Left folds via right fold]) it was determined that it would be useful if library authors could explicitly tell the compiler that a certain lambda is called at most once. The oneShot function allows that. 'oneShot' is representation-polymorphic, i.e. the type variables can refer to unlifted types as well (#10744); e.g. oneShot (\x:Int# -> x +# 1#) Like most magic functions it has a compulsory unfolding, so there is no need for a real definition somewhere. We have one in GHC.Magic for the convenience of putting the documentation there. It uses `setOneShotLambda` on the lambda's binder. That is the whole magic: A typical call looks like oneShot (\y. e) after unfolding the definition `oneShot = \f \x[oneshot]. f x` we get (\f \x[oneshot]. f x) (\y. e) --> \x[oneshot]. ((\y.e) x) --> \x[oneshot] e[x/y] which is what we want. Also see https://gitlab.haskell.org/ghc/ghc/wikis/one-shot. Wrinkles: (OS1) It is only effective if the one-shot info survives as long as possible; in particular it must make it into the interface in unfoldings. See Note [Preserve OneShotInfo] in GHC.Core.Tidy. 
(OS2) (oneShot (error "urk")) rewrites to \x[oneshot]. error "urk" x thereby hiding the `error` under a lambda, which might be surprising, particularly if you have `-fpedantic-bottoms` on. See #24296. ------------------------------------------------------------- @realWorld#@ used to be a magic literal, \tr{void#}. If things get nasty as-is, change it back to a literal (@Literal@). voidArgId is a Local Id used simply as an argument in functions where we just want an arg to avoid having a thunk of unlifted type. E.g. x = \ void :: Void# -> (# p, q #) This comes up in strictness analysis Note [evaldUnfoldings] ~~~~~~~~~~~~~~~~~~~~~~ The evaldUnfolding makes it look that some primitive value is evaluated, which in turn makes Simplify.interestingArg return True, which in turn makes INLINE things applied to said value likely to be inlined. -} realWorldPrimId :: Id -- :: State# RealWorld realWorldPrimId = pcMiscPrelId realWorldName id_ty (noCafIdInfo `setUnfoldingInfo` evaldUnfolding -- Note [evaldUnfoldings] `setOneShotInfo` typeOneShot id_ty) where id_ty = realWorldStatePrimTy voidPrimId :: Id -- Global constant :: Void# -- The type Void# is now the same as (# #) (ticket #18441), -- this identifier just signifies the (# #) datacon -- and is kept for backwards compatibility. -- We cannot define it in normal Haskell, since it's -- a top-level unlifted value. 
voidPrimId = pcMiscPrelId voidPrimIdName unboxedUnitTy (noCafIdInfo `setUnfoldingInfo` mkCompulsoryUnfolding unboxedUnitExpr) unboxedUnitExpr :: CoreExpr unboxedUnitExpr = Var (dataConWorkId unboxedUnitDataCon) voidArgId :: Id -- Local lambda-bound :: Void# voidArgId = mkSysLocal (fsLit "void") voidArgIdKey ManyTy unboxedUnitTy coercionTokenId :: Id -- :: () ~# () coercionTokenId -- See Note [Coercion tokens] in "GHC.CoreToStg" = pcMiscPrelId coercionTokenName (mkTyConApp eqPrimTyCon [liftedTypeKind, liftedTypeKind, unitTy, unitTy]) noCafIdInfo pcMiscPrelId :: Name -> Type -> IdInfo -> Id pcMiscPrelId name ty info = mkVanillaGlobalWithInfo name ty info pcRepPolyId :: Name -> Type -> (Name -> ConcreteTyVars) -> IdInfo -> Id pcRepPolyId name ty conc_tvs info = mkGlobalId (RepPolyId $ conc_tvs name) name ty info -- | Directly specify which outer forall'd type variables of a -- representation-polymorphic 'Id' such become concrete metavariables when -- instantiated. mkRepPolyIdConcreteTyVars :: [((Type, Position Neg), TyVar)] -- ^ ((ty, pos), tv) -- 'ty' is the type on which the representation-polymorphism -- check is done -- 'tv' is the type variable we are checking for concreteness -- (usually the kind of 'ty') -- 'pos' is the position of 'ty' in the -- type of the 'Id' -> Name -- ^ 'Name' of the rep-poly 'Id' -> ConcreteTyVars mkRepPolyIdConcreteTyVars vars nm = mkNameEnv [ (tyVarName tv, mk_conc_frr ty pos) | ((ty,pos), tv) <- vars ] where mk_conc_frr ty pos = ConcreteFRR $ FixedRuntimeRepOrigin ty $ FRRRepPolyId nm RepPolyFunction pos ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Id/Make.hs-boot0000644000000000000000000000041707346545000021522 0ustar0000000000000000module GHC.Types.Id.Make where import GHC.Types.Name( Name ) import GHC.Types.Var( Id ) import GHC.Core.Class( Class ) import {-# SOURCE #-} GHC.Core.DataCon( DataCon ) data DataConBoxer mkDataConWorkId :: Name -> DataCon -> Id mkDictSelId :: Name -> Class -> Id 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Literal.hs0000644000000000000000000012503107346545000020744 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 -} {-# LANGUAGE DeriveDataTypeable, ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE AllowAmbiguousTypes #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Core literals module GHC.Types.Literal ( -- * Main data type Literal(..) -- Exported to ParseIface , LitNumType(..) -- ** Creating Literals , mkLitInt, mkLitIntWrap, mkLitIntWrapC, mkLitIntUnchecked , mkLitWord, mkLitWordWrap, mkLitWordWrapC, mkLitWordUnchecked , mkLitInt8, mkLitInt8Wrap, mkLitInt8Unchecked , mkLitWord8, mkLitWord8Wrap, mkLitWord8Unchecked , mkLitInt16, mkLitInt16Wrap, mkLitInt16Unchecked , mkLitWord16, mkLitWord16Wrap, mkLitWord16Unchecked , mkLitInt32, mkLitInt32Wrap, mkLitInt32Unchecked , mkLitWord32, mkLitWord32Wrap, mkLitWord32Unchecked , mkLitInt64, mkLitInt64Wrap, mkLitInt64Unchecked , mkLitWord64, mkLitWord64Wrap, mkLitWord64Unchecked , mkLitFloat, mkLitDouble , mkLitChar, mkLitString , mkLitBigNat , mkLitNumber, mkLitNumberWrap, mkLitNumberMaybe -- ** Operations on Literals , literalType , pprLiteral , litNumIsSigned , litNumRange , litNumCheckRange , litNumWrap , litNumCoerce , litNumNarrow , litNumBitSize , isMinBound , isMaxBound -- ** Predicates on Literals and their contents , litIsDupable, litIsTrivial, litIsLifted , inCharRange , isZeroLit, isOneLit , litFitsInChar , litValue, mapLitValue , isLitValue_maybe, isLitRubbish -- ** Coercions , narrowInt8Lit, narrowInt16Lit, narrowInt32Lit, narrowInt64Lit , narrowWord8Lit, narrowWord16Lit, narrowWord32Lit, narrowWord64Lit , convertToIntLit, convertToWordLit , charToIntLit, intToCharLit , floatToIntLit, intToFloatLit, doubleToIntLit, intToDoubleLit , nullAddrLit, floatToDoubleLit, doubleToFloatLit ) where import GHC.Prelude import GHC.Builtin.Types.Prim import 
GHC.Core.Type( Type, RuntimeRepType, mkForAllTy, mkTyVarTy, typeOrConstraintKind ) import GHC.Core.TyCo.Compare( nonDetCmpType ) import GHC.Types.Var import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Types.Basic import GHC.Utils.Binary import GHC.Settings.Constants import GHC.Platform import GHC.Utils.Panic import GHC.Utils.Encoding import Data.ByteString (ByteString) import Data.Int import Data.Word import Data.Char import Data.Data ( Data ) import GHC.Exts( isTrue#, dataToTag#, (<#) ) import Numeric ( fromRat ) {- ************************************************************************ * * \subsection{Literals} * * ************************************************************************ -} -- | So-called 'Literal's are one of: -- -- * An unboxed numeric literal or floating-point literal which is presumed -- to be surrounded by appropriate constructors (@Int#@, etc.), so that -- the overall thing makes sense. -- -- We maintain the invariant that the 'Integer' in the 'LitNumber' -- constructor is actually in the (possibly target-dependent) range. -- The mkLit{Int,Word}*Wrap smart constructors ensure this by applying -- the target machine's wrapping semantics. Use these in situations -- where you know the wrapping semantics are correct. -- -- * The literal derived from the label mentioned in a \"foreign label\" -- declaration ('LitLabel') -- -- * A 'LitRubbish' to be used in place of values that are never used. -- -- * A character -- * A string -- * The NULL pointer -- data Literal = LitChar Char -- ^ @Char#@ - at least 31 bits. Create with -- 'mkLitChar' | LitNumber !LitNumType !Integer -- ^ Any numeric literal that can be -- internally represented with an Integer. | LitString !ByteString -- ^ A string-literal: stored and emitted -- UTF-8 encoded, we'll arrange to decode it -- at runtime. Also emitted with a @\'\\0\'@ -- terminator. 
Create with 'mkLitString' | LitNullAddr -- ^ The @NULL@ pointer, the only pointer value -- that can be represented as a Literal. Create -- with 'nullAddrLit' | LitRubbish -- ^ A nonsense value; See Note [Rubbish literals]. TypeOrConstraint -- t_or_c: whether this is a type or a constraint RuntimeRepType -- rr: a type of kind RuntimeRep -- The type of the literal is forall (a::TYPE rr). a -- or forall (a::CONSTRAINT rr). a -- -- INVARIANT: the Type has no free variables -- and so substitution etc can ignore it | LitFloat Rational -- ^ @Float#@. Create with 'mkLitFloat' | LitDouble Rational -- ^ @Double#@. Create with 'mkLitDouble' | LitLabel FastString FunctionOrData -- ^ A label literal. Parameters: -- -- 1) The name of the symbol mentioned in the -- declaration -- -- 2) Flag indicating whether the symbol -- references a function or a data deriving Data -- | Numeric literal type data LitNumType = LitNumBigNat -- ^ @Bignat@ (see Note [BigNum literals]) | LitNumInt -- ^ @Int#@ - according to target machine | LitNumInt8 -- ^ @Int8#@ - exactly 8 bits | LitNumInt16 -- ^ @Int16#@ - exactly 16 bits | LitNumInt32 -- ^ @Int32#@ - exactly 32 bits | LitNumInt64 -- ^ @Int64#@ - exactly 64 bits | LitNumWord -- ^ @Word#@ - according to target machine | LitNumWord8 -- ^ @Word8#@ - exactly 8 bits | LitNumWord16 -- ^ @Word16#@ - exactly 16 bits | LitNumWord32 -- ^ @Word32#@ - exactly 32 bits | LitNumWord64 -- ^ @Word64#@ - exactly 64 bits deriving (Data,Enum,Eq,Ord) -- | Indicate if a numeric literal type supports negative numbers litNumIsSigned :: LitNumType -> Bool litNumIsSigned nt = case nt of LitNumBigNat -> False LitNumInt -> True LitNumInt8 -> True LitNumInt16 -> True LitNumInt32 -> True LitNumInt64 -> True LitNumWord -> False LitNumWord8 -> False LitNumWord16 -> False LitNumWord32 -> False LitNumWord64 -> False -- | Number of bits litNumBitSize :: Platform -> LitNumType -> Maybe Word litNumBitSize platform nt = case nt of LitNumBigNat -> Nothing LitNumInt -> Just 
(fromIntegral (platformWordSizeInBits platform)) LitNumInt8 -> Just 8 LitNumInt16 -> Just 16 LitNumInt32 -> Just 32 LitNumInt64 -> Just 64 LitNumWord -> Just (fromIntegral (platformWordSizeInBits platform)) LitNumWord8 -> Just 8 LitNumWord16 -> Just 16 LitNumWord32 -> Just 32 LitNumWord64 -> Just 64 instance Binary LitNumType where put_ bh numTyp = putByte bh (fromIntegral (fromEnum numTyp)) get bh = do h <- getByte bh return (toEnum (fromIntegral h)) {- Note [BigNum literals] ~~~~~~~~~~~~~~~~~~~~~~ GHC supports 2 kinds of arbitrary precision numbers (a.k.a BigNum): * data Natural = NS Word# | NB BigNat# * data Integer = IS Int# | IN BigNat# | IP BigNat# In the past, we had Core constructors to represent Integer and Natural literals. These literals were then lowered into their real Core representation only in Core prep. The issue with this approach is that literals have two representations and we have to ensure that we handle them the same everywhere (in every optimisation, etc.). For example (0 :: Integer) was representable in Core with both: Lit (LitNumber LitNumInteger 0) -- literal App (Var integerISDataCon) (Lit (LitNumber LitNumInt 0)) -- real representation Nowadays we always use the real representation for Integer and Natural literals. However we still have two representations for BigNat# literals. BigNat# literals are still lowered in Core prep into a call to a constructor function (BigNat# is ByteArray# and we don't have ByteArray# literals yet so we have to build them at runtime). Note [String literals] ~~~~~~~~~~~~~~~~~~~~~~ String literals are UTF-8 encoded and stored into ByteStrings in the following ASTs: Haskell, Core, Stg, Cmm. TH can also emit ByteString based string literals with the BytesPrimL constructor (see #14741). It wasn't true before as [Word8] was used in Cmm AST and in TH which was quite bad for performance with large strings (see #16198 and #14741). 
To include string literals into output objects, the assembler code generator has to embed the UTF-8 encoded binary blob. See Note [Embedding large binary blobs] for more details. -} instance Binary Literal where put_ bh (LitChar aa) = do putByte bh 0; put_ bh aa put_ bh (LitString ab) = do putByte bh 1; put_ bh ab put_ bh (LitNullAddr) = putByte bh 2 put_ bh (LitFloat ah) = do putByte bh 3; put_ bh ah put_ bh (LitDouble ai) = do putByte bh 4; put_ bh ai put_ bh (LitLabel aj fod) = do putByte bh 5 put_ bh aj put_ bh fod put_ bh (LitNumber nt i) = do putByte bh 6 put_ bh nt put_ bh i put_ _ lit@(LitRubbish {}) = pprPanic "Binary LitRubbish" (ppr lit) -- We use IfaceLitRubbish; see Note [Rubbish literals], item (6) get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (LitChar aa) 1 -> do ab <- get bh return (LitString ab) 2 -> return (LitNullAddr) 3 -> do ah <- get bh return (LitFloat ah) 4 -> do ai <- get bh return (LitDouble ai) 5 -> do aj <- get bh fod <- get bh return (LitLabel aj fod) 6 -> do nt <- get bh i <- get bh return (LitNumber nt i) _ -> pprPanic "Binary:Literal" (int (fromIntegral h)) instance Outputable Literal where ppr = pprLiteral id instance Eq Literal where a == b = compare a b == EQ -- | Needed for the @Ord@ instance of 'AltCon', which in turn is needed in -- 'GHC.Data.TrieMap.CoreMap'. instance Ord Literal where compare = cmpLit {- Construction ~~~~~~~~~~~~ -} {- Note [Word/Int underflow/overflow] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ According to the Haskell Report 2010 (Sections 18.1 and 23.1 about signed and unsigned integral types): "All arithmetic is performed modulo 2^n, where n is the number of bits in the type." GHC stores Word# and Int# constant values as Integer. Core optimizations such as constant folding must ensure that the Integer value remains in the valid target Word/Int range (see #13172). The following functions are used to ensure this. Note that we *don't* warn the user about overflow. 
It's not done at runtime either, and compilation of completely harmless things like ((124076834 :: Word32) + (2147483647 :: Word32)) doesn't yield a warning. Instead we simply squash the value into the *target* Int/Word range. -} -- | Make a literal number using wrapping semantics if the value is out of -- bound. mkLitNumberWrap :: Platform -> LitNumType -> Integer -> Literal mkLitNumberWrap platform nt i = case nt of LitNumInt -> case platformWordSize platform of PW4 -> wrap @Int32 PW8 -> wrap @Int64 LitNumWord -> case platformWordSize platform of PW4 -> wrap @Word32 PW8 -> wrap @Word64 LitNumInt8 -> wrap @Int8 LitNumInt16 -> wrap @Int16 LitNumInt32 -> wrap @Int32 LitNumInt64 -> wrap @Int64 LitNumWord8 -> wrap @Word8 LitNumWord16 -> wrap @Word16 LitNumWord32 -> wrap @Word32 LitNumWord64 -> wrap @Word64 LitNumBigNat | i < 0 -> panic "mkLitNumberWrap: trying to create a negative BigNat" | otherwise -> LitNumber nt i where wrap :: forall a. (Integral a, Num a) => Literal wrap = LitNumber nt (toInteger (fromIntegral i :: a)) -- | Wrap a literal number according to its type using wrapping semantics. litNumWrap :: Platform -> Literal -> Literal litNumWrap platform (LitNumber nt i) = mkLitNumberWrap platform nt i litNumWrap _ l = pprPanic "litNumWrap" (ppr l) -- | Coerce a literal number into another using wrapping semantics. litNumCoerce :: LitNumType -> Platform -> Literal -> Literal litNumCoerce pt platform (LitNumber _nt i) = mkLitNumberWrap platform pt i litNumCoerce _ _ l = pprPanic "litNumWrapCoerce: not a number" (ppr l) -- | Narrow a literal number by converting it into another number type and then -- converting it back to its original type. 
litNumNarrow :: LitNumType -> Platform -> Literal -> Literal litNumNarrow pt platform (LitNumber nt i) = case mkLitNumberWrap platform pt i of LitNumber _ j -> mkLitNumberWrap platform nt j l -> pprPanic "litNumNarrow: got invalid literal" (ppr l) litNumNarrow _ _ l = pprPanic "litNumNarrow: invalid literal" (ppr l) -- | Check that a given number is in the range of a numeric literal litNumCheckRange :: Platform -> LitNumType -> Integer -> Bool litNumCheckRange platform nt i = maybe True (i >=) m_lower && maybe True (i <=) m_upper where (m_lower, m_upper) = litNumRange platform nt -- | Get the literal range litNumRange :: Platform -> LitNumType -> (Maybe Integer, Maybe Integer) litNumRange platform nt = case nt of LitNumInt -> (Just (platformMinInt platform), Just (platformMaxInt platform)) LitNumWord -> (Just 0, Just (platformMaxWord platform)) LitNumInt8 -> bounded_range @Int8 LitNumInt16 -> bounded_range @Int16 LitNumInt32 -> bounded_range @Int32 LitNumInt64 -> bounded_range @Int64 LitNumWord8 -> bounded_range @Word8 LitNumWord16 -> bounded_range @Word16 LitNumWord32 -> bounded_range @Word32 LitNumWord64 -> bounded_range @Word64 LitNumBigNat -> (Just 0, Nothing) where bounded_range :: forall a . 
(Integral a, Bounded a) => (Maybe Integer,Maybe Integer) bounded_range = case boundedRange @a of (mi,ma) -> (Just mi, Just ma) -- | Create a numeric 'Literal' of the given type mkLitNumber :: Platform -> LitNumType -> Integer -> Literal mkLitNumber platform nt i = assertPpr (litNumCheckRange platform nt i) (integer i) (LitNumber nt i) -- | Create a numeric 'Literal' of the given type if it is in range mkLitNumberMaybe :: Platform -> LitNumType -> Integer -> Maybe Literal mkLitNumberMaybe platform nt i | litNumCheckRange platform nt i = Just (LitNumber nt i) | otherwise = Nothing -- | Creates a 'Literal' of type @Int#@ mkLitInt :: Platform -> Integer -> Literal mkLitInt platform x = assertPpr (platformInIntRange platform x) (integer x) (mkLitIntUnchecked x) -- | Creates a 'Literal' of type @Int#@. -- If the argument is out of the (target-dependent) range, it is wrapped. -- See Note [Word/Int underflow/overflow] mkLitIntWrap :: Platform -> Integer -> Literal mkLitIntWrap platform i = mkLitNumberWrap platform LitNumInt i -- | Creates a 'Literal' of type @Int#@ without checking its range. mkLitIntUnchecked :: Integer -> Literal mkLitIntUnchecked i = LitNumber LitNumInt i -- | Creates a 'Literal' of type @Int#@, as well as a 'Bool'ean flag indicating -- overflow. That is, if the argument is out of the (target-dependent) range -- the argument is wrapped and the overflow flag will be set. -- See Note [Word/Int underflow/overflow] mkLitIntWrapC :: Platform -> Integer -> (Literal, Bool) mkLitIntWrapC platform i = (n, i /= i') where n@(LitNumber _ i') = mkLitIntWrap platform i -- | Creates a 'Literal' of type @Word#@ mkLitWord :: Platform -> Integer -> Literal mkLitWord platform x = assertPpr (platformInWordRange platform x) (integer x) (mkLitWordUnchecked x) -- | Creates a 'Literal' of type @Word#@. -- If the argument is out of the (target-dependent) range, it is wrapped. 
-- See Note [Word/Int underflow/overflow] mkLitWordWrap :: Platform -> Integer -> Literal mkLitWordWrap platform i = mkLitNumberWrap platform LitNumWord i -- | Creates a 'Literal' of type @Word#@ without checking its range. mkLitWordUnchecked :: Integer -> Literal mkLitWordUnchecked i = LitNumber LitNumWord i -- | Creates a 'Literal' of type @Word#@, as well as a 'Bool'ean flag indicating -- carry. That is, if the argument is out of the (target-dependent) range -- the argument is wrapped and the carry flag will be set. -- See Note [Word/Int underflow/overflow] mkLitWordWrapC :: Platform -> Integer -> (Literal, Bool) mkLitWordWrapC platform i = (n, i /= i') where n@(LitNumber _ i') = mkLitWordWrap platform i -- | Creates a 'Literal' of type @Int8#@ mkLitInt8 :: Integer -> Literal mkLitInt8 x = assertPpr (inBoundedRange @Int8 x) (integer x) (mkLitInt8Unchecked x) -- | Creates a 'Literal' of type @Int8#@. -- If the argument is out of the range, it is wrapped. mkLitInt8Wrap :: Integer -> Literal mkLitInt8Wrap i = mkLitInt8Unchecked (toInteger (fromIntegral i :: Int8)) -- | Creates a 'Literal' of type @Int8#@ without checking its range. mkLitInt8Unchecked :: Integer -> Literal mkLitInt8Unchecked i = LitNumber LitNumInt8 i -- | Creates a 'Literal' of type @Word8#@ mkLitWord8 :: Integer -> Literal mkLitWord8 x = assertPpr (inBoundedRange @Word8 x) (integer x) (mkLitWord8Unchecked x) -- | Creates a 'Literal' of type @Word8#@. -- If the argument is out of the range, it is wrapped. mkLitWord8Wrap :: Integer -> Literal mkLitWord8Wrap i = mkLitWord8Unchecked (toInteger (fromIntegral i :: Word8)) -- | Creates a 'Literal' of type @Word8#@ without checking its range. mkLitWord8Unchecked :: Integer -> Literal mkLitWord8Unchecked i = LitNumber LitNumWord8 i -- | Creates a 'Literal' of type @Int16#@ mkLitInt16 :: Integer -> Literal mkLitInt16 x = assertPpr (inBoundedRange @Int16 x) (integer x) (mkLitInt16Unchecked x) -- | Creates a 'Literal' of type @Int16#@. 
-- If the argument is out of the range, it is wrapped. mkLitInt16Wrap :: Integer -> Literal mkLitInt16Wrap i = mkLitInt16Unchecked (toInteger (fromIntegral i :: Int16)) -- | Creates a 'Literal' of type @Int16#@ without checking its range. mkLitInt16Unchecked :: Integer -> Literal mkLitInt16Unchecked i = LitNumber LitNumInt16 i -- | Creates a 'Literal' of type @Word16#@ mkLitWord16 :: Integer -> Literal mkLitWord16 x = assertPpr (inBoundedRange @Word16 x) (integer x) (mkLitWord16Unchecked x) -- | Creates a 'Literal' of type @Word16#@. -- If the argument is out of the range, it is wrapped. mkLitWord16Wrap :: Integer -> Literal mkLitWord16Wrap i = mkLitWord16Unchecked (toInteger (fromIntegral i :: Word16)) -- | Creates a 'Literal' of type @Word16#@ without checking its range. mkLitWord16Unchecked :: Integer -> Literal mkLitWord16Unchecked i = LitNumber LitNumWord16 i -- | Creates a 'Literal' of type @Int32#@ mkLitInt32 :: Integer -> Literal mkLitInt32 x = assertPpr (inBoundedRange @Int32 x) (integer x) (mkLitInt32Unchecked x) -- | Creates a 'Literal' of type @Int32#@. -- If the argument is out of the range, it is wrapped. mkLitInt32Wrap :: Integer -> Literal mkLitInt32Wrap i = mkLitInt32Unchecked (toInteger (fromIntegral i :: Int32)) -- | Creates a 'Literal' of type @Int32#@ without checking its range. mkLitInt32Unchecked :: Integer -> Literal mkLitInt32Unchecked i = LitNumber LitNumInt32 i -- | Creates a 'Literal' of type @Word32#@ mkLitWord32 :: Integer -> Literal mkLitWord32 x = assertPpr (inBoundedRange @Word32 x) (integer x) (mkLitWord32Unchecked x) -- | Creates a 'Literal' of type @Word32#@. -- If the argument is out of the range, it is wrapped. mkLitWord32Wrap :: Integer -> Literal mkLitWord32Wrap i = mkLitWord32Unchecked (toInteger (fromIntegral i :: Word32)) -- | Creates a 'Literal' of type @Word32#@ without checking its range. 
mkLitWord32Unchecked :: Integer -> Literal mkLitWord32Unchecked i = LitNumber LitNumWord32 i -- | Creates a 'Literal' of type @Int64#@ mkLitInt64 :: Integer -> Literal mkLitInt64 x = assertPpr (inBoundedRange @Int64 x) (integer x) (mkLitInt64Unchecked x) -- | Creates a 'Literal' of type @Int64#@. -- If the argument is out of the range, it is wrapped. mkLitInt64Wrap :: Integer -> Literal mkLitInt64Wrap i = mkLitInt64Unchecked (toInteger (fromIntegral i :: Int64)) -- | Creates a 'Literal' of type @Int64#@ without checking its range. mkLitInt64Unchecked :: Integer -> Literal mkLitInt64Unchecked i = LitNumber LitNumInt64 i -- | Creates a 'Literal' of type @Word64#@ mkLitWord64 :: Integer -> Literal mkLitWord64 x = assertPpr (inBoundedRange @Word64 x) (integer x) (mkLitWord64Unchecked x) -- | Creates a 'Literal' of type @Word64#@. -- If the argument is out of the range, it is wrapped. mkLitWord64Wrap :: Integer -> Literal mkLitWord64Wrap i = mkLitWord64Unchecked (toInteger (fromIntegral i :: Word64)) -- | Creates a 'Literal' of type @Word64#@ without checking its range. mkLitWord64Unchecked :: Integer -> Literal mkLitWord64Unchecked i = LitNumber LitNumWord64 i -- | Creates a 'Literal' of type @Float#@ mkLitFloat :: Rational -> Literal mkLitFloat = LitFloat -- | Creates a 'Literal' of type @Double#@ mkLitDouble :: Rational -> Literal mkLitDouble = LitDouble -- | Creates a 'Literal' of type @Char#@ mkLitChar :: Char -> Literal mkLitChar = LitChar -- | Creates a 'Literal' of type @Addr#@, which is appropriate for passing to -- e.g. some of the \"error\" functions in GHC.Err such as @GHC.Err.runtimeError@ mkLitString :: String -> Literal -- stored UTF-8 encoded mkLitString [] = LitString mempty mkLitString s = LitString (utf8EncodeByteString s) mkLitBigNat :: Integer -> Literal mkLitBigNat x = assertPpr (x >= 0) (integer x) (LitNumber LitNumBigNat x) isLitRubbish :: Literal -> Bool isLitRubbish (LitRubbish {}) = True isLitRubbish _ = False inBoundedRange :: forall a. 
(Bounded a, Integral a) => Integer -> Bool inBoundedRange x = x >= toInteger (minBound :: a) && x <= toInteger (maxBound :: a) boundedRange :: forall a. (Bounded a, Integral a) => (Integer,Integer) boundedRange = (toInteger (minBound :: a), toInteger (maxBound :: a)) isMinBound :: Platform -> Literal -> Bool isMinBound _ (LitChar c) = c == minBound isMinBound platform (LitNumber nt i) = case nt of LitNumInt -> i == platformMinInt platform LitNumInt8 -> i == toInteger (minBound :: Int8) LitNumInt16 -> i == toInteger (minBound :: Int16) LitNumInt32 -> i == toInteger (minBound :: Int32) LitNumInt64 -> i == toInteger (minBound :: Int64) LitNumWord -> i == 0 LitNumWord8 -> i == 0 LitNumWord16 -> i == 0 LitNumWord32 -> i == 0 LitNumWord64 -> i == 0 LitNumBigNat -> i == 0 isMinBound _ _ = False isMaxBound :: Platform -> Literal -> Bool isMaxBound _ (LitChar c) = c == maxBound isMaxBound platform (LitNumber nt i) = case nt of LitNumInt -> i == platformMaxInt platform LitNumInt8 -> i == toInteger (maxBound :: Int8) LitNumInt16 -> i == toInteger (maxBound :: Int16) LitNumInt32 -> i == toInteger (maxBound :: Int32) LitNumInt64 -> i == toInteger (maxBound :: Int64) LitNumWord -> i == platformMaxWord platform LitNumWord8 -> i == toInteger (maxBound :: Word8) LitNumWord16 -> i == toInteger (maxBound :: Word16) LitNumWord32 -> i == toInteger (maxBound :: Word32) LitNumWord64 -> i == toInteger (maxBound :: Word64) LitNumBigNat -> False isMaxBound _ _ = False inCharRange :: Char -> Bool inCharRange c = c >= '\0' && c <= chr tARGET_MAX_CHAR -- | Tests whether the literal represents a zero of whatever type it is isZeroLit :: Literal -> Bool isZeroLit (LitNumber _ 0) = True isZeroLit (LitFloat 0) = True isZeroLit (LitDouble 0) = True isZeroLit _ = False -- | Tests whether the literal represents a one of whatever type it is isOneLit :: Literal -> Bool isOneLit (LitNumber _ 1) = True isOneLit (LitFloat 1) = True isOneLit (LitDouble 1) = True isOneLit _ = False -- | Returns the 'Integer' 
contained in the 'Literal', for when that makes -- sense, i.e. for 'Char' and numbers. litValue :: Literal -> Integer litValue l = case isLitValue_maybe l of Just x -> x Nothing -> pprPanic "litValue" (ppr l) -- | Returns the 'Integer' contained in the 'Literal', for when that makes -- sense, i.e. for 'Char' and numbers. isLitValue_maybe :: Literal -> Maybe Integer isLitValue_maybe (LitChar c) = Just $ toInteger $ ord c isLitValue_maybe (LitNumber _ i) = Just i isLitValue_maybe _ = Nothing -- | Apply a function to the 'Integer' contained in the 'Literal', for when that -- makes sense, e.g. for 'Char' and numbers. -- For fixed-size integral literals, the result will be wrapped in accordance -- with the semantics of the target type. -- See Note [Word/Int underflow/overflow] mapLitValue :: Platform -> (Integer -> Integer) -> Literal -> Literal mapLitValue _ f (LitChar c) = mkLitChar (fchar c) where fchar = chr . fromInteger . f . toInteger . ord mapLitValue platform f (LitNumber nt i) = mkLitNumberWrap platform nt (f i) mapLitValue _ _ l = pprPanic "mapLitValue" (ppr l) {- Coercions ~~~~~~~~~ -} charToIntLit, intToCharLit, floatToIntLit, intToFloatLit, doubleToIntLit, intToDoubleLit, floatToDoubleLit, doubleToFloatLit :: Literal -> Literal -- | Narrow a literal number (unchecked result range) narrowLit' :: forall a. 
Integral a => LitNumType -> Literal -> Literal narrowLit' nt' (LitNumber _ i) = LitNumber nt' (toInteger (fromInteger i :: a)) narrowLit' _ l = pprPanic "narrowLit" (ppr l) narrowInt8Lit, narrowInt16Lit, narrowInt32Lit, narrowInt64Lit, narrowWord8Lit, narrowWord16Lit, narrowWord32Lit, narrowWord64Lit :: Literal -> Literal narrowInt8Lit = narrowLit' @Int8 LitNumInt8 narrowInt16Lit = narrowLit' @Int16 LitNumInt16 narrowInt32Lit = narrowLit' @Int32 LitNumInt32 narrowInt64Lit = narrowLit' @Int64 LitNumInt64 narrowWord8Lit = narrowLit' @Word8 LitNumWord8 narrowWord16Lit = narrowLit' @Word16 LitNumWord16 narrowWord32Lit = narrowLit' @Word32 LitNumWord32 narrowWord64Lit = narrowLit' @Word64 LitNumWord64 -- | Extend or narrow a fixed-width literal (e.g. 'Int16#') to a target -- word-sized literal ('Int#' or 'Word#'). Narrowing can only happen on 32-bit -- architectures when we convert a 64-bit literal into a 32-bit one. convertToWordLit, convertToIntLit :: Platform -> Literal -> Literal convertToWordLit platform (LitNumber _nt i) = mkLitWordWrap platform i convertToWordLit _platform l = pprPanic "convertToWordLit" (ppr l) convertToIntLit platform (LitNumber _nt i) = mkLitIntWrap platform i convertToIntLit _platform l = pprPanic "convertToIntLit" (ppr l) charToIntLit (LitChar c) = mkLitIntUnchecked (toInteger (ord c)) charToIntLit l = pprPanic "charToIntLit" (ppr l) intToCharLit (LitNumber _ i) = LitChar (chr (fromInteger i)) intToCharLit l = pprPanic "intToCharLit" (ppr l) floatToIntLit (LitFloat f) = mkLitIntUnchecked (truncate f) floatToIntLit l = pprPanic "floatToIntLit" (ppr l) intToFloatLit (LitNumber _ i) = LitFloat (fromInteger i) intToFloatLit l = pprPanic "intToFloatLit" (ppr l) doubleToIntLit (LitDouble f) = mkLitIntUnchecked (truncate f) doubleToIntLit l = pprPanic "doubleToIntLit" (ppr l) intToDoubleLit (LitNumber _ i) = LitDouble (fromInteger i) intToDoubleLit l = pprPanic "intToDoubleLit" (ppr l) floatToDoubleLit (LitFloat f) = LitDouble f floatToDoubleLit l 
= pprPanic "floatToDoubleLit" (ppr l) doubleToFloatLit (LitDouble d) = LitFloat d doubleToFloatLit l = pprPanic "doubleToFloatLit" (ppr l) nullAddrLit :: Literal nullAddrLit = LitNullAddr {- Predicates ~~~~~~~~~~ -} -- | True if there is absolutely no penalty to duplicating the literal. -- False principally of strings. -- -- "Why?", you say? I'm glad you asked. Well, for one duplicating strings would -- blow up code sizes. Not only this, it's also unsafe. -- -- Consider a program that wants to traverse a string. One way it might do this -- is to first compute the Addr# pointing to the end of the string, and then, -- starting from the beginning, bump a pointer using eqAddr# to determine the -- end. For instance, -- -- @ -- -- Given pointers to the start and end of a string, count how many zeros -- -- the string contains. -- countZeros :: Addr# -> Addr# -> -> Int -- countZeros start end = go start 0 -- where -- go off n -- | off `addrEq#` end = n -- | otherwise = go (off `plusAddr#` 1) n' -- where n' | isTrue# (indexInt8OffAddr# off 0# ==# 0#) = n + 1 -- | otherwise = n -- @ -- -- Consider what happens if we considered strings to be trivial (and therefore -- duplicable) and emitted a call like @countZeros "hello"# ("hello"# -- `plusAddr`# 5)@. The beginning and end pointers do not belong to the same -- string, meaning that an iteration like the above would blow up terribly. -- This is what happened in #12757. -- -- Ultimately the solution here is to make primitive strings a bit more -- structured, ensuring that the compiler can't inline in ways that will break -- user code. One approach to this is described in #8472. litIsTrivial :: Literal -> Bool -- c.f. 
GHC.Core.Utils.exprIsTrivial litIsTrivial (LitString _) = False litIsTrivial (LitNumber nt _) = case nt of LitNumBigNat -> False LitNumInt -> True LitNumInt8 -> True LitNumInt16 -> True LitNumInt32 -> True LitNumInt64 -> True LitNumWord -> True LitNumWord8 -> True LitNumWord16 -> True LitNumWord32 -> True LitNumWord64 -> True litIsTrivial _ = True -- | True if code space does not go bad if we duplicate this literal litIsDupable :: Platform -> Literal -> Bool -- c.f. GHC.Core.Utils.exprIsDupable litIsDupable platform x = case x of LitNumber nt i -> case nt of LitNumBigNat -> i <= platformMaxWord platform * 8 -- arbitrary, reasonable LitNumInt -> True LitNumInt8 -> True LitNumInt16 -> True LitNumInt32 -> True LitNumInt64 -> True LitNumWord -> True LitNumWord8 -> True LitNumWord16 -> True LitNumWord32 -> True LitNumWord64 -> True LitString _ -> False _ -> True litFitsInChar :: Literal -> Bool litFitsInChar (LitNumber _ i) = i >= toInteger (ord minBound) && i <= toInteger (ord maxBound) litFitsInChar _ = False litIsLifted :: Literal -> Bool litIsLifted (LitNumber nt _) = case nt of LitNumBigNat -> True LitNumInt -> False LitNumInt8 -> False LitNumInt16 -> False LitNumInt32 -> False LitNumInt64 -> False LitNumWord -> False LitNumWord8 -> False LitNumWord16 -> False LitNumWord32 -> False LitNumWord64 -> False litIsLifted _ = False -- Even RUBBISH[LiftedRep] is unlifted, as rubbish values are always evaluated. 
{- Types ~~~~~ -} -- | Find the Haskell 'Type' the literal occupies literalType :: Literal -> Type literalType LitNullAddr = addrPrimTy literalType (LitChar _) = charPrimTy literalType (LitString _) = addrPrimTy literalType (LitFloat _) = floatPrimTy literalType (LitDouble _) = doublePrimTy literalType (LitLabel _ _) = addrPrimTy literalType (LitNumber lt _) = case lt of LitNumBigNat -> byteArrayPrimTy LitNumInt -> intPrimTy LitNumInt8 -> int8PrimTy LitNumInt16 -> int16PrimTy LitNumInt32 -> int32PrimTy LitNumInt64 -> int64PrimTy LitNumWord -> wordPrimTy LitNumWord8 -> word8PrimTy LitNumWord16 -> word16PrimTy LitNumWord32 -> word32PrimTy LitNumWord64 -> word64PrimTy -- LitRubbish: see Note [Rubbish literals] literalType (LitRubbish torc rep) = mkForAllTy (Bndr a Inferred) (mkTyVarTy a) where a = mkTemplateKindVar (typeOrConstraintKind torc rep) {- Comparison ~~~~~~~~~~ -} cmpLit :: Literal -> Literal -> Ordering cmpLit (LitChar a) (LitChar b) = a `compare` b cmpLit (LitString a) (LitString b) = a `compare` b cmpLit (LitNullAddr) (LitNullAddr) = EQ cmpLit (LitFloat a) (LitFloat b) = a `compare` b cmpLit (LitDouble a) (LitDouble b) = a `compare` b cmpLit (LitLabel a _) (LitLabel b _) = a `lexicalCompareFS` b cmpLit (LitNumber nt1 a) (LitNumber nt2 b) = (nt1 `compare` nt2) `mappend` (a `compare` b) cmpLit (LitRubbish tc1 b1) (LitRubbish tc2 b2) = (tc1 `compare` tc2) `mappend` (b1 `nonDetCmpType` b2) cmpLit lit1 lit2 | isTrue# (dataToTag# lit1 <# dataToTag# lit2) = LT | otherwise = GT {- Printing ~~~~~~~~ * See Note [Printing of literals in Core] -} pprLiteral :: (SDoc -> SDoc) -> Literal -> SDoc pprLiteral _ (LitChar c) = pprPrimChar c pprLiteral _ (LitString s) = pprHsBytes s pprLiteral _ (LitNullAddr) = text "__NULL" pprLiteral _ (LitFloat f) = float (fromRat f) <> primFloatSuffix pprLiteral _ (LitDouble d) = double (fromRat d) <> primDoubleSuffix pprLiteral _ (LitNumber nt i) = case nt of LitNumBigNat -> integer i LitNumInt -> pprPrimInt i LitNumInt8 -> pprPrimInt8 
i LitNumInt16 -> pprPrimInt16 i LitNumInt32 -> pprPrimInt32 i LitNumInt64 -> pprPrimInt64 i LitNumWord -> pprPrimWord i LitNumWord8 -> pprPrimWord8 i LitNumWord16 -> pprPrimWord16 i LitNumWord32 -> pprPrimWord32 i LitNumWord64 -> pprPrimWord64 i pprLiteral add_par (LitLabel l fod) = add_par (text "__label" <+> pprHsString l <+> ppr fod) pprLiteral _ (LitRubbish torc rep) = text "RUBBISH" <> pp_tc <> parens (ppr rep) where pp_tc = case torc of TypeLike -> empty ConstraintLike -> text "[c]" {- Note [Printing of literals in Core] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function `add_par` is used to wrap parenthesis around labels (`LitLabel`), if they occur in a context requiring an atomic thing (for example function application). Although not all Core literals would be valid Haskell, we are trying to stay as close as possible to Haskell syntax in the printing of Core, to make it easier for a Haskell user to read Core. To that end: * We do print parenthesis around negative `LitInteger`, because we print `LitInteger` using plain number literals (no prefix or suffix), and plain number literals in Haskell require parenthesis in contexts like function application (i.e. `1 - -1` is not valid Haskell). * We don't print parenthesis around other (negative) literals, because they aren't needed in GHC/Haskell either (i.e. `1# -# -1#` is accepted by GHC's parser). Literal Output Output if context requires an atom (if different) ------- ------- ---------------------- LitChar 'a'# LitString "aaa"# LitNullAddr "__NULL" LitInt -1# LitIntN -1#N LitWord 1## LitWordN 1##N LitFloat -1.0# LitDouble -1.0## LitBigNat 1 LitLabel "__label" ... ("__label" ...) LitRubbish "RUBBISH[...]" Note [Rubbish literals] ~~~~~~~~~~~~~~~~~~~~~~~ Sometimes, we need to cough up a rubbish value of a certain type that is used in place of dead code we thus aim to eliminate. The value of a dead occurrence has no effect on the dynamic semantics of the program, so we can pick any value of the same representation. 
Exploiting the results of absence analysis in worker/wrapper is a scenario where we need such a rubbish value, see examples in Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils. It's completely undefined what the *value* of a rubbish value is, e.g., we could pick @0#@ for @Int#@ or @42#@; it mustn't matter where it's inserted into a Core program. We embed these rubbish values in the 'LitRubbish' case of the 'Literal' data type. Here are the moving parts: 1. Source Haskell: No way to produce rubbish lits in source syntax. Purely an IR feature. 2. Core: 'LitRubbish' carries a `Type` of kind RuntimeRep, describing the runtime representation of the literal (is it a pointer, an unboxed Double#, or whatever). We have it that `RUBBISH[rr]` has type `forall (a :: TYPE rr). a`. See the `LitRubbish` case of `literalType`. The function GHC.Core.Make.mkLitRubbish makes a Core rubbish literal of a given type. It obeys the following invariants: INVARIANT 1: 'rr' has no free variables. Main reason: we don't need to run substitutions and free variable finders over Literal. The rules around levity/runtime-rep polymorphism naturally uphold this invariant. INVARIANT 2: we never make a rubbish literal of type (a ~# b). Reason: see Note [Core type and coercion invariant] in GHC.Core. We can't substitute a LitRubbish inside a coercion, so it's best not to make one. They are zero width anyway, so passing absent ones around costs nothing. If we wanted an absent filler of type (a ~# b) we should use (Coercion (UnivCo ...)), but it doesn't seem worth making a new UnivCoProvenance for this purpose. This is sad, though: see #18983. 3. STG: The type app in `RUBBISH[IntRep] @Int# :: Int#` is erased and we get the (untyped) 'StgLit' `RUBBISH[IntRep] :: Int#` in STG. 
It's treated mostly opaque, with the exception of the Unariser, where we take apart a case scrutinisation on, or arg occurrence of, e.g., `RUBBISH[TupleRep[IntRep,DoubleRep]]` (which may stand in for `(# Int#, Double# #)`) into its sub-parts `RUBBISH[IntRep]` and `RUBBISH[DoubleRep]`, similar to unboxed tuples. See 'unariseLiteral_maybe' and also Note [Post-unarisation invariants]. 4. Cmm: We translate 'LitRubbish' to their actual rubbish value in 'cgLit'. The particulars are boring, and only matter when debugging illicit use of a rubbish value; see Modes of failure below. 5. Bytecode: In GHC.ByteCode.Asm we just lower it as a 0 literal, because it's all boxed to the host GC anyway. 6. IfaceSyn: `Literal` is part of `IfaceSyn`, but `Type` really isn't. So in the passage from Core to Iface we put LitRubbish into its own IfaceExpr data constructor, IfaceLitRubbish. The remaining constructors of Literal are fine as IfaceSyn. Wrinkles a) Why do we put the `Type` (of kind RuntimeRep) inside the literal? Could we not instead /apply/ the literal to that RuntimeRep? Alas no, because then LitRubbish :: forall (rr::RuntimeRep) (a::TYPE rr). a and that's an ill-formed type because its kind is `TYPE rr`, which escapes the binding site of `rr`. Annoying. b) A rubbish literal is not bottom, and replies True to exprOkForSpeculation. For unboxed types there is no bottom anyway. If we have let (x::Int#) = RUBBISH[IntRep] @Int# we want to convert that to a case! We want to leave it as a let, and probably discard it as dead code soon after because x is unused. c) We can see a rubbish literal at the head of an application chain. Most obviously, pretty much every rubbish literal is the head of a type application e.g. `RUBBISH[IntRep] @Int#`. But see also Note [How a rubbish literal can be the head of an application] c) Literal is in Ord, because (and only because) we use Ord on AltCon when building a TypeMap. Annoying. 
We use `nonDetCmpType` here; the non-determinism won't matter because it's only used in TrieMap. Moreover, rubbish literals should not appear in patterns anyway. d) Why not lower LitRubbish in CoreToStg? Because it enables us to use LitRubbish when unarising unboxed sums in the future, and it allows rubbish values of e.g. VecRep, for which we can't cough up dummy values in STG. Modes of failure ---------------- Suppose there is a bug in GHC, and a rubbish value is used after all. That is undefined behavior, of course, but let us list a few examples for failure modes: a) For an value of unboxed numeric type like `Int#`, we just use a silly value like 42#. The error might propagate indefinitely, hence we better pick a rather unique literal. Same for Word, Floats, Char and VecRep. b) For AddrRep (like String lits), we emit a null pointer, resulting in a definitive segfault when accessed. c) For boxed values, unlifted or not, we use a pointer to a fixed closure, like `()`, so that the GC has a pointer to follow. If we use that pointer as an 'Array#', we will likely access fields of the array that don't exist, and a seg-fault is likely, but not guaranteed. If we use that pointer as `Either Int Bool`, we might try to access the 'Int' field of the 'Left' constructor (which has the same ConTag as '()'), which doesn't exists. In the best case, we'll find an invalid pointer in its position and get a seg-fault, in the worst case the error manifests only one or two indirections later. Note [How a rubbish literal can be the head of an application] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this (#19824): h :: T3 -> Int -> blah h _ (I# n) = ... f :: (T1 -> T2 -> T3) -> T4 -> blah f g x = ....(h (g n s) x)... 
Demand analysis finds that h does not use its first argument, and w/w's h to {-# INLINE h #-} h a b = case b of I# n -> $wh n Demand analysis also finds that f does not use its first arg, so the worker for f look like $wf x = let g = RUBBISH in ....(h (g n s) x)... Now we inline g to get: $wf x = ....(h (RUBBISH n s) x)... And lo, until we inline `h`, we have that application of RUBBISH in $wf's RHS. But surely `h` will inline? Not if the arguments look boring. Well, RUBBISH doesn't look boring. But it could be a bit more complicated like f g x = let t = ...(g n s)... in ...(h t x)... and now the call looks more boring. Anyway, the point is that we might reasonably see RUBBISH at the head of an application chain. It would be fine to rewrite RUBBISH @(ta->tb->tr) a b ---> RUBBISH @tr but we don't currently do so. It is NOT ok to discard the entire continuation: case RUBBISH @ty of DEFAULT -> blah does not return RUBBISH! -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Meta.hs0000644000000000000000000000321707346545000020237 0ustar0000000000000000-- | Metaprogramming types module GHC.Types.Meta ( MetaRequest(..) 
, MetaHook , MetaResult -- data constructors not exported to ensure correct response type , metaRequestE , metaRequestP , metaRequestT , metaRequestD , metaRequestAW ) where import GHC.Prelude import GHC.Serialized ( Serialized ) import GHC.Hs -- | The supported metaprogramming result types data MetaRequest = MetaE (LHsExpr GhcPs -> MetaResult) | MetaP (LPat GhcPs -> MetaResult) | MetaT (LHsType GhcPs -> MetaResult) | MetaD ([LHsDecl GhcPs] -> MetaResult) | MetaAW (Serialized -> MetaResult) -- | data constructors not exported to ensure correct result type data MetaResult = MetaResE { unMetaResE :: LHsExpr GhcPs } | MetaResP { unMetaResP :: LPat GhcPs } | MetaResT { unMetaResT :: LHsType GhcPs } | MetaResD { unMetaResD :: [LHsDecl GhcPs] } | MetaResAW { unMetaResAW :: Serialized } type MetaHook f = MetaRequest -> LHsExpr GhcTc -> f MetaResult metaRequestE :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LHsExpr GhcPs) metaRequestE h = fmap unMetaResE . h (MetaE MetaResE) metaRequestP :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LPat GhcPs) metaRequestP h = fmap unMetaResP . h (MetaP MetaResP) metaRequestT :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LHsType GhcPs) metaRequestT h = fmap unMetaResT . h (MetaT MetaResT) metaRequestD :: Functor f => MetaHook f -> LHsExpr GhcTc -> f [LHsDecl GhcPs] metaRequestD h = fmap unMetaResD . h (MetaD MetaResD) metaRequestAW :: Functor f => MetaHook f -> LHsExpr GhcTc -> f Serialized metaRequestAW h = fmap unMetaResAW . 
h (MetaAW MetaResAW) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name.hs0000644000000000000000000010410707346545000020231 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -Wno-orphans #-} -- instance NFData FieldLabel {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Name]{@Name@: to transmit name info from renamer to typechecker} -} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name' is the type of names that have had their scoping and -- binding resolved. They have an 'OccName' but also a 'GHC.Types.Unique.Unique' -- that disambiguates Names that have the same 'OccName' and indeed is used for all -- 'Name' comparison. Names also contain information about where they originated -- from, see "GHC.Types.Name#name_sorts" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" -- -- #name_sorts# -- Names are one of: -- -- * External, if they name things declared in other modules. Some external -- Names are wired in, i.e. they name primitives defined in the compiler itself -- -- * Internal, if they name things in the module being compiled. 
Some internal -- Names are system names, if they are names manufactured by the compiler module GHC.Types.Name ( -- * The main types Name, -- Abstract BuiltInSyntax(..), -- ** Creating 'Name's mkSystemName, mkSystemNameAt, mkInternalName, mkClonedInternalName, mkDerivedInternalName, mkSystemVarName, mkSysTvName, mkFCallName, mkExternalName, mkWiredInName, -- ** Manipulating and deconstructing 'Name's nameUnique, setNameUnique, nameOccName, nameNameSpace, nameModule, nameModule_maybe, setNameLoc, tidyNameOcc, localiseName, namePun_maybe, pprName, nameSrcLoc, nameSrcSpan, pprNameDefnLoc, pprDefinedAt, pprFullName, pprTickyName, -- ** Predicates on 'Name's isSystemName, isInternalName, isExternalName, isTyVarName, isTyConName, isDataConName, isValName, isVarName, isDynLinkName, isFieldName, isWiredInName, isWiredIn, isBuiltInSyntax, isTupleTyConName, isSumTyConName, isUnboxedTupleDataConLikeName, isHoleName, wiredInNameTyThing_maybe, nameIsLocalOrFrom, nameIsExternalOrFrom, nameIsHomePackage, nameIsHomePackageImport, nameIsFromExternalPackage, stableNameCmp, -- * Class 'NamedThing' and overloaded friends NamedThing(..), getSrcLoc, getSrcSpan, getOccString, getOccFS, pprInfixName, pprPrefixName, pprModulePrefix, pprNameUnqualified, nameStableString, -- Re-export the OccName stuff module GHC.Types.Name.Occurrence ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.TyThing ( TyThing ) import {-# SOURCE #-} GHC.Builtin.Types ( listTyCon ) import GHC.Platform import GHC.Types.Name.Occurrence import GHC.Unit.Module import GHC.Unit.Home import GHC.Types.FieldLabel import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Utils.Misc import GHC.Data.Maybe import GHC.Utils.Binary import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.OldList (intersperse) import Control.DeepSeq import Data.Data import qualified Data.Semigroup as S import GHC.Types.Basic (Boxity(Boxed, Unboxed)) import GHC.Builtin.Uniques (isTupleTyConUnique, 
isSumTyConUnique, isTupleDataConLikeUnique) {- ************************************************************************ * * \subsection[Name-datatype]{The @Name@ datatype, and name construction} * * ************************************************************************ -} -- | A unique, unambiguous name for something, containing information about where -- that thing originated. data Name = Name { n_sort :: NameSort -- ^ What sort of name it is , n_occ :: OccName -- ^ Its occurrence name. -- -- NOTE: kept lazy to allow known names to be known constructor applications -- and to inline better. See Note [Fast comparison for built-in Names] , n_uniq :: {-# UNPACK #-} !Unique -- ^ Its unique. , n_loc :: !SrcSpan -- ^ Definition site -- -- NOTE: we make the n_loc field strict to eliminate some potential -- (and real!) space leaks, due to the fact that we don't look at -- the SrcLoc in a Name all that often. } -- See Note [About the NameSorts] data NameSort = External Module -- Either an import from another module -- or a top-level name -- See Note [About the NameSorts] | WiredIn Module TyThing BuiltInSyntax -- A variant of External, for wired-in things | Internal -- A user-defined local Id or TyVar -- defined in the module being compiled -- See Note [About the NameSorts] | System -- A system-defined Id or TyVar. Typically the -- OccName is very uninformative (like 's') instance Outputable NameSort where ppr (External _) = text "external" ppr (WiredIn _ _ _) = text "wired-in" ppr Internal = text "internal" ppr System = text "system" instance NFData Name where rnf Name{..} = rnf n_sort `seq` rnf n_occ `seq` n_uniq `seq` rnf n_loc -- Needs NFData Name, so the instance is here to avoid cyclic imports. 
instance NFData FieldLabel where rnf (FieldLabel a b c) = rnf a `seq` rnf b `seq` rnf c instance NFData NameSort where rnf (External m) = rnf m rnf (WiredIn m t b) = rnf m `seq` t `seq` b `seq` () -- XXX this is a *lie*, we're not going to rnf the TyThing, but -- since the TyThings for WiredIn Names are all static they can't -- be hiding space leaks or errors. rnf Internal = () rnf System = () -- | BuiltInSyntax is for things like @(:)@, @[]@ and tuples, -- which have special syntactic forms. They aren't in scope -- as such. data BuiltInSyntax = BuiltInSyntax | UserSyntax {- Note [Fast comparison for built-in Names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this wired-in Name in GHC.Builtin.Names: int8TyConName = tcQual gHC_INTERNAL_INT (fsLit "Int8") int8TyConKey Ultimately this turns into something like: int8TyConName = Name gHC_INTERNAL_INT (mkOccName ..."Int8") int8TyConKey So a comparison like `x == int8TyConName` will turn into `getUnique x == int8TyConKey`, nice and efficient. But if the `n_occ` field is strict, that definition will look like: int8TyConName = case (mkOccName..."Int8") of occ -> Name gHC_INTERNAL_INT occ int8TyConKey and now the comparison will not optimise. This matters even more when there are numerous comparisons (see #19386): if | tc == int8TyCon -> ... | tc == int16TyCon -> ... ...etc... when we would like to get a single multi-branched case. TL;DR: we make the `n_occ` field lazy. -} {- Note [About the NameSorts] ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. Initially: * All types, classes, data constructors get Extenal Names * Top-level Ids (including locally-defined ones) get External Names, * All other local (non-top-level) Ids get Internal names 2. In the Tidy phase (GHC.Iface.Tidy): * An Id that is "externally-visible" is given an External Name, even if the name was Internal up to that point * An Id that is not externally visible is given an Internal Name. 
even if the name was External up to that point See GHC.Iface.Tidy.tidyTopName An Id is externally visible if it is mentioned in the interface file; e.g. - it is exported - it is mentioned in an unfolding See GHC.Iface.Tidy.chooseExternalIds 3. In any invocation of GHC, an External Name for "M.x" has one and only one unique. This unique association is ensured via the Name Cache; see Note [The Name Cache] in GHC.Iface.Env. 4. In code generation, things with a External name are given C static labels, so they finally appear in the .o file's symbol table. They appear in the symbol table in the form M.n. That is why externally-visible things are made External (see (2) above). 5. A System Name differs in the following ways: a) has unique attached when printing dumps b) unifier eliminates sys tyvars in favour of user provs where possible Before anything gets printed in interface files or output code, it's fed through a 'tidy' processor, which zaps the OccNames to have unique names; and converts all sys-locals to user locals If any desugarer sys-locals have survived that far, they get changed to "ds1", "ds2", etc. 6. A WiredIn Name is used for things (Id, TyCon) that are fully known to the compiler, not read from an interface file. E.g. Bool, True, Int, Float, and many others. A WiredIn Name contains contains a TyThing, so we don't have to look it up. The BuiltInSyntax flag => It's a syntactic form, not "in scope" (e.g. []) All built-in syntax thigs are WiredIn. 
-} instance HasOccName Name where occName = nameOccName nameUnique :: Name -> Unique nameOccName :: Name -> OccName nameNameSpace :: Name -> NameSpace nameModule :: HasDebugCallStack => Name -> Module nameSrcLoc :: Name -> SrcLoc nameSrcSpan :: Name -> SrcSpan nameUnique name = n_uniq name nameOccName name = n_occ name nameNameSpace name = occNameSpace (n_occ name) nameSrcLoc name = srcSpanStart (n_loc name) nameSrcSpan name = n_loc name {- ************************************************************************ * * \subsection{Predicates on names} * * ************************************************************************ -} isInternalName :: Name -> Bool isExternalName :: Name -> Bool isSystemName :: Name -> Bool isWiredInName :: Name -> Bool isWiredInName (Name {n_sort = WiredIn _ _ _}) = True isWiredInName _ = False isWiredIn :: NamedThing thing => thing -> Bool isWiredIn = isWiredInName . getName wiredInNameTyThing_maybe :: Name -> Maybe TyThing wiredInNameTyThing_maybe (Name {n_sort = WiredIn _ thing _}) = Just thing wiredInNameTyThing_maybe _ = Nothing isBuiltInSyntax :: Name -> Bool isBuiltInSyntax (Name {n_sort = WiredIn _ _ BuiltInSyntax}) = True isBuiltInSyntax _ = False isTupleTyConName :: Name -> Bool isTupleTyConName = isJust . isTupleTyConUnique . getUnique isSumTyConName :: Name -> Bool isSumTyConName = isJust . isSumTyConUnique . getUnique -- | This matches a datacon as well as its worker and promoted tycon. isUnboxedTupleDataConLikeName :: Name -> Bool isUnboxedTupleDataConLikeName n | Just (Unboxed, _) <- isTupleDataConLikeUnique (getUnique n) = True | otherwise = False isExternalName (Name {n_sort = External _}) = True isExternalName (Name {n_sort = WiredIn _ _ _}) = True isExternalName _ = False isInternalName name = not (isExternalName name) isHoleName :: Name -> Bool isHoleName = isHoleModule . nameModule -- | Will the 'Name' come from a dynamically linked package? 
isDynLinkName :: Platform -> Module -> Name -> Bool isDynLinkName platform this_mod name | Just mod <- nameModule_maybe name -- Issue #8696 - when GHC is dynamically linked, it will attempt -- to load the dynamic dependencies of object files at compile -- time for things like QuasiQuotes or -- TemplateHaskell. Unfortunately, this interacts badly with -- intra-package linking, because we don't generate indirect -- (dynamic) symbols for intra-package calls. This means that if a -- module with an intra-package call is loaded without its -- dependencies, then GHC fails to link. -- -- In the mean time, always force dynamic indirections to be -- generated: when the module name isn't the module being -- compiled, references are dynamic. = case platformOS platform of -- On Windows the hack for #8696 makes it unlinkable. -- As the entire setup of the code from Cmm down to the RTS expects -- the use of trampolines for the imported functions only when -- doing intra-package linking, e.g. referring to a symbol defined in the same -- package should not use a trampoline. -- I much rather have dynamic TH not supported than the entire Dynamic linking -- not due to a hack. -- Also not sure this would break on Windows anyway. OSMinGW32 -> moduleUnit mod /= moduleUnit this_mod -- For the other platforms, still perform the hack _ -> mod /= this_mod | otherwise = False -- no, it is not even an external name nameModule name = nameModule_maybe name `orElse` pprPanic "nameModule" (ppr (n_sort name) <+> ppr name) nameModule_maybe :: Name -> Maybe Module nameModule_maybe (Name { n_sort = External mod}) = Just mod nameModule_maybe (Name { n_sort = WiredIn mod _ _}) = Just mod nameModule_maybe _ = Nothing is_interactive_or_from :: Module -> Module -> Bool is_interactive_or_from from mod = from == mod || isInteractiveModule mod -- Return the pun for a name if available. -- Used for pretty-printing under ListTuplePuns. 
-- Arity 1 is skipped here because unary tuples have no prefix representation, -- since that is occupied by the unit tuple. namePun_maybe :: Name -> Maybe FastString namePun_maybe name | getUnique name == getUnique listTyCon = Just (fsLit "[]") | Just (boxity, ar) <- isTupleTyConUnique (getUnique name) , ar /= 1 = let (lpar, rpar) = case boxity of Boxed -> ("(", ")") Unboxed -> ("(#", "#)") in Just (fsLit $ lpar ++ commas ar ++ rpar) | Just ar <- isSumTyConUnique (getUnique name) = Just (fsLit $ "(# " ++ bars ar ++ " #)") where commas ar = replicate (ar-1) ',' bars ar = intersperse ' ' (replicate (ar-1) '|') namePun_maybe _ = Nothing nameIsLocalOrFrom :: Module -> Name -> Bool -- ^ Returns True if the name is -- (a) Internal -- (b) External but from the specified module -- (c) External but from the 'interactive' package -- -- The key idea is that -- False means: the entity is defined in some other module -- you can find the details (type, fixity, instances) -- in some interface file -- those details will be stored in the EPT or HPT -- -- True means: the entity is defined in this module or earlier in -- the GHCi session -- you can find details (type, fixity, instances) in the -- TcGblEnv or TcLclEnv -- -- The isInteractiveModule part is because successive interactions of a GHCi session -- each give rise to a fresh module (Ghci1, Ghci2, etc), but they all come -- from the magic 'interactive' package; and all the details are kept in the -- TcLclEnv, TcGblEnv, NOT in the HPT or EPT. 
-- See Note [The interactive package] in "GHC.Runtime.Context" nameIsLocalOrFrom from name | Just mod <- nameModule_maybe name = is_interactive_or_from from mod | otherwise = True nameIsExternalOrFrom :: Module -> Name -> Bool -- ^ Returns True if the name is external or from the 'interactive' package -- See documentation of `nameIsLocalOrFrom` function nameIsExternalOrFrom from name | Just mod <- nameModule_maybe name = is_interactive_or_from from mod | otherwise = False nameIsHomePackage :: Module -> Name -> Bool -- True if the Name is defined in module of this package nameIsHomePackage this_mod = \nm -> case n_sort nm of External nm_mod -> moduleUnit nm_mod == this_pkg WiredIn nm_mod _ _ -> moduleUnit nm_mod == this_pkg Internal -> True System -> False where this_pkg = moduleUnit this_mod nameIsHomePackageImport :: Module -> Name -> Bool -- True if the Name is defined in module of this package -- /other than/ the this_mod nameIsHomePackageImport this_mod = \nm -> case nameModule_maybe nm of Nothing -> False Just nm_mod -> nm_mod /= this_mod && moduleUnit nm_mod == this_pkg where this_pkg = moduleUnit this_mod -- | Returns True if the Name comes from some other package: neither this -- package nor the interactive package. nameIsFromExternalPackage :: HomeUnit -> Name -> Bool nameIsFromExternalPackage home_unit name | Just mod <- nameModule_maybe name , notHomeModule home_unit mod -- Not the current unit , not (isInteractiveModule mod) -- Not the 'interactive' package = True | otherwise = False isTyVarName :: Name -> Bool isTyVarName name = isTvOcc (nameOccName name) isTyConName :: Name -> Bool isTyConName name = isTcOcc (nameOccName name) isDataConName :: Name -> Bool isDataConName name = isDataOcc (nameOccName name) isValName :: Name -> Bool isValName name = isValOcc (nameOccName name) isVarName :: Name -> Bool isVarName = isVarOcc . nameOccName isFieldName :: Name -> Bool isFieldName = isFieldOcc . 
nameOccName isSystemName (Name {n_sort = System}) = True isSystemName _ = False {- ************************************************************************ * * \subsection{Making names} * * ************************************************************************ -} -- | Create a name which is (for now at least) local to the current module and hence -- does not need a 'Module' to disambiguate it from other 'Name's mkInternalName :: Unique -> OccName -> SrcSpan -> Name mkInternalName uniq occ loc = Name { n_uniq = uniq , n_sort = Internal , n_occ = occ , n_loc = loc } -- NB: You might worry that after lots of huffing and -- puffing we might end up with two local names with distinct -- uniques, but the same OccName. Indeed we can, but that's ok -- * the insides of the compiler don't care: they use the Unique -- * when printing for -ddump-xxx you can switch on -dppr-debug to get the -- uniques if you get confused -- * for interface files we tidyCore first, which makes -- the OccNames distinct when they need to be mkClonedInternalName :: Unique -> Name -> Name mkClonedInternalName uniq (Name { n_occ = occ, n_loc = loc }) = Name { n_uniq = uniq, n_sort = Internal , n_occ = occ, n_loc = loc } mkDerivedInternalName :: (OccName -> OccName) -> Unique -> Name -> Name mkDerivedInternalName derive_occ uniq (Name { n_occ = occ, n_loc = loc }) = Name { n_uniq = uniq, n_sort = Internal , n_occ = derive_occ occ, n_loc = loc } -- | Create a name which definitely originates in the given module mkExternalName :: Unique -> Module -> OccName -> SrcSpan -> Name {-# INLINE mkExternalName #-} -- WATCH OUT! 
External Names should be in the Name Cache -- (see Note [The Name Cache] in GHC.Iface.Env), so don't just call mkExternalName -- with some fresh unique without populating the Name Cache mkExternalName uniq mod occ loc = Name { n_uniq = uniq, n_sort = External mod, n_occ = occ, n_loc = loc } -- | Create a name which is actually defined by the compiler itself mkWiredInName :: Module -> OccName -> Unique -> TyThing -> BuiltInSyntax -> Name {-# INLINE mkWiredInName #-} mkWiredInName mod occ uniq thing built_in = Name { n_uniq = uniq, n_sort = WiredIn mod thing built_in, n_occ = occ, n_loc = wiredInSrcSpan } -- | Create a name brought into being by the compiler mkSystemName :: Unique -> OccName -> Name mkSystemName uniq occ = mkSystemNameAt uniq occ noSrcSpan mkSystemNameAt :: Unique -> OccName -> SrcSpan -> Name mkSystemNameAt uniq occ loc = Name { n_uniq = uniq, n_sort = System , n_occ = occ, n_loc = loc } mkSystemVarName :: Unique -> FastString -> Name mkSystemVarName uniq fs = mkSystemName uniq (mkVarOccFS fs) mkSysTvName :: Unique -> FastString -> Name mkSysTvName uniq fs = mkSystemName uniq (mkTyVarOccFS fs) -- | Make a name for a foreign call mkFCallName :: Unique -> FastString -> Name mkFCallName uniq str = mkInternalName uniq (mkVarOccFS str) noSrcSpan -- The encoded string completely describes the ccall -- When we renumber/rename things, we need to be -- able to change a Name's Unique to match the cached -- one in the thing it's the name of. If you know what I mean. setNameUnique :: Name -> Unique -> Name setNameUnique name uniq = name {n_uniq = uniq} -- This is used for hsigs: we want to use the name of the originally exported -- entity, but edit the location to refer to the reexport site setNameLoc :: Name -> SrcSpan -> Name setNameLoc name loc = name {n_loc = loc} tidyNameOcc :: Name -> OccName -> Name -- We set the OccName of a Name when tidying -- In doing so, we change System --> Internal, so that when we print -- it we don't get the unique by default. 
It's tidy now! tidyNameOcc name@(Name { n_sort = System }) occ = name { n_occ = occ, n_sort = Internal} tidyNameOcc name occ = name { n_occ = occ } -- | Make the 'Name' into an internal name, regardless of what it was to begin with localiseName :: Name -> Name localiseName n = n { n_sort = Internal } {- ************************************************************************ * * \subsection{Hashing and comparison} * * ************************************************************************ -} cmpName :: Name -> Name -> Ordering cmpName n1 n2 = n_uniq n1 `nonDetCmpUnique` n_uniq n2 -- | Compare Names lexicographically -- This only works for Names that originate in the source code or have been -- tidied. stableNameCmp :: Name -> Name -> Ordering stableNameCmp (Name { n_sort = s1, n_occ = occ1 }) (Name { n_sort = s2, n_occ = occ2 }) = sort_cmp s1 s2 S.<> compare occ1 occ2 -- The ordinary compare on OccNames is lexicographic where -- Later constructors are bigger sort_cmp (External m1) (External m2) = m1 `stableModuleCmp` m2 sort_cmp (External {}) _ = LT sort_cmp (WiredIn {}) (External {}) = GT sort_cmp (WiredIn m1 _ _) (WiredIn m2 _ _) = m1 `stableModuleCmp` m2 sort_cmp (WiredIn {}) _ = LT sort_cmp Internal (External {}) = GT sort_cmp Internal (WiredIn {}) = GT sort_cmp Internal Internal = EQ sort_cmp Internal System = LT sort_cmp System System = EQ sort_cmp System _ = GT {- ************************************************************************ * * \subsection[Name-instances]{Instance declarations} * * ************************************************************************ -} -- | The same comments as for `Name`'s `Ord` instance apply. instance Eq Name where a == b = case (a `compare` b) of { EQ -> True; _ -> False } a /= b = case (a `compare` b) of { EQ -> False; _ -> True } -- | __Caution__: This instance is implemented via `nonDetCmpUnique`, which -- means that the ordering is not stable across deserialization or rebuilds. 
-- -- See `nonDetCmpUnique` for further information, and #15240 for a bug -- caused by improper use of this instance. -- For a deterministic lexicographic ordering, use `stableNameCmp`. instance Ord Name where compare = cmpName instance Uniquable Name where getUnique = nameUnique instance NamedThing Name where getName n = n instance Data Name where -- don't traverse? toConstr _ = abstractConstr "Name" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Name" {- ************************************************************************ * * \subsection{Binary} * * ************************************************************************ -} -- | Assumes that the 'Name' is a non-binding one. See -- 'GHC.Iface.Syntax.putIfaceTopBndr' and 'GHC.Iface.Syntax.getIfaceTopBndr' for -- serializing binding 'Name's. See 'UserData' for the rationale for this -- distinction. instance Binary Name where put_ bh name = case findUserDataWriter Proxy bh of tbl -> putEntry tbl bh name get bh = case findUserDataReader Proxy bh of tbl -> getEntry tbl bh {- ************************************************************************ * * \subsection{Pretty printing} * * ************************************************************************ -} instance Outputable Name where ppr name = pprName name instance OutputableBndr Name where pprBndr _ name = pprName name pprInfixOcc = pprInfixName pprPrefixOcc = pprPrefixName pprName :: forall doc. IsLine doc => Name -> doc pprName name@(Name {n_sort = sort, n_uniq = uniq, n_occ = occ}) = docWithStyle codeDoc normalDoc where codeDoc = case sort of WiredIn mod _ _ -> pprModule mod <> char '_' <> z_occ External mod -> pprModule mod <> char '_' <> z_occ -- In code style, always qualify -- ToDo: maybe we could print all wired-in things unqualified -- in code style, to reduce symbol table bloat? 
System -> pprUniqueAlways uniq Internal -> pprUniqueAlways uniq z_occ = ztext $ zEncodeFS $ occNameMangledFS occ normalDoc sty = getPprDebug $ \debug -> sdocOption sdocListTuplePuns $ \listTuplePuns -> handlePuns listTuplePuns (namePun_maybe name) $ case sort of WiredIn mod _ builtin -> pprExternal debug sty uniq mod occ True builtin External mod -> pprExternal debug sty uniq mod occ False UserSyntax System -> pprSystem debug sty uniq occ Internal -> pprInternal debug sty uniq occ handlePuns :: Bool -> Maybe FastString -> SDoc -> SDoc handlePuns True (Just pun) _ = ftext pun handlePuns _ _ r = r {-# SPECIALISE pprName :: Name -> SDoc #-} {-# SPECIALISE pprName :: Name -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- | Print fully qualified name (with unit-id and module, but no unique) pprFullName :: Module -> Name -> SDoc pprFullName this_mod Name{n_sort = sort, n_occ = occ} = let mod = case sort of WiredIn m _ _ -> m External m -> m System -> this_mod Internal -> this_mod in ftext (unitIdFS (moduleUnitId mod)) <> colon <> ftext (moduleNameFS $ moduleName mod) <> dot <> ftext (occNameFS occ) -- | Print a ticky ticky styled name -- -- Module argument is the module to use for internal and system names. When -- printing the name in a ticky profile, the module name is included even for -- local things. However, ticky uses the format "x (M)" rather than "M.x". -- Hence, this function provides a separation from normal styling. pprTickyName :: Module -> Name -> SDoc pprTickyName this_mod name | isInternalName name = pprName name <+> parens (ppr this_mod) | otherwise = pprName name -- | Print the string of Name unqualifiedly directly. 
pprNameUnqualified :: Name -> SDoc pprNameUnqualified Name { n_occ = occ } = ppr_occ_name occ pprExternal :: Bool -> PprStyle -> Unique -> Module -> OccName -> Bool -> BuiltInSyntax -> SDoc pprExternal debug sty uniq mod occ is_wired is_builtin | debug = pp_mod <> ppr_occ_name occ <> braces (hsep [if is_wired then text "(w)" else empty, pprNameSpaceBrief (occNameSpace occ), pprUnique uniq]) | BuiltInSyntax <- is_builtin = ppr_occ_name occ -- Never qualify builtin syntax | otherwise = if isHoleModule mod then case qualName sty mod occ of NameUnqual -> ppr_occ_name occ _ -> braces (pprModuleName (moduleName mod) <> dot <> ppr_occ_name occ) else pprModulePrefix sty mod occ <> ppr_occ_name occ where pp_mod = ppUnlessOption sdocSuppressModulePrefixes (pprModule mod <> dot) pprInternal :: Bool -> PprStyle -> Unique -> OccName -> SDoc pprInternal debug sty uniq occ | debug = ppr_occ_name occ <> braces (hsep [pprNameSpaceBrief (occNameSpace occ), pprUnique uniq]) | dumpStyle sty = ppr_occ_name occ <> ppr_underscore_unique uniq -- For debug dumps, we're not necessarily dumping -- tidied code, so we need to print the uniques. | otherwise = ppr_occ_name occ -- User style -- Like Internal, except that we only omit the unique in Iface style pprSystem :: Bool -> PprStyle -> Unique -> OccName -> SDoc pprSystem debug _sty uniq occ | debug = ppr_occ_name occ <> ppr_underscore_unique uniq <> braces (pprNameSpaceBrief (occNameSpace occ)) | otherwise = ppr_occ_name occ <> ppr_underscore_unique uniq -- If the tidy phase hasn't run, the OccName -- is unlikely to be informative (like 's'), -- so print the unique pprModulePrefix :: PprStyle -> Module -> OccName -> SDoc -- Print the "M." 
part of a name, based on whether it's in scope or not -- See Note [Printing original names] in GHC.Types.Name.Ppr pprModulePrefix sty mod occ = ppUnlessOption sdocSuppressModulePrefixes $ case qualName sty mod occ of -- See Outputable.QualifyName: NameQual modname -> pprModuleName modname <> dot -- Name is in scope NameNotInScope1 -> pprModule mod <> dot -- Not in scope NameNotInScope2 -> pprUnit (moduleUnit mod) <> colon -- Module not in <> pprModuleName (moduleName mod) <> dot -- scope either NameUnqual -> empty -- In scope unqualified pprUnique :: Unique -> SDoc -- Print a unique unless we are suppressing them pprUnique uniq = ppUnlessOption sdocSuppressUniques $ pprUniqueAlways uniq ppr_underscore_unique :: Unique -> SDoc -- Print an underscore separating the name from its unique -- But suppress it if we aren't printing the uniques anyway ppr_underscore_unique uniq = ppUnlessOption sdocSuppressUniques $ char '_' <> pprUniqueAlways uniq ppr_occ_name :: OccName -> SDoc ppr_occ_name occ = ftext (occNameFS occ) -- Don't use pprOccName; instead, just print the string of the OccName; -- we print the namespace in the debug stuff above -- Prints (if mod information is available) "Defined at " or -- "Defined in " information for a Name. pprDefinedAt :: Name -> SDoc pprDefinedAt name = text "Defined" <+> pprNameDefnLoc name pprNameDefnLoc :: Name -> SDoc -- Prints "at " or -- or "in " depending on what info is available pprNameDefnLoc name = case nameSrcLoc name of -- nameSrcLoc rather than nameSrcSpan -- It seems less cluttered to show a location -- rather than a span for the definition point RealSrcLoc s _ -> text "at" <+> ppr s UnhelpfulLoc s | isInternalName name || isSystemName name -> text "at" <+> ftext s | otherwise -> text "in" <+> quotes (ppr (nameModule name)) -- | Get a string representation of a 'Name' that's unique and stable -- across recompilations. Used for deterministic generation of binds for -- derived instances. -- eg. 
"$aeson_70dylHtv1FFGeai1IoxcQr$Data.Aeson.Types.Internal$String" nameStableString :: Name -> String nameStableString Name{..} = nameSortStableString n_sort ++ "$" ++ occNameString n_occ nameSortStableString :: NameSort -> String nameSortStableString System = "$_sys" nameSortStableString Internal = "$_in" nameSortStableString (External mod) = moduleStableString mod nameSortStableString (WiredIn mod _ _) = moduleStableString mod {- ************************************************************************ * * \subsection{Overloaded functions related to Names} * * ************************************************************************ -} -- | A class allowing convenient access to the 'Name' of various datatypes class NamedThing a where getOccName :: a -> OccName getName :: a -> Name getOccName n = nameOccName (getName n) -- Default method instance NamedThing e => NamedThing (Located e) where getName = getName . unLoc getSrcLoc :: NamedThing a => a -> SrcLoc getSrcSpan :: NamedThing a => a -> SrcSpan getOccString :: NamedThing a => a -> String getOccFS :: NamedThing a => a -> FastString getSrcLoc = nameSrcLoc . getName getSrcSpan = nameSrcSpan . getName getOccString = occNameString . getOccName getOccFS = occNameFS . 
getOccName pprInfixName :: (Outputable a, NamedThing a) => a -> SDoc -- See Outputable.pprPrefixVar, pprInfixVar; -- add parens or back-quotes as appropriate pprInfixName n = pprInfixVar (isSymOcc (getOccName n)) (ppr n) pprPrefixName :: NamedThing a => a -> SDoc pprPrefixName thing = pprPrefixVar (isSymOcc (nameOccName name)) (ppr name) where name = getName thing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name.hs-boot0000644000000000000000000000127207346545000021171 0ustar0000000000000000module GHC.Types.Name ( module GHC.Types.Name, module GHC.Types.Name.Occurrence ) where import GHC.Prelude (Eq, Bool) import {-# SOURCE #-} GHC.Types.Name.Occurrence import GHC.Types.Unique import GHC.Utils.Outputable import Data.Data (Data) import Control.DeepSeq (NFData) data Name instance Eq Name instance Data Name instance Uniquable Name instance Outputable Name instance NFData Name class NamedThing a where getOccName :: a -> OccName getName :: a -> Name getOccName n = nameOccName (getName n) nameUnique :: Name -> Unique setNameUnique :: Name -> Unique -> Name nameOccName :: Name -> OccName tidyNameOcc :: Name -> OccName -> Name isFieldName :: Name -> Bool ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/0000755000000000000000000000000007346545000017672 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Cache.hs0000644000000000000000000001307207346545000021234 0ustar0000000000000000{-# LANGUAGE RankNTypes #-} -- | The Name Cache module GHC.Types.Name.Cache ( NameCache (..) 
, initNameCache , takeUniqFromNameCache , updateNameCache' , updateNameCache -- * OrigNameCache , OrigNameCache , lookupOrigNameCache , extendOrigNameCache' , extendOrigNameCache ) where import GHC.Prelude import GHC.Unit.Module import GHC.Types.Name import GHC.Types.Unique.Supply import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Concurrent.MVar import Control.Monad import Control.Applicative {- Note [The Name Cache] ~~~~~~~~~~~~~~~~~~~~~ The Name Cache makes sure that, during any invocation of GHC, each External Name "M.x" has one, and only one globally-agreed Unique. * The first time we come across M.x we make up a Unique and record that association in the Name Cache. * When we come across "M.x" again, we look it up in the Name Cache, and get a hit. The functions newGlobalBinder, allocateGlobalBinder do the main work. When you make an External name, you should probably be calling one of them. Names in a NameCache are always stored as a Global, and have the SrcLoc of their binding locations. Actually that's not quite right. When we first encounter the original name, we might not be at its binding site (e.g. we are reading an interface file); so we give it 'noSrcLoc' then. Later, when we find its binding site, we fix it up. Note [Built-in syntax and the OrigNameCache] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Built-in syntax like unboxed sums and punned syntax like tuples are quite ubiquitous. To lower their cost we use two tricks, a. We specially encode tuple and sum Names in interface files' symbol tables to avoid having to look up their names while loading interface files. Namely these names are encoded as by their Uniques. We know how to get from a Unique back to the Name which it represents via the mapping defined in the SumTupleUniques module. See Note [Symbol table representation of names] in GHC.Iface.Binary and for details. b. 
We don't include them in the Orig name cache but instead parse their OccNames (in isBuiltInOcc_maybe and isPunOcc_maybe) to avoid bloating the name cache with them. Why is the second measure necessary? Good question; afterall, 1) the parser emits built-in and punned syntax directly as Exact RdrNames, and 2) built-in and punned syntax never needs to looked-up during interface loading due to (a). It turns out that there are two reasons why we might look up an Orig RdrName for built-in and punned syntax, * If you use setRdrNameSpace on an Exact RdrName it may be turned into an Orig RdrName. * Template Haskell turns a BuiltInSyntax Name into a TH.NameG (GHC.HsToCore.Quote.globalVar), and parses a NameG into an Orig RdrName (GHC.ThToHs.thRdrName). So, e.g. $(do { reify '(,); ... }) will go this route (#8954). -} -- | The NameCache makes sure that there is just one Unique assigned for -- each original name; i.e. (module-name, occ-name) pair and provides -- something of a lookup mechanism for those names. 
data NameCache = NameCache { nsUniqChar :: {-# UNPACK #-} !Char , nsNames :: {-# UNPACK #-} !(MVar OrigNameCache) } -- | Per-module cache of original 'OccName's given 'Name's type OrigNameCache = ModuleEnv (OccEnv Name) takeUniqFromNameCache :: NameCache -> IO Unique takeUniqFromNameCache (NameCache c _) = uniqFromTag c lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name lookupOrigNameCache nc mod occ | mod == gHC_TYPES || mod == gHC_PRIM || mod == gHC_INTERNAL_TUPLE || mod == gHC_CLASSES , Just name <- isBuiltInOcc_maybe occ <|> isPunOcc_maybe mod occ = -- See Note [Known-key names], 3(c) in GHC.Builtin.Names -- Special case for tuples; there are too many -- of them to pre-populate the original-name cache Just name | otherwise = case lookupModuleEnv nc mod of Nothing -> Nothing Just occ_env -> lookupOccEnv occ_env occ extendOrigNameCache' :: OrigNameCache -> Name -> OrigNameCache extendOrigNameCache' nc name = assertPpr (isExternalName name) (ppr name) $ extendOrigNameCache nc (nameModule name) (nameOccName name) name extendOrigNameCache :: OrigNameCache -> Module -> OccName -> Name -> OrigNameCache extendOrigNameCache nc mod occ name = extendModuleEnvWith combine nc mod (unitOccEnv occ name) where combine _ occ_env = extendOccEnv occ_env occ name initNameCache :: Char -> [Name] -> IO NameCache initNameCache c names = NameCache c <$> newMVar (initOrigNames names) initOrigNames :: [Name] -> OrigNameCache initOrigNames names = foldl' extendOrigNameCache' emptyModuleEnv names -- | Update the name cache with the given function updateNameCache' :: NameCache -> (OrigNameCache -> IO (OrigNameCache, c)) -- The updating function -> IO c updateNameCache' (NameCache _c nc) upd_fn = modifyMVar' nc upd_fn -- this should be in `base` modifyMVar' :: MVar a -> (a -> IO (a,b)) -> IO b modifyMVar' m f = modifyMVar m $ f >=> \c -> fst c `seq` pure c -- | Update the name cache with the given function -- -- Additionally, it ensures that the given Module and OccName 
are evaluated. -- If not, chaos can ensue: -- we read the name-cache -- then pull on mod (say) -- which does some stuff that modifies the name cache -- This did happen, with tycon_mod in GHC.IfaceToCore.tcIfaceAlt (DataAlt..) updateNameCache :: NameCache -> Module -> OccName -> (OrigNameCache -> IO (OrigNameCache, c)) -> IO c updateNameCache name_cache !_mod !_occ upd_fn = updateNameCache' name_cache upd_fn ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Env.hs0000644000000000000000000001773207346545000020770 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[NameEnv]{@NameEnv@: name environments} -} module GHC.Types.Name.Env ( -- * Var, Id and TyVar environments (maps) NameEnv, -- ** Manipulating these environments mkNameEnv, mkNameEnvWith, fromUniqMap, emptyNameEnv, isEmptyNameEnv, unitNameEnv, nonDetNameEnvElts, extendNameEnv_C, extendNameEnv_Acc, extendNameEnv, extendNameEnvList, extendNameEnvList_C, filterNameEnv, anyNameEnv, mapMaybeNameEnv, extendNameEnvListWith, plusNameEnv, plusNameEnv_C, plusNameEnv_CD, plusNameEnv_CD2, alterNameEnv, plusNameEnvList, plusNameEnvListWith, lookupNameEnv, lookupNameEnv_NF, delFromNameEnv, delListFromNameEnv, elemNameEnv, mapNameEnv, disjointNameEnv, seqEltsNameEnv, DNameEnv, emptyDNameEnv, isEmptyDNameEnv, lookupDNameEnv, delFromDNameEnv, filterDNameEnv, mapDNameEnv, adjustDNameEnv, alterDNameEnv, extendDNameEnv, eltsDNameEnv, extendDNameEnv_C, plusDNameEnv_C, foldDNameEnv, nonDetStrictFoldDNameEnv, -- ** Dependency analysis depAnal ) where import GHC.Prelude import GHC.Data.Graph.Directed import GHC.Types.Name import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Types.Unique.Map import GHC.Data.Maybe {- ************************************************************************ * * \subsection{Name environment} * * ************************************************************************ -} {- Note [depAnal determinism] 
~~~~~~~~~~~~~~~~~~~~~~~~~~ depAnal is deterministic provided it gets the nodes in a deterministic order. The order of lists that get_defs and get_uses return doesn't matter, as these are only used to construct the edges, and stronglyConnCompFromEdgedVertices is deterministic even when the edges are not in deterministic order as explained in Note [Deterministic SCC] in GHC.Data.Graph.Directed. -} depAnal :: forall node. (node -> [Name]) -- Defs -> (node -> [Name]) -- Uses -> [node] -> [SCC node] -- Perform dependency analysis on a group of definitions, -- where each definition may define more than one Name -- -- The get_defs and get_uses functions are called only once per node depAnal get_defs get_uses nodes = stronglyConnCompFromEdgedVerticesUniq graph_nodes where graph_nodes = (map mk_node keyed_nodes) :: [Node Int node] keyed_nodes = nodes `zip` [(1::Int)..] mk_node (node, key) = let !edges = (mapMaybe (lookupNameEnv key_map) (get_uses node)) in DigraphNode node key edges key_map :: NameEnv Int -- Maps a Name to the key of the decl that defines it key_map = mkNameEnv [(name,key) | (node, key) <- keyed_nodes, name <- get_defs node] {- ************************************************************************ * * \subsection{Name environment} * * ************************************************************************ -} -- | Name Environment type NameEnv a = UniqFM Name a -- Domain is Name emptyNameEnv :: NameEnv a isEmptyNameEnv :: NameEnv a -> Bool mkNameEnv :: [(Name,a)] -> NameEnv a mkNameEnvWith :: (a -> Name) -> [a] -> NameEnv a fromUniqMap :: UniqMap Name a -> NameEnv a nonDetNameEnvElts :: NameEnv a -> [a] alterNameEnv :: (Maybe a-> Maybe a) -> NameEnv a -> Name -> NameEnv a extendNameEnv_C :: (a->a->a) -> NameEnv a -> Name -> a -> NameEnv a extendNameEnv_Acc :: (a->b->b) -> (a->b) -> NameEnv b -> Name -> a -> NameEnv b extendNameEnv :: NameEnv a -> Name -> a -> NameEnv a plusNameEnv :: NameEnv a -> NameEnv a -> NameEnv a plusNameEnv_C :: (a->a->a) -> 
NameEnv a -> NameEnv a -> NameEnv a plusNameEnv_CD :: (a->a->a) -> NameEnv a -> a -> NameEnv a -> a -> NameEnv a plusNameEnv_CD2 :: (Maybe a->Maybe a->a) -> NameEnv a -> NameEnv a -> NameEnv a plusNameEnvList :: [NameEnv a] -> NameEnv a plusNameEnvListWith :: (a->a->a) -> [NameEnv a] -> NameEnv a extendNameEnvList :: NameEnv a -> [(Name,a)] -> NameEnv a extendNameEnvListWith :: (a -> Name) -> NameEnv a -> [a] -> NameEnv a extendNameEnvList_C :: (a->a->a) -> NameEnv a -> [(Name,a)] -> NameEnv a delFromNameEnv :: NameEnv a -> Name -> NameEnv a delListFromNameEnv :: NameEnv a -> [Name] -> NameEnv a elemNameEnv :: Name -> NameEnv a -> Bool unitNameEnv :: Name -> a -> NameEnv a lookupNameEnv :: NameEnv a -> Name -> Maybe a lookupNameEnv_NF :: NameEnv a -> Name -> a filterNameEnv :: (elt -> Bool) -> NameEnv elt -> NameEnv elt mapMaybeNameEnv :: (a -> Maybe b) -> NameEnv a -> NameEnv b anyNameEnv :: (elt -> Bool) -> NameEnv elt -> Bool mapNameEnv :: (elt1 -> elt2) -> NameEnv elt1 -> NameEnv elt2 disjointNameEnv :: NameEnv a -> NameEnv a -> Bool seqEltsNameEnv :: (elt -> ()) -> NameEnv elt -> () nonDetNameEnvElts x = nonDetEltsUFM x emptyNameEnv = emptyUFM isEmptyNameEnv = isNullUFM unitNameEnv x y = unitUFM x y extendNameEnv x y z = addToUFM x y z extendNameEnvList x l = addListToUFM x l extendNameEnvListWith f x l = addListToUFM x (map (\a -> (f a, a)) l) lookupNameEnv x y = lookupUFM x y alterNameEnv = alterUFM mkNameEnv l = listToUFM l mkNameEnvWith f = mkNameEnv . map (\a -> (f a, a)) fromUniqMap = mapUFM snd . 
getUniqMap elemNameEnv x y = elemUFM x y plusNameEnv x y = plusUFM x y plusNameEnv_C f x y = plusUFM_C f x y {-# INLINE plusNameEnv_CD #-} plusNameEnv_CD f x d y b = plusUFM_CD f x d y b plusNameEnv_CD2 f x y = plusUFM_CD2 f x y {-# INLINE plusNameEnvList #-} plusNameEnvList xs = plusUFMList xs {-# INLINE plusNameEnvListWith #-} plusNameEnvListWith f xs = plusUFMListWith f xs extendNameEnv_C f x y z = addToUFM_C f x y z mapNameEnv f x = mapUFM f x extendNameEnv_Acc x y z a b = addToUFM_Acc x y z a b extendNameEnvList_C x y z = addListToUFM_C x y z delFromNameEnv x y = delFromUFM x y delListFromNameEnv x y = delListFromUFM x y filterNameEnv x y = filterUFM x y mapMaybeNameEnv x y = mapMaybeUFM x y anyNameEnv f x = nonDetFoldUFM ((||) . f) False x disjointNameEnv x y = disjointUFM x y seqEltsNameEnv seqElt x = seqEltsUFM seqElt x lookupNameEnv_NF env n = expectJust "lookupNameEnv_NF" (lookupNameEnv env n) -- | Deterministic Name Environment -- -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why -- we need DNameEnv. 
type DNameEnv a = UniqDFM Name a emptyDNameEnv :: DNameEnv a emptyDNameEnv = emptyUDFM isEmptyDNameEnv :: DNameEnv a -> Bool isEmptyDNameEnv = isNullUDFM lookupDNameEnv :: DNameEnv a -> Name -> Maybe a lookupDNameEnv = lookupUDFM delFromDNameEnv :: DNameEnv a -> Name -> DNameEnv a delFromDNameEnv = delFromUDFM filterDNameEnv :: (a -> Bool) -> DNameEnv a -> DNameEnv a filterDNameEnv = filterUDFM mapDNameEnv :: (a -> b) -> DNameEnv a -> DNameEnv b mapDNameEnv = mapUDFM adjustDNameEnv :: (a -> a) -> DNameEnv a -> Name -> DNameEnv a adjustDNameEnv = adjustUDFM alterDNameEnv :: (Maybe a -> Maybe a) -> DNameEnv a -> Name -> DNameEnv a alterDNameEnv = alterUDFM extendDNameEnv :: DNameEnv a -> Name -> a -> DNameEnv a extendDNameEnv = addToUDFM extendDNameEnv_C :: (a -> a -> a) -> DNameEnv a -> Name -> a -> DNameEnv a extendDNameEnv_C = addToUDFM_C eltsDNameEnv :: DNameEnv a -> [a] eltsDNameEnv = eltsUDFM foldDNameEnv :: (a -> b -> b) -> b -> DNameEnv a -> b foldDNameEnv = foldUDFM plusDNameEnv_C :: (elt -> elt -> elt) -> DNameEnv elt -> DNameEnv elt -> DNameEnv elt plusDNameEnv_C = plusUDFM_C nonDetStrictFoldDNameEnv :: (a -> b -> b) -> b -> DNameEnv a -> b nonDetStrictFoldDNameEnv = nonDetStrictFoldUDFM ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Occurrence.hs0000644000000000000000000013637007346545000022330 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE OverloadedStrings #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName' represents names as strings with just a little more information: -- the \"namespace\" that the name came from, e.g. 
the namespace of value, type constructors or -- data constructors -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Name.Occurrence ( -- * The 'NameSpace' type NameSpace, -- Abstract -- ** Construction -- $real_vs_source_data_constructors tcName, clsName, tcClsName, dataName, varName, fieldName, tvName, srcDataName, -- ** Pretty Printing pprNameSpace, pprNonVarNameSpace, pprNameSpaceBrief, -- * The 'OccName' type OccName, -- Abstract, instance of Outputable pprOccName, occNameMangledFS, -- ** Construction mkOccName, mkOccNameFS, mkVarOcc, mkVarOccFS, mkRecFieldOcc, mkRecFieldOccFS, mkDataOcc, mkDataOccFS, mkTyVarOcc, mkTyVarOccFS, mkTcOcc, mkTcOccFS, mkClsOcc, mkClsOccFS, mkDFunOcc, setOccNameSpace, demoteOccName, demoteOccTvName, promoteOccName, varToRecFieldOcc, recFieldToVarOcc, HasOccName(..), -- ** Derived 'OccName's isDerivedOccName, mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkBuilderOcc, mkDefaultMethodOcc, isDefaultMethodOcc, isTypeableBindOcc, mkNewTyCoOcc, mkClassOpAuxOcc, mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc, mkClassDataConOcc, mkDictOcc, mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenR, mkGen1R, mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkSuperDictSelOcc, mkSuperDictAuxOcc, mkLocalOcc, mkMethodOcc, mkInstTyTcOcc, mkInstTyCoOcc, mkEqPredCoOcc, mkTyConRepOcc, -- ** Deconstruction occNameFS, occNameString, occNameSpace, isVarOcc, isTvOcc, isTcOcc, isDataOcc, isDataSymOcc, isSymOcc, isValOcc, isFieldOcc, fieldOcc_maybe, parenSymOcc, startsWithUnderscore, isUnderscore, isTcClsNameSpace, isTvNameSpace, isDataConNameSpace, isVarNameSpace, isValNameSpace, isFieldNameSpace, isTermVarOrFieldNameSpace, -- * The 'OccEnv' type OccEnv, emptyOccEnv, unitOccEnv, extendOccEnv, mapOccEnv, strictMapOccEnv, mapMaybeOccEnv, 
lookupOccEnv, lookupOccEnv_AllNameSpaces, lookupOccEnv_WithFields, lookupFieldsOccEnv, mkOccEnv, mkOccEnv_C, extendOccEnvList, elemOccEnv, nonDetOccEnvElts, nonDetFoldOccEnv, plusOccEnv, plusOccEnv_C, extendOccEnv_Acc, filterOccEnv, delListFromOccEnv, delFromOccEnv, alterOccEnv, minusOccEnv, minusOccEnv_C, minusOccEnv_C_Ns, pprOccEnv, forceOccEnv, intersectOccEnv_C, -- * The 'OccSet' type OccSet, emptyOccSet, unitOccSet, mkOccSet, extendOccSet, extendOccSetList, unionOccSets, unionManyOccSets, elemOccSet, isEmptyOccSet, -- * Dealing with main mainOcc, ppMainFn, -- * Tidying up TidyOccEnv, emptyTidyOccEnv, initTidyOccEnv, trimTidyOccEnv, tidyOccName, avoidClashesOccEnv, delTidyOccEnvList, -- FsEnv FastStringEnv, emptyFsEnv, lookupFsEnv, extendFsEnv, mkFsEnv ) where import GHC.Prelude import GHC.Builtin.Uniques import GHC.Utils.Misc import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Data.FastString import GHC.Data.FastString.Env import GHC.Utils.Outputable import GHC.Utils.Lexeme import GHC.Utils.Binary import GHC.Utils.Panic.Plain import Control.DeepSeq import Data.Char import Data.Data import qualified Data.Semigroup as S import GHC.Exts( Int(I#), dataToTag# ) {- ************************************************************************ * * \subsection{Name space} * * ************************************************************************ -} data NameSpace -- | Variable name space (including "real" data constructors). = VarName -- | Record field namespace for the given record. | FldName { fldParent :: !FastString -- ^ The textual name of the parent of the field. -- -- - For a field of a datatype, this is the name of the first constructor -- of the datatype (regardless of whether this constructor has this field). -- - For a field of a pattern synonym, this is the name of the pattern synonym. } -- | "Source" data constructor namespace. | DataName -- | Type variable namespace. | TvName -- | Type constructor and class namespace. 
| TcClsName -- Haskell has type constructors and classes in the same namespace, for now. deriving Eq instance Ord NameSpace where compare ns1 ns2 = case compare (I# (dataToTag# ns1)) (I# (dataToTag# ns2)) of LT -> LT GT -> GT EQ | FldName { fldParent = p1 } <- ns1 , FldName { fldParent = p2 } <- ns2 -> lexicalCompareFS p1 p2 | otherwise -> EQ instance Uniquable NameSpace where getUnique (FldName fs) = mkFldNSUnique fs getUnique VarName = varNSUnique getUnique DataName = dataNSUnique getUnique TvName = tvNSUnique getUnique TcClsName = tcNSUnique instance NFData NameSpace where rnf VarName = () rnf (FldName par) = rnf par rnf DataName = () rnf TvName = () rnf TcClsName = () {- Note [Data Constructors] ~~~~~~~~~~~~~~~~~~~~~~~~ see also: Note [Data Constructor Naming] in GHC.Core.DataCon $real_vs_source_data_constructors There are two forms of data constructor: [Source data constructors] The data constructors mentioned in Haskell source code [Real data constructors] The data constructors of the representation type, which may not be the same as the source type For example: > data T = T !(Int, Int) The source datacon has type @(Int, Int) -> T@ The real datacon has type @Int -> Int -> T@ GHC chooses a representation based on the strictness etc. Note [Record field namespacing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Record fields have a separate namespace from variables, to support DuplicateRecordFields, e.g. in data X = MkX { fld :: Int } data Y = MkY { fld :: Bool } f x = x { fld = 3 } g y = y { fld = False } we want the two occurrences of "fld" to refer to the field names associated with the corresponding data type. The namespace for a record field is as follows: - for a data type, it is the textual name of the first constructor of the datatype, whether this constructor has this field or not; - for a pattern synonym, it is the textual name of the pattern synonym itself. 
Record fields are initially parsed as variables, but the renamer resolves their namespace in GHC.Rename.Names.newRecordFieldLabel, which is called when renaming record data declarations and record pattern synonym declarations. To illustrate the namespacing, consider the record field "fld" in the following datatype data instance A Int Bool Char = MkA1 | MkA2 { fld :: Int } | MkA3 { bar :: Bool, fld :: Int } Its namespace is `FldName "MkA1"`. This is a convention used throughout GHC to circumvent the fact that we don't have a way to refer to the type constructor "A Int Bool Char" in the renamer, as data family instances only get given 'Name's in the typechecker. -} tcName, clsName, tcClsName :: NameSpace dataName, srcDataName :: NameSpace tvName, varName :: NameSpace -- Though type constructors and classes are in the same name space now, -- the NameSpace type is abstract, so we can easily separate them later tcName = TcClsName -- Type constructors clsName = TcClsName -- Classes tcClsName = TcClsName -- Not sure which! dataName = DataName srcDataName = DataName -- Haskell-source data constructors should be -- in the Data name space tvName = TvName varName = VarName fieldName :: FastString -> NameSpace fieldName = FldName isDataConNameSpace :: NameSpace -> Bool isDataConNameSpace DataName = True isDataConNameSpace _ = False isTcClsNameSpace :: NameSpace -> Bool isTcClsNameSpace TcClsName = True isTcClsNameSpace _ = False isTvNameSpace :: NameSpace -> Bool isTvNameSpace TvName = True isTvNameSpace _ = False isVarNameSpace :: NameSpace -> Bool -- Variables or type variables, but not constructors isVarNameSpace TvName = True isVarNameSpace VarName = True isVarNameSpace (FldName {}) = True isVarNameSpace _ = False -- | Is this a term variable or field name namespace? 
isTermVarOrFieldNameSpace :: NameSpace -> Bool isTermVarOrFieldNameSpace VarName = True isTermVarOrFieldNameSpace (FldName {}) = True isTermVarOrFieldNameSpace _ = False isValNameSpace :: NameSpace -> Bool isValNameSpace DataName = True isValNameSpace VarName = True isValNameSpace (FldName {}) = True isValNameSpace _ = False isFieldNameSpace :: NameSpace -> Bool isFieldNameSpace (FldName {}) = True isFieldNameSpace _ = False pprNameSpace :: NameSpace -> SDoc pprNameSpace DataName = text "data constructor" pprNameSpace VarName = text "variable" pprNameSpace TvName = text "type variable" pprNameSpace TcClsName = text "type constructor or class" pprNameSpace (FldName p) = text "record field of" <+> ftext p pprNonVarNameSpace :: NameSpace -> SDoc pprNonVarNameSpace VarName = empty pprNonVarNameSpace ns = pprNameSpace ns pprNameSpaceBrief :: NameSpace -> SDoc pprNameSpaceBrief DataName = char 'd' pprNameSpaceBrief VarName = char 'v' pprNameSpaceBrief TvName = text "tv" pprNameSpaceBrief TcClsName = text "tc" pprNameSpaceBrief (FldName {}) = text "fld" -- demoteNameSpace lowers the NameSpace if possible. We can not know -- in advance, since a TvName can appear in an HsTyVar. -- See Note [Demotion] in GHC.Rename.Env. demoteNameSpace :: NameSpace -> Maybe NameSpace demoteNameSpace VarName = Nothing demoteNameSpace DataName = Nothing demoteNameSpace TvName = Nothing demoteNameSpace TcClsName = Just DataName demoteNameSpace (FldName {}) = Nothing -- demoteTvNameSpace lowers the NameSpace of a type variable. -- See Note [Demotion] in GHC.Rename.Env. demoteTvNameSpace :: NameSpace -> Maybe NameSpace demoteTvNameSpace TvName = Just VarName demoteTvNameSpace VarName = Nothing demoteTvNameSpace DataName = Nothing demoteTvNameSpace TcClsName = Nothing demoteTvNameSpace (FldName {}) = Nothing -- promoteNameSpace promotes the NameSpace as follows. -- See Note [Promotion] in GHC.Rename.Env. 
promoteNameSpace :: NameSpace -> Maybe NameSpace promoteNameSpace DataName = Just TcClsName promoteNameSpace VarName = Just TvName promoteNameSpace TcClsName = Nothing promoteNameSpace TvName = Nothing promoteNameSpace (FldName {}) = Nothing {- ************************************************************************ * * \subsection[Name-pieces-datatypes]{The @OccName@ datatypes} * * ************************************************************************ -} -- | Occurrence Name -- -- In this context that means: -- "classified (i.e. as a type name, value name, etc) but not qualified -- and not yet resolved" data OccName = OccName { occNameSpace :: !NameSpace , occNameFS :: !FastString } instance Eq OccName where (OccName sp1 s1) == (OccName sp2 s2) = s1 == s2 && sp1 == sp2 instance Ord OccName where -- Compares lexicographically, *not* by Unique of the string compare (OccName sp1 s1) (OccName sp2 s2) = lexicalCompareFS s1 s2 S.<> compare sp1 sp2 instance Data OccName where -- don't traverse? toConstr _ = abstractConstr "OccName" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "OccName" instance HasOccName OccName where occName = id instance NFData OccName where rnf x = x `seq` () {- ************************************************************************ * * \subsection{Printing} * * ************************************************************************ -} instance Outputable OccName where ppr = pprOccName instance OutputableBndr OccName where pprBndr _ = ppr pprInfixOcc n = pprInfixVar (isSymOcc n) (ppr n) pprPrefixOcc n = pprPrefixVar (isSymOcc n) (ppr n) pprOccName :: IsLine doc => OccName -> doc pprOccName (OccName sp occ) = docWithStyle (ztext (zEncodeFS occ)) (\_ -> ftext occ <> whenPprDebug (braces (pprNameSpaceBrief sp))) {-# SPECIALIZE pprOccName :: OccName -> SDoc #-} {-# SPECIALIZE pprOccName :: OccName -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable -- | Mangle field names to avoid duplicate symbols. 
-- -- See Note [Mangling OccNames]. occNameMangledFS :: OccName -> FastString occNameMangledFS (OccName ns fs) = case ns of -- Fields need to include the constructor, to ensure that we don't define -- duplicate symbols when using DuplicateRecordFields. FldName con -> concatFS [fsLit "$fld:", con, ":", fs] -- Otherwise, we can ignore the namespace, as there is no risk of name -- clashes. _ -> fs {- Note [Mangling OccNames] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ When generating a symbol for a Name, we usually discard the NameSpace entirely (see GHC.Types.Name.pprName). This is because clashes are usually not possible, e.g. a variable and a data constructor can't clash because data constructors start with a capital letter or a colon, while variables never do. However, record field names, in the presence of DuplicateRecordFields, need this disambiguation. So, for a record field like data A = MkA { foo :: Int } we generate the symbol $fld:MkA:foo. We use the constructor 'MkA' to disambiguate, and not the TyCon A as one might naively expect: this is explained in Note [Record field namespacing]. 
-} {- ************************************************************************ * * \subsection{Construction} * * ************************************************************************ -} mkOccName :: NameSpace -> String -> OccName mkOccName occ_sp str = OccName occ_sp (mkFastString str) mkOccNameFS :: NameSpace -> FastString -> OccName mkOccNameFS occ_sp fs = OccName occ_sp fs mkVarOcc :: String -> OccName mkVarOcc s = mkOccName varName s mkVarOccFS :: FastString -> OccName mkVarOccFS fs = mkOccNameFS varName fs mkRecFieldOcc :: FastString -> String -> OccName mkRecFieldOcc dc = mkOccName (fieldName dc) mkRecFieldOccFS :: FastString -> FastString -> OccName mkRecFieldOccFS dc = mkOccNameFS (fieldName dc) varToRecFieldOcc :: HasDebugCallStack => FastString -> OccName -> OccName varToRecFieldOcc dc (OccName ns s) = assert makes_sense $ mkRecFieldOccFS dc s where makes_sense = case ns of VarName -> True FldName {} -> True -- NB: it's OK to change the parent data constructor, -- see e.g. test T23220 in which we construct with TH -- a datatype using the fields of a different datatype. _ -> False recFieldToVarOcc :: HasDebugCallStack => OccName -> OccName recFieldToVarOcc (OccName _ns s) = mkVarOccFS s mkDataOcc :: String -> OccName mkDataOcc = mkOccName dataName mkDataOccFS :: FastString -> OccName mkDataOccFS = mkOccNameFS dataName mkTyVarOcc :: String -> OccName mkTyVarOcc = mkOccName tvName mkTyVarOccFS :: FastString -> OccName mkTyVarOccFS fs = mkOccNameFS tvName fs mkTcOcc :: String -> OccName mkTcOcc = mkOccName tcName mkTcOccFS :: FastString -> OccName mkTcOccFS = mkOccNameFS tcName mkClsOcc :: String -> OccName mkClsOcc = mkOccName clsName mkClsOccFS :: FastString -> OccName mkClsOccFS = mkOccNameFS clsName -- demoteOccName lowers the Namespace of OccName. -- See Note [Demotion] in GHC.Rename.Env. 
demoteOccName :: OccName -> Maybe OccName
demoteOccName (OccName space name) = do
  -- Nothing if this namespace has no demotion (see 'demoteNameSpace')
  space' <- demoteNameSpace space
  return $ OccName space' name

demoteOccTvName :: OccName -> Maybe OccName
demoteOccTvName (OccName space name) = do
  space' <- demoteTvNameSpace space
  return $ OccName space' name

-- promoteOccName promotes the NameSpace of OccName.
-- See Note [Promotion] in GHC.Rename.Env.
promoteOccName :: OccName -> Maybe OccName
promoteOccName (OccName space name) = do
  promoted_space <- promoteNameSpace space
  let tyop = isTvNameSpace promoted_space && isLexVarSym name
      space' = if tyop then tcClsName else promoted_space
        -- special case for type operators (#24570)
  return $ OccName space' name

{- | Other names in the compiler add additional information to an OccName.
This class provides a consistent way to access the underlying OccName. -}
class HasOccName name where
  occName :: name -> OccName

{-
************************************************************************
*                                                                      *
                Environments
*                                                                      *
************************************************************************

OccEnvs are used for the GlobalRdrEnv and for the envts in ModIface.

Note [OccEnv]
~~~~~~~~~~~~~
An OccEnv is a map keyed on OccName. Recall that an OccName consists of
two components:

  - a namespace,
  - a textual name (in the form of a FastString).

In general, for a given textual name, there is only one appropriate namespace.
However, sometimes we do get an occurrence that belongs to several namespaces:

  - Symbolic identifiers such as (:+) can belong to both the data constructor
    and type constructor/class namespaces.
  - With duplicate record fields, a field name can belong to several different
    namespaces, one for each parent datatype (or pattern synonym).

So we represent an OccEnv as a nested data structure

  FastStringEnv (UniqFM NameSpace a)

in which we can first look up the textual name, and then choose which of the
namespaces are relevant.

This supports the two main uses of OccEnvs:

  1.
     One wants to look up a specific OccName in the environment, at a specific
     namespace. One looks up the textual name, and then the namespace.

  2. One wants to look up something, but isn't sure in advance of the
     namespace. So one looks up the textual name, and then can decide what to
     do based on the returned map of namespaces.

This data structure isn't performance critical in most situations, but some
improvements to its performance that might be worth it are as follows:

  A. Use a tailor-made data structure for a map keyed on NameSpaces.
     Recall that we have:

       data IntMap a = Bin !Int !Int !(IntMap a) !(IntMap a)
                     | Tip !Key a
                     | Nil

     This is already pretty efficient for singletons, but we don't need the
     empty case (as we would simply omit the parent key in the OccEnv instead
     of storing an empty inner map).

  B. Always ensure the inner map (keyed on namespaces) is evaluated, i.e.
     is never a thunk. For this, we would need to use strict operations on
     the outer FastStringEnv (but we'd keep using lazy operations on the
     inner UniqFM).
-}

-- | A map keyed on 'OccName'. See Note [OccEnv].
newtype OccEnv a = MkOccEnv (FastStringEnv (UniqFM NameSpace a))
  deriving Functor

-- | The empty 'OccEnv'.
emptyOccEnv :: OccEnv a
emptyOccEnv = MkOccEnv emptyFsEnv

-- | A singleton 'OccEnv'.
unitOccEnv :: OccName -> a -> OccEnv a
unitOccEnv (OccName ns s) a = MkOccEnv $ unitFsEnv s (unitUFM ns a)

-- | Add a single element to an 'OccEnv'.
extendOccEnv :: OccEnv a -> OccName -> a -> OccEnv a
extendOccEnv (MkOccEnv as) (OccName ns s) a =
  -- Merge the singleton inner map into any existing namespace map for s
  MkOccEnv $ extendFsEnv_C plusUFM as s (unitUFM ns a)

-- | Extend an 'OccEnv' by a list.
--
-- 'OccName's later on in the list override earlier 'OccName's.
extendOccEnvList :: OccEnv a -> [(OccName, a)] -> OccEnv a
extendOccEnvList = foldl' $ \ env (occ, a) -> extendOccEnv env occ a

-- | Look an element up in an 'OccEnv'.
lookupOccEnv :: OccEnv a -> OccName -> Maybe a
lookupOccEnv (MkOccEnv as) (OccName ns s)
  = do { m <- lookupFsEnv as s      -- first by textual name...
       ; lookupUFM m ns }           -- ...then by namespace (see Note [OccEnv])

-- | Lookup an element in an 'OccEnv', ignoring 'NameSpace's entirely.
lookupOccEnv_AllNameSpaces :: OccEnv a -> OccName -> [a]
lookupOccEnv_AllNameSpaces (MkOccEnv as) (OccName _ s)
  = case lookupFsEnv as s of
      Nothing -> []
      Just r  -> nonDetEltsUFM r

-- | Lookup an element in an 'OccEnv', looking in the record field
-- namespace for a variable.
lookupOccEnv_WithFields :: OccEnv a -> OccName -> [a]
lookupOccEnv_WithFields env occ =
  case lookupOccEnv env occ of
    Nothing  -> fieldGREs
    Just gre -> gre : fieldGREs
  where
    fieldGREs
      -- If the 'OccName' is a variable, also look up
      -- in the record field namespaces.
      | isVarOcc occ = lookupFieldsOccEnv env (occNameFS occ)
      | otherwise    = []

-- | Look up all the record fields that match with the given 'FastString'
-- in an 'OccEnv'.
lookupFieldsOccEnv :: OccEnv a -> FastString -> [a]
lookupFieldsOccEnv (MkOccEnv as) fld =
  case lookupFsEnv as fld of
    Nothing   -> []
    Just flds -> nonDetEltsUFM $ filter_flds flds
      -- NB: non-determinism is OK: in practice we will either end up resolving
      -- to a single field or throwing an error.
  where
    -- Keep only entries in field namespaces, identified by their uniques
    filter_flds = filterUFM_Directly (\ uniq _ -> isFldNSUnique uniq)

-- | Create an 'OccEnv' from a list.
--
-- 'OccName's later on in the list override earlier 'OccName's.
mkOccEnv :: [(OccName,a)] -> OccEnv a
mkOccEnv = extendOccEnvList emptyOccEnv

-- | Create an 'OccEnv' from a list, combining different values
-- with the same 'OccName' using the combining function.
mkOccEnv_C :: (a -> a -> a) -- ^ old -> new -> result
           -> [(OccName,a)]
           -> OccEnv a
mkOccEnv_C f elts
  = MkOccEnv $ foldl' g emptyFsEnv elts
  where
    -- 'flip f' because the fold supplies (new, old) but f expects (old, new)
    g env (OccName ns s, a) = extendFsEnv_C (plusUFM_C $ flip f) env s (unitUFM ns a)

-- | Compute whether there is a value keyed by the given 'OccName'.
elemOccEnv :: OccName -> OccEnv a -> Bool
elemOccEnv (OccName ns s) (MkOccEnv as) =
  case lookupFsEnv as s of
    Nothing -> False
    Just m  -> ns `elemUFM` m

-- | Fold over an 'OccEnv'. Non-deterministic, unless the folding function
-- is commutative (i.e. @a1 `f` ( a2 `f` b ) == a2 `f` ( a1 `f` b )@ for all @a1@, @a2@, @b@).
nonDetFoldOccEnv :: (a -> b -> b) -> b -> OccEnv a -> b
nonDetFoldOccEnv f b0 (MkOccEnv as) =
  nonDetFoldFsEnv (flip $ nonDetFoldUFM f) b0 as

-- | Obtain the elements of an 'OccEnv'.
--
-- The resulting order is non-deterministic.
nonDetOccEnvElts :: OccEnv a -> [a]
nonDetOccEnvElts = nonDetFoldOccEnv (:) []

-- | Union of two 'OccEnv's, right-biased.
plusOccEnv :: OccEnv a -> OccEnv a -> OccEnv a
plusOccEnv (MkOccEnv env1) (MkOccEnv env2) =
  MkOccEnv $ plusFsEnv_C plusUFM env1 env2

-- | Union of two 'OccEnv's with a combining function.
plusOccEnv_C :: (a->a->a) -> OccEnv a -> OccEnv a -> OccEnv a
plusOccEnv_C f (MkOccEnv env1) (MkOccEnv env2) =
  MkOccEnv $ plusFsEnv_C (plusUFM_C f) env1 env2

-- | Map over an 'OccEnv' ('Functor' instance).
mapOccEnv :: (a->b) -> OccEnv a -> OccEnv b
mapOccEnv = fmap

-- | 'mapMaybe' for an 'OccEnv'.
mapMaybeOccEnv :: (a -> Maybe b) -> OccEnv a -> OccEnv b
mapMaybeOccEnv f (MkOccEnv env) =
  MkOccEnv $ mapMaybeUFM g env
  where
    -- Drop the outer entry entirely when the inner map becomes empty
    g as = case mapMaybeUFM f as of
      m' | isNullUFM m' -> Nothing
         | otherwise    -> Just m'

-- | Add a single element to an 'OccEnv', using a different function whether
-- the 'OccName' already exists or not.
extendOccEnv_Acc :: forall a b
                  . (a->b->b)  -- ^ add to existing
                 -> (a->b)     -- ^ new element
                 -> OccEnv b   -- ^ old
                 -> OccName
                 -> a          -- ^ new
                 -> OccEnv b
extendOccEnv_Acc f g (MkOccEnv env) (OccName ns s)
  = MkOccEnv . extendFsEnv_Acc f' g' env s
    -- point-free in the final 'a' argument
  where
    f' :: a -> UniqFM NameSpace b -> UniqFM NameSpace b
    f' a bs = alterUFM (Just . \ case { Nothing -> g a ; Just b -> f a b }) bs ns
    g' a = unitUFM ns (g a)

-- | Delete one element from an 'OccEnv'.
delFromOccEnv :: forall a. OccEnv a -> OccName -> OccEnv a
delFromOccEnv (MkOccEnv env1) (OccName ns s) =
  MkOccEnv $ alterFsEnv f env1 s
  where
    f :: Maybe (UniqFM NameSpace a) -> Maybe (UniqFM NameSpace a)
    f Nothing  = Nothing
    f (Just m) =
      case delFromUFM m ns of
        m' | isNullUFM m' -> Nothing   -- no namespaces left: remove outer entry
           | otherwise    -> Just m'

-- | Delete multiple elements from an 'OccEnv'.
delListFromOccEnv :: OccEnv a -> [OccName] -> OccEnv a
delListFromOccEnv = foldl' delFromOccEnv

-- | Filter out all elements in an 'OccEnv' using a predicate.
filterOccEnv :: forall a. (a -> Bool) -> OccEnv a -> OccEnv a
filterOccEnv f (MkOccEnv env) =
  MkOccEnv $ mapMaybeFsEnv g env
  where
    g :: UniqFM NameSpace a -> Maybe (UniqFM NameSpace a)
    g ms = case filterUFM f ms of
      m' | isNullUFM m' -> Nothing
         | otherwise    -> Just m'

-- | Alter an 'OccEnv', adding or removing an element at the given key.
alterOccEnv :: forall a. (Maybe a -> Maybe a) -> OccEnv a -> OccName -> OccEnv a
alterOccEnv f (MkOccEnv env) (OccName ns s) =
  MkOccEnv $ alterFsEnv g env s
  where
    g :: Maybe (UniqFM NameSpace a) -> Maybe (UniqFM NameSpace a)
    g Nothing  = fmap (unitUFM ns) (f Nothing)
    g (Just m) =
      case alterUFM f m ns of
        m' | isNullUFM m' -> Nothing
           | otherwise    -> Just m'

-- | Intersection of two 'OccEnv's, combining matching elements.
intersectOccEnv_C :: (a -> b -> c) -> OccEnv a -> OccEnv b -> OccEnv c
intersectOccEnv_C f (MkOccEnv as) (MkOccEnv bs) =
  MkOccEnv $ intersectUFM_C (intersectUFM_C f) as bs

-- | Remove elements of the first 'OccEnv' that appear in the second 'OccEnv'.
minusOccEnv :: OccEnv a -> OccEnv b -> OccEnv a
minusOccEnv = minusOccEnv_C_Ns minusUFM

-- | Alters (replaces or removes) those elements of the first 'OccEnv' that are
-- mentioned in the second 'OccEnv'.
--
-- Same idea as 'Data.Map.differenceWith'.
minusOccEnv_C :: (a -> b -> Maybe a) -> OccEnv a -> OccEnv b -> OccEnv a
minusOccEnv_C f = minusOccEnv_C_Ns (minusUFM_C f)

-- | Worker for 'minusOccEnv'/'minusOccEnv_C': the caller supplies the
-- difference operation on the inner (namespace-keyed) maps.
minusOccEnv_C_Ns :: forall a b
                  . (UniqFM NameSpace a -> UniqFM NameSpace b -> UniqFM NameSpace a)
                 -> OccEnv a -> OccEnv b -> OccEnv a
minusOccEnv_C_Ns f (MkOccEnv as) (MkOccEnv bs) =
  MkOccEnv $ minusUFM_C g as bs
  where
    g :: UniqFM NameSpace a -> UniqFM NameSpace b -> Maybe (UniqFM NameSpace a)
    g as bs =
      let m = f as bs
      in if isNullUFM m
         then Nothing
         else Just m

instance Outputable a => Outputable (OccEnv a) where
    ppr x = pprOccEnv ppr x

pprOccEnv :: (a -> SDoc) -> OccEnv a -> SDoc
pprOccEnv ppr_elt (MkOccEnv env)
  = brackets $ fsep $ punctuate comma $
    [ ppr uq <+> text ":->" <+> ppr_elt elt
    | (uq, elts) <- nonDetUFMToList env
    , elt <- nonDetEltsUFM elts ]

instance NFData a => NFData (OccEnv a) where
  rnf = forceOccEnv rnf

-- | Map over an 'OccEnv' strictly.
strictMapOccEnv :: (a -> b) -> OccEnv a -> OccEnv b
strictMapOccEnv f (MkOccEnv as) =
  MkOccEnv $ strictMapFsEnv (strictMapUFM f) as

-- | Force an 'OccEnv' with the provided function.
forceOccEnv :: (a -> ()) -> OccEnv a -> ()
forceOccEnv nf (MkOccEnv fs) = seqEltsUFM (seqEltsUFM nf) fs

--------------------------------------------------------------------------------

-- | A set of 'OccName's, with the same nested representation as 'OccEnv':
-- keyed first on the textual name, then on the namespace.
newtype OccSet = OccSet (FastStringEnv (UniqSet NameSpace))

emptyOccSet       :: OccSet
unitOccSet        :: OccName -> OccSet
mkOccSet          :: [OccName] -> OccSet
extendOccSet      :: OccSet -> OccName -> OccSet
extendOccSetList  :: OccSet -> [OccName] -> OccSet
unionOccSets      :: OccSet -> OccSet -> OccSet
unionManyOccSets  :: [OccSet] -> OccSet
elemOccSet        :: OccName -> OccSet -> Bool
isEmptyOccSet     :: OccSet -> Bool

emptyOccSet = OccSet emptyFsEnv
unitOccSet (OccName ns s) = OccSet $ unitFsEnv s (unitUniqSet ns)
mkOccSet = extendOccSetList emptyOccSet
-- NOTE(review): 'extendFsEnv' overwrites any existing namespace set for s
-- rather than unioning into it — confirm against upstream before relying on
-- extendOccSet with several namespaces sharing one textual name.
extendOccSet (OccSet occs) (OccName ns s) = OccSet $ extendFsEnv occs s (unitUniqSet ns)
extendOccSetList = foldl' extendOccSet
unionOccSets (OccSet xs) (OccSet ys) = OccSet $ plusFsEnv_C unionUniqSets xs ys
unionManyOccSets = foldl' unionOccSets emptyOccSet
elemOccSet (OccName ns s) (OccSet occs) = maybe False (elementOfUniqSet ns) $ lookupFsEnv occs s
isEmptyOccSet (OccSet occs) = isNullUFM occs

{-
************************************************************************
*                                                                      *
\subsection{Predicates and taking them apart}
*                                                                      *
************************************************************************
-}

occNameString :: OccName -> String
occNameString (OccName _ s) = unpackFS s

setOccNameSpace :: NameSpace -> OccName -> OccName
setOccNameSpace sp (OccName _ occ) = OccName sp occ

isVarOcc, isTvOcc, isTcOcc, isDataOcc, isFieldOcc :: OccName -> Bool

isVarOcc (OccName VarName _) = True
isVarOcc _                   = False

isTvOcc (OccName TvName _) = True
isTvOcc _                  = False

isTcOcc (OccName TcClsName _) = True
isTcOcc _                     = False

isFieldOcc (OccName (FldName {}) _) = True
isFieldOcc _                        = False

-- | Return the parent data constructor's 'FastString' for a record-field
-- 'OccName', or 'Nothing' for any other namespace.
fieldOcc_maybe :: OccName -> Maybe FastString
fieldOcc_maybe (OccName (FldName con) _) = Just con
fieldOcc_maybe _ = Nothing

-- | /Value/ 'OccNames's are those that are either in
-- the variable, field name or data constructor namespaces
isValOcc :: OccName -> Bool
isValOcc (OccName VarName      _) = True
isValOcc (OccName DataName     _) = True
isValOcc (OccName (FldName {}) _) = True
isValOcc _                        = False

isDataOcc (OccName DataName _) = True
isDataOcc _                    = False

-- | Test if the 'OccName' is a data constructor that starts with
-- a symbol (e.g. @:@, or @[]@)
isDataSymOcc :: OccName -> Bool
isDataSymOcc (OccName DataName s) = isLexConSym s
isDataSymOcc _                    = False
-- Pretty inefficient!

-- | Test if the 'OccName' is that for any operator (whether
-- it is a data constructor or variable or whatever)
isSymOcc :: OccName -> Bool
isSymOcc (OccName ns s) = case ns of
  DataName   -> isLexConSym s
  TcClsName  -> isLexSym s
  VarName    -> isLexSym s
  TvName     -> isLexSym s
  FldName {} -> isLexSym s
-- Pretty inefficient!
parenSymOcc :: OccName -> SDoc -> SDoc
-- ^ Wrap parens around an operator
parenSymOcc occ doc | isSymOcc occ = parens doc
                    | otherwise    = doc

startsWithUnderscore :: OccName -> Bool
-- ^ Haskell 98 encourages compilers to suppress warnings about unused
-- names in a pattern if they start with @_@: this implements that test
startsWithUnderscore occ = case unpackFS (occNameFS occ) of
  '_':_ -> True
  _     -> False

isUnderscore :: OccName -> Bool
isUnderscore occ = occNameFS occ == fsLit "_"

{-
************************************************************************
*                                                                      *
\subsection{Making system names}
*                                                                      *
************************************************************************

Here's our convention for splitting up the interface file name space:

   d...         dictionary identifiers
                (local variables, so no name-clash worries)

All of these other OccNames contain a mixture of alphabetic
and symbolic characters, and hence cannot possibly clash with
a user-written type or function name

   $f...        Dict-fun identifiers (from inst decls)
   $dmop        Default method for 'op'
   $pnC         n'th superclass selector for class C
   $wf          Worker for function 'f'
   $sf..        Specialised version of f
   D:C          Data constructor for dictionary for class C
   NTCo:T       Coercion connecting newtype T with its representation type
   TFCo:R       Coercion connecting a data family to its representation type R

In encoded form these appear as Zdfxxx etc

        :...            keywords (export:, letrec: etc.)
--- I THINK THIS IS WRONG!

This knowledge is encoded in the following functions.

@mk_deriv@ generates an @OccName@ from the prefix and a string.
NB: The string must already be encoded!
-}

-- | Build an 'OccName' derived from another 'OccName'.
--
-- Note that the pieces of the name are passed in as a @[FastString]@ so that
-- the whole name can be constructed with a single 'concatFS', minimizing
-- unnecessary intermediate allocations.
mk_deriv :: NameSpace
         -> FastString     -- ^ A prefix which distinguishes one sort of
                           -- derived name from another
         -> [FastString]   -- ^ The name we are deriving from in pieces which
                           -- will be concatenated.
         -> OccName
mk_deriv occ_sp sys_prefix str =
    mkOccNameFS occ_sp (concatFS $ sys_prefix : str)

isDerivedOccName :: OccName -> Bool
-- ^ Test for definitions internally generated by GHC.  This predicate
-- is used to suppress printing of internal definitions in some debug prints
isDerivedOccName occ =
   case occNameString occ of
     '$':c:_ | isAlphaNum c -> True   -- E.g.  $wfoo
     c:':':_ | isAlphaNum c -> True   -- E.g.  N:blah   newtype coercions
     _other                 -> False

-- | Does this 'OccName' carry the "$dm" default-method prefix?
isDefaultMethodOcc :: OccName -> Bool
isDefaultMethodOcc occ =
   case occNameString occ of
     '$':'d':'m':_ -> True
     _ -> False

-- | Is an 'OccName' one of a Typeable @TyCon@ or @Module@ binding?
-- This is needed as these bindings are renamed differently.
-- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable".
isTypeableBindOcc :: OccName -> Bool
isTypeableBindOcc occ =
   case occNameString occ of
     '$':'t':'c':_ -> True  -- mkTyConRepOcc
     '$':'t':'r':_ -> True  -- Module binding
     _ -> False

mkDataConWrapperOcc, mkWorkerOcc,
        mkMatcherOcc, mkBuilderOcc,
        mkDefaultMethodOcc,
        mkClassDataConOcc, mkDictOcc,
        mkIPOcc, mkSpecOcc, mkForeignExportOcc,
        mkRepEqOcc,
        mkGenR, mkGen1R,
        mkDataConWorkerOcc, mkNewTyCoOcc,
        mkInstTyCoOcc, mkEqPredCoOcc, mkClassOpAuxOcc,
        mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc,
        mkDataTOcc, mkDataCOcc, mkTyConRepOcc
   :: OccName -> OccName

-- These derived variables have a prefix that no Haskell value could have
mkDataConWrapperOcc = mk_simple_deriv varName  "$W"
mkWorkerOcc         = mk_simple_deriv varName  "$w"
mkMatcherOcc        = mk_simple_deriv varName  "$m"
mkBuilderOcc        = mk_simple_deriv varName  "$b"
mkDefaultMethodOcc  = mk_simple_deriv varName  "$dm"
mkClassOpAuxOcc     = mk_simple_deriv varName  "$c"
mkDictOcc           = mk_simple_deriv varName  "$d"
mkIPOcc             = mk_simple_deriv varName  "$i"
mkSpecOcc           = mk_simple_deriv varName  "$s"
mkForeignExportOcc  = mk_simple_deriv varName  "$f"
mkRepEqOcc          = mk_simple_deriv tvName   "$r"  -- In RULES involving Coercible
mkClassDataConOcc   = mk_simple_deriv dataName "C:"  -- Data con for a class
mkNewTyCoOcc        = mk_simple_deriv tcName   "N:"  -- Coercion for newtypes
mkInstTyCoOcc       = mk_simple_deriv tcName   "D:"  -- Coercion for type functions
mkEqPredCoOcc       = mk_simple_deriv tcName   "$co"

-- Used in derived instances for the names of auxiliary bindings.
-- See Note [Auxiliary binders] in GHC.Tc.Deriv.Generate.
mkCon2TagOcc        = mk_simple_deriv varName  "$con2tag_"
mkTag2ConOcc        = mk_simple_deriv varName  "$tag2con_"
mkMaxTagOcc         = mk_simple_deriv varName  "$maxtag_"
mkDataTOcc          = mk_simple_deriv varName  "$t"
mkDataCOcc          = mk_simple_deriv varName  "$c"

-- TyConRepName stuff; see Note [Grand plan for Typeable] in GHC.Tc.Instance.Typeable
mkTyConRepOcc occ = mk_simple_deriv varName prefix occ
  where
    -- "$tc'" for promoted data constructors, "$tc" otherwise
    prefix | isDataOcc occ = "$tc'"
           | otherwise     = "$tc"

-- Generic deriving mechanism
mkGenR  = mk_simple_deriv tcName "Rep_"
mkGen1R = mk_simple_deriv tcName "Rep1_"

-- | Prefix the 'OccName' with the given 'FastString' in the given namespace.
mk_simple_deriv :: NameSpace -> FastString -> OccName -> OccName
mk_simple_deriv sp px occ = mk_deriv sp px [occNameFS occ]

-- Data constructor workers are made by setting the name space
-- of the data constructor OccName (which should be a DataName)
-- to VarName
mkDataConWorkerOcc datacon_occ = setOccNameSpace varName datacon_occ

mkSuperDictAuxOcc :: Int -> OccName -> OccName
mkSuperDictAuxOcc index cls_tc_occ
  = mk_deriv varName "$cp" [fsLit $ show index, occNameFS cls_tc_occ]

mkSuperDictSelOcc :: Int       -- ^ Index of superclass, e.g. 3
                  -> OccName   -- ^ Class, e.g. @Ord@
                  -> OccName   -- ^ Derived 'OccName', e.g. @$p3Ord@
mkSuperDictSelOcc index cls_tc_occ
  = mk_deriv varName "$p" [fsLit $ show index, occNameFS cls_tc_occ]

mkLocalOcc :: Unique   -- ^ Unique to combine with the 'OccName'
           -> OccName  -- ^ Local name, e.g. @sat@
           -> OccName  -- ^ Nice unique version, e.g.
--   @$L23sat@   (continuation of the result haddock above)
mkLocalOcc uniq occ
   = mk_deriv varName "$L" [fsLit $ show uniq, occNameFS occ]
        -- The Unique might print with characters
        -- that need encoding (e.g. 'z'!)

-- | Derive a name for the representation type constructor of a
-- @data@\/@newtype@ instance.
mkInstTyTcOcc :: String   -- ^ Family name, e.g. @Map@
              -> OccSet   -- ^ avoid these Occs
              -> OccName  -- ^ @R:Map@
mkInstTyTcOcc str = chooseUniqueOcc tcName ('R' : ':' : str)

mkDFunOcc :: String   -- ^ Typically the class and type glommed together e.g. @OrdMaybe@.
                      -- Only used in debug mode, for extra clarity
          -> Bool     -- ^ Is this a hs-boot instance DFun?
          -> OccSet   -- ^ avoid these Occs
          -> OccName  -- ^ E.g. @$f3OrdMaybe@

-- In hs-boot files we make dict funs like $fx7ClsTy, which get bound to the real
-- thing when we compile the mother module. Reason: we don't know exactly
-- what the mother module will call it.

mkDFunOcc info_str is_boot set
  = chooseUniqueOcc VarName (prefix ++ info_str) set
  where
    prefix | is_boot   = "$fx"
           | otherwise = "$f"

{-
Sometimes we need to pick an OccName that has not already been used,
given a set of in-use OccNames.
-}

-- Append an increasing numeric suffix until the name is not in the given set.
chooseUniqueOcc :: NameSpace -> String -> OccSet -> OccName
chooseUniqueOcc ns str set = loop (mkOccName ns str) (0::Int)
  where
    loop occ n
      | occ `elemOccSet` set = loop (mkOccName ns (str ++ show n)) (n+1)
      | otherwise            = occ

{-
We used to add a '$m' to indicate a method, but that gives rise to bad
error messages from the type checker when we print the function name or pattern
of an instance-decl binding.  Why? Because the binding is zapped
to use the method name in place of the selector name.
(See GHC.Tc.TyCl.Class.tcMethodBind)

The way it is now, -ddump-xx output may look confusing, but
you can always say -dppr-debug to get the uniques.

However, we *do* have to zap the first character to be lower case,
because overloaded constructors (blarg) generate methods too.
And convert to VarName space e.g.
a call to constructor MkFoo
where
  data (Ord a) => Foo a = MkFoo a

If this is necessary, we do it by prefixing '$m'.  These
guys never show up in error messages.  What a hack.
-}

mkMethodOcc :: OccName -> OccName
mkMethodOcc occ@(OccName VarName _) = occ
mkMethodOcc occ                     = mk_simple_deriv varName "$m" occ

{-
************************************************************************
*                                                                      *
\subsection{Tidying them up}
*                                                                      *
************************************************************************

Before we print chunks of code we like to rename it so that
we don't have to print lots of silly uniques in it.  But we mustn't
accidentally introduce name clashes!  So the idea is that we leave the
OccName alone unless it accidentally clashes with one that is already
in scope; if so, we tack on '1' at the end and try again, then '2', and
so on till we find a unique one.

There's a wrinkle for operators.  Consider '>>='.  We can't use '>>=1'
because that isn't a single lexeme.  So we encode it to 'lle' and *then*
tack on the '1', if necessary.

Note [TidyOccEnv]
~~~~~~~~~~~~~~~~~
type TidyOccEnv = UniqFM FastString Int

* Domain = The OccName's FastString. These FastStrings are "taken";
  make sure that we don't re-use

* Int, n = A plausible starting point for new guesses
  There is no guarantee that "FSn" is available; you must look that up in
  the TidyOccEnv.  But it's a good place to start looking.

* When looking for a renaming for "foo2" we strip off the "2" and start
  with "foo".  Otherwise if we tidy twice we get silly names like foo23.

  However, if it started with digits at the end, we always make a name with
  digits at the end, rather than shortening "foo2" to just "foo", even if
  "foo" is unused.  Reasons:
     - Plain "foo" might be used later
     - We use trailing digits to subtly indicate a unification variable
       in typechecker error message; see TypeRep.tidyTyVarBndr

We have to take care though! Consider a machine-generated module (#10370)
  module Foo where
     a1 = e1
     a2 = e2
     ...
     a2000 = e2000
Then "a1", "a2" etc are all marked taken.  But now if we come across "a7"
again, we have to do a linear search to find a free one, "a2001".  That might
just be acceptable once.  But if we now come across "a8" again, we don't want
to repeat that search.

So we use the TidyOccEnv mapping for "a" (not "a7" or "a8") as our base for
starting the search; and we make sure to update the starting point for "a"
after we allocate a new one.

Note [Tidying multiple names at once]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider

    > :t (id,id,id)

Every id contributes a type variable to the type signature, and all of them
are "a". If we tidy them one by one, we get

    (id,id,id) :: (a2 -> a2, a1 -> a1, a -> a)

which is a bit unfortunate, as it unfairly renames only two of them. What we
would like to see is

    (id,id,id) :: (a3 -> a3, a2 -> a2, a1 -> a1)

To achieve this, the function avoidClashesOccEnv can be used to prepare the
TidyEnv, by “blocking” every name that occurs twice in the map. This way, none
of the "a"s will get the privilege of keeping this name, and all of them will
get a suitable number by tidyOccName.

Thus
  avoidNameClashesOccEnv ["a" :-> 7] ["b", "a", "c", "b", "a"]
    = ["a" :-> 7, "b" :-> 1]
Here
 * "a" is already the TidyOccEnv, and so is unaffected
 * "b" occurs twice, so is blocked by adding "b" :-> 1
 * "c" occurs only once, and so is not affected.

This prepared TidyEnv can then be used with tidyOccName. See tidyTyCoVarBndrs
for an example where this is used.

This is #12382.
-}

type TidyOccEnv = UniqFM FastString Int  -- The in-scope OccNames
  -- See Note [TidyOccEnv]

emptyTidyOccEnv :: TidyOccEnv
emptyTidyOccEnv = emptyUFM

initTidyOccEnv :: [OccName] -> TidyOccEnv  -- Initialise with names to avoid!
initTidyOccEnv = foldl' add emptyUFM
  where
    add env (OccName _ fs) = addToUFM env fs 1

delTidyOccEnvList :: TidyOccEnv -> [OccName] -> TidyOccEnv
delTidyOccEnvList env occs = env `delListFromUFM` map occNameFS occs

-- see Note [Tidying multiple names at once]
avoidClashesOccEnv :: TidyOccEnv -> [OccName] -> TidyOccEnv
avoidClashesOccEnv env occs = go env emptyUFM occs
  where
    go env _        [] = env
    go env seenOnce ((OccName _ fs) : occs)
      | fs `elemUFM` env      = go env seenOnce occs
      | fs `elemUFM` seenOnce = go (addToUFM env fs 1) seenOnce occs
        -- second occurrence: block the name (see Note above)
      | otherwise             = go env (addToUFM seenOnce fs ()) occs

tidyOccName :: TidyOccEnv -> OccName -> (TidyOccEnv, OccName)
tidyOccName env occ@(OccName occ_sp fs)
  | not (fs `elemUFM` env)
  = -- Desired OccName is free, so use it,
    -- and record in 'env' that it's no longer available
    (addToUFM env fs 1, occ)

  | otherwise
  = case lookupUFM env base1 of
       Nothing -> (addToUFM env base1 2, OccName occ_sp base1)
       Just n  -> find 1 n
  where
    base :: String  -- Drop trailing digits (see Note [TidyOccEnv])
    base  = dropWhileEndLE isDigit (unpackFS fs)
    base1 = mkFastString (base ++ "1")

    find !k !n
      = case elemUFM new_fs env of
          True  -> find (k+1 :: Int) (n+k)
                     -- By using n+k, the n argument to find goes
                     --    1, add 1, add 2, add 3, etc which
                     -- moves at quadratic speed through a dense patch

          False -> (new_env, OccName occ_sp new_fs)
       where
         new_fs  = mkFastString (base ++ show n)
         new_env = addToUFM (addToUFM env new_fs 1) base1 (n+1)
                     -- Update: base1,  so that next time we'll start where we left off
                     --         new_fs, so that we know it is taken
                     -- If they are the same (n==1), the former wins
                     -- See Note [TidyOccEnv]

trimTidyOccEnv :: TidyOccEnv -> [OccName] -> TidyOccEnv
-- Restrict the env to just the [OccName]
trimTidyOccEnv env vs = foldl' add emptyUFM vs
  where
    add :: TidyOccEnv -> OccName -> TidyOccEnv
    add so_far (OccName _ fs)
      = case lookupUFM env fs of
          Just n  -> addToUFM so_far fs n
          Nothing -> so_far

{-
************************************************************************
*                                                                      *
                Utilities for "main"
*                                                                      *
************************************************************************
-}

mainOcc :: OccName
mainOcc = mkVarOccFS (fsLit "main")

ppMainFn :: OccName -> SDoc
ppMainFn main_occ
  | main_occ == mainOcc
  = text "IO action" <+> quotes (ppr main_occ)
  | otherwise
  = text "main IO action" <+> quotes (ppr main_occ)

{-
************************************************************************
*                                                                      *
                Binary instance
    Here rather than in GHC.Iface.Binary because OccName is abstract
*                                                                      *
************************************************************************
-}

-- Tags 0-3 are the simple namespaces; tag 4 is followed by the field's
-- parent data constructor 'FastString'.
instance Binary NameSpace where
    put_ bh VarName   = putByte bh 0
    put_ bh DataName  = putByte bh 1
    put_ bh TvName    = putByte bh 2
    put_ bh TcClsName = putByte bh 3
    put_ bh (FldName parent) = do
      putByte bh 4
      put_ bh parent
    get bh = do
      h <- getByte bh
      case h of
        0 -> return VarName
        1 -> return DataName
        2 -> return TvName
        3 -> return TcClsName
        _ -> do
          parent <- get bh
          return $ FldName { fldParent = parent }

instance Binary OccName where
    put_ bh (OccName aa ab) = do
        put_ bh aa
        put_ bh ab
    get bh = do
        aa <- get bh
        ab <- get bh
        return (OccName aa ab)
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Occurrence.hs-boot0000644000000000000000000000034207346545000023256 0ustar0000000000000000module GHC.Types.Name.Occurrence where

import GHC.Data.FastString ( FastString )

data OccName

class HasOccName name where
  occName :: name -> OccName

occNameFS :: OccName -> FastString

mkVarOccFS :: FastString -> OccName
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Ppr.hs0000644000000000000000000002201007346545000020762 0ustar0000000000000000
module GHC.Types.Name.Ppr
   ( mkNamePprCtx
   , mkQualModule
   , mkQualPackage
   , pkgQual
   )
where

import GHC.Prelude

import GHC.Data.FastString

import GHC.Unit
import GHC.Unit.Env

import GHC.Types.Name
import GHC.Types.Name.Reader

import GHC.Utils.Outputable
import GHC.Utils.Panic
import GHC.Utils.Misc

import GHC.Builtin.Types.Prim ( fUNTyConName )
import GHC.Builtin.Types

import Data.Maybe (isJust)

{-
Note [Printing original names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Deciding how to print names is pretty tricky.  We are given a name P:M.T,
where P is the package name, M is the defining module, and T is the
occurrence name, and we have to decide in which form to display the name
given a GlobalRdrEnv describing the current scope.

Ideally we want to display the name in the form in which it is in scope.
However, the name might not be in scope at all, and that's where it gets
tricky.  Here are the cases:

 1. T uniquely maps to  P:M.T                         --->  "T"      NameUnqual
 2. There is an X for which X.T uniquely maps to P:M.T --->  "X.T"    NameQual X
 3. There is no binding for "M.T"                     --->  "M.T"    NameNotInScope1
 4. Otherwise                                         --->  "P:M.T"  NameNotInScope2

(3) and (4) apply when the entity P:M.T is not in the GlobalRdrEnv at all.
In these cases we still want to refer to the name as "M.T", *but* "M.T" might
mean something else in the current scope (e.g. if there's an "import X as M"),
so to avoid confusion we avoid using "M.T" if there's already a binding for
it.  Instead we write P:M.T.

There's one further subtlety: in case (3), what if there are two things around,
P1:M.T and P2:M.T?  Then we don't want to print both of them as M.T!  However
only one of the modules P1:M and P2:M can be exposed (say P2), so we use M.T
for that, and P1:M.T for the other one.  This is handled by the qual_mod
component of NamePprCtx, inside the (ppr mod) of case (3), in
Name.pprModulePrefix

Note [Printing unit ids]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the old days, original names were tied to PackageIds, which directly
corresponded to the entities that users wrote in Cabal files, and were perfectly
suitable for printing when we need to disambiguate packages.  However, with
instantiated units, the situation can be different: if the key is instantiated
with some holes, we should try to give the user some more useful information.
-}

-- | Creates some functions that work out the best ways to format
-- names for the user according to a set of heuristics.
mkNamePprCtx :: Outputable info => PromotionTickContext -> UnitEnv -> GlobalRdrEnvX info -> NamePprCtx
mkNamePprCtx ptc unit_env env
 = QueryQualify
      (mkQualName env)
      (mkQualModule unit_state home_unit)
      (mkQualPackage unit_state)
      (mkPromTick ptc env)
  where
    unit_state = ue_units unit_env
    home_unit  = ue_homeUnit unit_env

-- See Note [Printing original names] for the cases returned here.
mkQualName :: Outputable info => GlobalRdrEnvX info -> QueryQualifyName
mkQualName env = qual_name where
  qual_name mod occ
        | [gre] <- unqual_gres
        , right_name gre
        = NameUnqual   -- If there's a unique entity that's in scope
                       -- unqualified with 'occ' AND that entity is
                       -- the right one, then we can use the unqualified name

        | [] <- unqual_gres
        , pretendNameIsInScopeForPpr
        , not (isDerivedOccName occ)
        = NameUnqual   -- See Note [pretendNameIsInScopeForPpr]

        | [gre] <- qual_gres
        = NameQual (greQualModName gre)

        | null qual_gres
        = if null $ lookupGRE env $
               LookupRdrName (mkRdrQual (moduleName mod) occ) SameNameSpace
          then NameNotInScope1
          else NameNotInScope2

        | otherwise
        = NameNotInScope1   -- Can happen if 'f' is bound twice in the module
                            -- Eg  f = True; g = 0; f = False
    where
      is_name :: Name -> Bool
      is_name name =
        assertPpr (isExternalName name) (ppr name) $
        nameModule name == mod && nameOccName name == occ

      -- See Note [pretendNameIsInScopeForPpr]
      pretendNameIsInScopeForPpr :: Bool
      pretendNameIsInScopeForPpr =
        any is_name
          [ liftedTypeKindTyConName
          , constraintKindTyConName
          , heqTyConName
          , coercibleTyConName
          , eqTyConName
          , tYPETyConName
          , fUNTyConName, unrestrictedFunTyConName
          , oneDataConName
          , listTyConName
          , manyDataConName ]
        || isJust (isTupleTyOcc_maybe mod occ)
        || isJust (isSumTyOcc_maybe mod occ)

      right_name gre = greDefinitionModule gre == Just mod

      unqual_gres = lookupGRE env (LookupRdrName (mkRdrUnqual occ) SameNameSpace)
      qual_gres   = filter right_name (lookupGRE env (LookupOccName occ SameNameSpace))

-- we can mention a module P:M
without the P: qualifier iff -- "import M" would resolve unambiguously to P:M. (if P is the -- current package we can just assume it is unqualified). mkPromTick :: PromotionTickContext -> GlobalRdrEnvX info -> QueryPromotionTick mkPromTick ptc env | ptcPrintRedundantPromTicks ptc = alwaysPrintPromTick | otherwise = print_prom_tick where print_prom_tick (PromotedItemListSyntax (IsEmptyOrSingleton eos)) = -- Ticked: '[], '[x] -- Unticked: [x,y], [x,y,z], and so on ptcListTuplePuns ptc && eos print_prom_tick PromotedItemTupleSyntax = ptcListTuplePuns ptc print_prom_tick (PromotedItemDataCon occ) | isPunnedDataConName occ -- '[], '(,), ''(,,) = ptcListTuplePuns ptc | Just occ' <- promoteOccName occ , [] <- lookupGRE env (LookupRdrName (mkRdrUnqual occ') SameNameSpace) = -- Could not find a corresponding type name in the environment, -- so the data name is unambiguous. Promotion tick not needed. False | otherwise = True isPunnedDataConName :: OccName -> Bool isPunnedDataConName occ = isDataOcc occ && case unpackFS (occNameFS occ) of '[':_ -> True '(':_ -> True _ -> False {- Note [pretendNameIsInScopeForPpr] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ c.f. Note [pretendNameIsInScope] in GHC.Builtin.Names Normally, a name is printed unqualified if it's in scope and unambiguous: ghci> :t not not :: Bool -> Bool Out of scope names are qualified: ghci> import Prelude hiding (Bool) ghci> :t not not :: GHC.Types.Bool -> GHC.Types.Bool And so are ambiguous names: ghci> data Bool ghci> :t not not :: Prelude.Bool -> Prelude.Bool However, these rules alone would lead to excessive qualification: ghci> :k Functor Functor :: (GHC.Types.Type -> GHC.Types.Type) -> GHC.Types.Constraint Even if the user has not imported Data.Kind, we would rather print: Functor :: (Type -> Type) -> Constraint So we maintain a list of names for which we only require that they are unambiguous. It reduces the amount of qualification in GHCi output and error messages thus improving readability. 
One potential problem here is that external tooling that relies on parsing GHCi output (e.g. Emacs mode for Haskell) requires names to be properly qualified to make sense of the output (see #11208). So extend this list with care. Side note (int-index): This function is distinct from GHC.Bulitin.Names.pretendNameIsInScope (used when filtering out instances), and perhaps we could unify them by taking a union, but I have not looked into what that would entail. -} -- | Creates a function for formatting modules based on two heuristics: -- (1) if the module is the current module, don't qualify, and (2) if there -- is only one exposed package which exports this module, don't qualify. mkQualModule :: UnitState -> Maybe HomeUnit -> QueryQualifyModule mkQualModule unit_state mhome_unit mod | Just home_unit <- mhome_unit , isHomeModule home_unit mod = False | [(_, pkgconfig)] <- lookup, mkUnit pkgconfig == moduleUnit mod -- this says: we are given a module P:M, is there just one exposed package -- that exposes a module M, and is it package P? = False | otherwise = True where lookup = lookupModuleInAllUnits unit_state (moduleName mod) -- | Creates a function for formatting packages based on two heuristics: -- (1) don't qualify if the package in question is "main", and (2) only qualify -- with a unit id if the package ID would be ambiguous. mkQualPackage :: UnitState -> QueryQualifyPackage mkQualPackage pkgs uid | uid == mainUnit || uid == interactiveUnit -- Skip the lookup if it's main, since it won't be in the package -- database! = False | Just pkgid <- mb_pkgid , searchPackageId pkgs pkgid `lengthIs` 1 -- this says: we are given a package pkg-0.1@MMM, are there only one -- exposed packages whose package ID is pkg-0.1? = False | otherwise = True where mb_pkgid = fmap unitPackageId (lookupUnit pkgs uid) -- | A function which only qualifies package names if necessary; but -- qualifies all other identifiers. 
pkgQual :: UnitState -> NamePprCtx pkgQual pkgs = alwaysQualify { queryQualifyPackage = mkQualPackage pkgs } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Reader.hs0000644000000000000000000023702307346545000021437 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName' is the type of names that come directly from the parser. They -- have not yet had their scoping and binding resolved by the renamer and can be -- thought of to a first approximation as an 'GHC.Types.Name.Occurrence.OccName' with an optional module -- qualifier -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Name.Reader ( -- * The main type RdrName(..), -- Constructors exported only to GHC.Iface.Binary -- ** Construction mkRdrUnqual, mkRdrQual, mkUnqual, mkVarUnqual, mkQual, mkOrig, nameRdrName, getRdrName, -- ** Destruction rdrNameOcc, rdrNameSpace, demoteRdrName, demoteRdrNameTv, promoteRdrName, isRdrDataCon, isRdrTyVar, isRdrTc, isQual, isQual_maybe, isUnqual, isOrig, isOrig_maybe, isExact, isExact_maybe, isSrcRdrName, -- * Local mapping of 'RdrName' to 'Name.Name' LocalRdrEnv, emptyLocalRdrEnv, extendLocalRdrEnv, extendLocalRdrEnvList, lookupLocalRdrEnv, lookupLocalRdrOcc, elemLocalRdrEnv, inLocalRdrEnvScope, localRdrEnvElts, minusLocalRdrEnv, minusLocalRdrEnvList, -- * Global mapping of 'RdrName' to 'GlobalRdrElt's GlobalRdrEnvX, GlobalRdrEnv, IfGlobalRdrEnv, emptyGlobalRdrEnv, mkGlobalRdrEnv, plusGlobalRdrEnv, 
extendGlobalRdrEnv, greOccName, pprGlobalRdrEnv, globalRdrEnvElts, globalRdrEnvLocal, -- ** Looking up 'GlobalRdrElt's FieldsOrSelectors(..), filterFieldGREs, allowGRE, LookupGRE(..), lookupGRE, WhichGREs(.., AllRelevantGREs, RelevantGREsFOS), greIsRelevant, LookupChild(..), lookupGRE_Name, lookupGRE_FieldLabel, getGRE_NameQualifier_maybes, transformGREs, pickGREs, pickGREsModExp, -- * GlobalRdrElts availFromGRE, greRdrNames, greSrcSpan, greQualModName, gresToAvailInfo, greDefinitionModule, greDefinitionSrcSpan, greFieldLabel_maybe, -- ** Global 'RdrName' mapping elements: 'GlobalRdrElt', 'Provenance', 'ImportSpec' GlobalRdrEltX(..), GlobalRdrElt, IfGlobalRdrElt, FieldGlobalRdrElt, greName, greNameSpace, greParent, greInfo, plusGRE, insertGRE, forceGlobalRdrEnv, hydrateGlobalRdrEnv, isLocalGRE, isImportedGRE, isRecFldGRE, fieldGREInfo, isDuplicateRecFldGRE, isNoFieldSelectorGRE, isFieldSelectorGRE, unQualOK, qualSpecOK, unQualSpecOK, pprNameProvenance, mkGRE, mkExactGRE, mkLocalGRE, mkLocalVanillaGRE, mkLocalTyConGRE, mkLocalConLikeGRE, mkLocalFieldGREs, gresToNameSet, -- ** Shadowing greClashesWith, shadowNames, -- ** Information attached to a 'GlobalRdrElt' ConLikeName(..), GREInfo(..), RecFieldInfo(..), plusGREInfo, recFieldConLike_maybe, recFieldInfo_maybe, fieldGRE_maybe, fieldGRELabel, -- ** Parent information Parent(..), greParent_maybe, mkParent, availParent, ImportSpec(..), ImpDeclSpec(..), ImpItemSpec(..), importSpecLoc, importSpecModule, isExplicitItem, bestImport, -- * Utils opIsAt ) where import GHC.Prelude import GHC.Data.Bag import GHC.Data.FastString import GHC.Data.Maybe import GHC.Types.Avail import GHC.Types.Basic import GHC.Types.GREInfo import GHC.Types.FieldLabel import GHC.Types.Name import GHC.Types.Name.Env ( NameEnv, nonDetNameEnvElts, emptyNameEnv, extendNameEnv_Acc ) import GHC.Types.Name.Set import GHC.Types.PkgQual import GHC.Types.SrcLoc as SrcLoc import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import 
GHC.Builtin.Uniques ( isFldNSUnique ) import GHC.Unit.Module import GHC.Utils.Misc as Utils import GHC.Utils.Outputable import GHC.Utils.Panic import Control.DeepSeq import Control.Monad ( guard ) import Data.Data import Data.List ( sort ) import qualified Data.List.NonEmpty as NE import qualified Data.Map.Strict as Map import qualified Data.Semigroup as S import System.IO.Unsafe ( unsafePerformIO ) {- ************************************************************************ * * \subsection{The main data type} * * ************************************************************************ -} -- | Reader Name -- -- Do not use the data constructors of RdrName directly: prefer the family -- of functions that creates them, such as 'mkRdrUnqual' -- -- - Note: A Located RdrName will only have API Annotations if it is a -- compound one, -- e.g. -- -- > `bar` -- > ( ~ ) -- data RdrName = Unqual OccName -- ^ Unqualified name -- -- Used for ordinary, unqualified occurrences, e.g. @x@, @y@ or @Foo@. -- Create such a 'RdrName' with 'mkRdrUnqual' | Qual ModuleName OccName -- ^ Qualified name -- -- A qualified name written by the user in -- /source/ code. The module isn't necessarily -- the module where the thing is defined; -- just the one from which it is imported. -- Examples are @Bar.x@, @Bar.y@ or @Bar.Foo@. -- Create such a 'RdrName' with 'mkRdrQual' | Orig Module OccName -- ^ Original name -- -- An original name; the module is the /defining/ module. -- This is used when GHC generates code that will be fed -- into the renamer (e.g. from deriving clauses), but where -- we want to say \"Use Prelude.map dammit\". One of these -- can be created with 'mkOrig' | Exact Name -- ^ Exact name -- -- We know exactly the 'Name'. 
This is used: -- -- (1) When the parser parses built-in syntax like @[]@ -- and @(,)@, but wants a 'RdrName' from it -- -- (2) By Template Haskell, when TH has generated a unique name -- -- Such a 'RdrName' can be created by using 'getRdrName' on a 'Name' deriving Data {- ************************************************************************ * * \subsection{Simple functions} * * ************************************************************************ -} instance HasOccName RdrName where occName = rdrNameOcc rdrNameOcc :: RdrName -> OccName rdrNameOcc (Qual _ occ) = occ rdrNameOcc (Unqual occ) = occ rdrNameOcc (Orig _ occ) = occ rdrNameOcc (Exact name) = nameOccName name rdrNameSpace :: RdrName -> NameSpace rdrNameSpace = occNameSpace . rdrNameOcc -- demoteRdrName lowers the NameSpace of RdrName. -- See Note [Demotion] in GHC.Rename.Env demoteRdrName :: RdrName -> Maybe RdrName demoteRdrName (Unqual occ) = fmap Unqual (demoteOccName occ) demoteRdrName (Qual m occ) = fmap (Qual m) (demoteOccName occ) demoteRdrName (Orig _ _) = Nothing demoteRdrName (Exact _) = Nothing demoteRdrNameTv :: RdrName -> Maybe RdrName demoteRdrNameTv (Unqual occ) = fmap Unqual (demoteOccTvName occ) demoteRdrNameTv (Qual m occ) = fmap (Qual m) (demoteOccTvName occ) demoteRdrNameTv (Orig _ _) = Nothing demoteRdrNameTv (Exact _) = Nothing -- promoteRdrName promotes the NameSpace of RdrName. -- See Note [Promotion] in GHC.Rename.Env. 
promoteRdrName :: RdrName -> Maybe RdrName promoteRdrName (Unqual occ) = fmap Unqual (promoteOccName occ) promoteRdrName (Qual m occ) = fmap (Qual m) (promoteOccName occ) promoteRdrName (Orig _ _) = Nothing promoteRdrName (Exact _) = Nothing -- These two are the basic constructors mkRdrUnqual :: OccName -> RdrName mkRdrUnqual occ = Unqual occ mkRdrQual :: ModuleName -> OccName -> RdrName mkRdrQual mod occ = Qual mod occ mkOrig :: Module -> OccName -> RdrName mkOrig mod occ = Orig mod occ --------------- -- These two are used when parsing source files -- They do encode the module and occurrence names mkUnqual :: NameSpace -> FastString -> RdrName mkUnqual sp n = Unqual (mkOccNameFS sp n) mkVarUnqual :: FastString -> RdrName mkVarUnqual n = Unqual (mkVarOccFS n) -- | Make a qualified 'RdrName' in the given namespace and where the 'ModuleName' and -- the 'OccName' are taken from the first and second elements of the tuple respectively mkQual :: NameSpace -> (FastString, FastString) -> RdrName mkQual sp (m, n) = Qual (mkModuleNameFS m) (mkOccNameFS sp n) getRdrName :: NamedThing thing => thing -> RdrName getRdrName name = nameRdrName (getName name) nameRdrName :: Name -> RdrName nameRdrName name = Exact name -- Keep the Name even for Internal names, so that the -- unique is still there for debug printing, particularly -- of Types (which are converted to IfaceTypes before printing) nukeExact :: Name -> RdrName nukeExact n | isExternalName n = Orig (nameModule n) (nameOccName n) | otherwise = Unqual (nameOccName n) isRdrDataCon :: RdrName -> Bool isRdrTyVar :: RdrName -> Bool isRdrTc :: RdrName -> Bool isRdrDataCon rn = isDataOcc (rdrNameOcc rn) isRdrTyVar rn = isTvOcc (rdrNameOcc rn) isRdrTc rn = isTcOcc (rdrNameOcc rn) isSrcRdrName :: RdrName -> Bool isSrcRdrName (Unqual _) = True isSrcRdrName (Qual _ _) = True isSrcRdrName _ = False isUnqual :: RdrName -> Bool isUnqual (Unqual _) = True isUnqual _ = False isQual :: RdrName -> Bool isQual (Qual _ _) = True isQual _ = 
False isQual_maybe :: RdrName -> Maybe (ModuleName, OccName) isQual_maybe (Qual m n) = Just (m,n) isQual_maybe _ = Nothing isOrig :: RdrName -> Bool isOrig (Orig _ _) = True isOrig _ = False isOrig_maybe :: RdrName -> Maybe (Module, OccName) isOrig_maybe (Orig m n) = Just (m,n) isOrig_maybe _ = Nothing isExact :: RdrName -> Bool isExact (Exact _) = True isExact _ = False isExact_maybe :: RdrName -> Maybe Name isExact_maybe (Exact n) = Just n isExact_maybe _ = Nothing {- ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Outputable RdrName where ppr (Exact name) = ppr name ppr (Unqual occ) = ppr occ ppr (Qual mod occ) = ppr mod <> dot <> ppr occ ppr (Orig mod occ) = getPprStyle (\sty -> pprModulePrefix sty mod occ <> ppr occ) instance OutputableBndr RdrName where pprBndr _ n | isTvOcc (rdrNameOcc n) = char '@' <> ppr n | otherwise = ppr n pprInfixOcc rdr = pprInfixVar (isSymOcc (rdrNameOcc rdr)) (ppr rdr) pprPrefixOcc rdr | Just name <- isExact_maybe rdr = pprPrefixName name -- pprPrefixName has some special cases, so -- we delegate to them rather than reproduce them | otherwise = pprPrefixVar (isSymOcc (rdrNameOcc rdr)) (ppr rdr) instance Eq RdrName where (Exact n1) == (Exact n2) = n1==n2 -- Convert exact to orig (Exact n1) == r2@(Orig _ _) = nukeExact n1 == r2 r1@(Orig _ _) == (Exact n2) = r1 == nukeExact n2 (Orig m1 o1) == (Orig m2 o2) = m1==m2 && o1==o2 (Qual m1 o1) == (Qual m2 o2) = m1==m2 && o1==o2 (Unqual o1) == (Unqual o2) = o1==o2 _ == _ = False instance Ord RdrName where a <= b = case (a `compare` b) of { LT -> True; EQ -> True; GT -> False } a < b = case (a `compare` b) of { LT -> True; EQ -> False; GT -> False } a >= b = case (a `compare` b) of { LT -> False; EQ -> True; GT -> True } a > b = case (a `compare` b) of { LT -> False; EQ -> False; GT -> True } -- Exact < Unqual < Qual < Orig -- [Note: Apr 2004] We used to 
use nukeExact to convert Exact to Orig -- before comparing so that Prelude.map == the exact Prelude.map, but -- that meant that we reported duplicates when renaming bindings -- generated by Template Haskell; e.g -- do { n1 <- newName "foo"; n2 <- newName "foo"; -- } -- I think we can do without this conversion compare (Exact n1) (Exact n2) = n1 `compare` n2 compare (Exact _) _ = LT compare (Unqual _) (Exact _) = GT compare (Unqual o1) (Unqual o2) = o1 `compare` o2 compare (Unqual _) _ = LT compare (Qual _ _) (Exact _) = GT compare (Qual _ _) (Unqual _) = GT compare (Qual m1 o1) (Qual m2 o2) = compare o1 o2 S.<> compare m1 m2 compare (Qual _ _) (Orig _ _) = LT compare (Orig m1 o1) (Orig m2 o2) = compare o1 o2 S.<> compare m1 m2 compare (Orig _ _) _ = GT {- ************************************************************************ * * LocalRdrEnv * * ************************************************************************ -} {- Note [LocalRdrEnv] ~~~~~~~~~~~~~~~~~~~~~ The LocalRdrEnv is used to store local bindings (let, where, lambda, case). * It is keyed by OccName, because we never use it for qualified names. * It maps the OccName to a Name. That Name is almost always an Internal Name, but (hackily) it can be External too for top-level pattern bindings. See Note [bindLocalNames for an External name] in GHC.Rename.Pat * We keep the current mapping (lre_env), *and* the set of all Names in scope (lre_in_scope). Reason: see Note [Splicing Exact names] in GHC.Rename.Env. -} -- | Local Reader Environment -- See Note [LocalRdrEnv] data LocalRdrEnv = LRE { lre_env :: OccEnv Name , lre_in_scope :: NameSet } instance Outputable LocalRdrEnv where ppr (LRE {lre_env = env, lre_in_scope = ns}) = hang (text "LocalRdrEnv {") 2 (vcat [ text "env =" <+> pprOccEnv ppr_elt env , text "in_scope =" <+> pprUFM (getUniqSet ns) (braces . 
pprWithCommas ppr) ] <+> char '}') where ppr_elt name = parens (ppr (nameOccName name)) <+> ppr name -- So we can see if the keys line up correctly emptyLocalRdrEnv :: LocalRdrEnv emptyLocalRdrEnv = LRE { lre_env = emptyOccEnv , lre_in_scope = emptyNameSet } extendLocalRdrEnv :: LocalRdrEnv -> Name -> LocalRdrEnv -- See Note [LocalRdrEnv] extendLocalRdrEnv lre@(LRE { lre_env = env, lre_in_scope = ns }) name = lre { lre_env = extendOccEnv env (nameOccName name) name , lre_in_scope = extendNameSet ns name } extendLocalRdrEnvList :: LocalRdrEnv -> [Name] -> LocalRdrEnv -- See Note [LocalRdrEnv] extendLocalRdrEnvList lre@(LRE { lre_env = env, lre_in_scope = ns }) names = lre { lre_env = extendOccEnvList env [(nameOccName n, n) | n <- names] , lre_in_scope = extendNameSetList ns names } lookupLocalRdrEnv :: LocalRdrEnv -> RdrName -> Maybe Name lookupLocalRdrEnv (LRE { lre_env = env, lre_in_scope = ns }) rdr | Unqual occ <- rdr = lookupOccEnv env occ -- See Note [Local bindings with Exact Names] | Exact name <- rdr , name `elemNameSet` ns = Just name | otherwise = Nothing lookupLocalRdrOcc :: LocalRdrEnv -> OccName -> Maybe Name lookupLocalRdrOcc (LRE { lre_env = env }) occ = lookupOccEnv env occ elemLocalRdrEnv :: RdrName -> LocalRdrEnv -> Bool elemLocalRdrEnv rdr_name (LRE { lre_env = env, lre_in_scope = ns }) = case rdr_name of Unqual occ -> occ `elemOccEnv` env Exact name -> name `elemNameSet` ns -- See Note [Local bindings with Exact Names] Qual {} -> False Orig {} -> False localRdrEnvElts :: LocalRdrEnv -> [Name] localRdrEnvElts (LRE { lre_env = env }) = nonDetOccEnvElts env inLocalRdrEnvScope :: Name -> LocalRdrEnv -> Bool -- This is the point of the NameSet inLocalRdrEnvScope name (LRE { lre_in_scope = ns }) = name `elemNameSet` ns minusLocalRdrEnv :: LocalRdrEnv -> OccEnv a -> LocalRdrEnv minusLocalRdrEnv lre@(LRE { lre_env = env }) occs = lre { lre_env = minusOccEnv env occs } minusLocalRdrEnvList :: LocalRdrEnv -> [OccName] -> LocalRdrEnv minusLocalRdrEnvList 
lre@(LRE { lre_env = env }) occs = lre { lre_env = delListFromOccEnv env occs } {- Note [Local bindings with Exact Names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ With Template Haskell we can make local bindings that have Exact Names. Computing shadowing etc may use elemLocalRdrEnv (at least it certainly does so in GHC.Rename.HsType.bindHsQTyVars), so for an Exact Name we must consult the in-scope-name-set. ************************************************************************ * * GlobalRdrEnv * * ************************************************************************ -} -- | Global Reader Environment type GlobalRdrEnv = GlobalRdrEnvX GREInfo -- ^ Keyed by 'OccName'; when looking up a qualified name -- we look up the 'OccName' part, and then check the 'Provenance' -- to see if the appropriate qualification is valid. This -- saves routinely doubling the size of the env by adding both -- qualified and unqualified names to the domain. -- -- The list in the codomain is required because there may be name clashes -- These only get reported on lookup, not on construction -- -- INVARIANT 1: All the members of the list have distinct -- 'gre_name' fields; that is, no duplicate Names -- -- INVARIANT 2: Imported provenance => Name is an ExternalName -- However LocalDefs can have an InternalName. This -- happens only when type-checking a [d| ... |] Template -- Haskell quotation; see this note in GHC.Rename.Names -- Note [Top-level Names in Template Haskell decl quotes] -- -- INVARIANT 3: If the GlobalRdrEnv maps [occ -> gre], then -- greOccName gre = occ -- | A 'GlobalRdrEnv' in which the 'GlobalRdrElt's don't have any 'GREInfo' -- attached to them. This is useful to avoid space leaks, see Note [IfGlobalRdrEnv]. type IfGlobalRdrEnv = GlobalRdrEnvX () -- | Parametrises 'GlobalRdrEnv' over the presence or absence of 'GREInfo'. -- -- See Note [IfGlobalRdrEnv]. 
type GlobalRdrEnvX info = OccEnv [GlobalRdrEltX info] -- | Global Reader Element -- -- Something in scope in the renamer; usually a member of the 'GlobalRdrEnv'. -- See Note [GlobalRdrElt provenance]. type GlobalRdrElt = GlobalRdrEltX GREInfo -- | A 'GlobalRdrElt' in which we stripped out the 'GREInfo' field, -- in order to avoid space leaks. -- -- See Note [IfGlobalRdrEnv]. type IfGlobalRdrElt = GlobalRdrEltX () -- | Global Reader Element -- -- Something in scope in the renamer; usually a member of the 'GlobalRdrEnv'. -- See Note [GlobalRdrElt provenance]. -- -- Why do we parametrise over the 'gre_info' field? See Note [IfGlobalRdrEnv]. data GlobalRdrEltX info = GRE { gre_name :: !Name , gre_par :: !Parent -- ^ See Note [Parents] , gre_lcl :: !Bool -- ^ True <=> the thing was defined locally , gre_imp :: !(Bag ImportSpec) -- ^ In scope through these imports -- See Note [GlobalRdrElt provenance] for the relation between gre_lcl and gre_imp. , gre_info :: info -- ^ Information the renamer knows about this particular 'Name'. -- -- Careful about forcing this field! Forcing it can trigger -- the loading of interface files. -- -- Note [Retrieving the GREInfo from interfaces] in GHC.Types.GREInfo. } deriving (Data) instance NFData a => NFData (GlobalRdrEltX a) where rnf (GRE name par _ imp info) = rnf name `seq` rnf par `seq` rnf imp `seq` rnf info {- Note [IfGlobalRdrEnv] ~~~~~~~~~~~~~~~~~~~~~~~~ Information pertinent to the renamer about a 'Name' is stored in the fields of 'GlobalRdrElt'. The 'gre_info' field, described in Note [GREInfo] in GHC.Types.GREInfo, is a bit special: as Note [Retrieving the GREInfo from interfaces] in GHC.Types.GREInfo describes, for imported 'Name's it is usually obtained by a look up in a type environment, and forcing can cause the interface file for the module defining the 'Name' to be loaded. 
As described in Note [Forcing GREInfo] in GHC.Types.GREInfo, keeping it a thunk can cause space leaks, while forcing it can cause extra work to be done. So it's best to discard it when we don't need it, for example when we are about to store it in a 'ModIface'. We thus parametrise 'GlobalRdrElt' (and 'GlobalRdrEnv') over the presence or absence of the 'GREInfo' field. - When we are about to stash the 'GlobalRdrElt' in a long-lived data structure, e.g. a 'ModIface', we force it by setting all the 'GREInfo' fields to '()'. See 'forceGlobalRdrEnv'. - To go back the other way, we use 'hydrateGlobalRdrEnv', which sets the 'gre_info' fields back to lazy lookups. This parametrisation also helps ensure that we don't accidentally force the GREInfo field (which can cause unnecessary loading of interface files). In particular, the 'lookupGRE' function is statically guaranteed to not consult the 'GREInfo' field when using 'SameNameSpace', which is important as we sometimes need to use this function with an 'IfaceGlobalRdrEnv' in which the 'GREInfo' fields have been stripped. -} -- | A 'FieldGlobalRdrElt' is a 'GlobalRdrElt' -- in which the 'gre_info' field is 'IAmRecField'. type FieldGlobalRdrElt = GlobalRdrElt greName :: GlobalRdrEltX info -> Name greName = gre_name greNameSpace :: GlobalRdrEltX info -> NameSpace greNameSpace = nameNameSpace . 
greName greParent :: GlobalRdrEltX info -> Parent greParent = gre_par greInfo :: GlobalRdrElt -> GREInfo greInfo = gre_info -- | See Note [Parents] data Parent = NoParent | ParentIs { par_is :: !Name } deriving (Eq, Data) instance Outputable Parent where ppr NoParent = empty ppr (ParentIs n) = text "parent:" <> ppr n instance NFData Parent where rnf NoParent = () rnf (ParentIs n) = rnf n plusParent :: Parent -> Parent -> Parent -- See Note [Combining parents] plusParent p1@(ParentIs _) p2 = hasParent p1 p2 plusParent p1 p2@(ParentIs _) = hasParent p2 p1 plusParent NoParent NoParent = NoParent hasParent :: Parent -> Parent -> Parent #if defined(DEBUG) hasParent p NoParent = p hasParent p p' | p /= p' = pprPanic "hasParent" (ppr p <+> ppr p') -- Parents should agree #endif hasParent p _ = p {- Note [GlobalRdrElt provenance] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The gre_lcl and gre_imp fields of a GlobalRdrElt describe its "provenance", i.e. how the Name came to be in scope. It can be in scope in one of the following three ways: A. The Name was locally bound, in the current module. gre_lcl = True The renamer adds this Name to the GlobalRdrEnv after renaming the binding. See the calls to "extendGlobalRdrEnvRn" in GHC.Rename.Module.rnSrcDecls. B. The Name was imported gre_imp = Just imps <=> brought into scope by the imports "imps" The renamer adds this Name to the GlobalRdrEnv after processing the imports. See GHC.Rename.Names.filterImports and GHC.Tc.Module.tcRnImports. C. We followed an exact reference (i.e. an Exact or Orig RdrName) gre_lcl = False, gre_imp = Nothing In this case, we directly fetch a Name and its GREInfo from direct reference. We don't add it to the GlobalRdrEnv. See "GHC.Rename.Env.lookupExactOrOrig". It is just about possible to have *both* gre_lcl = True and gre_imp = Just imps. This can happen with module loops: a Name is defined locally in A, and also brought into scope by importing a module that SOURCE-imported A. 
Example (#7672): A.hs-boot module A where data T B.hs module B(Decl.T) where import {-# SOURCE #-} qualified A as Decl A.hs module A where import qualified B data T = Z | S B.T In A.hs, 'T' is locally bound, *and* imported as B.T. Note [Parents] ~~~~~~~~~~~~~~~~~ The children of a Name are the things that are abbreviated by the ".." notation in export lists. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parent Children ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ data T Data constructors Record-field ids data family T Data constructors and record-field ids of all visible data instances of T class C Class operations Associated type constructors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constructor Meaning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NoParent Not bundled with a type constructor. ParentIs n Bundled with the type constructor corresponding to n. Pattern synonym constructors (and their record fields, if any) are unusual: their gre_par is NoParent in the module in which they are defined. However, a pattern synonym can be bundled with a type constructor on export, in which case whenever the pattern synonym is imported the gre_par will be ParentIs. Thus the gre_name and gre_par fields are independent, because a normal datatype introduces FieldGlobalRdrElts using ParentIs, but a record pattern synonym can introduce FieldGlobalRdrElts that use NoParent. (In the past we represented fields using an additional constructor of the Parent type, which could not adequately represent this situation.) See also Note [Representing pattern synonym fields in AvailInfo] in GHC.Types.Avail. Note [Combining parents] ~~~~~~~~~~~~~~~~~~~~~~~~ With an associated type we might have module M where class C a where data T a op :: T a -> a instance C Int where data T Int = TInt instance C Bool where data T Bool = TBool Then: C is the parent of T T is the parent of TInt and TBool So: in an export list C(..) is short for C( op, T ) T(..) 
is short for T( TInt, TBool ) Module M exports everything, so its exports will be AvailTC C [C,T,op] AvailTC T [T,TInt,TBool] On import we convert to GlobalRdrElt and then combine those. For T that will mean we have one GRE with Parent C one GRE with NoParent That's why plusParent picks the "best" case. -} mkGRE :: (Name -> Maybe ImportSpec) -> GREInfo -> Parent -> Name -> GlobalRdrElt mkGRE prov_fn info par n = case prov_fn n of -- Nothing => bound locally -- Just is => imported from 'is' Nothing -> GRE { gre_name = n, gre_par = par , gre_lcl = True, gre_imp = emptyBag , gre_info = info } Just is -> GRE { gre_name = n, gre_par = par , gre_lcl = False, gre_imp = unitBag is , gre_info = info } mkExactGRE :: Name -> GREInfo -> GlobalRdrElt mkExactGRE nm info = GRE { gre_name = nm, gre_par = NoParent , gre_lcl = False, gre_imp = emptyBag , gre_info = info } mkLocalGRE :: GREInfo -> Parent -> Name -> GlobalRdrElt mkLocalGRE = mkGRE (const Nothing) mkLocalVanillaGRE :: Parent -> Name -> GlobalRdrElt mkLocalVanillaGRE = mkLocalGRE Vanilla -- | Create a local 'GlobalRdrElt' for a 'TyCon'. mkLocalTyConGRE :: TyConFlavour Name -> Name -> GlobalRdrElt mkLocalTyConGRE flav nm = mkLocalGRE (IAmTyCon flav) par nm where par = case tyConFlavourAssoc_maybe flav of Nothing -> NoParent Just p -> ParentIs p mkLocalConLikeGRE :: Parent -> (ConLikeName, ConInfo) -> GlobalRdrElt mkLocalConLikeGRE p (con_nm, con_info) = mkLocalGRE (IAmConLike con_info) p (conLikeName_Name con_nm ) mkLocalFieldGREs :: Parent -> [(ConLikeName, ConInfo)] -> [GlobalRdrElt] mkLocalFieldGREs p cons = [ mkLocalGRE (IAmRecField fld_info) p fld_nm | (S.Arg fld_nm fl, fl_cons) <- flds , let fld_info = RecFieldInfo { recFieldLabel = fl , recFieldCons = fl_cons } ] where -- We are given a map taking a constructor to its fields, but we want -- a map taking a field to the constructors which have it. -- We thus need to convert [(Con, [Field])] into [(Field, [Con])]. 
flds = Map.toList $ Map.fromListWith unionUniqSets [ (S.Arg (flSelector fl) fl, unitUniqSet con) | (con, con_info) <- cons , ConInfo _ (ConHasRecordFields fls) <- [con_info] , fl <- NE.toList fls ] instance HasOccName (GlobalRdrEltX info) where occName = greOccName greOccName :: GlobalRdrEltX info -> OccName greOccName ( GRE { gre_name = nm } ) = nameOccName nm -- | The SrcSpan of the name pointed to by the GRE. greDefinitionSrcSpan :: GlobalRdrEltX info -> SrcSpan greDefinitionSrcSpan = nameSrcSpan . greName -- | The module in which the name pointed to by the GRE is defined. greDefinitionModule :: GlobalRdrEltX info -> Maybe Module greDefinitionModule = nameModule_maybe . greName greQualModName :: Outputable info => GlobalRdrEltX info -> ModuleName -- Get a suitable module qualifier for the GRE -- (used in mkPrintUnqualified) -- Precondition: the gre_name is always External greQualModName gre@(GRE { gre_lcl = lcl, gre_imp = iss }) | lcl, Just mod <- greDefinitionModule gre = moduleName mod | Just is <- headMaybe iss = is_as (is_decl is) | otherwise = pprPanic "greQualModName" (ppr gre) greRdrNames :: GlobalRdrEltX info -> [RdrName] greRdrNames gre@GRE{ gre_lcl = lcl, gre_imp = iss } = bagToList $ (if lcl then unitBag unqual else emptyBag) `unionBags` concatMapBag do_spec (mapBag is_decl iss) where occ = greOccName gre unqual = Unqual occ do_spec decl_spec | is_qual decl_spec = unitBag qual | otherwise = listToBag [unqual,qual] where qual = Qual (is_as decl_spec) occ -- the SrcSpan that pprNameProvenance prints out depends on whether -- the Name is defined locally or not: for a local definition the -- definition site is used, otherwise the location of the import -- declaration. 
-- We want to sort the export locations in
-- exportClashErr by this SrcSpan, we need to extract it:
greSrcSpan :: Outputable info => GlobalRdrEltX info -> SrcSpan
greSrcSpan gre@(GRE { gre_lcl = lcl, gre_imp = iss } )
  | lcl                      = greDefinitionSrcSpan gre
  | Just is <- headMaybe iss = is_dloc (is_decl is)
  | otherwise                = pprPanic "greSrcSpan" (ppr gre)

mkParent :: Name -> AvailInfo -> Parent
mkParent _ (Avail _)     = NoParent
mkParent n (AvailTC m _)
  | n == m    = NoParent
  | otherwise = ParentIs m

availParent :: AvailInfo -> Parent
availParent (AvailTC m _) = ParentIs m
availParent (Avail {})    = NoParent

greParent_maybe :: GlobalRdrEltX info -> Maybe Name
greParent_maybe gre =
  case gre_par gre of
    NoParent   -> Nothing
    ParentIs n -> Just n

gresToNameSet :: [GlobalRdrEltX info] -> NameSet
gresToNameSet gres = foldr add emptyNameSet gres
  where
    add gre set = extendNameSet set (greName gre)

-- | Takes a list of distinct GREs and folds them
-- into AvailInfos. This is more efficient than mapping each individual
-- GRE to an AvailInfo and then folding using `plusAvail`, but needs the
-- uniqueness assumption.
gresToAvailInfo :: forall info. [GlobalRdrEltX info] -> [AvailInfo]
gresToAvailInfo gres = nonDetNameEnvElts avail_env
  where
    avail_env :: NameEnv AvailInfo -- Keyed by the parent
    (avail_env, _) = foldl' add (emptyNameEnv, emptyNameSet) gres

    add :: (NameEnv AvailInfo, NameSet)
        -> GlobalRdrEltX info
        -> (NameEnv AvailInfo, NameSet)
    add (env, done) gre
      | name `elemNameSet` done
      = (env, done) -- Don't insert twice into the AvailInfo
      | otherwise
      = ( extendNameEnv_Acc comb availFromGRE env key gre
        , done `extendNameSet` name )
      where
        name = greName gre
        key = case greParent_maybe gre of
                Just parent -> parent
                Nothing     -> greName gre

        -- We want to insert the child `k` into a list of children but
        -- need to maintain the invariant that the parent is first.
        --
        -- We also use the invariant that `k` is not already in `ns`.
        insertChildIntoChildren :: Name -> [Name] -> Name -> [Name]
        insertChildIntoChildren _ [] k = [k]
        insertChildIntoChildren p (n:ns) k
          | p == k    = k:n:ns
          | otherwise = n:k:ns

        comb :: GlobalRdrEltX info -> AvailInfo -> AvailInfo
        comb _   (Avail n)      = Avail n -- Duplicated name, should not happen
        comb gre (AvailTC m ns) =
          case gre_par gre of
            NoParent    -> AvailTC m (greName gre:ns) -- Not sure this ever happens
            ParentIs {} -> AvailTC m (insertChildIntoChildren m ns (greName gre))

    availFromGRE :: GlobalRdrEltX info -> AvailInfo
    availFromGRE (GRE { gre_name = child, gre_par = parent }) =
      case parent of
        ParentIs p -> AvailTC p [child]
        NoParent
          | isTyConName child
          -- NB: don't force the GREInfo field unnecessarily.
          -> AvailTC child [child]
          | otherwise
          -> Avail child

emptyGlobalRdrEnv :: GlobalRdrEnvX info
emptyGlobalRdrEnv = emptyOccEnv

globalRdrEnvElts :: GlobalRdrEnvX info -> [GlobalRdrEltX info]
globalRdrEnvElts env = nonDetFoldOccEnv (++) [] env

globalRdrEnvLocal :: GlobalRdrEnvX info -> GlobalRdrEnvX info
globalRdrEnvLocal = mapOccEnv (filter isLocalGRE)

-- | Drop all 'GREInfo' fields in a 'GlobalRdrEnv' in order to
-- avoid space leaks.
-- See Note [Forcing GREInfo] in GHC.Types.GREInfo.
forceGlobalRdrEnv :: GlobalRdrEnvX info -> IfGlobalRdrEnv
forceGlobalRdrEnv rdrs =
  strictMapOccEnv (strictMap (\ gre -> gre { gre_info = ()})) rdrs

-- | Hydrate a previously dehydrated 'GlobalRdrEnv',
-- by (lazily!) looking up the 'GREInfo' using the provided function.
--
-- See Note [Forcing GREInfo] in GHC.Types.GREInfo.
hydrateGlobalRdrEnv :: forall info noInfo
                    .  (Name -> IO info)
                    -> GlobalRdrEnvX noInfo
                    -> GlobalRdrEnvX info
hydrateGlobalRdrEnv f = mapOccEnv (fmap g)
  where
    g gre = gre { gre_info = unsafePerformIO $ f (greName gre) }
      -- NB: use unsafePerformIO to delay the lookup until it is forced.
      -- See also 'GHC.Rename.Env.lookupGREInfo'.
instance Outputable info => Outputable (GlobalRdrEltX info) where
  ppr gre = hang (ppr (greName gre) <+> ppr (gre_par gre) <+> ppr (gre_info gre))
               2 (pprNameProvenance gre)

pprGlobalRdrEnv :: Bool -> GlobalRdrEnv -> SDoc
pprGlobalRdrEnv locals_only env
  = vcat [ text "GlobalRdrEnv" <+> ppWhen locals_only (text "(locals only)")
             <+> lbrace
         , nest 2 (vcat [ pp (remove_locals gre_list)
                        | gre_list <- nonDetOccEnvElts env ]
             <+> rbrace) ]
  where
    remove_locals gres | locals_only = filter isLocalGRE gres
                       | otherwise   = gres
    pp []   = empty
    pp gres@(gre:_) = hang (ppr occ <> colon)
                         2 (vcat (map ppr gres))
      where
        occ = nameOccName (greName gre)

{- Note [NoFieldSelectors]
~~~~~~~~~~~~~~~~~~~~~~~
The NoFieldSelectors extension allows record fields to be defined without
bringing the corresponding selector functions into scope.  However, such
fields may still be used in contexts such as record construction, pattern
matching or update.  This requires us to distinguish contexts in which
selectors are required from those in which any field may be used.  For example:

  {-# LANGUAGE NoFieldSelectors #-}
  module M (T(foo), foo) where  -- T(foo) refers to the field,
                                -- unadorned foo to the value binding
    data T = MkT { foo :: Int }
    foo = ()
    bar = foo  -- refers to the value binding, field ignored

  module N where
    import M (T(..))
    baz = MkT { foo = 3 }  -- refers to the field
    oops = foo -- an error: the field is in scope but the value binding is not

Each 'FieldLabel' indicates (in the 'flHasFieldSelector' field) whether the
FieldSelectors extension was enabled in the defining module.  This allows them
to be filtered out by 'filterFieldGREs'.

Even when NoFieldSelectors is in use, we still generate selector functions
internally. For example, the expression
   getField @"foo" t
or (with dot-notation)
   t.foo
extracts the `foo` field of t::T, and hence needs the selector function
(see Note [HasField instances] in GHC.Tc.Instance.Class).

In many of the name lookup functions in this module we pass a
FieldsOrSelectors value, indicating what we are looking for:

 * WantNormal: fields are in scope only if they have an accompanying selector
   function, e.g. we are looking up a variable in an expression
   (lookupExprOccRn).

 * WantBoth: any name or field will do, regardless of whether the selector
   function is available, e.g. record updates (lookupRecUpdFields) with
   NoDisambiguateRecordFields.

 * WantField: any field will do, regardless of whether the selector function
   is available, but ignoring any non-field names, e.g. record updates
   (lookupRecUpdFields with DisambiguateRecordFields.

-----------------------------------------------------------------------------------
Context                                  FieldsOrSelectors
-----------------------------------------------------------------------------------
Record construction/pattern match        WantField, but unless DisambiguateRecordFields
e.g. MkT { foo = 3 }                     is in effect, also look up using WantBoth
Record update, e.g. e { foo = 3 }        to report when a non-field clashes with a field.
:info in GHCi                            WantBoth
Variable occurrence in expression        WantNormal
Type variable, data constructor
Pretty much everything else
-----------------------------------------------------------------------------------
-}

fieldGRE_maybe :: GlobalRdrElt -> Maybe FieldGlobalRdrElt
fieldGRE_maybe gre = do
  guard (isRecFldGRE gre)
  return gre

fieldGRELabel :: HasDebugCallStack => FieldGlobalRdrElt -> FieldLabel
fieldGRELabel = recFieldLabel . fieldGREInfo

fieldGREInfo :: HasDebugCallStack => FieldGlobalRdrElt -> RecFieldInfo
fieldGREInfo gre
  = assertPpr (isRecFldGRE gre) (ppr gre) $
    case greInfo gre of
      IAmRecField info -> info
      info -> pprPanic "fieldGREInfo" $
                vcat [ text "gre_name:" <+> ppr (greName gre)
                     , text "info:" <+> ppr info ]

recFieldConLike_maybe :: HasDebugCallStack => GlobalRdrElt -> Maybe ConInfo
recFieldConLike_maybe gre =
  case greInfo gre of
    IAmConLike info -> Just info
    _               -> Nothing

recFieldInfo_maybe :: HasDebugCallStack => GlobalRdrElt -> Maybe RecFieldInfo
recFieldInfo_maybe gre =
  case greInfo gre of
    IAmRecField info -> assertPpr (isRecFldGRE gre) (ppr gre) $ Just info
    _                -> Nothing

-- | When looking up GREs, we may or may not want to include fields that were
-- defined in modules with @NoFieldSelectors@ enabled.  See Note
-- [NoFieldSelectors].
data FieldsOrSelectors
    = WantNormal -- ^ Include normal names, and fields with selectors, but
                 -- ignore fields without selectors.
    | WantBoth   -- ^ Include normal names and all fields (regardless of whether
                 -- they have selectors).
    | WantField  -- ^ Include only fields, with or without selectors, ignoring
                 -- any non-fields in scope.
  deriving (Eq, Show)

filterFieldGREs :: FieldsOrSelectors -> [GlobalRdrElt] -> [GlobalRdrElt]
filterFieldGREs WantBoth = id
filterFieldGREs fos      = filter (allowGRE fos)

allowGRE :: FieldsOrSelectors -> GlobalRdrElt -> Bool
allowGRE WantBoth _
  = True
allowGRE WantNormal gre
  -- NB: we only need to consult the GREInfo for record field GREs,
  -- to check whether they define field selectors.
  -- By checking 'isRecFldGRE' first, which only consults the NameSpace,
  -- we avoid forcing the GREInfo for things that aren't record fields.
  | isRecFldGRE gre
  = flHasFieldSelector (fieldGRELabel gre) == FieldSelectors
  | otherwise
  = True
allowGRE WantField gre
  = isRecFldGRE gre

-- | What should we look up in a 'GlobalRdrEnv'? Should we only look up
-- names with the exact same 'OccName', or do we allow different 'NameSpace's?
--
-- Depending on the answer, we might need more or less information from the
-- 'GlobalRdrEnv', e.g. if we want to include matching record fields we need
-- to know if the corresponding record fields define field selectors, for which
-- we need to consult the 'GREInfo'. This is why this datatype is a GADT.
--
-- See Note [IfGlobalRdrEnv].
data LookupGRE info where
  -- | Look for this specific 'OccName', with the exact same 'NameSpace',
  -- in the 'GlobalRdrEnv'.
  LookupOccName :: OccName        -- ^ the 'OccName' to look up
                -> WhichGREs info -- ^ information about other relevant 'NameSpace's
                -> LookupGRE info

  -- | Look up the 'OccName' of this 'RdrName' in the 'GlobalRdrEnv',
  -- filtering out those whose qualification matches that of the 'RdrName'.
  --
  -- Lookup returns an empty result for 'Exact' or 'Orig' 'RdrName's.
  LookupRdrName :: RdrName        -- ^ the 'RdrName' to look up
                -> WhichGREs info -- ^ information about other relevant 'NameSpace's
                -> LookupGRE info

  -- | Look for 'GRE's with the same unique as the given 'Name'
  -- in the 'GlobalRdrEnv'.
  LookupExactName
    :: { lookupExactName :: Name
           -- ^ the 'Name' to look up
       , lookInAllNameSpaces :: Bool
           -- ^ whether to look in *all* 'NameSpace's, or just
           -- in the 'NameSpace' of the 'Name'
           -- See Note [Template Haskell ambiguity]
       }
    -> LookupGRE info

  -- | Look up children 'GlobalRdrElt's with a given 'Parent'.
  LookupChildren :: OccName      -- ^ the 'OccName' to look up
                 -> LookupChild  -- ^ information to decide which 'GlobalRdrElt's
                                 -- are valid children after looking up
                 -> LookupGRE info

-- | How should we look up in a 'GlobalRdrEnv'?
-- Which 'NameSpace's are considered relevant for a given lookup?
data WhichGREs info where
  -- | Only consider 'GlobalRdrElt's with the exact 'NameSpace' we look up.
  SameNameSpace :: WhichGREs info
  -- | Allow 'GlobalRdrElt's with different 'NameSpace's, e.g. allow looking up
  -- record fields from the variable 'NameSpace', or looking up a 'TyCon' from
  -- the data constructor 'NameSpace'.
  RelevantGREs
    :: { includeFieldSelectors :: !FieldsOrSelectors
           -- ^ how should we handle looking up variables?
           --
           --  - should we include record fields defined with @-XNoFieldSelectors@?
           --  - should we include non-fields?
           --
           -- See Note [NoFieldSelectors].
       , lookupVariablesForFields :: !Bool
           -- ^ when looking up a record field, should we also look up plain variables?
       , lookupTyConsAsWell :: !Bool
           -- ^ when looking up a variable, field or data constructor, should we
           -- also try the type constructor 'NameSpace'?
       }
    -> WhichGREs GREInfo

instance Outputable (WhichGREs info) where
  ppr SameNameSpace = text "SameNameSpace"
  ppr (RelevantGREs { includeFieldSelectors = sel
                    , lookupVariablesForFields = vars
                    , lookupTyConsAsWell = tcs_too })
    = braces $ hsep
        [ text "RelevantGREs"
        , text (show sel)
        , if vars    then text "[vars]" else empty
        , if tcs_too then text "[tcs]"  else empty ]

-- | Look up as many possibly relevant 'GlobalRdrElt's as possible.
pattern AllRelevantGREs :: WhichGREs GREInfo
pattern AllRelevantGREs =
  RelevantGREs { includeFieldSelectors = WantBoth
               , lookupVariablesForFields = True
               , lookupTyConsAsWell = True }

-- | Look up relevant GREs, taking into account the interaction between the
-- variable and field 'NameSpace's as determined by the 'FieldsOrSelector'
-- argument.
pattern RelevantGREsFOS :: FieldsOrSelectors -> WhichGREs GREInfo
pattern RelevantGREsFOS fos <- RelevantGREs { includeFieldSelectors = fos }
  where
    RelevantGREsFOS fos =
      RelevantGREs { includeFieldSelectors = fos
                   , lookupVariablesForFields = fos == WantBoth
                   , lookupTyConsAsWell = False }

data LookupChild
  = LookupChild
  { wantedParent :: Name
     -- ^ the parent we are looking up children of
  , lookupDataConFirst :: Bool
     -- ^ for type constructors, should we look in the data constructor
     -- namespace first?
  , prioritiseParent :: Bool
     -- ^ should we prioritise getting the right 'Parent'?
     --
     --  - @True@: prioritise getting the right 'Parent'
     --  - @False@: prioritise getting the right 'NameSpace'
     --
     -- See Note [childGREPriority].
  }

instance Outputable LookupChild where
  ppr (LookupChild { wantedParent = par
                   , lookupDataConFirst = dc
                   , prioritiseParent = prio_parent })
    = braces $ hsep
        [ text "LookupChild"
        , braces (text "parent:" <+> ppr par)
        , if dc          then text "[dc_first]"    else empty
        , if prio_parent then text "[prio_parent]" else empty ]

-- | After looking up something with the given 'NameSpace', is the resulting
-- 'GlobalRdrElt' we have obtained relevant, according to the 'RelevantGREs'
-- specification of which 'NameSpace's are relevant?
greIsRelevant :: WhichGREs GREInfo
                 -- ^ specification of which 'GlobalRdrElt's to consider relevant
              -> NameSpace
                 -- ^ the 'NameSpace' of the thing we are looking up
              -> GlobalRdrElt
                 -- ^ the 'GlobalRdrElt' we have looked up, in a
                 -- potentially different 'NameSpace' than we wanted
              -> Bool
greIsRelevant which_gres ns gre
  | ns == other_ns
  = True
  | otherwise
  = case which_gres of
      SameNameSpace -> False
      RelevantGREs { includeFieldSelectors = fos
                   , lookupVariablesForFields = vars_for_flds
                   , lookupTyConsAsWell = tycons_too }
        | ns == varName
        -> (isFieldNameSpace other_ns && allowGRE fos gre) || tc_too
        | isFieldNameSpace ns
        -> vars_for_flds
           && (  other_ns == varName
              || (isFieldNameSpace other_ns && allowGRE fos gre)
              || tc_too )
        | isDataConNameSpace ns
        -> tc_too
        | otherwise
        -> False
        where
          tc_too = tycons_too && isTcClsNameSpace other_ns
  where
    other_ns = greNameSpace gre

{- Note [childGREPriority]
~~~~~~~~~~~~~~~~~~~~~~~~~~
There are currently two places in the compiler where we look up GlobalRdrElts
which have a given Parent. These are the two calls to lookupSubBndrOcc_helper:

  A. Looking up children in an export item, e.g.

       module M ( T(MkT, D) ) where { data T = MkT; data D = D }

  B. Looking up binders in a class or instance declaration, e.g.
the operator +++ in the fixity declaration: class C a where { type (+++) :: a -> a ->; infixl 6 +++ } (+++) :: Int -> Int -> Int; (+++) = (+) In these two situations, there are two competing metrics for finding the "best" 'GlobalRdrElt' that a particular 'OccName' resolves to: - does the resolved 'GlobalRdrElt' have the correct parent? - does the resolved 'GlobalRdrElt' have the same 'NameSpace' as the 'OccName'? (A) and (B) have competing requirements. For the example of (A) above, we know that the child 'D' of 'T' must live in the data namespace, so we look up the OccName 'OccName DataName "D"' and prioritise the lookup results based on the 'NameSpace'. This means we get an error message of the form: The type constructor 'T' is not the parent of the data constructor 'D'. as opposed to the rather unhelpful and confusing: The type constructor 'T' is not the parent of the type constructor 'D'. See test case T11970. For the example of (B) above, the fixity declaration for +++ lies inside the class, so we should prioritise looking up 'GlobalRdrElt's whose parent is 'C'. Not doing so led to #23664. -} -- | Scoring priority function for looking up children 'GlobalRdrElt'. -- -- We score by 'Parent' and 'NameSpace', with higher priorities having lower -- numbers. Which lexicographic order we use ('Parent' or 'NameSpace' first) -- is determined by the first argument; see Note [childGREPriority]. childGREPriority :: LookupChild -- ^ what kind of child do we want, -- e.g. what should its parent be? -> NameSpace -- ^ what 'NameSpace' are we originally looking in? 
-> GlobalRdrEltX info -- ^ the result of looking up; it might be in a different -- 'NameSpace', which is used to determine the score -- (in the first component) -> Maybe (Int, Int) childGREPriority (LookupChild { wantedParent = wanted_parent , lookupDataConFirst = try_dc_first , prioritiseParent = par_first }) ns gre = case child_ns_prio $ greNameSpace gre of Nothing -> Nothing Just ns_prio -> let par_prio = parent_prio $ greParent gre in Just $ if par_first then (par_prio, ns_prio) else (ns_prio, par_prio) -- See Note [childGREPriority]. where -- Pick out the possible 'NameSpace's in order of priority. child_ns_prio :: (NameSpace -> Maybe Int) child_ns_prio other_ns | other_ns == ns = Just 0 | isTermVarOrFieldNameSpace ns , isTermVarOrFieldNameSpace other_ns = Just 0 | isValNameSpace varName , other_ns == tcName -- When looking up children, we sometimes want a value name -- to resolve to a type constructor. -- For example, for an infix declaration "infixr 3 +!" or "infix 2 `Fun`" -- inside a class declaration, we want to account for the possibility -- that the identifier refers to an associated type (type constructor -- NameSpace), when otherwise "+!" would be in the term-level variable -- NameSpace, and "Fun" would be in the term-level data constructor -- NameSpace. See tests T10816, T23664, T24037. = Just 1 | ns == tcName , other_ns == dataName , try_dc_first -- try data namespace before type/class namespace? = Just (-1) | otherwise = Nothing parent_prio :: Parent -> Int parent_prio (ParentIs other_parent) | other_parent == wanted_parent = 0 | otherwise = 1 parent_prio NoParent = 0 -- | Look something up in the Global Reader Environment. -- -- The 'LookupGRE' argument specifies what to look up, and in particular -- whether there should there be any lee-way if the 'NameSpace's don't -- exactly match. 
lookupGRE :: GlobalRdrEnvX info -> LookupGRE info -> [GlobalRdrEltX info] lookupGRE env = \case LookupOccName occ which_gres -> case which_gres of SameNameSpace -> concat $ lookupOccEnv env occ rel@(RelevantGREs{}) -> filter (greIsRelevant rel (occNameSpace occ)) $ concat $ lookupOccEnv_AllNameSpaces env occ LookupRdrName rdr rel -> pickGREs rdr $ lookupGRE env (LookupOccName (rdrNameOcc rdr) rel) LookupExactName { lookupExactName = nm , lookInAllNameSpaces = all_ns } -> [ gre | gre <- lkup, greName gre == nm ] where occ = nameOccName nm lkup | all_ns = concat $ lookupOccEnv_AllNameSpaces env occ | otherwise = fromMaybe [] $ lookupOccEnv env occ LookupChildren occ which_child -> let ns = occNameSpace occ all_gres = concat $ lookupOccEnv_AllNameSpaces env occ in highestPriorityGREs (childGREPriority which_child ns) all_gres -- | Collect the 'GlobalRdrElt's with the highest priority according -- to the given function (lower value <=> higher priority). -- -- This allows us to first look in e.g. the data 'NameSpace', and then fall back -- to the type/class 'NameSpace'. highestPriorityGREs :: forall gre prio . Ord prio => (gre -> Maybe prio) -- ^ priority function -- lower value <=> higher priority -> [gre] -> [gre] highestPriorityGREs priority gres = take_highest_prio $ NE.group $ sort [ S.Arg prio gre | gre <- gres , prio <- maybeToList $ priority gre ] where take_highest_prio :: [NE.NonEmpty (S.Arg prio gre)] -> [gre] take_highest_prio [] = [] take_highest_prio (fs:_) = map (\ (S.Arg _ gre) -> gre) $ NE.toList fs {-# INLINEABLE highestPriorityGREs #-} -- | Look for precisely this 'Name' in the environment, -- in the __same 'NameSpace'__ as the 'Name'. -- -- This tests whether it is in scope, ignoring anything -- else that might be in scope which doesn't have the same 'Unique'. 
lookupGRE_Name :: Outputable info
               => GlobalRdrEnvX info -> Name -> Maybe (GlobalRdrEltX info)
lookupGRE_Name env name =
  case lookupGRE env (LookupExactName { lookupExactName = name
                                      , lookInAllNameSpaces = False }) of
    []    -> Nothing
    [gre] -> Just gre
    gres  -> pprPanic "lookupGRE_Name"
               (ppr name $$ ppr (nameOccName name) $$ ppr gres)
             -- See INVARIANT 1 on GlobalRdrEnv

-- | Look for a particular record field selector in the environment.
lookupGRE_FieldLabel :: GlobalRdrEnv -> FieldLabel -> Maybe FieldGlobalRdrElt
lookupGRE_FieldLabel env fl =
  case lookupGRE_Name env (flSelector fl) of
    Nothing  -> Nothing
    Just gre ->
      assertPpr (isRecFldGRE gre)
        (vcat [ text "lookupGre_FieldLabel:" <+> ppr fl ]) $
      Just gre

getGRE_NameQualifier_maybes :: GlobalRdrEnv -> Name -> [Maybe [ModuleName]]
-- Returns all the qualifiers by which 'x' is in scope
-- Nothing means "the unqualified version is in scope"
-- [] means the thing is not in scope at all
getGRE_NameQualifier_maybes env name =
  case lookupGRE_Name env name of
    Just gre -> [qualifier_maybe gre]
    Nothing  -> []
  where
    qualifier_maybe (GRE { gre_lcl = lcl, gre_imp = iss })
      | lcl       = Nothing
      | otherwise = Just $ map (is_as . is_decl) (bagToList iss)

-- | Is this 'GlobalRdrElt' defined locally?
isLocalGRE :: GlobalRdrEltX info -> Bool
isLocalGRE (GRE { gre_lcl = lcl }) = lcl

-- | Is this 'GlobalRdrElt' imported?
--
-- Not just the negation of 'isLocalGRE', because it might be an Exact or
-- Orig name reference. See Note [GlobalRdrElt provenance].
isImportedGRE :: GlobalRdrEltX info -> Bool
isImportedGRE (GRE { gre_imp = imps }) = not $ isEmptyBag imps

-- | Is this a record field GRE?
--
-- Important: does /not/ consult the 'GreInfo' field.
isRecFldGRE :: GlobalRdrEltX info -> Bool
isRecFldGRE (GRE { gre_name = nm }) = isFieldName nm

isDuplicateRecFldGRE :: GlobalRdrElt -> Bool
-- ^ Is this a record field defined with DuplicateRecordFields?
isDuplicateRecFldGRE =
  maybe False ((DuplicateRecordFields ==) . flHasDuplicateRecordFields)
    . greFieldLabel_maybe

isNoFieldSelectorGRE :: GlobalRdrElt -> Bool
-- ^ Is this a record field defined with NoFieldSelectors?
-- (See Note [NoFieldSelectors] in GHC.Rename.Env)
isNoFieldSelectorGRE =
  maybe False ((NoFieldSelectors ==) . flHasFieldSelector)
    . greFieldLabel_maybe

isFieldSelectorGRE :: GlobalRdrElt -> Bool
-- ^ Is this a record field defined with FieldSelectors?
-- (See Note [NoFieldSelectors] in GHC.Rename.Env)
isFieldSelectorGRE =
  maybe False ((FieldSelectors ==) . flHasFieldSelector)
    . greFieldLabel_maybe

greFieldLabel_maybe :: GlobalRdrElt -> Maybe FieldLabel
-- ^ Returns the field label of this GRE, if it has one
greFieldLabel_maybe = fmap fieldGRELabel . fieldGRE_maybe

unQualOK :: GlobalRdrEltX info -> Bool
-- ^ Test if an unqualified version of this thing would be in scope
unQualOK (GRE {gre_lcl = lcl, gre_imp = iss })
  | lcl       = True
  | otherwise = any unQualSpecOK iss

{- Note [GRE filtering]
~~~~~~~~~~~~~~~~~~~~~~~
(pickGREs rdr gres) takes a list of GREs which have the same OccName as 'rdr',
say "x". It does two things:

(a) filters the GREs to a subset that are in scope
    * Qualified,   as 'M.x'  if want_qual    is Qual M _
    * Unqualified, as 'x'    if want_unqual  is Unqual _

(b) for that subset, filter the provenance field (gre_lcl and gre_imp) to
    ones that brought it into scope qualified or unqualified resp.

Example:
      module A ( f ) where
      import qualified Foo( f )
      import Baz( f )
      f = undefined

Let's suppose that Foo.f and Baz.f are the same entity really, but the local
'f' is different, so there will be two GREs matching "f":
   gre1:  gre_lcl = True,  gre_imp = []
   gre2:  gre_lcl = False, gre_imp = [ imported from Foo, imported from Bar ]

The use of "f" in the export list is ambiguous because it's in scope
from the local def and the import Baz(f); but *not* the import qualified Foo.
pickGREs returns two GRE
   gre1:   gre_lcl = True,  gre_imp = []
   gre2:   gre_lcl = False, gre_imp = [ imported from Bar ]

Now the "ambiguous occurrence" message can correctly report how the
ambiguity arises.
-}

pickGREs :: RdrName -> [GlobalRdrEltX info] -> [GlobalRdrEltX info]
-- ^ Takes a list of GREs which have the right OccName 'x'
-- Pick those GREs that are in scope
--    * Qualified,   as 'M.x'  if want_qual    is Qual M _
--    * Unqualified, as 'x'    if want_unqual  is Unqual _
--
-- Return each such GRE, with its ImportSpecs filtered, to reflect
-- how it is in scope qualified or unqualified respectively.
-- See Note [GRE filtering]
pickGREs (Unqual {})  gres = mapMaybe pickUnqualGRE       gres
pickGREs (Qual mod _) gres = mapMaybe (pickQualGRE mod)   gres
pickGREs _            _    = []  -- I don't think this actually happens

pickUnqualGRE :: GlobalRdrEltX info -> Maybe (GlobalRdrEltX info)
pickUnqualGRE gre@(GRE { gre_lcl = lcl, gre_imp = iss })
  | not lcl, null iss' = Nothing
  | otherwise          = Just (gre { gre_imp = iss' })
  where
    iss' = filterBag unQualSpecOK iss

pickQualGRE :: ModuleName -> GlobalRdrEltX info -> Maybe (GlobalRdrEltX info)
pickQualGRE mod gre@(GRE { gre_lcl = lcl, gre_imp = iss })
  | not lcl', null iss' = Nothing
  | otherwise           = Just (gre { gre_lcl = lcl', gre_imp = iss' })
  where
    iss' = filterBag (qualSpecOK mod) iss
    lcl' = lcl && name_is_from mod

    name_is_from :: ModuleName -> Bool
    name_is_from mod = case greDefinitionModule gre of
                         Just n_mod -> moduleName n_mod == mod
                         Nothing    -> False

pickGREsModExp :: ModuleName
               -> [GlobalRdrEltX info]
               -> [(GlobalRdrEltX info,GlobalRdrEltX info)]
-- ^ Pick GREs that are in scope *both* qualified *and* unqualified
-- Return each GRE that is, as a pair
--    (qual_gre, unqual_gre)
-- These two GREs are the original GRE with imports filtered to express how
-- it is in scope qualified an unqualified respectively
--
-- Used only for the 'module M' item in export list;
--   see 'GHC.Tc.Gen.Export.exports_from_avail'
pickGREsModExp mod gres = mapMaybe (pickBothGRE mod) gres

-- | isBuiltInSyntax filter out names for built-in syntax They
-- just clutter up the environment (esp tuples), and the
-- parser will generate Exact RdrNames for them, so the
-- cluttered envt is no use. Really, it's only useful for
-- GHC.Base and GHC.Tuple.
pickBothGRE :: ModuleName
            -> GlobalRdrEltX info
            -> Maybe (GlobalRdrEltX info, GlobalRdrEltX info)
pickBothGRE mod gre
  | isBuiltInSyntax (greName gre) = Nothing
  | Just gre1 <- pickQualGRE mod gre
  , Just gre2 <- pickUnqualGRE   gre = Just (gre1, gre2)
  | otherwise                        = Nothing

-- Building GlobalRdrEnvs

plusGlobalRdrEnv :: GlobalRdrEnv -> GlobalRdrEnv -> GlobalRdrEnv
plusGlobalRdrEnv env1 env2 = plusOccEnv_C (foldr insertGRE) env1 env2

mkGlobalRdrEnv :: [GlobalRdrElt] -> GlobalRdrEnv
mkGlobalRdrEnv gres = foldr add emptyGlobalRdrEnv gres
  where
    add gre env =
      extendOccEnv_Acc insertGRE Utils.singleton env (greOccName gre) gre

insertGRE :: GlobalRdrElt -> [GlobalRdrElt] -> [GlobalRdrElt]
insertGRE new_g [] = [new_g]
insertGRE new_g (old_g : old_gs)
  | greName new_g == greName old_g
  = new_g `plusGRE` old_g : old_gs
  | otherwise
  = old_g : insertGRE new_g old_gs

plusGRE :: GlobalRdrElt -> GlobalRdrElt -> GlobalRdrElt
-- Used when the gre_name fields match
plusGRE g1 g2 =
  GRE { gre_name = gre_name g1
      , gre_lcl  = gre_lcl g1 || gre_lcl g2
      , gre_imp  = gre_imp g1 `unionBags` gre_imp g2
      , gre_par  = gre_par g1 `plusParent` gre_par g2
      , gre_info = gre_info g1 `plusGREInfo` gre_info g2 }

transformGREs :: (GlobalRdrElt -> GlobalRdrElt)
              -> [OccName]
              -> GlobalRdrEnv -> GlobalRdrEnv
-- ^ Apply a transformation function to the GREs for these OccNames
transformGREs trans_gre occs rdr_env = foldr trans rdr_env occs
  where
    trans occ env =
      case lookupOccEnv env occ of
        Just gres -> extendOccEnv env occ (map trans_gre gres)
        Nothing   -> env

extendGlobalRdrEnv :: GlobalRdrEnv -> GlobalRdrElt -> GlobalRdrEnv
extendGlobalRdrEnv env gre =
  extendOccEnv_Acc insertGRE Utils.singleton env (greOccName gre) gre

{- Note [GlobalRdrEnv shadowing]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before adding new names to the GlobalRdrEnv we nuke some existing entries;
this is "shadowing".  The actual work is done by
GHC.Types.Name.Reader.shadowNames.  Suppose

   env' = shadowNames env { f } `extendGlobalRdrEnv` { M.f }

Then:
   * Looking up (Unqual f) in env' should succeed, returning M.f,
     even if env contains existing unqualified bindings for f.
     They are shadowed

   * Looking up (Qual M.f) in env' should succeed, returning M.f

   * Looking up (Qual X.f) in env', where X /= M, should be the same as
     looking up (Qual X.f) in env.
     That is, shadowNames does /not/ delete earlier qualified bindings

There are two reasons for shadowing:

* The GHCi REPL

  - Ids bought into scope on the command line (eg let x = True) have
    External Names, like Ghci4.x.  We want a new binding for 'x' (say)
    to override the existing binding for 'x'.  Example:

        ghci> :load M    -- Brings `x` and `M.x` into scope
        ghci> x
        ghci> "Hello"
        ghci> M.x
        ghci> "hello"
        ghci> let x = True  -- Shadows `x`
        ghci> x             -- The locally bound `x`
                            -- NOT an ambiguous reference
        ghci> True
        ghci> M.x           -- M.x is still in scope!
        ghci> "Hello"

    So when we add `x = True` we must not delete the `M.x` from the
    `GlobalRdrEnv`; rather we just want to make it "qualified only";
    hence the `set_qual` in `shadowNames`.  See also Note
    [Interactively-bound Ids in GHCi] in GHC.Runtime.Context

  - Data types also have External Names, like Ghci4.T; but we still want
    'T' to mean the newly-declared 'T', not an old one.

* Nested Template Haskell declaration brackets
  See Note [Top-level Names in Template Haskell decl quotes] in GHC.Rename.Names

  Consider a TH decl quote:
      module M where
        f x = h [d| f = ...f...M.f... |]
  We must shadow the outer unqualified binding of 'f', else we'll get
  a complaint when extending the GlobalRdrEnv, saying that there are
  two bindings for 'f'.

  There are several tricky points:

    - This shadowing applies even if the binding for 'f' is in a
      where-clause, and hence is in the *local* RdrEnv not the *global*
      RdrEnv.  This is done in lcl_env_TH in extendGlobalRdrEnvRn.

    - The External Name M.f from the enclosing module must certainly
      still be available.  So we don't nuke it entirely; we just make
      it seem like qualified import.

    - We only shadow *External* names (which come from the main module),
      or from earlier GHCi commands.  Do not shadow *Internal* names
      because in the bracket
          [d| class C a where f :: a
              f = 4 |]
      rnSrcDecls will first call extendGlobalRdrEnvRn with C[f] from the
      class decl, and *separately* extend the envt with the value binding.
      At that stage, the class op 'f' will have an Internal name.

Wrinkle [Shadowing namespaces]

  In the following GHCi session:

    > data A = MkA { foo :: Int }
    > foo = False
    > bar = foo

  We expect the variable 'foo' to shadow the record field 'foo', even
  though they are in separate namespaces, so that the occurrence of 'foo'
  in the body of 'bar' is not ambiguous.
-}

shadowNames :: Bool -- ^ discard names that are only available qualified?
            -> GlobalRdrEnv -> GlobalRdrEnv -> GlobalRdrEnv
-- Remove certain old GREs that share the same OccName as this new Name.
-- See Note [GlobalRdrEnv shadowing] for details
shadowNames drop_only_qualified env new_gres =
  minusOccEnv_C_Ns do_shadowing env new_gres
  where
    do_shadowing :: UniqFM NameSpace [GlobalRdrElt]
                 -> UniqFM NameSpace [GlobalRdrElt]
                 -> UniqFM NameSpace [GlobalRdrElt]
    do_shadowing olds news =
      -- Start off by accumulating all 'NameSpace's shadowed
      -- by the entire collection of new GREs.
      let shadowed_gres :: ShadowedGREs
          shadowed_gres =
            nonDetFoldUFM
              (\ gres shads -> foldMap greShadowedNameSpaces gres S.<> shads)
              mempty news

          -- Then shadow the old 'GlobalRdrElt's, now that we know which
          -- 'NameSpace's should be shadowed.
          shadow_list :: Unique -> [GlobalRdrElt] -> Maybe [GlobalRdrElt]
          shadow_list old_ns old_gres =
            case namespace_is_shadowed old_ns shadowed_gres of
              IsNotShadowed -> Just old_gres
              IsShadowed    -> guard_nonEmpty $ mapMaybe shadow old_gres
              IsShadowedIfFieldSelector ->
                guard_nonEmpty $
                  mapMaybe
                    (\ old_gre -> if isFieldSelectorGRE old_gre
                                  then shadow old_gre
                                  else Just old_gre)
                    old_gres

      -- Now do all of the shadowing in a single go. This avoids traversing
      -- the old GlobalRdrEnv multiple times over.
      in mapMaybeWithKeyUFM shadow_list olds

    guard_nonEmpty :: [a] -> Maybe [a]
    guard_nonEmpty xs | null xs   = Nothing
                      | otherwise = Just xs

    -- Shadow a single GRE, by either qualifying it or removing it entirely.
    shadow :: GlobalRdrElt-> Maybe GlobalRdrElt
    shadow old_gre@(GRE { gre_lcl = lcl, gre_imp = iss }) =
      case greDefinitionModule old_gre of
        Nothing -> Just old_gre -- Old name is Internal; do not shadow
        Just old_mod
          |  null iss'         -- Nothing remains
          || drop_only_qualified
          -> Nothing
          | otherwise
          -> Just (old_gre { gre_lcl = False, gre_imp = iss' })
          where
            iss' = lcl_imp `unionBags` mapBag set_qual iss
            lcl_imp | lcl       = unitBag $ mk_fake_imp_spec old_gre old_mod
                    | otherwise = emptyBag

            mk_fake_imp_spec old_gre old_mod -- Urgh!
              = ImpSpec id_spec ImpAll
              where
                old_mod_name = moduleName old_mod
                id_spec = ImpDeclSpec { is_mod = old_mod
                                      , is_as = old_mod_name
                                      , is_pkg_qual = NoPkgQual
                                      , is_qual = True
                                      , is_isboot = NotBoot
                                      , is_dloc = greDefinitionSrcSpan old_gre }

            set_qual :: ImportSpec -> ImportSpec
            set_qual is = is { is_decl = (is_decl is) { is_qual = True } }

-- | @greClashesWith new_gre old_gre@ computes whether @new_gre@ clashes
-- with @old_gre@ (assuming they both have the same underlying 'occNameFS').
greClashesWith :: GlobalRdrElt -> (GlobalRdrElt -> Bool)
greClashesWith new_gre old_gre =
  old_gre `greIsShadowed` greShadowedNameSpaces new_gre

-- | Is the given 'GlobalRdrElt' shadowed, as specified by the 'ShadowedNameSpace's?
greIsShadowed :: GlobalRdrElt -> ShadowedGREs -> Bool
greIsShadowed old_gre shadowed =
  case getUnique old_ns `namespace_is_shadowed` shadowed of
    IsShadowed    -> True
    IsNotShadowed -> False
    IsShadowedIfFieldSelector -> isFieldSelectorGRE old_gre
  where
    old_ns = occNameSpace $ greOccName old_gre

-- | Whether a 'GlobalRdrElt' is definitely shadowed, definitely not shadowed,
-- or conditionally shadowed based on more information beyond the 'NameSpace'.
data IsShadowed
  -- | The GRE is not shadowed.
  = IsNotShadowed
  -- | The GRE is shadowed.
  | IsShadowed
  -- | The GRE is shadowed iff it is a record field GRE
  -- which defines a field selector (i.e. FieldSelectors is enabled in its
  -- defining module).
  | IsShadowedIfFieldSelector

-- | Internal function: is a 'GlobalRdrElt' with the 'NameSpace' with given
-- 'Unique' shadowed by the specified 'ShadowedGREs'?
namespace_is_shadowed :: Unique -> ShadowedGREs -> IsShadowed
namespace_is_shadowed old_ns (ShadowedGREs shadowed_nonflds shadowed_flds)
  | isFldNSUnique old_ns
  = case shadowed_flds of
      ShadowAllFieldGREs -> IsShadowed
      ShadowFieldSelectorsAnd shadowed
        | old_ns `elemUniqSet_Directly` shadowed
        -> IsShadowed
        | otherwise
        -> IsShadowedIfFieldSelector
      ShadowFieldNameSpaces shadowed
        | old_ns `elemUniqSet_Directly` shadowed
        -> IsShadowed
        | otherwise
        -> IsNotShadowed
  | old_ns `elemUniqSet_Directly` shadowed_nonflds
  = IsShadowed
  | otherwise
  = IsNotShadowed

-- | What are all the 'GlobalRdrElt's that are shadowed by this new 'GlobalRdrElt'?
greShadowedNameSpaces :: GlobalRdrElt -> ShadowedGREs
greShadowedNameSpaces gre = ShadowedGREs shadowed_nonflds shadowed_flds
  where
    ns = occNameSpace $ greOccName gre
    !shadowed_nonflds
      | isFieldNameSpace ns
      -- A new record field shadows variables if it defines a field selector.
      = if isFieldSelectorGRE gre
        then unitUniqSet varName
        else emptyUniqSet
      | otherwise
      = unitUniqSet ns
    !shadowed_flds
      | ns == varName
      -- A new variable shadows record fields with field selectors.
      = ShadowFieldSelectorsAnd emptyUniqSet
      | isFieldNameSpace ns
      -- A new record field shadows record fields unless it is a duplicate record field.
      = if isDuplicateRecFldGRE gre
        then ShadowFieldNameSpaces (unitUniqSet ns)
          -- NB: we must still shadow fields with the same constructor name.
        else ShadowAllFieldGREs
      | otherwise
      = ShadowFieldNameSpaces emptyUniqSet

-- | A description of which 'GlobalRdrElt's are shadowed.
data ShadowedGREs
  = ShadowedGREs
    { shadowedNonFieldNameSpaces :: !(UniqSet NameSpace)
       -- ^ These specific non-field 'NameSpace's are shadowed.
    , shadowedFieldGREs :: !ShadowedFieldGREs
       -- ^ These field 'GlobalRdrElt's are shadowed.
    }

-- | A description of which record field 'GlobalRdrElt's are shadowed.
data ShadowedFieldGREs
  -- | All field 'GlobalRdrElt's are shadowed.
  = ShadowAllFieldGREs
  -- | Record field GREs defining field selectors, as well as those
  -- with the explicitly specified field 'NameSpace's, are shadowed.
  | ShadowFieldSelectorsAnd { shadowedFieldNameSpaces :: !(UniqSet NameSpace) }
  -- | These specific field 'NameSpace's are shadowed.
| ShadowFieldNameSpaces { shadowedFieldNameSpaces :: !(UniqSet NameSpace) } instance Monoid ShadowedFieldGREs where mempty = ShadowFieldNameSpaces { shadowedFieldNameSpaces = emptyUniqSet } instance Semigroup ShadowedFieldGREs where ShadowAllFieldGREs <> _ = ShadowAllFieldGREs _ <> ShadowAllFieldGREs = ShadowAllFieldGREs ShadowFieldSelectorsAnd ns1 <> ShadowFieldSelectorsAnd ns2 = ShadowFieldSelectorsAnd (ns1 S.<> ns2) ShadowFieldSelectorsAnd ns1 <> ShadowFieldNameSpaces ns2 = ShadowFieldSelectorsAnd (ns1 S.<> ns2) ShadowFieldNameSpaces ns1 <> ShadowFieldSelectorsAnd ns2 = ShadowFieldSelectorsAnd (ns1 S.<> ns2) ShadowFieldNameSpaces ns1 <> ShadowFieldNameSpaces ns2 = ShadowFieldNameSpaces (ns1 S.<> ns2) instance Monoid ShadowedGREs where mempty = ShadowedGREs { shadowedNonFieldNameSpaces = emptyUniqSet , shadowedFieldGREs = mempty } instance Semigroup ShadowedGREs where ShadowedGREs nonflds1 flds1 <> ShadowedGREs nonflds2 flds2 = ShadowedGREs (nonflds1 S.<> nonflds2) (flds1 S.<> flds2) {- ************************************************************************ * * ImportSpec * * ************************************************************************ -} -- | Import Specification -- -- The 'ImportSpec' of something says how it came to be imported -- It's quite elaborate so that we can give accurate unused-name warnings. data ImportSpec = ImpSpec { is_decl :: !ImpDeclSpec, is_item :: !ImpItemSpec } deriving( Eq, Data ) instance NFData ImportSpec where rnf = rwhnf -- All fields are strict, so we don't need to do anything -- | Import Declaration Specification -- -- Describes a particular import declaration and is -- shared among all the 'Provenance's for that decl data ImpDeclSpec = ImpDeclSpec { is_mod :: !Module, -- ^ Module imported, e.g. @import Muggle@ -- Note the @Muggle@ may well not be -- the defining module for this thing! -- TODO: either should be Module, or there -- should be a Maybe UnitId here too. is_as :: !ModuleName, -- ^ Import alias, e.g. 
from @as M@ (or @Muggle@ if there is no @as@ clause) is_pkg_qual :: !PkgQual, -- ^ Was this a package import? is_qual :: !Bool, -- ^ Was this import qualified? is_dloc :: !SrcSpan, -- ^ The location of the entire import declaration is_isboot :: !IsBootInterface -- ^ Was this a SOURCE import? } deriving (Eq, Data) instance NFData ImpDeclSpec where rnf = rwhnf -- Already strict in all fields -- | Import Item Specification -- -- Describes import info a particular Name data ImpItemSpec = ImpAll -- ^ The import had no import list, -- or had a hiding list | ImpSome { is_explicit :: !Bool, is_iloc :: !SrcSpan -- Location of the import item } -- ^ The import had an import list. -- The 'is_explicit' field is @True@ iff the thing was named -- /explicitly/ in the import specs rather -- than being imported as part of a "..." group. Consider: -- -- > import C( T(..) ) -- -- Here the constructors of @T@ are not named explicitly; -- only @T@ is named explicitly. deriving (Eq, Data) bestImport :: NE.NonEmpty ImportSpec -> ImportSpec -- See Note [Choosing the best import declaration] bestImport iss = NE.head $ NE.sortBy best iss where best :: ImportSpec -> ImportSpec -> Ordering -- Less means better -- Unqualified always wins over qualified; then -- import-all wins over import-some; then -- earlier declaration wins over later best (ImpSpec { is_item = item1, is_decl = d1 }) (ImpSpec { is_item = item2, is_decl = d2 }) = (is_qual d1 `compare` is_qual d2) S.<> best_item item1 item2 S.<> SrcLoc.leftmost_smallest (is_dloc d1) (is_dloc d2) best_item :: ImpItemSpec -> ImpItemSpec -> Ordering best_item ImpAll ImpAll = EQ best_item ImpAll (ImpSome {}) = LT best_item (ImpSome {}) ImpAll = GT best_item (ImpSome { is_explicit = e1 }) (ImpSome { is_explicit = e2 }) = e1 `compare` e2 -- False < True, so if e1 is explicit and e2 is not, we get GT {- Note [Choosing the best import declaration] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When reporting unused import declarations we use the 
following rules. (see [wiki:commentary/compiler/unused-imports]) Say that an import-item is either * an entire import-all decl (eg import Foo), or * a particular item in an import list (eg import Foo( ..., x, ...)). The general idea is that for each /occurrence/ of an imported name, we will attribute that use to one import-item. Once we have processed all the occurrences, any import items with no uses attributed to them are unused, and are warned about. More precisely: 1. For every RdrName in the program text, find its GlobalRdrElt. 2. Then, from the [ImportSpec] (gre_imp) of that GRE, choose one the "chosen import-item", and mark it "used". This is done by 'bestImport' 3. After processing all the RdrNames, bleat about any import-items that are unused. This is done in GHC.Rename.Names.warnUnusedImportDecls. The function 'bestImport' returns the dominant import among the ImportSpecs it is given, implementing Step 2. We say import-item A dominates import-item B if we choose A over B. In general, we try to choose the import that is most likely to render other imports unnecessary. Here is the dominance relationship we choose: a) import Foo dominates import qualified Foo. b) import Foo dominates import Foo(x). c) Otherwise choose the textually first one. Rationale for (a). Consider import qualified M -- Import #1 import M( x ) -- Import #2 foo = M.x + x The unqualified 'x' can only come from import #2. The qualified 'M.x' could come from either, but bestImport picks import #2, because it is more likely to be useful in other imports, as indeed it is in this case (see #5211 for a concrete example). But the rules are not perfect; consider import qualified M -- Import #1 import M( x ) -- Import #2 foo = M.x + M.y The M.x will use import #2, but M.y can only use import #1. -} unQualSpecOK :: ImportSpec -> Bool -- ^ Is in scope unqualified? 
unQualSpecOK is = not (is_qual (is_decl is)) qualSpecOK :: ModuleName -> ImportSpec -> Bool -- ^ Is in scope qualified with the given module? qualSpecOK mod is = mod == is_as (is_decl is) importSpecLoc :: ImportSpec -> SrcSpan importSpecLoc (ImpSpec decl ImpAll) = is_dloc decl importSpecLoc (ImpSpec _ item) = is_iloc item importSpecModule :: ImportSpec -> ModuleName importSpecModule = moduleName . is_mod . is_decl isExplicitItem :: ImpItemSpec -> Bool isExplicitItem ImpAll = False isExplicitItem (ImpSome {is_explicit = exp}) = exp pprNameProvenance :: GlobalRdrEltX info -> SDoc -- ^ Print out one place where the name was define/imported -- (With -dppr-debug, print them all) pprNameProvenance (GRE { gre_name = name, gre_lcl = lcl, gre_imp = iss }) = ifPprDebug (vcat pp_provs) (head pp_provs) where pp_provs = pp_lcl ++ map pp_is (bagToList iss) pp_lcl = if lcl then [text "defined at" <+> ppr (nameSrcLoc name)] else [] pp_is is = sep [ppr is, ppr_defn_site is name] -- If we know the exact definition point (which we may do with GHCi) -- then show that too. But not if it's just "imported from X". 
ppr_defn_site :: ImportSpec -> Name -> SDoc ppr_defn_site imp_spec name | same_module && not (isGoodSrcSpan loc) = empty -- Nothing interesting to say | otherwise = parens $ hang (text "and originally defined" <+> pp_mod) 2 (pprLoc loc) where loc = nameSrcSpan name defining_mod = assertPpr (isExternalName name) (ppr name) $ nameModule name same_module = importSpecModule imp_spec == moduleName defining_mod pp_mod | same_module = empty | otherwise = text "in" <+> quotes (ppr defining_mod) instance Outputable ImportSpec where ppr imp_spec = text "imported" <+> qual <+> text "from" <+> quotes (ppr (importSpecModule imp_spec)) <+> pprLoc (importSpecLoc imp_spec) where qual | is_qual (is_decl imp_spec) = text "qualified" | otherwise = empty pprLoc :: SrcSpan -> SDoc pprLoc (RealSrcSpan s _) = text "at" <+> ppr s pprLoc (UnhelpfulSpan {}) = empty -- | Indicate if the given name is the "@" operator opIsAt :: RdrName -> Bool opIsAt e = e == mkUnqual varName (fsLit "@") ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Name/Set.hs0000644000000000000000000001704007346545000020763 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 -} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Types.Name.Set ( -- * Names set type NameSet, -- ** Manipulating these sets emptyNameSet, unitNameSet, mkNameSet, unionNameSet, unionNameSets, minusNameSet, elemNameSet, extendNameSet, extendNameSetList, delFromNameSet, delListFromNameSet, isEmptyNameSet, filterNameSet, intersectsNameSet, disjointNameSet, intersectNameSet, nameSetAny, nameSetAll, nameSetElemsStable, -- * Free variables FreeVars, -- ** Manipulating sets of free variables isEmptyFVs, emptyFVs, plusFVs, plusFV, mkFVs, addOneFV, unitFV, delFV, delFVs, intersectFVs, intersectsFVs, -- * Defs and uses Defs, Uses, DefUse, DefUses, -- ** Manipulating defs and uses emptyDUs, usesOnly, mkDUs, plusDU, findUses, duDefs, duUses, allUses, -- * Non-CAFfy names NonCaffySet(..) 
) where import GHC.Prelude import GHC.Types.Name import GHC.Data.OrdList import GHC.Types.Unique.Set import Data.List (sortBy) {- ************************************************************************ * * \subsection[Sets of names} * * ************************************************************************ -} type NameSet = UniqSet Name emptyNameSet :: NameSet unitNameSet :: Name -> NameSet extendNameSetList :: NameSet -> [Name] -> NameSet extendNameSet :: NameSet -> Name -> NameSet mkNameSet :: [Name] -> NameSet unionNameSet :: NameSet -> NameSet -> NameSet unionNameSets :: [NameSet] -> NameSet minusNameSet :: NameSet -> NameSet -> NameSet elemNameSet :: Name -> NameSet -> Bool isEmptyNameSet :: NameSet -> Bool delFromNameSet :: NameSet -> Name -> NameSet delListFromNameSet :: NameSet -> [Name] -> NameSet filterNameSet :: (Name -> Bool) -> NameSet -> NameSet intersectNameSet :: NameSet -> NameSet -> NameSet intersectsNameSet :: NameSet -> NameSet -> Bool disjointNameSet :: NameSet -> NameSet -> Bool -- ^ True if there is a non-empty intersection. -- @s1 `intersectsNameSet` s2@ doesn't compute @s2@ if @s1@ is empty isEmptyNameSet = isEmptyUniqSet emptyNameSet = emptyUniqSet unitNameSet = unitUniqSet mkNameSet = mkUniqSet extendNameSetList = addListToUniqSet extendNameSet = addOneToUniqSet unionNameSet = unionUniqSets unionNameSets = unionManyUniqSets minusNameSet = minusUniqSet elemNameSet = elementOfUniqSet delFromNameSet = delOneFromUniqSet filterNameSet = filterUniqSet intersectNameSet = intersectUniqSets disjointNameSet = disjointUniqSets delListFromNameSet set ns = foldl' delFromNameSet set ns intersectsNameSet s1 s2 = not (s1 `disjointNameSet` s2) nameSetAny :: (Name -> Bool) -> NameSet -> Bool nameSetAny = uniqSetAny nameSetAll :: (Name -> Bool) -> NameSet -> Bool nameSetAll = uniqSetAll -- | Get the elements of a NameSet with some stable ordering. -- This only works for Names that originate in the source code or have been -- tidied. 
-- See Note [Deterministic UniqFM] to learn about nondeterminism nameSetElemsStable :: NameSet -> [Name] nameSetElemsStable ns = sortBy stableNameCmp $ nonDetEltsUniqSet ns -- It's OK to use nonDetEltsUniqSet here because we immediately sort -- with stableNameCmp {- ************************************************************************ * * \subsection{Free variables} * * ************************************************************************ These synonyms are useful when we are thinking of free variables -} type FreeVars = NameSet plusFV :: FreeVars -> FreeVars -> FreeVars addOneFV :: FreeVars -> Name -> FreeVars unitFV :: Name -> FreeVars emptyFVs :: FreeVars plusFVs :: [FreeVars] -> FreeVars mkFVs :: [Name] -> FreeVars delFV :: Name -> FreeVars -> FreeVars delFVs :: [Name] -> FreeVars -> FreeVars intersectFVs :: FreeVars -> FreeVars -> FreeVars intersectsFVs :: FreeVars -> FreeVars -> Bool isEmptyFVs :: NameSet -> Bool isEmptyFVs = isEmptyNameSet emptyFVs = emptyNameSet plusFVs = unionNameSets plusFV = unionNameSet mkFVs = mkNameSet addOneFV = extendNameSet unitFV = unitNameSet delFV n s = delFromNameSet s n delFVs ns s = delListFromNameSet s ns intersectFVs = intersectNameSet intersectsFVs = intersectsNameSet {- ************************************************************************ * * Defs and uses * * ************************************************************************ -} -- | A set of names that are defined somewhere type Defs = NameSet -- | A set of names that are used somewhere type Uses = NameSet -- | @(Just ds, us) =>@ The use of any member of the @ds@ -- implies that all the @us@ are used too. -- Also, @us@ may mention @ds@. -- -- @Nothing =>@ Nothing is defined in this group, but -- nevertheless all the uses are essential. 
-- Used for instance declarations, for example type DefUse = (Maybe Defs, Uses) -- | A number of 'DefUse's in dependency order: earlier 'Defs' scope over later 'Uses' -- In a single (def, use) pair, the defs also scope over the uses type DefUses = OrdList DefUse emptyDUs :: DefUses emptyDUs = nilOL usesOnly :: Uses -> DefUses usesOnly uses = unitOL (Nothing, uses) mkDUs :: [(Defs,Uses)] -> DefUses mkDUs pairs = toOL [(Just defs, uses) | (defs,uses) <- pairs] plusDU :: DefUses -> DefUses -> DefUses plusDU = appOL duDefs :: DefUses -> Defs duDefs dus = foldr get emptyNameSet dus where get (Nothing, _u1) d2 = d2 get (Just d1, _u1) d2 = d1 `unionNameSet` d2 allUses :: DefUses -> Uses -- ^ Just like 'duUses', but 'Defs' are not eliminated from the 'Uses' returned allUses dus = foldr get emptyNameSet dus where get (_d1, u1) u2 = u1 `unionNameSet` u2 duUses :: DefUses -> Uses -- ^ Collect all 'Uses', regardless of whether the group is itself used, -- but remove 'Defs' on the way duUses dus = foldr get emptyNameSet dus where get (Nothing, rhs_uses) uses = rhs_uses `unionNameSet` uses get (Just defs, rhs_uses) uses = (rhs_uses `unionNameSet` uses) `minusNameSet` defs findUses :: DefUses -> Uses -> Uses -- ^ Given some 'DefUses' and some 'Uses', find all the uses, transitively. -- The result is a superset of the input 'Uses'; and includes things defined -- in the input 'DefUses' (but only if they are used) findUses dus uses = foldr get uses dus where get (Nothing, rhs_uses) uses = rhs_uses `unionNameSet` uses get (Just defs, rhs_uses) uses | defs `intersectsNameSet` uses -- Used || nameSetAny (startsWithUnderscore . nameOccName) defs -- At least one starts with an "_", -- so treat the group as used = rhs_uses `unionNameSet` uses | otherwise -- No def is used = uses -- | 'Id's which have no CAF references. This is a result of analysis of C--. -- It is always safe to use an empty 'NonCaffySet'. TODO Refer to Note. 
newtype NonCaffySet = NonCaffySet { ncs_nameSet :: NameSet } deriving (Semigroup, Monoid) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/PkgQual.hs0000644000000000000000000000203707346545000020714 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE LambdaCase #-} module GHC.Types.PkgQual where import GHC.Prelude import GHC.Types.SourceText import GHC.Unit.Types import GHC.Utils.Outputable import Data.Data -- | Package-qualifier as it was parsed data RawPkgQual = NoRawPkgQual -- ^ No package qualifier | RawPkgQual StringLiteral -- ^ Raw package qualifier string. deriving (Data) -- | Package-qualifier after renaming -- -- Renaming detects if "this" or the unit-id of the home-unit was used as a -- package qualifier. data PkgQual = NoPkgQual -- ^ No package qualifier | ThisPkg !UnitId -- ^ Import from home-unit | OtherPkg !UnitId -- ^ Import from another unit deriving (Data, Ord, Eq) instance Outputable RawPkgQual where ppr = \case NoRawPkgQual -> empty RawPkgQual (StringLiteral st p _) -> pprWithSourceText st (doubleQuotes (ftext p)) instance Outputable PkgQual where ppr = \case NoPkgQual -> empty ThisPkg u -> doubleQuotes (ppr u) OtherPkg u -> doubleQuotes (ppr u) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/ProfAuto.hs0000644000000000000000000000071407346545000021107 0ustar0000000000000000module GHC.Types.ProfAuto ( ProfAuto (..) 
) where import GHC.Prelude -- | What kind of {-# SCC #-} to add automatically data ProfAuto = NoProfAuto -- ^ no SCC annotations added | ProfAutoAll -- ^ top-level and nested functions are annotated | ProfAutoTop -- ^ top-level functions annotated only | ProfAutoExports -- ^ exported functions annotated only | ProfAutoCalls -- ^ annotate call-sites deriving (Eq,Enum) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/RepType.hs0000644000000000000000000006563007346545000020750 0ustar0000000000000000 {-# LANGUAGE FlexibleContexts #-} module GHC.Types.RepType ( -- * Code generator views onto Types UnaryType, NvUnaryType, isNvUnaryRep, unwrapType, -- * Predicates on types isZeroBitTy, -- * Type representation for the code generator typePrimRep, typePrimRep1, typePrimRepU, runtimeRepPrimRep, PrimRep(..), primRepToRuntimeRep, primRepToType, countFunRepArgs, countConRepArgs, dataConRuntimeRepStrictness, tyConPrimRep, runtimeRepPrimRep_maybe, kindPrimRep_maybe, typePrimRep_maybe, -- * Unboxed sum representation type ubxSumRepType, layoutUbxSum, repSlotTy, SlotTy (..), slotPrimRep, primRepSlot, -- * Is this type known to be data? 
mightBeFunTy ) where import GHC.Prelude import GHC.Types.Basic (Arity, RepArity) import GHC.Core.DataCon import GHC.Core.Coercion import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Core.TyCo.Rep import GHC.Core.Type import {-# SOURCE #-} GHC.Builtin.Types ( anyTypeOfKind , vecRepDataConTyCon , liftedRepTy, unliftedRepTy , intRepDataConTy , int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy , wordRepDataConTy , word16RepDataConTy, word8RepDataConTy, word32RepDataConTy, word64RepDataConTy , addrRepDataConTy , floatRepDataConTy, doubleRepDataConTy , vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy , vec64DataConTy , int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy , int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy , word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy , doubleElemRepDataConTy ) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Data.List.NonEmpty (NonEmpty (..)) import Data.List (sort) import qualified Data.IntSet as IS {- ********************************************************************** * * Representation types * * ********************************************************************** -} type NvUnaryType = Type type UnaryType = Type -- Both are always a value type; i.e. its kind is TYPE rr -- for some rr; moreover the rr is never a variable. -- -- NvUnaryType : never an unboxed tuple or sum, or void -- -- UnaryType : never an unboxed tuple or sum; -- can be Void# or (# #) isNvUnaryRep :: [PrimRep] -> Bool isNvUnaryRep [_] = True isNvUnaryRep _ = False -- | Gets rid of the stuff that prevents us from understanding the -- runtime representation of a type. Including: -- 1. Casts -- 2. Newtypes -- 3. Foralls -- 4. Synonyms -- But not type/data families, because we don't have the envs to hand. 
unwrapType :: Type -> Type unwrapType ty | Just (_, unwrapped) <- topNormaliseTypeX stepper mappend inner_ty = unwrapped | otherwise = inner_ty where inner_ty = go ty go t | Just t' <- coreView t = go t' go (ForAllTy _ t) = go t go (CastTy t _) = go t go t = t -- cf. Coercion.unwrapNewTypeStepper stepper rec_nts tc tys | Just (ty', _) <- instNewTyCon_maybe tc tys = case checkRecTc rec_nts tc of Just rec_nts' -> NS_Step rec_nts' (go ty') () Nothing -> NS_Abort -- infinite newtypes | otherwise = NS_Done -- | Count the arity of a function post-unarisation, including zero-width arguments. -- -- The post-unarisation arity may be larger than the arity of the original -- function type. See Note [Unarisation]. countFunRepArgs :: Arity -> Type -> RepArity countFunRepArgs 0 _ = 0 countFunRepArgs n ty | FunTy _ _ arg res <- unwrapType ty = (length (typePrimRep arg) `max` 1) + countFunRepArgs (n - 1) res -- If typePrimRep returns [] that means a void arg, -- and we count 1 for that | otherwise = pprPanic "countFunRepArgs: arity greater than type can handle" (ppr (n, ty, typePrimRep ty)) countConRepArgs :: DataCon -> RepArity countConRepArgs dc = go (dataConRepArity dc) (dataConRepType dc) where go :: Arity -> Type -> RepArity go 0 _ = 0 go n ty | FunTy _ _ arg res <- unwrapType ty = length (typePrimRep arg) + go (n - 1) res | otherwise = pprPanic "countConRepArgs: arity greater than type can handle" (ppr (n, ty, typePrimRep ty)) dataConRuntimeRepStrictness :: HasDebugCallStack => DataCon -> [StrictnessMark] -- ^ Give the demands on the arguments of a -- Core constructor application (Con dc args) at runtime. -- Assumes the constructor is not levity polymorphic. For example -- unboxed tuples won't work. 
dataConRuntimeRepStrictness dc = -- pprTrace "dataConRuntimeRepStrictness" (ppr dc $$ ppr (dataConRepArgTys dc)) $ let repMarks = dataConRepStrictness dc repTys = map irrelevantMult $ dataConRepArgTys dc in -- todo: assert dc != unboxedTuple/unboxedSum go repMarks repTys [] where go (mark:marks) (ty:types) out_marks = case reps of -- Zero-width argument, mark is irrelevant at runtime. [] -> -- pprTrace "VoidTy" (ppr ty) $ go marks types out_marks -- Single rep argument, e.g. Int -- Keep mark as-is [_] -> go marks types (mark:out_marks) -- Multi-rep argument, e.g. (# Int, Bool #) or (# Int | Bool #) -- Make up one non-strict mark per runtime argument. _ -> go marks types ((replicate (length reps) NotMarkedStrict)++out_marks) where reps = typePrimRep ty go [] [] out_marks = reverse out_marks go _m _t _o = pprPanic "dataConRuntimeRepStrictness2" (ppr dc $$ ppr _m $$ ppr _t $$ ppr _o) -- | True if the type has zero width. isZeroBitTy :: HasDebugCallStack => Type -> Bool isZeroBitTy = null . typePrimRep {- ********************************************************************** * * Unboxed sums See Note [Translating unboxed sums to unboxed tuples] in GHC.Stg.Unarise * * ********************************************************************** -} type SortedSlotTys = [SlotTy] -- | Given the arguments of a sum type constructor application, -- return the unboxed sum rep type. -- -- E.g. -- -- (# Int# | Maybe Int | (# Int#, Float# #) #) -- -- We call `ubxSumRepType [ [IntRep], [LiftedRep], [IntRep, FloatRep] ]`, -- which returns [WordSlot, PtrSlot, WordSlot, FloatSlot] -- -- INVARIANT: Result slots are sorted (via Ord SlotTy), except that at the head -- of the list we have the slot for the tag. ubxSumRepType :: [[PrimRep]] -> NonEmpty SlotTy ubxSumRepType constrs0 -- These first two cases never classify an actual unboxed sum, which always -- has at least two disjuncts. But it could happen if a user writes, e.g., -- forall (a :: TYPE (SumRep [IntRep])). ... 
-- which could never be instantiated. We still don't want to panic. | constrs0 `lengthLessThan` 2 = WordSlot :| [] | otherwise = let combine_alts :: [SortedSlotTys] -- slots of constructors -> SortedSlotTys -- final slots combine_alts constrs = foldl' merge [] constrs merge :: SortedSlotTys -> SortedSlotTys -> SortedSlotTys merge existing_slots [] = existing_slots merge [] needed_slots = needed_slots merge (es : ess) (s : ss) | Just s' <- s `fitsIn` es = -- found a slot, use it s' : merge ess ss | s < es = -- we need a new slot and this is the right place for it s : merge (es : ess) ss | otherwise = -- keep searching for a slot es : merge ess (s : ss) -- Nesting unboxed tuples and sums is OK, so we need to flatten first. rep :: [PrimRep] -> SortedSlotTys rep ty = sort (map primRepSlot ty) sumRep = WordSlot :| combine_alts (map rep constrs0) -- WordSlot: for the tag of the sum in sumRep layoutUbxSum :: HasDebugCallStack => SortedSlotTys -- Layout of sum. Does not include tag. -- We assume that they are in increasing order -> [SlotTy] -- Slot types of things we want to map to locations in the -- sum layout -> [Int] -- Where to map 'things' in the sum layout layoutUbxSum sum_slots0 arg_slots0 = go arg_slots0 IS.empty where go :: [SlotTy] -> IS.IntSet -> [Int] go [] _ = [] go (arg : args) used = let slot_idx = findSlot arg 0 sum_slots0 used in slot_idx : go args (IS.insert slot_idx used) findSlot :: SlotTy -> Int -> SortedSlotTys -> IS.IntSet -> Int findSlot arg slot_idx (slot : slots) useds | not (IS.member slot_idx useds) , Just slot == arg `fitsIn` slot = slot_idx | otherwise = findSlot arg (slot_idx + 1) slots useds findSlot _ _ [] _ = pprPanic "findSlot" (text "Can't find slot" $$ text "sum_slots:" <> ppr sum_slots0 $$ text "arg_slots:" <> ppr arg_slots0 ) -------------------------------------------------------------------------------- -- We have 3 kinds of slots: -- -- - Pointer slot: Only shared between actual pointers to Haskell heap (i.e. -- boxed objects). 
These come in two variants: Lifted and unlifted (see -- #19645). -- -- - Word slots: Shared between IntRep, WordRep, Int64Rep, Word64Rep, AddrRep. -- -- - Float slots: Shared between floating point types. -- -- - Void slots: Shared between void types. Not used in sums. -- -- TODO(michalt): We should probably introduce `SlotTy`s for 8-/16-/32-bit -- values, so that we can pack things more tightly. data SlotTy = PtrLiftedSlot | PtrUnliftedSlot | WordSlot | Word64Slot | FloatSlot | DoubleSlot | VecSlot Int PrimElemRep deriving (Eq, Ord) -- Constructor order is important! If slot A could fit into slot B -- then slot A must occur first. E.g. FloatSlot before DoubleSlot -- -- We are assuming that WordSlot is smaller than or equal to Word64Slot -- (would not be true on a 128-bit machine) instance Outputable SlotTy where ppr PtrLiftedSlot = text "PtrLiftedSlot" ppr PtrUnliftedSlot = text "PtrUnliftedSlot" ppr Word64Slot = text "Word64Slot" ppr WordSlot = text "WordSlot" ppr DoubleSlot = text "DoubleSlot" ppr FloatSlot = text "FloatSlot" ppr (VecSlot n e) = text "VecSlot" <+> ppr n <+> ppr e repSlotTy :: [PrimRep] -> Maybe SlotTy repSlotTy reps = case reps of [] -> Nothing [rep] -> Just (primRepSlot rep) _ -> pprPanic "repSlotTy" (ppr reps) primRepSlot :: PrimRep -> SlotTy primRepSlot (BoxedRep mlev) = case mlev of Nothing -> panic "primRepSlot: levity polymorphic BoxedRep" Just Lifted -> PtrLiftedSlot Just Unlifted -> PtrUnliftedSlot primRepSlot IntRep = WordSlot primRepSlot Int8Rep = WordSlot primRepSlot Int16Rep = WordSlot primRepSlot Int32Rep = WordSlot primRepSlot Int64Rep = Word64Slot primRepSlot WordRep = WordSlot primRepSlot Word8Rep = WordSlot primRepSlot Word16Rep = WordSlot primRepSlot Word32Rep = WordSlot primRepSlot Word64Rep = Word64Slot primRepSlot AddrRep = WordSlot primRepSlot FloatRep = FloatSlot primRepSlot DoubleRep = DoubleSlot primRepSlot (VecRep n e) = VecSlot n e slotPrimRep :: SlotTy -> PrimRep slotPrimRep PtrLiftedSlot = BoxedRep (Just Lifted) 
slotPrimRep PtrUnliftedSlot = BoxedRep (Just Unlifted) slotPrimRep Word64Slot = Word64Rep slotPrimRep WordSlot = WordRep slotPrimRep DoubleSlot = DoubleRep slotPrimRep FloatSlot = FloatRep slotPrimRep (VecSlot n e) = VecRep n e -- | Returns the bigger type if one fits into the other. (commutative) -- -- Note that lifted and unlifted pointers are *not* in a fits-in relation for -- the reasons described in Note [Don't merge lifted and unlifted slots] in -- GHC.Stg.Unarise. fitsIn :: SlotTy -> SlotTy -> Maybe SlotTy fitsIn ty1 ty2 | ty1 == ty2 = Just ty1 | isWordSlot ty1 && isWordSlot ty2 = Just (max ty1 ty2) | otherwise = Nothing -- We used to share slots between Float/Double but currently we can't easily -- covert between float/double in a way that is both work free and safe. -- So we put them in different slots. -- See Note [Casting slot arguments] where isWordSlot Word64Slot = True isWordSlot WordSlot = True isWordSlot _ = False {- ********************************************************************** * * PrimRep * * ************************************************************************* Note [RuntimeRep and PrimRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This Note describes the relationship between GHC.Types.RuntimeRep (of levity/representation polymorphism fame) and GHC.Core.TyCon.PrimRep, as these types are closely related. A "primitive entity" is one that can be * stored in one register * manipulated with one machine instruction Examples include: * a 32-bit integer * a 32-bit float * a 64-bit float * a machine address (heap pointer), etc. * a quad-float (on a machine with SIMD register and instructions) * ...etc... The "representation or a primitive entity" specifies what kind of register is needed and how many bits are required. The data type GHC.Core.TyCon.PrimRep enumerates all the possibilities. data PrimRep = LiftedRep -- ^ Lifted pointer | UnliftedRep -- ^ Unlifted pointer | Int8Rep -- ^ Signed, 8-bit value | Int16Rep -- ^ Signed, 16-bit value ...etc... 
| VecRep Int PrimElemRep -- ^ SIMD fixed-width vector The Haskell source language is a bit more flexible: a single value may need multiple PrimReps. For example utup :: (# Int, Int #) -> Bool utup x = ... Here x :: (# Int, Int #), and that takes two registers, and two instructions to move around. Unboxed sums are similar. Every Haskell expression e has a type ty, whose kind is of form TYPE rep e :: ty :: TYPE rep where rep :: RuntimeRep. Here rep describes the runtime representation for e's value, but RuntimeRep has some extra cases: data RuntimeRep = VecRep VecCount VecElem -- ^ a SIMD vector type | TupleRep [RuntimeRep] -- ^ An unboxed tuple of the given reps | SumRep [RuntimeRep] -- ^ An unboxed sum of the given reps | BoxedRep Levity -- ^ boxed; represented by a pointer | IntRep -- ^ signed, word-sized value ...etc... data Levity = Lifted | Unlifted It's all in 1-1 correspondence with PrimRep except for TupleRep and SumRep, which describe unboxed products and sums respectively. RuntimeRep is defined in the library ghc-prim:GHC.Types. It is also "wired-in" to GHC: see GHC.Builtin.Types.runtimeRepTyCon. The unarisation pass, in GHC.Stg.Unarise, transforms the program, so that every variable has a type that has a PrimRep. For example, unarisation transforms our utup function above, to take two Int arguments instead of one (# Int, Int #) argument. Also, note that boxed types are represented slightly differently in RuntimeRep and PrimRep. PrimRep just has the nullary LiftedRep and UnliftedRep data constructors. RuntimeRep has a BoxedRep data constructor, which accepts a Levity. The subtle distinction is that since BoxedRep can accept a variable argument, RuntimeRep can talk about levity polymorphic types. PrimRep, by contrast, cannot. See also Note [Getting from RuntimeRep to PrimRep] and Note [VoidRep]. Note [VoidRep] ~~~~~~~~~~~~~~ PrimRep is used to denote one primitive representation. 
Because of unboxed tuples and sums, the representation of a value in general is a list of PrimReps. (See also Note [RuntimeRep and PrimRep].) For example: typePrimRep Int# = [IntRep] typePrimRep Int = [LiftedRep] typePrimRep (# Int#, Int# #) = [IntRep,IntRep] typePrimRep (# #) = [] typePrimRep (State# s) = [] After the unariser, all identifiers have at most one PrimRep (that is, the [PrimRep] for each identifier is empty or a singleton list). More precisely: typePrimRep1 will succeed (not crash) on every binder and argument type. (See Note [Post-unarisation invariants] in GHC.Stg.Unarise.) Thus, we have 1. typePrimRep :: Type -> [PrimRep] which returns the list 2. typePrimRepU :: Type -> PrimRep which asserts that the type has exactly one PrimRep and returns it 3. typePrimRep1 :: Type -> PrimOrVoidRep data PrimOrVoidRep = VoidRep | NVRep PrimRep which asserts that the type either has exactly one PrimRep or is void. Likewise, we have idPrimRepU and idPrimRep1, stgArgRepU and stgArgRep1, which have analogous preconditions. Note [Getting from RuntimeRep to PrimRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ General info on RuntimeRep and PrimRep is in Note [RuntimeRep and PrimRep]. How do we get from an Id to the list or PrimReps used to store it? We get the Id's type ty (using idType), then ty's kind ki (using typeKind), then pattern-match on ki to extract rep (in kindPrimRep), then extract the PrimRep from the RuntimeRep (in runtimeRepPrimRep). We now must convert the RuntimeRep to a list of PrimReps. Let's look at two examples: 1. x :: Int# 2. y :: (# Int, Word# #) With these types, we can extract these kinds: 1. Int# :: TYPE IntRep 2. (# Int, Word# #) :: TYPE (TupleRep [LiftedRep, WordRep]) In the end, we will get these PrimReps: 1. [IntRep] 2. [LiftedRep, WordRep] It would thus seem that we should have a function somewhere of type `RuntimeRep -> [PrimRep]`. This doesn't work though: when we look at the argument of TYPE, we get something of type Type (of course). 
RuntimeRep exists in the user's program, but not in GHC as such. Instead, we must decompose the Type of kind RuntimeRep into tycons and extract the PrimReps from the TyCons. This is what runtimeRepPrimRep does: it takes a Type and returns a [PrimRep] runtimeRepPrimRep works by using tyConRuntimeRepInfo. That function should be passed the TyCon produced by promoting one of the constructors of RuntimeRep into type-level data. The RuntimeRep promoted datacons are associated with a RuntimeRepInfo (stored directly in the PromotedDataCon constructor of TyCon, field promDcRepInfo). This pairing happens in GHC.Builtin.Types. A RuntimeRepInfo usually(*) contains a function from [Type] to [PrimRep]: the [Type] are the arguments to the promoted datacon. These arguments are necessary for the TupleRep and SumRep constructors, so that this process can recur, producing a flattened list of PrimReps. Calling this extracted function happens in runtimeRepPrimRep; the functions themselves are defined in tupleRepDataCon and sumRepDataCon, both in GHC.Builtin.Types. The (*) above is to support vector representations. RuntimeRep refers to VecCount and VecElem, whose promoted datacons have nuggets of information related to vectors; these form the other alternatives for RuntimeRepInfo. Returning to our examples, the Types we get (after stripping off TYPE) are 1. TyConApp (PromotedDataCon "IntRep") [] 2. TyConApp (PromotedDataCon "TupleRep") [TyConApp (PromotedDataCon ":") [ TyConApp (AlgTyCon "RuntimeRep") [] , TyConApp (PromotedDataCon "LiftedRep") [] , TyConApp (PromotedDataCon ":") [ TyConApp (AlgTyCon "RuntimeRep") [] , TyConApp (PromotedDataCon "WordRep") [] , TyConApp (PromotedDataCon "'[]") [TyConApp (AlgTyCon "RuntimeRep") []]]]] runtimeRepPrimRep calls tyConRuntimeRepInfo on (PromotedDataCon "IntRep"), resp. (PromotedDataCon "TupleRep"), extracting a function that will produce the PrimReps. 
In example 1, this function is passed an empty list (the empty list of args to IntRep) and returns the PrimRep IntRep. (See the definition of runtimeRepSimpleDataCons in GHC.Builtin.Types and its helper function mk_runtime_rep_dc.) Example 2 passes the promoted list as the one argument to the extracted function. The extracted function is defined as prim_rep_fun within tupleRepDataCon in GHC.Builtin.Types. It takes one argument, decomposes the promoted list (with extractPromotedList), and then recurses back to runtimeRepPrimRep to process the LiftedRep and WordRep, concatenating the results. -} -- | Discovers the primitive representation of a 'Type'. Returns -- a list of 'PrimRep': it's a list because of the possibility of -- no runtime representation (void) or multiple (unboxed tuple/sum) -- See also Note [Getting from RuntimeRep to PrimRep] typePrimRep :: HasDebugCallStack => Type -> [PrimRep] typePrimRep ty = kindPrimRep (text "typePrimRep" <+> parens (ppr ty <+> dcolon <+> ppr (typeKind ty))) (typeKind ty) -- | Discovers the primitive representation of a 'Type'. Returns -- a list of 'PrimRep': it's a list because of the possibility of -- no runtime representation (void) or multiple (unboxed tuple/sum) -- See also Note [Getting from RuntimeRep to PrimRep] -- Returns Nothing if rep can't be determined. Eg. levity polymorphic types. typePrimRep_maybe :: Type -> Maybe [PrimRep] typePrimRep_maybe ty = kindPrimRep_maybe (typeKind ty) -- | Like 'typePrimRep', but assumes that there is at most one 'PrimRep' output; -- an empty list of PrimReps becomes a VoidRep. -- This assumption holds after unarise, see Note [Post-unarisation invariants]. -- Before unarise it may or may not hold. 
-- See also Note [RuntimeRep and PrimRep] and Note [VoidRep] typePrimRep1 :: HasDebugCallStack => UnaryType -> PrimOrVoidRep typePrimRep1 ty = case typePrimRep ty of [] -> VoidRep [rep] -> NVRep rep _ -> pprPanic "typePrimRep1" (ppr ty $$ ppr (typePrimRep ty)) typePrimRepU :: HasDebugCallStack => NvUnaryType -> PrimRep typePrimRepU ty = case typePrimRep ty of [rep] -> rep _ -> pprPanic "typePrimRepU" (ppr ty $$ ppr (typePrimRep ty)) -- | Find the runtime representation of a 'TyCon'. Defined here to -- avoid module loops. Returns a list of the register shapes necessary. -- See also Note [Getting from RuntimeRep to PrimRep] tyConPrimRep :: HasDebugCallStack => TyCon -> [PrimRep] tyConPrimRep tc = kindPrimRep (text "kindRep tc" <+> ppr tc $$ ppr res_kind) res_kind where res_kind = tyConResKind tc -- | Take a kind (of shape @TYPE rr@) and produce the 'PrimRep's -- of values of types of this kind. -- See also Note [Getting from RuntimeRep to PrimRep] kindPrimRep :: HasDebugCallStack => SDoc -> Kind -> [PrimRep] kindPrimRep doc ki | Just runtime_rep <- kindRep_maybe ki = runtimeRepPrimRep doc runtime_rep kindPrimRep doc ki = pprPanic "kindPrimRep" (ppr ki $$ doc) -- NB: We could implement the partial methods by calling into the maybe -- variants here. But then both would need to pass around the doc argument. -- | Take a kind (of shape `TYPE rr` or `CONSTRAINT rr`) and produce the 'PrimRep's -- of values of types of this kind. -- See also Note [Getting from RuntimeRep to PrimRep] -- Returns Nothing if rep can't be determined. Eg. levity polymorphic types. kindPrimRep_maybe :: HasDebugCallStack => Kind -> Maybe [PrimRep] kindPrimRep_maybe ki | Just (_torc, rep) <- sORTKind_maybe ki = runtimeRepPrimRep_maybe rep | otherwise = pprPanic "kindPrimRep" (ppr ki) -- | Take a type of kind RuntimeRep and extract the list of 'PrimRep' that -- it encodes. See also Note [Getting from RuntimeRep to PrimRep]. -- The @[PrimRep]@ is the final runtime representation /after/ unarisation. 
runtimeRepPrimRep :: HasDebugCallStack => SDoc -> RuntimeRepType -> [PrimRep] runtimeRepPrimRep doc rr_ty | Just rr_ty' <- coreView rr_ty = runtimeRepPrimRep doc rr_ty' | TyConApp rr_dc args <- rr_ty , RuntimeRep fun <- tyConPromDataConInfo rr_dc = fun args | otherwise = pprPanic "runtimeRepPrimRep" (doc $$ ppr rr_ty) -- | Take a type of kind RuntimeRep and extract the list of 'PrimRep' that -- it encodes. See also Note [Getting from RuntimeRep to PrimRep]. -- The @[PrimRep]@ is the final runtime representation /after/ unarisation. -- -- Returns @Nothing@ if rep can't be determined. Eg. levity polymorphic types. runtimeRepPrimRep_maybe :: Type -> Maybe [PrimRep] runtimeRepPrimRep_maybe rr_ty | Just rr_ty' <- coreView rr_ty = runtimeRepPrimRep_maybe rr_ty' | TyConApp rr_dc args <- rr_ty , RuntimeRep fun <- tyConPromDataConInfo rr_dc = Just $! fun args | otherwise = Nothing -- | Convert a 'PrimRep' to a 'Type' of kind RuntimeRep primRepToRuntimeRep :: PrimRep -> RuntimeRepType primRepToRuntimeRep rep = case rep of BoxedRep mlev -> case mlev of Nothing -> panic "primRepToRuntimeRep: levity polymorphic BoxedRep" Just Lifted -> liftedRepTy Just Unlifted -> unliftedRepTy IntRep -> intRepDataConTy Int8Rep -> int8RepDataConTy Int16Rep -> int16RepDataConTy Int32Rep -> int32RepDataConTy Int64Rep -> int64RepDataConTy WordRep -> wordRepDataConTy Word8Rep -> word8RepDataConTy Word16Rep -> word16RepDataConTy Word32Rep -> word32RepDataConTy Word64Rep -> word64RepDataConTy AddrRep -> addrRepDataConTy FloatRep -> floatRepDataConTy DoubleRep -> doubleRepDataConTy VecRep n elem -> TyConApp vecRepDataConTyCon [n', elem'] where n' = case n of 2 -> vec2DataConTy 4 -> vec4DataConTy 8 -> vec8DataConTy 16 -> vec16DataConTy 32 -> vec32DataConTy 64 -> vec64DataConTy _ -> pprPanic "Disallowed VecCount" (ppr n) elem' = case elem of Int8ElemRep -> int8ElemRepDataConTy Int16ElemRep -> int16ElemRepDataConTy Int32ElemRep -> int32ElemRepDataConTy Int64ElemRep -> int64ElemRepDataConTy Word8ElemRep 
-> word8ElemRepDataConTy Word16ElemRep -> word16ElemRepDataConTy Word32ElemRep -> word32ElemRepDataConTy Word64ElemRep -> word64ElemRepDataConTy FloatElemRep -> floatElemRepDataConTy DoubleElemRep -> doubleElemRepDataConTy -- | Convert a PrimRep back to a Type. Used only in the unariser to give types -- to fresh Ids. Really, only the type's representation matters. -- See also Note [RuntimeRep and PrimRep] primRepToType :: PrimRep -> Type primRepToType = anyTypeOfKind . mkTYPEapp . primRepToRuntimeRep -------------- mightBeFunTy :: Type -> Bool -- Return False only if we are *sure* it's a data type -- Look through newtypes etc as much as possible. Used to -- decide if we need to enter a closure via a slow call. -- -- AK: It would be nice to figure out and document the difference -- between this and isFunTy at some point. mightBeFunTy ty -- Currently ghc has no unlifted functions. | definitelyUnliftedType ty = False | [BoxedRep _] <- typePrimRep ty , Just tc <- tyConAppTyCon_maybe (unwrapType ty) , isDataTyCon tc = False | otherwise = True ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SafeHaskell.hs0000644000000000000000000000517207346545000021535 0ustar0000000000000000-- | This stuff here is related to supporting the Safe Haskell extension, -- primarily about storing under what trust type a module has been compiled. module GHC.Types.SafeHaskell ( IsSafeImport , SafeHaskellMode(..) , IfaceTrustInfo , getSafeMode , setSafeMode , noIfaceTrustInfo ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Utils.Outputable import Data.Word -- | Is an import a safe import? 
type IsSafeImport = Bool -- | The various Safe Haskell modes data SafeHaskellMode = Sf_None -- ^ inferred unsafe | Sf_Unsafe -- ^ declared and checked | Sf_Trustworthy -- ^ declared and checked | Sf_Safe -- ^ declared and checked | Sf_SafeInferred -- ^ inferred as safe | Sf_Ignore -- ^ @-fno-safe-haskell@ state deriving (Eq) instance Show SafeHaskellMode where show Sf_None = "None" show Sf_Unsafe = "Unsafe" show Sf_Trustworthy = "Trustworthy" show Sf_Safe = "Safe" show Sf_SafeInferred = "Safe-Inferred" show Sf_Ignore = "Ignore" instance Outputable SafeHaskellMode where ppr = text . show -- | Safe Haskell information for 'ModIface' -- Simply a wrapper around SafeHaskellMode to separate iface and flags newtype IfaceTrustInfo = TrustInfo SafeHaskellMode getSafeMode :: IfaceTrustInfo -> SafeHaskellMode getSafeMode (TrustInfo x) = x setSafeMode :: SafeHaskellMode -> IfaceTrustInfo setSafeMode = TrustInfo noIfaceTrustInfo :: IfaceTrustInfo noIfaceTrustInfo = setSafeMode Sf_None trustInfoToNum :: IfaceTrustInfo -> Word8 trustInfoToNum it = case getSafeMode it of Sf_None -> 0 Sf_Unsafe -> 1 Sf_Trustworthy -> 2 Sf_Safe -> 3 Sf_SafeInferred -> 4 Sf_Ignore -> 0 numToTrustInfo :: Word8 -> IfaceTrustInfo numToTrustInfo 0 = setSafeMode Sf_None numToTrustInfo 1 = setSafeMode Sf_Unsafe numToTrustInfo 2 = setSafeMode Sf_Trustworthy numToTrustInfo 3 = setSafeMode Sf_Safe numToTrustInfo 4 = setSafeMode Sf_SafeInferred numToTrustInfo n = error $ "numToTrustInfo: bad input number! (" ++ show n ++ ")" instance Outputable IfaceTrustInfo where ppr (TrustInfo Sf_None) = text "none" ppr (TrustInfo Sf_Ignore) = text "none" ppr (TrustInfo Sf_Unsafe) = text "unsafe" ppr (TrustInfo Sf_Trustworthy) = text "trustworthy" ppr (TrustInfo Sf_Safe) = text "safe" ppr (TrustInfo Sf_SafeInferred) = text "safe-inferred" instance Binary IfaceTrustInfo where put_ bh iftrust = putByte bh $ trustInfoToNum iftrust get bh = getByte bh >>= (return . 
numToTrustInfo) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SaneDouble.hs0000644000000000000000000000306107346545000021367 0ustar0000000000000000{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} -- | Double datatype with saner instances module GHC.Types.SaneDouble ( SaneDouble (..) ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Float (castDoubleToWord64, castWord64ToDouble) -- | A newtype wrapper around 'Double' to ensure we never generate a 'Double' -- that becomes a 'NaN', see instances for details on sanity. newtype SaneDouble = SaneDouble { unSaneDouble :: Double } deriving (Fractional, Num) instance Eq SaneDouble where (SaneDouble x) == (SaneDouble y) = x == y || (isNaN x && isNaN y) instance Ord SaneDouble where compare (SaneDouble x) (SaneDouble y) = compare (fromNaN x) (fromNaN y) where fromNaN z | isNaN z = Nothing | otherwise = Just z instance Show SaneDouble where show (SaneDouble x) = show x -- we need to preserve NaN and infinities, unfortunately the Binary instance for -- Double does not do this instance Binary SaneDouble where put_ bh (SaneDouble d) | isNaN d = putByte bh 1 | isInfinite d && d > 0 = putByte bh 2 | isInfinite d && d < 0 = putByte bh 3 | isNegativeZero d = putByte bh 4 | otherwise = putByte bh 5 >> put_ bh (castDoubleToWord64 d) get bh = getByte bh >>= \case 1 -> pure $ SaneDouble (0 / 0) 2 -> pure $ SaneDouble (1 / 0) 3 -> pure $ SaneDouble ((-1) / 0) 4 -> pure $ SaneDouble (-0) 5 -> SaneDouble . castWord64ToDouble <$> get bh n -> error ("Binary get bh SaneDouble: invalid tag: " ++ show n) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SourceError.hs0000644000000000000000000000506307346545000021624 0ustar0000000000000000-- | Source errors module GHC.Types.SourceError ( SourceError (..) 
, mkSrcErr , srcErrorMessages , throwErrors , throwOneError , handleSourceError ) where import GHC.Prelude import GHC.Types.Error import GHC.Utils.Monad import GHC.Utils.Panic import GHC.Utils.Exception import GHC.Utils.Error (pprMsgEnvelopeBagWithLocDefault) import GHC.Utils.Outputable import GHC.Driver.Errors.Ppr () -- instance Diagnostic GhcMessage import GHC.Driver.Errors.Types import Control.Monad.Catch as MC (MonadCatch, catch) mkSrcErr :: Messages GhcMessage -> SourceError mkSrcErr = SourceError srcErrorMessages :: SourceError -> Messages GhcMessage srcErrorMessages (SourceError msgs) = msgs throwErrors :: MonadIO io => Messages GhcMessage -> io a throwErrors = liftIO . throwIO . mkSrcErr throwOneError :: MonadIO io => MsgEnvelope GhcMessage -> io a throwOneError = throwErrors . singleMessage -- | A source error is an error that is caused by one or more errors in the -- source code. A 'SourceError' is thrown by many functions in the -- compilation pipeline. Inside GHC these errors are merely printed via -- 'log_action', but API clients may treat them differently, for example, -- insert them into a list box. If you want the default behaviour, use the -- idiom: -- -- > handleSourceError printExceptionAndWarnings $ do -- > ... api calls that may fail ... -- -- The 'SourceError's error messages can be accessed via 'srcErrorMessages'. -- This list may be empty if the compiler failed due to @-Werror@ -- ('Opt_WarnIsError'). -- -- See 'printExceptionAndWarnings' for more information on what to take care -- of when writing a custom error handler. newtype SourceError = SourceError (Messages GhcMessage) instance Show SourceError where -- We implement 'Show' because it's required by the 'Exception' instance, but diagnostics -- shouldn't be shown via the 'Show' typeclass, but rather rendered using the ppr functions. -- This also explains why there is no 'Show' instance for a 'MsgEnvelope'. show (SourceError msgs) = renderWithContext defaultSDocContext . vcat . 
pprMsgEnvelopeBagWithLocDefault . getMessages $ msgs instance Exception SourceError -- | Perform the given action and call the exception handler if the action -- throws a 'SourceError'. See 'SourceError' for more information. handleSourceError :: (MonadCatch m) => (SourceError -> m a) -- ^ exception handler -> m a -- ^ action to perform -> m a handleSourceError handler act = MC.catch act (\(e :: SourceError) -> handler e) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SourceFile.hs0000644000000000000000000000641107346545000021410 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} module GHC.Types.SourceFile ( HscSource(HsBootFile, HsigFile, ..) , HsBootOrSig(..) , hscSourceToIsBoot , isHsBootOrSig , isHsBootFile, isHsigFile , hscSourceString ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Unit.Types {- Note [HscSource types] ~~~~~~~~~~~~~~~~~~~~~~~~~ There are three types of source file for Haskell code: * HsSrcFile is an ordinary hs file which contains code, * HsBootFile is an hs-boot file, which is used to break recursive module imports (there will always be an HsSrcFile associated with it), and * HsigFile is an hsig file, which contains only type signatures and is used to specify signatures for modules. Syntactically, hs-boot files and hsig files are quite similar: they only include type signatures and must be associated with an actual HsSrcFile. isHsBootOrSig allows us to abstract over code which is indifferent to which. However, there are some important differences, mostly owing to the fact that hsigs are proper modules (you `import Sig` directly) whereas HsBootFiles are temporary placeholders (you `import {-# SOURCE #-} Mod). When we finish compiling the true implementation of an hs-boot, we replace the HomeModInfo with the real HsSrcFile. An HsigFile, on the other hand, is never replaced (in particular, we *cannot* use the HomeModInfo of the original HsSrcFile backing the signature, since it will export too many symbols.) 
Additionally, while HsSrcFile is the only Haskell file which has *code*, we do generate .o files for HsigFile, because this is how the recompilation checker figures out if a file needs to be recompiled. These are fake object files which should NOT be linked against. -} data HsBootOrSig = HsBoot -- ^ .hs-boot file | Hsig -- ^ .hsig file deriving (Eq, Ord, Show) data HscSource -- | .hs file = HsSrcFile -- | .hs-boot or .hsig file | HsBootOrSig !HsBootOrSig deriving (Eq, Ord, Show) {-# COMPLETE HsSrcFile, HsBootFile, HsigFile #-} pattern HsBootFile, HsigFile :: HscSource pattern HsBootFile = HsBootOrSig HsBoot pattern HsigFile = HsBootOrSig Hsig -- | Tests if an 'HscSource' is a boot file, primarily for constructing elements -- of 'BuildModule'. We conflate signatures and modules because they are bound -- in the same namespace; only boot interfaces can be disambiguated with -- `import {-# SOURCE #-}`. hscSourceToIsBoot :: HscSource -> IsBootInterface hscSourceToIsBoot HsBootFile = IsBoot hscSourceToIsBoot _ = NotBoot instance Binary HscSource where put_ bh HsSrcFile = putByte bh 0 put_ bh HsBootFile = putByte bh 1 put_ bh HsigFile = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return HsSrcFile 1 -> return HsBootFile _ -> return HsigFile hscSourceString :: HscSource -> String hscSourceString HsSrcFile = "" hscSourceString HsBootFile = "[boot]" hscSourceString HsigFile = "[sig]" -- See Note [HscSource types] isHsBootOrSig :: HscSource -> Bool isHsBootOrSig (HsBootOrSig _) = True isHsBootOrSig HsSrcFile = False isHsBootFile :: HscSource -> Bool isHsBootFile HsBootFile = True isHsBootFile _ = False isHsigFile :: HscSource -> Bool isHsigFile HsigFile = True isHsigFile _ = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SourceText.hs0000644000000000000000000002675107346545000021466 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RecordWildCards #-} -- | Source text -- -- 
Keeping Source Text for source to source conversions -- module GHC.Types.SourceText ( SourceText (..) , pprWithSourceText -- * Literals , IntegralLit(..) , FractionalLit(..) , StringLiteral(..) , negateIntegralLit , negateFractionalLit , mkIntegralLit , mkTHFractionalLit, rationalFromFractionalLit , integralFractionalLit, mkSourceFractionalLit , FractionalExponentBase(..) -- Used by the pm checker. , fractionalLitFromRational , mkFractionalLit ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Utils.Panic import Data.Function (on) import Data.Data import GHC.Real ( Ratio(..) ) import GHC.Types.SrcLoc import Control.DeepSeq {- Note [Pragma source text] ~~~~~~~~~~~~~~~~~~~~~~~~~ The lexer does a case-insensitive match for pragmas, as well as accepting both UK and US spelling variants. So {-# SPECIALISE #-} {-# SPECIALIZE #-} {-# Specialize #-} will all generate ITspec_prag token for the start of the pragma. In order to be able to do source to source conversions, the original source text for the token needs to be preserved, hence the `SourceText` field. So the lexer will then generate ITspec_prag "{ -# SPECIALISE" ITspec_prag "{ -# SPECIALIZE" ITspec_prag "{ -# Specialize" for the cases above. [without the space between '{' and '-', otherwise this comment won't parse] Note [Literal source text] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The lexer/parser converts literals from their original source text versions to an appropriate internal representation. This is a problem for tools doing source to source conversions, so the original source text is stored in literals where this can occur. 
Motivating examples for HsLit HsChar '\n' == '\x20' HsCharPrim '\x41'# == 'A'# HsString "\x20\x41" == " A" HsStringPrim "\x20"# == " "# HsInt 001 == 1 HsIntPrim 002# == 2# HsWordPrim 003## == 3## HsInt64Prim 004#Int64 == 4#Int64 HsWord64Prim 005#Word64 == 5#Word64 HsInteger 006 == 6 For OverLitVal HsIntegral 003 == 0x003 HsIsString "\x41nd" == "And" -} -- Note [Literal source text],[Pragma source text] data SourceText = SourceText FastString | NoSourceText -- ^ For when code is generated, e.g. TH, -- deriving. The pretty printer will then make -- its own representation of the item. deriving (Data, Show, Eq ) instance Outputable SourceText where ppr (SourceText s) = text "SourceText" <+> ftext s ppr NoSourceText = text "NoSourceText" instance NFData SourceText where rnf = \case SourceText s -> rnf s NoSourceText -> () instance Binary SourceText where put_ bh NoSourceText = putByte bh 0 put_ bh (SourceText s) = do putByte bh 1 put_ bh s get bh = do h <- getByte bh case h of 0 -> return NoSourceText 1 -> do s <- get bh return (SourceText s) _ -> panic $ "Binary SourceText:" ++ show h -- | Special combinator for showing string literals. pprWithSourceText :: SourceText -> SDoc -> SDoc pprWithSourceText NoSourceText d = d pprWithSourceText (SourceText src) _ = ftext src ------------------------------------------------ -- Literals ------------------------------------------------ -- | Integral Literal -- -- Used (instead of Integer) to represent negative zegative zero which is -- required for NegativeLiterals extension to correctly parse `-0::Double` -- as negative zero. See also #13211. 
data IntegralLit = IL { il_text :: SourceText , il_neg :: Bool -- See Note [Negative zero] in GHC.Rename.Pat , il_value :: Integer } deriving (Data, Show) mkIntegralLit :: Integral a => a -> IntegralLit mkIntegralLit i = IL { il_text = SourceText (fsLit $ show i_integer) , il_neg = i < 0 , il_value = i_integer } where i_integer :: Integer i_integer = toInteger i negateIntegralLit :: IntegralLit -> IntegralLit negateIntegralLit (IL text neg value) = case text of SourceText (unconsFS -> Just ('-',src)) -> IL (SourceText src) False (negate value) SourceText src -> IL (SourceText ('-' `consFS` src)) True (negate value) NoSourceText -> IL NoSourceText (not neg) (negate value) -- | Fractional Literal -- -- Used (instead of Rational) to represent exactly the floating point literal that we -- encountered in the user's source program. This allows us to pretty-print exactly what -- the user wrote, which is important e.g. for floating point numbers that can't represented -- as Doubles (we used to via Double for pretty-printing). See also #2245. 
-- Note [FractionalLit representation] in GHC.HsToCore.Match.Literal -- The actual value then is: sign * fl_signi * (fl_exp_base^fl_exp) -- where sign = if fl_neg then (-1) else 1 -- -- For example FL { fl_neg = True, fl_signi = 5.3, fl_exp = 4, fl_exp_base = Base10 } -- denotes -5300 data FractionalLit = FL { fl_text :: SourceText -- ^ How the value was written in the source , fl_neg :: Bool -- See Note [Negative zero] , fl_signi :: Rational -- The significand component of the literal , fl_exp :: Integer -- The exponent component of the literal , fl_exp_base :: FractionalExponentBase -- See Note [fractional exponent bases] } deriving (Data, Show) -- The Show instance is required for the derived GHC.Parser.Lexer.Token instance when DEBUG is on -- See Note [FractionalLit representation] in GHC.HsToCore.Match.Literal data FractionalExponentBase = Base2 -- Used in hex fractional literals | Base10 deriving (Eq, Ord, Data, Show) mkFractionalLit :: SourceText -> Bool -> Rational -> Integer -> FractionalExponentBase -> FractionalLit mkFractionalLit = FL mkRationalWithExponentBase :: Rational -> Integer -> FractionalExponentBase -> Rational mkRationalWithExponentBase i e feb = i * (eb ^^ e) where eb = case feb of Base2 -> 2 ; Base10 -> 10 fractionalLitFromRational :: Rational -> FractionalLit fractionalLitFromRational r = FL { fl_text = NoSourceText , fl_neg = r < 0 , fl_signi = r , fl_exp = 0 , fl_exp_base = Base10 } rationalFromFractionalLit :: FractionalLit -> Rational rationalFromFractionalLit (FL _ _ i e expBase) = mkRationalWithExponentBase i e expBase mkTHFractionalLit :: Rational -> FractionalLit mkTHFractionalLit r = FL { fl_text = SourceText (fsLit $ show (realToFrac r::Double)) -- Converting to a Double here may technically lose -- precision (see #15502). We could alternatively -- convert to a Rational for the most accuracy, but -- it would cause Floats and Doubles to be displayed -- strangely, so we opt not to do this. 
(In contrast -- to mkIntegralLit, where we always convert to an -- Integer for the highest accuracy.) , fl_neg = r < 0 , fl_signi = r , fl_exp = 0 , fl_exp_base = Base10 } negateFractionalLit :: FractionalLit -> FractionalLit negateFractionalLit (FL text neg i e eb) = case text of SourceText (unconsFS -> Just ('-',src)) -> FL (SourceText src) False (negate i) e eb SourceText src -> FL (SourceText ('-' `consFS` src)) True (negate i) e eb NoSourceText -> FL NoSourceText (not neg) (negate i) e eb -- | The integer should already be negated if it's negative. integralFractionalLit :: Bool -> Integer -> FractionalLit integralFractionalLit neg i = FL { fl_text = SourceText (fsLit $ show i) , fl_neg = neg , fl_signi = i :% 1 , fl_exp = 0 , fl_exp_base = Base10 } -- | The arguments should already be negated if they are negative. mkSourceFractionalLit :: String -> Bool -> Integer -> Integer -> FractionalExponentBase -> FractionalLit mkSourceFractionalLit !str !b !r !i !ff = FL (SourceText $ fsLit str) b (r :% 1) i ff {- Note [fractional exponent bases] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For hexadecimal rationals of the form 0x0.3p10 the exponent is given on base 2 rather than base 10. These are the only options, hence the sum type. See also #15646. -} -- Comparison operations are needed when grouping literals -- for compiling pattern-matching (module GHC.HsToCore.Match.Literal) instance Eq IntegralLit where (==) = (==) `on` il_value instance Ord IntegralLit where compare = compare `on` il_value instance Outputable IntegralLit where ppr (IL (SourceText src) _ _) = ftext src ppr (IL NoSourceText _ value) = text (show value) -- | Compare fractional lits with small exponents for value equality but -- large values for syntactic equality. 
compareFractionalLit :: FractionalLit -> FractionalLit -> Ordering compareFractionalLit fl1 fl2 | fl_exp fl1 < 100 && fl_exp fl2 < 100 && fl_exp fl1 >= -100 && fl_exp fl2 >= -100 = rationalFromFractionalLit fl1 `compare` rationalFromFractionalLit fl2 | otherwise = (compare `on` (\x -> (fl_signi x, fl_exp x, fl_exp_base x))) fl1 fl2 -- | Be wary of using this instance to compare for equal *values* when exponents are -- large. The same value expressed in different syntactic form won't compare as equal when -- any of the exponents is >= 100. instance Eq FractionalLit where (==) fl1 fl2 = case compare fl1 fl2 of EQ -> True _ -> False -- | Be wary of using this instance to compare for equal *values* when exponents are -- large. The same value expressed in different syntactic form won't compare as equal when -- any of the exponents is >= 100. instance Ord FractionalLit where compare = compareFractionalLit instance Outputable FractionalLit where ppr (fl@(FL {})) = pprWithSourceText (fl_text fl) $ rational $ mkRationalWithExponentBase (fl_signi fl) (fl_exp fl) (fl_exp_base fl) -- | A String Literal in the source, including its original raw format for use by -- source to source manipulation tools. data StringLiteral = StringLiteral { sl_st :: SourceText, -- literal raw source. -- See Note [Literal source text] sl_fs :: FastString, -- literal string value sl_tc :: Maybe NoCommentsLocation -- Location of -- possible -- trailing comma -- AZ: if we could have a LocatedA -- StringLiteral we would not need sl_tc, but -- that would cause import loops. } deriving Data instance Eq StringLiteral where (StringLiteral _ a _) == (StringLiteral _ b _) = a == b instance Outputable StringLiteral where ppr sl = pprWithSourceText (sl_st sl) (doubleQuotes $ ftext $ sl_fs sl) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SptEntry.hs0000644000000000000000000000067507346545000021146 0ustar0000000000000000module GHC.Types.SptEntry ( SptEntry(..) 
) where import GHC.Types.Var ( Id ) import GHC.Fingerprint.Type ( Fingerprint ) import GHC.Utils.Outputable -- | An entry to be inserted into a module's static pointer table. -- See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable". data SptEntry = SptEntry Id Fingerprint instance Outputable SptEntry where ppr (SptEntry id fpr) = ppr id <> colon <+> ppr fpr ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/SrcLoc.hs0000644000000000000000000011050507346545000020535 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeFamilies #-} -- (c) The University of Glasgow, 1992-2006 -- | This module contains types that relate to the positions of things -- in source files, and allow tagging of those things with locations module GHC.Types.SrcLoc ( -- * SrcLoc RealSrcLoc, -- Abstract SrcLoc(..), -- ** Constructing SrcLoc mkSrcLoc, mkRealSrcLoc, mkGeneralSrcLoc, leftmostColumn, noSrcLoc, -- "I'm sorry, I haven't a clue" generatedSrcLoc, -- Code generated within the compiler interactiveSrcLoc, -- Code from an interactive session advanceSrcLoc, advanceBufPos, -- ** Unsafely deconstructing SrcLoc -- These are dubious exports, because they crash on some inputs srcLocFile, -- return the file name part srcLocLine, -- return the line part srcLocCol, -- return the column part -- * SrcSpan RealSrcSpan, -- Abstract SrcSpan(..), UnhelpfulSpanReason(..), -- ** Constructing SrcSpan mkGeneralSrcSpan, mkSrcSpan, mkRealSrcSpan, noSrcSpan, generatedSrcSpan, isGeneratedSrcSpan, wiredInSrcSpan, -- Something wired into the compiler interactiveSrcSpan, srcLocSpan, realSrcLocSpan, combineSrcSpans, srcSpanFirstCharacter, -- ** Deconstructing SrcSpan srcSpanStart, srcSpanEnd, realSrcSpanStart, realSrcSpanEnd, srcSpanFileName_maybe, pprUserRealSpan, pprUnhelpfulSpanReason, pprUserSpan, unhelpfulSpanFS, srcSpanToRealSrcSpan, -- 
** Unsafely deconstructing SrcSpan -- These are dubious exports, because they crash on some inputs srcSpanFile, srcSpanStartLine, srcSpanEndLine, srcSpanStartCol, srcSpanEndCol, -- ** Predicates on SrcSpan isGoodSrcSpan, isOneLineSpan, isZeroWidthSpan, containsSpan, isNoSrcSpan, -- ** Predicates on RealSrcSpan isPointRealSpan, -- * StringBuffer locations BufPos(..), getBufPos, BufSpan(..), getBufSpan, removeBufSpan, combineBufSpans, -- * Located Located, RealLocated, GenLocated(..), -- ** Constructing Located noLoc, mkGeneralLocated, -- ** Deconstructing Located getLoc, unLoc, unRealSrcSpan, getRealSrcSpan, pprLocated, pprLocatedAlways, -- ** Combining and comparing Located values eqLocated, cmpLocated, cmpBufSpan, combineLocs, addCLoc, leftmost_smallest, leftmost_largest, rightmost_smallest, spans, isSubspanOf, isRealSubspanOf, sortLocated, sortRealLocated, lookupSrcLoc, lookupSrcSpan, -- * Parser locations PsLoc(..), PsSpan(..), PsLocated, advancePsLoc, mkPsSpan, psSpanStart, psSpanEnd, mkSrcSpanPs, combineRealSrcSpans, psLocatedToLocated, -- * Exact print locations EpaLocation'(..), NoCommentsLocation, NoComments(..), DeltaPos(..), deltaPos, getDeltaLine, ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Utils.Json import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import qualified GHC.Data.Strict as Strict import Control.DeepSeq import Data.Data import Data.List (sortBy, intercalate) import Data.Function (on) import qualified Data.Map as Map import qualified Data.Semigroup as S {- ************************************************************************ * * \subsection[SrcLoc-SrcLocations]{Source-location information} * * ************************************************************************ We keep information about the {\em definition} point for each entity; this is the obvious stuff: -} -- | Real Source Location -- -- Represents a single point within a file data RealSrcLoc = SrcLoc LexicalFastString -- A precise location 
(file name) {-# UNPACK #-} !Int -- line number, begins at 1 {-# UNPACK #-} !Int -- column number, begins at 1 deriving (Eq, Ord) -- | 0-based offset identifying the raw location in the 'StringBuffer'. -- -- The lexer increments the 'BufPos' every time a character (UTF-8 code point) -- is read from the input buffer. As UTF-8 is a variable-length encoding and -- 'StringBuffer' needs a byte offset for indexing, a 'BufPos' cannot be used -- for indexing. -- -- The parser guarantees that 'BufPos' are monotonic. See #17632. This means -- that syntactic constructs that appear later in the 'StringBuffer' are guaranteed to -- have a higher 'BufPos'. Contrast that with 'RealSrcLoc', which does *not* make the -- analogous guarantee about higher line/column numbers. -- -- This is due to #line and {-# LINE ... #-} pragmas that can arbitrarily -- modify 'RealSrcLoc'. Notice how 'setSrcLoc' and 'resetAlrLastLoc' in -- "GHC.Parser.Lexer" update 'PsLoc', modifying 'RealSrcLoc' but preserving -- 'BufPos'. -- -- Monotonicity makes 'BufPos' useful to determine the order in which syntactic -- elements appear in the source. Consider this example (haddockA041 in the test suite): -- -- haddockA041.hs -- {-# LANGUAGE CPP #-} -- -- | Module header documentation -- module Comments_and_CPP_include where -- #include "IncludeMe.hs" -- -- IncludeMe.hs: -- -- | Comment on T -- data T = MkT -- ^ Comment on MkT -- -- After the C preprocessor runs, the 'StringBuffer' will contain a program that -- looks like this (unimportant lines at the beginning removed): -- -- # 1 "haddockA041.hs" -- {-# LANGUAGE CPP #-} -- -- | Module header documentation -- module Comments_and_CPP_include where -- # 1 "IncludeMe.hs" 1 -- -- | Comment on T -- data T = MkT -- ^ Comment on MkT -- # 7 "haddockA041.hs" 2 -- -- The line pragmas inserted by CPP make the error messages more informative. -- The downside is that we can't use RealSrcLoc to determine the ordering of -- syntactic elements. 
-- -- With RealSrcLoc, we have the following location information recorded in the AST: -- * The module name is located at haddockA041.hs:3:8-31 -- * The Haddock comment "Comment on T" is located at IncludeMe:1:1-17 -- * The data declaration is located at IncludeMe.hs:2:1-32 -- -- Is the Haddock comment located between the module name and the data -- declaration? This is impossible to tell because the locations are not -- comparable; they even refer to different files. -- -- On the other hand, with 'BufPos', we have the following location information: -- * The module name is located at 846-870 -- * The Haddock comment "Comment on T" is located at 898-915 -- * The data declaration is located at 916-928 -- -- Aside: if you're wondering why the numbers are so high, try running -- @ghc -E haddockA041.hs@ -- and see the extra fluff that CPP inserts at the start of the file. -- -- For error messages, 'BufPos' is not useful at all. On the other hand, this is -- exactly what we need to determine the order of syntactic elements: -- 870 < 898, therefore the Haddock comment appears *after* the module name. -- 915 < 916, therefore the Haddock comment appears *before* the data declaration. -- -- We use 'BufPos' in in GHC.Parser.PostProcess.Haddock to associate Haddock -- comments with parts of the AST using location information (#17544). 
newtype BufPos = BufPos { bufPos :: Int } deriving (Eq, Ord, Show, Data) -- | Source Location data SrcLoc = RealSrcLoc !RealSrcLoc !(Strict.Maybe BufPos) -- See Note [Why Maybe BufPos] | UnhelpfulLoc !FastString -- Just a general indication deriving (Eq, Show) {- ************************************************************************ * * \subsection[SrcLoc-access-fns]{Access functions} * * ************************************************************************ -} mkSrcLoc :: FastString -> Int -> Int -> SrcLoc mkSrcLoc x line col = RealSrcLoc (mkRealSrcLoc x line col) Strict.Nothing mkRealSrcLoc :: FastString -> Int -> Int -> RealSrcLoc mkRealSrcLoc x line col = SrcLoc (LexicalFastString x) line col -- | Indentation level is 1-indexed, so the leftmost column is 1. leftmostColumn :: Int leftmostColumn = 1 getBufPos :: SrcLoc -> Strict.Maybe BufPos getBufPos (RealSrcLoc _ mbpos) = mbpos getBufPos (UnhelpfulLoc _) = Strict.Nothing -- | Built-in "bad" 'SrcLoc' values for particular locations noSrcLoc, generatedSrcLoc, interactiveSrcLoc :: SrcLoc noSrcLoc = UnhelpfulLoc (fsLit "") generatedSrcLoc = UnhelpfulLoc (fsLit "") interactiveSrcLoc = UnhelpfulLoc (fsLit "") -- | Creates a "bad" 'SrcLoc' that has no detailed information about its location mkGeneralSrcLoc :: FastString -> SrcLoc mkGeneralSrcLoc = UnhelpfulLoc -- | Gives the filename of the 'RealSrcLoc' srcLocFile :: RealSrcLoc -> FastString srcLocFile (SrcLoc (LexicalFastString fname) _ _) = fname -- | Raises an error when used on a "bad" 'SrcLoc' srcLocLine :: RealSrcLoc -> Int srcLocLine (SrcLoc _ l _) = l -- | Raises an error when used on a "bad" 'SrcLoc' srcLocCol :: RealSrcLoc -> Int srcLocCol (SrcLoc _ _ c) = c -- | Move the 'SrcLoc' down by one line if the character is a newline, -- to the next 8-char tabstop if it is a tab, and across by one -- character in any other case advanceSrcLoc :: RealSrcLoc -> Char -> RealSrcLoc advanceSrcLoc (SrcLoc f l _) '\n' = SrcLoc f (l + 1) 1 advanceSrcLoc (SrcLoc f l c) 
'\t' = SrcLoc f l (advance_tabstop c) advanceSrcLoc (SrcLoc f l c) _ = SrcLoc f l (c + 1) advance_tabstop :: Int -> Int advance_tabstop c = ((((c - 1) `shiftR` 3) + 1) `shiftL` 3) + 1 advanceBufPos :: BufPos -> BufPos advanceBufPos (BufPos i) = BufPos (i+1) {- ************************************************************************ * * \subsection[SrcLoc-instances]{Instance declarations for various names} * * ************************************************************************ -} sortLocated :: [Located a] -> [Located a] sortLocated = sortBy (leftmost_smallest `on` getLoc) sortRealLocated :: [RealLocated a] -> [RealLocated a] sortRealLocated = sortBy (compare `on` getLoc) lookupSrcLoc :: SrcLoc -> Map.Map RealSrcLoc a -> Maybe a lookupSrcLoc (RealSrcLoc l _) = Map.lookup l lookupSrcLoc (UnhelpfulLoc _) = const Nothing lookupSrcSpan :: SrcSpan -> Map.Map RealSrcSpan a -> Maybe a lookupSrcSpan (RealSrcSpan l _) = Map.lookup l lookupSrcSpan (UnhelpfulSpan _) = const Nothing instance Outputable RealSrcLoc where ppr (SrcLoc (LexicalFastString src_path) src_line src_col) = hcat [ pprFastFilePath src_path <> colon , int src_line <> colon , int src_col ] -- I don't know why there is this style-based difference -- if userStyle sty || debugStyle sty then -- hcat [ pprFastFilePath src_path, char ':', -- int src_line, -- char ':', int src_col -- ] -- else -- hcat [text "{-# LINE ", int src_line, space, -- char '\"', pprFastFilePath src_path, text " #-}"] instance Outputable SrcLoc where ppr (RealSrcLoc l _) = ppr l ppr (UnhelpfulLoc s) = ftext s instance Data RealSrcSpan where -- don't traverse? toConstr _ = abstractConstr "RealSrcSpan" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "RealSrcSpan" instance Data SrcSpan where -- don't traverse? 
toConstr _ = abstractConstr "SrcSpan" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "SrcSpan" {- ************************************************************************ * * \subsection[SrcSpan]{Source Spans} * * ************************************************************************ -} {- | A 'RealSrcSpan' delimits a portion of a text file. It could be represented by a pair of (line,column) coordinates, but in fact we optimise slightly by using more compact representations for single-line and zero-length spans, both of which are quite common. The end position is defined to be the column /after/ the end of the span. That is, a span of (1,1)-(1,2) is one character long, and a span of (1,1)-(1,1) is zero characters long. -} -- | Real Source Span data RealSrcSpan = RealSrcSpan' { srcSpanFile :: !FastString, srcSpanSLine :: {-# UNPACK #-} !Int, srcSpanSCol :: {-# UNPACK #-} !Int, srcSpanELine :: {-# UNPACK #-} !Int, srcSpanECol :: {-# UNPACK #-} !Int } deriving Eq -- | StringBuffer Source Span data BufSpan = BufSpan { bufSpanStart, bufSpanEnd :: {-# UNPACK #-} !BufPos } deriving (Eq, Ord, Show, Data) instance Semigroup BufSpan where BufSpan start1 end1 <> BufSpan start2 end2 = BufSpan (min start1 start2) (max end1 end2) -- | Source Span -- -- A 'SrcSpan' identifies either a specific portion of a text file -- or a human-readable description of a location. 
data SrcSpan = RealSrcSpan !RealSrcSpan !(Strict.Maybe BufSpan) -- See Note [Why Maybe BufPos] | UnhelpfulSpan !UnhelpfulSpanReason deriving (Eq, Show) -- Show is used by GHC.Parser.Lexer, because we -- derive Show for Token data UnhelpfulSpanReason = UnhelpfulNoLocationInfo | UnhelpfulWiredIn | UnhelpfulInteractive | UnhelpfulGenerated | UnhelpfulOther !FastString deriving (Eq, Show) removeBufSpan :: SrcSpan -> SrcSpan removeBufSpan (RealSrcSpan s _) = RealSrcSpan s Strict.Nothing removeBufSpan s = s {- Note [Why Maybe BufPos] ~~~~~~~~~~~~~~~~~~~~~~~~~~ In SrcLoc we store (Maybe BufPos); in SrcSpan we store (Maybe BufSpan). Why the Maybe? Surely, the lexer can always fill in the buffer position, and it guarantees to do so. However, sometimes the SrcLoc/SrcSpan is constructed in a different context where the buffer location is not available, and then we use Nothing instead of a fake value like BufPos (-1). Perhaps the compiler could be re-engineered to pass around BufPos more carefully and never discard it, and this 'Maybe' could be removed. If you're interested in doing so, you may find this ripgrep query useful: rg "RealSrc(Loc|Span).*?Nothing" For example, it is not uncommon to whip up source locations for e.g. error messages, constructing a SrcSpan without a BufSpan. 
-} instance ToJson SrcSpan where json (UnhelpfulSpan {} ) = JSNull --JSObject [( "type", "unhelpful")] json (RealSrcSpan rss _) = json rss instance ToJson RealSrcSpan where json (RealSrcSpan'{..}) = JSObject [ ("file", JSString (unpackFS srcSpanFile)), ("start", start), ("end", end) ] where start = JSObject [ ("line", JSInt srcSpanSLine), ("column", JSInt srcSpanSCol) ] end = JSObject [ ("line", JSInt srcSpanELine), ("column", JSInt srcSpanECol) ] instance NFData SrcSpan where rnf x = x `seq` () getBufSpan :: SrcSpan -> Strict.Maybe BufSpan getBufSpan (RealSrcSpan _ mbspan) = mbspan getBufSpan (UnhelpfulSpan _) = Strict.Nothing -- | Built-in "bad" 'SrcSpan's for common sources of location uncertainty noSrcSpan, generatedSrcSpan, wiredInSrcSpan, interactiveSrcSpan :: SrcSpan noSrcSpan = UnhelpfulSpan UnhelpfulNoLocationInfo wiredInSrcSpan = UnhelpfulSpan UnhelpfulWiredIn interactiveSrcSpan = UnhelpfulSpan UnhelpfulInteractive generatedSrcSpan = UnhelpfulSpan UnhelpfulGenerated isGeneratedSrcSpan :: SrcSpan -> Bool isGeneratedSrcSpan (UnhelpfulSpan UnhelpfulGenerated) = True isGeneratedSrcSpan _ = False isNoSrcSpan :: SrcSpan -> Bool isNoSrcSpan (UnhelpfulSpan UnhelpfulNoLocationInfo) = True isNoSrcSpan _ = False -- | Create a "bad" 'SrcSpan' that has not location information mkGeneralSrcSpan :: FastString -> SrcSpan mkGeneralSrcSpan = UnhelpfulSpan . 
UnhelpfulOther -- | Create a 'SrcSpan' corresponding to a single point srcLocSpan :: SrcLoc -> SrcSpan srcLocSpan (UnhelpfulLoc str) = UnhelpfulSpan (UnhelpfulOther str) srcLocSpan (RealSrcLoc l mb) = RealSrcSpan (realSrcLocSpan l) (fmap (\b -> BufSpan b b) mb) realSrcLocSpan :: RealSrcLoc -> RealSrcSpan realSrcLocSpan (SrcLoc (LexicalFastString file) line col) = RealSrcSpan' file line col line col -- | Create a 'SrcSpan' between two points in a file mkRealSrcSpan :: RealSrcLoc -> RealSrcLoc -> RealSrcSpan mkRealSrcSpan loc1 loc2 = RealSrcSpan' file line1 col1 line2 col2 where line1 = srcLocLine loc1 line2 = srcLocLine loc2 col1 = srcLocCol loc1 col2 = srcLocCol loc2 file = srcLocFile loc1 -- | 'True' if the span is known to straddle only one line. isOneLineRealSpan :: RealSrcSpan -> Bool isOneLineRealSpan (RealSrcSpan' _ line1 _ line2 _) = line1 == line2 -- | 'True' if the span is a single point isPointRealSpan :: RealSrcSpan -> Bool isPointRealSpan (RealSrcSpan' _ line1 col1 line2 col2) = line1 == line2 && col1 == col2 -- | Create a 'SrcSpan' between two points in a file mkSrcSpan :: SrcLoc -> SrcLoc -> SrcSpan mkSrcSpan (UnhelpfulLoc str) _ = UnhelpfulSpan (UnhelpfulOther str) mkSrcSpan _ (UnhelpfulLoc str) = UnhelpfulSpan (UnhelpfulOther str) mkSrcSpan (RealSrcLoc loc1 mbpos1) (RealSrcLoc loc2 mbpos2) = RealSrcSpan (mkRealSrcSpan loc1 loc2) (liftA2 BufSpan mbpos1 mbpos2) -- | Combines two 'SrcSpan' into one that spans at least all the characters -- within both spans. Returns UnhelpfulSpan if the files differ. 
combineSrcSpans :: SrcSpan -> SrcSpan -> SrcSpan combineSrcSpans (UnhelpfulSpan _) r = r -- this seems more useful combineSrcSpans l (UnhelpfulSpan _) = l combineSrcSpans (RealSrcSpan span1 mbspan1) (RealSrcSpan span2 mbspan2) | srcSpanFile span1 == srcSpanFile span2 = RealSrcSpan (combineRealSrcSpans span1 span2) (liftA2 combineBufSpans mbspan1 mbspan2) | otherwise = UnhelpfulSpan $ UnhelpfulOther (fsLit "") -- | Combines two 'SrcSpan' into one that spans at least all the characters -- within both spans. Assumes the "file" part is the same in both inputs combineRealSrcSpans :: RealSrcSpan -> RealSrcSpan -> RealSrcSpan combineRealSrcSpans span1 span2 = RealSrcSpan' file line_start col_start line_end col_end where (line_start, col_start) = min (srcSpanStartLine span1, srcSpanStartCol span1) (srcSpanStartLine span2, srcSpanStartCol span2) (line_end, col_end) = max (srcSpanEndLine span1, srcSpanEndCol span1) (srcSpanEndLine span2, srcSpanEndCol span2) file = srcSpanFile span1 combineBufSpans :: BufSpan -> BufSpan -> BufSpan combineBufSpans span1 span2 = BufSpan start end where start = min (bufSpanStart span1) (bufSpanStart span2) end = max (bufSpanEnd span1) (bufSpanEnd span2) -- | Convert a SrcSpan into one that represents only its first character srcSpanFirstCharacter :: SrcSpan -> SrcSpan srcSpanFirstCharacter l@(UnhelpfulSpan {}) = l srcSpanFirstCharacter (RealSrcSpan span mbspan) = RealSrcSpan (mkRealSrcSpan loc1 loc2) (fmap mkBufSpan mbspan) where loc1@(SrcLoc f l c) = realSrcSpanStart span loc2 = SrcLoc f l (c+1) mkBufSpan bspan = let bpos1@(BufPos i) = bufSpanStart bspan bpos2 = BufPos (i+1) in BufSpan bpos1 bpos2 {- ************************************************************************ * * \subsection[SrcSpan-predicates]{Predicates} * * ************************************************************************ -} -- | Test if a 'SrcSpan' is "good", i.e. 
has precise location information isGoodSrcSpan :: SrcSpan -> Bool isGoodSrcSpan (RealSrcSpan _ _) = True isGoodSrcSpan (UnhelpfulSpan _) = False isOneLineSpan :: SrcSpan -> Bool -- ^ True if the span is known to straddle only one line. -- For "bad" 'SrcSpan', it returns False isOneLineSpan (RealSrcSpan s _) = srcSpanStartLine s == srcSpanEndLine s isOneLineSpan (UnhelpfulSpan _) = False isZeroWidthSpan :: SrcSpan -> Bool -- ^ True if the span has a width of zero, as returned for "virtual" -- semicolons in the lexer. -- For "bad" 'SrcSpan', it returns False isZeroWidthSpan (RealSrcSpan s _) = srcSpanStartLine s == srcSpanEndLine s && srcSpanStartCol s == srcSpanEndCol s isZeroWidthSpan (UnhelpfulSpan _) = False -- | Tests whether the first span "contains" the other span, meaning -- that it covers at least as much source code. True where spans are equal. containsSpan :: RealSrcSpan -> RealSrcSpan -> Bool containsSpan s1 s2 = (srcSpanStartLine s1, srcSpanStartCol s1) <= (srcSpanStartLine s2, srcSpanStartCol s2) && (srcSpanEndLine s1, srcSpanEndCol s1) >= (srcSpanEndLine s2, srcSpanEndCol s2) && (srcSpanFile s1 == srcSpanFile s2) -- We check file equality last because it is (presumably?) least -- likely to fail. 
{- %************************************************************************ %* * \subsection[SrcSpan-unsafe-access-fns]{Unsafe access functions} * * ************************************************************************ -} srcSpanStartLine :: RealSrcSpan -> Int srcSpanEndLine :: RealSrcSpan -> Int srcSpanStartCol :: RealSrcSpan -> Int srcSpanEndCol :: RealSrcSpan -> Int srcSpanStartLine RealSrcSpan'{ srcSpanSLine=l } = l srcSpanEndLine RealSrcSpan'{ srcSpanELine=l } = l srcSpanStartCol RealSrcSpan'{ srcSpanSCol=l } = l srcSpanEndCol RealSrcSpan'{ srcSpanECol=c } = c {- ************************************************************************ * * \subsection[SrcSpan-access-fns]{Access functions} * * ************************************************************************ -} -- | Returns the location at the start of the 'SrcSpan' or a "bad" 'SrcSpan' if that is unavailable srcSpanStart :: SrcSpan -> SrcLoc srcSpanStart (UnhelpfulSpan r) = UnhelpfulLoc (unhelpfulSpanFS r) srcSpanStart (RealSrcSpan s b) = RealSrcLoc (realSrcSpanStart s) (fmap bufSpanStart b) -- | Returns the location at the end of the 'SrcSpan' or a "bad" 'SrcSpan' if that is unavailable srcSpanEnd :: SrcSpan -> SrcLoc srcSpanEnd (UnhelpfulSpan r) = UnhelpfulLoc (unhelpfulSpanFS r) srcSpanEnd (RealSrcSpan s b) = RealSrcLoc (realSrcSpanEnd s) (fmap bufSpanEnd b) realSrcSpanStart :: RealSrcSpan -> RealSrcLoc realSrcSpanStart s = mkRealSrcLoc (srcSpanFile s) (srcSpanStartLine s) (srcSpanStartCol s) realSrcSpanEnd :: RealSrcSpan -> RealSrcLoc realSrcSpanEnd s = mkRealSrcLoc (srcSpanFile s) (srcSpanEndLine s) (srcSpanEndCol s) -- | Obtains the filename for a 'SrcSpan' if it is "good" srcSpanFileName_maybe :: SrcSpan -> Maybe FastString srcSpanFileName_maybe (RealSrcSpan s _) = Just (srcSpanFile s) srcSpanFileName_maybe (UnhelpfulSpan _) = Nothing srcSpanToRealSrcSpan :: SrcSpan -> Maybe RealSrcSpan srcSpanToRealSrcSpan (RealSrcSpan ss _) = Just ss srcSpanToRealSrcSpan _ = Nothing {- 
************************************************************************ * * \subsection[SrcSpan-instances]{Instances} * * ************************************************************************ -} -- We want to order RealSrcSpans first by the start point, then by the -- end point. instance Ord RealSrcSpan where compare = on compare realSrcSpanStart S.<> on compare realSrcSpanEnd instance Show RealSrcLoc where show (SrcLoc filename row col) = "SrcLoc " ++ show filename ++ " " ++ show row ++ " " ++ show col -- Show is used by GHC.Parser.Lexer, because we derive Show for Token instance Show RealSrcSpan where show span@(RealSrcSpan' file sl sc el ec) | isPointRealSpan span = "SrcSpanPoint " ++ show file ++ " " ++ intercalate " " (map show [sl,sc]) | isOneLineRealSpan span = "SrcSpanOneLine " ++ show file ++ " " ++ intercalate " " (map show [sl,sc,ec]) | otherwise = "SrcSpanMultiLine " ++ show file ++ " " ++ intercalate " " (map show [sl,sc,el,ec]) instance Outputable RealSrcSpan where ppr span = pprUserRealSpan True span -- I don't know why there is this style-based difference -- = getPprStyle $ \ sty -> -- if userStyle sty || debugStyle sty then -- text (showUserRealSpan True span) -- else -- hcat [text "{-# LINE ", int (srcSpanStartLine span), space, -- char '\"', pprFastFilePath $ srcSpanFile span, text " #-}"] instance Outputable SrcSpan where ppr span = pprUserSpan True span instance Outputable UnhelpfulSpanReason where ppr = pprUnhelpfulSpanReason -- I don't know why there is this style-based difference -- = getPprStyle $ \ sty -> -- if userStyle sty || debugStyle sty then -- pprUserSpan True span -- else -- case span of -- UnhelpfulSpan _ -> panic "Outputable UnhelpfulSpan" -- RealSrcSpan s -> ppr s unhelpfulSpanFS :: UnhelpfulSpanReason -> FastString unhelpfulSpanFS r = case r of UnhelpfulOther s -> s UnhelpfulNoLocationInfo -> fsLit "" UnhelpfulWiredIn -> fsLit "" UnhelpfulInteractive -> fsLit "" UnhelpfulGenerated -> fsLit "" pprUnhelpfulSpanReason :: 
UnhelpfulSpanReason -> SDoc pprUnhelpfulSpanReason r = ftext (unhelpfulSpanFS r) pprUserSpan :: Bool -> SrcSpan -> SDoc pprUserSpan _ (UnhelpfulSpan r) = pprUnhelpfulSpanReason r pprUserSpan show_path (RealSrcSpan s _) = pprUserRealSpan show_path s pprUserRealSpan :: Bool -> RealSrcSpan -> SDoc pprUserRealSpan show_path span@(RealSrcSpan' src_path line col _ _) | isPointRealSpan span = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , int line <> colon , int col ] pprUserRealSpan show_path span@(RealSrcSpan' src_path line scol _ ecol) | isOneLineRealSpan span = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , int line <> colon , int scol , ppUnless (ecol - scol <= 1) (char '-' <> int (ecol - 1)) ] -- For single-character or point spans, we just -- output the starting column number pprUserRealSpan show_path (RealSrcSpan' src_path sline scol eline ecol) = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , parens (int sline <> comma <> int scol) , char '-' , parens (int eline <> comma <> int ecol') ] where ecol' = if ecol == 0 then ecol else ecol - 1 {- ************************************************************************ * * \subsection[Located]{Attaching SrcSpans to things} * * ************************************************************************ -} -- | We attach SrcSpans to lots of things, so let's have a datatype for it. 
data GenLocated l e = L l e deriving (Eq, Ord, Show, Data, Functor, Foldable, Traversable) instance (NFData l, NFData e) => NFData (GenLocated l e) where rnf (L l e) = rnf l `seq` rnf e type Located = GenLocated SrcSpan type RealLocated = GenLocated RealSrcSpan unLoc :: GenLocated l e -> e unLoc (L _ e) = e getLoc :: GenLocated l e -> l getLoc (L l _) = l noLoc :: e -> Located e noLoc e = L noSrcSpan e mkGeneralLocated :: String -> e -> Located e mkGeneralLocated s e = L (mkGeneralSrcSpan (fsLit s)) e combineLocs :: Located a -> Located b -> SrcSpan combineLocs a b = combineSrcSpans (getLoc a) (getLoc b) -- | Combine locations from two 'Located' things and add them to a third thing addCLoc :: Located a -> Located b -> c -> Located c addCLoc a b c = L (combineSrcSpans (getLoc a) (getLoc b)) c -- not clear whether to add a general Eq instance, but this is useful sometimes: -- | Tests whether the two located things are equal eqLocated :: Eq a => GenLocated l a -> GenLocated l a -> Bool eqLocated a b = unLoc a == unLoc b -- not clear whether to add a general Ord instance, but this is useful sometimes: -- | Tests the ordering of the two located things cmpLocated :: Ord a => GenLocated l a -> GenLocated l a -> Ordering cmpLocated a b = unLoc a `compare` unLoc b -- | Compare the 'BufSpan' of two located things. -- -- Precondition: both operands have an associated 'BufSpan'. 
cmpBufSpan :: HasDebugCallStack => Located a -> Located a -> Ordering cmpBufSpan (L l1 _) (L l2 _) | Strict.Just a <- getBufSpan l1 , Strict.Just b <- getBufSpan l2 = compare a b | otherwise = panic "cmpBufSpan: no BufSpan" instance (Outputable e) => Outputable (Located e) where ppr (L l e) = -- GenLocated: -- Print spans without the file name etc whenPprDebug (braces (pprUserSpan False l)) $$ ppr e instance (Outputable e) => Outputable (GenLocated RealSrcSpan e) where ppr (L l e) = -- GenLocated: -- Print spans without the file name etc whenPprDebug (braces (pprUserSpan False (RealSrcSpan l Strict.Nothing))) $$ ppr e pprLocated :: (Outputable l, Outputable e) => GenLocated l e -> SDoc pprLocated (L l e) = -- Print spans without the file name etc whenPprDebug (braces (ppr l)) $$ ppr e -- | Always prints the location, even without -dppr-debug pprLocatedAlways :: (Outputable l, Outputable e) => GenLocated l e -> SDoc pprLocatedAlways (L l e) = braces (ppr l) $$ ppr e {- ************************************************************************ * * \subsection{Ordering SrcSpans for InteractiveUI} * * ************************************************************************ -} -- | Strategies for ordering 'SrcSpan's leftmost_smallest, leftmost_largest, rightmost_smallest :: SrcSpan -> SrcSpan -> Ordering rightmost_smallest = compareSrcSpanBy (flip compare) leftmost_smallest = compareSrcSpanBy compare leftmost_largest = compareSrcSpanBy $ on compare realSrcSpanStart S.<> flip (on compare realSrcSpanEnd) compareSrcSpanBy :: (RealSrcSpan -> RealSrcSpan -> Ordering) -> SrcSpan -> SrcSpan -> Ordering compareSrcSpanBy cmp (RealSrcSpan a _) (RealSrcSpan b _) = cmp a b compareSrcSpanBy _ (RealSrcSpan _ _) (UnhelpfulSpan _) = LT compareSrcSpanBy _ (UnhelpfulSpan _) (RealSrcSpan _ _) = GT compareSrcSpanBy _ (UnhelpfulSpan _) (UnhelpfulSpan _) = EQ -- | Determines whether a span encloses a given line and column index spans :: SrcSpan -> (Int, Int) -> Bool spans (UnhelpfulSpan _) _ = 
panic "spans UnhelpfulSpan" spans (RealSrcSpan span _) (l,c) = realSrcSpanStart span <= loc && loc <= realSrcSpanEnd span where loc = mkRealSrcLoc (srcSpanFile span) l c -- | Determines whether a span is enclosed by another one isSubspanOf :: SrcSpan -- ^ The span that may be enclosed by the other -> SrcSpan -- ^ The span it may be enclosed by -> Bool isSubspanOf (RealSrcSpan src _) (RealSrcSpan parent _) = isRealSubspanOf src parent isSubspanOf _ _ = False -- | Determines whether a span is enclosed by another one isRealSubspanOf :: RealSrcSpan -- ^ The span that may be enclosed by the other -> RealSrcSpan -- ^ The span it may be enclosed by -> Bool isRealSubspanOf src parent | srcSpanFile parent /= srcSpanFile src = False | otherwise = realSrcSpanStart parent <= realSrcSpanStart src && realSrcSpanEnd parent >= realSrcSpanEnd src getRealSrcSpan :: RealLocated a -> RealSrcSpan getRealSrcSpan (L l _) = l unRealSrcSpan :: RealLocated a -> a unRealSrcSpan (L _ e) = e -- | A location as produced by the parser. Consists of two components: -- -- * The location in the file, adjusted for #line and {-# LINE ... 
#-} pragmas (RealSrcLoc) -- * The location in the string buffer (BufPos) with monotonicity guarantees (see #17632) data PsLoc = PsLoc { psRealLoc :: !RealSrcLoc, psBufPos :: !BufPos } deriving (Eq, Ord, Show) data PsSpan = PsSpan { psRealSpan :: !RealSrcSpan, psBufSpan :: !BufSpan } deriving (Eq, Ord, Show, Data) type PsLocated = GenLocated PsSpan psLocatedToLocated :: PsLocated a -> Located a psLocatedToLocated (L sp a) = L (mkSrcSpanPs sp) a advancePsLoc :: PsLoc -> Char -> PsLoc advancePsLoc (PsLoc real_loc buf_loc) c = PsLoc (advanceSrcLoc real_loc c) (advanceBufPos buf_loc) mkPsSpan :: PsLoc -> PsLoc -> PsSpan mkPsSpan (PsLoc r1 b1) (PsLoc r2 b2) = PsSpan (mkRealSrcSpan r1 r2) (BufSpan b1 b2) psSpanStart :: PsSpan -> PsLoc psSpanStart (PsSpan r b) = PsLoc (realSrcSpanStart r) (bufSpanStart b) psSpanEnd :: PsSpan -> PsLoc psSpanEnd (PsSpan r b) = PsLoc (realSrcSpanEnd r) (bufSpanEnd b) mkSrcSpanPs :: PsSpan -> SrcSpan mkSrcSpanPs (PsSpan r b) = RealSrcSpan r (Strict.Just b) -- --------------------------------------------------------------------- -- The following section contains basic types related to exact printing. -- See https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations for -- details. -- This is only s subset, to prevent import loops. The balance are in -- GHC.Parser.Annotation -- --------------------------------------------------------------------- -- | The anchor for an exact print annotation. The Parser inserts the -- @'EpaSpan'@ variant, giving the exact location of the original item -- in the parsed source. This can be replaced by the @'EpaDelta'@ -- version, to provide a position for the item relative to the end of -- the previous item in the source. This is useful when editing an -- AST prior to exact printing the changed one. -- The EpaDelta also contains the original @'SrcSpan'@ for use by -- tools wanting to manipulate the AST after converting it using -- ghc-exactprint' @'makeDeltaAst'@. 
data EpaLocation' a = EpaSpan !SrcSpan | EpaDelta !SrcSpan !DeltaPos !a deriving (Data,Eq,Show) type NoCommentsLocation = EpaLocation' NoComments data NoComments = NoComments deriving (Data,Eq,Ord,Show) -- | Spacing between output items when exact printing. It captures -- the spacing from the current print position on the page to the -- position required for the thing about to be printed. This is -- either on the same line in which case is is simply the number of -- spaces to emit, or it is some number of lines down, with a given -- column offset. The exact printing algorithm keeps track of the -- column offset pertaining to the current anchor position, so the -- `deltaColumn` is the additional spaces to add in this case. See -- https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations for -- details. data DeltaPos = SameLine { deltaColumn :: !Int } | DifferentLine { deltaLine :: !Int, -- ^ deltaLine should always be > 0 deltaColumn :: !Int } deriving (Show,Eq,Ord,Data) -- | Smart constructor for a 'DeltaPos'. It preserves the invariant -- that for the 'DifferentLine' constructor 'deltaLine' is always > 0. deltaPos :: Int -> Int -> DeltaPos deltaPos l c = case l of 0 -> SameLine c _ -> DifferentLine l c getDeltaLine :: DeltaPos -> Int getDeltaLine (SameLine _) = 0 getDeltaLine (DifferentLine r _) = r instance Outputable NoComments where ppr NoComments = text "NoComments" instance (Outputable a) => Outputable (EpaLocation' a) where ppr (EpaSpan r) = text "EpaSpan" <+> ppr r ppr (EpaDelta s d cs) = text "EpaDelta" <+> ppr s <+> ppr d <+> ppr cs instance Outputable DeltaPos where ppr (SameLine c) = text "SameLine" <+> ppr c ppr (DifferentLine l c) = text "DifferentLine" <+> ppr l <+> ppr c ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Target.hs0000644000000000000000000000441707346545000020602 0ustar0000000000000000module GHC.Types.Target ( Target(..) , TargetId(..) 
, InputFileBuffer , pprTarget , pprTargetId ) where import GHC.Prelude import GHC.Driver.Phases ( Phase ) import GHC.Unit import GHC.Data.StringBuffer ( StringBuffer ) import GHC.Utils.Outputable import Data.Time -- | A compilation target. -- -- A target may be supplied with the actual text of the -- module. If so, use this instead of the file contents (this -- is for use in an IDE where the file hasn't been saved by -- the user yet). -- -- These fields are strict because Targets are long lived. data Target = Target { targetId :: !TargetId, -- ^ module or filename targetAllowObjCode :: !Bool, -- ^ object code allowed? targetUnitId :: !UnitId, -- ^ id of the unit this target is part of targetContents :: !(Maybe (InputFileBuffer, UTCTime)) -- ^ Optional in-memory buffer containing the source code GHC should -- use for this target instead of reading it from disk. -- -- Since GHC version 8.10 modules which require preprocessors such as -- Literate Haskell or CPP to run are also supported. -- -- If a corresponding source file does not exist on disk this will -- result in a 'SourceError' exception if @targetId = TargetModule _@ -- is used. However together with @targetId = TargetFile _@ GHC will -- not complain about the file missing. } data TargetId = TargetModule !ModuleName -- ^ A module name: search for the file | TargetFile !FilePath !(Maybe Phase) -- ^ A filename: preprocess & parse it to find the module name. -- If specified, the Phase indicates how to compile this file -- (which phase to start from). Nothing indicates the starting phase -- should be determined from the suffix of the filename. 
deriving Eq type InputFileBuffer = StringBuffer pprTarget :: Target -> SDoc pprTarget Target { targetUnitId = uid, targetId = id, targetAllowObjCode = obj } = (if obj then empty else char '*') <> ppr uid <> colon <> pprTargetId id instance Outputable Target where ppr = pprTarget pprTargetId :: TargetId -> SDoc pprTargetId (TargetModule m) = ppr m pprTargetId (TargetFile f _) = text f instance Outputable TargetId where ppr = pprTargetId ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Tickish.hs0000644000000000000000000003532107346545000020750 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Types.Tickish ( GenTickish(..), CoreTickish, StgTickish, CmmTickish, XTickishId, tickishCounts, TickishScoping(..), tickishScoped, tickishScopesLike, tickishFloatable, tickishCanSplit, mkNoCount, mkNoScope, tickishIsCode, isProfTick, TickishPlacement(..), tickishPlace, tickishContains ) where import GHC.Prelude import GHC.Data.FastString import GHC.Core.Type import GHC.Unit.Module import GHC.Types.CostCentre import GHC.Types.SrcLoc ( RealSrcSpan, containsSpan ) import GHC.Types.Var import GHC.Utils.Panic import Language.Haskell.Syntax.Extension ( NoExtField ) import Data.Data import GHC.Utils.Outputable (Outputable (ppr), text) {- ********************************************************************* * * Ticks * * ************************************************************************ -} -- | Allows attaching extra information to points in expressions {- | Used as a data type index for the GenTickish annotations. See Note [Tickish passes] -} data TickishPass = TickishPassCore | TickishPassStg | TickishPassCmm {- Note [Tickish passes] ~~~~~~~~~~~~~~~~~~~~~ Tickish annotations store different information depending on where they are used. Here's a summary of the differences between the passes. 
- CoreTickish: Haskell and Core The tickish annotations store the free variables of breakpoints. - StgTickish: Stg The GHCi bytecode generator (GHC.StgToByteCode) needs to know the type of each breakpoint in addition to its free variables. Since we cannot compute the type from an STG expression, the tickish annotations store the type of breakpoints in addition to the free variables. - CmmTickish: Cmm Breakpoints are unsupported and no free variables or type are stored. -} type family XBreakpoint (pass :: TickishPass) type instance XBreakpoint 'TickishPassCore = NoExtField -- | Keep track of the type of breakpoints in STG, for GHCi type instance XBreakpoint 'TickishPassStg = Type type instance XBreakpoint 'TickishPassCmm = NoExtField type family XTickishId (pass :: TickishPass) type instance XTickishId 'TickishPassCore = Id type instance XTickishId 'TickishPassStg = Id type instance XTickishId 'TickishPassCmm = NoExtField type CoreTickish = GenTickish 'TickishPassCore type StgTickish = GenTickish 'TickishPassStg -- | Tickish in Cmm context (annotations only) type CmmTickish = GenTickish 'TickishPassCmm -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data GenTickish pass = -- | An @{-# SCC #-}@ profiling annotation, either automatically -- added by the desugarer as a result of -auto-all, or added by -- the user. ProfNote { profNoteCC :: CostCentre, -- ^ the cost centre profNoteCount :: !Bool, -- ^ bump the entry count? profNoteScope :: !Bool -- ^ scopes over the enclosed expression -- (i.e. not just a tick) -- Invariant: the False/False case never happens } -- | A "tick" used by HPC to track the execution of each -- subexpression in the original source code. | HpcTick { tickModule :: Module, tickId :: !Int } -- | A breakpoint for the GHCi debugger. This behaves like an HPC -- tick, but has a list of free variables which will be available -- for inspection in GHCi when the program stops at the breakpoint. 
-- -- NB. we must take account of these Ids when (a) counting free variables, -- and (b) substituting (don't substitute for them) | Breakpoint { breakpointExt :: XBreakpoint pass , breakpointId :: !Int , breakpointFVs :: [XTickishId pass] -- ^ the order of this list is important: -- it matches the order of the lists in the -- appropriate entry in 'GHC.ByteCode.Types.ModBreaks'. -- -- Careful about substitution! See -- Note [substTickish] in "GHC.Core.Subst". , breakpointModule :: Module } -- | A source note. -- -- Source notes are pure annotations: Their presence should neither -- influence compilation nor execution. The semantics are given by -- causality: The presence of a source note means that a local -- change in the referenced source code span will possibly provoke -- the generated code to change. On the flip-side, the functionality -- of annotated code *must* be invariant against changes to all -- source code *except* the spans referenced in the source notes -- (see "Causality of optimized Haskell" paper for details). -- -- Therefore extending the scope of any given source note is always -- valid. Note that it is still undesirable though, as this reduces -- their usefulness for debugging and profiling. Therefore we will -- generally try only to make use of this property where it is -- necessary to enable optimizations. | SourceNote { sourceSpan :: RealSrcSpan -- ^ Source covered , sourceName :: LexicalFastString -- ^ Name for source location -- (uses same names as CCs) } deriving instance Eq (GenTickish 'TickishPassCore) deriving instance Ord (GenTickish 'TickishPassCore) deriving instance Data (GenTickish 'TickishPassCore) deriving instance Data (GenTickish 'TickishPassStg) deriving instance Eq (GenTickish 'TickishPassCmm) deriving instance Ord (GenTickish 'TickishPassCmm) deriving instance Data (GenTickish 'TickishPassCmm) -- | A "counting tick" (where tickishCounts is True) is one that -- counts evaluations in some way. 
We cannot discard a counting tick, -- and the compiler should preserve the number of counting ticks as -- far as possible. -- -- However, we still allow the simplifier to increase or decrease -- sharing, so in practice the actual number of ticks may vary, except -- that we never change the value from zero to non-zero or vice versa. tickishCounts :: GenTickish pass -> Bool tickishCounts n@ProfNote{} = profNoteCount n tickishCounts HpcTick{} = True tickishCounts Breakpoint{} = True tickishCounts _ = False -- | Specifies the scoping behaviour of ticks. This governs the -- behaviour of ticks that care about the covered code and the cost -- associated with it. Important for ticks relating to profiling. data TickishScoping = -- | No scoping: The tick does not care about what code it -- covers. Transformations can freely move code inside as well as -- outside without any additional annotation obligations NoScope -- | Soft scoping: We want all code that is covered to stay -- covered. Note that this scope type does not forbid -- transformations from happening, as long as all results of -- the transformations are still covered by this tick or a copy of -- it. For example -- -- let x = tick<...> (let y = foo in bar) in baz -- ===> -- let x = tick<...> bar; y = tick<...> foo in baz -- -- Is a valid transformation as far as "bar" and "foo" is -- concerned, because both still are scoped over by the tick. -- -- Note though that one might object to the "let" not being -- covered by the tick any more. However, we are generally lax -- with this - constant costs don't matter too much, and given -- that the "let" was effectively merged we can view it as having -- lost its identity anyway. -- -- Also note that this scoping behaviour allows floating a tick -- "upwards" in pretty much any situation. 
For example: -- -- case foo of x -> tick<...> bar -- ==> -- tick<...> case foo of x -> bar -- -- While this is always legal, we want to make a best effort to -- only make us of this where it exposes transformation -- opportunities. | SoftScope -- | Cost centre scoping: We don't want any costs to move to other -- cost-centre stacks. This means we not only want no code or cost -- to get moved out of their cost centres, but we also object to -- code getting associated with new cost-centre ticks - or -- changing the order in which they get applied. -- -- A rule of thumb is that we don't want any code to gain new -- annotations. However, there are notable exceptions, for -- example: -- -- let f = \y -> foo in tick<...> ... (f x) ... -- ==> -- tick<...> ... foo[x/y] ... -- -- In-lining lambdas like this is always legal, because inlining a -- function does not change the cost-centre stack when the -- function is called. | CostCentreScope deriving (Eq) -- | Returns the intended scoping rule for a Tickish tickishScoped :: GenTickish pass -> TickishScoping tickishScoped n@ProfNote{} | profNoteScope n = CostCentreScope | otherwise = NoScope tickishScoped HpcTick{} = NoScope tickishScoped Breakpoint{} = CostCentreScope -- Breakpoints are scoped: eventually we're going to do call -- stacks, but also this helps prevent the simplifier from moving -- breakpoints around and changing their result type (see #1531). tickishScoped SourceNote{} = SoftScope -- | Returns whether the tick scoping rule is at least as permissive -- as the given scoping rule. 
tickishScopesLike :: GenTickish pass -> TickishScoping -> Bool tickishScopesLike t scope = tickishScoped t `like` scope where NoScope `like` _ = True _ `like` NoScope = False SoftScope `like` _ = True _ `like` SoftScope = False CostCentreScope `like` _ = True -- | Returns @True@ for ticks that can be floated upwards easily even -- where it might change execution counts, such as: -- -- Just (tick<...> foo) -- ==> -- tick<...> (Just foo) -- -- This is a combination of @tickishSoftScope@ and -- @tickishCounts@. Note that in principle splittable ticks can become -- floatable using @mkNoTick@ -- even though there's currently no -- tickish for which that is the case. tickishFloatable :: GenTickish pass -> Bool tickishFloatable t = t `tickishScopesLike` SoftScope && not (tickishCounts t) -- | Returns @True@ for a tick that is both counting /and/ scoping and -- can be split into its (tick, scope) parts using 'mkNoScope' and -- 'mkNoTick' respectively. tickishCanSplit :: GenTickish pass -> Bool tickishCanSplit ProfNote{profNoteScope = True, profNoteCount = True} = True tickishCanSplit _ = False mkNoCount :: GenTickish pass -> GenTickish pass mkNoCount n | not (tickishCounts n) = n | not (tickishCanSplit n) = panic "mkNoCount: Cannot split!" mkNoCount n@ProfNote{} = n {profNoteCount = False} mkNoCount _ = panic "mkNoCount: Undefined split!" mkNoScope :: GenTickish pass -> GenTickish pass mkNoScope n | tickishScoped n == NoScope = n | not (tickishCanSplit n) = panic "mkNoScope: Cannot split!" mkNoScope n@ProfNote{} = n {profNoteScope = False} mkNoScope _ = panic "mkNoScope: Undefined split!" -- | Return @True@ if this source annotation compiles to some backend -- code. Without this flag, the tickish is seen as a simple annotation -- that does not have any associated evaluation code. -- -- What this means that we are allowed to disregard the tick if doing -- so means that we can skip generating any code in the first place. 
A -- typical example is top-level bindings: -- -- foo = tick<...> \y -> ... -- ==> -- foo = \y -> tick<...> ... -- -- Here there is just no operational difference between the first and -- the second version. Therefore code generation should simply -- translate the code as if it found the latter. tickishIsCode :: GenTickish pass -> Bool tickishIsCode SourceNote{} = False tickishIsCode _tickish = True -- all the rest for now isProfTick :: GenTickish pass -> Bool isProfTick ProfNote{} = True isProfTick _ = False -- | Governs the kind of expression that the tick gets placed on when -- annotating for example using @mkTick@. If we find that we want to -- put a tickish on an expression ruled out here, we try to float it -- inwards until we find a suitable expression. data TickishPlacement = -- | Place ticks exactly on run-time expressions. We can still -- move the tick through pure compile-time constructs such as -- other ticks, casts or type lambdas. This is the most -- restrictive placement rule for ticks, as all tickishs have in -- common that they want to track runtime processes. The only -- legal placement rule for counting ticks. -- NB: We generally try to move these as close to the relevant -- runtime expression as possible. This means they get pushed through -- tyoe arguments. E.g. we create `(tick f) @Bool` instead of `tick (f @Bool)`. PlaceRuntime -- | As @PlaceRuntime@, but we float the tick through all -- lambdas. This makes sense where there is little difference -- between annotating the lambda and annotating the lambda's code. | PlaceNonLam -- | In addition to floating through lambdas, cost-centre style -- tickishs can also be moved from constructors, non-function -- variables and literals. For example: -- -- let x = scc<...> C (scc<...> y) (scc<...> 3) in ... -- -- Neither the constructor application, the variable or the -- literal are likely to have any cost worth mentioning. 
And even -- if y names a thunk, the call would not care about the -- evaluation context. Therefore removing all annotations in the -- above example is safe. | PlaceCostCentre deriving (Eq,Show) instance Outputable TickishPlacement where ppr = text . show -- | Placement behaviour we want for the ticks tickishPlace :: GenTickish pass -> TickishPlacement tickishPlace n@ProfNote{} | profNoteCount n = PlaceRuntime | otherwise = PlaceCostCentre tickishPlace HpcTick{} = PlaceRuntime tickishPlace Breakpoint{} = PlaceRuntime tickishPlace SourceNote{} = PlaceNonLam -- | Returns whether one tick "contains" the other one, therefore -- making the second tick redundant. tickishContains :: Eq (GenTickish pass) => GenTickish pass -> GenTickish pass -> Bool tickishContains (SourceNote sp1 n1) (SourceNote sp2 n2) = containsSpan sp1 sp2 && n1 == n2 -- compare the String last tickishContains t1 t2 = t1 == t2 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TyThing.hs0000644000000000000000000003765307346545000020752 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} -- | A global typecheckable-thing, essentially anything that has a name. module GHC.Types.TyThing ( TyThing (..) , MonadThings (..) 
, mkATyCon , mkAnId , pprShortTyThing , pprTyThingCategory , tyThingCategory , implicitTyThings , implicitConLikeThings , implicitClassThings , implicitTyConThings , implicitCoTyCon , isImplicitTyThing , tyThingParent_maybe , tyThingsTyCoVars , tyThingLocalGREs, tyThingGREInfo , tyThingTyCon , tyThingCoAxiom , tyThingDataCon , tyThingConLike , tyThingId ) where import GHC.Prelude import GHC.Types.GREInfo import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Unique.Set import GHC.Core.Class import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.PatSyn import GHC.Core.TyCo.FVs import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Control.Monad ( liftM ) import Control.Monad.Trans.Reader import Control.Monad.Trans.Class import Data.List.NonEmpty ( NonEmpty(..) ) import qualified Data.List.NonEmpty as NE import Data.List ( intersect ) {- Note [ATyCon for classes] ~~~~~~~~~~~~~~~~~~~~~~~~~ Both classes and type constructors are represented in the type environment as ATyCon. You can tell the difference, and get to the class, with isClassTyCon :: TyCon -> Bool tyConClass_maybe :: TyCon -> Maybe Class The Class and its associated TyCon have the same Name. -} -- | A global typecheckable-thing, essentially anything that has a name. -- Not to be confused with a 'TcTyThing', which is also a typecheckable -- thing but in the *local* context. See "GHC.Tc.Utils.Env" for how to retrieve -- a 'TyThing' given a 'Name'. 
data TyThing = AnId Id | AConLike ConLike | ATyCon TyCon -- TyCons and classes; see Note [ATyCon for classes] | ACoAxiom (CoAxiom Branched) instance Outputable TyThing where ppr = pprShortTyThing instance NamedThing TyThing where -- Can't put this with the type getName (AnId id) = getName id -- decl, because the DataCon instance getName (ATyCon tc) = getName tc -- isn't visible there getName (ACoAxiom cc) = getName cc getName (AConLike cl) = conLikeName cl mkATyCon :: TyCon -> TyThing mkATyCon = ATyCon mkAnId :: Id -> TyThing mkAnId = AnId pprShortTyThing :: TyThing -> SDoc -- c.f. GHC.Types.TyThing.Ppr.pprTyThing, which prints all the details pprShortTyThing thing = pprTyThingCategory thing <+> quotes (ppr (getName thing)) pprTyThingCategory :: TyThing -> SDoc pprTyThingCategory = text . capitalise . tyThingCategory tyThingCategory :: TyThing -> String tyThingCategory (ATyCon tc) | isClassTyCon tc = "class" | otherwise = "type constructor" tyThingCategory (ACoAxiom _) = "coercion axiom" tyThingCategory (AnId _) = "identifier" tyThingCategory (AConLike (RealDataCon _)) = "data constructor" tyThingCategory (AConLike (PatSynCon _)) = "pattern synonym" {- Note [Implicit TyThings] ~~~~~~~~~~~~~~~~~~~~~~~~ DEFINITION: An "implicit" TyThing is one that does not have its own IfaceDecl in an interface file. Instead, its binding in the type environment is created as part of typechecking the IfaceDecl for some other thing. Examples: * All DataCons are implicit, because they are generated from the IfaceDecl for the data/newtype. Ditto class methods. * Record selectors are *not* implicit, because they get their own free-standing IfaceDecl. * Associated data/type families are implicit because they are included in the IfaceDecl of the parent class. (NB: the IfaceClass decl happens to use IfaceDecl recursively for the associated types, but that's irrelevant here.) * Dictionary function Ids are not implicit. 
* Axioms for newtypes are implicit (same as above), but axioms for data/type family instances are *not* implicit (like DFunIds). -} -- | Determine the 'TyThing's brought into scope by another 'TyThing' -- /other/ than itself. For example, Id's don't have any implicit TyThings -- as they just bring themselves into scope, but classes bring their -- dictionary datatype, type constructor and some selector functions into -- scope, just for a start! -- N.B. the set of TyThings returned here *must* match the set of -- names returned by 'GHC.Iface.Load.ifaceDeclImplicitBndrs', in the sense that -- TyThing.getOccName should define a bijection between the two lists. -- This invariant is used in 'GHC.IfaceToCore.tc_iface_decl_fingerprint' (see -- Note [Tricky iface loop]) -- The order of the list does not matter. implicitTyThings :: TyThing -> [TyThing] implicitTyThings (AnId _) = [] implicitTyThings (ACoAxiom _cc) = [] implicitTyThings (ATyCon tc) = implicitTyConThings tc implicitTyThings (AConLike cl) = implicitConLikeThings cl implicitConLikeThings :: ConLike -> [TyThing] implicitConLikeThings (RealDataCon dc) = dataConImplicitTyThings dc implicitConLikeThings (PatSynCon {}) = [] -- Pattern synonyms have no implicit Ids; the wrapper and matcher -- are not "implicit"; they are simply new top-level bindings, -- and they have their own declaration in an interface file -- Unless a record pat syn when there are implicit selectors -- They are still not included here as `implicitConLikeThings` is -- used by `tcTyClsDecls` whilst pattern synonyms are typed checked -- by `tcTopValBinds`. 
implicitClassThings :: Class -> [TyThing] implicitClassThings cl = -- Does not include default methods, because those Ids may have -- their own pragmas, unfoldings etc, not derived from the Class object -- associated types -- No recursive call for the classATs, because they -- are only the family decls; they have no implicit things map ATyCon (classATs cl) ++ -- superclass and operation selectors map AnId (classAllSelIds cl) implicitTyConThings :: TyCon -> [TyThing] implicitTyConThings tc = class_stuff ++ -- fields (names of selectors) -- (possibly) implicit newtype axioms -- or type family axioms implicitCoTyCon tc ++ -- for each data constructor in order, -- the constructor and associated implicit 'Id's datacon_stuff -- NB. record selectors are *not* implicit, they have fully-fledged -- bindings that pass through the compilation pipeline as normal. where class_stuff = case tyConClass_maybe tc of Nothing -> [] Just cl -> implicitClassThings cl -- For each data constructor in order, -- the constructor, worker, and (possibly) wrapper -- -- If the data constructor is in a "type data" declaration, -- promote it to the type level now. -- See Note [Type data declarations] in GHC.Rename.Module. datacon_stuff :: [TyThing] datacon_stuff | isTypeDataTyCon tc = [ATyCon (promoteDataCon dc) | dc <- cons] | otherwise = [ty_thing | dc <- cons, ty_thing <- AConLike (RealDataCon dc) : dataConImplicitTyThings dc] cons :: [DataCon] cons = tyConDataCons tc -- For newtypes and closed type families (only) add the implicit coercion tycon implicitCoTyCon :: TyCon -> [TyThing] implicitCoTyCon tc | Just co <- newTyConCo_maybe tc = [ACoAxiom $ toBranchedAxiom co] | Just co <- isClosedSynFamilyTyConWithAxiom_maybe tc = [ACoAxiom co] | otherwise = [] -- | Returns @True@ if there should be no interface-file declaration -- for this thing on its own: either it is built-in, or it is part -- of some other declaration, or it is generated implicitly by some -- other declaration. 
isImplicitTyThing :: TyThing -> Bool isImplicitTyThing (AConLike cl) = case cl of RealDataCon {} -> True PatSynCon {} -> False isImplicitTyThing (AnId id) = isImplicitId id isImplicitTyThing (ATyCon tc) = isImplicitTyCon tc isImplicitTyThing (ACoAxiom ax) = isImplicitCoAxiom ax -- | tyThingParent_maybe x returns (Just p) -- when pprTyThingInContext should print a declaration for p -- (albeit with some "..." in it) when asked to show x -- It returns the *immediate* parent. So a datacon returns its tycon -- but the tycon could be the associated type of a class, so it in turn -- might have a parent. tyThingParent_maybe :: TyThing -> Maybe TyThing tyThingParent_maybe (AConLike cl) = case cl of RealDataCon dc -> Just (ATyCon (dataConTyCon dc)) PatSynCon{} -> Nothing tyThingParent_maybe (ATyCon tc) | -- Special case for `type data` data constructors. They appear as an -- ATyCon (not ADataCon) but we want to display them here as if they were -- a DataCon (i.e. with the parent declaration) (#22817). -- See Note [Type data declarations] in GHC.Rename.Module. Just dc <- isPromotedDataCon_maybe tc , let parent_tc = dataConTyCon dc , isTypeDataTyCon parent_tc = Just (ATyCon parent_tc) | Just tc <- tyConAssoc_maybe tc = Just (ATyCon tc) | otherwise = Nothing tyThingParent_maybe (AnId id) = case idDetails id of RecSelId { sel_tycon = RecSelData tc } -> Just (ATyCon tc) RecSelId { sel_tycon = RecSelPatSyn ps } -> Just (AConLike (PatSynCon ps)) ClassOpId cls _ -> Just (ATyCon (classTyCon cls)) _other -> Nothing tyThingParent_maybe _other = Nothing tyThingsTyCoVars :: [TyThing] -> TyCoVarSet tyThingsTyCoVars tts = unionVarSets $ map ttToVarSet tts where ttToVarSet (AnId id) = tyCoVarsOfType $ idType id ttToVarSet (AConLike cl) = case cl of RealDataCon dc -> tyCoVarsOfType $ dataConRepType dc PatSynCon{} -> emptyVarSet ttToVarSet (ATyCon tc) = case tyConClass_maybe tc of Just cls -> (mkVarSet . fst . 
classTvsFds) cls Nothing -> tyCoVarsOfType $ tyConKind tc ttToVarSet (ACoAxiom _) = emptyVarSet -- | The 'GlobalRdrElt's that a 'TyThing' should bring into scope. -- Used to build the 'GlobalRdrEnv' for the InteractiveContext. tyThingLocalGREs :: TyThing -> [GlobalRdrElt] tyThingLocalGREs ty_thing = case ty_thing of ATyCon t | Just c <- tyConClass_maybe t -> myself NoParent : ( map (mkLocalVanillaGRE (ParentIs $ className c) . getName) (classMethods c) ++ map tc_GRE (classATs c) ) | otherwise -> let dcs = tyConDataCons t par = ParentIs $ tyConName t mk_nm = DataConName . dataConName in myself NoParent : map (dc_GRE par) dcs ++ mkLocalFieldGREs par [ (mk_nm dc, con_info) | dc <- dcs , let con_info = conLikeConInfo (RealDataCon dc) ] AConLike con -> let (par, cons_flds) = case con of PatSynCon {} -> (NoParent, [(conLikeConLikeName con, conLikeConInfo con)]) -- NB: NoParent for local pattern synonyms, as per -- Note [Parents] in GHC.Types.Name.Reader. RealDataCon dc1 -> (ParentIs $ tyConName $ dataConTyCon dc1 , [ (DataConName $ dataConName $ dc, ConInfo conInfo (ConHasRecordFields (fld :| flds))) | dc <- tyConDataCons $ dataConTyCon dc1 -- Go through all the data constructors of the parent TyCon, -- to ensure that all the record fields have the correct set -- of parent data constructors. See #23546. , let con_info = conLikeConInfo (RealDataCon dc) , ConInfo conInfo (ConHasRecordFields flds0) <- [con_info] , let flds1 = NE.toList flds0 `intersect` dataConFieldLabels dc , fld:flds <- [flds1] ]) in myself par : mkLocalFieldGREs par cons_flds AnId id | RecSelId { sel_tycon = RecSelData tc } <- idDetails id -> [ myself (ParentIs $ tyConName tc) ] -- Fallback to NoParent for PatSyn record selectors, -- as per Note [Parents] in GHC.Types.Name.Reader. 
_ -> [ myself NoParent ] where tc_GRE :: TyCon -> GlobalRdrElt tc_GRE at = mkLocalTyConGRE (fmap tyConName $ tyConFlavour at) (tyConName at) dc_GRE :: Parent -> DataCon -> GlobalRdrElt dc_GRE par dc = let con_info = conLikeConInfo (RealDataCon dc) in mkLocalConLikeGRE par (DataConName $ dataConName dc, con_info) myself :: Parent -> GlobalRdrElt myself p = mkLocalGRE (tyThingGREInfo ty_thing) p (getName ty_thing) -- | Obtain information pertinent to the renamer about a particular 'TyThing'. -- -- This extracts out renamer information from typechecker information. tyThingGREInfo :: TyThing -> GREInfo tyThingGREInfo = \case AConLike con -> IAmConLike $ conLikeConInfo con AnId id -> case idDetails id of RecSelId { sel_tycon = parent, sel_fieldLabel = fl } -> let relevant_cons = case parent of RecSelPatSyn ps -> unitUniqSet $ PatSynName (patSynName ps) RecSelData tc -> let dcs = map RealDataCon $ tyConDataCons tc in case conLikesWithFields dcs [flLabel fl] of ([], _) -> pprPanic "tyThingGREInfo: no DataCons with this FieldLabel" $ vcat [ text "id:" <+> ppr id , text "fl:" <+> ppr fl , text "dcs:" <+> ppr dcs ] (cons, _) -> mkUniqSet $ map conLikeConLikeName cons in IAmRecField $ RecFieldInfo { recFieldLabel = fl , recFieldCons = relevant_cons } _ -> Vanilla ATyCon tc -> IAmTyCon (fmap tyConName $ tyConFlavour tc) _ -> Vanilla -- | Get the 'TyCon' from a 'TyThing' if it is a type constructor thing. Panics otherwise tyThingTyCon :: HasDebugCallStack => TyThing -> TyCon tyThingTyCon (ATyCon tc) = tc tyThingTyCon other = pprPanic "tyThingTyCon" (ppr other) -- | Get the 'CoAxiom' from a 'TyThing' if it is a coercion axiom thing. Panics otherwise tyThingCoAxiom :: HasDebugCallStack => TyThing -> CoAxiom Branched tyThingCoAxiom (ACoAxiom ax) = ax tyThingCoAxiom other = pprPanic "tyThingCoAxiom" (ppr other) -- | Get the 'DataCon' from a 'TyThing' if it is a data constructor thing. 
Panics otherwise tyThingDataCon :: HasDebugCallStack => TyThing -> DataCon tyThingDataCon (AConLike (RealDataCon dc)) = dc tyThingDataCon other = pprPanic "tyThingDataCon" (ppr other) -- | Get the 'ConLike' from a 'TyThing' if it is a data constructor thing. -- Panics otherwise tyThingConLike :: HasDebugCallStack => TyThing -> ConLike tyThingConLike (AConLike dc) = dc tyThingConLike other = pprPanic "tyThingConLike" (ppr other) -- | Get the 'Id' from a 'TyThing' if it is a id *or* data constructor thing. Panics otherwise tyThingId :: HasDebugCallStack => TyThing -> Id tyThingId (AnId id) = id tyThingId (AConLike (RealDataCon dc)) = dataConWrapId dc tyThingId other = pprPanic "tyThingId" (ppr other) -- | Class that abstracts out the common ability of the monads in GHC -- to lookup a 'TyThing' in the monadic environment by 'Name'. Provides -- a number of related convenience functions for accessing particular -- kinds of 'TyThing' class Monad m => MonadThings m where lookupThing :: Name -> m TyThing lookupId :: Name -> m Id lookupId = liftM tyThingId . lookupThing lookupDataCon :: Name -> m DataCon lookupDataCon = liftM tyThingDataCon . lookupThing lookupTyCon :: Name -> m TyCon lookupTyCon = liftM tyThingTyCon . lookupThing -- Instance used in GHC.HsToCore.Quote instance MonadThings m => MonadThings (ReaderT s m) where lookupThing = lift . 
lookupThing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TyThing.hs-boot0000644000000000000000000000025607346545000021700 0ustar0000000000000000module GHC.Types.TyThing where import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Types.Var data TyThing mkATyCon :: TyCon -> TyThing mkAnId :: Id -> TyThing ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TyThing/0000755000000000000000000000000007346545000020400 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TyThing/Ppr.hs0000644000000000000000000001701007346545000021474 0ustar0000000000000000----------------------------------------------------------------------------- -- -- Pretty-printing TyThings -- -- (c) The GHC Team 2005 -- ----------------------------------------------------------------------------- module GHC.Types.TyThing.Ppr ( pprTyThing, pprTyThingInContext, pprTyThingLoc, pprTyThingInContextLoc, pprTyThingHdr, pprFamInst ) where import GHC.Prelude import GHC.Types.TyThing ( TyThing(..), tyThingParent_maybe ) import GHC.Types.Name import GHC.Core.Type ( ForAllTyFlag(..), mkTyVarBinders ) import GHC.Core.Coercion.Axiom ( coAxiomTyCon ) import GHC.Core.FamInstEnv( FamInst(..), FamFlavor(..) ) import GHC.Core.TyCo.Ppr ( pprUserForAll, pprTypeApp ) import GHC.Iface.Decl ( tyThingToIfaceDecl ) import GHC.Iface.Syntax ( ShowSub(..), ShowHowMuch(..), AltPpr(..) , showToHeader, pprIfaceDecl ) import GHC.Utils.Outputable import Data.Maybe ( isJust ) -- ----------------------------------------------------------------------------- -- Pretty-printing entities that we get from the GHC API {- Note [Pretty printing via Iface syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Our general plan for pretty-printing - Types - TyCons - Classes - Pattern synonyms ...etc... is to convert them to Iface syntax, and pretty-print that. For example - pprType converts a Type to an IfaceType, and pretty prints that. - pprTyThing converts the TyThing to an IfaceDecl, and pretty prints that. 
So Iface syntax plays a dual role: - it's the internal version of an interface files - it's used for pretty-printing Why do this? * A significant reason is that we need to be able to pretty-print Iface syntax (to display Foo.hi), and it was a pain to duplicate masses of pretty-printing goop, esp for Type and IfaceType. * When pretty-printing (a type, say), we want to tidy (with tidyType) to avoids having (forall a a. blah) where the two a's have different uniques. Alas, for type constructors, TyCon, tidying does not work well, because a TyCon includes DataCons which include Types, which mention TyCons. And tidying can't tidy a mutually recursive data structure graph, only trees. * Interface files contains fast-strings, not uniques, so the very same tidying must take place when we convert to IfaceDecl. E.g. GHC.Iface.Make.tyThingToIfaceDecl which converts a TyThing (i.e. TyCon, Class etc) to an IfaceDecl. Bottom line: IfaceDecls are already 'tidy', so it's straightforward to print them. * An alternative I once explored was to ensure that TyCons get type variables with distinct print-names. That's ok for type variables but less easy for kind variables. Processing data type declarations is already so complicated that I don't think it's sensible to add the extra requirement that it generates only "pretty" types and kinds. Consequences: - Iface syntax (and IfaceType) must contain enough information to print nicely. Hence, for example, the IfaceAppArgs type, which allows us to suppress invisible kind arguments in types (see Note [Suppressing invisible arguments] in GHC.Iface.Type) - In a few places we have info that is used only for pretty-printing, and is totally ignored when turning Iface syntax back into Core (in GHC.IfaceToCore). For example, IfaceClosedSynFamilyTyCon stores a [IfaceAxBranch] that is used only for pretty-printing. 
- See Note [Free TyVars and CoVars in IfaceType] in GHC.Iface.Type See #7730, #8776 for details -} -------------------- -- | Pretty-prints a 'FamInst' (type/data family instance) with its defining location. pprFamInst :: FamInst -> SDoc -- * For data instances we go via pprTyThing of the representational TyCon, -- because there is already much cleverness associated with printing -- data type declarations that I don't want to duplicate -- * For type instances we print directly here; there is no TyCon -- to give to pprTyThing -- -- FamInstEnv.pprFamInst does a more quick-and-dirty job for internal purposes pprFamInst (FamInst { fi_flavor = DataFamilyInst rep_tc }) = pprTyThingInContextLoc (ATyCon rep_tc) pprFamInst (FamInst { fi_flavor = SynFamilyInst, fi_axiom = axiom , fi_tvs = tvs, fi_tys = lhs_tys, fi_rhs = rhs }) = showWithLoc (pprDefinedAt (getName axiom)) $ hang (text "type instance" <+> pprUserForAll (mkTyVarBinders Specified tvs) -- See Note [Printing foralls in type family instances] -- in GHC.Iface.Type <+> pprTypeApp (coAxiomTyCon axiom) lhs_tys) 2 (equals <+> ppr rhs) ---------------------------- -- | Pretty-prints a 'TyThing' with its defining location. pprTyThingLoc :: TyThing -> SDoc pprTyThingLoc tyThing = showWithLoc (pprDefinedAt (getName tyThing)) (pprTyThing showToHeader tyThing) -- | Pretty-prints the 'TyThing' header. For functions and data constructors -- the function is equivalent to 'pprTyThing' but for type constructors -- and classes it prints only the header part of the declaration. pprTyThingHdr :: TyThing -> SDoc pprTyThingHdr = pprTyThing showToHeader -- | Pretty-prints a 'TyThing' in context: that is, if the entity -- is a data constructor, record selector, or class method, then -- the entity's parent declaration is pretty-printed with irrelevant -- parts omitted. pprTyThingInContext :: ShowSub -> TyThing -> SDoc pprTyThingInContext show_sub thing = case parents thing of -- If there are no parents print everything. 
[] -> print_it Nothing thing -- If `thing` has a parent, print the parent and only its child `thing` thing':rest -> let subs = map getOccName (thing:rest) filt = (`elem` subs) in print_it (Just filt) thing' where parents = go where go thing = case tyThingParent_maybe thing of Just parent -> parent : go parent Nothing -> [] print_it :: Maybe (OccName -> Bool) -> TyThing -> SDoc print_it mb_filt thing = pprTyThing (show_sub { ss_how_much = ShowSome mb_filt (AltPpr Nothing) }) thing -- | Like 'pprTyThingInContext', but adds the defining location. pprTyThingInContextLoc :: TyThing -> SDoc pprTyThingInContextLoc tyThing = showWithLoc (pprDefinedAt (getName tyThing)) (pprTyThingInContext showToHeader tyThing) -- | Pretty-prints a 'TyThing'. pprTyThing :: ShowSub -> TyThing -> SDoc -- We pretty-print 'TyThing' via 'IfaceDecl' -- See Note [Pretty printing via Iface syntax] pprTyThing ss ty_thing = sdocOption sdocLinearTypes $ \show_linear_types -> pprIfaceDecl ss' (tyThingToIfaceDecl show_linear_types ty_thing) where ss' = case ss_how_much ss of ShowHeader (AltPpr Nothing) -> ss { ss_how_much = ShowHeader ppr' } ShowSome filt (AltPpr Nothing) -> ss { ss_how_much = ShowSome filt ppr' } _ -> ss ppr' = AltPpr $ ppr_bndr $ getName ty_thing ppr_bndr :: Name -> Maybe (OccName -> SDoc) ppr_bndr name | isBuiltInSyntax name || isJust (namePun_maybe name) = Nothing | otherwise = case nameModule_maybe name of Just mod -> Just $ \occ -> getPprStyle $ \sty -> pprModulePrefix sty mod occ <> ppr occ Nothing -> warnPprTrace True "pprTyThing" (ppr name) Nothing -- Nothing is unexpected here; TyThings have External names showWithLoc :: SDoc -> SDoc -> SDoc showWithLoc loc doc = hang doc 2 (char '\t' <> comment <+> loc) -- The tab tries to make them line up a bit where comment = text "--" ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TyThing/Ppr.hs-boot0000644000000000000000000000045307346545000022440 0ustar0000000000000000module GHC.Types.TyThing.Ppr ( pprTyThing, pprTyThingInContext ) 
where import GHC.Iface.Type ( ShowSub ) import GHC.Types.TyThing ( TyThing ) import GHC.Utils.Outputable ( SDoc ) pprTyThing :: ShowSub -> TyThing -> SDoc pprTyThingInContext :: ShowSub -> TyThing -> SDoc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/TypeEnv.hs0000644000000000000000000000552307346545000020745 0ustar0000000000000000module GHC.Types.TypeEnv ( TypeEnv , emptyTypeEnv , lookupTypeEnv , mkTypeEnv , typeEnvFromEntities , mkTypeEnvWithImplicits , extendTypeEnv , extendTypeEnvList , extendTypeEnvWithIds , plusTypeEnv , typeEnvElts , typeEnvTyCons , typeEnvIds , typeEnvPatSyns , typeEnvDataCons , typeEnvCoAxioms , typeEnvClasses ) where import GHC.Prelude import GHC.Core.Class import GHC.Core.Coercion.Axiom import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Core.FamInstEnv import GHC.Core.PatSyn import GHC.Core.TyCon import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Var import GHC.Types.TyThing -- | A map from 'Name's to 'TyThing's, constructed by typechecking -- local declarations or interface files type TypeEnv = NameEnv TyThing emptyTypeEnv :: TypeEnv typeEnvElts :: TypeEnv -> [TyThing] typeEnvTyCons :: TypeEnv -> [TyCon] typeEnvCoAxioms :: TypeEnv -> [CoAxiom Branched] typeEnvIds :: TypeEnv -> [Id] typeEnvPatSyns :: TypeEnv -> [PatSyn] typeEnvDataCons :: TypeEnv -> [DataCon] typeEnvClasses :: TypeEnv -> [Class] lookupTypeEnv :: TypeEnv -> Name -> Maybe TyThing emptyTypeEnv = emptyNameEnv typeEnvElts env = nonDetNameEnvElts env typeEnvTyCons env = [tc | ATyCon tc <- typeEnvElts env] typeEnvCoAxioms env = [ax | ACoAxiom ax <- typeEnvElts env] typeEnvIds env = [id | AnId id <- typeEnvElts env] typeEnvPatSyns env = [ps | AConLike (PatSynCon ps) <- typeEnvElts env] typeEnvDataCons env = [dc | AConLike (RealDataCon dc) <- typeEnvElts env] typeEnvClasses env = [cl | tc <- typeEnvTyCons env, Just cl <- [tyConClass_maybe tc]] mkTypeEnv :: [TyThing] -> TypeEnv mkTypeEnv things = extendTypeEnvList emptyTypeEnv things 
mkTypeEnvWithImplicits :: [TyThing] -> TypeEnv mkTypeEnvWithImplicits things = mkTypeEnv things `plusNameEnv` mkTypeEnv (concatMap implicitTyThings things) typeEnvFromEntities :: [Id] -> [TyCon] -> [PatSyn] -> [FamInst] -> TypeEnv typeEnvFromEntities ids tcs patsyns famInsts = mkTypeEnv ( map AnId ids ++ map ATyCon all_tcs ++ concatMap implicitTyConThings all_tcs ++ map (ACoAxiom . toBranchedAxiom . famInstAxiom) famInsts ++ map (AConLike . PatSynCon) patsyns ) where all_tcs = tcs ++ famInstsRepTyCons famInsts lookupTypeEnv = lookupNameEnv -- Extend the type environment extendTypeEnv :: TypeEnv -> TyThing -> TypeEnv extendTypeEnv env thing = extendNameEnv env (getName thing) thing extendTypeEnvList :: TypeEnv -> [TyThing] -> TypeEnv extendTypeEnvList env things = foldl' extendTypeEnv env things extendTypeEnvWithIds :: TypeEnv -> [Id] -> TypeEnv extendTypeEnvWithIds env ids = extendNameEnvList env [(getName id, AnId id) | id <- ids] plusTypeEnv :: TypeEnv -> TypeEnv -> TypeEnv plusTypeEnv env1 env2 = plusNameEnv env1 env2 ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique.hs0000644000000000000000000002752007346545000020622 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 @Uniques@ are used to distinguish entities in the compiler (@Ids@, @Classes@, etc.) from each other. Thus, @Uniques@ are the basic comparison key in the compiler. If there is any single operation that needs to be fast, it is @Unique@ comparison. Unsurprisingly, there is quite a bit of huff-and-puff directed to that end. Some of the other hair in this code is to be able to use a ``splittable @UniqueSupply@'' if requested/possible (not standard Haskell). 
-} {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module GHC.Types.Unique ( -- * Main data types Unique, Uniquable(..), uNIQUE_BITS, -- ** Constructors, destructors and operations on 'Unique's hasKey, pprUniqueAlways, mkTag, mkUniqueGrimily, mkUniqueIntGrimily, getKey, mkUnique, unpkUnique, mkUniqueInt, eqUnique, ltUnique, incrUnique, stepUnique, newTagUnique, nonDetCmpUnique, isValidKnownKeyUnique, -- ** Local uniques -- | These are exposed exclusively for use by 'GHC.Types.Var.Env.uniqAway', which -- has rather peculiar needs. See Note [Local uniques]. mkLocalUnique, minLocalUnique, maxLocalUnique, ) where #include "Unique.h" import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Word64 (intToWord64, word64ToInt) -- just for implementing a fast [0,61) -> Char function import GHC.Exts (indexCharOffAddr#, Char(..), Int(..)) import GHC.Word ( Word64 ) import Data.Char ( chr, ord ) import Language.Haskell.Syntax.Module.Name {- ************************************************************************ * * \subsection[Unique-type]{@Unique@ type and operations} * * ************************************************************************ Note [Uniques and tags] ~~~~~~~~~~~~~~~~~~~~~~~~ A `Unique` in GHC is a 64 bit value composed of two pieces: * A "tag", of width `UNIQUE_TAG_BITS`, in the high order bits * A number, of width `uNIQUE_BITS`, which fills up the remainder of the Word64 The tag is typically an ASCII character. It is typically used to make it easier to distinguish uniques constructed by different parts of the compiler. There is a (potentially incomplete) list of unique tags used given in GHC.Builtin.Uniques. See Note [Uniques for wired-in prelude things and known tags] `mkUnique` constructs a `Unique` from its pieces mkUnique :: Char -> Word64 -> Unique -} -- | Unique identifier. -- -- The type of unique identifiers that are used in many places in GHC -- for fast ordering and equality tests. 
You should generate these with -- the functions from the 'UniqSupply' module -- -- These are sometimes also referred to as \"keys\" in comments in GHC. newtype Unique = MkUnique Word64 {-# INLINE uNIQUE_BITS #-} uNIQUE_BITS :: Int uNIQUE_BITS = 64 - UNIQUE_TAG_BITS {- Now come the functions which construct uniques from their pieces, and vice versa. The stuff about unique *supplies* is handled further down this module. -} unpkUnique :: Unique -> (Char, Word64) -- The reverse mkUniqueGrimily :: Word64 -> Unique -- A trap-door for UniqSupply getKey :: Unique -> Word64 -- for Var incrUnique :: Unique -> Unique stepUnique :: Unique -> Word64 -> Unique newTagUnique :: Unique -> Char -> Unique mkUniqueGrimily = MkUnique {-# INLINE getKey #-} getKey (MkUnique x) = x incrUnique (MkUnique i) = MkUnique (i + 1) stepUnique (MkUnique i) n = MkUnique (i + n) mkLocalUnique :: Word64 -> Unique mkLocalUnique i = mkUnique 'X' i minLocalUnique :: Unique minLocalUnique = mkLocalUnique 0 maxLocalUnique :: Unique maxLocalUnique = mkLocalUnique uniqueMask -- newTagUnique changes the "domain" of a unique to a different char newTagUnique u c = mkUnique c i where (_,i) = unpkUnique u -- | Bitmask that has zeros for the tag bits and ones for the rest. uniqueMask :: Word64 uniqueMask = (1 `shiftL` uNIQUE_BITS) - 1 -- | Put the character in the highest bits of the Word64. -- This may truncate the character to UNIQUE_TAG_BITS. -- This function is used in @`mkSplitUniqSupply`@ so that it can -- precompute and share the tag part of the uniques it generates. mkTag :: Char -> Word64 mkTag c = intToWord64 (ord c) `shiftL` uNIQUE_BITS -- pop the Char in the top 8 bits of the Unique(Supply) -- No 64-bit bugs here, as long as we have at least 32 bits. --JSM -- and as long as the Char fits in 8 bits, which we assume anyway! mkUnique :: Char -> Word64 -> Unique -- Builds a unique from pieces -- EXPORTED and used only in GHC.Builtin.Uniques mkUnique c i = MkUnique (tag .|. 
bits) where tag = mkTag c bits = i .&. uniqueMask mkUniqueInt :: Char -> Int -> Unique mkUniqueInt c i = mkUnique c (intToWord64 i) mkUniqueIntGrimily :: Int -> Unique mkUniqueIntGrimily = MkUnique . intToWord64 unpkUnique (MkUnique u) = let -- The potentially truncating use of fromIntegral here is safe -- because the argument is just the tag bits after shifting. tag = chr (word64ToInt (u `shiftR` uNIQUE_BITS)) i = u .&. uniqueMask in (tag, i) -- | The interface file symbol-table encoding assumes that known-key uniques fit -- in 30-bits; verify this. -- -- See Note [Symbol table representation of names] in "GHC.Iface.Binary" for details. isValidKnownKeyUnique :: Unique -> Bool isValidKnownKeyUnique u = case unpkUnique u of (c, x) -> ord c < 0xff && x <= (1 `shiftL` 22) {- ************************************************************************ * * \subsection[Uniquable-class]{The @Uniquable@ class} * * ************************************************************************ -} -- | Class of things that we can obtain a 'Unique' from class Uniquable a where getUnique :: a -> Unique hasKey :: Uniquable a => a -> Unique -> Bool x `hasKey` k = getUnique x == k instance Uniquable FastString where getUnique fs = mkUniqueIntGrimily (uniqueOfFS fs) instance Uniquable Int where getUnique i = mkUniqueIntGrimily i instance Uniquable Word64 where getUnique i = mkUniqueGrimily i instance Uniquable ModuleName where getUnique (ModuleName nm) = getUnique nm {- ************************************************************************ * * \subsection[Unique-instances]{Instance declarations for @Unique@} * * ************************************************************************ And the whole point (besides uniqueness) is fast equality. We don't use `deriving' because we want {\em precise} control of ordering (equality on @Uniques@ is v common). -} -- Note [Unique Determinism] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- The order of allocated @Uniques@ is not stable across rebuilds. 
-- The main reason for that is that typechecking interface files pulls -- @Uniques@ from @UniqSupply@ and the interface file for the module being -- currently compiled can, but doesn't have to exist. -- -- It gets more complicated if you take into account that the interface -- files are loaded lazily and that building multiple files at once has to -- work for any subset of interface files present. When you add parallelism -- this makes @Uniques@ hopelessly random. -- -- As such, to get deterministic builds, the order of the allocated -- @Uniques@ should not affect the final result. -- see also wiki/deterministic-builds -- -- Note [Unique Determinism and code generation] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The goal of the deterministic builds (wiki/deterministic-builds, #4012) -- is to get ABI compatible binaries given the same inputs and environment. -- The motivation behind that is that if the ABI doesn't change the -- binaries can be safely reused. -- -- Besides ABI/interface determinism, we also guarantee bit-for-bit identical -- binaries (when -fobject-determinism is given), also known as object -- determinism (#12935) -- -- To achieve this, we must take care to non-determinism in the code -- generation, and, in particular, guarantee that the existing uniques are -- renamed deterministically and new ones are produced deterministically too. -- The overview of object determinism is given by Note [Object determinism]. -- References to this note identify code where the unique determinism may -- impact object determinism more specifically. eqUnique :: Unique -> Unique -> Bool eqUnique (MkUnique u1) (MkUnique u2) = u1 == u2 ltUnique :: Unique -> Unique -> Bool ltUnique (MkUnique u1) (MkUnique u2) = u1 < u2 -- Provided here to make it explicit at the call-site that it can -- introduce non-determinism. 
-- See Note [Unique Determinism] -- See Note [No Ord for Unique] nonDetCmpUnique :: Unique -> Unique -> Ordering nonDetCmpUnique (MkUnique u1) (MkUnique u2) = if u1 == u2 then EQ else if u1 < u2 then LT else GT {- Note [No Ord for Unique] ~~~~~~~~~~~~~~~~~~~~~~~~~~ As explained in Note [Unique Determinism] the relative order of Uniques is nondeterministic. To prevent from accidental use the Ord Unique instance has been removed. This makes it easier to maintain deterministic builds, but comes with some drawbacks. The biggest drawback is that Maps keyed by Uniques can't directly be used. The alternatives are: 1) Use UniqFM or UniqDFM, see Note [Deterministic UniqFM] to decide which 2) Create a newtype wrapper based on Unique ordering where nondeterminism is controlled. See GHC.Unit.Module.Env.ModuleEnv 3) Change the algorithm to use nonDetCmpUnique and document why it's still deterministic 4) Use TrieMap as done in GHC.Cmm.CommonBlockElim.groupByLabel -} instance Eq Unique where a == b = eqUnique a b a /= b = not (eqUnique a b) instance Uniquable Unique where getUnique u = u -- We do sometimes make strings with @Uniques@ in them: showUnique :: Unique -> String showUnique uniq = case unpkUnique uniq of (tag, u) -> tag : w64ToBase62 u pprUniqueAlways :: IsLine doc => Unique -> doc -- The "always" means regardless of -dsuppress-uniques -- It replaces the old pprUnique to remind callers that -- they should consider whether they want to consult -- Opt_SuppressUniques pprUniqueAlways u = text (showUnique u) {-# SPECIALIZE pprUniqueAlways :: Unique -> SDoc #-} {-# SPECIALIZE pprUniqueAlways :: Unique -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable instance Outputable Unique where ppr = pprUniqueAlways instance Show Unique where show uniq = showUnique uniq {- ************************************************************************ * * \subsection[Utils-base62]{Base-62 numbers} * * ************************************************************************ 
A character-stingy way to read/write numbers (notably Uniques). The ``62-its'' are \tr{[0-9a-zA-Z]}. Code stolen from Lennart. -} w64ToBase62 :: Word64 -> String w64ToBase62 n_ = go n_ "" where -- The potentially truncating uses of fromIntegral here are safe -- because the argument is guaranteed to be less than 62 in both cases. go n cs | n < 62 = let !c = chooseChar62 (word64ToInt n) in c : cs | otherwise = go q (c : cs) where (!q, r) = quotRem n 62 !c = chooseChar62 (word64ToInt r) chooseChar62 :: Int -> Char {-# INLINE chooseChar62 #-} chooseChar62 (I# n) = C# (indexCharOffAddr# chars62 n) chars62 = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"# ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/0000755000000000000000000000000007346545000020260 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/DFM.hs0000644000000000000000000004772307346545000021237 0ustar0000000000000000{- (c) Bartosz Nitka, Facebook, 2015 UniqDFM: Specialised deterministic finite maps, for things with @Uniques@. Basically, the things need to be in class @Uniquable@, and we use the @getUnique@ method to grab their @Uniques@. This is very similar to @UniqFM@, the major difference being that the order of folding is not dependent on @Unique@ ordering, giving determinism. Currently the ordering is determined by insertion order. See Note [Unique Determinism] in GHC.Types.Unique for explanation why @Unique@ ordering is not deterministic. 
-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# OPTIONS_GHC -Wall #-} module GHC.Types.Unique.DFM ( -- * Unique-keyed deterministic mappings UniqDFM, -- abstract type -- ** Manipulating those mappings emptyUDFM, unitUDFM, addToUDFM, addToUDFM_C, addToUDFM_C_Directly, addToUDFM_Directly, addListToUDFM, delFromUDFM, delListFromUDFM, adjustUDFM, adjustUDFM_Directly, alterUDFM, mapUDFM, mapMaybeUDFM, mapMUDFM, plusUDFM, plusUDFM_C, plusUDFM_CK, lookupUDFM, lookupUDFM_Directly, elemUDFM, foldUDFM, foldWithKeyUDFM, eltsUDFM, filterUDFM, filterUDFM_Directly, isNullUDFM, sizeUDFM, intersectUDFM, udfmIntersectUFM, disjointUDFM, disjointUdfmUfm, equalKeysUDFM, minusUDFM, listToUDFM, listToUDFM_Directly, listToUDFM_C_Directly, udfmMinusUFM, ufmMinusUDFM, partitionUDFM, udfmRestrictKeys, udfmRestrictKeysSet, anyUDFM, allUDFM, pprUniqDFM, pprUDFM, udfmToList, udfmToUfm, nonDetStrictFoldUDFM, unsafeCastUDFMKey, alwaysUnsafeUfmToUdfm, ) where import GHC.Prelude import GHC.Types.Unique ( Uniquable(..), Unique, getKey, mkUniqueGrimily ) import GHC.Utils.Outputable import qualified GHC.Data.Word64Map.Strict as MS import qualified GHC.Data.Word64Map as M import Data.Data import Data.Functor.Classes (Eq1 (..)) import Data.List (sortBy) import Data.Function (on) import GHC.Types.Unique.FM (UniqFM, nonDetUFMToList, ufmToIntMap, unsafeIntMapToUFM) import Unsafe.Coerce import qualified GHC.Data.Word64Set as W -- Note [Deterministic UniqFM] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- A @UniqDFM@ is just like @UniqFM@ with the following additional -- property: the function `udfmToList` returns the elements in some -- deterministic order not depending on the Unique key for those elements. -- -- If the client of the map performs operations on the map in deterministic -- order then `udfmToList` returns them in deterministic order. 
-- -- There is an implementation cost: each element is given a serial number -- as it is added, and `udfmToList` sorts its result by this serial -- number. So you should only use `UniqDFM` if you need the deterministic -- property. -- -- `foldUDFM` also preserves determinism. -- -- Normal @UniqFM@ when you turn it into a list will use -- Data.IntMap.toList function that returns the elements in the order of -- the keys. The keys in @UniqFM@ are always @Uniques@, so you end up with -- with a list ordered by @Uniques@. -- The order of @Uniques@ is known to be not stable across rebuilds. -- See Note [Unique Determinism] in GHC.Types.Unique. -- -- -- There's more than one way to implement this. The implementation here tags -- every value with the insertion time that can later be used to sort the -- values when asked to convert to a list. -- -- An alternative would be to have -- -- data UniqDFM ele = UDFM (M.IntMap ele) [ele] -- -- where the list determines the order. This makes deletion tricky as we'd -- only accumulate elements in that list, but makes merging easier as you -- can just merge both structures independently. -- Deletion can probably be done in amortized fashion when the size of the -- list is twice the size of the set. -- | A type of values tagged with insertion time data TaggedVal val = TaggedVal !val {-# UNPACK #-} !Int -- ^ insertion time deriving stock (Data, Functor, Foldable, Traversable) taggedFst :: TaggedVal val -> val taggedFst (TaggedVal v _) = v taggedSnd :: TaggedVal val -> Int taggedSnd (TaggedVal _ i) = i instance Eq val => Eq (TaggedVal val) where (TaggedVal v1 _) == (TaggedVal v2 _) = v1 == v2 -- | Type of unique deterministic finite maps -- -- The key is just here to keep us honest. It's always safe -- to use a single type as key. -- If two types don't overlap in their uniques it's also safe -- to index the same map at multiple key types. But this is -- very much discouraged. 
data UniqDFM key ele = UDFM !(M.Word64Map (TaggedVal ele)) -- A map where keys are Unique's values and -- values are tagged with insertion time. -- The invariant is that all the tags will -- be distinct within a single map {-# UNPACK #-} !Int -- Upper bound on the values' insertion -- time. See Note [Overflow on plusUDFM] deriving (Data, Functor) -- | Deterministic, in O(n log n). instance Foldable (UniqDFM key) where foldr = foldUDFM -- | Deterministic, in O(n log n). instance Traversable (UniqDFM key) where traverse f = fmap listToUDFM_Directly . traverse (\(u,a) -> (u,) <$> f a) . udfmToList emptyUDFM :: UniqDFM key elt emptyUDFM = UDFM M.empty 0 unitUDFM :: Uniquable key => key -> elt -> UniqDFM key elt unitUDFM k v = UDFM (M.singleton (getKey $ getUnique k) (TaggedVal v 0)) 1 -- The new binding always goes to the right of existing ones addToUDFM :: Uniquable key => UniqDFM key elt -> key -> elt -> UniqDFM key elt addToUDFM m k v = addToUDFM_Directly m (getUnique k) v -- The new binding always goes to the right of existing ones addToUDFM_Directly :: UniqDFM key elt -> Unique -> elt -> UniqDFM key elt addToUDFM_Directly (UDFM m i) u v = UDFM (MS.insertWith tf (getKey u) (TaggedVal v i) m) (i + 1) where tf (TaggedVal new_v _) (TaggedVal _ old_i) = TaggedVal new_v old_i -- Keep the old tag, but insert the new value -- This means that udfmToList typically returns elements -- in the order of insertion, rather than the reverse -- It is quite critical that the strict insertWith is used as otherwise -- the combination function 'tf' is not forced and both old values are retained -- in the map. 
addToUDFM_C_Directly :: (elt -> elt -> elt) -- old -> new -> result -> UniqDFM key elt -> Unique -> elt -> UniqDFM key elt addToUDFM_C_Directly f (UDFM m i) u v = UDFM (MS.insertWith tf (getKey u) (TaggedVal v i) m) (i + 1) where tf (TaggedVal new_v _) (TaggedVal old_v old_i) = TaggedVal (f old_v new_v) old_i -- Flip the arguments, because M.insertWith uses (new->old->result) -- but f needs (old->new->result) -- Like addToUDFM_Directly, keep the old tag addToUDFM_C :: Uniquable key => (elt -> elt -> elt) -- old -> new -> result -> UniqDFM key elt -- old -> key -> elt -- new -> UniqDFM key elt -- result addToUDFM_C f m k v = addToUDFM_C_Directly f m (getUnique k) v addListToUDFM :: Uniquable key => UniqDFM key elt -> [(key,elt)] -> UniqDFM key elt addListToUDFM = foldl' (\m (k, v) -> addToUDFM m k v) {-# INLINEABLE addListToUDFM #-} addListToUDFM_Directly :: UniqDFM key elt -> [(Unique,elt)] -> UniqDFM key elt addListToUDFM_Directly = foldl' (\m (k, v) -> addToUDFM_Directly m k v) {-# INLINEABLE addListToUDFM_Directly #-} addListToUDFM_Directly_C :: (elt -> elt -> elt) -> UniqDFM key elt -> [(Unique,elt)] -> UniqDFM key elt addListToUDFM_Directly_C f = foldl' (\m (k, v) -> addToUDFM_C_Directly f m k v) {-# INLINEABLE addListToUDFM_Directly_C #-} -- | Like 'addListToUDFM_Directly_C' but also passes the unique key to the combine function addListToUDFM_Directly_CK :: (Unique -> elt -> elt -> elt) -> UniqDFM key elt -> [(Unique,elt)] -> UniqDFM key elt addListToUDFM_Directly_CK f = foldl' (\m (k, v) -> addToUDFM_C_Directly (f k) m k v) {-# INLINEABLE addListToUDFM_Directly_CK #-} delFromUDFM :: Uniquable key => UniqDFM key elt -> key -> UniqDFM key elt delFromUDFM (UDFM m i) k = UDFM (M.delete (getKey $ getUnique k) m) i plusUDFM_C :: (elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt plusUDFM_C f udfml@(UDFM _ i) udfmr@(UDFM _ j) -- we will use the upper bound on the tag as a proxy for the set size, -- to insert the smaller one into the bigger 
one | i > j = insertUDFMIntoLeft_C f udfml udfmr | otherwise = insertUDFMIntoLeft_C f udfmr udfml -- | Like 'plusUDFM_C' but the combine function also receives the unique key plusUDFM_CK :: (Unique -> elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt plusUDFM_CK f udfml@(UDFM _ i) udfmr@(UDFM _ j) -- we will use the upper bound on the tag as a proxy for the set size, -- to insert the smaller one into the bigger one | i > j = insertUDFMIntoLeft_CK f udfml udfmr | otherwise = insertUDFMIntoLeft_CK f udfmr udfml -- Note [Overflow on plusUDFM] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- There are multiple ways of implementing plusUDFM. -- The main problem that needs to be solved is overlap on times of -- insertion between different keys in two maps. -- Consider: -- -- A = fromList [(a, (x, 1))] -- B = fromList [(b, (y, 1))] -- -- If you merge them naively you end up with: -- -- C = fromList [(a, (x, 1)), (b, (y, 1))] -- -- Which loses information about ordering and brings us back into -- non-deterministic world. -- -- The solution I considered before would increment the tags on one of the -- sets by the upper bound of the other set. The problem with this approach -- is that you'll run out of tags for some merge patterns. -- Say you start with A with upper bound 1, you merge A with A to get A' and -- the upper bound becomes 2. You merge A' with A' and the upper bound -- doubles again. After 64 merges you overflow. -- This solution would have the same time complexity as plusUFM, namely O(n+m). -- -- The solution I ended up with has time complexity of -- O(m log m + m * min (n+m, W)) where m is the smaller set. -- It simply inserts the elements of the smaller set into the larger -- set in the order that they were inserted into the smaller set. That's -- O(m log m) for extracting the elements from the smaller set in the -- insertion order and O(m * min(n+m, W)) to insert them into the bigger -- set. 
plusUDFM :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt plusUDFM udfml@(UDFM _ i) udfmr@(UDFM _ j) -- we will use the upper bound on the tag as a proxy for the set size, -- to insert the smaller one into the bigger one | i > j = insertUDFMIntoLeft udfml udfmr | otherwise = insertUDFMIntoLeft udfmr udfml insertUDFMIntoLeft :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt insertUDFMIntoLeft udfml udfmr = addListToUDFM_Directly udfml $ udfmToList udfmr insertUDFMIntoLeft_C :: (elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt insertUDFMIntoLeft_C f udfml udfmr = addListToUDFM_Directly_C f udfml $ udfmToList udfmr -- | Like 'insertUDFMIntoLeft_C', but the merge function also receives the unique key insertUDFMIntoLeft_CK :: (Unique -> elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt insertUDFMIntoLeft_CK f udfml udfmr = addListToUDFM_Directly_CK f udfml $ udfmToList udfmr lookupUDFM :: Uniquable key => UniqDFM key elt -> key -> Maybe elt lookupUDFM (UDFM m _i) k = taggedFst `fmap` M.lookup (getKey $ getUnique k) m lookupUDFM_Directly :: UniqDFM key elt -> Unique -> Maybe elt lookupUDFM_Directly (UDFM m _i) k = taggedFst `fmap` M.lookup (getKey k) m elemUDFM :: Uniquable key => key -> UniqDFM key elt -> Bool elemUDFM k (UDFM m _i) = M.member (getKey $ getUnique k) m -- | Performs a deterministic fold over the UniqDFM. -- It's O(n log n) while the corresponding function on `UniqFM` is O(n). foldUDFM :: (elt -> a -> a) -> a -> UniqDFM key elt -> a {-# INLINE foldUDFM #-} -- This INLINE prevents a regression in !10568 foldUDFM k z m = foldr k z (eltsUDFM m) -- | Like 'foldUDFM' but the function also receives a key foldWithKeyUDFM :: (Unique -> elt -> a -> a) -> a -> UniqDFM key elt -> a {-# INLINE foldWithKeyUDFM #-} -- This INLINE was copied from foldUDFM foldWithKeyUDFM k z m = foldr (uncurry k) z (udfmToList m) -- | Performs a nondeterministic strict fold over the UniqDFM. 
-- It's O(n), same as the corresponding function on `UniqFM`. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldUDFM :: (elt -> a -> a) -> a -> UniqDFM key elt -> a nonDetStrictFoldUDFM k z (UDFM m _i) = foldl' k' z m where k' acc (TaggedVal v _) = k v acc eltsUDFM :: UniqDFM key elt -> [elt] {-# INLINE eltsUDFM #-} -- The INLINE makes it a good producer (from the map) eltsUDFM (UDFM m _i) = map taggedFst (sort_it m) sort_it :: M.Word64Map (TaggedVal elt) -> [TaggedVal elt] sort_it m = sortBy (compare `on` taggedSnd) (M.elems m) filterUDFM :: (elt -> Bool) -> UniqDFM key elt -> UniqDFM key elt filterUDFM p (UDFM m i) = UDFM (M.filter (\(TaggedVal v _) -> p v) m) i filterUDFM_Directly :: (Unique -> elt -> Bool) -> UniqDFM key elt -> UniqDFM key elt filterUDFM_Directly p (UDFM m i) = UDFM (M.filterWithKey p' m) i where p' k (TaggedVal v _) = p (mkUniqueGrimily k) v udfmRestrictKeys :: UniqDFM key elt -> UniqDFM key elt2 -> UniqDFM key elt udfmRestrictKeys (UDFM a i) (UDFM b _) = UDFM (M.restrictKeys a (M.keysSet b)) i udfmRestrictKeysSet :: UniqDFM key elt -> W.Word64Set -> UniqDFM key elt udfmRestrictKeysSet (UDFM val_set i) set = let key_set = set in UDFM (M.restrictKeys val_set key_set) i -- | Converts `UniqDFM` to a list, with elements in deterministic order. -- It's O(n log n) while the corresponding function on `UniqFM` is O(n). udfmToList :: UniqDFM key elt -> [(Unique, elt)] udfmToList (UDFM m _i) = [ (mkUniqueGrimily k, taggedFst v) | (k, v) <- sortBy (compare `on` (taggedSnd . snd)) $ M.toList m ] -- Determines whether two 'UniqDFM's contain the same keys. 
equalKeysUDFM :: UniqDFM key a -> UniqDFM key b -> Bool equalKeysUDFM (UDFM m1 _) (UDFM m2 _) = liftEq (\_ _ -> True) m1 m2 isNullUDFM :: UniqDFM key elt -> Bool isNullUDFM (UDFM m _) = M.null m sizeUDFM :: UniqDFM key elt -> Int sizeUDFM (UDFM m _i) = M.size m intersectUDFM :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt intersectUDFM (UDFM x i) (UDFM y _j) = UDFM (M.intersection x y) i -- M.intersection is left biased, that means the result will only have -- a subset of elements from the left set, so `i` is a good upper bound. udfmIntersectUFM :: UniqDFM key elt1 -> UniqFM key elt2 -> UniqDFM key elt1 udfmIntersectUFM (UDFM x i) y = UDFM (M.intersection x (ufmToIntMap y)) i -- M.intersection is left biased, that means the result will only have -- a subset of elements from the left set, so `i` is a good upper bound. disjointUDFM :: UniqDFM key elt -> UniqDFM key elt -> Bool disjointUDFM (UDFM x _i) (UDFM y _j) = M.disjoint x y disjointUdfmUfm :: UniqDFM key elt -> UniqFM key elt2 -> Bool disjointUdfmUfm (UDFM x _i) y = M.disjoint x (ufmToIntMap y) minusUDFM :: UniqDFM key elt1 -> UniqDFM key elt2 -> UniqDFM key elt1 minusUDFM (UDFM x i) (UDFM y _j) = UDFM (M.difference x y) i -- M.difference returns a subset of a left set, so `i` is a good upper -- bound. udfmMinusUFM :: UniqDFM key elt1 -> UniqFM key elt2 -> UniqDFM key elt1 udfmMinusUFM (UDFM x i) y = UDFM (M.difference x (ufmToIntMap y)) i -- M.difference returns a subset of a left set, so `i` is a good upper -- bound. ufmMinusUDFM :: UniqFM key elt1 -> UniqDFM key elt2 -> UniqFM key elt1 ufmMinusUDFM x (UDFM y _i) = unsafeIntMapToUFM (M.difference (ufmToIntMap x) y) -- | Partition UniqDFM into two UniqDFMs according to the predicate partitionUDFM :: (elt -> Bool) -> UniqDFM key elt -> (UniqDFM key elt, UniqDFM key elt) partitionUDFM p (UDFM m i) = case M.partition (p . 
taggedFst) m of (left, right) -> (UDFM left i, UDFM right i) -- | Delete a list of elements from a UniqDFM delListFromUDFM :: Uniquable key => UniqDFM key elt -> [key] -> UniqDFM key elt delListFromUDFM = foldl' delFromUDFM -- | This allows for lossy conversion from UniqDFM to UniqFM udfmToUfm :: UniqDFM key elt -> UniqFM key elt udfmToUfm (UDFM m _i) = unsafeIntMapToUFM (M.map taggedFst m) listToUDFM :: Uniquable key => [(key,elt)] -> UniqDFM key elt listToUDFM = foldl' (\m (k, v) -> addToUDFM m k v) emptyUDFM listToUDFM_Directly :: [(Unique, elt)] -> UniqDFM key elt listToUDFM_Directly = foldl' (\m (u, v) -> addToUDFM_Directly m u v) emptyUDFM listToUDFM_C_Directly :: (elt -> elt -> elt) -> [(Unique, elt)] -> UniqDFM key elt listToUDFM_C_Directly f = foldl' (\m (u, v) -> addToUDFM_C_Directly f m u v) emptyUDFM -- | Apply a function to a particular element adjustUDFM :: Uniquable key => (elt -> elt) -> UniqDFM key elt -> key -> UniqDFM key elt adjustUDFM f (UDFM m i) k = UDFM (M.adjust (fmap f) (getKey $ getUnique k) m) i -- | Apply a function to a particular element adjustUDFM_Directly :: (elt -> elt) -> UniqDFM key elt -> Unique -> UniqDFM key elt adjustUDFM_Directly f (UDFM m i) k = UDFM (M.adjust (fmap f) (getKey k) m) i -- | The expression (alterUDFM f k map) alters value x at k, or absence -- thereof. alterUDFM can be used to insert, delete, or update a value in -- UniqDFM. Use addToUDFM, delFromUDFM or adjustUDFM when possible, they are -- more efficient. 
alterUDFM :: Uniquable key => (Maybe elt -> Maybe elt) -- How to adjust -> UniqDFM key elt -- old -> key -- new -> UniqDFM key elt -- result alterUDFM f (UDFM m i) k = UDFM (M.alter alterf (getKey $ getUnique k) m) (i + 1) where alterf Nothing = inject $ f Nothing alterf (Just (TaggedVal v _)) = inject $ f (Just v) inject Nothing = Nothing inject (Just v) = Just $ TaggedVal v i -- | Map a function over every value in a UniqDFM mapUDFM :: (elt1 -> elt2) -> UniqDFM key elt1 -> UniqDFM key elt2 mapUDFM f (UDFM m i) = UDFM (MS.map (fmap f) m) i -- Critical this is strict map, otherwise you get a big space leak when reloading -- in GHCi because all old ModDetails are retained (see pruneHomePackageTable). -- Modify with care. {-# INLINEABLE mapMUDFM #-} -- | 'mapM' for a 'UniqDFM'. mapMUDFM :: Monad m => (elt1 -> m elt2) -> UniqDFM key elt1 -> m (UniqDFM key elt2) mapMUDFM f (UDFM m i) = do m' <- traverse (traverse f) m return $ UDFM m' i mapMaybeUDFM :: forall elt1 elt2 key. (elt1 -> Maybe elt2) -> UniqDFM key elt1 -> UniqDFM key elt2 mapMaybeUDFM f (UDFM m i) = UDFM (M.mapMaybe (traverse f) m) i anyUDFM :: (elt -> Bool) -> UniqDFM key elt -> Bool anyUDFM p (UDFM m _i) = M.foldr ((||) . p . taggedFst) False m allUDFM :: (elt -> Bool) -> UniqDFM key elt -> Bool allUDFM p (UDFM m _i) = M.foldr ((&&) . p . taggedFst) True m -- This should not be used in committed code, provided for convenience to -- make ad-hoc conversions when developing alwaysUnsafeUfmToUdfm :: UniqFM key elt -> UniqDFM key elt alwaysUnsafeUfmToUdfm = listToUDFM_Directly . nonDetUFMToList -- | Cast the key domain of a UniqFM. -- -- As long as the domains don't overlap in their uniques -- this is safe. unsafeCastUDFMKey :: UniqDFM key1 elt -> UniqDFM key2 elt unsafeCastUDFMKey = unsafeCoerce -- Only phantom parameter changes so -- this is safe and avoids reallocation. 
-- Output-ery instance Outputable a => Outputable (UniqDFM key a) where ppr ufm = pprUniqDFM ppr ufm pprUniqDFM :: (a -> SDoc) -> UniqDFM key a -> SDoc pprUniqDFM ppr_elt ufm = brackets $ fsep $ punctuate comma $ [ ppr uq <+> text ":->" <+> ppr_elt elt | (uq, elt) <- udfmToList ufm ] pprUDFM :: UniqDFM key a -- ^ The things to be pretty printed -> ([a] -> SDoc) -- ^ The pretty printing function to use on the elements -> SDoc -- ^ 'SDoc' where the things have been pretty -- printed pprUDFM ufm pp = pp (eltsUDFM ufm) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/DSM.hs0000644000000000000000000002164007346545000021242 0ustar0000000000000000{-# LANGUAGE UnboxedTuples, PatternSynonyms, DerivingVia #-} module GHC.Types.Unique.DSM ( -- * Threading a deterministic supply DUniqSupply , UniqDSM(UDSM) , DUniqResult , pattern DUniqResult -- ** UniqDSM and DUniqSupply operations , getUniqueDSM , runUniqueDSM , takeUniqueFromDSupply , initDUniqSupply -- ** Tag operations , newTagDUniqSupply , getTagDUniqSupply -- * A transfomer threading a deterministic supply , UniqDSMT(UDSMT) -- ** UniqDSMT operations , runUDSMT , withDUS , hoistUDSMT , liftUDSMT -- ** Tags , setTagUDSMT -- * Monad class for deterministic supply threading , MonadGetUnique(..) , MonadUniqDSM(..) ) where import GHC.Exts (oneShot) import GHC.Prelude import GHC.Word import Control.Monad.Fix import GHC.Types.Unique import qualified GHC.Utils.Monad.State.Strict as Strict import qualified GHC.Types.Unique.Supply as USM import Control.Monad.IO.Class {- Note [Deterministic Uniques in the CG] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC produces fully deterministic object code. To achieve this, there is a key pass (detRenameCmmGroup) which renames all non-deterministic uniques in the Cmm code right after StgToCmm. See Note [Object determinism] for the big picture and some details. The code generation pipeline that processes this renamed, deterministic, Cmm, however, may still need to generate new uniques. 
If we were to resort to the non-deterministic unique supply used in the rest of the compiler, our renaming efforts would be for naught. Therefore, after having renamed Cmm deterministically, we must ensure that all uniques created by the code generation pipeline use a deterministic source of uniques. Most often, this means don't use `UniqSM` in the Cmm passes, use `UniqDSM`: `UniqDSM` is a pure state monad with an incrementing counter from which we source new uniques. Unlike `UniqSM`, there's no way to `split` the supply, but it turns out this was rarely really needed for code generation and migrating from UniqSM to UniqDSM was easy. Secondly, the `DUniqSupply` used to run a `UniqDSM` must be threaded through all passes to guarantee uniques in different passes are unique amongst them altogether. Specifically, the same `DUniqSupply` must be threaded through the CG Streaming pipeline, starting with Driver.Main calling `StgToCmm.codeGen`, `cmmPipeline`, `cmmToRawCmm`, and `codeOutput` in sequence. To thread resources through the `Stream` abstraction, we use the `UniqDSMT` transformer on top of `IO` as the Monad underlying the Stream. `UniqDSMT` will thread the `DUniqSupply` through every pass applied to the `Stream`, for every element. We use @type CgStream = Stream (UniqDSMT IO)@ for the Stream used in code generation which that carries through the deterministic unique supply. Unlike non-deterministic unique supplies which can be split into supplies using different tags, or where a new supply with a new tag can be brought from the void, a `DUniqSupply` needs to be sampled iteratively. To use a different tag during a specific pass (to more easily identify uniques created in it), the tag should be manually set and then reset on the unique supply. There's also the auxiliary `setTagUDSMT` which sets the tag for all uniques supplied in the given action, and resets it implicitly. 
See also Note [Object determinism] in GHC.StgToCmm -} -- See Note [Deterministic Uniques in the CG] newtype DUniqSupply = DUS Word64 -- supply uniques iteratively type DUniqResult result = (# result, DUniqSupply #) pattern DUniqResult :: a -> DUniqSupply -> (# a, DUniqSupply #) pattern DUniqResult x y = (# x, y #) {-# COMPLETE DUniqResult #-} -- | A monad which just gives the ability to obtain 'Unique's deterministically. -- There's no splitting. newtype UniqDSM result = UDSM' { unUDSM :: DUniqSupply -> DUniqResult result } deriving (Functor, Applicative, Monad) via (Strict.State DUniqSupply) instance MonadFix UniqDSM where mfix m = UDSM (\us0 -> let (r,us1) = runUniqueDSM us0 (m r) in DUniqResult r us1) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern UDSM :: (DUniqSupply -> DUniqResult a) -> UniqDSM a pattern UDSM m <- UDSM' m where UDSM m = UDSM' (oneShot $ \s -> m s) {-# COMPLETE UDSM #-} getUniqueDSM :: UniqDSM Unique getUniqueDSM = UDSM (\(DUS us0) -> DUniqResult (mkUniqueGrimily us0) (DUS $ us0+1)) takeUniqueFromDSupply :: DUniqSupply -> (Unique, DUniqSupply) takeUniqueFromDSupply d = case unUDSM getUniqueDSM d of DUniqResult x y -> (x, y) -- | Initialize a deterministic unique supply with the given Tag and initial unique. initDUniqSupply :: Char -> Word64 -> DUniqSupply initDUniqSupply c firstUniq = let !tag = mkTag c in DUS (tag .|. firstUniq) runUniqueDSM :: DUniqSupply -> UniqDSM a -> (a, DUniqSupply) runUniqueDSM ds (UDSM f) = case f ds of DUniqResult uq us -> (uq, us) -- | Set the tag of uniques generated from this deterministic unique supply newTagDUniqSupply :: Char -> DUniqSupply -> DUniqSupply newTagDUniqSupply c (DUS w) = DUS $ getKey $ newTagUnique (mkUniqueGrimily w) c -- | Get the tag uniques generated from this deterministic unique supply would have getTagDUniqSupply :: DUniqSupply -> Char getTagDUniqSupply (DUS w) = fst $ unpkUnique (mkUniqueGrimily w) -- | Get a unique from a monad that can access a unique supply. 
-- -- Crucially, because 'MonadGetUnique' doesn't allow you to get the -- 'UniqSupply' (unlike 'MonadUnique'), an instance such as 'UniqDSM' can use a -- deterministic unique supply to return deterministic uniques without allowing -- for the 'UniqSupply' to be shared. class Monad m => MonadGetUnique m where getUniqueM :: m Unique instance MonadGetUnique UniqDSM where getUniqueM = getUniqueDSM -- non deterministic instance instance MonadGetUnique USM.UniqSM where getUniqueM = USM.getUniqueM -------------------------------------------------------------------------------- -- UniqDSMT -------------------------------------------------------------------------------- -- | Transformer version of 'UniqDSM' to use when threading a deterministic -- uniq supply over a Monad. Specifically, it is used in the `Stream` of Cmm -- decls. newtype UniqDSMT m result = UDSMT' (DUniqSupply -> m (result, DUniqSupply)) deriving (Functor) -- Similar to GHC.Utils.Monad.State.Strict, using Note [The one-shot state monad trick] -- Using the one-shot trick is necessary for performance. -- Using transfomer's strict `StateT` regressed some performance tests in 1-2%. -- The one-shot trick here fixes those regressions. 
pattern UDSMT :: (DUniqSupply -> m (result, DUniqSupply)) -> UniqDSMT m result pattern UDSMT m <- UDSMT' m where UDSMT m = UDSMT' (oneShot $ \s -> m s) {-# COMPLETE UDSMT #-} instance Monad m => Applicative (UniqDSMT m) where pure x = UDSMT $ \s -> pure (x, s) UDSMT f <*> UDSMT x = UDSMT $ \s0 -> do (f', s1) <- f s0 (x', s2) <- x s1 pure (f' x', s2) instance Monad m => Monad (UniqDSMT m) where UDSMT x >>= f = UDSMT $ \s0 -> do (x', s1) <- x s0 case f x' of UDSMT y -> y s1 instance MonadIO m => MonadIO (UniqDSMT m) where liftIO x = UDSMT $ \s -> (,s) <$> liftIO x instance Monad m => MonadGetUnique (UniqDSMT m) where getUniqueM = UDSMT $ \us -> do let (u, us') = takeUniqueFromDSupply us return (u, us') -- | Set the tag of the running @UniqDSMT@ supply to the given tag and run an action with it. -- All uniques produced in the given action will use this tag, until the tag is changed -- again. setTagUDSMT :: Monad m => Char {-^ Tag -} -> UniqDSMT m a -> UniqDSMT m a setTagUDSMT tag (UDSMT act) = UDSMT $ \us -> do let origtag = getTagDUniqSupply us new_us = newTagDUniqSupply tag us (a, us') <- act new_us let us'_origtag = newTagDUniqSupply origtag us' -- restore original tag return (a, us'_origtag) -- | Like 'runUniqueDSM' but for 'UniqDSMT' runUDSMT :: DUniqSupply -> UniqDSMT m a -> m (a, DUniqSupply) runUDSMT dus (UDSMT st) = st dus -- | Lift an IO action that depends on, and threads through, a unique supply -- into UniqDSMT IO. withDUS :: (DUniqSupply -> IO (a, DUniqSupply)) -> UniqDSMT IO a withDUS f = UDSMT $ \us -> do (a, us') <- liftIO (f us) return (a, us') -- | Change the monad underyling an applied @UniqDSMT@, i.e. transform a -- @UniqDSMT m@ into a @UniqDSMT n@ given @m ~> n@. hoistUDSMT :: (forall x. 
m x -> n x) -> UniqDSMT m a -> UniqDSMT n a hoistUDSMT nt (UDSMT m) = UDSMT $ \s -> nt (m s) -- | Lift a monadic action @m a@ into an @UniqDSMT m a@ liftUDSMT :: Functor m => m a -> UniqDSMT m a liftUDSMT m = UDSMT $ \s -> (,s) <$> m -------------------------------------------------------------------------------- -- MonadUniqDSM -------------------------------------------------------------------------------- class (Monad m) => MonadUniqDSM m where -- | Lift a pure 'UniqDSM' action into a 'MonadUniqDSM' such as 'UniqDSMT' liftUniqDSM :: UniqDSM a -> m a instance MonadUniqDSM UniqDSM where liftUniqDSM = id instance Monad m => MonadUniqDSM (UniqDSMT m) where liftUniqDSM act = UDSMT $ \us -> pure $ runUniqueDSM us act ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/DSet.hs0000644000000000000000000001275707346545000021467 0ustar0000000000000000-- (c) Bartosz Nitka, Facebook, 2015 -- | -- Specialised deterministic sets, for things with @Uniques@ -- -- Based on 'UniqDFM's (as you would expect). -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why we need it. -- -- Basically, the things need to be in class 'Uniquable'. 
{-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.Unique.DSet ( -- * Unique set type UniqDSet, -- type synonym for UniqFM a getUniqDSet, pprUniqDSet, -- ** Manipulating these sets delOneFromUniqDSet, delListFromUniqDSet, emptyUniqDSet, unitUniqDSet, mkUniqDSet, addOneToUniqDSet, addListToUniqDSet, unionUniqDSets, unionManyUniqDSets, minusUniqDSet, uniqDSetMinusUniqSet, intersectUniqDSets, uniqDSetIntersectUniqSet, nonDetStrictFoldUniqDSet, elementOfUniqDSet, filterUniqDSet, sizeUniqDSet, isEmptyUniqDSet, lookupUniqDSet, uniqDSetToList, partitionUniqDSet, mapUniqDSet, strictFoldUniqDSet, mapMUniqDSet ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Types.Unique.DFM import GHC.Types.Unique.Set import GHC.Types.Unique import Data.Coerce import Data.Data -- See Note [UniqSet invariant] in GHC.Types.Unique.Set for why we want a newtype here. -- Beyond preserving invariants, we may also want to 'override' typeclass -- instances. newtype UniqDSet a = UniqDSet {getUniqDSet' :: UniqDFM a a} deriving (Data) emptyUniqDSet :: UniqDSet a emptyUniqDSet = UniqDSet emptyUDFM unitUniqDSet :: Uniquable a => a -> UniqDSet a unitUniqDSet x = UniqDSet (unitUDFM x x) mkUniqDSet :: Uniquable a => [a] -> UniqDSet a mkUniqDSet = foldl' addOneToUniqDSet emptyUniqDSet -- The new element always goes to the right of existing ones. addOneToUniqDSet :: Uniquable a => UniqDSet a -> a -> UniqDSet a addOneToUniqDSet (UniqDSet set) x = UniqDSet (addToUDFM set x x) addListToUniqDSet :: Uniquable a => UniqDSet a -> [a] -> UniqDSet a addListToUniqDSet = foldl' addOneToUniqDSet delOneFromUniqDSet :: Uniquable a => UniqDSet a -> a -> UniqDSet a delOneFromUniqDSet (UniqDSet s) = UniqDSet . delFromUDFM s delListFromUniqDSet :: Uniquable a => UniqDSet a -> [a] -> UniqDSet a delListFromUniqDSet (UniqDSet s) = UniqDSet . 
delListFromUDFM s unionUniqDSets :: UniqDSet a -> UniqDSet a -> UniqDSet a unionUniqDSets (UniqDSet s) (UniqDSet t) = UniqDSet (plusUDFM s t) unionManyUniqDSets :: [UniqDSet a] -> UniqDSet a unionManyUniqDSets [] = emptyUniqDSet unionManyUniqDSets (x:xs) = foldl' unionUniqDSets x xs minusUniqDSet :: UniqDSet a -> UniqDSet a -> UniqDSet a minusUniqDSet (UniqDSet s) (UniqDSet t) = UniqDSet (minusUDFM s t) uniqDSetMinusUniqSet :: UniqDSet a -> UniqSet a -> UniqDSet a uniqDSetMinusUniqSet xs ys = UniqDSet (udfmMinusUFM (getUniqDSet xs) (getUniqSet ys)) intersectUniqDSets :: UniqDSet a -> UniqDSet a -> UniqDSet a intersectUniqDSets (UniqDSet s) (UniqDSet t) = UniqDSet (intersectUDFM s t) uniqDSetIntersectUniqSet :: UniqDSet a -> UniqSet a -> UniqDSet a uniqDSetIntersectUniqSet xs ys = UniqDSet (udfmIntersectUFM (getUniqDSet xs) (getUniqSet ys)) -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldUniqDSet :: (a -> b -> b) -> b -> UniqDSet a -> b nonDetStrictFoldUniqDSet f acc (UniqDSet s) = nonDetStrictFoldUDFM f acc s elementOfUniqDSet :: Uniquable a => a -> UniqDSet a -> Bool elementOfUniqDSet k = elemUDFM k . getUniqDSet filterUniqDSet :: (a -> Bool) -> UniqDSet a -> UniqDSet a filterUniqDSet p (UniqDSet s) = UniqDSet (filterUDFM p s) sizeUniqDSet :: UniqDSet a -> Int sizeUniqDSet = sizeUDFM . getUniqDSet isEmptyUniqDSet :: UniqDSet a -> Bool isEmptyUniqDSet = isNullUDFM . getUniqDSet lookupUniqDSet :: Uniquable a => UniqDSet a -> a -> Maybe a lookupUniqDSet = lookupUDFM . getUniqDSet uniqDSetToList :: UniqDSet a -> [a] uniqDSetToList = eltsUDFM . getUniqDSet partitionUniqDSet :: (a -> Bool) -> UniqDSet a -> (UniqDSet a, UniqDSet a) partitionUniqDSet p = coerce . partitionUDFM p . 
getUniqDSet -- See Note [UniqSet invariant] in GHC.Types.Unique.Set mapUniqDSet :: Uniquable b => (a -> b) -> UniqDSet a -> UniqDSet b mapUniqDSet f (UniqDSet m) = UniqDSet $ unsafeCastUDFMKey $ mapUDFM f m -- Simply apply `f` to each element, retaining all the structure unchanged. -- The identification of keys and elements prevents a derived Functor -- instance, but `unsafeCastUDFMKey` makes it possible to apply the strict -- mapping from DFM. -- | Like 'mapUniqDSet' but for 'mapM'. Assumes the function we are mapping -- over the 'UniqDSet' does not modify uniques, as per -- Note [UniqSet invariant] in GHC.Types.Unique.Set. mapMUniqDSet :: (Monad m, Uniquable b) => (a -> m b) -> UniqDSet a -> m (UniqDSet b) mapMUniqDSet f (UniqDSet m) = UniqDSet . unsafeCastUDFMKey <$> mapMUDFM f m {-# INLINEABLE mapMUniqDSet #-} strictFoldUniqDSet :: (a -> r -> r) -> r -> UniqDSet a -> r strictFoldUniqDSet k r s = foldl' (\ !r e -> k e r) r $ uniqDSetToList s -- Two 'UniqDSet's are considered equal if they contain the same -- uniques. instance Eq (UniqDSet a) where UniqDSet a == UniqDSet b = equalKeysUDFM a b getUniqDSet :: UniqDSet a -> UniqDFM a a getUniqDSet = getUniqDSet' instance Outputable a => Outputable (UniqDSet a) where ppr = pprUniqDSet ppr pprUniqDSet :: (a -> SDoc) -> UniqDSet a -> SDoc pprUniqDSet f = braces . pprWithCommas f . uniqDSetToList ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/FM.hs0000644000000000000000000005404107346545000021122 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1994-1998 UniqFM: Specialised finite maps, for things with @Uniques@. Basically, the things need to be in class @Uniquable@, and we use the @getUnique@ method to grab their @Uniques@. (A similar thing to @UniqSet@, as opposed to @Set@.) The interface is based on @FiniteMap@s, but the implementation uses @Data.IntMap@, which is both maintained and faster than the past implementation (see commit log). 
The @UniqFM@ interface maps directly to Data.IntMap, only ``Data.IntMap.union'' is left-biased and ``plusUFM'' right-biased and ``addToUFM\_C'' and ``Data.IntMap.insertWith'' differ in the order of arguments of combining function. -} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -Wall #-} module GHC.Types.Unique.FM ( -- * Unique-keyed mappings UniqFM, -- abstract type NonDetUniqFM(..), -- wrapper for opting into nondeterminism -- ** Manipulating those mappings emptyUFM, unitUFM, unitDirectlyUFM, zipToUFM, listToUFM, listToUFM_Directly, listToUFM_C, listToIdentityUFM, addToUFM,addToUFM_C,addToUFM_Acc,addToUFM_L, addListToUFM,addListToUFM_C, addToUFM_Directly, addListToUFM_Directly, adjustUFM, alterUFM, alterUFM_Directly, adjustUFM_Directly, delFromUFM, delFromUFM_Directly, delListFromUFM, delListFromUFM_Directly, plusUFM, plusUFM_C, plusUFM_CD, plusUFM_CD2, mergeUFM, plusMaybeUFM_C, plusUFMList, plusUFMListWith, sequenceUFMList, minusUFM, minusUFM_C, intersectUFM, intersectUFM_C, disjointUFM, equalKeysUFM, diffUFM, nonDetStrictFoldUFM, nonDetFoldUFM, nonDetStrictFoldUFM_DirectlyM, nonDetFoldWithKeyUFM, nonDetStrictFoldUFM_Directly, anyUFM, allUFM, seqEltsUFM, mapUFM, mapUFM_Directly, strictMapUFM, mapMaybeUFM, mapMaybeUFM_sameUnique, mapMaybeWithKeyUFM, elemUFM, elemUFM_Directly, filterUFM, filterUFM_Directly, partitionUFM, sizeUFM, isNullUFM, lookupUFM, lookupUFM_Directly, lookupWithDefaultUFM, lookupWithDefaultUFM_Directly, nonDetEltsUFM, nonDetKeysUFM, ufmToSet_Directly, nonDetUFMToList, ufmToIntMap, unsafeIntMapToUFM, unsafeCastUFMKey, pprUniqFM, pprUFM, pprUFMWithKeys, pluralUFM ) where import GHC.Prelude import GHC.Types.Unique ( Uniquable(..), Unique, getKey, mkUniqueGrimily ) import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import qualified GHC.Data.Word64Map as M import qualified GHC.Data.Word64Map.Strict as MS import qualified GHC.Data.Word64Set as S import 
Data.Data import qualified Data.Semigroup as Semi import Data.Functor.Classes (Eq1 (..)) import Data.Coerce -- | A finite map from @uniques@ of one type to -- elements in another type. -- -- The key is just here to keep us honest. It's always safe -- to use a single type as key. -- If two types don't overlap in their uniques it's also safe -- to index the same map at multiple key types. But this is -- very much discouraged. newtype UniqFM key ele = UFM (M.Word64Map ele) deriving (Data, Eq, Functor) -- Nondeterministic Foldable and Traversable instances are accessible through -- use of the 'NonDetUniqFM' wrapper. -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM to learn about determinism. emptyUFM :: UniqFM key elt emptyUFM = UFM M.empty isNullUFM :: UniqFM key elt -> Bool isNullUFM (UFM m) = M.null m unitUFM :: Uniquable key => key -> elt -> UniqFM key elt unitUFM k v = UFM (M.singleton (getKey $ getUnique k) v) -- when you've got the Unique already unitDirectlyUFM :: Unique -> elt -> UniqFM key elt unitDirectlyUFM u v = UFM (M.singleton (getKey u) v) -- zipToUFM ks vs = listToUFM (zip ks vs) -- This function exists because it's a common case (#18535), and -- it's inefficient to first build a list of pairs, and then immediately -- take it apart. Astonishingly, fusing this one list away reduces total -- compiler allocation by more than 10% (in T12545, see !3935) -- Note that listToUFM (zip ks vs) performs similarly, but -- the explicit recursion avoids relying too much on fusion. 
zipToUFM :: Uniquable key => [key] -> [elt] -> UniqFM key elt zipToUFM ks vs = assert (length ks == length vs ) innerZip emptyUFM ks vs where innerZip ufm (k:kList) (v:vList) = innerZip (addToUFM ufm k v) kList vList innerZip ufm _ _ = ufm listToUFM :: Uniquable key => [(key,elt)] -> UniqFM key elt listToUFM = foldl' (\m (k, v) -> addToUFM m k v) emptyUFM {-# INLINEABLE listToUFM #-} listToUFM_Directly :: [(Unique, elt)] -> UniqFM key elt listToUFM_Directly = foldl' (\m (u, v) -> addToUFM_Directly m u v) emptyUFM {-# INLINEABLE listToUFM_Directly #-} listToIdentityUFM :: Uniquable key => [key] -> UniqFM key key listToIdentityUFM = foldl' (\m x -> addToUFM m x x) emptyUFM listToUFM_C :: Uniquable key => (elt -> elt -> elt) -> [(key, elt)] -> UniqFM key elt listToUFM_C f = foldl' (\m (k, v) -> addToUFM_C f m k v) emptyUFM {-# INLINEABLE listToUFM_C #-} addToUFM :: Uniquable key => UniqFM key elt -> key -> elt -> UniqFM key elt addToUFM (UFM m) k v = UFM (M.insert (getKey $ getUnique k) v m) addListToUFM :: Uniquable key => UniqFM key elt -> [(key,elt)] -> UniqFM key elt addListToUFM = foldl' (\m (k, v) -> addToUFM m k v) addListToUFM_Directly :: UniqFM key elt -> [(Unique,elt)] -> UniqFM key elt addListToUFM_Directly = foldl' (\m (k, v) -> addToUFM_Directly m k v) addToUFM_Directly :: UniqFM key elt -> Unique -> elt -> UniqFM key elt addToUFM_Directly (UFM m) u v = UFM (M.insert (getKey u) v m) addToUFM_C :: Uniquable key => (elt -> elt -> elt) -- ^ old -> new -> result -> UniqFM key elt -- ^ old -> key -> elt -- ^ new -> UniqFM key elt -- ^ result -- Arguments of combining function of M.insertWith and addToUFM_C are flipped. 
addToUFM_C f (UFM m) k v = UFM (M.insertWith (flip f) (getKey $ getUnique k) v m) addToUFM_Acc :: Uniquable key => (elt -> elts -> elts) -- Add to existing -> (elt -> elts) -- New element -> UniqFM key elts -- old -> key -> elt -- new -> UniqFM key elts -- result addToUFM_Acc exi new (UFM m) k v = UFM (M.insertWith (\_new old -> exi v old) (getKey $ getUnique k) (new v) m) -- | Add an element, returns previous lookup result and new map. If -- old element doesn't exist, add the passed element directly, -- otherwise compute the element to add using the passed function. addToUFM_L :: Uniquable key => (key -> elt -> elt -> elt) -- ^ key,old,new -> key -> elt -- new -> UniqFM key elt -> (Maybe elt, UniqFM key elt) -- ^ old, result addToUFM_L f k v (UFM m) = coerce $ M.insertLookupWithKey (\_ _n _o -> f k _o _n) (getKey $ getUnique k) v m alterUFM :: Uniquable key => (Maybe elt -> Maybe elt) -- ^ How to adjust -> UniqFM key elt -- ^ old -> key -- ^ new -> UniqFM key elt -- ^ result alterUFM f (UFM m) k = UFM (M.alter f (getKey $ getUnique k) m) alterUFM_Directly :: (Maybe elt -> Maybe elt) -- ^ How to adjust -> UniqFM key elt -- ^ old -> Unique -- ^ new -> UniqFM key elt -- ^ result alterUFM_Directly f (UFM m) k = UFM (M.alter f (getKey k) m) -- | Add elements to the map, combining existing values with inserted ones using -- the given function. 
addListToUFM_C :: Uniquable key => (elt -> elt -> elt) -> UniqFM key elt -> [(key,elt)] -> UniqFM key elt addListToUFM_C f = foldl' (\m (k, v) -> addToUFM_C f m k v) adjustUFM :: Uniquable key => (elt -> elt) -> UniqFM key elt -> key -> UniqFM key elt adjustUFM f (UFM m) k = UFM (M.adjust f (getKey $ getUnique k) m) adjustUFM_Directly :: (elt -> elt) -> UniqFM key elt -> Unique -> UniqFM key elt adjustUFM_Directly f (UFM m) u = UFM (M.adjust f (getKey u) m) delFromUFM :: Uniquable key => UniqFM key elt -> key -> UniqFM key elt delFromUFM (UFM m) k = UFM (M.delete (getKey $ getUnique k) m) delListFromUFM :: Uniquable key => UniqFM key elt -> [key] -> UniqFM key elt delListFromUFM = foldl' delFromUFM delListFromUFM_Directly :: UniqFM key elt -> [Unique] -> UniqFM key elt delListFromUFM_Directly = foldl' delFromUFM_Directly delFromUFM_Directly :: UniqFM key elt -> Unique -> UniqFM key elt delFromUFM_Directly (UFM m) u = UFM (M.delete (getKey u) m) -- Bindings in right argument shadow those in the left plusUFM :: UniqFM key elt -> UniqFM key elt -> UniqFM key elt -- M.union is left-biased, plusUFM should be right-biased. plusUFM (UFM x) (UFM y) = UFM (M.union y x) -- Note (M.union y x), with arguments flipped -- M.union is left-biased, plusUFM should be right-biased. plusUFM_C :: (elt -> elt -> elt) -> UniqFM key elt -> UniqFM key elt -> UniqFM key elt plusUFM_C f (UFM x) (UFM y) = UFM (M.unionWith f x y) -- | `plusUFM_CD f m1 d1 m2 d2` merges the maps using `f` as the -- combinding function and `d1` resp. `d2` as the default value if -- there is no entry in `m1` reps. `m2`. The domain is the union of -- the domains of `m1` and `m2`. -- -- IMPORTANT NOTE: This function strictly applies the modification function -- and forces the result unlike most the other functions in this module. 
-- -- Representative example: -- -- @ -- plusUFM_CD f {A: 1, B: 2} 23 {B: 3, C: 4} 42 -- == {A: f 1 42, B: f 2 3, C: f 23 4 } -- @ {-# INLINE plusUFM_CD #-} plusUFM_CD :: (elta -> eltb -> eltc) -> UniqFM key elta -- map X -> elta -- default for X -> UniqFM key eltb -- map Y -> eltb -- default for Y -> UniqFM key eltc plusUFM_CD f (UFM xm) dx (UFM ym) dy = UFM $ MS.mergeWithKey (\_ x y -> Just (x `f` y)) (MS.map (\x -> x `f` dy)) (MS.map (\y -> dx `f` y)) xm ym -- | `plusUFM_CD2 f m1 m2` merges the maps using `f` as the combining -- function. Unlike `plusUFM_CD`, a missing value is not defaulted: it is -- instead passed as `Nothing` to `f`. `f` can never have both its arguments -- be `Nothing`. -- -- IMPORTANT NOTE: This function strictly applies the modification function -- and forces the result. -- -- `plusUFM_CD2 f m1 m2` is the same as `plusUFM_CD f (mapUFM Just m1) Nothing -- (mapUFM Just m2) Nothing`. plusUFM_CD2 :: (Maybe elta -> Maybe eltb -> eltc) -> UniqFM key elta -- map X -> UniqFM key eltb -- map Y -> UniqFM key eltc plusUFM_CD2 f (UFM xm) (UFM ym) = UFM $ MS.mergeWithKey (\_ x y -> Just (Just x `f` Just y)) (MS.map (\x -> Just x `f` Nothing)) (MS.map (\y -> Nothing `f` Just y)) xm ym mergeUFM :: (elta -> eltb -> Maybe eltc) -> (UniqFM key elta -> UniqFM key eltc) -- map X -> (UniqFM key eltb -> UniqFM key eltc) -- map Y -> UniqFM key elta -> UniqFM key eltb -> UniqFM key eltc mergeUFM f g h (UFM xm) (UFM ym) = UFM $ MS.mergeWithKey (\_ x y -> (x `f` y)) (coerce g) (coerce h) xm ym plusMaybeUFM_C :: (elt -> elt -> Maybe elt) -> UniqFM key elt -> UniqFM key elt -> UniqFM key elt plusMaybeUFM_C f (UFM xm) (UFM ym) = UFM $ M.mergeWithKey (\_ x y -> x `f` y) id id xm ym plusUFMList :: [UniqFM key elt] -> UniqFM key elt plusUFMList = foldl' plusUFM emptyUFM plusUFMListWith :: (elt -> elt -> elt) -> [UniqFM key elt] -> UniqFM key elt plusUFMListWith f xs = unsafeIntMapToUFM $ M.unionsWith f (map ufmToIntMap xs) sequenceUFMList :: forall key elt. 
[UniqFM key elt] -> UniqFM key [elt] sequenceUFMList = foldr (plusUFM_CD2 cons) emptyUFM where cons :: Maybe elt -> Maybe [elt] -> [elt] cons (Just x) (Just ys) = x : ys cons Nothing (Just ys) = ys cons (Just x) Nothing = [x] cons Nothing Nothing = [] minusUFM :: UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1 minusUFM (UFM x) (UFM y) = UFM (M.difference x y) -- | @minusUFC_C f map1 map2@ returns @map1@, except that every mapping @key -- |-> value1@ in @map1@ that shares a key with a mapping @key |-> value2@ in -- @map2@ is altered by @f@: @value1@ is replaced by @f value1 value2@, where -- 'Just' means that the new value is used and 'Nothing' means that the mapping -- is deleted. minusUFM_C :: (elt1 -> elt2 -> Maybe elt1) -> UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1 minusUFM_C f (UFM x) (UFM y) = UFM (M.differenceWith f x y) intersectUFM :: UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1 intersectUFM (UFM x) (UFM y) = UFM (M.intersection x y) intersectUFM_C :: (elt1 -> elt2 -> elt3) -> UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt3 intersectUFM_C f (UFM x) (UFM y) = UFM (M.intersectionWith f x y) disjointUFM :: UniqFM key elt1 -> UniqFM key elt2 -> Bool disjointUFM (UFM x) (UFM y) = M.disjoint x y -- | Fold over a 'UniqFM'. -- -- Non-deterministic, unless the folding function is commutative -- (i.e. @a1 `f` ( a2 `f` b ) == a2 `f` ( a1 `f` b )@ for all @a1@, @a2@, @b@). nonDetFoldUFM :: (elt -> a -> a) -> a -> UniqFM key elt -> a nonDetFoldUFM f z (UFM m) = M.foldr f z m -- | Like 'nonDetFoldUFM', but with the 'Unique' key as well. 
nonDetFoldWithKeyUFM :: (Unique -> elt -> a -> a) -> a -> UniqFM key elt -> a nonDetFoldWithKeyUFM f z (UFM m) = M.foldrWithKey f' z m where f' k e a = f (mkUniqueGrimily k) e a mapUFM :: (elt1 -> elt2) -> UniqFM key elt1 -> UniqFM key elt2 mapUFM f (UFM m) = UFM (M.map f m) mapMaybeUFM :: (elt1 -> Maybe elt2) -> UniqFM key elt1 -> UniqFM key elt2 mapMaybeUFM = mapMaybeUFM_sameUnique -- | Like 'Data.Map.mapMaybe', but you must ensure the passed-in function does -- not modify the unique. mapMaybeUFM_sameUnique :: (elt1 -> Maybe elt2) -> UniqFM key1 elt1 -> UniqFM key2 elt2 mapMaybeUFM_sameUnique f (UFM m) = UFM (M.mapMaybe f m) mapMaybeWithKeyUFM :: (Unique -> elt1 -> Maybe elt2) -> UniqFM key elt1 -> UniqFM key elt2 mapMaybeWithKeyUFM f (UFM m) = UFM (M.mapMaybeWithKey (f . mkUniqueGrimily) m) mapUFM_Directly :: (Unique -> elt1 -> elt2) -> UniqFM key elt1 -> UniqFM key elt2 mapUFM_Directly f (UFM m) = UFM (M.mapWithKey (f . mkUniqueGrimily) m) strictMapUFM :: (a -> b) -> UniqFM k a -> UniqFM k b strictMapUFM f (UFM a) = UFM $ MS.map f a filterUFM :: (elt -> Bool) -> UniqFM key elt -> UniqFM key elt filterUFM p (UFM m) = UFM (M.filter p m) filterUFM_Directly :: (Unique -> elt -> Bool) -> UniqFM key elt -> UniqFM key elt filterUFM_Directly p (UFM m) = UFM (M.filterWithKey (p . 
mkUniqueGrimily) m) partitionUFM :: (elt -> Bool) -> UniqFM key elt -> (UniqFM key elt, UniqFM key elt) partitionUFM p (UFM m) = case M.partition p m of (left, right) -> (UFM left, UFM right) sizeUFM :: UniqFM key elt -> Int sizeUFM (UFM m) = M.size m elemUFM :: Uniquable key => key -> UniqFM key elt -> Bool elemUFM k (UFM m) = M.member (getKey $ getUnique k) m elemUFM_Directly :: Unique -> UniqFM key elt -> Bool elemUFM_Directly u (UFM m) = M.member (getKey u) m lookupUFM :: Uniquable key => UniqFM key elt -> key -> Maybe elt lookupUFM (UFM m) k = M.lookup (getKey $ getUnique k) m -- when you've got the Unique already lookupUFM_Directly :: UniqFM key elt -> Unique -> Maybe elt lookupUFM_Directly (UFM m) u = M.lookup (getKey u) m lookupWithDefaultUFM :: Uniquable key => UniqFM key elt -> elt -> key -> elt lookupWithDefaultUFM (UFM m) v k = M.findWithDefault v (getKey $ getUnique k) m lookupWithDefaultUFM_Directly :: UniqFM key elt -> elt -> Unique -> elt lookupWithDefaultUFM_Directly (UFM m) v u = M.findWithDefault v (getKey u) m ufmToSet_Directly :: UniqFM key elt -> S.Word64Set ufmToSet_Directly (UFM m) = M.keysSet m anyUFM :: (elt -> Bool) -> UniqFM key elt -> Bool anyUFM p (UFM m) = M.foldr ((||) . p) False m allUFM :: (elt -> Bool) -> UniqFM key elt -> Bool allUFM p (UFM m) = M.foldr ((&&) . p) True m seqEltsUFM :: (elt -> ()) -> UniqFM key elt -> () seqEltsUFM seqElt = nonDetFoldUFM (\v rest -> seqElt v `seq` rest) () -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetEltsUFM :: UniqFM key elt -> [elt] nonDetEltsUFM (UFM m) = M.elems m -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. 
nonDetKeysUFM :: UniqFM key elt -> [Unique]
nonDetKeysUFM (UFM m) = map mkUniqueGrimily $ M.keys m

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldUFM :: (elt -> a -> a) -> a -> UniqFM key elt -> a
nonDetStrictFoldUFM k z (UFM m) = M.foldl' (flip k) z m
{-# INLINE nonDetStrictFoldUFM #-}

-- | In essence foldM
-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
{-# INLINE nonDetStrictFoldUFM_DirectlyM #-} -- Allow specialization
nonDetStrictFoldUFM_DirectlyM :: (Monad m) => (Unique -> b -> elt -> m b) -> b -> UniqFM key elt -> m b
nonDetStrictFoldUFM_DirectlyM f z0 (UFM xs) = M.foldrWithKey c return xs z0
  -- The right fold builds a chain of monadic continuations, which is then
  -- applied to the initial accumulator z0.
  -- See Note [List fusion and continuations in 'c']
  where
    c u x k z = f (mkUniqueGrimily u) z x >>= k
    {-# INLINE c #-}

-- | Strict left fold with access to each entry's 'Unique'.
nonDetStrictFoldUFM_Directly:: (Unique -> elt -> a -> a) -> a -> UniqFM key elt -> a
nonDetStrictFoldUFM_Directly k z (UFM m) = M.foldlWithKey' (\z' i x -> k (mkUniqueGrimily i) x z') z m
{-# INLINE nonDetStrictFoldUFM_Directly #-}

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetUFMToList :: UniqFM key elt -> [(Unique, elt)]
nonDetUFMToList (UFM m) = map (\(k, v) -> (mkUniqueGrimily k, v)) $ M.toList m

-- | A wrapper around 'UniqFM' with the sole purpose of informing call sites
-- that the provided 'Foldable' and 'Traversable' instances are
-- nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
newtype NonDetUniqFM key ele = NonDetUniqFM { getNonDet :: UniqFM key ele }
  deriving (Functor)

-- | Inherently nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
instance forall key. Foldable (NonDetUniqFM key) where
  foldr f z (NonDetUniqFM (UFM m)) = foldr f z m

-- | Inherently nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
instance forall key. Traversable (NonDetUniqFM key) where
  traverse f (NonDetUniqFM (UFM m)) = NonDetUniqFM . UFM <$> traverse f m

-- | Expose the underlying Word64Map representation.
ufmToIntMap :: UniqFM key elt -> M.Word64Map elt
ufmToIntMap (UFM m) = m

-- | Wrap a raw Word64Map as a UniqFM. "unsafe" because the caller must
-- guarantee the keys really are the uniques of the intended key type.
unsafeIntMapToUFM :: M.Word64Map elt -> UniqFM key elt
unsafeIntMapToUFM = UFM

-- | Cast the key domain of a UniqFM.
--
-- As long as the domains don't overlap in their uniques
-- this is safe.
unsafeCastUFMKey :: UniqFM key1 elt -> UniqFM key2 elt
unsafeCastUFMKey (UFM m) = UFM m

-- Determines whether two 'UniqFM's contain the same keys.
-- Values are ignored: liftEq is applied with a comparison that is always True.
equalKeysUFM :: UniqFM key a -> UniqFM key b -> Bool
equalKeysUFM (UFM m1) (UFM m2) = liftEq (\_ _ -> True) m1 m2

-- | An edit on type @a@, relating an element of a container (like an entry in a
-- map or a line in a file) before and after.
data Edit a = Removed !a        -- ^ Element was removed from the container
            | Added !a          -- ^ Element was added to the container
            | Changed !a !a     -- ^ Element was changed. Carries the values before and after
            deriving Eq

instance Outputable a => Outputable (Edit a) where
  ppr (Removed a)   = text "-" <> ppr a
  ppr (Added a)     = text "+" <> ppr a
  ppr (Changed l r) = ppr l <> text "->" <> ppr r

-- A very convenient function to have for debugging:

-- | Computes the diff of two 'UniqFM's in terms of 'Edit's.
-- Equal points will not be present in the result map at all.
diffUFM :: Eq a => UniqFM key a -> UniqFM key a -> UniqFM key (Edit a)
diffUFM = mergeUFM both (mapUFM Removed) (mapUFM Added)
  where
    -- Keys present in both maps: drop equal values, otherwise record the
    -- change strictly ($!) to avoid retaining thunks of both values.
    both x y
      | x == y    = Nothing
      | otherwise = Just $! Changed x y

-- Instances

-- | '(<>)' is left-biased union ('plusUFM').
instance Semi.Semigroup (UniqFM key a) where
  (<>) = plusUFM

instance Monoid (UniqFM key a) where
  mempty = emptyUFM
  mappend = (Semi.<>)

-- Output-ery

instance Outputable a => Outputable (UniqFM key a) where
  ppr ufm = pprUniqFM ppr ufm

-- | Render a UniqFM as @[u1 :-> e1, u2 :-> e2, ...]@.
pprUniqFM :: (a -> SDoc) -> UniqFM key a -> SDoc
pprUniqFM ppr_elt ufm
  = brackets $ fsep $ punctuate comma $
    [ ppr uq <+> text ":->" <+> ppr_elt elt
    | (uq, elt) <- nonDetUFMToList ufm ]
  -- It's OK to use nonDetUFMToList here because we only use it for
  -- pretty-printing.

-- | Pretty-print a non-deterministic set.
-- The order of variables is non-deterministic and for pretty-printing that
-- shouldn't be a problem.
-- Having this function helps contain the non-determinism created with
-- nonDetEltsUFM.
pprUFM :: UniqFM key a        -- ^ The things to be pretty printed
       -> ([a] -> SDoc)       -- ^ The pretty printing function to use on the elements
       -> SDoc                -- ^ 'SDoc' where the things have been pretty
                              -- printed
pprUFM ufm pp = pp (nonDetEltsUFM ufm)

-- | Pretty-print a non-deterministic set.
-- The order of variables is non-deterministic and for pretty-printing that
-- shouldn't be a problem.
-- Having this function helps contain the non-determinism created with
-- nonDetUFMToList.
pprUFMWithKeys
       :: UniqFM key a                -- ^ The things to be pretty printed
       -> ([(Unique, a)] -> SDoc)     -- ^ The pretty printing function to use on the elements
       -> SDoc                        -- ^ 'SDoc' where the things have been pretty
                                      -- printed
pprUFMWithKeys ufm pp = pp (nonDetUFMToList ufm)

-- | Determines the pluralisation suffix appropriate for the length of a set
-- in the same way that plural from Outputable does for lists.
pluralUFM :: UniqFM key a -> SDoc pluralUFM ufm | sizeUFM ufm == 1 = empty | otherwise = char 's' ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/Map.hs0000644000000000000000000002054207346545000021334 0ustar0000000000000000{-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# OPTIONS_GHC -Wall #-} -- Like 'UniqFM', these are maps for keys which are Uniquable. -- Unlike 'UniqFM', these maps also remember their keys, which -- makes them a much better drop in replacement for 'Data.Map.Map'. -- -- Key preservation is right-biased. module GHC.Types.Unique.Map ( UniqMap(..), emptyUniqMap, isNullUniqMap, unitUniqMap, listToUniqMap, listToUniqMap_C, addToUniqMap, addListToUniqMap, addToUniqMap_C, addToUniqMap_Acc, addToUniqMap_L, alterUniqMap, addListToUniqMap_C, adjustUniqMap, delFromUniqMap, delListFromUniqMap, plusUniqMap, plusUniqMap_C, plusMaybeUniqMap_C, plusUniqMapList, plusUniqMapListWith, minusUniqMap, intersectUniqMap, intersectUniqMap_C, disjointUniqMap, mapUniqMap, filterUniqMap, filterWithKeyUniqMap, partitionUniqMap, sizeUniqMap, elemUniqMap, nonDetKeysUniqMap, nonDetEltsUniqMap, lookupUniqMap, lookupWithDefaultUniqMap, anyUniqMap, allUniqMap, nonDetUniqMapToList, nonDetUniqMapToKeySet, nonDetFoldUniqMap -- Non-deterministic functions omitted ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique import GHC.Utils.Outputable import Data.Semigroup as Semi ( Semigroup(..) 
) import Data.Coerce import Data.Maybe import Data.Data import Control.DeepSeq import Data.Set (Set, fromList) -- | Maps indexed by 'Uniquable' keys newtype UniqMap k a = UniqMap { getUniqMap :: UniqFM k (k, a) } deriving (Data, Eq, Functor) type role UniqMap nominal representational instance Semigroup (UniqMap k a) where (<>) = plusUniqMap instance Monoid (UniqMap k a) where mempty = emptyUniqMap mappend = (Semi.<>) instance (Outputable k, Outputable a) => Outputable (UniqMap k a) where ppr (UniqMap m) = brackets $ fsep $ punctuate comma $ [ ppr k <+> text "->" <+> ppr v | (k, v) <- nonDetEltsUFM m ] instance (NFData k, NFData a) => NFData (UniqMap k a) where rnf (UniqMap fm) = seqEltsUFM rnf fm liftC :: (a -> a -> a) -> (k, a) -> (k, a) -> (k, a) liftC f (_, v) (k', v') = (k', f v v') emptyUniqMap :: UniqMap k a emptyUniqMap = UniqMap emptyUFM isNullUniqMap :: UniqMap k a -> Bool isNullUniqMap (UniqMap m) = isNullUFM m unitUniqMap :: Uniquable k => k -> a -> UniqMap k a unitUniqMap k v = UniqMap (unitUFM k (k, v)) listToUniqMap :: Uniquable k => [(k,a)] -> UniqMap k a listToUniqMap kvs = UniqMap (listToUFM [ (k,(k,v)) | (k,v) <- kvs]) listToUniqMap_C :: Uniquable k => (a -> a -> a) -> [(k,a)] -> UniqMap k a listToUniqMap_C f kvs = UniqMap $ listToUFM_C (liftC f) [ (k,(k,v)) | (k,v) <- kvs] addToUniqMap :: Uniquable k => UniqMap k a -> k -> a -> UniqMap k a addToUniqMap (UniqMap m) k v = UniqMap $ addToUFM m k (k, v) addListToUniqMap :: Uniquable k => UniqMap k a -> [(k,a)] -> UniqMap k a addListToUniqMap (UniqMap m) kvs = UniqMap $ addListToUFM m [(k,(k,v)) | (k,v) <- kvs] addToUniqMap_C :: Uniquable k => (a -> a -> a) -> UniqMap k a -> k -> a -> UniqMap k a addToUniqMap_C f (UniqMap m) k v = UniqMap $ addToUFM_C (liftC f) m k (k, v) addToUniqMap_Acc :: Uniquable k => (b -> a -> a) -> (b -> a) -> UniqMap k a -> k -> b -> UniqMap k a addToUniqMap_Acc exi new (UniqMap m) k0 v0 = UniqMap $ addToUFM_Acc (\b (k, v) -> (k, exi b v)) (\b -> (k0, new b)) m k0 v0 -- | Add 
an element, returns previous lookup result and new map. If -- old element doesn't exist, add the passed element directly, -- otherwise compute the element to add using the passed function. addToUniqMap_L :: Uniquable k => (k -> a -> a -> a) -- key,old,new -> k -> a -- new -> UniqMap k a -> (Maybe a, UniqMap k a) addToUniqMap_L f k v (UniqMap m) = case addToUFM_L (\_k (_, _o) (_, _n) -> (_k, f _k _o _n)) k (k, v) m of (_maybe, _ufm) -> (snd <$> _maybe, UniqMap _ufm) alterUniqMap :: Uniquable k => (Maybe a -> Maybe a) -> UniqMap k a -> k -> UniqMap k a alterUniqMap f (UniqMap m) k = UniqMap $ alterUFM (fmap (k,) . f . fmap snd) m k addListToUniqMap_C :: Uniquable k => (a -> a -> a) -> UniqMap k a -> [(k, a)] -> UniqMap k a addListToUniqMap_C f (UniqMap m) kvs = UniqMap $ addListToUFM_C (liftC f) m [(k,(k,v)) | (k,v) <- kvs] adjustUniqMap :: Uniquable k => (a -> a) -> UniqMap k a -> k -> UniqMap k a adjustUniqMap f (UniqMap m) k = UniqMap $ adjustUFM (\(_,v) -> (k,f v)) m k delFromUniqMap :: Uniquable k => UniqMap k a -> k -> UniqMap k a delFromUniqMap (UniqMap m) k = UniqMap $ delFromUFM m k delListFromUniqMap :: Uniquable k => UniqMap k a -> [k] -> UniqMap k a delListFromUniqMap (UniqMap m) ks = UniqMap $ delListFromUFM m ks plusUniqMap :: UniqMap k a -> UniqMap k a -> UniqMap k a plusUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ plusUFM m1 m2 plusUniqMap_C :: (a -> a -> a) -> UniqMap k a -> UniqMap k a -> UniqMap k a plusUniqMap_C f (UniqMap m1) (UniqMap m2) = UniqMap $ plusUFM_C (liftC f) m1 m2 plusMaybeUniqMap_C :: (a -> a -> Maybe a) -> UniqMap k a -> UniqMap k a -> UniqMap k a plusMaybeUniqMap_C f (UniqMap m1) (UniqMap m2) = UniqMap $ plusMaybeUFM_C (\(_, v) (k', v') -> fmap (k',) (f v v')) m1 m2 plusUniqMapList :: [UniqMap k a] -> UniqMap k a plusUniqMapList xs = UniqMap $ plusUFMList (coerce xs) plusUniqMapListWith :: (a -> a -> a) -> [UniqMap k a] -> UniqMap k a plusUniqMapListWith f xs = UniqMap $ plusUFMListWith go (coerce xs) where -- l and r keys will be 
identical so we choose the former go (l_key, l) (_r, r) = (l_key, f l r) {-# INLINE plusUniqMapListWith #-} minusUniqMap :: UniqMap k a -> UniqMap k b -> UniqMap k a minusUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ minusUFM m1 m2 intersectUniqMap :: UniqMap k a -> UniqMap k b -> UniqMap k a intersectUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ intersectUFM m1 m2 -- | Intersection with a combining function. intersectUniqMap_C :: (a -> b -> c) -> UniqMap k a -> UniqMap k b -> UniqMap k c intersectUniqMap_C f (UniqMap m1) (UniqMap m2) = UniqMap $ intersectUFM_C (\(k, a) (_, b) -> (k, f a b)) m1 m2 {-# INLINE intersectUniqMap #-} disjointUniqMap :: UniqMap k a -> UniqMap k b -> Bool disjointUniqMap (UniqMap m1) (UniqMap m2) = disjointUFM m1 m2 mapUniqMap :: (a -> b) -> UniqMap k a -> UniqMap k b mapUniqMap f (UniqMap m) = UniqMap $ mapUFM (fmap f) m -- (,) k instance filterUniqMap :: (a -> Bool) -> UniqMap k a -> UniqMap k a filterUniqMap f (UniqMap m) = UniqMap $ filterUFM (f . snd) m filterWithKeyUniqMap :: (k -> a -> Bool) -> UniqMap k a -> UniqMap k a filterWithKeyUniqMap f (UniqMap m) = UniqMap $ filterUFM (uncurry f) m partitionUniqMap :: (a -> Bool) -> UniqMap k a -> (UniqMap k a, UniqMap k a) partitionUniqMap f (UniqMap m) = coerce $ partitionUFM (f . snd) m sizeUniqMap :: UniqMap k a -> Int sizeUniqMap (UniqMap m) = sizeUFM m elemUniqMap :: Uniquable k => k -> UniqMap k a -> Bool elemUniqMap k (UniqMap m) = elemUFM k m lookupUniqMap :: Uniquable k => UniqMap k a -> k -> Maybe a lookupUniqMap (UniqMap m) k = fmap snd (lookupUFM m k) lookupWithDefaultUniqMap :: Uniquable k => UniqMap k a -> a -> k -> a lookupWithDefaultUniqMap (UniqMap m) a k = fromMaybe a (fmap snd (lookupUFM m k)) anyUniqMap :: (a -> Bool) -> UniqMap k a -> Bool anyUniqMap f (UniqMap m) = anyUFM (f . snd) m allUniqMap :: (a -> Bool) -> UniqMap k a -> Bool allUniqMap f (UniqMap m) = allUFM (f . 
snd) m nonDetUniqMapToList :: UniqMap k a -> [(k, a)] nonDetUniqMapToList (UniqMap m) = nonDetEltsUFM m {-# INLINE nonDetUniqMapToList #-} nonDetUniqMapToKeySet :: Ord k => UniqMap k a -> Set k nonDetUniqMapToKeySet m = fromList (nonDetKeysUniqMap m) nonDetKeysUniqMap :: UniqMap k a -> [k] nonDetKeysUniqMap m = map fst (nonDetUniqMapToList m) {-# INLINE nonDetKeysUniqMap #-} nonDetEltsUniqMap :: UniqMap k a -> [a] nonDetEltsUniqMap m = map snd (nonDetUniqMapToList m) {-# INLINE nonDetEltsUniqMap #-} nonDetFoldUniqMap :: ((k, a) -> b -> b) -> b -> UniqMap k a -> b nonDetFoldUniqMap go z (UniqMap m) = nonDetFoldUFM go z m {-# INLINE nonDetFoldUniqMap #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/SDFM.hs0000644000000000000000000001161107346545000021345 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ApplicativeDo #-} {-# OPTIONS_GHC -Wall #-} -- | Like a 'UniqDFM', but maintains equivalence classes of keys sharing the -- same entry. See 'UniqSDFM'. module GHC.Types.Unique.SDFM ( -- * Unique-keyed, /shared/, deterministic mappings UniqSDFM, emptyUSDFM, lookupUSDFM, equateUSDFM, addToUSDFM, traverseUSDFM ) where import GHC.Prelude import GHC.Types.Unique import GHC.Types.Unique.DFM import GHC.Utils.Outputable -- | Either @Indirect x@, meaning the value is represented by that of @x@, or -- an @Entry@ containing containing the actual value it represents. data Shared key ele = Indirect !key | Entry !ele -- | A 'UniqDFM' whose domain is /sets/ of 'Unique's, each of which share a -- common value of type @ele@. -- Every such set (\"equivalence class\") has a distinct representative -- 'Unique'. Supports merging the entries of multiple such sets in a union-find -- like fashion. -- -- An accurate model is that of @[(Set key, Maybe ele)]@: A finite mapping from -- sets of @key@s to possibly absent entries @ele@, where the sets don't overlap. 
-- Example: -- @ -- m = [({u1,u3}, Just ele1), ({u2}, Just ele2), ({u4,u7}, Nothing)] -- @ -- On this model we support the following main operations: -- -- * @'lookupUSDFM' m u3 == Just ele1@, @'lookupUSDFM' m u4 == Nothing@, -- @'lookupUSDFM' m u5 == Nothing@. -- * @'equateUSDFM' m u1 u3@ is a no-op, but -- @'equateUSDFM' m u1 u2@ merges @{u1,u3}@ and @{u2}@ to point to -- @Just ele2@ and returns the old entry of @{u1,u3}@, @Just ele1@. -- * @'addToUSDFM' m u3 ele4@ sets the entry of @{u1,u3}@ to @Just ele4@. -- -- As well as a few means for traversal/conversion to list. newtype UniqSDFM key ele = USDFM { unUSDFM :: UniqDFM key (Shared key ele) } emptyUSDFM :: UniqSDFM key ele emptyUSDFM = USDFM emptyUDFM lookupReprAndEntryUSDFM :: Uniquable key => UniqSDFM key ele -> key -> (key, Maybe ele) lookupReprAndEntryUSDFM (USDFM env) = go where go x = case lookupUDFM env x of Nothing -> (x, Nothing) Just (Indirect y) -> go y Just (Entry ele) -> (x, Just ele) -- | @lookupSUDFM env x@ looks up an entry for @x@, looking through all -- 'Indirect's until it finds a shared 'Entry'. -- -- Examples in terms of the model (see 'UniqSDFM'): -- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u3 == Just ele1 -- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u4 == Nothing -- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Nothing)] u2 == Nothing lookupUSDFM :: Uniquable key => UniqSDFM key ele -> key -> Maybe ele lookupUSDFM usdfm x = snd (lookupReprAndEntryUSDFM usdfm x) -- | @equateUSDFM env x y@ makes @x@ and @y@ point to the same entry, -- thereby merging @x@'s class with @y@'s. -- If both @x@ and @y@ are in the domain of the map, then @y@'s entry will be -- chosen as the new entry and @x@'s old entry will be returned. 
-- -- Examples in terms of the model (see 'UniqSDFM'): -- >>> equateUSDFM [] u1 u2 == (Nothing, [({u1,u2}, Nothing)]) -- >>> equateUSDFM [({u1,u3}, Just ele1)] u3 u4 == (Nothing, [({u1,u3,u4}, Just ele1)]) -- >>> equateUSDFM [({u1,u3}, Just ele1)] u4 u3 == (Nothing, [({u1,u3,u4}, Just ele1)]) -- >>> equateUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u3 u2 == (Just ele1, [({u2,u1,u3}, Just ele2)]) equateUSDFM :: Uniquable key => UniqSDFM key ele -> key -> key -> (Maybe ele, UniqSDFM key ele) equateUSDFM usdfm@(USDFM env) x y = case (lu x, lu y) of ((x', _) , (y', _)) | getUnique x' == getUnique y' -> (Nothing, usdfm) -- nothing to do ((x', _) , (y', Nothing)) -> (Nothing, set_indirect y' x') ((x', mb_ex), (y', _)) -> (mb_ex, set_indirect x' y') where lu = lookupReprAndEntryUSDFM usdfm set_indirect a b = USDFM $ addToUDFM env a (Indirect b) -- | @addToUSDFM env x a@ sets the entry @x@ is associated with to @a@, -- thereby modifying its whole equivalence class. -- -- Examples in terms of the model (see 'UniqSDFM'): -- >>> addToUSDFM [] u1 ele1 == [({u1}, Just ele1)] -- >>> addToUSDFM [({u1,u3}, Just ele1)] u3 ele2 == [({u1,u3}, Just ele2)] addToUSDFM :: Uniquable key => UniqSDFM key ele -> key -> ele -> UniqSDFM key ele addToUSDFM usdfm@(USDFM env) x v = USDFM $ addToUDFM env (fst (lookupReprAndEntryUSDFM usdfm x)) (Entry v) traverseUSDFM :: forall key a b f. Applicative f => (a -> f b) -> UniqSDFM key a -> f (UniqSDFM key b) traverseUSDFM f = fmap (USDFM . listToUDFM_Directly) . traverse g . udfmToList . 
unUSDFM where g :: (Unique, Shared key a) -> f (Unique, Shared key b) g (u, Indirect y) = pure (u,Indirect y) g (u, Entry a) = do a' <- f a pure (u,Entry a') instance (Outputable key, Outputable ele) => Outputable (Shared key ele) where ppr (Indirect x) = ppr x ppr (Entry a) = ppr a instance (Outputable key, Outputable ele) => Outputable (UniqSDFM key ele) where ppr (USDFM env) = ppr env ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/Set.hs0000644000000000000000000002465707346545000021365 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1994-1998 \section[UniqSet]{Specialised sets, for things with @Uniques@} Based on @UniqFMs@ (as you would expect). Basically, the things need to be in class @Uniquable@. -} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.Unique.Set ( -- * Unique set type UniqSet, -- type synonym for UniqFM a getUniqSet, pprUniqSet, -- ** Manipulating these sets emptyUniqSet, unitUniqSet, mkUniqSet, addOneToUniqSet, addListToUniqSet, delOneFromUniqSet, delOneFromUniqSet_Directly, delListFromUniqSet, delListFromUniqSet_Directly, unionUniqSets, unionManyUniqSets, minusUniqSet, uniqSetMinusUFM, uniqSetMinusUDFM, intersectUniqSets, disjointUniqSets, restrictUniqSetToUFM, uniqSetAny, uniqSetAll, elementOfUniqSet, elemUniqSet_Directly, filterUniqSet, filterUniqSet_Directly, sizeUniqSet, isEmptyUniqSet, lookupUniqSet, lookupUniqSet_Directly, partitionUniqSet, mapUniqSet, unsafeUFMToUniqSet, nonDetEltsUniqSet, nonDetKeysUniqSet, nonDetStrictFoldUniqSet, mapMaybeUniqSet_sameUnique, -- UniqueSet UniqueSet(..), nullUniqueSet, sizeUniqueSet, memberUniqueSet, emptyUniqueSet, singletonUniqueSet, insertUniqueSet, deleteUniqueSet, differenceUniqueSet, unionUniqueSet, unionsUniqueSet, intersectionUniqueSet, isSubsetOfUniqueSet, filterUniqueSet, foldlUniqueSet, foldrUniqueSet, elemsUniqueSet, fromListUniqueSet, ) where import GHC.Prelude import 
GHC.Types.Unique.DFM import GHC.Types.Unique.FM import GHC.Types.Unique import Data.Coerce import GHC.Utils.Outputable import Data.Data import qualified Data.Semigroup as Semi import Control.DeepSeq import qualified GHC.Data.Word64Set as S -- Note [UniqSet invariant] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- UniqSet has the following invariant: -- The keys in the map are the uniques of the values -- It means that to implement mapUniqSet you have to update -- both the keys and the values. -- | Set of Uniquable values newtype UniqSet a = UniqSet {getUniqSet' :: UniqFM a a} deriving (Data, Semi.Semigroup, Monoid) instance NFData a => NFData (UniqSet a) where rnf = forceUniqSet rnf emptyUniqSet :: UniqSet a emptyUniqSet = UniqSet emptyUFM unitUniqSet :: Uniquable a => a -> UniqSet a unitUniqSet x = UniqSet $ unitUFM x x mkUniqSet :: Uniquable a => [a] -> UniqSet a mkUniqSet = foldl' addOneToUniqSet emptyUniqSet {-# INLINEABLE mkUniqSet #-} addOneToUniqSet :: Uniquable a => UniqSet a -> a -> UniqSet a addOneToUniqSet (UniqSet set) x = UniqSet (addToUFM set x x) addListToUniqSet :: Uniquable a => UniqSet a -> [a] -> UniqSet a addListToUniqSet = foldl' addOneToUniqSet {-# INLINEABLE addListToUniqSet #-} delOneFromUniqSet :: Uniquable a => UniqSet a -> a -> UniqSet a delOneFromUniqSet (UniqSet s) a = UniqSet (delFromUFM s a) delOneFromUniqSet_Directly :: UniqSet a -> Unique -> UniqSet a delOneFromUniqSet_Directly (UniqSet s) u = UniqSet (delFromUFM_Directly s u) delListFromUniqSet :: Uniquable a => UniqSet a -> [a] -> UniqSet a delListFromUniqSet (UniqSet s) l = UniqSet (delListFromUFM s l) {-# INLINEABLE delListFromUniqSet #-} delListFromUniqSet_Directly :: UniqSet a -> [Unique] -> UniqSet a delListFromUniqSet_Directly (UniqSet s) l = UniqSet (delListFromUFM_Directly s l) {-# INLINEABLE delListFromUniqSet_Directly #-} unionUniqSets :: UniqSet a -> UniqSet a -> UniqSet a unionUniqSets (UniqSet s) (UniqSet t) = UniqSet (plusUFM s t) unionManyUniqSets :: [UniqSet a] -> UniqSet a 
unionManyUniqSets = foldl' (flip unionUniqSets) emptyUniqSet minusUniqSet :: UniqSet a -> UniqSet a -> UniqSet a minusUniqSet (UniqSet s) (UniqSet t) = UniqSet (minusUFM s t) intersectUniqSets :: UniqSet a -> UniqSet a -> UniqSet a intersectUniqSets (UniqSet s) (UniqSet t) = UniqSet (intersectUFM s t) disjointUniqSets :: UniqSet a -> UniqSet a -> Bool disjointUniqSets (UniqSet s) (UniqSet t) = disjointUFM s t restrictUniqSetToUFM :: UniqSet key -> UniqFM key b -> UniqSet key restrictUniqSetToUFM (UniqSet s) m = UniqSet (intersectUFM s m) uniqSetMinusUFM :: UniqSet key -> UniqFM key b -> UniqSet key uniqSetMinusUFM (UniqSet s) t = UniqSet (minusUFM s t) uniqSetMinusUDFM :: UniqSet key -> UniqDFM key b -> UniqSet key uniqSetMinusUDFM (UniqSet s) t = UniqSet (ufmMinusUDFM s t) elementOfUniqSet :: Uniquable a => a -> UniqSet a -> Bool elementOfUniqSet a (UniqSet s) = elemUFM a s elemUniqSet_Directly :: Unique -> UniqSet a -> Bool elemUniqSet_Directly a (UniqSet s) = elemUFM_Directly a s filterUniqSet :: (a -> Bool) -> UniqSet a -> UniqSet a filterUniqSet p (UniqSet s) = UniqSet (filterUFM p s) filterUniqSet_Directly :: (Unique -> elt -> Bool) -> UniqSet elt -> UniqSet elt filterUniqSet_Directly f (UniqSet s) = UniqSet (filterUFM_Directly f s) partitionUniqSet :: (a -> Bool) -> UniqSet a -> (UniqSet a, UniqSet a) partitionUniqSet p (UniqSet s) = coerce (partitionUFM p s) uniqSetAny :: (a -> Bool) -> UniqSet a -> Bool uniqSetAny p (UniqSet s) = anyUFM p s uniqSetAll :: (a -> Bool) -> UniqSet a -> Bool uniqSetAll p (UniqSet s) = allUFM p s sizeUniqSet :: UniqSet a -> Int sizeUniqSet (UniqSet s) = sizeUFM s isEmptyUniqSet :: UniqSet a -> Bool isEmptyUniqSet (UniqSet s) = isNullUFM s -- | What's the point you might ask? We might have changed an object -- without it's key changing. In which case this lookup makes sense. 
lookupUniqSet :: Uniquable key => UniqSet key -> key -> Maybe key
lookupUniqSet (UniqSet s) k = lookupUFM s k

-- | Lookup by raw 'Unique' rather than by a 'Uniquable' value.
lookupUniqSet_Directly :: UniqSet a -> Unique -> Maybe a
lookupUniqSet_Directly (UniqSet s) k = lookupUFM_Directly s k

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetEltsUniqSet :: UniqSet elt -> [elt]
nonDetEltsUniqSet = nonDetEltsUFM . getUniqSet'

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetKeysUniqSet :: UniqSet elt -> [Unique]
nonDetKeysUniqSet = nonDetKeysUFM . getUniqSet'

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldUniqSet :: (elt -> a -> a) -> a -> UniqSet elt -> a
nonDetStrictFoldUniqSet c n (UniqSet s) = nonDetStrictFoldUFM c n s

-- See Note [UniqSet invariant]
-- Implemented by rebuilding the set from the mapped elements, because the
-- function may change each element's 'Unique', so the keys must be
-- recomputed as well.
mapUniqSet :: Uniquable b => (a -> b) -> UniqSet a -> UniqSet b
mapUniqSet f = mkUniqSet . map f . nonDetEltsUniqSet

-- | Like 'Data.Set.mapMaybe', but you must ensure the passed in function
-- does not change the 'Unique'.
mapMaybeUniqSet_sameUnique :: (a -> Maybe b) -> UniqSet a -> UniqSet b
mapMaybeUniqSet_sameUnique f (UniqSet a) = UniqSet $ mapMaybeUFM_sameUnique f a

-- Two 'UniqSet's are considered equal if they contain the same
-- uniques.
instance Eq (UniqSet a) where
  UniqSet a == UniqSet b = equalKeysUFM a b

-- | Expose the underlying key->value map of the set.
getUniqSet :: UniqSet a -> UniqFM a a
getUniqSet = getUniqSet'

-- | 'unsafeUFMToUniqSet' converts a @'UniqFM' a@ into a @'UniqSet' a@
-- assuming, without checking, that it maps each 'Unique' to a value
-- that has that 'Unique'. See Note [UniqSet invariant].
unsafeUFMToUniqSet :: UniqFM a a -> UniqSet a unsafeUFMToUniqSet = UniqSet instance Outputable a => Outputable (UniqSet a) where ppr = pprUniqSet ppr pprUniqSet :: (a -> SDoc) -> UniqSet a -> SDoc -- It's OK to use nonDetUFMToList here because we only use it for -- pretty-printing. pprUniqSet f = braces . pprWithCommas f . nonDetEltsUniqSet forceUniqSet :: (a -> ()) -> UniqSet a -> () forceUniqSet f (UniqSet fm) = seqEltsUFM f fm -------------------------------------------------------- -- UniqueSet -------------------------------------------------------- -- | Set of Unique values -- -- Similar to 'UniqSet Unique' but with a more compact representation. newtype UniqueSet = US { unUniqueSet :: S.Word64Set } deriving (Eq, Ord, Show, Semigroup, Monoid) {-# INLINE nullUniqueSet #-} nullUniqueSet :: UniqueSet -> Bool nullUniqueSet (US s) = S.null s {-# INLINE sizeUniqueSet #-} sizeUniqueSet :: UniqueSet -> Int sizeUniqueSet (US s) = S.size s {-# INLINE memberUniqueSet #-} memberUniqueSet :: Unique -> UniqueSet -> Bool memberUniqueSet k (US s) = S.member (getKey k) s {-# INLINE emptyUniqueSet #-} emptyUniqueSet :: UniqueSet emptyUniqueSet = US S.empty {-# INLINE singletonUniqueSet #-} singletonUniqueSet :: Unique -> UniqueSet singletonUniqueSet k = US (S.singleton (getKey k)) {-# INLINE insertUniqueSet #-} insertUniqueSet :: Unique -> UniqueSet -> UniqueSet insertUniqueSet k (US s) = US (S.insert (getKey k) s) {-# INLINE deleteUniqueSet #-} deleteUniqueSet :: Unique -> UniqueSet -> UniqueSet deleteUniqueSet k (US s) = US (S.delete (getKey k) s) {-# INLINE unionUniqueSet #-} unionUniqueSet :: UniqueSet -> UniqueSet -> UniqueSet unionUniqueSet (US x) (US y) = US (S.union x y) {-# INLINE unionsUniqueSet #-} unionsUniqueSet :: [UniqueSet] -> UniqueSet unionsUniqueSet xs = US (S.unions (map unUniqueSet xs)) {-# INLINE differenceUniqueSet #-} differenceUniqueSet :: UniqueSet -> UniqueSet -> UniqueSet differenceUniqueSet (US x) (US y) = US (S.difference x y) {-# INLINE 
intersectionUniqueSet #-} intersectionUniqueSet :: UniqueSet -> UniqueSet -> UniqueSet intersectionUniqueSet (US x) (US y) = US (S.intersection x y) {-# INLINE isSubsetOfUniqueSet #-} isSubsetOfUniqueSet :: UniqueSet -> UniqueSet -> Bool isSubsetOfUniqueSet (US x) (US y) = S.isSubsetOf x y {-# INLINE filterUniqueSet #-} filterUniqueSet :: (Unique -> Bool) -> UniqueSet -> UniqueSet filterUniqueSet f (US s) = US (S.filter (f . mkUniqueGrimily) s) {-# INLINE foldlUniqueSet #-} foldlUniqueSet :: (a -> Unique -> a) -> a -> UniqueSet -> a foldlUniqueSet k z (US s) = S.foldl' (\a b -> k a (mkUniqueGrimily b)) z s {-# INLINE foldrUniqueSet #-} foldrUniqueSet :: (Unique -> b -> b) -> b -> UniqueSet -> b foldrUniqueSet k z (US s) = S.foldr (k . mkUniqueGrimily) z s {-# INLINE elemsUniqueSet #-} elemsUniqueSet :: UniqueSet -> [Unique] elemsUniqueSet (US s) = map mkUniqueGrimily (S.elems s) {-# INLINE fromListUniqueSet #-} fromListUniqueSet :: [Unique] -> UniqueSet fromListUniqueSet ks = US (S.fromList (map getKey ks)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Unique/Supply.hs0000644000000000000000000003474707346545000022127 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE CPP #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnboxedTuples #-} module GHC.Types.Unique.Supply ( -- * Main data type UniqSupply, -- Abstractly -- ** Operations on supplies uniqFromSupply, uniqsFromSupply, -- basic ops takeUniqFromSupply, uniqFromTag, mkSplitUniqSupply, splitUniqSupply, listSplitUniqSupply, -- * Unique supply monad and its abstraction UniqSM, MonadUnique(..), -- ** Operations on the monad initUs, initUs_, -- * Set supply strategy initUniqSupply ) where import GHC.Prelude import GHC.Types.Unique import GHC.Utils.Panic.Plain import GHC.IO import GHC.Utils.Monad import Control.Monad import Data.Word import GHC.Exts( Ptr(..), noDuplicate#, 
oneShot ) import Foreign.Storable import GHC.Utils.Monad.State.Strict as Strict #include "MachDeps.h" #if WORD_SIZE_IN_BITS != 64 #define NO_FETCH_ADD #endif #if defined(NO_FETCH_ADD) import GHC.Exts ( atomicCasWord64Addr#, eqWord64#, readWord64OffAddr# ) #else import GHC.Exts( fetchAddWordAddr#, word64ToWord# ) #endif import GHC.Exts ( Addr#, State#, Word64#, RealWorld ) import GHC.Int ( Int(..) ) import GHC.Word( Word64(..) ) import GHC.Exts( plusWord64#, int2Word#, wordToWord64# ) {- ************************************************************************ * * \subsection{Splittable Unique supply: @UniqSupply@} * * ************************************************************************ -} {- Note [How the unique supply works] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The basic idea (due to Lennart Augustsson) is that a UniqSupply is lazily-evaluated infinite tree. * At each MkSplitUniqSupply node is a unique Word64, and two sub-trees (see data UniqSupply) * takeUniqFromSupply :: UniqSupply -> (Unique, UniqSupply) returns the unique Word64 and one of the sub-trees * splitUniqSupply :: UniqSupply -> (UniqSupply, UniqSupply) returns the two sub-trees * When you poke on one of the thunks, it does a foreign call to get a fresh Word64 from a thread-safe counter, and returns a fresh MkSplitUniqSupply node. This has to be as efficient as possible: it should allocate only * The fresh node * A thunk for each sub-tree Note [How unique supplies are used] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The general design (used throughout GHC) is to: * For creating new uniques either a UniqSupply is used and threaded through or for monadic code a MonadUnique instance might conjure up uniques using `uniqFromTag`. * Different parts of the compiler will use a UniqSupply or MonadUnique instance with a specific tag. This way the different parts of the compiler will generate uniques with different tags. 
If different code shares the same tag then care has to be taken that all uniques still get distinct numbers. Usually this is done by relying on genSym which has *one* counter per GHC invocation that is relied on by all calls to it. But using something like the address for pinned objects works as well and in fact is done for fast strings. This is important for example in the simplifier. Most passes of the simplifier use the same tag 's'. However in some places we create a unique supply using `mkSplitUniqSupply` and thread it through the code, while in GHC.Core.Opt.Simplify.Monad we use the `instance MonadUnique SimplM`, which uses `mkSplitUniqSupply` in getUniqueSupplyM and `uniqFromTag` in getUniqueM. Ultimately all these boil down to each new unique consisting of the tag and the result from a call to `genSym`. The latter producing a distinct number for each invocation ensuring uniques are distinct. Note [Optimising the unique supply] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inner loop of mkSplitUniqSupply is a function closure mk_supply s0 = case noDuplicate# s0 of { s1 -> case unIO genSym s1 of { (# s2, u #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s2 of { (# s3, x #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s3 of { (# s4, y #) -> (# s4, MkSplitUniqSupply (tag .|. u) x y #) }}}} It's a classic example of an IO action that is captured and then called repeatedly (see #18238 for some discussion). It mustn't allocate! The test perf/should_run/UniqLoop keeps track of this loop. Watch it carefully. We used to write it as: mk_supply :: IO UniqSupply mk_supply = unsafeInterleaveIO $ genSym >>= \ u -> mk_supply >>= \ s1 -> mk_supply >>= \ s2 -> return (MkSplitUniqSupply (tag .|. u) s1 s2) and to rely on -fno-state-hack, full laziness and inlining to get the same result. It was very brittle and required enabling -fno-state-hack globally. So it has been rewritten using lower level constructs to explicitly state what we want. 
Note [Optimising use of unique supplies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When it comes to having a way to generate new Uniques there are generally three ways to deal with this: For pure code the only good approach is to take an UniqSupply as argument. Then thread it through the code splitting it for sub-passes or when creating uniques. The code for this is about as optimized as it gets, but we can't get around the need to allocate one `UniqSupply` for each Unique we need. For code in IO we can improve on this by threading only the *tag* we are going to use for Uniques. Using `uniqFromTag` to generate uniques as needed. This gets rid of the overhead of allocating a new UniqSupply for each unique generated. It also avoids frequent state updates when the Unique/Tag is part of the state in a state monad. For monadic code in IO which always uses the same tag we can go further and hardcode the tag into the MonadUnique instance. On top of all the benefits of threading the tag this *also* has the benefit of avoiding the tag getting captured in thunks, or being passed around at runtime. It does however come at the cost of having to use a fixed tag for all code run in this Monad. The tag is mostly cosmetic: See Note [Uniques and tags]. NB: It's *not* an optimization to pass around the UniqSupply inside an IORef instead of the tag. While this would avoid frequent state updates it still requires allocating one UniqSupply per Unique. On top of some overhead for reading/writing to/from the IORef. All of this hinges on the assumption that UniqSupply and uniqFromTag use the same source of distinct numbers (`genSym`) which allows both to be used at the same time, with the same tag, while still ensuring distinct uniques. One might consider this fact to be an "accident". But GHC worked like this as far back as source control history goes. It also allows the later two optimizations to be used. So it seems safe to depend on this fact. 
-} -- | Unique Supply -- -- A value of type 'UniqSupply' is unique, and it can -- supply /one/ distinct 'Unique'. Also, from the supply, one can -- also manufacture an arbitrary number of further 'UniqueSupply' values, -- which will be distinct from the first and from all others. data UniqSupply = MkSplitUniqSupply {-# UNPACK #-} !Word64 -- make the Unique with this UniqSupply UniqSupply -- when split => these two supplies mkSplitUniqSupply :: Char -> IO UniqSupply -- ^ Create a unique supply out of thin air. -- The "tag" (Char) supplied is mostly cosmetic, making it easier -- to figure out where a Unique was born. See Note [Uniques and tags]. -- -- The payload part of the Uniques allocated from this UniqSupply are -- guaranteed distinct wrt all other supplies, regardless of their "tag". -- This is achieved by allocating the payload part from -- a single source of Uniques, namely `genSym`, shared across -- all UniqSupply's. -- See Note [How the unique supply works] -- See Note [Optimising the unique supply] mkSplitUniqSupply c = unsafeDupableInterleaveIO (IO mk_supply) where !tag = mkTag c -- Here comes THE MAGIC: see Note [How the unique supply works] -- This is one of the most hammered bits in the whole compiler -- See Note [Optimising the unique supply] -- NB: Use noDuplicate# for thread-safety. mk_supply s0 = case noDuplicate# s0 of { s1 -> case unIO genSym s1 of { (# s2, u #) -> -- deferred IO computations case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s2 of { (# s3, x #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s3 of { (# s4, y #) -> (# s4, MkSplitUniqSupply (tag .|. u) x y #) }}}} #if defined(NO_FETCH_ADD) -- GHC currently does not provide this operation on 32-bit platforms, -- hence the CAS-based implementation. 
fetchAddWord64Addr# :: Addr# -> Word64# -> State# RealWorld -> (# State# RealWorld, Word64# #) fetchAddWord64Addr# = go where go ptr inc s0 = case readWord64OffAddr# ptr 0# s0 of (# s1, n0 #) -> case atomicCasWord64Addr# ptr n0 (n0 `plusWord64#` inc) s1 of (# s2, res #) | 1# <- res `eqWord64#` n0 -> (# s2, n0 #) | otherwise -> go ptr inc s2 #else fetchAddWord64Addr# :: Addr# -> Word64# -> State# RealWorld -> (# State# RealWorld, Word64# #) fetchAddWord64Addr# addr inc s0 = case fetchAddWordAddr# addr (word64ToWord# inc) s0 of (# s1, res #) -> (# s1, wordToWord64# res #) #endif genSym :: IO Word64 genSym = do let !mask = (1 `unsafeShiftL` uNIQUE_BITS) - 1 let !(Ptr counter) = ghc_unique_counter64 I# inc# <- peek ghc_unique_inc let !inc = wordToWord64# (int2Word# inc#) u <- IO $ \s1 -> case fetchAddWord64Addr# counter inc s1 of (# s2, val #) -> let !u = W64# (val `plusWord64#` inc) .&. mask in (# s2, u #) #if defined(DEBUG) -- Uh oh! We will overflow next time a unique is requested. -- (Note that if the increment isn't 1 we may miss this check) massert (u /= mask) #endif return u foreign import ccall unsafe "&ghc_unique_counter64" ghc_unique_counter64 :: Ptr Word64 foreign import ccall unsafe "&ghc_unique_inc" ghc_unique_inc :: Ptr Int initUniqSupply :: Word64 -> Int -> IO () initUniqSupply counter inc = do poke ghc_unique_counter64 counter poke ghc_unique_inc inc uniqFromTag :: Char -> IO Unique uniqFromTag !tag = do { uqNum <- genSym ; return $! mkUnique tag uqNum } {-# NOINLINE uniqFromTag #-} -- We'll unbox everything, but we don't want to inline it splitUniqSupply :: UniqSupply -> (UniqSupply, UniqSupply) -- ^ Build two 'UniqSupply' from a single one, each of which -- can supply its own 'Unique'. 
listSplitUniqSupply :: UniqSupply -> [UniqSupply] -- ^ Create an infinite list of 'UniqSupply' from a single one uniqFromSupply :: UniqSupply -> Unique -- ^ Obtain the 'Unique' from this particular 'UniqSupply' uniqsFromSupply :: UniqSupply -> [Unique] -- Infinite -- ^ Obtain an infinite list of 'Unique' that can be generated by constant splitting of the supply takeUniqFromSupply :: UniqSupply -> (Unique, UniqSupply) -- ^ Obtain the 'Unique' from this particular 'UniqSupply', and a new supply splitUniqSupply (MkSplitUniqSupply _ s1 s2) = (s1, s2) listSplitUniqSupply (MkSplitUniqSupply _ s1 s2) = s1 : listSplitUniqSupply s2 uniqFromSupply (MkSplitUniqSupply n _ _) = mkUniqueGrimily n uniqsFromSupply (MkSplitUniqSupply n _ s2) = mkUniqueGrimily n : uniqsFromSupply s2 takeUniqFromSupply (MkSplitUniqSupply n s1 _) = (mkUniqueGrimily n, s1) {-# INLINE splitUniqSupply #-} {- ************************************************************************ * * \subsubsection[UniqSupply-monad]{@UniqSupply@ monad: @UniqSM@} * * ************************************************************************ -} type UniqResult result = (# result, UniqSupply #) pattern UniqResult :: a -> b -> (# a, b #) pattern UniqResult x y = (# x, y #) {-# COMPLETE UniqResult #-} -- | A monad which just gives the ability to obtain 'Unique's newtype UniqSM result = USM { unUSM :: UniqSupply -> UniqResult result } deriving (Functor, Applicative, Monad) via (Strict.State UniqSupply) -- | Smart constructor for 'UniqSM', as described in Note [The one-shot state -- monad trick]. 
mkUniqSM :: (UniqSupply -> UniqResult a) -> UniqSM a mkUniqSM f = USM (oneShot f) {-# INLINE mkUniqSM #-} -- TODO: try to get rid of this instance instance MonadFail UniqSM where fail = panic -- | Run the 'UniqSM' action, returning the final 'UniqSupply' initUs :: UniqSupply -> UniqSM a -> (a, UniqSupply) initUs init_us m = case unUSM m init_us of { UniqResult r us -> (r, us) } -- | Run the 'UniqSM' action, discarding the final 'UniqSupply' initUs_ :: UniqSupply -> UniqSM a -> a initUs_ init_us m = case unUSM m init_us of { UniqResult r _ -> r } liftUSM :: UniqSM a -> UniqSupply -> (a, UniqSupply) liftUSM (USM m) us0 = case m us0 of UniqResult a us1 -> (a, us1) instance MonadFix UniqSM where mfix m = mkUniqSM (\us0 -> let (r,us1) = liftUSM (m r) us0 in UniqResult r us1) getUs :: UniqSM UniqSupply getUs = mkUniqSM (\us0 -> case splitUniqSupply us0 of (us1,us2) -> UniqResult us1 us2) -- | A monad for generating unique identifiers class Monad m => MonadUnique m where -- | Get a new UniqueSupply getUniqueSupplyM :: m UniqSupply -- | Get a new unique identifier getUniqueM :: m Unique -- | Get an infinite list of new unique identifiers getUniquesM :: m [Unique] -- This default definition of getUniqueM, while correct, is not as -- efficient as it could be since it needlessly generates and throws away -- an extra Unique. For your instances consider providing an explicit -- definition for 'getUniqueM' which uses 'takeUniqFromSupply' directly. 
getUniqueM = liftM uniqFromSupply getUniqueSupplyM getUniquesM = liftM uniqsFromSupply getUniqueSupplyM instance MonadUnique UniqSM where getUniqueSupplyM = getUs getUniqueM = getUniqueUs getUniquesM = getUniquesUs getUniqueUs :: UniqSM Unique getUniqueUs = mkUniqSM (\us0 -> case takeUniqFromSupply us0 of (u,us1) -> UniqResult u us1) getUniquesUs :: UniqSM [Unique] getUniquesUs = mkUniqSM (\us0 -> case splitUniqSupply us0 of (us1,us2) -> UniqResult (uniqsFromSupply us1) us2) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Var.hs0000644000000000000000000013061007346545000020077 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section{@Vars@: Variables} -} {-# LANGUAGE MultiWayIf, PatternSynonyms #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {-# LANGUAGE DeriveFunctor #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var' is a synonym for the 'GHC.Types.Id.Id' type but it may additionally -- potentially contain type variables, which have a 'GHC.Core.TyCo.Rep.Kind' -- rather than a 'GHC.Core.TyCo.Rep.Type' and only contain some extra -- details during typechecking. -- -- These 'Var' names may either be global or local, see "GHC.Types.Var#globalvslocal" -- -- #globalvslocal# -- Global 'Id's and 'Var's are those that are imported or correspond -- to a data constructor, primitive operation, or record selectors. -- Local 'Id's and 'Var's are those bound within an expression -- (e.g. by a lambda) or at the top level of the module being compiled. 
module GHC.Types.Var ( -- * The main data type and synonyms Var, CoVar, Id, NcId, DictId, DFunId, EvVar, EqVar, EvId, IpId, JoinId, TyVar, TcTyVar, TypeVar, KindVar, TKVar, TyCoVar, -- * In and Out variants InVar, InCoVar, InId, InTyVar, OutVar, OutCoVar, OutId, OutTyVar, -- ** Taking 'Var's apart varName, varUnique, varType, varMult, varMultMaybe, -- ** Modifying 'Var's setVarName, setVarUnique, setVarType, updateVarType, updateVarTypeM, -- ** Constructing, taking apart, modifying 'Id's mkGlobalVar, mkLocalVar, mkExportedLocalVar, mkCoVar, idInfo, idDetails, lazySetIdInfo, setIdDetails, globaliseId, setIdExported, setIdNotExported, setIdMult, updateIdTypeButNotMult, updateIdTypeAndMult, updateIdTypeAndMultM, -- ** Predicates isId, isTyVar, isTcTyVar, isLocalVar, isLocalId, isLocalId_maybe, isCoVar, isNonCoVarId, isTyCoVar, isGlobalId, isExportedId, mustHaveLocalBinding, -- * ForAllTyFlags ForAllTyFlag(Invisible,Required,Specified,Inferred), Specificity(..), isVisibleForAllTyFlag, isInvisibleForAllTyFlag, isInferredForAllTyFlag, isSpecifiedForAllTyFlag, coreTyLamForAllTyFlag, -- * FunTyFlag FunTyFlag(..), isVisibleFunArg, isInvisibleFunArg, isFUNArg, mkFunTyFlag, visArg, invisArg, visArgTypeLike, visArgConstraintLike, invisArgTypeLike, invisArgConstraintLike, funTyFlagArgTypeOrConstraint, funTyFlagResultTypeOrConstraint, TypeOrConstraint(..), -- Re-export this: it's an argument of FunTyFlag -- * PiTyBinder PiTyBinder(..), PiTyVarBinder, isInvisiblePiTyBinder, isVisiblePiTyBinder, isTyBinder, isNamedPiTyBinder, isAnonPiTyBinder, namedPiTyBinder_maybe, anonPiTyBinderType_maybe, piTyBinderType, -- * TyVar's VarBndr(..), ForAllTyBinder, TyVarBinder, InvisTyBinder, InvisTVBinder, ReqTyBinder, ReqTVBinder, binderVar, binderVars, binderFlag, binderFlags, binderType, mkForAllTyBinder, mkForAllTyBinders, mkTyVarBinder, mkTyVarBinders, isVisibleForAllTyBinder, isInvisibleForAllTyBinder, isTyVarBinder, tyVarSpecToBinder, tyVarSpecToBinders, tyVarReqToBinder, 
tyVarReqToBinders, mapVarBndr, mapVarBndrs, -- ** ExportFlag ExportFlag(..), -- ** Constructing TyVar's mkTyVar, mkTcTyVar, -- ** Taking 'TyVar's apart tyVarName, tyVarKind, tcTyVarDetails, setTcTyVarDetails, -- ** Modifying 'TyVar's setTyVarName, setTyVarUnique, setTyVarKind, updateTyVarKind, updateTyVarKindM, nonDetCmpVar ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Rep( Type, Kind, Mult, Scaled, scaledThing ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr( pprKind ) import {-# SOURCE #-} GHC.Tc.Utils.TcType( TcTyVarDetails, pprTcTyVarDetails, vanillaSkolemTvUnk ) import {-# SOURCE #-} GHC.Types.Id.Info( IdDetails, IdInfo, coVarDetails, isCoVarDetails, vanillaIdInfo, pprIdDetails ) import {-# SOURCE #-} GHC.Builtin.Types ( manyDataConTy ) import GHC.Types.Name hiding (varName) import GHC.Types.Unique ( Uniquable, Unique, getKey, getUnique , nonDetCmpUnique ) import GHC.Types.Basic( TypeOrConstraint(..) ) import GHC.Utils.Misc import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Hs.Specificity () import Language.Haskell.Syntax.Specificity import Data.Data {- ************************************************************************ * * Synonyms * * ************************************************************************ -- These synonyms are here and not in Id because otherwise we need a very -- large number of SOURCE imports of "GHC.Types.Id" :-( -} -- | Identifier type Id = Var -- A term-level identifier -- predicate: isId -- | Coercion Variable type CoVar = Id -- See Note [Evidence: EvIds and CoVars] -- predicate: isCoVar -- | type NcId = Id -- A term-level (value) variable that is -- /not/ an (unlifted) coercion -- predicate: isNonCoVarId -- | Type or kind Variable type TyVar = Var -- Type *or* kind variable (historical) -- | Type or Kind Variable type TKVar = Var -- Type *or* kind variable (historical) -- | Type variable that might be a metavariable type TcTyVar = Var -- | Type Variable type TypeVar = Var -- 
Definitely a type variable -- | Kind Variable type KindVar = Var -- Definitely a kind variable -- See Note [Kind and type variables] -- See Note [Evidence: EvIds and CoVars] -- | Evidence Identifier type EvId = Id -- Term-level evidence: DictId, IpId, or EqVar -- | Evidence Variable type EvVar = EvId -- ...historical name for EvId -- | Dictionary Function Identifier type DFunId = Id -- A dictionary function -- | Dictionary Identifier type DictId = EvId -- A dictionary variable -- | Implicit parameter Identifier type IpId = EvId -- A term-level implicit parameter -- | Equality Variable type EqVar = EvId -- Boxed equality evidence type JoinId = Id -- A join variable -- | Type or Coercion Variable type TyCoVar = Id -- Type, *or* coercion variable -- predicate: isTyCoVar {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied -} type InVar = Var type InTyVar = TyVar type InCoVar = CoVar type InId = Id type OutVar = Var type OutTyVar = TyVar type OutCoVar = CoVar type OutId = Id {- Note [Evidence: EvIds and CoVars] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * An EvId (evidence Id) is a term-level evidence variable (dictionary, implicit parameter, or equality). Could be boxed or unboxed. * DictId, IpId, and EqVar are synonyms when we know what kind of evidence we are talking about. For example, an EqVar has type (t1 ~ t2). * A CoVar is always an un-lifted coercion, of type (t1 ~# t2) or (t1 ~R# t2) Note [Kind and type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before kind polymorphism, TyVar were used to mean type variables. Now they are used to mean kind *or* type variables. KindVar is used when we know for sure that it is a kind variable. 
In future, we might want to go over the whole compiler code to use: - TKVar to mean kind or type variables - TypeVar to mean type variables only - KindVar to mean kind variables ************************************************************************ * * \subsection{The main data type declarations} * * ************************************************************************ Every @Var@ has a @Unique@, to uniquify it and for fast comparison, a @Type@, and an @IdInfo@ (non-essential info about it, e.g., strictness). The essential info about different kinds of @Vars@ is in its @VarDetails@. -} -- | Variable -- -- Essentially a typed 'Name', that may also contain some additional information -- about the 'Var' and its use sites. data Var = TyVar { -- Type and kind variables -- see Note [Kind and type variables] varName :: !Name, realUnique :: {-# UNPACK #-} !Unique, -- ^ Key for fast comparison -- Identical to the Unique in the name, -- cached here for speed varType :: Kind -- ^ The type or kind of the 'Var' in question } | TcTyVar { -- Used only during type inference -- Used for kind variables during -- inference, as well varName :: !Name, realUnique :: {-# UNPACK #-} !Unique, varType :: Kind, tc_tv_details :: TcTyVarDetails } | Id { varName :: !Name, realUnique :: {-# UNPACK #-} !Unique, varType :: Type, varMult :: Mult, -- See Note [Multiplicity of let binders] idScope :: IdScope, id_details :: IdDetails, -- Stable, doesn't change id_info :: IdInfo } -- Unstable, updated by simplifier -- | Identifier Scope data IdScope -- See Note [GlobalId/LocalId] = GlobalId | LocalId ExportFlag data ExportFlag -- See Note [ExportFlag on binders] = NotExported -- ^ Not exported: may be discarded as dead code. | Exported -- ^ Exported: kept alive {- Note [ExportFlag on binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An ExportFlag of "Exported" on a top-level binder says "keep this binding alive; do not drop it as dead code". 
This transitively keeps alive all the other top-level bindings that this binding refers to. This property is persisted all the way down the pipeline, so that the binding will be compiled all the way to object code, and its symbols will appear in the linker symbol table. However, note that this use of "exported" is quite different to the export list on a Haskell module. Setting the ExportFlag on an Id does /not/ mean that if you import the module (in Haskell source code) you will see this Id. Of course, things that appear in the export list of the source Haskell module do indeed have their ExportFlag set. But many other things, such as dictionary functions, are kept alive by having their ExportFlag set, even though they are not exported in the source-code sense. We should probably use a different term for ExportFlag, like KeepAlive. Note [GlobalId/LocalId] ~~~~~~~~~~~~~~~~~~~~~~~ A GlobalId is * always a constant (top-level) * imported, or data constructor, or primop, or record selector * has a Unique that is globally unique across the whole GHC invocation (a single invocation may compile multiple modules) * never treated as a candidate by the free-variable finder; it's a constant! A LocalId is * bound within an expression (lambda, case, local let(rec)) * or defined at top level in the module being compiled * always treated as a candidate by the free-variable finder In the output of CoreTidy, top level Ids are all GlobalIds, which are then serialised into interface files. Do note however that CorePrep may introduce new LocalIds for local floats (even at the top level). These will be visible in STG and end up in generated code. 
Note [Multiplicity of let binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Core, let-binders' multiplicity is always completely determined by syntax: a recursive let will always have multiplicity Many (it's a prerequisite for being recursive), and non-recursive let doesn't have a conventional multiplicity, instead they act, for the purpose of multiplicity, as an alias for their right-hand side. Therefore, the `varMult` field of identifier is only used by binders in lambda and case expressions. In a let expression the `varMult` field holds an arbitrary value which will (and must!) be ignored. -} instance Outputable Var where ppr var = docWithStyle ppr_code ppr_normal where -- don't display debug info with Code style (#25255) ppr_code = ppr (varName var) ppr_normal sty = sdocOption sdocSuppressVarKinds $ \supp_var_kinds -> getPprDebug $ \debug -> let ppr_var = case var of (TyVar {}) | debug -> brackets (text "tv") (TcTyVar {tc_tv_details = d}) | dumpStyle sty || debug -> brackets (pprTcTyVarDetails d) (Id { idScope = s, id_details = d }) | debug -> brackets (ppr_id_scope s <> pprIdDetails d) _ -> empty in if | debug && (not supp_var_kinds) -> parens (ppr (varName var) <+> ppr (varMultMaybe var) <+> ppr_var <+> dcolon <+> pprKind (tyVarKind var)) | otherwise -> ppr (varName var) <> ppr_var ppr_id_scope :: IdScope -> SDoc ppr_id_scope GlobalId = text "gid" ppr_id_scope (LocalId Exported) = text "lidx" ppr_id_scope (LocalId NotExported) = text "lid" instance NamedThing Var where getName = varName instance Uniquable Var where getUnique = varUnique instance Eq Var where a == b = realUnique a == realUnique b instance Ord Var where a <= b = getKey (realUnique a) <= getKey (realUnique b) a < b = getKey (realUnique a) < getKey (realUnique b) a >= b = getKey (realUnique a) >= getKey (realUnique b) a > b = getKey (realUnique a) > getKey (realUnique b) a `compare` b = a `nonDetCmpVar` b -- | Compare Vars by their Uniques. 
-- This is what Ord Var does, provided here to make it explicit at the -- call-site that it can introduce non-determinism. -- See Note [Unique Determinism] nonDetCmpVar :: Var -> Var -> Ordering nonDetCmpVar a b = varUnique a `nonDetCmpUnique` varUnique b instance Data Var where -- don't traverse? toConstr _ = abstractConstr "Var" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Var" instance HasOccName Var where occName = nameOccName . varName varUnique :: Var -> Unique varUnique var = realUnique var varMultMaybe :: Id -> Maybe Mult varMultMaybe (Id { varMult = mult }) = Just mult varMultMaybe _ = Nothing setVarUnique :: Var -> Unique -> Var setVarUnique var uniq = var { realUnique = uniq, varName = setNameUnique (varName var) uniq } setVarName :: Var -> Name -> Var setVarName var new_name = var { realUnique = getUnique new_name, varName = new_name } setVarType :: Var -> Type -> Var setVarType id ty = id { varType = ty } -- | Update a 'Var's type. Does not update the /multiplicity/ -- stored in an 'Id', if any. Because of the possibility for -- abuse, ASSERTs that there is no multiplicity to update. updateVarType :: (Type -> Type) -> Var -> Var updateVarType upd var = case var of Id { id_details = details } -> assert (isCoVarDetails details) $ result _ -> result where result = var { varType = upd (varType var) } -- | Update a 'Var's type monadically. Does not update the /multiplicity/ -- stored in an 'Id', if any. Because of the possibility for -- abuse, ASSERTs that there is no multiplicity to update. 
updateVarTypeM :: Monad m => (Type -> m Type) -> Var -> m Var updateVarTypeM upd var = case var of Id { id_details = details } -> assert (isCoVarDetails details) $ result _ -> result where result = do { ty' <- upd (varType var) ; return (var { varType = ty' }) } {- ********************************************************************* * * * FunTyFlag * * ********************************************************************* -} -- | The non-dependent version of 'ForAllTyFlag'. -- See Note [FunTyFlag] -- Appears here partly so that it's together with its friends ForAllTyFlag -- and ForallVisFlag, but also because it is used in IfaceType, rather -- early in the compilation chain data FunTyFlag = FTF_T_T -- (->) Type -> Type | FTF_T_C -- (-=>) Type -> Constraint | FTF_C_T -- (=>) Constraint -> Type | FTF_C_C -- (==>) Constraint -> Constraint deriving (Eq, Ord, Data) instance Outputable FunTyFlag where ppr FTF_T_T = text "[->]" ppr FTF_T_C = text "[-=>]" ppr FTF_C_T = text "[=>]" ppr FTF_C_C = text "[==>]" instance Binary FunTyFlag where put_ bh FTF_T_T = putByte bh 0 put_ bh FTF_T_C = putByte bh 1 put_ bh FTF_C_T = putByte bh 2 put_ bh FTF_C_C = putByte bh 3 get bh = do h <- getByte bh case h of 0 -> return FTF_T_T 1 -> return FTF_T_C 2 -> return FTF_C_T _ -> return FTF_C_C mkFunTyFlag :: TypeOrConstraint -> TypeOrConstraint -> FunTyFlag mkFunTyFlag TypeLike torc = visArg torc mkFunTyFlag ConstraintLike torc = invisArg torc visArg :: TypeOrConstraint -> FunTyFlag visArg TypeLike = FTF_T_T visArg ConstraintLike = FTF_T_C visArgTypeLike :: FunTyFlag visArgTypeLike = FTF_T_T visArgConstraintLike :: FunTyFlag visArgConstraintLike = FTF_T_C invisArg :: TypeOrConstraint -> FunTyFlag invisArg TypeLike = FTF_C_T invisArg ConstraintLike = FTF_C_C invisArgTypeLike :: FunTyFlag invisArgTypeLike = FTF_C_T invisArgConstraintLike :: FunTyFlag invisArgConstraintLike = FTF_C_C isInvisibleFunArg :: FunTyFlag -> Bool isInvisibleFunArg af = not (isVisibleFunArg af) isVisibleFunArg :: 
FunTyFlag -> Bool isVisibleFunArg FTF_T_T = True isVisibleFunArg FTF_T_C = True isVisibleFunArg _ = False isFUNArg :: FunTyFlag -> Bool -- This one, FUN, or (->), has an extra multiplicity argument isFUNArg FTF_T_T = True isFUNArg _ = False funTyFlagArgTypeOrConstraint :: FunTyFlag -> TypeOrConstraint -- Whether it /takes/ a type or a constraint funTyFlagArgTypeOrConstraint FTF_T_T = TypeLike funTyFlagArgTypeOrConstraint FTF_T_C = TypeLike funTyFlagArgTypeOrConstraint _ = ConstraintLike funTyFlagResultTypeOrConstraint :: FunTyFlag -> TypeOrConstraint -- Whether it /returns/ a type or a constraint funTyFlagResultTypeOrConstraint FTF_T_T = TypeLike funTyFlagResultTypeOrConstraint FTF_C_T = TypeLike funTyFlagResultTypeOrConstraint _ = ConstraintLike {- Note [FunTyFlag] ~~~~~~~~~~~~~~~~~~~~~ FunTyFlag is used principally in the FunTy constructor of Type. FunTy FTF_T_T t1 t2 means t1 -> t2 FunTy FTF_C_T t1 t2 means t1 => t2 FunTy FTF_T_C t1 t2 means t1 -=> t2 FunTy FTF_C_C t1 t2 means t1 ==> t2 However, the FunTyFlag in a FunTy is just redundant, cached information. In (FunTy { ft_af = af, ft_arg = t1, ft_res = t2 }) --------------------------------------------- (isPredTy t1) (isPredTy ty) FunTyFlag --------------------------------------------- False False FTF_T_T False True FTF_T_C True False FTF_C_T True True FTF_C_C where isPredTy is defined in GHC.Core.Type, and sees if t1's kind is Constraint. See GHC.Core.Type.chooseFunTyFlag, and GHC.Core.TyCo.Rep Note [Types for coercions, predicates, and evidence] The term (Lam b e) donesn't carry an FunTyFlag; instead it uses mkFunctionType when we want to get its types; see mkLamType. This is just an engineering choice; we could cache here too if we wanted. Why bother with all this? After all, we are in Core, where (=>) and (->) behave the same. 
We maintain this distinction throughout Core so that we can cheaply and conveniently determine * How to print a type * How to split up a type: tcSplitSigmaTy * How to specialise it (over type classes; GHC.Core.Opt.Specialise) For the specialisation point, consider (\ (d :: Ord a). blah). We want to give it type (Ord a => blah_ty) with a fat arrow; that is, using mkInvisFunTy, not mkVisFunTy. Why? Because the /specialiser/ treats dictionary arguments specially. Suppose we do w/w on 'foo', thus (#11272, #6056) foo :: Ord a => Int -> blah foo a d x = case x of I# x' -> $wfoo @a d x' $wfoo :: Ord a => Int# -> blah Now, at a call we see (foo @Int dOrdInt). The specialiser will specialise this to $sfoo, where $sfoo :: Int -> blah $sfoo x = case x of I# x' -> $wfoo @Int dOrdInt x' Now we /must/ also specialise $wfoo! But it wasn't user-written, and has a type built with mkLamTypes. Conclusion: the easiest thing is to make mkLamType build (c => ty) when the argument is a predicate type. See GHC.Core.TyCo.Rep Note [Types for coercions, predicates, and evidence] -} {- ********************************************************************* * * * VarBndr, ForAllTyBinder * * ********************************************************************* -} {- Note [The VarBndr type and its uses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ VarBndr is polymorphic in both var and visibility fields. 
Currently there are nine different uses of 'VarBndr': * Var.ForAllTyBinder = VarBndr TyCoVar ForAllTyFlag Binder of a forall-type; see ForAllTy in GHC.Core.TyCo.Rep * Var.TyVarBinder = VarBndr TyVar ForAllTyFlag Subset of ForAllTyBinder when we are sure the binder is a TyVar * Var.InvisTVBinder = VarBndr TyVar Specificity Specialised form of TyVarBinder, when ForAllTyFlag = Invisible s See GHC.Core.Type.splitForAllInvisTVBinders * Var.ReqTVBinder = VarBndr TyVar () Specialised form of TyVarBinder, when ForAllTyFlag = Required See GHC.Core.Type.splitForAllReqTVBinders This one is barely used * TyCon.TyConBinder = VarBndr TyVar TyConBndrVis Binders of a TyCon; see TyCon in GHC.Core.TyCon * IfaceType.IfaceForAllBndr = VarBndr IfaceBndr ForAllTyFlag * IfaceType.IfaceForAllSpecBndr = VarBndr IfaceBndr Specificity * IfaceType.IfaceTyConBinder = VarBndr IfaceBndr TyConBndrVis -} data VarBndr var argf = Bndr var argf -- See Note [The VarBndr type and its uses] deriving( Data, Eq, Ord) -- | Variable Binder -- -- A 'ForAllTyBinder' is the binder of a ForAllTy -- It's convenient to define this synonym here rather its natural -- home in "GHC.Core.TyCo.Rep", because it's used in GHC.Core.DataCon.hs-boot -- -- A 'TyVarBinder' is a binder with only TyVar type ForAllTyBinder = VarBndr TyCoVar ForAllTyFlag type InvisTyBinder = VarBndr TyCoVar Specificity type ReqTyBinder = VarBndr TyCoVar () type TyVarBinder = VarBndr TyVar ForAllTyFlag type InvisTVBinder = VarBndr TyVar Specificity type ReqTVBinder = VarBndr TyVar () tyVarSpecToBinders :: [VarBndr a Specificity] -> [VarBndr a ForAllTyFlag] tyVarSpecToBinders = map tyVarSpecToBinder tyVarSpecToBinder :: VarBndr a Specificity -> VarBndr a ForAllTyFlag tyVarSpecToBinder (Bndr tv vis) = Bndr tv (Invisible vis) tyVarReqToBinders :: [VarBndr a ()] -> [VarBndr a ForAllTyFlag] tyVarReqToBinders = map tyVarReqToBinder tyVarReqToBinder :: VarBndr a () -> VarBndr a ForAllTyFlag tyVarReqToBinder (Bndr tv _) = Bndr tv Required 
isVisibleForAllTyBinder :: ForAllTyBinder -> Bool isVisibleForAllTyBinder (Bndr _ vis) = isVisibleForAllTyFlag vis isInvisibleForAllTyBinder :: ForAllTyBinder -> Bool isInvisibleForAllTyBinder (Bndr _ vis) = isInvisibleForAllTyFlag vis binderVar :: VarBndr tv argf -> tv binderVar (Bndr v _) = v binderVars :: [VarBndr tv argf] -> [tv] binderVars tvbs = map binderVar tvbs binderFlag :: VarBndr tv argf -> argf binderFlag (Bndr _ argf) = argf binderFlags :: [VarBndr tv argf] -> [argf] binderFlags tvbs = map binderFlag tvbs binderType :: VarBndr TyCoVar argf -> Type binderType (Bndr tv _) = varType tv isTyVarBinder :: VarBndr TyCoVar vis -> Bool isTyVarBinder (Bndr tcv _) = isTyVar tcv -- | Make a named binder mkForAllTyBinder :: vis -> TyCoVar -> VarBndr TyCoVar vis mkForAllTyBinder vis var = Bndr var vis -- | Make a named binder -- 'var' should be a type variable mkTyVarBinder :: vis -> TyVar -> VarBndr TyVar vis mkTyVarBinder vis var = assert (isTyVar var) $ Bndr var vis -- | Make many named binders mkForAllTyBinders :: vis -> [TyCoVar] -> [VarBndr TyCoVar vis] mkForAllTyBinders vis = map (mkForAllTyBinder vis) -- | Make many named binders -- Input vars should be type variables mkTyVarBinders :: vis -> [TyVar] -> [VarBndr TyVar vis] mkTyVarBinders vis = map (mkTyVarBinder vis) mapVarBndr :: (var -> var') -> (VarBndr var flag) -> (VarBndr var' flag) mapVarBndr f (Bndr v fl) = Bndr (f v) fl mapVarBndrs :: (var -> var') -> [VarBndr var flag] -> [VarBndr var' flag] mapVarBndrs f = map (mapVarBndr f) instance Outputable tv => Outputable (VarBndr tv ForAllTyFlag) where ppr (Bndr v Required) = ppr v ppr (Bndr v Specified) = char '@' <> ppr v ppr (Bndr v Inferred) = braces (ppr v) instance Outputable tv => Outputable (VarBndr tv Specificity) where ppr = ppr . 
tyVarSpecToBinder instance (Binary tv, Binary vis) => Binary (VarBndr tv vis) where put_ bh (Bndr tv vis) = do { put_ bh tv; put_ bh vis } get bh = do { tv <- get bh; vis <- get bh; return (Bndr tv vis) } instance NamedThing tv => NamedThing (VarBndr tv flag) where getName (Bndr tv _) = getName tv {- ********************************************************************** * * PiTyBinder * * ********************************************************************** -} -- | A 'PiTyBinder' represents an argument to a function. PiTyBinders can be -- dependent ('Named') or nondependent ('Anon'). They may also be visible or -- not. See Note [PiTyBinders] data PiTyBinder = Named ForAllTyBinder -- A type-lambda binder, with a ForAllTyFlag | Anon (Scaled Type) FunTyFlag -- A term-lambda binder. Type here can be CoercionTy. -- The arrow is described by the FunTyFlag deriving Data instance Outputable PiTyBinder where ppr (Anon ty af) = ppr af <+> ppr ty ppr (Named (Bndr v Required)) = ppr v ppr (Named (Bndr v Specified)) = char '@' <> ppr v ppr (Named (Bndr v Inferred)) = braces (ppr v) -- | 'PiTyVarBinder' is like 'PiTyBinder', but there can only be 'TyVar' -- in the 'Named' field. type PiTyVarBinder = PiTyBinder -- | Does this binder bind an invisible argument? isInvisiblePiTyBinder :: PiTyBinder -> Bool isInvisiblePiTyBinder (Named (Bndr _ vis)) = isInvisibleForAllTyFlag vis isInvisiblePiTyBinder (Anon _ af) = isInvisibleFunArg af -- | Does this binder bind a visible argument? isVisiblePiTyBinder :: PiTyBinder -> Bool isVisiblePiTyBinder = not . isInvisiblePiTyBinder isNamedPiTyBinder :: PiTyBinder -> Bool isNamedPiTyBinder (Named {}) = True isNamedPiTyBinder (Anon {}) = False namedPiTyBinder_maybe :: PiTyBinder -> Maybe TyCoVar namedPiTyBinder_maybe (Named tv) = Just $ binderVar tv namedPiTyBinder_maybe _ = Nothing -- | Does this binder bind a variable that is /not/ erased? Returns -- 'True' for anonymous binders. 
isAnonPiTyBinder :: PiTyBinder -> Bool isAnonPiTyBinder (Named {}) = False isAnonPiTyBinder (Anon {}) = True -- | Extract a relevant type, if there is one. anonPiTyBinderType_maybe :: PiTyBinder -> Maybe Type anonPiTyBinderType_maybe (Named {}) = Nothing anonPiTyBinderType_maybe (Anon ty _) = Just (scaledThing ty) -- | If its a named binder, is the binder a tyvar? -- Returns True for nondependent binder. -- This check that we're really returning a *Ty*Binder (as opposed to a -- coercion binder). That way, if/when we allow coercion quantification -- in more places, we'll know we missed updating some function. isTyBinder :: PiTyBinder -> Bool isTyBinder (Named bnd) = isTyVarBinder bnd isTyBinder _ = True piTyBinderType :: PiTyBinder -> Type piTyBinderType (Named (Bndr tv _)) = varType tv piTyBinderType (Anon ty _) = scaledThing ty {- Note [PiTyBinders] ~~~~~~~~~~~~~~~~~~~ But a type like forall a. Maybe a -> forall b. (a,b) -> b can be decomposed to a telescope of type [PiTyBinder], using splitPiTys. That function splits off all leading foralls and arrows, giving ([Named a, Anon (Maybe a), Named b, Anon (a,b)], b) A PiTyBinder represents the type of binders -- that is, the type of an argument to a Pi-type. GHC Core currently supports two different Pi-types: * Anon ty1 fun_flag: a non-dependent function type, written with ->, e.g. ty1 -> ty2 represented as FunTy ty1 ty2. These are lifted to Coercions with the corresponding FunCo. * Named (Var tv forall_flag) A dependent compile-time-only polytype, written with forall, e.g. forall (a:*). ty represented as ForAllTy (Bndr a v) ty Both forms of Pi-types classify terms/types that take an argument. In other words, if `x` is either a function or a polytype, `x arg` makes sense (for an appropriate `arg`). Wrinkles * The Anon constructor of PiTyBinder contains a FunTyFlag. Since the PiTyBinder really only describes the /argument/ it should perhaps only have a TypeOrConstraint rather than a full FunTyFlag. 
But it's very convenient to have the full FunTyFlag, say in mkPiTys, so that's what we do. Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * A ForAllTy (used for both types and kinds) contains a ForAllTyBinder. Each ForAllTyBinder Bndr a tvis is equipped with tvis::ForAllTyFlag, which says whether or not arguments for this binder should be visible (explicit) in source Haskell. * A TyCon contains a list of TyConBinders. Each TyConBinder Bndr a cvis is equipped with cvis::TyConBndrVis, which says whether or not type and kind arguments for this TyCon should be visible (explicit) in source Haskell. This table summarises the visibility rules: --------------------------------------------------------------------------------------- | Occurrences look like this | GHC displays type as in Haskell source code |-------------------------------------------------------------------------------------- | Bndr a tvis :: ForAllTyBinder, in the binder of ForAllTy for a term | tvis :: ForAllTyFlag | tvis = Inferred: f :: forall {a}. type Arg not allowed: f f :: forall {co}. type Arg not allowed: f | tvis = Specified: f :: forall a. type Arg optional: f or f @Int | tvis = Required: f :: forall k -> type Arg required: f (type Int) | | Bndr k cvis :: TyConBinder, in the TyConBinders of a TyCon | cvis :: TyConBndrVis | cvis = AnonTCB: T :: kind -> kind Required: T * | cvis = NamedTCB Inferred: T :: forall {k}. kind Arg not allowed: T | T :: forall {co}. kind Arg not allowed: T | cvis = NamedTCB Specified: T :: forall k. kind Arg not allowed[1]: T | cvis = NamedTCB Required: T :: forall k -> kind Required: T * --------------------------------------------------------------------------------------- [1] In types, in the Specified case, it would make sense to allow optional kind applications, thus (T @*), but we have not yet implemented that ---- In term declarations ---- * Inferred. 
Function defn, with no signature: f1 x = x We infer f1 :: forall {a}. a -> a, with 'a' Inferred It's Inferred because it doesn't appear in any user-written signature for f1 * Specified. Function defn, with signature (implicit forall): f2 :: a -> a; f2 x = x So f2 gets the type f2 :: forall a. a -> a, with 'a' Specified even though 'a' is not bound in the source code by an explicit forall * Specified. Function defn, with signature (explicit forall): f3 :: forall a. a -> a; f3 x = x So f3 gets the type f3 :: forall a. a -> a, with 'a' Specified * Required. Function defn, with signature (explicit forall): f4 :: forall a -> a -> a; f4 (type _) x = x So f4 gets the type f4 :: forall a -> a -> a, with 'a' Required This is the experimental RequiredTypeArguments extension, see GHC Proposal #281 "Visible forall in types of terms" * Inferred. Function defn, with signature (explicit forall), marked as inferred: f5 :: forall {a}. a -> a; f5 x = x So f5 gets the type f5 :: forall {a}. a -> a, with 'a' Inferred It's Inferred because the user marked it as such, even though it does appear in the user-written signature for f5 * Inferred/Specified. Function signature with inferred kind polymorphism. f6 :: a b -> Int So 'f6' gets the type f6 :: forall {k} (a :: k -> Type) (b :: k). a b -> Int Here 'k' is Inferred (it's not mentioned in the type), but 'a' and 'b' are Specified. * Specified. Function signature with explicit kind polymorphism f7 :: a (b :: k) -> Int This time 'k' is Specified, because it is mentioned explicitly, so we get f7 :: forall (k :: Type) (a :: k -> Type) (b :: k). a b -> Int * Similarly pattern synonyms: Inferred - from inferred types (e.g. no pattern type signature) - or from inferred kind polymorphism ---- In type declarations ---- * Inferred (k) data T1 a b = MkT1 (a b) Here T1's kind is T1 :: forall {k:*}. 
(k->*) -> k -> * The kind variable 'k' is Inferred, since it is not mentioned Note that 'a' and 'b' correspond to /Anon/ PiTyBinders in T1's kind, and Anon binders don't have a visibility flag. (Or you could think of Anon having an implicit Required flag.) * Specified (k) data T2 (a::k->*) b = MkT (a b) Here T's kind is T :: forall (k:*). (k->*) -> k -> * The kind variable 'k' is Specified, since it is mentioned in the signature. * Required (k) data T k (a::k->*) b = MkT (a b) Here T's kind is T :: forall k:* -> (k->*) -> k -> * The kind is Required, since it bound in a positional way in T's declaration Every use of T must be explicitly applied to a kind * Inferred (k1), Specified (k) data T a b (c :: k) = MkT (a b) (Proxy c) Here T's kind is T :: forall {k1:*} (k:*). (k1->*) -> k1 -> k -> * So 'k' is Specified, because it appears explicitly, but 'k1' is Inferred, because it does not Generally, in the list of TyConBinders for a TyCon, * Inferred arguments always come first * Specified, Anon and Required can be mixed e.g. data Foo (a :: Type) :: forall b. (a -> b -> Type) -> Type where ... Here Foo's TyConBinders are [Required 'a', Specified 'b', Anon] and its kind prints as Foo :: forall a -> forall b. (a -> b -> Type) -> Type See also Note [Required, Specified, and Inferred for types] in GHC.Tc.TyCl ---- Printing ----- We print forall types with enough syntax to tell you their visibility flag. But this is not source Haskell, and these types may not all be parsable. Specified: a list of Specified binders is written between `forall` and `.`: const :: forall a b. a -> b -> a Inferred: like Specified, but every binder is written in braces: f :: forall {k} (a :: k). S k a -> Int Required: binders are put between `forall` and `->`: T :: forall k -> * ---- Other points ----- * In classic Haskell, all named binders (that is, the type variables in a polymorphic function type f :: forall a. a -> a) have been Inferred. 
* Inferred variables correspond to "generalized" variables from the Visible Type Applications paper (ESOP'16). -} {- ************************************************************************ * * * Type and kind variables * * * ************************************************************************ -} tyVarName :: TyVar -> Name tyVarName = varName tyVarKind :: TyVar -> Kind tyVarKind = varType setTyVarUnique :: TyVar -> Unique -> TyVar setTyVarUnique = setVarUnique setTyVarName :: TyVar -> Name -> TyVar setTyVarName = setVarName setTyVarKind :: TyVar -> Kind -> TyVar setTyVarKind tv k = tv {varType = k} updateTyVarKind :: (Kind -> Kind) -> TyVar -> TyVar updateTyVarKind update tv = tv {varType = update (tyVarKind tv)} updateTyVarKindM :: (Monad m) => (Kind -> m Kind) -> TyVar -> m TyVar updateTyVarKindM update tv = do { k' <- update (tyVarKind tv) ; return $ tv {varType = k'} } mkTyVar :: Name -> Kind -> TyVar mkTyVar name kind = TyVar { varName = name , realUnique = nameUnique name , varType = kind } mkTcTyVar :: Name -> Kind -> TcTyVarDetails -> TyVar mkTcTyVar name kind details = -- NB: 'kind' may be a coercion kind; cf, 'GHC.Tc.Utils.TcMType.newMetaCoVar' TcTyVar { varName = name, realUnique = nameUnique name, varType = kind, tc_tv_details = details } tcTyVarDetails :: TyVar -> TcTyVarDetails -- See Note [TcTyVars and TyVars in the typechecker] in GHC.Tc.Utils.TcType tcTyVarDetails (TcTyVar { tc_tv_details = details }) = details -- MP: This should never happen, but it does. Future work is to turn this into a panic. 
tcTyVarDetails (TyVar {}) = vanillaSkolemTvUnk tcTyVarDetails var = pprPanic "tcTyVarDetails" (ppr var <+> dcolon <+> pprKind (tyVarKind var)) setTcTyVarDetails :: TyVar -> TcTyVarDetails -> TyVar setTcTyVarDetails tv details = tv { tc_tv_details = details } {- %************************************************************************ %* * \subsection{Ids} * * ************************************************************************ -} idInfo :: HasDebugCallStack => Id -> IdInfo idInfo (Id { id_info = info }) = info idInfo other = pprPanic "idInfo" (ppr other) idDetails :: Id -> IdDetails idDetails (Id { id_details = details }) = details idDetails other = pprPanic "idDetails" (ppr other) -- The next three have a 'Var' suffix even though they always build -- Ids, because "GHC.Types.Id" uses 'mkGlobalId' etc with different types mkGlobalVar :: IdDetails -> Name -> Type -> IdInfo -> Id mkGlobalVar details name ty info = mk_id name manyDataConTy ty GlobalId details info -- There is no support for linear global variables yet. They would require -- being checked at link-time, which can be useful, but is not a priority. mkLocalVar :: IdDetails -> Name -> Mult -> Type -> IdInfo -> Id mkLocalVar details name w ty info = mk_id name w ty (LocalId NotExported) details info mkCoVar :: Name -> Type -> CoVar -- Coercion variables have no IdInfo mkCoVar name ty = mk_id name manyDataConTy ty (LocalId NotExported) coVarDetails vanillaIdInfo -- | Exported 'Var's will not be removed as dead code mkExportedLocalVar :: IdDetails -> Name -> Type -> IdInfo -> Id mkExportedLocalVar details name ty info = mk_id name manyDataConTy ty (LocalId Exported) details info -- There is no support for exporting linear variables. 
See also [mkGlobalVar] mk_id :: Name -> Mult -> Type -> IdScope -> IdDetails -> IdInfo -> Id mk_id name !w ty scope details info = Id { varName = name, realUnique = nameUnique name, varMult = w, varType = ty, idScope = scope, id_details = details, id_info = info } ------------------- lazySetIdInfo :: Id -> IdInfo -> Var lazySetIdInfo id info = id { id_info = info } setIdDetails :: Id -> IdDetails -> Id setIdDetails id details = id { id_details = details } globaliseId :: Id -> Id -- ^ If it's a local, make it global globaliseId id = id { idScope = GlobalId } setIdExported :: Id -> Id -- ^ Exports the given local 'Id'. Can also be called on global 'Id's, such as data constructors -- and class operations, which are born as global 'Id's and automatically exported setIdExported id@(Id { idScope = LocalId {} }) = id { idScope = LocalId Exported } setIdExported id@(Id { idScope = GlobalId }) = id setIdExported tv = pprPanic "setIdExported" (ppr tv) setIdNotExported :: Id -> Id -- ^ We can only do this to LocalIds setIdNotExported id = assert (isLocalId id) $ id { idScope = LocalId NotExported } ----------------------- updateIdTypeButNotMult :: (Type -> Type) -> Id -> Id updateIdTypeButNotMult f id = id { varType = f (varType id) } updateIdTypeAndMult :: (Type -> Type) -> Id -> Id updateIdTypeAndMult f id@(Id { varType = ty , varMult = mult }) = id { varType = ty' , varMult = mult' } where !ty' = f ty !mult' = f mult updateIdTypeAndMult _ other = pprPanic "updateIdTypeAndMult" (ppr other) updateIdTypeAndMultM :: Monad m => (Type -> m Type) -> Id -> m Id updateIdTypeAndMultM f id@(Id { varType = ty , varMult = mult }) = do { !ty' <- f ty ; !mult' <- f mult ; return (id { varType = ty', varMult = mult' }) } updateIdTypeAndMultM _ other = pprPanic "updateIdTypeAndMultM" (ppr other) setIdMult :: Id -> Mult -> Id setIdMult id !r | isId id = id { varMult = r } | otherwise = pprPanic "setIdMult" (ppr id <+> ppr r) {- 
************************************************************************ * * \subsection{Predicates over variables} * * ************************************************************************ -} -- | Is this a type-level (i.e., computationally irrelevant, thus erasable) -- variable? Satisfies @isTyVar = not . isId@. isTyVar :: Var -> Bool -- True of both TyVar and TcTyVar isTyVar (TyVar {}) = True isTyVar (TcTyVar {}) = True isTyVar _ = False isTcTyVar :: Var -> Bool -- True of TcTyVar only isTcTyVar (TcTyVar {}) = True isTcTyVar _ = False isTyCoVar :: Var -> Bool isTyCoVar v = isTyVar v || isCoVar v -- | Is this a value-level (i.e., computationally relevant) 'Id'entifier? -- Satisfies @isId = not . isTyVar@. isId :: Var -> Bool isId (Id {}) = True isId _ = False -- | Is this a coercion variable? -- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@. isCoVar :: Var -> Bool isCoVar (Id { id_details = details }) = isCoVarDetails details isCoVar _ = False -- | Is this a term variable ('Id') that is /not/ a coercion variable? -- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@. isNonCoVarId :: Var -> Bool isNonCoVarId (Id { id_details = details }) = not (isCoVarDetails details) isNonCoVarId _ = False isLocalId :: Var -> Bool isLocalId (Id { idScope = LocalId _ }) = True isLocalId _ = False isLocalId_maybe :: Var -> Maybe ExportFlag isLocalId_maybe (Id { idScope = LocalId ef }) = Just ef isLocalId_maybe _ = Nothing -- | 'isLocalVar' returns @True@ for type variables as well as local 'Id's -- These are the variables that we need to pay attention to when finding free -- variables, or doing dependency analysis. isLocalVar :: Var -> Bool isLocalVar v = not (isGlobalId v) isGlobalId :: Var -> Bool isGlobalId (Id { idScope = GlobalId }) = True isGlobalId _ = False -- | 'mustHaveLocalBinding' returns @True@ of 'Id's and 'TyVar's -- that must have a binding in this module. 
The converse -- is not quite right: there are some global 'Id's that must have -- bindings, such as record selectors. But that doesn't matter, -- because it's only used for assertions mustHaveLocalBinding :: Var -> Bool mustHaveLocalBinding var = isLocalVar var -- | 'isExportedIdVar' means \"don't throw this away\" isExportedId :: Var -> Bool isExportedId (Id { idScope = GlobalId }) = True isExportedId (Id { idScope = LocalId Exported}) = True isExportedId _ = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Var.hs-boot0000644000000000000000000000052407346545000021040 0ustar0000000000000000{-# LANGUAGE NoPolyKinds #-} module GHC.Types.Var where import {-# SOURCE #-} GHC.Types.Name import Language.Haskell.Syntax.Specificity (Specificity) data FunTyFlag data Var instance NamedThing Var data VarBndr var argf type TyVar = Var type Id = Var type TyCoVar = Id type TcTyVar = Var type InvisTVBinder = VarBndr TyVar Specificity ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Var/0000755000000000000000000000000007346545000017542 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Var/Env.hs0000644000000000000000000006322207346545000020633 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Types.Var.Env ( -- * Var, Id and TyVar environments (maps) VarEnv, IdEnv, TyVarEnv, CoVarEnv, TyCoVarEnv, -- ** Manipulating these environments emptyVarEnv, unitVarEnv, mkVarEnv, mkVarEnv_Directly, elemVarEnv, disjointVarEnv, anyVarEnv, extendVarEnv, extendVarEnv_C, extendVarEnv_Acc, extendVarEnvList, plusVarEnv, plusVarEnv_C, plusVarEnv_CD, plusMaybeVarEnv_C, plusVarEnvList, alterVarEnv, delVarEnvList, delVarEnv, minusVarEnv, lookupVarEnv, lookupVarEnv_NF, lookupWithDefaultVarEnv, lookupVarEnv_Directly, mapVarEnv, zipVarEnv, modifyVarEnv, modifyVarEnv_Directly, isEmptyVarEnv, elemVarEnvByKey, filterVarEnv, restrictVarEnv, partitionVarEnv, varEnvDomain, 
nonDetStrictFoldVarEnv_Directly, -- * Deterministic Var environments (maps) DVarEnv, DIdEnv, DTyVarEnv, -- ** Manipulating these environments emptyDVarEnv, mkDVarEnv, dVarEnvElts, extendDVarEnv, extendDVarEnv_C, extendDVarEnvList, lookupDVarEnv, elemDVarEnv, isEmptyDVarEnv, foldDVarEnv, nonDetStrictFoldDVarEnv, mapDVarEnv, filterDVarEnv, modifyDVarEnv, alterDVarEnv, plusDVarEnv, plusDVarEnv_C, unitDVarEnv, delDVarEnv, delDVarEnvList, minusDVarEnv, partitionDVarEnv, anyDVarEnv, -- * The InScopeSet type InScopeSet(..), -- ** Operations on InScopeSets emptyInScopeSet, mkInScopeSet, mkInScopeSetList, delInScopeSet, extendInScopeSet, extendInScopeSetList, extendInScopeSetSet, getInScopeVars, lookupInScope, lookupInScope_Directly, unionInScope, elemInScopeSet, uniqAway, varSetInScope, unsafeGetFreshLocalUnique, -- * The RnEnv2 type RnEnv2, -- ** Operations on RnEnv2s mkRnEnv2, rnBndr2, rnBndrs2, rnBndr2_var, rnOccL, rnOccR, inRnEnvL, inRnEnvR, anyInRnEnvR, rnOccL_maybe, rnOccR_maybe, rnBndrL, rnBndrR, nukeRnEnvL, nukeRnEnvR, rnSwap, delBndrL, delBndrR, delBndrsL, delBndrsR, extendRnInScopeSetList, rnEtaL, rnEtaR, rnInScope, rnInScopeSet, lookupRnInScope, rnEnvL, rnEnvR, -- * TidyEnv and its operation TidyEnv, emptyTidyEnv, mkEmptyTidyEnv, delTidyEnvList ) where import GHC.Prelude import qualified GHC.Data.Word64Map.Strict as Word64Map -- TODO: Move this to UniqFM import GHC.Types.Name.Occurrence import GHC.Types.Name import GHC.Types.Var as Var import GHC.Types.Var.Set import GHC.Data.Graph.UnVar -- UnVarSet import GHC.Types.Unique.Set import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Types.Unique import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.Maybe import GHC.Utils.Outputable {- ************************************************************************ * * In-scope sets * * ************************************************************************ -} -- | A set of variables that are in scope at some point. 
-- -- Note that this is a /superset/ of the variables that are currently in scope. -- See Note [The InScopeSet invariant]. -- -- "Secrets of the Glasgow Haskell Compiler inliner" Section 3.2 provides -- the motivation for this abstraction. newtype InScopeSet = InScope VarSet -- Note [Lookups in in-scope set] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We store a VarSet here, but we use this for lookups rather than just -- membership tests. Typically the InScopeSet contains the canonical -- version of the variable (e.g. with an informative unfolding), so this -- lookup is useful (see, for instance, Note [In-scope set as a -- substitution]). -- Note [The InScopeSet invariant] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The InScopeSet must include every in-scope variable, but it may also -- include other variables. -- Its principal purpose is to provide a set of variables to be avoided -- when creating a fresh identifier (fresh in the sense that it does not -- "shadow" any in-scope binding). To do this we simply have to find one that -- does not appear in the InScopeSet. This is done by the key function -- GHC.Types.Var.Env.uniqAway. -- See "Secrets of the Glasgow Haskell Compiler inliner" Section 3.2 -- for more detailed motivation. #20419 has further discussion. instance Outputable InScopeSet where ppr (InScope s) = text "InScope" <+> braces (fsep (map (ppr . 
Var.varName) (nonDetEltsUniqSet s))) -- It's OK to use nonDetEltsUniqSet here because it's -- only for pretty printing -- In-scope sets get big, and with -dppr-debug -- the output is overwhelming emptyInScopeSet :: InScopeSet emptyInScopeSet = InScope emptyVarSet getInScopeVars :: InScopeSet -> VarSet getInScopeVars (InScope vs) = vs mkInScopeSet :: VarSet -> InScopeSet mkInScopeSet in_scope = InScope in_scope mkInScopeSetList :: [Var] -> InScopeSet mkInScopeSetList vs = InScope (mkVarSet vs) extendInScopeSet :: InScopeSet -> Var -> InScopeSet extendInScopeSet (InScope in_scope) v = InScope (extendVarSet in_scope v) extendInScopeSetList :: InScopeSet -> [Var] -> InScopeSet extendInScopeSetList (InScope in_scope) vs = InScope $ foldl' extendVarSet in_scope vs extendInScopeSetSet :: InScopeSet -> VarSet -> InScopeSet extendInScopeSetSet (InScope in_scope) vs = InScope (in_scope `unionVarSet` vs) delInScopeSet :: InScopeSet -> Var -> InScopeSet delInScopeSet (InScope in_scope) v = InScope (in_scope `delVarSet` v) elemInScopeSet :: Var -> InScopeSet -> Bool elemInScopeSet v (InScope in_scope) = v `elemVarSet` in_scope -- | Look up a variable the 'InScopeSet'. This lets you map from -- the variable's identity (unique) to its full value. lookupInScope :: InScopeSet -> Var -> Maybe Var lookupInScope (InScope in_scope) v = lookupVarSet in_scope v lookupInScope_Directly :: InScopeSet -> Unique -> Maybe Var lookupInScope_Directly (InScope in_scope) uniq = lookupVarSet_Directly in_scope uniq unionInScope :: InScopeSet -> InScopeSet -> InScopeSet unionInScope (InScope s1) (InScope s2) = InScope (s1 `unionVarSet` s2) varSetInScope :: VarSet -> InScopeSet -> Bool varSetInScope vars (InScope s1) = vars `subVarSet` s1 {- Note [Local uniques] ~~~~~~~~~~~~~~~~~~~~ Sometimes one must create conjure up a unique which is unique in a particular context (but not necessarily globally unique). 
For instance, one might need to create a fresh local identifier which does not shadow any of the locally in-scope variables. For this we purpose we provide 'uniqAway'. 'uniqAway' is implemented in terms of the 'unsafeGetFreshLocalUnique' operation, which generates an unclaimed 'Unique' from an 'InScopeSet'. To ensure that we do not conflict with uniques allocated by future allocations from 'UniqSupply's, Uniques generated by 'unsafeGetFreshLocalUnique' are allocated into a dedicated region of the unique space (namely the X tag). Note that one must be quite carefully when using uniques generated in this way since they are only locally unique. In particular, two successive calls to 'uniqAway' on the same 'InScopeSet' will produce the same unique. -} -- | @uniqAway in_scope v@ finds a unique that is not used in the -- in-scope set, and gives that to v. See Note [Local uniques] and -- Note [The InScopeSet invariant]. uniqAway :: InScopeSet -> Var -> Var -- It starts with v's current unique, of course, in the hope that it won't -- have to change, and thereafter uses the successor to the last derived unique -- found in the in-scope set. uniqAway in_scope var | var `elemInScopeSet` in_scope = uniqAway' in_scope var -- Make a new one | otherwise = var -- Nothing to do uniqAway' :: InScopeSet -> Var -> Var -- This one *always* makes up a new variable uniqAway' in_scope var = setVarUnique var (unsafeGetFreshLocalUnique in_scope) -- | @unsafeGetFreshUnique in_scope@ finds a unique that is not in-scope in the -- given 'InScopeSet'. This must be used very carefully since one can very easily -- introduce non-unique 'Unique's this way. See Note [Local uniques]. 
unsafeGetFreshLocalUnique :: InScopeSet -> Unique unsafeGetFreshLocalUnique (InScope set) | Just (uniq,_) <- Word64Map.lookupLT (getKey maxLocalUnique) (ufmToIntMap $ getUniqSet set) , let uniq' = mkLocalUnique uniq , not $ uniq' `ltUnique` minLocalUnique = incrUnique uniq' | otherwise = minLocalUnique {- ************************************************************************ * * Dual renaming * * ************************************************************************ -} -- | Rename Environment 2 -- -- When we are comparing (or matching) types or terms, we are faced with -- \"going under\" corresponding binders. E.g. when comparing: -- -- > \x. e1 ~ \y. e2 -- -- Basically we want to rename [@x@ -> @y@] or [@y@ -> @x@], but there are lots of -- things we must be careful of. In particular, @x@ might be free in @e2@, or -- y in @e1@. So the idea is that we come up with a fresh binder that is free -- in neither, and rename @x@ and @y@ respectively. That means we must maintain: -- -- 1. A renaming for the left-hand expression -- -- 2. A renaming for the right-hand expressions -- -- 3. An in-scope set -- -- Furthermore, when matching, we want to be able to have an 'occurs check', -- to prevent: -- -- > \x. f ~ \y. y -- -- matching with [@f@ -> @y@]. So for each expression we want to know that set of -- locally-bound variables. That is precisely the domain of the mappings 1. -- and 2., but we must ensure that we always extend the mappings as we go in. -- -- All of this information is bundled up in the 'RnEnv2' data RnEnv2 = RV2 { envL :: VarEnv Var -- Renaming for Left term , envR :: VarEnv Var -- Renaming for Right term , in_scope :: InScopeSet } -- In scope in left or right terms -- The renamings envL and envR are *guaranteed* to contain a binding -- for every variable bound as we go into the term, even if it is not -- renamed. 
That way we can ask what variables are locally bound -- (inRnEnvL, inRnEnvR) mkRnEnv2 :: InScopeSet -> RnEnv2 mkRnEnv2 vars = RV2 { envL = emptyVarEnv , envR = emptyVarEnv , in_scope = vars } extendRnInScopeSetList :: RnEnv2 -> [Var] -> RnEnv2 extendRnInScopeSetList env vs | null vs = env | otherwise = env { in_scope = extendInScopeSetList (in_scope env) vs } rnInScope :: Var -> RnEnv2 -> Bool rnInScope x env = x `elemInScopeSet` in_scope env rnInScopeSet :: RnEnv2 -> InScopeSet rnInScopeSet = in_scope -- | Retrieve the left mapping rnEnvL :: RnEnv2 -> VarEnv Var rnEnvL = envL -- | Retrieve the right mapping rnEnvR :: RnEnv2 -> VarEnv Var rnEnvR = envR rnBndrs2 :: RnEnv2 -> [Var] -> [Var] -> RnEnv2 -- ^ Applies 'rnBndr2' to several variables: the two variable lists must be of equal length rnBndrs2 env bsL bsR = foldl2 rnBndr2 env bsL bsR rnBndr2 :: RnEnv2 -> Var -> Var -> RnEnv2 -- ^ @rnBndr2 env bL bR@ goes under a binder @bL@ in the Left term, -- and binder @bR@ in the Right term. -- It finds a new binder, @new_b@, -- and returns an environment mapping @bL -> new_b@ and @bR -> new_b@ rnBndr2 env bL bR = fst $ rnBndr2_var env bL bR rnBndr2_var :: RnEnv2 -> Var -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but returns the new variable as well as the -- new environment. 
-- Postcondition: the type of the returned Var is that of bR rnBndr2_var (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL bR = (RV2 { envL = extendVarEnv envL bL new_b -- See Note , envR = extendVarEnv envR bR new_b -- [Rebinding] , in_scope = extendInScopeSet in_scope new_b }, new_b) where -- Find a new binder not in scope in either term -- To avoid calling `uniqAway`, we try bL's Unique -- But we always return a Var whose type is that of bR new_b | not (bR `elemInScopeSet` in_scope) = bR | not (bL `elemInScopeSet` in_scope) = bR `setVarUnique` varUnique bL | otherwise = uniqAway' in_scope bR -- Note [Rebinding] -- ~~~~~~~~~~~~~~~~ -- If the new var is the same as the old one, note that -- the extendVarEnv *replaces* any current renaming -- E.g. (\x. \x. ...) ~ (\y. \z. ...) -- -- envL envR in_scope -- Inside \x \y { [x->y], [y->y], {y} } -- \x \z { [x->z], [y->y, z->z], {y,z} } -- The envL binding [x->y] is replaced by [x->z] rnBndrL :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used when there's a binder on the left -- side only. rnBndrL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL = (RV2 { envL = extendVarEnv envL bL new_b , envR = envR , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bL rnBndrR :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used when there's a binder on the right -- side only. 
rnBndrR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR = (RV2 { envR = extendVarEnv envR bR new_b , envL = envL , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bR rnEtaL :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndrL' but used for eta expansion -- See Note [Eta expansion] rnEtaL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL = (RV2 { envL = extendVarEnv envL bL new_b , envR = extendVarEnv envR new_b new_b -- Note [Eta expansion] , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bL rnEtaR :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used for eta expansion -- See Note [Eta expansion] rnEtaR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR = (RV2 { envL = extendVarEnv envL new_b new_b -- Note [Eta expansion] , envR = extendVarEnv envR bR new_b , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bR delBndrL, delBndrR :: RnEnv2 -> Var -> RnEnv2 delBndrL rn@(RV2 { envL = env, in_scope = in_scope }) v = rn { envL = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v } delBndrR rn@(RV2 { envR = env, in_scope = in_scope }) v = rn { envR = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v } delBndrsL, delBndrsR :: RnEnv2 -> [Var] -> RnEnv2 delBndrsL rn@(RV2 { envL = env, in_scope = in_scope }) v = rn { envL = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v } delBndrsR rn@(RV2 { envR = env, in_scope = in_scope }) v = rn { envR = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v } rnOccL, rnOccR :: RnEnv2 -> Var -> Var -- ^ Look up the renaming of an occurrence in the left or right term rnOccL (RV2 { envL = env }) v = lookupVarEnv env v `orElse` v rnOccR (RV2 { envR = env }) v = lookupVarEnv env v `orElse` v rnOccL_maybe, rnOccR_maybe :: RnEnv2 -> Var -> Maybe Var -- ^ Look up the renaming of an occurrence in the left or right term rnOccL_maybe (RV2 
{ envL = env }) v = lookupVarEnv env v rnOccR_maybe (RV2 { envR = env }) v = lookupVarEnv env v inRnEnvL, inRnEnvR :: RnEnv2 -> Var -> Bool -- ^ Tells whether a variable is locally bound inRnEnvL (RV2 { envL = env }) v = v `elemVarEnv` env inRnEnvR (RV2 { envR = env }) v = v `elemVarEnv` env -- | `anyInRnEnvR env set` == `any (inRnEnvR rn_env) (toList set)` -- but lazy in the second argument if the right side of the env is empty. anyInRnEnvR :: RnEnv2 -> VarSet -> Bool anyInRnEnvR (RV2 { envR = env }) vs -- Avoid allocating the predicate if we deal with an empty env. | isEmptyVarEnv env = False | otherwise = anyVarSet (`elemVarEnv` env) vs lookupRnInScope :: RnEnv2 -> Var -> Var lookupRnInScope env v = lookupInScope (in_scope env) v `orElse` v nukeRnEnvL, nukeRnEnvR :: RnEnv2 -> RnEnv2 -- ^ Wipe the left or right side renaming nukeRnEnvL env = env { envL = emptyVarEnv } nukeRnEnvR env = env { envR = emptyVarEnv } rnSwap :: RnEnv2 -> RnEnv2 -- ^ swap the meaning of left and right rnSwap (RV2 { envL = envL, envR = envR, in_scope = in_scope }) = RV2 { envL = envR, envR = envL, in_scope = in_scope } {- Note [Eta expansion] ~~~~~~~~~~~~~~~~~~~~ When matching (\x.M) ~ N we rename x to x' with, where x' is not in scope in either term. Then we want to behave as if we'd seen (\x'.M) ~ (\x'.N x') Since x' isn't in scope in N, the form (\x'. N x') doesn't capture any variables in N. But we must nevertheless extend the envR with a binding [x' -> x'], to support the occurs check. For example, if we don't do this, we can get silly matches like forall a. (\y.a) ~ v succeeding with [a -> v y], which is bogus of course. 
************************************************************************ * * Tidying * * ************************************************************************ -} -- | Tidy Environment -- -- When tidying up print names, we keep a mapping of in-scope occ-names -- (the 'TidyOccEnv') and a Var-to-Var of the current renamings type TidyEnv = (TidyOccEnv, VarEnv Var) emptyTidyEnv :: TidyEnv emptyTidyEnv = (emptyTidyOccEnv, emptyVarEnv) mkEmptyTidyEnv :: TidyOccEnv -> TidyEnv mkEmptyTidyEnv occ_env = (occ_env, emptyVarEnv) delTidyEnvList :: TidyEnv -> [Var] -> TidyEnv delTidyEnvList (occ_env, var_env) vs = (occ_env', var_env') where occ_env' = occ_env `delTidyOccEnvList` map getOccName vs var_env' = var_env `delVarEnvList` vs {- ************************************************************************ * * VarEnv * * ************************************************************************ -} -- We would like this to be `UniqFM Var elt` -- but the code uses various key types. -- So for now make it explicitly untyped -- | Variable Environment type VarEnv elt = UniqFM Var elt -- | Identifier Environment type IdEnv elt = UniqFM Id elt -- | Type Variable Environment type TyVarEnv elt = UniqFM Var elt -- | Type or Coercion Variable Environment type TyCoVarEnv elt = UniqFM TyCoVar elt -- | Coercion Variable Environment type CoVarEnv elt = UniqFM CoVar elt emptyVarEnv :: VarEnv a mkVarEnv :: [(Var, a)] -> VarEnv a mkVarEnv_Directly :: [(Unique, a)] -> VarEnv a zipVarEnv :: [Var] -> [a] -> VarEnv a unitVarEnv :: Var -> a -> VarEnv a alterVarEnv :: (Maybe a -> Maybe a) -> VarEnv a -> Var -> VarEnv a extendVarEnv :: VarEnv a -> Var -> a -> VarEnv a extendVarEnv_C :: (a->a->a) -> VarEnv a -> Var -> a -> VarEnv a extendVarEnv_Acc :: (a->b->b) -> (a->b) -> VarEnv b -> Var -> a -> VarEnv b plusVarEnv :: VarEnv a -> VarEnv a -> VarEnv a plusVarEnvList :: [VarEnv a] -> VarEnv a extendVarEnvList :: VarEnv a -> [(Var, a)] -> VarEnv a varEnvDomain :: VarEnv elt -> UnVarSet partitionVarEnv :: 
(a -> Bool) -> VarEnv a -> (VarEnv a, VarEnv a) -- | Only keep variables contained in the VarSet restrictVarEnv :: VarEnv a -> VarSet -> VarEnv a delVarEnvList :: VarEnv a -> [Var] -> VarEnv a delVarEnv :: VarEnv a -> Var -> VarEnv a minusVarEnv :: VarEnv a -> VarEnv b -> VarEnv a plusVarEnv_C :: (a -> a -> a) -> VarEnv a -> VarEnv a -> VarEnv a plusVarEnv_CD :: (a -> a -> a) -> VarEnv a -> a -> VarEnv a -> a -> VarEnv a plusMaybeVarEnv_C :: (a -> a -> Maybe a) -> VarEnv a -> VarEnv a -> VarEnv a mapVarEnv :: (a -> b) -> VarEnv a -> VarEnv b modifyVarEnv :: (a -> a) -> VarEnv a -> Var -> VarEnv a isEmptyVarEnv :: VarEnv a -> Bool lookupVarEnv :: VarEnv a -> Var -> Maybe a lookupVarEnv_Directly :: VarEnv a -> Unique -> Maybe a filterVarEnv :: (a -> Bool) -> VarEnv a -> VarEnv a anyVarEnv :: (elt -> Bool) -> UniqFM key elt -> Bool lookupVarEnv_NF :: VarEnv a -> Var -> a lookupWithDefaultVarEnv :: VarEnv a -> a -> Var -> a elemVarEnv :: Var -> VarEnv a -> Bool elemVarEnvByKey :: Unique -> VarEnv a -> Bool disjointVarEnv :: VarEnv a -> VarEnv a -> Bool nonDetStrictFoldVarEnv_Directly :: (Unique -> a -> r -> r) -> r -> VarEnv a -> r elemVarEnv = elemUFM elemVarEnvByKey = elemUFM_Directly disjointVarEnv = disjointUFM alterVarEnv = alterUFM extendVarEnv = addToUFM extendVarEnv_C = addToUFM_C extendVarEnv_Acc = addToUFM_Acc extendVarEnvList = addListToUFM plusVarEnv_C = plusUFM_C plusVarEnv_CD = plusUFM_CD plusMaybeVarEnv_C = plusMaybeUFM_C delVarEnvList = delListFromUFM delVarEnv = delFromUFM minusVarEnv = minusUFM plusVarEnv = plusUFM plusVarEnvList = plusUFMList -- lookupVarEnv is very hot (in part due to being called by substTyVar), -- if it's not inlined than the mere allocation of the Just constructor causes -- perf benchmarks to regress by 2% in some cases. See #21159, !7638 and containers#821 -- for some more explanation about what exactly went wrong. 
{-# INLINE lookupVarEnv #-} lookupVarEnv = lookupUFM lookupVarEnv_Directly = lookupUFM_Directly filterVarEnv = filterUFM anyVarEnv = anyUFM lookupWithDefaultVarEnv = lookupWithDefaultUFM mapVarEnv = mapUFM mkVarEnv = listToUFM mkVarEnv_Directly= listToUFM_Directly emptyVarEnv = emptyUFM unitVarEnv = unitUFM isEmptyVarEnv = isNullUFM partitionVarEnv = partitionUFM varEnvDomain = domUFMUnVarSet nonDetStrictFoldVarEnv_Directly = nonDetStrictFoldUFM_Directly restrictVarEnv env vs = filterUFM_Directly keep env where keep u _ = u `elemVarSetByKey` vs zipVarEnv tyvars tys = mkVarEnv (zipEqual "zipVarEnv" tyvars tys) lookupVarEnv_NF env id = case lookupVarEnv env id of Just xx -> xx Nothing -> panic "lookupVarEnv_NF: Nothing" {- @modifyVarEnv@: Look up a thing in the VarEnv, then mash it with the modify function, and put it back. -} modifyVarEnv mangle_fn env key = case (lookupVarEnv env key) of Nothing -> env Just xx -> extendVarEnv env key (mangle_fn xx) modifyVarEnv_Directly :: (a -> a) -> UniqFM key a -> Unique -> UniqFM key a modifyVarEnv_Directly mangle_fn env key = case (lookupUFM_Directly env key) of Nothing -> env Just xx -> addToUFM_Directly env key (mangle_fn xx) {- ************************************************************************ * * Deterministic VarEnv (DVarEnv) * * ************************************************************************ -} -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why we need -- DVarEnv. -- | Deterministic Variable Environment type DVarEnv elt = UniqDFM Var elt -- | Deterministic Identifier Environment -- Sadly not always indexed by Id, but it is in the common case. 
type DIdEnv elt = UniqDFM Var elt -- | Deterministic Type Variable Environment type DTyVarEnv elt = UniqDFM TyVar elt emptyDVarEnv :: DVarEnv a emptyDVarEnv = emptyUDFM dVarEnvElts :: DVarEnv a -> [a] dVarEnvElts = eltsUDFM mkDVarEnv :: [(Var, a)] -> DVarEnv a mkDVarEnv = listToUDFM extendDVarEnv :: DVarEnv a -> Var -> a -> DVarEnv a extendDVarEnv = addToUDFM minusDVarEnv :: DVarEnv a -> DVarEnv a' -> DVarEnv a minusDVarEnv = minusUDFM lookupDVarEnv :: DVarEnv a -> Var -> Maybe a lookupDVarEnv = lookupUDFM foldDVarEnv :: (a -> b -> b) -> b -> DVarEnv a -> b foldDVarEnv = foldUDFM -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldDVarEnv :: (a -> b -> b) -> b -> DVarEnv a -> b nonDetStrictFoldDVarEnv = nonDetStrictFoldUDFM mapDVarEnv :: (a -> b) -> DVarEnv a -> DVarEnv b mapDVarEnv = mapUDFM filterDVarEnv :: (a -> Bool) -> DVarEnv a -> DVarEnv a filterDVarEnv = filterUDFM alterDVarEnv :: (Maybe a -> Maybe a) -> DVarEnv a -> Var -> DVarEnv a alterDVarEnv = alterUDFM plusDVarEnv :: DVarEnv a -> DVarEnv a -> DVarEnv a plusDVarEnv = plusUDFM plusDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> DVarEnv a -> DVarEnv a plusDVarEnv_C = plusUDFM_C unitDVarEnv :: Var -> a -> DVarEnv a unitDVarEnv = unitUDFM delDVarEnv :: DVarEnv a -> Var -> DVarEnv a delDVarEnv = delFromUDFM delDVarEnvList :: DVarEnv a -> [Var] -> DVarEnv a delDVarEnvList = delListFromUDFM isEmptyDVarEnv :: DVarEnv a -> Bool isEmptyDVarEnv = isNullUDFM elemDVarEnv :: Var -> DVarEnv a -> Bool elemDVarEnv = elemUDFM extendDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> Var -> a -> DVarEnv a extendDVarEnv_C = addToUDFM_C modifyDVarEnv :: (a -> a) -> DVarEnv a -> Var -> DVarEnv a modifyDVarEnv mangle_fn env key = case (lookupDVarEnv env key) of Nothing -> env Just xx -> extendDVarEnv env key (mangle_fn xx) partitionDVarEnv :: (a -> Bool) -> DVarEnv a -> (DVarEnv a, DVarEnv a) partitionDVarEnv = 
partitionUDFM extendDVarEnvList :: DVarEnv a -> [(Var, a)] -> DVarEnv a extendDVarEnvList = addListToUDFM anyDVarEnv :: (a -> Bool) -> DVarEnv a -> Bool anyDVarEnv = anyUDFM ghc-lib-parser-9.12.2.20250421/compiler/GHC/Types/Var/Set.hs0000644000000000000000000003125007346545000020632 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Types.Var.Set ( -- * Var, Id and TyVar set types VarSet, IdSet, TyVarSet, CoVarSet, TyCoVarSet, -- ** Manipulating these sets emptyVarSet, unitVarSet, mkVarSet, extendVarSet, extendVarSetList, elemVarSet, subVarSet, unionVarSet, unionVarSets, mapUnionVarSet, intersectVarSet, intersectsVarSet, disjointVarSet, isEmptyVarSet, delVarSet, delVarSetList, delVarSetByKey, minusVarSet, filterVarSet, mapVarSet, anyVarSet, allVarSet, transCloVarSet, fixVarSet, lookupVarSet_Directly, lookupVarSet, lookupVarSetByName, sizeVarSet, seqVarSet, elemVarSetByKey, partitionVarSet, pluralVarSet, pprVarSet, nonDetStrictFoldVarSet, -- * Deterministic Var set types DVarSet, DIdSet, DTyVarSet, DCoVarSet, DTyCoVarSet, -- ** Manipulating these sets emptyDVarSet, unitDVarSet, mkDVarSet, extendDVarSet, extendDVarSetList, elemDVarSet, dVarSetElems, subDVarSet, unionDVarSet, unionDVarSets, mapUnionDVarSet, intersectDVarSet, dVarSetIntersectVarSet, intersectsDVarSet, disjointDVarSet, isEmptyDVarSet, delDVarSet, delDVarSetList, minusDVarSet, nonDetStrictFoldDVarSet, filterDVarSet, mapDVarSet, strictFoldDVarSet, dVarSetMinusVarSet, anyDVarSet, allDVarSet, transCloDVarSet, sizeDVarSet, seqDVarSet, partitionDVarSet, dVarSetToVarSet, ) where import GHC.Prelude import GHC.Types.Var ( Var, TyVar, CoVar, TyCoVar, Id ) import GHC.Types.Unique import GHC.Types.Name ( Name ) import GHC.Types.Unique.Set import GHC.Types.Unique.DSet import GHC.Types.Unique.FM( disjointUFM, pluralUFM, pprUFM ) import GHC.Types.Unique.DFM( disjointUDFM, udfmToUfm, anyUDFM, allUDFM ) import GHC.Utils.Outputable (SDoc) -- 
| A non-deterministic Variable Set -- -- A non-deterministic set of variables. -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why it's not -- deterministic and why it matters. Use DVarSet if the set eventually -- gets converted into a list or folded over in a way where the order -- changes the generated code, for example when abstracting variables. type VarSet = UniqSet Var -- | Identifier Set type IdSet = UniqSet Id -- | Type Variable Set type TyVarSet = UniqSet TyVar -- | Coercion Variable Set type CoVarSet = UniqSet CoVar -- | Type or Coercion Variable Set type TyCoVarSet = UniqSet TyCoVar emptyVarSet :: VarSet intersectVarSet :: VarSet -> VarSet -> VarSet unionVarSet :: VarSet -> VarSet -> VarSet unionVarSets :: [VarSet] -> VarSet mapUnionVarSet :: (a -> VarSet) -> [a] -> VarSet -- ^ map the function over the list, and union the results unitVarSet :: Var -> VarSet extendVarSet :: VarSet -> Var -> VarSet extendVarSetList:: VarSet -> [Var] -> VarSet elemVarSet :: Var -> VarSet -> Bool delVarSet :: VarSet -> Var -> VarSet delVarSetList :: VarSet -> [Var] -> VarSet minusVarSet :: VarSet -> VarSet -> VarSet isEmptyVarSet :: VarSet -> Bool mkVarSet :: [Var] -> VarSet lookupVarSet_Directly :: VarSet -> Unique -> Maybe Var lookupVarSet :: VarSet -> Var -> Maybe Var -- Returns the set element, which may be -- (==) to the argument, but not the same as lookupVarSetByName :: VarSet -> Name -> Maybe Var sizeVarSet :: VarSet -> Int filterVarSet :: (Var -> Bool) -> VarSet -> VarSet delVarSetByKey :: VarSet -> Unique -> VarSet elemVarSetByKey :: Unique -> VarSet -> Bool partitionVarSet :: (Var -> Bool) -> VarSet -> (VarSet, VarSet) emptyVarSet = emptyUniqSet unitVarSet = unitUniqSet extendVarSet = addOneToUniqSet extendVarSetList= addListToUniqSet intersectVarSet = intersectUniqSets intersectsVarSet:: VarSet -> VarSet -> Bool -- True if non-empty intersection disjointVarSet :: VarSet -> VarSet -> Bool -- True if empty intersection subVarSet :: 
VarSet -> VarSet -> Bool -- True if first arg is subset of second -- (s1 `intersectsVarSet` s2) doesn't compute s2 if s1 is empty; -- ditto disjointVarSet, subVarSet unionVarSet = unionUniqSets unionVarSets = unionManyUniqSets elemVarSet = elementOfUniqSet minusVarSet = minusUniqSet delVarSet = delOneFromUniqSet delVarSetList = delListFromUniqSet isEmptyVarSet = isEmptyUniqSet mkVarSet = mkUniqSet lookupVarSet_Directly = lookupUniqSet_Directly lookupVarSet = lookupUniqSet lookupVarSetByName set name = lookupUniqSet_Directly set (getUnique name) sizeVarSet = sizeUniqSet filterVarSet = filterUniqSet delVarSetByKey = delOneFromUniqSet_Directly elemVarSetByKey = elemUniqSet_Directly partitionVarSet = partitionUniqSet mapUnionVarSet get_set xs = foldr (unionVarSet . get_set) emptyVarSet xs -- See comments with type signatures intersectsVarSet s1 s2 = not (s1 `disjointVarSet` s2) disjointVarSet s1 s2 = disjointUFM (getUniqSet s1) (getUniqSet s2) subVarSet s1 s2 = isEmptyVarSet (s1 `minusVarSet` s2) anyVarSet :: (Var -> Bool) -> VarSet -> Bool anyVarSet = uniqSetAny allVarSet :: (Var -> Bool) -> VarSet -> Bool allVarSet = uniqSetAll mapVarSet :: Uniquable b => (a -> b) -> UniqSet a -> UniqSet b mapVarSet = mapUniqSet -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldVarSet :: (Var -> a -> a) -> a -> VarSet -> a nonDetStrictFoldVarSet = nonDetStrictFoldUniqSet fixVarSet :: (VarSet -> VarSet) -- Map the current set to a new set -> VarSet -> VarSet -- (fixVarSet f s) repeatedly applies f to the set s, -- until it reaches a fixed point. 
fixVarSet fn vars | new_vars `subVarSet` vars = vars | otherwise = fixVarSet fn new_vars where new_vars = fn vars transCloVarSet :: (VarSet -> VarSet) -- Map some variables in the set to -- extra variables that should be in it -> VarSet -> VarSet -- (transCloVarSet f s) repeatedly applies f to new candidates, adding any -- new variables to s that it finds thereby, until it reaches a fixed point. -- -- The function fn could be (Var -> VarSet), but we use (VarSet -> VarSet) -- for efficiency, so that the test can be batched up. -- It's essential that fn will work fine if given new candidates -- one at a time; ie fn {v1,v2} = fn v1 `union` fn v2 -- Use fixVarSet if the function needs to see the whole set all at once transCloVarSet fn seeds = go seeds seeds where go :: VarSet -- Accumulating result -> VarSet -- Work-list; un-processed subset of accumulating result -> VarSet -- Specification: go acc vs = acc `union` transClo fn vs go acc candidates | isEmptyVarSet new_vs = acc | otherwise = go (acc `unionVarSet` new_vs) new_vs where new_vs = fn candidates `minusVarSet` acc seqVarSet :: VarSet -> () seqVarSet s = s `seq` () -- | Determines the pluralisation suffix appropriate for the length of a set -- in the same way that plural from Outputable does for lists. pluralVarSet :: VarSet -> SDoc pluralVarSet = pluralUFM . getUniqSet -- | Pretty-print a non-deterministic set. -- The order of variables is non-deterministic and for pretty-printing that -- shouldn't be a problem. -- Having this function helps contain the non-determinism created with -- nonDetEltsUFM. -- Passing a list to the pretty-printing function allows the caller -- to decide on the order of Vars (eg. toposort them) without them having -- to use nonDetEltsUFM at the call site. This prevents from let-binding -- non-deterministically ordered lists and reusing them where determinism -- matters. 
pprVarSet :: VarSet -- ^ The things to be pretty printed -> ([Var] -> SDoc) -- ^ The pretty printing function to use on the -- elements -> SDoc -- ^ 'SDoc' where the things have been pretty -- printed pprVarSet = pprUFM . getUniqSet -- Deterministic VarSet -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why we need -- DVarSet. -- | Deterministic Variable Set type DVarSet = UniqDSet Var -- | Deterministic Identifier Set type DIdSet = UniqDSet Id -- | Deterministic Type Variable Set type DTyVarSet = UniqDSet TyVar -- | Deterministic Coercion Variable Set type DCoVarSet = UniqDSet CoVar -- | Deterministic Type or Coercion Variable Set type DTyCoVarSet = UniqDSet TyCoVar emptyDVarSet :: DVarSet emptyDVarSet = emptyUniqDSet unitDVarSet :: Var -> DVarSet unitDVarSet = unitUniqDSet mkDVarSet :: [Var] -> DVarSet mkDVarSet = mkUniqDSet -- The new element always goes to the right of existing ones. extendDVarSet :: DVarSet -> Var -> DVarSet extendDVarSet = addOneToUniqDSet elemDVarSet :: Var -> DVarSet -> Bool elemDVarSet = elementOfUniqDSet dVarSetElems :: DVarSet -> [Var] dVarSetElems = uniqDSetToList subDVarSet :: DVarSet -> DVarSet -> Bool subDVarSet s1 s2 = isEmptyDVarSet (s1 `minusDVarSet` s2) unionDVarSet :: DVarSet -> DVarSet -> DVarSet unionDVarSet = unionUniqDSets unionDVarSets :: [DVarSet] -> DVarSet unionDVarSets = unionManyUniqDSets -- | Map the function over the list, and union the results mapUnionDVarSet :: (a -> DVarSet) -> [a] -> DVarSet mapUnionDVarSet get_set xs = foldr (unionDVarSet . 
get_set) emptyDVarSet xs intersectDVarSet :: DVarSet -> DVarSet -> DVarSet intersectDVarSet = intersectUniqDSets dVarSetIntersectVarSet :: DVarSet -> VarSet -> DVarSet dVarSetIntersectVarSet = uniqDSetIntersectUniqSet -- | True if empty intersection disjointDVarSet :: DVarSet -> DVarSet -> Bool disjointDVarSet s1 s2 = disjointUDFM (getUniqDSet s1) (getUniqDSet s2) -- | True if non-empty intersection intersectsDVarSet :: DVarSet -> DVarSet -> Bool intersectsDVarSet s1 s2 = not (s1 `disjointDVarSet` s2) isEmptyDVarSet :: DVarSet -> Bool isEmptyDVarSet = isEmptyUniqDSet delDVarSet :: DVarSet -> Var -> DVarSet delDVarSet = delOneFromUniqDSet minusDVarSet :: DVarSet -> DVarSet -> DVarSet minusDVarSet = minusUniqDSet dVarSetMinusVarSet :: DVarSet -> VarSet -> DVarSet dVarSetMinusVarSet = uniqDSetMinusUniqSet -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldDVarSet :: (Var -> a -> a) -> a -> DVarSet -> a nonDetStrictFoldDVarSet = nonDetStrictFoldUniqDSet anyDVarSet :: (Var -> Bool) -> DVarSet -> Bool anyDVarSet p = anyUDFM p . getUniqDSet allDVarSet :: (Var -> Bool) -> DVarSet -> Bool allDVarSet p = allUDFM p . 
getUniqDSet mapDVarSet :: Uniquable b => (a -> b) -> UniqDSet a -> UniqDSet b mapDVarSet = mapUniqDSet strictFoldDVarSet :: (a -> r -> r) -> r -> UniqDSet a -> r strictFoldDVarSet = strictFoldUniqDSet filterDVarSet :: (Var -> Bool) -> DVarSet -> DVarSet filterDVarSet = filterUniqDSet sizeDVarSet :: DVarSet -> Int sizeDVarSet = sizeUniqDSet -- | Partition DVarSet according to the predicate given partitionDVarSet :: (Var -> Bool) -> DVarSet -> (DVarSet, DVarSet) partitionDVarSet = partitionUniqDSet -- | Delete a list of variables from DVarSet delDVarSetList :: DVarSet -> [Var] -> DVarSet delDVarSetList = delListFromUniqDSet seqDVarSet :: DVarSet -> () seqDVarSet s = s `seq` () -- | Add a list of variables to DVarSet extendDVarSetList :: DVarSet -> [Var] -> DVarSet extendDVarSetList = addListToUniqDSet -- | Convert a DVarSet to a VarSet by forgetting the order of insertion dVarSetToVarSet :: DVarSet -> VarSet dVarSetToVarSet = unsafeUFMToUniqSet . udfmToUfm . getUniqDSet -- | transCloVarSet for DVarSet transCloDVarSet :: (DVarSet -> DVarSet) -- Map some variables in the set to -- extra variables that should be in it -> DVarSet -> DVarSet -- (transCloDVarSet f s) repeatedly applies f to new candidates, adding any -- new variables to s that it finds thereby, until it reaches a fixed point. -- -- The function fn could be (Var -> DVarSet), but we use (DVarSet -> DVarSet) -- for efficiency, so that the test can be batched up. 
-- It's essential that fn will work fine if given new candidates -- one at a time; ie fn {v1,v2} = fn v1 `union` fn v2 transCloDVarSet fn seeds = go seeds seeds where go :: DVarSet -- Accumulating result -> DVarSet -- Work-list; un-processed subset of accumulating result -> DVarSet -- Specification: go acc vs = acc `union` transClo fn vs go acc candidates | isEmptyDVarSet new_vs = acc | otherwise = go (acc `unionDVarSet` new_vs) new_vs where new_vs = fn candidates `minusDVarSet` acc ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit.hs0000644000000000000000000003707007346545000017170 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} -- | Units are library components from Cabal packages compiled and installed in -- a database module GHC.Unit ( module GHC.Unit.Types , module GHC.Unit.Info , module GHC.Unit.Parser , module GHC.Unit.State , module GHC.Unit.Module , module GHC.Unit.Home ) where import GHC.Unit.Types import GHC.Unit.Info import GHC.Unit.Parser import GHC.Unit.Module import GHC.Unit.Home import GHC.Unit.State {- Note [About units] ~~~~~~~~~~~~~~~~~~ Haskell users are used to manipulating Cabal packages. These packages are identified by: - a package name :: String - a package version :: Version - (a revision number, when they are registered on Hackage) Cabal packages may contain several components (libraries, programs, testsuites). In GHC we are mostly interested in libraries because those are the components that can be depended upon by other components. Components in a package are identified by their component name. Historically only one library component was allowed per package, hence it didn't need a name. 
For this reason, component name may be empty for one library component in each package: - a component name :: Maybe String UnitId ------ Cabal libraries can be compiled in various ways (different compiler options or Cabal flags, different dependencies, etc.), hence using package name, package version and component name isn't enough to identify a built library. We use another identifier called UnitId: package name \ package version | ________ component name | hash of all this ==> | UnitId | Cabal flags | -------- compiler options | dependencies' UnitId / Fortunately GHC doesn't have to generate these UnitId: they are provided by external build tools (e.g. Cabal) with `-this-unit-id` command-line parameter. UnitIds are important because they are used to generate internal names (symbols, etc.). Wired-in units -------------- Certain libraries (ghc-prim, base, etc.) are known to the compiler and to the RTS as they provide some basic primitives. Hence UnitIds of wired-in libraries are fixed. Instead of letting Cabal choose the UnitId for these libraries, their .cabal file uses the following stanza to force it to a specific value: ghc-options: -this-unit-id ghc-prim -- taken from ghc-prim.cabal The RTS also uses entities of wired-in units by directly referring to symbols such as "base_GHCziIOziException_heapOverflow_closure" where the prefix is the UnitId of "base" unit. Unit databases -------------- Units are stored in databases in order to be reused by other codes: UnitKey ---> UnitInfo { exposed modules, package name, package version component name, various file paths, dependencies :: [UnitKey], etc. } Because of the wired-in units described above, we can't exactly use UnitIds as UnitKeys in the database: if we did this, we could only have a single unit (compiled library) in the database for each wired-in library. 
As we want to support databases containing several different units for the same wired-in library, we do this: * for non wired-in units: * UnitId = UnitKey = Identifier (hash) computed by Cabal * for wired-in units: * UnitKey = Identifier computed by Cabal (just like for non wired-in units) * UnitId = unit-id specified with -this-unit-id command-line flag We can expose several units to GHC via the `package-id ` command-line parameter. We must use the UnitKeys of the units so that GHC can find them in the database. During unit loading, GHC replaces UnitKeys with UnitIds. It identifies wired units by their package name (stored in their UnitInfo) and uses wired-in UnitIds for them. For example, knowing that "base", "ghc-prim" and "rts" are wired-in units, the following dependency graph expressed with database UnitKeys will be transformed into a similar graph expressed with UnitIds: UnitKeys ~~~~~~~~ ----------> rts-1.0-hashABC <-- | | | | foo-2.0-hash123 --> base-4.1-hashXYZ ---> ghc-prim-0.5.3-hashUVW UnitIds ~~~~~~~ ---------------> rts <-- | | | | foo-2.0-hash123 --> base ---------------> ghc-prim Note that "foo-2.0-hash123" isn't wired-in so its UnitId is the same as its UnitKey. Module signatures / indefinite units / instantiated units --------------------------------------------------------- GHC distinguishes two kinds of units: * definite units: * units without module holes and with definite dependencies * can be compiled into machine code (.o/.a/.so/.dll/...) * indefinite units: * units with some module holes or with some indefinite dependencies * can only be type-checked Module holes are constrained by module signatures (.hsig files). Module signatures are a kind of interface (similar to .hs-boot files). They are used in place of some real code. GHC allows modules from other units to be used to fill these module holes: the process is called "unit/module instantiation". The instantiating module may either be a concrete module or a module signature. 
In the latter case, the signatures are merged to form a new one. You can think of this as polymorphism at the module level: module signatures give constraints on the "type" of module that can be used to fill the hole (where "type" means types of the exported module entities, etc.). Module signatures contain enough information (datatypes, abstract types, type synonyms, classes, etc.) to typecheck modules depending on them but not enough to compile them. As such, indefinite units found in databases only provide module interfaces (the .hi ones this time), not object code. Unit instantiation / on-the-fly instantiation --------------------------------------------- Indefinite units can be instantiated with modules from other units. The instantiating units can also be instantiated themselves (if there are indefinite) and so on. On-the-fly unit instantiation is a tricky optimization explained in http://blog.ezyang.com/2016/08/optimizing-incremental-compilation Here is a summary: 1. Indefinite units can only be type-checked, not compiled into real code. Type-checking produces interface files (.hi) which are incomplete for code generation (they lack unfoldings, etc.) but enough to perform type-checking of units depending on them. 2. Type-checking an instantiated unit is cheap as we only have to merge interface files (.hi) of the instantiated unit and of the instantiating units, hence it can be done on-the-fly. Interface files of the dependencies can be concrete or produced on-the-fly recursively. 3. When we compile a unit, we mustn't use interfaces produced by the type-checker (on-the-fly or not) for the instantiated unit dependencies because they lack some information. 4. When we type-check an indefinite unit, we must be consistent about the interfaces we use for each dependency: only those produced by the type-checker (on-the-fly or not) or only those produced after a full compilation, but not both at the same time. 
It can be tricky if we have the following kind of dependency graph: X (indefinite) ------> D (definite, compiled) -----> I (instantiated, definite, compiled) |----------------------------------------------------^ Suppose we want to type-check unit X which depends on unit I and D: * I is definite and compiled: we have compiled .hi files for its modules on disk * I is instantiated: it is cheap to produce type-checker .hi files for its modules on-the-fly But we must not do: X (indefinite) ------> D (definite, compiled) -----> I (instantiated, definite, compiled) |--------------------------------------------------> I (instantiated on-the-fly) ==> inconsistent module interfaces for I Nor: X (indefinite) ------> D (definite, compiled) -------v |--------------------------------------------------> I (instantiated on-the-fly) ==> D's interfaces may refer to things that only exist in I's *compiled* interfaces An alternative would be to store both type-checked and compiled interfaces for every compiled non-instantiated unit (instantiated unit can be done on-the-fly) so that we could use type-checked interfaces of D in the example above. But it would increase compilation time and unit size. The 'Unit' datatype represents a unit which may have been instantiated on-the-fly: data Unit = RealUnit DefUnitId -- use compiled interfaces on disk | VirtUnit InstantiatedUnit -- use on-the-fly instantiation 'InstantiatedUnit' has two interesting fields: * instUnitInstanceOf :: UnitId -- ^ the indefinite unit that is instantiated * instUnitInsts :: [(ModuleName,(Unit,ModuleName)] -- ^ a list of instantiations, where an instantiation is: (module hole name, (instantiating unit, instantiating module name)) A 'VirtUnit' may be indefinite or definite, it depends on whether some holes remain in the instantiated unit OR in the instantiating units (recursively). Having a fully instantiated (i.e. 
definite) virtual unit can lead to some issues if there is a matching compiled unit in the preload closure. See Note [VirtUnit to RealUnit improvement] Unit database and indefinite units ---------------------------------- We don't store partially instantiated units in the unit database. Units in the database are either: * definite (fully instantiated or without holes): in this case we have *compiled* module interfaces (.hi) and object codes (.o/.a/.so/.dll/...). * fully indefinite (not instantiated at all): in this case we only have *type-checked* module interfaces (.hi). Note that indefinite units are stored as an instantiation of themselves where each instantiating module is a module variable (see Note [Representation of module/name variables]). E.g. "xyz" (UnitKey) ---> UnitInfo { instanceOf = "xyz" , instantiatedWith = [A=,B=...] , ... } Note that non-instantiated units are also stored as an instantiation of themselves. It is a reminiscence of previous terminology (when "instanceOf" was "componentId"). E.g. "xyz" (UnitKey) ---> UnitInfo { instanceOf = "xyz" , instantiatedWith = [] , ... } TODO: We should probably have `instanceOf :: Maybe UnitId` instead. Note [Pretty-printing UnitId] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we pretty-print a UnitId for the user, we try to map it back to its origin package name, version and component to print "package-version:component" instead of some hash. How to retrieve these information from a UnitId? Solution 0: ask for a UnitState to be passed each time we want to pretty-print a SDoc so that the Outputable instance for UnitId could retrieve the information from it. That what we used in the past: a DynFlags was passed and the UnitState was retrieved from it. This is wrong for several reasons: 1. The UnitState is accessed when the message is printed, not when it is generated. So we could imagine that the UnitState could have changed in-between. Especially if we want to allow unit unloading. 2. 
We want GHC to support several independent sessions at once, hence several UnitState. This approach supposes there is a unique UnitState (the one given at printing-time), moreover a UnitId doesn't indicate which UnitState it comes from (think about statically defined UnitId for wired-in units). Solution 1: an obvious approach would be to store the required information in the UnitId itself. However it doesn't work because some UnitId are defined statically for wired-in units and the same UnitId can map to different units in different contexts. This solution would make wired-in units harder to deal with. Solution 2: another approach would be to thread the UnitState to all places where a UnitId is pretty-printed and to retrieve the information from the UnitState only when needed. It would mean that UnitId couldn't have an Outputable instance as it would need an additional UnitState parameter to be printed. It means that many other types couldn't have an Outputable instance either: Unit, Module, Name, InstEnv, etc. Too many to make this solution feasible. Solution 3: the approach we use is a compromise between solutions 0 and 2: the appropriate UnitState has to be threaded close enough to the function generating the SDoc so that the latter can use `pprWithUnitState` to set the UnitState to fetch information from. However the UnitState doesn't have to be threaded explicitly all the way down to the point where the UnitId itself is printed: instead the Outputable instance of UnitId fetches the "sdocUnitIdForUser" field in the SDocContext to pretty-print. 1. We can still have Outputable instances for common types (Module, Unit, Name, etc.) 2. End-users don't have to pass a UnitState (via a DynFlags) to print a SDoc. 3. By default "sdocUnitIdForUser" prints the UnitId hash. In case of a bug (i.e. GHC doesn't correctly call `pprWithUnitState` before pretty-printing a UnitId), that's what will be shown to the user so it's no big deal. 
Note [VirtUnit to RealUnit improvement] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Over the course of instantiating VirtUnits on the fly while typechecking an indefinite library, we may end up with a fully instantiated VirtUnit. I.e. one that could be compiled and installed in the database. During type-checking we generate a virtual UnitId for it, say "abc". Now the question is: do we have a matching installed unit in the database? Suppose we have one with UnitId "xyz" (provided by Cabal so we don't know how to generate it). The trouble is that if both units end up being used in the same type-checking session, their names won't match (e.g. "abc:M.X" vs "xyz:M.X"). As we want them to match we just replace the virtual unit with the installed one: for some reason this is called "improvement". There is one last niggle: improvement based on the unit database means that we might end up developing on a unit that is not transitively depended upon by the units the user specified directly via command line flags. This could lead to strange and difficult to understand bugs if those instantiations are out of date. The solution is to only improve a unit id if the new unit id is part of the 'preloadClosure'; i.e., the closure of all the units which were explicitly specified. Note [Representation of module/name variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In our ICFP'16, we use to represent module holes, and {A.T} to represent name holes. This could have been represented by adding some new cases to the core data types, but this would have made the existing 'moduleName' and 'moduleUnit' partial, which would have required a lot of modifications to existing code. 
Instead, we use a fake "hole" unit: ===> hole:A {A.T} ===> hole:A.T This encoding is quite convenient, but it is also a bit dangerous too, because if you have a 'hole:A' you need to know if it's actually a 'Module' or just a module stored in a 'Name'; these two cases must be treated differently when doing substitutions. 'renameHoleModule' and 'renameHoleUnit' assume they are NOT operating on a 'Name'; 'NameShape' handles name substitutions exclusively. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/0000755000000000000000000000000007346545000016625 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Env.hs0000644000000000000000000005246407346545000017724 0ustar0000000000000000{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Unit.Env ( UnitEnv (..) , initUnitEnv , ueEPS , unsafeGetHomeUnit , updateHug , updateHpt_lazy , updateHpt -- * Unit Env helper functions , ue_units , ue_currentHomeUnitEnv , ue_setUnits , ue_setUnitFlags , ue_unit_dbs , ue_all_home_unit_ids , ue_setUnitDbs , ue_hpt , ue_homeUnit , ue_unsafeHomeUnit , ue_setFlags , ue_setActiveUnit , ue_currentUnit , ue_findHomeUnitEnv , ue_updateHomeUnitEnv , ue_unitHomeUnit , ue_unitFlags , ue_renameUnitId , ue_transitiveHomeDeps -- * HomeUnitEnv , HomeUnitGraph , HomeUnitEnv (..) , mkHomeUnitEnv , lookupHugByModule , hugElts , lookupHug , addHomeModInfoToHug -- * UnitEnvGraph , UnitEnvGraph (..) 
, UnitEnvGraphKey , unitEnv_insert , unitEnv_delete , unitEnv_adjust , unitEnv_new , unitEnv_singleton , unitEnv_map , unitEnv_member , unitEnv_lookup_maybe , unitEnv_lookup , unitEnv_keys , unitEnv_elts , unitEnv_hpts , unitEnv_foldWithKey , unitEnv_union , unitEnv_mapWithKey -- * Invariants , assertUnitEnvInvariant -- * Preload units info , preloadUnitsInfo , preloadUnitsInfo' -- * Home Module functions , isUnitEnvInstalledModule ) where import GHC.Prelude import GHC.Unit.External import GHC.Unit.State import GHC.Unit.Home import GHC.Unit.Types import GHC.Unit.Home.ModInfo import GHC.Platform import GHC.Settings import GHC.Data.Maybe import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import GHC.Utils.Misc (HasDebugCallStack) import GHC.Driver.DynFlags import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Unit.Module.ModIface import GHC.Unit.Module import qualified Data.Set as Set data UnitEnv = UnitEnv { ue_eps :: {-# UNPACK #-} !ExternalUnitCache -- ^ Information about the currently loaded external packages. -- This is mutable because packages will be demand-loaded during -- a compilation run as required. , ue_current_unit :: UnitId , ue_home_unit_graph :: !HomeUnitGraph -- See Note [Multiple Home Units] , ue_platform :: !Platform -- ^ Platform , ue_namever :: !GhcNameVersion -- ^ GHC name/version (used for dynamic library suffix) } ueEPS :: UnitEnv -> IO ExternalPackageState ueEPS = eucEPS . 
ue_eps initUnitEnv :: UnitId -> HomeUnitGraph -> GhcNameVersion -> Platform -> IO UnitEnv initUnitEnv cur_unit hug namever platform = do eps <- initExternalUnitCache return $ UnitEnv { ue_eps = eps , ue_home_unit_graph = hug , ue_current_unit = cur_unit , ue_platform = platform , ue_namever = namever } -- | Get home-unit -- -- Unsafe because the home-unit may not be set unsafeGetHomeUnit :: UnitEnv -> HomeUnit unsafeGetHomeUnit ue = ue_unsafeHomeUnit ue updateHpt_lazy :: (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv updateHpt_lazy = ue_updateHPT_lazy updateHpt :: (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv updateHpt = ue_updateHPT updateHug :: (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv updateHug = ue_updateHUG ue_transitiveHomeDeps :: UnitId -> UnitEnv -> [UnitId] ue_transitiveHomeDeps uid unit_env = Set.toList (loop Set.empty [uid]) where loop acc [] = acc loop acc (uid:uids) | uid `Set.member` acc = loop acc uids | otherwise = let hue = homeUnitDepends (homeUnitEnv_units (ue_findHomeUnitEnv uid unit_env)) in loop (Set.insert uid acc) (hue ++ uids) -- ----------------------------------------------------------------------------- -- Extracting information from the packages in scope -- Many of these functions take a list of packages: in those cases, -- the list is expected to contain the "dependent packages", -- i.e. those packages that were found to be depended on by the -- current module/program. These can be auto or non-auto packages, it -- doesn't really matter. The list is always combined with the list -- of preload (command-line) packages to determine which packages to -- use. -- | Lookup 'UnitInfo' for every preload unit from the UnitState, for every unit -- used to instantiate the home unit, and for every unit explicitly passed in -- the given list of UnitId. 
preloadUnitsInfo' :: UnitEnv -> [UnitId] -> MaybeErr UnitErr [UnitInfo] preloadUnitsInfo' unit_env ids0 = all_infos where unit_state = ue_units unit_env ids = ids0 ++ inst_ids inst_ids = case ue_homeUnit unit_env of Nothing -> [] Just home_unit -- An indefinite package will have insts to HOLE, -- which is not a real package. Don't look it up. -- Fixes #14525 | isHomeUnitIndefinite home_unit -> [] | otherwise -> map (toUnitId . moduleUnit . snd) (homeUnitInstantiations home_unit) pkg_map = unitInfoMap unit_state preload = preloadUnits unit_state all_pkgs = closeUnitDeps' pkg_map preload (ids `zip` repeat Nothing) all_infos = map (unsafeLookupUnitId unit_state) <$> all_pkgs -- | Lookup 'UnitInfo' for every preload unit from the UnitState and for every -- unit used to instantiate the home unit. preloadUnitsInfo :: UnitEnv -> MaybeErr UnitErr [UnitInfo] preloadUnitsInfo unit_env = preloadUnitsInfo' unit_env [] -- ----------------------------------------------------------------------------- data HomeUnitEnv = HomeUnitEnv { homeUnitEnv_units :: !UnitState -- ^ External units , homeUnitEnv_unit_dbs :: !(Maybe [UnitDatabase UnitId]) -- ^ Stack of unit databases for the target platform. -- -- This field is populated with the result of `initUnits`. -- -- 'Nothing' means the databases have never been read from disk. -- -- Usually we don't reload the databases from disk if they are -- cached, even if the database flags changed! , homeUnitEnv_dflags :: DynFlags -- ^ The dynamic flag settings , homeUnitEnv_hpt :: HomePackageTable -- ^ The home package table describes already-compiled -- home-package modules, /excluding/ the module we -- are compiling right now. -- (In one-shot mode the current module is the only -- home-package module, so homeUnitEnv_hpt is empty. All other -- modules count as \"external-package\" modules. -- However, even in GHCi mode, hi-boot interfaces are -- demand-loaded into the external-package table.) 
-- -- 'homeUnitEnv_hpt' is not mutable because we only demand-load -- external packages; the home package is eagerly -- loaded, module by module, by the compilation manager. -- -- The HPT may contain modules compiled earlier by @--make@ -- but not actually below the current module in the dependency -- graph. -- -- (This changes a previous invariant: changed Jan 05.) , homeUnitEnv_home_unit :: !(Maybe HomeUnit) -- ^ Home-unit } instance Outputable HomeUnitEnv where ppr hug = pprHPT (homeUnitEnv_hpt hug) homeUnitEnv_unsafeHomeUnit :: HomeUnitEnv -> HomeUnit homeUnitEnv_unsafeHomeUnit hue = case homeUnitEnv_home_unit hue of Nothing -> panic "homeUnitEnv_unsafeHomeUnit: No home unit" Just h -> h mkHomeUnitEnv :: DynFlags -> HomePackageTable -> Maybe HomeUnit -> HomeUnitEnv mkHomeUnitEnv dflags hpt home_unit = HomeUnitEnv { homeUnitEnv_units = emptyUnitState , homeUnitEnv_unit_dbs = Nothing , homeUnitEnv_dflags = dflags , homeUnitEnv_hpt = hpt , homeUnitEnv_home_unit = home_unit } -- | Test if the module comes from the home unit isUnitEnvInstalledModule :: UnitEnv -> InstalledModule -> Bool isUnitEnvInstalledModule ue m = maybe False (`isHomeInstalledModule` m) hu where hu = ue_unitHomeUnit_maybe (moduleUnit m) ue type HomeUnitGraph = UnitEnvGraph HomeUnitEnv lookupHugByModule :: Module -> HomeUnitGraph -> Maybe HomeModInfo lookupHugByModule mod hug | otherwise = do env <- (unitEnv_lookup_maybe (toUnitId $ moduleUnit mod) hug) lookupHptByModule (homeUnitEnv_hpt env) mod hugElts :: HomeUnitGraph -> [(UnitId, HomeUnitEnv)] hugElts hug = unitEnv_elts hug addHomeModInfoToHug :: HomeModInfo -> HomeUnitGraph -> HomeUnitGraph addHomeModInfoToHug hmi hug = unitEnv_alter go hmi_unit hug where hmi_mod :: Module hmi_mod = mi_module (hm_iface hmi) hmi_unit = toUnitId (moduleUnit hmi_mod) _hmi_mn = moduleName hmi_mod go :: Maybe HomeUnitEnv -> Maybe HomeUnitEnv go Nothing = pprPanic "addHomeInfoToHug" (ppr hmi_mod) go (Just hue) = Just (updateHueHpt (addHomeModInfoToHpt hmi) hue) 
updateHueHpt :: (HomePackageTable -> HomePackageTable) -> HomeUnitEnv -> HomeUnitEnv updateHueHpt f hue = let !hpt = f (homeUnitEnv_hpt hue) in hue { homeUnitEnv_hpt = hpt } lookupHug :: HomeUnitGraph -> UnitId -> ModuleName -> Maybe HomeModInfo lookupHug hug uid mod = unitEnv_lookup_maybe uid hug >>= flip lookupHpt mod . homeUnitEnv_hpt instance Outputable (UnitEnvGraph HomeUnitEnv) where ppr g = ppr [(k, length (homeUnitEnv_hpt hue)) | (k, hue) <- (unitEnv_elts g)] type UnitEnvGraphKey = UnitId newtype UnitEnvGraph v = UnitEnvGraph { unitEnv_graph :: Map UnitEnvGraphKey v } deriving (Functor, Foldable, Traversable) unitEnv_insert :: UnitEnvGraphKey -> v -> UnitEnvGraph v -> UnitEnvGraph v unitEnv_insert unitId env unitEnv = unitEnv { unitEnv_graph = Map.insert unitId env (unitEnv_graph unitEnv) } unitEnv_delete :: UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v unitEnv_delete uid unitEnv = unitEnv { unitEnv_graph = Map.delete uid (unitEnv_graph unitEnv) } unitEnv_adjust :: (v -> v) -> UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v unitEnv_adjust f uid unitEnv = unitEnv { unitEnv_graph = Map.adjust f uid (unitEnv_graph unitEnv) } unitEnv_alter :: (Maybe v -> Maybe v) -> UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v unitEnv_alter f uid unitEnv = unitEnv { unitEnv_graph = Map.alter f uid (unitEnv_graph unitEnv) } unitEnv_mapWithKey :: (UnitEnvGraphKey -> v -> b) -> UnitEnvGraph v -> UnitEnvGraph b unitEnv_mapWithKey f (UnitEnvGraph u) = UnitEnvGraph $ Map.mapWithKey f u unitEnv_new :: Map UnitEnvGraphKey v -> UnitEnvGraph v unitEnv_new m = UnitEnvGraph { unitEnv_graph = m } unitEnv_singleton :: UnitEnvGraphKey -> v -> UnitEnvGraph v unitEnv_singleton active m = UnitEnvGraph { unitEnv_graph = Map.singleton active m } unitEnv_map :: (v -> v) -> UnitEnvGraph v -> UnitEnvGraph v unitEnv_map f m = m { unitEnv_graph = Map.map f (unitEnv_graph m)} unitEnv_member :: UnitEnvGraphKey -> UnitEnvGraph v -> Bool unitEnv_member u env = Map.member u (unitEnv_graph 
env) unitEnv_lookup_maybe :: UnitEnvGraphKey -> UnitEnvGraph v -> Maybe v unitEnv_lookup_maybe u env = Map.lookup u (unitEnv_graph env) unitEnv_lookup :: UnitEnvGraphKey -> UnitEnvGraph v -> v unitEnv_lookup u env = fromJust $ unitEnv_lookup_maybe u env unitEnv_keys :: UnitEnvGraph v -> Set.Set UnitEnvGraphKey unitEnv_keys env = Map.keysSet (unitEnv_graph env) unitEnv_elts :: UnitEnvGraph v -> [(UnitEnvGraphKey, v)] unitEnv_elts env = Map.toList (unitEnv_graph env) unitEnv_hpts :: UnitEnvGraph HomeUnitEnv -> [HomePackageTable] unitEnv_hpts env = map homeUnitEnv_hpt (Map.elems (unitEnv_graph env)) unitEnv_foldWithKey :: (b -> UnitEnvGraphKey -> a -> b) -> b -> UnitEnvGraph a -> b unitEnv_foldWithKey f z (UnitEnvGraph g)= Map.foldlWithKey' f z g unitEnv_union :: (a -> a -> a) -> UnitEnvGraph a -> UnitEnvGraph a -> UnitEnvGraph a unitEnv_union f (UnitEnvGraph env1) (UnitEnvGraph env2) = UnitEnvGraph (Map.unionWith f env1 env2) -- ------------------------------------------------------- -- Query and modify UnitState in HomeUnitEnv -- ------------------------------------------------------- ue_units :: HasDebugCallStack => UnitEnv -> UnitState ue_units = homeUnitEnv_units . ue_currentHomeUnitEnv ue_setUnits :: UnitState -> UnitEnv -> UnitEnv ue_setUnits units ue = ue_updateHomeUnitEnv f (ue_currentUnit ue) ue where f hue = hue { homeUnitEnv_units = units } ue_unit_dbs :: UnitEnv -> Maybe [UnitDatabase UnitId] ue_unit_dbs = homeUnitEnv_unit_dbs . ue_currentHomeUnitEnv ue_setUnitDbs :: Maybe [UnitDatabase UnitId] -> UnitEnv -> UnitEnv ue_setUnitDbs unit_dbs ue = ue_updateHomeUnitEnv f (ue_currentUnit ue) ue where f hue = hue { homeUnitEnv_unit_dbs = unit_dbs } -- ------------------------------------------------------- -- Query and modify Home Package Table in HomeUnitEnv -- ------------------------------------------------------- ue_hpt :: HasDebugCallStack => UnitEnv -> HomePackageTable ue_hpt = homeUnitEnv_hpt . 
ue_currentHomeUnitEnv ue_updateHPT_lazy :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv ue_updateHPT_lazy f e = ue_updateUnitHPT_lazy f (ue_currentUnit e) e ue_updateHPT :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv ue_updateHPT f e = ue_updateUnitHPT f (ue_currentUnit e) e ue_updateHUG :: HasDebugCallStack => (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv ue_updateHUG f e = ue_updateUnitHUG f e ue_updateUnitHPT_lazy :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitId -> UnitEnv -> UnitEnv ue_updateUnitHPT_lazy f uid ue_env = ue_updateHomeUnitEnv update uid ue_env where update unitEnv = unitEnv { homeUnitEnv_hpt = f $ homeUnitEnv_hpt unitEnv } ue_updateUnitHPT :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitId -> UnitEnv -> UnitEnv ue_updateUnitHPT f uid ue_env = ue_updateHomeUnitEnv update uid ue_env where update unitEnv = let !res = f $ homeUnitEnv_hpt unitEnv in unitEnv { homeUnitEnv_hpt = res } ue_updateUnitHUG :: HasDebugCallStack => (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv ue_updateUnitHUG f ue_env = ue_env { ue_home_unit_graph = f (ue_home_unit_graph ue_env)} -- ------------------------------------------------------- -- Query and modify DynFlags in HomeUnitEnv -- ------------------------------------------------------- ue_setFlags :: HasDebugCallStack => DynFlags -> UnitEnv -> UnitEnv ue_setFlags dflags ue_env = ue_setUnitFlags (ue_currentUnit ue_env) dflags ue_env ue_setUnitFlags :: HasDebugCallStack => UnitId -> DynFlags -> UnitEnv -> UnitEnv ue_setUnitFlags uid dflags e = ue_updateUnitFlags (const dflags) uid e ue_unitFlags :: HasDebugCallStack => UnitId -> UnitEnv -> DynFlags ue_unitFlags uid ue_env = homeUnitEnv_dflags $ ue_findHomeUnitEnv uid ue_env ue_updateUnitFlags :: HasDebugCallStack => (DynFlags -> DynFlags) -> UnitId -> UnitEnv -> UnitEnv ue_updateUnitFlags f uid e = ue_updateHomeUnitEnv update uid e where 
update hue = hue { homeUnitEnv_dflags = f $ homeUnitEnv_dflags hue } -- ------------------------------------------------------- -- Query and modify home units in HomeUnitEnv -- ------------------------------------------------------- ue_homeUnit :: UnitEnv -> Maybe HomeUnit ue_homeUnit = homeUnitEnv_home_unit . ue_currentHomeUnitEnv ue_unsafeHomeUnit :: UnitEnv -> HomeUnit ue_unsafeHomeUnit ue = case ue_homeUnit ue of Nothing -> panic "unsafeGetHomeUnit: No home unit" Just h -> h ue_unitHomeUnit_maybe :: UnitId -> UnitEnv -> Maybe HomeUnit ue_unitHomeUnit_maybe uid ue_env = homeUnitEnv_unsafeHomeUnit <$> (ue_findHomeUnitEnv_maybe uid ue_env) ue_unitHomeUnit :: UnitId -> UnitEnv -> HomeUnit ue_unitHomeUnit uid ue_env = homeUnitEnv_unsafeHomeUnit $ ue_findHomeUnitEnv uid ue_env ue_all_home_unit_ids :: UnitEnv -> Set.Set UnitId ue_all_home_unit_ids = unitEnv_keys . ue_home_unit_graph -- ------------------------------------------------------- -- Query and modify the currently active unit -- ------------------------------------------------------- ue_currentHomeUnitEnv :: HasDebugCallStack => UnitEnv -> HomeUnitEnv ue_currentHomeUnitEnv e = case ue_findHomeUnitEnv_maybe (ue_currentUnit e) e of Just unitEnv -> unitEnv Nothing -> pprPanic "packageNotFound" $ (ppr $ ue_currentUnit e) $$ ppr (ue_home_unit_graph e) ue_setActiveUnit :: UnitId -> UnitEnv -> UnitEnv ue_setActiveUnit u ue_env = assertUnitEnvInvariant $ ue_env { ue_current_unit = u } ue_currentUnit :: UnitEnv -> UnitId ue_currentUnit = ue_current_unit -- ------------------------------------------------------- -- Operations on arbitrary elements of the home unit graph -- ------------------------------------------------------- ue_findHomeUnitEnv_maybe :: UnitId -> UnitEnv -> Maybe HomeUnitEnv ue_findHomeUnitEnv_maybe uid e = unitEnv_lookup_maybe uid (ue_home_unit_graph e) ue_findHomeUnitEnv :: HasDebugCallStack => UnitId -> UnitEnv -> HomeUnitEnv ue_findHomeUnitEnv uid e = case unitEnv_lookup_maybe uid 
(ue_home_unit_graph e) of Nothing -> pprPanic "Unit unknown to the internal unit environment" $ text "unit (" <> ppr uid <> text ")" $$ pprUnitEnvGraph e Just hue -> hue ue_updateHomeUnitEnv :: (HomeUnitEnv -> HomeUnitEnv) -> UnitId -> UnitEnv -> UnitEnv ue_updateHomeUnitEnv f uid e = e { ue_home_unit_graph = unitEnv_adjust f uid $ ue_home_unit_graph e } -- | Rename a unit id in the internal unit env. -- -- @'ue_renameUnitId' oldUnit newUnit UnitEnv@, it is assumed that the 'oldUnit' exists in the map, -- otherwise we panic. -- The 'DynFlags' associated with the home unit will have its field 'homeUnitId' set to 'newUnit'. ue_renameUnitId :: HasDebugCallStack => UnitId -> UnitId -> UnitEnv -> UnitEnv ue_renameUnitId oldUnit newUnit unitEnv = case ue_findHomeUnitEnv_maybe oldUnit unitEnv of Nothing -> pprPanic "Tried to rename unit, but it didn't exist" $ text "Rename old unit \"" <> ppr oldUnit <> text "\" to \""<> ppr newUnit <> text "\"" $$ nest 2 (pprUnitEnvGraph unitEnv) Just oldEnv -> let activeUnit :: UnitId !activeUnit = if ue_currentUnit unitEnv == oldUnit then newUnit else ue_currentUnit unitEnv newInternalUnitEnv = oldEnv { homeUnitEnv_dflags = (homeUnitEnv_dflags oldEnv) { homeUnitId_ = newUnit } } in unitEnv { ue_current_unit = activeUnit , ue_home_unit_graph = unitEnv_insert newUnit newInternalUnitEnv $ unitEnv_delete oldUnit $ ue_home_unit_graph unitEnv } -- --------------------------------------------- -- Asserts to enforce invariants for the UnitEnv -- --------------------------------------------- assertUnitEnvInvariant :: HasDebugCallStack => UnitEnv -> UnitEnv assertUnitEnvInvariant u = if ue_current_unit u `unitEnv_member` ue_home_unit_graph u then u else pprPanic "invariant" (ppr (ue_current_unit u) $$ ppr (ue_home_unit_graph u)) -- ----------------------------------------------------------------------------- -- Pretty output functions -- ----------------------------------------------------------------------------- pprUnitEnvGraph :: UnitEnv -> 
SDoc pprUnitEnvGraph env = text "pprInternalUnitMap" $$ nest 2 (pprHomeUnitGraph $ ue_home_unit_graph env) pprHomeUnitGraph :: HomeUnitGraph -> SDoc pprHomeUnitGraph unitEnv = vcat (map (\(k, v) -> pprHomeUnitEnv k v) $ Map.assocs $ unitEnv_graph unitEnv) pprHomeUnitEnv :: UnitId -> HomeUnitEnv -> SDoc pprHomeUnitEnv uid env = ppr uid <+> text "(flags:" <+> ppr (homeUnitId_ $ homeUnitEnv_dflags env) <> text "," <+> ppr (fmap homeUnitId $ homeUnitEnv_home_unit env) <> text ")" <+> text "->" $$ nest 4 (pprHPT $ homeUnitEnv_hpt env) {- Note [Multiple Home Units] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The basic idea of multiple home units is quite simple. Instead of allowing one home unit, you can multiple home units The flow: 1. Dependencies between units are specified between each other in the normal manner, a unit is identified by the -this-unit-id flag and dependencies specified by the normal -package-id flag. 2. Downsweep is augmented to know to know how to look for dependencies in any home unit. 3. The rest of the compiler is modified appropriately to offset paths to the right places. 4. --make mode can parallelise between home units and multiple units are allowed to produce linkables. Closure Property ---------------- You must perform a clean cut of the dependency graph. > Any dependency which is not a home unit must not (transitively) depend on a home unit. For example, if you have three packages p, q and r, then if p depends on q which depends on r then it is illegal to load both p and r as home units but not q, because q is a dependency of the home unit p which depends on another home unit r. Offsetting Paths ---------------- The main complication to the implementation is to do with offsetting paths appropriately. For a long time it has been assumed that GHC will execute in the top-directory for a unit, normally where the .cabal file is and all paths are interpreted relative to there. 
When you have multiple home units then it doesn't make sense to pick one of these units to choose as the base-unit, and you can't robustly change directories when using parallelism. Therefore there is an option `-working-directory`, which tells GHC where the relative paths for each unit should be interpreted relative to. For example, if you specify `-working-dir a -ib`, then GHC will offset the relative path `b`, by `a`, and look for source files in `a/b`. The same thing happens for any path passed on the command line. A non-exhaustive list is * -i * -I * -odir/-hidir/-outputdir/-stubdir/-hiedir * Target files passed on the command line There is also a template-haskell function, makeRelativeToProject, which uses the `-working-directory` option in order to allow users to offset their own relative paths. -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/External.hs0000644000000000000000000001671407346545000020754 0ustar0000000000000000module GHC.Unit.External ( ExternalUnitCache (..) , initExternalUnitCache , eucEPS , ExternalPackageState (..) , initExternalPackageState , EpsStats(..) 
, addEpsInStats , PackageTypeEnv , PackageIfaceTable , PackageInstEnv , PackageFamInstEnv , PackageRuleBase , PackageCompleteMatches , emptyPackageIfaceTable ) where import GHC.Prelude import GHC.Unit import GHC.Unit.Module.ModIface import GHC.Core.FamInstEnv import GHC.Core.InstEnv ( InstEnv, emptyInstEnv ) import GHC.Core.Opt.ConstantFold import GHC.Core.Rules ( RuleBase, mkRuleBase) import GHC.Types.Annotations ( AnnEnv, emptyAnnEnv ) import GHC.Types.CompleteMatch import GHC.Types.TypeEnv import GHC.Types.Unique.DSet import GHC.Linker.Types (Linkable) import Data.IORef type PackageTypeEnv = TypeEnv type PackageRuleBase = RuleBase type PackageInstEnv = InstEnv type PackageFamInstEnv = FamInstEnv type PackageAnnEnv = AnnEnv type PackageCompleteMatches = CompleteMatches -- | Helps us find information about modules in the imported packages type PackageIfaceTable = ModuleEnv ModIface -- Domain = modules in the imported packages -- | Constructs an empty PackageIfaceTable emptyPackageIfaceTable :: PackageIfaceTable emptyPackageIfaceTable = emptyModuleEnv -- | Information about the currently loaded external packages. -- This is mutable because packages will be demand-loaded during -- a compilation run as required. newtype ExternalUnitCache = ExternalUnitCache { euc_eps :: IORef ExternalPackageState } initExternalUnitCache :: IO ExternalUnitCache initExternalUnitCache = ExternalUnitCache <$> newIORef initExternalPackageState eucEPS :: ExternalUnitCache -> IO ExternalPackageState eucEPS = readIORef . 
euc_eps initExternalPackageState :: ExternalPackageState initExternalPackageState = EPS { eps_is_boot = emptyInstalledModuleEnv , eps_PIT = emptyPackageIfaceTable , eps_free_holes = emptyInstalledModuleEnv , eps_PTE = emptyTypeEnv , eps_iface_bytecode = emptyModuleEnv , eps_inst_env = emptyInstEnv , eps_fam_inst_env = emptyFamInstEnv , eps_rule_base = mkRuleBase builtinRules , -- Initialise the EPS rule pool with the built-in rules eps_mod_fam_inst_env = emptyModuleEnv , eps_complete_matches = [] , eps_ann_env = emptyAnnEnv , eps_stats = EpsStats { n_ifaces_in = 0 , n_decls_in = 0 , n_decls_out = 0 , n_insts_in = 0 , n_insts_out = 0 , n_rules_in = length builtinRules , n_rules_out = 0 } } -- | Information about other packages that we have slurped in by reading -- their interface files data ExternalPackageState = EPS { eps_is_boot :: !(InstalledModuleEnv ModuleNameWithIsBoot), -- ^ In OneShot mode (only), home-package modules -- accumulate in the external package state, and are -- sucked in lazily. For these home-pkg modules -- (only) we need to record which are boot modules. -- We set this field after loading all the -- explicitly-imported interfaces, but before doing -- anything else -- -- The 'ModuleName' part is not necessary, but it's useful for -- debug prints, and it's convenient because this field comes -- direct from 'GHC.Tc.Utils.imp_dep_mods' eps_PIT :: !PackageIfaceTable, -- ^ The 'ModIface's for modules in external packages -- whose interfaces we have opened. -- The declarations in these interface files are held in the -- 'eps_decls', 'eps_inst_env', 'eps_fam_inst_env' and 'eps_rules' -- fields of this record, not in the 'mi_decls' fields of the -- interface we have sucked in. -- -- What /is/ in the PIT is: -- -- * The Module -- -- * Fingerprint info -- -- * Its exports -- -- * Fixities -- -- * Deprecations and warnings eps_free_holes :: InstalledModuleEnv (UniqDSet ModuleName), -- ^ Cache for 'mi_free_holes'. 
Ordinarily, we can rely on -- the 'eps_PIT' for this information, EXCEPT that when -- we do dependency analysis, we need to look at the -- 'Dependencies' of our imports to determine what their -- precise free holes are ('moduleFreeHolesPrecise'). We -- don't want to repeatedly reread in the interface -- for every import, so cache it here. When the PIT -- gets filled in we can drop these entries. eps_PTE :: !PackageTypeEnv, -- ^ Result of typechecking all the external package -- interface files we have sucked in. The domain of -- the mapping is external-package modules -- | If an interface was written with @-fwrite-if-simplified-core@, this -- will contain an IO action that compiles bytecode from core bindings. -- -- See Note [Interface Files with Core Definitions] eps_iface_bytecode :: !(ModuleEnv (IO Linkable)), eps_inst_env :: !PackageInstEnv, -- ^ The total 'InstEnv' accumulated -- from all the external-package modules eps_fam_inst_env :: !PackageFamInstEnv,-- ^ The total 'FamInstEnv' accumulated -- from all the external-package modules eps_rule_base :: !PackageRuleBase, -- ^ The total 'RuleEnv' accumulated -- from all the external-package modules eps_ann_env :: !PackageAnnEnv, -- ^ The total 'AnnEnv' accumulated -- from all the external-package modules eps_complete_matches :: !PackageCompleteMatches, -- ^ The total 'CompleteMatches' accumulated -- from all the external-package modules eps_mod_fam_inst_env :: !(ModuleEnv FamInstEnv), -- ^ The family instances accumulated from external -- packages, keyed off the module that declared them eps_stats :: !EpsStats -- ^ Statistics about what was loaded from external packages } -- | Accumulated statistics about what we are putting into the 'ExternalPackageState'. 
-- \"In\" means stuff that is just /read/ from interface files, -- \"Out\" means actually sucked in and type-checked data EpsStats = EpsStats { n_ifaces_in , n_decls_in, n_decls_out , n_rules_in, n_rules_out , n_insts_in, n_insts_out :: !Int } addEpsInStats :: EpsStats -> Int -> Int -> Int -> EpsStats -- ^ Add stats for one newly-read interface addEpsInStats stats n_decls n_insts n_rules = stats { n_ifaces_in = n_ifaces_in stats + 1 , n_decls_in = n_decls_in stats + n_decls , n_insts_in = n_insts_in stats + n_insts , n_rules_in = n_rules_in stats + n_rules } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Finder/0000755000000000000000000000000007346545000020034 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Finder/Types.hs0000644000000000000000000001120707346545000021475 0ustar0000000000000000module GHC.Unit.Finder.Types ( FinderCache (..) , FinderCacheState , FileCacheState , FindResult (..) , InstalledFindResult (..) , FinderOpts(..) ) where import GHC.Prelude import GHC.Unit import GHC.Data.OsPath import qualified Data.Map as M import GHC.Fingerprint import GHC.Platform.Ways import GHC.Unit.Env import GHC.Data.FastString import qualified Data.Set as Set -- | The 'FinderCache' maps modules to the result of -- searching for that module. It records the results of searching for -- modules along the search path. On @:load@, we flush the entire -- contents of this cache. -- type FinderCacheState = InstalledModuleEnv InstalledFindResult type FileCacheState = M.Map FilePath Fingerprint data FinderCache = FinderCache { flushFinderCaches :: UnitEnv -> IO () -- ^ remove all the home modules from the cache; package modules are -- assumed to not move around during a session; also flush the file hash -- cache. , addToFinderCache :: InstalledModuleWithIsBoot -> InstalledFindResult -> IO () -- ^ Add a found location to the cache for the module. 
, lookupFinderCache :: InstalledModuleWithIsBoot -> IO (Maybe InstalledFindResult) -- ^ Look for a location in the cache. , lookupFileCache :: FilePath -> IO Fingerprint -- ^ Look for the hash of a file in the cache. This should add it to the -- cache. If the file doesn't exist, raise an IOException. } data InstalledFindResult = InstalledFound ModLocation InstalledModule | InstalledNoPackage UnitId | InstalledNotFound [OsPath] (Maybe UnitId) -- | The result of searching for an imported module. -- -- NB: FindResult manages both user source-import lookups -- (which can result in 'Module') as well as direct imports -- for interfaces (which always result in 'InstalledModule'). data FindResult = Found ModLocation Module -- ^ The module was found | NoPackage Unit -- ^ The requested unit was not found | FoundMultiple [(Module, ModuleOrigin)] -- ^ _Error_: both in multiple packages -- | Not found | NotFound { fr_paths :: [FilePath] -- ^ Places where I looked , fr_pkg :: Maybe Unit -- ^ Just p => module is in this unit's -- manifest, but couldn't find the -- .hi file , fr_mods_hidden :: [Unit] -- ^ Module is in these units, -- but the *module* is hidden , fr_pkgs_hidden :: [Unit] -- ^ Module is in these units, -- but the *unit* is hidden -- | Module is in these units, but it is unusable , fr_unusables :: [UnusableUnit] , fr_suggestions :: [ModuleSuggestion] -- ^ Possible mis-spelled modules } -- | Locations and information the finder cares about. -- -- Should be taken from 'DynFlags' via 'initFinderOpts'. data FinderOpts = FinderOpts { finder_importPaths :: [OsPath] -- ^ Where are we allowed to look for Modules and Source files , finder_lookupHomeInterfaces :: Bool -- ^ When looking up a home module: -- -- * 'True': search interface files (e.g. in '-c' mode) -- * 'False': search source files (e.g. in '--make' mode) , finder_bypassHiFileCheck :: Bool -- ^ Don't check that an imported interface file actually exists -- if it can only be at one location. 
The interface will be reported -- as `InstalledFound` even if the file doesn't exist, so this is -- only useful in specific cases (e.g. to generate dependencies -- with `ghc -M`) , finder_ways :: Ways , finder_enableSuggestions :: Bool -- ^ If we encounter unknown modules, should we suggest modules -- that have a similar name. , finder_workingDirectory :: Maybe OsPath , finder_thisPackageName :: Maybe FastString , finder_hiddenModules :: Set.Set ModuleName , finder_reexportedModules :: M.Map ModuleName ModuleName -- Reverse mapping, if you are looking for this name then look for this module. , finder_hieDir :: Maybe OsPath , finder_hieSuf :: OsString , finder_hiDir :: Maybe OsPath , finder_hiSuf :: OsString , finder_dynHiSuf :: OsString , finder_objectDir :: Maybe OsPath , finder_objectSuf :: OsString , finder_dynObjectSuf :: OsString , finder_stubDir :: Maybe OsPath } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Home.hs0000644000000000000000000002047707346545000020063 0ustar0000000000000000-- | The home unit is the unit (i.e. compiled package) that contains the module -- we are compiling/typechecking. module GHC.Unit.Home ( GenHomeUnit (..) 
, HomeUnit , homeUnitId , homeUnitInstantiations , homeUnitInstanceOf , homeUnitInstanceOfMaybe , homeUnitAsUnit , homeUnitMap -- * Predicates , isHomeUnitIndefinite , isHomeUnitDefinite , isHomeUnitInstantiating , isHomeUnit , isHomeUnitId , isHomeUnitInstanceOf , isHomeModule , isHomeInstalledModule , notHomeUnitId , notHomeModule , notHomeModuleMaybe , notHomeInstalledModule , notHomeInstalledModuleMaybe -- * Helpers , mkHomeModule , mkHomeInstalledModule , homeModuleInstantiation , homeModuleNameInstantiation ) where import GHC.Prelude import GHC.Unit.Types import Data.Maybe import Language.Haskell.Syntax.Module.Name -- | Information about the home unit (i.e., the until that will contain the -- modules we are compiling) -- -- The unit identifier of the instantiating units is left open to allow -- switching from UnitKey (what is provided by the user) to UnitId (internal -- unit identifier) with `homeUnitMap`. -- -- TODO: this isn't implemented yet. UnitKeys are still converted too early into -- UnitIds in GHC.Unit.State.readUnitDataBase data GenHomeUnit u = DefiniteHomeUnit UnitId (Maybe (u, GenInstantiations u)) -- ^ Definite home unit (i.e. that we can compile). -- -- Nothing: not an instantiated unit -- Just (i,insts): made definite by instantiating "i" with "insts" | IndefiniteHomeUnit UnitId (GenInstantiations u) -- ^ Indefinite home unit (i.e. that we can only typecheck) -- -- All the holes are instantiated with fake modules from the Hole unit. 
-- See Note [Representation of module/name variables] in "GHC.Unit" type HomeUnit = GenHomeUnit UnitId -- | Return home unit id homeUnitId :: GenHomeUnit u -> UnitId homeUnitId (DefiniteHomeUnit u _) = u homeUnitId (IndefiniteHomeUnit u _) = u -- | Return home unit instantiations homeUnitInstantiations :: GenHomeUnit u -> GenInstantiations u homeUnitInstantiations (DefiniteHomeUnit _ Nothing) = [] homeUnitInstantiations (DefiniteHomeUnit _ (Just (_,is))) = is homeUnitInstantiations (IndefiniteHomeUnit _ is) = is -- | Return the unit id of the unit that is instantiated by the home unit. -- -- E.g. if home unit = q[A=p:B,...] we return q. -- -- If the home unit is not an instance of another unit, we return its own unit -- id (it is an instance of itself if you will). homeUnitInstanceOf :: HomeUnit -> UnitId homeUnitInstanceOf h = fromMaybe (homeUnitId h) (homeUnitInstanceOfMaybe h) -- | Return the unit id of the unit that is instantiated by the home unit. -- -- E.g. if home unit = q[A=p:B,...] we return (Just q). -- -- If the home unit is not an instance of another unit, we return Nothing. homeUnitInstanceOfMaybe :: GenHomeUnit u -> Maybe u homeUnitInstanceOfMaybe (DefiniteHomeUnit _ (Just (u,_))) = Just u homeUnitInstanceOfMaybe _ = Nothing -- | Return the home unit as a normal unit. -- -- We infer from the home unit itself the kind of unit we create: -- 1. If the home unit is definite, we must be compiling so we return a real -- unit. The definite home unit may be the result of a unit instantiation, -- say `p = q[A=r:X]`. In this case we could have returned a virtual unit -- `q[A=r:X]` but it's not what the clients of this function expect, -- especially because `p` is lost when we do this. The unit id of a virtual -- unit is made up internally so `unitId(q[A=r:X])` is not equal to `p`. -- -- 2. If the home unit is indefinite we can only create a virtual unit from -- it. 
It's ok because we must be only typechecking the home unit so we won't -- produce any code object that rely on the unit id of this virtual unit. homeUnitAsUnit :: HomeUnit -> Unit homeUnitAsUnit (DefiniteHomeUnit u _) = RealUnit (Definite u) homeUnitAsUnit (IndefiniteHomeUnit u is) = mkVirtUnit u is -- | Map over the unit identifier for instantiating units homeUnitMap :: IsUnitId v => (u -> v) -> GenHomeUnit u -> GenHomeUnit v homeUnitMap _ (DefiniteHomeUnit u Nothing) = DefiniteHomeUnit u Nothing homeUnitMap f (DefiniteHomeUnit u (Just (i,is))) = DefiniteHomeUnit u (Just (f i, mapInstantiations f is)) homeUnitMap f (IndefiniteHomeUnit u is) = IndefiniteHomeUnit u (mapInstantiations f is) ---------------------------- -- Predicates ---------------------------- -- | Test if we are type-checking an indefinite unit -- -- (if it is not, we should never use on-the-fly renaming) isHomeUnitIndefinite :: GenHomeUnit u -> Bool isHomeUnitIndefinite (DefiniteHomeUnit {}) = False isHomeUnitIndefinite (IndefiniteHomeUnit {}) = True -- | Test if we are compiling a definite unit -- -- (if it is, we should never use on-the-fly renaming) isHomeUnitDefinite :: GenHomeUnit u -> Bool isHomeUnitDefinite (DefiniteHomeUnit {}) = True isHomeUnitDefinite (IndefiniteHomeUnit {}) = False -- | Test if we are compiling by instantiating a definite unit isHomeUnitInstantiating :: GenHomeUnit u -> Bool isHomeUnitInstantiating u = isHomeUnitDefinite u && not (null (homeUnitInstantiations u)) -- | Test if the unit is the home unit isHomeUnit :: HomeUnit -> Unit -> Bool isHomeUnit hu u = u == homeUnitAsUnit hu -- | Test if the unit-id is the home unit-id isHomeUnitId :: GenHomeUnit u -> UnitId -> Bool isHomeUnitId hu uid = uid == homeUnitId hu -- | Test if the unit-id is not the home unit-id notHomeUnitId :: Maybe (GenHomeUnit u) -> UnitId -> Bool notHomeUnitId Nothing _ = True notHomeUnitId (Just hu) uid = not (isHomeUnitId hu uid) -- | Test if the home unit is an instance of the given unit-id 
isHomeUnitInstanceOf :: HomeUnit -> UnitId -> Bool isHomeUnitInstanceOf hu u = homeUnitInstanceOf hu == u -- | Test if the module comes from the home unit isHomeModule :: HomeUnit -> Module -> Bool isHomeModule hu m = isHomeUnit hu (moduleUnit m) -- | Test if the module comes from the home unit isHomeInstalledModule :: GenHomeUnit u -> InstalledModule -> Bool isHomeInstalledModule hu m = isHomeUnitId hu (moduleUnit m) -- | Test if a module doesn't come from the given home unit notHomeInstalledModule :: GenHomeUnit u -> InstalledModule -> Bool notHomeInstalledModule hu m = not (isHomeInstalledModule hu m) -- | Test if a module doesn't come from the given home unit notHomeInstalledModuleMaybe :: Maybe (GenHomeUnit u) -> InstalledModule -> Bool notHomeInstalledModuleMaybe mh m = fromMaybe True $ fmap (`notHomeInstalledModule` m) mh -- | Test if a module doesn't come from the given home unit notHomeModule :: HomeUnit -> Module -> Bool notHomeModule hu m = not (isHomeModule hu m) -- | Test if a module doesn't come from the given home unit notHomeModuleMaybe :: Maybe HomeUnit -> Module -> Bool notHomeModuleMaybe mh m = fromMaybe True $ fmap (`notHomeModule` m) mh ---------------------------- -- helpers ---------------------------- -- | Make a module in home unit mkHomeModule :: HomeUnit -> ModuleName -> Module mkHomeModule hu = mkModule (homeUnitAsUnit hu) -- | Make a module in home unit mkHomeInstalledModule :: GenHomeUnit u -> ModuleName -> InstalledModule mkHomeInstalledModule hu = mkModule (homeUnitId hu) -- | Return the module that is used to instantiate the given home module name. -- If the ModuleName doesn't refer to a signature, return the actual home -- module. -- -- E.g., the instantiating module of @A@ in @p[A=q[]:B]@ is @q[]:B@. -- the instantiating module of @A@ in @p@ is @p:A@. 
homeModuleNameInstantiation :: HomeUnit -> ModuleName -> Module homeModuleNameInstantiation hu mod_name = case lookup mod_name (homeUnitInstantiations hu) of Nothing -> mkHomeModule hu mod_name Just mod -> mod -- | Return the module that is used to instantiate the given home module. -- -- If the given module isn't a module hole, return the actual home module. -- -- E.g., the instantiating module of @p:A@ in @p[A=q[]:B]@ is @q[]:B@. -- the instantiating module of @r:A@ in @p[A=q[]:B]@ is @r:A@. -- the instantiating module of @p:A@ in @p@ is @p:A@. -- the instantiating module of @r:A@ in @p@ is @r:A@. homeModuleInstantiation :: Maybe HomeUnit -> Module -> Module homeModuleInstantiation mhu mod | Just hu <- mhu , isHomeModule hu mod = homeModuleNameInstantiation hu (moduleName mod) | otherwise = mod ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Home/0000755000000000000000000000000007346545000017515 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Home/ModInfo.hs0000644000000000000000000001556507346545000021420 0ustar0000000000000000-- | Info about modules in the "home" unit module GHC.Unit.Home.ModInfo ( HomeModInfo (..) , HomeModLinkable(..) 
, homeModInfoObject , homeModInfoByteCode , emptyHomeModInfoLinkable , justBytecode , justObjects , bytecodeAndObjects , HomePackageTable , emptyHomePackageTable , lookupHpt , eltsHpt , concatHpt , filterHpt , allHpt , anyHpt , mapHpt , delFromHpt , addToHpt , addHomeModInfoToHpt , addListToHpt , lookupHptDirectly , lookupHptByModule , listToHpt , listHMIToHpt , pprHPT ) where import GHC.Prelude import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModDetails import GHC.Unit.Module import GHC.Linker.Types ( Linkable(..), linkableIsNativeCodeOnly ) import GHC.Types.Unique import GHC.Types.Unique.DFM import GHC.Utils.Outputable import Data.List (sortOn) import Data.Ord import GHC.Utils.Panic -- | Information about modules in the package being compiled data HomeModInfo = HomeModInfo { hm_iface :: !ModIface -- ^ The basic loaded interface file: every loaded module has one of -- these, even if it is imported from another package , hm_details :: ModDetails -- ^ Extra information that has been created from the 'ModIface' for -- the module, typically during typechecking -- This field is LAZY because a ModDetails is constructed by knot tying. , hm_linkable :: !HomeModLinkable -- ^ The actual artifact we would like to link to access things in -- this module. See Note [Home module build products] -- -- 'hm_linkable' might be empty: -- -- 1. If this is an .hs-boot module -- -- 2. Temporarily during compilation if we pruned away -- the old linkable because it was out of date. -- -- When re-linking a module ('GHC.Driver.Main.HscNoRecomp'), we construct the -- 'HomeModInfo' by building a new 'ModDetails' from the old -- 'ModIface' (only). } homeModInfoByteCode :: HomeModInfo -> Maybe Linkable homeModInfoByteCode = homeMod_bytecode . hm_linkable homeModInfoObject :: HomeModInfo -> Maybe Linkable homeModInfoObject = homeMod_object . 
hm_linkable emptyHomeModInfoLinkable :: HomeModLinkable emptyHomeModInfoLinkable = HomeModLinkable Nothing Nothing -- See Note [Home module build products] data HomeModLinkable = HomeModLinkable { homeMod_bytecode :: !(Maybe Linkable) , homeMod_object :: !(Maybe Linkable) } instance Outputable HomeModLinkable where ppr (HomeModLinkable l1 l2) = ppr l1 $$ ppr l2 justBytecode :: Linkable -> HomeModLinkable justBytecode lm = assertPpr (not (linkableIsNativeCodeOnly lm)) (ppr lm) $ emptyHomeModInfoLinkable { homeMod_bytecode = Just lm } justObjects :: Linkable -> HomeModLinkable justObjects lm = assertPpr (linkableIsNativeCodeOnly lm) (ppr lm) $ emptyHomeModInfoLinkable { homeMod_object = Just lm } bytecodeAndObjects :: Linkable -> Linkable -> HomeModLinkable bytecodeAndObjects bc o = assertPpr (not (linkableIsNativeCodeOnly bc) && linkableIsNativeCodeOnly o) (ppr bc $$ ppr o) (HomeModLinkable (Just bc) (Just o)) {- Note [Home module build products] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When compiling a home module we can produce some combination of the following build products. 1. A byte code linkable, for use with the byte code interpreter. 2. An object file linkable, for linking a final executable or the byte code interpreter What we have produced is recorded in the `HomeModLinkable` type. In the case that these linkables are produced they are stored in the relevant field so that subsequent modules can retrieve and use them as necessary. * `-fbyte-code` will *only* produce a byte code linkable. This is the default in GHCi. * `-fobject-code` will *only* produce an object file linkable. This is the default in -c and --make mode. * `-fbyte-code-and-object-code` produces both a byte-code and object file linkable. So both fields are populated. Why would you want to produce both an object file and byte code linkable? If you also want to use `-fprefer-byte-code` then you should probably also use this flag to make sure that byte code is generated for your modules. 
-} -- | Helps us find information about modules in the home package type HomePackageTable = DModuleNameEnv HomeModInfo -- Domain = modules in the home unit that have been fully compiled -- "home" unit id cached (implicit) here for convenience -- | Constructs an empty HomePackageTable emptyHomePackageTable :: HomePackageTable emptyHomePackageTable = emptyUDFM lookupHpt :: HomePackageTable -> ModuleName -> Maybe HomeModInfo lookupHpt = lookupUDFM lookupHptDirectly :: HomePackageTable -> Unique -> Maybe HomeModInfo lookupHptDirectly = lookupUDFM_Directly eltsHpt :: HomePackageTable -> [HomeModInfo] eltsHpt = eltsUDFM -- | Like @concatMap f . 'eltsHpt'@, but filters out all 'HomeModInfo' for which -- @f@ returns the empty list before doing the sort inherent to 'eltsUDFM'. concatHpt :: (HomeModInfo -> [a]) -> HomePackageTable -> [a] concatHpt f = concat . eltsUDFM . mapMaybeUDFM g where g hmi = case f hmi of { [] -> Nothing; as -> Just as } filterHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> HomePackageTable filterHpt = filterUDFM allHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> Bool allHpt = allUDFM anyHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> Bool anyHpt = anyUDFM mapHpt :: (HomeModInfo -> HomeModInfo) -> HomePackageTable -> HomePackageTable mapHpt = mapUDFM delFromHpt :: HomePackageTable -> ModuleName -> HomePackageTable delFromHpt = delFromUDFM addToHpt :: HomePackageTable -> ModuleName -> HomeModInfo -> HomePackageTable addToHpt = addToUDFM addHomeModInfoToHpt :: HomeModInfo -> HomePackageTable -> HomePackageTable addHomeModInfoToHpt hmi hpt = addToHpt hpt (moduleName (mi_module (hm_iface hmi))) hmi addListToHpt :: HomePackageTable -> [(ModuleName, HomeModInfo)] -> HomePackageTable addListToHpt = addListToUDFM listToHpt :: [(ModuleName, HomeModInfo)] -> HomePackageTable listToHpt = listToUDFM listHMIToHpt :: [HomeModInfo] -> HomePackageTable listHMIToHpt hmis = listToHpt [(moduleName (mi_module (hm_iface hmi)), hmi) | hmi <- sorted_hmis] where 
-- Sort to put Non-boot things last, so they overwrite the boot interfaces -- in the HPT, other than that, the order doesn't matter sorted_hmis = sortOn (Down . mi_boot . hm_iface) hmis lookupHptByModule :: HomePackageTable -> Module -> Maybe HomeModInfo -- The HPT is indexed by ModuleName, not Module, -- we must check for a hit on the right Module lookupHptByModule hpt mod = case lookupHpt hpt (moduleName mod) of Just hm | mi_module (hm_iface hm) == mod -> Just hm _otherwise -> Nothing pprHPT :: HomePackageTable -> SDoc -- A bit arbitrary for now pprHPT hpt = pprUDFM hpt $ \hms -> vcat [ ppr (mi_module (hm_iface hm)) | hm <- hms ] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Info.hs0000644000000000000000000002223107346545000020054 0ustar0000000000000000{-# LANGUAGE RecordWildCards, FlexibleInstances, MultiParamTypeClasses #-} -- | Info about installed units (compiled libraries) module GHC.Unit.Info ( GenericUnitInfo (..) , GenUnitInfo , UnitInfo , UnitKey (..) , UnitKeyInfo , mkUnitKeyInfo , mapUnitInfo , mkUnitPprInfo , mkUnit , PackageId(..) , PackageName(..) , Version(..) , unitPackageNameString , unitPackageIdString , pprUnitInfo , collectIncludeDirs , collectExtraCcOpts , collectLibraryDirs , collectFrameworks , collectFrameworksDirs , unitHsLibs ) where import GHC.Prelude import GHC.Platform.Ways import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Unique import GHC.Data.FastString import qualified GHC.Data.ShortText as ST import GHC.Unit.Module as Module import GHC.Unit.Ppr import GHC.Unit.Database import GHC.Settings import Data.Version import Data.Bifunctor import Data.List (isPrefixOf, stripPrefix) -- | Information about an installed unit -- -- We parameterize on the unit identifier: -- * UnitKey: identifier used in the database (cf 'UnitKeyInfo') -- * UnitId: identifier used to generate code (cf 'UnitInfo') -- -- These two identifiers are different for wired-in packages. 
See Note [About -- units] in "GHC.Unit" type GenUnitInfo unit = GenericUnitInfo PackageId PackageName unit ModuleName (GenModule (GenUnit unit)) -- | Information about an installed unit (units are identified by their database -- UnitKey) type UnitKeyInfo = GenUnitInfo UnitKey -- | Information about an installed unit (units are identified by their internal -- UnitId) type UnitInfo = GenUnitInfo UnitId -- | Convert a DbUnitInfo (read from a package database) into `UnitKeyInfo` mkUnitKeyInfo :: DbUnitInfo -> UnitKeyInfo mkUnitKeyInfo = mapGenericUnitInfo mkUnitKey' mkPackageIdentifier' mkPackageName' mkModuleName' mkModule' where mkPackageIdentifier' = PackageId . mkFastStringByteString mkPackageName' = PackageName . mkFastStringByteString mkUnitKey' = UnitKey . mkFastStringByteString mkModuleName' = mkModuleNameFS . mkFastStringByteString mkVirtUnitKey' i = case i of DbInstUnitId cid insts -> mkVirtUnit (mkUnitKey' cid) (fmap (bimap mkModuleName' mkModule') insts) DbUnitId uid -> RealUnit (Definite (mkUnitKey' uid)) mkModule' m = case m of DbModule uid n -> mkModule (mkVirtUnitKey' uid) (mkModuleName' n) DbModuleVar n -> mkHoleModule (mkModuleName' n) -- | Map over the unit parameter mapUnitInfo :: IsUnitId v => (u -> v) -> GenUnitInfo u -> GenUnitInfo v mapUnitInfo f = mapGenericUnitInfo f -- unit identifier id -- package identifier id -- package name id -- module name (fmap (mapGenUnit f)) -- instantiating modules newtype PackageId = PackageId FastString deriving (Eq) newtype PackageName = PackageName { unPackageName :: FastString } deriving (Eq) instance Uniquable PackageId where getUnique (PackageId n) = getUnique n instance Uniquable PackageName where getUnique (PackageName n) = getUnique n instance Outputable PackageId where ppr (PackageId str) = ftext str instance Outputable PackageName where ppr (PackageName str) = ftext str unitPackageIdString :: GenUnitInfo u -> String unitPackageIdString pkg = unpackFS str where PackageId str = unitPackageId pkg 
unitPackageNameString :: GenUnitInfo u -> String unitPackageNameString pkg = unpackFS str where PackageName str = unitPackageName pkg pprUnitInfo :: UnitInfo -> SDoc pprUnitInfo GenericUnitInfo {..} = vcat [ field "name" (ppr unitPackageName), field "version" (text (showVersion unitPackageVersion)), field "id" (ppr unitId), field "exposed" (ppr unitIsExposed), field "exposed-modules" (ppr unitExposedModules), field "hidden-modules" (fsep (map ppr unitHiddenModules)), field "trusted" (ppr unitIsTrusted), field "import-dirs" (fsep (map (text . ST.unpack) unitImportDirs)), field "library-dirs" (fsep (map (text . ST.unpack) unitLibraryDirs)), field "dynamic-library-dirs" (fsep (map (text . ST.unpack) unitLibraryDynDirs)), field "hs-libraries" (fsep (map (text . ST.unpack) unitLibraries)), field "extra-libraries" (fsep (map (text . ST.unpack) unitExtDepLibsSys)), field "extra-ghci-libraries" (fsep (map (text . ST.unpack) unitExtDepLibsGhc)), field "include-dirs" (fsep (map (text . ST.unpack) unitIncludeDirs)), field "includes" (fsep (map (text . ST.unpack) unitIncludes)), field "depends" (fsep (map ppr unitDepends)), field "cc-options" (fsep (map (text . ST.unpack) unitCcOptions)), field "ld-options" (fsep (map (text . ST.unpack) unitLinkerOptions)), field "framework-dirs" (fsep (map (text . ST.unpack) unitExtDepFrameworkDirs)), field "frameworks" (fsep (map (text . ST.unpack) unitExtDepFrameworks)), field "haddock-interfaces" (fsep (map (text . ST.unpack) unitHaddockInterfaces)), field "haddock-html" (fsep (map (text . ST.unpack) unitHaddockHTMLs)) ] where field name body = text name <> colon <+> nest 4 body -- | Make a `Unit` from a `UnitInfo` -- -- If the unit is definite, make a `RealUnit` from `unitId` field. -- -- If the unit is indefinite, make a `VirtUnit` from `unitInstanceOf` and -- `unitInstantiations` fields. Note that in this case we don't keep track of -- `unitId`. It can be retrieved later with "improvement", i.e. 
matching on -- `unitInstanceOf/unitInstantiations` fields (see Note [About units] in -- GHC.Unit). mkUnit :: UnitInfo -> Unit mkUnit p | unitIsIndefinite p = mkVirtUnit (unitInstanceOf p) (unitInstantiations p) | otherwise = RealUnit (Definite (unitId p)) -- | Create a UnitPprInfo from a UnitInfo mkUnitPprInfo :: (u -> FastString) -> GenUnitInfo u -> UnitPprInfo mkUnitPprInfo ufs i = UnitPprInfo (ufs (unitId i)) (unitPackageNameString i) (unitPackageVersion i) ((unpackFS . unPackageName) <$> unitComponentName i) -- | Find all the include directories in the given units collectIncludeDirs :: [UnitInfo] -> [FilePath] collectIncludeDirs ps = map ST.unpack $ ordNub (filter (not . ST.null) (concatMap unitIncludeDirs ps)) -- | Find all the C-compiler options in the given units collectExtraCcOpts :: [UnitInfo] -> [String] collectExtraCcOpts ps = map ST.unpack (concatMap unitCcOptions ps) -- | Find all the library directories in the given units for the given ways collectLibraryDirs :: Ways -> [UnitInfo] -> [FilePath] collectLibraryDirs ws = ordNub . filter notNull . concatMap (libraryDirsForWay ws) -- | Find all the frameworks in the given units collectFrameworks :: [UnitInfo] -> [String] collectFrameworks ps = map ST.unpack (concatMap unitExtDepFrameworks ps) -- | Find all the package framework paths in these and the preload packages collectFrameworksDirs :: [UnitInfo] -> [String] collectFrameworksDirs ps = map ST.unpack (ordNub (filter (not . ST.null) (concatMap unitExtDepFrameworkDirs ps))) -- | Either the 'unitLibraryDirs' or 'unitLibraryDynDirs' as appropriate for the way. libraryDirsForWay :: Ways -> UnitInfo -> [String] libraryDirsForWay ws | hasWay ws WayDyn = map ST.unpack . unitLibraryDynDirs | otherwise = map ST.unpack . unitLibraryDirs unitHsLibs :: GhcNameVersion -> Ways -> UnitInfo -> [String] unitHsLibs namever ways0 p = map (mkDynName . addSuffix . 
ST.unpack) (unitLibraries p) where ways1 = removeWay WayDyn ways0 -- the name of a shared library is libHSfoo-ghc.so -- we leave out the _dyn, because it is superfluous tag = waysTag (fullWays ways1) rts_tag = waysTag ways1 mkDynName x | not (ways0 `hasWay` WayDyn) = x | "HS" `isPrefixOf` x = x ++ dynLibSuffix namever -- For non-Haskell libraries, we use the name "Cfoo". The .a -- file is libCfoo.a, and the .so is libfoo.so. That way the -- linker knows what we mean for the vanilla (-lCfoo) and dyn -- (-lfoo) ways. We therefore need to strip the 'C' off here. | Just x' <- stripPrefix "C" x = x' | otherwise = panic ("Don't understand library name " ++ x) -- Add _thr and other rts suffixes to packages named -- `rts` or `rts-1.0`. Why both? Traditionally the rts -- package is called `rts` only. However the tooling -- usually expects a package name to have a version. -- As such we will gradually move towards the `rts-1.0` -- package name, at which point the `rts` package name -- will eventually be unused. -- -- This change elevates the need to add custom hooks -- and handling specifically for the `rts` package. addSuffix rts@"HSrts" = rts ++ (expandTag rts_tag) addSuffix rts@"HSrts-1.0.2" = rts ++ (expandTag rts_tag) addSuffix other_lib = other_lib ++ (expandTag tag) expandTag t | null t = "" | otherwise = '_':t ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module.hs0000644000000000000000000001042707346545000020412 0ustar0000000000000000{-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RecordWildCards #-} {- (c) The University of Glasgow, 2004-2006 Module ~~~~~~~~~~ Simply the name of a module, represented as a FastString. These are Uniquable, hence we can build Maps with Modules as the keys. 
-} module GHC.Unit.Module ( module GHC.Unit.Types -- * The ModuleName type , module Language.Haskell.Syntax.Module.Name -- * The ModLocation type , module GHC.Unit.Module.Location -- * ModuleEnv , module GHC.Unit.Module.Env -- * Generalization , getModuleInstantiation , getUnitInstantiations , uninstantiateInstantiatedUnit , uninstantiateInstantiatedModule -- * The Module type , mkHoleModule , isHoleModule , stableModuleCmp , moduleStableString , moduleIsDefinite , HasModule(..) , ContainsModule(..) , installedModuleEq ) where import GHC.Prelude import GHC.Types.Unique.DSet import GHC.Unit.Types import GHC.Unit.Module.Location import GHC.Unit.Module.Env import Language.Haskell.Syntax.Module.Name import Data.Semigroup -- | A 'Module' is definite if it has no free holes. moduleIsDefinite :: Module -> Bool moduleIsDefinite = isEmptyUniqDSet . moduleFreeHoles -- | Get a string representation of a 'Module' that's unique and stable -- across recompilations. -- eg. "$aeson_70dylHtv1FFGeai1IoxcQr$Data.Aeson.Types.Internal" moduleStableString :: Module -> String moduleStableString Module{..} = "$" ++ unitString moduleUnit ++ "$" ++ moduleNameString moduleName -- | This gives a stable ordering, as opposed to the Ord instance which -- gives an ordering based on the 'Unique's of the components, which may -- not be stable from run to run of the compiler. stableModuleCmp :: Module -> Module -> Ordering stableModuleCmp (Module p1 n1) (Module p2 n2) = stableUnitCmp p1 p2 <> stableModuleNameCmp n1 n2 class ContainsModule t where extractModule :: t -> Module class HasModule m where getModule :: m Module -- | Test if a 'Module' corresponds to a given 'InstalledModule', -- modulo instantiation. 
installedModuleEq :: InstalledModule -> Module -> Bool installedModuleEq imod mod = fst (getModuleInstantiation mod) == imod {- ************************************************************************ * * Hole substitutions * * ************************************************************************ -} -- | Given a possibly on-the-fly instantiated module, split it into -- a 'Module' that we definitely can find on-disk, as well as an -- instantiation if we need to instantiate it on the fly. If the -- instantiation is @Nothing@ no on-the-fly renaming is needed. getModuleInstantiation :: Module -> (InstalledModule, Maybe InstantiatedModule) getModuleInstantiation m = let (uid, mb_iuid) = getUnitInstantiations (moduleUnit m) in (Module uid (moduleName m), fmap (\iuid -> Module iuid (moduleName m)) mb_iuid) -- | Return the unit-id this unit is an instance of and the module instantiations (if any). getUnitInstantiations :: Unit -> (UnitId, Maybe InstantiatedUnit) getUnitInstantiations (VirtUnit iuid) = (instUnitInstanceOf iuid, Just iuid) getUnitInstantiations (RealUnit (Definite uid)) = (uid, Nothing) getUnitInstantiations (HoleUnit {}) = error "Hole unit" -- | Remove instantiations of the given instantiated unit uninstantiateInstantiatedUnit :: InstantiatedUnit -> InstantiatedUnit uninstantiateInstantiatedUnit u = mkInstantiatedUnit (instUnitInstanceOf u) (map (\(m,_) -> (m, mkHoleModule m)) (instUnitInsts u)) -- | Remove instantiations of the given module instantiated unit uninstantiateInstantiatedModule :: InstantiatedModule -> InstantiatedModule uninstantiateInstantiatedModule (Module uid n) = Module (uninstantiateInstantiatedUnit uid) n -- | Test if a Module is not instantiated isHoleModule :: GenModule (GenUnit u) -> Bool isHoleModule (Module HoleUnit _) = True isHoleModule _ = False -- | Create a hole Module mkHoleModule :: ModuleName -> GenModule (GenUnit u) mkHoleModule = Module HoleUnit 
ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/0000755000000000000000000000000007346545000020052 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Deps.hs0000644000000000000000000005150607346545000021310 0ustar0000000000000000-- | Dependencies and Usage of a module module GHC.Unit.Module.Deps ( Dependencies , mkDependencies , noDependencies , dep_direct_mods , dep_direct_pkgs , dep_sig_mods , dep_trusted_pkgs , dep_orphs , dep_plugin_pkgs , dep_finsts , dep_boot_mods , dep_orphs_update , dep_finsts_update , pprDeps , Usage (..) , ImportAvails (..) ) where import GHC.Prelude import GHC.Data.FastString import GHC.Types.SafeHaskell import GHC.Types.Name import GHC.Unit.Module.Imported import GHC.Unit.Module import GHC.Unit.Home import GHC.Unit.State import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Utils.Outputable import Data.List (sortBy, sort, partition) import Data.Set (Set) import qualified Data.Set as Set import Data.Bifunctor -- | Dependency information about ALL modules and packages below this one -- in the import hierarchy. This is the serialisable version of `ImportAvails`. -- -- Invariant: the dependencies of a module @M@ never includes @M@. -- -- Invariant: none of the lists contain duplicates. -- -- Invariant: lists are ordered canonically (e.g. using stableModuleCmp) -- -- See Note [Transitive Information in Dependencies] data Dependencies = Deps { dep_direct_mods :: Set (UnitId, ModuleNameWithIsBoot) -- ^ All home-package modules which are directly imported by this one. -- This may include modules from other units when using multiple home units , dep_direct_pkgs :: Set UnitId -- ^ All packages directly imported by this module -- I.e. packages to which this module's direct imports belong. -- Does not include other home units when using multiple home units. 
-- Modules from these units will go in `dep_direct_mods` , dep_plugin_pkgs :: Set UnitId -- ^ All units needed for plugins ------------------------------------ -- Transitive information below here , dep_sig_mods :: ![ModuleName] -- ^ Transitive closure of hsig files in the home package , dep_trusted_pkgs :: Set UnitId -- Packages which we are required to trust -- when the module is imported as a safe import -- (Safe Haskell). See Note [Tracking Trust Transitively] in GHC.Rename.Names , dep_boot_mods :: Set (UnitId, ModuleNameWithIsBoot) -- ^ All modules which have boot files below this one, and whether we -- should use the boot file or not. -- This information is only used to populate the eps_is_boot field. -- See Note [Structure of dep_boot_mods] , dep_orphs :: [Module] -- ^ Transitive closure of orphan modules (whether -- home or external pkg). -- -- (Possible optimization: don't include family -- instance orphans as they are anyway included in -- 'dep_finsts'. But then be careful about code -- which relies on dep_orphs having the complete list!) -- This does NOT include us, unlike 'imp_orphs'. , dep_finsts :: [Module] -- ^ Transitive closure of depended upon modules which -- contain family instances (whether home or external). -- This is used by 'checkFamInstConsistency'. This -- does NOT include us, unlike 'imp_finsts'. See Note -- [The type family instance consistency story]. } deriving( Eq ) -- Equality used only for old/new comparison in GHC.Iface.Recomp.addFingerprints -- See 'GHC.Tc.Utils.ImportAvails' for details on dependencies. -- | Extract information from the rename and typecheck phases to produce -- a dependencies information for the module being compiled. -- -- The fourth argument is a list of plugin modules. mkDependencies :: HomeUnit -> Module -> ImportAvails -> [Module] -> Dependencies mkDependencies home_unit mod imports plugin_mods = let (home_plugins, external_plugins) = partition (isHomeUnit home_unit . 
moduleUnit) plugin_mods plugin_units = Set.fromList (map (toUnitId . moduleUnit) external_plugins) all_direct_mods = foldr (\mn m -> extendInstalledModuleEnv m mn (GWIB (moduleName mn) NotBoot)) (imp_direct_dep_mods imports) (map (fmap toUnitId) home_plugins) modDepsElts = Set.fromList . installedModuleEnvElts -- It's OK to use nonDetEltsUFM here because sorting by module names -- restores determinism direct_mods = first moduleUnit `Set.map` modDepsElts (delInstalledModuleEnv all_direct_mods (toUnitId <$> mod)) -- M.hi-boot can be in the imp_dep_mods, but we must remove -- it before recording the modules on which this one depends! -- (We want to retain M.hi-boot in imp_dep_mods so that -- loadHiBootInterface can see if M's direct imports depend -- on M.hi-boot, and hence that we should do the hi-boot consistency -- check.) dep_orphs = filter (/= mod) (imp_orphs imports) -- We must also remove self-references from imp_orphs. See -- Note [Module self-dependency] direct_pkgs = imp_dep_direct_pkgs imports -- Set the packages required to be Safe according to Safe Haskell. -- See Note [Tracking Trust Transitively] in GHC.Rename.Names trust_pkgs = imp_trust_pkgs imports -- If there's a non-boot import, then it shadows the boot import -- coming from the dependencies source_mods = first moduleUnit `Set.map` modDepsElts (imp_boot_mods imports) sig_mods = filter (/= (moduleName mod)) $ imp_sig_mods imports in Deps { dep_direct_mods = direct_mods , dep_direct_pkgs = direct_pkgs , dep_plugin_pkgs = plugin_units , dep_sig_mods = sort sig_mods , dep_trusted_pkgs = trust_pkgs , dep_boot_mods = source_mods , dep_orphs = sortBy stableModuleCmp dep_orphs , dep_finsts = sortBy stableModuleCmp (imp_finsts imports) -- sort to get into canonical order -- NB. 
remember to use lexicographic ordering } -- | Update module dependencies containing orphans (used by Backpack) dep_orphs_update :: Monad m => Dependencies -> ([Module] -> m [Module]) -> m Dependencies dep_orphs_update deps f = do r <- f (dep_orphs deps) pure (deps { dep_orphs = sortBy stableModuleCmp r }) -- | Update module dependencies containing family instances (used by Backpack) dep_finsts_update :: Monad m => Dependencies -> ([Module] -> m [Module]) -> m Dependencies dep_finsts_update deps f = do r <- f (dep_finsts deps) pure (deps { dep_finsts = sortBy stableModuleCmp r }) instance Binary Dependencies where put_ bh deps = do put_ bh (dep_direct_mods deps) put_ bh (dep_direct_pkgs deps) put_ bh (dep_plugin_pkgs deps) put_ bh (dep_trusted_pkgs deps) put_ bh (dep_sig_mods deps) put_ bh (dep_boot_mods deps) put_ bh (dep_orphs deps) put_ bh (dep_finsts deps) get bh = do dms <- get bh dps <- get bh plugin_pkgs <- get bh tps <- get bh hsigms <- get bh sms <- get bh os <- get bh fis <- get bh return (Deps { dep_direct_mods = dms , dep_direct_pkgs = dps , dep_plugin_pkgs = plugin_pkgs , dep_sig_mods = hsigms , dep_boot_mods = sms , dep_trusted_pkgs = tps , dep_orphs = os, dep_finsts = fis }) noDependencies :: Dependencies noDependencies = Deps { dep_direct_mods = Set.empty , dep_direct_pkgs = Set.empty , dep_plugin_pkgs = Set.empty , dep_sig_mods = [] , dep_boot_mods = Set.empty , dep_trusted_pkgs = Set.empty , dep_orphs = [] , dep_finsts = [] } -- | Pretty-print unit dependencies pprDeps :: UnitState -> Dependencies -> SDoc pprDeps unit_state (Deps { dep_direct_mods = dmods , dep_boot_mods = bmods , dep_plugin_pkgs = plgns , dep_orphs = orphs , dep_direct_pkgs = pkgs , dep_trusted_pkgs = tps , dep_finsts = finsts }) = pprWithUnitState unit_state $ vcat [text "direct module dependencies:" <+> ppr_set ppr_mod dmods, text "boot module dependencies:" <+> ppr_set ppr bmods, text "direct package dependencies:" <+> ppr_set ppr pkgs, text "plugin package dependencies:" <+> 
ppr_set ppr plgns, if null tps then empty else text "trusted package dependencies:" <+> ppr_set ppr tps, text "orphans:" <+> fsep (map ppr orphs), text "family instance modules:" <+> fsep (map ppr finsts) ] where ppr_mod (uid, (GWIB mod IsBoot)) = ppr uid <> colon <> ppr mod <+> text "[boot]" ppr_mod (uid, (GWIB mod NotBoot)) = ppr uid <> colon <> ppr mod ppr_set :: Outputable a => (a -> SDoc) -> Set a -> SDoc ppr_set w = fsep . fmap w . Set.toAscList -- | Records modules for which changes may force recompilation of this module -- See wiki: https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/recompilation-avoidance -- -- This differs from Dependencies. A module X may be in the dep_mods of this -- module (via an import chain) but if we don't use anything from X it won't -- appear in our Usage data Usage -- | Module from another package = UsagePackageModule { usg_mod :: Module, -- ^ External package module depended on usg_mod_hash :: Fingerprint, -- ^ Cached module ABI fingerprint (corresponds to mi_mod_hash) usg_safe :: IsSafeImport -- ^ Was this module imported as a safe import } -- | Module from the current package | UsageHomeModule { usg_mod_name :: ModuleName, -- ^ Name of the module usg_unit_id :: UnitId, -- ^ UnitId of the HomeUnit the module is from usg_mod_hash :: Fingerprint, -- ^ Cached module ABI fingerprint (corresponds to mi_mod_hash). -- This may be out dated after recompilation was avoided, but is -- still used as a fast initial check for change during -- recompilation avoidance. usg_entities :: [(OccName,Fingerprint)], -- ^ Entities we depend on, sorted by occurrence name and fingerprinted. -- NB: usages are for parent names only, e.g. type constructors -- but not the associated data constructors. 
usg_exports :: Maybe Fingerprint, -- ^ Fingerprint for the export list of this module, -- if we directly imported it (and hence we depend on its export list) usg_safe :: IsSafeImport -- ^ Was this module imported as a safe import } -- | A file upon which the module depends, e.g. a CPP #include, or using TH's -- 'addDependentFile' | UsageFile { usg_file_path :: FastString, -- ^ External file dependency. From a CPP #include or TH -- addDependentFile. Should be absolute. usg_file_hash :: Fingerprint, -- ^ 'Fingerprint' of the file contents. usg_file_label :: Maybe String -- ^ An optional string which is used in recompilation messages if -- file in question has changed. -- Note: We don't consider things like modification timestamps -- here, because there's no reason to recompile if the actual -- contents don't change. This previously lead to odd -- recompilation behaviors; see #8114 } | UsageHomeModuleInterface { usg_mod_name :: ModuleName -- ^ Name of the module , usg_unit_id :: UnitId -- ^ UnitId of the HomeUnit the module is from , usg_iface_hash :: Fingerprint -- ^ The *interface* hash of the module, not the ABI hash. -- This changes when anything about the interface (and hence the -- module) has changed. -- UsageHomeModuleInterface is *only* used for recompilation -- checking when using TemplateHaskell in the interpreter (where -- some modules are loaded as BCOs). } -- | A requirement which was merged into this one. | UsageMergedRequirement { usg_mod :: Module, usg_mod_hash :: Fingerprint } deriving( Eq ) -- The export list field is (Just v) if we depend on the export list: -- i.e. we imported the module directly, whether or not we -- enumerated the things we imported, or just imported -- everything -- We need to recompile if M's exports change, because -- if the import was import M, we might now have a name clash -- in the importing module. 
-- if the import was import M(x) M might no longer export x -- The only way we don't depend on the export list is if we have -- import M() -- And of course, for modules that aren't imported directly we don't -- depend on their export lists instance Binary Usage where put_ bh usg@UsagePackageModule{} = do putByte bh 0 put_ bh (usg_mod usg) put_ bh (usg_mod_hash usg) put_ bh (usg_safe usg) put_ bh usg@UsageHomeModule{} = do putByte bh 1 put_ bh (usg_mod_name usg) put_ bh (usg_unit_id usg) put_ bh (usg_mod_hash usg) put_ bh (usg_exports usg) put_ bh (usg_entities usg) put_ bh (usg_safe usg) put_ bh usg@UsageFile{} = do putByte bh 2 put_ bh (usg_file_path usg) put_ bh (usg_file_hash usg) put_ bh (usg_file_label usg) put_ bh usg@UsageMergedRequirement{} = do putByte bh 3 put_ bh (usg_mod usg) put_ bh (usg_mod_hash usg) put_ bh usg@UsageHomeModuleInterface{} = do putByte bh 4 put_ bh (usg_mod_name usg) put_ bh (usg_unit_id usg) put_ bh (usg_iface_hash usg) get bh = do h <- getByte bh case h of 0 -> do nm <- get bh mod <- get bh safe <- get bh return UsagePackageModule { usg_mod = nm, usg_mod_hash = mod, usg_safe = safe } 1 -> do nm <- get bh uid <- get bh mod <- get bh exps <- get bh ents <- get bh safe <- get bh return UsageHomeModule { usg_mod_name = nm, usg_mod_hash = mod, usg_unit_id = uid, usg_exports = exps, usg_entities = ents, usg_safe = safe } 2 -> do fp <- get bh hash <- get bh label <- get bh return UsageFile { usg_file_path = fp, usg_file_hash = hash, usg_file_label = label } 3 -> do mod <- get bh hash <- get bh return UsageMergedRequirement { usg_mod = mod, usg_mod_hash = hash } 4 -> do mod <- get bh uid <- get bh hash <- get bh return UsageHomeModuleInterface { usg_mod_name = mod, usg_unit_id = uid, usg_iface_hash = hash } i -> error ("Binary.get(Usage): " ++ show i) {- Note [Transitive Information in Dependencies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is important to be careful what information we put in 'Dependencies' because ultimately it ends 
up serialised in an interface file. Interface files must always be kept up-to-date with the state of the world, so if `Dependencies` needs to be updated then the module had to be recompiled just to update `Dependencies`. Before #16885, the dependencies used to contain the transitive closure of all home modules. Therefore, if you added an import somewhere low down in the home package it would recompile nearly every module in your project, just to update this information. Now, we are a bit more careful about what we store and explicitly store transitive information only if it is really needed. ~ Direct Information * dep_direct_mods - Directly imported home package modules * dep_direct_pkgs - Directly imported packages * dep_plgins - Directly used plugins ~ Transitive Information Some features of the compiler require transitive information about what is currently being compiled, so that is explicitly stored separately in the form they need. * dep_trusted_pkgs - Only used for the -fpackage-trust feature * dep_boot_mods - Only used to populate eps_is_boot in -c mode * dep_orphs - Modules with orphan instances * dep_finsts - Modules with type family instances Important note: If you add some transitive information to the interface file then you need to make sure recompilation is triggered when it could be out of date. The correct way to do this is to include the transitive information in the export hash of the module. The export hash is computed in `GHC.Iface.Recomp.addFingerprints`. -} {- Note [Structure of dep_boot_deps] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In `-c` mode we always need to know whether to load the normal or boot version of an interface file, and this can't be determined from just looking at the direct imports. Consider modules with dependencies: ``` A -(S)-> B A -> C -> B -(S)-> B ``` Say when compiling module `A` that we need to load the interface for `B`, do we load `B.hi` or `B.hi-boot`? 
Well, `A` does directly {-# SOURCE #-} import B, so you might think that we would load the `B.hi-boot` file, however this is wrong because `C` imports `B` normally. Therefore in the interface file for `C` we still need to record that there is a hs-boot file for `B` below it but that we now want `B.hi` rather than `B.hi-boot`. When `C` is imported, the fact that it needs `B.hi` clobbers the `{- SOURCE -}` import for `B`. Therefore in mod_boot_deps we store the names of any modules which have hs-boot files, and whether we want to import the .hi or .hi-boot version of the interface file. If you get this wrong, then GHC fails to compile, so there is a test but you might not make it that far if you get this wrong! Question: does this happen even across packages? No: if I need to load the interface for module X from package P I always look for p:X.hi. -} -- | 'ImportAvails' summarises what was imported from where, irrespective of -- whether the imported things are actually used or not. It is used: -- -- * when processing the export list, -- -- * when constructing usage info for the interface file, -- -- * to identify the list of directly imported modules for initialisation -- purposes and for optimised overlap checking of family instances, -- -- * when figuring out what things are really unused -- data ImportAvails = ImportAvails { imp_mods :: ImportedMods, -- = ModuleEnv [ImportedModsVal], -- ^ Domain is all directly-imported modules -- -- See the documentation on ImportedModsVal in -- "GHC.Unit.Module.Imported" for the meaning of the fields. -- -- We need a full ModuleEnv rather than a ModuleNameEnv here, -- because we might be importing modules of the same name from -- different packages. (currently not the case, but might be in the -- future). imp_direct_dep_mods :: InstalledModuleEnv ModuleNameWithIsBoot, -- ^ Home-package modules directly imported by the module being compiled. 
imp_dep_direct_pkgs :: Set UnitId, -- ^ Packages directly needed by the module being compiled imp_trust_own_pkg :: Bool, -- ^ Do we require that our own package is trusted? -- This is to handle efficiently the case where a Safe module imports -- a Trustworthy module that resides in the same package as it. -- See Note [Trust Own Package] in "GHC.Rename.Names" -- Transitive information below here imp_trust_pkgs :: Set UnitId, -- ^ This records the -- packages the current module needs to trust for Safe Haskell -- compilation to succeed. A package is required to be trusted if -- we are dependent on a trustworthy module in that package. -- See Note [Tracking Trust Transitively] in "GHC.Rename.Names" imp_boot_mods :: InstalledModuleEnv ModuleNameWithIsBoot, -- ^ Domain is all modules which have hs-boot files, and whether -- we should import the boot version of interface file. Only used -- in one-shot mode to populate eps_is_boot. imp_sig_mods :: [ModuleName], -- ^ Signature modules below this one imp_orphs :: [Module], -- ^ Orphan modules below us in the import tree (and maybe including -- us for imported modules) imp_finsts :: [Module] -- ^ Family instance modules below us in the import tree (and maybe -- including us for imported modules) } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Env.hs0000644000000000000000000003134607346545000021145 0ustar0000000000000000-- | Module environment module GHC.Unit.Module.Env ( -- * Module mappings ModuleEnv , elemModuleEnv, extendModuleEnv, extendModuleEnvList , extendModuleEnvList_C, plusModuleEnv_C , delModuleEnvList, delModuleEnv, plusModuleEnv, lookupModuleEnv , lookupWithDefaultModuleEnv, mapModuleEnv, mkModuleEnv, emptyModuleEnv , alterModuleEnv , partitionModuleEnv , moduleEnvKeys, moduleEnvElts, moduleEnvToList , unitModuleEnv, isEmptyModuleEnv , extendModuleEnvWith, filterModuleEnv, mapMaybeModuleEnv -- * ModuleName mappings , ModuleNameEnv, DModuleNameEnv -- * Sets of Modules , ModuleSet , emptyModuleSet, 
mkModuleSet, moduleSetElts , extendModuleSet, extendModuleSetList, delModuleSet , elemModuleSet, intersectModuleSet, minusModuleSet, unionModuleSet , unitModuleSet, isEmptyModuleSet , unionManyModuleSets -- * InstalledModuleEnv , InstalledModuleEnv , emptyInstalledModuleEnv , lookupInstalledModuleEnv , extendInstalledModuleEnv , filterInstalledModuleEnv , delInstalledModuleEnv , mergeInstalledModuleEnv , plusInstalledModuleEnv , installedModuleEnvElts -- * InstalledModuleWithIsBootEnv , InstalledModuleWithIsBootEnv , emptyInstalledModuleWithIsBootEnv , lookupInstalledModuleWithIsBootEnv , extendInstalledModuleWithIsBootEnv , filterInstalledModuleWithIsBootEnv , delInstalledModuleWithIsBootEnv , mergeInstalledModuleWithIsBootEnv , plusInstalledModuleWithIsBootEnv , installedModuleWithIsBootEnvElts ) where import GHC.Prelude import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Unit.Types import Data.List (sortBy, sort) import Data.Ord import Data.Coerce import Data.Map (Map) import Data.Set (Set) import qualified Data.Map as Map import qualified Data.Set as Set import qualified Data.Semigroup as S import qualified GHC.Data.FiniteMap as Map import GHC.Utils.Outputable import Language.Haskell.Syntax.Module.Name -- | A map keyed off of 'Module's newtype ModuleEnv elt = ModuleEnv (Map NDModule elt) instance Outputable a => Outputable (ModuleEnv a) where ppr (ModuleEnv m) = ppr m {- Note [ModuleEnv performance and determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To prevent accidental reintroduction of nondeterminism the Ord instance for Module was changed to not depend on Unique ordering and to use the lexicographic order. This is potentially expensive, but when measured there was no difference in performance. To be on the safe side and not pessimize ModuleEnv uses nondeterministic ordering on Module and normalizes by doing the lexicographic sort when turning the env to a list. 
See Note [Unique Determinism] for more information about the source of nondeterminism and Note [Deterministic UniqFM] for explanation of why it matters for maps. -} newtype NDModule = NDModule { unNDModule :: Module } deriving Eq -- A wrapper for Module with faster nondeterministic Ord. -- Don't export, See [ModuleEnv performance and determinism] -- instance Outputable NDModule where ppr (NDModule a) = ppr a instance Ord NDModule where compare (NDModule (Module p1 n1)) (NDModule (Module p2 n2)) = (getUnique p1 `nonDetCmpUnique` getUnique p2) S.<> (getUnique n1 `nonDetCmpUnique` getUnique n2) filterModuleEnv :: (Module -> a -> Bool) -> ModuleEnv a -> ModuleEnv a filterModuleEnv f (ModuleEnv e) = ModuleEnv (Map.filterWithKey (f . unNDModule) e) mapMaybeModuleEnv :: (Module -> a -> Maybe b) -> ModuleEnv a -> ModuleEnv b mapMaybeModuleEnv f (ModuleEnv e) = ModuleEnv (Map.mapMaybeWithKey (f . unNDModule) e) elemModuleEnv :: Module -> ModuleEnv a -> Bool elemModuleEnv m (ModuleEnv e) = Map.member (NDModule m) e extendModuleEnv :: ModuleEnv a -> Module -> a -> ModuleEnv a extendModuleEnv (ModuleEnv e) m x = ModuleEnv (Map.insert (NDModule m) x e) extendModuleEnvWith :: (a -> a -> a) -> ModuleEnv a -> Module -> a -> ModuleEnv a extendModuleEnvWith f (ModuleEnv e) m x = ModuleEnv (Map.insertWith f (NDModule m) x e) extendModuleEnvList :: ModuleEnv a -> [(Module, a)] -> ModuleEnv a extendModuleEnvList (ModuleEnv e) xs = ModuleEnv (Map.insertList [(NDModule k, v) | (k,v) <- xs] e) extendModuleEnvList_C :: (a -> a -> a) -> ModuleEnv a -> [(Module, a)] -> ModuleEnv a extendModuleEnvList_C f (ModuleEnv e) xs = ModuleEnv (Map.insertListWith f [(NDModule k, v) | (k,v) <- xs] e) plusModuleEnv_C :: (a -> a -> a) -> ModuleEnv a -> ModuleEnv a -> ModuleEnv a plusModuleEnv_C f (ModuleEnv e1) (ModuleEnv e2) = ModuleEnv (Map.unionWith f e1 e2) delModuleEnvList :: ModuleEnv a -> [Module] -> ModuleEnv a delModuleEnvList (ModuleEnv e) ms = ModuleEnv (Map.deleteList (map NDModule ms) e) 
delModuleEnv :: ModuleEnv a -> Module -> ModuleEnv a delModuleEnv (ModuleEnv e) m = ModuleEnv (Map.delete (NDModule m) e) plusModuleEnv :: ModuleEnv a -> ModuleEnv a -> ModuleEnv a plusModuleEnv (ModuleEnv e1) (ModuleEnv e2) = ModuleEnv (Map.union e1 e2) lookupModuleEnv :: ModuleEnv a -> Module -> Maybe a lookupModuleEnv (ModuleEnv e) m = Map.lookup (NDModule m) e lookupWithDefaultModuleEnv :: ModuleEnv a -> a -> Module -> a lookupWithDefaultModuleEnv (ModuleEnv e) x m = Map.findWithDefault x (NDModule m) e mapModuleEnv :: (a -> b) -> ModuleEnv a -> ModuleEnv b mapModuleEnv f (ModuleEnv e) = ModuleEnv (Map.mapWithKey (\_ v -> f v) e) partitionModuleEnv :: (a -> Bool) -> ModuleEnv a -> (ModuleEnv a, ModuleEnv a) partitionModuleEnv f (ModuleEnv e) = (ModuleEnv a, ModuleEnv b) where (a,b) = Map.partition f e alterModuleEnv :: (Maybe a -> Maybe a) -> Module -> ModuleEnv a -> ModuleEnv a alterModuleEnv f m (ModuleEnv e) = ModuleEnv (Map.alter f (NDModule m) e) mkModuleEnv :: [(Module, a)] -> ModuleEnv a mkModuleEnv xs = ModuleEnv (Map.fromList [(NDModule k, v) | (k,v) <- xs]) emptyModuleEnv :: ModuleEnv a emptyModuleEnv = ModuleEnv Map.empty moduleEnvKeys :: ModuleEnv a -> [Module] moduleEnvKeys (ModuleEnv e) = sort $ map unNDModule $ Map.keys e -- See Note [ModuleEnv performance and determinism] moduleEnvElts :: ModuleEnv a -> [a] moduleEnvElts e = map snd $ moduleEnvToList e -- See Note [ModuleEnv performance and determinism] moduleEnvToList :: ModuleEnv a -> [(Module, a)] moduleEnvToList (ModuleEnv e) = sortBy (comparing fst) [(m, v) | (NDModule m, v) <- Map.toList e] -- See Note [ModuleEnv performance and determinism] unitModuleEnv :: Module -> a -> ModuleEnv a unitModuleEnv m x = ModuleEnv (Map.singleton (NDModule m) x) isEmptyModuleEnv :: ModuleEnv a -> Bool isEmptyModuleEnv (ModuleEnv e) = Map.null e -- | A set of 'Module's type ModuleSet = Set NDModule mkModuleSet :: [Module] -> ModuleSet mkModuleSet = Set.fromList . 
coerce extendModuleSet :: ModuleSet -> Module -> ModuleSet extendModuleSet s m = Set.insert (NDModule m) s extendModuleSetList :: ModuleSet -> [Module] -> ModuleSet extendModuleSetList s ms = foldl' (coerce . flip Set.insert) s ms emptyModuleSet :: ModuleSet emptyModuleSet = Set.empty isEmptyModuleSet :: ModuleSet -> Bool isEmptyModuleSet = Set.null moduleSetElts :: ModuleSet -> [Module] moduleSetElts = sort . coerce . Set.toList elemModuleSet :: Module -> ModuleSet -> Bool elemModuleSet = Set.member . coerce intersectModuleSet :: ModuleSet -> ModuleSet -> ModuleSet intersectModuleSet = coerce Set.intersection minusModuleSet :: ModuleSet -> ModuleSet -> ModuleSet minusModuleSet = coerce Set.difference delModuleSet :: ModuleSet -> Module -> ModuleSet delModuleSet = coerce (flip Set.delete) unionModuleSet :: ModuleSet -> ModuleSet -> ModuleSet unionModuleSet = coerce Set.union unionManyModuleSets :: [ModuleSet] -> ModuleSet unionManyModuleSets = coerce (Set.unions :: [Set NDModule] -> Set NDModule) unitModuleSet :: Module -> ModuleSet unitModuleSet = coerce Set.singleton {- A ModuleName has a Unique, so we can build mappings of these using UniqFM. 
-} -- | A map keyed off of 'ModuleName's (actually, their 'Unique's) type ModuleNameEnv elt = UniqFM ModuleName elt -- | A map keyed off of 'ModuleName's (actually, their 'Unique's) -- Has deterministic folds and can be deterministically converted to a list type DModuleNameEnv elt = UniqDFM ModuleName elt -------------------------------------------------------------------- -- InstalledModuleEnv -------------------------------------------------------------------- -- | A map keyed off of 'InstalledModule' newtype InstalledModuleEnv elt = InstalledModuleEnv (Map InstalledModule elt) instance Outputable elt => Outputable (InstalledModuleEnv elt) where ppr (InstalledModuleEnv env) = ppr env emptyInstalledModuleEnv :: InstalledModuleEnv a emptyInstalledModuleEnv = InstalledModuleEnv Map.empty lookupInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> Maybe a lookupInstalledModuleEnv (InstalledModuleEnv e) m = Map.lookup m e extendInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> a -> InstalledModuleEnv a extendInstalledModuleEnv (InstalledModuleEnv e) m x = InstalledModuleEnv (Map.insert m x e) filterInstalledModuleEnv :: (InstalledModule -> a -> Bool) -> InstalledModuleEnv a -> InstalledModuleEnv a filterInstalledModuleEnv f (InstalledModuleEnv e) = InstalledModuleEnv (Map.filterWithKey f e) delInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> InstalledModuleEnv a delInstalledModuleEnv (InstalledModuleEnv e) m = InstalledModuleEnv (Map.delete m e) installedModuleEnvElts :: InstalledModuleEnv a -> [(InstalledModule, a)] installedModuleEnvElts (InstalledModuleEnv e) = Map.assocs e mergeInstalledModuleEnv :: (elta -> eltb -> Maybe eltc) -> (InstalledModuleEnv elta -> InstalledModuleEnv eltc) -- map X -> (InstalledModuleEnv eltb -> InstalledModuleEnv eltc) -- map Y -> InstalledModuleEnv elta -> InstalledModuleEnv eltb -> InstalledModuleEnv eltc mergeInstalledModuleEnv f g h (InstalledModuleEnv xm) (InstalledModuleEnv ym) = 
InstalledModuleEnv $ Map.mergeWithKey (\_ x y -> (x `f` y)) (coerce g) (coerce h) xm ym plusInstalledModuleEnv :: (elt -> elt -> elt) -> InstalledModuleEnv elt -> InstalledModuleEnv elt -> InstalledModuleEnv elt plusInstalledModuleEnv f (InstalledModuleEnv xm) (InstalledModuleEnv ym) = InstalledModuleEnv $ Map.unionWith f xm ym -------------------------------------------------------------------- -- InstalledModuleWithIsBootEnv -------------------------------------------------------------------- -- | A map keyed off of 'InstalledModuleWithIsBoot' newtype InstalledModuleWithIsBootEnv elt = InstalledModuleWithIsBootEnv (Map InstalledModuleWithIsBoot elt) instance Outputable elt => Outputable (InstalledModuleWithIsBootEnv elt) where ppr (InstalledModuleWithIsBootEnv env) = ppr env emptyInstalledModuleWithIsBootEnv :: InstalledModuleWithIsBootEnv a emptyInstalledModuleWithIsBootEnv = InstalledModuleWithIsBootEnv Map.empty lookupInstalledModuleWithIsBootEnv :: InstalledModuleWithIsBootEnv a -> InstalledModuleWithIsBoot -> Maybe a lookupInstalledModuleWithIsBootEnv (InstalledModuleWithIsBootEnv e) m = Map.lookup m e extendInstalledModuleWithIsBootEnv :: InstalledModuleWithIsBootEnv a -> InstalledModuleWithIsBoot -> a -> InstalledModuleWithIsBootEnv a extendInstalledModuleWithIsBootEnv (InstalledModuleWithIsBootEnv e) m x = InstalledModuleWithIsBootEnv (Map.insert m x e) filterInstalledModuleWithIsBootEnv :: (InstalledModuleWithIsBoot -> a -> Bool) -> InstalledModuleWithIsBootEnv a -> InstalledModuleWithIsBootEnv a filterInstalledModuleWithIsBootEnv f (InstalledModuleWithIsBootEnv e) = InstalledModuleWithIsBootEnv (Map.filterWithKey f e) delInstalledModuleWithIsBootEnv :: InstalledModuleWithIsBootEnv a -> InstalledModuleWithIsBoot -> InstalledModuleWithIsBootEnv a delInstalledModuleWithIsBootEnv (InstalledModuleWithIsBootEnv e) m = InstalledModuleWithIsBootEnv (Map.delete m e) installedModuleWithIsBootEnvElts :: InstalledModuleWithIsBootEnv a -> 
[(InstalledModuleWithIsBoot, a)] installedModuleWithIsBootEnvElts (InstalledModuleWithIsBootEnv e) = Map.assocs e mergeInstalledModuleWithIsBootEnv :: (elta -> eltb -> Maybe eltc) -> (InstalledModuleWithIsBootEnv elta -> InstalledModuleWithIsBootEnv eltc) -- map X -> (InstalledModuleWithIsBootEnv eltb -> InstalledModuleWithIsBootEnv eltc) -- map Y -> InstalledModuleWithIsBootEnv elta -> InstalledModuleWithIsBootEnv eltb -> InstalledModuleWithIsBootEnv eltc mergeInstalledModuleWithIsBootEnv f g h (InstalledModuleWithIsBootEnv xm) (InstalledModuleWithIsBootEnv ym) = InstalledModuleWithIsBootEnv $ Map.mergeWithKey (\_ x y -> (x `f` y)) (coerce g) (coerce h) xm ym plusInstalledModuleWithIsBootEnv :: (elt -> elt -> elt) -> InstalledModuleWithIsBootEnv elt -> InstalledModuleWithIsBootEnv elt -> InstalledModuleWithIsBootEnv elt plusInstalledModuleWithIsBootEnv f (InstalledModuleWithIsBootEnv xm) (InstalledModuleWithIsBootEnv ym) = InstalledModuleWithIsBootEnv $ Map.unionWith f xm ym ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Graph.hs0000644000000000000000000003606107346545000021455 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Unit.Module.Graph ( ModuleGraph , ModuleGraphNode(..) , nodeDependencies , emptyMG , mkModuleGraph , extendMG , extendMGInst , extendMG' , unionMG , isTemplateHaskellOrQQNonBoot , filterToposortToModules , mapMG , mgModSummaries , mgModSummaries' , mgLookupModule , mgTransDeps , showModMsg , moduleGraphNodeModule , moduleGraphNodeModSum , moduleGraphModulesBelow , moduleGraphNodes , SummaryNode , summaryNodeSummary , NodeKey(..) , nodeKeyUnitId , nodeKeyModName , ModNodeKey , mkNodeKey , msKey , moduleGraphNodeUnitId , ModNodeKeyWithUid(..) 
) where import GHC.Prelude import GHC.Platform import qualified GHC.LanguageExtensions as LangExt import GHC.Data.Maybe import GHC.Data.Graph.Directed import GHC.Driver.Backend import GHC.Driver.DynFlags import GHC.Types.SourceFile ( hscSourceString ) import GHC.Unit.Module.ModSummary import GHC.Unit.Types import GHC.Utils.Outputable import GHC.Utils.Misc ( partitionWith ) import System.FilePath import qualified Data.Map as Map import GHC.Types.Unique.DSet import qualified Data.Set as Set import Data.Set (Set) import GHC.Unit.Module import GHC.Linker.Static.Utils import Data.Bifunctor import Data.Function import Data.List (sort) import GHC.Data.List.SetOps -- | A '@ModuleGraphNode@' is a node in the '@ModuleGraph@'. -- Edges between nodes mark dependencies arising from module imports -- and dependencies arising from backpack instantiations. data ModuleGraphNode -- | Instantiation nodes track the instantiation of other units -- (backpack dependencies) with the holes (signatures) of the current package. = InstantiationNode UnitId InstantiatedUnit -- | There is a module summary node for each module, signature, and boot module being built. | ModuleNode [NodeKey] ModSummary -- | Link nodes are whether are are creating a linked product (ie executable/shared object etc) for a unit. 
| LinkNode [NodeKey] UnitId moduleGraphNodeModule :: ModuleGraphNode -> Maybe ModuleName moduleGraphNodeModule mgn = ms_mod_name <$> (moduleGraphNodeModSum mgn) moduleGraphNodeModSum :: ModuleGraphNode -> Maybe ModSummary moduleGraphNodeModSum (InstantiationNode {}) = Nothing moduleGraphNodeModSum (LinkNode {}) = Nothing moduleGraphNodeModSum (ModuleNode _ ms) = Just ms moduleGraphNodeUnitId :: ModuleGraphNode -> UnitId moduleGraphNodeUnitId mgn = case mgn of InstantiationNode uid _iud -> uid ModuleNode _ ms -> toUnitId (moduleUnit (ms_mod ms)) LinkNode _ uid -> uid instance Outputable ModuleGraphNode where ppr = \case InstantiationNode _ iuid -> ppr iuid ModuleNode nks ms -> ppr (msKey ms) <+> ppr nks LinkNode uid _ -> text "LN:" <+> ppr uid instance Eq ModuleGraphNode where (==) = (==) `on` mkNodeKey instance Ord ModuleGraphNode where compare = compare `on` mkNodeKey data NodeKey = NodeKey_Unit {-# UNPACK #-} !InstantiatedUnit | NodeKey_Module {-# UNPACK #-} !ModNodeKeyWithUid | NodeKey_Link !UnitId deriving (Eq, Ord) instance Outputable NodeKey where ppr nk = pprNodeKey nk pprNodeKey :: NodeKey -> SDoc pprNodeKey (NodeKey_Unit iu) = ppr iu pprNodeKey (NodeKey_Module mk) = ppr mk pprNodeKey (NodeKey_Link uid) = ppr uid nodeKeyUnitId :: NodeKey -> UnitId nodeKeyUnitId (NodeKey_Unit iu) = instUnitInstanceOf iu nodeKeyUnitId (NodeKey_Module mk) = mnkUnitId mk nodeKeyUnitId (NodeKey_Link uid) = uid nodeKeyModName :: NodeKey -> Maybe ModuleName nodeKeyModName (NodeKey_Module mk) = Just (gwib_mod $ mnkModuleName mk) nodeKeyModName _ = Nothing data ModNodeKeyWithUid = ModNodeKeyWithUid { mnkModuleName :: !ModuleNameWithIsBoot , mnkUnitId :: !UnitId } deriving (Eq, Ord) instance Outputable ModNodeKeyWithUid where ppr (ModNodeKeyWithUid mnwib uid) = ppr uid <> colon <> ppr mnwib -- | A '@ModuleGraph@' contains all the nodes from the home package (only). See -- '@ModuleGraphNode@' for information about the nodes. -- -- Modules need to be compiled. 
hs-boots need to be typechecked before -- the associated "real" module so modules with {-# SOURCE #-} imports can be -- built. Instantiations also need to be typechecked to ensure that the module -- fits the signature. Substantiation typechecking is roughly comparable to the -- check that the module and its hs-boot agree. -- -- The graph is not necessarily stored in topologically-sorted order. Use -- 'GHC.topSortModuleGraph' and 'GHC.Data.Graph.Directed.flattenSCC' to achieve this. data ModuleGraph = ModuleGraph { mg_mss :: [ModuleGraphNode] , mg_trans_deps :: Map.Map NodeKey (Set.Set NodeKey) -- A cached transitive dependency calculation so that a lot of work is not -- repeated whenever the transitive dependencies need to be calculated (for example, hptInstances) } -- | Map a function 'f' over all the 'ModSummaries'. -- To preserve invariants 'f' can't change the isBoot status. mapMG :: (ModSummary -> ModSummary) -> ModuleGraph -> ModuleGraph mapMG f mg@ModuleGraph{..} = mg { mg_mss = flip fmap mg_mss $ \case InstantiationNode uid iuid -> InstantiationNode uid iuid LinkNode uid nks -> LinkNode uid nks ModuleNode deps ms -> ModuleNode deps (f ms) } unionMG :: ModuleGraph -> ModuleGraph -> ModuleGraph unionMG a b = let new_mss = nubOrdBy compare $ mg_mss a `mappend` mg_mss b in ModuleGraph { mg_mss = new_mss , mg_trans_deps = mkTransDeps new_mss } mgTransDeps :: ModuleGraph -> Map.Map NodeKey (Set.Set NodeKey) mgTransDeps = mg_trans_deps mgModSummaries :: ModuleGraph -> [ModSummary] mgModSummaries mg = [ m | ModuleNode _ m <- mgModSummaries' mg ] mgModSummaries' :: ModuleGraph -> [ModuleGraphNode] mgModSummaries' = mg_mss -- | Look up a ModSummary in the ModuleGraph -- Looks up the non-boot ModSummary -- Linear in the size of the module graph mgLookupModule :: ModuleGraph -> Module -> Maybe ModSummary mgLookupModule ModuleGraph{..} m = listToMaybe $ mapMaybe go mg_mss where go (ModuleNode _ ms) | NotBoot <- isBootSummary ms , ms_mod ms == m = Just ms go _ = Nothing 
emptyMG :: ModuleGraph emptyMG = ModuleGraph [] Map.empty isTemplateHaskellOrQQNonBoot :: ModSummary -> Bool isTemplateHaskellOrQQNonBoot ms = (xopt LangExt.TemplateHaskell (ms_hspp_opts ms) || xopt LangExt.QuasiQuotes (ms_hspp_opts ms)) && (isBootSummary ms == NotBoot) -- | Add an ExtendedModSummary to ModuleGraph. Assumes that the new ModSummary is -- not an element of the ModuleGraph. extendMG :: ModuleGraph -> [NodeKey] -> ModSummary -> ModuleGraph extendMG ModuleGraph{..} deps ms = ModuleGraph { mg_mss = ModuleNode deps ms : mg_mss , mg_trans_deps = mkTransDeps (ModuleNode deps ms : mg_mss) } mkTransDeps :: [ModuleGraphNode] -> Map.Map NodeKey (Set.Set NodeKey) mkTransDeps mss = let (gg, _lookup_node) = moduleGraphNodes False mss in allReachable gg (mkNodeKey . node_payload) extendMGInst :: ModuleGraph -> UnitId -> InstantiatedUnit -> ModuleGraph extendMGInst mg uid depUnitId = mg { mg_mss = InstantiationNode uid depUnitId : mg_mss mg } extendMGLink :: ModuleGraph -> UnitId -> [NodeKey] -> ModuleGraph extendMGLink mg uid nks = mg { mg_mss = LinkNode nks uid : mg_mss mg } extendMG' :: ModuleGraph -> ModuleGraphNode -> ModuleGraph extendMG' mg = \case InstantiationNode uid depUnitId -> extendMGInst mg uid depUnitId ModuleNode deps ms -> extendMG mg deps ms LinkNode deps uid -> extendMGLink mg uid deps mkModuleGraph :: [ModuleGraphNode] -> ModuleGraph mkModuleGraph = foldr (flip extendMG') emptyMG -- | This function filters out all the instantiation nodes from each SCC of a -- topological sort. Use this with care, as the resulting "strongly connected components" -- may not really be strongly connected in a direct way, as instantiations have been -- removed. It would probably be best to eliminate uses of this function where possible. 
filterToposortToModules :: [SCC ModuleGraphNode] -> [SCC ModSummary] filterToposortToModules = mapMaybe $ mapMaybeSCC $ \case InstantiationNode _ _ -> Nothing LinkNode{} -> Nothing ModuleNode _deps node -> Just node where -- This higher order function is somewhat bogus, -- as the definition of "strongly connected component" -- is not necessarily respected. mapMaybeSCC :: (a -> Maybe b) -> SCC a -> Maybe (SCC b) mapMaybeSCC f = \case AcyclicSCC a -> AcyclicSCC <$> f a CyclicSCC as -> case mapMaybe f as of [] -> Nothing [a] -> Just $ AcyclicSCC a as -> Just $ CyclicSCC as showModMsg :: DynFlags -> Bool -> ModuleGraphNode -> SDoc showModMsg dflags _ (LinkNode {}) = let staticLink = case ghcLink dflags of LinkStaticLib -> True _ -> False platform = targetPlatform dflags arch_os = platformArchOS platform exe_file = exeFileName arch_os staticLink (outputFile_ dflags) in text exe_file showModMsg _ _ (InstantiationNode _uid indef_unit) = ppr $ instUnitInstanceOf indef_unit showModMsg dflags recomp (ModuleNode _ mod_summary) = if gopt Opt_HideSourcePaths dflags then text mod_str else hsep $ [ text (mod_str ++ replicate (max 0 (16 - length mod_str)) ' ') , char '(' , text (op $ msHsFilePath mod_summary) <> char ',' , message, char ')' ] where op = normalise mod_str = moduleNameString (moduleName (ms_mod mod_summary)) ++ hscSourceString (ms_hsc_src mod_summary) dyn_file = op $ msDynObjFilePath mod_summary obj_file = op $ msObjFilePath mod_summary files = [ obj_file ] ++ [ dyn_file | gopt Opt_BuildDynamicToo dflags ] ++ [ "interpreted" | gopt Opt_ByteCodeAndObjectCode dflags ] message = case backendSpecialModuleSource (backend dflags) recomp of Just special -> text special Nothing -> foldr1 (\ofile rest -> ofile <> comma <+> rest) (map text files) type SummaryNode = Node Int ModuleGraphNode summaryNodeKey :: SummaryNode -> Int summaryNodeKey = node_key summaryNodeSummary :: SummaryNode -> ModuleGraphNode summaryNodeSummary = node_payload -- | Collect the immediate dependencies 
of a ModuleGraphNode, -- optionally avoiding hs-boot dependencies. -- If the drop_hs_boot_nodes flag is False, and if this is a .hs and there is -- an equivalent .hs-boot, add a link from the former to the latter. This -- has the effect of detecting bogus cases where the .hs-boot depends on the -- .hs, by introducing a cycle. Additionally, it ensures that we will always -- process the .hs-boot before the .hs, and so the HomePackageTable will always -- have the most up to date information. nodeDependencies :: Bool -> ModuleGraphNode -> [NodeKey] nodeDependencies drop_hs_boot_nodes = \case LinkNode deps _uid -> deps InstantiationNode uid iuid -> NodeKey_Module . (\mod -> ModNodeKeyWithUid (GWIB mod NotBoot) uid) <$> uniqDSetToList (instUnitHoles iuid) ModuleNode deps _ms -> map drop_hs_boot deps where -- Drop hs-boot nodes by using HsSrcFile as the key hs_boot_key | drop_hs_boot_nodes = NotBoot -- is regular mod or signature | otherwise = IsBoot drop_hs_boot (NodeKey_Module (ModNodeKeyWithUid (GWIB mn IsBoot) uid)) = (NodeKey_Module (ModNodeKeyWithUid (GWIB mn hs_boot_key) uid)) drop_hs_boot x = x -- | Turn a list of graph nodes into an efficient queriable graph. -- The first boolean parameter indicates whether nodes corresponding to hs-boot files -- should be collapsed into their relevant hs nodes. moduleGraphNodes :: Bool -> [ModuleGraphNode] -> (Graph SummaryNode, NodeKey -> Maybe SummaryNode) moduleGraphNodes drop_hs_boot_nodes summaries = (graphFromEdgedVerticesUniq nodes, lookup_node) where -- Map from module to extra boot summary dependencies which need to be merged in (boot_summaries, nodes) = bimap Map.fromList id $ partitionWith go numbered_summaries where go (s, key) = case s of ModuleNode __deps ms | isBootSummary ms == IsBoot, drop_hs_boot_nodes -- Using nodeDependencies here converts dependencies on other -- boot files to dependencies on dependencies on non-boot files. 
-> Left (ms_mod ms, nodeDependencies drop_hs_boot_nodes s) _ -> normal_case where normal_case = let lkup_key = ms_mod <$> moduleGraphNodeModSum s extra = (lkup_key >>= \key -> Map.lookup key boot_summaries) in Right $ DigraphNode s key $ out_edge_keys $ (fromMaybe [] extra ++ nodeDependencies drop_hs_boot_nodes s) numbered_summaries = zip summaries [1..] lookup_node :: NodeKey -> Maybe SummaryNode lookup_node key = Map.lookup key (unNodeMap node_map) lookup_key :: NodeKey -> Maybe Int lookup_key = fmap summaryNodeKey . lookup_node node_map :: NodeMap SummaryNode node_map = NodeMap $ Map.fromList [ (mkNodeKey s, node) | node <- nodes , let s = summaryNodeSummary node ] out_edge_keys :: [NodeKey] -> [Int] out_edge_keys = mapMaybe lookup_key -- If we want keep_hi_boot_nodes, then we do lookup_key with -- IsBoot; else False newtype NodeMap a = NodeMap { unNodeMap :: Map.Map NodeKey a } deriving (Functor, Traversable, Foldable) mkNodeKey :: ModuleGraphNode -> NodeKey mkNodeKey = \case InstantiationNode _ iu -> NodeKey_Unit iu ModuleNode _ x -> NodeKey_Module $ msKey x LinkNode _ uid -> NodeKey_Link uid msKey :: ModSummary -> ModNodeKeyWithUid msKey ms = ModNodeKeyWithUid (ms_mnwib ms) (ms_unitid ms) type ModNodeKey = ModuleNameWithIsBoot -- | This function returns all the modules belonging to the home-unit that can -- be reached by following the given dependencies. Additionally, if both the -- boot module and the non-boot module can be reached, it only returns the -- non-boot one. moduleGraphModulesBelow :: ModuleGraph -> UnitId -> ModuleNameWithIsBoot -> Set ModNodeKeyWithUid moduleGraphModulesBelow mg uid mn = filtered_mods $ [ mn | NodeKey_Module mn <- modules_below] where td_map = mgTransDeps mg modules_below = maybe [] Set.toList $ Map.lookup (NodeKey_Module (ModNodeKeyWithUid mn uid)) td_map filtered_mods = Set.fromDistinctAscList . filter_mods . 
sort -- IsBoot and NotBoot modules are necessarily consecutive in the sorted list -- (cf Ord instance of GenWithIsBoot). Hence we only have to perform a -- linear sweep with a window of size 2 to remove boot modules for which we -- have the corresponding non-boot. filter_mods = \case (r1@(ModNodeKeyWithUid (GWIB m1 b1) uid1) : r2@(ModNodeKeyWithUid (GWIB m2 _) uid2): rs) | m1 == m2 && uid1 == uid2 -> let !r' = case b1 of NotBoot -> r1 IsBoot -> r2 in r' : filter_mods rs | otherwise -> r1 : filter_mods (r2:rs) rs -> rs ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Imported.hs0000644000000000000000000000324107346545000022171 0ustar0000000000000000module GHC.Unit.Module.Imported ( ImportedMods , ImportedBy (..) , ImportedModsVal (..) , importedByUser ) where import GHC.Prelude import GHC.Unit.Module import GHC.Types.Name.Reader import GHC.Types.SafeHaskell import GHC.Types.SrcLoc import Data.Map (Map) -- | Records the modules directly imported by a module for extracting e.g. -- usage information, and also to give better error message type ImportedMods = Map Module [ImportedBy] -- We don't want to use a `ModuleEnv` since it would leak a non-deterministic -- order to the interface files when passed as a list to `mkUsageInfo`. -- | If a module was "imported" by the user, we associate it with -- more detailed usage information 'ImportedModsVal'; a module -- imported by the system only gets used for usage information. 
data ImportedBy = ImportedByUser ImportedModsVal | ImportedBySystem importedByUser :: [ImportedBy] -> [ImportedModsVal] importedByUser (ImportedByUser imv : bys) = imv : importedByUser bys importedByUser (ImportedBySystem : bys) = importedByUser bys importedByUser [] = [] data ImportedModsVal = ImportedModsVal { imv_name :: ModuleName -- ^ The name the module is imported with , imv_span :: SrcSpan -- ^ the source span of the whole import , imv_is_safe :: IsSafeImport -- ^ whether this is a safe import , imv_is_hiding :: Bool -- ^ whether this is an "hiding" import , imv_all_exports :: !GlobalRdrEnv -- ^ all the things the module could provide. -- -- NB. BangPattern here: otherwise this leaks. (#15111) , imv_qualified :: Bool -- ^ whether this is a qualified import } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Location.hs0000644000000000000000000001466707346545000022174 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ViewPatterns #-} -- | Module location module GHC.Unit.Module.Location ( ModLocation ( .. , ml_hs_file , ml_hi_file , ml_dyn_hi_file , ml_obj_file , ml_dyn_obj_file , ml_hie_file ) , pattern ModLocation , addBootSuffix , addBootSuffix_maybe , addBootSuffixLocn_maybe , addBootSuffixLocn , addBootSuffixLocnOut , removeBootSuffix , mkFileSrcSpan ) where import GHC.Prelude import GHC.Data.OsPath import GHC.Unit.Types import GHC.Types.SrcLoc import GHC.Utils.Outputable import GHC.Data.FastString (mkFastString) import qualified System.OsString as OsString -- | Module Location -- -- Where a module lives on the file system: the actual locations -- of the .hs, .hi, .dyn_hi, .o, .dyn_o and .hie files, if we have them. -- -- For a module in another unit, the ml_hs_file_ospath and ml_obj_file_ospath components of -- ModLocation are undefined. 
-- -- The locations specified by a ModLocation may or may not -- correspond to actual files yet: for example, even if the object -- file doesn't exist, the ModLocation still contains the path to -- where the object file will reside if/when it is created. -- -- The paths of anything which can affect recompilation should be placed inside -- ModLocation. -- -- When a ModLocation is created none of the filepaths will have -boot suffixes. -- This is because in --make mode the ModLocation is put in the finder cache which -- is indexed by ModuleName, when a ModLocation is retrieved from the FinderCache -- the boot suffixes are appended. -- The other case is in -c mode, there the ModLocation immediately gets given the -- boot suffixes in mkOneShotModLocation. data ModLocation = OsPathModLocation { ml_hs_file_ospath :: Maybe OsPath, -- ^ The source file, if we have one. Package modules -- probably don't have source files. ml_hi_file_ospath :: OsPath, -- ^ Where the .hi file is, whether or not it exists -- yet. Always of form foo.hi, even if there is an -- hi-boot file (we add the -boot suffix later) ml_dyn_hi_file_ospath :: OsPath, -- ^ Where the .dyn_hi file is, whether or not it exists -- yet. ml_obj_file_ospath :: OsPath, -- ^ Where the .o file is, whether or not it exists yet. -- (might not exist either because the module hasn't -- been compiled yet, or because it is part of a -- unit with a .a file) ml_dyn_obj_file_ospath :: OsPath, -- ^ Where the .dy file is, whether or not it exists -- yet. ml_hie_file_ospath :: OsPath -- ^ Where the .hie file is, whether or not it exists -- yet. } deriving Show instance Outputable ModLocation where ppr = text . 
show -- | Add the @-boot@ suffix to .hs, .hi and .o files addBootSuffix :: OsPath -> OsPath addBootSuffix path = path `mappend` os "-boot" -- | Remove the @-boot@ suffix to .hs, .hi and .o files removeBootSuffix :: OsPath -> OsPath removeBootSuffix pathWithBootSuffix = case OsString.stripSuffix (os "-boot") pathWithBootSuffix of Just path -> path Nothing -> error "removeBootSuffix: no -boot suffix" -- | Add the @-boot@ suffix if the @Bool@ argument is @True@ addBootSuffix_maybe :: IsBootInterface -> OsPath -> OsPath addBootSuffix_maybe is_boot path = case is_boot of IsBoot -> addBootSuffix path NotBoot -> path addBootSuffixLocn_maybe :: IsBootInterface -> ModLocation -> ModLocation addBootSuffixLocn_maybe is_boot locn = case is_boot of IsBoot -> addBootSuffixLocn locn _ -> locn -- | Add the @-boot@ suffix to all file paths associated with the module addBootSuffixLocn :: ModLocation -> ModLocation addBootSuffixLocn locn = locn { ml_hs_file_ospath = fmap addBootSuffix (ml_hs_file_ospath locn) , ml_hi_file_ospath = addBootSuffix (ml_hi_file_ospath locn) , ml_dyn_hi_file_ospath = addBootSuffix (ml_dyn_hi_file_ospath locn) , ml_obj_file_ospath = addBootSuffix (ml_obj_file_ospath locn) , ml_dyn_obj_file_ospath = addBootSuffix (ml_dyn_obj_file_ospath locn) , ml_hie_file_ospath = addBootSuffix (ml_hie_file_ospath locn) } -- | Add the @-boot@ suffix to all output file paths associated with the -- module, not including the input file itself addBootSuffixLocnOut :: ModLocation -> ModLocation addBootSuffixLocnOut locn = locn { ml_hi_file_ospath = addBootSuffix (ml_hi_file_ospath locn) , ml_dyn_hi_file_ospath = addBootSuffix (ml_dyn_hi_file_ospath locn) , ml_obj_file_ospath = addBootSuffix (ml_obj_file_ospath locn) , ml_dyn_obj_file_ospath = addBootSuffix (ml_dyn_obj_file_ospath locn) , ml_hie_file_ospath = addBootSuffix (ml_hie_file_ospath locn) } -- | Compute a 'SrcSpan' from a 'ModLocation'. 
mkFileSrcSpan :: ModLocation -> SrcSpan mkFileSrcSpan mod_loc = case ml_hs_file mod_loc of Just file_path -> mkGeneralSrcSpan (mkFastString file_path) Nothing -> interactiveSrcSpan -- Presumably -- ---------------------------------------------------------------------------- -- Helpers for backwards compatibility -- ---------------------------------------------------------------------------- {-# COMPLETE ModLocation #-} pattern ModLocation :: Maybe FilePath -> FilePath -> FilePath -> FilePath -> FilePath -> FilePath -> ModLocation pattern ModLocation { ml_hs_file , ml_hi_file , ml_dyn_hi_file , ml_obj_file , ml_dyn_obj_file , ml_hie_file } <- OsPathModLocation { ml_hs_file_ospath = (fmap unsafeDecodeUtf -> ml_hs_file) , ml_hi_file_ospath = (unsafeDecodeUtf -> ml_hi_file) , ml_dyn_hi_file_ospath = (unsafeDecodeUtf -> ml_dyn_hi_file) , ml_obj_file_ospath = (unsafeDecodeUtf -> ml_obj_file) , ml_dyn_obj_file_ospath = (unsafeDecodeUtf -> ml_dyn_obj_file) , ml_hie_file_ospath = (unsafeDecodeUtf -> ml_hie_file) } where ModLocation ml_hs_file ml_hi_file ml_dyn_hi_file ml_obj_file ml_dyn_obj_file ml_hie_file = OsPathModLocation { ml_hs_file_ospath = fmap unsafeEncodeUtf ml_hs_file , ml_hi_file_ospath = unsafeEncodeUtf ml_hi_file , ml_dyn_hi_file_ospath = unsafeEncodeUtf ml_dyn_hi_file , ml_obj_file_ospath = unsafeEncodeUtf ml_obj_file , ml_dyn_obj_file_ospath = unsafeEncodeUtf ml_dyn_obj_file , ml_hie_file_ospath = unsafeEncodeUtf ml_hie_file } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/ModDetails.hs0000644000000000000000000000335407346545000022440 0ustar0000000000000000module GHC.Unit.Module.ModDetails ( ModDetails (..) 
, emptyModDetails ) where import GHC.Core ( CoreRule ) import GHC.Core.FamInstEnv import GHC.Core.InstEnv ( InstEnv, emptyInstEnv ) import GHC.Types.Avail import GHC.Types.CompleteMatch import GHC.Types.DefaultEnv ( DefaultEnv, emptyDefaultEnv ) import GHC.Types.TypeEnv import GHC.Types.Annotations ( Annotation ) -- | The 'ModDetails' is essentially a cache for information in the 'ModIface' -- for home modules only. Information relating to packages will be loaded into -- global environments in 'ExternalPackageState'. data ModDetails = ModDetails { -- The next two fields are created by the typechecker md_exports :: [AvailInfo] , md_types :: !TypeEnv -- ^ Local type environment for this particular module -- Includes Ids, TyCons, PatSyns , md_defaults :: !DefaultEnv -- ^ default declarations exported by this module , md_insts :: InstEnv -- ^ 'DFunId's for the instances in this module , md_fam_insts :: ![FamInst] , md_rules :: ![CoreRule] -- ^ Domain may include 'Id's from other modules , md_anns :: ![Annotation] -- ^ Annotations present in this module: currently -- they only annotate things also declared in this module , md_complete_matches :: CompleteMatches -- ^ Complete match pragmas for this module } -- | Constructs an empty ModDetails emptyModDetails :: ModDetails emptyModDetails = ModDetails { md_types = emptyTypeEnv , md_exports = [] , md_defaults = emptyDefaultEnv , md_insts = emptyInstEnv , md_rules = [] , md_fam_insts = [] , md_anns = [] , md_complete_matches = [] } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/ModGuts.hs0000644000000000000000000001573207346545000022000 0ustar0000000000000000module GHC.Unit.Module.ModGuts ( ModGuts (..) , mg_mnwib , CgGuts (..) 
) where import GHC.Prelude import GHC.ByteCode.Types import GHC.ForeignSrcLang import GHC.Hs import GHC.Unit import GHC.Unit.Module.Deps import GHC.Unit.Module.Warnings import GHC.Core.InstEnv ( InstEnv, ClsInst ) import GHC.Core.FamInstEnv import GHC.Core ( CoreProgram, CoreRule ) import GHC.Core.TyCon import GHC.Core.PatSyn import GHC.Linker.Types ( SptEntry(..) ) import GHC.Types.Annotations ( Annotation ) import GHC.Types.Avail import GHC.Types.CompleteMatch import GHC.Types.DefaultEnv ( DefaultEnv ) import GHC.Types.Fixity.Env import GHC.Types.ForeignStubs import GHC.Types.HpcInfo import GHC.Types.Name.Reader import GHC.Types.Name.Set (NameSet) import GHC.Types.SafeHaskell import GHC.Types.SourceFile ( HscSource(..), hscSourceToIsBoot ) import GHC.Types.SrcLoc import GHC.Types.CostCentre import Data.Set (Set) -- | A ModGuts is carried through the compiler, accumulating stuff as it goes -- There is only one ModGuts at any time, the one for the module -- being compiled right now. Once it is compiled, a 'ModIface' and -- 'ModDetails' are extracted and the ModGuts is discarded. data ModGuts = ModGuts { mg_module :: !Module, -- ^ Module being compiled mg_hsc_src :: HscSource, -- ^ Whether it's an hs-boot module mg_loc :: SrcSpan, -- ^ For error messages from inner passes mg_exports :: ![AvailInfo], -- ^ What it exports mg_deps :: !Dependencies, -- ^ What it depends on, directly or -- otherwise mg_usages :: ![Usage], -- ^ What was used? Used for interfaces. mg_used_th :: !Bool, -- ^ Did we run a TH splice? mg_rdr_env :: !GlobalRdrEnv, -- ^ Top-level lexical environment -- These fields all describe the things **declared in this module** mg_fix_env :: !FixityEnv, -- ^ Fixities declared in this module. -- Used for creating interface files. 
mg_tcs :: ![TyCon], -- ^ TyCons declared in this module -- (includes TyCons for classes) mg_defaults :: !DefaultEnv , -- ^ Class defaults exported from this module mg_insts :: ![ClsInst], -- ^ Class instances declared in this module mg_fam_insts :: ![FamInst], -- ^ Family instances declared in this module mg_patsyns :: ![PatSyn], -- ^ Pattern synonyms declared in this module mg_rules :: ![CoreRule], -- ^ Before the core pipeline starts, contains -- See Note [Overall plumbing for rules] in "GHC.Core.Rules" mg_binds :: !CoreProgram, -- ^ Bindings for this module mg_foreign :: !ForeignStubs, -- ^ Foreign exports declared in this module mg_foreign_files :: ![(ForeignSrcLang, FilePath)], -- ^ Files to be compiled with the C compiler mg_warns :: !(Warnings GhcRn), -- ^ Warnings declared in the module mg_anns :: [Annotation], -- ^ Annotations declared in this module mg_complete_matches :: CompleteMatches, -- ^ Complete Matches mg_hpc_info :: !HpcInfo, -- ^ Coverage tick boxes in the module mg_modBreaks :: !(Maybe ModBreaks), -- ^ Breakpoints for the module -- The next two fields are unusual, because they give instance -- environments for *all* modules in the home package, including -- this module, rather than for *just* this module. -- Reason: when looking up an instance we don't want to have to -- look at each module in the home package in turn mg_inst_env :: InstEnv, -- ^ Class instance environment for -- /home-package/ modules (including this -- one); c.f. 'tcg_inst_env' mg_fam_inst_env :: FamInstEnv, -- ^ Type-family instance environment for -- /home-package/ modules (including this -- one); c.f. 'tcg_fam_inst_env' mg_boot_exports :: !NameSet, -- Things that are also export via hs-boot file mg_safe_haskell :: SafeHaskellMode, -- ^ Safe Haskell mode mg_trust_pkg :: Bool, -- ^ Do we need to trust our -- own package for Safe Haskell? -- See Note [Trust Own Package] -- in "GHC.Rename.Names" mg_docs :: !(Maybe Docs) -- ^ Documentation. 
} mg_mnwib :: ModGuts -> ModuleNameWithIsBoot mg_mnwib mg = GWIB (moduleName (mg_module mg)) (hscSourceToIsBoot (mg_hsc_src mg)) -- The ModGuts takes on several slightly different forms: -- -- After simplification, the following fields change slightly: -- mg_rules Orphan rules only (local ones now attached to binds) -- mg_binds With rules attached --------------------------------------------------------- -- The Tidy pass forks the information about this module: -- * one lot goes to interface file generation (ModIface) -- and later compilations (ModDetails) -- * the other lot goes to code generation (CgGuts) -- | A restricted form of 'ModGuts' for code generation purposes data CgGuts = CgGuts { cg_module :: !Module, -- ^ Module being compiled cg_tycons :: [TyCon], -- ^ Algebraic data types (including ones that started -- life as classes); generate constructors and info -- tables. Includes newtypes, just for the benefit of -- External Core cg_binds :: CoreProgram, -- ^ The tidied main bindings, including -- previously-implicit bindings for record and class -- selectors, and data constructor wrappers. But *not* -- data constructor workers; reason: we regard them -- as part of the code-gen of tycons cg_ccs :: [CostCentre], -- List of cost centres used in bindings and rules cg_foreign :: !ForeignStubs, -- ^ Foreign export stubs cg_foreign_files :: ![(ForeignSrcLang, FilePath)], cg_dep_pkgs :: !(Set UnitId), -- ^ Dependent packages, used to -- generate #includes for C code gen cg_modBreaks :: !(Maybe ModBreaks), -- ^ Module breakpoints cg_spt_entries :: [SptEntry] -- ^ Static pointer table entries for static forms defined in -- the module. 
-- See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable" } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/ModIface.hs0000644000000000000000000011654407346545000022070 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE DeriveAnyClass #-} module GHC.Unit.Module.ModIface ( ModIface , ModIface_ ( mi_module , mi_sig_of , mi_hsc_src , mi_deps , mi_usages , mi_exports , mi_used_th , mi_fixities , mi_warns , mi_anns , mi_decls , mi_defaults , mi_extra_decls , mi_foreign , mi_top_env , mi_insts , mi_fam_insts , mi_rules , mi_hpc , mi_trust , mi_trust_pkg , mi_complete_matches , mi_docs , mi_final_exts , mi_ext_fields , mi_src_hash , mi_hi_bytes ) , pattern ModIface , restoreFromOldModIface , addSourceFingerprint , set_mi_module , set_mi_sig_of , set_mi_hsc_src , set_mi_src_hash , set_mi_hi_bytes , set_mi_deps , set_mi_usages , set_mi_exports , set_mi_used_th , set_mi_fixities , set_mi_warns , set_mi_anns , set_mi_insts , set_mi_fam_insts , set_mi_rules , set_mi_decls , set_mi_defaults , set_mi_extra_decls , set_mi_foreign , set_mi_top_env , set_mi_hpc , set_mi_trust , set_mi_trust_pkg , set_mi_complete_matches , set_mi_docs , set_mi_final_exts , set_mi_ext_fields , completePartialModIface , IfaceBinHandle(..) , PartialModIface , ModIfaceBackend (..) , IfaceDeclExts , IfaceBackendExts , IfaceExport , WhetherHasOrphans , WhetherHasFamInst , IfaceTopEnv (..) , IfaceImport(..) 
, mi_boot , mi_fix , mi_semantic_module , mi_free_holes , mi_mnwib , renameFreeHoles , emptyPartialModIface , emptyFullModIface , mkIfaceHashCache , emptyIfaceHashCache , forceModIface ) where import GHC.Prelude import GHC.Hs import GHC.Iface.Syntax import GHC.Iface.Ext.Fields import GHC.Unit import GHC.Unit.Module.Deps import GHC.Unit.Module.Warnings import GHC.Unit.Module.WholeCoreBindings (IfaceForeign (..), emptyIfaceForeign) import GHC.Types.Avail import GHC.Types.Fixity import GHC.Types.Fixity.Env import GHC.Types.HpcInfo import GHC.Types.Name import GHC.Types.Name.Reader (IfGlobalRdrEnv) import GHC.Types.SafeHaskell import GHC.Types.SourceFile import GHC.Types.Unique.DSet import GHC.Types.Unique.FM import GHC.Data.Maybe import qualified GHC.Data.Strict as Strict import GHC.Utils.Fingerprint import GHC.Utils.Binary import Control.DeepSeq import Control.Exception {- Note [Interface file stages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Interface files have two possible stages. * A partial stage built from the result of the core pipeline. * A fully instantiated form. Which also includes fingerprints and potentially information provided by backends. We can build a full interface file two ways: * Directly from a partial one: Then we omit backend information and mostly compute fingerprints. * From a partial one + information produced by a backend. Then we store the provided information and fingerprint both. -} type PartialModIface = ModIface_ 'ModIfaceCore type ModIface = ModIface_ 'ModIfaceFinal -- | Extends a PartialModIface with information which is either: -- * Computed after codegen -- * Or computed just before writing the iface to disk. (Hashes) -- In order to fully instantiate it. 
data ModIfaceBackend = ModIfaceBackend { mi_iface_hash :: !Fingerprint -- ^ Hash of the whole interface , mi_mod_hash :: !Fingerprint -- ^ Hash of the ABI only , mi_flag_hash :: !Fingerprint -- ^ Hash of the important flags used when compiling the module, excluding -- optimisation flags , mi_opt_hash :: !Fingerprint -- ^ Hash of optimisation flags , mi_hpc_hash :: !Fingerprint -- ^ Hash of hpc flags , mi_plugin_hash :: !Fingerprint -- ^ Hash of plugins , mi_orphan :: !WhetherHasOrphans -- ^ Whether this module has orphans , mi_finsts :: !WhetherHasFamInst -- ^ Whether this module has family instances. See Note [The type family -- instance consistency story]. , mi_exp_hash :: !Fingerprint -- ^ Hash of export list , mi_orphan_hash :: !Fingerprint -- ^ Hash for orphan rules, class and family instances combined -- Cached environments for easy lookup. These are computed (lazily) from -- other fields and are not put into the interface file. -- Not really produced by the backend but there is no need to create them -- any earlier. , mi_decl_warn_fn :: !(OccName -> Maybe (WarningTxt GhcRn)) -- ^ Cached lookup for 'mi_warns' for declaration deprecations , mi_export_warn_fn :: !(Name -> Maybe (WarningTxt GhcRn)) -- ^ Cached lookup for 'mi_warns' for export deprecations , mi_fix_fn :: !(OccName -> Maybe Fixity) -- ^ Cached lookup for 'mi_fixities' , mi_hash_fn :: !(OccName -> Maybe (OccName, Fingerprint)) -- ^ Cached lookup for 'mi_decls'. The @Nothing@ in 'mi_hash_fn' means that -- the thing isn't in decls. It's useful to know that when seeing if we are -- up to date wrt. the old interface. The 'OccName' is the parent of the -- name, if it has one. } data ModIfacePhase = ModIfaceCore -- ^ Partial interface built based on output of core pipeline. | ModIfaceFinal -- | Selects a IfaceDecl representation. -- For fully instantiated interfaces we also maintain -- a fingerprint, which is used for recompilation checks. 
type family IfaceDeclExts (phase :: ModIfacePhase) = decl | decl -> phase where IfaceDeclExts 'ModIfaceCore = IfaceDecl IfaceDeclExts 'ModIfaceFinal = (Fingerprint, IfaceDecl) type family IfaceBackendExts (phase :: ModIfacePhase) = bk | bk -> phase where IfaceBackendExts 'ModIfaceCore = () IfaceBackendExts 'ModIfaceFinal = ModIfaceBackend -- | In-memory byte array representation of a 'ModIface'. -- -- See Note [Sharing of ModIface] for why we need this. data IfaceBinHandle (phase :: ModIfacePhase) where -- | A partial 'ModIface' cannot be serialised to disk. PartialIfaceBinHandle :: IfaceBinHandle 'ModIfaceCore -- | Optional 'FullBinData' that can be serialised to disk directly. -- -- See Note [Private fields in ModIface] for when this fields needs to be cleared -- (e.g., set to 'Nothing'). FullIfaceBinHandle :: !(Strict.Maybe FullBinData) -> IfaceBinHandle 'ModIfaceFinal -- | A 'ModIface' plus a 'ModDetails' summarises everything we know -- about a compiled module. The 'ModIface' is the stuff *before* linking, -- and can be written out to an interface file. The 'ModDetails is after -- linking and can be completely recovered from just the 'ModIface'. -- -- When we read an interface file, we also construct a 'ModIface' from it, -- except that we explicitly make the 'mi_decls' and a few other fields empty; -- as when reading we consolidate the declarations etc. into a number of indexed -- maps and environments in the 'ExternalPackageState'. -- -- See Note [Strictness in ModIface] to learn about why some fields are -- strict and others are not. -- -- See Note [Private fields in ModIface] to learn why we don't export any of the -- fields. data ModIface_ (phase :: ModIfacePhase) = PrivateModIface { mi_module_ :: !Module, -- ^ Name of the module we are for mi_sig_of_ :: !(Maybe Module), -- ^ Are we a sig of another mod? mi_hsc_src_ :: !HscSource, -- ^ Boot? Signature? mi_deps_ :: Dependencies, -- ^ The dependencies of the module. 
This is -- consulted for directly-imported modules, but not -- for anything else (hence lazy) mi_usages_ :: [Usage], -- ^ Usages; kept sorted so that it's easy to decide -- whether to write a new iface file (changing usages -- doesn't affect the hash of this module) -- NOT STRICT! we read this field lazily from the interface file -- It is *only* consulted by the recompilation checker -- -- The elements must be *deterministically* sorted to guarantee -- deterministic interface files mi_exports_ :: ![IfaceExport], -- ^ Exports -- Kept sorted by (mod,occ), to make version comparisons easier -- Records the modules that are the declaration points for things -- exported by this module, and the 'OccName's of those things mi_used_th_ :: !Bool, -- ^ Module required TH splices when it was compiled. -- This disables recompilation avoidance (see #481). mi_fixities_ :: [(OccName,Fixity)], -- ^ Fixities -- NOT STRICT! we read this field lazily from the interface file mi_warns_ :: IfaceWarnings, -- ^ Warnings -- NOT STRICT! we read this field lazily from the interface file mi_anns_ :: [IfaceAnnotation], -- ^ Annotations -- NOT STRICT! we read this field lazily from the interface file mi_decls_ :: [IfaceDeclExts phase], -- ^ Type, class and variable declarations -- The hash of an Id changes if its fixity or deprecations change -- (as well as its type of course) -- Ditto data constructors, class operations, except that -- the hash of the parent class/tycon changes mi_extra_decls_ :: Maybe [IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo], -- ^ Extra variable definitions which are **NOT** exposed but when -- combined with mi_decls allows us to restart code generation. -- See Note [Interface Files with Core Definitions] and Note [Interface File with Core: Sharing RHSs] mi_foreign_ :: !IfaceForeign, -- ^ Foreign stubs and files to supplement 'mi_extra_decls_'. 
-- See Note [Foreign stubs and TH bytecode linking] mi_defaults_ :: [IfaceDefault], -- ^ default declarations exported by the module mi_top_env_ :: !(Maybe IfaceTopEnv), -- ^ Just enough information to reconstruct the top level environment in -- the /original source/ code for this module. which -- is NOT the same as mi_exports, nor mi_decls (which -- may contains declarations for things not actually -- defined by the user). Used for GHCi and for inspecting -- the contents of modules via the GHC API only. -- -- (We need the source file to figure out the -- top-level environment, if we didn't compile this module -- from source then this field contains @Nothing@). -- -- Strictly speaking this field should live in the -- 'HomeModInfo', but that leads to more plumbing. -- Instance declarations and rules mi_insts_ :: [IfaceClsInst], -- ^ Sorted class instance mi_fam_insts_ :: [IfaceFamInst], -- ^ Sorted family instances mi_rules_ :: [IfaceRule], -- ^ Sorted rules mi_hpc_ :: !AnyHpcUsage, -- ^ True if this program uses Hpc at any point in the program. mi_trust_ :: !IfaceTrustInfo, -- ^ Safe Haskell Trust information for this module. mi_trust_pkg_ :: !Bool, -- ^ Do we require the package this module resides in be trusted -- to trust this module? This is used for the situation where a -- module is Safe (so doesn't require the package be trusted -- itself) but imports some trustworthy modules from its own -- package (which does require its own package be trusted). -- See Note [Trust Own Package] in GHC.Rename.Names mi_complete_matches_ :: ![IfaceCompleteMatch], mi_docs_ :: !(Maybe Docs), -- ^ Docstrings and related data for use by haddock, the ghci -- @:doc@ command, and other tools. -- -- @Just _@ @<=>@ the module was built with @-haddock@. mi_final_exts_ :: !(IfaceBackendExts phase), -- ^ Either `()` or `ModIfaceBackend` for -- a fully instantiated interface. 
mi_ext_fields_ :: !ExtensibleFields, -- ^ Additional optional fields, where the Map key represents -- the field name, resulting in a (size, serialized data) pair. -- Because the data is intended to be serialized through the -- internal `Binary` class (increasing compatibility with types -- using `Name` and `FastString`, such as HIE), this format is -- chosen over `ByteString`s. -- mi_src_hash_ :: !Fingerprint, -- ^ Hash of the .hs source, used for recompilation checking. mi_hi_bytes_ :: !(IfaceBinHandle phase) -- ^ A serialised in-memory buffer of this 'ModIface'. -- If this handle is given, we can avoid serialising the 'ModIface' -- when writing this 'ModIface' to disk, and write this buffer to disk instead. -- See Note [Sharing of ModIface]. } -- Enough information to reconstruct the top level environment for a module data IfaceTopEnv = IfaceTopEnv { ifaceTopExports :: !IfGlobalRdrEnv -- ^ all top level things in this module, including unexported stuff , ifaceImports :: ![IfaceImport] -- ^ all the imports in this module } instance NFData IfaceTopEnv where rnf (IfaceTopEnv a b) = rnf a `seq` rnf b {- Note [Strictness in ModIface] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ModIface is the Haskell representation of an interface (.hi) file. * During compilation we write out ModIface values to disk for files that we have just compiled * For packages that we depend on we load the ModIface from disk. Some fields in the ModIface are deliberately lazy because when we read an interface file we don't always need all the parts. For example, an interface file contains information about documentation which is often not needed during compilation. This is achieved using the lazyPut/lazyGet pair. If the field was strict then we would pointlessly load this information into memory. 
On the other hand, if we create a ModIface but **don't** write it to disk then to avoid space leaks we need to make sure to deepseq all these lazy fields because the ModIface might live for a long time (for instance in a GHCi session). That's why in GHC.Driver.Main.hscMaybeWriteIface there is the call to forceModIface. -} -- | Old-style accessor for whether or not the ModIface came from an hs-boot -- file. mi_boot :: ModIface -> IsBootInterface mi_boot iface = if mi_hsc_src iface == HsBootFile then IsBoot else NotBoot mi_mnwib :: ModIface -> ModuleNameWithIsBoot mi_mnwib iface = GWIB (moduleName $ mi_module iface) (mi_boot iface) -- | Lookups up a (possibly cached) fixity from a 'ModIface'. If one cannot be -- found, 'defaultFixity' is returned instead. mi_fix :: ModIface -> OccName -> Fixity mi_fix iface name = mi_fix_fn (mi_final_exts iface) name `orElse` defaultFixity -- | The semantic module for this interface; e.g., if it's a interface -- for a signature, if 'mi_module' is @p[A=]:A@, 'mi_semantic_module' -- will be @@. mi_semantic_module :: ModIface_ a -> Module mi_semantic_module iface = case mi_sig_of iface of Nothing -> mi_module iface Just mod -> mod -- | The "precise" free holes, e.g., the signatures that this -- 'ModIface' depends on. mi_free_holes :: ModIface -> UniqDSet ModuleName mi_free_holes iface = case getModuleInstantiation (mi_module iface) of (_, Just indef) -- A mini-hack: we rely on the fact that 'renameFreeHoles' -- drops things that aren't holes. -> renameFreeHoles (mkUniqDSet cands) (instUnitInsts (moduleUnit indef)) _ -> emptyUniqDSet where cands = dep_sig_mods $ mi_deps iface -- | Given a set of free holes, and a unit identifier, rename -- the free holes according to the instantiation of the unit -- identifier. For example, if we have A and B free, and -- our unit identity is @p[A=,B=impl:B]@, the renamed free -- holes are just C. 
renameFreeHoles :: UniqDSet ModuleName -> [(ModuleName, Module)] -> UniqDSet ModuleName renameFreeHoles fhs insts = unionManyUniqDSets (map lookup_impl (uniqDSetToList fhs)) where hmap = listToUFM insts lookup_impl mod_name | Just mod <- lookupUFM hmap mod_name = moduleFreeHoles mod -- It wasn't actually a hole | otherwise = emptyUniqDSet -- See Note [Strictness in ModIface] about where we use lazyPut vs put instance Binary ModIface where put_ bh (PrivateModIface { mi_module_ = mod, mi_sig_of_ = sig_of, mi_hsc_src_ = hsc_src, mi_src_hash_ = _src_hash, -- Don't `put_` this in the instance -- because we are going to write it -- out separately in the actual file mi_hi_bytes_ = _hi_bytes, -- We don't serialise the 'mi_hi_bytes_', as it itself -- may contain an in-memory byte array buffer for this -- 'ModIface'. If we used 'put_' on this 'ModIface', then -- we likely have a good reason, and do not want to reuse -- the byte array. -- See Note [Private fields in ModIface] mi_deps_ = deps, mi_usages_ = usages, mi_exports_ = exports, mi_used_th_ = used_th, mi_fixities_ = fixities, mi_warns_ = warns, mi_anns_ = anns, mi_decls_ = decls, mi_extra_decls_ = extra_decls, mi_foreign_ = foreign_, mi_defaults_ = defaults, mi_insts_ = insts, mi_fam_insts_ = fam_insts, mi_rules_ = rules, mi_hpc_ = hpc_info, mi_trust_ = trust, mi_trust_pkg_ = trust_pkg, mi_complete_matches_ = complete_matches, mi_docs_ = docs, mi_ext_fields_ = _ext_fields, -- Don't `put_` this in the instance so we -- can deal with it's pointer in the header -- when we write the actual file mi_final_exts_ = ModIfaceBackend { mi_iface_hash = iface_hash, mi_mod_hash = mod_hash, mi_flag_hash = flag_hash, mi_opt_hash = opt_hash, mi_hpc_hash = hpc_hash, mi_plugin_hash = plugin_hash, mi_orphan = orphan, mi_finsts = hasFamInsts, mi_exp_hash = exp_hash, mi_orphan_hash = orphan_hash }}) = do put_ bh mod put_ bh sig_of put_ bh hsc_src put_ bh iface_hash put_ bh mod_hash put_ bh flag_hash put_ bh opt_hash put_ bh hpc_hash put_ bh 
plugin_hash put_ bh orphan put_ bh hasFamInsts lazyPut bh deps lazyPut bh usages put_ bh exports put_ bh exp_hash put_ bh used_th put_ bh fixities lazyPut bh warns lazyPut bh anns put_ bh decls put_ bh extra_decls put_ bh defaults put_ bh foreign_ put_ bh insts put_ bh fam_insts lazyPut bh rules put_ bh orphan_hash put_ bh hpc_info put_ bh trust put_ bh trust_pkg put_ bh complete_matches lazyPutMaybe bh docs get bh = do mod <- get bh sig_of <- get bh hsc_src <- get bh iface_hash <- get bh mod_hash <- get bh flag_hash <- get bh opt_hash <- get bh hpc_hash <- get bh plugin_hash <- get bh orphan <- get bh hasFamInsts <- get bh deps <- lazyGet bh usages <- {-# SCC "bin_usages" #-} lazyGet bh exports <- {-# SCC "bin_exports" #-} get bh exp_hash <- get bh used_th <- get bh fixities <- {-# SCC "bin_fixities" #-} get bh warns <- {-# SCC "bin_warns" #-} lazyGet bh anns <- {-# SCC "bin_anns" #-} lazyGet bh decls <- {-# SCC "bin_tycldecls" #-} get bh extra_decls <- get bh defaults <- get bh foreign_ <- get bh insts <- {-# SCC "bin_insts" #-} get bh fam_insts <- {-# SCC "bin_fam_insts" #-} get bh rules <- {-# SCC "bin_rules" #-} lazyGet bh orphan_hash <- get bh hpc_info <- get bh trust <- get bh trust_pkg <- get bh complete_matches <- get bh docs <- lazyGetMaybe bh return (PrivateModIface { mi_module_ = mod, mi_sig_of_ = sig_of, mi_hsc_src_ = hsc_src, mi_src_hash_ = fingerprint0, -- placeholder because this is dealt -- with specially when the file is read mi_hi_bytes_ = -- We can't populate this field here, as we are -- missing the 'mi_ext_fields_' field, which is -- handled in 'getIfaceWithExtFields'. 
FullIfaceBinHandle Strict.Nothing, mi_deps_ = deps, mi_usages_ = usages, mi_exports_ = exports, mi_used_th_ = used_th, mi_anns_ = anns, mi_fixities_ = fixities, mi_warns_ = warns, mi_decls_ = decls, mi_extra_decls_ = extra_decls, mi_foreign_ = foreign_, mi_top_env_ = Nothing, mi_defaults_ = defaults, mi_insts_ = insts, mi_fam_insts_ = fam_insts, mi_rules_ = rules, mi_hpc_ = hpc_info, mi_trust_ = trust, mi_trust_pkg_ = trust_pkg, -- And build the cached values mi_complete_matches_ = complete_matches, mi_docs_ = docs, mi_ext_fields_ = emptyExtensibleFields, -- placeholder because this is dealt -- with specially when the file is read mi_final_exts_ = ModIfaceBackend { mi_iface_hash = iface_hash, mi_mod_hash = mod_hash, mi_flag_hash = flag_hash, mi_opt_hash = opt_hash, mi_hpc_hash = hpc_hash, mi_plugin_hash = plugin_hash, mi_orphan = orphan, mi_finsts = hasFamInsts, mi_exp_hash = exp_hash, mi_orphan_hash = orphan_hash, mi_decl_warn_fn = mkIfaceDeclWarnCache $ fromIfaceWarnings warns, mi_export_warn_fn = mkIfaceExportWarnCache $ fromIfaceWarnings warns, mi_fix_fn = mkIfaceFixCache fixities, mi_hash_fn = mkIfaceHashCache decls }}) -- | The original names declared of a certain module that are exported type IfaceExport = AvailInfo emptyPartialModIface :: Module -> PartialModIface emptyPartialModIface mod = PrivateModIface { mi_module_ = mod, mi_sig_of_ = Nothing, mi_hsc_src_ = HsSrcFile, mi_src_hash_ = fingerprint0, mi_hi_bytes_ = PartialIfaceBinHandle, mi_deps_ = noDependencies, mi_usages_ = [], mi_exports_ = [], mi_used_th_ = False, mi_fixities_ = [], mi_warns_ = IfWarnSome [] [], mi_anns_ = [], mi_defaults_ = [], mi_insts_ = [], mi_fam_insts_ = [], mi_rules_ = [], mi_decls_ = [], mi_extra_decls_ = Nothing, mi_foreign_ = emptyIfaceForeign, mi_top_env_ = Nothing, mi_hpc_ = False, mi_trust_ = noIfaceTrustInfo, mi_trust_pkg_ = False, mi_complete_matches_ = [], mi_docs_ = Nothing, mi_final_exts_ = (), mi_ext_fields_ = emptyExtensibleFields } emptyFullModIface :: Module -> 
ModIface emptyFullModIface mod = (emptyPartialModIface mod) { mi_decls_ = [] , mi_hi_bytes_ = FullIfaceBinHandle Strict.Nothing , mi_final_exts_ = ModIfaceBackend { mi_iface_hash = fingerprint0, mi_mod_hash = fingerprint0, mi_flag_hash = fingerprint0, mi_opt_hash = fingerprint0, mi_hpc_hash = fingerprint0, mi_plugin_hash = fingerprint0, mi_orphan = False, mi_finsts = False, mi_exp_hash = fingerprint0, mi_orphan_hash = fingerprint0, mi_decl_warn_fn = emptyIfaceWarnCache, mi_export_warn_fn = emptyIfaceWarnCache, mi_fix_fn = emptyIfaceFixCache, mi_hash_fn = emptyIfaceHashCache } } -- | Constructs cache for the 'mi_hash_fn' field of a 'ModIface' mkIfaceHashCache :: [(Fingerprint,IfaceDecl)] -> (OccName -> Maybe (OccName, Fingerprint)) mkIfaceHashCache pairs = \occ -> lookupOccEnv env occ where env = foldl' add_decl emptyOccEnv pairs add_decl env0 (v,d) = foldl' add env0 (ifaceDeclFingerprints v d) where add env0 (occ,hash) = extendOccEnv env0 occ (occ,hash) emptyIfaceHashCache :: OccName -> Maybe (OccName, Fingerprint) emptyIfaceHashCache _occ = Nothing -- Take care, this instance only forces to the degree necessary to -- avoid major space leaks. 
instance ( NFData (IfaceBackendExts (phase :: ModIfacePhase)) , NFData (IfaceDeclExts (phase :: ModIfacePhase)) ) => NFData (ModIface_ phase) where rnf (PrivateModIface { mi_module_, mi_sig_of_, mi_hsc_src_, mi_hi_bytes_, mi_deps_, mi_usages_ , mi_exports_, mi_used_th_, mi_fixities_, mi_warns_, mi_anns_ , mi_decls_, mi_defaults_, mi_extra_decls_, mi_foreign_, mi_top_env_, mi_insts_ , mi_fam_insts_, mi_rules_, mi_hpc_, mi_trust_, mi_trust_pkg_ , mi_complete_matches_, mi_docs_, mi_final_exts_ , mi_ext_fields_, mi_src_hash_ }) = rnf mi_module_ `seq` rnf mi_sig_of_ `seq` mi_hsc_src_ `seq` mi_hi_bytes_ `seq` mi_deps_ `seq` mi_usages_ `seq` mi_exports_ `seq` rnf mi_used_th_ `seq` mi_fixities_ `seq` rnf mi_warns_ `seq` rnf mi_anns_ `seq` rnf mi_decls_ `seq` rnf mi_defaults_ `seq` rnf mi_extra_decls_ `seq` rnf mi_foreign_ `seq` rnf mi_top_env_ `seq` rnf mi_insts_ `seq` rnf mi_fam_insts_ `seq` rnf mi_rules_ `seq` rnf mi_hpc_ `seq` mi_trust_ `seq` rnf mi_trust_pkg_ `seq` rnf mi_complete_matches_ `seq` rnf mi_docs_ `seq` mi_final_exts_ `seq` mi_ext_fields_ `seq` rnf mi_src_hash_ `seq` () instance NFData (ModIfaceBackend) where rnf (ModIfaceBackend{ mi_iface_hash, mi_mod_hash, mi_flag_hash, mi_opt_hash , mi_hpc_hash, mi_plugin_hash, mi_orphan, mi_finsts, mi_exp_hash , mi_orphan_hash, mi_decl_warn_fn, mi_export_warn_fn, mi_fix_fn , mi_hash_fn}) = rnf mi_iface_hash `seq` rnf mi_mod_hash `seq` rnf mi_flag_hash `seq` rnf mi_opt_hash `seq` rnf mi_hpc_hash `seq` rnf mi_plugin_hash `seq` rnf mi_orphan `seq` rnf mi_finsts `seq` rnf mi_exp_hash `seq` rnf mi_orphan_hash `seq` rnf mi_decl_warn_fn `seq` rnf mi_export_warn_fn `seq` rnf mi_fix_fn `seq` rnf mi_hash_fn forceModIface :: ModIface -> IO () forceModIface iface = () <$ (evaluate $ force iface) -- | Records whether a module has orphans. 
An \"orphan\" is one of: -- -- * An instance declaration in a module other than the definition -- module for one of the type constructors or classes in the instance head -- -- * A rewrite rule in a module other than the one defining -- the function in the head of the rule -- type WhetherHasOrphans = Bool -- | Does this module define family instances? type WhetherHasFamInst = Bool -- ---------------------------------------------------------------------------- -- Modify a 'ModIface'. -- ---------------------------------------------------------------------------- {- Note [Private fields in ModIface] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The fields of 'ModIface' are private, e.g., not exported, to make the API impossible to misuse. A 'ModIface' can be "compressed" in-memory using 'shareIface', which serialises the 'ModIface' to an in-memory buffer. This has the advantage of reducing memory usage of 'ModIface', reducing the overall memory usage of GHC. See Note [Sharing of ModIface]. This in-memory buffer can be reused, if and only if the 'ModIface' is not modified after it has been "compressed"/shared via 'shareIface'. Instead of serialising 'ModIface', we simply write the in-memory buffer to disk directly. However, we can't rely that a 'ModIface' isn't modified after 'shareIface' has been called. Thus, we make all fields of 'ModIface' private and modification only happens via exported update functions, such as 'set_mi_decls'. These functions unconditionally clear any in-memory buffer if used, forcing us to serialise the 'ModIface' to disk again. -} -- | Given a 'PartialModIface', turn it into a 'ModIface' by completing -- missing fields. 
completePartialModIface :: PartialModIface -> [(Fingerprint, IfaceDecl)] -> Maybe [IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo] -> ModIfaceBackend -> ModIface completePartialModIface partial decls extra_decls final_exts = partial { mi_decls_ = decls , mi_extra_decls_ = extra_decls , mi_final_exts_ = final_exts , mi_hi_bytes_ = FullIfaceBinHandle Strict.Nothing } -- | Add a source fingerprint to a 'ModIface_' without invalidating the byte array -- buffer 'mi_hi_bytes'. -- This is a variant of 'set_mi_src_hash' which does invalidate the buffer. -- -- The 'mi_src_hash' is computed outside of 'ModIface_' based on the 'ModSummary'. addSourceFingerprint :: Fingerprint -> ModIface_ phase -> ModIface_ phase addSourceFingerprint val iface = iface { mi_src_hash_ = val } -- | Copy fields that aren't serialised to disk to the new 'ModIface_'. -- This includes especially hashes that are usually stored in the interface -- file header and 'mi_top_env'. -- -- We need this function after calling 'shareIface', to make sure the -- 'ModIface_' doesn't lose any information. This function does not discard -- the in-memory byte array buffer 'mi_hi_bytes'. 
restoreFromOldModIface :: ModIface_ phase -> ModIface_ phase -> ModIface_ phase restoreFromOldModIface old new = new { mi_top_env_ = mi_top_env_ old , mi_hsc_src_ = mi_hsc_src_ old , mi_src_hash_ = mi_src_hash_ old } set_mi_module :: Module -> ModIface_ phase -> ModIface_ phase set_mi_module val iface = clear_mi_hi_bytes $ iface { mi_module_ = val } set_mi_sig_of :: Maybe Module -> ModIface_ phase -> ModIface_ phase set_mi_sig_of val iface = clear_mi_hi_bytes $ iface { mi_sig_of_ = val } set_mi_hsc_src :: HscSource -> ModIface_ phase -> ModIface_ phase set_mi_hsc_src val iface = clear_mi_hi_bytes $ iface { mi_hsc_src_ = val } set_mi_src_hash :: Fingerprint -> ModIface_ phase -> ModIface_ phase set_mi_src_hash val iface = clear_mi_hi_bytes $ iface { mi_src_hash_ = val } set_mi_hi_bytes :: IfaceBinHandle phase -> ModIface_ phase -> ModIface_ phase set_mi_hi_bytes val iface = iface { mi_hi_bytes_ = val } set_mi_deps :: Dependencies -> ModIface_ phase -> ModIface_ phase set_mi_deps val iface = clear_mi_hi_bytes $ iface { mi_deps_ = val } set_mi_usages :: [Usage] -> ModIface_ phase -> ModIface_ phase set_mi_usages val iface = clear_mi_hi_bytes $ iface { mi_usages_ = val } set_mi_exports :: [IfaceExport] -> ModIface_ phase -> ModIface_ phase set_mi_exports val iface = clear_mi_hi_bytes $ iface { mi_exports_ = val } set_mi_used_th :: Bool -> ModIface_ phase -> ModIface_ phase set_mi_used_th val iface = clear_mi_hi_bytes $ iface { mi_used_th_ = val } set_mi_fixities :: [(OccName, Fixity)] -> ModIface_ phase -> ModIface_ phase set_mi_fixities val iface = clear_mi_hi_bytes $ iface { mi_fixities_ = val } set_mi_warns :: IfaceWarnings -> ModIface_ phase -> ModIface_ phase set_mi_warns val iface = clear_mi_hi_bytes $ iface { mi_warns_ = val } set_mi_anns :: [IfaceAnnotation] -> ModIface_ phase -> ModIface_ phase set_mi_anns val iface = clear_mi_hi_bytes $ iface { mi_anns_ = val } set_mi_insts :: [IfaceClsInst] -> ModIface_ phase -> ModIface_ phase set_mi_insts val iface = 
clear_mi_hi_bytes $ iface { mi_insts_ = val } set_mi_fam_insts :: [IfaceFamInst] -> ModIface_ phase -> ModIface_ phase set_mi_fam_insts val iface = clear_mi_hi_bytes $ iface { mi_fam_insts_ = val } set_mi_rules :: [IfaceRule] -> ModIface_ phase -> ModIface_ phase set_mi_rules val iface = clear_mi_hi_bytes $ iface { mi_rules_ = val } set_mi_decls :: [IfaceDeclExts phase] -> ModIface_ phase -> ModIface_ phase set_mi_decls val iface = clear_mi_hi_bytes $ iface { mi_decls_ = val } set_mi_defaults :: [IfaceDefault] -> ModIface_ phase -> ModIface_ phase set_mi_defaults val iface = clear_mi_hi_bytes $ iface { mi_defaults_ = val } set_mi_extra_decls :: Maybe [IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo] -> ModIface_ phase -> ModIface_ phase set_mi_extra_decls val iface = clear_mi_hi_bytes $ iface { mi_extra_decls_ = val } set_mi_foreign :: IfaceForeign -> ModIface_ phase -> ModIface_ phase set_mi_foreign foreign_ iface = clear_mi_hi_bytes $ iface { mi_foreign_ = foreign_ } set_mi_top_env :: Maybe IfaceTopEnv -> ModIface_ phase -> ModIface_ phase set_mi_top_env val iface = clear_mi_hi_bytes $ iface { mi_top_env_ = val } set_mi_hpc :: AnyHpcUsage -> ModIface_ phase -> ModIface_ phase set_mi_hpc val iface = clear_mi_hi_bytes $ iface { mi_hpc_ = val } set_mi_trust :: IfaceTrustInfo -> ModIface_ phase -> ModIface_ phase set_mi_trust val iface = clear_mi_hi_bytes $ iface { mi_trust_ = val } set_mi_trust_pkg :: Bool -> ModIface_ phase -> ModIface_ phase set_mi_trust_pkg val iface = clear_mi_hi_bytes $ iface { mi_trust_pkg_ = val } set_mi_complete_matches :: [IfaceCompleteMatch] -> ModIface_ phase -> ModIface_ phase set_mi_complete_matches val iface = clear_mi_hi_bytes $ iface { mi_complete_matches_ = val } set_mi_docs :: Maybe Docs -> ModIface_ phase -> ModIface_ phase set_mi_docs val iface = clear_mi_hi_bytes $ iface { mi_docs_ = val } set_mi_final_exts :: IfaceBackendExts phase -> ModIface_ phase -> ModIface_ phase set_mi_final_exts val iface = clear_mi_hi_bytes $ iface { 
mi_final_exts_ = val } set_mi_ext_fields :: ExtensibleFields -> ModIface_ phase -> ModIface_ phase set_mi_ext_fields val iface = clear_mi_hi_bytes $ iface { mi_ext_fields_ = val } -- | Invalidate any byte array buffer we might have. clear_mi_hi_bytes :: ModIface_ phase -> ModIface_ phase clear_mi_hi_bytes iface = iface { mi_hi_bytes_ = case mi_hi_bytes iface of PartialIfaceBinHandle -> PartialIfaceBinHandle FullIfaceBinHandle _ -> FullIfaceBinHandle Strict.Nothing } -- ---------------------------------------------------------------------------- -- 'ModIface' pattern synonyms to keep breakage low. -- ---------------------------------------------------------------------------- {- Note [Inline Pattern synonym of ModIface] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The introduction of the 'ModIface' pattern synonym originally caused an increase in allocated bytes in multiple performance tests. In some benchmarks, it was a 2~3% increase. Without {-# INLINE ModIface #-}, the generated core reveals the reason for this increase. We show the core for the 'mi_module' record selector: @ mi_module = \ @phase iface -> $w$mModIface iface mi_module1 $w$mModIface = \ @phase iface cont -> case iface of { PrivateModIface a b ... z -> cont a b ... z } mi_module1 = \ @phase a _ ... _ -> a @ Thus, we can see the '$w$mModIface' is not inlined, leading to an increase in the allocated bytes. However, with the pragma, the correct core is generated: @ mi_module = mi_module_ @ -} -- See Note [Inline Pattern synonym of ModIface] for why we have all these -- inline pragmas. 
{-# INLINE ModIface #-} {-# INLINE mi_module #-} {-# INLINE mi_sig_of #-} {-# INLINE mi_hsc_src #-} {-# INLINE mi_deps #-} {-# INLINE mi_usages #-} {-# INLINE mi_exports #-} {-# INLINE mi_used_th #-} {-# INLINE mi_fixities #-} {-# INLINE mi_warns #-} {-# INLINE mi_anns #-} {-# INLINE mi_decls #-} {-# INLINE mi_extra_decls #-} {-# INLINE mi_foreign #-} {-# INLINE mi_top_env #-} {-# INLINE mi_insts #-} {-# INLINE mi_fam_insts #-} {-# INLINE mi_rules #-} {-# INLINE mi_hpc #-} {-# INLINE mi_trust #-} {-# INLINE mi_trust_pkg #-} {-# INLINE mi_complete_matches #-} {-# INLINE mi_docs #-} {-# INLINE mi_final_exts #-} {-# INLINE mi_ext_fields #-} {-# INLINE mi_src_hash #-} {-# INLINE mi_hi_bytes #-} {-# COMPLETE ModIface #-} pattern ModIface :: Module -> Maybe Module -> HscSource -> Dependencies -> [Usage] -> [IfaceExport] -> Bool -> [(OccName, Fixity)] -> IfaceWarnings -> [IfaceAnnotation] -> [IfaceDeclExts phase] -> Maybe [IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo] -> IfaceForeign -> [IfaceDefault] -> Maybe IfaceTopEnv -> [IfaceClsInst] -> [IfaceFamInst] -> [IfaceRule] -> AnyHpcUsage -> IfaceTrustInfo -> Bool -> [IfaceCompleteMatch] -> Maybe Docs -> IfaceBackendExts phase -> ExtensibleFields -> Fingerprint -> IfaceBinHandle phase -> ModIface_ phase pattern ModIface { mi_module , mi_sig_of , mi_hsc_src , mi_deps , mi_usages , mi_exports , mi_used_th , mi_fixities , mi_warns , mi_anns , mi_decls , mi_extra_decls , mi_foreign , mi_defaults , mi_top_env , mi_insts , mi_fam_insts , mi_rules , mi_hpc , mi_trust , mi_trust_pkg , mi_complete_matches , mi_docs , mi_final_exts , mi_ext_fields , mi_src_hash , mi_hi_bytes } <- PrivateModIface { mi_module_ = mi_module , mi_sig_of_ = mi_sig_of , mi_hsc_src_ = mi_hsc_src , mi_deps_ = mi_deps , mi_usages_ = mi_usages , mi_exports_ = mi_exports , mi_used_th_ = mi_used_th , mi_fixities_ = mi_fixities , mi_warns_ = mi_warns , mi_anns_ = mi_anns , mi_decls_ = mi_decls , mi_extra_decls_ = mi_extra_decls , mi_foreign_ = mi_foreign , 
mi_defaults_ = mi_defaults , mi_top_env_ = mi_top_env , mi_insts_ = mi_insts , mi_fam_insts_ = mi_fam_insts , mi_rules_ = mi_rules , mi_hpc_ = mi_hpc , mi_trust_ = mi_trust , mi_trust_pkg_ = mi_trust_pkg , mi_complete_matches_ = mi_complete_matches , mi_docs_ = mi_docs , mi_final_exts_ = mi_final_exts , mi_ext_fields_ = mi_ext_fields , mi_src_hash_ = mi_src_hash , mi_hi_bytes_ = mi_hi_bytes } ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/ModSummary.hs0000644000000000000000000001753207346545000022513 0ustar0000000000000000{-# LANGUAGE TupleSections #-} -- | A ModSummary is a node in the compilation manager's dependency graph -- (ModuleGraph) module GHC.Unit.Module.ModSummary ( ModSummary (..) , ms_unitid , ms_installed_mod , ms_mod_name , ms_imps , ms_plugin_imps , ms_mnwib , ms_home_srcimps , ms_home_imps , msHiFilePath , msDynHiFilePath , msHsFilePath , msObjFilePath , msDynObjFilePath , msHsFileOsPath , msHiFileOsPath , msDynHiFileOsPath , msObjFileOsPath , msDynObjFileOsPath , msDeps , isBootSummary , findTarget ) where import GHC.Prelude import GHC.Hs import GHC.Driver.DynFlags import GHC.Unit.Types import GHC.Unit.Module import GHC.Types.SourceFile ( HscSource(..), hscSourceString ) import GHC.Types.SrcLoc import GHC.Types.Target import GHC.Types.PkgQual import GHC.Data.Maybe import GHC.Data.OsPath (OsPath) import GHC.Data.StringBuffer ( StringBuffer ) import GHC.Utils.Fingerprint import GHC.Utils.Outputable import Data.Time -- | Data for a module node in a 'ModuleGraph'. 
Module nodes of the module graph -- are one of: -- -- * A regular Haskell source module -- * A hi-boot source module -- data ModSummary = ModSummary { ms_mod :: Module, -- ^ Identity of the module ms_hsc_src :: HscSource, -- ^ The module source either plain Haskell, hs-boot, or hsig ms_location :: ModLocation, -- ^ Location of the various files belonging to the module ms_hs_hash :: Fingerprint, -- ^ Content hash of source file ms_obj_date :: Maybe UTCTime, -- ^ Timestamp of object, if we have one ms_dyn_obj_date :: !(Maybe UTCTime), -- ^ Timestamp of dynamic object, if we have one ms_iface_date :: Maybe UTCTime, -- ^ Timestamp of hi file, if we have one -- See Note [When source is considered modified] and #9243 ms_hie_date :: Maybe UTCTime, -- ^ Timestamp of hie file, if we have one ms_srcimps :: [(PkgQual, Located ModuleName)], -- FIXME: source imports are never from an external package, why do we allow PkgQual? -- ^ Source imports of the module ms_textual_imps :: [(PkgQual, Located ModuleName)], -- ^ Non-source imports of the module from the module *text* ms_ghc_prim_import :: !Bool, -- ^ Whether the special module GHC.Prim was imported explicitly ms_parsed_mod :: Maybe HsParsedModule, -- ^ The parsed, nonrenamed source, if we have it. This is also -- used to support "inline module syntax" in Backpack files. ms_hspp_file :: FilePath, -- ^ Filename of preprocessed source file ms_hspp_opts :: DynFlags, -- ^ Cached flags from @OPTIONS@, @INCLUDE@ and @LANGUAGE@ -- pragmas in the modules source code ms_hspp_buf :: Maybe StringBuffer -- ^ The actual preprocessed source, if we have it } ms_unitid :: ModSummary -> UnitId ms_unitid = toUnitId . moduleUnit . ms_mod ms_installed_mod :: ModSummary -> InstalledModule ms_installed_mod = fst . getModuleInstantiation . ms_mod ms_mod_name :: ModSummary -> ModuleName ms_mod_name = moduleName . ms_mod -- | Textual imports, plus plugin imports but not SOURCE imports. 
ms_imps :: ModSummary -> [(PkgQual, Located ModuleName)] ms_imps ms = ms_textual_imps ms ++ ms_plugin_imps ms -- | Plugin imports ms_plugin_imps :: ModSummary -> [(PkgQual, Located ModuleName)] ms_plugin_imps ms = map ((NoPkgQual,) . noLoc) (pluginModNames (ms_hspp_opts ms)) -- | All of the (possibly) home module imports from the given list that is to -- say, each of these module names could be a home import if an appropriately -- named file existed. (This is in contrast to package qualified imports, which -- are guaranteed not to be home imports.) home_imps :: [(PkgQual, Located ModuleName)] -> [(PkgQual, Located ModuleName)] home_imps imps = filter (maybe_home . fst) imps where maybe_home NoPkgQual = True maybe_home (ThisPkg _) = True maybe_home (OtherPkg _) = False -- | Like 'ms_home_imps', but for SOURCE imports. ms_home_srcimps :: ModSummary -> ([Located ModuleName]) -- [] here because source imports can only refer to the current package. ms_home_srcimps = map snd . home_imps . ms_srcimps -- | All of the (possibly) home module imports from a -- 'ModSummary'; that is to say, each of these module names -- could be a home import if an appropriately named file -- existed. (This is in contrast to package qualified -- imports, which are guaranteed not to be home imports.) ms_home_imps :: ModSummary -> ([(PkgQual, Located ModuleName)]) ms_home_imps = home_imps . ms_imps -- The ModLocation contains both the original source filename and the -- filename of the cleaned-up source file after all preprocessing has been -- done. The point is that the summariser will have to cpp/unlit/whatever -- all files anyway, and there's no point in doing this twice -- just -- park the result in a temp file, put the name of it in the location, -- and let @compile@ read from that file on the way back up. 
-- The ModLocation is stable over successive up-sweeps in GHCi, wheres -- the ms_hs_hash and imports can, of course, change msHsFilePath, msDynHiFilePath, msHiFilePath, msObjFilePath, msDynObjFilePath :: ModSummary -> FilePath msHsFilePath ms = expectJust "msHsFilePath" (ml_hs_file (ms_location ms)) msHiFilePath ms = ml_hi_file (ms_location ms) msDynHiFilePath ms = ml_dyn_hi_file (ms_location ms) msObjFilePath ms = ml_obj_file (ms_location ms) msDynObjFilePath ms = ml_dyn_obj_file (ms_location ms) msHsFileOsPath, msDynHiFileOsPath, msHiFileOsPath, msObjFileOsPath, msDynObjFileOsPath :: ModSummary -> OsPath msHsFileOsPath ms = expectJust "msHsFilePath" (ml_hs_file_ospath (ms_location ms)) msHiFileOsPath ms = ml_hi_file_ospath (ms_location ms) msDynHiFileOsPath ms = ml_dyn_hi_file_ospath (ms_location ms) msObjFileOsPath ms = ml_obj_file_ospath (ms_location ms) msDynObjFileOsPath ms = ml_dyn_obj_file_ospath (ms_location ms) -- | Did this 'ModSummary' originate from a hs-boot file? isBootSummary :: ModSummary -> IsBootInterface isBootSummary ms = if ms_hsc_src ms == HsBootFile then IsBoot else NotBoot ms_mnwib :: ModSummary -> ModuleNameWithIsBoot ms_mnwib ms = GWIB (ms_mod_name ms) (isBootSummary ms) -- | Returns the dependencies of the ModSummary s. 
msDeps :: ModSummary -> ([(PkgQual, GenWithIsBoot (Located ModuleName))]) msDeps s = [ (NoPkgQual, d) | m <- ms_home_srcimps s , d <- [ GWIB { gwib_mod = m, gwib_isBoot = IsBoot } ] ] ++ [ (pkg, (GWIB { gwib_mod = m, gwib_isBoot = NotBoot })) | (pkg, m) <- ms_imps s ] instance Outputable ModSummary where ppr ms = sep [text "ModSummary {", nest 3 (sep [text "ms_hs_hash = " <> text (show (ms_hs_hash ms)), text "ms_mod =" <+> ppr (ms_mod ms) <> text (hscSourceString (ms_hsc_src ms)) <> comma, text "unit =" <+> ppr (ms_unitid ms), text "ms_textual_imps =" <+> ppr (ms_textual_imps ms), text "ms_srcimps =" <+> ppr (ms_srcimps ms)]), char '}' ] -- | Find the first target in the provided list which matches the specified -- 'ModSummary'. findTarget :: ModSummary -> [Target] -> Maybe Target findTarget ms ts = case filter (matches ms) ts of [] -> Nothing (t:_) -> Just t where summary `matches` Target { targetId = TargetModule m, targetUnitId = unitId } = ms_mod_name summary == m && ms_unitid summary == unitId summary `matches` Target { targetId = TargetFile f _, targetUnitId = unitid } | Just f' <- ml_hs_file (ms_location summary) = f == f' && ms_unitid summary == unitid _ `matches` _ = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Status.hs0000644000000000000000000000310707346545000021672 0ustar0000000000000000module GHC.Unit.Module.Status ( HscBackendAction(..), HscRecompStatus (..) ) where import GHC.Prelude import GHC.Unit import GHC.Unit.Module.ModGuts import GHC.Unit.Module.ModIface import GHC.Utils.Fingerprint import GHC.Utils.Outputable import GHC.Unit.Home.ModInfo -- | Status of a module in incremental compilation data HscRecompStatus -- | Nothing to do because code already exists. = HscUpToDate ModIface HomeModLinkable -- | Recompilation of module, or update of interface is required. Optionally -- pass the old interface hash to avoid updating the existing interface when -- it has not changed. 
| HscRecompNeeded (Maybe Fingerprint) -- | Action to perform in backend compilation data HscBackendAction -- | Update the boot and signature file results. = HscUpdate ModIface -- | Recompile this module. | HscRecomp { hscs_guts :: CgGuts -- ^ Information for the code generator. , hscs_mod_location :: !ModLocation -- ^ Module info , hscs_partial_iface :: !PartialModIface -- ^ Partial interface , hscs_old_iface_hash :: !(Maybe Fingerprint) -- ^ Old interface hash for this compilation, if an old interface file -- exists. Pass to `hscMaybeWriteIface` when writing the interface to -- avoid updating the existing interface when the interface isn't -- changed. } instance Outputable HscBackendAction where ppr (HscUpdate mi) = text "Update:" <+> (ppr (mi_module mi)) ppr (HscRecomp _ ml _mi _mf) = text "Recomp:" <+> ppr ml ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/Warnings.hs0000644000000000000000000003267007346545000022206 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} -- | Warnings for a module module GHC.Unit.Module.Warnings ( WarningCategory(..) , mkWarningCategory , defaultWarningCategory , validWarningCategory , InWarningCategory(..) , fromWarningCategory , WarningCategorySet , emptyWarningCategorySet , completeWarningCategorySet , nullWarningCategorySet , elemWarningCategorySet , insertWarningCategorySet , deleteWarningCategorySet , Warnings (..) , WarningTxt (..) 
, LWarningTxt , DeclWarnOccNames , ExportWarnNames , warningTxtCategory , warningTxtMessage , warningTxtSame , pprWarningTxtForMsg , emptyWarn , mkIfaceDeclWarnCache , mkIfaceExportWarnCache , emptyIfaceWarnCache , insertWarnDecls , insertWarnExports ) where import GHC.Prelude import GHC.Data.FastString (FastString, mkFastString, unpackFS) import GHC.Types.SourceText import GHC.Types.Name.Occurrence import GHC.Types.Name.Env import GHC.Types.Name (Name) import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Types.Unique.Set import GHC.Hs.Doc import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Unicode import Language.Haskell.Syntax.Extension import Data.Data import Data.List (isPrefixOf) import GHC.Generics ( Generic ) import Control.DeepSeq {- Note [Warning categories] ~~~~~~~~~~~~~~~~~~~~~~~~~ See GHC Proposal 541 for the design of the warning categories feature: https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0541-warning-pragmas-with-categories.rst A WARNING pragma may be annotated with a category such as "x-partial" written after the 'in' keyword, like this: {-# WARNING in "x-partial" head "This function is partial..." #-} This is represented by the 'Maybe (Located WarningCategory)' field in 'WarningTxt'. The parser will accept an arbitrary string as the category name, then the renamer (in 'rnWarningTxt') will check it contains only valid characters, so we can generate a nicer error message than a parse error. The corresponding warnings can then be controlled with the -Wx-partial, -Wno-x-partial, -Werror=x-partial and -Wwarn=x-partial flags. Such a flag is distinguished from an 'unrecognisedWarning' by the flag parser testing 'validWarningCategory'. The 'x-' prefix means we can still usually report an unrecognised warning where the user has made a mistake. 
A DEPRECATED pragma may not have a user-defined category, and is always treated as belonging to the special category 'deprecations'. Similarly, a WARNING pragma without a category belongs to the 'deprecations' category. Thus the '-Wdeprecations' flag will enable all of the following: {-# WARNING in "deprecations" foo "This function is deprecated..." #-} {-# WARNING foo "This function is deprecated..." #-} {-# DEPRECATED foo "This function is deprecated..." #-} The '-Wwarnings-deprecations' flag is supported for backwards compatibility purposes as being equivalent to '-Wdeprecations'. The '-Wextended-warnings' warning group collects together all warnings with user-defined categories, so they can be enabled or disabled collectively. Moreover they are treated as being part of other warning groups such as '-Wdefault' (see 'warningGroupIncludesExtendedWarnings'). 'DynFlags' and 'DiagOpts' each contain a set of enabled and a set of fatal warning categories, just as they do for the finite enumeration of 'WarningFlag's built in to GHC. These are represented as 'WarningCategorySet's to allow for the possibility of them being infinite. -} data InWarningCategory = InWarningCategory { iwc_in :: !(EpToken "in"), iwc_st :: !SourceText, iwc_wc :: (LocatedE WarningCategory) } deriving Data fromWarningCategory :: WarningCategory -> InWarningCategory fromWarningCategory wc = InWarningCategory noAnn NoSourceText (noLocA wc) -- See Note [Warning categories] newtype WarningCategory = WarningCategory FastString deriving stock Data deriving newtype (Binary, Eq, Outputable, Show, Uniquable, NFData) mkWarningCategory :: FastString -> WarningCategory mkWarningCategory = WarningCategory -- | The @deprecations@ category is used for all DEPRECATED pragmas and for -- WARNING pragmas that do not specify a category. 
defaultWarningCategory :: WarningCategory defaultWarningCategory = mkWarningCategory (mkFastString "deprecations") -- | Is this warning category allowed to appear in user-defined WARNING pragmas? -- It must either be the known category @deprecations@, or be a custom category -- that begins with @x-@ and contains only valid characters (letters, numbers, -- apostrophes and dashes). validWarningCategory :: WarningCategory -> Bool validWarningCategory cat@(WarningCategory c) = cat == defaultWarningCategory || ("x-" `isPrefixOf` s && all is_allowed s) where s = unpackFS c is_allowed c = isAlphaNum c || c == '\'' || c == '-' -- | A finite or infinite set of warning categories. -- -- Unlike 'WarningFlag', there are (in principle) infinitely many warning -- categories, so we cannot necessarily enumerate all of them. However the set -- is constructed by adding or removing categories one at a time, so we can -- represent it as either a finite set of categories, or a cofinite set (where -- we store the complement). data WarningCategorySet = FiniteWarningCategorySet (UniqSet WarningCategory) -- ^ The set of warning categories is the given finite set. | CofiniteWarningCategorySet (UniqSet WarningCategory) -- ^ The set of warning categories is infinite, so the constructor stores -- its (finite) complement. -- | The empty set of warning categories. emptyWarningCategorySet :: WarningCategorySet emptyWarningCategorySet = FiniteWarningCategorySet emptyUniqSet -- | The set consisting of all possible warning categories. completeWarningCategorySet :: WarningCategorySet completeWarningCategorySet = CofiniteWarningCategorySet emptyUniqSet -- | Is this set empty? nullWarningCategorySet :: WarningCategorySet -> Bool nullWarningCategorySet (FiniteWarningCategorySet s) = isEmptyUniqSet s nullWarningCategorySet CofiniteWarningCategorySet{} = False -- | Does this warning category belong to the set? 
elemWarningCategorySet :: WarningCategory -> WarningCategorySet -> Bool elemWarningCategorySet c (FiniteWarningCategorySet s) = c `elementOfUniqSet` s elemWarningCategorySet c (CofiniteWarningCategorySet s) = not (c `elementOfUniqSet` s) -- | Insert an element into a warning category set. insertWarningCategorySet :: WarningCategory -> WarningCategorySet -> WarningCategorySet insertWarningCategorySet c (FiniteWarningCategorySet s) = FiniteWarningCategorySet (addOneToUniqSet s c) insertWarningCategorySet c (CofiniteWarningCategorySet s) = CofiniteWarningCategorySet (delOneFromUniqSet s c) -- | Delete an element from a warning category set. deleteWarningCategorySet :: WarningCategory -> WarningCategorySet -> WarningCategorySet deleteWarningCategorySet c (FiniteWarningCategorySet s) = FiniteWarningCategorySet (delOneFromUniqSet s c) deleteWarningCategorySet c (CofiniteWarningCategorySet s) = CofiniteWarningCategorySet (addOneToUniqSet s c) type LWarningTxt pass = XRec pass (WarningTxt pass) -- | Warning Text -- -- reason/explanation from a WARNING or DEPRECATED pragma data WarningTxt pass = WarningTxt (Maybe (LocatedE InWarningCategory)) -- ^ Warning category attached to this WARNING pragma, if any; -- see Note [Warning categories] SourceText [LocatedE (WithHsDocIdentifiers StringLiteral pass)] | DeprecatedTxt SourceText [LocatedE (WithHsDocIdentifiers StringLiteral pass)] deriving Generic -- | To which warning category does this WARNING or DEPRECATED pragma belong? -- See Note [Warning categories]. 
warningTxtCategory :: WarningTxt pass -> WarningCategory warningTxtCategory (WarningTxt (Just (L _ (InWarningCategory _ _ (L _ cat)))) _ _) = cat warningTxtCategory _ = defaultWarningCategory -- | The message that the WarningTxt was specified to output warningTxtMessage :: WarningTxt p -> [LocatedE (WithHsDocIdentifiers StringLiteral p)] warningTxtMessage (WarningTxt _ _ m) = m warningTxtMessage (DeprecatedTxt _ m) = m -- | True if the 2 WarningTxts have the same category and messages warningTxtSame :: WarningTxt p1 -> WarningTxt p2 -> Bool warningTxtSame w1 w2 = warningTxtCategory w1 == warningTxtCategory w2 && literal_message w1 == literal_message w2 && same_type where literal_message :: WarningTxt p -> [StringLiteral] literal_message = map (hsDocString . unLoc) . warningTxtMessage same_type | DeprecatedTxt {} <- w1, DeprecatedTxt {} <- w2 = True | WarningTxt {} <- w1, WarningTxt {} <- w2 = True | otherwise = False deriving instance Eq InWarningCategory deriving instance (Eq (IdP pass)) => Eq (WarningTxt pass) deriving instance (Data pass, Data (IdP pass)) => Data (WarningTxt pass) type instance Anno (WarningTxt (GhcPass pass)) = SrcSpanAnnP instance Outputable InWarningCategory where ppr (InWarningCategory _ _ wt) = text "in" <+> doubleQuotes (ppr wt) instance Outputable (WarningTxt pass) where ppr (WarningTxt mcat lsrc ws) = case lsrc of NoSourceText -> pp_ws ws SourceText src -> ftext src <+> ctg_doc <+> pp_ws ws <+> text "#-}" where ctg_doc = maybe empty (\ctg -> ppr ctg) mcat ppr (DeprecatedTxt lsrc ds) = case lsrc of NoSourceText -> pp_ws ds SourceText src -> ftext src <+> pp_ws ds <+> text "#-}" pp_ws :: [LocatedE (WithHsDocIdentifiers StringLiteral pass)] -> SDoc pp_ws [l] = ppr $ unLoc l pp_ws ws = text "[" <+> vcat (punctuate comma (map (ppr . unLoc) ws)) <+> text "]" pprWarningTxtForMsg :: WarningTxt p -> SDoc pprWarningTxtForMsg (WarningTxt _ _ ws) = doubleQuotes (vcat (map (ftext . sl_fs . hsDocString . 
unLoc) ws)) pprWarningTxtForMsg (DeprecatedTxt _ ds) = text "Deprecated:" <+> doubleQuotes (vcat (map (ftext . sl_fs . hsDocString . unLoc) ds)) -- | Warning information from a module data Warnings pass = WarnSome (DeclWarnOccNames pass) -- ^ Names deprecated (may be empty) (ExportWarnNames pass) -- ^ Exports deprecated (may be empty) | WarnAll (WarningTxt pass) -- ^ Whole module deprecated -- For the module-specific names only an OccName is needed because -- (1) a deprecation always applies to a binding -- defined in the module in which the deprecation appears. -- (2) deprecations are only reported outside the defining module. -- this is important because, otherwise, if we saw something like -- -- {-# DEPRECATED f "" #-} -- f = ... -- h = f -- g = let f = undefined in f -- -- we'd need more information than an OccName to know to say something -- about the use of f in h but not the use of the locally bound f in g -- -- however, because we only report about deprecations from the outside, -- and a module can only export one value called f, -- an OccName suffices. -- -- this is in contrast with fixity declarations, where we need to map -- a Name to its fixity declaration. -- -- For export deprecations we need to know where the symbol comes from, since -- we need to be able to check if the deprecated export that was imported is -- the same thing as imported by another import, which would not trigger -- a deprecation message. 
-- | Deprecated declarations type DeclWarnOccNames pass = [(OccName, WarningTxt pass)] -- | Names that are deprecated as exports type ExportWarnNames pass = [(Name, WarningTxt pass)] deriving instance Eq (IdP pass) => Eq (Warnings pass) emptyWarn :: Warnings p emptyWarn = WarnSome [] [] -- | Constructs the cache for the 'mi_decl_warn_fn' field of a 'ModIface' mkIfaceDeclWarnCache :: Warnings p -> OccName -> Maybe (WarningTxt p) mkIfaceDeclWarnCache (WarnAll t) = \_ -> Just t mkIfaceDeclWarnCache (WarnSome vs _) = lookupOccEnv (mkOccEnv vs) -- | Constructs the cache for the 'mi_export_warn_fn' field of a 'ModIface' mkIfaceExportWarnCache :: Warnings p -> Name -> Maybe (WarningTxt p) mkIfaceExportWarnCache (WarnAll _) = const Nothing -- We do not want a double report of the module deprecation mkIfaceExportWarnCache (WarnSome _ ds) = lookupNameEnv (mkNameEnv ds) emptyIfaceWarnCache :: name -> Maybe (WarningTxt p) emptyIfaceWarnCache _ = Nothing insertWarnDecls :: Warnings p -- ^ Existing warnings -> [(OccName, WarningTxt p)] -- ^ New declaration deprecations -> Warnings p -- ^ Updated warnings insertWarnDecls ws@(WarnAll _) _ = ws insertWarnDecls (WarnSome wns wes) wns' = WarnSome (wns ++ wns') wes insertWarnExports :: Warnings p -- ^ Existing warnings -> [(Name, WarningTxt p)] -- ^ New export deprecations -> Warnings p -- ^ Updated warnings insertWarnExports ws@(WarnAll _) _ = ws insertWarnExports (WarnSome wns wes) wes' = WarnSome wns (wes ++ wes') ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Module/WholeCoreBindings.hs0000644000000000000000000004617207346545000023765 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NoFieldSelectors #-} {-# LANGUAGE DuplicateRecordFields #-} module GHC.Unit.Module.WholeCoreBindings where import GHC.Cmm.CLabel import GHC.Driver.DynFlags (DynFlags (targetPlatform), initSDocContext) import GHC.ForeignSrcLang (ForeignSrcLang (..)) import GHC.Iface.Syntax import GHC.Prelude import 
GHC.Types.ForeignStubs import GHC.Unit.Module.Location import GHC.Unit.Types (Module) import GHC.Utils.Binary import GHC.Utils.Error (debugTraceMsg) import GHC.Utils.Logger (Logger) import GHC.Utils.Outputable import GHC.Utils.Panic (panic, pprPanic) import GHC.Utils.TmpFs import Control.DeepSeq (NFData (..)) import Data.Traversable (for) import Data.Word (Word8) import Data.Maybe (fromMaybe) import System.FilePath (takeExtension) {- Note [Interface Files with Core Definitions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A interface file can optionally contain the definitions of all core bindings, this is enabled by the flag `-fwrite-if-simplified-core`. This provides everything needed in addition to the normal ModIface and ModDetails to restart compilation after typechecking to generate bytecode. The `wcb_bindings` field is stored in the normal interface file and the other fields populated whilst loading the interface file. The lifecycle of a WholeCoreBindings typically proceeds as follows: 1. The ModIface which contains mi_extra_decls is loaded from disk. A linkable is created (which is headed by the `CoreBindings` constructor). This is an unhydrated set of bindings which is currently unsuitable for linking, but at the point it is loaded, the ModIface hasn't been hydrated yet (See Note [Hydrating Modules]) either so the CoreBindings constructor allows the delaying of converting the WholeCoreBindings into a proper Linkable (if we ever do that). The CoreBindings constructor also allows us to convert the WholeCoreBindings into multiple different linkables if we so desired. 2. `initWholeCoreBindings` turns a WholeCoreBindings into a proper BCOs linkable. This step combines together all the necessary information from a ModIface, ModDetails and WholeCoreBindings in order to create the linkable. The linkable created is a "LazyBCOs" linkable, which was introduced just for initWholeCoreBindings, so that the bytecode can be generated lazily. 
Using the `BCOs` constructor directly here leads to the bytecode being forced too eagerly. 3. Then when bytecode is needed, the LazyBCOs value is inspected and unpacked and the linkable is used as before. The flag `-fwrite-if-simplified-core` determines whether the extra information is written to an interface file. The program which is written is the core bindings of the module after whatever simplification the user requested has been performed. So the simplified core bindings of the interface file agree with the optimisation level as reported by the interface file. The lifecycle differs beyond laziness depending on the provenance of a module. In all cases, the main consumer for interface bytecode is 'get_link_deps', which traverses a splice's or GHCi expression's dependencies and collects the needed build artifacts, which can be objects or bytecode, depending on the build settings. 1. In make mode, all eligible modules are part of the dependency graph. Their interfaces are loaded unconditionally and in dependency order by the compilation manager, and each module's bytecode is prepared before its dependents are compiled, in one of two ways: - If the interface file for a module is missing or out of sync with its source, it is recompiled and bytecode is generated directly and immediately, not involving 'WholeCoreBindings' (in 'runHscBackendPhase'). - If the interface file is up to date, no compilation is performed, and a lazy thunk generating bytecode from interface Core bindings is created in 'compileOne'', which will only be compiled if a downstream module contains a splice that depends on it, as described above. In both cases, the bytecode 'Linkable' is stored in a 'HomeModLinkable' in the Home Unit Graph, lazy or not. 2. In oneshot mode, which compiles individual modules without a shared home unit graph, a previously compiled module is not reprocessed as described for make mode above. 
When 'get_link_deps' encounters a dependency on a local module, it requests its bytecode from the External Package State, who loads the interface on-demand. Since the EPS stores interfaces for all package dependencies in addition to local modules in oneshot mode, it has a substantial memory footprint. We try to curtail that by extracting important data into specialized fields in the EPS, and retaining only a few fields of 'ModIface' by overwriting the others with bottom values. In order to avoid keeping around all of the interface's components needed for compiling bytecode, we instead store an IO action in 'eps_iface_bytecode'. When 'get_link_deps' evaluates this action, the result is not retained in the EPS, but stored in 'LoaderState', where it may eventually get evicted to free up the memory. This IO action retains the dehydrated Core bindings from the interface in its closure. Like the bytecode 'Linkable' stored in 'LoaderState', this is preferable to storing the intermediate representation as rehydrated Core bindings, since the latter have a significantly greater memory footprint. Note [Size of Interface Files with Core Definitions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ How much overhead does `-fwrite-if-simplified-core` add to a typical interface file? As an experiment I compiled the `Cabal` library and `ghc` library (Aug 22) with | Project | .hi | .hi (fat) | .o | | --------| ---- | --------- | -- | | ghc | 32M | 68M | 127M | | Cabal | 3.2M | 9.8M | 14M | So the interface files gained in size but the end result was still smaller than the object files. -} data WholeCoreBindings = WholeCoreBindings { wcb_bindings :: [IfaceBindingX IfaceMaybeRhs IfaceTopBndrInfo] -- ^ serialised tidied core bindings. , wcb_module :: Module -- ^ The module which the bindings are for , wcb_mod_location :: ModLocation -- ^ The location where the sources reside. -- | Stubs for foreign declarations and files added via -- 'GHC.Internal.TH.Syntax.addForeignFilePath'. 
, wcb_foreign :: IfaceForeign } {- Note [Foreign stubs and TH bytecode linking] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Foreign declarations may introduce additional build products called "stubs" that contain wrappers for the exposed functions. For example, consider a foreign import of a C function named @main_loop@ from the file @bindings.h@ in the module @CLibrary@: @ foreign import capi "bindings.h main_loop" mainLoop :: IO Int @ GHC will generate a snippet of C code containing a wrapper: @ #include "bindings.h" HsInt ghczuwrapperZC0ZCmainZCCLibraryZCmainzuloop(void) {return main_loop();} @ Wrappers like these are generated as 'ForeignStubs' by the desugarer in 'dsForeign' and stored in the various @*Guts@ types; until they are compiled to temporary object files in 'runHscBackendPhase' during code generation and ultimately merged into the final object file for the module, @CLibrary.o@. This creates some problems with @-fprefer-byte-code@, which allows splices to execute bytecode instead of native code for dependencies that provide it. Usually, when some TH code depends on @CLibrary@, the linker would look for @CLibrary.o@ and load that before executing the splice, but with this flag, it will first attempt to load bytecode from @CLibrary.hi@ and compile it in-memory. Problem 1: Code for splices is loaded from interfaces in the shape of Core bindings (see 'WholeCoreBindings'), rather than from object files. Those Core bindings are intermediate build products that do not contain the module's stubs, since those are separated from the Haskell code before Core is generated and only compiled and linked into the final object when native code is generated. Therefore, stubs have to be stored separately in interface files. Unfortunately, the type 'ForeignStubs' contains 'CLabel', which is a huge type with several 'Unique's used mainly by C--. 
Luckily, the only constructor used for foreign stubs is 'ModuleLabel', which contains the name of a foreign declaration's initializer, if it has one. So we convert a 'CLabel' to 'CStubLabel' in 'encodeIfaceForeign' and store only the simplified data. Problem 2: Given module B, which contains a splice that executes code from module A, both in the home package, consider these different circumstances: 1. In make mode, both modules are recompiled 2. In make mode, only B is recompiled 3. In oneshot mode, B is compiled In case 1, 'runHscBackendPhase' directly generates bytecode from the 'CgGuts' that the main pipeline produced and stores it in the 'HomeModLinkable' that is one of its build products. The stubs are merged into a single object and added to the 'HomeModLinkable' in 'hscGenBackendPipeline'. In case 2, 'hscRecompStatus' short-circuits the pipeline while checking A, since the module is up to date. Nevertheless, it calls 'checkByteCode', which extracts Core bindings from A's interface and adds them to the 'HomeModLinkable'. No stubs are generated in this case, since the desugarer wasn't run! In both of these cases, 'compileOne'' proceeds to call 'initWholeCoreBindings', applied to the 'HomeModLinkable', to compile Core bindings (lazily) to bytecode, which is then written back to the 'HomeModLinkable'. If the 'HomeModLinkable' already contains bytecode (case 1), this is a no-op. Otherwise, the stub objects from the interface are compiled to objects in 'generateByteCode' and added to the 'HomeModLinkable' as well. Case 3 is not implemented yet (!13042). Problem 3: In all three cases, the final step before splice execution is linking. The function 'getLinkDeps' is responsible for assembling all of a splice's dependencies, looking up imported modules in the HPT and EPS, collecting all 'HomeModLinkable's and object files that it can find. However, since splices are executed in the interpreter, the 'Way' of the current build may differ from the interpreter's. 
For example, the current GHC invocation might be building a static binary, but the internal interpreter requires dynamic linking; or profiling might be enabled. To adapt to the interpreter's 'Way', 'getLinkDeps' substitutes all object files' extensions with that corresponding to that 'Way' – e.g. changing @.o@ to @.dyn_o@, which requires dependencies to be built with @-dynamic[-too]@, which in turn is enforced after downsweep in 'GHC.Driver.Make.enableCodeGenWhen'. This doesn't work for stub objects, though – they are compiled to temporary files with mismatching names, so simply switching out the suffix would refer to a nonexisting file. Even if that wasn't an issue, they are compiled for the session's 'Way', not its associated module's, so the dynamic variant wouldn't be available when building only static outputs. To mitigate this, we instead build foreign objects specially for the interpreter, updating the build flags in 'compile_for_interpreter' to use the interpreter's way. Problem 4: Foreign code may have dependencies on Haskell code. Both foreign exports and @StaticPointers@ produce stubs that contain @extern@ declarations of values referring to STG closures. When those stub objects are loaded, the undefined symbols need to be provided to the linker. I have no insight into how this works, and whether we could provide the memory address of a BCO as a ccall symbol while linking, so it's unclear at the moment what to do about this. In addition to that, those objects would also have to be loaded _after_ bytecode, and therefore 'DotO' would have to be marked additionally to separate them from those that are loaded before. If mutual dependencies between BCOs and foreign code are possible, this will be much more diffcult though. Problem 5: TH allows splices to add arbitrary files as additional linker inputs. Using the method `qAddForeignFilePath`, a foreign source file or a precompiled object file can be added to the current modules dependencies. 
These files will be processed by the pipeline and linked into the final object. Since the files may be temporarily created from a string, we have to read their contents in 'encodeIfaceForeign' and store them in the interface as well, and write them to temporary files when loading bytecode in 'decodeIfaceForeign'. -} -- | Wrapper for avoiding a dependency on 'Binary' and 'NFData' in 'CLabel'. newtype IfaceCLabel = IfaceCLabel CStubLabel instance Binary IfaceCLabel where get bh = do csl_is_initializer <- get bh csl_module <- get bh csl_name <- get bh pure (IfaceCLabel CStubLabel {csl_is_initializer, csl_module, csl_name}) put_ bh (IfaceCLabel CStubLabel {csl_is_initializer, csl_module, csl_name}) = do put_ bh csl_is_initializer put_ bh csl_module put_ bh csl_name instance NFData IfaceCLabel where rnf (IfaceCLabel CStubLabel {csl_is_initializer, csl_module, csl_name}) = rnf csl_is_initializer `seq` rnf csl_module `seq` rnf csl_name instance Outputable IfaceCLabel where ppr (IfaceCLabel l) = ppr l -- | Simplified encoding of 'GHC.Types.ForeignStubs.ForeignStubs' for interface -- serialization. -- -- See Note [Foreign stubs and TH bytecode linking] data IfaceCStubs = IfaceCStubs { header :: String, source :: String, initializers :: [IfaceCLabel], finalizers :: [IfaceCLabel] } instance Outputable IfaceCStubs where ppr IfaceCStubs {header, source, initializers, finalizers} = vcat [ hang (text "header:") 2 (vcat (text <$> lines header)), hang (text "source:") 2 (vcat (text <$> lines source)), hang (text "initializers:") 2 (ppr initializers), hang (text "finalizers:") 2 (ppr finalizers) ] -- | 'Binary' 'put_' for 'ForeignSrcLang'. binary_put_ForeignSrcLang :: WriteBinHandle -> ForeignSrcLang -> IO () binary_put_ForeignSrcLang bh lang = put_ @Word8 bh $ case lang of LangC -> 0 LangCxx -> 1 LangObjc -> 2 LangObjcxx -> 3 LangAsm -> 4 LangJs -> 5 RawObject -> 6 -- | 'Binary' 'get' for 'ForeignSrcLang'. 
binary_get_ForeignSrcLang :: ReadBinHandle -> IO ForeignSrcLang binary_get_ForeignSrcLang bh = do b <- getByte bh pure $ case b of 0 -> LangC 1 -> LangCxx 2 -> LangObjc 3 -> LangObjcxx 4 -> LangAsm 5 -> LangJs 6 -> RawObject _ -> panic "invalid Binary value for ForeignSrcLang" instance Binary IfaceCStubs where get bh = do header <- get bh source <- get bh initializers <- get bh finalizers <- get bh pure IfaceCStubs {..} put_ bh IfaceCStubs {..} = do put_ bh header put_ bh source put_ bh initializers put_ bh finalizers instance NFData IfaceCStubs where rnf IfaceCStubs {..} = rnf header `seq` rnf source `seq` rnf initializers `seq` rnf finalizers -- | A source file added from Template Haskell using 'qAddForeignFilePath', for -- storage in interfaces. -- -- See Note [Foreign stubs and TH bytecode linking] data IfaceForeignFile = IfaceForeignFile { -- | The language is specified by the user. lang :: ForeignSrcLang, -- | The contents of the file, which will be written to a temporary file -- when loaded from an interface. source :: String, -- | The extension used by the user is preserved, to avoid confusing -- external tools with an unexpected @.c@ file or similar. 
extension :: FilePath } instance Outputable IfaceForeignFile where ppr IfaceForeignFile {lang, source} = hang (text (show lang) <> colon) 2 (vcat (text <$> lines source)) instance Binary IfaceForeignFile where get bh = do lang <- binary_get_ForeignSrcLang bh source <- get bh extension <- get bh pure IfaceForeignFile {lang, source, extension} put_ bh IfaceForeignFile {lang, source, extension} = do binary_put_ForeignSrcLang bh lang put_ bh source put_ bh extension instance NFData IfaceForeignFile where rnf IfaceForeignFile {lang, source, extension} = lang `seq` rnf source `seq` rnf extension data IfaceForeign = IfaceForeign { stubs :: Maybe IfaceCStubs, files :: [IfaceForeignFile] } instance Outputable IfaceForeign where ppr IfaceForeign {stubs, files} = hang (text "stubs:") 2 (maybe (text "empty") ppr stubs) $$ vcat (ppr <$> files) emptyIfaceForeign :: IfaceForeign emptyIfaceForeign = IfaceForeign {stubs = Nothing, files = []} -- | Convert foreign stubs and foreign files to a format suitable for writing to -- interfaces. -- -- See Note [Foreign stubs and TH bytecode linking] encodeIfaceForeign :: Logger -> DynFlags -> ForeignStubs -> [(ForeignSrcLang, FilePath)] -> IO IfaceForeign encodeIfaceForeign logger dflags foreign_stubs lang_paths = do files <- read_foreign_files stubs <- encode_stubs foreign_stubs let iff = IfaceForeign {stubs, files} debugTraceMsg logger 3 $ hang (text "Encoding foreign data for iface:") 2 (ppr iff) pure iff where -- We can't just store the paths, since files may have been generated with -- GHC session lifetime in 'GHC.Internal.TH.Syntax.addForeignSource'. 
read_foreign_files = for lang_paths $ \ (lang, path) -> do source <- readFile path pure IfaceForeignFile {lang, source, extension = takeExtension path} encode_stubs = \case NoStubs -> pure Nothing ForeignStubs (CHeader header) (CStub source inits finals) -> pure $ Just IfaceCStubs { header = render header, source = render source, initializers = encode_label <$> inits, finalizers = encode_label <$> finals } encode_label clabel = fromMaybe (invalid_label clabel) (IfaceCLabel <$> cStubLabel clabel) invalid_label clabel = pprPanic "-fwrite-if-simplified-core is incompatible with this foreign stub:" (pprCLabel (targetPlatform dflags) clabel) render = renderWithContext (initSDocContext dflags PprCode) -- | Decode serialized foreign stubs and foreign files. -- -- See Note [Foreign stubs and TH bytecode linking] decodeIfaceForeign :: Logger -> TmpFs -> TempDir -> IfaceForeign -> IO (ForeignStubs, [(ForeignSrcLang, FilePath)]) decodeIfaceForeign logger tmpfs tmp_dir iff@IfaceForeign {stubs, files} = do debugTraceMsg logger 3 $ hang (text "Decoding foreign data from iface:") 2 (ppr iff) lang_paths <- for files $ \ IfaceForeignFile {lang, source, extension} -> do f <- newTempName logger tmpfs tmp_dir TFL_GhcSession extension writeFile f source pure (lang, f) pure (maybe NoStubs decode_stubs stubs, lang_paths) where decode_stubs IfaceCStubs {header, source, initializers, finalizers} = ForeignStubs (CHeader (text header)) (CStub (text source) (labels initializers) (labels finalizers)) labels ls = [fromCStubLabel l | IfaceCLabel l <- ls] instance Binary IfaceForeign where get bh = do stubs <- get bh files <- get bh pure IfaceForeign {stubs, files} put_ bh IfaceForeign {stubs, files} = do put_ bh stubs put_ bh files instance NFData IfaceForeign where rnf IfaceForeign {stubs, files} = rnf stubs `seq` rnf files ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Parser.hs0000644000000000000000000000300107346545000020407 0ustar0000000000000000-- | Parsers for unit/module identifiers 
module GHC.Unit.Parser ( parseUnit , parseUnitId , parseHoleyModule , parseModSubst ) where import GHC.Prelude import GHC.Unit.Types import GHC.Data.FastString import qualified Text.ParserCombinators.ReadP as Parse import Text.ParserCombinators.ReadP (ReadP, (<++)) import Data.Char (isAlphaNum) import Language.Haskell.Syntax.Module.Name (ModuleName, parseModuleName) parseUnit :: ReadP Unit parseUnit = parseVirtUnitId <++ parseDefUnitId where parseVirtUnitId = do uid <- parseUnitId insts <- parseModSubst return (mkVirtUnit uid insts) parseDefUnitId = do s <- parseUnitId return (RealUnit (Definite s)) parseUnitId :: ReadP UnitId parseUnitId = do s <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "-_.+") return (UnitId (mkFastString s)) parseHoleyModule :: ReadP Module parseHoleyModule = parseModuleVar <++ parseModule where parseModuleVar = do _ <- Parse.char '<' modname <- parseModuleName _ <- Parse.char '>' return (Module HoleUnit modname) parseModule = do uid <- parseUnit _ <- Parse.char ':' modname <- parseModuleName return (Module uid modname) parseModSubst :: ReadP [(ModuleName, Module)] parseModSubst = Parse.between (Parse.char '[') (Parse.char ']') . flip Parse.sepBy (Parse.char ',') $ do k <- parseModuleName _ <- Parse.char '=' v <- parseHoleyModule return (k, v) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Ppr.hs0000644000000000000000000000213607346545000017724 0ustar0000000000000000-- | Unit identifier pretty-printing module GHC.Unit.Ppr ( UnitPprInfo (..) 
) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import Data.Version -- | Subset of UnitInfo: just enough to pretty-print a unit-id -- -- Instead of printing the unit-id which may contain a hash, we print: -- package-version:componentname -- data UnitPprInfo = UnitPprInfo { unitPprId :: FastString -- ^ Identifier , unitPprPackageName :: String -- ^ Source package name , unitPprPackageVersion :: Version -- ^ Source package version , unitPprComponentName :: Maybe String -- ^ Component name } instance Outputable UnitPprInfo where ppr pprinfo = getPprDebug $ \debug -> if debug then ftext (unitPprId pprinfo) else text $ mconcat [ unitPprPackageName pprinfo , case unitPprPackageVersion pprinfo of Version [] [] -> "" version -> "-" ++ showVersion version , case unitPprComponentName pprinfo of Nothing -> "" Just cname -> ":" ++ cname ] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/State.hs0000644000000000000000000027535707346545000020264 0ustar0000000000000000-- (c) The University of Glasgow, 2006 {-# LANGUAGE LambdaCase #-} -- | Unit manipulation module GHC.Unit.State ( module GHC.Unit.Info, -- * Reading the package config, and processing cmdline args UnitState(..), PreloadUnitClosure, UnitDatabase (..), UnitErr (..), emptyUnitState, initUnits, readUnitDatabases, readUnitDatabase, getUnitDbRefs, resolveUnitDatabase, listUnitInfo, -- * Querying the package config UnitInfoMap, lookupUnit, lookupUnit', unsafeLookupUnit, lookupUnitId, lookupUnitId', unsafeLookupUnitId, lookupPackageName, resolvePackageImport, improveUnit, searchPackageId, listVisibleModuleNames, lookupModuleInAllUnits, lookupModuleWithSuggestions, lookupModulePackage, lookupPluginModuleWithSuggestions, requirementMerges, LookupResult(..), ModuleSuggestion(..), ModuleOrigin(..), UnusableUnit(..), UnusableUnitReason(..), pprReason, closeUnitDeps, closeUnitDeps', mayThrowUnitErr, -- * Module hole substitution ShHoleSubst, renameHoleUnit, renameHoleModule, renameHoleUnit', 
renameHoleModule', instUnitToUnit, instModuleToModule, -- * Pretty-printing pprFlag, pprUnits, pprUnitsSimple, pprUnitIdForUser, pprUnitInfoForUser, pprModuleMap, pprWithUnitState, -- * Utils unwireUnit) where import GHC.Prelude import GHC.Driver.DynFlags import GHC.Platform import GHC.Platform.Ways import GHC.Unit.Database import GHC.Unit.Info import GHC.Unit.Ppr import GHC.Unit.Types import GHC.Unit.Module import GHC.Unit.Home import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Types.Unique.Set import GHC.Types.Unique.DSet import GHC.Types.Unique.Map import GHC.Types.Unique import GHC.Types.PkgQual import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Outputable as Outputable import GHC.Data.Maybe import System.Environment ( getEnv ) import GHC.Data.FastString import qualified GHC.Data.ShortText as ST import GHC.Utils.Logger import GHC.Utils.Error import GHC.Utils.Exception import System.Directory import System.FilePath as FilePath import Control.Monad import Data.Graph (stronglyConnComp, SCC(..)) import Data.Char ( toUpper ) import Data.List ( intersperse, partition, sortBy, isSuffixOf, sortOn ) import Data.Set (Set) import Data.Monoid (First(..)) import qualified Data.Semigroup as Semigroup import qualified Data.Set as Set import Control.Applicative -- --------------------------------------------------------------------------- -- The Unit state -- The unit state is computed by 'initUnits', and kept in HscEnv. -- It is influenced by various command-line flags: -- -- * @-package \@ and @-package-id \@ cause @\@ to become exposed. -- If @-hide-all-packages@ was not specified, these commands also cause -- all other packages with the same name to become hidden. -- -- * @-hide-package \@ causes @\@ to become hidden. -- -- * (there are a few more flags, check below for their semantics) -- -- The unit state has the following properties. -- -- * Let @exposedUnits@ be the set of packages thus exposed. 
-- Let @depExposedUnits@ be the transitive closure from @exposedUnits@ of -- their dependencies. -- -- * When searching for a module from a preload import declaration, -- only the exposed modules in @exposedUnits@ are valid. -- -- * When searching for a module from an implicit import, all modules -- from @depExposedUnits@ are valid. -- -- * When linking in a compilation manager mode, we link in packages the -- program depends on (the compiler knows this list by the -- time it gets to the link step). Also, we link in all packages -- which were mentioned with preload @-package@ flags on the command-line, -- or are a transitive dependency of same, or are \"base\"\/\"rts\". -- The reason for this is that we might need packages which don't -- contain any Haskell modules, and therefore won't be discovered -- by the normal mechanism of dependency tracking. -- Notes on DLLs -- ~~~~~~~~~~~~~ -- When compiling module A, which imports module B, we need to -- know whether B will be in the same DLL as A. -- If it's in the same DLL, we refer to B_f_closure -- If it isn't, we refer to _imp__B_f_closure -- When compiling A, we record in B's Module value whether it's -- in a different DLL, by setting the DLL flag. -- | Given a module name, there may be multiple ways it came into scope, -- possibly simultaneously. This data type tracks all the possible ways -- it could have come into scope. Warning: don't use the record functions, -- they're partial! data ModuleOrigin = -- | Module is hidden, and thus never will be available for import. -- (But maybe the user didn't realize), so we'll still keep track -- of these modules.) ModHidden -- | Module is unavailable because the unit is unusable. | ModUnusable !UnusableUnit -- | Module is public, and could have come from some places. | ModOrigin { -- | @Just False@ means that this module is in -- someone's @exported-modules@ list, but that package is hidden; -- @Just True@ means that it is available; @Nothing@ means neither -- applies. 
fromOrigUnit :: Maybe Bool -- | Is the module available from a reexport of an exposed package? -- There could be multiple. , fromExposedReexport :: [UnitInfo] -- | Is the module available from a reexport of a hidden package? , fromHiddenReexport :: [UnitInfo] -- | Did the module export come from a package flag? (ToDo: track -- more information. , fromPackageFlag :: Bool } -- | A unusable unit module origin data UnusableUnit = UnusableUnit { uuUnit :: !Unit -- ^ Unusable unit , uuReason :: !UnusableUnitReason -- ^ Reason , uuIsReexport :: !Bool -- ^ Is the "module" a reexport? } instance Outputable ModuleOrigin where ppr ModHidden = text "hidden module" ppr (ModUnusable _) = text "unusable module" ppr (ModOrigin e res rhs f) = sep (punctuate comma ( (case e of Nothing -> [] Just False -> [text "hidden package"] Just True -> [text "exposed package"]) ++ (if null res then [] else [text "reexport by" <+> sep (map (ppr . mkUnit) res)]) ++ (if null rhs then [] else [text "hidden reexport by" <+> sep (map (ppr . mkUnit) res)]) ++ (if f then [text "package flag"] else []) )) -- | Smart constructor for a module which is in @exposed-modules@. Takes -- as an argument whether or not the defining package is exposed. fromExposedModules :: Bool -> ModuleOrigin fromExposedModules e = ModOrigin (Just e) [] [] False -- | Smart constructor for a module which is in @reexported-modules@. Takes -- as an argument whether or not the reexporting package is exposed, and -- also its 'UnitInfo'. fromReexportedModules :: Bool -> UnitInfo -> ModuleOrigin fromReexportedModules True pkg = ModOrigin Nothing [pkg] [] False fromReexportedModules False pkg = ModOrigin Nothing [] [pkg] False -- | Smart constructor for a module which was bound by a package flag. 
fromFlag :: ModuleOrigin fromFlag = ModOrigin Nothing [] [] True instance Semigroup ModuleOrigin where x@(ModOrigin e res rhs f) <> y@(ModOrigin e' res' rhs' f') = ModOrigin (g e e') (res ++ res') (rhs ++ rhs') (f || f') where g (Just b) (Just b') | b == b' = Just b | otherwise = pprPanic "ModOrigin: package both exposed/hidden" $ text "x: " <> ppr x $$ text "y: " <> ppr y g Nothing x = x g x Nothing = x x <> y = pprPanic "ModOrigin: module origin mismatch" $ text "x: " <> ppr x $$ text "y: " <> ppr y instance Monoid ModuleOrigin where mempty = ModOrigin Nothing [] [] False mappend = (Semigroup.<>) -- | Is the name from the import actually visible? (i.e. does it cause -- ambiguity, or is it only relevant when we're making suggestions?) originVisible :: ModuleOrigin -> Bool originVisible ModHidden = False originVisible (ModUnusable _) = False originVisible (ModOrigin b res _ f) = b == Just True || not (null res) || f -- | Are there actually no providers for this module? This will never occur -- except when we're filtering based on package imports. originEmpty :: ModuleOrigin -> Bool originEmpty (ModOrigin Nothing [] [] False) = True originEmpty _ = False type PreloadUnitClosure = UniqSet UnitId -- | 'UniqFM' map from 'Unit' to a 'UnitVisibility'. type VisibilityMap = UniqMap Unit UnitVisibility -- | 'UnitVisibility' records the various aspects of visibility of a particular -- 'Unit'. data UnitVisibility = UnitVisibility { uv_expose_all :: Bool -- ^ Should all modules in exposed-modules should be dumped into scope? , uv_renamings :: [(ModuleName, ModuleName)] -- ^ Any custom renamings that should bring extra 'ModuleName's into -- scope. , uv_package_name :: First FastString -- ^ The package name associated with the 'Unit'. 
This is used -- to implement legacy behavior where @-package foo-0.1@ implicitly -- hides any packages named @foo@ , uv_requirements :: UniqMap ModuleName (Set InstantiatedModule) -- ^ The signatures which are contributed to the requirements context -- from this unit ID. , uv_explicit :: Maybe PackageArg -- ^ Whether or not this unit was explicitly brought into scope, -- as opposed to implicitly via the 'exposed' fields in the -- package database (when @-hide-all-packages@ is not passed.) } instance Outputable UnitVisibility where ppr (UnitVisibility { uv_expose_all = b, uv_renamings = rns, uv_package_name = First mb_pn, uv_requirements = reqs, uv_explicit = explicit }) = ppr (b, rns, mb_pn, reqs, explicit) instance Semigroup UnitVisibility where uv1 <> uv2 = UnitVisibility { uv_expose_all = uv_expose_all uv1 || uv_expose_all uv2 , uv_renamings = uv_renamings uv1 ++ uv_renamings uv2 , uv_package_name = mappend (uv_package_name uv1) (uv_package_name uv2) , uv_requirements = plusUniqMap_C Set.union (uv_requirements uv2) (uv_requirements uv1) , uv_explicit = uv_explicit uv1 <|> uv_explicit uv2 } instance Monoid UnitVisibility where mempty = UnitVisibility { uv_expose_all = False , uv_renamings = [] , uv_package_name = First Nothing , uv_requirements = emptyUniqMap , uv_explicit = Nothing } mappend = (Semigroup.<>) -- | Unit configuration data UnitConfig = UnitConfig { unitConfigPlatformArchOS :: !ArchOS -- ^ Platform arch and OS , unitConfigWays :: !Ways -- ^ Ways to use , unitConfigAllowVirtual :: !Bool -- ^ Allow virtual units -- ^ Do we allow the use of virtual units instantiated on-the-fly (see -- Note [About units] in GHC.Unit). This should only be true when we are -- type-checking an indefinite unit (not producing any code). , unitConfigProgramName :: !String -- ^ Name of the compiler (e.g. "GHC", "GHCJS"). Used to fetch environment -- variables such as "GHC[JS]_PACKAGE_PATH". 
, unitConfigGlobalDB :: !FilePath -- ^ Path to global DB , unitConfigGHCDir :: !FilePath -- ^ Main GHC dir: contains settings, etc. , unitConfigDBName :: !String -- ^ User DB name (e.g. "package.conf.d") , unitConfigAutoLink :: ![UnitId] -- ^ Units to link automatically (e.g. base, rts) , unitConfigDistrustAll :: !Bool -- ^ Distrust all units by default , unitConfigHideAll :: !Bool -- ^ Hide all units by default , unitConfigHideAllPlugins :: !Bool -- ^ Hide all plugins units by default , unitConfigDBCache :: Maybe [UnitDatabase UnitId] -- ^ Cache of databases to use, in the order they were specified on the -- command line (later databases shadow earlier ones). -- If Nothing, databases will be found using `unitConfigFlagsDB`. -- command-line flags , unitConfigFlagsDB :: [PackageDBFlag] -- ^ Unit databases flags , unitConfigFlagsExposed :: [PackageFlag] -- ^ Exposed units , unitConfigFlagsIgnored :: [IgnorePackageFlag] -- ^ Ignored units , unitConfigFlagsTrusted :: [TrustFlag] -- ^ Trusted units , unitConfigFlagsPlugins :: [PackageFlag] -- ^ Plugins exposed units , unitConfigHomeUnits :: Set.Set UnitId } initUnitConfig :: DynFlags -> Maybe [UnitDatabase UnitId] -> Set.Set UnitId -> UnitConfig initUnitConfig dflags cached_dbs home_units = let !hu_id = homeUnitId_ dflags !hu_instanceof = homeUnitInstanceOf_ dflags !hu_instantiations = homeUnitInstantiations_ dflags autoLink | not (gopt Opt_AutoLinkPackages dflags) = [] -- By default we add ghc-internal & rts to the preload units (when they are -- found in the unit database) except when we are building them | otherwise = filter (hu_id /=) [ghcInternalUnitId, rtsUnitId] -- if the home unit is indefinite, it means we are type-checking it only -- (not producing any code). Hence we can use virtual units instantiated -- on-the-fly. See Note [About units] in GHC.Unit allow_virtual_units = case (hu_instanceof, hu_instantiations) of (Just u, is) -> u == hu_id && any (isHoleModule . 
snd) is _ -> False in UnitConfig { unitConfigPlatformArchOS = platformArchOS (targetPlatform dflags) , unitConfigProgramName = programName dflags , unitConfigWays = ways dflags , unitConfigAllowVirtual = allow_virtual_units , unitConfigGlobalDB = globalPackageDatabasePath dflags , unitConfigGHCDir = topDir dflags , unitConfigDBName = "package.conf.d" , unitConfigAutoLink = autoLink , unitConfigDistrustAll = gopt Opt_DistrustAllPackages dflags , unitConfigHideAll = gopt Opt_HideAllPackages dflags , unitConfigHideAllPlugins = gopt Opt_HideAllPluginPackages dflags , unitConfigDBCache = cached_dbs , unitConfigFlagsDB = map (offsetPackageDb (workingDirectory dflags)) $ packageDBFlags dflags , unitConfigFlagsExposed = packageFlags dflags , unitConfigFlagsIgnored = ignorePackageFlags dflags , unitConfigFlagsTrusted = trustFlags dflags , unitConfigFlagsPlugins = pluginPackageFlags dflags , unitConfigHomeUnits = home_units } where offsetPackageDb :: Maybe FilePath -> PackageDBFlag -> PackageDBFlag offsetPackageDb (Just offset) (PackageDB (PkgDbPath p)) | isRelative p = PackageDB (PkgDbPath (offset p)) offsetPackageDb _ p = p -- | Map from 'ModuleName' to a set of module providers (i.e. a 'Module' and -- its 'ModuleOrigin'). -- -- NB: the set is in fact a 'Map Module ModuleOrigin', probably to keep only one -- origin for a given 'Module' type ModuleNameProvidersMap = UniqMap ModuleName (UniqMap Module ModuleOrigin) data UnitState = UnitState { -- | A mapping of 'Unit' to 'UnitInfo'. This list is adjusted -- so that only valid units are here. 'UnitInfo' reflects -- what was stored *on disk*, except for the 'trusted' flag, which -- is adjusted at runtime. (In particular, some units in this map -- may have the 'exposed' flag be 'False'.) unitInfoMap :: UnitInfoMap, -- | The set of transitively reachable units according -- to the explicitly provided command line arguments. -- A fully instantiated VirtUnit may only be replaced by a RealUnit from -- this set. 
-- See Note [VirtUnit to RealUnit improvement] preloadClosure :: PreloadUnitClosure, -- | A mapping of 'PackageName' to 'UnitId'. If several units have the same -- package name (e.g. different instantiations), then we return one of them... -- This is used when users refer to packages in Backpack includes. -- And also to resolve package qualifiers with the PackageImports extension. packageNameMap :: UniqFM PackageName UnitId, -- | A mapping from database unit keys to wired in unit ids. wireMap :: UniqMap UnitId UnitId, -- | A mapping from wired in unit ids to unit keys from the database. unwireMap :: UniqMap UnitId UnitId, -- | The units we're going to link in eagerly. This list -- should be in reverse dependency order; that is, a unit -- is always mentioned before the units it depends on. preloadUnits :: [UnitId], -- | Units which we explicitly depend on (from a command line flag). -- We'll use this to generate version macros and the unused packages warning. The -- original flag which was used to bring the unit into scope is recorded for the -- -Wunused-packages warning. explicitUnits :: [(Unit, Maybe PackageArg)], homeUnitDepends :: [UnitId], -- | This is a full map from 'ModuleName' to all modules which may possibly -- be providing it. These providers may be hidden (but we'll still want -- to report them in error messages), or it may be an ambiguous import. moduleNameProvidersMap :: !ModuleNameProvidersMap, -- | A map, like 'moduleNameProvidersMap', but controlling plugin visibility. pluginModuleNameProvidersMap :: !ModuleNameProvidersMap, -- | A map saying, for each requirement, what interfaces must be merged -- together when we use them. For example, if our dependencies -- are @p[A=\]@ and @q[A=\,B=r[C=\]:B]@, then the interfaces -- to merge for A are @p[A=\]:A@, @q[A=\,B=r[C=\]:B]:A@ -- and @r[C=\]:C@. -- -- There's an entry in this map for each hole in our home library. 
requirementContext :: UniqMap ModuleName [InstantiatedModule], -- | Indicate if we can instantiate units on-the-fly. -- -- This should only be true when we are type-checking an indefinite unit. -- See Note [About units] in GHC.Unit. allowVirtualUnits :: !Bool } emptyUnitState :: UnitState emptyUnitState = UnitState { unitInfoMap = emptyUniqMap, preloadClosure = emptyUniqSet, packageNameMap = emptyUFM, wireMap = emptyUniqMap, unwireMap = emptyUniqMap, preloadUnits = [], explicitUnits = [], homeUnitDepends = [], moduleNameProvidersMap = emptyUniqMap, pluginModuleNameProvidersMap = emptyUniqMap, requirementContext = emptyUniqMap, allowVirtualUnits = False } -- | Unit database data UnitDatabase unit = UnitDatabase { unitDatabasePath :: FilePath , unitDatabaseUnits :: [GenUnitInfo unit] } instance Outputable u => Outputable (UnitDatabase u) where ppr (UnitDatabase fp _u) = text "DB:" <+> text fp type UnitInfoMap = UniqMap UnitId UnitInfo -- | Find the unit we know about with the given unit, if any lookupUnit :: UnitState -> Unit -> Maybe UnitInfo lookupUnit pkgs = lookupUnit' (allowVirtualUnits pkgs) (unitInfoMap pkgs) (preloadClosure pkgs) -- | A more specialized interface, which doesn't require a 'UnitState' (so it -- can be used while we're initializing 'DynFlags') -- -- Parameters: -- * a boolean specifying whether or not to look for on-the-fly renamed interfaces -- * a 'UnitInfoMap' -- * a 'PreloadUnitClosure' lookupUnit' :: Bool -> UnitInfoMap -> PreloadUnitClosure -> Unit -> Maybe UnitInfo lookupUnit' allowOnTheFlyInst pkg_map closure u = case u of HoleUnit -> error "Hole unit" RealUnit i -> lookupUniqMap pkg_map (unDefinite i) VirtUnit i | allowOnTheFlyInst -> -- lookup UnitInfo of the indefinite unit to be instantiated and -- instantiate it on-the-fly fmap (renameUnitInfo pkg_map closure (instUnitInsts i)) (lookupUniqMap pkg_map (instUnitInstanceOf i)) | otherwise -> -- lookup UnitInfo by virtual UnitId. This is used to find indefinite -- units. 
Even if they are real, installed units, they can't use the -- `RealUnit` constructor (it is reserved for definite units) so we use -- the `VirtUnit` constructor. lookupUniqMap pkg_map (virtualUnitId i) -- | Find the unit we know about with the given unit id, if any lookupUnitId :: UnitState -> UnitId -> Maybe UnitInfo lookupUnitId state uid = lookupUnitId' (unitInfoMap state) uid -- | Find the unit we know about with the given unit id, if any lookupUnitId' :: UnitInfoMap -> UnitId -> Maybe UnitInfo lookupUnitId' db uid = lookupUniqMap db uid -- | Looks up the given unit in the unit state, panicking if it is not found unsafeLookupUnit :: HasDebugCallStack => UnitState -> Unit -> UnitInfo unsafeLookupUnit state u = case lookupUnit state u of Just info -> info Nothing -> pprPanic "unsafeLookupUnit" (ppr u) -- | Looks up the given unit id in the unit state, panicking if it is not found unsafeLookupUnitId :: HasDebugCallStack => UnitState -> UnitId -> UnitInfo unsafeLookupUnitId state uid = case lookupUnitId state uid of Just info -> info Nothing -> pprPanic "unsafeLookupUnitId" (ppr uid) -- | Find the unit we know about with the given package name (e.g. @foo@), if any -- (NB: there might be a locally defined unit name which overrides this) -- This function is unsafe to use in general because it doesn't respect package -- visibility. lookupPackageName :: UnitState -> PackageName -> Maybe UnitId lookupPackageName pkgstate n = lookupUFM (packageNameMap pkgstate) n -- | Search for units with a given package ID (e.g. \"foo-0.1\") searchPackageId :: UnitState -> PackageId -> [UnitInfo] searchPackageId pkgstate pid = filter ((pid ==) . unitPackageId) (listUnitInfo pkgstate) -- | Find the UnitId which an import qualified by a package import comes from. -- Compared to 'lookupPackageName', this function correctly accounts for visibility, -- renaming and thinning. 
resolvePackageImport :: UnitState -> ModuleName -> PackageName -> Maybe UnitId resolvePackageImport unit_st mn pn = do -- 1. Find all modules providing the ModuleName (this accounts for visibility/thinning etc) providers <- filterUniqMap originVisible <$> lookupUniqMap (moduleNameProvidersMap unit_st) mn -- 2. Get the UnitIds of the candidates let candidates_uid = concatMap to_uid $ sortOn fst $ nonDetUniqMapToList providers -- 3. Get the package names of the candidates let candidates_units = map (\ui -> ((unitPackageName ui), unitId ui)) $ mapMaybe (\uid -> lookupUniqMap (unitInfoMap unit_st) uid) candidates_uid -- 4. Check to see if the PackageName helps us disambiguate any candidates. lookup pn candidates_units where -- Get the UnitId from which a visible identifier is from to_uid :: (Module, ModuleOrigin) -> [UnitId] to_uid (mod, ModOrigin mo re_exps _ _) = case mo of -- Available directly, but also potentially from re-exports Just True -> (toUnitId (moduleUnit mod)) : map unitId re_exps -- Just available from these re-exports _ -> map unitId re_exps to_uid _ = [] -- | Create a Map UnitId UnitInfo -- -- For each instantiated unit, we add two map keys: -- * the real unit id -- * the virtual unit id made from its instantiation -- -- We do the same thing for fully indefinite units (which are "instantiated" -- with module holes). -- mkUnitInfoMap :: [UnitInfo] -> UnitInfoMap mkUnitInfoMap infos = foldl' add emptyUniqMap infos where mkVirt p = virtualUnitId (mkInstantiatedUnit (unitInstanceOf p) (unitInstantiations p)) add pkg_map p | not (null (unitInstantiations p)) = addToUniqMap (addToUniqMap pkg_map (mkVirt p) p) (unitId p) p | otherwise = addToUniqMap pkg_map (unitId p) p -- | Get a list of entries from the unit database. NB: be careful with -- this function, although all units in this map are "visible", this -- does not imply that the exposed-modules of the unit are available -- (they may have been thinned or renamed). 
listUnitInfo :: UnitState -> [UnitInfo] listUnitInfo state = nonDetEltsUniqMap (unitInfoMap state) -- ---------------------------------------------------------------------------- -- Loading the unit db files and building up the unit state -- | Read the unit database files, and sets up various internal tables of -- unit information, according to the unit-related flags on the -- command-line (@-package@, @-hide-package@ etc.) -- -- 'initUnits' can be called again subsequently after updating the -- 'packageFlags' field of the 'DynFlags', and it will update the -- 'unitState' in 'DynFlags'. initUnits :: Logger -> DynFlags -> Maybe [UnitDatabase UnitId] -> Set.Set UnitId -> IO ([UnitDatabase UnitId], UnitState, HomeUnit, Maybe PlatformConstants) initUnits logger dflags cached_dbs home_units = do let forceUnitInfoMap (state, _) = unitInfoMap state `seq` () (unit_state,dbs) <- withTiming logger (text "initializing unit database") forceUnitInfoMap $ mkUnitState logger (initUnitConfig dflags cached_dbs home_units) putDumpFileMaybe logger Opt_D_dump_mod_map "Module Map" FormatText (updSDocContext (\ctx -> ctx {sdocLineLength = 200}) $ pprModuleMap (moduleNameProvidersMap unit_state)) let home_unit = mkHomeUnit unit_state (homeUnitId_ dflags) (homeUnitInstanceOf_ dflags) (homeUnitInstantiations_ dflags) -- Try to find platform constants -- -- See Note [Platform constants] in GHC.Platform mconstants <- if homeUnitId_ dflags == rtsUnitId then do -- we're building the RTS! Lookup GhclibDerivedConstants.h in the include paths lookupPlatformConstants (includePathsGlobal (includePaths dflags)) else -- lookup the GhclibDerivedConstants.h header bundled with the RTS unit. We -- don't fail if we can't find the RTS unit as it can be a valid (but -- uncommon) case, e.g. building a C utility program (not depending on the -- RTS) before building the RTS. In any case, we will fail later on if we -- really need to use the platform constants but they have not been loaded. 
case lookupUnitId unit_state rtsUnitId of Nothing -> return Nothing Just info -> lookupPlatformConstants (fmap ST.unpack (unitIncludeDirs info)) return (dbs,unit_state,home_unit,mconstants) mkHomeUnit :: UnitState -> UnitId -- ^ Home unit id -> Maybe UnitId -- ^ Home unit instance of -> [(ModuleName, Module)] -- ^ Home unit instantiations -> HomeUnit mkHomeUnit unit_state hu_id hu_instanceof hu_instantiations_ = let -- Some wired units can be used to instantiate the home unit. We need to -- replace their unit keys with their wired unit ids. wmap = wireMap unit_state hu_instantiations = map (fmap (upd_wired_in_mod wmap)) hu_instantiations_ in case (hu_instanceof, hu_instantiations) of (Nothing,[]) -> DefiniteHomeUnit hu_id Nothing (Nothing, _) -> throwGhcException $ CmdLineError ("Use of -instantiated-with requires -this-component-id") (Just _, []) -> throwGhcException $ CmdLineError ("Use of -this-component-id requires -instantiated-with") (Just u, is) -- detect fully indefinite units: all their instantiations are hole -- modules and the home unit id is the same as the instantiating unit -- id (see Note [About units] in GHC.Unit) | all (isHoleModule . snd) is && u == hu_id -> IndefiniteHomeUnit u is -- otherwise it must be that we (fully) instantiate an indefinite unit -- to make it definite. -- TODO: error when the unit is partially instantiated?? 
| otherwise -> DefiniteHomeUnit hu_id (Just (u, is)) -- ----------------------------------------------------------------------------- -- Reading the unit database(s) readUnitDatabases :: Logger -> UnitConfig -> IO [UnitDatabase UnitId] readUnitDatabases logger cfg = do conf_refs <- getUnitDbRefs cfg confs <- liftM catMaybes $ mapM (resolveUnitDatabase cfg) conf_refs mapM (readUnitDatabase logger cfg) confs getUnitDbRefs :: UnitConfig -> IO [PkgDbRef] getUnitDbRefs cfg = do let system_conf_refs = [UserPkgDb, GlobalPkgDb] e_pkg_path <- tryIO (getEnv $ map toUpper (unitConfigProgramName cfg) ++ "_PACKAGE_PATH") let base_conf_refs = case e_pkg_path of Left _ -> system_conf_refs Right path | Just (xs, x) <- snocView path, isSearchPathSeparator x -> map PkgDbPath (splitSearchPath xs) ++ system_conf_refs | otherwise -> map PkgDbPath (splitSearchPath path) -- Apply the package DB-related flags from the command line to get the -- final list of package DBs. -- -- Notes on ordering: -- * The list of flags is reversed (later ones first) -- * We work with the package DB list in "left shadows right" order -- * and finally reverse it at the end, to get "right shadows left" -- return $ reverse (foldr doFlag base_conf_refs (unitConfigFlagsDB cfg)) where doFlag (PackageDB p) dbs = p : dbs doFlag NoUserPackageDB dbs = filter isNotUser dbs doFlag NoGlobalPackageDB dbs = filter isNotGlobal dbs doFlag ClearPackageDBs _ = [] isNotUser UserPkgDb = False isNotUser _ = True isNotGlobal GlobalPkgDb = False isNotGlobal _ = True -- | Return the path of a package database from a 'PkgDbRef'. Return 'Nothing' -- when the user database filepath is expected but the latter doesn't exist. -- -- NB: This logic is reimplemented in Cabal, so if you change it, -- make sure you update Cabal. (Or, better yet, dump it in the -- compiler info so Cabal can use the info.) 
{- Package-database resolution, reading, and package/trust-flag application.

   NOTE(review): this chunk has been whitespace-mangled by extraction — the
   original source newlines were collapsed into spaces, and the @</>@
   path-join operators appear to have been stripped (e.g. in
   @let pkgconf = dir unitConfigDBName cfg@ and
   @let filename = conf_dir "package.cache"@ — presumably these were
   @dir </> unitConfigDBName cfg@ etc.; confirm against upstream GHC).
   Consequently every @--@ line comment below now swallows the code that
   follows it on the same physical line.  The text is reproduced verbatim;
   only this header comment is added.

   Contents of this span, grounded in the visible code:

   * 'resolveUnitDatabase' — maps a 'PkgDbRef' to an on-disk path:
     'GlobalPkgDb' uses 'unitConfigGlobalDB'; 'UserPkgDb' probes the
     versioned per-user app dir via 'runMaybeT' (returning 'Nothing' when
     the directory does not exist); 'PkgDbPath' is returned as given.

   * 'readUnitDatabase' — reads one database.  Directory-style DBs go
     through the local helper @readDirStyleUnitInfo@ (which requires a
     @package.cache@ file unless the directory holds no @.conf@ files at
     all); single-file-style DBs are only tolerated via the
     @tryReadOldFileStyleUnitInfo@ Cabal-compat workaround (a file whose
     first two characters are "[]" redirects to a sibling @<file>.d@
     directory), otherwise an 'InstallationError' is thrown.  The trailing
     path separator is dropped from @conf_file@ before computing @pkgroot@
     (fix for #16360), and each raw entry is run through
     @mungeUnitInfo top_dir pkgroot . mapUnitInfo ... . mkUnitKeyInfo@.

   * 'distrustAllUnits' — sets @unitIsTrusted = False@ on every entry.

   * 'mungeUnitInfo' / 'mungeDynLibFields' — substitute @top_dir@/@pkgroot@
     into the unit's paths and default empty 'unitLibraryDynDirs' to
     'unitLibraryDirs'.

   * 'applyTrustFlag' — applies @-trust@/@-distrust@ via 'selectPackages',
     failing with 'TrustFlagErr' when nothing matches.

   * 'applyPackageFlag' — applies @-package@/@-hide-package@ (and renaming
     forms) to a 'VisibilityMap'; for @-unit-id p[A=<A>]@ arguments it also
     collects inherited hole requirements (@collectHoles@).  Unless
     @no_hide_others@ holds (or a renaming is present), exposing a package
     hides previously-exposed units of the same package name.

   * 'findPackages' / 'selectPackages' — look up candidate 'UnitInfo's by
     'PackageArg', sorted by 'sortByPreference'; 'findPackages' also
     renames instantiated units via 'renameUnitInfo'.
-}
resolveUnitDatabase :: UnitConfig -> PkgDbRef -> IO (Maybe FilePath) resolveUnitDatabase cfg GlobalPkgDb = return $ Just (unitConfigGlobalDB cfg) resolveUnitDatabase cfg UserPkgDb = runMaybeT $ do dir <- versionedAppDir (unitConfigProgramName cfg) (unitConfigPlatformArchOS cfg) let pkgconf = dir unitConfigDBName cfg exist <- tryMaybeT $ doesDirectoryExist pkgconf if exist then return pkgconf else mzero resolveUnitDatabase _ (PkgDbPath name) = return $ Just name readUnitDatabase :: Logger -> UnitConfig -> FilePath -> IO (UnitDatabase UnitId) readUnitDatabase logger cfg conf_file = do isdir <- doesDirectoryExist conf_file proto_pkg_configs <- if isdir then readDirStyleUnitInfo conf_file else do isfile <- doesFileExist conf_file if isfile then do mpkgs <- tryReadOldFileStyleUnitInfo case mpkgs of Just pkgs -> return pkgs Nothing -> throwGhcExceptionIO $ InstallationError $ "ghc no longer supports single-file style package " ++ "databases (" ++ conf_file ++ ") use 'ghc-pkg init' to create the database with " ++ "the correct format." else throwGhcExceptionIO $ InstallationError $ "can't find a package database at " ++ conf_file let -- Fix #16360: remove trailing slash from conf_file before calculating pkgroot conf_file' = dropTrailingPathSeparator conf_file top_dir = unitConfigGHCDir cfg pkgroot = takeDirectory conf_file' pkg_configs1 = map (mungeUnitInfo top_dir pkgroot . mapUnitInfo (\(UnitKey x) -> UnitId x) . mkUnitKeyInfo) proto_pkg_configs -- return $ UnitDatabase conf_file' pkg_configs1 where readDirStyleUnitInfo conf_dir = do let filename = conf_dir "package.cache" cache_exists <- doesFileExist filename if cache_exists then do debugTraceMsg logger 2 $ text "Using binary package database:" <+> text filename readPackageDbForGhc filename else do -- If there is no package.cache file, we check if the database is not -- empty by inspecting if the directory contains any .conf file. If it -- does, something is wrong and we fail.
Otherwise we assume that the -- database is empty. debugTraceMsg logger 2 $ text "There is no package.cache in" <+> text conf_dir <> text ", checking if the database is empty" db_empty <- all (not . isSuffixOf ".conf") <$> getDirectoryContents conf_dir if db_empty then do debugTraceMsg logger 3 $ text "There are no .conf files in" <+> text conf_dir <> text ", treating" <+> text "package database as empty" return [] else throwGhcExceptionIO $ InstallationError $ "there is no package.cache in " ++ conf_dir ++ " even though package database is not empty" -- Single-file style package dbs have been deprecated for some time, but -- it turns out that Cabal was using them in one place. So this is a -- workaround to allow older Cabal versions to use this newer ghc. -- We check if the file db contains just "[]" and if so, we look for a new -- dir-style db in conf_file.d/, ie in a dir next to the given file. -- We cannot just replace the file with a new dir style since Cabal still -- assumes it's a file and tries to overwrite with 'writeFile'. -- ghc-pkg also cooperates with this workaround. tryReadOldFileStyleUnitInfo = do content <- readFile conf_file `catchIO` \_ -> return "" if take 2 content == "[]" then do let conf_dir = conf_file <.> "d" direxists <- doesDirectoryExist conf_dir if direxists then do debugTraceMsg logger 2 (text "Ignoring old file-style db and trying:" <+> text conf_dir) liftM Just (readDirStyleUnitInfo conf_dir) else return (Just []) -- ghc-pkg will create it when it's updated else return Nothing distrustAllUnits :: [UnitInfo] -> [UnitInfo] distrustAllUnits pkgs = map distrust pkgs where distrust pkg = pkg{ unitIsTrusted = False } mungeUnitInfo :: FilePath -> FilePath -> UnitInfo -> UnitInfo mungeUnitInfo top_dir pkgroot = mungeDynLibFields .
mungeUnitInfoPaths (ST.pack top_dir) (ST.pack pkgroot) mungeDynLibFields :: UnitInfo -> UnitInfo mungeDynLibFields pkg = pkg { unitLibraryDynDirs = case unitLibraryDynDirs pkg of [] -> unitLibraryDirs pkg ds -> ds } -- ----------------------------------------------------------------------------- -- Modify our copy of the unit database based on trust flags, -- -trust and -distrust. applyTrustFlag :: UnitPrecedenceMap -> UnusableUnits -> [UnitInfo] -> TrustFlag -> MaybeErr UnitErr [UnitInfo] applyTrustFlag prec_map unusable pkgs flag = case flag of -- we trust all matching packages. Maybe should only trust first one? -- and leave others the same or set them untrusted TrustPackage str -> case selectPackages prec_map (PackageArg str) pkgs unusable of Left ps -> Failed (TrustFlagErr flag ps) Right (ps,qs) -> Succeeded (map trust ps ++ qs) where trust p = p {unitIsTrusted=True} DistrustPackage str -> case selectPackages prec_map (PackageArg str) pkgs unusable of Left ps -> Failed (TrustFlagErr flag ps) Right (ps,qs) -> Succeeded (distrustAllUnits ps ++ qs) applyPackageFlag :: UnitPrecedenceMap -> UnitInfoMap -> PreloadUnitClosure -> UnusableUnits -> Bool -- if False, if you expose a package, it implicitly hides -- any previously exposed packages with the same name -> [UnitInfo] -> VisibilityMap -- Initially exposed -> PackageFlag -- flag to apply -> MaybeErr UnitErr VisibilityMap -- Now exposed applyPackageFlag prec_map pkg_map closure unusable no_hide_others pkgs vm flag = case flag of ExposePackage _ arg (ModRenaming b rns) -> case findPackages prec_map pkg_map closure arg pkgs unusable of Left ps -> Failed (PackageFlagErr flag ps) Right (p:_) -> Succeeded vm' where n = fsPackageName p -- If a user says @-unit-id p[A=<A>]@, this imposes -- a requirement on us: whatever our signature A is, -- it must fulfill all of p[A=<A>]:A's requirements. -- This method is responsible for computing what our -- inherited requirements are.
reqs | UnitIdArg orig_uid <- arg = collectHoles orig_uid | otherwise = emptyUniqMap collectHoles uid = case uid of HoleUnit -> emptyUniqMap RealUnit {} -> emptyUniqMap -- definite units don't have holes VirtUnit indef -> let local = [ unitUniqMap (moduleName mod) (Set.singleton $ Module indef mod_name) | (mod_name, mod) <- instUnitInsts indef , isHoleModule mod ] recurse = [ collectHoles (moduleUnit mod) | (_, mod) <- instUnitInsts indef ] in plusUniqMapListWith Set.union $ local ++ recurse uv = UnitVisibility { uv_expose_all = b , uv_renamings = rns , uv_package_name = First (Just n) , uv_requirements = reqs , uv_explicit = Just arg } vm' = addToUniqMap_C mappend vm_cleared (mkUnit p) uv -- In the old days, if you said `ghc -package p-0.1 -package p-0.2` -- (or if p-0.1 was registered in the pkgdb as exposed: True), -- the second package flag would override the first one and you -- would only see p-0.2 in exposed modules. This is good for -- usability. -- -- However, with thinning and renaming (or Backpack), there might be -- situations where you legitimately want to see two versions of a -- package at the same time, and this behavior would make it -- impossible to do so. So we decided that if you pass -- -hide-all-packages, this should turn OFF the overriding behavior -- where an exposed package hides all other packages with the same -- name. This should not affect Cabal at all, which only ever -- exposes one package at a time. -- -- NB: Why a variable no_hide_others? We have to apply this logic to -- -plugin-package too, and it's more consistent if the switch in -- behavior is based off of -- -hide-all-packages/-hide-all-plugin-packages depending on what -- flag is in question.
vm_cleared | no_hide_others = vm -- NB: renamings never clear | (_:_) <- rns = vm | otherwise = filterWithKeyUniqMap (\k uv -> k == mkUnit p || First (Just n) /= uv_package_name uv) vm _ -> panic "applyPackageFlag" HidePackage str -> case findPackages prec_map pkg_map closure (PackageArg str) pkgs unusable of Left ps -> Failed (PackageFlagErr flag ps) Right ps -> Succeeded $ foldl' delFromUniqMap vm (map mkUnit ps) -- | Like 'selectPackages', but doesn't return a list of unmatched -- packages. Furthermore, any packages it returns are *renamed* -- if the 'UnitArg' has a renaming associated with it. findPackages :: UnitPrecedenceMap -> UnitInfoMap -> PreloadUnitClosure -> PackageArg -> [UnitInfo] -> UnusableUnits -> Either [(UnitInfo, UnusableUnitReason)] [UnitInfo] findPackages prec_map pkg_map closure arg pkgs unusable = let ps = mapMaybe (finder arg) pkgs in if null ps then Left (mapMaybe (\(x,y) -> finder arg x >>= \x' -> return (x',y)) (nonDetEltsUniqMap unusable)) else Right (sortByPreference prec_map ps) where finder (PackageArg str) p = if matchingStr str p then Just p else Nothing finder (UnitIdArg uid) p = case uid of RealUnit (Definite iuid) | iuid == unitId p -> Just p VirtUnit inst | instUnitInstanceOf inst == unitId p -> Just (renameUnitInfo pkg_map closure (instUnitInsts inst) p) _ -> Nothing selectPackages :: UnitPrecedenceMap -> PackageArg -> [UnitInfo] -> UnusableUnits -> Either [(UnitInfo, UnusableUnitReason)] ([UnitInfo], [UnitInfo]) selectPackages prec_map arg pkgs unusable = let matches = matching arg (ps,rest) = partition matches pkgs in if null ps then Left (filter (matches.fst) (nonDetEltsUniqMap unusable)) else Right (sortByPreference prec_map ps, rest) -- | Rename a 'UnitInfo' according to some module instantiation.
-- | Rename a 'UnitInfo' according to some module instantiation: every hole
-- module in the unit's instantiations and (re)exported modules is
-- substituted via 'renameHoleModule''.
renameUnitInfo
  :: UnitInfoMap -> PreloadUnitClosure -> [(ModuleName, Module)] -> UnitInfo -> UnitInfo
renameUnitInfo pkg_map closure insts conf =
    conf { unitInstantiations = insts'
         , unitExposedModules = exposed'
         }
  where
    -- Substitution over modules, driven by the hole environment.
    subst    = renameHoleModule' pkg_map closure (listToUFM insts)
    insts'   = [ (k, subst v)       | (k, v)       <- unitInstantiations conf ]
    exposed' = [ (m, fmap subst mb) | (m, mb)      <- unitExposedModules conf ]

-- A package named on the command line can either include the
-- version, or just the name if it is unambiguous.
matchingStr :: String -> UnitInfo -> Bool
matchingStr str p =
  str == unitPackageIdString p || str == unitPackageNameString p

matchingId :: UnitId -> UnitInfo -> Bool
matchingId uid p = uid == unitId p

matching :: PackageArg -> UnitInfo -> Bool
matching (PackageArg str)                      = matchingStr str
matching (UnitIdArg (RealUnit (Definite uid))) = matchingId uid
matching (UnitIdArg _)                         = \_ -> False -- TODO: warn in this case

-- | This sorts a list of packages, putting "preferred" packages first.
-- See 'compareByPreference' for the semantics of "preference".
sortByPreference :: UnitPrecedenceMap -> [UnitInfo] -> [UnitInfo]
sortByPreference prec_map = sortBy (flip (compareByPreference prec_map))

-- | Returns 'GT' if @pkg@ should be preferred over @pkg'@ when picking
-- which should be "active". Here is the order of preference:
--
-- 1. First, prefer the latest version
-- 2. If the versions are the same, prefer the package that
--    came in the latest package database.
--
-- Pursuant to #12518, we could change this policy to, for example, remove
-- the version preference, meaning that we would always prefer the units
-- in later unit database.
{- Unit preference ordering, wired-in unit discovery/rewiring, and the
   unusable-unit machinery.

   NOTE(review): as with the rest of this chunk, the original newlines have
   been collapsed by extraction, so each @--@ comment below now runs to the
   end of a very long physical line.  The text is reproduced verbatim; only
   this header comment is added.

   Contents of this span, grounded in the visible code:

   * 'compareByPreference' — orders two 'UnitInfo's: first by
     'unitPackageVersion'; on a tie, by precedence looked up in the
     'UnitPrecedenceMap' (later @-package-db@ flags win); 'EQ' when neither
     unit has a precedence entry.  'comparing' is the local
     @Data.Ord.comparing@-style helper it uses.

   * 'pprFlag' / 'pprTrustFlag' — render 'PackageFlag' / 'TrustFlag' back
     into their command-line spellings for diagnostics.

   * 'findWiredInUnits' — for each id in 'wiredInUnitIds', picks the most
     preferable matching package (preferring ones visible in @vis_map@,
     then any, per @firstJustsM [try all_exposed_ps, try all_ps, notfound]@),
     builds a 'WiringMap' from real unit id to wired-in id (skipping
     indefinite units), and rewrites 'unitId' / 'unitInstanceOf' /
     'unitDepends' / 'unitExposedModules' accordingly
     (@updateWiredInDependencies@).

   * 'upd_wired_in_mod' / 'upd_wired_in_uid' / 'upd_wired_in' — apply the
     'WiringMap' to a 'Module', a 'Unit' (recursing through 'VirtUnit'
     instantiations), and a bare 'UnitId' respectively.

   * 'updateVisibilityMap' — re-keys 'VisibilityMap' entries from the
     pre-wiring unit ids to the wired-in ids.

   * 'UnusableUnitReason' (+ 'Outputable' instance, 'pprReason') — why a
     unit was dropped: ignored by flag, broken/cyclic/ignored/shadowed
     dependencies.

   * 'reportCycles' / 'reportUnusable' — verbosity-2 diagnostics.

   * 'RevIndex' / 'reverseDeps' — reverse dependency index over a
     'UnitInfoMap'.

   * 'removeUnits' — deletes the given unit ids plus (transitively, via the
     reverse index) everything depending on them, returning the pruned map
     and the removed infos.

   * 'depsNotAvailable' — direct dependencies absent from the index.
-}
compareByPreference :: UnitPrecedenceMap -> UnitInfo -> UnitInfo -> Ordering compareByPreference prec_map pkg pkg' = case comparing unitPackageVersion pkg pkg' of GT -> GT EQ | Just prec <- lookupUniqMap prec_map (unitId pkg) , Just prec' <- lookupUniqMap prec_map (unitId pkg') -- Prefer the unit from the later DB flag (i.e., higher -- precedence) -> compare prec prec' | otherwise -> EQ LT -> LT comparing :: Ord a => (t -> a) -> t -> t -> Ordering comparing f a b = f a `compare` f b pprFlag :: PackageFlag -> SDoc pprFlag flag = case flag of HidePackage p -> text "-hide-package " <> text p ExposePackage doc _ _ -> text doc pprTrustFlag :: TrustFlag -> SDoc pprTrustFlag flag = case flag of TrustPackage p -> text "-trust " <> text p DistrustPackage p -> text "-distrust " <> text p -- ----------------------------------------------------------------------------- -- Wired-in units -- -- See Note [Wired-in units] in GHC.Unit.Types type WiringMap = UniqMap UnitId UnitId findWiredInUnits :: Logger -> UnitPrecedenceMap -> [UnitInfo] -- database -> VisibilityMap -- info on what units are visible -- for wired in selection -> IO ([UnitInfo], -- unit database updated for wired in WiringMap) -- map from unit id to wired identity findWiredInUnits logger prec_map pkgs vis_map = do -- Now we must find our wired-in units, and rename them to -- their canonical names (eg. base-1.0 ==> base), as described -- in Note [Wired-in units] in GHC.Unit.Types let matches :: UnitInfo -> UnitId -> Bool pc `matches` pid = unitPackageName pc == PackageName (unitIdFS pid) -- find which package corresponds to each wired-in package -- delete any other packages with the same name -- update the package and any dependencies to point to the new -- one. -- -- When choosing which package to map to a wired-in package -- name, we try to pick the latest version of exposed packages. -- However, if there are no exposed wired in packages available -- (e.g.
-hide-all-packages was used), we can't bail: we *have* -- to assign a package for the wired-in package: so we try again -- with hidden packages included to (and pick the latest -- version). -- -- You can also override the default choice by using -ignore-package: -- this works even when there is no exposed wired in package -- available. -- findWiredInUnit :: [UnitInfo] -> UnitId -> IO (Maybe (UnitId, UnitInfo)) findWiredInUnit pkgs wired_pkg = firstJustsM [try all_exposed_ps, try all_ps, notfound] where all_ps = [ p | p <- pkgs, p `matches` wired_pkg ] all_exposed_ps = [ p | p <- all_ps, (mkUnit p) `elemUniqMap` vis_map ] try ps = case sortByPreference prec_map ps of p:_ -> Just <$> pick p _ -> pure Nothing notfound = do debugTraceMsg logger 2 $ text "wired-in package " <> ftext (unitIdFS wired_pkg) <> text " not found." return Nothing pick :: UnitInfo -> IO (UnitId, UnitInfo) pick pkg = do debugTraceMsg logger 2 $ text "wired-in package " <> ftext (unitIdFS wired_pkg) <> text " mapped to " <> ppr (unitId pkg) return (wired_pkg, pkg) mb_wired_in_pkgs <- mapM (findWiredInUnit pkgs) wiredInUnitIds let wired_in_pkgs = catMaybes mb_wired_in_pkgs wiredInMap :: UniqMap UnitId UnitId wiredInMap = listToUniqMap [ (unitId realUnitInfo, wiredInUnitId) | (wiredInUnitId, realUnitInfo) <- wired_in_pkgs , not (unitIsIndefinite realUnitInfo) ] updateWiredInDependencies pkgs = map (upd_deps . upd_pkg) pkgs where upd_pkg pkg | Just wiredInUnitId <- lookupUniqMap wiredInMap (unitId pkg) = pkg { unitId = wiredInUnitId , unitInstanceOf = wiredInUnitId -- every non instantiated unit is an instance of -- itself (required by Backpack...) -- -- See Note [About units] in GHC.Unit } | otherwise = pkg upd_deps pkg = pkg { unitDepends = map (upd_wired_in wiredInMap) (unitDepends pkg), unitExposedModules = map (\(k,v) -> (k, fmap (upd_wired_in_mod wiredInMap) v)) (unitExposedModules pkg) } return (updateWiredInDependencies pkgs, wiredInMap) -- Helper functions for rewiring Module and Unit.
These -- rewrite Units of modules in wired-in packages to the form known to the -- compiler, as described in Note [Wired-in units] in GHC.Unit.Types. -- -- For instance, base-4.9.0.0 will be rewritten to just base, to match -- what appears in GHC.Builtin.Names. upd_wired_in_mod :: WiringMap -> Module -> Module upd_wired_in_mod wiredInMap (Module uid m) = Module (upd_wired_in_uid wiredInMap uid) m upd_wired_in_uid :: WiringMap -> Unit -> Unit upd_wired_in_uid wiredInMap u = case u of HoleUnit -> HoleUnit RealUnit (Definite uid) -> RealUnit (Definite (upd_wired_in wiredInMap uid)) VirtUnit indef_uid -> VirtUnit $ mkInstantiatedUnit (instUnitInstanceOf indef_uid) (map (\(x,y) -> (x,upd_wired_in_mod wiredInMap y)) (instUnitInsts indef_uid)) upd_wired_in :: WiringMap -> UnitId -> UnitId upd_wired_in wiredInMap key | Just key' <- lookupUniqMap wiredInMap key = key' | otherwise = key updateVisibilityMap :: WiringMap -> VisibilityMap -> VisibilityMap updateVisibilityMap wiredInMap vis_map = foldl' f vis_map (nonDetUniqMapToList wiredInMap) where f vm (from, to) = case lookupUniqMap vis_map (RealUnit (Definite from)) of Nothing -> vm Just r -> addToUniqMap (delFromUniqMap vm (RealUnit (Definite from))) (RealUnit (Definite to)) r -- ---------------------------------------------------------------------------- -- | The reason why a unit is unusable. data UnusableUnitReason = -- | We ignored it explicitly using @-ignore-package@. IgnoredWithFlag -- | This unit transitively depends on a unit that was never present -- in any of the provided databases. | BrokenDependencies [UnitId] -- | This unit transitively depends on a unit involved in a cycle. -- Note that the list of 'UnitId' reports the direct dependencies -- of this unit that (transitively) depended on the cycle, and not -- the actual cycle itself (which we report separately at high verbosity.) | CyclicDependencies [UnitId] -- | This unit transitively depends on a unit which was ignored.
| IgnoredDependencies [UnitId] -- | This unit transitively depends on a unit which was -- shadowed by an ABI-incompatible unit. | ShadowedDependencies [UnitId] instance Outputable UnusableUnitReason where ppr IgnoredWithFlag = text "[ignored with flag]" ppr (BrokenDependencies uids) = brackets (text "broken" <+> ppr uids) ppr (CyclicDependencies uids) = brackets (text "cyclic" <+> ppr uids) ppr (IgnoredDependencies uids) = brackets (text "ignored" <+> ppr uids) ppr (ShadowedDependencies uids) = brackets (text "shadowed" <+> ppr uids) type UnusableUnits = UniqMap UnitId (UnitInfo, UnusableUnitReason) pprReason :: SDoc -> UnusableUnitReason -> SDoc pprReason pref reason = case reason of IgnoredWithFlag -> pref <+> text "ignored due to an -ignore-package flag" BrokenDependencies deps -> pref <+> text "unusable due to missing dependencies:" $$ nest 2 (hsep (map ppr deps)) CyclicDependencies deps -> pref <+> text "unusable due to cyclic dependencies:" $$ nest 2 (hsep (map ppr deps)) IgnoredDependencies deps -> pref <+> text ("unusable because the -ignore-package flag was used to " ++ "ignore at least one of its dependencies:") $$ nest 2 (hsep (map ppr deps)) ShadowedDependencies deps -> pref <+> text "unusable due to shadowed dependencies:" $$ nest 2 (hsep (map ppr deps)) reportCycles :: Logger -> [SCC UnitInfo] -> IO () reportCycles logger sccs = mapM_ report sccs where report (AcyclicSCC _) = return () report (CyclicSCC vs) = debugTraceMsg logger 2 $ text "these packages are involved in a cycle:" $$ nest 2 (hsep (map (ppr .
unitId) vs)) reportUnusable :: Logger -> UnusableUnits -> IO () reportUnusable logger pkgs = mapM_ report (nonDetUniqMapToList pkgs) where report (ipid, (_, reason)) = debugTraceMsg logger 2 $ pprReason (text "package" <+> ppr ipid <+> text "is") reason -- ---------------------------------------------------------------------------- -- -- Utilities on the database -- -- | A reverse dependency index, mapping an 'UnitId' to -- the 'UnitId's which have a dependency on it. type RevIndex = UniqMap UnitId [UnitId] -- | Compute the reverse dependency index of a unit database. reverseDeps :: UnitInfoMap -> RevIndex reverseDeps db = nonDetFoldUniqMap go emptyUniqMap db where go :: (UnitId, UnitInfo) -> RevIndex -> RevIndex go (_uid, pkg) r = foldl' (go' (unitId pkg)) r (unitDepends pkg) go' from r to = addToUniqMap_C (++) r to [from] -- | Given a list of 'UnitId's to remove, a database, -- and a reverse dependency index (as computed by 'reverseDeps'), -- remove those units, plus any units which depend on them. -- Returns the pruned database, as well as a list of 'UnitInfo's -- that was removed. removeUnits :: [UnitId] -> RevIndex -> UnitInfoMap -> (UnitInfoMap, [UnitInfo]) removeUnits uids index m = go uids (m,[]) where go [] (m,pkgs) = (m,pkgs) go (uid:uids) (m,pkgs) | Just pkg <- lookupUniqMap m uid = case lookupUniqMap index uid of Nothing -> go uids (delFromUniqMap m uid, pkg:pkgs) Just rdeps -> go (rdeps ++ uids) (delFromUniqMap m uid, pkg:pkgs) | otherwise = go uids (m,pkgs) -- | Given a 'UnitInfo' from some 'UnitInfoMap', return all entries in 'depends' -- which correspond to units that do not exist in the index. depsNotAvailable :: UnitInfoMap -> UnitInfo -> [UnitId] depsNotAvailable pkg_map pkg = filter (not . (`elemUniqMap` pkg_map)) (unitDepends pkg) -- | Given a 'UnitInfo' from some 'UnitInfoMap' return all entries in -- 'unitAbiDepends' which correspond to units that do not exist, OR have -- mismatching ABIs.
{- Database validation and construction of the final 'UnitState'.

   NOTE(review): original newlines were collapsed by extraction, so each
   @--@ comment below runs to the end of a very long physical line.  The
   text is reproduced verbatim; only this header comment is added.

   Contents of this span, grounded in the visible code:

   * 'depsAbiMismatch' — 'unitAbiDepends' entries whose target is missing
     from the map or whose 'unitAbiHash' disagrees with the recorded hash.

   * 'ignoreUnits' — marks packages matching @-ignore-package@ flags as
     'IgnoredWithFlag'; a non-matching flag is deliberately not an error.

   * 'UnitPrecedenceMap' / 'mergeDatabases' — left-folds the databases in
     command-line order; later databases shadow earlier entries with the
     same 'UnitId' (@pkg_map `plusUniqMap` db_map@ is right-biased here),
     and each unit is tagged with the 1-based index of the database it came
     from, used later as its precedence.

   * 'validateDatabase' — prunes the merged map in four passes, each using
     'removeUnits' so that dependents of a removed unit are removed too:
     (1) broken deps, (2) members of dependency cycles (via
     'stronglyConnComp'), (3) @-ignore-package@ victims, (4) units whose
     ABI deps mismatch.  Returns the pruned map, the combined
     'UnusableUnits' (order matters: 'plusUniqMapList' is right-biased, so
     the head of the list is least preferred), and the SCCs for cycle
     reporting.

   * 'mkUnitState' — the driver.  The long @{- Plan. ... -}@ comment in the
     body is authoritative; in code order it: reads/distrusts databases,
     splits home-unit flags out of @-package@ flags, merges and validates,
     applies trust flags, computes the default visibility map from the most
     preferable exposed definite units (empty under @-hide-all-packages@;
     equi-preferable units are all included, cf. #16228), applies package
     flags, resolves wired-in units and rewires both visibility maps,
     builds the plugin visibility map (sharing @vis_map@ in the common
     case), derives the requirement context from 'uv_requirements', closes
     the preload set over dependencies ('closeUnitDeps'), builds the module
     provider maps, and forces the resulting 'UnitState' record
     (@let !state = ...@) to avoid retaining inputs.

   * 'selectHptFlag' / 'selectHomeUnits' — pick out @-package-id@ flags
     that refer to home units (thinning/renaming not yet supported, per the
     MP comment).

   * 'unwireUnit' — inverse of wiring, via the state's @unwireMap@.

   * 'mkModuleNameProvidersMap' — folds an extended visibility map (padded
     with default-invisible entries for hidden and indefinite packages, cf.
     #14717) into a map from 'ModuleName' to provider origins, handling
     re-exports, renamings, and hidden modules.  The trailing doc comment
     belongs to 'mkUnusableModuleNameProvidersMap', which is cut off at the
     end of this chunk.
-}
depsAbiMismatch :: UnitInfoMap -> UnitInfo -> [UnitId] depsAbiMismatch pkg_map pkg = map fst . filter (not . abiMatch) $ unitAbiDepends pkg where abiMatch (dep_uid, abi) | Just dep_pkg <- lookupUniqMap pkg_map dep_uid = unitAbiHash dep_pkg == abi | otherwise = False -- ----------------------------------------------------------------------------- -- Ignore units ignoreUnits :: [IgnorePackageFlag] -> [UnitInfo] -> UnusableUnits ignoreUnits flags pkgs = listToUniqMap (concatMap doit flags) where doit (IgnorePackage str) = case partition (matchingStr str) pkgs of (ps, _) -> [ (unitId p, (p, IgnoredWithFlag)) | p <- ps ] -- missing unit is not an error for -ignore-package, -- because a common usage is to -ignore-package P as -- a preventative measure just in case P exists. -- ---------------------------------------------------------------------------- -- -- Merging databases -- -- | For each unit, a mapping from uid -> i indicates that this -- unit was brought into GHC by the ith @-package-db@ flag on -- the command line. We use this mapping to make sure we prefer -- units that were defined later on the command line, if there -- is an ambiguity. type UnitPrecedenceMap = UniqMap UnitId Int -- | Given a list of databases, merge them together, where -- units with the same unit id in later databases override -- earlier ones. This does NOT check if the resulting database -- makes sense (that's done by 'validateDatabase'). mergeDatabases :: Logger -> [UnitDatabase UnitId] -> IO (UnitInfoMap, UnitPrecedenceMap) mergeDatabases logger = foldM merge (emptyUniqMap, emptyUniqMap) . zip [1..] where merge (pkg_map, prec_map) (i, UnitDatabase db_path db) = do debugTraceMsg logger 2 $ text "loading package database" <+> text db_path forM_ (Set.toList override_set) $ \pkg -> debugTraceMsg logger 2 $ text "package" <+> ppr pkg <+> text "overrides a previously defined package" return (pkg_map', prec_map') where db_map = mk_pkg_map db mk_pkg_map = listToUniqMap .
map (\p -> (unitId p, p)) -- The set of UnitIds which appear in both db and pkgs. These are the -- ones that get overridden. Compute this just to give some -- helpful debug messages at -v2 override_set :: Set UnitId override_set = Set.intersection (nonDetUniqMapToKeySet db_map) (nonDetUniqMapToKeySet pkg_map) -- Now merge the sets together (NB: in case of duplicate, -- first argument preferred) pkg_map' :: UnitInfoMap pkg_map' = pkg_map `plusUniqMap` db_map prec_map' :: UnitPrecedenceMap prec_map' = prec_map `plusUniqMap` (mapUniqMap (const i) db_map) -- | Validates a database, removing unusable units from it -- (this includes removing units that the user has explicitly -- ignored.) Our general strategy: -- -- 1. Remove all broken units (dangling dependencies) -- 2. Remove all units that are cyclic -- 3. Apply ignore flags -- 4. Remove all units which have deps with mismatching ABIs -- validateDatabase :: UnitConfig -> UnitInfoMap -> (UnitInfoMap, UnusableUnits, [SCC UnitInfo]) validateDatabase cfg pkg_map1 = (pkg_map5, unusable, sccs) where ignore_flags = reverse (unitConfigFlagsIgnored cfg) -- Compute the reverse dependency index index = reverseDeps pkg_map1 -- Helper function mk_unusable mk_err dep_matcher m uids = listToUniqMap [ (unitId pkg, (pkg, mk_err (dep_matcher m pkg))) | pkg <- uids ] -- Find broken units directly_broken = filter (not . null .
depsNotAvailable pkg_map1) (nonDetEltsUniqMap pkg_map1) (pkg_map2, broken) = removeUnits (map unitId directly_broken) index pkg_map1 unusable_broken = mk_unusable BrokenDependencies depsNotAvailable pkg_map2 broken -- Find recursive units sccs = stronglyConnComp [ (pkg, unitId pkg, unitDepends pkg) | pkg <- nonDetEltsUniqMap pkg_map2 ] getCyclicSCC (CyclicSCC vs) = map unitId vs getCyclicSCC (AcyclicSCC _) = [] (pkg_map3, cyclic) = removeUnits (concatMap getCyclicSCC sccs) index pkg_map2 unusable_cyclic = mk_unusable CyclicDependencies depsNotAvailable pkg_map3 cyclic -- Apply ignore flags directly_ignored = ignoreUnits ignore_flags (nonDetEltsUniqMap pkg_map3) (pkg_map4, ignored) = removeUnits (nonDetKeysUniqMap directly_ignored) index pkg_map3 unusable_ignored = mk_unusable IgnoredDependencies depsNotAvailable pkg_map4 ignored -- Knock out units whose dependencies don't agree with ABI -- (i.e., got invalidated due to shadowing) directly_shadowed = filter (not . null . depsAbiMismatch pkg_map4) (nonDetEltsUniqMap pkg_map4) (pkg_map5, shadowed) = removeUnits (map unitId directly_shadowed) index pkg_map4 unusable_shadowed = mk_unusable ShadowedDependencies depsAbiMismatch pkg_map5 shadowed -- combine all unusables. The order is important for shadowing. -- plusUniqMapList folds using plusUFM which is right biased (opposite of -- Data.Map.union) so the head of the list should be the least preferred unusable = plusUniqMapList [ unusable_shadowed , unusable_cyclic , unusable_broken , unusable_ignored , directly_ignored ] -- ----------------------------------------------------------------------------- -- When all the command-line options are in, we can process our unit -- settings and populate the unit state. mkUnitState :: Logger -> UnitConfig -> IO (UnitState,[UnitDatabase UnitId]) mkUnitState logger cfg = do {- Plan. There are two main steps for making the package state: 1.
We want to build a single, unified package database based on all of the input databases, which upholds the invariant that there is only one package per any UnitId and there are no dangling dependencies. We'll do this by merging, and then successively filtering out bad dependencies. a) Merge all the databases together. If an input database defines unit ID that is already in the unified database, that package SHADOWS the existing package in the current unified database. Note that order is important: packages defined later in the list of command line arguments shadow those defined earlier. b) Remove all packages with missing dependencies, or mutually recursive dependencies. b) Remove packages selected by -ignore-package from input database c) Remove all packages which depended on packages that are now shadowed by an ABI-incompatible package d) report (with -v) any packages that were removed by steps 1-3 2. We want to look at the flags controlling package visibility, and build a mapping of what module names are in scope and where they live. a) on the final, unified database, we apply -trust/-distrust flags directly, modifying the database so that the 'trusted' field has the correct value. b) we use the -package/-hide-package flags to compute a visibility map, stating what packages are "exposed" for the purposes of computing the module map. * if any flag refers to a package which was removed by 1-5, then we can give an error message explaining why * if -hide-all-packages was not specified, this step also hides packages which are superseded by later exposed packages * this step is done TWICE if -plugin-package/-hide-all-plugin-packages are used c) based on the visibility map, we pick wired packages and rewrite them to have the expected unitId. d) finally, using the visibility map and the package database, we build a mapping saying what every in scope module name points to.
-} -- if databases have not been provided, read the database flags raw_dbs <- case unitConfigDBCache cfg of Nothing -> readUnitDatabases logger cfg Just dbs -> return dbs -- distrust all units if the flag is set let distrust_all db = db { unitDatabaseUnits = distrustAllUnits (unitDatabaseUnits db) } dbs | unitConfigDistrustAll cfg = map distrust_all raw_dbs | otherwise = raw_dbs -- This, and the other reverse's that you will see, are due to the fact that -- packageFlags, pluginPackageFlags, etc. are all specified in *reverse* order -- than they are on the command line. let raw_other_flags = reverse (unitConfigFlagsExposed cfg) (hpt_flags, other_flags) = partition (selectHptFlag (unitConfigHomeUnits cfg)) raw_other_flags debugTraceMsg logger 2 $ text "package flags" <+> ppr other_flags let home_unit_deps = selectHomeUnits (unitConfigHomeUnits cfg) hpt_flags -- Merge databases together, without checking validity (pkg_map1, prec_map) <- mergeDatabases logger dbs -- Now that we've merged everything together, prune out unusable -- packages. let (pkg_map2, unusable, sccs) = validateDatabase cfg pkg_map1 reportCycles logger sccs reportUnusable logger unusable -- Apply trust flags (these flags apply regardless of whether -- or not packages are visible or not) pkgs1 <- mayThrowUnitErr $ foldM (applyTrustFlag prec_map unusable) (nonDetEltsUniqMap pkg_map2) (reverse (unitConfigFlagsTrusted cfg)) let prelim_pkg_db = mkUnitInfoMap pkgs1 -- -- Calculate the initial set of units from package databases, prior to any package flags. -- -- Conceptually, we select the latest versions of all valid (not unusable) *packages* -- (not units). This is empty if we have -hide-all-packages. -- -- Then we create an initial visibility map with default visibilities for all -- exposed, definite units which belong to the latest valid packages.
-- let preferLater unit unit' = case compareByPreference prec_map unit unit' of GT -> unit _ -> unit' addIfMorePreferable m unit = addToUDFM_C preferLater m (fsPackageName unit) unit -- This is the set of maximally preferable packages. In fact, it is a set of -- most preferable *units* keyed by package name, which act as stand-ins in -- for "a package in a database". We use units here because we don't have -- "a package in a database" as a type currently. mostPreferablePackageReps = if unitConfigHideAll cfg then emptyUDFM else foldl' addIfMorePreferable emptyUDFM pkgs1 -- When exposing units, we want to consider all of those in the most preferable -- packages. We can implement that by looking for units that are equi-preferable -- with the most preferable unit for package. Being equi-preferable means that -- they must be in the same database, with the same version, and the same package name. -- -- We must take care to consider all these units and not just the most -- preferable one, otherwise we can end up with problems like #16228. mostPreferable u = case lookupUDFM mostPreferablePackageReps (fsPackageName u) of Nothing -> False Just u' -> compareByPreference prec_map u u' == EQ vis_map1 = foldl' (\vm p -> -- Note: we NEVER expose indefinite packages by -- default, because it's almost assuredly not -- what you want (no mix-in linking has occurred). if unitIsExposed p && unitIsDefinite (mkUnit p) && mostPreferable p then addToUniqMap vm (mkUnit p) UnitVisibility { uv_expose_all = True, uv_renamings = [], uv_package_name = First (Just (fsPackageName p)), uv_requirements = emptyUniqMap, uv_explicit = Nothing } else vm) emptyUniqMap pkgs1 -- -- Compute a visibility map according to the command-line flags (-package, -- -hide-package). This needs to know about the unusable packages, since if a -- user tries to enable an unusable package, we should let them know.
-- vis_map2 <- mayThrowUnitErr $ foldM (applyPackageFlag prec_map prelim_pkg_db emptyUniqSet unusable (unitConfigHideAll cfg) pkgs1) vis_map1 other_flags -- -- Sort out which packages are wired in. This has to be done last, since -- it modifies the unit ids of wired in packages, but when we process -- package arguments we need to key against the old versions. -- (pkgs2, wired_map) <- findWiredInUnits logger prec_map pkgs1 vis_map2 let pkg_db = mkUnitInfoMap pkgs2 -- Update the visibility map, so we treat wired packages as visible. let vis_map = updateVisibilityMap wired_map vis_map2 let hide_plugin_pkgs = unitConfigHideAllPlugins cfg plugin_vis_map <- case unitConfigFlagsPlugins cfg of -- common case; try to share the old vis_map [] | not hide_plugin_pkgs -> return vis_map | otherwise -> return emptyUniqMap _ -> do let plugin_vis_map1 | hide_plugin_pkgs = emptyUniqMap -- Use the vis_map PRIOR to wired in, -- because otherwise applyPackageFlag -- won't work. | otherwise = vis_map2 plugin_vis_map2 <- mayThrowUnitErr $ foldM (applyPackageFlag prec_map prelim_pkg_db emptyUniqSet unusable hide_plugin_pkgs pkgs1) plugin_vis_map1 (reverse (unitConfigFlagsPlugins cfg)) -- Updating based on wired in packages is mostly -- good hygiene, because it won't matter: no wired in -- package has a compiler plugin. -- TODO: If a wired in package had a compiler plugin, -- and you tried to pick different wired in packages -- with the plugin flags and the normal flags... what -- would happen? I don't know! But this doesn't seem -- likely to actually happen. return (updateVisibilityMap wired_map plugin_vis_map2) let pkgname_map = listToUFM [ (unitPackageName p, unitInstanceOf p) | p <- pkgs2 ] -- The explicitUnits accurately reflects the set of units we have turned -- on; as such, it also is the only way one can come up with requirements.
-- The requirement context is directly based off of this: we simply -- look for nested unit IDs that are directly fed holes: the requirements -- of those units are precisely the ones we need to track let explicit_pkgs = [(k, uv_explicit v) | (k, v) <- nonDetUniqMapToList vis_map] req_ctx = mapUniqMap (Set.toList) $ plusUniqMapListWith Set.union (map uv_requirements (nonDetEltsUniqMap vis_map)) -- -- Here we build up a set of the packages mentioned in -package -- flags on the command line; these are called the "preload" -- packages. we link these packages in eagerly. The preload set -- should contain at least rts & base, which is why we pretend that -- the command line contains -package rts & -package base. -- -- NB: preload IS important even for type-checking, because we -- need the correct include path to be set. -- let preload1 = nonDetKeysUniqMap (filterUniqMap (isJust . uv_explicit) vis_map) -- add default preload units if they can be found in the db basicLinkedUnits = fmap (RealUnit .
Definite) $ filter (flip elemUniqMap pkg_db) $ unitConfigAutoLink cfg preload3 = ordNub $ (basicLinkedUnits ++ preload1) -- Close the preload packages with their dependencies dep_preload <- mayThrowUnitErr $ closeUnitDeps pkg_db $ zip (map toUnitId preload3) (repeat Nothing) let mod_map1 = mkModuleNameProvidersMap logger cfg pkg_db emptyUniqSet vis_map mod_map2 = mkUnusableModuleNameProvidersMap unusable mod_map = mod_map2 `plusUniqMap` mod_map1 -- Force the result to avoid leaking input parameters let !state = UnitState { preloadUnits = dep_preload , explicitUnits = explicit_pkgs , homeUnitDepends = Set.toList home_unit_deps , unitInfoMap = pkg_db , preloadClosure = emptyUniqSet , moduleNameProvidersMap = mod_map , pluginModuleNameProvidersMap = mkModuleNameProvidersMap logger cfg pkg_db emptyUniqSet plugin_vis_map , packageNameMap = pkgname_map , wireMap = wired_map , unwireMap = listToUniqMap [ (v,k) | (k,v) <- nonDetUniqMapToList wired_map ] , requirementContext = req_ctx , allowVirtualUnits = unitConfigAllowVirtual cfg } return (state, raw_dbs) selectHptFlag :: Set.Set UnitId -> PackageFlag -> Bool selectHptFlag home_units (ExposePackage _ (UnitIdArg uid) _) | toUnitId uid `Set.member` home_units = True selectHptFlag _ _ = False selectHomeUnits :: Set.Set UnitId -> [PackageFlag] -> Set.Set UnitId selectHomeUnits home_units flags = foldl' go Set.empty flags where go :: Set.Set UnitId -> PackageFlag -> Set.Set UnitId go cur (ExposePackage _ (UnitIdArg uid) _) | toUnitId uid `Set.member` home_units = Set.insert (toUnitId uid) cur -- MP: This does not yet support thinning/renaming go cur _ = cur -- | Given a wired-in 'Unit', "unwire" it into the 'Unit' -- that it was recorded as in the package database. unwireUnit :: UnitState -> Unit -> Unit unwireUnit state uid@(RealUnit (Definite def_uid)) = maybe uid (RealUnit .
Definite) (lookupUniqMap (unwireMap state) def_uid) unwireUnit _ uid = uid -- ----------------------------------------------------------------------------- -- | Makes the mapping from ModuleName to package info -- Slight irritation: we proceed by leafing through everything -- in the installed package database, which makes handling indefinite -- packages a bit bothersome. mkModuleNameProvidersMap :: Logger -> UnitConfig -> UnitInfoMap -> PreloadUnitClosure -> VisibilityMap -> ModuleNameProvidersMap mkModuleNameProvidersMap logger cfg pkg_map closure vis_map = -- What should we fold on? Both situations are awkward: -- -- * Folding on the visibility map means that we won't create -- entries for packages that aren't mentioned in vis_map -- (e.g., hidden packages, causing #14717) -- -- * Folding on pkg_map is awkward because if we have an -- Backpack instantiation, we need to possibly add a -- package from pkg_map multiple times to the actual -- ModuleNameProvidersMap. Also, we don't really want -- definite package instantiations to show up in the -- list of possibilities. -- -- So what will we do instead? We'll extend vis_map with -- entries for every definite (for non-Backpack) and -- indefinite (for Backpack) package, so that we get the -- hidden entries we need.
nonDetFoldUniqMap extend_modmap emptyMap vis_map_extended where vis_map_extended = {- preferred -} default_vis `plusUniqMap` vis_map default_vis = listToUniqMap [ (mkUnit pkg, mempty) | (_, pkg) <- nonDetUniqMapToList pkg_map -- Exclude specific instantiations of an indefinite -- package , unitIsIndefinite pkg || null (unitInstantiations pkg) ] emptyMap = emptyUniqMap setOrigins m os = fmap (const os) m extend_modmap (uid, UnitVisibility { uv_expose_all = b, uv_renamings = rns }) modmap = addListTo modmap theBindings where pkg = unit_lookup uid theBindings :: [(ModuleName, UniqMap Module ModuleOrigin)] theBindings = newBindings b rns newBindings :: Bool -> [(ModuleName, ModuleName)] -> [(ModuleName, UniqMap Module ModuleOrigin)] newBindings e rns = es e ++ hiddens ++ map rnBinding rns rnBinding :: (ModuleName, ModuleName) -> (ModuleName, UniqMap Module ModuleOrigin) rnBinding (orig, new) = (new, setOrigins origEntry fromFlag) where origEntry = case lookupUFM esmap orig of Just r -> r Nothing -> throwGhcException (CmdLineError (renderWithContext (log_default_user_context (logFlags logger)) (text "package flag: could not find module name" <+> ppr orig <+> text "in package" <+> ppr pk))) es :: Bool -> [(ModuleName, UniqMap Module ModuleOrigin)] es e = do (m, exposedReexport) <- exposed_mods let (pk', m', origin') = case exposedReexport of Nothing -> (pk, m, fromExposedModules e) Just (Module pk' m') -> (pk', m', fromReexportedModules e pkg) return (m, mkModMap pk' m' origin') esmap :: UniqFM ModuleName (UniqMap Module ModuleOrigin) esmap = listToUFM (es False) -- parameter here doesn't matter, orig will -- be overwritten hiddens = [(m, mkModMap pk m ModHidden) | m <- hidden_mods] pk = mkUnit pkg unit_lookup uid = lookupUnit' (unitConfigAllowVirtual cfg) pkg_map closure uid `orElse` pprPanic "unit_lookup" (ppr uid) exposed_mods = unitExposedModules pkg hidden_mods = unitHiddenModules pkg -- | Make a 'ModuleNameProvidersMap' covering a set of unusable packages.
mkUnusableModuleNameProvidersMap :: UnusableUnits -> ModuleNameProvidersMap mkUnusableModuleNameProvidersMap unusables = nonDetFoldUniqMap extend_modmap emptyUniqMap unusables where extend_modmap (_uid, (unit_info, reason)) modmap = addListTo modmap bindings where bindings :: [(ModuleName, UniqMap Module ModuleOrigin)] bindings = exposed ++ hidden origin_reexport = ModUnusable (UnusableUnit unit reason True) origin_normal = ModUnusable (UnusableUnit unit reason False) unit = mkUnit unit_info exposed = map get_exposed exposed_mods hidden = [(m, mkModMap unit m origin_normal) | m <- hidden_mods] -- with re-exports, c:Foo can be reexported from two (or more) -- unusable packages: -- Foo -> a:Foo (unusable reason A) -> c:Foo -- -> b:Foo (unusable reason B) -> c:Foo -- -- We must be careful to not record the following (#21097): -- Foo -> c:Foo (unusable reason A) -- -> c:Foo (unusable reason B) -- But: -- Foo -> a:Foo (unusable reason A) -- -> b:Foo (unusable reason B) -- get_exposed (mod, Just _) = (mod, mkModMap unit mod origin_reexport) get_exposed (mod, _) = (mod, mkModMap unit mod origin_normal) -- in the reexport case, we create a virtual module that doesn't -- exist but we don't care as it's only used as a key in the map. exposed_mods = unitExposedModules unit_info hidden_mods = unitHiddenModules unit_info -- | Add a list of key/value pairs to a nested map. -- -- The outer map is processed with 'Data.Map.Strict' to prevent memory leaks -- when reloading modules in GHCi (see #4029). This ensures that each -- value is forced before installing into the map. 
addListTo :: (Monoid a, Ord k1, Ord k2, Uniquable k1, Uniquable k2) => UniqMap k1 (UniqMap k2 a) -> [(k1, UniqMap k2 a)] -> UniqMap k1 (UniqMap k2 a) addListTo = foldl' merge where merge m (k, v) = addToUniqMap_C (plusUniqMap_C mappend) m k v -- | Create a singleton module mapping mkModMap :: Unit -> ModuleName -> ModuleOrigin -> UniqMap Module ModuleOrigin mkModMap pkg mod = unitUniqMap (mkModule pkg mod) -- ----------------------------------------------------------------------------- -- Package Utils -- | Takes a 'ModuleName', and if the module is in any package returns -- list of modules which take that name. lookupModuleInAllUnits :: UnitState -> ModuleName -> [(Module, UnitInfo)] lookupModuleInAllUnits pkgs m = case lookupModuleWithSuggestions pkgs m NoPkgQual of LookupFound a b -> [(a,fst b)] LookupMultiple rs -> map f rs where f (m,_) = (m, expectJust "lookupModule" (lookupUnit pkgs (moduleUnit m))) _ -> [] -- | The result of performing a lookup data LookupResult = -- | Found the module uniquely, nothing else to do LookupFound Module (UnitInfo, ModuleOrigin) -- | Multiple modules with the same name in scope | LookupMultiple [(Module, ModuleOrigin)] -- | No modules found, but there were some hidden ones with -- an exact name match. 
First is due to package hidden, second -- is due to module being hidden | LookupHidden [(Module, ModuleOrigin)] [(Module, ModuleOrigin)] -- | No modules found, but there were some unusable ones with -- an exact name match | LookupUnusable [(Module, ModuleOrigin)] -- | Nothing found, here are some suggested different names | LookupNotFound [ModuleSuggestion] -- suggestions data ModuleSuggestion = SuggestVisible ModuleName Module ModuleOrigin | SuggestHidden ModuleName Module ModuleOrigin lookupModuleWithSuggestions :: UnitState -> ModuleName -> PkgQual -> LookupResult lookupModuleWithSuggestions pkgs = lookupModuleWithSuggestions' pkgs (moduleNameProvidersMap pkgs) -- | The package which the module **appears** to come from, this could be -- the one which reexports the module from it's original package. This function -- is currently only used for -Wunused-packages lookupModulePackage :: UnitState -> ModuleName -> PkgQual -> Maybe [UnitInfo] lookupModulePackage pkgs mn mfs = case lookupModuleWithSuggestions' pkgs (moduleNameProvidersMap pkgs) mn mfs of LookupFound _ (orig_unit, origin) -> case origin of ModOrigin {fromOrigUnit, fromExposedReexport} -> case fromOrigUnit of -- Just True means, the import is available from its original location Just True -> pure [orig_unit] -- Otherwise, it must be available from a reexport _ -> pure fromExposedReexport _ -> Nothing _ -> Nothing lookupPluginModuleWithSuggestions :: UnitState -> ModuleName -> PkgQual -> LookupResult lookupPluginModuleWithSuggestions pkgs = lookupModuleWithSuggestions' pkgs (pluginModuleNameProvidersMap pkgs) lookupModuleWithSuggestions' :: UnitState -> ModuleNameProvidersMap -> ModuleName -> PkgQual -> LookupResult lookupModuleWithSuggestions' pkgs mod_map m mb_pn = case lookupUniqMap mod_map m of Nothing -> LookupNotFound suggestions Just xs -> case foldl' classify ([],[],[], []) (sortOn fst $ nonDetUniqMapToList xs) of ([], [], [], []) -> LookupNotFound suggestions (_, _, _, [(m, o)]) -> LookupFound m 
(mod_unit m, o) (_, _, _, exposed@(_:_)) -> LookupMultiple exposed ([], [], unusable@(_:_), []) -> LookupUnusable unusable (hidden_pkg, hidden_mod, _, []) -> LookupHidden hidden_pkg hidden_mod where classify (hidden_pkg, hidden_mod, unusable, exposed) (m, origin0) = let origin = filterOrigin mb_pn (mod_unit m) origin0 x = (m, origin) in case origin of ModHidden -> (hidden_pkg, x:hidden_mod, unusable, exposed) ModUnusable _ -> (hidden_pkg, hidden_mod, x:unusable, exposed) _ | originEmpty origin -> (hidden_pkg, hidden_mod, unusable, exposed) | originVisible origin -> (hidden_pkg, hidden_mod, unusable, x:exposed) | otherwise -> (x:hidden_pkg, hidden_mod, unusable, exposed) unit_lookup p = lookupUnit pkgs p `orElse` pprPanic "lookupModuleWithSuggestions" (ppr p <+> ppr m) mod_unit = unit_lookup . moduleUnit -- Filters out origins which are not associated with the given package -- qualifier. No-op if there is no package qualifier. Test if this -- excluded all origins with 'originEmpty'. filterOrigin :: PkgQual -> UnitInfo -> ModuleOrigin -> ModuleOrigin filterOrigin NoPkgQual _ o = o filterOrigin (ThisPkg _) _ o = o filterOrigin (OtherPkg u) pkg o = let match_pkg p = u == unitId p in case o of ModHidden | match_pkg pkg -> ModHidden | otherwise -> mempty ModUnusable _ | match_pkg pkg -> o | otherwise -> mempty ModOrigin { fromOrigUnit = e, fromExposedReexport = res, fromHiddenReexport = rhs } -> ModOrigin { fromOrigUnit = if match_pkg pkg then e else Nothing , fromExposedReexport = filter match_pkg res , fromHiddenReexport = filter match_pkg rhs , fromPackageFlag = False -- always excluded } suggestions = fuzzyLookup (moduleNameString m) all_mods all_mods :: [(String, ModuleSuggestion)] -- All modules all_mods = sortBy (comparing fst) $ [ (moduleNameString m, suggestion) | (m, e) <- nonDetUniqMapToList (moduleNameProvidersMap pkgs) , suggestion <- map (getSuggestion m) (nonDetUniqMapToList e) ] getSuggestion name (mod, origin) = (if originVisible origin then 
SuggestVisible else SuggestHidden) name mod origin listVisibleModuleNames :: UnitState -> [ModuleName] listVisibleModuleNames state = map fst (filter visible (nonDetUniqMapToList (moduleNameProvidersMap state))) where visible (_, ms) = anyUniqMap originVisible ms -- | Takes a list of UnitIds (and their "parent" dependency, used for error -- messages), and returns the list with dependencies included, in reverse -- dependency order (a units appears before those it depends on). closeUnitDeps :: UnitInfoMap -> [(UnitId,Maybe UnitId)] -> MaybeErr UnitErr [UnitId] closeUnitDeps pkg_map ps = closeUnitDeps' pkg_map [] ps -- | Similar to closeUnitDeps but takes a list of already loaded units as an -- additional argument. closeUnitDeps' :: UnitInfoMap -> [UnitId] -> [(UnitId,Maybe UnitId)] -> MaybeErr UnitErr [UnitId] closeUnitDeps' pkg_map current_ids ps = foldM (uncurry . add_unit pkg_map) current_ids ps -- | Add a UnitId and those it depends on (recursively) to the given list of -- UnitIds if they are not already in it. Return a list in reverse dependency -- order (a unit appears before those it depends on). -- -- The UnitId is looked up in the given UnitInfoMap (to find its dependencies). -- It it's not found, the optional parent unit is used to return a more precise -- error message ("dependency of "). 
add_unit :: UnitInfoMap -> [UnitId] -> UnitId -> Maybe UnitId -> MaybeErr UnitErr [UnitId] add_unit pkg_map ps p mb_parent | p `elem` ps = return ps -- Check if we've already added this unit | otherwise = case lookupUnitId' pkg_map p of Nothing -> Failed (CloseUnitErr p mb_parent) Just info -> do -- Add the unit's dependents also ps' <- foldM add_unit_key ps (unitDepends info) return (p : ps') where add_unit_key xs key = add_unit pkg_map xs key (Just p) data UnitErr = CloseUnitErr !UnitId !(Maybe UnitId) | PackageFlagErr !PackageFlag ![(UnitInfo,UnusableUnitReason)] | TrustFlagErr !TrustFlag ![(UnitInfo,UnusableUnitReason)] mayThrowUnitErr :: MaybeErr UnitErr a -> IO a mayThrowUnitErr = \case Failed e -> throwGhcExceptionIO $ CmdLineError $ renderWithContext defaultSDocContext $ withPprStyle defaultUserStyle $ ppr e Succeeded a -> return a instance Outputable UnitErr where ppr = \case CloseUnitErr p mb_parent -> (text "unknown unit:" <+> ppr p) <> case mb_parent of Nothing -> Outputable.empty Just parent -> space <> parens (text "dependency of" <+> ftext (unitIdFS parent)) PackageFlagErr flag reasons -> flag_err (pprFlag flag) reasons TrustFlagErr flag reasons -> flag_err (pprTrustFlag flag) reasons where flag_err flag_doc reasons = text "cannot satisfy " <> flag_doc <> (if null reasons then Outputable.empty else text ": ") $$ nest 4 (vcat (map ppr_reason reasons) $$ text "(use -v for more information)") ppr_reason (p, reason) = pprReason (ppr (unitId p) <+> text "is") reason -- | Return this list of requirement interfaces that need to be merged -- to form @mod_name@, or @[]@ if this is not a requirement. requirementMerges :: UnitState -> ModuleName -> [InstantiatedModule] requirementMerges pkgstate mod_name = fromMaybe [] (lookupUniqMap (requirementContext pkgstate) mod_name) -- ----------------------------------------------------------------------------- -- | Pretty-print a UnitId for the user. 
-- -- Cabal packages may contain several components (programs, libraries, etc.). -- As far as GHC is concerned, installed package components ("units") are -- identified by an opaque UnitId string provided by Cabal. As the string -- contains a hash, we don't want to display it to users so GHC queries the -- database to retrieve some infos about the original source package (name, -- version, component name). -- -- Instead we want to display: packagename-version[:componentname] -- -- Component name is only displayed if it isn't the default library -- -- To do this we need to query a unit database. pprUnitIdForUser :: UnitState -> UnitId -> SDoc pprUnitIdForUser state uid@(UnitId fs) = case lookupUnitPprInfo state uid of Nothing -> ftext fs -- we didn't find the unit at all Just i -> ppr i pprUnitInfoForUser :: UnitInfo -> SDoc pprUnitInfoForUser info = ppr (mkUnitPprInfo unitIdFS info) lookupUnitPprInfo :: UnitState -> UnitId -> Maybe UnitPprInfo lookupUnitPprInfo state uid = fmap (mkUnitPprInfo unitIdFS) (lookupUnitId state uid) -- ----------------------------------------------------------------------------- -- Displaying packages -- | Show (very verbose) package info pprUnits :: UnitState -> SDoc pprUnits = pprUnitsWith pprUnitInfo pprUnitsWith :: (UnitInfo -> SDoc) -> UnitState -> SDoc pprUnitsWith pprIPI pkgstate = vcat (intersperse (text "---") (map pprIPI (listUnitInfo pkgstate))) -- | Show simplified unit info. -- -- The idea is to only print package id, and any information that might -- be different from the package databases (exposure, trust) pprUnitsSimple :: UnitState -> SDoc pprUnitsSimple = pprUnitsWith pprIPI where pprIPI ipi = let i = unitIdFS (unitId ipi) e = if unitIsExposed ipi then text "E" else text " " t = if unitIsTrusted ipi then text "T" else text " " in e <> t <> text " " <> ftext i -- | Show the mapping of modules to where they come from. 
pprModuleMap :: ModuleNameProvidersMap -> SDoc pprModuleMap mod_map = vcat (map pprLine (nonDetUniqMapToList mod_map)) where pprLine (m,e) = ppr m $$ nest 50 (vcat (map (pprEntry m) (nonDetUniqMapToList e))) pprEntry :: Outputable a => ModuleName -> (Module, a) -> SDoc pprEntry m (m',o) | m == moduleName m' = ppr (moduleUnit m') <+> parens (ppr o) | otherwise = ppr m' <+> parens (ppr o) fsPackageName :: UnitInfo -> FastString fsPackageName info = fs where PackageName fs = unitPackageName info -- | Given a fully instantiated 'InstantiatedUnit', improve it into a -- 'RealUnit' if we can find it in the package database. improveUnit :: UnitState -> Unit -> Unit improveUnit state u = improveUnit' (unitInfoMap state) (preloadClosure state) u -- | Given a fully instantiated 'InstantiatedUnit', improve it into a -- 'RealUnit' if we can find it in the package database. improveUnit' :: UnitInfoMap -> PreloadUnitClosure -> Unit -> Unit improveUnit' _ _ uid@(RealUnit _) = uid -- short circuit improveUnit' pkg_map closure uid = -- Do NOT lookup indefinite ones, they won't be useful! case lookupUnit' False pkg_map closure uid of Nothing -> uid Just pkg -> -- Do NOT improve if the indefinite unit id is not -- part of the closure unique set. See -- Note [VirtUnit to RealUnit improvement] if unitId pkg `elementOfUniqSet` closure then mkUnit pkg else uid -- | Check the database to see if we already have an installed unit that -- corresponds to the given 'InstantiatedUnit'. -- -- Return a `UnitId` which either wraps the `InstantiatedUnit` unchanged or -- references a matching installed unit. -- -- See Note [VirtUnit to RealUnit improvement] instUnitToUnit :: UnitState -> InstantiatedUnit -> Unit instUnitToUnit state iuid = -- NB: suppose that we want to compare the instantiated -- unit p[H=impl:H] against p+abcd (where p+abcd -- happens to be the existing, installed version of -- p[H=impl:H]. 
If we *only* wrap in p[H=impl:H] -- VirtUnit, they won't compare equal; only -- after improvement will the equality hold. improveUnit state $ VirtUnit iuid -- | Substitution on module variables, mapping module names to module -- identifiers. type ShHoleSubst = ModuleNameEnv Module -- | Substitutes holes in a 'Module'. NOT suitable for being called -- directly on a 'nameModule', see Note [Representation of module/name variables]. -- @p[A=\]:B@ maps to @p[A=q():A]:B@ with @A=q():A@; -- similarly, @\@ maps to @q():A@. renameHoleModule :: UnitState -> ShHoleSubst -> Module -> Module renameHoleModule state = renameHoleModule' (unitInfoMap state) (preloadClosure state) -- | Substitutes holes in a 'Unit', suitable for renaming when -- an include occurs; see Note [Representation of module/name variables]. -- -- @p[A=\]@ maps to @p[A=\]@ with @A=\@. renameHoleUnit :: UnitState -> ShHoleSubst -> Unit -> Unit renameHoleUnit state = renameHoleUnit' (unitInfoMap state) (preloadClosure state) -- | Like 'renameHoleModule', but requires only 'ClosureUnitInfoMap' -- so it can be used by "GHC.Unit.State". renameHoleModule' :: UnitInfoMap -> PreloadUnitClosure -> ShHoleSubst -> Module -> Module renameHoleModule' pkg_map closure env m | not (isHoleModule m) = let uid = renameHoleUnit' pkg_map closure env (moduleUnit m) in mkModule uid (moduleName m) | Just m' <- lookupUFM env (moduleName m) = m' -- NB m = , that's what's in scope. | otherwise = m -- | Like 'renameHoleUnit, but requires only 'ClosureUnitInfoMap' -- so it can be used by "GHC.Unit.State". 
renameHoleUnit' :: UnitInfoMap -> PreloadUnitClosure -> ShHoleSubst -> Unit -> Unit renameHoleUnit' pkg_map closure env uid = case uid of (VirtUnit InstantiatedUnit{ instUnitInstanceOf = cid , instUnitInsts = insts , instUnitHoles = fh }) -> if isNullUFM (intersectUFM_C const (udfmToUfm (getUniqDSet fh)) env) then uid -- Functorially apply the substitution to the instantiation, -- then check the 'ClosureUnitInfoMap' to see if there is -- a compiled version of this 'InstantiatedUnit' we can improve to. -- See Note [VirtUnit to RealUnit improvement] else improveUnit' pkg_map closure $ mkVirtUnit cid (map (\(k,v) -> (k, renameHoleModule' pkg_map closure env v)) insts) _ -> uid -- | Injects an 'InstantiatedModule' to 'Module' (see also -- 'instUnitToUnit'. instModuleToModule :: UnitState -> InstantiatedModule -> Module instModuleToModule pkgstate (Module iuid mod_name) = mkModule (instUnitToUnit pkgstate iuid) mod_name -- | Print unit-ids with UnitInfo found in the given UnitState pprWithUnitState :: UnitState -> SDoc -> SDoc pprWithUnitState state = updSDocContext (\ctx -> ctx { sdocUnitIdForUser = \fs -> pprUnitIdForUser state (UnitId fs) }) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Types.hs0000644000000000000000000006263707346545000020303 0ustar0000000000000000{-# OPTIONS_GHC -Wno-orphans #-} -- instance Binary IsBootInterface {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} -- | Unit & Module types -- -- This module is used to resolve the loops between Unit and Module types -- (Module references a Unit and vice-versa). module GHC.Unit.Types ( -- * Modules GenModule (..) , Module , InstalledModule , HomeUnitModule , InstantiatedModule , mkModule , moduleUnitId , pprModule , pprInstantiatedModule , moduleFreeHoles -- * Units , IsUnitId , GenUnit (..) , Unit , UnitId (..) , UnitKey (..) 
, GenInstantiatedUnit (..) , InstantiatedUnit , DefUnitId , Instantiations , GenInstantiations , mkInstantiatedUnit , mkInstantiatedUnitHash , mkVirtUnit , mapGenUnit , mapInstantiations , unitFreeModuleHoles , fsToUnit , unitFS , unitString , toUnitId , virtualUnitId , stringToUnit , stableUnitCmp , unitIsDefinite , isHoleUnit , pprUnit -- * Unit Ids , unitIdString , stringToUnitId -- * Utils , Definite (..) -- * Wired-in units , primUnitId , bignumUnitId , ghcInternalUnitId , rtsUnitId , mainUnitId , thisGhcUnitId , interactiveUnitId , primUnit , bignumUnit , ghcInternalUnit , rtsUnit , mainUnit , thisGhcUnit , interactiveUnit , isInteractiveModule , wiredInUnitIds -- * Boot modules , IsBootInterface (..) , GenWithIsBoot (..) , ModuleNameWithIsBoot , ModuleWithIsBoot , InstalledModuleWithIsBoot , notBoot ) where import GHC.Prelude import GHC.Types.Unique import GHC.Types.Unique.DSet import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Encoding import GHC.Utils.Fingerprint import GHC.Utils.Misc import GHC.Settings.Config (cProjectUnitId) import Control.DeepSeq (NFData(..)) import Data.Data import Data.List (sortBy) import Data.Function import Data.Bifunctor import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BS.Char8 import Language.Haskell.Syntax.Module.Name import {-# SOURCE #-} Language.Haskell.Syntax.ImpExp (IsBootInterface(..)) --------------------------------------------------------------------- -- MODULES --------------------------------------------------------------------- -- | A generic module is a pair of a unit identifier and a 'ModuleName'. data GenModule unit = Module { moduleUnit :: !unit -- ^ Unit the module belongs to , moduleName :: !ModuleName -- ^ Module name (e.g. A.B.C) } deriving (Eq,Ord,Data,Functor) instance Data ModuleName where -- don't traverse? 
toConstr _ = abstractConstr "ModuleName" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "ModuleName" -- | A Module is a pair of a 'Unit' and a 'ModuleName'. type Module = GenModule Unit moduleUnitId :: Module -> UnitId moduleUnitId = toUnitId . moduleUnit -- | A 'InstalledModule' is a 'Module' whose unit is identified with an -- 'UnitId'. type InstalledModule = GenModule UnitId -- | A 'HomeUnitModule' is like an 'InstalledModule' but we expect to find it in -- one of the home units rather than the package database. type HomeUnitModule = GenModule UnitId -- | An `InstantiatedModule` is a 'Module' whose unit is identified with an `InstantiatedUnit`. type InstantiatedModule = GenModule InstantiatedUnit mkModule :: u -> ModuleName -> GenModule u mkModule = Module instance Uniquable Module where getUnique (Module p n) = getUnique (unitFS p `appendFS` moduleNameFS n) instance Binary a => Binary (GenModule a) where put_ bh (Module p n) = put_ bh p >> put_ bh n -- Module has strict fields, so use $! in order not to allocate a thunk get bh = do p <- get bh; n <- get bh; return $! 
Module p n instance NFData (GenModule a) where rnf (Module unit name) = unit `seq` name `seq` () instance Outputable Module where ppr = pprModule instance Outputable InstalledModule where ppr (Module p n) = ppr p <> char ':' <> pprModuleName n instance Outputable InstantiatedModule where ppr = pprInstantiatedModule instance Outputable InstantiatedUnit where ppr = pprInstantiatedUnit pprInstantiatedUnit :: InstantiatedUnit -> SDoc pprInstantiatedUnit uid = -- getPprStyle $ \sty -> pprUnitId cid <> (if not (null insts) -- pprIf then brackets (hcat (punctuate comma $ [ pprModuleName modname <> text "=" <> pprModule m | (modname, m) <- insts])) else empty) where cid = instUnitInstanceOf uid insts = instUnitInsts uid -- | Class for types that are used as unit identifiers (UnitKey, UnitId, Unit) -- -- We need this class because we create new unit ids for virtual units (see -- VirtUnit) and they have to to be made from units with different kinds of -- identifiers. class IsUnitId u where unitFS :: u -> FastString instance IsUnitId UnitKey where unitFS (UnitKey fs) = fs instance IsUnitId UnitId where unitFS (UnitId fs) = fs instance IsUnitId u => IsUnitId (GenUnit u) where unitFS (VirtUnit x) = instUnitFS x unitFS (RealUnit (Definite x)) = unitFS x unitFS HoleUnit = holeFS pprModule :: IsLine doc => Module -> doc pprModule mod@(Module p n) = docWithStyle code doc where code = (if p == mainUnit then empty -- never qualify the main package in code else ztext (zEncodeFS (unitFS p)) <> char '_') <> pprModuleName n doc sty | qualModule sty mod = case p of HoleUnit -> angleBrackets (pprModuleName n) _ -> pprUnit p <> char ':' <> pprModuleName n | otherwise = pprModuleName n {-# SPECIALIZE pprModule :: Module -> SDoc #-} {-# SPECIALIZE pprModule :: Module -> HLine #-} -- see Note [SPECIALIZE to HDoc] in GHC.Utils.Outputable pprInstantiatedModule :: InstantiatedModule -> SDoc pprInstantiatedModule (Module uid m) = ppr uid <> char ':' <> ppr m 
--------------------------------------------------------------------- -- UNITS --------------------------------------------------------------------- -- | A unit key in the database newtype UnitKey = UnitKey FastString -- | A unit identifier identifies a (possibly partially) instantiated library. -- It is primarily used as part of 'Module', which in turn is used in 'Name', -- which is used to give names to entities when typechecking. -- -- There are two possible forms for a 'Unit': -- -- 1) It can be a 'RealUnit', in which case we just have a 'DefUnitId' that -- uniquely identifies some fully compiled, installed library we have on disk. -- -- 2) It can be an 'VirtUnit'. When we are typechecking a library with missing -- holes, we may need to instantiate a library on the fly (in which case we -- don't have any on-disk representation.) In that case, you have an -- 'InstantiatedUnit', which explicitly records the instantiation, so that we -- can substitute over it. data GenUnit uid = RealUnit !(Definite uid) -- ^ Installed definite unit (either a fully instantiated unit or a closed unit) | VirtUnit {-# UNPACK #-} !(GenInstantiatedUnit uid) -- ^ Virtual unit instantiated on-the-fly. It may be definite if all the -- holes are instantiated but we don't have code objects for it. | HoleUnit -- ^ Fake hole unit -- | An instantiated unit. -- -- It identifies an indefinite library (with holes) that has been instantiated. -- -- This unit may be indefinite or not (i.e. with remaining holes or not). If it -- is definite, we don't know if it has already been compiled and installed in a -- database. Nevertheless, we have a mechanism called "improvement" to try to -- match a fully instantiated unit with existing compiled and installed units: -- see Note [VirtUnit to RealUnit improvement]. -- -- An indefinite unit identifier pretty-prints to something like -- @p[H=,A=aimpl:A>]@ (@p@ is the 'UnitId', and the -- brackets enclose the module substitution). 
data GenInstantiatedUnit unit = InstantiatedUnit { -- | A private, uniquely identifying representation of -- an InstantiatedUnit. This string is completely private to GHC -- and is just used to get a unique. instUnitFS :: !FastString, -- | Cached unique of 'unitFS'. instUnitKey :: !Unique, -- | The (indefinite) unit being instantiated. instUnitInstanceOf :: !unit, -- | The sorted (by 'ModuleName') instantiations of this unit. instUnitInsts :: !(GenInstantiations unit), -- | A cache of the free module holes of 'instUnitInsts'. -- This lets us efficiently tell if a 'InstantiatedUnit' has been -- fully instantiated (empty set of free module holes) -- and whether or not a substitution can have any effect. instUnitHoles :: UniqDSet ModuleName } type Unit = GenUnit UnitId type InstantiatedUnit = GenInstantiatedUnit UnitId type GenInstantiations unit = [(ModuleName,GenModule (GenUnit unit))] type Instantiations = GenInstantiations UnitId holeUnique :: Unique holeUnique = getUnique holeFS holeFS :: FastString holeFS = fsLit "" isHoleUnit :: GenUnit u -> Bool isHoleUnit HoleUnit = True isHoleUnit _ = False instance Eq (GenInstantiatedUnit unit) where u1 == u2 = instUnitKey u1 == instUnitKey u2 instance Ord (GenInstantiatedUnit unit) where u1 `compare` u2 = instUnitFS u1 `lexicalCompareFS` instUnitFS u2 instance Binary InstantiatedUnit where put_ bh indef = do put_ bh (instUnitInstanceOf indef) put_ bh (instUnitInsts indef) get bh = do cid <- get bh insts <- get bh let fs = mkInstantiatedUnitHash cid insts -- InstantiatedUnit has strict fields, so use $! in order not to allocate a thunk return $! 
InstantiatedUnit { instUnitInstanceOf = cid, instUnitInsts = insts, instUnitHoles = unionManyUniqDSets (map (moduleFreeHoles.snd) insts), instUnitFS = fs, instUnitKey = getUnique fs } instance IsUnitId u => Eq (GenUnit u) where uid1 == uid2 = unitUnique uid1 == unitUnique uid2 instance IsUnitId u => Uniquable (GenUnit u) where getUnique = unitUnique instance Ord Unit where nm1 `compare` nm2 = stableUnitCmp nm1 nm2 instance Data Unit where -- don't traverse? toConstr _ = abstractConstr "Unit" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Unit" instance NFData Unit where rnf x = x `seq` () -- | Compares unit ids lexically, rather than by their 'Unique's stableUnitCmp :: Unit -> Unit -> Ordering stableUnitCmp p1 p2 = unitFS p1 `lexicalCompareFS` unitFS p2 instance Outputable Unit where ppr pk = pprUnit pk pprUnit :: Unit -> SDoc pprUnit (RealUnit (Definite d)) = pprUnitId d pprUnit (VirtUnit uid) = pprInstantiatedUnit uid pprUnit HoleUnit = ftext holeFS instance Show Unit where show = unitString -- Performance: would prefer to have a NameCache like thing instance Binary Unit where put_ bh (RealUnit def_uid) = do putByte bh 0 put_ bh def_uid put_ bh (VirtUnit indef_uid) = do putByte bh 1 put_ bh indef_uid put_ bh HoleUnit = putByte bh 2 get bh = do b <- getByte bh u <- case b of 0 -> fmap RealUnit (get bh) 1 -> fmap VirtUnit (get bh) _ -> pure HoleUnit -- Unit has strict fields that need forcing; otherwise we allocate a thunk. pure $! u -- | Retrieve the set of free module holes of a 'Unit'. unitFreeModuleHoles :: GenUnit u -> UniqDSet ModuleName unitFreeModuleHoles (VirtUnit x) = instUnitHoles x unitFreeModuleHoles (RealUnit _) = emptyUniqDSet unitFreeModuleHoles HoleUnit = emptyUniqDSet -- | Calculate the free holes of a 'Module'. If this set is non-empty, -- this module was defined in an indefinite library that had required -- signatures. 
-- -- If a module has free holes, that means that substitutions can operate on it; -- if it has no free holes, substituting over a module has no effect. moduleFreeHoles :: GenModule (GenUnit u) -> UniqDSet ModuleName moduleFreeHoles (Module HoleUnit name) = unitUniqDSet name moduleFreeHoles (Module u _ ) = unitFreeModuleHoles u -- | Create a new 'GenInstantiatedUnit' given an explicit module substitution. mkInstantiatedUnit :: IsUnitId u => u -> GenInstantiations u -> GenInstantiatedUnit u mkInstantiatedUnit cid insts = InstantiatedUnit { instUnitInstanceOf = cid, instUnitInsts = sorted_insts, instUnitHoles = unionManyUniqDSets (map (moduleFreeHoles.snd) insts), instUnitFS = fs, instUnitKey = getUnique fs } where fs = mkInstantiatedUnitHash cid sorted_insts sorted_insts = sortBy (stableModuleNameCmp `on` fst) insts -- | Smart constructor for instantiated GenUnit mkVirtUnit :: IsUnitId u => u -> [(ModuleName, GenModule (GenUnit u))] -> GenUnit u mkVirtUnit uid [] = RealUnit $ Definite uid mkVirtUnit uid insts = VirtUnit $ mkInstantiatedUnit uid insts -- | Generate a uniquely identifying hash (internal unit-id) for an instantiated -- unit. -- -- This is a one-way function. If the indefinite unit has not been instantiated at all, we return its unit-id. -- -- This hash is completely internal to GHC and is not used for symbol names or -- file paths. It is different from the hash Cabal would produce for the same -- instantiated unit. mkInstantiatedUnitHash :: IsUnitId u => u -> [(ModuleName, GenModule (GenUnit u))] -> FastString mkInstantiatedUnitHash cid sorted_holes = mkFastStringByteString . fingerprintUnitId (bytesFS (unitFS cid)) $ hashInstantiations sorted_holes -- | Generate a hash for a sorted module instantiation. hashInstantiations :: IsUnitId u => [(ModuleName, GenModule (GenUnit u))] -> Fingerprint hashInstantiations sorted_holes = fingerprintByteString . 
BS.concat $ do (m, b) <- sorted_holes [ bytesFS (moduleNameFS m), BS.Char8.singleton ' ', bytesFS (unitFS (moduleUnit b)), BS.Char8.singleton ':', bytesFS (moduleNameFS (moduleName b)), BS.Char8.singleton '\n'] fingerprintUnitId :: BS.ByteString -> Fingerprint -> BS.ByteString fingerprintUnitId prefix (Fingerprint a b) = BS.concat $ [ prefix , BS.Char8.singleton '-' , BS.Char8.pack (toBase62Padded a) , BS.Char8.pack (toBase62Padded b) ] unitUnique :: IsUnitId u => GenUnit u -> Unique unitUnique (VirtUnit x) = instUnitKey x unitUnique (RealUnit (Definite x)) = getUnique (unitFS x) unitUnique HoleUnit = holeUnique -- | Create a new simple unit identifier from a 'FastString'. Internally, -- this is primarily used to specify wired-in unit identifiers. fsToUnit :: FastString -> Unit fsToUnit = RealUnit . Definite . UnitId unitString :: IsUnitId u => u -> String unitString = unpackFS . unitFS stringToUnit :: String -> Unit stringToUnit = fsToUnit . mkFastString -- | Map over the unit type of a 'GenUnit' mapGenUnit :: IsUnitId v => (u -> v) -> GenUnit u -> GenUnit v mapGenUnit f = go where go gu = case gu of HoleUnit -> HoleUnit RealUnit d -> RealUnit (fmap f d) VirtUnit i -> VirtUnit $ mkInstantiatedUnit (f (instUnitInstanceOf i)) (fmap (second (fmap go)) (instUnitInsts i)) -- | Map over the unit identifier of unit instantiations. mapInstantiations :: IsUnitId v => (u -> v) -> GenInstantiations u -> GenInstantiations v mapInstantiations f = map (second (fmap (mapGenUnit f))) -- | Return the UnitId of the Unit. For on-the-fly instantiated units, return -- the UnitId of the indefinite unit this unit is an instance of. toUnitId :: Unit -> UnitId toUnitId (RealUnit (Definite iuid)) = iuid toUnitId (VirtUnit indef) = instUnitInstanceOf indef toUnitId HoleUnit = error "Hole unit" -- | Return the virtual UnitId of an on-the-fly instantiated unit. 
virtualUnitId :: InstantiatedUnit -> UnitId virtualUnitId i = UnitId (instUnitFS i) -- | A 'Unit' is definite if it has no free holes. unitIsDefinite :: Unit -> Bool unitIsDefinite = isEmptyUniqDSet . unitFreeModuleHoles --------------------------------------------------------------------- -- UNIT IDs --------------------------------------------------------------------- -- | A UnitId identifies a built library in a database and is used to generate -- unique symbols, etc. It's usually of the form: -- -- pkgname-1.2:libname+hash -- -- These UnitId are provided to us via the @-this-unit-id@ flag. -- -- The library in question may be definite or indefinite; if it is indefinite, -- none of the holes have been filled (we never install partially instantiated -- libraries as we can cheaply instantiate them on-the-fly, cf VirtUnit). Put -- another way, an installed unit id is either fully instantiated, or not -- instantiated at all. newtype UnitId = UnitId { unitIdFS :: FastString -- ^ The full hashed unit identifier, including the component id -- and the hash. } deriving (Data) instance Binary UnitId where put_ bh (UnitId fs) = put_ bh fs get bh = do fs <- get bh; return (UnitId fs) instance Eq UnitId where uid1 == uid2 = getUnique uid1 == getUnique uid2 instance Ord UnitId where -- we compare lexically to avoid non-deterministic output when sets of -- unit-ids are printed (dependencies, etc.) u1 `compare` u2 = unitIdFS u1 `lexicalCompareFS` unitIdFS u2 instance Uniquable UnitId where getUnique = getUnique . unitIdFS instance Outputable UnitId where ppr = pprUnitId pprUnitId :: UnitId -> SDoc pprUnitId (UnitId fs) = sdocOption sdocUnitIdForUser ($ fs) -- | A 'DefUnitId' is an 'UnitId' with the invariant that -- it only refers to a definite library; i.e., one we have generated -- code for. type DefUnitId = Definite UnitId unitIdString :: UnitId -> String unitIdString = unpackFS . unitIdFS stringToUnitId :: String -> UnitId stringToUnitId = UnitId . 
mkFastString --------------------------------------------------------------------- -- UTILS --------------------------------------------------------------------- -- | A definite unit (i.e. without any free module hole) newtype Definite unit = Definite { unDefinite :: unit } deriving (Functor) deriving newtype (Eq, Ord, Outputable, Binary, Uniquable, IsUnitId) --------------------------------------------------------------------- -- WIRED-IN UNITS --------------------------------------------------------------------- {- Note [Wired-in units] ~~~~~~~~~~~~~~~~~~~~~ Certain packages are known to the compiler, in that we know about certain entities that reside in these packages, and the compiler needs to declare static Modules and Names that refer to these packages. Hence the wired-in packages can't include version numbers in their package UnitId, since we don't want to bake the version numbers of these packages into GHC. So here's the plan. Wired-in units are still versioned as normal in the packages database, and you can still have multiple versions of them installed. To the user, everything looks normal. However, for each invocation of GHC, only a single instance of each wired-in package will be recognised (the desired one is selected via @-package@\/@-hide-package@), and GHC will internally pretend that it has the *unversioned* 'UnitId', including in .hi files and object file symbols. Unselected versions of wired-in packages will be ignored, as will any other package that depends directly or indirectly on it (much as if you had used @-ignore-package@). The affected packages are compiled with, e.g., @-this-unit-id base@, so that the symbols in the object files have the unversioned unit id in their name. Make sure you change 'GHC.Unit.State.findWiredInUnits' if you add an entry here. 
-} bignumUnitId, primUnitId, ghcInternalUnitId, rtsUnitId, mainUnitId, thisGhcUnitId, interactiveUnitId :: UnitId bignumUnit, primUnit, ghcInternalUnit, rtsUnit, mainUnit, thisGhcUnit, interactiveUnit :: Unit primUnitId = UnitId (fsLit "ghc-prim") bignumUnitId = UnitId (fsLit "ghc-bignum") ghcInternalUnitId = UnitId (fsLit "ghc-internal") rtsUnitId = UnitId (fsLit "rts") thisGhcUnitId = UnitId (fsLit cProjectUnitId) -- See Note [GHC's Unit Id] interactiveUnitId = UnitId (fsLit "interactive") primUnit = RealUnit (Definite primUnitId) bignumUnit = RealUnit (Definite bignumUnitId) ghcInternalUnit = RealUnit (Definite ghcInternalUnitId) rtsUnit = RealUnit (Definite rtsUnitId) thisGhcUnit = RealUnit (Definite thisGhcUnitId) interactiveUnit = RealUnit (Definite interactiveUnitId) -- | This is the package Id for the current program. It is the default -- package Id if you don't specify a package name. We don't add this prefix -- to symbol names, since there can be only one main package per program. mainUnitId = UnitId (fsLit "main") mainUnit = RealUnit (Definite mainUnitId) isInteractiveModule :: Module -> Bool isInteractiveModule mod = moduleUnit mod == interactiveUnit wiredInUnitIds :: [UnitId] wiredInUnitIds = [ primUnitId , bignumUnitId , ghcInternalUnitId , rtsUnitId ] -- NB: ghc is no longer part of the wired-in units since its unit-id, given -- by hadrian or cabal, is no longer overwritten and now matches both the -- cProjectUnitId defined in build-time-generated module GHC.Version, and -- the unit key. -- -- See also Note [About units], taking into consideration ghc is still a -- wired-in unit but whose unit-id no longer needs special handling because -- we take care that it matches the unit key. {- Note [GHC's Unit Id] ~~~~~~~~~~~~~~~~~~~~ Previously, the unit-id of ghc-the-library was fixed as `ghc`. This was done primarily because the compiler must know the unit-id of some packages (including ghc) a-priori to define wired-in names. 
However, as seen in #20742, a reinstallable `ghc` whose unit-id is fixed to `ghc` might result in subtle bugs when different ghc's interact. A good example of this is having GHC_A load a plugin compiled by GHC_B, where GHC_A and GHC_B are linked to ghc-libraries that are ABI incompatible. Without a distinction between the unit-id of the ghc library GHC_A is linked against and the ghc library the plugin it is loading was compiled against, we can't check compatibility. Now, we give a better unit-id to ghc (`ghc-version-hash`) by (1) Not setting -this-unit-id fixed to `ghc` in `ghc.cabal`, but rather by having (1.1) Hadrian pass the new unit-id with -this-unit-id for stage0-1 (1.2) Cabal pass the unit-id it computes to ghc, which it already does by default (2) Adding a definition to `GHC.Settings.Config` whose value is the new unit-id. This is crucial to define the wired-in name of the GHC unit (`thisGhcUnitId`) which *must* match the value of the -this-unit-id flag. (Where `GHC.Settings.Config` is a module generated by the build system which, be it either hadrian or cabal, knows exactly the unit-id it passed with -this-unit-id) Note that we also ensure the ghc's unit key matches its unit id, both when hadrian or cabal is building ghc. This way, we no longer need to add `ghc` to the WiringMap, and that's why 'wiredInUnitIds' no longer includes 'thisGhcUnitId'. -} --------------------------------------------------------------------- -- Boot Modules --------------------------------------------------------------------- -- Note [Boot Module Naming] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- Why is this section here? After all, these modules are supposed to be about -- ways of referring to modules, not modules themselves. Well, the "bootness" of -- a module is in a way part of its name, because 'import {-# SOURCE #-} Foo' -- references the boot module in particular while 'import Foo' references the -- regular module. 
Backpack signatures live in the normal module namespace (no -- special import), so they don't matter here. When dealing with the modules -- themselves, however, one should use not 'IsBoot' or conflate signatures and -- modules in opposition to boot interfaces. Instead, one should use -- 'DriverPhases.HscSource'. See Note [HscSource types]. instance Binary IsBootInterface where put_ bh ib = put_ bh $ case ib of NotBoot -> False IsBoot -> True get bh = do b <- get bh return $ case b of False -> NotBoot True -> IsBoot -- | This data type just pairs a value 'mod' with an IsBootInterface flag. In -- practice, 'mod' is usually a @Module@ or @ModuleName@'. data GenWithIsBoot mod = GWIB { gwib_mod :: mod , gwib_isBoot :: IsBootInterface } deriving ( Eq, Ord, Show , Functor, Foldable, Traversable ) -- the Ord instance must ensure that we first sort by Module and then by -- IsBootInterface: this is assumed to perform filtering of non-boot modules, -- e.g. in GHC.Driver.Env.hptModulesBelow type ModuleNameWithIsBoot = GenWithIsBoot ModuleName type ModuleWithIsBoot = GenWithIsBoot Module type InstalledModuleWithIsBoot = GenWithIsBoot InstalledModule instance Binary a => Binary (GenWithIsBoot a) where put_ bh (GWIB { gwib_mod, gwib_isBoot }) = do put_ bh gwib_mod put_ bh gwib_isBoot get bh = do gwib_mod <- get bh gwib_isBoot <- get bh pure $ GWIB { gwib_mod, gwib_isBoot } instance Outputable a => Outputable (GenWithIsBoot a) where ppr (GWIB { gwib_mod, gwib_isBoot }) = hsep $ ppr gwib_mod : case gwib_isBoot of IsBoot -> [ text "{-# SOURCE #-}" ] NotBoot -> [] notBoot :: mod -> GenWithIsBoot mod notBoot gwib_mod = GWIB {gwib_mod, gwib_isBoot = NotBoot} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Unit/Types.hs-boot0000644000000000000000000000064207346545000021230 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} module GHC.Unit.Types where -- No Prelude. 
See Note [Exporting pprTrace from GHC.Prelude] import Language.Haskell.Syntax.Module.Name (ModuleName) import Data.Kind (Type) data UnitId data GenModule (unit :: Type) data GenUnit (uid :: Type) type Module = GenModule Unit type Unit = GenUnit UnitId moduleName :: GenModule a -> ModuleName moduleUnit :: GenModule a -> a ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/0000755000000000000000000000000007346545000017006 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Binary.hs0000644000000000000000000021613707346545000020600 0ustar0000000000000000 {-# LANGUAGE CPP #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE UnboxedTuples #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- -- (c) The University of Glasgow 2002-2006 -- -- Binary I/O library, with special tweaks for GHC -- -- Based on the nhc98 Binary library, which is copyright -- (c) Malcolm Wallace and Colin Runciman, University of York, 1998. 
-- Under the terms of the license for that software, we must tell you -- where you can obtain the original version of the Binary library, namely -- http://www.cs.york.ac.uk/fp/nhc98/ module GHC.Utils.Binary ( {-type-} Bin, RelBin(..), getRelBin, {-class-} Binary(..), {-type-} ReadBinHandle, WriteBinHandle, SymbolTable, Dictionary, BinData(..), dataHandle, handleData, unsafeUnpackBinBuffer, openBinMem, -- closeBin, seekBinWriter, seekBinReader, seekBinReaderRel, tellBinReader, tellBinWriter, castBin, withBinBuffer, freezeWriteHandle, shrinkBinBuffer, thawReadHandle, foldGet, foldGet', writeBinMem, readBinMem, readBinMemN, putAt, getAt, putAtRel, forwardPut, forwardPut_, forwardGet, forwardPutRel, forwardPutRel_, forwardGetRel, -- * For writing instances putByte, getByte, putByteString, getByteString, -- * Variable length encodings putULEB128, getULEB128, putSLEB128, getSLEB128, -- * Fixed length encoding FixedLengthEncoding(..), -- * Lazy Binary I/O lazyGet, lazyPut, lazyGet', lazyPut', lazyGetMaybe, lazyPutMaybe, -- * User data ReaderUserData, getReaderUserData, setReaderUserData, noReaderUserData, WriterUserData, getWriterUserData, setWriterUserData, noWriterUserData, mkWriterUserData, mkReaderUserData, newReadState, newWriteState, addReaderToUserData, addWriterToUserData, findUserDataReader, findUserDataWriter, -- * Binary Readers & Writers BinaryReader(..), BinaryWriter(..), mkWriter, mkReader, SomeBinaryReader, SomeBinaryWriter, mkSomeBinaryReader, mkSomeBinaryWriter, -- * Tables ReaderTable(..), WriterTable(..), -- * String table ("dictionary") initFastStringReaderTable, initFastStringWriterTable, putDictionary, getDictionary, putFS, FSTable(..), getDictFastString, putDictFastString, -- * Generic deduplication table GenericSymbolTable(..), initGenericSymbolTable, getGenericSymtab, putGenericSymTab, getGenericSymbolTable, putGenericSymbolTable, -- * Newtype wrappers BinSpan(..), BinSrcSpan(..), BinLocated(..), -- * Newtypes for types that have canonically more 
than one valid encoding BindingName(..), simpleBindingNameWriter, simpleBindingNameReader, FullBinData(..), freezeBinHandle, thawBinHandle, putFullBinData, BinArray, ) where import GHC.Prelude import Language.Haskell.Syntax.Module.Name (ModuleName(..)) import {-# SOURCE #-} GHC.Types.Name (Name) import GHC.Data.FastString import GHC.Data.TrieMap import GHC.Utils.Panic.Plain import GHC.Types.Unique.FM import GHC.Data.FastMutInt import GHC.Utils.Fingerprint import GHC.Types.SrcLoc import GHC.Types.Unique import qualified GHC.Data.Strict as Strict import GHC.Utils.Outputable( JoinPointHood(..) ) import Control.DeepSeq import Control.Monad ( when, (<$!>), unless, forM_, void ) import Foreign hiding (bit, setBit, clearBit, shiftL, shiftR, void) import Data.Array import Data.Array.IO import Data.Array.Unsafe import Data.ByteString (ByteString, copy) import Data.Coerce import qualified Data.ByteString.Internal as BS import qualified Data.ByteString.Unsafe as BS import Data.IORef import Data.Char ( ord, chr ) import Data.List.NonEmpty ( NonEmpty(..)) import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Proxy import Data.Set ( Set ) import qualified Data.Set as Set import Data.Time import Data.List (unfoldr) import System.IO as IO import System.IO.Unsafe ( unsafeInterleaveIO ) import System.IO.Error ( mkIOError, eofErrorType ) import Type.Reflection ( Typeable, SomeTypeRep(..) ) import qualified Type.Reflection as Refl import GHC.Real ( Ratio(..) 
) import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap #if MIN_VERSION_base(4,15,0) import GHC.ForeignPtr ( unsafeWithForeignPtr ) #endif import Unsafe.Coerce (unsafeCoerce) type BinArray = ForeignPtr Word8 #if !MIN_VERSION_base(4,15,0) unsafeWithForeignPtr :: ForeignPtr a -> (Ptr a -> IO b) -> IO b unsafeWithForeignPtr = withForeignPtr #endif --------------------------------------------------------------- -- BinData --------------------------------------------------------------- data BinData = BinData Int BinArray instance NFData BinData where rnf (BinData sz _) = rnf sz instance Binary BinData where put_ bh (BinData sz dat) = do put_ bh sz putPrim bh sz $ \dest -> unsafeWithForeignPtr dat $ \orig -> copyBytes dest orig sz -- get bh = do sz <- get bh dat <- mallocForeignPtrBytes sz getPrim bh sz $ \orig -> unsafeWithForeignPtr dat $ \dest -> copyBytes dest orig sz return (BinData sz dat) dataHandle :: BinData -> IO ReadBinHandle dataHandle (BinData size bin) = do ixr <- newFastMutInt 0 return (ReadBinMem noReaderUserData ixr size bin) handleData :: WriteBinHandle -> IO BinData handleData (WriteBinMem _ ixr _ binr) = BinData <$> readFastMutInt ixr <*> readIORef binr --------------------------------------------------------------- -- FullBinData --------------------------------------------------------------- -- | 'FullBinData' stores a slice to a 'BinArray'. -- -- It requires less memory than 'ReadBinHandle', and can be constructed from -- a 'ReadBinHandle' via 'freezeBinHandle' and turned back into a -- 'ReadBinHandle' using 'thawBinHandle'. -- Additionally, the byte array slice can be put into a 'WriteBinHandle' without extra -- conversions via 'putFullBinData'. data FullBinData = FullBinData { fbd_readerUserData :: ReaderUserData -- ^ 'ReaderUserData' that can be used to resume reading. 
, fbd_off_s :: {-# UNPACK #-} !Int -- ^ start offset , fbd_off_e :: {-# UNPACK #-} !Int -- ^ end offset , fbd_size :: {-# UNPACK #-} !Int -- ^ total buffer size , fbd_buffer :: {-# UNPACK #-} !BinArray } -- Equality and Ord assume that two distinct buffers are different, even if they compare the same things. instance Eq FullBinData where (FullBinData _ b c d e) == (FullBinData _ b1 c1 d1 e1) = b == b1 && c == c1 && d == d1 && e == e1 instance Ord FullBinData where compare (FullBinData _ b c d e) (FullBinData _ b1 c1 d1 e1) = compare b b1 `mappend` compare c c1 `mappend` compare d d1 `mappend` compare e e1 -- | Write the 'FullBinData' slice into the 'WriteBinHandle'. putFullBinData :: WriteBinHandle -> FullBinData -> IO () putFullBinData bh (FullBinData _ o1 o2 _sz ba) = do let sz = o2 - o1 putPrim bh sz $ \dest -> unsafeWithForeignPtr (ba `plusForeignPtr` o1) $ \orig -> copyBytes dest orig sz -- | Freeze a 'ReadBinHandle' and a start index into a 'FullBinData'. -- -- 'FullBinData' stores a slice starting from the 'Bin a' location to the current -- offset of the 'ReadBinHandle'. freezeBinHandle :: ReadBinHandle -> Bin a -> IO FullBinData freezeBinHandle (ReadBinMem user_data ixr sz binr) (BinPtr start) = do ix <- readFastMutInt ixr pure (FullBinData user_data start ix sz binr) -- | Turn the 'FullBinData' into a 'ReadBinHandle', setting the 'ReadBinHandle' -- offset to the start of the 'FullBinData' and restore the 'ReaderUserData' that was -- obtained from 'freezeBinHandle'. thawBinHandle :: FullBinData -> IO ReadBinHandle thawBinHandle (FullBinData user_data ix _end sz ba) = do ixr <- newFastMutInt ix return $ ReadBinMem user_data ixr sz ba --------------------------------------------------------------- -- BinHandle --------------------------------------------------------------- -- | A write-only handle that can be used to serialise binary data into a buffer. -- -- The buffer is an unboxed binary array. 
data WriteBinHandle = WriteBinMem { wbm_userData :: WriterUserData, -- ^ User data for writing binary outputs. -- Allows users to overwrite certain 'Binary' instances. -- This is helpful when a non-canonical 'Binary' instance is required, -- such as in the case of 'Name'. wbm_off_r :: !FastMutInt, -- ^ the current offset wbm_sz_r :: !FastMutInt, -- ^ size of the array (cached) wbm_arr_r :: !(IORef BinArray) -- ^ the array (bounds: (0,size-1)) } -- | A read-only handle that can be used to deserialise binary data from a buffer. -- -- The buffer is an unboxed binary array. data ReadBinHandle = ReadBinMem { rbm_userData :: ReaderUserData, -- ^ User data for reading binary inputs. -- Allows users to overwrite certain 'Binary' instances. -- This is helpful when a non-canonical 'Binary' instance is required, -- such as in the case of 'Name'. rbm_off_r :: !FastMutInt, -- ^ the current offset rbm_sz_r :: !Int, -- ^ size of the array (cached) rbm_arr_r :: !BinArray -- ^ the array (bounds: (0,size-1)) } getReaderUserData :: ReadBinHandle -> ReaderUserData getReaderUserData bh = rbm_userData bh getWriterUserData :: WriteBinHandle -> WriterUserData getWriterUserData bh = wbm_userData bh setWriterUserData :: WriteBinHandle -> WriterUserData -> WriteBinHandle setWriterUserData bh us = bh { wbm_userData = us } setReaderUserData :: ReadBinHandle -> ReaderUserData -> ReadBinHandle setReaderUserData bh us = bh { rbm_userData = us } -- | Add 'SomeBinaryReader' as a known binary decoder. -- If a 'BinaryReader' for the associated type already exists in 'ReaderUserData', -- it is overwritten. addReaderToUserData :: forall a. Typeable a => BinaryReader a -> ReadBinHandle -> ReadBinHandle addReaderToUserData reader bh = bh { rbm_userData = (rbm_userData bh) { ud_reader_data = let typRep = Refl.typeRep @a in Map.insert (SomeTypeRep typRep) (SomeBinaryReader typRep reader) (ud_reader_data (rbm_userData bh)) } } -- | Add 'SomeBinaryWriter' as a known binary encoder. 
-- If a 'BinaryWriter' for the associated type already exists in 'WriterUserData', -- it is overwritten. addWriterToUserData :: forall a . Typeable a => BinaryWriter a -> WriteBinHandle -> WriteBinHandle addWriterToUserData writer bh = bh { wbm_userData = (wbm_userData bh) { ud_writer_data = let typRep = Refl.typeRep @a in Map.insert (SomeTypeRep typRep) (SomeBinaryWriter typRep writer) (ud_writer_data (wbm_userData bh)) } } -- | Get access to the underlying buffer. withBinBuffer :: WriteBinHandle -> (ByteString -> IO a) -> IO a withBinBuffer (WriteBinMem _ ix_r _ arr_r) action = do ix <- readFastMutInt ix_r arr <- readIORef arr_r action $ BS.fromForeignPtr arr 0 ix unsafeUnpackBinBuffer :: ByteString -> IO ReadBinHandle unsafeUnpackBinBuffer (BS.BS arr len) = do ix_r <- newFastMutInt 0 return (ReadBinMem noReaderUserData ix_r len arr) --------------------------------------------------------------- -- Bin --------------------------------------------------------------- newtype Bin a = BinPtr Int deriving (Eq, Ord, Show, Bounded) -- | Like a 'Bin' but is used to store relative offset pointers. -- Relative offset pointers store a relative location, but also contain an -- anchor that allow to obtain the absolute offset. data RelBin a = RelBin { relBin_anchor :: {-# UNPACK #-} !(Bin a) -- ^ Absolute position from where we read 'relBin_offset'. , relBin_offset :: {-# UNPACK #-} !(RelBinPtr a) -- ^ Relative offset to 'relBin_anchor'. -- The absolute position of the 'RelBin' is @relBin_anchor + relBin_offset@ } deriving (Eq, Ord, Show, Bounded) -- | A 'RelBinPtr' is like a 'Bin', but contains a relative offset pointer -- instead of an absolute offset. newtype RelBinPtr a = RelBinPtr (Bin a) deriving (Eq, Ord, Show, Bounded) castBin :: Bin a -> Bin b castBin (BinPtr i) = BinPtr i -- | Read a relative offset location and wrap it in 'RelBin'. 
-- -- The resulting 'RelBin' can be translated into an absolute offset location using -- 'makeAbsoluteBin' getRelBin :: ReadBinHandle -> IO (RelBin a) getRelBin bh = do start <- tellBinReader bh off <- get bh pure $ RelBin start off makeAbsoluteBin :: RelBin a -> Bin a makeAbsoluteBin (RelBin (BinPtr !start) (RelBinPtr (BinPtr !offset))) = BinPtr $ start + offset makeRelativeBin :: RelBin a -> RelBinPtr a makeRelativeBin (RelBin _ offset) = offset toRelBin :: Bin (RelBinPtr a) -> Bin a -> RelBin a toRelBin (BinPtr !start) (BinPtr !goal) = RelBin (BinPtr start) (RelBinPtr $ BinPtr $ goal - start) --------------------------------------------------------------- -- class Binary --------------------------------------------------------------- -- | Do not rely on instance sizes for general types, -- we use variable length encoding for many of them. class Binary a where put_ :: WriteBinHandle -> a -> IO () put :: WriteBinHandle -> a -> IO (Bin a) get :: ReadBinHandle -> IO a -- define one of put_, put. Use of put_ is recommended because it -- is more likely that tail-calls can kick in, and we rarely need the -- position return value. 
put_ bh a = do _ <- put bh a; return () put bh a = do p <- tellBinWriter bh; put_ bh a; return p putAt :: Binary a => WriteBinHandle -> Bin a -> a -> IO () putAt bh p x = do seekBinWriter bh p; put_ bh x; return () putAtRel :: WriteBinHandle -> Bin (RelBinPtr a) -> Bin a -> IO () putAtRel bh from to = putAt bh from (makeRelativeBin $ toRelBin from to) getAt :: Binary a => ReadBinHandle -> Bin a -> IO a getAt bh p = do seekBinReader bh p; get bh openBinMem :: Int -> IO WriteBinHandle openBinMem size | size <= 0 = error "GHC.Utils.Binary.openBinMem: size must be >= 0" | otherwise = do arr <- mallocForeignPtrBytes size arr_r <- newIORef arr ix_r <- newFastMutInt 0 sz_r <- newFastMutInt size return WriteBinMem { wbm_userData = noWriterUserData , wbm_off_r = ix_r , wbm_sz_r = sz_r , wbm_arr_r = arr_r } -- | Freeze the given 'WriteBinHandle' and turn it into an equivalent 'ReadBinHandle'. -- -- The current offset of the 'WriteBinHandle' is maintained in the new 'ReadBinHandle'. freezeWriteHandle :: WriteBinHandle -> IO ReadBinHandle freezeWriteHandle wbm = do rbm_off_r <- newFastMutInt =<< readFastMutInt (wbm_off_r wbm) rbm_sz_r <- readFastMutInt (wbm_sz_r wbm) rbm_arr_r <- readIORef (wbm_arr_r wbm) pure $ ReadBinMem { rbm_userData = noReaderUserData , rbm_off_r = rbm_off_r , rbm_sz_r = rbm_sz_r , rbm_arr_r = rbm_arr_r } -- | Copy the BinBuffer to a new BinBuffer which is exactly the right size. -- This performs a copy of the underlying buffer. -- The buffer may be truncated if the offset is not at the end of the written -- output. -- -- UserData is also discarded during the copy -- You should just use this when translating a Put handle into a Get handle. 
shrinkBinBuffer :: WriteBinHandle -> IO ReadBinHandle shrinkBinBuffer bh = withBinBuffer bh $ \bs -> do unsafeUnpackBinBuffer (copy bs) thawReadHandle :: ReadBinHandle -> IO WriteBinHandle thawReadHandle rbm = do wbm_off_r <- newFastMutInt =<< readFastMutInt (rbm_off_r rbm) wbm_sz_r <- newFastMutInt (rbm_sz_r rbm) wbm_arr_r <- newIORef (rbm_arr_r rbm) pure $ WriteBinMem { wbm_userData = noWriterUserData , wbm_off_r = wbm_off_r , wbm_sz_r = wbm_sz_r , wbm_arr_r = wbm_arr_r } tellBinWriter :: WriteBinHandle -> IO (Bin a) tellBinWriter (WriteBinMem _ r _ _) = do ix <- readFastMutInt r; return (BinPtr ix) tellBinReader :: ReadBinHandle -> IO (Bin a) tellBinReader (ReadBinMem _ r _ _) = do ix <- readFastMutInt r; return (BinPtr ix) seekBinWriter :: WriteBinHandle -> Bin a -> IO () seekBinWriter h@(WriteBinMem _ ix_r sz_r _) (BinPtr !p) = do sz <- readFastMutInt sz_r if (p > sz) then do expandBin h p; writeFastMutInt ix_r p else writeFastMutInt ix_r p -- | 'seekBinNoExpandWriter' moves the index pointer to the location pointed to -- by 'Bin a'. -- This operation may 'panic', if the pointer location is out of bounds of the -- buffer of 'BinHandle'. 
seekBinNoExpandWriter :: WriteBinHandle -> Bin a -> IO () seekBinNoExpandWriter (WriteBinMem _ ix_r sz_r _) (BinPtr !p) = do sz <- readFastMutInt sz_r if (p > sz) then panic "seekBinNoExpandWriter: seek out of range" else writeFastMutInt ix_r p -- | SeekBin but without calling expandBin seekBinReader :: ReadBinHandle -> Bin a -> IO () seekBinReader (ReadBinMem _ ix_r sz_r _) (BinPtr !p) = do if (p > sz_r) then panic "seekBinReader: seek out of range" else writeFastMutInt ix_r p seekBinReaderRel :: ReadBinHandle -> RelBin a -> IO () seekBinReaderRel (ReadBinMem _ ix_r sz_r _) relBin = do let (BinPtr !p) = makeAbsoluteBin relBin if (p > sz_r) then panic "seekBinReaderRel: seek out of range" else writeFastMutInt ix_r p writeBinMem :: WriteBinHandle -> FilePath -> IO () writeBinMem (WriteBinMem _ ix_r _ arr_r) fn = do h <- openBinaryFile fn WriteMode arr <- readIORef arr_r ix <- readFastMutInt ix_r unsafeWithForeignPtr arr $ \p -> hPutBuf h p ix hClose h readBinMem :: FilePath -> IO ReadBinHandle readBinMem filename = do withBinaryFile filename ReadMode $ \h -> do filesize' <- hFileSize h let filesize = fromIntegral filesize' readBinMem_ filesize h readBinMemN :: Int -> FilePath -> IO (Maybe ReadBinHandle) readBinMemN size filename = do withBinaryFile filename ReadMode $ \h -> do filesize' <- hFileSize h let filesize = fromIntegral filesize' if filesize < size then pure Nothing else Just <$> readBinMem_ size h readBinMem_ :: Int -> Handle -> IO ReadBinHandle readBinMem_ filesize h = do arr <- mallocForeignPtrBytes filesize count <- unsafeWithForeignPtr arr $ \p -> hGetBuf h p filesize when (count /= filesize) $ error ("Binary.readBinMem: only read " ++ show count ++ " bytes") ix_r <- newFastMutInt 0 return ReadBinMem { rbm_userData = noReaderUserData , rbm_off_r = ix_r , rbm_sz_r = filesize , rbm_arr_r = arr } -- expand the size of the array to include a specified offset expandBin :: WriteBinHandle -> Int -> IO () expandBin (WriteBinMem _ _ sz_r arr_r) !off = do !sz <- 
-- NOTE(review): the next few lines are the tail of a buffer-growing function
-- whose header lies above this chunk; they are reproduced unchanged.
-- They double the buffer size until it covers @off@, copy the old contents
-- into a fresh ForeignPtr, and publish the new size and buffer.
  readFastMutInt sz_r
  let !sz' = getSize sz
  arr <- readIORef arr_r
  arr' <- mallocForeignPtrBytes sz'
  withForeignPtr arr $ \old ->
    withForeignPtr arr' $ \new ->
      copyBytes new old sz
  writeFastMutInt sz_r sz'
  writeIORef arr_r arr'
  where
    -- Smallest power-of-two-ish growth of sz that exceeds off.
    getSize :: Int -> Int
    getSize !sz
      | sz > off = sz
      | otherwise = getSize (sz * 2)

-- | Deserialise @n@ elements with 'get' and fold them left-to-right into an
-- accumulator via the supplied step function (which also receives the index).
foldGet
  :: Binary a
  => Word                     -- n elements
  -> ReadBinHandle
  -> b                        -- initial accumulator
  -> (Word -> a -> b -> IO b)
  -> IO b
foldGet n bh init_b f = go 0 init_b
  where
    go i b
      | i == n = return b
      | otherwise = do
          a <- get bh
          b' <- f i a b
          go (i+1) b'

-- | Strict variant of 'foldGet': forces each deserialised element and the
-- accumulator at every step.
foldGet'
  :: Binary a
  => Word                     -- n elements
  -> ReadBinHandle
  -> b                        -- initial accumulator
  -> (Word -> a -> b -> IO b)
  -> IO b
{-# INLINE foldGet' #-}
foldGet' n bh init_b f = go 0 init_b
  where
    go i !b
      | i == n = return b
      | otherwise = do
          !a <- get bh
          b' <- f i a b
          go (i+1) b'

-- -----------------------------------------------------------------------------
-- Low-level reading/writing of bytes

-- | Takes a size and action writing up to @size@ bytes.
-- After the action has run advance the index to the buffer
-- by size bytes.
putPrim :: WriteBinHandle -> Int -> (Ptr Word8 -> IO ()) -> IO ()
putPrim h@(WriteBinMem _ ix_r sz_r arr_r) size f = do
  ix <- readFastMutInt ix_r
  sz <- readFastMutInt sz_r
  -- Grow the buffer first if the write would run past the end.
  when (ix + size > sz) $
    expandBin h (ix + size)
  arr <- readIORef arr_r
  unsafeWithForeignPtr arr $ \op -> f (op `plusPtr` ix)
  writeFastMutInt ix_r (ix + size)

-- -- | Similar to putPrim but advances the index by the actual number of
-- -- bytes written.
-- putPrimMax :: BinHandle -> Int -> (Ptr Word8 -> IO Int) -> IO ()
-- putPrimMax h@(BinMem _ ix_r sz_r arr_r) size f = do
--   ix <- readFastMutInt ix_r
--   sz <- readFastMutInt sz_r
--   when (ix + size > sz) $
--     expandBin h (ix + size)
--   arr <- readIORef arr_r
--   written <- withForeignPtr arr $ \op -> f (op `plusPtr` ix)
--   writeFastMutInt ix_r (ix + written)

-- | Run an action over @size@ bytes starting at the current read index and
-- advance the index past them. Fails with an EOF 'IOError' if fewer than
-- @size@ bytes remain. Note that the read handle's size and buffer fields
-- are plain (immutable) values, unlike the write handle's.
getPrim :: ReadBinHandle -> Int -> (Ptr Word8 -> IO a) -> IO a
getPrim (ReadBinMem _ ix_r sz_r arr_r) size f = do
  ix <- readFastMutInt ix_r
  when (ix + size > sz_r) $
    ioError (mkIOError eofErrorType "Data.Binary.getPrim" Nothing Nothing)
  w <- unsafeWithForeignPtr arr_r $ \p -> f (p `plusPtr` ix)
  -- This is safe WRT #17760 as we guarantee that the above line doesn't
  -- diverge
  writeFastMutInt ix_r (ix + size)
  return w

putWord8 :: WriteBinHandle -> Word8 -> IO ()
putWord8 h !w = putPrim h 1 (\op -> poke op w)

getWord8 :: ReadBinHandle -> IO Word8
getWord8 h = getPrim h 1 peek

-- Multi-byte words are written big-endian (most significant byte first),
-- independent of host byte order.
putWord16 :: WriteBinHandle -> Word16 -> IO ()
putWord16 h w = putPrim h 2 (\op -> do
  pokeElemOff op 0 (fromIntegral (w `shiftR` 8))
  pokeElemOff op 1 (fromIntegral (w .&. 0xFF))
  )

getWord16 :: ReadBinHandle -> IO Word16
getWord16 h = getPrim h 2 (\op -> do
  w0 <- fromIntegral <$> peekElemOff op 0
  w1 <- fromIntegral <$> peekElemOff op 1
  return $! w0 `shiftL` 8 .|. w1
  )

putWord32 :: WriteBinHandle -> Word32 -> IO ()
putWord32 h w = putPrim h 4 (\op -> do
  pokeElemOff op 0 (fromIntegral (w `shiftR` 24))
  pokeElemOff op 1 (fromIntegral ((w `shiftR` 16) .&. 0xFF))
  pokeElemOff op 2 (fromIntegral ((w `shiftR` 8) .&. 0xFF))
  pokeElemOff op 3 (fromIntegral (w .&. 0xFF))
  )

getWord32 :: ReadBinHandle -> IO Word32
getWord32 h = getPrim h 4 (\op -> do
  w0 <- fromIntegral <$> peekElemOff op 0
  w1 <- fromIntegral <$> peekElemOff op 1
  w2 <- fromIntegral <$> peekElemOff op 2
  w3 <- fromIntegral <$> peekElemOff op 3
  return $! (w0 `shiftL` 24) .|.
            (w1 `shiftL` 16) .|.
            (w2 `shiftL` 8)  .|.
            w3
  )

putWord64 :: WriteBinHandle -> Word64 -> IO ()
putWord64 h w = putPrim h 8 (\op -> do
  pokeElemOff op 0 (fromIntegral (w `shiftR` 56))
  pokeElemOff op 1 (fromIntegral ((w `shiftR` 48) .&. 0xFF))
  pokeElemOff op 2 (fromIntegral ((w `shiftR` 40) .&. 0xFF))
  pokeElemOff op 3 (fromIntegral ((w `shiftR` 32) .&. 0xFF))
  pokeElemOff op 4 (fromIntegral ((w `shiftR` 24) .&. 0xFF))
  pokeElemOff op 5 (fromIntegral ((w `shiftR` 16) .&. 0xFF))
  pokeElemOff op 6 (fromIntegral ((w `shiftR` 8) .&. 0xFF))
  pokeElemOff op 7 (fromIntegral (w .&. 0xFF))
  )

getWord64 :: ReadBinHandle -> IO Word64
getWord64 h = getPrim h 8 (\op -> do
  w0 <- fromIntegral <$> peekElemOff op 0
  w1 <- fromIntegral <$> peekElemOff op 1
  w2 <- fromIntegral <$> peekElemOff op 2
  w3 <- fromIntegral <$> peekElemOff op 3
  w4 <- fromIntegral <$> peekElemOff op 4
  w5 <- fromIntegral <$> peekElemOff op 5
  w6 <- fromIntegral <$> peekElemOff op 6
  w7 <- fromIntegral <$> peekElemOff op 7
  return $! (w0 `shiftL` 56) .|.
            (w1 `shiftL` 48) .|.
            (w2 `shiftL` 40) .|.
            (w3 `shiftL` 32) .|.
            (w4 `shiftL` 24) .|.
            (w5 `shiftL` 16) .|.
            (w6 `shiftL` 8)  .|.
            w7
  )

putByte :: WriteBinHandle -> Word8 -> IO ()
putByte bh !w = putWord8 bh w

getByte :: ReadBinHandle -> IO Word8
getByte h = getWord8 h

-- -----------------------------------------------------------------------------
-- Encode numbers in LEB128 encoding.
-- Requires one byte of space per 7 bits of data.
--
-- There are signed and unsigned variants.
-- Do NOT use the unsigned one for signed values, at worst it will
-- result in wrong results, at best it will lead to bad performance
-- when coercing negative values to an unsigned type.
--
-- We mark them as SPECIALIZE as it's extremely critical that they get
-- specialized to their specific types.
--
-- TODO: Each use of putByte performs a bounds check,
-- we should use putPrimMax here. However it's quite hard to return
-- the number of bytes written into putPrimMax without allocating an
-- Int for it, while the code below does not allocate at all.
-- So we eat the cost of the bounds check instead of increasing allocations
-- for now.

-- Unsigned numbers
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Word -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Word64 -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Word32 -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Word16 -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Int -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Int64 -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Int32 -> IO () #-}
{-# SPECIALISE putULEB128 :: WriteBinHandle -> Int16 -> IO () #-}
putULEB128 :: forall a. (Integral a, FiniteBits a) => WriteBinHandle -> a -> IO ()
putULEB128 bh w =
#if defined(DEBUG)
    (if w < 0 then panic "putULEB128: Signed number" else id) $
#endif
    go w
  where
    go :: a -> IO ()
    go w
      | w <= (127 :: a)
      = putByte bh (fromIntegral w :: Word8)
      | otherwise = do
          -- bit 7 (8th bit) indicates more to come.
          let !byte = setBit (fromIntegral w) 7 :: Word8
          putByte bh byte
          go (w `unsafeShiftR` 7)

{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Word #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Word64 #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Word32 #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Word16 #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Int #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Int64 #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Int32 #-}
{-# SPECIALISE getULEB128 :: ReadBinHandle -> IO Int16 #-}
getULEB128 :: forall a. (Integral a, FiniteBits a) => ReadBinHandle -> IO a
getULEB128 bh =
    go 0 0
  where
    -- Accumulate 7-bit groups LSB first; bit 7 of each byte is the
    -- continuation flag.
    go :: Int -> a -> IO a
    go shift w = do
      b <- getByte bh
      let !hasMore = testBit b 7
      let !val = w .|. ((clearBit (fromIntegral b) 7) `unsafeShiftL` shift) :: a
      if hasMore
        then do
          go (shift+7) val
        else
          return $! val

-- Signed numbers
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Word -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Word64 -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Word32 -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Word16 -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Int -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Int64 -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Int32 -> IO () #-}
{-# SPECIALISE putSLEB128 :: WriteBinHandle -> Int16 -> IO () #-}
putSLEB128 :: forall a. (Integral a, Bits a) => WriteBinHandle -> a -> IO ()
putSLEB128 bh initial = go initial
  where
    go :: a -> IO ()
    go val = do
      let !byte = fromIntegral (clearBit val 7) :: Word8
      let !val' = val `unsafeShiftR` 7
      let !signBit = testBit byte 6
      let !done =
            -- Unsigned value, val' == 0 and last value can
            -- be discriminated from a negative number.
            ((val' == 0 && not signBit) ||
            -- Signed value,
             (val' == -1 && signBit))
      let !byte' = if done then byte else setBit byte 7
      putByte bh byte'
      unless done $ go val'

{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Word #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Word64 #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Word32 #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Word16 #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Int #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Int64 #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Int32 #-}
{-# SPECIALISE getSLEB128 :: ReadBinHandle -> IO Int16 #-}
getSLEB128 :: forall a. (Show a, Integral a, FiniteBits a) => ReadBinHandle -> IO a
getSLEB128 bh = do
    (val,shift,signed) <- go 0 0
    -- Sign-extend when the final byte's bit 6 is set and the value does not
    -- already fill the full bit width.
    if signed && (shift < finiteBitSize val )
      then return $! ((complement 0 `unsafeShiftL` shift) .|. val)
      else return val
  where
    go :: Int -> a -> IO (a,Int,Bool)
    go shift val = do
      byte <- getByte bh
      let !byteVal = fromIntegral (clearBit byte 7) :: a
      let !val' = val .|. (byteVal `unsafeShiftL` shift)
      let !more = testBit byte 7
      let !shift' = shift+7
      if more
        then go (shift') val'
        else do
          let !signed = testBit byte 6
          return (val',shift',signed)

-- -----------------------------------------------------------------------------
-- Fixed length encoding instances

-- Sometimes words are used to represent a certain bit pattern instead
-- of a number. Using FixedLengthEncoding we will write the pattern as
-- is to the interface file without the variable length encoding we usually
-- apply.

-- | Encode the argument in its full length. This is different from many default
-- binary instances which make no guarantee about the actual encoding and
-- might do things using variable length encoding.
newtype FixedLengthEncoding a
  = FixedLengthEncoding { unFixedLength :: a }
  deriving (Eq,Ord,Show)

instance Binary (FixedLengthEncoding Word8) where
  put_ h (FixedLengthEncoding x) = putByte h x
  get h = FixedLengthEncoding <$> getByte h

instance Binary (FixedLengthEncoding Word16) where
  put_ h (FixedLengthEncoding x) = putWord16 h x
  get h = FixedLengthEncoding <$> getWord16 h

instance Binary (FixedLengthEncoding Word32) where
  put_ h (FixedLengthEncoding x) = putWord32 h x
  get h = FixedLengthEncoding <$> getWord32 h

instance Binary (FixedLengthEncoding Word64) where
  put_ h (FixedLengthEncoding x) = putWord64 h x
  get h = FixedLengthEncoding <$> getWord64 h

-- -----------------------------------------------------------------------------
-- Primitive Word writes

-- Word8 is stored raw; wider word types use unsigned LEB128.
instance Binary Word8 where
  put_ bh !w = putWord8 bh w
  get  = getWord8

instance Binary Word16 where
  put_ = putULEB128
  get  = getULEB128

instance Binary Word32 where
  put_ = putULEB128
  get  = getULEB128

instance Binary Word64 where
  put_ = putULEB128
  get  = getULEB128

-- -----------------------------------------------------------------------------
-- Primitive Int writes

-- Int8 round-trips through Word8; wider int types use signed LEB128.
instance Binary Int8 where
  put_ h w = put_ h (fromIntegral w :: Word8)
  get h    = do w <- get h; return $! (fromIntegral (w::Word8))

instance Binary Int16 where
  put_ = putSLEB128
  get = getSLEB128

instance Binary Int32 where
  put_ = putSLEB128
  get = getSLEB128

instance Binary Int64 where
  put_ h w = putSLEB128 h w
  get h    = getSLEB128 h

-- -----------------------------------------------------------------------------
-- Instances for standard types

instance Binary () where
  put_ _ () = return ()
  get  _    = return ()

instance Binary Bool where
  put_ bh b = putByte bh (fromIntegral (fromEnum b))
  get bh    = do x <- getWord8 bh; return $! (toEnum (fromIntegral x))

instance Binary Char where
  put_ bh c = put_ bh (fromIntegral (ord c) :: Word32)
  get bh    = do x <- get bh; return $! (chr (fromIntegral (x :: Word32)))

instance Binary Int where
  put_ bh i = put_ bh (fromIntegral i :: Int64)
  get bh = do
    x <- get bh
    return $! (fromIntegral (x :: Int64))

instance Binary a => Binary [a] where
  put_ bh l = do
    let len = length l
    put_ bh len
    mapM_ (put_ bh) l
  get bh = do
    len <- get bh :: IO Int -- Int is variable length encoded so only
                            -- one byte for small lists.
    let loop 0 = return []
        loop n = do a <- get bh; as <- loop (n-1); return (a:as)
    loop len

-- | This instance doesn't rely on the determinism of the keys' 'Ord' instance,
-- so it works e.g. for 'Name's too.
instance (Binary a, Ord a) => Binary (Set a) where
  put_ bh s = put_ bh (Set.toList s)
  get bh = Set.fromList <$> get bh

instance Binary a => Binary (NonEmpty a) where
  put_ bh = put_ bh . NonEmpty.toList
  get bh = NonEmpty.fromList <$> get bh

instance (Ix a, Binary a, Binary b) => Binary (Array a b) where
  put_ bh arr = do
    put_ bh $ bounds arr
    put_ bh $ elems arr
  get bh = do
    bounds <- get bh
    xs <- get bh
    return $ listArray bounds xs

instance (Binary a, Binary b) => Binary (a,b) where
  put_ bh (a,b) = do put_ bh a; put_ bh b
  get bh = do
    a <- get bh
    b <- get bh
    return (a,b)

instance (Binary a, Binary b, Binary c) => Binary (a,b,c) where
  put_ bh (a,b,c) = do put_ bh a; put_ bh b; put_ bh c
  get bh = do
    a <- get bh
    b <- get bh
    c <- get bh
    return (a,b,c)

instance (Binary a, Binary b, Binary c, Binary d) => Binary (a,b,c,d) where
  put_ bh (a,b,c,d) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d
  get bh = do
    a <- get bh
    b <- get bh
    c <- get bh
    d <- get bh
    return (a,b,c,d)

instance (Binary a, Binary b, Binary c, Binary d, Binary e)
      => Binary (a,b,c,d, e) where
  put_ bh (a,b,c,d, e) = do
    put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e;
  get bh = do
    a <- get bh
    b <- get bh
    c <- get bh
    d <- get bh
    e <- get bh
    return (a,b,c,d,e)

instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f)
      => Binary (a,b,c,d, e, f) where
  put_ bh (a,b,c,d, e, f) = do
    put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e; put_ bh f;
  get bh = do
    a <- get bh
    b <- get bh
    c <- get bh
    d <- get bh
    e <- get bh
    f <- get bh
    return (a,b,c,d,e,f)

instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f, Binary g)
      => Binary (a,b,c,d,e,f,g) where
  put_ bh (a,b,c,d,e,f,g) = do
    put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e; put_ bh f; put_ bh g
  get bh = do
    a <- get bh
    b <- get bh
    c <- get bh
    d <- get bh
    e <- get bh
    f <- get bh
    g <- get bh
    return (a,b,c,d,e,f,g)

-- Tag byte 0 = Nothing, 1 = Just.
instance Binary a => Binary (Maybe a) where
  put_ bh Nothing  = putByte bh 0
  put_ bh (Just a) = do putByte bh 1; put_ bh a
  get bh = do
    h <- getWord8 bh
    case h of
      0 -> return Nothing
      _ -> do x <- get bh; return (Just x)

instance Binary a => Binary (Strict.Maybe a) where
  put_ bh Strict.Nothing  = putByte bh 0
  put_ bh (Strict.Just a) = do putByte bh 1; put_ bh a
  get bh = do
    h <- getWord8 bh
    case h of
      0 -> return Strict.Nothing
      _ -> do x <- get bh; return (Strict.Just x)

-- Tag byte 0 = Left, 1 = Right.
instance (Binary a, Binary b) => Binary (Either a b) where
  put_ bh (Left  a) = do putByte bh 0; put_ bh a
  put_ bh (Right b) = do putByte bh 1; put_ bh b
  get bh = do
    h <- getWord8 bh
    case h of
      0 -> do a <- get bh ; return (Left a)
      _ -> do b <- get bh ; return (Right b)

instance Binary UTCTime where
  put_ bh u = do
    put_ bh (utctDay u)
    put_ bh (utctDayTime u)
  get bh = do
    day <- get bh
    dayTime <- get bh
    return $ UTCTime { utctDay = day, utctDayTime = dayTime }

instance Binary Day where
  put_ bh d = put_ bh (toModifiedJulianDay d)
  get bh = do
    i <- get bh
    return $ ModifiedJulianDay { toModifiedJulianDay = i }

instance Binary DiffTime where
  put_ bh dt = put_ bh (toRational dt)
  get bh = do
    r <- get bh
    return $ fromRational r

-- Tag byte 0 = NotJoinPoint, 1 = JoinPoint with its arity.
instance Binary JoinPointHood where
  put_ bh NotJoinPoint = putByte bh 0
  put_ bh (JoinPoint ar) = do
    putByte bh 1
    put_ bh ar
  get bh = do
    h <- getByte bh
    case h of
      0 -> return NotJoinPoint
      _ -> do { ar <- get bh; return (JoinPoint ar) }

{-
Finally - a reasonable portable Integer instance.

We used to encode values in the Int32 range as such,
falling back to a string of all things. In either case
we stored a tag byte to discriminate between the two cases.

This made some sense as it's highly portable but also not very
efficient.

However GHC stores a surprisingly large number of large Integer
values. In the examples looked at between 25% and 50% of Integers
serialized were outside of the Int32 range.

Consider a value like `2724268014499746065`, some sort of hash
actually generated by GHC.
In the old scheme this was encoded as a list of 19 chars. This
gave a size of 77 Bytes, one for the length of the list and 76
since we encode chars as Word32 as well.

We can easily do better. The new plan is:

* Start with a tag byte
  * 0 => Int64 (LEB128 encoded)
  * 1 => Negative large integer
  * 2 => Positive large integer
* Followed by the value:
  * Int64 is encoded as usual
  * Large integers are encoded as a list of bytes (Word8).
    We use Data.Bits which defines a bit order independent of the representation.
    Values are stored LSB first.

This means our example value `2724268014499746065` is now only 10 bytes large.
* One byte tag
* One byte for the length of the [Word8] list.
* 8 bytes for the actual date.

The new scheme also does not depend in any way on
architecture specific details.

We still use this scheme even with LEB128 available,
as it has less overhead for truly large numbers. (> maxBound :: Int64)

The instance is used for in Binary Integer and Binary Rational in GHC.Types.Literal
-}

instance Binary Integer where
  put_ bh i
    | i >= lo64 && i <= hi64 = do
        putWord8 bh 0
        put_ bh (fromIntegral i :: Int64)
    | otherwise = do
        if i < 0
          then putWord8 bh 1
          else putWord8 bh 2
        put_ bh (unroll $ abs i)
    where
      lo64 = fromIntegral (minBound :: Int64)
      hi64 = fromIntegral (maxBound :: Int64)
  get bh = do
    int_kind <- getWord8 bh
    case int_kind of
      0 -> fromIntegral <$!> (get bh :: IO Int64)
      -- Large integer
      1 -> negate <$!> getInt
      2 -> getInt
      _ -> panic "Binary Integer - Invalid byte"
      where
        getInt :: IO Integer
        getInt = roll <$!> (get bh :: IO [Word8])

-- | Split a non-negative Integer into bytes, least-significant byte first.
unroll :: Integer -> [Word8]
unroll = unfoldr step
  where
    step 0 = Nothing
    step i = Just (fromIntegral i, i `shiftR` 8)

-- | Inverse of 'unroll': rebuild an Integer from LSB-first bytes.
roll :: [Word8] -> Integer
roll = foldl' unstep 0 . reverse
  where
    unstep a b = a `shiftL` 8 .|. fromIntegral b

{-
-- This code is currently commented out.
-- See https://gitlab.haskell.org/ghc/ghc/issues/3379#note_104346 for
-- discussion.

  put_ bh (S# i#) = do putByte bh 0; put_ bh (I# i#)
  put_ bh (J# s# a#) = do
    putByte bh 1
    put_ bh (I# s#)
    let sz# = sizeofByteArray# a#  -- in *bytes*
    put_ bh (I# sz#)  -- in *bytes*
    putByteArray bh a# sz#

  get bh = do
    b <- getByte bh
    case b of
      0 -> do (I# i#) <- get bh
              return (S# i#)
      _ -> do (I# s#) <- get bh
              sz <- get bh
              (BA a#) <- getByteArray bh sz
              return (J# s# a#)

putByteArray :: BinHandle -> ByteArray# -> Int# -> IO ()
putByteArray bh a s# = loop 0#
  where loop n#
           | n# ==# s# = return ()
           | otherwise = do
                putByte bh (indexByteArray a n#)
                loop (n# +# 1#)

getByteArray :: BinHandle -> Int -> IO ByteArray
getByteArray bh (I# sz) = do
  (MBA arr) <- newByteArray sz
  let loop n
           | n ==# sz = return ()
           | otherwise = do
                w <- getByte bh
                writeByteArray arr n w
                loop (n +# 1#)
  loop 0#
  freezeByteArray arr
-}

{-
data ByteArray = BA ByteArray#
data MBA = MBA (MutableByteArray# RealWorld)

newByteArray :: Int# -> IO MBA
newByteArray sz = IO $ \s ->
  case newByteArray# sz s of { (# s, arr #) ->
  (# s, MBA arr #) }

freezeByteArray :: MutableByteArray# RealWorld -> IO ByteArray
freezeByteArray arr = IO $ \s ->
  case unsafeFreezeByteArray# arr s of { (# s, arr #) ->
  (# s, BA arr #) }

writeByteArray :: MutableByteArray# RealWorld -> Int# -> Word8 -> IO ()
writeByteArray arr i (W8# w) = IO $ \s ->
  case writeWord8Array# arr i w s of { s ->
  (# s, () #) }

indexByteArray :: ByteArray# -> Int# -> Word8
indexByteArray a# n# = W8# (indexWord8Array# a# n#)
-}

instance (Binary a) => Binary (Ratio a) where
  put_ bh (a :% b) = do put_ bh a; put_ bh b
  get bh = do a <- get bh; b <- get bh; return (a :% b)

-- Instance uses fixed-width encoding to allow inserting
-- Bin placeholders in the stream.
instance Binary (Bin a) where
  put_ bh (BinPtr i) = putWord32 bh (fromIntegral i :: Word32)
  get bh = do i <- getWord32 bh; return (BinPtr (fromIntegral (i :: Word32)))

-- Instance uses fixed-width encoding to allow inserting
-- Bin placeholders in the stream.
instance Binary (RelBinPtr a) where
  put_ bh (RelBinPtr i) = put_ bh i
  get bh = RelBinPtr <$> get bh

-- -----------------------------------------------------------------------------
-- Forward reading/writing

-- | @'forwardPut' put_A put_B@ outputs A after B but allows A to be read before B
-- by using a forward reference.
forwardPut :: WriteBinHandle -> (b -> IO a) -> IO b -> IO (a,b)
forwardPut bh put_A put_B = do
  -- write placeholder pointer to A
  pre_a <- tellBinWriter bh
  put_ bh pre_a
  -- write B
  r_b <- put_B
  -- update A's pointer
  a <- tellBinWriter bh
  putAt bh pre_a a
  seekBinNoExpandWriter bh a
  -- write A
  r_a <- put_A r_b
  pure (r_a,r_b)

-- | Like 'forwardPut', but discard the result.
forwardPut_ :: WriteBinHandle -> (b -> IO a) -> IO b -> IO ()
forwardPut_ bh put_A put_B = void $ forwardPut bh put_A put_B

-- | Read a value stored using a forward reference
--
-- The forward reference is expected to be an absolute offset.
forwardGet :: ReadBinHandle -> IO a -> IO a
forwardGet bh get_A = do
  -- read forward reference
  p <- get bh -- a BinPtr
  -- store current position
  p_a <- tellBinReader bh
  -- go read the forward value, then seek back
  seekBinReader bh p
  r <- get_A
  seekBinReader bh p_a
  pure r

-- | @'forwardPutRel' put_A put_B@ outputs A after B but allows A to be read before B
-- by using a forward reference.
--
-- This forward reference is a relative offset that allows us to skip over the
-- result of 'put_A'.
forwardPutRel :: WriteBinHandle -> (b -> IO a) -> IO b -> IO (a,b)
forwardPutRel bh put_A put_B = do
  -- write placeholder pointer to A
  pre_a <- tellBinWriter bh
  put_ bh pre_a
  -- write B
  r_b <- put_B
  -- update A's pointer
  a <- tellBinWriter bh
  putAtRel bh pre_a a
  seekBinNoExpandWriter bh a
  -- write A
  r_a <- put_A r_b
  pure (r_a,r_b)

-- | Like 'forwardGetRel', but discard the result.
forwardPutRel_ :: WriteBinHandle -> (b -> IO a) -> IO b -> IO ()
forwardPutRel_ bh put_A put_B = void $ forwardPutRel bh put_A put_B

-- | Read a value stored using a forward reference.
--
-- The forward reference is expected to be a relative offset.
forwardGetRel :: ReadBinHandle -> IO a -> IO a
forwardGetRel bh get_A = do
  -- read forward reference
  p <- getRelBin bh
  -- store current position
  p_a <- tellBinReader bh
  -- go read the forward value, then seek back
  seekBinReader bh $ makeAbsoluteBin p
  r <- get_A
  seekBinReader bh p_a
  pure r

-- -----------------------------------------------------------------------------
-- Lazy reading/writing

lazyPut :: Binary a => WriteBinHandle -> a -> IO ()
lazyPut = lazyPut' put_

lazyGet :: Binary a => ReadBinHandle -> IO a
lazyGet = lazyGet' get

-- | Write a value prefixed with a (relative) pointer past it, so a reader can
-- skip the payload and decode it lazily later.
lazyPut' :: (WriteBinHandle -> a -> IO ()) -> WriteBinHandle -> a -> IO ()
lazyPut' f bh a = do
  -- output the obj with a ptr to skip over it:
  pre_a <- tellBinWriter bh
  put_ bh pre_a             -- save a slot for the ptr
  f bh a                    -- dump the object
  q <- tellBinWriter bh     -- q = ptr to after object
  putAtRel bh pre_a q       -- fill in slot before a with ptr to q
  seekBinWriter bh q        -- finally carry on writing at q

-- | Counterpart to 'lazyPut'': the payload is decoded on demand via
-- 'unsafeInterleaveIO'; the main stream immediately skips past it.
lazyGet' :: (ReadBinHandle -> IO a) -> ReadBinHandle -> IO a
lazyGet' f bh = do
  p <- getRelBin bh -- a BinPtr
  p_a <- tellBinReader bh
  a <- unsafeInterleaveIO $ do
    -- NB: Use a fresh rbm_off_r variable in the child thread, for thread
    -- safety.
    off_r <- newFastMutInt 0
    let bh' = bh { rbm_off_r = off_r }
    seekBinReader bh' p_a
    f bh'
  seekBinReader bh (makeAbsoluteBin p) -- skip over the object for now
  return a

-- | Serialize the constructor strictly but lazily serialize a value inside a
-- 'Just'.
--
-- This way we can check for the presence of a value without deserializing the
-- value itself.
lazyPutMaybe :: Binary a => WriteBinHandle -> Maybe a -> IO ()
lazyPutMaybe bh Nothing  = putWord8 bh 0
lazyPutMaybe bh (Just x) = do
  putWord8 bh 1
  lazyPut bh x

-- | Deserialize a value serialized by 'lazyPutMaybe'.
lazyGetMaybe :: Binary a => ReadBinHandle -> IO (Maybe a)
lazyGetMaybe bh = do
  h <- getWord8 bh
  case h of
    0 -> pure Nothing
    _ -> Just <$> lazyGet bh

-- -----------------------------------------------------------------------------
-- UserData
-- -----------------------------------------------------------------------------

-- Note [Binary UserData]
-- ~~~~~~~~~~~~~~~~~~~~~~
-- Information we keep around during interface file
-- serialization/deserialization. Namely we keep the functions for serializing
-- and deserializing 'Name's and 'FastString's. We do this because we actually
-- use serialization in two distinct settings,
--
-- * When serializing interface files themselves
--
-- * When computing the fingerprint of an IfaceDecl (which we computing by
--   hashing its Binary serialization)
--
-- These two settings have different needs while serializing Names:
--
-- * Names in interface files are serialized via a symbol table (see Note
--   [Symbol table representation of names] in "GHC.Iface.Binary").
--
-- * During fingerprinting a binding Name is serialized as the OccName and a
--   non-binding Name is serialized as the fingerprint of the thing they
--   represent. See Note [Fingerprinting IfaceDecls] for further discussion.
--

-- | Newtype to serialise binding names differently to non-binding 'Name'.
-- See Note [Binary UserData]
newtype BindingName = BindingName { getBindingName :: Name }
  deriving ( Eq )

simpleBindingNameWriter :: BinaryWriter Name -> BinaryWriter BindingName
simpleBindingNameWriter = coerce

simpleBindingNameReader :: BinaryReader Name -> BinaryReader BindingName
simpleBindingNameReader = coerce

-- | Existential for 'BinaryWriter' with a type witness.
data SomeBinaryWriter = forall a . SomeBinaryWriter (Refl.TypeRep a) (BinaryWriter a)

-- | Existential for 'BinaryReader' with a type witness.
data SomeBinaryReader = forall a . SomeBinaryReader (Refl.TypeRep a) (BinaryReader a)

-- | UserData required to serialise symbols for interface files.
--
-- See Note [Binary UserData]
data WriterUserData =
  WriterUserData
    { ud_writer_data :: Map SomeTypeRep SomeBinaryWriter
      -- ^ A mapping from a type witness to the 'Writer' for the associated type.
      -- This is a 'Map' because microbenchmarks indicated this is more efficient
      -- than other representations for less than ten elements.
      --
      -- Considered representations:
      --
      -- * [(TypeRep, SomeBinaryWriter)]
      -- * bytehash (on hackage)
      -- * Map TypeRep SomeBinaryWriter
    }

-- | UserData required to deserialise symbols for interface files.
--
-- See Note [Binary UserData]
data ReaderUserData =
  ReaderUserData
    { ud_reader_data :: Map SomeTypeRep SomeBinaryReader
      -- ^ A mapping from a type witness to the 'Reader' for the associated type.
      -- This is a 'Map' because microbenchmarks indicated this is more efficient
      -- than other representations for less than ten elements.
      --
      -- Considered representations:
      --
      -- * [(TypeRep, SomeBinaryReader)]
      -- * bytehash (on hackage)
      -- * Map TypeRep SomeBinaryReader
    }

mkWriterUserData :: [SomeBinaryWriter] -> WriterUserData
mkWriterUserData caches = noWriterUserData
  { ud_writer_data = Map.fromList $
      map (\cache@(SomeBinaryWriter typRep _) -> (SomeTypeRep typRep, cache)) caches
  }

mkReaderUserData :: [SomeBinaryReader] -> ReaderUserData
mkReaderUserData caches = noReaderUserData
  { ud_reader_data = Map.fromList $
      map (\cache@(SomeBinaryReader typRep _) -> (SomeTypeRep typRep, cache)) caches
  }

mkSomeBinaryWriter :: forall a . Refl.Typeable a => BinaryWriter a -> SomeBinaryWriter
mkSomeBinaryWriter cb = SomeBinaryWriter (Refl.typeRep @a) cb

mkSomeBinaryReader :: forall a . Refl.Typeable a => BinaryReader a -> SomeBinaryReader
mkSomeBinaryReader cb = SomeBinaryReader (Refl.typeRep @a) cb

-- | How to deserialise a value of type @s@ from a handle.
newtype BinaryReader s = BinaryReader
  { getEntry :: ReadBinHandle -> IO s
  } deriving (Functor)

-- | How to serialise a value of type @s@ to a handle.
newtype BinaryWriter s = BinaryWriter
  { putEntry :: WriteBinHandle -> s -> IO ()
  }

mkWriter :: (WriteBinHandle -> s -> IO ()) -> BinaryWriter s
mkWriter f = BinaryWriter
  { putEntry = f
  }

mkReader :: (ReadBinHandle -> IO s) -> BinaryReader s
mkReader f = BinaryReader
  { getEntry = f
  }

-- | Find the 'BinaryReader' for the 'Binary' instance for the type identified by 'Proxy a'.
--
-- If no 'BinaryReader' has been configured before, this function will panic.
findUserDataReader :: forall a . Refl.Typeable a => Proxy a -> ReadBinHandle -> BinaryReader a
findUserDataReader query bh =
  case Map.lookup (Refl.someTypeRep query) (ud_reader_data $ getReaderUserData bh) of
    Nothing -> panic $ "Failed to find BinaryReader for the key: " ++ show (Refl.someTypeRep query)
    Just (SomeBinaryReader _ (reader :: BinaryReader x)) ->
      unsafeCoerce @(BinaryReader x) @(BinaryReader a) reader
      -- This 'unsafeCoerce' could be written safely like this:
      --
      -- @
      --  Just (SomeBinaryReader _ (reader :: BinaryReader x)) ->
      --    case testEquality (typeRep @a) tyRep of
      --      Just Refl -> coerce @(BinaryReader x) @(BinaryReader a) reader
      --      Nothing -> panic $ "Invariant violated"
      -- @
      --
      -- But it comes at a slight performance cost and this function is used in
      -- binary serialisation hot loops, thus, we prefer the small performance boost over
      -- the additional type safety.

-- | Find the 'BinaryWriter' for the 'Binary' instance for the type identified by 'Proxy a'.
--
-- If no 'BinaryWriter' has been configured before, this function will panic.
findUserDataWriter :: forall a . Refl.Typeable a => Proxy a -> WriteBinHandle -> BinaryWriter a
findUserDataWriter query bh =
  case Map.lookup (Refl.someTypeRep query) (ud_writer_data $ getWriterUserData bh) of
    Nothing -> panic $ "Failed to find BinaryWriter for the key: " ++ show (Refl.someTypeRep query)
    Just (SomeBinaryWriter _ (writer :: BinaryWriter x)) ->
      unsafeCoerce @(BinaryWriter x) @(BinaryWriter a) writer
      -- This 'unsafeCoerce' could be written safely like this:
      --
      -- @
      --  Just (SomeBinaryWriter tyRep (writer :: BinaryWriter x)) ->
      --    case testEquality (typeRep @a) tyRep of
      --      Just Refl -> coerce @(BinaryWriter x) @(BinaryWriter a) writer
      --      Nothing -> panic $ "Invariant violated"
      -- @
      --
      -- But it comes at a slight performance cost and this function is used in
      -- binary serialisation hot loops, thus, we prefer the small performance boost over
      -- the additional type safety.

noReaderUserData :: ReaderUserData
noReaderUserData = ReaderUserData
  { ud_reader_data = Map.empty
  }

noWriterUserData :: WriterUserData
noWriterUserData = WriterUserData
  { ud_writer_data = Map.empty
  }

newReadState :: (ReadBinHandle -> IO Name)   -- ^ how to deserialize 'Name's
             -> (ReadBinHandle -> IO FastString)
             -> ReaderUserData
newReadState get_name get_fs =
  mkReaderUserData
    [ mkSomeBinaryReader $ mkReader get_name
    , mkSomeBinaryReader $ mkReader @BindingName (coerce get_name)
    , mkSomeBinaryReader $ mkReader get_fs
    ]

newWriteState :: (WriteBinHandle -> Name -> IO ())
                 -- ^ how to serialize non-binding 'Name's
              -> (WriteBinHandle -> Name -> IO ())
                 -- ^ how to serialize binding 'Name's
              -> (WriteBinHandle -> FastString -> IO ())
              -> WriterUserData
newWriteState put_non_binding_name put_binding_name put_fs =
  mkWriterUserData
    [ mkSomeBinaryWriter $ mkWriter (\bh name -> put_binding_name bh (getBindingName name))
    , mkSomeBinaryWriter $ mkWriter put_non_binding_name
    , mkSomeBinaryWriter $ mkWriter put_fs
    ]

-- ----------------------------------------------------------------------------
-- Types for lookup and deduplication tables.
-- ----------------------------------------------------------------------------

-- | A 'ReaderTable' describes how to deserialise a table from disk,
-- and how to create a 'BinaryReader' that looks up values in the deduplication table.
data ReaderTable a = ReaderTable
  { getTable :: ReadBinHandle -> IO (SymbolTable a)
    -- ^ Deserialise a list of elements into a 'SymbolTable'.
  , mkReaderFromTable :: SymbolTable a -> BinaryReader a
    -- ^ Given the table from 'getTable', create a 'BinaryReader'
    -- that reads values only from the 'SymbolTable'.
  }

-- | A 'WriterTable' is an interface any deduplication table can implement to
-- describe how the table can be written to disk.
newtype WriterTable = WriterTable
  { putTable :: WriteBinHandle -> IO Int
    -- ^ Serialise a table to disk. Returns the number of written elements.
  }

-- ----------------------------------------------------------------------------
-- Common data structures for constructing and maintaining lookup tables for
-- binary serialisation and deserialisation.
-- ----------------------------------------------------------------------------

-- | The 'GenericSymbolTable' stores a mapping from already seen elements to an index.
-- If an element wasn't seen before, it is added to the mapping together with a fresh
-- index.
--
-- 'GenericSymbolTable' is a variant of a 'BinSymbolTable' that is polymorphic in the table implementation.
-- As such it can be used with any container that implements the 'TrieMap' type class.
--
-- While 'GenericSymbolTable' is similar to the 'BinSymbolTable', it supports storing tree-like
-- structures such as 'Type' and 'IfaceType' more efficiently.
data GenericSymbolTable m = GenericSymbolTable
  { gen_symtab_next :: !FastMutInt
    -- ^ The next index to use.
  , gen_symtab_map  :: !(IORef (m Int))
    -- ^ Given a symbol, find the symbol and return its index.
  , gen_symtab_to_write :: !(IORef [Key m])
    -- ^ Reversed list of values to write into the buffer.
    -- This is an optimisation, as it allows us to write out quickly all
    -- newly discovered values that are discovered when serialising 'Key m'
    -- to disk.
  }

-- | Initialise a 'GenericSymbolTable', initialising the index to '0'.
initGenericSymbolTable :: TrieMap m => IO (GenericSymbolTable m)
initGenericSymbolTable = do
  symtab_next <- newFastMutInt 0
  symtab_map <- newIORef emptyTM
  symtab_todo <- newIORef []
  pure $ GenericSymbolTable
    { gen_symtab_next = symtab_next
    , gen_symtab_map = symtab_map
    , gen_symtab_to_write = symtab_todo
    }

-- | Serialise the 'GenericSymbolTable' to disk.
--
-- Since 'GenericSymbolTable' stores tree-like structures, such as 'IfaceType',
-- serialising an element can add new elements to the mapping.
-- Thus, 'putGenericSymbolTable' first serialises all values, and then checks whether any
-- new elements have been discovered. If so, repeat the loop.
putGenericSymbolTable :: forall m. (TrieMap m)
                      => GenericSymbolTable m
                      -> (WriteBinHandle -> Key m -> IO ())
                      -> WriteBinHandle
                      -> IO Int
{-# INLINE putGenericSymbolTable #-}
putGenericSymbolTable gen_sym_tab serialiser bh = do
    putGenericSymbolTable bh
  where
    symtab_next = gen_symtab_next gen_sym_tab
    symtab_to_write = gen_symtab_to_write gen_sym_tab
    -- Local worker intentionally shadows the outer name.
    putGenericSymbolTable :: WriteBinHandle -> IO Int
    putGenericSymbolTable bh = do
      let loop = do
            -- Drain the to-write queue; serialising entries may enqueue more,
            -- so keep looping until it stays empty.
            vs <- atomicModifyIORef' symtab_to_write (\a -> ([], a))
            case vs of
              [] -> readFastMutInt symtab_next
              todo -> do
                mapM_ (\n -> serialiser bh n) (reverse todo)
                loop
      snd <$>
        (forwardPutRel bh (const $ readFastMutInt symtab_next >>= put_ bh) $
          loop)

-- | Read the elements of a 'GenericSymbolTable' from disk into a 'SymbolTable'.
getGenericSymbolTable :: forall a . (ReadBinHandle -> IO a) -> ReadBinHandle -> IO (SymbolTable a)
getGenericSymbolTable deserialiser bh = do
  sz <- forwardGetRel bh (get bh) :: IO Int
  mut_arr <- newArray_ (0, sz-1) :: IO (IOArray Int a)
  forM_ [0..(sz-1)] $ \i -> do
    f <- deserialiser bh
    writeArray mut_arr i f
  unsafeFreeze mut_arr

-- | Write an element 'Key m' to the given 'WriteBinHandle'.
--
-- If the element was seen before, we simply write the index of that element to the
-- 'WriteBinHandle'. If we haven't seen it before, we add the element to
-- the 'GenericSymbolTable', increment the index, and return this new index.
putGenericSymTab :: (TrieMap m) => GenericSymbolTable m -> WriteBinHandle -> Key m -> IO ()
{-# INLINE putGenericSymTab #-}
putGenericSymTab GenericSymbolTable{
                   gen_symtab_map = symtab_map_ref,
                   gen_symtab_next = symtab_next,
                   gen_symtab_to_write = symtab_todo }
                 bh val = do
  symtab_map <- readIORef symtab_map_ref
  case lookupTM val symtab_map of
    Just off -> put_ bh (fromIntegral off :: Word32)
    Nothing -> do
      off <- readFastMutInt symtab_next
      writeFastMutInt symtab_next (off+1)
      writeIORef symtab_map_ref $! insertTM val off symtab_map
      atomicModifyIORef symtab_todo (\todo -> (val : todo, ()))
      put_ bh (fromIntegral off :: Word32)

-- | Read a value from a 'SymbolTable'.
getGenericSymtab :: Binary a => SymbolTable a -> ReadBinHandle -> IO a
getGenericSymtab symtab bh = do
  i :: Word32 <- get bh
  return $! symtab ! fromIntegral i

---------------------------------------------------------
-- The Dictionary
---------------------------------------------------------

-- | A 'SymbolTable' of 'FastString's.
type Dictionary = SymbolTable FastString initFastStringReaderTable :: IO (ReaderTable FastString) initFastStringReaderTable = do return $ ReaderTable { getTable = getDictionary , mkReaderFromTable = \tbl -> mkReader (getDictFastString tbl) } initFastStringWriterTable :: IO (WriterTable, BinaryWriter FastString) initFastStringWriterTable = do dict_next_ref <- newFastMutInt 0 dict_map_ref <- newIORef emptyUFM let bin_dict = FSTable { fs_tab_next = dict_next_ref , fs_tab_map = dict_map_ref } let put_dict bh = do fs_count <- readFastMutInt dict_next_ref dict_map <- readIORef dict_map_ref putDictionary bh fs_count dict_map pure fs_count return ( WriterTable { putTable = put_dict } , mkWriter $ putDictFastString bin_dict ) putDictionary :: WriteBinHandle -> Int -> UniqFM FastString (Int,FastString) -> IO () putDictionary bh sz dict = do put_ bh sz mapM_ (putFS bh) (elems (array (0,sz-1) (nonDetEltsUFM dict))) -- It's OK to use nonDetEltsUFM here because the elements have indices -- that array uses to create order getDictionary :: ReadBinHandle -> IO Dictionary getDictionary bh = do sz <- get bh :: IO Int mut_arr <- newArray_ (0, sz-1) :: IO (IOArray Int FastString) forM_ [0..(sz-1)] $ \i -> do fs <- getFS bh writeArray mut_arr i fs unsafeFreeze mut_arr getDictFastString :: Dictionary -> ReadBinHandle -> IO FastString getDictFastString dict bh = do j <- get bh return $! (dict ! fromIntegral (j :: Word32)) putDictFastString :: FSTable -> WriteBinHandle -> FastString -> IO () putDictFastString dict bh fs = allocateFastString dict fs >>= put_ bh allocateFastString :: FSTable -> FastString -> IO Word32 allocateFastString FSTable { fs_tab_next = j_r , fs_tab_map = out_r } f = do out <- readIORef out_r let !uniq = getUnique f case lookupUFM_Directly out uniq of Just (j, _) -> return (fromIntegral j :: Word32) Nothing -> do j <- readFastMutInt j_r writeFastMutInt j_r (j + 1) writeIORef out_r $! 
addToUFM_Directly out uniq (j, f) return (fromIntegral j :: Word32) -- FSTable is an exact copy of Haddock.InterfaceFile.BinDictionary. We rename to -- avoid a collision and copy to avoid a dependency. data FSTable = FSTable { fs_tab_next :: !FastMutInt -- The next index to use , fs_tab_map :: !(IORef (UniqFM FastString (Int,FastString))) -- indexed by FastString } --------------------------------------------------------- -- The Symbol Table --------------------------------------------------------- -- | Symbols that are read from disk. -- The 'SymbolTable' index starts on '0'. type SymbolTable a = Array Int a --------------------------------------------------------- -- Reading and writing FastStrings --------------------------------------------------------- putFS :: WriteBinHandle -> FastString -> IO () putFS bh fs = putBS bh $ bytesFS fs getFS :: ReadBinHandle -> IO FastString getFS bh = do l <- get bh :: IO Int getPrim bh l (\src -> pure $! mkFastStringBytes src l ) -- | Put a ByteString without its length (can't be read back without knowing the -- length!) 
putByteString :: WriteBinHandle -> ByteString -> IO () putByteString bh bs = BS.unsafeUseAsCStringLen bs $ \(ptr, l) -> do putPrim bh l (\op -> copyBytes op (castPtr ptr) l) -- | Get a ByteString whose length is known getByteString :: ReadBinHandle -> Int -> IO ByteString getByteString bh l = BS.create l $ \dest -> do getPrim bh l (\src -> copyBytes dest src l) putBS :: WriteBinHandle -> ByteString -> IO () putBS bh bs = BS.unsafeUseAsCStringLen bs $ \(ptr, l) -> do put_ bh l putPrim bh l (\op -> copyBytes op (castPtr ptr) l) getBS :: ReadBinHandle -> IO ByteString getBS bh = do l <- get bh :: IO Int BS.create l $ \dest -> do getPrim bh l (\src -> copyBytes dest src l) instance Binary ByteString where put_ bh f = putBS bh f get bh = getBS bh instance Binary FastString where put_ bh f = case findUserDataWriter (Proxy :: Proxy FastString) bh of tbl -> putEntry tbl bh f get bh = case findUserDataReader (Proxy :: Proxy FastString) bh of tbl -> getEntry tbl bh deriving instance Binary NonDetFastString deriving instance Binary LexicalFastString instance Binary Fingerprint where put_ h (Fingerprint w1 w2) = do put_ h w1; put_ h w2 get h = do w1 <- get h; w2 <- get h; return (Fingerprint w1 w2) instance Binary ModuleName where put_ bh (ModuleName fs) = put_ bh fs get bh = do fs <- get bh; return (ModuleName fs) -- instance Binary TupleSort where -- put_ bh BoxedTuple = putByte bh 0 -- put_ bh UnboxedTuple = putByte bh 1 -- put_ bh ConstraintTuple = putByte bh 2 -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return BoxedTuple -- 1 -> do return UnboxedTuple -- _ -> do return ConstraintTuple -- instance Binary Activation where -- put_ bh NeverActive = do -- putByte bh 0 -- put_ bh FinalActive = do -- putByte bh 1 -- put_ bh AlwaysActive = do -- putByte bh 2 -- put_ bh (ActiveBefore src aa) = do -- putByte bh 3 -- put_ bh src -- put_ bh aa -- put_ bh (ActiveAfter src ab) = do -- putByte bh 4 -- put_ bh src -- put_ bh ab -- get bh = do -- h <- getByte bh -- case h of 
-- 0 -> do return NeverActive -- 1 -> do return FinalActive -- 2 -> do return AlwaysActive -- 3 -> do src <- get bh -- aa <- get bh -- return (ActiveBefore src aa) -- _ -> do src <- get bh -- ab <- get bh -- return (ActiveAfter src ab) -- instance Binary InlinePragma where -- put_ bh (InlinePragma s a b c d) = do -- put_ bh s -- put_ bh a -- put_ bh b -- put_ bh c -- put_ bh d -- get bh = do -- s <- get bh -- a <- get bh -- b <- get bh -- c <- get bh -- d <- get bh -- return (InlinePragma s a b c d) -- instance Binary RuleMatchInfo where -- put_ bh FunLike = putByte bh 0 -- put_ bh ConLike = putByte bh 1 -- get bh = do -- h <- getByte bh -- if h == 1 then return ConLike -- else return FunLike -- instance Binary InlineSpec where -- put_ bh NoUserInlinePrag = putByte bh 0 -- put_ bh Inline = putByte bh 1 -- put_ bh Inlinable = putByte bh 2 -- put_ bh NoInline = putByte bh 3 -- get bh = do h <- getByte bh -- case h of -- 0 -> return NoUserInlinePrag -- 1 -> return Inline -- 2 -> return Inlinable -- _ -> return NoInline -- instance Binary RecFlag where -- put_ bh Recursive = do -- putByte bh 0 -- put_ bh NonRecursive = do -- putByte bh 1 -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return Recursive -- _ -> do return NonRecursive -- instance Binary OverlapMode where -- put_ bh (NoOverlap s) = putByte bh 0 >> put_ bh s -- put_ bh (Overlaps s) = putByte bh 1 >> put_ bh s -- put_ bh (Incoherent s) = putByte bh 2 >> put_ bh s -- put_ bh (Overlapping s) = putByte bh 3 >> put_ bh s -- put_ bh (Overlappable s) = putByte bh 4 >> put_ bh s -- get bh = do -- h <- getByte bh -- case h of -- 0 -> (get bh) >>= \s -> return $ NoOverlap s -- 1 -> (get bh) >>= \s -> return $ Overlaps s -- 2 -> (get bh) >>= \s -> return $ Incoherent s -- 3 -> (get bh) >>= \s -> return $ Overlapping s -- 4 -> (get bh) >>= \s -> return $ Overlappable s -- _ -> panic ("get OverlapMode" ++ show h) -- instance Binary OverlapFlag where -- put_ bh flag = do put_ bh (overlapMode flag) -- put_ bh 
(isSafeOverlap flag) -- get bh = do -- h <- get bh -- b <- get bh -- return OverlapFlag { overlapMode = h, isSafeOverlap = b } -- instance Binary FixityDirection where -- put_ bh InfixL = do -- putByte bh 0 -- put_ bh InfixR = do -- putByte bh 1 -- put_ bh InfixN = do -- putByte bh 2 -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return InfixL -- 1 -> do return InfixR -- _ -> do return InfixN -- instance Binary Fixity where -- put_ bh (Fixity src aa ab) = do -- put_ bh src -- put_ bh aa -- put_ bh ab -- get bh = do -- src <- get bh -- aa <- get bh -- ab <- get bh -- return (Fixity src aa ab) -- instance Binary WarningTxt where -- put_ bh (WarningTxt s w) = do -- putByte bh 0 -- put_ bh s -- put_ bh w -- put_ bh (DeprecatedTxt s d) = do -- putByte bh 1 -- put_ bh s -- put_ bh d -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do s <- get bh -- w <- get bh -- return (WarningTxt s w) -- _ -> do s <- get bh -- d <- get bh -- return (DeprecatedTxt s d) -- instance Binary StringLiteral where -- put_ bh (StringLiteral st fs _) = do -- put_ bh st -- put_ bh fs -- get bh = do -- st <- get bh -- fs <- get bh -- return (StringLiteral st fs Nothing) newtype BinLocated a = BinLocated { unBinLocated :: Located a } instance Binary a => Binary (BinLocated a) where put_ bh (BinLocated (L l x)) = do put_ bh $ BinSrcSpan l put_ bh x get bh = do l <- unBinSrcSpan <$> get bh x <- get bh return $ BinLocated (L l x) newtype BinSpan = BinSpan { unBinSpan :: RealSrcSpan } -- See Note [Source Location Wrappers] instance Binary BinSpan where put_ bh (BinSpan ss) = do put_ bh (srcSpanFile ss) put_ bh (srcSpanStartLine ss) put_ bh (srcSpanStartCol ss) put_ bh (srcSpanEndLine ss) put_ bh (srcSpanEndCol ss) get bh = do f <- get bh sl <- get bh sc <- get bh el <- get bh ec <- get bh return $ BinSpan (mkRealSrcSpan (mkRealSrcLoc f sl sc) (mkRealSrcLoc f el ec)) instance Binary UnhelpfulSpanReason where put_ bh r = case r of UnhelpfulNoLocationInfo -> putByte bh 0 UnhelpfulWiredIn 
-> putByte bh 1 UnhelpfulInteractive -> putByte bh 2 UnhelpfulGenerated -> putByte bh 3 UnhelpfulOther fs -> putByte bh 4 >> put_ bh fs get bh = do h <- getByte bh case h of 0 -> return UnhelpfulNoLocationInfo 1 -> return UnhelpfulWiredIn 2 -> return UnhelpfulInteractive 3 -> return UnhelpfulGenerated _ -> UnhelpfulOther <$> get bh newtype BinSrcSpan = BinSrcSpan { unBinSrcSpan :: SrcSpan } -- See Note [Source Location Wrappers] instance Binary BinSrcSpan where put_ bh (BinSrcSpan (RealSrcSpan ss _sb)) = do putByte bh 0 -- BufSpan doesn't ever get serialised because the positions depend -- on build location. put_ bh $ BinSpan ss put_ bh (BinSrcSpan (UnhelpfulSpan s)) = do putByte bh 1 put_ bh s get bh = do h <- getByte bh case h of 0 -> do BinSpan ss <- get bh return $ BinSrcSpan (RealSrcSpan ss Strict.Nothing) _ -> do s <- get bh return $ BinSrcSpan (UnhelpfulSpan s) {- Note [Source Location Wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Source locations are banned from interface files, to prevent filepaths affecting interface hashes. Unfortunately, we can't remove all binary instances, as they're used to serialise .hie files, and we don't want to break binary compatibility. To this end, the Bin[Src]Span newtypes wrappers were introduced to prevent accidentally serialising a source location as part of a larger structure. 
-} -------------------------------------------------------------------------------- -- Instances for the containers package -------------------------------------------------------------------------------- instance (Binary v) => Binary (IntMap v) where put_ bh m = put_ bh (IntMap.toList m) get bh = IntMap.fromList <$> get bh ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Binary/0000755000000000000000000000000007346545000020232 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Binary/Typeable.hs0000644000000000000000000001513507346545000022340 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE GADTs #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} {-# OPTIONS_GHC -Wno-orphans -Wincomplete-patterns #-} -- | Orphan Binary instances for Data.Typeable stuff module GHC.Utils.Binary.Typeable ( getSomeTypeRep ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Exts (RuntimeRep(..), VecCount(..), VecElem(..)) import GHC.Exts (Levity(Lifted, Unlifted)) import GHC.Serialized import Foreign import Type.Reflection import Type.Reflection.Unsafe import Data.Kind (Type) instance Binary TyCon where put_ bh tc = do put_ bh (tyConPackage tc) put_ bh (tyConModule tc) put_ bh (tyConName tc) put_ bh (tyConKindArgs tc) put_ bh (tyConKindRep tc) get bh = mkTyCon <$> get bh <*> get bh <*> get bh <*> get bh <*> get bh getSomeTypeRep :: ReadBinHandle -> IO SomeTypeRep getSomeTypeRep bh = do tag <- get bh :: IO Word8 case tag of 0 -> return $ SomeTypeRep (typeRep :: TypeRep Type) 1 -> do con <- get bh :: IO TyCon ks <- get bh :: IO [SomeTypeRep] return $ SomeTypeRep $ mkTrCon con ks 2 -> do SomeTypeRep f <- getSomeTypeRep bh SomeTypeRep x <- getSomeTypeRep bh case typeRepKind f of Fun arg res -> case arg `eqTypeRep` typeRepKind x of Just HRefl -> case typeRepKind res `eqTypeRep` (typeRep :: TypeRep Type) of Just HRefl -> return $ SomeTypeRep $ mkTrApp f x _ -> failure "Kind mismatch in type application" [] _ -> failure "Kind 
mismatch in type application" [ " Found argument of kind: " ++ show (typeRepKind x) , " Where the constructor: " ++ show f , " Expects kind: " ++ show arg ] _ -> failure "Applied non-arrow" [ " Applied type: " ++ show f , " To argument: " ++ show x ] _ -> failure "Invalid SomeTypeRep" [] where failure description info = fail $ unlines $ [ "Binary.getSomeTypeRep: "++description ] ++ map (" "++) info instance Binary SomeTypeRep where put_ bh (SomeTypeRep rep) = putTypeRep bh rep get = getSomeTypeRep instance Typeable a => Binary (TypeRep (a :: k)) where put_ = putTypeRep get bh = do SomeTypeRep rep <- getSomeTypeRep bh case rep `eqTypeRep` expected of Just HRefl -> pure rep Nothing -> fail $ unlines [ "Binary: Type mismatch" , " Deserialized type: " ++ show rep , " Expected type: " ++ show expected ] where expected = typeRep :: TypeRep a instance Binary VecCount where put_ bh = putByte bh . fromIntegral . fromEnum get bh = toEnum . fromIntegral <$> getByte bh instance Binary VecElem where put_ bh = putByte bh . fromIntegral . fromEnum get bh = toEnum . 
fromIntegral <$> getByte bh instance Binary RuntimeRep where put_ bh (VecRep a b) = putByte bh 0 >> put_ bh a >> put_ bh b put_ bh (TupleRep reps) = putByte bh 1 >> put_ bh reps put_ bh (SumRep reps) = putByte bh 2 >> put_ bh reps put_ bh (BoxedRep Lifted) = putByte bh 3 put_ bh (BoxedRep Unlifted) = putByte bh 4 put_ bh IntRep = putByte bh 5 put_ bh WordRep = putByte bh 6 put_ bh Int64Rep = putByte bh 7 put_ bh Word64Rep = putByte bh 8 put_ bh AddrRep = putByte bh 9 put_ bh FloatRep = putByte bh 10 put_ bh DoubleRep = putByte bh 11 put_ bh Int8Rep = putByte bh 12 put_ bh Word8Rep = putByte bh 13 put_ bh Int16Rep = putByte bh 14 put_ bh Word16Rep = putByte bh 15 put_ bh Int32Rep = putByte bh 16 put_ bh Word32Rep = putByte bh 17 get bh = do tag <- getByte bh case tag of 0 -> VecRep <$> get bh <*> get bh 1 -> TupleRep <$> get bh 2 -> SumRep <$> get bh 3 -> pure (BoxedRep Lifted) 4 -> pure (BoxedRep Unlifted) 5 -> pure IntRep 6 -> pure WordRep 7 -> pure Int64Rep 8 -> pure Word64Rep 9 -> pure AddrRep 10 -> pure FloatRep 11 -> pure DoubleRep 12 -> pure Int8Rep 13 -> pure Word8Rep 14 -> pure Int16Rep 15 -> pure Word16Rep 16 -> pure Int32Rep 17 -> pure Word32Rep _ -> fail "Binary.putRuntimeRep: invalid tag" instance Binary KindRep where put_ bh (KindRepTyConApp tc k) = putByte bh 0 >> put_ bh tc >> put_ bh k put_ bh (KindRepVar bndr) = putByte bh 1 >> put_ bh bndr put_ bh (KindRepApp a b) = putByte bh 2 >> put_ bh a >> put_ bh b put_ bh (KindRepFun a b) = putByte bh 3 >> put_ bh a >> put_ bh b put_ bh (KindRepTYPE r) = putByte bh 4 >> put_ bh r put_ bh (KindRepTypeLit sort r) = putByte bh 5 >> put_ bh sort >> put_ bh r get bh = do tag <- getByte bh case tag of 0 -> KindRepTyConApp <$> get bh <*> get bh 1 -> KindRepVar <$> get bh 2 -> KindRepApp <$> get bh <*> get bh 3 -> KindRepFun <$> get bh <*> get bh 4 -> KindRepTYPE <$> get bh 5 -> KindRepTypeLit <$> get bh <*> get bh _ -> fail "Binary.putKindRep: invalid tag" instance Binary TypeLitSort where put_ bh TypeLitSymbol = 
putByte bh 0 put_ bh TypeLitNat = putByte bh 1 put_ bh TypeLitChar = putByte bh 2 get bh = do tag <- getByte bh case tag of 0 -> pure TypeLitSymbol 1 -> pure TypeLitNat 2 -> pure TypeLitChar _ -> fail "Binary.putTypeLitSort: invalid tag" putTypeRep :: WriteBinHandle -> TypeRep a -> IO () putTypeRep bh rep -- Handle Type specially since it's so common | Just HRefl <- rep `eqTypeRep` (typeRep :: TypeRep Type) = put_ bh (0 :: Word8) putTypeRep bh (Con' con ks) = do put_ bh (1 :: Word8) put_ bh con put_ bh ks putTypeRep bh (App f x) = do put_ bh (2 :: Word8) putTypeRep bh f putTypeRep bh x instance Binary Serialized where put_ bh (Serialized the_type bytes) = do put_ bh the_type put_ bh bytes get bh = do the_type <- get bh bytes <- get bh return (Serialized the_type bytes) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/BufHandle.hs0000644000000000000000000001165407346545000021201 0ustar0000000000000000{-# LANGUAGE MagicHash #-} ----------------------------------------------------------------------------- -- -- Fast write-buffered Handles -- -- (c) The University of Glasgow 2005-2006 -- -- This is a simple abstraction over Handles that offers very fast write -- buffering, but without the thread safety that Handles provide. It's used -- to save time in GHC.Utils.Ppr.printDoc. 
-- ----------------------------------------------------------------------------- module GHC.Utils.BufHandle ( BufHandle(..), newBufHandle, bPutChar, bPutStr, bPutFS, bPutFZS, bPutPtrString, bPutReplicate, bFlush, ) where import GHC.Prelude.Basic import GHC.Data.FastString import GHC.Data.FastMutInt import Control.Monad ( when ) import Data.ByteString (ByteString) import qualified Data.ByteString.Unsafe as BS import Data.Char ( ord ) import Foreign import Foreign.C.String import System.IO -- for RULES import GHC.Exts (unpackCString#, unpackNBytes#, Int(..)) import GHC.Ptr (Ptr(..)) -- ----------------------------------------------------------------------------- data BufHandle = BufHandle {-#UNPACK#-}!(Ptr Word8) {-#UNPACK#-}!FastMutInt Handle newBufHandle :: Handle -> IO BufHandle newBufHandle hdl = do ptr <- mallocBytes buf_size r <- newFastMutInt 0 return (BufHandle ptr r hdl) buf_size :: Int buf_size = 8192 bPutChar :: BufHandle -> Char -> IO () bPutChar b@(BufHandle buf r hdl) !c = do i <- readFastMutInt r if (i >= buf_size) then do hPutBuf hdl buf buf_size writeFastMutInt r 0 bPutChar b c else do pokeElemOff buf i (fromIntegral (ord c) :: Word8) writeFastMutInt r (i+1) -- Equivalent of the text/str, text/unpackNBytes#, text/[] rules -- in GHC.Utils.Ppr. {-# RULES "hdoc/str" forall a h. bPutStr h (unpackCString# a) = bPutPtrString h (mkPtrString# a) #-} {-# RULES "hdoc/unpackNBytes#" forall p n h. bPutStr h (unpackNBytes# p n) = bPutPtrString h (PtrString (Ptr p) (I# n)) #-} {-# RULES "hdoc/[]#" forall h. 
bPutStr h [] = return () #-} {-# NOINLINE [0] bPutStr #-} -- Give the RULE a chance to fire -- It must wait till after phase 1 when -- the unpackCString first is manifested bPutStr :: BufHandle -> String -> IO () bPutStr (BufHandle buf r hdl) !str = do i <- readFastMutInt r loop str i where loop "" !i = do writeFastMutInt r i; return () loop (c:cs) !i | i >= buf_size = do hPutBuf hdl buf buf_size loop (c:cs) 0 | otherwise = do pokeElemOff buf i (fromIntegral (ord c)) loop cs (i+1) bPutFS :: BufHandle -> FastString -> IO () bPutFS b fs = bPutBS b $ bytesFS fs bPutFZS :: BufHandle -> FastZString -> IO () bPutFZS b fs = bPutBS b $ fastZStringToByteString fs bPutBS :: BufHandle -> ByteString -> IO () bPutBS b bs = BS.unsafeUseAsCStringLen bs $ bPutCStringLen b bPutCStringLen :: BufHandle -> CStringLen -> IO () bPutCStringLen b@(BufHandle buf r hdl) cstr@(ptr, len) = do i <- readFastMutInt r if (i + len) >= buf_size then do hPutBuf hdl buf i writeFastMutInt r 0 if (len >= buf_size) then hPutBuf hdl ptr len else bPutCStringLen b cstr else do copyBytes (buf `plusPtr` i) ptr len writeFastMutInt r (i + len) bPutPtrString :: BufHandle -> PtrString -> IO () bPutPtrString b@(BufHandle buf r hdl) l@(PtrString a len) = l `seq` do i <- readFastMutInt r if (i+len) >= buf_size then do hPutBuf hdl buf i writeFastMutInt r 0 if (len >= buf_size) then hPutBuf hdl a len else bPutPtrString b l else do copyBytes (buf `plusPtr` i) a len writeFastMutInt r (i+len) -- | Replicate an 8-bit character bPutReplicate :: BufHandle -> Int -> Char -> IO () bPutReplicate (BufHandle buf r hdl) len c = do i <- readFastMutInt r let oc = fromIntegral (ord c) if (i+len) < buf_size then do fillBytes (buf `plusPtr` i) oc len writeFastMutInt r (i+len) else do -- flush the current buffer when (i /= 0) $ hPutBuf hdl buf i if (len < buf_size) then do fillBytes buf oc len writeFastMutInt r len else do -- fill a full buffer fillBytes buf oc buf_size -- flush it as many times as necessary let go n | n >= buf_size = 
do hPutBuf hdl buf buf_size go (n-buf_size) | otherwise = writeFastMutInt r n go len bFlush :: BufHandle -> IO () bFlush (BufHandle buf r hdl) = do i <- readFastMutInt r when (i > 0) $ hPutBuf hdl buf i free buf return () ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/CliOption.hs0000644000000000000000000000176207346545000021250 0ustar0000000000000000module GHC.Utils.CliOption ( Option (..) , showOpt ) where import GHC.Prelude -- ----------------------------------------------------------------------------- -- Command-line options -- | When invoking external tools as part of the compilation pipeline, we -- pass these a sequence of options on the command-line. Rather than -- just using a list of Strings, we use a type that allows us to distinguish -- between filepaths and 'other stuff'. The reason for this is that -- this type gives us a handle on transforming filenames, and filenames only, -- to whatever format they're expected to be on a particular platform. data Option = FileOption -- an entry that _contains_ filename(s) / filepaths. String -- a non-filepath prefix that shouldn't be -- transformed (e.g., "/out=") String -- the filepath/filename portion | Option String deriving ( Eq ) showOpt :: Option -> String showOpt (FileOption pre f) = pre ++ f showOpt (Option s) = s ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Constants.hs0000644000000000000000000000200207346545000021310 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Utils.Constants ( debugIsOn , ghciSupported , isWindowsHost , isDarwinHost ) where import GHC.Prelude.Basic {- These booleans are global constants, set by CPP flags. They allow us to recompile a single module (this one) to change whether or not debug output appears. They sometimes let us avoid even running CPP elsewhere. It's important that the flags are literal constants (True/False). 
Then, with -0, tests of the flags in other modules will simplify to the correct branch of the conditional, thereby dropping debug code altogether when the flags are off. -} ghciSupported :: Bool #if defined(HAVE_INTERNAL_INTERPRETER) ghciSupported = True #else ghciSupported = False #endif debugIsOn :: Bool #if defined(DEBUG) debugIsOn = True #else debugIsOn = False #endif isWindowsHost :: Bool #if defined(mingw32_HOST_OS) isWindowsHost = True #else isWindowsHost = False #endif isDarwinHost :: Bool #if defined(darwin_HOST_OS) isDarwinHost = True #else isDarwinHost = False #endif ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Containers/Internal/0000755000000000000000000000000007346545000022667 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Containers/Internal/BitUtil.hs0000644000000000000000000000427407346545000024606 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} ----------------------------------------------------------------------------- -- | -- Module : Utils.Containers.Internal.BitUtil -- Copyright : (c) Clark Gaebel 2012 -- (c) Johan Tibel 2012 -- License : BSD-style -- Maintainer : libraries@haskell.org -- Portability : portable ----------------------------------------------------------------------------- -- -- = WARNING -- -- This module is considered __internal__. -- -- The Package Versioning Policy __does not apply__. -- -- The contents of this module may change __in any way whatsoever__ -- and __without any warning__ between minor versions of this package. -- -- Authors importing this module are expected to track development -- closely. module GHC.Utils.Containers.Internal.BitUtil ( bitcount , highestBitMask , shiftLL , shiftRL ) where import GHC.Prelude.Basic import Data.Word {---------------------------------------------------------------------- [bitcount] as posted by David F. 
Place to haskell-cafe on April 11, 2006, based on the code on http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan, where the following source is given: Published in 1988, the C Programming Language 2nd Ed. (by Brian W. Kernighan and Dennis M. Ritchie) mentions this in exercise 2-9. On April 19, 2006 Don Knuth pointed out to me that this method "was first published by Peter Wegner in CACM 3 (1960), 322. (Also discovered independently by Derrick Lehmer and published in 1964 in a book edited by Beckenbach.)" ----------------------------------------------------------------------} bitcount :: Int -> Word64 -> Int bitcount a x = a + popCount x {-# INLINE bitcount #-} -- The highestBitMask implementation is based on -- http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 -- which has been put in the public domain. -- | Return a word where only the highest bit is set. highestBitMask :: Word64 -> Word64 highestBitMask w = shiftLL 1 (63 - countLeadingZeros w) {-# INLINE highestBitMask #-} -- Right and left logical shifts. shiftRL, shiftLL :: Word64 -> Int -> Word64 shiftRL = unsafeShiftR shiftLL = unsafeShiftL ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Containers/Internal/StrictPair.hs0000644000000000000000000000076507346545000025317 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | A strict pair module GHC.Utils.Containers.Internal.StrictPair (StrictPair(..), toPair) where -- See W1 of Note [Tracking dependencies on primitives] in GHC.Internal.Base import GHC.Base () default () -- | The same as a regular Haskell pair, but -- -- @ -- (x :*: _|_) = (_|_ :*: y) = _|_ -- @ data StrictPair a b = !a :*: !b infixr 1 :*: -- | Convert a strict pair to a standard pair. 
toPair :: StrictPair a b -> (a, b) toPair (x :*: y) = (x, y) {-# INLINE toPair #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Error.hs0000644000000000000000000005660607346545000020450 0ustar0000000000000000{-# LANGUAGE ViewPatterns #-} {- (c) The AQUA Project, Glasgow University, 1994-1998 \section[ErrsUtils]{Utilities for error reporting} -} module GHC.Utils.Error ( -- * Basic types Validity'(..), Validity, andValid, allValid, getInvalids, Severity(..), -- * Messages Diagnostic(..), MsgEnvelope(..), MessageClass(..), SDoc, DecoratedSDoc(unDecorated), Messages, mkMessages, unionMessages, errorsFound, isEmptyMessages, -- ** Formatting pprMessageBag, pprMsgEnvelopeBagWithLoc, pprMsgEnvelopeBagWithLocDefault, pprMessages, pprLocMsgEnvelope, pprLocMsgEnvelopeDefault, formatBulleted, -- ** Construction DiagOpts (..), emptyDiagOpts, diag_wopt, diag_fatal_wopt, emptyMessages, mkDecorated, mkLocMessage, mkMsgEnvelope, mkPlainMsgEnvelope, mkPlainErrorMsgEnvelope, mkErrorMsgEnvelope, mkMCDiagnostic, errorDiagnostic, diagReasonSeverity, mkPlainError, mkPlainDiagnostic, mkDecoratedError, mkDecoratedDiagnostic, noHints, -- * Utilities getCaretDiagnostic, -- * Issuing messages during compilation putMsg, printInfoForUser, printOutputForUser, logInfo, logOutput, errorMsg, fatalErrorMsg, compilationProgressMsg, showPass, withTiming, withTimingSilent, debugTraceMsg, ghcExit, prettyPrintGhcErrors, traceCmd, traceSystoolCommand, sortMsgBag ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Data.Bag import qualified GHC.Data.EnumSet as EnumSet import GHC.Data.EnumSet (EnumSet) import GHC.Utils.Exception import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Logger import GHC.Types.Error import GHC.Types.SrcLoc as SrcLoc import GHC.Unit.Module.Warnings import System.Exit ( ExitCode(..), exitWith ) import Data.List ( sortBy ) import Data.Function import Debug.Trace import Control.Monad import Control.Monad.IO.Class import Control.Monad.Catch 
as MC (handle) import GHC.Conc ( getAllocationCounter ) import System.CPUTime import Data.List.NonEmpty (NonEmpty (..)) import qualified Data.List.NonEmpty as NE data DiagOpts = DiagOpts { diag_warning_flags :: !(EnumSet WarningFlag) -- ^ Enabled warnings , diag_fatal_warning_flags :: !(EnumSet WarningFlag) -- ^ Fatal warnings , diag_custom_warning_categories :: !WarningCategorySet -- ^ Enabled custom warning categories , diag_fatal_custom_warning_categories :: !WarningCategorySet -- ^ Fatal custom warning categories , diag_warn_is_error :: !Bool -- ^ Treat warnings as errors , diag_reverse_errors :: !Bool -- ^ Reverse error reporting order , diag_max_errors :: !(Maybe Int) -- ^ Max reported error count , diag_ppr_ctx :: !SDocContext -- ^ Error printing context } emptyDiagOpts :: DiagOpts emptyDiagOpts = DiagOpts { diag_warning_flags = EnumSet.empty , diag_fatal_warning_flags = EnumSet.empty , diag_custom_warning_categories = emptyWarningCategorySet , diag_fatal_custom_warning_categories = emptyWarningCategorySet , diag_warn_is_error = False , diag_reverse_errors = False , diag_max_errors = Nothing , diag_ppr_ctx = defaultSDocContext } diag_wopt :: WarningFlag -> DiagOpts -> Bool diag_wopt wflag opts = wflag `EnumSet.member` diag_warning_flags opts diag_fatal_wopt :: WarningFlag -> DiagOpts -> Bool diag_fatal_wopt wflag opts = wflag `EnumSet.member` diag_fatal_warning_flags opts diag_wopt_custom :: WarningCategory -> DiagOpts -> Bool diag_wopt_custom wflag opts = wflag `elemWarningCategorySet` diag_custom_warning_categories opts diag_fatal_wopt_custom :: WarningCategory -> DiagOpts -> Bool diag_fatal_wopt_custom wflag opts = wflag `elemWarningCategorySet` diag_fatal_custom_warning_categories opts -- | Computes the /right/ 'Severity' for the input 'DiagnosticReason' out of -- the 'DiagOpts. This function /has/ to be called when a diagnostic is constructed, -- i.e. 
with a 'DiagOpts \"snapshot\" taken as close as possible to where a -- particular diagnostic message is built, otherwise the computed 'Severity' might -- not be correct, due to the mutable nature of the 'DynFlags' in GHC. -- -- diagReasonSeverity :: DiagOpts -> DiagnosticReason -> Severity diagReasonSeverity opts reason = fst (diag_reason_severity opts reason) -- Like the diagReasonSeverity but the second half of the pair is a small -- ReasolvedDiagnosticReason which would cause the diagnostic to be triggered with the -- same severity. -- -- See Note [Warnings controlled by multiple flags] -- diag_reason_severity :: DiagOpts -> DiagnosticReason -> (Severity, ResolvedDiagnosticReason) diag_reason_severity opts reason = fmap ResolvedDiagnosticReason $ case reason of WarningWithFlags wflags -> case wflags' of [] -> (SevIgnore, reason) w : ws -> case wflagsE of [] -> (SevWarning, WarningWithFlags (w :| ws)) e : es -> (SevError, WarningWithFlags (e :| es)) where wflags' = NE.filter (\wflag -> diag_wopt wflag opts) wflags wflagsE = filter (\wflag -> diag_fatal_wopt wflag opts) wflags' WarningWithCategory wcat | not (diag_wopt_custom wcat opts) -> (SevIgnore, reason) | diag_fatal_wopt_custom wcat opts -> (SevError, reason) | otherwise -> (SevWarning, reason) WarningWithoutFlag | diag_warn_is_error opts -> (SevError, reason) | otherwise -> (SevWarning, reason) ErrorWithoutFlag -> (SevError, reason) -- | Make a 'MessageClass' for a given 'DiagnosticReason', consulting the -- 'DiagOpts'. mkMCDiagnostic :: DiagOpts -> DiagnosticReason -> Maybe DiagnosticCode -> MessageClass mkMCDiagnostic opts reason code = MCDiagnostic sev reason' code where (sev, reason') = diag_reason_severity opts reason -- | Varation of 'mkMCDiagnostic' which can be used when we are /sure/ the -- input 'DiagnosticReason' /is/ 'ErrorWithoutFlag' and there is no diagnostic code. 
errorDiagnostic :: MessageClass errorDiagnostic = MCDiagnostic SevError (ResolvedDiagnosticReason ErrorWithoutFlag) Nothing -- -- Creating MsgEnvelope(s) -- mk_msg_envelope :: Diagnostic e => Severity -> SrcSpan -> NamePprCtx -> ResolvedDiagnosticReason -> e -> MsgEnvelope e mk_msg_envelope severity locn name_ppr_ctx reason err = MsgEnvelope { errMsgSpan = locn , errMsgContext = name_ppr_ctx , errMsgDiagnostic = err , errMsgSeverity = severity , errMsgReason = reason } -- | Wrap a 'Diagnostic' in a 'MsgEnvelope', recording its location. -- If you know your 'Diagnostic' is an error, consider using 'mkErrorMsgEnvelope', -- which does not require looking at the 'DiagOpts' mkMsgEnvelope :: Diagnostic e => DiagOpts -> SrcSpan -> NamePprCtx -> e -> MsgEnvelope e mkMsgEnvelope opts locn name_ppr_ctx err = mk_msg_envelope sev locn name_ppr_ctx reason err where (sev, reason) = diag_reason_severity opts (diagnosticReason err) -- | Wrap a 'Diagnostic' in a 'MsgEnvelope', recording its location. -- Precondition: the diagnostic is, in fact, an error. That is, -- @diagnosticReason msg == ErrorWithoutFlag@. mkErrorMsgEnvelope :: Diagnostic e => SrcSpan -> NamePprCtx -> e -> MsgEnvelope e mkErrorMsgEnvelope locn name_ppr_ctx msg = assert (diagnosticReason msg == ErrorWithoutFlag) $ mk_msg_envelope SevError locn name_ppr_ctx (ResolvedDiagnosticReason ErrorWithoutFlag) msg -- | Variant that doesn't care about qualified/unqualified names. mkPlainMsgEnvelope :: Diagnostic e => DiagOpts -> SrcSpan -> e -> MsgEnvelope e mkPlainMsgEnvelope opts locn msg = mkMsgEnvelope opts locn alwaysQualify msg -- | Variant of 'mkPlainMsgEnvelope' which can be used when we are /sure/ we -- are constructing a diagnostic with a 'ErrorWithoutFlag' reason. 
mkPlainErrorMsgEnvelope :: Diagnostic e => SrcSpan -> e -> MsgEnvelope e mkPlainErrorMsgEnvelope locn msg = mk_msg_envelope SevError locn alwaysQualify (ResolvedDiagnosticReason ErrorWithoutFlag) msg ------------------------- data Validity' a = IsValid -- ^ Everything is fine | NotValid a -- ^ A problem, and some indication of why deriving Functor -- | Monomorphic version of @Validity'@ specialised for 'SDoc's. type Validity = Validity' SDoc andValid :: Validity' a -> Validity' a -> Validity' a andValid IsValid v = v andValid v _ = v -- | If they aren't all valid, return the first allValid :: [Validity' a] -> Validity' a allValid [] = IsValid allValid (v : vs) = v `andValid` allValid vs getInvalids :: [Validity' a] -> [a] getInvalids vs = [d | NotValid d <- vs] -- ----------------------------------------------------------------------------- -- Collecting up messages for later ordering and printing. ---------------- -- | Formats the input list of structured document, where each element of the list gets a bullet. formatBulleted :: DecoratedSDoc -> SDoc formatBulleted (unDecorated -> docs) = sdocWithContext $ \ctx -> case msgs ctx of [] -> Outputable.empty [msg] -> msg xs -> vcat $ map starred xs where msgs ctx = filter (not . Outputable.isEmpty ctx) docs starred = (bullet<+>) pprMessages :: Diagnostic e => DiagnosticOpts e -> Messages e -> SDoc pprMessages e = vcat . pprMsgEnvelopeBagWithLoc e . getMessages pprMsgEnvelopeBagWithLoc :: Diagnostic e => DiagnosticOpts e -> Bag (MsgEnvelope e) -> [SDoc] pprMsgEnvelopeBagWithLoc e bag = [ pprLocMsgEnvelope e item | item <- sortMsgBag Nothing bag ] -- | Print the messages with the suitable default configuration, usually not what you want but sometimes you don't really -- care about what the configuration is (for example, if the message is in a panic). pprMsgEnvelopeBagWithLocDefault :: forall e . 
Diagnostic e => Bag (MsgEnvelope e) -> [SDoc] pprMsgEnvelopeBagWithLocDefault bag = [ pprLocMsgEnvelopeDefault item | item <- sortMsgBag Nothing bag ] pprLocMsgEnvelopeDefault :: forall e . Diagnostic e => MsgEnvelope e -> SDoc pprLocMsgEnvelopeDefault = pprLocMsgEnvelope (defaultDiagnosticOpts @e) pprLocMsgEnvelope :: Diagnostic e => DiagnosticOpts e -> MsgEnvelope e -> SDoc pprLocMsgEnvelope opts (MsgEnvelope { errMsgSpan = s , errMsgDiagnostic = e , errMsgSeverity = sev , errMsgContext = name_ppr_ctx , errMsgReason = reason }) = withErrStyle name_ppr_ctx $ mkLocMessage (MCDiagnostic sev reason (diagnosticCode e)) s (formatBulleted $ diagnosticMessage opts e) sortMsgBag :: Maybe DiagOpts -> Bag (MsgEnvelope e) -> [MsgEnvelope e] sortMsgBag mopts = maybeLimit . sortBy (cmp `on` errMsgSpan) . bagToList where cmp | Just opts <- mopts , diag_reverse_errors opts = SrcLoc.rightmost_smallest | otherwise = SrcLoc.leftmost_smallest maybeLimit | Just opts <- mopts , Just err_limit <- diag_max_errors opts = take err_limit | otherwise = id ghcExit :: Logger -> Int -> IO () ghcExit logger val | val == 0 = exitWith ExitSuccess | otherwise = do errorMsg logger (text "\nCompilation had errors\n\n") exitWith (ExitFailure val) -- ----------------------------------------------------------------------------- -- Outputting messages from the compiler errorMsg :: Logger -> SDoc -> IO () errorMsg logger msg = logMsg logger errorDiagnostic noSrcSpan $ withPprStyle defaultErrStyle msg fatalErrorMsg :: Logger -> SDoc -> IO () fatalErrorMsg logger msg = logMsg logger MCFatal noSrcSpan $ withPprStyle defaultErrStyle msg compilationProgressMsg :: Logger -> SDoc -> IO () compilationProgressMsg logger msg = do let logflags = logFlags logger let str = renderWithContext (log_default_user_context logflags) (text "GHC progress: " <> msg) traceEventIO str when (logVerbAtLeast logger 1) $ logOutput logger $ withPprStyle defaultUserStyle msg showPass :: Logger -> String -> IO () showPass logger what = 
when (logVerbAtLeast logger 2) $ logInfo logger $ withPprStyle defaultUserStyle (text "***" <+> text what <> colon) data PrintTimings = PrintTimings | DontPrintTimings deriving (Eq, Show) -- | Time a compilation phase. -- -- When timings are enabled (e.g. with the @-v2@ flag), the allocations -- and CPU time used by the phase will be reported to stderr. Consider -- a typical usage: -- @withTiming getDynFlags (text "simplify") force PrintTimings pass@. -- When timings are enabled the following costs are included in the -- produced accounting, -- -- - The cost of executing @pass@ to a result @r@ in WHNF -- - The cost of evaluating @force r@ to WHNF (e.g. @()@) -- -- The choice of the @force@ function depends upon the amount of forcing -- desired; the goal here is to ensure that the cost of evaluating the result -- is, to the greatest extent possible, included in the accounting provided by -- 'withTiming'. Often the pass already sufficiently forces its result during -- construction; in this case @const ()@ is a reasonable choice. -- In other cases, it is necessary to evaluate the result to normal form, in -- which case something like @Control.DeepSeq.rnf@ is appropriate. -- -- To avoid adversely affecting compiler performance when timings are not -- requested, the result is only forced when timings are enabled. -- -- See Note [withTiming] for more. withTiming :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> m a -- ^ The body of the phase to be timed -> m a withTiming logger what force action = withTiming' logger what force PrintTimings action -- | Same as 'withTiming', but doesn't print timings in the -- console (when given @-vN@, @N >= 2@ or @-ddump-timings@). -- -- See Note [withTiming] for more. 
withTimingSilent :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> m a -- ^ The body of the phase to be timed -> m a withTimingSilent logger what force action = withTiming' logger what force DontPrintTimings action -- | Worker for 'withTiming' and 'withTimingSilent'. withTiming' :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> PrintTimings -- ^ Whether to print the timings -> m a -- ^ The body of the phase to be timed -> m a withTiming' logger what force_result prtimings action = if logVerbAtLeast logger 2 || logHasDumpFlag logger Opt_D_dump_timings then do when printTimingsNotDumpToFile $ liftIO $ logInfo logger $ withPprStyle defaultUserStyle $ text "***" <+> what <> colon let ctx = log_default_user_context (logFlags logger) alloc0 <- liftIO getAllocationCounter start <- liftIO getCPUTime eventBegins ctx what recordAllocs alloc0 !r <- action () <- pure $ force_result r eventEnds ctx what end <- liftIO getCPUTime alloc1 <- liftIO getAllocationCounter recordAllocs alloc1 -- recall that allocation counter counts down let alloc = alloc0 - alloc1 time = realToFrac (end - start) * 1e-9 when (logVerbAtLeast logger 2 && printTimingsNotDumpToFile) $ liftIO $ logInfo logger $ withPprStyle defaultUserStyle (text "!!!" <+> what <> colon <+> text "finished in" <+> doublePrec 2 time <+> text "milliseconds" <> comma <+> text "allocated" <+> doublePrec 3 (realToFrac alloc / 1024 / 1024) <+> text "megabytes") whenPrintTimings $ putDumpFileMaybe logger Opt_D_dump_timings "" FormatText $ text $ showSDocOneLine ctx $ hsep [ what <> colon , text "alloc=" <> ppr alloc , text "time=" <> doublePrec 3 time ] pure r else action where whenPrintTimings = liftIO . when printTimings printTimings = prtimings == PrintTimings -- Avoid both printing to console and dumping to a file (#20316). 
printTimingsNotDumpToFile = printTimings && not (log_dump_to_file (logFlags logger)) recordAllocs alloc = liftIO $ traceMarkerIO $ "GHC:allocs:" ++ show alloc eventBegins ctx w = do let doc = eventBeginsDoc ctx w whenPrintTimings $ traceMarkerIO doc liftIO $ traceEventIO doc eventEnds ctx w = do let doc = eventEndsDoc ctx w whenPrintTimings $ traceMarkerIO doc liftIO $ traceEventIO doc eventBeginsDoc ctx w = showSDocOneLine ctx $ text "GHC:started:" <+> w eventEndsDoc ctx w = showSDocOneLine ctx $ text "GHC:finished:" <+> w debugTraceMsg :: Logger -> Int -> SDoc -> IO () debugTraceMsg logger val msg = when (log_verbosity (logFlags logger) >= val) $ logInfo logger (withPprStyle defaultDumpStyle msg) {-# INLINE debugTraceMsg #-} -- see Note [INLINE conditional tracing utilities] putMsg :: Logger -> SDoc -> IO () putMsg logger msg = logInfo logger (withPprStyle defaultUserStyle msg) printInfoForUser :: Logger -> NamePprCtx -> SDoc -> IO () printInfoForUser logger name_ppr_ctx msg = logInfo logger (withUserStyle name_ppr_ctx AllTheWay msg) printOutputForUser :: Logger -> NamePprCtx -> SDoc -> IO () printOutputForUser logger name_ppr_ctx msg = logOutput logger (withUserStyle name_ppr_ctx AllTheWay msg) logInfo :: Logger -> SDoc -> IO () logInfo logger msg = logMsg logger MCInfo noSrcSpan msg -- | Like 'logInfo' but with 'SevOutput' rather then 'SevInfo' logOutput :: Logger -> SDoc -> IO () logOutput logger msg = logMsg logger MCOutput noSrcSpan msg prettyPrintGhcErrors :: ExceptionMonad m => Logger -> m a -> m a prettyPrintGhcErrors logger = do let ctx = log_default_user_context (logFlags logger) MC.handle $ \e -> case e of PprPanic str doc -> pprDebugAndThen ctx panic (text str) doc PprSorry str doc -> pprDebugAndThen ctx sorry (text str) doc PprProgramError str doc -> pprDebugAndThen ctx pgmError (text str) doc _ -> liftIO $ throwIO e -- | Trace a command (when verbosity level >= 3) traceCmd :: Logger -> String -> String -> IO a -> IO a traceCmd logger phase_name 
cmd_line action = do showPass logger phase_name let cmd_doc = text cmd_line handle_exn exn = do debugTraceMsg logger 2 (char '\n') debugTraceMsg logger 2 (text "Failed:" <+> cmd_doc <+> text (show exn)) throwGhcExceptionIO (ProgramError (show exn)) debugTraceMsg logger 3 cmd_doc loggerTraceFlush logger -- And run it! action `catchIO` handle_exn -- * Tracing utility -- | Record in the eventlog when the given tool command starts -- and finishes, prepending the given 'String' with -- \"systool:\", to easily be able to collect and process -- all the systool events. -- -- For those events to show up in the eventlog, you need -- to run GHC with @-v2@ or @-ddump-timings@. traceSystoolCommand :: Logger -> String -> IO a -> IO a traceSystoolCommand logger tool = withTiming logger (text "systool:" <> text tool) (const ()) {- Note [withTiming] ~~~~~~~~~~~~~~~~~~~~ For reference: withTiming :: MonadIO => m DynFlags -- how to get the DynFlags -> SDoc -- label for the computation we're timing -> (a -> ()) -- how to evaluate the result -> PrintTimings -- whether to report the timings when passed -- -v2 or -ddump-timings -> m a -- computation we're timing -> m a withTiming lets you run an action while: (1) measuring the CPU time it took and reporting that on stderr (when PrintTimings is passed), (2) emitting start/stop events to GHC's event log, with the label given as an argument. Evaluation of the result ------------------------ 'withTiming' takes as an argument a function of type 'a -> ()', whose purpose is to evaluate the result "sufficiently". A given pass might return an 'm a' for some monad 'm' and result type 'a', but where the 'a' is complex enough that evaluating it to WHNF barely scratches its surface and leaves many complex and time-consuming computations unevaluated. Those would only be forced by the next pass, and the time needed to evaluate them would be mis-attributed to that next pass. 
A more appropriate function would be one that deeply evaluates the result, so as to assign the time spent doing it to the pass we're timing. Note: as hinted at above, the time spent evaluating the application of the forcing function to the result is included in the timings reported by 'withTiming'. How we use it ------------- We measure the time and allocations of various passes in GHC's pipeline by just wrapping the whole pass with 'withTiming'. This also materializes by having a label for each pass in the eventlog, where each pass is executed in one go, during a continuous time window. However, from STG onwards, the pipeline uses streams to emit groups of STG/Cmm/etc declarations one at a time, and process them until we get to assembly code generation. This means that the execution of those last few passes is interleaved and that we cannot measure how long they take by just wrapping the whole thing with 'withTiming'. Instead we wrap the processing of each individual stream element, all along the codegen pipeline, using the appropriate label for the pass to which this processing belongs. That generates a lot more data but allows us to get fine-grained timings about all the passes and we can easily compute totals with tools like ghc-events-analyze (see below). Producing an eventlog for GHC ----------------------------- You can produce an eventlog when compiling, for instance, hello.hs by simply running: If GHC was built by Hadrian: $ _build/stage1/bin/ghc -ddump-timings hello.hs -o hello +RTS -l If GHC was built with Make: $ inplace/bin/ghc-stage2 -ddump-timing hello.hs -o hello +RTS -l You could alternatively use -v (with N >= 2) instead of -ddump-timings, to ask GHC to report timings (on stderr and the eventlog). This will write the eventlog to ./ghc.eventlog in both cases. You can then visualize it or look at the totals for each label by using ghc-events-analyze, threadscope or any other eventlog consumer. 
Illustrating with ghc-events-analyze: $ ghc-events-analyze --timed --timed-txt --totals \ --start "GHC:started:" --stop "GHC:finished:" \ ghc.eventlog This produces ghc.timed.txt (all event timestamps), ghc.timed.svg (visualisation of the execution through the various labels) and ghc.totals.txt (total time spent in each label). -} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Exception.hs0000644000000000000000000000124707346545000021304 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-deprecations #-} {-# LANGUAGE ConstraintKinds #-} module GHC.Utils.Exception ( module CE, module GHC.Utils.Exception ) where import GHC.Prelude.Basic import GHC.IO (catchException) import Control.Exception as CE hiding (assert) import Control.Monad.IO.Class import Control.Monad.Catch -- Monomorphised versions of exception-handling utilities catchIO :: IO a -> (IOException -> IO a) -> IO a catchIO = catchException handleIO :: (IOException -> IO a) -> IO a -> IO a handleIO = flip catchIO tryIO :: IO a -> IO (Either IOException a) tryIO = CE.try type ExceptionMonad m = (MonadCatch m, MonadThrow m, MonadMask m, MonadIO m) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/FV.hs0000644000000000000000000001562207346545000017663 0ustar0000000000000000{- (c) Bartosz Nitka, Facebook 2015 -} -- | Utilities for efficiently and deterministically computing free variables. 
module GHC.Utils.FV ( -- * Deterministic free vars computations FV, InterestingVarFun, -- * Running the computations fvVarList, fvVarSet, fvDVarSet, -- ** Manipulating those computations unitFV, emptyFV, mkFVs, unionFV, unionsFV, delFV, delFVs, filterFV, mapUnionFV, fvDVarSetSome, ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Set -- | Predicate on possible free variables: returns @True@ iff the variable is -- interesting type InterestingVarFun = Var -> Bool -- Note [Deterministic FV] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- When computing free variables, the order in which you get them affects -- the results of floating and specialization. If you use UniqFM to collect -- them and then turn that into a list, you get them in nondeterministic -- order as described in Note [Deterministic UniqFM] in GHC.Types.Unique.DFM. -- A naive algorithm for free variables relies on merging sets of variables. -- Merging costs O(n+m) for UniqFM and for UniqDFM there's an additional log -- factor. It's cheaper to incrementally add to a list and use a set to check -- for duplicates. type FV = InterestingVarFun -- Used for filtering sets as we build them -> VarSet -- Locally bound variables -> VarAcc -- Accumulator -> VarAcc type VarAcc = ([Var], VarSet) -- List to preserve ordering and set to check for membership, -- so that the list doesn't have duplicates -- For explanation of why using `VarSet` is not deterministic see -- Note [Deterministic UniqFM] in GHC.Types.Unique.DFM. -- Note [FV naming conventions] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- To get the performance and determinism that FV provides, FV computations -- need to built up from smaller FV computations and then evaluated with -- one of `fvVarList`, `fvDVarSet` That means the functions -- returning FV need to be exported. -- -- The conventions are: -- -- a) non-deterministic functions: -- * a function that returns VarSet -- e.g. `tyVarsOfType` -- b) deterministic functions: -- * a worker that returns FV -- e.g. 
`tyFVsOfType` -- * a function that returns [Var] -- e.g. `tyVarsOfTypeList` -- * a function that returns DVarSet -- e.g. `tyVarsOfTypeDSet` -- -- Where tyVarsOfType, tyVarsOfTypeList, tyVarsOfTypeDSet are implemented -- in terms of the worker evaluated with fvVarSet, fvVarList, fvDVarSet -- respectively. -- | Run a free variable computation, returning a list of distinct free -- variables in deterministic order and a non-deterministic set containing -- those variables. fvVarAcc :: FV -> ([Var], VarSet) fvVarAcc fv = fv (const True) emptyVarSet ([], emptyVarSet) -- | Run a free variable computation, returning a list of distinct free -- variables in deterministic order. fvVarList :: FV -> [Var] fvVarList = fst . fvVarAcc -- | Run a free variable computation, returning a deterministic set of free -- variables. Note that this is just a wrapper around the version that -- returns a deterministic list. If you need a list you should use -- `fvVarList`. fvDVarSet :: FV -> DVarSet fvDVarSet = mkDVarSet . fvVarList -- | Run a free variable computation, returning a non-deterministic set of -- free variables. Don't use if the set will be later converted to a list -- and the order of that list will impact the generated code. fvVarSet :: FV -> VarSet fvVarSet = snd . fvVarAcc -- Note [FV eta expansion] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- Let's consider an eta-reduced implementation of freeVarsOf using FV: -- -- freeVarsOf (App a b) = freeVarsOf a `unionFV` freeVarsOf b -- -- If GHC doesn't eta-expand it, after inlining unionFV we end up with -- -- freeVarsOf = \x -> -- case x of -- App a b -> \fv_cand in_scope acc -> -- freeVarsOf a fv_cand in_scope $! freeVarsOf b fv_cand in_scope $! acc -- -- which has to create a thunk, resulting in more allocations. 
-- -- On the other hand if it is eta-expanded: -- -- freeVarsOf (App a b) fv_cand in_scope acc = -- (freeVarsOf a `unionFV` freeVarsOf b) fv_cand in_scope acc -- -- after inlining unionFV we have: -- -- freeVarsOf = \x fv_cand in_scope acc -> -- case x of -- App a b -> -- freeVarsOf a fv_cand in_scope $! freeVarsOf b fv_cand in_scope $! acc -- -- which saves allocations. -- -- GHC when presented with knowledge about all the call sites, correctly -- eta-expands in this case. Unfortunately due to the fact that freeVarsOf gets -- exported to be composed with other functions, GHC doesn't have that -- information and has to be more conservative here. -- -- Hence functions that get exported and return FV need to be manually -- eta-expanded. See also #11146. -- | Add a variable - when free, to the returned free variables. -- Ignores duplicates and respects the filtering function. unitFV :: Id -> FV unitFV var fv_cand in_scope acc@(have, haveSet) | var `elemVarSet` in_scope = acc | var `elemVarSet` haveSet = acc | fv_cand var = (var:have, extendVarSet haveSet var) | otherwise = acc {-# INLINE unitFV #-} -- | Return no free variables. emptyFV :: FV emptyFV _ _ acc = acc {-# INLINE emptyFV #-} -- | Union two free variable computations. unionFV :: FV -> FV -> FV unionFV fv1 fv2 fv_cand in_scope acc = fv1 fv_cand in_scope $! fv2 fv_cand in_scope $! acc {-# INLINE unionFV #-} -- | Mark the variable as not free by putting it in scope. delFV :: Var -> FV -> FV delFV var fv fv_cand !in_scope acc = fv fv_cand (extendVarSet in_scope var) acc {-# INLINE delFV #-} -- | Mark many free variables as not free. delFVs :: VarSet -> FV -> FV delFVs vars fv fv_cand !in_scope acc = fv fv_cand (in_scope `unionVarSet` vars) acc {-# INLINE delFVs #-} -- | Filter a free variable computation. 
filterFV :: InterestingVarFun -> FV -> FV filterFV fv_cand2 fv fv_cand1 in_scope acc = fv (\v -> fv_cand1 v && fv_cand2 v) in_scope acc {-# INLINE filterFV #-} -- | Map a free variable computation over a list and union the results. mapUnionFV :: (a -> FV) -> [a] -> FV mapUnionFV _f [] _fv_cand _in_scope acc = acc mapUnionFV f (a:as) fv_cand in_scope acc = mapUnionFV f as fv_cand in_scope $! f a fv_cand in_scope $! acc {-# INLINABLE mapUnionFV #-} -- | Union many free variable computations. unionsFV :: [FV] -> FV unionsFV fvs fv_cand in_scope acc = mapUnionFV id fvs fv_cand in_scope acc {-# INLINE unionsFV #-} -- | Add multiple variables - when free, to the returned free variables. -- Ignores duplicates and respects the filtering function. mkFVs :: [Var] -> FV mkFVs vars fv_cand in_scope acc = mapUnionFV unitFV vars fv_cand in_scope acc {-# INLINE mkFVs #-} fvDVarSetSome :: InterestingVarFun -> FV -> DVarSet fvDVarSetSome interesting_var fv = mkDVarSet $ fst $ fv interesting_var emptyVarSet ([], emptyVarSet) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Fingerprint.hs0000644000000000000000000000300507346545000021627 0ustar0000000000000000 {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- ---------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2006 -- -- Fingerprints for recompilation checking and ABI versioning. 
-- -- https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/recompilation-avoidance -- -- ---------------------------------------------------------------------------- module GHC.Utils.Fingerprint ( readHexFingerprint, fingerprintByteString, -- * Re-exported from GHC.Fingerprint Fingerprint(..), fingerprint0, fingerprintFingerprints, fingerprintData, fingerprintString, fingerprintStrings, getFileHash ) where import GHC.Prelude.Basic import Foreign import GHC.IO import Numeric ( readHex ) import qualified Data.ByteString as BS import qualified Data.ByteString.Unsafe as BS import GHC.Fingerprint -- useful for parsing the output of 'md5sum', should we want to do that. readHexFingerprint :: String -> Fingerprint readHexFingerprint s = Fingerprint w1 w2 where (s1,s2) = splitAt 16 s [(w1,"")] = readHex s1 [(w2,"")] = readHex (take 16 s2) fingerprintByteString :: BS.ByteString -> Fingerprint fingerprintByteString bs = unsafeDupablePerformIO $ BS.unsafeUseAsCStringLen bs $ \(ptr, len) -> fingerprintData (castPtr ptr) len -- See Note [Repeated -optP hashing] fingerprintStrings :: [String] -> Fingerprint fingerprintStrings ss = fingerprintFingerprints $ map fingerprintString ss ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/GlobalVars.hs0000644000000000000000000001036107346545000021377 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_GHC -fno-cse #-} -- -fno-cse is needed for GLOBAL_VAR's to behave properly -- | Do not use global variables! -- -- Global variables are a hack. Do not use them if you can help it. 
module GHC.Utils.GlobalVars ( v_unsafeHasPprDebug , v_unsafeHasNoDebugOutput , v_unsafeHasNoStateHack , unsafeHasPprDebug , unsafeHasNoDebugOutput , unsafeHasNoStateHack , global , consIORef , globalM , sharedGlobal , sharedGlobalM ) where import GHC.Prelude.Basic import GHC.Conc.Sync ( sharedCAF ) import System.IO.Unsafe import Data.IORef import Foreign (Ptr) #define GLOBAL_VAR(name,value,ty) \ {-# NOINLINE name #-}; \ name :: IORef (ty); \ name = global (value); #define GLOBAL_VAR_M(name,value,ty) \ {-# NOINLINE name #-}; \ name :: IORef (ty); \ name = globalM (value); #define SHARED_GLOBAL_VAR(name,accessor,saccessor,value,ty) \ {-# NOINLINE name #-}; \ name :: IORef (ty); \ name = sharedGlobal (value) (accessor); \ foreign import ccall unsafe saccessor \ accessor :: Ptr (IORef a) -> IO (Ptr (IORef a)); #define SHARED_GLOBAL_VAR_M(name,accessor,saccessor,value,ty) \ {-# NOINLINE name #-}; \ name :: IORef (ty); \ name = sharedGlobalM (value) (accessor); \ foreign import ccall unsafe saccessor \ accessor :: Ptr (IORef a) -> IO (Ptr (IORef a)); #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) GLOBAL_VAR(v_unsafeHasPprDebug, False, Bool) GLOBAL_VAR(v_unsafeHasNoDebugOutput, False, Bool) GLOBAL_VAR(v_unsafeHasNoStateHack, False, Bool) #else SHARED_GLOBAL_VAR( v_unsafeHasPprDebug , getOrSetLibHSghcGlobalHasPprDebug , "getOrSetLibHSghcGlobalHasPprDebug" , False , Bool ) SHARED_GLOBAL_VAR( v_unsafeHasNoDebugOutput , getOrSetLibHSghcGlobalHasNoDebugOutput , "getOrSetLibHSghcGlobalHasNoDebugOutput" , False , Bool ) SHARED_GLOBAL_VAR( v_unsafeHasNoStateHack , getOrSetLibHSghcGlobalHasNoStateHack , "getOrSetLibHSghcGlobalHasNoStateHack" , False , Bool ) #endif unsafeHasPprDebug :: Bool unsafeHasPprDebug = unsafePerformIO $ readIORef v_unsafeHasPprDebug unsafeHasNoDebugOutput :: Bool unsafeHasNoDebugOutput = unsafePerformIO $ readIORef v_unsafeHasNoDebugOutput unsafeHasNoStateHack :: Bool unsafeHasNoStateHack = unsafePerformIO $ readIORef v_unsafeHasNoStateHack {- 
************************************************************************ * * Globals and the RTS * * ************************************************************************ When a plugin is loaded, it currently gets linked against a *newly loaded* copy of the GHC package. This would not be a problem, except that the new copy has its own mutable state that is not shared with that state that has already been initialized by the original GHC package. (Note that if the GHC executable was dynamically linked this wouldn't be a problem, because we could share the GHC library it links to; this is only a problem if DYNAMIC_GHC_PROGRAMS=NO.) The solution is to make use of @sharedCAF@ through @sharedGlobal@ for globals that are shared between multiple copies of ghc packages. -} -- Global variables: global :: a -> IORef a global a = unsafePerformIO (newIORef a) consIORef :: IORef [a] -> a -> IO () consIORef var x = atomicModifyIORef' var (\xs -> (x:xs,())) globalM :: IO a -> IORef a globalM ma = unsafePerformIO (ma >>= newIORef) -- Shared global variables: sharedGlobal :: a -> (Ptr (IORef a) -> IO (Ptr (IORef a))) -> IORef a sharedGlobal a get_or_set = unsafePerformIO $ newIORef a >>= flip sharedCAF get_or_set sharedGlobalM :: IO a -> (Ptr (IORef a) -> IO (Ptr (IORef a))) -> IORef a sharedGlobalM ma get_or_set = unsafePerformIO $ ma >>= newIORef >>= flip sharedCAF get_or_set ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/IO/0000755000000000000000000000000007346545000017315 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/IO/Unsafe.hs0000644000000000000000000000062207346545000021072 0ustar0000000000000000{- (c) The University of Glasgow, 2000-2006 -} {-# LANGUAGE MagicHash, UnboxedTuples #-} module GHC.Utils.IO.Unsafe ( inlinePerformIO, ) where import GHC.Prelude.Basic () import GHC.Exts import GHC.IO (IO(..)) -- Just like unsafeDupablePerformIO, but we inline it. 
{-# INLINE inlinePerformIO #-} inlinePerformIO :: IO a -> a inlinePerformIO (IO m) = case m realWorld# of (# _, r #) -> r ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Json.hs0000644000000000000000000000334007346545000020253 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Utils.Json where import GHC.Prelude import GHC.Utils.Outputable import Data.Char import Numeric -- | Simple data type to represent JSON documents. data JsonDoc where JSNull :: JsonDoc JSBool :: Bool -> JsonDoc JSInt :: Int -> JsonDoc JSString :: String -> JsonDoc -- ^ The 'String' is unescaped JSArray :: [JsonDoc] -> JsonDoc JSObject :: [(String, JsonDoc)] -> JsonDoc -- This is simple and slow as it is only used for error reporting renderJSON :: JsonDoc -> SDoc renderJSON d = case d of JSNull -> text "null" JSBool b -> if b then text "true" else text "false" JSInt n -> ppr n JSString s -> doubleQuotes $ text $ escapeJsonString s JSArray as -> brackets $ pprList renderJSON as JSObject fs -> braces $ pprList renderField fs where renderField :: (String, JsonDoc) -> SDoc renderField (s, j) = doubleQuotes (text s) <> colon <> renderJSON j pprList pp xs = hcat (punctuate comma (map pp xs)) escapeJsonString :: String -> String escapeJsonString = concatMap escapeChar where escapeChar '\b' = "\\b" escapeChar '\f' = "\\f" escapeChar '\n' = "\\n" escapeChar '\r' = "\\r" escapeChar '\t' = "\\t" escapeChar '"' = "\\\"" escapeChar '\\' = "\\\\" escapeChar c | isControl c || fromEnum c >= 0x7f = uni_esc c escapeChar c = [c] uni_esc c = "\\u" ++ (pad 4 (showHex (fromEnum c) "")) pad n cs | len < n = replicate (n-len) '0' ++ cs | otherwise = cs where len = length cs class ToJson a where json :: a -> JsonDoc instance ToJson String where json = JSString instance ToJson Int where json = JSInt ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Lexeme.hs0000644000000000000000000002075207346545000020567 0ustar0000000000000000-- (c) The GHC Team -- -- Functions to evaluate 
whether or not a string is a valid identifier. -- There is considerable overlap between the logic here and the logic -- in GHC.Parser.Lexer, but sadly there seems to be no way to merge them. module GHC.Utils.Lexeme ( -- * Lexical characteristics of Haskell names -- | Use these functions to figure what kind of name a 'FastString' -- represents; these functions do /not/ check that the identifier -- is valid. isLexCon, isLexVar, isLexId, isLexSym, isLexConId, isLexConSym, isLexVarId, isLexVarSym, startsVarSym, startsVarId, startsConSym, startsConId, -- * Validating identifiers -- | These functions (working over plain old 'String's) check -- to make sure that the identifier is valid. okVarOcc, okConOcc, okTcOcc, okVarIdOcc, okVarSymOcc, okConIdOcc, okConSymOcc -- Some of the exports above are not used within GHC, but may -- be of value to GHC API users. ) where import GHC.Prelude import GHC.Data.FastString import Data.Char import qualified Data.Set as Set import GHC.Lexeme {- ************************************************************************ * * Lexical categories * * ************************************************************************ These functions test strings to see if they fit the lexical categories defined in the Haskell report. Note [Classification of generated names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some names generated for internal use can show up in debugging output, e.g. when using -ddump-simpl. These generated names start with a $ but should still be pretty-printed using prefix notation. We make sure this is the case in isLexVarSym by only classifying a name as a symbol if all its characters are symbols, not just its first one. 
-} isLexCon, isLexVar, isLexId, isLexSym :: FastString -> Bool isLexConId, isLexConSym, isLexVarId, isLexVarSym :: FastString -> Bool isLexCon cs = isLexConId cs || isLexConSym cs isLexVar cs = isLexVarId cs || isLexVarSym cs isLexId cs = isLexConId cs || isLexVarId cs isLexSym cs = isLexConSym cs || isLexVarSym cs ------------- isLexConId cs = case unpackFS cs of -- Prefix type or data constructors [] -> False -- e.g. "Foo", "[]", "(,)" c:_ -> cs == fsLit "[]" || startsConId c isLexVarId cs = case unpackFS cs of -- Ordinary prefix identifiers [] -> False -- e.g. "x", "_x" c:_ -> startsVarId c isLexConSym cs = case unpackFS cs of -- Infix type or data constructors [] -> False -- e.g. ":-:", ":", "->" c:_ -> cs == fsLit "->" || startsConSym c isLexVarSym fs -- Infix identifiers e.g. "+" | fs == (fsLit "~R#") = True | otherwise = case (if nullFS fs then [] else unpackFS fs) of [] -> False (c:cs) -> startsVarSym c && all isVarSymChar cs -- See Note [Classification of generated names] {- ************************************************************************ * * Detecting valid names for Template Haskell * * ************************************************************************ -} ---------------------- -- External interface ---------------------- -- | Is this an acceptable variable name? okVarOcc :: String -> Bool okVarOcc str@(c:_) | startsVarId c = okVarIdOcc str | startsVarSym c = okVarSymOcc str okVarOcc _ = False -- | Is this an acceptable constructor name? okConOcc :: String -> Bool okConOcc str@(c:_) | startsConId c = okConIdOcc str | startsConSym c = okConSymOcc str | str == "[]" = True okConOcc _ = False -- | Is this an acceptable type name? 
okTcOcc :: String -> Bool okTcOcc "[]" = True okTcOcc "->" = True okTcOcc "~" = True okTcOcc str@(c:_) | startsConId c = okConIdOcc str | startsConSym c = okConSymOcc str | startsVarSym c = okVarSymOcc str okTcOcc _ = False -- | Is this an acceptable alphanumeric variable name, assuming it starts -- with an acceptable letter? okVarIdOcc :: String -> Bool okVarIdOcc str = okIdOcc str && -- admit "_" as a valid identifier. Required to support typed -- holes in Template Haskell. See #10267 (str == "_" || not (str `Set.member` reservedIds)) -- | Is this an acceptable symbolic variable name, assuming it starts -- with an acceptable character? okVarSymOcc :: String -> Bool okVarSymOcc str = all okSymChar str && not (str `Set.member` reservedOps) && not (isDashes str) -- | Is this an acceptable alphanumeric constructor name, assuming it -- starts with an acceptable letter? okConIdOcc :: String -> Bool okConIdOcc str = okIdOcc str || is_tuple_name1 True str || -- Is it a boxed tuple... is_tuple_name1 False str || -- ...or an unboxed tuple (#12407)... is_sum_name1 str -- ...or an unboxed sum (#12514)? 
where -- check for tuple name, starting at the beginning is_tuple_name1 True ('(' : rest) = is_tuple_name2 True rest is_tuple_name1 False ('(' : '#' : rest) = is_tuple_name2 False rest is_tuple_name1 _ _ = False -- check for tuple tail is_tuple_name2 True ")" = True is_tuple_name2 False "#)" = True is_tuple_name2 boxed (',' : rest) = is_tuple_name2 boxed rest is_tuple_name2 boxed (ws : rest) | isSpace ws = is_tuple_name2 boxed rest is_tuple_name2 _ _ = False -- check for sum name, starting at the beginning is_sum_name1 ('(' : '#' : rest) = is_sum_name2 False rest is_sum_name1 _ = False -- check for sum tail, only allowing at most one underscore is_sum_name2 _ "#)" = True is_sum_name2 underscore ('|' : rest) = is_sum_name2 underscore rest is_sum_name2 False ('_' : rest) = is_sum_name2 True rest is_sum_name2 underscore (ws : rest) | isSpace ws = is_sum_name2 underscore rest is_sum_name2 _ _ = False -- | Is this an acceptable symbolic constructor name, assuming it -- starts with an acceptable character? okConSymOcc :: String -> Bool okConSymOcc ":" = True okConSymOcc str = all okSymChar str && not (str `Set.member` reservedOps) ---------------------- -- Internal functions ---------------------- -- | Is this string an acceptable id, possibly with a suffix of hashes, -- but not worrying about case or clashing with reserved words? okIdOcc :: String -> Bool okIdOcc str = let hashes = dropWhile okIdChar str in all (== '#') hashes -- -XMagicHash allows a suffix of hashes -- of course, `all` says "True" to an empty list -- | Is this character acceptable in an identifier (after the first letter)? 
-- See alexGetByte in GHC.Parser.Lexer okIdChar :: Char -> Bool okIdChar c = case generalCategory c of UppercaseLetter -> True LowercaseLetter -> True TitlecaseLetter -> True ModifierLetter -> True -- See #10196 OtherLetter -> True -- See #1103 NonSpacingMark -> True -- See #7650 DecimalNumber -> True OtherNumber -> True -- See #4373 _ -> c == '\'' || c == '_' -- | All reserved identifiers. Taken from section 2.4 of the 2010 Report, -- plus the GHC-specific @forall@ keyword (see GHC Proposal #281). reservedIds :: Set.Set String reservedIds = Set.fromList [ "case", "class", "data", "default", "deriving" , "do", "else", "forall", "foreign", "if", "import" , "in", "infix", "infixl", "infixr", "instance" , "let", "module", "newtype", "of", "then", "type" , "where", "_" ] -- | All reserved operators. Taken from section 2.4 of the 2010 Report, -- excluding @\@@ and @~@ that are allowed by GHC (see GHC Proposal #229). reservedOps :: Set.Set String reservedOps = Set.fromList [ "..", ":", "::", "=", "\\", "|", "<-", "->" , "=>" ] -- | Does this string contain only dashes and has at least 2 of them? isDashes :: String -> Bool isDashes ('-' : '-' : rest) = all (== '-') rest isDashes _ = False ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Logger.hs0000644000000000000000000005650007346545000020567 0ustar0000000000000000{-# LANGUAGE RankNTypes #-} -- | Logger -- -- The Logger is an configurable entity that is used by the compiler to output -- messages on the console (stdout, stderr) and in dump files. -- -- The behaviour of default Logger returned by `initLogger` can be modified with -- hooks. The compiler itself uses hooks in multithreaded code (--make) and it -- is also probably used by ghc-api users (IDEs, etc.). -- -- In addition to hooks, the Logger supports LogFlags: basically a subset of the -- command-line flags that control the logger behaviour at a higher level than -- hooks. -- -- 1. Hooks are used to define how to generate a info/warning/error/dump messages -- 2. 
LogFlags are used to decide when and how to generate messages -- module GHC.Utils.Logger ( Logger , HasLogger (..) , ContainsLogger (..) -- * Logger setup , initLogger , LogAction , LogJsonAction , DumpAction , TraceAction , DumpFormat (..) -- ** Hooks , popLogHook , pushLogHook , popJsonLogHook , pushJsonLogHook , popDumpHook , pushDumpHook , popTraceHook , pushTraceHook , makeThreadSafe -- ** Flags , LogFlags (..) , defaultLogFlags , log_dopt , log_set_dopt , setLogFlags , updateLogFlags , logFlags , logHasDumpFlag , logVerbAtLeast -- * Logging , putLogMsg , defaultLogAction , defaultLogJsonAction , defaultLogActionHPrintDoc , defaultLogActionHPutStrDoc , logMsg , logJsonMsg , logDumpMsg -- * Dumping , defaultDumpAction , putDumpFile , putDumpFileMaybe , putDumpFileMaybe' , withDumpFileHandle , touchDumpFile , logDumpFile -- * Tracing , defaultTraceAction , putTraceMsg , loggerTraceFlushUpdate , loggerTraceFlush , logTraceMsg ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Types.Error import GHC.Types.SrcLoc import qualified GHC.Utils.Ppr as Pretty import GHC.Utils.Outputable import GHC.Utils.Json import GHC.Utils.Panic import GHC.Data.EnumSet (EnumSet) import qualified GHC.Data.EnumSet as EnumSet import GHC.Data.FastString import System.Directory import System.FilePath ( takeDirectory, () ) import qualified Data.Map.Strict as Map import Data.Map.Strict (Map) import Data.List (stripPrefix) import Data.Time import System.IO import Control.Monad import Control.Concurrent.MVar import System.IO.Unsafe import Debug.Trace (trace) import GHC.Platform.Ways --------------------------------------------------------------- -- Log flags --------------------------------------------------------------- -- | Logger flags data LogFlags = LogFlags { log_default_user_context :: SDocContext , log_default_dump_context :: SDocContext , log_dump_flags :: !(EnumSet DumpFlag) -- ^ Dump flags , log_show_caret :: !Bool -- ^ Show caret in diagnostics , log_diagnostics_as_json 
:: !Bool -- ^ Format diagnostics as JSON , log_show_warn_groups :: !Bool -- ^ Show warning flag groups , log_enable_timestamps :: !Bool -- ^ Enable timestamps , log_dump_to_file :: !Bool -- ^ Enable dump to file , log_dump_dir :: !(Maybe FilePath) -- ^ Dump directory , log_dump_prefix :: !FilePath -- ^ Normal dump path ("basename.") , log_dump_prefix_override :: !(Maybe FilePath) -- ^ Overriden dump path , log_with_ways :: !Bool -- ^ Use different dump files names for different ways , log_enable_debug :: !Bool -- ^ Enable debug output , log_verbosity :: !Int -- ^ Verbosity level , log_ways :: !(Maybe Ways) -- ^ Current ways (to name dump files) } -- | Default LogFlags defaultLogFlags :: LogFlags defaultLogFlags = LogFlags { log_default_user_context = defaultSDocContext , log_default_dump_context = defaultSDocContext , log_dump_flags = EnumSet.empty , log_show_caret = True , log_diagnostics_as_json = False , log_show_warn_groups = True , log_enable_timestamps = True , log_dump_to_file = False , log_dump_dir = Nothing , log_dump_prefix = "" , log_dump_prefix_override = Nothing , log_with_ways = True , log_enable_debug = False , log_verbosity = 0 , log_ways = Nothing } -- | Test if a DumpFlag is enabled log_dopt :: DumpFlag -> LogFlags -> Bool log_dopt = getDumpFlagFrom log_verbosity log_dump_flags -- | Enable a DumpFlag log_set_dopt :: DumpFlag -> LogFlags -> LogFlags log_set_dopt f logflags = logflags { log_dump_flags = EnumSet.insert f (log_dump_flags logflags) } -- | Test if a DumpFlag is set logHasDumpFlag :: Logger -> DumpFlag -> Bool logHasDumpFlag logger f = log_dopt f (logFlags logger) -- | Test if verbosity is >= to the given value logVerbAtLeast :: Logger -> Int -> Bool logVerbAtLeast logger v = log_verbosity (logFlags logger) >= v -- | Update LogFlags updateLogFlags :: Logger -> (LogFlags -> LogFlags) -> Logger updateLogFlags logger f = setLogFlags logger (f (logFlags logger)) -- | Set LogFlags setLogFlags :: Logger -> LogFlags -> Logger setLogFlags logger 
flags = logger { logFlags = flags } --------------------------------------------------------------- -- Logger --------------------------------------------------------------- type LogAction = LogFlags -> MessageClass -> SrcSpan -> SDoc -> IO () type LogJsonAction = LogFlags -> MessageClass -> JsonDoc -> IO () type DumpAction = LogFlags -> PprStyle -> DumpFlag -> String -> DumpFormat -> SDoc -> IO () type TraceAction a = LogFlags -> String -> SDoc -> a -> a -- | Format of a dump -- -- Dump formats are loosely defined: dumps may contain various additional -- headers and annotations and they may be partial. 'DumpFormat' is mainly a hint -- (e.g. for syntax highlighters). data DumpFormat = FormatHaskell -- ^ Haskell | FormatCore -- ^ Core | FormatSTG -- ^ STG | FormatByteCode -- ^ ByteCode | FormatCMM -- ^ Cmm | FormatASM -- ^ Assembly code | FormatC -- ^ C code/header | FormatLLVM -- ^ LLVM bytecode | FormatJS -- ^ JavaScript code | FormatText -- ^ Unstructured dump deriving (Show,Eq) -- | A set of the dump files to which we have written thusfar. Each dump file -- has a corresponding MVar to ensure that a dump file has at most one active -- writer at a time, avoiding interleaved output. type DumpCache = MVar (Map FilePath (MVar ())) data Logger = Logger { log_hook :: [LogAction -> LogAction] -- ^ Log hooks stack , json_log_hook :: [LogJsonAction -> LogJsonAction] -- ^ Json log hooks stack , dump_hook :: [DumpAction -> DumpAction] -- ^ Dump hooks stack , trace_hook :: forall a. 
[TraceAction a -> TraceAction a] -- ^ Trace hooks stack , generated_dumps :: DumpCache -- ^ Already dumped files (to append instead of overwriting them) , trace_flush :: IO () -- ^ Flush the trace buffer , logFlags :: !LogFlags -- ^ Logger flags } -- | Set the trace flushing function -- -- The currently set trace flushing function is passed to the updating function loggerTraceFlushUpdate :: Logger -> (IO () -> IO ()) -> Logger loggerTraceFlushUpdate logger upd = logger { trace_flush = upd (trace_flush logger) } -- | Calls the trace flushing function loggerTraceFlush :: Logger -> IO () loggerTraceFlush logger = trace_flush logger -- | Default trace flushing function (flush stderr) defaultTraceFlush :: IO () defaultTraceFlush = hFlush stderr initLogger :: IO Logger initLogger = do dumps <- newMVar Map.empty return $ Logger { log_hook = [] , json_log_hook = [] , dump_hook = [] , trace_hook = [] , generated_dumps = dumps , trace_flush = defaultTraceFlush , logFlags = defaultLogFlags } -- | Log something putLogMsg :: Logger -> LogAction putLogMsg logger = foldr ($) defaultLogAction (log_hook logger) -- | Log a JsonDoc putJsonLogMsg :: Logger -> LogJsonAction putJsonLogMsg logger = foldr ($) defaultLogJsonAction (json_log_hook logger) -- | Dump something putDumpFile :: Logger -> DumpAction putDumpFile logger = let fallback = putLogMsg logger dumps = generated_dumps logger deflt = defaultDumpAction dumps fallback in foldr ($) deflt (dump_hook logger) -- | Trace something putTraceMsg :: Logger -> TraceAction a putTraceMsg logger = foldr ($) defaultTraceAction (trace_hook logger) -- | Push a log hook pushLogHook :: (LogAction -> LogAction) -> Logger -> Logger pushLogHook h logger = logger { log_hook = h:log_hook logger } -- | Pop a log hook popLogHook :: Logger -> Logger popLogHook logger = case log_hook logger of [] -> panic "popLogHook: empty hook stack" _:hs -> logger { log_hook = hs } -- | Push a json log hook pushJsonLogHook :: (LogJsonAction -> LogJsonAction) -> 
Logger -> Logger pushJsonLogHook h logger = logger { json_log_hook = h:json_log_hook logger } popJsonLogHook :: Logger -> Logger popJsonLogHook logger = case json_log_hook logger of [] -> panic "popJsonLogHook: empty hook stack" _:hs -> logger { json_log_hook = hs} -- | Push a dump hook pushDumpHook :: (DumpAction -> DumpAction) -> Logger -> Logger pushDumpHook h logger = logger { dump_hook = h:dump_hook logger } -- | Pop a dump hook popDumpHook :: Logger -> Logger popDumpHook logger = case dump_hook logger of [] -> panic "popDumpHook: empty hook stack" _:hs -> logger { dump_hook = hs } -- | Push a trace hook pushTraceHook :: (forall a. TraceAction a -> TraceAction a) -> Logger -> Logger pushTraceHook h logger = logger { trace_hook = h:trace_hook logger } -- | Pop a trace hook popTraceHook :: Logger -> Logger popTraceHook logger = case trace_hook logger of [] -> panic "popTraceHook: empty hook stack" _ -> logger { trace_hook = tail (trace_hook logger) } -- | Make the logger thread-safe makeThreadSafe :: Logger -> IO Logger makeThreadSafe logger = do lock <- newMVar () let with_lock :: forall a. IO a -> IO a with_lock act = withMVar lock (const act) log action logflags msg_class loc doc = with_lock (action logflags msg_class loc doc) dmp action logflags sty opts str fmt doc = with_lock (action logflags sty opts str fmt doc) trc :: forall a. TraceAction a -> TraceAction a trc action logflags str doc v = unsafePerformIO (with_lock (return $! 
action logflags str doc v)) return $ pushLogHook log $ pushDumpHook dmp $ pushTraceHook trc $ logger -- See Note [JSON Error Messages] defaultLogJsonAction :: LogJsonAction defaultLogJsonAction logflags msg_class jsdoc = case msg_class of MCOutput -> printOut msg MCDump -> printOut (msg $$ blankLine) MCInteractive -> putStrSDoc msg MCInfo -> printErrs msg MCFatal -> printErrs msg MCDiagnostic SevIgnore _ _ -> pure () -- suppress the message MCDiagnostic _sev _rea _code -> printErrs msg where printOut = defaultLogActionHPrintDoc logflags False stdout printErrs = defaultLogActionHPrintDoc logflags False stderr putStrSDoc = defaultLogActionHPutStrDoc logflags False stdout msg = renderJSON jsdoc -- See Note [JSON Error Messages] -- this is to be removed jsonLogAction :: LogAction jsonLogAction _ (MCDiagnostic SevIgnore _ _) _ _ = return () -- suppress the message jsonLogAction logflags msg_class srcSpan msg = defaultLogActionHPutStrDoc logflags True stdout (withPprStyle PprCode (doc $$ text "")) where str = renderWithContext (log_default_user_context logflags) msg doc = renderJSON $ JSObject [ ( "span", spanToDumpJSON srcSpan ) , ( "doc" , JSString str ) , ( "messageClass", json msg_class ) ] spanToDumpJSON :: SrcSpan -> JsonDoc spanToDumpJSON s = case s of (RealSrcSpan rss _) -> JSObject [ ("file", json file) , ("startLine", json $ srcSpanStartLine rss) , ("startCol", json $ srcSpanStartCol rss) , ("endLine", json $ srcSpanEndLine rss) , ("endCol", json $ srcSpanEndCol rss) ] where file = unpackFS $ srcSpanFile rss UnhelpfulSpan _ -> JSNull defaultLogAction :: LogAction defaultLogAction logflags msg_class srcSpan msg | log_dopt Opt_D_dump_json logflags = jsonLogAction logflags msg_class srcSpan msg | otherwise = case msg_class of MCOutput -> printOut msg MCDump -> printOut (msg $$ blankLine) MCInteractive -> putStrSDoc msg MCInfo -> printErrs msg MCFatal -> printErrs msg MCDiagnostic SevIgnore _ _ -> pure () -- suppress the message MCDiagnostic _sev _rea _code -> 
printDiagnostics where printOut = defaultLogActionHPrintDoc logflags False stdout printErrs = defaultLogActionHPrintDoc logflags False stderr putStrSDoc = defaultLogActionHPutStrDoc logflags False stdout -- Pretty print the warning flag, if any (#10752) message = mkLocMessageWarningGroups (log_show_warn_groups logflags) msg_class srcSpan msg printDiagnostics = do caretDiagnostic <- if log_show_caret logflags then getCaretDiagnostic msg_class srcSpan else pure empty printErrs $ getPprStyle $ \style -> withPprStyle (setStyleColoured True style) (message $+$ caretDiagnostic $+$ blankLine) -- careful (#2302): printErrs prints in UTF-8, -- whereas converting to string first and using -- hPutStr would just emit the low 8 bits of -- each unicode char. -- | Like 'defaultLogActionHPutStrDoc' but appends an extra newline. defaultLogActionHPrintDoc :: LogFlags -> Bool -> Handle -> SDoc -> IO () defaultLogActionHPrintDoc logflags asciiSpace h d = defaultLogActionHPutStrDoc logflags asciiSpace h (d $$ text "") -- | The boolean arguments let's the pretty printer know if it can optimize indent -- by writing ascii ' ' characters without going through decoding. defaultLogActionHPutStrDoc :: LogFlags -> Bool -> Handle -> SDoc -> IO () defaultLogActionHPutStrDoc logflags asciiSpace h d -- Don't add a newline at the end, so that successive -- calls to this log-action can output all on the same line = printSDoc (log_default_user_context logflags) (Pretty.PageMode asciiSpace) h d -- -- Note [JSON Error Messages] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- When the user requests the compiler output to be dumped as json -- we used to collect them all in an IORef and then print them at the end. -- This doesn't work very well with GHCi. (See #14078) So instead we now -- use the simpler method of just outputting a JSON document inplace to -- stdout. -- -- Before the compiler calls log_action, it has already turned the `ErrMsg` -- into a formatted message. 
This means that we lose some possible -- information to provide to the user but refactoring log_action is quite -- invasive as it is called in many places. So, for now I left it alone -- and we can refine its behaviour as users request different output. -- -- The recent work here replaces the purpose of flag -ddump-json with -- -fdiagnostics-as-json. For temporary backwards compatibility while -- -ddump-json is being deprecated, `jsonLogAction` has been added in, but -- it should be removed along with -ddump-json. Similarly, the guard in -- `defaultLogAction` should be removed. This cleanup is tracked in #24113. -- | Default action for 'dumpAction' hook defaultDumpAction :: DumpCache -> LogAction -> DumpAction defaultDumpAction dumps log_action logflags sty flag title _fmt doc = dumpSDocWithStyle dumps log_action sty logflags flag title doc -- | Write out a dump. -- -- If --dump-to-file is set then this goes to a file. -- otherwise emit to stdout (via the LogAction parameter). -- -- When @hdr@ is empty, we print in a more compact format (no separators and -- blank lines) dumpSDocWithStyle :: DumpCache -> LogAction -> PprStyle -> LogFlags -> DumpFlag -> String -> SDoc -> IO () dumpSDocWithStyle dumps log_action sty logflags flag hdr doc = withDumpFileHandle dumps logflags flag writeDump where -- write dump to file writeDump (Just handle) = do doc' <- if null hdr then return doc else do timeStamp <- if log_enable_timestamps logflags then (text . show) <$> getCurrentTime else pure empty let d = timeStamp $$ blankLine $$ doc return $ mkDumpDoc hdr d -- When we dump to files we use UTF8. Which allows ascii spaces. 
defaultLogActionHPrintDoc logflags True handle (withPprStyle sty doc') -- write the dump to stdout writeDump Nothing = do let (doc', msg_class) | null hdr = (doc, MCOutput) | otherwise = (mkDumpDoc hdr doc, MCDump) log_action logflags msg_class noSrcSpan (withPprStyle sty doc') -- | Run an action with the handle of a 'DumpFlag' if we are outputting to a -- file, otherwise 'Nothing'. withDumpFileHandle :: DumpCache -> LogFlags -> DumpFlag -> (Maybe Handle -> IO ()) -> IO () withDumpFileHandle dumps logflags flag action = do let dump_ways = log_ways logflags let mFile = chooseDumpFile logflags dump_ways flag case mFile of Just fileName -> do lock <- modifyMVar dumps $ \gd -> case Map.lookup fileName gd of Nothing -> do lock <- newMVar () let gd' = Map.insert fileName lock gd -- ensure that file exists so we can append to it createDirectoryIfMissing True (takeDirectory fileName) writeFile fileName "" return (gd', lock) Just lock -> do return (gd, lock) let withLock k = withMVar lock $ \() -> k >> return () withLock $ withFile fileName AppendMode $ \handle -> do -- We do not want the dump file to be affected by -- environment variables, but instead to always use -- UTF8. See: -- https://gitlab.haskell.org/ghc/ghc/issues/10762 hSetEncoding handle utf8 action (Just handle) Nothing -> action Nothing -- | Choose where to put a dump file based on LogFlags and DumpFlag chooseDumpFile :: LogFlags -> Maybe Ways -> DumpFlag -> Maybe FilePath chooseDumpFile logflags ways flag | log_dump_to_file logflags || forced_to_file = Just $ setDir (getPrefix ++ way_infix ++ dump_suffix) | otherwise = Nothing where way_infix = case ways of _ | not (log_with_ways logflags) -> "" Nothing -> "" Just ws | null ws || null (waysTag ws) -> "" | otherwise -> waysTag ws ++ "." 
(forced_to_file, dump_suffix) = case flag of -- -dth-dec-file dumps expansions of TH -- splices into MODULE.th.hs even when -- -ddump-to-file isn't set Opt_D_th_dec_file -> (True, "th.hs") _ -> (False, default_suffix) -- build a suffix from the flag name -- e.g. -ddump-asm => ".dump-asm" default_suffix = map (\c -> if c == '_' then '-' else c) $ let str = show flag in case stripPrefix "Opt_D_" str of Just x -> x Nothing -> panic ("chooseDumpFile: bad flag name: " ++ str) getPrefix -- dump file location is being forced -- by the -ddump-file-prefix flag. | Just prefix <- log_dump_prefix_override logflags = prefix -- dump file locations, module specified to [modulename] set by -- GHC.Driver.Pipeline.runPipeline; non-module specific, e.g. Chasing dependencies, -- to 'non-module' by default. | otherwise = log_dump_prefix logflags setDir f = case log_dump_dir logflags of Just d -> d f Nothing -> f -- | Default action for 'traceAction' hook defaultTraceAction :: TraceAction a defaultTraceAction logflags title doc x = if not (log_enable_debug logflags) then x else trace (renderWithContext (log_default_dump_context logflags) (sep [text title, nest 2 doc])) x -- | Log something logMsg :: Logger -> MessageClass -> SrcSpan -> SDoc -> IO () logMsg logger mc loc msg = putLogMsg logger (logFlags logger) mc loc msg logJsonMsg :: ToJson a => Logger -> MessageClass -> a -> IO () logJsonMsg logger mc d = putJsonLogMsg logger (logFlags logger) mc (json d) -- | Dump something logDumpFile :: Logger -> PprStyle -> DumpFlag -> String -> DumpFormat -> SDoc -> IO () logDumpFile logger = putDumpFile logger (logFlags logger) -- | Log a trace message logTraceMsg :: Logger -> String -> SDoc -> a -> a logTraceMsg logger hdr doc a = putTraceMsg logger (logFlags logger) hdr doc a -- | Log a dump message (not a dump file) logDumpMsg :: Logger -> String -> SDoc -> IO () logDumpMsg logger hdr doc = logMsg logger MCDump noSrcSpan (withPprStyle defaultDumpStyle (mkDumpDoc hdr doc)) mkDumpDoc :: String 
-> SDoc -> SDoc mkDumpDoc hdr doc = vcat [blankLine, line <+> text hdr <+> line, doc, blankLine] where line = text "====================" -- | Dump if the given DumpFlag is set putDumpFileMaybe :: Logger -> DumpFlag -> String -> DumpFormat -> SDoc -> IO () putDumpFileMaybe logger = putDumpFileMaybe' logger alwaysQualify {-# INLINE putDumpFileMaybe #-} -- see Note [INLINE conditional tracing utilities] -- | Dump if the given DumpFlag is set -- -- Unlike 'putDumpFileMaybe', has a NamePprCtx argument putDumpFileMaybe' :: Logger -> NamePprCtx -> DumpFlag -> String -> DumpFormat -> SDoc -> IO () putDumpFileMaybe' logger name_ppr_ctx flag hdr fmt doc = when (logHasDumpFlag logger flag) $ logDumpFile' logger name_ppr_ctx flag hdr fmt doc {-# INLINE putDumpFileMaybe' #-} -- see Note [INLINE conditional tracing utilities] logDumpFile' :: Logger -> NamePprCtx -> DumpFlag -> String -> DumpFormat -> SDoc -> IO () {-# NOINLINE logDumpFile' #-} -- NOINLINE: Now we are past the conditional, into the "cold" path, -- don't inline, to reduce code size at the call site -- See Note [INLINE conditional tracing utilities] logDumpFile' logger name_ppr_ctx flag hdr fmt doc = logDumpFile logger (mkDumpStyle name_ppr_ctx) flag hdr fmt doc -- | Ensure that a dump file is created even if it stays empty touchDumpFile :: Logger -> DumpFlag -> IO () touchDumpFile logger flag = withDumpFileHandle (generated_dumps logger) (logFlags logger) flag (const (return ())) class HasLogger m where getLogger :: m Logger class ContainsLogger t where extractLogger :: t -> Logger ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Misc.hs0000644000000000000000000014070307346545000020242 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} -- | Highly random utility functions -- module GHC.Utils.Misc ( -- * Miscellaneous higher-order functions applyWhen, nTimes, const2, -- * General list processing zipEqual, zipWithEqual, zipWith3Equal, zipWith4Equal, 
stretchZipWith, zipWithAndUnzip, zipAndUnzip, filterByList, filterByLists, partitionByList, unzipWith, mapFst, mapSnd, chkAppend, mapAndUnzip, mapAndUnzip3, mapAndUnzip4, filterOut, partitionWith, partitionWithM, dropWhileEndLE, spanEnd, last2, lastMaybe, onJust, List.foldl1', foldl2, count, countWhile, all2, any2, lengthExceeds, lengthIs, lengthIsNot, lengthAtLeast, lengthAtMost, lengthLessThan, listLengthCmp, atLength, equalLength, compareLength, leLength, ltLength, isSingleton, only, expectOnly, GHC.Utils.Misc.singleton, notNull, expectNonEmpty, snocView, holes, changeLast, whenNonEmpty, mergeListsBy, isSortedBy, -- Foldable generalised functions, mapMaybe', -- * Tuples fstOf3, sndOf3, thdOf3, fstOf4, sndOf4, fst3, snd3, third3, uncurry3, -- * List operations controlled by another list takeList, dropList, splitAtList, split, dropTail, capitalise, -- * Sorting sortWith, minWith, nubSort, ordNub, ordNubOn, -- * Comparisons isEqual, removeSpaces, (<&&>), (<||>), -- * Edit distance fuzzyMatch, fuzzyLookup, -- * Transitive closures transitiveClosure, -- * Strictness seqList, strictMap, strictZipWith, strictZipWith3, -- * Module names looksLikeModuleName, looksLikePackageName, -- * Integers exactLog2, -- * Floating point readRational, readSignificandExponentPair, readHexRational, readHexSignificandExponentPair, -- * IO-ish utilities doesDirNameExist, getModificationUTCTime, modificationTimeIfExists, fileHashIfExists, withAtomicRename, -- * Filenames and paths Suffix, splitLongestPrefix, escapeSpaces, Direction(..), reslash, makeRelativeTo, -- * Utils for defining Data instances abstractConstr, abstractDataType, mkNoRepType, -- * Utils for printing C code charToC, -- * Hashing hashString, -- * Call stacks HasCallStack, HasDebugCallStack, ) where import GHC.Prelude.Basic hiding ( head, init, last, tail ) import qualified GHC.Prelude.Basic as Partial ( head ) import GHC.Utils.Exception import GHC.Utils.Panic.Plain import GHC.Utils.Constants import GHC.Utils.Fingerprint 
import Data.Data import qualified Data.List as List import Data.List.NonEmpty ( NonEmpty(..), last, nonEmpty ) import GHC.Exts import GHC.Stack (HasCallStack) import Control.Monad ( guard ) import Control.Monad.IO.Class ( MonadIO, liftIO ) import System.IO.Error as IO ( isDoesNotExistError ) import System.Directory ( doesDirectoryExist, getModificationTime, renameFile ) import System.FilePath import Data.Bifunctor ( first, second ) import Data.Char ( isUpper, isAlphaNum, isSpace, chr, ord, isDigit, toUpper , isHexDigit, digitToInt ) import Data.Int import Data.Ratio ( (%) ) import Data.Ord ( comparing ) import Data.Word import qualified Data.IntMap as IM import qualified Data.Set as Set import Data.Time {- ************************************************************************ * * \subsection{Miscellaneous higher-order functions} * * ************************************************************************ -} -- | Apply a function iff some condition is met. applyWhen :: Bool -> (a -> a) -> a -> a applyWhen True f x = f x applyWhen _ _ x = x -- | Apply a function @n@ times to a given value. nTimes :: Int -> (a -> a) -> (a -> a) nTimes 0 _ = id nTimes 1 f = f nTimes n f = f . 
nTimes (n-1) f const2 :: a -> b -> c -> a const2 x _ _ = x fstOf3 :: (a,b,c) -> a sndOf3 :: (a,b,c) -> b thdOf3 :: (a,b,c) -> c fstOf3 (a,_,_) = a sndOf3 (_,b,_) = b thdOf3 (_,_,c) = c fstOf4 :: (a,b,c,d) -> a sndOf4 :: (a,b,c,d) -> b fstOf4 (a,_,_,_) = a sndOf4 (_,b,_,_) = b fst3 :: (a -> d) -> (a, b, c) -> (d, b, c) fst3 f (a, b, c) = (f a, b, c) snd3 :: (b -> d) -> (a, b, c) -> (a, d, c) snd3 f (a, b, c) = (a, f b, c) third3 :: (c -> d) -> (a, b, c) -> (a, b, d) third3 f (a, b, c) = (a, b, f c) uncurry3 :: (a -> b -> c -> d) -> (a, b, c) -> d uncurry3 f (a, b, c) = f a b c {- ************************************************************************ * * \subsection[Utils-lists]{General list processing} * * ************************************************************************ -} filterOut :: (a->Bool) -> [a] -> [a] -- ^ Like filter, only it reverses the sense of the test filterOut p = filter (not . p) partitionWith :: (a -> Either b c) -> [a] -> ([b], [c]) -- ^ Uses a function to determine which of two output lists an input element should join partitionWith _ [] = ([],[]) partitionWith f (x:xs) = case f x of Left b -> (b:bs, cs) Right c -> (bs, c:cs) where (bs,cs) = partitionWith f xs partitionWithM :: Monad m => (a -> m (Either b c)) -> [a] -> m ([b], [c]) -- ^ Monadic version of `partitionWith` partitionWithM _ [] = return ([], []) partitionWithM f (x:xs) = do y <- f x (bs, cs) <- partitionWithM f xs case y of Left b -> return (b:bs, cs) Right c -> return (bs, c:cs) {-# INLINEABLE partitionWithM #-} chkAppend :: [a] -> [a] -> [a] -- Checks for the second argument being empty -- Used in situations where that situation is common chkAppend xs ys | null ys = xs | otherwise = xs ++ ys {- A paranoid @zip@ (and some @zipWith@ friends) that checks the lists are of equal length. Alastair Reid thinks this should only happen if DEBUGging on; hey, why not? 
-} zipEqual :: HasDebugCallStack => String -> [a] -> [b] -> [(a,b)] zipWithEqual :: HasDebugCallStack => String -> (a->b->c) -> [a]->[b]->[c] zipWith3Equal :: HasDebugCallStack => String -> (a->b->c->d) -> [a]->[b]->[c]->[d] zipWith4Equal :: HasDebugCallStack => String -> (a->b->c->d->e) -> [a]->[b]->[c]->[d]->[e] #if !defined(DEBUG) zipEqual _ = zip zipWithEqual _ = zipWith zipWith3Equal _ = zipWith3 zipWith4Equal _ = List.zipWith4 #else zipEqual _ [] [] = [] zipEqual msg (a:as) (b:bs) = (a,b) : zipEqual msg as bs zipEqual msg _ _ = panic ("zipEqual: unequal lists: "++msg) zipWithEqual msg z (a:as) (b:bs)= z a b : zipWithEqual msg z as bs zipWithEqual _ _ [] [] = [] zipWithEqual msg _ _ _ = panic ("zipWithEqual: unequal lists: "++msg) zipWith3Equal msg z (a:as) (b:bs) (c:cs) = z a b c : zipWith3Equal msg z as bs cs zipWith3Equal _ _ [] [] [] = [] zipWith3Equal msg _ _ _ _ = panic ("zipWith3Equal: unequal lists: "++msg) zipWith4Equal msg z (a:as) (b:bs) (c:cs) (d:ds) = z a b c d : zipWith4Equal msg z as bs cs ds zipWith4Equal _ _ [] [] [] [] = [] zipWith4Equal msg _ _ _ _ _ = panic ("zipWith4Equal: unequal lists: "++msg) #endif -- | 'filterByList' takes a list of Bools and a list of some elements and -- filters out these elements for which the corresponding value in the list of -- Bools is False. This function does not check whether the lists have equal -- length. filterByList :: [Bool] -> [a] -> [a] filterByList (True:bs) (x:xs) = x : filterByList bs xs filterByList (False:bs) (_:xs) = filterByList bs xs filterByList _ _ = [] -- | 'filterByLists' takes a list of Bools and two lists as input, and -- outputs a new list consisting of elements from the last two input lists. For -- each Bool in the list, if it is 'True', then it takes an element from the -- former list. If it is 'False', it takes an element from the latter list. -- The elements taken correspond to the index of the Bool in its list. 
-- For example: -- -- @ -- filterByLists [True, False, True, False] \"abcd\" \"wxyz\" = \"axcz\" -- @ -- -- This function does not check whether the lists have equal length. filterByLists :: [Bool] -> [a] -> [a] -> [a] filterByLists (True:bs) (x:xs) (_:ys) = x : filterByLists bs xs ys filterByLists (False:bs) (_:xs) (y:ys) = y : filterByLists bs xs ys filterByLists _ _ _ = [] -- | 'partitionByList' takes a list of Bools and a list of some elements and -- partitions the list according to the list of Bools. Elements corresponding -- to 'True' go to the left; elements corresponding to 'False' go to the right. -- For example, @partitionByList [True, False, True] [1,2,3] == ([1,3], [2])@ -- This function does not check whether the lists have equal -- length; when one list runs out, the function stops. partitionByList :: [Bool] -> [a] -> ([a], [a]) partitionByList = go [] [] where go trues falses (True : bs) (x : xs) = go (x:trues) falses bs xs go trues falses (False : bs) (x : xs) = go trues (x:falses) bs xs go trues falses _ _ = (reverse trues, reverse falses) stretchZipWith :: (a -> Bool) -> b -> (a->b->c) -> [a] -> [b] -> [c] -- ^ @stretchZipWith p z f xs ys@ stretches @ys@ by inserting @z@ in -- the places where @p@ returns @True@ stretchZipWith _ _ _ [] _ = [] stretchZipWith p z f (x:xs) ys | p x = f x z : stretchZipWith p z f xs ys | otherwise = case ys of [] -> [] (y:ys) -> f x y : stretchZipWith p z f xs ys mapFst :: Functor f => (a->c) -> f(a,b) -> f(c,b) mapSnd :: Functor f => (b->c) -> f(a,b) -> f(a,c) mapFst = fmap . first mapSnd = fmap . 
second mapAndUnzip :: (a -> (b, c)) -> [a] -> ([b], [c]) mapAndUnzip _ [] = ([], []) mapAndUnzip f (x:xs) = let (r1, r2) = f x (rs1, rs2) = mapAndUnzip f xs in (r1:rs1, r2:rs2) mapAndUnzip3 :: (a -> (b, c, d)) -> [a] -> ([b], [c], [d]) mapAndUnzip3 _ [] = ([], [], []) mapAndUnzip3 f (x:xs) = let (r1, r2, r3) = f x (rs1, rs2, rs3) = mapAndUnzip3 f xs in (r1:rs1, r2:rs2, r3:rs3) mapAndUnzip4 :: (a -> (b, c, d, e)) -> [a] -> ([b], [c], [d], [e]) mapAndUnzip4 _ [] = ([], [], [], []) mapAndUnzip4 f (x:xs) = let (r1, r2, r3, r4) = f x (rs1, rs2, rs3, rs4) = mapAndUnzip4 f xs in (r1:rs1, r2:rs2, r3:rs3, r4:rs4) zipWithAndUnzip :: (a -> b -> (c,d)) -> [a] -> [b] -> ([c],[d]) zipWithAndUnzip f (a:as) (b:bs) = let (r1, r2) = f a b (rs1, rs2) = zipWithAndUnzip f as bs in (r1:rs1, r2:rs2) zipWithAndUnzip _ _ _ = ([],[]) -- | This has the effect of making the two lists have equal length by dropping -- the tail of the longer one. zipAndUnzip :: [a] -> [b] -> ([a],[b]) zipAndUnzip (a:as) (b:bs) = let (rs1, rs2) = zipAndUnzip as bs in (a:rs1, b:rs2) zipAndUnzip _ _ = ([],[]) -- | @atLength atLen atEnd ls n@ unravels list @ls@ to position @n@. 
Precisely: -- -- @ -- atLength atLenPred atEndPred ls n -- | n < 0 = atLenPred ls -- | length ls < n = atEndPred (n - length ls) -- | otherwise = atLenPred (drop n ls) -- @ atLength :: ([a] -> b) -- Called when length ls >= n, passed (drop n ls) -- NB: arg passed to this function may be [] -> b -- Called when length ls < n -> [a] -> Int -> b atLength atLenPred atEnd ls0 n0 | n0 < 0 = atLenPred ls0 | otherwise = go n0 ls0 where -- go's first arg n >= 0 go 0 ls = atLenPred ls go _ [] = atEnd -- n > 0 here go n (_:xs) = go (n-1) xs -- Some special cases of atLength: -- | @(lengthExceeds xs n) = (length xs > n)@ lengthExceeds :: [a] -> Int -> Bool lengthExceeds lst n | n < 0 = True | otherwise = atLength notNull False lst n -- | @(lengthAtLeast xs n) = (length xs >= n)@ lengthAtLeast :: [a] -> Int -> Bool lengthAtLeast = atLength (const True) False -- | @(lengthIs xs n) = (length xs == n)@ lengthIs :: [a] -> Int -> Bool lengthIs lst n | n < 0 = False | otherwise = atLength null False lst n -- | @(lengthIsNot xs n) = (length xs /= n)@ lengthIsNot :: [a] -> Int -> Bool lengthIsNot lst n | n < 0 = True | otherwise = atLength notNull True lst n -- | @(lengthAtMost xs n) = (length xs <= n)@ lengthAtMost :: [a] -> Int -> Bool lengthAtMost lst n | n < 0 = False | otherwise = atLength null True lst n -- | @(lengthLessThan xs n) == (length xs < n)@ lengthLessThan :: [a] -> Int -> Bool lengthLessThan = atLength (const False) True listLengthCmp :: [a] -> Int -> Ordering listLengthCmp = atLength atLen atEnd where atEnd = LT -- Not yet seen 'n' elts, so list length is < n. 
atLen [] = EQ atLen _ = GT equalLength :: [a] -> [b] -> Bool -- ^ True if length xs == length ys equalLength [] [] = True equalLength (_:xs) (_:ys) = equalLength xs ys equalLength _ _ = False compareLength :: [a] -> [b] -> Ordering compareLength [] [] = EQ compareLength (_:xs) (_:ys) = compareLength xs ys compareLength [] _ = LT compareLength _ [] = GT leLength :: [a] -> [b] -> Bool -- ^ True if length xs <= length ys leLength xs ys = case compareLength xs ys of LT -> True EQ -> True GT -> False ltLength :: [a] -> [b] -> Bool -- ^ True if length xs < length ys ltLength xs ys = case compareLength xs ys of LT -> True EQ -> False GT -> False ---------------------------- singleton :: a -> [a] singleton x = [x] isSingleton :: [a] -> Bool isSingleton [_] = True isSingleton _ = False notNull :: Foldable f => f a -> Bool notNull = not . null -- | Utility function to go from a singleton list to it's element. -- -- Wether or not the argument is a singleton list is only checked -- in debug builds. only :: [a] -> a #if defined(DEBUG) only [a] = a #else only (a:_) = a #endif only _ = panic "Util: only" -- | Extract the single element of a list and panic with the given message if -- there are more elements or the list was empty. -- Like 'expectJust', but for lists. expectOnly :: HasDebugCallStack => String -> [a] -> a {-# INLINE expectOnly #-} #if defined(DEBUG) expectOnly _ [a] = a #else expectOnly _ (a:_) = a #endif expectOnly msg _ = panic ("expectOnly: " ++ msg) -- | Compute all the ways of removing a single element from a list. -- -- > holes [1,2,3] = [(1, [2,3]), (2, [1,3]), (3, [1,2])] holes :: [a] -> [(a, [a])] holes [] = [] holes (x:xs) = (x, xs) : mapSnd (x:) (holes xs) -- | Replace the last element of a list with another element. changeLast :: [a] -> a -> [a] changeLast [] _ = panic "changeLast" changeLast [_] x = [x] changeLast (x:xs) x' = x : changeLast xs x' -- | Like @expectJust msg . nonEmpty@; a better alternative to 'NE.fromList'. 
expectNonEmpty :: HasDebugCallStack => String -> [a] -> NonEmpty a {-# INLINE expectNonEmpty #-} expectNonEmpty _ (x:xs) = x:|xs expectNonEmpty msg [] = expectNonEmptyPanic msg expectNonEmptyPanic :: String -> a expectNonEmptyPanic msg = panic ("expectNonEmpty: " ++ msg) {-# NOINLINE expectNonEmptyPanic #-} whenNonEmpty :: Applicative m => [a] -> (NonEmpty a -> m ()) -> m () whenNonEmpty [] _ = pure () whenNonEmpty (x:xs) f = f (x :| xs) -- | Merge an unsorted list of sorted lists, for example: -- -- > mergeListsBy compare [ [2,5,15], [1,10,100] ] = [1,2,5,10,15,100] -- -- \( O(n \log{} k) \) mergeListsBy :: forall a. (a -> a -> Ordering) -> [[a]] -> [a] mergeListsBy cmp lists | debugIsOn, not (all sorted lists) = -- When debugging is on, we check that the input lists are sorted. panic "mergeListsBy: input lists must be sorted" where sorted = isSortedBy cmp mergeListsBy cmp all_lists = merge_lists all_lists where -- Implements "Iterative 2-Way merge" described at -- https://en.wikipedia.org/wiki/K-way_merge_algorithm -- Merge two sorted lists into one in O(n). merge2 :: [a] -> [a] -> [a] merge2 [] ys = ys merge2 xs [] = xs merge2 (x:xs) (y:ys) = case cmp x y of GT -> y : merge2 (x:xs) ys _ -> x : merge2 xs (y:ys) -- Merge the first list with the second, the third with the fourth, and so -- on. The output has half as much lists as the input. merge_neighbours :: [[a]] -> [[a]] merge_neighbours [] = [] merge_neighbours [xs] = [xs] merge_neighbours (xs : ys : lists) = merge2 xs ys : merge_neighbours lists -- Since 'merge_neighbours' halves the amount of lists in each iteration, -- we perform O(log k) iteration. Each iteration is O(n). The total running -- time is therefore O(n log k). 
merge_lists :: [[a]] -> [a] merge_lists lists = case merge_neighbours lists of [] -> [] [xs] -> xs lists' -> merge_lists lists' isSortedBy :: (a -> a -> Ordering) -> [a] -> Bool isSortedBy cmp = sorted where sorted [] = True sorted [_] = True sorted (x:y:xs) = cmp x y /= GT && sorted (y:xs) {- ************************************************************************ * * \subsubsection{Sort utils} * * ************************************************************************ -} minWith :: Ord b => (a -> b) -> [a] -> a minWith get_key xs = assert (not (null xs) ) Partial.head (sortWith get_key xs) nubSort :: Ord a => [a] -> [a] nubSort = Set.toAscList . Set.fromList -- | Remove duplicates but keep elements in order. -- O(n * log n) ordNub :: Ord a => [a] -> [a] ordNub xs = ordNubOn id xs -- | Remove duplicates but keep elements in order. -- O(n * log n) ordNubOn :: Ord b => (a -> b) -> [a] -> [a] ordNubOn f xs = go Set.empty xs where go _ [] = [] go s (x:xs) | Set.member (f x) s = go s xs | otherwise = x : go (Set.insert (f x) s) xs {- ************************************************************************ * * \subsection[Utils-transitive-closure]{Transitive closure} * * ************************************************************************ This algorithm for transitive closure is straightforward, albeit quadratic. -} transitiveClosure :: (a -> [a]) -- Successor function -> (a -> a -> Bool) -- Equality predicate -> [a] -> [a] -- The transitive closure transitiveClosure succ eq xs = go [] xs where go done [] = done go done (x:xs) | x `is_in` done = go done xs | otherwise = go (x:done) (succ x ++ xs) _ `is_in` [] = False x `is_in` (y:ys) | eq x y = True | otherwise = x `is_in` ys {- ************************************************************************ * * \subsection[Utils-accum]{Accumulating} * * ************************************************************************ A combination of foldl with zip. It works with equal length lists. 
-} foldl2 :: (acc -> a -> b -> acc) -> acc -> [a] -> [b] -> acc foldl2 _ z [] [] = z foldl2 k z (a:as) (b:bs) = foldl2 k (k z a b) as bs foldl2 _ _ _ _ = panic "Util: foldl2" all2 :: (a -> b -> Bool) -> [a] -> [b] -> Bool -- True if the lists are the same length, and -- all corresponding elements satisfy the predicate all2 _ [] [] = True all2 p (x:xs) (y:ys) = p x y && all2 p xs ys all2 _ _ _ = False any2 :: (a -> b -> Bool) -> [a] -> [b] -> Bool -- True if any of the corresponding elements satisfy the predicate -- Unlike `all2`, this ignores excess elements of the other list any2 p (x:xs) (y:ys) = p x y || any2 p xs ys any2 _ _ _ = False -- Count the number of times a predicate is true count :: (a -> Bool) -> [a] -> Int count p = go 0 where go !n [] = n go !n (x:xs) | p x = go (n+1) xs | otherwise = go n xs countWhile :: (a -> Bool) -> [a] -> Int -- Length of an /initial prefix/ of the list satisfying p countWhile p = go 0 where go !n (x:xs) | p x = go (n+1) xs go !n _ = n {- @splitAt@, @take@, and @drop@ but with length of another list giving the break-off point: -} takeList :: [b] -> [a] -> [a] -- (takeList as bs) trims bs to the be same length -- as as, unless as is longer in which case it's a no-op takeList [] _ = [] takeList (_:xs) ls = case ls of [] -> [] (y:ys) -> y : takeList xs ys dropList :: [b] -> [a] -> [a] dropList [] xs = xs dropList _ xs@[] = xs dropList (_:xs) (_:ys) = dropList xs ys -- | Given two lists xs and ys, return `splitAt (length xs) ys`. splitAtList :: [b] -> [a] -> ([a], [a]) splitAtList xs ys = go 0# xs ys where -- we are careful to avoid allocating when there are no leftover -- arguments: in this case we can return "ys" directly (cf #18535) -- -- We make `xs` strict because in the general case `ys` isn't `[]` so we -- will have to evaluate `xs` anyway. 
go _ !_ [] = (ys, []) -- length ys <= length xs go n [] bs = (take (I# n) ys, bs) -- = splitAt n ys go n (_:as) (_:bs) = go (n +# 1#) as bs -- | drop from the end of a list dropTail :: Int -> [a] -> [a] -- Specification: dropTail n = reverse . drop n . reverse -- Better implementation due to Joachim Breitner -- http://www.joachim-breitner.de/blog/archives/600-On-taking-the-last-n-elements-of-a-list.html dropTail n xs = go (drop n xs) xs where go (_:ys) (x:xs) = x : go ys xs go _ _ = [] -- Stop when ys runs out -- It'll always run out before xs does -- dropWhile from the end of a list. This is similar to Data.List.dropWhileEnd, -- but is lazy in the elements and strict in the spine. For reasonably short lists, -- such as path names and typical lines of text, dropWhileEndLE is generally -- faster than dropWhileEnd. Its advantage is magnified when the predicate is -- expensive--using dropWhileEndLE isSpace to strip the space off a line of text -- is generally much faster than using dropWhileEnd isSpace for that purpose. -- Specification: dropWhileEndLE p = reverse . dropWhile p . reverse -- Pay attention to the short-circuit (&&)! The order of its arguments is the only -- difference between dropWhileEnd and dropWhileEndLE. dropWhileEndLE :: (a -> Bool) -> [a] -> [a] dropWhileEndLE p = foldr (\x r -> if null r && p x then [] else x:r) [] -- | @spanEnd p l == reverse (span p (reverse l))@. The first list -- returns actually comes after the second list (when you look at the -- input list). spanEnd :: (a -> Bool) -> [a] -> ([a], [a]) spanEnd p l = go l [] [] l where go yes _rev_yes rev_no [] = (yes, reverse rev_no) go yes rev_yes rev_no (x:xs) | p x = go yes (x : rev_yes) rev_no xs | otherwise = go xs [] (x : rev_yes ++ rev_no) xs -- | Get the last two elements in a list. {-# INLINE last2 #-} last2 :: [a] -> Maybe (a,a) last2 = uncurry (liftA2 (,)) . 
List.foldl' (\(_,x2) x -> (x2, Just x)) (Nothing, Nothing) lastMaybe :: [a] -> Maybe a lastMaybe [] = Nothing lastMaybe (x:xs) = Just $ last (x:|xs) -- | @onJust x m f@ applies f to the value inside the Just or returns the default. onJust :: b -> Maybe a -> (a->b) -> b onJust dflt = flip (maybe dflt) -- | Split a list into its last element and the initial part of the list. -- @snocView xs = Just (init xs, last xs)@ for non-empty lists. -- @snocView xs = Nothing@ otherwise. -- Unless both parts of the result are guaranteed to be used -- prefer separate calls to @last@ + @init@. -- If you are guaranteed to use both, this will -- be more efficient. snocView :: [a] -> Maybe ([a],a) snocView = fmap go . nonEmpty where go :: NonEmpty a -> ([a],a) go (x:|xs) = case nonEmpty xs of Nothing -> ([],x) Just xs -> case go xs of !(xs', x') -> (x:xs', x') split :: Char -> String -> [String] split c s = case rest of [] -> [chunk] _:rest -> chunk : split c rest where (chunk, rest) = break (==c) s -- | Convert a word to title case by capitalising the first letter capitalise :: String -> String capitalise [] = [] capitalise (c:cs) = toUpper c : cs {- ************************************************************************ * * \subsection[Utils-comparison]{Comparisons} * * ************************************************************************ -} isEqual :: Ordering -> Bool -- Often used in (isEqual (a `compare` b)) isEqual GT = False isEqual EQ = True isEqual LT = False removeSpaces :: String -> String removeSpaces = dropWhileEndLE isSpace . 
dropWhile isSpace -- Boolean operators lifted to Applicative (<&&>) :: Applicative f => f Bool -> f Bool -> f Bool (<&&>) = liftA2 (&&) infixr 3 <&&> -- same as (&&) (<||>) :: Applicative f => f Bool -> f Bool -> f Bool (<||>) = liftA2 (||) infixr 2 <||> -- same as (||) {- ************************************************************************ * * \subsection{Edit distance} * * ************************************************************************ -} -- | Find the "restricted" Damerau-Levenshtein edit distance between two strings. -- See: . -- Based on the algorithm presented in "A Bit-Vector Algorithm for Computing -- Levenshtein and Damerau Edit Distances" in PSC'02 (Heikki Hyyro). -- See http://www.cs.uta.fi/~helmu/pubs/psc02.pdf and -- http://www.cs.uta.fi/~helmu/pubs/PSCerr.html for an explanation restrictedDamerauLevenshteinDistance :: String -> String -> Int restrictedDamerauLevenshteinDistance str1 str2 = restrictedDamerauLevenshteinDistanceWithLengths m n str1 str2 where m = length str1 n = length str2 restrictedDamerauLevenshteinDistanceWithLengths :: Int -> Int -> String -> String -> Int restrictedDamerauLevenshteinDistanceWithLengths m n str1 str2 | m <= n = if n <= 32 -- n must be larger so this check is sufficient then restrictedDamerauLevenshteinDistance' (undefined :: Word32) m n str1 str2 else restrictedDamerauLevenshteinDistance' (undefined :: Integer) m n str1 str2 | otherwise = if m <= 32 -- m must be larger so this check is sufficient then restrictedDamerauLevenshteinDistance' (undefined :: Word32) n m str2 str1 else restrictedDamerauLevenshteinDistance' (undefined :: Integer) n m str2 str1 restrictedDamerauLevenshteinDistance' :: (Bits bv, Num bv) => bv -> Int -> Int -> String -> String -> Int restrictedDamerauLevenshteinDistance' _bv_dummy m n str1 str2 | [] <- str1 = n | otherwise = extractAnswer $ List.foldl' (restrictedDamerauLevenshteinDistanceWorker (matchVectors str1) top_bit_mask vector_mask) (0, 0, m_ones, 0, m) str2 where 
m_ones@vector_mask = (2 ^ m) - 1 top_bit_mask = (1 `shiftL` (m - 1)) `asTypeOf` _bv_dummy extractAnswer (_, _, _, _, distance) = distance restrictedDamerauLevenshteinDistanceWorker :: (Bits bv, Num bv) => IM.IntMap bv -> bv -> bv -> (bv, bv, bv, bv, Int) -> Char -> (bv, bv, bv, bv, Int) restrictedDamerauLevenshteinDistanceWorker str1_mvs top_bit_mask vector_mask (pm, d0, vp, vn, distance) char2 = seq str1_mvs $ seq top_bit_mask $ seq vector_mask $ seq pm' $ seq d0' $ seq vp' $ seq vn' $ seq distance'' $ seq char2 $ (pm', d0', vp', vn', distance'') where pm' = IM.findWithDefault 0 (ord char2) str1_mvs d0' = ((((sizedComplement vector_mask d0) .&. pm') `shiftL` 1) .&. pm) .|. ((((pm' .&. vp) + vp) .&. vector_mask) `xor` vp) .|. pm' .|. vn -- No need to mask the shiftL because of the restricted range of pm hp' = vn .|. sizedComplement vector_mask (d0' .|. vp) hn' = d0' .&. vp hp'_shift = ((hp' `shiftL` 1) .|. 1) .&. vector_mask hn'_shift = (hn' `shiftL` 1) .&. vector_mask vp' = hn'_shift .|. sizedComplement vector_mask (d0' .|. hp'_shift) vn' = d0' .&. hp'_shift distance' = if hp' .&. top_bit_mask /= 0 then distance + 1 else distance distance'' = if hn' .&. top_bit_mask /= 0 then distance' - 1 else distance' sizedComplement :: Bits bv => bv -> bv -> bv sizedComplement vector_mask vect = vector_mask `xor` vect matchVectors :: (Bits bv, Num bv) => String -> IM.IntMap bv matchVectors = snd . List.foldl' go (0 :: Int, IM.empty) where go (ix, im) char = let ix' = ix + 1 im' = IM.insertWith (.|.) 
(ord char) (2 ^ ix) im in seq ix' $ seq im' $ (ix', im') {-# SPECIALIZE INLINE restrictedDamerauLevenshteinDistance' :: Word32 -> Int -> Int -> String -> String -> Int #-} {-# SPECIALIZE INLINE restrictedDamerauLevenshteinDistance' :: Integer -> Int -> Int -> String -> String -> Int #-} {-# SPECIALIZE restrictedDamerauLevenshteinDistanceWorker :: IM.IntMap Word32 -> Word32 -> Word32 -> (Word32, Word32, Word32, Word32, Int) -> Char -> (Word32, Word32, Word32, Word32, Int) #-} {-# SPECIALIZE restrictedDamerauLevenshteinDistanceWorker :: IM.IntMap Integer -> Integer -> Integer -> (Integer, Integer, Integer, Integer, Int) -> Char -> (Integer, Integer, Integer, Integer, Int) #-} {-# SPECIALIZE INLINE sizedComplement :: Word32 -> Word32 -> Word32 #-} {-# SPECIALIZE INLINE sizedComplement :: Integer -> Integer -> Integer #-} {-# SPECIALIZE matchVectors :: String -> IM.IntMap Word32 #-} {-# SPECIALIZE matchVectors :: String -> IM.IntMap Integer #-} fuzzyMatch :: String -> [String] -> [String] fuzzyMatch key vals = fuzzyLookup key [(v,v) | v <- vals] -- | Search for possible matches to the users input in the given list, -- returning a small number of ranked results fuzzyLookup :: String -> [(String,a)] -> [a] fuzzyLookup user_entered possibilities = map fst $ take mAX_RESULTS $ List.sortBy (comparing snd) [ (poss_val, sort_key) | (poss_str, poss_val) <- possibilities , let distance = restrictedDamerauLevenshteinDistance poss_str user_entered , distance <= fuzzy_threshold , let sort_key = (distance, length poss_str, poss_str) ] where -- Work out an appropriate match threshold: -- We report a candidate if its edit distance is <= the threshold, -- The threshold is set to about a quarter of the # of characters the user entered -- Length Threshold -- 1 0 -- Don't suggest *any* candidates -- 2 1 -- for single-char identifiers -- 3 1 -- 4 1 -- 5 1 -- 6 2 -- -- Candidates with the same distance are sorted by their length. 
We also -- use the actual string as the third sorting criteria the sort key to get -- deterministic output, even if the input may have depended on the uniques -- in question fuzzy_threshold = truncate $ fromIntegral (length user_entered + 2) / (4 :: Rational) mAX_RESULTS = 3 {- ************************************************************************ * * \subsection[Utils-pairs]{Pairs} * * ************************************************************************ -} unzipWith :: (a -> b -> c) -> [(a, b)] -> [c] unzipWith = fmap . uncurry seqList :: [a] -> b -> b seqList [] b = b seqList (x:xs) b = x `seq` seqList xs b strictMap :: (a -> b) -> [a] -> [b] strictMap _ [] = [] strictMap f (x:xs) = let !x' = f x !xs' = strictMap f xs in x' : xs' strictZipWith :: (a -> b -> c) -> [a] -> [b] -> [c] strictZipWith _ [] _ = [] strictZipWith _ _ [] = [] strictZipWith f (x:xs) (y:ys) = let !x' = f x y !xs' = strictZipWith f xs ys in x' : xs' strictZipWith3 :: (a -> b -> c -> d) -> [a] -> [b] -> [c] -> [d] strictZipWith3 _ [] _ _ = [] strictZipWith3 _ _ [] _ = [] strictZipWith3 _ _ _ [] = [] strictZipWith3 f (x:xs) (y:ys) (z:zs) = let !x' = f x y z !xs' = strictZipWith3 f xs ys zs in x' : xs' -- Module names: looksLikeModuleName :: String -> Bool looksLikeModuleName [] = False looksLikeModuleName (c:cs) = isUpper c && go cs where go [] = True go ('.':cs) = looksLikeModuleName cs go (c:cs) = (isAlphaNum c || c == '_' || c == '\'') && go cs -- Similar to 'parse' for Distribution.Package.PackageName, -- but we don't want to depend on Cabal. looksLikePackageName :: String -> Bool looksLikePackageName = all (all isAlphaNum <&&> not . (all isDigit)) . split '-' ----------------------------------------------------------------------------- -- Integers -- | Determine the $\log_2$ of exact powers of 2 exactLog2 :: Integer -> Maybe Integer exactLog2 x | x <= 0 = Nothing | x > fromIntegral (maxBound :: Int32) = Nothing | x' .&. 
(-x') /= x' = Nothing | otherwise = Just (fromIntegral c) where x' = fromIntegral x :: Int32 c = countTrailingZeros x' {- -- ----------------------------------------------------------------------------- -- Floats -} readRational__ :: ReadS Rational -- NB: doesn't handle leading "-" readRational__ r = do ((i, e), t) <- readSignificandExponentPair__ r return ((i%1)*10^^e, t) readRational :: String -> Rational -- NB: *does* handle a leading "-" readRational top_s = case top_s of '-' : xs -> negate (read_me xs) xs -> read_me xs where read_me s = case (do { (x,"") <- readRational__ s ; return x }) of [x] -> x [] -> error ("readRational: no parse:" ++ top_s) _ -> error ("readRational: ambiguous parse:" ++ top_s) readSignificandExponentPair__ :: ReadS (Integer, Integer) -- NB: doesn't handle leading "-" readSignificandExponentPair__ r = do (n,d,s) <- readFix r (k,t) <- readExp s let pair = (n, toInteger (k - d)) return (pair, t) where readFix r = do (ds,s) <- lexDecDigits r (ds',t) <- lexDotDigits s return (read (ds++ds'), length ds', t) readExp (e:s) | e `elem` "eE" = readExp' s readExp s = return (0,s) readExp' ('+':s) = readDec s readExp' ('-':s) = do (k,t) <- readDec s return (-k,t) readExp' s = readDec s readDec s = do (ds,r) <- nonnull isDigit s return (foldl1 (\n d -> n * 10 + d) [ ord d - ord '0' | d <- ds ], r) lexDecDigits = nonnull isDigit lexDotDigits ('.':s) = return (span' isDigit s) lexDotDigits s = return ("",s) nonnull p s = do (cs@(_:_),t) <- return (span' p s) return (cs,t) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) -- | Parse a string into a significand and exponent. -- A trivial example might be: -- ghci> readSignificandExponentPair "1E2" -- (1,2) -- In a more complex case we might return a exponent different than that -- which the user wrote. This is needed in order to use a Integer significand. 
-- ghci> readSignificandExponentPair "-1.11E5" -- (-111,3) readSignificandExponentPair :: String -> (Integer, Integer) -- NB: *does* handle a leading "-" readSignificandExponentPair top_s = case top_s of '-' : xs -> let (i, e) = read_me xs in (-i, e) xs -> read_me xs where read_me s = case (do { (x,"") <- readSignificandExponentPair__ s ; return x }) of [x] -> x [] -> error ("readSignificandExponentPair: no parse:" ++ top_s) _ -> error ("readSignificandExponentPair: ambiguous parse:" ++ top_s) readHexRational :: String -> Rational readHexRational str = case str of '-' : xs -> negate (readMe xs) xs -> readMe xs where readMe as = case readHexRational__ as of Just n -> n _ -> error ("readHexRational: no parse:" ++ str) readHexRational__ :: String -> Maybe Rational readHexRational__ ('0' : x : rest) | x == 'X' || x == 'x' = do let (front,rest2) = span' isHexDigit rest guard (not (null front)) let frontNum = steps 16 0 front case rest2 of '.' : rest3 -> do let (back,rest4) = span' isHexDigit rest3 guard (not (null back)) let backNum = steps 16 frontNum back exp1 = -4 * length back case rest4 of p : ps | isExp p -> fmap (mk backNum . (+ exp1)) (getExp ps) _ -> return (mk backNum exp1) p : ps | isExp p -> fmap (mk frontNum) (getExp ps) _ -> Nothing where isExp p = p == 'p' || p == 'P' getExp ('+' : ds) = dec ds getExp ('-' : ds) = fmap negate (dec ds) getExp ds = dec ds mk :: Integer -> Int -> Rational mk n e = fromInteger n * 2^^e dec cs = case span' isDigit cs of (ds,"") | not (null ds) -> Just (steps 10 0 ds) _ -> Nothing steps base n ds = List.foldl' (step base) n ds step base n d = base * n + fromIntegral (digitToInt d) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) readHexRational__ _ = Nothing -- | Parse a string into a significand and exponent according to -- the "Hexadecimal Floats in Haskell" proposal. 
-- A trivial example might be: -- ghci> readHexSignificandExponentPair "0x1p+1" -- (1,1) -- Behaves similar to readSignificandExponentPair but the base is 16 -- and numbers are given in hexadecimal: -- ghci> readHexSignificandExponentPair "0xAp-4" -- (10,-4) -- ghci> readHexSignificandExponentPair "0x1.2p3" -- (18,-1) readHexSignificandExponentPair :: String -> (Integer, Integer) readHexSignificandExponentPair str = case str of '-' : xs -> let (i, e) = readMe xs in (-i, e) xs -> readMe xs where readMe as = case readHexSignificandExponentPair__ as of Just n -> n _ -> error ("readHexSignificandExponentPair: no parse:" ++ str) readHexSignificandExponentPair__ :: String -> Maybe (Integer, Integer) readHexSignificandExponentPair__ ('0' : x : rest) | x == 'X' || x == 'x' = do let (front,rest2) = span' isHexDigit rest guard (not (null front)) let frontNum = steps 16 0 front case rest2 of '.' : rest3 -> do let (back,rest4) = span' isHexDigit rest3 guard (not (null back)) let backNum = steps 16 frontNum back exp1 = -4 * length back case rest4 of p : ps | isExp p -> fmap (mk backNum . (+ exp1)) (getExp ps) _ -> return (mk backNum exp1) p : ps | isExp p -> fmap (mk frontNum) (getExp ps) _ -> Nothing where isExp p = p == 'p' || p == 'P' getExp ('+' : ds) = dec ds getExp ('-' : ds) = fmap negate (dec ds) getExp ds = dec ds mk :: Integer -> Int -> (Integer, Integer) mk n e = (n, fromIntegral e) dec cs = case span' isDigit cs of (ds,"") | not (null ds) -> Just (steps 10 0 ds) _ -> Nothing steps base n ds = foldl' (step base) n ds step base n d = base * n + fromIntegral (digitToInt d) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) readHexSignificandExponentPair__ _ = Nothing ----------------------------------------------------------------------------- -- Verify that the 'dirname' portion of a FilePath exists. 
-- doesDirNameExist :: FilePath -> IO Bool doesDirNameExist fpath = doesDirectoryExist (takeDirectory fpath) ----------------------------------------------------------------------------- -- Backwards compatibility definition of getModificationTime getModificationUTCTime :: FilePath -> IO UTCTime getModificationUTCTime = getModificationTime -- -------------------------------------------------------------- -- check existence & modification time at the same time modificationTimeIfExists :: FilePath -> IO (Maybe UTCTime) modificationTimeIfExists f = (do t <- getModificationUTCTime f; return (Just t)) `catchIO` \e -> if isDoesNotExistError e then return Nothing else ioError e -- -------------------------------------------------------------- -- check existence & hash at the same time fileHashIfExists :: FilePath -> IO (Maybe Fingerprint) fileHashIfExists f = (do t <- getFileHash f; return (Just t)) `catchIO` \e -> if isDoesNotExistError e then return Nothing else ioError e -- -------------------------------------------------------------- -- atomic file writing by writing to a temporary file first (see #14533) -- -- This should be used in all cases where GHC writes files to disk -- and uses their modification time to skip work later, -- as otherwise a partially written file (e.g. due to crash or Ctrl+C) -- also results in a skip. withAtomicRename :: (MonadIO m) => FilePath -> (FilePath -> m a) -> m a withAtomicRename targetFile f = do -- The temp file must be on the same file system (mount) as the target file -- to result in an atomic move on most platforms. -- The standard way to ensure that is to place it into the same directory. -- This can still be fooled when somebody mounts a different file system -- at just the right time, but that is not a case we aim to cover here. 
let temp = targetFile <.> "tmp" res <- f temp liftIO $ renameFile temp targetFile return res -- -------------------------------------------------------------- -- split a string at the last character where 'pred' is True, -- returning a pair of strings. The first component holds the string -- up (but not including) the last character for which 'pred' returned -- True, the second whatever comes after (but also not including the -- last character). -- -- If 'pred' returns False for all characters in the string, the original -- string is returned in the first component (and the second one is just -- empty). splitLongestPrefix :: String -> (Char -> Bool) -> (String,String) splitLongestPrefix str pred = case r_pre of [] -> (str, []) _:r_pre' -> (reverse r_pre', reverse r_suf) -- 'tail' drops the char satisfying 'pred' where (r_suf, r_pre) = break pred (reverse str) escapeSpaces :: String -> String escapeSpaces = foldr (\c s -> if isSpace c then '\\':c:s else c:s) "" type Suffix = String -------------------------------------------------------------- -- * Search path -------------------------------------------------------------- data Direction = Forwards | Backwards reslash :: Direction -> FilePath -> FilePath reslash d = f where f ('/' : xs) = slash : f xs f ('\\' : xs) = slash : f xs f (x : xs) = x : f xs f "" = "" slash = case d of Forwards -> '/' Backwards -> '\\' makeRelativeTo :: FilePath -> FilePath -> FilePath this `makeRelativeTo` that = directory thisFilename where (thisDirectory, thisFilename) = splitFileName this thatDirectory = dropFileName that directory = joinPath $ f (splitPath thisDirectory) (splitPath thatDirectory) f (x : xs) (y : ys) | x == y = f xs ys f xs ys = replicate (length ys) ".." 
++ xs {- ************************************************************************ * * \subsection[Utils-Data]{Utils for defining Data instances} * * ************************************************************************ These functions helps us to define Data instances for abstract types. -} abstractConstr :: String -> Constr abstractConstr n = mkConstr (abstractDataType n) ("{abstract:"++n++"}") [] Prefix abstractDataType :: String -> DataType abstractDataType n = mkDataType n [abstractConstr n] {- ************************************************************************ * * \subsection[Utils-C]{Utils for printing C code} * * ************************************************************************ -} charToC :: Word8 -> String charToC w = case chr (fromIntegral w) of '\"' -> "\\\"" '\'' -> "\\\'" '\\' -> "\\\\" c | c >= ' ' && c <= '~' -> [c] | otherwise -> ['\\', chr (ord '0' + ord c `div` 64), chr (ord '0' + ord c `div` 8 `mod` 8), chr (ord '0' + ord c `mod` 8)] {- ************************************************************************ * * \subsection[Utils-Hashing]{Utils for hashing} * * ************************************************************************ -} -- | A sample hash function for Strings. We keep multiplying by the -- golden ratio and adding. The implementation is: -- -- > hashString = foldl' f golden -- > where f m c = fromIntegral (ord c) * magic + hashInt32 m -- > magic = 0xdeadbeef -- -- Where hashInt32 works just as hashInt shown above. -- -- Knuth argues that repeated multiplication by the golden ratio -- will minimize gaps in the hash space, and thus it's a good choice -- for combining together multiple keys to form one. -- -- Here we know that individual characters c are often small, and this -- produces frequent collisions if we use ord c alone. A -- particular problem are the shorter low ASCII and ISO-8859-1 -- character strings. We pre-multiply by a magic twiddle factor to -- obtain a good distribution. 
In fact, given the following test: -- -- > testp :: Int32 -> Int -- > testp k = (n - ) . length . group . sort . map hs . take n $ ls -- > where ls = [] : [c : l | l <- ls, c <- ['\0'..'\xff']] -- > hs = foldl' f golden -- > f m c = fromIntegral (ord c) * k + hashInt32 m -- > n = 100000 -- -- We discover that testp magic = 0. hashString :: String -> Int32 hashString = foldl' f golden where f m c = fromIntegral (ord c) * magic + hashInt32 m magic = fromIntegral (0xdeadbeef :: Word32) golden :: Int32 golden = 1013904242 -- = round ((sqrt 5 - 1) * 2^32) :: Int32 -- was -1640531527 = round ((sqrt 5 - 1) * 2^31) :: Int32 -- but that has bad mulHi properties (even adding 2^32 to get its inverse) -- Whereas the above works well and contains no hash duplications for -- [-32767..65536] -- | A sample (and useful) hash function for Int32, -- implemented by extracting the uppermost 32 bits of the 64-bit -- result of multiplying by a 33-bit constant. The constant is from -- Knuth, derived from the golden ratio: -- -- > golden = round ((sqrt 5 - 1) * 2^32) -- -- We get good key uniqueness on small inputs -- (a problem with previous versions): -- (length $ group $ sort $ map hashInt32 [-32767..65536]) == 65536 + 32768 -- hashInt32 :: Int32 -> Int32 hashInt32 x = mulHi x golden + x -- hi 32 bits of a x-bit * 32 bit -> 64-bit multiply mulHi :: Int32 -> Int32 -> Int32 mulHi a b = fromIntegral (r `shiftR` 32) where r :: Int64 r = fromIntegral a * fromIntegral b -- | A call stack constraint, but only when 'isDebugOn'. 
#if defined(DEBUG) type HasDebugCallStack = HasCallStack #else type HasDebugCallStack = (() :: Constraint) #endif mapMaybe' :: Foldable f => (a -> Maybe b) -> f a -> [b] mapMaybe' f = foldr g [] where g x rest | Just y <- f x = y : rest | otherwise = rest ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Monad.hs0000644000000000000000000004135107346545000020404 0ustar0000000000000000{-# LANGUAGE MonadComprehensions #-} -- | Utilities related to Monad and Applicative classes -- Mostly for backwards compatibility. module GHC.Utils.Monad ( Applicative(..) , (<$>) , MonadFix(..) , MonadIO(..) , zipWith3M, zipWith3M_, zipWith4M, zipWithAndUnzipM , zipWith3MNE , mapAndUnzipM, mapAndUnzip3M, mapAndUnzip4M, mapAndUnzip5M , mapAccumLM , mapSndM , concatMapM , mapMaybeM , anyM, allM, orM , foldlM, foldlM_, foldrM, foldMapM , whenM, unlessM , filterOutM , partitionM ) where ------------------------------------------------------------------------------- -- Imports ------------------------------------------------------------------------------- import GHC.Prelude import Control.Monad import Control.Monad.Fix import Control.Monad.IO.Class import Control.Monad.Trans.State.Strict (StateT (..)) import Data.Foldable (sequenceA_, foldlM, foldrM) import Data.List (unzip4, unzip5, zipWith4) import Data.List.NonEmpty (NonEmpty (..)) import Data.Monoid (Ap (Ap, getAp)) import Data.Tuple (swap) ------------------------------------------------------------------------------- -- Common functions -- These are used throughout the compiler ------------------------------------------------------------------------------- {- Note [Inline @zipWithNM@ functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inline principle for 'zipWith3M', 'zipWith4M' and 'zipWith3M_' is the same as for 'zipWithM' and 'zipWithM_' in "Control.Monad", see Note [Fusion for zipN/zipWithN] in GHC/List.hs for more details. 
The 'zipWithM'/'zipWithM_' functions are inlined so that the `zipWith` and `sequenceA` functions with which they are defined have an opportunity to fuse. Furthermore, 'zipWith3M'/'zipWith4M' and 'zipWith3M_' have been explicitly rewritten in a non-recursive way similarly to 'zipWithM'/'zipWithM_', and for more than just uniformity: after [D5241](https://phabricator.haskell.org/D5241) for issue #14037, all @zipN@/@zipWithN@ functions fuse, meaning 'zipWith3M'/'zipWIth4M' and 'zipWith3M_'@ now behave like 'zipWithM' and 'zipWithM_', respectively, with regards to fusion. As such, since there are not any differences between 2-ary 'zipWithM'/ 'zipWithM_' and their n-ary counterparts below aside from the number of arguments, the `INLINE` pragma should be replicated in the @zipWithNM@ functions below as well. -} zipWith3M :: Monad m => (a -> b -> c -> m d) -> [a] -> [b] -> [c] -> m [d] {-# INLINE zipWith3M #-} -- Inline so that fusion with 'zipWith3' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. zipWith3M f xs ys zs = sequenceA (zipWith3 f xs ys zs) zipWith3M_ :: Monad m => (a -> b -> c -> m d) -> [a] -> [b] -> [c] -> m () {-# INLINE zipWith3M_ #-} -- Inline so that fusion with 'zipWith4' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. zipWith3M_ f xs ys zs = sequenceA_ (zipWith3 f xs ys zs) zipWith4M :: Monad m => (a -> b -> c -> d -> m e) -> [a] -> [b] -> [c] -> [d] -> m [e] {-# INLINE zipWith4M #-} -- Inline so that fusion with 'zipWith5' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. 
zipWith4M f xs ys ws zs = sequenceA (zipWith4 f xs ys ws zs) zipWithAndUnzipM :: Monad m => (a -> b -> m (c, d)) -> [a] -> [b] -> m ([c], [d]) {-# INLINABLE zipWithAndUnzipM #-} -- this allows specialization to a given monad zipWithAndUnzipM f (x:xs) (y:ys) = do { (c, d) <- f x y ; (cs, ds) <- zipWithAndUnzipM f xs ys ; return (c:cs, d:ds) } zipWithAndUnzipM _ _ _ = return ([], []) -- | 'zipWith3M' for 'NonEmpty' lists. zipWith3MNE :: Monad m => (a -> b -> c -> m d) -> NonEmpty a -> NonEmpty b -> NonEmpty c -> m (NonEmpty d) zipWith3MNE f ~(x :| xs) ~(y :| ys) ~(z :| zs) = do { w <- f x y z ; ws <- zipWith3M f xs ys zs ; return $ w :| ws } {- Note [Inline @mapAndUnzipNM@ functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inline principle is the same as 'mapAndUnzipM' in "Control.Monad". The 'mapAndUnzipM' function is inlined so that the `unzip` and `traverse` functions with which it is defined have an opportunity to fuse, see Note [Inline @unzipN@ functions] in Data/OldList.hs for more details. Furthermore, the @mapAndUnzipNM@ functions have been explicitly rewritten in a non-recursive way similarly to 'mapAndUnzipM', and for more than just uniformity: after [D5249](https://phabricator.haskell.org/D5249) for Trac ticket #14037, all @unzipN@ functions fuse, meaning 'mapAndUnzip3M', 'mapAndUnzip4M' and 'mapAndUnzip5M' now behave like 'mapAndUnzipM' with regards to fusion. As such, since there are not any differences between 2-ary 'mapAndUnzipM' and its n-ary counterparts below aside from the number of arguments, the `INLINE` pragma should be replicated in the @mapAndUnzipNM@ functions below as well. -} -- | mapAndUnzipM for triples mapAndUnzip3M :: Monad m => (a -> m (b,c,d)) -> [a] -> m ([b],[c],[d]) {-# INLINE mapAndUnzip3M #-} -- Inline so that fusion with 'unzip3' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. 
mapAndUnzip3M f xs = unzip3 <$> traverse f xs mapAndUnzip4M :: Monad m => (a -> m (b,c,d,e)) -> [a] -> m ([b],[c],[d],[e]) {-# INLINE mapAndUnzip4M #-} -- Inline so that fusion with 'unzip4' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. mapAndUnzip4M f xs = unzip4 <$> traverse f xs mapAndUnzip5M :: Monad m => (a -> m (b,c,d,e,f)) -> [a] -> m ([b],[c],[d],[e],[f]) {-# INLINE mapAndUnzip5M #-} -- Inline so that fusion with 'unzip5' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. mapAndUnzip5M f xs = unzip5 <$> traverse f xs -- TODO: mapAccumLM is used in many places. Surely most of -- these don't actually want to be lazy. We should add a strict -- variant and use it where appropriate. -- | Monadic version of mapAccumL mapAccumLM :: (Monad m, Traversable t) => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> t x -- ^ inputs -> m (acc, t y) -- ^ final state, outputs {-# INLINE [1] mapAccumLM #-} -- INLINE pragma. mapAccumLM is called in inner loops. Like 'map', -- we inline it so that we can take advantage of knowing 'f'. -- This makes a few percent difference (in compiler allocations) -- when compiling perf/compiler/T9675 mapAccumLM f s = fmap swap . flip runStateT s . traverse f' where f' = StateT . (fmap . fmap) swap . 
flip f {-# RULES "mapAccumLM/List" mapAccumLM = mapAccumLM_List #-} {-# RULES "mapAccumLM/NonEmpty" mapAccumLM = mapAccumLM_NonEmpty #-} mapAccumLM_List :: Monad m => (acc -> x -> m (acc, y)) -> acc -> [x] -> m (acc, [y]) {-# INLINE mapAccumLM_List #-} mapAccumLM_List f s = go s where go s (x:xs) = do (s1, x') <- f s x (s2, xs') <- go s1 xs return (s2, x' : xs') go s [] = return (s, []) mapAccumLM_NonEmpty :: Monad m => (acc -> x -> m (acc, y)) -> acc -> NonEmpty x -> m (acc, NonEmpty y) {-# INLINE mapAccumLM_NonEmpty #-} mapAccumLM_NonEmpty f s (x:|xs) = [(s2, x':|xs') | (s1, x') <- f s x, (s2, xs') <- mapAccumLM_List f s1 xs] -- | Monadic version of mapSnd mapSndM :: (Applicative m, Traversable f) => (b -> m c) -> f (a,b) -> m (f (a,c)) mapSndM = traverse . traverse -- | Monadic version of concatMap concatMapM :: (Monad m, Traversable f) => (a -> m [b]) -> f a -> m [b] concatMapM f xs = liftM concat (mapM f xs) {-# INLINE concatMapM #-} -- It's better to inline to inline this than to specialise -- concatMapM :: (Monad m) => (a -> m [b]) -> [a] -> m [b] -- Inlining cuts compiler allocation by around 1% -- | Applicative version of mapMaybe mapMaybeM :: Applicative m => (a -> m (Maybe b)) -> [a] -> m [b] mapMaybeM f = foldr g (pure []) where g a = liftA2 (maybe id (:)) (f a) -- | Monadic version of 'any', aborts the computation at the first @True@ value anyM :: (Monad m, Foldable f) => (a -> m Bool) -> f a -> m Bool anyM f = foldr (orM . f) (pure False) -- | Monad version of 'all', aborts the computation at the first @False@ value allM :: (Monad m, Foldable f) => (a -> m Bool) -> f a -> m Bool allM f = foldr (andM . 
f) (pure True) -- | Monadic version of or orM :: Monad m => m Bool -> m Bool -> m Bool orM m1 m2 = m1 >>= \x -> if x then return True else m2 -- | Monadic version of and andM :: Monad m => m Bool -> m Bool -> m Bool andM m1 m2 = m1 >>= \x -> if x then m2 else return False -- | Monadic version of foldl that discards its result foldlM_ :: (Monad m, Foldable t) => (a -> b -> m a) -> a -> t b -> m () foldlM_ = foldM_ -- | Monadic version of 'foldMap' foldMapM :: (Applicative m, Foldable t, Monoid b) => (a -> m b) -> t a -> m b foldMapM f = getAp <$> foldMap (Ap . f) -- | Monadic version of @when@, taking the condition in the monad whenM :: Monad m => m Bool -> m () -> m () whenM mb thing = do { b <- mb ; when b thing } -- | Monadic version of @unless@, taking the condition in the monad unlessM :: Monad m => m Bool -> m () -> m () unlessM condM acc = do { cond <- condM ; unless cond acc } -- | Like 'filterM', only it reverses the sense of the test. filterOutM :: (Applicative m) => (a -> m Bool) -> [a] -> m [a] filterOutM p = foldr (\ x -> liftA2 (\ flg -> if flg then id else (x:)) (p x)) (pure []) -- | Monadic version of @partition@ partitionM :: Monad m => (a -> m Bool) -> [a] -> m ([a], [a]) partitionM _ [] = pure ([], []) partitionM f (x:xs) = do res <- f x (as,bs) <- partitionM f xs pure ([x | res]++as, [x | not res]++bs) {- Note [The one-shot state monad trick] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Summary: many places in GHC use a state monad, and we really want those functions to be eta-expanded (#18202). The problem ~~~~~~~~~~~ Consider newtype M a = MkM (State -> (State, a)) instance Monad M where mf >>= k = MkM (\s -> case mf of MkM f -> case f s of (s',r) -> case k r of MkM g -> g s') fooM :: Int -> M Int fooM x = g y >>= \r -> h r where y = expensive x Now suppose you say (repeat 20 (fooM 4)), where repeat :: Int -> M Int -> M Int performs its argument n times. You would expect (expensive 4) to be evaluated only once, not 20 times. 
So foo should have arity 1 (not 2); it should look like this (modulo casts) fooM x = let y = expensive x in \s -> case g y of ... But creating and then repeating, a monadic computation is rare. If you /aren't/ re-using (M a) value, it's /much/ more efficient to make foo have arity 2, thus: fooM x s = case g (expensive x) of ... Why more efficient? Because now foo takes its argument both at once, rather than one at a time, creating a heap-allocated function closure. See https://www.joachim-breitner.de/blog/763-Faster_Winter_5__Eta-Expanding_ReaderT for a very good explanation of the issue which led to these optimisations into GHC. The trick ~~~~~~~~~ With state monads like M the general case is that we *aren't* reusing (M a) values so it is much more efficient to avoid allocating a function closure for them. So the state monad trick is a way to keep the monadic syntax but to make GHC eta-expand functions like `fooM`. To do that we use the "oneShot" magic function. Here is the trick: * Define a "smart constructor" mkM :: (State -> (State,a)) -> M a mkM f = MkM (oneShot m) * Never call MkM directly, as a constructor. Instead, always call mkM. And that's it! The magic 'oneShot' function does this transformation: oneShot (\s. e) ==> \s{os}. e which pins a one-shot flag {os} onto the binder 's'. That tells GHC that it can assume the lambda is called only once, and thus can freely float computations in and out of the lambda. To be concrete, let's see what happens to fooM: fooM = \x. g (expensive x) >>= \r -> h r = \x. let mf = g (expensive x) k = \r -> h r in MkM (oneShot (\s -> case mf of MkM' f -> case f s of (s',r) -> case k r of MkM' g -> g s')) -- The MkM' are just newtype casts nt_co = \x. let mf = g (expensive x) k = \r -> h r in (\s{os}. case (mf |> nt_co) s of (s',r) -> (k r) |> nt_co s') |> sym nt_co -- Crucial step: float let-bindings into that \s{os} = \x. (\s{os}. case (g (expensive x) |> nt_co) s of (s',r) -> h r |> nt_co s') |> sym nt_co and voila! 
fooM has arity 2. The trick is very similar to the built-in "state hack" (see Note [The state-transformer hack] in "GHC.Core.Opt.Arity") but is applicable on a monad-by-monad basis under programmer control. Using pattern synonyms ~~~~~~~~~~~~~~~~~~~~~~ Using a smart constructor is fine, but there is no way to check that we have found *all* uses, especially if the uses escape a single module. A neat (but more sophisticated) alternative is to use pattern synonyms: -- We rename the existing constructor. newtype M a = MkM' (State -> (State, a)) -- The pattern has the old constructor name. pattern MkM f <- MkM' f where MkM f = MkM' (oneShot f) Now we can simply grep to check that there are no uses of MkM' /anywhere/, to guarantee that we have not missed any. (Using the smart constructor alone we still need the data constructor in patterns.) That's the advantage of the pattern-synonym approach, but it is more elaborate. The pattern synonym approach is due to Sebastian Graf (#18238) Do note that for monads for multiple arguments more than one oneShot function might be required. For example in FCode we use: newtype FCode a = FCode' { doFCode :: StgToCmmConfig -> CgState -> (a, CgState) } pattern FCode :: (StgToCmmConfig -> CgState -> (a, CgState)) -> FCode a pattern FCode m <- FCode' m where FCode m = FCode' $ oneShot (\cgInfoDown -> oneShot (\state ->m cgInfoDown state)) Note [INLINE pragmas and (>>)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A nasty gotcha is described in #20008. In brief, be careful if you get (>>) via its default method: instance Applicative M where pure a = MkM (\s -> (s, a)) (<*>) = ap instance Monad UM where {-# INLINE (>>=) #-} m >>= k = MkM (\s -> blah) Here we define (>>), via its default method, in terms of (>>=). If you do this, be sure to put an INLINE pragma on (>>=), as above. That tells it to inline (>>=) in the RHS of (>>), even when it is applied to only two arguments, which in turn conveys the one-shot info from (>>=) to (>>). 
Lacking the INLINE, GHC may eta-expand (>>), and with a non-one-shot lambda. #20008 has more discussion. Derived instances ~~~~~~~~~~~~~~~~~ One caveat of both approaches is that derived instances don't use the smart constructor /or/ the pattern synonym. So they won't benefit from the automatic insertion of "oneShot". data M a = MkM' (State -> (State,a)) deriving (Functor) <-- Functor implementation will use MkM'! Conclusion: don't use 'derviving' in these cases. Multi-shot actions (cf #18238) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sometimes we really *do* want computations to be shared! Remember our example (repeat 20 (fooM 4)). See Note [multiShotIO] in GHC.Types.Unique.Supply We can force fooM to have arity 1 using multiShot: fooM :: Int -> M Int fooM x = multiShotM (g y >>= \r -> h r) where y = expensive x multiShotM :: M a -> M a {-# INLINE multiShotM #-} multiShotM (MkM m) = MkM (\s -> inline m s) -- Really uses the data constructor, -- not the smart constructor! Now we can see how fooM optimises (ignoring casts) multiShotM (g y >>= \r -> h r) ==> {inline (>>=)} multiShotM (\s{os}. case g y s of ...) ==> {inline multiShotM} let m = \s{os}. case g y s of ... in \s. inline m s ==> {inline m} \s. (\s{os}. case g y s of ...) s ==> \s. case g y s of ... and voila! the one-shot flag has gone. It's possible that y has been replaced by (expensive x), but full laziness should pull it back out. (This part seems less robust.) The magic `inline` function does two things * It prevents eta reduction. If we wrote just multiShotIO (IO m) = IO (\s -> m s) the lambda would eta-reduce to 'm' and all would be lost. * It helps ensure that 'm' really does inline. Note that 'inline' evaporates in phase 0. See Note [inlineId magic] in GHC.Core.Opt.ConstantFold.match_inline. The INLINE pragma on multiShotM is very important, else the 'inline' call will evaporate when compiling the module that defines 'multiShotM', before it is ever exported. 
-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Monad/State/0000755000000000000000000000000007346545000021124 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Monad/State/Strict.hs0000644000000000000000000001064707346545000022740 0ustar0000000000000000{-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE PatternSynonyms #-} -- | A state monad which is strict in its state. module GHC.Utils.Monad.State.Strict ( -- * The State monad State(State, State' {- for deriving via purposes only -}) , state , evalState , execState , runState -- * Operations , get , gets , put , modify ) where import GHC.Prelude import GHC.Exts (oneShot) {- Note [Strict State monad] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A State monad can be strict in many ways. Which kind of strictness do we mean? First of, since we represent the result pair as an unboxed pair, this State monad is strict in the sense of "Control.Monad.Trans.State.Strict": The computations and the sequencing there-of (through 'Applicative and 'Monad' instances) are forced strictly. Beyond the manual unboxing of one level (which CPR could achieve similarly, yet perhaps a bit less reliably), our 'State' is even stricter than the transformers version: It's also strict in the state `s` (but still lazy in the value `a`). What this means is that whenever callers examine the state component (perhaps through 'runState'), they will find that the `s` has already been evaluated. This additional strictness maintained in a single place, by the ubiquitous 'State' pattern synonym, by forcing the state component *after* any state action has been run. The INVARIANT is: > Any `s` that makes it into the unboxed pair representation is evaluated. This invariant has another nice effect: Because the evaluatedness is quite apparent, Nested CPR will try to unbox the state component `s` nestedly if feasible. 
Detecting evaluatedness of nested components is a necessary condition for Nested CPR to trigger; see the user's guide entry on that: https://ghc.gitlab.haskell.org/ghc/doc/users_guide/using-optimisation.html#ghc-flag--fcpr-anal Note that this doesn't have any effects on whether Nested CPR will unbox the `a` component (which is still lazy by default). The user still has to use the `return $!` idiom from the user's guide to encourage Nested CPR to unbox the `a` result of a stateful computation. -} -- | A state monad which is strict in the state `s`, but lazy in the value `a`. -- -- See Note [Strict State monad] for the particular notion of strictness and -- implementation details. newtype State s a = State' { runState' :: s -> (# a, s #) } pattern State :: (s -> (# a, s #)) -> State s a -- This pattern synonym makes the monad eta-expand, -- which as a very beneficial effect on compiler performance -- See #18202. -- See Note [The one-shot state monad trick] in GHC.Utils.Monad -- It also implements the particular notion of strictness of this monad; -- see Note [Strict State monad]. pattern State m <- State' m where State m = State' (oneShot $ \s -> forceState (m s)) -- | Forces the state component of the unboxed representation pair of 'State'. -- See Note [Strict State monad]. This is The Place doing the forcing! forceState :: (# a, s #) -> (# a, s #) forceState (# a, !s #) = (# a, s #) -- See Note [The one-shot state monad trick] for why we don't derive this. 
instance Functor (State s) where fmap f m = State $ \s -> case runState' m s of (# x, s' #) -> (# f x, s' #) {-# INLINE fmap #-} instance Applicative (State s) where pure x = State $ \s -> (# x, s #) m <*> n = State $ \s -> case runState' m s of { (# f, s' #) -> case runState' n s' of { (# x, s'' #) -> (# f x, s'' #) }} m *> n = State $ \s -> case runState' m s of { (# _, s' #) -> case runState' n s' of { (# x, s'' #) -> (# x, s'' #) }} {-# INLINE pure #-} {-# INLINE (<*>) #-} {-# INLINE (*>) #-} instance Monad (State s) where m >>= n = State $ \s -> case runState' m s of (# r, !s' #) -> runState' (n r) s' (>>) = (*>) {-# INLINE (>>=) #-} {-# INLINE (>>) #-} state :: (s -> (a, s)) -> State s a state f = State $ \s -> case f s of (r, s') -> (# r, s' #) get :: State s s get = State $ \s -> (# s, s #) gets :: (s -> a) -> State s a gets f = State $ \s -> (# f s, s #) put :: s -> State s () put s' = State $ \_ -> (# (), s' #) modify :: (s -> s) -> State s () modify f = State $ \s -> (# (), f s #) evalState :: State s a -> s -> a evalState s i = case runState' s i of (# a, _ #) -> a execState :: State s a -> s -> s execState s i = case runState' s i of (# _, s' #) -> s' runState :: State s a -> s -> (a, s) runState s i = case runState' s i of (# a, !s' #) -> (a, s') ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Outputable.hs0000644000000000000000000021655607346545000021505 0ustar0000000000000000{-# LANGUAGE EmptyCase #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE FlexibleContexts #-} {- (c) The University of Glasgow 2006-2012 (c) The GRASP Project, Glasgow University, 1992-1998 -} -- | This module defines classes and functions for pretty-printing. 
It also -- exports a number of helpful debugging and other utilities such as 'trace' and 'panic'. -- -- The interface to this module is very similar to the standard Hughes-PJ pretty printing -- module, except that it exports a number of additional functions that are rarely used, -- and works over the 'SDoc' type. module GHC.Utils.Outputable ( -- * Type classes Outputable(..), OutputableBndr(..), OutputableP(..), BindingSite(..), JoinPointHood(..), isJoinPoint, IsOutput(..), IsLine(..), IsDoc(..), HLine, HDoc, -- * Pretty printing combinators SDoc, runSDoc, PDoc(..), docToSDoc, interppSP, interpp'SP, interpp'SP', pprQuotedList, pprWithCommas, pprWithSemis, unquotedListWith, quotedListWithOr, quotedListWithNor, quotedListWithAnd, pprWithBars, spaceIfSingleQuote, isEmpty, nest, ptext, int, intWithCommas, integer, word64, word, float, double, rational, doublePrec, parens, cparen, brackets, braces, quotes, quote, quoteIfPunsEnabled, doubleQuotes, angleBrackets, semi, comma, colon, dcolon, space, equals, dot, vbar, arrow, lollipop, larrow, darrow, arrowt, larrowt, arrowtt, larrowtt, lambda, lparen, rparen, lbrack, rbrack, lbrace, rbrace, underscore, blankLine, forAllLit, bullet, ($+$), cat, fcat, hang, hangNotEmpty, punctuate, punctuateFinal, ppWhen, ppUnless, ppWhenOption, ppUnlessOption, speakNth, speakN, speakNOf, plural, singular, isOrAre, doOrDoes, itsOrTheir, thisOrThese, hasOrHave, itOrThey, unicodeSyntax, coloured, keyword, -- * Converting 'SDoc' into strings and outputting it printSDoc, printSDocLn, bufLeftRenderSDoc, pprCode, showSDocOneLine, showSDocUnsafe, showPprUnsafe, renderWithContext, pprDebugAndThen, pprInfixVar, pprPrefixVar, pprHsChar, pprHsString, pprHsBytes, primFloatSuffix, primCharSuffix, primDoubleSuffix, primInt8Suffix, primWord8Suffix, primInt16Suffix, primWord16Suffix, primInt32Suffix, primWord32Suffix, primInt64Suffix, primWord64Suffix, primIntSuffix, primWordSuffix, pprPrimChar, pprPrimInt, pprPrimWord, pprPrimInt8, pprPrimWord8, 
pprPrimInt16, pprPrimWord16, pprPrimInt32, pprPrimWord32, pprPrimInt64, pprPrimWord64, pprFastFilePath, pprFilePathString, pprModuleName, -- * Controlling the style in which output is printed PprStyle(..), NamePprCtx(..), QueryQualifyName, QueryQualifyModule, QueryQualifyPackage, QueryPromotionTick, PromotedItem(..), IsEmptyOrSingleton(..), isListEmptyOrSingleton, PromotionTickContext(..), reallyAlwaysQualify, reallyAlwaysQualifyNames, alwaysQualify, alwaysQualifyNames, alwaysQualifyModules, neverQualify, neverQualifyNames, neverQualifyModules, alwaysQualifyPackages, neverQualifyPackages, alwaysPrintPromTick, QualifyName(..), queryQual, sdocOption, updSDocContext, SDocContext (..), sdocWithContext, defaultSDocContext, traceSDocContext, getPprStyle, withPprStyle, setStyleColoured, pprDeeper, pprDeeperList, pprSetDepth, codeStyle, userStyle, dumpStyle, qualName, qualModule, qualPackage, promTick, mkErrStyle, defaultErrStyle, defaultDumpStyle, mkDumpStyle, defaultUserStyle, mkUserStyle, cmdlineParserStyle, Depth(..), withUserStyle, withErrStyle, ifPprDebug, whenPprDebug, getPprDebug, bPutHDoc ) where import Language.Haskell.Syntax.Module.Name ( ModuleName(..) ) import GHC.Prelude.Basic import {-# SOURCE #-} GHC.Unit.Types ( Unit, Module, moduleName ) import {-# SOURCE #-} GHC.Types.Name.Occurrence( OccName ) import GHC.Utils.BufHandle (BufHandle, bPutChar, bPutStr, bPutFS, bPutFZS) import GHC.Data.FastString import qualified GHC.Utils.Ppr as Pretty import qualified GHC.Utils.Ppr.Colour as Col import GHC.Utils.Ppr ( Doc, Mode(..) 
) import GHC.Utils.Panic.Plain (assert) import GHC.Serialized import GHC.LanguageExtensions (Extension) import GHC.Utils.GlobalVars( unsafeHasPprDebug ) import GHC.Utils.Misc (lastMaybe, snocView) import Data.ByteString (ByteString) import qualified Data.ByteString as BS import Data.Char import qualified Data.Map as M import Data.Int import qualified Data.IntMap as IM import Data.Set (Set) import qualified Data.Set as Set import qualified Data.IntSet as IntSet import qualified GHC.Data.Word64Set as Word64Set import Data.String import Data.Word import System.IO ( Handle ) import System.FilePath import Text.Printf import Numeric (showFFloat) import Data.Graph (SCC(..)) import Data.List (intersperse) import Data.List.NonEmpty (NonEmpty (..)) import Data.Semigroup (Arg(..)) import qualified Data.List.NonEmpty as NEL import Data.Time ( UTCTime ) import Data.Time.Format.ISO8601 import Data.Void import Control.DeepSeq (NFData(rnf)) import GHC.Fingerprint import GHC.Show ( showMultiLineString ) import GHC.Utils.Exception import GHC.Exts (oneShot) {- ************************************************************************ * * \subsection{The @PprStyle@ data type} * * ************************************************************************ -} data PprStyle = PprUser NamePprCtx Depth Coloured -- Pretty-print in a way that will make sense to the -- ordinary user; must be very close to Haskell -- syntax, etc. -- Assumes printing tidied code: non-system names are -- printed without uniques. | PprDump NamePprCtx -- For -ddump-foo; less verbose than in ppr-debug mode, but more than PprUser -- Does not assume tidied code: non-external names -- are printed with uniques. 
| PprCode -- ^ Print code; either C or assembler data Depth = AllTheWay | PartWay Int -- ^ 0 => stop | DefaultDepth -- ^ Use 'sdocDefaultDepth' field as depth data Coloured = Uncoloured | Coloured -- ----------------------------------------------------------------------------- -- Printing original names -- | When printing code that contains original names, we need to map the -- original names back to something the user understands. This is the -- purpose of the triple of functions that gets passed around -- when rendering 'SDoc'. data NamePprCtx = QueryQualify { queryQualifyName :: QueryQualifyName, queryQualifyModule :: QueryQualifyModule, queryQualifyPackage :: QueryQualifyPackage, queryPromotionTick :: QueryPromotionTick } -- | Given a `Name`'s `Module` and `OccName`, decide whether and how to qualify -- it. type QueryQualifyName = Module -> OccName -> QualifyName -- | For a given module, we need to know whether to print it with -- a package name to disambiguate it. type QueryQualifyModule = Module -> Bool -- | For a given package, we need to know whether to print it with -- the component id to disambiguate it. type QueryQualifyPackage = Unit -> Bool -- | Given a promoted data constructor, -- decide whether to print a tick to disambiguate the namespace. type QueryPromotionTick = PromotedItem -> Bool -- | Flags that affect whether a promotion tick is printed. 
data PromotionTickContext = PromTickCtx { ptcListTuplePuns :: !Bool, ptcPrintRedundantPromTicks :: !Bool } data PromotedItem = PromotedItemListSyntax IsEmptyOrSingleton -- '[x] | PromotedItemTupleSyntax -- '(x, y) | PromotedItemDataCon OccName -- 'MkT newtype IsEmptyOrSingleton = IsEmptyOrSingleton Bool isListEmptyOrSingleton :: [a] -> IsEmptyOrSingleton isListEmptyOrSingleton xs = IsEmptyOrSingleton $ case xs of [] -> True [_] -> True _ -> False -- See Note [Printing original names] in GHC.Types.Name.Ppr data QualifyName -- Given P:M.T = NameUnqual -- It's in scope unqualified as "T" -- OR nothing called "T" is in scope | NameQual ModuleName -- It's in scope qualified as "X.T" | NameNotInScope1 -- It's not in scope at all, but M.T is not bound -- in the current scope, so we can refer to it as "M.T" | NameNotInScope2 -- It's not in scope at all, and M.T is already bound in -- the current scope, so we must refer to it as "P:M.T" instance Outputable QualifyName where ppr NameUnqual = text "NameUnqual" ppr (NameQual _mod) = text "NameQual" -- can't print the mod without module loops :( ppr NameNotInScope1 = text "NameNotInScope1" ppr NameNotInScope2 = text "NameNotInScope2" reallyAlwaysQualifyNames :: QueryQualifyName reallyAlwaysQualifyNames _ _ = NameNotInScope2 -- | NB: This won't ever show package IDs alwaysQualifyNames :: QueryQualifyName alwaysQualifyNames m _ = NameQual (moduleName m) neverQualifyNames :: QueryQualifyName neverQualifyNames _ _ = NameUnqual alwaysQualifyModules :: QueryQualifyModule alwaysQualifyModules _ = True neverQualifyModules :: QueryQualifyModule neverQualifyModules _ = False alwaysQualifyPackages :: QueryQualifyPackage alwaysQualifyPackages _ = True neverQualifyPackages :: QueryQualifyPackage neverQualifyPackages _ = False alwaysPrintPromTick :: QueryPromotionTick alwaysPrintPromTick _ = True reallyAlwaysQualify, alwaysQualify, neverQualify :: NamePprCtx reallyAlwaysQualify = QueryQualify reallyAlwaysQualifyNames alwaysQualifyModules 
alwaysQualifyPackages alwaysPrintPromTick alwaysQualify = QueryQualify alwaysQualifyNames alwaysQualifyModules alwaysQualifyPackages alwaysPrintPromTick neverQualify = QueryQualify neverQualifyNames neverQualifyModules neverQualifyPackages alwaysPrintPromTick defaultUserStyle :: PprStyle defaultUserStyle = mkUserStyle neverQualify AllTheWay defaultDumpStyle :: PprStyle -- Print without qualifiers to reduce verbosity, unless -dppr-debug defaultDumpStyle = PprDump neverQualify mkDumpStyle :: NamePprCtx -> PprStyle mkDumpStyle name_ppr_ctx = PprDump name_ppr_ctx -- | Default style for error messages, when we don't know NamePprCtx -- It's a bit of a hack because it doesn't take into account what's in scope -- Only used for desugarer warnings, and typechecker errors in interface sigs defaultErrStyle :: PprStyle defaultErrStyle = mkErrStyle neverQualify -- | Style for printing error messages mkErrStyle :: NamePprCtx -> PprStyle mkErrStyle name_ppr_ctx = mkUserStyle name_ppr_ctx DefaultDepth cmdlineParserStyle :: PprStyle cmdlineParserStyle = mkUserStyle alwaysQualify AllTheWay mkUserStyle :: NamePprCtx -> Depth -> PprStyle mkUserStyle name_ppr_ctx depth = PprUser name_ppr_ctx depth Uncoloured withUserStyle :: NamePprCtx -> Depth -> SDoc -> SDoc withUserStyle name_ppr_ctx depth doc = withPprStyle (PprUser name_ppr_ctx depth Uncoloured) doc withErrStyle :: NamePprCtx -> SDoc -> SDoc withErrStyle name_ppr_ctx doc = withPprStyle (mkErrStyle name_ppr_ctx) doc setStyleColoured :: Bool -> PprStyle -> PprStyle setStyleColoured col style = case style of PprUser q d _ -> PprUser q d c _ -> style where c | col = Coloured | otherwise = Uncoloured instance Outputable PprStyle where ppr (PprUser {}) = text "user-style" ppr (PprCode {}) = text "code-style" ppr (PprDump {}) = text "dump-style" {- Orthogonal to the above printing styles are (possibly) some command-line flags that affect printing (often carried with the style). 
The most likely ones are variations on how much type info is shown. The following test decides whether or not we are actually generating code (either C or assembly), or generating interface files. ************************************************************************ * * \subsection{The @SDoc@ data type} * * ************************************************************************ -} -- | Represents a pretty-printable document. -- -- To display an 'SDoc', use 'printSDoc', 'printSDocLn', 'bufLeftRenderSDoc', -- or 'renderWithContext'. Avoid calling 'runSDoc' directly as it breaks the -- abstraction layer. newtype SDoc = SDoc' (SDocContext -> Doc) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad {-# COMPLETE SDoc #-} pattern SDoc :: (SDocContext -> Doc) -> SDoc pattern SDoc m <- SDoc' m where SDoc m = SDoc' (oneShot m) runSDoc :: SDoc -> (SDocContext -> Doc) runSDoc (SDoc m) = m data SDocContext = SDC { sdocStyle :: !PprStyle , sdocColScheme :: !Col.Scheme , sdocLastColour :: !Col.PprColour -- ^ The most recently used colour. -- This allows nesting colours. 
, sdocShouldUseColor :: !Bool , sdocDefaultDepth :: !Int , sdocLineLength :: !Int , sdocCanUseUnicode :: !Bool -- ^ True if Unicode encoding is supported -- and not disabled by GHC_NO_UNICODE environment variable , sdocPrintErrIndexLinks :: !Bool , sdocHexWordLiterals :: !Bool , sdocPprDebug :: !Bool , sdocPrintUnicodeSyntax :: !Bool , sdocPrintCaseAsLet :: !Bool , sdocPrintTypecheckerElaboration :: !Bool , sdocPrintAxiomIncomps :: !Bool , sdocPrintExplicitKinds :: !Bool , sdocPrintExplicitCoercions :: !Bool , sdocPrintExplicitRuntimeReps :: !Bool , sdocPrintExplicitForalls :: !Bool , sdocPrintPotentialInstances :: !Bool , sdocPrintEqualityRelations :: !Bool , sdocSuppressTicks :: !Bool , sdocSuppressTypeSignatures :: !Bool , sdocSuppressTypeApplications :: !Bool , sdocSuppressIdInfo :: !Bool , sdocSuppressCoercions :: !Bool , sdocSuppressCoercionTypes :: !Bool , sdocSuppressUnfoldings :: !Bool , sdocSuppressVarKinds :: !Bool , sdocSuppressUniques :: !Bool , sdocSuppressModulePrefixes :: !Bool , sdocSuppressStgExts :: !Bool , sdocSuppressStgReps :: !Bool , sdocErrorSpans :: !Bool , sdocStarIsType :: !Bool , sdocLinearTypes :: !Bool , sdocListTuplePuns :: !Bool , sdocPrintTypeAbbreviations :: !Bool , sdocUnitIdForUser :: !(FastString -> SDoc) -- ^ Used to map UnitIds to more friendly "package-version:component" -- strings while pretty-printing. -- -- Use `GHC.Unit.State.pprWithUnitState` to set it. Users should never -- have to set it to pretty-print SDocs emitted by GHC, otherwise it's a -- bug. It's an internal field used to thread the UnitState so that the -- Outputable instance of UnitId can use it. -- -- See Note [Pretty-printing UnitId] in "GHC.Unit" for more details. -- -- Note that we use `FastString` instead of `UnitId` to avoid boring -- module inter-dependency issues. } instance IsString SDoc where fromString = text -- The lazy programmer's friend. 
instance Outputable SDoc where ppr = id -- | Default pretty-printing options defaultSDocContext :: SDocContext defaultSDocContext = SDC { sdocStyle = defaultDumpStyle , sdocColScheme = Col.defaultScheme , sdocLastColour = Col.colReset , sdocShouldUseColor = False , sdocDefaultDepth = 5 , sdocLineLength = 100 , sdocCanUseUnicode = False , sdocPrintErrIndexLinks = False , sdocHexWordLiterals = False , sdocPprDebug = False , sdocPrintUnicodeSyntax = False , sdocPrintCaseAsLet = False , sdocPrintTypecheckerElaboration = False , sdocPrintAxiomIncomps = False , sdocPrintExplicitKinds = False , sdocPrintExplicitCoercions = False , sdocPrintExplicitRuntimeReps = False , sdocPrintExplicitForalls = False , sdocPrintPotentialInstances = False , sdocPrintEqualityRelations = False , sdocSuppressTicks = False , sdocSuppressTypeSignatures = False , sdocSuppressTypeApplications = False , sdocSuppressIdInfo = False , sdocSuppressCoercions = False , sdocSuppressCoercionTypes = False , sdocSuppressUnfoldings = False , sdocSuppressVarKinds = False , sdocSuppressUniques = False , sdocSuppressModulePrefixes = False , sdocSuppressStgExts = False , sdocSuppressStgReps = True , sdocErrorSpans = False , sdocStarIsType = False , sdocLinearTypes = False , sdocListTuplePuns = True , sdocPrintTypeAbbreviations = True , sdocUnitIdForUser = ftext } traceSDocContext :: SDocContext -- Used for pprTrace, when we want to see lots of info traceSDocContext = defaultSDocContext { sdocPprDebug = unsafeHasPprDebug , sdocPrintTypecheckerElaboration = True , sdocPrintExplicitKinds = True , sdocPrintExplicitCoercions = True , sdocPrintExplicitRuntimeReps = True , sdocPrintExplicitForalls = True , sdocPrintEqualityRelations = True } withPprStyle :: PprStyle -> SDoc -> SDoc {-# INLINE CONLIKE withPprStyle #-} withPprStyle sty d = SDoc $ \ctxt -> runSDoc d ctxt{sdocStyle=sty} pprDeeper :: SDoc -> SDoc pprDeeper d = SDoc $ \ctx -> case sdocStyle ctx of PprUser q depth c -> let deeper 0 = Pretty.text "..." 
deeper n = runSDoc d ctx{sdocStyle = PprUser q (PartWay (n-1)) c} in case depth of DefaultDepth -> deeper (sdocDefaultDepth ctx) PartWay n -> deeper n AllTheWay -> runSDoc d ctx _ -> runSDoc d ctx -- | Truncate a list that is longer than the current depth. pprDeeperList :: ([SDoc] -> SDoc) -> [SDoc] -> SDoc pprDeeperList f ds | null ds = f [] | otherwise = SDoc work where work ctx@SDC{sdocStyle=PprUser q depth c} | DefaultDepth <- depth = work (ctx { sdocStyle = PprUser q (PartWay (sdocDefaultDepth ctx)) c }) | PartWay 0 <- depth = Pretty.text "..." | PartWay n <- depth = let go _ [] = [] go i (d:ds) | i >= n = [text "...."] | otherwise = d : go (i+1) ds in runSDoc (f (go 0 ds)) ctx{sdocStyle = PprUser q (PartWay (n-1)) c} work other_ctx = runSDoc (f ds) other_ctx pprSetDepth :: Depth -> SDoc -> SDoc pprSetDepth depth doc = SDoc $ \ctx -> case ctx of SDC{sdocStyle=PprUser q _ c} -> runSDoc doc ctx{sdocStyle = PprUser q depth c} _ -> runSDoc doc ctx getPprStyle :: (PprStyle -> SDoc) -> SDoc {-# INLINE CONLIKE getPprStyle #-} getPprStyle df = SDoc $ \ctx -> runSDoc (df (sdocStyle ctx)) ctx sdocWithContext :: (SDocContext -> SDoc) -> SDoc {-# INLINE CONLIKE sdocWithContext #-} sdocWithContext f = SDoc $ \ctx -> runSDoc (f ctx) ctx sdocOption :: (SDocContext -> a) -> (a -> SDoc) -> SDoc {-# INLINE CONLIKE sdocOption #-} sdocOption f g = sdocWithContext (g . 
f) updSDocContext :: (SDocContext -> SDocContext) -> SDoc -> SDoc {-# INLINE CONLIKE updSDocContext #-} updSDocContext upd doc = SDoc $ \ctx -> runSDoc doc (upd ctx) qualName :: PprStyle -> QueryQualifyName qualName (PprUser q _ _) mod occ = queryQualifyName q mod occ qualName (PprDump q) mod occ = queryQualifyName q mod occ qualName _other mod _ = NameQual (moduleName mod) qualModule :: PprStyle -> QueryQualifyModule qualModule (PprUser q _ _) m = queryQualifyModule q m qualModule (PprDump q) m = queryQualifyModule q m qualModule _other _m = True qualPackage :: PprStyle -> QueryQualifyPackage qualPackage (PprUser q _ _) m = queryQualifyPackage q m qualPackage (PprDump q) m = queryQualifyPackage q m qualPackage _other _m = True promTick :: PprStyle -> QueryPromotionTick promTick (PprUser q _ _) occ = queryPromotionTick q occ promTick (PprDump q) occ = queryPromotionTick q occ promTick _ _ = True queryQual :: PprStyle -> NamePprCtx queryQual s = QueryQualify (qualName s) (qualModule s) (qualPackage s) (promTick s) codeStyle :: PprStyle -> Bool codeStyle PprCode = True codeStyle _ = False dumpStyle :: PprStyle -> Bool dumpStyle (PprDump {}) = True dumpStyle _other = False userStyle :: PprStyle -> Bool userStyle (PprUser {}) = True userStyle _other = False -- | Indicate if -dppr-debug mode is enabled getPprDebug :: IsOutput doc => (Bool -> doc) -> doc {-# INLINE CONLIKE getPprDebug #-} getPprDebug d = docWithContext $ \ctx -> d (sdocPprDebug ctx) -- | Says what to do with and without -dppr-debug ifPprDebug :: IsOutput doc => doc -> doc -> doc {-# INLINE CONLIKE ifPprDebug #-} ifPprDebug yes no = getPprDebug $ \dbg -> if dbg then yes else no -- | Says what to do with -dppr-debug; without, return empty whenPprDebug :: IsOutput doc => doc -> doc -- Empty for non-debug style {-# INLINE CONLIKE whenPprDebug #-} whenPprDebug d = ifPprDebug d empty -- | The analog of 'Pretty.printDoc_' for 'SDoc', which tries to make sure the -- terminal doesn't get screwed up by the ANSI 
color codes if an exception -- is thrown during pretty-printing. printSDoc :: SDocContext -> Mode -> Handle -> SDoc -> IO () printSDoc ctx mode handle doc = Pretty.printDoc_ mode cols handle (runSDoc doc ctx) `finally` Pretty.printDoc_ mode cols handle (runSDoc (coloured Col.colReset empty) ctx) where cols = sdocLineLength ctx -- | Like 'printSDoc' but appends an extra newline. printSDocLn :: SDocContext -> Mode -> Handle -> SDoc -> IO () printSDocLn ctx mode handle doc = printSDoc ctx mode handle (doc $$ text "") -- | An efficient variant of 'printSDoc' specialized for 'LeftMode' that -- outputs to a 'BufHandle'. bufLeftRenderSDoc :: SDocContext -> BufHandle -> SDoc -> IO () bufLeftRenderSDoc ctx bufHandle doc = Pretty.bufLeftRender bufHandle (runSDoc doc ctx) pprCode :: SDoc -> SDoc {-# INLINE CONLIKE pprCode #-} pprCode d = withPprStyle PprCode d renderWithContext :: SDocContext -> SDoc -> String renderWithContext ctx sdoc = let s = Pretty.style{ Pretty.mode = PageMode False, Pretty.lineLength = sdocLineLength ctx } in Pretty.renderStyle s $ runSDoc sdoc ctx -- This shows an SDoc, but on one line only. It's cheaper than a full -- showSDoc, designed for when we're getting results like "Foo.bar" -- and "foo{uniq strictness}" so we don't want fancy layout anyway. 
showSDocOneLine :: SDocContext -> SDoc -> String
showSDocOneLine ctx d
   = let s = Pretty.style{ Pretty.mode = OneLineMode,
                           Pretty.lineLength = sdocLineLength ctx } in
     Pretty.renderStyle s $
     runSDoc d ctx

-- | Render an 'SDoc' with 'defaultSDocContext'.
-- "Unsafe" because the default context may not match the options the
-- caller actually wants (qualification, depth, unicode, ...).
showSDocUnsafe :: SDoc -> String
showSDocUnsafe sdoc = renderWithContext defaultSDocContext sdoc

-- | 'ppr' a value and render it with 'defaultSDocContext'.
-- Same caveat as 'showSDocUnsafe'.
showPprUnsafe :: Outputable a => a -> String
showPprUnsafe a = renderWithContext defaultSDocContext (ppr a)

-- | Render @heading@ above @pretty_msg@ (nested 2) in dump style and
-- pass the resulting 'String' to the continuation @cont@.
pprDebugAndThen :: SDocContext -> (String -> a) -> SDoc -> SDoc -> a
pprDebugAndThen ctx cont heading pretty_msg
 = cont (renderWithContext ctx doc)
 where
     doc = withPprStyle defaultDumpStyle (sep [heading, nest 2 pretty_msg])

-- | Is the document empty?  Forces 'sdocPprDebug' on so that content
-- hidden behind 'ifPprDebug' still counts as non-empty.
isEmpty :: SDocContext -> SDoc -> Bool
isEmpty ctx sdoc = Pretty.isEmpty $ runSDoc sdoc (ctx {sdocPprDebug = True})

-- | Lift a context-independent 'Doc' into 'SDoc' (ignores the context).
docToSDoc :: Doc -> SDoc
docToSDoc d = SDoc (\_ -> d)

ptext    ::               PtrString  -> SDoc
int      :: IsLine doc => Int        -> doc
integer  :: IsLine doc => Integer    -> doc
word     ::               Integer    -> SDoc
word64   :: IsLine doc => Word64     -> doc
float    :: IsLine doc => Float      -> doc
double   :: IsLine doc => Double     -> doc
rational :: Rational -> SDoc

-- All of these are CONLIKE so that GHC's rewrite rules can see through
-- the wrappers; do not change them to ordinary INLINE.
{-# INLINE CONLIKE ptext #-}
ptext s     = docToSDoc $ Pretty.ptext s
{-# INLINE CONLIKE int #-}
int n       = text $ show n
{-# INLINE CONLIKE integer #-}
integer n   = text $ show n
{-# INLINE CONLIKE float #-}
float n     = text $ show n
{-# INLINE CONLIKE double #-}
double n    = text $ show n
{-# INLINE CONLIKE rational #-}
rational n  = text $ show n
-- See Note [Print Hexadecimal Literals] in GHC.Utils.Ppr
{-# INLINE CONLIKE word64 #-}
word64 n = text $ show n
{-# INLINE CONLIKE word #-}
-- 'word' honours the -dhex-word-literals flag, hence the SDoc-only type.
word n = sdocOption sdocHexWordLiterals $ \case
    True  -> docToSDoc $ Pretty.hex n
    False -> docToSDoc $ Pretty.integer n

-- | @doublePrec p n@ shows a floating point number @n@ with @p@
-- digits of precision after the decimal point.
doublePrec :: Int -> Double -> SDoc
doublePrec p n = text (showFFloat (Just p) n "")

quotes, quote :: SDoc -> SDoc
parens, brackets, braces, doubleQuotes, angleBrackets :: IsLine doc => doc -> doc

-- Bracketing combinators.  CONLIKE so rewrite rules can match on them.
{-# INLINE CONLIKE parens #-}
parens d        = char '(' <> d <> char ')'
{-# INLINE CONLIKE braces #-}
braces d        = char '{' <> d <> char '}'
{-# INLINE CONLIKE brackets #-}
brackets d      = char '[' <> d <> char ']'
{-# INLINE CONLIKE quote #-}
quote d         = SDoc $ Pretty.quote . runSDoc d
{-# INLINE CONLIKE doubleQuotes #-}
doubleQuotes d  = char '"' <> d <> char '"'
{-# INLINE CONLIKE angleBrackets #-}
angleBrackets d = char '<' <> d <> char '>'

-- | Conditionally wrap a document in parentheses.
cparen :: Bool -> SDoc -> SDoc
{-# INLINE CONLIKE cparen #-}
cparen b d = SDoc $ Pretty.maybeParens b . runSDoc d

-- | Add a promotion quote only when list/tuple puns are enabled
-- (see 'sdocListTuplePuns').
quoteIfPunsEnabled :: SDoc -> SDoc
quoteIfPunsEnabled doc = sdocOption sdocListTuplePuns $ \case
  True -> quote doc
  False -> doc

-- 'quotes' encloses something in single quotes...
-- but it omits them if the thing begins or ends in a single quote
-- so that we don't get `foo''.  Instead we just have foo'.
-- Unicode mode uses typographic quotes; ASCII mode suppresses the
-- quotes when the rendered text already starts or ends with a '\''
-- (see the comment above the signature).
quotes d = sdocOption sdocCanUseUnicode $ \case
   True  -> char '‘' <> d <> char '’'
   False -> SDoc $ \sty ->
      let pp_d = runSDoc d sty
          str  = show pp_d
      in case str of
           []       -> Pretty.quotes pp_d
           '\'' : _ -> pp_d
           _ | Just '\'' <- lastMaybe str -> pp_d
             | otherwise                  -> Pretty.quotes pp_d

blankLine, dcolon, arrow, lollipop, larrow, darrow, arrowt, larrowt,
  arrowtt, larrowtt, lambda :: SDoc

-- Symbols with a unicode and an ASCII rendering, chosen via
-- 'unicodeSyntax' (i.e. only unicode when -fprint-unicode-syntax).
blankLine  = docToSDoc Pretty.emptyText
dcolon     = unicodeSyntax (char '∷') (text "::")
arrow      = unicodeSyntax (char '→') (text "->")
lollipop   = unicodeSyntax (char '⊸') (text "%1 ->")
larrow     = unicodeSyntax (char '←') (text "<-")
darrow     = unicodeSyntax (char '⇒') (text "=>")
arrowt     = unicodeSyntax (char '⤚') (text ">-")
larrowt    = unicodeSyntax (char '⤙') (text "-<")
arrowtt    = unicodeSyntax (char '⤜') (text ">>-")
larrowtt   = unicodeSyntax (char '⤛') (text "-<<")
lambda     = unicodeSyntax (char 'λ') (char '\\')

semi, comma, colon, equals, space, underscore, dot, vbar :: IsLine doc => doc
lparen, rparen, lbrack, rbrack, lbrace, rbrace :: IsLine doc => doc
semi       = char ';'
comma      = char ','
colon      = char ':'
equals     = char '='
space      = char ' '
underscore = char '_'
dot        = char '.'
vbar       = char '|'
lparen     = char '('
rparen     = char ')'
lbrack     = char '['
rbrack     = char ']'
lbrace     = char '{'
rbrace     = char '}'

forAllLit :: SDoc
forAllLit = unicodeSyntax (char '∀') (text "forall")

bullet :: SDoc
bullet = unicode (char '•') (char '*')

-- | Choose between a unicode and a plain rendering: unicode is used
-- only when it is both supported ('sdocCanUseUnicode') and requested
-- ('sdocPrintUnicodeSyntax').
unicodeSyntax :: SDoc -> SDoc -> SDoc
unicodeSyntax unicode plain =
  sdocOption sdocCanUseUnicode $ \can_use_unicode ->
  sdocOption sdocPrintUnicodeSyntax $ \print_unicode_syntax ->
    if can_use_unicode && print_unicode_syntax
    then unicode
    else plain

-- | Like 'unicodeSyntax' but gated only on unicode support, not on
-- -fprint-unicode-syntax.
unicode :: SDoc -> SDoc -> SDoc
unicode unicode plain = sdocOption sdocCanUseUnicode $ \case
   True  -> unicode
   False -> plain

nest :: Int -> SDoc -> SDoc
-- ^ Indent 'SDoc' some specified amount
($+$) :: SDoc -> SDoc -> SDoc
-- ^ Join two 'SDoc' together vertically

{-# INLINE CONLIKE nest #-}
nest n d    = SDoc $ Pretty.nest n . runSDoc d
{-# INLINE CONLIKE ($+$) #-}
($+$) d1 d2 = SDoc $ \ctx -> (Pretty.$+$) (runSDoc d1 ctx) (runSDoc d2 ctx)

cat :: [SDoc] -> SDoc
-- ^ A paragraph-fill combinator. It's much like sep, only it
-- keeps fitting things on one line until it can't fit any more.
fcat :: [SDoc] -> SDoc
-- ^ This behaves like 'fsep', but it uses '<>' for horizontal
-- composition rather than '<+>'

-- Inline all those wrappers to help ensure we create lists of Doc, not
-- of SDoc later applied to the same SDocContext. It helps the
-- worker/wrapper transformation extracting only the required fields
-- from the SDocContext.
{-# INLINE CONLIKE cat #-}
cat ds  = SDoc $ \ctx -> Pretty.cat  [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE fcat #-}
fcat ds = SDoc $ \ctx -> Pretty.fcat [runSDoc d ctx | d <- ds]

hang :: SDoc  -- ^ The header
     -> Int   -- ^ Amount to indent the hung body
     -> SDoc  -- ^ The hung body, indented and placed below the header
     -> SDoc
{-# INLINE CONLIKE hang #-}
hang d1 n d2   = SDoc $ \sty -> Pretty.hang (runSDoc d1 sty) n (runSDoc d2 sty)

-- | This behaves like 'hang', but does not indent the second document
-- when the header is empty.
hangNotEmpty :: SDoc -> Int -> SDoc -> SDoc
{-# INLINE CONLIKE hangNotEmpty #-}
hangNotEmpty d1 n d2 =
    SDoc $ \ctx -> Pretty.hangNotEmpty (runSDoc d1 ctx) n (runSDoc d2 ctx)

punctuate :: IsLine doc
          => doc   -- ^ The punctuation
          -> [doc] -- ^ The list that will have punctuation added between every adjacent pair of elements
          -> [doc] -- ^ Punctuated list
punctuate _ []     = []
punctuate p (d:ds) = go d ds
  where
    -- Append the punctuation after every element except the last.
    go d []     = [d]
    go d (e:es) = (d <> p) : go e es

-- | Punctuate a list, e.g. with commas and dots.
--
-- > sep $ punctuateFinal comma dot [text "ab", text "cd", text "ef"]
-- > ab, cd, ef.
punctuateFinal :: IsLine doc
               => doc   -- ^ The interstitial punctuation
               -> doc   -- ^ The final punctuation
               -> [doc] -- ^ The list that will have punctuation added between every adjacent pair of elements
               -> [doc] -- ^ Punctuated list
punctuateFinal _ _ [] = []
punctuateFinal p q (d:ds) = go d ds
  where
    -- Interstitial punctuation between elements, final punctuation
    -- after the last one.
    go d []     = [d <> q]
    go d (e:es) = (d <> p) : go e es

-- | Emit the document only when the condition holds ('ppWhen') or
-- fails to hold ('ppUnless'); otherwise 'empty'.
ppWhen, ppUnless :: IsOutput doc => Bool -> doc -> doc
{-# INLINE CONLIKE ppWhen #-}
ppWhen True  doc = doc
ppWhen False _   = empty

{-# INLINE CONLIKE ppUnless #-}
ppUnless True  _   = empty
ppUnless False doc = doc

{-# INLINE CONLIKE ppWhenOption #-}
-- | Like 'ppWhen' but the condition is read from the 'SDocContext'.
ppWhenOption :: (SDocContext -> Bool) -> SDoc -> SDoc
ppWhenOption f doc = sdocOption f $ \case
   True  -> doc
   False -> empty

{-# INLINE CONLIKE ppUnlessOption #-}
-- | Like 'ppUnless' but the condition is read from the 'SDocContext'.
ppUnlessOption :: (SDocContext -> Bool) -> SDoc -> SDoc
ppUnlessOption f doc = sdocOption f $ \case
   True  -> empty
   False -> doc

-- | Apply the given colour\/style for the argument.
--
-- Only takes effect if colours are enabled ('sdocShouldUseColor' set
-- and the style is a 'Coloured' 'PprUser' style).  The previous colour
-- is recorded in 'sdocLastColour' so that nested 'coloured' calls
-- restore it afterwards.
coloured :: Col.PprColour -> SDoc -> SDoc
coloured col sdoc = sdocOption sdocShouldUseColor $ \case
  True -> SDoc $ \case
    ctx@SDC{ sdocLastColour = lastCol, sdocStyle = PprUser _ _ Coloured } ->
      let ctx' = ctx{ sdocLastColour = lastCol `mappend` col } in
      -- zeroWidthText keeps the escape codes from affecting layout.
      Pretty.zeroWidthText (Col.renderColour col)
        Pretty.<> runSDoc sdoc ctx'
        Pretty.<> Pretty.zeroWidthText (Col.renderColourAfresh lastCol)
    ctx -> runSDoc sdoc ctx
  False -> sdoc

keyword :: SDoc -> SDoc
keyword = coloured Col.colBold

-----------------------------------------------------------------------
-- The @Outputable@ class
-----------------------------------------------------------------------

-- | Class designating that some type has an 'SDoc' representation
class Outputable a where
    ppr :: a -> SDoc

-- There's no Outputable for Char; it's too easy to use Outputable
-- on String and have ppr "hello" rendered as "h,e,l,l,o".
instance Outputable Void where ppr _ = text "<>" instance Outputable Bool where ppr True = text "True" ppr False = text "False" instance Outputable Ordering where ppr LT = text "LT" ppr EQ = text "EQ" ppr GT = text "GT" instance Outputable Int8 where ppr n = integer $ fromIntegral n instance Outputable Int16 where ppr n = integer $ fromIntegral n instance Outputable Int32 where ppr n = integer $ fromIntegral n instance Outputable Int64 where ppr n = integer $ fromIntegral n instance Outputable Int where ppr n = int n instance Outputable Integer where ppr n = integer n instance Outputable Word8 where ppr n = integer $ fromIntegral n instance Outputable Word16 where ppr n = integer $ fromIntegral n instance Outputable Word32 where ppr n = integer $ fromIntegral n instance Outputable Word64 where ppr n = integer $ fromIntegral n instance Outputable Word where ppr n = integer $ fromIntegral n instance Outputable Float where ppr f = float f instance Outputable Double where ppr f = double f instance Outputable () where ppr _ = text "()" instance Outputable UTCTime where ppr = text . formatShow iso8601Format instance (Outputable a) => Outputable [a] where ppr xs = brackets (pprWithCommas ppr xs) instance (Outputable a) => Outputable (NonEmpty a) where ppr = ppr . 
NEL.toList instance (Outputable a, Outputable b) => Outputable (Arg a b) where ppr (Arg a b) = text "Arg" <+> ppr a <+> ppr b instance (Outputable a) => Outputable (Set a) where ppr s = braces (pprWithCommas ppr (Set.toList s)) instance Outputable Word64Set.Word64Set where ppr s = braces (pprWithCommas ppr (Word64Set.toList s)) instance Outputable IntSet.IntSet where ppr s = braces (pprWithCommas ppr (IntSet.toList s)) instance (Outputable a, Outputable b) => Outputable (a, b) where ppr (x,y) = parens (sep [ppr x <> comma, ppr y]) instance Outputable a => Outputable (Maybe a) where ppr Nothing = text "Nothing" ppr (Just x) = text "Just" <+> ppr x instance (Outputable a, Outputable b) => Outputable (Either a b) where ppr (Left x) = text "Left" <+> ppr x ppr (Right y) = text "Right" <+> ppr y -- ToDo: may not be used instance (Outputable a, Outputable b, Outputable c) => Outputable (a, b, c) where ppr (x,y,z) = parens (sep [ppr x <> comma, ppr y <> comma, ppr z ]) instance (Outputable a, Outputable b, Outputable c, Outputable d) => Outputable (a, b, c, d) where ppr (a,b,c,d) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e) => Outputable (a, b, c, d, e) where ppr (a,b,c,d,e) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e, Outputable f) => Outputable (a, b, c, d, e, f) where ppr (a,b,c,d,e,f) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e <> comma, ppr f]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e, Outputable f, Outputable g) => Outputable (a, b, c, d, e, f, g) where ppr (a,b,c,d,e,f,g) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e <> comma, ppr f <> comma, ppr g]) instance Outputable FastString where ppr fs = ftext fs -- Prints an 
unadorned string, -- no double quotes or anything deriving newtype instance Outputable NonDetFastString deriving newtype instance Outputable LexicalFastString instance (Outputable key, Outputable elt) => Outputable (M.Map key elt) where ppr m = ppr (M.toList m) instance (Outputable elt) => Outputable (IM.IntMap elt) where ppr m = ppr (IM.toList m) instance Outputable Fingerprint where ppr (Fingerprint w1 w2) = text (printf "%016x%016x" w1 w2) instance Outputable a => Outputable (SCC a) where ppr (AcyclicSCC v) = text "NONREC" $$ (nest 3 (ppr v)) ppr (CyclicSCC vs) = text "REC" $$ (nest 3 (vcat (map ppr vs))) instance Outputable Serialized where ppr (Serialized the_type bytes) = int (length bytes) <+> text "of type" <+> text (show the_type) instance Outputable Extension where ppr = text . show instance Outputable ModuleName where ppr = pprModuleName pprModuleName :: IsLine doc => ModuleName -> doc pprModuleName (ModuleName nm) = docWithStyle (ztext (zEncodeFS nm)) (\_ -> ftext nm) {-# SPECIALIZE pprModuleName :: ModuleName -> SDoc #-} {-# SPECIALIZE pprModuleName :: ModuleName -> HLine #-} -- see Note [SPECIALIZE to HDoc] ----------------------------------------------------------------------- -- The @OutputableP@ class ----------------------------------------------------------------------- -- Note [The OutputableP class] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- SDoc has become the common type to -- * display messages in the terminal -- * dump outputs (Cmm, Asm, C, etc.) -- * return messages to ghc-api clients -- -- SDoc is a kind of state Monad: SDoc ~ State SDocContext Doc -- I.e. to render a SDoc, a SDocContext must be provided. -- -- SDocContext contains legit rendering options (e.g., line length, color and -- unicode settings). 
Sadly SDocContext ended up also being used to thread -- values that were considered bothersome to thread otherwise: -- * current HomeModule: to decide if module names must be printed qualified -- * current UnitState: to print unit-ids as "packagename-version:component" -- * target platform: to render labels, instructions, etc. -- * selected backend: to display CLabel as C labels or Asm labels -- -- In fact the whole compiler session state that is DynFlags was passed in -- SDocContext and these values were retrieved from it. -- -- The Outputable class makes SDoc creation easy for many values by providing -- the ppr method: -- -- class Outputable a where -- ppr :: a -> SDoc -- -- Almost every type is Outputable in the compiler and it seems great because it -- is similar to the Show class. But it's a fallacious simplicity because `SDoc` -- needs a `SDocContext` to be transformed into a renderable `Doc`: who is going -- to provide the SDocContext with the correct values in it? -- -- E.g. if a SDoc is returned in an exception, how could we know the home -- module at the time it was thrown? -- -- A workaround is to pass dummy values (no home module, empty UnitState) at SDoc -- rendering time and to hope that the code that produced the SDoc has updated -- the SDocContext with meaningful values (e.g. using withPprStyle or -- pprWithUnitState). If the context isn't correctly updated, a dummy value is -- used and the printed result isn't what we expected. Note that the compiler -- doesn't help us finding spots where we need to update the SDocContext. -- -- In some cases we can't pass a dummy value because we can't create one. For -- example, how can we create a dummy Platform value? In the old days, GHC only -- supported a single Platform set when it was built, so we could use it without -- any risk of mistake. But now GHC starts supporting several Platform in the -- same session so it becomes an issue. 
We could be tempted to use the -- workaround described above by using "undefined" as a dummy Platform value. -- However in this case, if we forget to update it we will get a runtime -- error/crash. We could use "Maybe Platform" and die with a better error -- message at places where we really really need to know if we are on Windows or -- not, or if we use 32- or 64-bit. Still the compiler would not help us in -- finding spots where to update the context with a valid Platform. -- -- So finally here comes the OutputableP class: -- -- class OutputableP env a where -- pdoc :: env -> a -> SDoc -- -- OutputableP forces us to thread an environment necessary to print a value. -- For now we only use it to thread a Platform environment, so we have several -- "Outputable Platform XYZ" instances. In the future we could imagine using a -- Has class to retrieve a value from a generic environment to make the code -- more composable. E.g.: -- -- instance Has Platform env => OutputableP env XYZ where -- pdoc env a = ... (getter env :: Platform) -- -- A drawback of this approach over Outputable is that we have to thread an -- environment explicitly to use "pdoc" and it's more cumbersome. But it's the -- price to pay to have some help from the compiler to ensure that we... thread -- an environment down to the places where we need it, i.e. where SDoc are -- created (not rendered). On the other hand, it makes life easier for SDoc -- renderers as they only have to deal with pretty-printing related options in -- SDocContext. -- -- TODO: -- -- 1) we could use OutputableP to thread a UnitState and replace the Outputable -- instance of UnitId with: -- -- instance OutputableP UnitState UnitId where ... -- -- This would allow the removal of the `sdocUnitIdForUser` field. -- -- Be warned: I've tried to do it, but there are A LOT of other Outputable -- instances depending on UnitId's one. In particular: -- UnitId <- Unit <- Module <- Name <- Var <- Core.{Type,Expr} <- ... 
-- -- 2) Use it to pass the HomeModule (but I fear it will be as difficult as for -- UnitId). -- -- -- | Outputable class with an additional environment value -- -- See Note [The OutputableP class] class OutputableP env a where pdoc :: env -> a -> SDoc -- | Wrapper for types having a Outputable instance when an OutputableP instance -- is required. newtype PDoc a = PDoc a instance Outputable a => OutputableP env (PDoc a) where pdoc _ (PDoc a) = ppr a instance OutputableP env a => OutputableP env [a] where pdoc env xs = ppr (fmap (pdoc env) xs) instance OutputableP env a => OutputableP env (Maybe a) where pdoc env xs = ppr (fmap (pdoc env) xs) instance OutputableP env () where pdoc _ _ = ppr () instance (OutputableP env a, OutputableP env b) => OutputableP env (a, b) where pdoc env (a,b) = ppr (pdoc env a, pdoc env b) instance (OutputableP env a, OutputableP env b, OutputableP env c) => OutputableP env (a, b, c) where pdoc env (a,b,c) = ppr (pdoc env a, pdoc env b, pdoc env c) instance (OutputableP env key, OutputableP env elt) => OutputableP env (M.Map key elt) where pdoc env m = ppr $ fmap (\(x,y) -> (pdoc env x, pdoc env y)) $ M.toList m instance OutputableP env a => OutputableP env (SCC a) where pdoc env scc = ppr (fmap (pdoc env) scc) instance OutputableP env SDoc where pdoc _ x = x instance (OutputableP env a) => OutputableP env (Set a) where pdoc env s = braces (fsep (punctuate comma (map (pdoc env) (Set.toList s)))) instance OutputableP env Void where pdoc _ = \ case {- ************************************************************************ * * \subsection{The @OutputableBndr@ class} * * ************************************************************************ -} -- | When we print a binder, we often want to print its type too. -- The @OutputableBndr@ class encapsulates this idea. 
class Outputable a => OutputableBndr a where pprBndr :: BindingSite -> a -> SDoc pprBndr _b x = ppr x pprPrefixOcc, pprInfixOcc :: a -> SDoc -- Print an occurrence of the name, suitable either in the -- prefix position of an application, thus (f a b) or ((+) x) -- or infix position, thus (a `f` b) or (x + y) bndrIsJoin_maybe :: a -> JoinPointHood bndrIsJoin_maybe _ = NotJoinPoint -- When pretty-printing we sometimes want to find -- whether the binder is a join point. You might think -- we could have a function of type (a->Var), but Var -- isn't available yet, alas -- | 'BindingSite' is used to tell the thing that prints binder what -- language construct is binding the identifier. This can be used -- to decide how much info to print. -- Also see Note [Binding-site specific printing] in "GHC.Core.Ppr" data BindingSite = LambdaBind -- ^ The x in (\x. e) | CaseBind -- ^ The x in case scrut of x { (y,z) -> ... } | CasePatBind -- ^ The y,z in case scrut of x { (y,z) -> ... } | LetBind -- ^ The x in (let x = rhs in e) deriving Eq data JoinPointHood = JoinPoint {-# UNPACK #-} !Int -- The JoinArity (but an Int here because | NotJoinPoint -- synonym JoinArity is defined in Types.Basic) deriving( Eq ) isJoinPoint :: JoinPointHood -> Bool isJoinPoint (JoinPoint {}) = True isJoinPoint NotJoinPoint = False instance Outputable JoinPointHood where ppr NotJoinPoint = text "NotJoinPoint" ppr (JoinPoint arity) = text "JoinPoint" <> parens (ppr arity) instance NFData JoinPointHood where rnf x = x `seq` () {- ************************************************************************ * * \subsection{Random printing helpers} * * ************************************************************************ -} -- We have 31-bit Chars and will simply use Show instances of Char and String. -- | Special combinator for showing character literals. 
pprHsChar :: Char -> SDoc pprHsChar c | c > '\x10ffff' = char '\\' <> text (show (fromIntegral (ord c) :: Word32)) | otherwise = text (show c) -- | Special combinator for showing string literals. pprHsString :: FastString -> SDoc pprHsString fs = vcat (map text (showMultiLineString (unpackFS fs))) -- | Special combinator for showing bytestring literals. pprHsBytes :: ByteString -> SDoc pprHsBytes bs = let escaped = concatMap escape $ BS.unpack bs in vcat (map text (showMultiLineString escaped)) <> char '#' where escape :: Word8 -> String escape w = let c = chr (fromIntegral w) in if isAscii c then [c] else '\\' : show w -- Postfix modifiers for unboxed literals. -- See Note [Printing of literals in Core] in "GHC.Types.Literal". primCharSuffix, primFloatSuffix, primDoubleSuffix, primIntSuffix, primWordSuffix, primInt8Suffix, primWord8Suffix, primInt16Suffix, primWord16Suffix, primInt32Suffix, primWord32Suffix, primInt64Suffix, primWord64Suffix :: SDoc primCharSuffix = char '#' primFloatSuffix = char '#' primIntSuffix = char '#' primDoubleSuffix = text "##" primWordSuffix = text "##" primInt8Suffix = text "#Int8" primWord8Suffix = text "#Word8" primInt16Suffix = text "#Int16" primWord16Suffix = text "#Word16" primInt32Suffix = text "#Int32" primWord32Suffix = text "#Word32" primInt64Suffix = text "#Int64" primWord64Suffix = text "#Word64" -- | Special combinator for showing unboxed literals. 
pprPrimChar :: Char -> SDoc pprPrimInt, pprPrimWord, pprPrimInt8, pprPrimWord8, pprPrimInt16, pprPrimWord16, pprPrimInt32, pprPrimWord32, pprPrimInt64, pprPrimWord64 :: Integer -> SDoc pprPrimChar c = pprHsChar c <> primCharSuffix pprPrimInt i = integer i <> primIntSuffix pprPrimWord w = word w <> primWordSuffix pprPrimInt8 i = integer i <> primInt8Suffix pprPrimInt16 i = integer i <> primInt16Suffix pprPrimInt32 i = integer i <> primInt32Suffix pprPrimInt64 i = integer i <> primInt64Suffix pprPrimWord8 w = word w <> primWord8Suffix pprPrimWord16 w = word w <> primWord16Suffix pprPrimWord32 w = word w <> primWord32Suffix pprPrimWord64 w = word w <> primWord64Suffix --------------------- -- Put a name in parens if it's an operator pprPrefixVar :: Bool -> SDoc -> SDoc pprPrefixVar is_operator pp_v | is_operator = parens pp_v | otherwise = pp_v -- Put a name in backquotes if it's not an operator pprInfixVar :: Bool -> SDoc -> SDoc pprInfixVar is_operator pp_v | is_operator = pp_v | otherwise = char '`' <> pp_v <> char '`' --------------------- pprFastFilePath :: FastString -> SDoc pprFastFilePath path = text $ normalise $ unpackFS path -- | Normalise, escape and render a string representing a path -- -- e.g. 
"c:\\whatever" pprFilePathString :: IsLine doc => FilePath -> doc pprFilePathString path = doubleQuotes $ text (escape (normalise path)) where escape [] = [] escape ('\\':xs) = '\\':'\\':escape xs escape (x:xs) = x:escape xs {-# SPECIALIZE pprFilePathString :: FilePath -> SDoc #-} {-# SPECIALIZE pprFilePathString :: FilePath -> HLine #-} -- see Note [SPECIALIZE to HDoc] {- ************************************************************************ * * \subsection{Other helper functions} * * ************************************************************************ -} pprWithCommas :: (a -> SDoc) -- ^ The pretty printing function to use -> [a] -- ^ The things to be pretty printed -> SDoc -- ^ 'SDoc' where the things have been pretty printed, -- comma-separated and finally packed into a paragraph. pprWithCommas pp xs = fsep (punctuate comma (map pp xs)) pprWithSemis :: (a -> SDoc) -- ^ The pretty printing function to use -> [a] -- ^ The things to be pretty printed -> SDoc -- ^ 'SDoc' where the things have been pretty printed, -- semicolon-separated and finally packed into a paragraph. pprWithSemis pp xs = fsep (punctuate semi (map pp xs)) pprWithBars :: (a -> SDoc) -- ^ The pretty printing function to use -> [a] -- ^ The things to be pretty printed -> SDoc -- ^ 'SDoc' where the things have been pretty printed, -- bar-separated and finally packed into a paragraph. pprWithBars pp xs = fsep (intersperse vbar (map pp xs)) -- Prefix the document with a space if it starts with a single quote. -- See Note [Printing promoted type constructors] in GHC.Iface.Type spaceIfSingleQuote :: SDoc -> SDoc spaceIfSingleQuote (SDoc m) = SDoc $ \ctx -> let (mHead, d) = Pretty.docHead (m ctx) in if mHead == Just '\'' then Pretty.space Pretty.<> d else d -- | Returns the separated concatenation of the pretty printed things. interppSP :: Outputable a => [a] -> SDoc interppSP xs = sep (map ppr xs) -- | Returns the comma-separated concatenation of the pretty printed things. 
interpp'SP :: Outputable a => [a] -> SDoc interpp'SP xs = interpp'SP' ppr xs interpp'SP' :: (a -> SDoc) -> [a] -> SDoc interpp'SP' f xs = sep (punctuate comma (map f xs)) -- | Returns the comma-separated concatenation of the quoted pretty printed things. -- -- > [x,y,z] ==> `x', `y', `z' pprQuotedList :: Outputable a => [a] -> SDoc pprQuotedList = quotedList . map ppr quotedList :: [SDoc] -> SDoc quotedList xs = fsep (punctuate comma (map quotes xs)) quotedListWithOr :: [SDoc] -> SDoc -- [x,y,z] ==> `x', `y' or `z' quotedListWithOr xs@(_:_:_) = quotedList (init xs) <+> text "or" <+> quotes (last xs) quotedListWithOr xs = quotedList xs quotedListWithNor :: [SDoc] -> SDoc -- [x,y,z] ==> `x', `y' nor `z' quotedListWithNor xs@(_:_:_) = quotedList (init xs) <+> text "nor" <+> quotes (last xs) quotedListWithNor xs = quotedList xs quotedListWithAnd :: [SDoc] -> SDoc -- [x,y,z] ==> `x', `y' and `z' quotedListWithAnd xs@(_:_:_) = quotedList (init xs) <+> text "and" <+> quotes (last xs) quotedListWithAnd xs = quotedList xs unquotedListWith :: SDoc -> [SDoc] -> SDoc -- "whatever" [x,y,z] ==> x, y whatever z unquotedListWith d xs | Just (fs@(_:_), l) <- snocView xs = unquotedList fs <+> d <+> l | otherwise = unquotedList xs where unquotedList = fsep . 
punctuate comma {- ************************************************************************ * * \subsection{Printing numbers verbally} * * ************************************************************************ -} intWithCommas :: Integral a => a -> SDoc -- Prints a big integer with commas, eg 345,821 intWithCommas n | n < 0 = char '-' <> intWithCommas (-n) | q == 0 = int (fromIntegral r) | otherwise = intWithCommas q <> comma <> zeroes <> int (fromIntegral r) where (q,r) = n `quotRem` 1000 zeroes | r >= 100 = empty | r >= 10 = char '0' | otherwise = text "00" -- | Converts an integer to a verbal index: -- -- > speakNth 1 = text "first" -- > speakNth 5 = text "fifth" -- > speakNth 21 = text "21st" speakNth :: Int -> SDoc speakNth 1 = text "first" speakNth 2 = text "second" speakNth 3 = text "third" speakNth 4 = text "fourth" speakNth 5 = text "fifth" speakNth 6 = text "sixth" speakNth n = hcat [ int n, text suffix ] where suffix | n <= 20 = "th" -- 11,12,13 are non-std | last_dig == 1 = "st" | last_dig == 2 = "nd" | last_dig == 3 = "rd" | otherwise = "th" last_dig = n `rem` 10 -- | Converts an integer to a verbal multiplicity: -- -- > speakN 0 = text "none" -- > speakN 5 = text "five" -- > speakN 10 = text "10" speakN :: Int -> SDoc speakN 0 = text "none" -- E.g. "they have none" speakN 1 = text "one" -- E.g. "they have one" speakN 2 = text "two" speakN 3 = text "three" speakN 4 = text "four" speakN 5 = text "five" speakN 6 = text "six" speakN n = int n -- | Converts an integer and object description to a statement about the -- multiplicity of those objects: -- -- > speakNOf 0 (text "melon") = text "no melons" -- > speakNOf 1 (text "melon") = text "one melon" -- > speakNOf 3 (text "melon") = text "three melons" speakNOf :: Int -> SDoc -> SDoc speakNOf 0 d = text "no" <+> d <> char 's' speakNOf 1 d = text "one" <+> d -- E.g. "one argument" speakNOf n d = speakN n <+> d <> char 's' -- E.g. 
"three arguments" -- | Determines the pluralisation suffix appropriate for the length of a list: -- -- > plural [] = char 's' -- > plural ["Hello"] = empty -- > plural ["Hello", "World"] = char 's' plural :: [a] -> SDoc plural [_] = empty -- a bit frightening, but there you are plural _ = char 's' -- | Determines the singular verb suffix appropriate for the length of a list: -- -- > singular [] = empty -- > singular["Hello"] = char 's' -- > singular ["Hello", "World"] = empty singular :: [a] -> SDoc singular [_] = char 's' singular _ = empty -- | Determines the form of to be appropriate for the length of a list: -- -- > isOrAre [] = text "are" -- > isOrAre ["Hello"] = text "is" -- > isOrAre ["Hello", "World"] = text "are" isOrAre :: [a] -> SDoc isOrAre [_] = text "is" isOrAre _ = text "are" -- | Determines the form of to do appropriate for the length of a list: -- -- > doOrDoes [] = text "do" -- > doOrDoes ["Hello"] = text "does" -- > doOrDoes ["Hello", "World"] = text "do" doOrDoes :: [a] -> SDoc doOrDoes [_] = text "does" doOrDoes _ = text "do" -- | Determines the form of possessive appropriate for the length of a list: -- -- > itsOrTheir [x] = text "its" -- > itsOrTheir [x,y] = text "their" -- > itsOrTheir [] = text "their" -- probably avoid this itsOrTheir :: [a] -> SDoc itsOrTheir [_] = text "its" itsOrTheir _ = text "their" -- | 'it' or 'they', depeneding on the length of the list. -- -- > itOrThey [x] = text "it" -- > itOrThey [x,y] = text "they" -- > itOrThey [] = text "they" -- probably avoid this itOrThey :: [a] -> SDoc itOrThey [_] = text "it" itOrThey _ = text "they" -- | Determines the form of subject appropriate for the length of a list: -- -- > thisOrThese [x] = text "This" -- > thisOrThese [x,y] = text "These" -- > thisOrThese [] = text "These" -- probably avoid this thisOrThese :: [a] -> SDoc thisOrThese [_] = text "This" thisOrThese _ = text "These" -- | @"has"@ or @"have"@ depending on the length of a list. 
hasOrHave :: [a] -> SDoc hasOrHave [_] = text "has" hasOrHave _ = text "have" {- Note [SDoc versus HDoc] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The SDoc type is used pervasively throughout the compiler to represent pretty- printable output. Almost all text written by GHC, from the Haskell types and expressions included in error messages to debug dumps, is assembled using SDoc. SDoc is nice because it handles multiline layout in a semi-automatic fashion, enabling printed expressions to wrap to fit a given line width while correctly indenting the following lines to preserve alignment. SDoc’s niceties necessarily have some performance cost, but this is normally okay, as printing output is rarely a performance bottleneck. However, one notable exception to this is code generation: GHC must sometimes write megabytes’ worth of generated assembly when compiling a single module, in which case the overhead of SDoc has a significant cost (see #21853 for some numbers). Moreover, generated assembly does not have the complex layout requirements of pretty-printed Haskell code, so using SDoc does not buy us much, anyway. Nevertheless, we do still want to be able to share some logic between writing assembly and pretty-printing. For example, the logic for printing basic block labels (GHC.Cmm.CLabel.pprCLabel) is nontrivial, so we want to have a single implementation that can be used both when generating code and when generating Cmm dumps. This is where HDoc comes in: HDoc provides a subset of the SDoc interface, but it is implemented in a far more efficient way, writing directly to a `Handle` (via a `BufHandle`) without building any intermediate structures. We can then use typeclasses to parameterize functions like `pprCLabel` over the printing implementation. One might imagine this would result in one IsDoc typeclass, and two instances, one for SDoc and one for HDoc. 
However, in fact, we need two *variants* of HDoc, as described in Note [HLine versus HDoc], and this gives rise to a small typeclass hierarchy consisting of IsOutput, IsLine, and IsDoc; see Note [The outputable class hierarchy] for details. Note [HLine versus HDoc] ~~~~~~~~~~~~~~~~~~~~~~~~ As described in Note [SDoc versus HDoc], HDoc does not support any of the layout niceties of SDoc for efficiency. However, this presents a small problem if we want to be compatible with the SDoc API, as expressions like text "foo" <+> (text "bar" $$ text "baz") are expected to produce foo bar baz which requires tracking line widths to know how far to indent the second line. We can’t throw out vertical composition altogether, as we need to be able to construct multiline HDocs, but we *can* restrict vertical composition to concatenating whole lines at a time, as this is all that is necessary to generate assembly in the code generator. To implement this restriction, we provide two distinct types: HLine and HDoc. As their names suggests, an HLine represents a single line of output, while an HDoc represents a multiline document. Atoms formed from `char` and `text` begin their lives as HLines, which can be horizontally (but not vertically) composed: char :: Char -> HLine text :: String -> HLine (<+>) :: HLine -> HLine -> HLine Once a line has been fully assembled, it can be “locked up” into a single-line HDoc via `line`, and HDocs can be vertically (but not horizontally) composed: line :: HLine -> HDoc ($$) :: HLine -> HLine -> HLine Note that, at runtime, HLine and HDoc use exactly the same representation. This distinction only exists in the type system to rule out the cases we don’t want to have to handle. Note [The outputable class hierarchy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in Note [SDoc versus HDoc], we want to be able to parameterize over the choice of printing implementation when implementing common bits of printing logic. 
However, as described in Note [HLine versus HDoc], we also want to distinguish code that does single-line printing from code that does multi-line printing. Therefore, code that is parameterized over the choice of printer must respect this single- versus multi-line distinction. This naturally leads to two typeclasses: class IsLine doc where char :: Char -> doc text :: String -> doc (<>) :: doc -> doc -> doc ... class IsLine (Line doc) => IsDoc doc where type Line doc = r | r -> doc line :: Line doc -> doc ($$) :: doc -> doc -> doc ... These classes support the following instances: instance IsLine SDoc instance IsLine SDoc where type Line SDoc = SDoc instance IsLine HLine instance IsDoc HDoc where type Line HDoc = HLine However, we run into a new problem: we provide many useful combinators on docs that don’t care at all about the single-/multi-line distinction. For example, ppWhen and ppUnless provide conditional logic, and docWithContext provides access to the ambient SDocContext. Given the above classes, we would need two variants of each of these combinators: ppWhenL :: IsLine doc => Bool -> doc -> doc ppWhenL c d = if c then d else emptyL ppWhenD :: IsDoc doc => Bool -> doc -> doc ppWhenD c d = if c then d else emptyD This is a needlessly annoying distinction, so we introduce a common superclass, IsOutput, that allows these combinators to be generic over both variants: class IsOutput doc where empty :: doc docWithContext :: (SDocContext -> doc) -> doc docWithStyle :: doc -> (PprStyle -> SDoc) -> doc class IsOutput doc => IsLine doc class (IsOutput doc, IsLine (Line doc)) => IsDoc doc In practice, IsOutput isn’t used explicitly very often, but it makes code that uses the combinators derived from it significantly less noisy. Note [SPECIALIZE to HDoc] ~~~~~~~~~~~~~~~~~~~~~~~~~ The IsLine and IsDoc classes are useful to share printing logic between code that uses SDoc and code that uses HDoc, but we must take some care when doing so. 
Much HDoc’s efficiency comes from GHC’s ability to optimize code that uses it to eliminate unnecessary indirection, but the HDoc primitives must be inlined before these opportunities can be exposed. Therefore, we want to explicitly request that GHC generate HDoc (or HLine) specializations of any polymorphic printing functions used by the code generator. In code generators (CmmToAsm.{AArch64,PPC,X86}.Ppr) we add a specialize pragma just to the entry point pprNatCmmDecl, to avoid cluttering the entire module. Because specialization is transitive, this makes sure that other functions in that module are specialized too. Note [dualLine and dualDoc] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The IsLine and IsDoc classes provide the dualLine and dualDoc methods, respectively, which have the following types: dualLine :: IsLine doc => SDoc -> HLine -> doc dualDoc :: IsDoc doc => SDoc -> HDoc -> doc These are effectively a form of type-`case`, selecting between each of their two arguments depending on the type they are instantiated at. They serve as a “nuclear option” for code that is, for some reason or another, unreasonably difficult to make completely equivalent under both printer implementations. These operations should generally be avoided, as they can result in surprising changes in behavior when the printer implementation is changed. Right now, they are used only when outputting debugging comments in codegen, as it is difficult to adapt that code to use HLine and not necessary. Use these operations wisely. Note [docWithStyle] ~~~~~~~~~~~~~~~~~~~ Sometimes when printing, we consult the printing style. This can be done with 'docWithStyle c f'. This is similar to 'docWithContext (f . sdocStyle)', but: * For code style, 'docWithStyle c f' will return 'c'. * For other styles, 'docWithStyle c f', will call 'f style', but expect an SDoc rather than doc. This removes the need to write code polymorphic in SDoc and HDoc, since the latter is used only for code style. 
-} -- | Represents a single line of output that can be efficiently printed directly -- to a 'System.IO.Handle' (actually a 'BufHandle'). -- See Note [SDoc versus HDoc] and Note [HLine versus HDoc] for more details. newtype HLine = HLine' { runHLine :: SDocContext -> BufHandle -> IO () } -- | Represents a (possibly empty) sequence of lines that can be efficiently -- printed directly to a 'System.IO.Handle' (actually a 'BufHandle'). -- See Note [SDoc versus HDoc] and Note [HLine versus HDoc] for more details. newtype HDoc = HDoc' { runHDoc :: SDocContext -> BufHandle -> IO () } -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern HLine :: (SDocContext -> BufHandle -> IO ()) -> HLine pattern HLine f <- HLine' f where HLine f = HLine' (oneShot (\ctx -> oneShot (\h -> f ctx h))) {-# COMPLETE HLine #-} -- See Note [The one-shot state monad trick] in GHC.Utils.Monad pattern HDoc :: (SDocContext -> BufHandle -> IO ()) -> HDoc pattern HDoc f <- HDoc' f where HDoc f = HDoc' (oneShot (\ctx -> oneShot (\h -> f ctx h))) {-# COMPLETE HDoc #-} bPutHDoc :: BufHandle -> SDocContext -> HDoc -> IO () bPutHDoc h ctx (HDoc f) = assert (codeStyle (sdocStyle ctx)) (f ctx h) -- | A superclass for 'IsLine' and 'IsDoc' that provides an identity, 'empty', -- as well as access to the shared 'SDocContext'. -- -- See Note [The outputable class hierarchy] for more details. class IsOutput doc where empty :: doc docWithContext :: (SDocContext -> doc) -> doc docWithStyle :: doc -> (PprStyle -> SDoc) -> doc -- see Note [docWithStyle] -- | A class of types that represent a single logical line of text, with support -- for horizontal composition. -- -- See Note [HLine versus HDoc] and Note [The outputable class hierarchy] for -- more details. class IsOutput doc => IsLine doc where char :: Char -> doc text :: String -> doc ftext :: FastString -> doc ztext :: FastZString -> doc -- | Join two @doc@s together horizontally without a gap. 
(<>) :: doc -> doc -> doc -- | Join two @doc@s together horizontally with a gap between them. (<+>) :: doc -> doc -> doc -- | Separate: is either like 'hsep' or like 'vcat', depending on what fits. sep :: [doc] -> doc -- | A paragraph-fill combinator. It's much like 'sep', only it keeps fitting -- things on one line until it can't fit any more. fsep :: [doc] -> doc -- | Concatenate @doc@s horizontally without gaps. hcat :: [doc] -> doc hcat docs = foldr (<>) empty docs {-# INLINE CONLIKE hcat #-} -- | Concatenate @doc@s horizontally with a space between each one. hsep :: [doc] -> doc hsep docs = foldr (<+>) empty docs {-# INLINE CONLIKE hsep #-} -- | Prints as either the given 'SDoc' or the given 'HLine', depending on -- which type the result is instantiated to. This should generally be avoided; -- see Note [dualLine and dualDoc] for details. dualLine :: SDoc -> HLine -> doc -- | A class of types that represent a multiline document, with support for -- vertical composition. -- -- See Note [HLine versus HDoc] and Note [The outputable class hierarchy] for -- more details. class (IsOutput doc, IsLine (Line doc)) => IsDoc doc where type Line doc = r | r -> doc line :: Line doc -> doc -- | Join two @doc@s together vertically. If there is no vertical overlap it -- "dovetails" the two onto one line. ($$) :: doc -> doc -> doc lines_ :: [Line doc] -> doc lines_ = vcat . map line {-# INLINE CONLIKE lines_ #-} -- | Concatenate @doc@s vertically with dovetailing. vcat :: [doc] -> doc vcat ls = foldr ($$) empty ls {-# INLINE CONLIKE vcat #-} -- | Prints as either the given 'SDoc' or the given 'HDoc', depending on -- which type the result is instantiated to. This should generally be avoided; -- see Note [dualLine and dualDoc] for details. 
dualDoc :: SDoc -> HDoc -> doc instance IsOutput SDoc where empty = docToSDoc $ Pretty.empty {-# INLINE CONLIKE empty #-} docWithContext = sdocWithContext {-# INLINE docWithContext #-} docWithStyle c f = sdocWithContext (\ctx -> let sty = sdocStyle ctx in if codeStyle sty then c else f sty) -- see Note [docWithStyle] {-# INLINE CONLIKE docWithStyle #-} instance IsLine SDoc where char c = docToSDoc $ Pretty.char c {-# INLINE CONLIKE char #-} text s = docToSDoc $ Pretty.text s {-# INLINE CONLIKE text #-} -- Inline so that the RULE Pretty.text will fire ftext s = docToSDoc $ Pretty.ftext s {-# INLINE CONLIKE ftext #-} ztext s = docToSDoc $ Pretty.ztext s {-# INLINE CONLIKE ztext #-} (<>) d1 d2 = SDoc $ \ctx -> (Pretty.<>) (runSDoc d1 ctx) (runSDoc d2 ctx) {-# INLINE CONLIKE (<>) #-} (<+>) d1 d2 = SDoc $ \ctx -> (Pretty.<+>) (runSDoc d1 ctx) (runSDoc d2 ctx) {-# INLINE CONLIKE (<+>) #-} hcat ds = SDoc $ \ctx -> Pretty.hcat [runSDoc d ctx | d <- ds] {-# INLINE CONLIKE hcat #-} hsep ds = SDoc $ \ctx -> Pretty.hsep [runSDoc d ctx | d <- ds] {-# INLINE CONLIKE hsep #-} sep ds = SDoc $ \ctx -> Pretty.sep [runSDoc d ctx | d <- ds] {-# INLINE CONLIKE sep #-} fsep ds = SDoc $ \ctx -> Pretty.fsep [runSDoc d ctx | d <- ds] {-# INLINE CONLIKE fsep #-} dualLine s _ = s {-# INLINE CONLIKE dualLine #-} instance IsDoc SDoc where type Line SDoc = SDoc line = id {-# INLINE line #-} lines_ = vcat {-# INLINE lines_ #-} ($$) d1 d2 = SDoc $ \ctx -> (Pretty.$$) (runSDoc d1 ctx) (runSDoc d2 ctx) {-# INLINE CONLIKE ($$) #-} vcat ds = SDoc $ \ctx -> Pretty.vcat [runSDoc d ctx | d <- ds] {-# INLINE CONLIKE vcat #-} dualDoc s _ = s {-# INLINE CONLIKE dualDoc #-} instance IsOutput HLine where empty = HLine (\_ _ -> pure ()) {-# INLINE empty #-} docWithContext f = HLine $ \ctx h -> runHLine (f ctx) ctx h {-# INLINE CONLIKE docWithContext #-} docWithStyle c _ = c -- see Note [docWithStyle] {-# INLINE CONLIKE docWithStyle #-} instance IsOutput HDoc where empty = HDoc (\_ _ -> pure ()) {-# INLINE 
empty #-} docWithContext f = HDoc $ \ctx h -> runHDoc (f ctx) ctx h {-# INLINE CONLIKE docWithContext #-} docWithStyle c _ = c -- see Note [docWithStyle] {-# INLINE CONLIKE docWithStyle #-} instance IsLine HLine where char c = HLine (\_ h -> bPutChar h c) {-# INLINE CONLIKE char #-} text str = HLine (\_ h -> bPutStr h str) {-# INLINE CONLIKE text #-} ftext fstr = HLine (\_ h -> bPutFS h fstr) {-# INLINE CONLIKE ftext #-} ztext fstr = HLine (\_ h -> bPutFZS h fstr) {-# INLINE CONLIKE ztext #-} HLine f <> HLine g = HLine (\ctx h -> f ctx h *> g ctx h) {-# INLINE CONLIKE (<>) #-} f <+> g = f <> char ' ' <> g {-# INLINE CONLIKE (<+>) #-} sep = hsep {-# INLINE sep #-} fsep = hsep {-# INLINE fsep #-} dualLine _ h = h {-# INLINE CONLIKE dualLine #-} instance IsDoc HDoc where type Line HDoc = HLine line (HLine f) = HDoc (\ctx h -> f ctx h *> bPutChar h '\n') {-# INLINE CONLIKE line #-} HDoc f $$ HDoc g = HDoc (\ctx h -> f ctx h *> g ctx h) {-# INLINE CONLIKE ($$) #-} dualDoc _ h = h {-# INLINE CONLIKE dualDoc #-} ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Panic.hs0000644000000000000000000002643207346545000020403 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP Project, Glasgow University, 1992-2000 -} {-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables, LambdaCase #-} #include -- | Defines basic functions for printing error messages. -- -- It's hard to put these functions anywhere else without causing -- some unnecessary loops in the module dependency graph. module GHC.Utils.Panic ( -- * GHC exception type GhcException(..) , showGhcException , showGhcExceptionUnsafe , throwGhcException , throwGhcExceptionIO , handleGhcException -- * Command error throwing patterns , pprPanic , panicDoc , sorryDoc , pgmErrorDoc -- ** Assertions , assertPprPanic , assertPpr , assertPprMaybe , assertPprM , massertPpr -- * Call stacks , callStackDoc , prettyCallStackDoc -- * Exception utilities , Exception.Exception(..) 
, showException , safeShowException , try , tryMost , throwTo , withSignalHandlers , module GHC.Utils.Panic.Plain ) where import GHC.Prelude.Basic import GHC.Stack import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import GHC.Utils.Constants import GHC.Utils.Exception as Exception import Control.Monad.IO.Class import qualified Control.Monad.Catch as MC import Control.Concurrent import Data.Typeable ( cast ) import System.IO.Unsafe #if !defined(mingw32_HOST_OS) import System.Posix.Signals as S #endif #if defined(mingw32_HOST_OS) import GHC.ConsoleHandler as S #endif import System.Mem.Weak ( deRefWeak ) -- | GHC's own exception type -- error messages all take the form: -- -- @ -- \: \ -- @ -- -- If the location is on the command line, or in GHC itself, then -- \="ghc". All of the error types below correspond to -- a \ of "ghc", except for ProgramError (where the string is -- assumed to contain a location already, so we don't print one). data GhcException -- | Some other fatal signal (SIGHUP,SIGTERM) = Signal Int -- | Prints the short usage msg after the error | UsageError String -- | A problem with the command line arguments, but don't print usage. | CmdLineError String -- | The 'impossible' happened. | Panic String | PprPanic String SDoc -- | The user tickled something that's known not to work yet, -- but we're not counting it as a bug. | Sorry String | PprSorry String SDoc -- | An installation problem. | InstallationError String -- | An error in the user's code, probably. 
| ProgramError String | PprProgramError String SDoc instance Exception GhcException where fromException (SomeException e) | Just ge <- cast e = Just ge | Just pge <- cast e = Just $ case pge of PlainSignal n -> Signal n PlainUsageError str -> UsageError str PlainCmdLineError str -> CmdLineError str PlainPanic str -> Panic str PlainSorry str -> Sorry str PlainInstallationError str -> InstallationError str PlainProgramError str -> ProgramError str | otherwise = Nothing -- Explicitly omit ExceptionContext since we generally don't -- want backtraces and other context in GHC's user errors. displayException exc = showGhcExceptionUnsafe exc "" instance Show GhcException where showsPrec _ e = showGhcExceptionUnsafe e -- | Show an exception as a string. showException :: Exception e => e -> String showException = show -- | Show an exception which can possibly throw other exceptions. -- Used when displaying exception thrown within TH code. safeShowException :: Exception e => e -> IO String safeShowException e = do -- ensure the whole error message is evaluated inside try r <- try (return $! forceList (showException e)) case r of Right msg -> return msg Left e' -> safeShowException (e' :: SomeException) where forceList [] = [] forceList xs@(x : xt) = x `seq` forceList xt `seq` xs -- | Append a description of the given exception to this string. -- -- Note that this uses 'defaultSDocContext', which doesn't use the options -- set by the user via DynFlags. showGhcExceptionUnsafe :: GhcException -> ShowS showGhcExceptionUnsafe = showGhcException defaultSDocContext -- | Append a description of the given exception to this string. showGhcException :: SDocContext -> GhcException -> ShowS showGhcException ctx = showPlainGhcException . 
\case Signal n -> PlainSignal n UsageError str -> PlainUsageError str CmdLineError str -> PlainCmdLineError str Panic str -> PlainPanic str Sorry str -> PlainSorry str InstallationError str -> PlainInstallationError str ProgramError str -> PlainProgramError str PprPanic str sdoc -> PlainPanic $ concat [str, "\n\n", renderWithContext ctx sdoc] PprSorry str sdoc -> PlainProgramError $ concat [str, "\n\n", renderWithContext ctx sdoc] PprProgramError str sdoc -> PlainProgramError $ concat [str, "\n\n", renderWithContext ctx sdoc] throwGhcException :: GhcException -> a throwGhcException = Exception.throw throwGhcExceptionIO :: GhcException -> IO a throwGhcExceptionIO = Exception.throwIO handleGhcException :: ExceptionMonad m => (GhcException -> m a) -> m a -> m a handleGhcException = MC.handle -- | Throw an exception saying "bug in GHC" with a callstack pprPanic :: HasCallStack => String -> SDoc -> a pprPanic s doc = panicDoc s (doc $$ callStackDoc) -- | Throw an exception saying "bug in GHC" panicDoc :: String -> SDoc -> a panicDoc x doc = throwGhcException (PprPanic x doc) -- | Throw an exception saying "this isn't finished yet" sorryDoc :: String -> SDoc -> a sorryDoc x doc = throwGhcException (PprSorry x doc) -- | Throw an exception saying "bug in pgm being compiled" (used for unusual program errors) pgmErrorDoc :: String -> SDoc -> a pgmErrorDoc x doc = throwGhcException (PprProgramError x doc) -- | Like try, but pass through UserInterrupt and Panic exceptions. -- Used when we want soft failures when reading interface files, for example. 
-- TODO: I'm not entirely sure if this is catching what we really want to catch tryMost :: IO a -> IO (Either SomeException a) tryMost action = do r <- try action case r of Left se -> case fromException se of -- Some GhcException's we rethrow, Just (Signal _) -> throwIO se Just (Panic _) -> throwIO se -- others we return Just _ -> return (Left se) Nothing -> case fromException se of -- All IOExceptions are returned Just (_ :: IOException) -> return (Left se) -- Anything else is rethrown Nothing -> throwIO se Right v -> return (Right v) -- | We use reference counting for signal handlers {-# NOINLINE signalHandlersRefCount #-} #if !defined(mingw32_HOST_OS) signalHandlersRefCount :: MVar (Word, Maybe (S.Handler,S.Handler ,S.Handler,S.Handler)) #else signalHandlersRefCount :: MVar (Word, Maybe S.Handler) #endif signalHandlersRefCount = unsafePerformIO $ newMVar (0,Nothing) -- | Temporarily install standard signal handlers for catching ^C, which just -- throw an exception in the current thread. withSignalHandlers :: ExceptionMonad m => m a -> m a #if !defined(HAVE_SIGNAL_H) -- No signal functionality exist on the host platform (e.g. on -- wasm32-wasi), so don't attempt to set up signal handlers withSignalHandlers = id #else withSignalHandlers act = do main_thread <- liftIO myThreadId wtid <- liftIO (mkWeakThreadId main_thread) let interrupt = do r <- deRefWeak wtid case r of Nothing -> return () Just t -> throwTo t UserInterrupt #if !defined(mingw32_HOST_OS) let installHandlers = do let installHandler' a b = installHandler a b Nothing hdlQUIT <- installHandler' sigQUIT (Catch interrupt) hdlINT <- installHandler' sigINT (Catch interrupt) -- see #3656; in the future we should install these automatically for -- all Haskell programs in the same way that we install a ^C handler. 
let fatal_signal n = throwTo main_thread (Signal (fromIntegral n)) hdlHUP <- installHandler' sigHUP (Catch (fatal_signal sigHUP)) hdlTERM <- installHandler' sigTERM (Catch (fatal_signal sigTERM)) return (hdlQUIT,hdlINT,hdlHUP,hdlTERM) let uninstallHandlers (hdlQUIT,hdlINT,hdlHUP,hdlTERM) = do _ <- installHandler sigQUIT hdlQUIT Nothing _ <- installHandler sigINT hdlINT Nothing _ <- installHandler sigHUP hdlHUP Nothing _ <- installHandler sigTERM hdlTERM Nothing return () #else -- GHC 6.3+ has support for console events on Windows -- NOTE: running GHCi under a bash shell for some reason requires -- you to press Ctrl-Break rather than Ctrl-C to provoke -- an interrupt. Ctrl-C is getting blocked somewhere, I don't know -- why --SDM 17/12/2004 let sig_handler ControlC = interrupt sig_handler Break = interrupt sig_handler _ = return () let installHandlers = installHandler (Catch sig_handler) let uninstallHandlers = installHandler -- directly install the old handler #endif -- install signal handlers if necessary let mayInstallHandlers = liftIO $ modifyMVar_ signalHandlersRefCount $ \case (0,Nothing) -> do hdls <- installHandlers return (1,Just hdls) (c,oldHandlers) -> return (c+1,oldHandlers) -- uninstall handlers if necessary let mayUninstallHandlers = liftIO $ modifyMVar_ signalHandlersRefCount $ \case (1,Just hdls) -> do _ <- uninstallHandlers hdls return (0,Nothing) (c,oldHandlers) -> return (c-1,oldHandlers) mayInstallHandlers act `MC.finally` mayUninstallHandlers #endif callStackDoc :: HasCallStack => SDoc callStackDoc = prettyCallStackDoc callStack prettyCallStackDoc :: CallStack -> SDoc prettyCallStackDoc cs = hang (text "Call stack:") 4 (vcat $ map text $ lines (prettyCallStack cs)) -- | Panic with an assertion failure, recording the given file and -- line number. Should typically be accessed with the ASSERT family of macros assertPprPanic :: HasCallStack => SDoc -> a assertPprPanic msg = withFrozenCallStack (pprPanic "ASSERT failed!" 
msg) assertPpr :: HasCallStack => Bool -> SDoc -> a -> a {-# INLINE assertPpr #-} assertPpr cond msg a = if debugIsOn && not cond then withFrozenCallStack (assertPprPanic msg) else a assertPprMaybe :: HasCallStack => Maybe SDoc -> a -> a {-# INLINE assertPprMaybe #-} assertPprMaybe mb_msg a | debugIsOn, Just msg <- mb_msg = withFrozenCallStack (assertPprPanic msg) | otherwise = a massertPpr :: (HasCallStack, Applicative m) => Bool -> SDoc -> m () {-# INLINE massertPpr #-} massertPpr cond msg = withFrozenCallStack (assertPpr cond msg (pure ())) assertPprM :: (HasCallStack, Monad m) => m Bool -> SDoc -> m () {-# INLINE assertPprM #-} assertPprM mcond msg = withFrozenCallStack (mcond >>= \cond -> massertPpr cond msg) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Panic/0000755000000000000000000000000007346545000020040 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Panic/Plain.hs0000644000000000000000000001132107346545000021435 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables, LambdaCase #-} -- | Defines a simple exception type and utilities to throw it. The -- 'PlainGhcException' type is a subset of the 'GHC.Utils.Panic.GhcException' -- type. It omits the exception constructors that involve -- pretty-printing via 'GHC.Utils.Outputable.SDoc'. -- -- The reason for this is to avoid import cycles / use of boot files. -- "GHC.Utils.Outputable" has many transitive dependencies. -- To throw exceptions from these modules, the functions here can be used -- without introducing import cycles. module GHC.Utils.Panic.Plain ( PlainGhcException(..) 
, showPlainGhcException , panic, sorry, pgmError , cmdLineError, cmdLineErrorIO , assertPanic , assert, assertM, massert ) where import GHC.Settings.Config import GHC.Utils.Constants import GHC.Utils.Exception as Exception import GHC.Stack import GHC.Prelude.Basic import Control.Monad (when) import System.IO.Unsafe -- | This type is very similar to 'GHC.Utils.Panic.GhcException', but it omits -- the constructors that involve pretty-printing via -- 'GHC.Utils.Outputable.SDoc'. Due to the implementation of 'fromException' -- for 'GHC.Utils.Panic.GhcException', this type can be caught as a -- 'GHC.Utils.Panic.GhcException'. -- -- Note that this should only be used for throwing exceptions, not for -- catching, as 'GHC.Utils.Panic.GhcException' will not be converted to this -- type when catching. data PlainGhcException -- | Some other fatal signal (SIGHUP,SIGTERM) = PlainSignal Int -- | Prints the short usage msg after the error | PlainUsageError String -- | A problem with the command line arguments, but don't print usage. | PlainCmdLineError String -- | The 'impossible' happened. | PlainPanic String -- | The user tickled something that's known not to work yet, -- but we're not counting it as a bug. | PlainSorry String -- | An installation problem. | PlainInstallationError String -- | An error in the user's code, probably. | PlainProgramError String instance Exception PlainGhcException instance Show PlainGhcException where showsPrec _ e = showPlainGhcException e -- | Short usage information to display when we are given the wrong cmd line arguments. short_usage :: String short_usage = "Usage: For basic information, try the `--help' option." -- | Append a description of the given exception to this string. showPlainGhcException :: PlainGhcException -> ShowS showPlainGhcException = \case PlainSignal n -> showString "signal: " . shows n PlainUsageError str -> showString str . showChar '\n' . 
showString short_usage PlainCmdLineError str -> showString str PlainPanic s -> panicMsg (showString s) PlainSorry s -> sorryMsg (showString s) PlainInstallationError str -> showString str PlainProgramError str -> showString str where sorryMsg :: ShowS -> ShowS sorryMsg s = showString "sorry! (unimplemented feature or known bug)\n" . showString (" GHC version " ++ cProjectVersion ++ ":\n\t") . s . showString "\n" panicMsg :: ShowS -> ShowS panicMsg s = showString "panic! (the 'impossible' happened)\n" . showString (" GHC version " ++ cProjectVersion ++ ":\n\t") . s . showString "\n\n" . showString "Please report this as a GHC bug: https://www.haskell.org/ghc/reportabug\n" throwPlainGhcException :: PlainGhcException -> a throwPlainGhcException = Exception.throw -- | Panics and asserts. panic, sorry, pgmError :: HasCallStack => String -> a panic x = unsafeDupablePerformIO $ throwPlainGhcException (PlainPanic x) sorry x = throwPlainGhcException (PlainSorry x) pgmError x = throwPlainGhcException (PlainProgramError x) cmdLineError :: String -> a cmdLineError = unsafeDupablePerformIO . cmdLineErrorIO cmdLineErrorIO :: String -> IO a cmdLineErrorIO x = throwPlainGhcException (PlainCmdLineError x) -- | Throw a failed assertion exception for a given filename and line number. assertPanic :: String -> Int -> a assertPanic file line = Exception.throw (Exception.AssertionFailed ("ASSERT failed! 
file " ++ file ++ ", line " ++ show line)) assertPanic' :: HasCallStack => a assertPanic' = Exception.throw (Exception.AssertionFailed "ASSERT failed!") assert :: HasCallStack => Bool -> a -> a {-# INLINE assert #-} assert cond a = if debugIsOn && not cond then withFrozenCallStack assertPanic' else a massert :: (HasCallStack, Applicative m) => Bool -> m () {-# INLINE massert #-} massert cond = withFrozenCallStack (assert cond (pure ())) assertM :: (HasCallStack, Monad m) => m Bool -> m () {-# INLINE assertM #-} assertM mcond | debugIsOn = withFrozenCallStack $ do res <- mcond when (not res) assertPanic' | otherwise = return () ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Ppr.hs0000644000000000000000000012127707346545000020115 0ustar0000000000000000{-# LANGUAGE MagicHash #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Utils.Ppr -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : David Terei -- Stability : stable -- Portability : portable -- -- John Hughes's and Simon Peyton Jones's Pretty Printer Combinators -- -- Based on /The Design of a Pretty-printing Library/ -- in Advanced Functional Programming, -- Johan Jeuring and Erik Meijer (eds), LNCS 925 -- -- ----------------------------------------------------------------------------- {- Note [Differences between libraries/pretty and compiler/GHC/Utils/Ppr.hs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For historical reasons, there are two different copies of `Pretty` in the GHC source tree: * `libraries/pretty` is a submodule containing https://github.com/haskell/pretty. This is the `pretty` library as released on hackage. It is used by several other libraries in the GHC source tree (e.g. template-haskell and Cabal). * `compiler/GHC/Utils/Ppr.hs` (this module). It is used by GHC only. 
There is an ongoing effort in https://github.com/haskell/pretty/issues/1 and https://gitlab.haskell.org/ghc/ghc/issues/10735 to try to get rid of GHC's copy of Pretty. Currently, GHC's copy of Pretty resembles pretty-1.1.2.0, with the following major differences: * GHC's copy uses `Faststring` for performance reasons. * GHC's copy has received a backported bugfix for #12227, which was released as pretty-1.1.3.4 ("Remove harmful $! forcing in beside", https://github.com/haskell/pretty/pull/35). Other differences are minor. Both copies define some extra functions and instances not defined in the other copy. To see all differences, do this in a ghc git tree: $ cd libraries/pretty $ git checkout v1.1.2.0 $ cd - $ vimdiff compiler/GHC/Utils/Ppr.hs \ libraries/pretty/src/Text/PrettyPrint/HughesPJ.hs For parity with `pretty-1.1.2.1`, the following two `pretty` commits would have to be backported: * "Resolve foldr-strictness stack overflow bug" (307b8173f41cd776eae8f547267df6d72bff2d68) * "Special-case reduce for horiz/vert" (c57c7a9dfc49617ba8d6e4fcdb019a3f29f1044c) This has not been done sofar, because these commits seem to cause more allocation in the compiler (see thomie's comments in https://github.com/haskell/pretty/pull/9). 
-} module GHC.Utils.Ppr ( -- * The document type Doc, TextDetails(..), -- * Constructing documents -- ** Converting values into documents char, text, ftext, ptext, ztext, sizedText, zeroWidthText, emptyText, int, integer, float, double, rational, hex, -- ** Simple derived documents semi, comma, colon, space, equals, lparen, rparen, lbrack, rbrack, lbrace, rbrace, -- ** Wrapping documents in delimiters parens, brackets, braces, quotes, squotes, quote, doubleQuotes, maybeParens, -- ** Combining documents empty, (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat, nest, hang, hangNotEmpty, punctuate, -- * Predicates on documents isEmpty, docHead, -- * Rendering documents -- ** Rendering with a particular style Style(..), style, renderStyle, Mode(..), -- ** General rendering fullRender, txtPrinter, -- ** GHC-specific rendering printDoc, printDoc_, bufLeftRender, printLeftRender -- performance hack ) where import GHC.Prelude.Basic hiding (error) import Control.Applicative ((<|>)) import GHC.Utils.BufHandle import GHC.Data.FastString import GHC.Utils.Panic.Plain import System.IO import Numeric (showHex) --for a RULES import GHC.Base ( unpackCString#, unpackNBytes#, Int(..) ) import GHC.Ptr ( Ptr(..) ) -- --------------------------------------------------------------------------- -- The Doc calculus {- Laws for $$ ~~~~~~~~~~~ (x $$ y) $$ z = x $$ (y $$ z) empty $$ x = x x $$ empty = x ...ditto $+$... Laws for <> ~~~~~~~~~~~ (x <> y) <> z = x <> (y <> z) empty <> x = empty x <> empty = x ...ditto <+>... Laws for text ~~~~~~~~~~~~~ text s <> text t = text (s++t) text "" <> x = x, if x non-empty ** because of law n6, t2 only holds if x doesn't ** start with `nest'. Laws for nest ~~~~~~~~~~~~~ nest 0 x = x nest k (nest k' x) = nest (k+k') x nest k (x <> y) = nest k x <> nest k y nest k (x $$ y) = nest k x $$ nest k y nest k empty = empty x <> nest k y = x <> y, if x non-empty ** Note the side condition on ! 
It is this that ** makes it OK for empty to be a left unit for <>. Miscellaneous ~~~~~~~~~~~~~ (text s <> x) $$ y = text s <> ((text "" <> x) $$ nest (-length s) y) (x $$ y) <> z = x $$ (y <> z) if y non-empty Laws for list versions ~~~~~~~~~~~~~~~~~~~~~~ sep (ps++[empty]++qs) = sep (ps ++ qs) ...ditto hsep, hcat, vcat, fill... nest k (sep ps) = sep (map (nest k) ps) ...ditto hsep, hcat, vcat, fill... Laws for oneLiner ~~~~~~~~~~~~~~~~~ oneLiner (nest k p) = nest k (oneLiner p) oneLiner (x <> y) = oneLiner x <> oneLiner y You might think that the following version of would be neater: <3 NO> (text s <> x) $$ y = text s <> ((empty <> x)) $$ nest (-length s) y) But it doesn't work, for if x=empty, we would have text s $$ y = text s <> (empty $$ nest (-length s) y) = text s <> nest (-length s) y -} -- --------------------------------------------------------------------------- -- Operator fixity infixl 6 <> infixl 6 <+> infixl 5 $$, $+$ -- --------------------------------------------------------------------------- -- The Doc data type -- | The abstract type of documents. -- A Doc represents a *set* of layouts. A Doc with -- no occurrences of Union or NoDoc represents just one layout. data Doc = Empty -- empty | NilAbove Doc -- text "" $$ x | TextBeside !TextDetails {-# UNPACK #-} !Int Doc -- text s <> x | Nest {-# UNPACK #-} !Int Doc -- nest k x | Union Doc Doc -- ul `union` ur | NoDoc -- The empty set of documents | Beside Doc Bool Doc -- True <=> space between | Above Doc Bool Doc -- True <=> never overlap {- Here are the invariants: 1) The argument of NilAbove is never Empty. Therefore a NilAbove occupies at least two lines. 2) The argument of @TextBeside@ is never @Nest@. 3) The layouts of the two arguments of @Union@ both flatten to the same string. 4) The arguments of @Union@ are either @TextBeside@, or @NilAbove@. 5) A @NoDoc@ may only appear on the first line of the left argument of an union. 
Therefore, the right argument of an union can never be equivalent to the empty set (@NoDoc@). 6) An empty document is always represented by @Empty@. It can't be hidden inside a @Nest@, or a @Union@ of two @Empty@s. 7) The first line of every layout in the left argument of @Union@ is longer than the first line of any layout in the right argument. (1) ensures that the left argument has a first line. In view of (3), this invariant means that the right argument must have at least two lines. Notice the difference between * NoDoc (no documents) * Empty (one empty document; no height and no width) * text "" (a document containing the empty string; one line high, but has no width) -} -- | RDoc is a "reduced GDoc", guaranteed not to have a top-level Above or Beside. type RDoc = Doc -- | The TextDetails data type -- -- A TextDetails represents a fragment of text that will be -- output at some point. data TextDetails = Chr {-# UNPACK #-} !Char -- ^ A single Char fragment | Str String -- ^ A whole String fragment | PStr FastString -- a hashed string | ZStr FastZString -- a z-encoded string | LStr {-# UNPACK #-} !PtrString -- a '\0'-terminated array of bytes | RStr {-# UNPACK #-} !Int {-# UNPACK #-} !Char -- a repeated character (e.g., ' ') instance Show Doc where showsPrec _ doc cont = fullRender (mode style) (lineLength style) (ribbonsPerLine style) txtPrinter cont doc -- --------------------------------------------------------------------------- -- Values and Predicates on GDocs and TextDetails -- | A document of height and width 1, containing a literal character. char :: Char -> Doc char c = textBeside_ (Chr c) 1 Empty -- | A document of height 1 containing a literal string. -- 'text' satisfies the following laws: -- -- * @'text' s '<>' 'text' t = 'text' (s'++'t)@ -- -- * @'text' \"\" '<>' x = x@, if @x@ non-empty -- -- The side condition on the last law is necessary because @'text' \"\"@ -- has height 1, while 'empty' has no height. 
text :: String -> Doc text s = textBeside_ (Str s) (length s) Empty {-# NOINLINE [0] text #-} -- Give the RULE a chance to fire -- It must wait till after phase 1 when -- the unpackCString first is manifested -- RULE that turns (text "abc") into (ptext (A# "abc"#)) to avoid the -- intermediate packing/unpacking of the string. {-# RULES "text/str" forall a. text (unpackCString# a) = ptext (mkPtrString# a) #-} {-# RULES "text/unpackNBytes#" forall p n. text (unpackNBytes# p n) = ptext (PtrString (Ptr p) (I# n)) #-} -- Empty strings are desugared into [] (not "unpackCString#..."), hence they are -- not matched by the text/str rule above. {-# RULES "text/[]" text [] = emptyText #-} ftext :: FastString -> Doc ftext s = textBeside_ (PStr s) (lengthFS s) Empty ptext :: PtrString -> Doc ptext s = textBeside_ (LStr s) (lengthPS s) Empty ztext :: FastZString -> Doc ztext s = textBeside_ (ZStr s) (lengthFZS s) Empty -- | Some text with any width. (@text s = sizedText (length s) s@) sizedText :: Int -> String -> Doc sizedText l s = textBeside_ (Str s) l Empty -- | Some text, but without any width. Use for non-printing text -- such as a HTML or Latex tags zeroWidthText :: String -> Doc zeroWidthText = sizedText 0 -- | Empty text (one line high but no width). (@emptyText = text ""@) emptyText :: Doc emptyText = sizedText 0 [] -- defined as a CAF. Sharing occurs especially via the text/[] rule above. -- Every use of `text ""` in user code should be replaced with this. -- | The empty document, with no height and no width. -- 'empty' is the identity for '<>', '<+>', '$$' and '$+$', and anywhere -- in the argument list for 'sep', 'hcat', 'hsep', 'vcat', 'fcat' etc. empty :: Doc empty = Empty -- | Returns 'True' if the document is empty isEmpty :: Doc -> Bool isEmpty Empty = True isEmpty _ = False -- | Get the first character of a document. We also return a new document, -- equivalent to the original one but faster to render. Use it to avoid work -- duplication. 
docHead :: Doc -> (Maybe Char, Doc) docHead d = (headChar, rdoc) where rdoc = reduceDoc d headChar = go rdoc go :: RDoc -> Maybe Char go (Union p q) = go (first p q) go (Nest _ p) = go p go Empty = Nothing go (NilAbove _) = Just '\n' go (TextBeside td _ p) = go_td td <|> go p go NoDoc = error "docHead: NoDoc" go (Above {}) = error "docHead: Above" go (Beside {}) = error "docHead: Beside" go_td :: TextDetails -> Maybe Char go_td (Chr c) = Just c go_td (Str s) = go_str s go_td (PStr s) = go_str (unpackFS s) -- O(1) because unpackFS is lazy go_td (ZStr s) = go_str (zStringTakeN 1 s) go_td (LStr s) = go_str (unpackPtrStringTakeN 1 s) go_td (RStr n c) = if n > 0 then Just c else Nothing go_str :: String -> Maybe Char go_str [] = Nothing go_str (c:_) = Just c {- Q: What is the reason for negative indentation (i.e. argument to indent is < 0) ? A: This indicates an error in the library client's code. If we compose a <> b, and the first line of b is more indented than some other lines of b, the law (<> eats nests) may cause the pretty printer to produce an invalid layout: doc |0123345 ------------------ d1 |a...| d2 |...b| |c...| d1<>d2 |ab..| c|....| Consider a <> b, let `s' be the length of the last line of `a', `k' the indentation of the first line of b, and `k0' the indentation of the left-most line b_i of b. The produced layout will have negative indentation if `k - k0 > s', as the first line of b will be put on the (s+1)th column, effectively translating b horizontally by (k-s). Now if the i^th line of b has an indentation k0 < (k-s), it is translated out-of-page, causing `negative indentation'. 
-} semi :: Doc -- ^ A ';' character comma :: Doc -- ^ A ',' character colon :: Doc -- ^ A ':' character space :: Doc -- ^ A space character equals :: Doc -- ^ A '=' character lparen :: Doc -- ^ A '(' character rparen :: Doc -- ^ A ')' character lbrack :: Doc -- ^ A '[' character rbrack :: Doc -- ^ A ']' character lbrace :: Doc -- ^ A '{' character rbrace :: Doc -- ^ A '}' character semi = char ';' comma = char ',' colon = char ':' space = char ' ' equals = char '=' lparen = char '(' rparen = char ')' lbrack = char '[' rbrack = char ']' lbrace = char '{' rbrace = char '}' spaceText, nlText :: TextDetails spaceText = Chr ' ' nlText = Chr '\n' int :: Int -> Doc -- ^ @int n = text (show n)@ integer :: Integer -> Doc -- ^ @integer n = text (show n)@ float :: Float -> Doc -- ^ @float n = text (show n)@ double :: Double -> Doc -- ^ @double n = text (show n)@ rational :: Rational -> Doc -- ^ @rational n = text (show n)@ hex :: Integer -> Doc -- ^ See Note [Print Hexadecimal Literals] int n = text (show n) integer n = text (show n) float n = text (show n) double n = text (show n) rational n = text (show n) hex n = text ('0' : 'x' : padded) where str = showHex n "" strLen = max 1 (length str) len = 2 ^ (ceiling (logBase 2 (fromIntegral strLen :: Double)) :: Int) padded = replicate (len - strLen) '0' ++ str parens :: Doc -> Doc -- ^ Wrap document in @(...)@ brackets :: Doc -> Doc -- ^ Wrap document in @[...]@ braces :: Doc -> Doc -- ^ Wrap document in @{...}@ quotes :: Doc -> Doc -- ^ Wrap document in @\`...\'@ squotes :: Doc -> Doc -- ^ Wrap document in @\'...\'@ quote :: Doc -> Doc doubleQuotes :: Doc -> Doc -- ^ Wrap document in @\"...\"@ quotes p = char '`' <> p <> char '\'' squotes p = char '\'' <> p <> char '\'' quote p = char '\'' <> p doubleQuotes p = char '"' <> p <> char '"' parens p = char '(' <> p <> char ')' brackets p = char '[' <> p <> char ']' braces p = char '{' <> p <> char '}' {- Note [Print Hexadecimal Literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Relevant 
discussions: * Phabricator: https://phabricator.haskell.org/D4465 * GHC Trac: https://gitlab.haskell.org/ghc/ghc/issues/14872 There is a flag `-dhex-word-literals` that causes literals of type `Word#` or `Word64#` to be displayed in hexadecimal instead of decimal when dumping GHC core. It also affects the presentation of these in GHC's error messages. Additionally, the hexadecimal encoding of these numbers is zero-padded so that its length is a power of two. As an example of what this does, consider the following haskell file `Literals.hs`: module Literals where alpha :: Int alpha = 100 + 200 beta :: Word -> Word beta x = x + div maxBound 255 + div 0xFFFFFFFF 255 + 0x0202 We get the following dumped core when we compile on a 64-bit machine with ghc -O2 -fforce-recomp -ddump-simpl -dsuppress-all -dhex-word-literals literals.hs: ==================== Tidy Core ==================== ... omitted for brevity ... -- RHS size: {terms: 2, types: 0, coercions: 0, joins: 0/0} alpha alpha = I# 300# -- RHS size: {terms: 12, types: 3, coercions: 0, joins: 0/0} beta beta = \ x_aYE -> case x_aYE of { W# x#_a1v0 -> W# (plusWord# (plusWord# (plusWord# x#_a1v0 0x0101010101010101##) 0x01010101##) 0x0202##) } Notice that the word literals are in hexadecimals and that they have been padded with zeroes so that their lengths are 16, 8, and 4, respectively. -} -- | Apply 'parens' to 'Doc' if boolean is true. maybeParens :: Bool -> Doc -> Doc maybeParens False = id maybeParens True = parens -- --------------------------------------------------------------------------- -- Structural operations on GDocs -- | Perform some simplification of a built up @GDoc@. reduceDoc :: Doc -> RDoc reduceDoc (Beside p g q) = p `seq` g `seq` (beside p g $! reduceDoc q) reduceDoc (Above p g q) = p `seq` g `seq` (above p g $! reduceDoc q) reduceDoc p = p -- | List version of '<>'. hcat :: [Doc] -> Doc hcat = reduceAB . foldr (beside_' False) empty -- | List version of '<+>'. hsep :: [Doc] -> Doc hsep = reduceAB . 
foldr (beside_' True) empty -- | List version of '$$'. vcat :: [Doc] -> Doc vcat = reduceAB . foldr (above_' False) empty -- | Nest (or indent) a document by a given number of positions -- (which may also be negative). 'nest' satisfies the laws: -- -- * @'nest' 0 x = x@ -- -- * @'nest' k ('nest' k' x) = 'nest' (k+k') x@ -- -- * @'nest' k (x '<>' y) = 'nest' k z '<>' 'nest' k y@ -- -- * @'nest' k (x '$$' y) = 'nest' k x '$$' 'nest' k y@ -- -- * @'nest' k 'empty' = 'empty'@ -- -- * @x '<>' 'nest' k y = x '<>' y@, if @x@ non-empty -- -- The side condition on the last law is needed because -- 'empty' is a left identity for '<>'. nest :: Int -> Doc -> Doc nest k p = mkNest k (reduceDoc p) -- | @hang d1 n d2 = sep [d1, nest n d2]@ hang :: Doc -> Int -> Doc -> Doc hang d1 n d2 = sep [d1, nest n d2] -- | Apply 'hang' to the arguments if the first 'Doc' is not empty. hangNotEmpty :: Doc -> Int -> Doc -> Doc hangNotEmpty d1 n d2 = if isEmpty d1 then d2 else hang d1 n d2 -- | @punctuate p [d1, ... dn] = [d1 \<> p, d2 \<> p, ... 
dn-1 \<> p, dn]@ punctuate :: Doc -> [Doc] -> [Doc] punctuate _ [] = [] punctuate p (x:xs) = go x xs where go y [] = [y] go y (z:zs) = (y <> p) : go z zs -- mkNest checks for Nest's invariant that it doesn't have an Empty inside it mkNest :: Int -> Doc -> Doc mkNest k _ | k `seq` False = undefined mkNest k (Nest k1 p) = mkNest (k + k1) p mkNest _ NoDoc = NoDoc mkNest _ Empty = Empty mkNest 0 p = p mkNest k p = nest_ k p -- mkUnion checks for an empty document mkUnion :: Doc -> Doc -> Doc mkUnion Empty _ = Empty mkUnion p q = p `union_` q beside_' :: Bool -> Doc -> Doc -> Doc beside_' _ p Empty = p beside_' g p q = Beside p g q above_' :: Bool -> Doc -> Doc -> Doc above_' _ p Empty = p above_' g p q = Above p g q reduceAB :: Doc -> Doc reduceAB (Above Empty _ q) = q reduceAB (Beside Empty _ q) = q reduceAB doc = doc nilAbove_ :: RDoc -> RDoc nilAbove_ = NilAbove -- Arg of a TextBeside is always an RDoc textBeside_ :: TextDetails -> Int -> RDoc -> RDoc textBeside_ = TextBeside nest_ :: Int -> RDoc -> RDoc nest_ = Nest union_ :: RDoc -> RDoc -> RDoc union_ = Union -- --------------------------------------------------------------------------- -- Vertical composition @$$@ -- | Above, except that if the last line of the first argument stops -- at least one position before the first line of the second begins, -- these two lines are overlapped. For example: -- -- > text "hi" $$ nest 5 (text "there") -- -- lays out as -- -- > hi there -- -- rather than -- -- > hi -- > there -- -- '$$' is associative, with identity 'empty', and also satisfies -- -- * @(x '$$' y) '<>' z = x '$$' (y '<>' z)@, if @y@ non-empty. -- ($$) :: Doc -> Doc -> Doc p $$ q = above_ p False q -- | Above, with no overlapping. -- '$+$' is associative, with identity 'empty'. 
($+$) :: Doc -> Doc -> Doc p $+$ q = above_ p True q above_ :: Doc -> Bool -> Doc -> Doc above_ p _ Empty = p above_ Empty _ q = q above_ p g q = Above p g q above :: Doc -> Bool -> RDoc -> RDoc above (Above p g1 q1) g2 q2 = above p g1 (above q1 g2 q2) above p@(Beside{}) g q = aboveNest (reduceDoc p) g 0 (reduceDoc q) above p g q = aboveNest p g 0 (reduceDoc q) -- Specification: aboveNest p g k q = p $g$ (nest k q) aboveNest :: RDoc -> Bool -> Int -> RDoc -> RDoc aboveNest _ _ k _ | k `seq` False = undefined aboveNest NoDoc _ _ _ = NoDoc aboveNest (p1 `Union` p2) g k q = aboveNest p1 g k q `union_` aboveNest p2 g k q aboveNest Empty _ k q = mkNest k q aboveNest (Nest k1 p) g k q = nest_ k1 (aboveNest p g (k - k1) q) -- p can't be Empty, so no need for mkNest aboveNest (NilAbove p) g k q = nilAbove_ (aboveNest p g k q) aboveNest (TextBeside s sl p) g k q = textBeside_ s sl rest where !k1 = k - sl rest = case p of Empty -> nilAboveNest g k1 q _ -> aboveNest p g k1 q aboveNest (Above {}) _ _ _ = error "aboveNest Above" aboveNest (Beside {}) _ _ _ = error "aboveNest Beside" -- Specification: text s <> nilaboveNest g k q -- = text s <> (text "" $g$ nest k q) nilAboveNest :: Bool -> Int -> RDoc -> RDoc nilAboveNest _ k _ | k `seq` False = undefined nilAboveNest _ _ Empty = Empty -- Here's why the "text s <>" is in the spec! nilAboveNest g k (Nest k1 q) = nilAboveNest g (k + k1) q nilAboveNest g k q | not g && k > 0 -- No newline if no overlap = textBeside_ (RStr k ' ') k q | otherwise -- Put them really above = nilAbove_ (mkNest k q) -- --------------------------------------------------------------------------- -- Horizontal composition @<>@ -- We intentionally avoid Data.Monoid.(<>) here due to interactions of -- Data.Monoid.(<>) and (<+>). See -- http://www.haskell.org/pipermail/libraries/2011-November/017066.html -- | Beside. -- '<>' is associative, with identity 'empty'. 
(<>) :: Doc -> Doc -> Doc p <> q = beside_ p False q -- | Beside, separated by space, unless one of the arguments is 'empty'. -- '<+>' is associative, with identity 'empty'. (<+>) :: Doc -> Doc -> Doc p <+> q = beside_ p True q beside_ :: Doc -> Bool -> Doc -> Doc beside_ p _ Empty = p beside_ Empty _ q = q beside_ p g q = Beside p g q -- Specification: beside g p q = p q beside :: Doc -> Bool -> RDoc -> RDoc beside NoDoc _ _ = NoDoc beside (p1 `Union` p2) g q = beside p1 g q `union_` beside p2 g q beside Empty _ q = q beside (Nest k p) g q = nest_ k $! beside p g q beside p@(Beside p1 g1 q1) g2 q2 | g1 == g2 = beside p1 g1 $! beside q1 g2 q2 | otherwise = beside (reduceDoc p) g2 q2 beside p@(Above{}) g q = let !d = reduceDoc p in beside d g q beside (NilAbove p) g q = nilAbove_ $! beside p g q beside (TextBeside s sl p) g q = textBeside_ s sl rest where rest = case p of Empty -> nilBeside g q _ -> beside p g q -- Specification: text "" <> nilBeside g p -- = text "" p nilBeside :: Bool -> RDoc -> RDoc nilBeside _ Empty = Empty -- Hence the text "" in the spec nilBeside g (Nest _ p) = nilBeside g p nilBeside g p | g = textBeside_ spaceText 1 p | otherwise = p -- --------------------------------------------------------------------------- -- Separate, @sep@ -- Specification: sep ps = oneLiner (hsep ps) -- `union` -- vcat ps -- | Either 'hsep' or 'vcat'. sep :: [Doc] -> Doc sep = sepX True -- Separate with spaces -- | Either 'hcat' or 'vcat'. 
cat :: [Doc] -> Doc cat = sepX False -- Don't sepX :: Bool -> [Doc] -> Doc sepX _ [] = empty sepX x (p:ps) = sep1 x (reduceDoc p) 0 ps -- Specification: sep1 g k ys = sep (x : map (nest k) ys) -- = oneLiner (x nest k (hsep ys)) -- `union` x $$ nest k (vcat ys) sep1 :: Bool -> RDoc -> Int -> [Doc] -> RDoc sep1 _ _ k _ | k `seq` False = undefined sep1 _ NoDoc _ _ = NoDoc sep1 g (p `Union` q) k ys = sep1 g p k ys `union_` aboveNest q False k (reduceDoc (vcat ys)) sep1 g Empty k ys = mkNest k (sepX g ys) sep1 g (Nest n p) k ys = nest_ n (sep1 g p (k - n) ys) sep1 _ (NilAbove p) k ys = nilAbove_ (aboveNest p False k (reduceDoc (vcat ys))) sep1 g (TextBeside s sl p) k ys = textBeside_ s sl (sepNB g p (k - sl) ys) sep1 _ (Above {}) _ _ = error "sep1 Above" sep1 _ (Beside {}) _ _ = error "sep1 Beside" -- Specification: sepNB p k ys = sep1 (text "" <> p) k ys -- Called when we have already found some text in the first item -- We have to eat up nests sepNB :: Bool -> Doc -> Int -> [Doc] -> Doc sepNB g (Nest _ p) k ys = sepNB g p k ys -- Never triggered, because of invariant (2) sepNB g Empty k ys = oneLiner (nilBeside g (reduceDoc rest)) `mkUnion` -- XXX: TODO: PRETTY: Used to use True here (but GHC used False...) nilAboveNest False k (reduceDoc (vcat ys)) where rest | g = hsep ys | otherwise = hcat ys sepNB g p k ys = sep1 g p k ys -- --------------------------------------------------------------------------- -- @fill@ -- | \"Paragraph fill\" version of 'cat'. fcat :: [Doc] -> Doc fcat = fill False -- | \"Paragraph fill\" version of 'sep'. fsep :: [Doc] -> Doc fsep = fill True -- Specification: -- -- fill g docs = fillIndent 0 docs -- -- fillIndent k [] = [] -- fillIndent k [p] = p -- fillIndent k (p1:p2:ps) = -- oneLiner p1 fillIndent (k + length p1 + g ? 
1 : 0) -- (remove_nests (oneLiner p2) : ps) -- `Union` -- (p1 $*$ nest (-k) (fillIndent 0 ps)) -- -- $*$ is defined for layouts (not Docs) as -- layout1 $*$ layout2 | hasMoreThanOneLine layout1 = layout1 $$ layout2 -- | otherwise = layout1 $+$ layout2 fill :: Bool -> [Doc] -> RDoc fill _ [] = empty fill g (p:ps) = fill1 g (reduceDoc p) 0 ps fill1 :: Bool -> RDoc -> Int -> [Doc] -> Doc fill1 _ _ k _ | k `seq` False = undefined fill1 _ NoDoc _ _ = NoDoc fill1 g (p `Union` q) k ys = fill1 g p k ys `union_` aboveNest q False k (fill g ys) fill1 g Empty k ys = mkNest k (fill g ys) fill1 g (Nest n p) k ys = nest_ n (fill1 g p (k - n) ys) fill1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (fill g ys)) fill1 g (TextBeside s sl p) k ys = textBeside_ s sl (fillNB g p (k - sl) ys) fill1 _ (Above {}) _ _ = error "fill1 Above" fill1 _ (Beside {}) _ _ = error "fill1 Beside" fillNB :: Bool -> Doc -> Int -> [Doc] -> Doc fillNB _ _ k _ | k `seq` False = undefined fillNB g (Nest _ p) k ys = fillNB g p k ys -- Never triggered, because of invariant (2) fillNB _ Empty _ [] = Empty fillNB g Empty k (Empty:ys) = fillNB g Empty k ys fillNB g Empty k (y:ys) = fillNBE g k y ys fillNB g p k ys = fill1 g p k ys fillNBE :: Bool -> Int -> Doc -> [Doc] -> Doc fillNBE g k y ys = nilBeside g (fill1 g ((elideNest . oneLiner . reduceDoc) y) k' ys) -- XXX: TODO: PRETTY: Used to use True here (but GHC used False...) `mkUnion` nilAboveNest False k (fill g (y:ys)) where k' = if g then k - 1 else k elideNest :: Doc -> Doc elideNest (Nest _ d) = d elideNest d = d -- --------------------------------------------------------------------------- -- Selecting the best layout best :: Int -- Line length -> Int -- Ribbon length -> RDoc -> RDoc -- No unions in here! 
best w0 r = get w0 where get :: Int -- (Remaining) width of line -> Doc -> Doc get w _ | w == 0 && False = undefined get _ Empty = Empty get _ NoDoc = NoDoc get w (NilAbove p) = nilAbove_ (get w p) get w (TextBeside s sl p) = textBeside_ s sl (get1 w sl p) get w (Nest k p) = nest_ k (get (w - k) p) get w (p `Union` q) = nicest w r (get w p) (get w q) get _ (Above {}) = error "best get Above" get _ (Beside {}) = error "best get Beside" get1 :: Int -- (Remaining) width of line -> Int -- Amount of first line already eaten up -> Doc -- This is an argument to TextBeside => eat Nests -> Doc -- No unions in here! get1 w _ _ | w == 0 && False = undefined get1 _ _ Empty = Empty get1 _ _ NoDoc = NoDoc get1 w sl (NilAbove p) = nilAbove_ (get (w - sl) p) get1 w sl (TextBeside t tl p) = textBeside_ t tl (get1 w (sl + tl) p) get1 w sl (Nest _ p) = get1 w sl p get1 w sl (p `Union` q) = nicest1 w r sl (get1 w sl p) (get1 w sl q) get1 _ _ (Above {}) = error "best get1 Above" get1 _ _ (Beside {}) = error "best get1 Beside" nicest :: Int -> Int -> Doc -> Doc -> Doc nicest !w !r = nicest1 w r 0 nicest1 :: Int -> Int -> Int -> Doc -> Doc -> Doc nicest1 !w !r !sl p q | fits ((w `min` r) - sl) p = p | otherwise = q fits :: Int -- Space available -> Doc -> Bool -- True if *first line* of Doc fits in space available fits n _ | n < 0 = False fits _ NoDoc = False fits _ Empty = True fits _ (NilAbove _) = True fits n (TextBeside _ sl p) = fits (n - sl) p fits _ (Above {}) = error "fits Above" fits _ (Beside {}) = error "fits Beside" fits _ (Union {}) = error "fits Union" fits _ (Nest {}) = error "fits Nest" -- | @first@ returns its first argument if it is non-empty, otherwise its second. 
first :: Doc -> Doc -> Doc first p q | nonEmptySet p = p -- unused, because (get OneLineMode) is unused | otherwise = q nonEmptySet :: Doc -> Bool nonEmptySet NoDoc = False nonEmptySet (_ `Union` _) = True nonEmptySet Empty = True nonEmptySet (NilAbove _) = True nonEmptySet (TextBeside _ _ p) = nonEmptySet p nonEmptySet (Nest _ p) = nonEmptySet p nonEmptySet (Above {}) = error "nonEmptySet Above" nonEmptySet (Beside {}) = error "nonEmptySet Beside" -- @oneLiner@ returns the one-line members of the given set of @GDoc@s. oneLiner :: Doc -> Doc oneLiner NoDoc = NoDoc oneLiner Empty = Empty oneLiner (NilAbove _) = NoDoc oneLiner (TextBeside s sl p) = textBeside_ s sl (oneLiner p) oneLiner (Nest k p) = nest_ k (oneLiner p) oneLiner (p `Union` _) = oneLiner p oneLiner (Above {}) = error "oneLiner Above" oneLiner (Beside {}) = error "oneLiner Beside" -- --------------------------------------------------------------------------- -- Rendering -- | A rendering style. data Style = Style { mode :: Mode -- ^ The rendering mode , lineLength :: Int -- ^ Length of line, in chars , ribbonsPerLine :: Float -- ^ Ratio of line length to ribbon length } -- | The default style (@mode=PageMode False, lineLength=100, ribbonsPerLine=1.5@). style :: Style style = Style { lineLength = 100, ribbonsPerLine = 1.5, mode = PageMode False } -- | Rendering mode. data Mode = PageMode { asciiSpace :: Bool } -- ^ Normal | ZigZagMode -- ^ With zig-zag cuts | LeftMode -- ^ No indentation, infinitely long lines | OneLineMode -- ^ All on one line -- | Can we output an ascii space character for spaces? -- Mostly true, but not for e.g. UTF16 -- See Note [putSpaces optimizations] for why we bother -- to track this. hasAsciiSpace :: Mode -> Bool hasAsciiSpace mode = case mode of PageMode asciiSpace -> asciiSpace _ -> False -- | Render the @Doc@ to a String using the given @Style@. 
renderStyle :: Style -> Doc -> String renderStyle s = fullRender (mode s) (lineLength s) (ribbonsPerLine s) txtPrinter "" -- | Default TextDetails printer txtPrinter :: TextDetails -> String -> String txtPrinter (Chr c) s = c:s txtPrinter (Str s1) s2 = s1 ++ s2 txtPrinter (PStr s1) s2 = unpackFS s1 ++ s2 txtPrinter (ZStr s1) s2 = zString s1 ++ s2 txtPrinter (LStr s1) s2 = unpackPtrString s1 ++ s2 txtPrinter (RStr n c) s2 = replicate n c ++ s2 -- | The general rendering interface. fullRender :: Mode -- ^ Rendering mode -> Int -- ^ Line length -> Float -- ^ Ribbons per line -> (TextDetails -> a -> a) -- ^ What to do with text -> a -- ^ What to do at the end -> Doc -- ^ The document -> a -- ^ Result fullRender OneLineMode _ _ txt end doc = easyDisplay spaceText (\_ y -> y) txt end (reduceDoc doc) fullRender LeftMode _ _ txt end doc = easyDisplay nlText first txt end (reduceDoc doc) fullRender m lineLen ribbons txt rest doc = display m lineLen ribbonLen txt rest doc' where doc' = best bestLineLen ribbonLen (reduceDoc doc) bestLineLen, ribbonLen :: Int ribbonLen = round (fromIntegral lineLen / ribbons) bestLineLen = case m of ZigZagMode -> maxBound _ -> lineLen easyDisplay :: TextDetails -> (Doc -> Doc -> Doc) -> (TextDetails -> a -> a) -> a -> Doc -> a easyDisplay nlSpaceText choose txt end = lay where lay NoDoc = error "easyDisplay: NoDoc" lay (Union p q) = lay (choose p q) lay (Nest _ p) = lay p lay Empty = end lay (NilAbove p) = nlSpaceText `txt` lay p lay (TextBeside s _ p) = s `txt` lay p lay (Above {}) = error "easyDisplay Above" lay (Beside {}) = error "easyDisplay Beside" display :: Mode -> Int -> Int -> (TextDetails -> a -> a) -> a -> Doc -> a display m !page_width !ribbon_width txt end doc = case page_width - ribbon_width of { gap_width -> case gap_width `quot` 2 of { shift -> let lay k _ | k `seq` False = undefined lay k (Nest k1 p) = lay (k + k1) p lay _ Empty = end lay k (NilAbove p) = nlText `txt` lay k p lay k (TextBeside s sl p) = case m of ZigZagMode | 
k >= gap_width -> nlText `txt` ( Str (replicate shift '/') `txt` ( nlText `txt` lay1 (k - shift) s sl p )) | k < 0 -> nlText `txt` ( Str (replicate shift '\\') `txt` ( nlText `txt` lay1 (k + shift) s sl p )) _ -> lay1 k s sl p lay _ (Above {}) = error "display lay Above" lay _ (Beside {}) = error "display lay Beside" lay _ NoDoc = error "display lay NoDoc" lay _ (Union {}) = error "display lay Union" lay1 !k s !sl p = let !r = k + sl in indent k (s `txt` lay2 r p) lay2 k _ | k `seq` False = undefined lay2 k (NilAbove p) = nlText `txt` lay k p lay2 k (TextBeside s sl p) = s `txt` lay2 (k + sl) p lay2 k (Nest _ p) = lay2 k p lay2 _ Empty = end lay2 _ (Above {}) = error "display lay2 Above" lay2 _ (Beside {}) = error "display lay2 Beside" lay2 _ NoDoc = error "display lay2 NoDoc" lay2 _ (Union {}) = error "display lay2 Union" indent !n r = RStr n ' ' `txt` r in lay 0 doc }} printDoc :: Mode -> Int -> Handle -> Doc -> IO () -- printDoc adds a newline to the end printDoc mode cols hdl doc = printDoc_ mode cols hdl (doc $$ text "") {- Note [putSpaces optimizations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using dump flags a lot of what we are dumping ends up being whitespace. This is especially true for Core/Stg dumps. Enough so that it's worth optimizing. Especially in the common case of writing to an UTF8 or similarly encoded file where space is equal to ascii space we use hPutBuf to write a preallocated buffer to the file. This avoids a fair bit of allocation. For other cases we fall back to the old and slow path for simplicity. 
-} printDoc_ :: Mode -> Int -> Handle -> Doc -> IO () -- printDoc_ does not add a newline at the end, so that -- successive calls can output stuff on the same line -- Rather like putStr vs putStrLn printDoc_ LeftMode _ hdl doc = do { printLeftRender hdl doc; hFlush hdl } printDoc_ mode pprCols hdl doc = do { fullRender mode pprCols 1.5 put done doc ; hFlush hdl } where put (Chr c) next = hPutChar hdl c >> next put (Str s) next = hPutStr hdl s >> next put (PStr s) next = hPutStr hdl (unpackFS s) >> next -- NB. not hPutFS, we want this to go through -- the I/O library's encoding layer. (#3398) put (ZStr s) next = hPutFZS hdl s >> next put (LStr s) next = hPutPtrString hdl s >> next put (RStr n c) next | c == ' ' = putSpaces n >> next | otherwise = hPutStr hdl (replicate n c) >> next putSpaces n -- If we use ascii spaces we are allowed to use hPutBuf -- See Note [putSpaces optimizations] | hasAsciiSpace mode , n <= 100 = hPutBuf hdl (Ptr spaces') n | hasAsciiSpace mode , n > 100 = hPutBuf hdl (Ptr spaces') 100 >> putSpaces (n-100) | otherwise = hPutStr hdl (replicate n ' ') done = return () -- hPutChar hdl '\n' -- 100 spaces, so we avoid the allocation of replicate n ' ' spaces' = " "# -- some versions of hPutBuf will barf if the length is zero hPutPtrString :: Handle -> PtrString -> IO () hPutPtrString _handle (PtrString _ 0) = return () hPutPtrString handle (PtrString a l) = hPutBuf handle a l -- Printing output in LeftMode is performance critical: it's used when -- dumping C and assembly output, so we allow ourselves a few dirty -- hacks: -- -- (1) we specialise fullRender for LeftMode with IO output. -- -- (2) we add a layer of buffering on top of Handles. Handles -- don't perform well with lots of hPutChars, which is mostly -- what we're doing here, because Handles have to be thread-safe -- and async exception-safe. We only have a single thread and don't -- care about exceptions, so we add a layer of fast buffering -- over the Handle interface. 
printLeftRender :: Handle -> Doc -> IO () printLeftRender hdl doc = do b <- newBufHandle hdl bufLeftRender b doc bFlush b bufLeftRender :: BufHandle -> Doc -> IO () bufLeftRender b doc = layLeft b (reduceDoc doc) layLeft :: BufHandle -> Doc -> IO () layLeft !_ NoDoc = error "layLeft: NoDoc" layLeft b (Union p q) = layLeft b $! first p q layLeft b (Nest _ p) = layLeft b $! p layLeft b Empty = bPutChar b '\n' layLeft b (NilAbove p) = bPutChar b '\n' >> layLeft b p layLeft b (TextBeside s _ p) = put b s >> layLeft b p where put !b (Chr c) = bPutChar b c put b (Str s) = bPutStr b s put b (PStr s) = bPutFS b s put b (ZStr s) = bPutFZS b s put b (LStr s) = bPutPtrString b s put b (RStr n c) = bPutReplicate b n c layLeft _ _ = panic "layLeft: Unhandled case" -- Define error=panic, for easier comparison with libraries/pretty. error :: String -> a error = panic ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Ppr/0000755000000000000000000000000007346545000017547 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Ppr/Colour.hs0000644000000000000000000000535107346545000021352 0ustar0000000000000000module GHC.Utils.Ppr.Colour where import GHC.Prelude.Basic import Data.Maybe (fromMaybe) import GHC.Data.Bool import Data.Semigroup as Semi -- | A colour\/style for use with 'coloured'. newtype PprColour = PprColour { renderColour :: String } instance Semi.Semigroup PprColour where PprColour s1 <> PprColour s2 = PprColour (s1 <> s2) -- | Allow colours to be combined (e.g. bold + red); -- In case of conflict, right side takes precedence. 
instance Monoid PprColour where mempty = PprColour mempty mappend = (<>) renderColourAfresh :: PprColour -> String renderColourAfresh c = renderColour (colReset `mappend` c) colCustom :: String -> PprColour colCustom "" = mempty colCustom s = PprColour ("\27[" ++ s ++ "m") colReset :: PprColour colReset = colCustom "0" colBold :: PprColour colBold = colCustom ";1" colBlackFg :: PprColour colBlackFg = colCustom "30" colRedFg :: PprColour colRedFg = colCustom "31" colGreenFg :: PprColour colGreenFg = colCustom "32" colYellowFg :: PprColour colYellowFg = colCustom "33" colBlueFg :: PprColour colBlueFg = colCustom "34" colMagentaFg :: PprColour colMagentaFg = colCustom "35" colCyanFg :: PprColour colCyanFg = colCustom "36" colWhiteFg :: PprColour colWhiteFg = colCustom "37" data Scheme = Scheme { sHeader :: PprColour , sMessage :: PprColour , sWarning :: PprColour , sError :: PprColour , sFatal :: PprColour , sMargin :: PprColour } defaultScheme :: Scheme defaultScheme = Scheme { sHeader = mempty , sMessage = colBold , sWarning = colBold `mappend` colMagentaFg , sError = colBold `mappend` colRedFg , sFatal = colBold `mappend` colRedFg , sMargin = colBold `mappend` colBlueFg } -- | Parse the colour scheme from a string (presumably from the @GHC_COLORS@ -- environment variable). 
parseScheme :: String -> (OverridingBool, Scheme) -> (OverridingBool, Scheme) parseScheme "always" (_, cs) = (Always, cs) parseScheme "auto" (_, cs) = (Auto, cs) parseScheme "never" (_, cs) = (Never, cs) parseScheme input (b, cs) = ( b , Scheme { sHeader = fromMaybe (sHeader cs) (lookup "header" table) , sMessage = fromMaybe (sMessage cs) (lookup "message" table) , sWarning = fromMaybe (sWarning cs) (lookup "warning" table) , sError = fromMaybe (sError cs) (lookup "error" table) , sFatal = fromMaybe (sFatal cs) (lookup "fatal" table) , sMargin = fromMaybe (sMargin cs) (lookup "margin" table) } ) where split :: Char -> String -> [String] split c s = case break (==c) s of (chunk,[]) -> [chunk] (chunk,_:rest) -> chunk : split c rest table = do w <- split ':' input let (k, v') = break (== '=') w case v' of '=' : v -> return (k, colCustom v) _ -> [] ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/TmpFs.hs0000644000000000000000000004503107346545000020376 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Temporary file-system management module GHC.Utils.TmpFs ( TmpFs , initTmpFs , forkTmpFsFrom , mergeTmpFsInto , PathsToClean(..) , emptyPathsToClean , TempFileLifetime(..) , TempDir (..) 
, cleanTempDirs , cleanTempFiles , cleanCurrentModuleTempFiles , keepCurrentModuleTempFiles , addFilesToClean , changeTempFilesLifetime , newTempName , newTempLibName , newTempSubDir , withSystemTempDirectory , withTempDirectory ) where import GHC.Prelude import GHC.Utils.Error import GHC.Utils.Outputable import GHC.Utils.Logger import GHC.Utils.Misc import GHC.Utils.Exception as Exception import GHC.Driver.Phases import Data.List (partition) import qualified Data.Set as Set import Data.Set (Set) import qualified Data.Map as Map import Data.Map (Map) import Data.IORef import System.Directory import System.FilePath import System.IO.Error #if !defined(mingw32_HOST_OS) import qualified System.Posix.Internals #endif -- | Temporary file-system data TmpFs = TmpFs { tmp_dirs_to_clean :: IORef (Map FilePath FilePath) -- ^ Maps system temporary directory (passed via settings or DynFlags) to -- an actual temporary directory for this process. -- -- It's a Map probably to support changing the system temporary directory -- over time. -- -- Shared with forked TmpFs. , tmp_next_suffix :: IORef Int -- ^ The next available suffix to uniquely name a temp file, updated -- atomically. -- -- Shared with forked TmpFs. , tmp_dir_prefix :: String , tmp_files_to_clean :: IORef PathsToClean -- ^ Files to clean (per session or per module) -- -- Not shared with forked TmpFs. , tmp_subdirs_to_clean :: IORef PathsToClean -- ^ Subdirs to clean (per session or per module) -- -- Not shared with forked TmpFs. } -- | A collection of paths that must be deleted before ghc exits. data PathsToClean = PathsToClean { ptcGhcSession :: !(Set FilePath) -- ^ Paths that will be deleted at the end of runGhc(T) , ptcCurrentModule :: !(Set FilePath) -- ^ Paths that will be deleted the next time -- 'cleanCurrentModuleTempFiles' is called, or otherwise at the end of -- the session. } -- | Used when a temp file is created. 
This determines which component Set of -- PathsToClean will get the temp file data TempFileLifetime = TFL_CurrentModule -- ^ A file with lifetime TFL_CurrentModule will be cleaned up at the -- end of upweep_mod | TFL_GhcSession -- ^ A file with lifetime TFL_GhcSession will be cleaned up at the end of -- runGhc(T) deriving (Show) newtype TempDir = TempDir FilePath -- | An empty PathsToClean emptyPathsToClean :: PathsToClean emptyPathsToClean = PathsToClean Set.empty Set.empty -- | Merge two PathsToClean mergePathsToClean :: PathsToClean -> PathsToClean -> PathsToClean mergePathsToClean x y = PathsToClean { ptcGhcSession = Set.union (ptcGhcSession x) (ptcGhcSession y) , ptcCurrentModule = Set.union (ptcCurrentModule x) (ptcCurrentModule y) } -- | Initialise an empty TmpFs initTmpFs :: IO TmpFs initTmpFs = do files <- newIORef emptyPathsToClean subdirs <- newIORef emptyPathsToClean dirs <- newIORef Map.empty next <- newIORef 0 return $ TmpFs { tmp_files_to_clean = files , tmp_subdirs_to_clean = subdirs , tmp_dirs_to_clean = dirs , tmp_next_suffix = next , tmp_dir_prefix = "tmp" } -- | Initialise an empty TmpFs sharing unique numbers and per-process temporary -- directories with the given TmpFs -- -- It's not safe to use the subdirs created by the original TmpFs with the -- forked one. Use @newTempSubDir@ to create new subdirs instead. forkTmpFsFrom :: TmpFs -> IO TmpFs forkTmpFsFrom old = do files <- newIORef emptyPathsToClean subdirs <- newIORef emptyPathsToClean counter <- newIORef 0 prefix <- newTempSuffix old return $ TmpFs { tmp_files_to_clean = files , tmp_subdirs_to_clean = subdirs , tmp_dirs_to_clean = tmp_dirs_to_clean old , tmp_next_suffix = counter , tmp_dir_prefix = prefix } -- | Merge the first TmpFs into the second. -- -- The first TmpFs is returned emptied. 
mergeTmpFsInto :: TmpFs -> TmpFs -> IO () mergeTmpFsInto src dst = do src_files <- atomicModifyIORef' (tmp_files_to_clean src) (\s -> (emptyPathsToClean, s)) src_subdirs <- atomicModifyIORef' (tmp_subdirs_to_clean src) (\s -> (emptyPathsToClean, s)) atomicModifyIORef' (tmp_files_to_clean dst) (\s -> (mergePathsToClean src_files s, ())) atomicModifyIORef' (tmp_subdirs_to_clean dst) (\s -> (mergePathsToClean src_subdirs s, ())) cleanTempDirs :: Logger -> TmpFs -> IO () cleanTempDirs logger tmpfs = mask_ $ do let ref = tmp_dirs_to_clean tmpfs ds <- atomicModifyIORef' ref $ \ds -> (Map.empty, ds) removeTmpDirs logger (Map.elems ds) -- | Delete all paths in @tmp_files_to_clean@ and @tmp_subdirs_to_clean@. cleanTempFiles :: Logger -> TmpFs -> IO () cleanTempFiles logger tmpfs = mask_ $ do removeWith (removeTmpFiles logger) (tmp_files_to_clean tmpfs) removeWith (removeTmpSubdirs logger) (tmp_subdirs_to_clean tmpfs) where removeWith remove ref = do to_delete <- atomicModifyIORef' ref $ \PathsToClean { ptcCurrentModule = cm_paths , ptcGhcSession = gs_paths } -> ( emptyPathsToClean , Set.toList cm_paths ++ Set.toList gs_paths) remove to_delete -- | Keep all the paths in @tmp_files_to_clean@ and @tmp_subdirs_to_clean@ -- that have lifetime TFL_CurrentModule. This function is used when `-keep-tmp-files` is -- used in an OPTIONS_GHC pragma. -- This function removes the temporary file from the TmpFs so we no longer remove -- it at the env when cleanTempFiles is called. keepCurrentModuleTempFiles :: HasCallStack => Logger -> TmpFs -> IO () keepCurrentModuleTempFiles logger tmpfs = mask_ $ do to_keep_files <- keep (tmp_files_to_clean tmpfs) to_keep_subdirs <- keep (tmp_subdirs_to_clean tmpfs) -- Remove any folders which contain any files we want to keep from the -- directories we are tracking. A new temporary directory will be created -- the next time a temporary file is needed (by perhaps another module). 
keepDirs (to_keep_files ++ to_keep_subdirs) (tmp_dirs_to_clean tmpfs) where keepDirs keeps ref = do let keep_dirs = Set.fromList (map takeDirectory keeps) atomicModifyIORef' ref $ \m -> (Map.filter (\fp -> fp `Set.notMember` keep_dirs) m, ()) keep ref = do to_keep <- atomicModifyIORef' ref $ \ptc@PathsToClean{ptcCurrentModule = cm_paths} -> (ptc {ptcCurrentModule = Set.empty}, Set.toList cm_paths) debugTraceMsg logger 2 (text "Keeping:" <+> hsep (map text to_keep)) return to_keep -- | Delete all paths in @tmp_files_to_clean@ and @tmp_subdirs_to_clean@ -- That have lifetime TFL_CurrentModule. -- If a file must be cleaned eventually, but must survive a -- cleanCurrentModuleTempFiles, ensure it has lifetime TFL_GhcSession. cleanCurrentModuleTempFiles :: Logger -> TmpFs -> IO () cleanCurrentModuleTempFiles logger tmpfs = mask_ $ do removeWith (removeTmpFiles logger) (tmp_files_to_clean tmpfs) removeWith (removeTmpSubdirs logger) (tmp_subdirs_to_clean tmpfs) where removeWith remove ref = do to_delete <- atomicModifyIORef' ref $ \ptc@PathsToClean{ptcCurrentModule = cm_paths} -> (ptc {ptcCurrentModule = Set.empty}, Set.toList cm_paths) remove to_delete -- | Ensure that new_files are cleaned on the next call of -- 'cleanTempFiles' or 'cleanCurrentModuleTempFiles', depending on lifetime. -- If any of new_files are already tracked, they will have their lifetime -- updated. 
addFilesToClean :: TmpFs -> TempFileLifetime -> [FilePath] -> IO () addFilesToClean tmpfs lifetime new_files = addToClean (tmp_files_to_clean tmpfs) lifetime new_files addSubdirsToClean :: TmpFs -> TempFileLifetime -> [FilePath] -> IO () addSubdirsToClean tmpfs lifetime new_subdirs = addToClean (tmp_subdirs_to_clean tmpfs) lifetime new_subdirs addToClean :: IORef PathsToClean -> TempFileLifetime -> [FilePath] -> IO () addToClean ref lifetime new_filepaths = modifyIORef' ref $ \PathsToClean { ptcCurrentModule = cm_paths , ptcGhcSession = gs_paths } -> case lifetime of TFL_CurrentModule -> PathsToClean { ptcCurrentModule = cm_paths `Set.union` new_filepaths_set , ptcGhcSession = gs_paths `Set.difference` new_filepaths_set } TFL_GhcSession -> PathsToClean { ptcCurrentModule = cm_paths `Set.difference` new_filepaths_set , ptcGhcSession = gs_paths `Set.union` new_filepaths_set } where new_filepaths_set = Set.fromList new_filepaths -- | Update the lifetime of files already being tracked. If any files are -- not being tracked they will be discarded. changeTempFilesLifetime :: TmpFs -> TempFileLifetime -> [FilePath] -> IO () changeTempFilesLifetime tmpfs lifetime files = do PathsToClean { ptcCurrentModule = cm_paths , ptcGhcSession = gs_paths } <- readIORef (tmp_files_to_clean tmpfs) let old_set = case lifetime of TFL_CurrentModule -> gs_paths TFL_GhcSession -> cm_paths existing_files = [f | f <- files, f `Set.member` old_set] addFilesToClean tmpfs lifetime existing_files -- Return a unique numeric temp file suffix newTempSuffix :: TmpFs -> IO String newTempSuffix tmpfs = do n <- atomicModifyIORef' (tmp_next_suffix tmpfs) $ \n -> (n+1,n) return $ tmp_dir_prefix tmpfs ++ "_" ++ show n -- Find a temporary name that doesn't already exist. 
newTempName :: Logger -> TmpFs -> TempDir -> TempFileLifetime -> Suffix -> IO FilePath newTempName logger tmpfs tmp_dir lifetime extn = do d <- getTempDir logger tmpfs tmp_dir findTempName (d "ghc_") -- See Note [Deterministic base name] where findTempName :: FilePath -> IO FilePath findTempName prefix = do suffix <- newTempSuffix tmpfs let filename = prefix ++ suffix <.> extn b <- doesFileExist filename if b then findTempName prefix else do -- clean it up later addFilesToClean tmpfs lifetime [filename] return filename -- | Create a new temporary subdirectory that doesn't already exist -- The temporary subdirectory is automatically removed at the end of the -- GHC session, but its contents aren't. Make sure to leave the directory -- empty before the end of the session, either by removing content -- directly or by using @addFilesToClean@. -- -- If the created subdirectory is not empty, it will not be removed (along -- with its parent temporary directory) and a warning message will be -- printed at verbosity 2 and higher. 
newTempSubDir :: Logger -> TmpFs -> TempDir -> IO FilePath newTempSubDir logger tmpfs tmp_dir = do d <- getTempDir logger tmpfs tmp_dir findTempDir (d "ghc_") where findTempDir :: FilePath -> IO FilePath findTempDir prefix = do suffix <- newTempSuffix tmpfs let name = prefix ++ suffix b <- doesDirectoryExist name if b then findTempDir prefix else (do createDirectory name addSubdirsToClean tmpfs TFL_GhcSession [name] return name) `Exception.catchIO` \e -> if isAlreadyExistsError e then findTempDir prefix else ioError e newTempLibName :: Logger -> TmpFs -> TempDir -> TempFileLifetime -> Suffix -> IO (FilePath, FilePath, String) newTempLibName logger tmpfs tmp_dir lifetime extn = do d <- getTempDir logger tmpfs tmp_dir findTempName d ("ghc_") where findTempName :: FilePath -> String -> IO (FilePath, FilePath, String) findTempName dir prefix = do suffix <- newTempSuffix tmpfs -- See Note [Deterministic base name] let libname = prefix ++ suffix filename = dir "lib" ++ libname <.> extn b <- doesFileExist filename if b then findTempName dir prefix else do -- clean it up later addFilesToClean tmpfs lifetime [filename] return (filename, dir, libname) -- Return our temporary directory within tmp_dir, creating one if we -- don't have one yet. getTempDir :: Logger -> TmpFs -> TempDir -> IO FilePath getTempDir logger tmpfs (TempDir tmp_dir) = do mapping <- readIORef dir_ref case Map.lookup tmp_dir mapping of Nothing -> do pid <- getProcessID let prefix = tmp_dir "ghc" ++ show pid ++ "_" mask_ $ mkTempDir prefix Just dir -> return dir where dir_ref = tmp_dirs_to_clean tmpfs mkTempDir :: FilePath -> IO FilePath mkTempDir prefix = do suffix <- newTempSuffix tmpfs let our_dir = prefix ++ suffix -- 1. Speculatively create our new directory. createDirectory our_dir -- 2. Update the tmp_dirs_to_clean mapping unless an entry already exists -- (i.e. unless another thread beat us to it). 
their_dir <- atomicModifyIORef' dir_ref $ \mapping -> case Map.lookup tmp_dir mapping of Just dir -> (mapping, Just dir) Nothing -> (Map.insert tmp_dir our_dir mapping, Nothing) -- 3. If there was an existing entry, return it and delete the -- directory we created. Otherwise return the directory we created. case their_dir of Nothing -> do debugTraceMsg logger 2 $ text "Created temporary directory:" <+> text our_dir return our_dir Just dir -> do removeDirectory our_dir return dir `Exception.catchIO` \e -> if isAlreadyExistsError e then mkTempDir prefix else ioError e {- Note [Deterministic base name] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The filename of temporary files, especially the basename of C files, can end up in the output in some form, e.g. as part of linker debug information. In the interest of bit-wise exactly reproducible compilation (#4012), the basename of the temporary file no longer contains random information (it used to contain the process id). This is ok, as the temporary directory used contains the pid (see getTempDir). In addition to this, multiple threads can race against each other creating temporary files. Therefore we supply a prefix when creating temporary files, when a thread is forked, each thread must be given an TmpFs with a unique prefix. This is achieved by forkTmpFsFrom creating a fresh prefix from the parent TmpFs. 
-} manyWithTrace :: Logger -> String -> ([FilePath] -> IO ()) -> [FilePath] -> IO () manyWithTrace _ _ _ [] = pure () -- do silent nothing on zero filepaths manyWithTrace logger phase act paths = traceCmd logger phase ("Deleting: " ++ unwords paths) (act paths) removeTmpDirs :: Logger -> [FilePath] -> IO () removeTmpDirs logger = manyWithTrace logger "Deleting temp dirs" (mapM_ (removeWith logger removeDirectory)) removeTmpSubdirs :: Logger -> [FilePath] -> IO () removeTmpSubdirs logger = manyWithTrace logger "Deleting temp subdirs" (mapM_ (removeWith logger removeDirectory)) removeTmpFiles :: Logger -> [FilePath] -> IO () removeTmpFiles logger fs = warnNon $ manyWithTrace logger "Deleting temp files" (mapM_ (removeWith logger removeFile)) deletees where -- Flat out refuse to delete files that are likely to be source input -- files (is there a worse bug than having a compiler delete your source -- files?) -- -- Deleting source files is a sign of a bug elsewhere, so prominently flag -- the condition. warnNon act | null non_deletees = act | otherwise = do putMsg logger (text "WARNING - NOT deleting source files:" <+> hsep (map text non_deletees)) act (non_deletees, deletees) = partition isHaskellUserSrcFilename fs removeWith :: Logger -> (FilePath -> IO ()) -> FilePath -> IO () removeWith logger remover f = remover f `Exception.catchIO` (\e -> let msg = if isDoesNotExistError e then text "Warning: deleting non-existent" <+> text f else text "Warning: exception raised when deleting" <+> text f <> colon $$ text (show e) in debugTraceMsg logger 2 msg ) #if defined(mingw32_HOST_OS) -- relies on Int == Int32 on Windows foreign import ccall unsafe "_getpid" getProcessID :: IO Int #else getProcessID :: IO Int getProcessID = System.Posix.Internals.c_getpid >>= return . fromIntegral #endif -- The following three functions are from the `temporary` package. -- | Create and use a temporary directory in the system standard temporary -- directory. 
-- -- Behaves exactly the same as 'withTempDirectory', except that the parent -- temporary directory will be that returned by 'getTemporaryDirectory'. withSystemTempDirectory :: String -- ^ Directory name template. See 'openTempFile'. -> (FilePath -> IO a) -- ^ Callback that can use the directory -> IO a withSystemTempDirectory template action = getTemporaryDirectory >>= \tmpDir -> withTempDirectory tmpDir template action -- | Create and use a temporary directory. -- -- Creates a new temporary directory inside the given directory, making use -- of the template. The temp directory is deleted after use. For example: -- -- > withTempDirectory "src" "sdist." $ \tmpDir -> do ... -- -- The @tmpDir@ will be a new subdirectory of the given directory, e.g. -- @src/sdist.342@. withTempDirectory :: FilePath -- ^ Temp directory to create the directory in -> String -- ^ Directory name template. See 'openTempFile'. -> (FilePath -> IO a) -- ^ Callback that can use the directory -> IO a withTempDirectory targetDir template = Exception.bracket (createTempDirectory targetDir template) (ignoringIOErrors . 
removeDirectoryRecursive) ignoringIOErrors :: IO () -> IO () ignoringIOErrors ioe = ioe `Exception.catchIO` const (return ()) createTempDirectory :: FilePath -> String -> IO FilePath createTempDirectory dir template = do pid <- getProcessID findTempName pid where findTempName x = do let path = dir template ++ show x createDirectory path return path `Exception.catchIO` \e -> if isAlreadyExistsError e then findTempName (x+1) else ioError e ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Trace.hs0000644000000000000000000000722707346545000020410 0ustar0000000000000000-- | Tracing utilities module GHC.Utils.Trace ( pprTrace , pprTraceM , pprTraceDebug , pprTraceIt , pprTraceWith , pprSTrace , pprTraceException , warnPprTrace , warnPprTraceM , pprTraceUserWarning , trace ) where {- Note [Exporting pprTrace from GHC.Prelude] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For our own sanity we want to export pprTrace from GHC.Prelude. Since calls to traces should never be performance sensitive it's okay for these to be source imports/exports. However we still need to make sure that all transitive imports from Trace.hs-boot do not import GHC.Prelude. 
To get there we import the basic GHC.Prelude.Basic prelude instead of GHC.Prelude within the transitive dependencies of Trace.hs -} import GHC.Prelude.Basic import GHC.Utils.Outputable import GHC.Utils.Exception import GHC.Utils.Panic import GHC.Utils.GlobalVars import GHC.Utils.Constants import GHC.Stack import Debug.Trace (trace) import Control.Monad.IO.Class -- | If debug output is on, show some 'SDoc' on the screen pprTrace :: String -> SDoc -> a -> a pprTrace str doc x | unsafeHasNoDebugOutput = x | otherwise = pprDebugAndThen traceSDocContext trace (text str) doc x pprTraceM :: Applicative f => String -> SDoc -> f () pprTraceM str doc = pprTrace str doc (pure ()) pprTraceDebug :: String -> SDoc -> a -> a pprTraceDebug str doc x | debugIsOn && unsafeHasPprDebug = pprTrace str doc x | otherwise = x -- | @pprTraceWith desc f x@ is equivalent to @pprTrace desc (f x) x@. -- This allows you to print details from the returned value as well as from -- ambient variables. pprTraceWith :: String -> (a -> SDoc) -> a -> a pprTraceWith desc f x = pprTrace desc (f x) x -- | @pprTraceIt desc x@ is equivalent to @pprTrace desc (ppr x) x@ pprTraceIt :: Outputable a => String -> a -> a pprTraceIt desc x = pprTraceWith desc ppr x -- | @pprTraceException desc x action@ runs action, printing a message -- if it throws an exception. pprTraceException :: ExceptionMonad m => String -> SDoc -> m a -> m a pprTraceException heading doc = handleGhcException $ \exc -> liftIO $ do putStrLn $ renderWithContext defaultSDocContext $ withPprStyle defaultDumpStyle $ sep [text heading, nest 2 doc] throwGhcExceptionIO exc -- | If debug output is on, show some 'SDoc' on the screen along -- with a call stack when available. pprSTrace :: HasCallStack => SDoc -> a -> a pprSTrace doc = pprTrace "" (doc $$ traceCallStackDoc) -- | Just warn about an assertion failure, recording the given file and line number. 
warnPprTrace :: HasCallStack => Bool -> String -> SDoc -> a -> a warnPprTrace _ _s _ x | not debugIsOn = x warnPprTrace _ _s _msg x | unsafeHasNoDebugOutput = x warnPprTrace False _s _msg x = x warnPprTrace True s msg x = pprDebugAndThen traceSDocContext trace (text "WARNING:") (text s $$ msg $$ withFrozenCallStack traceCallStackDoc ) x warnPprTraceM :: (Applicative f, HasCallStack) => Bool -> String -> SDoc -> f () warnPprTraceM b s doc = withFrozenCallStack warnPprTrace b s doc (pure ()) -- | For when we want to show the user a non-fatal WARNING so that they can -- report a GHC bug, but don't want to panic. pprTraceUserWarning :: HasCallStack => SDoc -> a -> a pprTraceUserWarning msg x | unsafeHasNoDebugOutput = x | otherwise = pprDebugAndThen traceSDocContext trace (text "WARNING:") (msg $$ withFrozenCallStack traceCallStackDoc ) x traceCallStackDoc :: HasCallStack => SDoc traceCallStackDoc = hang (text "Call stack:") 4 (vcat $ map text $ lines (prettyCallStack callStack)) ghc-lib-parser-9.12.2.20250421/compiler/GHC/Utils/Word64.hs0000644000000000000000000000101707346545000020426 0ustar0000000000000000module GHC.Utils.Word64 ( intToWord64, word64ToInt, truncateWord64ToWord32, ) where import GHC.Prelude import GHC.Utils.Panic.Plain (assert) import GHC.Utils.Misc (HasDebugCallStack) import Data.Word intToWord64 :: HasDebugCallStack => Int -> Word64 intToWord64 x = assert (0 <= x) (fromIntegral x) word64ToInt :: HasDebugCallStack => Word64 -> Int word64ToInt x = assert (x <= fromIntegral (maxBound :: Int)) (fromIntegral x) truncateWord64ToWord32 :: Word64 -> Word32 truncateWord64ToWord32 = fromIntegral ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/0000755000000000000000000000000007346545000020413 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax.hs0000644000000000000000000000622407346545000022241 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 
\section{Haskell abstract syntax definition} This module glues together the pieces of the Haskell abstract syntax, which is declared in the various \tr{Hs*} modules. This module, therefore, is almost nothing but re-exporting. -} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} -- For deriving instance Data {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax ( module Language.Haskell.Syntax.Binds, module Language.Haskell.Syntax.Decls, module Language.Haskell.Syntax.Expr, module Language.Haskell.Syntax.ImpExp, module Language.Haskell.Syntax.Lit, module Language.Haskell.Syntax.Module.Name, module Language.Haskell.Syntax.Pat, module Language.Haskell.Syntax.Type, module Language.Haskell.Syntax.Extension, ModuleName(..), HsModule(..) ) where import Language.Haskell.Syntax.Decls import Language.Haskell.Syntax.Binds import Language.Haskell.Syntax.Expr import Language.Haskell.Syntax.ImpExp import Language.Haskell.Syntax.Module.Name import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Type import Data.Maybe (Maybe) {- Note [Language.Haskell.Syntax.* Hierarchy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why are these modules not 'GHC.Hs.*', or some other 'GHC.*'? The answer is that they are to be separated from GHC and put into another package, in accordance with the final goals of Trees That Grow. (See Note [Trees That Grow] in 'Language.Haskell.Syntax.Extension'.) In short, the 'Language.Haskell.Syntax.*' tree should be entirely GHC-independent. GHC-specific stuff related to source-language syntax should be in 'GHC.Hs.*'. 
We cannot move them to the separate package yet, but by giving them names like so, we hope to remind others that the goal is to factor them out, and therefore dependencies on the rest of GHC should never be added, only removed. For more details, see https://gitlab.haskell.org/ghc/ghc/-/wikis/implementing-trees-that-grow -} -- | Haskell Module -- -- All we actually declare here is the top-level structure for a module. data HsModule p = HsModule { hsmodExt :: XCModule p, -- ^ HsModule extension point hsmodName :: Maybe (XRec p ModuleName), -- ^ @Nothing@: \"module X where\" is omitted (in which case the next -- field is Nothing too) hsmodExports :: Maybe (XRec p [LIE p]), -- ^ Export list -- -- - @Nothing@: export list omitted, so export everything -- -- - @Just []@: export /nothing/ -- -- - @Just [...]@: as you would expect... hsmodImports :: [LImportDecl p], hsmodDecls :: [LHsDecl p] -- ^ Type, class, value, and interface signature decls } | XModule !(XXModule p) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/0000755000000000000000000000000007346545000021701 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Basic.hs0000644000000000000000000001062607346545000023263 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE GeneralisedNewtypeDeriving #-} module Language.Haskell.Syntax.Basic where import Data.Data import Data.Eq import Data.Ord import Data.Bool import Data.Int (Int) import GHC.Data.FastString (FastString) import Control.DeepSeq {- ************************************************************************ * * Boxity * * ************************************************************************ -} data Boxity = Boxed | Unboxed deriving( Eq, Data ) isBoxed :: Boxity -> Bool isBoxed Boxed = True isBoxed Unboxed = False {- ************************************************************************ * * Counts and indices * * ************************************************************************ -} 
-- | The width of an unboxed sum type SumWidth = Int -- | A *one-index* constructor tag -- -- Type of the tags associated with each constructor possibility or superclass -- selector type ConTag = Int {- ************************************************************************ * * Field Labels * * ************************************************************************ -} -- | Field labels are just represented as strings; -- they are not necessarily unique (even within a module) newtype FieldLabelString = FieldLabelString { field_label:: FastString } deriving (Data, Eq, NFData) {- ************************************************************************ * * Field Labels * * ************************************************************************ -} -- | See Note [Roles] in GHC.Core.Coercion -- -- Order of constructors matters: the Ord instance coincides with the *super*typing -- relation on roles. data Role = Nominal | Representational | Phantom deriving (Eq, Ord, Data) {- ************************************************************************ * * Source Strictness and Unpackedness * * ************************************************************************ -} -- | Haskell Bang -- -- Bangs on data constructor arguments written by the user. -- -- @(HsBang SrcUnpack SrcLazy)@ and -- @(HsBang SrcUnpack NoSrcStrict)@ (without StrictData) makes no sense, we -- emit a warning (in checkValidDataCon) and treat it like -- @(HsBang NoSrcUnpack SrcLazy)@ -- -- 'GHC.Core.DataCon.HsSrcBang' is a wrapper around this, associating it with -- a 'GHC.Types.SourceText.SourceText' as written by the user. -- In the AST, the @SourceText@ is hidden inside the extension point -- 'Language.Haskell.Syntax.Extension.XBangTy'. data HsBang = HsBang SrcUnpackedness SrcStrictness deriving Data -- | Source Strictness -- -- What strictness annotation the user wrote data SrcStrictness = SrcLazy -- ^ Lazy, ie '~' | SrcStrict -- ^ Strict, ie '!' 
| NoSrcStrict -- ^ no strictness annotation deriving (Eq, Data) -- | Source Unpackedness -- -- What unpackedness the user requested data SrcUnpackedness = SrcUnpack -- ^ {-# UNPACK #-} specified | SrcNoUnpack -- ^ {-# NOUNPACK #-} specified | NoSrcUnpack -- ^ no unpack pragma deriving (Eq, Data) {- ************************************************************************ * * Fixity * * ************************************************************************ -} -- | Captures the fixity of declarations as they are parsed. This is not -- necessarily the same as the fixity declaration, as the normal fixity may be -- overridden using parens or backticks. data LexicalFixity = Prefix | Infix deriving (Eq, Data) data FixityDirection = InfixL | InfixR | InfixN deriving (Eq, Data) data Fixity = Fixity Int FixityDirection deriving (Eq, Data) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Binds.hs0000644000000000000000000004164307346545000023304 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ViewPatterns #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[HsBinds]{Abstract syntax: top-level bindings and signatures} Datatype for: @BindGroup@, @Bind@, @Sig@, @Bind@. 
-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Binds where import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( LHsExpr , MatchGroup , GRHSs ) import {-# SOURCE #-} Language.Haskell.Syntax.Pat ( LPat ) import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type import GHC.Types.Fixity (Fixity) import GHC.Types.Basic (InlinePragma) import GHC.Data.BooleanFormula (LBooleanFormula) import GHC.Types.SourceText (StringLiteral) import Data.Void import Data.Bool import Data.Maybe {- ************************************************************************ * * \subsection{Bindings: @BindGroup@} * * ************************************************************************ Global bindings (where clauses) -} -- During renaming, we need bindings where the left-hand sides -- have been renamed but the right-hand sides have not. -- Other than during renaming, these will be the same. -- | Haskell Local Bindings type HsLocalBinds id = HsLocalBindsLR id id -- | Located Haskell local bindings type LHsLocalBinds id = XRec id (HsLocalBinds id) -- | Haskell Local Bindings with separate Left and Right identifier types -- -- Bindings in a 'let' expression -- or a 'where' clause data HsLocalBindsLR idL idR = HsValBinds (XHsValBinds idL idR) (HsValBindsLR idL idR) -- ^ Haskell Value Bindings -- There should be no pattern synonyms in the HsValBindsLR -- These are *local* (not top level) bindings -- The parser accepts them, however, leaving the -- renamer to report them | HsIPBinds (XHsIPBinds idL idR) (HsIPBinds idR) -- ^ Haskell Implicit Parameter Bindings | EmptyLocalBinds (XEmptyLocalBinds idL idR) -- ^ Empty Local Bindings | XHsLocalBindsLR !(XXHsLocalBindsLR idL idR) type LHsLocalBindsLR idL idR = XRec idL (HsLocalBindsLR idL idR) -- | Haskell Value Bindings type HsValBinds id = HsValBindsLR id id -- | Haskell Value bindings with separate Left and Right identifier types -- (not implicit parameters) -- Used for both 
top level and nested bindings -- May contain pattern synonym bindings data HsValBindsLR idL idR = -- | Value Bindings In -- -- Before renaming RHS; idR is always RdrName -- Not dependency analysed -- Recursive by default ValBinds (XValBinds idL idR) (LHsBindsLR idL idR) [LSig idR] -- | Value Bindings Out -- -- After renaming RHS; idR can be Name or Id Dependency analysed, -- later bindings in the list may depend on earlier ones. | XValBindsLR !(XXValBindsLR idL idR) -- --------------------------------------------------------------------- -- | Located Haskell Binding type LHsBind id = LHsBindLR id id -- | Located Haskell Bindings type LHsBinds id = LHsBindsLR id id -- | Haskell Binding type HsBind id = HsBindLR id id -- | Located Haskell Bindings with separate Left and Right identifier types type LHsBindsLR idL idR = [LHsBindLR idL idR] -- | Located Haskell Binding with separate Left and Right identifier types type LHsBindLR idL idR = XRec idL (HsBindLR idL idR) {- Note [FunBind vs PatBind] ~~~~~~~~~~~~~~~~~~~~~~~~~ The distinction between FunBind and PatBind is a bit subtle. FunBind covers patterns which resemble function bindings and simple variable bindings. f x = e f !x = e f = e !x = e -- FunRhs has SrcStrict x `f` y = e -- FunRhs has Infix The actual patterns and RHSs of a FunBind are encoding in fun_matches. The m_ctxt field of each Match in fun_matches will be FunRhs and carries two bits of information about the match, * The mc_fixity field on each Match describes the fixity of the function binder in that match. E.g. this is legal: f True False = e1 True `f` True = e2 * The mc_strictness field is used /only/ for nullary FunBinds: ones with one Match, which has no pats. For these, it describes whether the match is decorated with a bang (e.g. `!x = e`). By contrast, PatBind represents data constructor patterns, as well as a few other interesting cases. 
Namely, Just x = e (x) = e x :: Ty = e Note [Multiplicity annotations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Multiplicity annotations are stored in the pat_mult field on PatBinds, represented by the HsMultAnn data type HsNoMultAnn <=> no annotation in the source file HsPct1Ann <=> the %1 annotation HsMultAnn <=> the %t annotation, where `t` is some type In case of HsNoMultAnn the typechecker infers a multiplicity. We don't need to store a multiplicity on FunBinds: - let %1 x = … is parsed as a PatBind. So we don't need an annotation before typechecking. - the multiplicity that the typechecker infers is stored in the binder's Var for the desugarer to use. It's only relevant for strict FunBinds, see Wrinkle 1 in Note [Desugar Strict binds] in GHC.HsToCore.Binds as, in Core, let expressions don't have multiplicity annotations. -} -- | Haskell Binding with separate Left and Right id's data HsBindLR idL idR = -- | Function-like Binding -- -- FunBind is used for both functions @f x = e@ -- and variables @f = \x -> e@ -- and strict variables @!x = x + 1@ -- -- Reason 1: Special case for type inference: see 'GHC.Tc.Gen.Bind.tcMonoBinds'. -- -- Reason 2: Instance decls can only have FunBinds, which is convenient. -- If you change this, you'll need to change e.g. rnMethodBinds -- -- But note that the form @f :: a->a = ...@ -- parses as a pattern binding, just like -- @(f :: a -> a) = ... @ -- -- Strict bindings have their strictness recorded in the 'SrcStrictness' of their -- 'MatchContext'. See Note [FunBind vs PatBind] for -- details about the relationship between FunBind and PatBind. FunBind { fun_ext :: XFunBind idL idR, fun_id :: LIdP idL, -- Note [fun_id in Match] in GHC.Hs.Expr fun_matches :: MatchGroup idR (LHsExpr idR) -- ^ The payload } -- | Pattern Binding -- -- The pattern is never a simple variable; -- That case is done by FunBind. -- See Note [FunBind vs PatBind] for details about the -- relationship between FunBind and PatBind. 
| PatBind { pat_ext :: XPatBind idL idR, pat_lhs :: LPat idL, pat_mult :: HsMultAnn idL, -- ^ See Note [Multiplicity annotations]. pat_rhs :: GRHSs idR (LHsExpr idR) } -- | Variable Binding -- -- Dictionary binding and suchlike. -- All VarBinds are introduced by the type checker | VarBind { var_ext :: XVarBind idL idR, var_id :: IdP idL, var_rhs :: LHsExpr idR -- ^ Located only for consistency } -- | Patterns Synonym Binding | PatSynBind (XPatSynBind idL idR) (PatSynBind idL idR) | XHsBindsLR !(XXHsBindsLR idL idR) -- | Pattern Synonym binding data PatSynBind idL idR = PSB { psb_ext :: XPSB idL idR, psb_id :: LIdP idL, -- ^ Name of the pattern synonym psb_args :: HsPatSynDetails idR, -- ^ Formal parameter names psb_def :: LPat idR, -- ^ Right-hand side psb_dir :: HsPatSynDir idR -- ^ Directionality } | XPatSynBind !(XXPatSynBind idL idR) -- | Multiplicity annotations, on binders, are always resolved (to a unification -- variable if there is no annotation) during type-checking. The resolved -- multiplicity is stored in the extension fields. data HsMultAnn pass = HsNoMultAnn !(XNoMultAnn pass) | HsPct1Ann !(XPct1Ann pass) | HsMultAnn !(XMultAnn pass) (LHsType (NoGhcTc pass)) | XMultAnn !(XXMultAnn pass) type family XNoMultAnn p type family XPct1Ann p type family XMultAnn p type family XXMultAnn p {- ************************************************************************ * * Implicit parameter bindings * * ************************************************************************ -} -- | Haskell Implicit Parameter Bindings data HsIPBinds id = IPBinds (XIPBinds id) [LIPBind id] -- TcEvBinds -- Only in typechecker output; binds -- -- uses of the implicit parameters | XHsIPBinds !(XXHsIPBinds id) -- | Located Implicit Parameter Binding type LIPBind id = XRec id (IPBind id) -- | Implicit parameter bindings. 
data IPBind id = IPBind (XCIPBind id) (XRec id HsIPName) (LHsExpr id) | XIPBind !(XXIPBind id) {- ************************************************************************ * * \subsection{@Sig@: type signatures and value-modifying user pragmas} * * ************************************************************************ It is convenient to lump ``value-modifying'' user-pragmas (e.g., ``specialise this function to these four types...'') in with type signatures. Then all the machinery to move them into place, etc., serves for both. -} -- | Located Signature type LSig pass = XRec pass (Sig pass) -- | Signatures and pragmas data Sig pass = -- | An ordinary type signature -- -- > f :: Num a => a -> a -- -- After renaming, this list of Names contains the named -- wildcards brought into scope by this signature. For a signature -- @_ -> _a -> Bool@, the renamer will leave the unnamed wildcard @_@ -- untouched, and the named wildcard @_a@ is then replaced with -- fresh meta vars in the type. Their names are stored in the type -- signature that brought them into scope, in this third field to be -- more specific. TypeSig (XTypeSig pass) [LIdP pass] -- LHS of the signature; e.g. f,g,h :: blah (LHsSigWcType pass) -- RHS of the signature; can have wildcards -- | A pattern synonym type signature -- -- > pattern Single :: () => (Show a) => a -> [a] | PatSynSig (XPatSynSig pass) [LIdP pass] (LHsSigType pass) -- P :: forall a b. Req => Prov => ty -- | A signature for a class method -- False: ordinary class-method signature -- True: generic-default class method signature -- e.g. 
class C a where -- op :: a -> a -- Ordinary -- default op :: Eq a => a -> a -- Generic default -- No wildcards allowed here | ClassOpSig (XClassOpSig pass) Bool [LIdP pass] (LHsSigType pass) -- | An ordinary fixity declaration -- -- > infixl 8 *** | FixSig (XFixSig pass) (FixitySig pass) -- | An inline pragma -- -- > {#- INLINE f #-} | InlineSig (XInlineSig pass) (LIdP pass) -- Function name InlinePragma -- Never defaultInlinePragma -- | A specialisation pragma -- -- > {-# SPECIALISE f :: Int -> Int #-} | SpecSig (XSpecSig pass) (LIdP pass) -- Specialise a function or datatype ... [LHsSigType pass] -- ... to these types InlinePragma -- The pragma on SPECIALISE_INLINE form. -- If it's just defaultInlinePragma, then we said -- SPECIALISE, not SPECIALISE_INLINE -- | A specialisation pragma for instance declarations only -- -- > {-# SPECIALISE instance Eq [Int] #-} -- -- (Class tys); should be a specialisation of the -- current instance declaration | SpecInstSig (XSpecInstSig pass) (LHsSigType pass) -- | A minimal complete definition pragma -- -- > {-# MINIMAL a | (b, c | (d | e)) #-} | MinimalSig (XMinimalSig pass) (LBooleanFormula (LIdP pass)) -- | A "set cost centre" pragma for declarations -- -- > {-# SCC funName #-} -- -- or -- -- > {-# SCC funName "cost_centre_name" #-} | SCCFunSig (XSCCFunSig pass) (LIdP pass) -- Function name (Maybe (XRec pass StringLiteral)) -- | A complete match pragma -- -- > {-# COMPLETE C, D [:: T] #-} -- -- Used to inform the pattern match checker about additional -- complete matchings which, for example, arise from pattern -- synonym definitions. | CompleteMatchSig (XCompleteMatchSig pass) [LIdP pass] (Maybe (LIdP pass)) | XSig !(XXSig pass) -- | Located Fixity Signature type LFixitySig pass = XRec pass (FixitySig pass) -- | Fixity Signature data FixitySig pass = FixitySig (XFixitySig pass) [LIdP pass] Fixity | XFixitySig !(XXFixitySig pass) isFixityLSig :: forall p. 
UnXRec p => LSig p -> Bool isFixityLSig (unXRec @p -> FixSig {}) = True isFixityLSig _ = False isTypeLSig :: forall p. UnXRec p => LSig p -> Bool -- Type signatures isTypeLSig (unXRec @p -> TypeSig {}) = True isTypeLSig (unXRec @p -> ClassOpSig {}) = True isTypeLSig (unXRec @p -> XSig {}) = True isTypeLSig _ = False isSpecLSig :: forall p. UnXRec p => LSig p -> Bool isSpecLSig (unXRec @p -> SpecSig {}) = True isSpecLSig _ = False isSpecInstLSig :: forall p. UnXRec p => LSig p -> Bool isSpecInstLSig (unXRec @p -> SpecInstSig {}) = True isSpecInstLSig _ = False isPragLSig :: forall p. UnXRec p => LSig p -> Bool -- Identifies pragmas isPragLSig (unXRec @p -> SpecSig {}) = True isPragLSig (unXRec @p -> InlineSig {}) = True isPragLSig (unXRec @p -> SCCFunSig {}) = True isPragLSig (unXRec @p -> CompleteMatchSig {}) = True isPragLSig _ = False isInlineLSig :: forall p. UnXRec p => LSig p -> Bool -- Identifies inline pragmas isInlineLSig (unXRec @p -> InlineSig {}) = True isInlineLSig _ = False isMinimalLSig :: forall p. UnXRec p => LSig p -> Bool isMinimalLSig (unXRec @p -> MinimalSig {}) = True isMinimalLSig _ = False isSCCFunSig :: forall p. UnXRec p => LSig p -> Bool isSCCFunSig (unXRec @p -> SCCFunSig {}) = True isSCCFunSig _ = False isCompleteMatchSig :: forall p. 
UnXRec p => LSig p -> Bool isCompleteMatchSig (unXRec @p -> CompleteMatchSig {} ) = True isCompleteMatchSig _ = False {- ************************************************************************ * * \subsection[PatSynBind]{A pattern synonym definition} * * ************************************************************************ -} -- | Haskell Pattern Synonym Details type HsPatSynDetails pass = HsConDetails Void (LIdP pass) [RecordPatSynField pass] -- See Note [Record PatSyn Fields] -- | Record Pattern Synonym Field data RecordPatSynField pass = RecordPatSynField { recordPatSynField :: FieldOcc pass -- ^ Field label visible in rest of the file , recordPatSynPatVar :: LIdP pass -- ^ Filled in by renamer, the name used internally by the pattern } {- Note [Record PatSyn Fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following two pattern synonyms. pattern P x y = ([x,True], [y,'v']) pattern Q{ x, y } =([x,True], [y,'v']) In P, we just have two local binders, x and y. In Q, we have local binders but also top-level record selectors x :: ([Bool], [Char]) -> Bool y :: ([Bool], [Char]) -> Char Both are recorded in the `RecordPatSynField`s for `x` and `y`: * recordPatSynField: the top-level record selector * recordPatSynPatVar: the local `x`, bound only in the RHS of the pattern synonym. It would make sense to support record-like syntax pattern Q{ x=x1, y=y1 } = ([x1,True], [y1,'v']) when we have a different name for the local and top-level binder, making the distinction between the two names clear. 
-} -- | Haskell Pattern Synonym Direction data HsPatSynDir id = Unidirectional | ImplicitBidirectional | ExplicitBidirectional (MatchGroup id (LHsExpr id)) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Decls.hs0000644000000000000000000020172507346545000023276 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ViewPatterns #-} {- (c) The University of Glasgow 2006 (c) The GRASP/@type@AQUA Project, Glasgow University, 1992-1998 -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Abstract syntax of global declarations. -- -- Definitions for: @SynDecl@ and @ConDecl@, @ClassDecl@, -- @InstDecl@, @DefaultDecl@ and @ForeignDecl@. 
module Language.Haskell.Syntax.Decls ( -- * Toplevel declarations HsDecl(..), LHsDecl, HsDataDefn(..), HsDeriving, LHsFunDep, FunDep(..), HsDerivingClause(..), LHsDerivingClause, DerivClauseTys(..), LDerivClauseTys, NewOrData(..), DataDefnCons(..), dataDefnConsNewOrData, isTypeDataDefnCons, firstDataDefnCon, StandaloneKindSig(..), LStandaloneKindSig, -- ** Class or type declarations TyClDecl(..), LTyClDecl, TyClGroup(..), tyClGroupTyClDecls, tyClGroupInstDecls, tyClGroupRoleDecls, tyClGroupKindSigs, isClassDecl, isDataDecl, isSynDecl, isFamilyDecl, isTypeFamilyDecl, isDataFamilyDecl, isOpenTypeFamilyInfo, isClosedTypeFamilyInfo, tyClDeclTyVars, FamilyDecl(..), LFamilyDecl, -- ** Instance declarations InstDecl(..), LInstDecl, FamilyInfo(..), familyInfoTyConFlavour, TyFamInstDecl(..), LTyFamInstDecl, TyFamDefltDecl, LTyFamDefltDecl, DataFamInstDecl(..), LDataFamInstDecl, FamEqn(..), TyFamInstEqn, LTyFamInstEqn, HsFamEqnPats, LClsInstDecl, ClsInstDecl(..), -- ** Standalone deriving declarations DerivDecl(..), LDerivDecl, -- ** Deriving strategies DerivStrategy(..), LDerivStrategy, -- ** @RULE@ declarations LRuleDecls,RuleDecls(..),RuleDecl(..),LRuleDecl, RuleBndr(..),LRuleBndr, collectRuleBndrSigTys, -- ** @default@ declarations DefaultDecl(..), LDefaultDecl, -- ** Template haskell declaration splice SpliceDecoration(..), SpliceDecl(..), LSpliceDecl, -- ** Foreign function interface declarations ForeignDecl(..), LForeignDecl, ForeignImport(..), ForeignExport(..), CImportSpec(..), -- ** Data-constructor declarations ConDecl(..), LConDecl, HsConDeclH98Details, HsConDeclGADTDetails(..), XPrefixConGADT, XRecConGADT, XXConDeclGADTDetails, -- ** Document comments DocDecl(..), LDocDecl, docDeclDoc, -- ** Deprecations WarnDecl(..), LWarnDecl, WarnDecls(..), LWarnDecls, -- ** Annotations AnnDecl(..), LAnnDecl, AnnProvenance(..), annProvenanceName_maybe, -- ** Role annotations RoleAnnotDecl(..), LRoleAnnotDecl, -- ** Injective type families FamilyResultSig(..), 
LFamilyResultSig, InjectivityAnn(..), LInjectivityAnn, -- * Grouping HsGroup(..), hsGroupInstDecls, ) where -- friends: import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( HsExpr, HsUntypedSplice ) -- Because Expr imports Decls via HsBracket import Language.Haskell.Syntax.Binds import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type import Language.Haskell.Syntax.Basic (Role) import Language.Haskell.Syntax.Specificity (Specificity) import GHC.Types.Basic (TopLevelFlag, OverlapMode, RuleName, Activation ,TyConFlavour(..), TypeOrData(..)) import GHC.Types.ForeignCall (CType, CCallConv, Safety, Header, CLabelString, CCallTarget, CExportSpec) import GHC.Types.Fixity (LexicalFixity) import GHC.Unit.Module.Warnings (WarningTxt) import GHC.Hs.Doc (LHsDoc) -- ROMES:TODO Discuss in #21592 whether this is parsed AST or base AST import Control.Monad import Control.Exception (assert) import Data.Data hiding (TyCon, Fixity, Infix) import Data.Void import Data.Maybe import Data.String import Data.Function import Data.Eq import Data.Int import Data.Bool import Prelude (Show) import qualified Data.List import Data.Foldable import Data.Traversable import Data.List.NonEmpty (NonEmpty (..)) {- ************************************************************************ * * \subsection[HsDecl]{Declarations} * * ************************************************************************ -} type LHsDecl p = XRec p (HsDecl p) -- ^ When in a list this may have -- | A Haskell Declaration data HsDecl p = TyClD (XTyClD p) (TyClDecl p) -- ^ Type or Class Declaration | InstD (XInstD p) (InstDecl p) -- ^ Instance declaration | DerivD (XDerivD p) (DerivDecl p) -- ^ Deriving declaration | ValD (XValD p) (HsBind p) -- ^ Value declaration | SigD (XSigD p) (Sig p) -- ^ Signature declaration | KindSigD (XKindSigD p) (StandaloneKindSig p) -- ^ Standalone kind signature | DefD (XDefD p) (DefaultDecl p) -- ^ 'default' declaration | ForD (XForD p) (ForeignDecl p) -- ^ Foreign declaration | 
WarningD (XWarningD p) (WarnDecls p) -- ^ Warning declaration | AnnD (XAnnD p) (AnnDecl p) -- ^ Annotation declaration | RuleD (XRuleD p) (RuleDecls p) -- ^ Rule declaration | SpliceD (XSpliceD p) (SpliceDecl p) -- ^ Splice declaration -- (Includes quasi-quotes) | DocD (XDocD p) (DocDecl p) -- ^ Documentation comment -- declaration | RoleAnnotD (XRoleAnnotD p) (RoleAnnotDecl p) -- ^Role annotation declaration | XHsDecl !(XXHsDecl p) {- Note [Top-level fixity signatures in an HsGroup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An `HsGroup p` stores every top-level fixity declarations in one of two places: 1. hs_fixds :: [LFixitySig p] This stores fixity signatures for top-level declarations (e.g., functions, data constructors, classes, type families, etc.) as well as fixity signatures for class methods written outside of the class, as in this example: infixl 4 `m1` class C1 a where m1 :: a -> a -> a 2. hs_tyclds :: [TyClGroup p] Each type class can be found in a TyClDecl inside a TyClGroup, and that TyClDecl stores the fixity signatures for its methods written inside of the class, as in this example: class C2 a where infixl 4 `m2` m2 :: a -> a -> a The story for fixity signatures for class methods is made slightly complicated by the fact that they can appear both inside and outside of the class itself, and both forms of fixity signatures are considered top-level. This matters in `GHC.Rename.Module.rnSrcDecls`, which must create a fixity environment out of all top-level fixity signatures before doing anything else. Therefore, `rnSrcDecls` must be aware of both (1) and (2) above. The `hsGroupTopLevelFixitySigs` function is responsible for collecting this information from an `HsGroup`. One might wonder why we even bother separating top-level fixity signatures into two places at all. That is, why not just take the fixity signatures from `hs_tyclds` and put them into `hs_fixds` so that they are all in one location? 
This ends up causing problems for `GHC.HsToCore.Quote.repTopDs`, which translates each fixity signature in `hs_fixds` and `hs_tyclds` into a Template Haskell `Dec`. If there are any duplicate signatures between the two fields, this will result in an error (#17608). -} -- | Haskell Group -- -- A 'HsDecl' is categorised into a 'HsGroup' before being -- fed to the renamer. data HsGroup p = HsGroup { hs_ext :: XCHsGroup p, hs_valds :: HsValBinds p, hs_splcds :: [LSpliceDecl p], hs_tyclds :: [TyClGroup p], -- A list of mutually-recursive groups; -- This includes `InstDecl`s as well; -- Parser generates a singleton list; -- renamer does dependency analysis hs_derivds :: [LDerivDecl p], hs_fixds :: [LFixitySig p], -- A list of fixity signatures defined for top-level -- declarations and class methods (defined outside of the class -- itself). -- See Note [Top-level fixity signatures in an HsGroup] hs_defds :: [LDefaultDecl p], hs_fords :: [LForeignDecl p], hs_warnds :: [LWarnDecls p], hs_annds :: [LAnnDecl p], hs_ruleds :: [LRuleDecls p], hs_docs :: [LDocDecl p] } | XHsGroup !(XXHsGroup p) hsGroupInstDecls :: HsGroup id -> [LInstDecl id] hsGroupInstDecls = (=<<) group_instds . hs_tyclds -- | Located Splice Declaration type LSpliceDecl pass = XRec pass (SpliceDecl pass) -- | Splice Declaration data SpliceDecl p = SpliceDecl -- Top level splice (XSpliceDecl p) (XRec p (HsUntypedSplice p)) SpliceDecoration -- Whether $( ) variant found, for pretty printing | XSpliceDecl !(XXSpliceDecl p) -- | A splice can appear with various decorations wrapped around it. This data -- type captures explicitly how it was originally written, for use in the pretty -- printer. 
data SpliceDecoration = DollarSplice -- ^ $splice | BareSplice -- ^ bare splice deriving (Data, Eq, Show) {- ************************************************************************ * * Type and class declarations * * ************************************************************************ Note [The Naming story] ~~~~~~~~~~~~~~~~~~~~~~~ Here is the story about the implicit names that go with type, class, and instance decls. It's a bit tricky, so pay attention! "Implicit" (or "system") binders ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each data type decl defines a worker name for each constructor to-T and from-T convertors Each class decl defines a tycon for the class a data constructor for that tycon the worker for that constructor a selector for each superclass All have occurrence names that are derived uniquely from their parent declaration. None of these get separate definitions in an interface file; they are fully defined by the data or class decl. But they may *occur* in interface files, of course. Any such occurrence must haul in the relevant type or class decl. Plan of attack: - Ensure they "point to" the parent data/class decl when loading that decl from an interface file (See RnHiFiles.getSysBinders) - When typechecking the decl, we build the implicit TyCons and Ids. When doing so we look them up in the name cache (GHC.Rename.Env.lookupSysName), to ensure correct module and provenance is set These are the two places that we have to conjure up the magic derived names. (The actual magic is in GHC.Types.Name.Occurrence.mkWorkerOcc, etc.) Default methods ~~~~~~~~~~~~~~~ - Occurrence name is derived uniquely from the method name E.g. $dmmax - If there is a default method name at all, it's recorded in the ClassOpSig (in GHC.Hs.Binds), in the DefMethInfo field. (DefMethInfo is defined in GHC.Core.Class) Source-code class decls and interface-code class decls are treated subtly differently, which has given me a great deal of confusion over the years. Here's the deal. 
(We distinguish the two cases because source-code decls have (Just binds) in the tcdMeths field, whereas interface decls have Nothing. In *source-code* class declarations: - When parsing, every ClassOpSig gets a DefMeth with a suitable RdrName This is done by GHC.Parser.PostProcess.mkClassOpSigDM - The renamer renames it to a Name - During typechecking, we generate a binding for each $dm for which there's a programmer-supplied default method: class Foo a where op1 :: op2 :: op1 = ... We generate a binding for $dmop1 but not for $dmop2. The Class for Foo has a Nothing for op2 and a Just ($dm_op1, VanillaDM) for op1. The Name for $dmop2 is simply discarded. In *interface-file* class declarations: - When parsing, we see if there's an explicit programmer-supplied default method because there's an '=' sign to indicate it: class Foo a where op1 = :: -- NB the '=' op2 :: We use this info to generate a DefMeth with a suitable RdrName for op1, and a NoDefMeth for op2 - The interface file has a separate definition for $dmop1, with unfolding etc. - The renamer renames it to a Name. - The renamer treats $dmop1 as a free variable of the declaration, so that the binding for $dmop1 will be sucked in. (See RnHsSyn.tyClDeclFVs) This doesn't happen for source code class decls, because they *bind* the default method. Dictionary functions ~~~~~~~~~~~~~~~~~~~~ Each instance declaration gives rise to one dictionary function binding. The type checker makes up new source-code instance declarations (e.g. from 'deriving' or generic default methods --- see GHC.Tc.TyCl.Instance.tcInstDecls1). So we can't generate the names for dictionary functions in advance (we don't know how many we need). On the other hand for interface-file instance declarations, the decl specifies the name of the dictionary function, and it has a binding elsewhere in the interface file: instance {Eq Int} = dEqInt dEqInt :: {Eq Int} So again we treat source code and interface file code slightly differently. 
Source code: - Source code instance decls have a Nothing in the (Maybe name) field (see data InstDecl below) - The typechecker makes up a Local name for the dict fun for any source-code instance decl, whether it comes from a source-code instance decl, or whether the instance decl is derived from some other construct (e.g. 'deriving'). - The occurrence name it chooses is derived from the instance decl (just for documentation really) --- e.g. dNumInt. Two dict funs may share a common occurrence name, but will have different uniques. E.g. instance Foo [Int] where ... instance Foo [Bool] where ... These might both be dFooList - The CoreTidy phase externalises the name, and ensures the occurrence name is unique (this isn't special to dict funs). So we'd get dFooList and dFooList1. - We can take this relaxed approach (changing the occurrence name later) because dict fun Ids are not captured in a TyCon or Class (unlike default methods, say). Instead, they are kept separately in the InstEnv. This makes it easy to adjust them after compiling a module. (Once we've finished compiling that module, they don't change any more.) Interface file code: - The instance decl gives the dict fun name, so the InstDecl has a (Just name) in the (Maybe name) field. - RnHsSyn.instDeclFVs treats the dict fun name as free in the decl, so that we suck in the dfun binding -} -- | Located Declaration of a Type or Class type LTyClDecl pass = XRec pass (TyClDecl pass) -- | A type or class declaration. 
data TyClDecl pass = -- | @type/data family T :: *->*@ FamDecl { tcdFExt :: XFamDecl pass, tcdFam :: FamilyDecl pass } | -- | @type@ declaration SynDecl { tcdSExt :: XSynDecl pass -- ^ Post renamer, FVs , tcdLName :: LIdP pass -- ^ Type constructor , tcdTyVars :: LHsQTyVars pass -- ^ Type variables; for an -- associated type these -- include outer binders , tcdFixity :: LexicalFixity -- ^ Fixity used in the declaration , tcdRhs :: LHsType pass } -- ^ RHS of type declaration | -- | @data@ declaration DataDecl { tcdDExt :: XDataDecl pass -- ^ Post renamer, CUSK flag, FVs , tcdLName :: LIdP pass -- ^ Type constructor , tcdTyVars :: LHsQTyVars pass -- ^ Type variables -- See Note [TyVar binders for associated decls] , tcdFixity :: LexicalFixity -- ^ Fixity used in the declaration , tcdDataDefn :: HsDataDefn pass } | ClassDecl { tcdCExt :: XClassDecl pass, -- ^ Post renamer, FVs tcdCtxt :: Maybe (LHsContext pass), -- ^ Context... tcdLName :: LIdP pass, -- ^ Name of the class tcdTyVars :: LHsQTyVars pass, -- ^ Class type variables tcdFixity :: LexicalFixity, -- ^ Fixity used in the declaration tcdFDs :: [LHsFunDep pass], -- ^ Functional deps tcdSigs :: [LSig pass], -- ^ Methods' signatures tcdMeths :: LHsBinds pass, -- ^ Default methods tcdATs :: [LFamilyDecl pass], -- ^ Associated types; tcdATDefs :: [LTyFamDefltDecl pass], -- ^ Associated type defaults tcdDocs :: [LDocDecl pass] -- ^ Haddock docs } | XTyClDecl !(XXTyClDecl pass) data FunDep pass = FunDep (XCFunDep pass) [LIdP pass] [LIdP pass] | XFunDep !(XXFunDep pass) type LHsFunDep pass = XRec pass (FunDep pass) {- Note [TyVar binders for associated decls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For an /associated/ data, newtype, or type-family decl, the LHsQTyVars /includes/ outer binders. For example class T a where data D a c type F a b :: * type F a b = a -> a Here the data decl for 'D', and type-family decl for 'F', both include 'a' in their LHsQTyVars (tcdTyVars and fdTyVars resp). 
Ditto any implicit binders in the hsq_implicit field of the LHSQTyVars.

The idea is that the associated type is really a top-level decl in its
own right.  However we are careful to use the same name 'a', so that
we can match things up.

c.f. Note [Associated type tyvar names] in GHC.Core.Class
     Note [Family instance declaration binders]
-}

{- Note [Class EpLayout]
~~~~~~~~~~~~~~~~~~~~~~~~
The EpLayout is used to associate Haddock comments with parts of the
declaration. Compare the following examples:

    class C a where
      f :: a -> Int
      -- ^ comment on f

    class C a where
      f :: a -> Int
    -- ^ comment on C

Notice how "comment on f" and "comment on C" differ only by indentation level.
Thus we have to record the indentation level of the class declarations.

See also Note [Adding Haddock comments to the syntax tree] in
GHC.Parser.PostProcess.Haddock
-}

-- Simple classifiers for TyClDecl
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Each classifier matches on exactly one constructor of 'TyClDecl' (or
-- 'FamilyInfo') and returns False for everything else, including the
-- extension constructor.

-- | @True@ <=> argument is a @data@\/@newtype@
-- declaration.
isDataDecl :: TyClDecl pass -> Bool
isDataDecl (DataDecl {}) = True
isDataDecl _other        = False

-- | type or type instance declaration
isSynDecl :: TyClDecl pass -> Bool
isSynDecl (SynDecl {})   = True
isSynDecl _other         = False

-- | type class
isClassDecl :: TyClDecl pass -> Bool
isClassDecl (ClassDecl {}) = True
isClassDecl _              = False

-- | type/data family declaration
isFamilyDecl :: TyClDecl pass -> Bool
isFamilyDecl (FamDecl {})  = True
isFamilyDecl _other        = False

-- | type family declaration (open or closed; a data family returns False)
isTypeFamilyDecl :: TyClDecl pass -> Bool
isTypeFamilyDecl (FamDecl _ (FamilyDecl { fdInfo = info })) = case info of
  OpenTypeFamily      -> True
  ClosedTypeFamily {} -> True
  _                   -> False
isTypeFamilyDecl _ = False

-- | open type family info
isOpenTypeFamilyInfo :: FamilyInfo pass -> Bool
isOpenTypeFamilyInfo OpenTypeFamily = True
isOpenTypeFamilyInfo _              = False

-- | closed type family info
isClosedTypeFamilyInfo :: FamilyInfo pass -> Bool
isClosedTypeFamilyInfo (ClosedTypeFamily {}) = True
isClosedTypeFamilyInfo _                     = False

-- |
data family declaration isDataFamilyDecl :: TyClDecl pass -> Bool isDataFamilyDecl (FamDecl _ (FamilyDecl { fdInfo = DataFamily })) = True isDataFamilyDecl _other = False -- Dealing with names tyClDeclTyVars :: TyClDecl pass -> LHsQTyVars pass tyClDeclTyVars (FamDecl { tcdFam = FamilyDecl { fdTyVars = tvs } }) = tvs tyClDeclTyVars d = tcdTyVars d {- Note [CUSKs: complete user-supplied kind signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We kind-check declarations differently if they have a complete, user-supplied kind signature (CUSK). This is because we can safely generalise a CUSKed declaration before checking all of the others, supporting polymorphic recursion. See https://gitlab.haskell.org/ghc/ghc/wikis/ghc-kinds/kind-inference#proposed-new-strategy and #9200 for lots of discussion of how we got here. The detection of CUSKs is enabled by the -XCUSKs extension, switched off by default in GHC2021 and on in Haskell98/2010. Under -XNoCUSKs, all declarations are treated as if they have no CUSK. See https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0036-kind-signatures.rst PRINCIPLE: a type declaration has a CUSK iff we could produce a separate kind signature for it, just like a type signature for a function, looking only at the header of the declaration. Examples: * data T1 (a :: *->*) (b :: *) = .... -- Has CUSK; equivalent to T1 :: (*->*) -> * -> * * data T2 a b = ... -- No CUSK; we do not want to guess T2 :: * -> * -> * -- because the full decl might be data T a b = MkT (a b) * data T3 (a :: k -> *) (b :: *) = ... -- CUSK; equivalent to T3 :: (k -> *) -> * -> * -- We lexically generalise over k to get -- T3 :: forall k. (k -> *) -> * -> * -- The generalisation is here is purely lexical, just like -- f3 :: a -> a -- means -- f3 :: forall a. a -> a * data T4 (a :: j k) = ... -- CUSK; equivalent to T4 :: j k -> * -- which we lexically generalise to T4 :: forall j k. 
                                  j k -> *
           -- and then, if PolyKinds is on, we further generalise to
           --   T4 :: forall kk (j :: kk -> *) (k :: kk). j k -> *
           -- Again this is exactly like what happens at the term level
           -- when you write
           --   f4 :: forall a b. a b -> Int

NOTE THAT

* A CUSK does /not/ mean that everything about the kind signature is
  fully specified by the user.  Look at T4 and f4: we had to do kind
  inference to figure out the kind-quantification.  But in both cases
  (T4 and f4) that inference is done looking /only/ at the header of T4
  (or signature for f4), not at the definition thereof.

* The CUSK completely fixes the kind of the type constructor, forever.

* The precise rules, for each declaration form, for whether a declaration
  has a CUSK are given in the user manual section "Complete user-supplied
  kind signatures and polymorphic recursion".  But they simply implement
  PRINCIPLE above.

* Open type families are interesting:
    type family T5 a b :: *
  There simply /is/ no accompanying declaration, so that info is all
  we'll ever get.  So it has a CUSK by definition, and we default
  any un-fixed kind variables to *.

* Associated types are a bit trickier:
    class C6 a where
       type family T6 a b :: *
       op :: a Int -> Int
  Here C6 does not have a CUSK (in fact we ultimately discover that
  a :: * -> *).  And hence neither does T6, the associated family,
  because we can't fix its kind until we have settled C6.  Another way
  to say it: unlike a top-level, we /may/ discover more about a's kind
  from C6's definition.

* A data definition with a top-level :: must explicitly bind all
  kind variables to the right of the ::. See test
  dependent/should_compile/KindLevels, which requires this case.
  (Naturally, any kind variable mentioned before the :: should not be
  bound after it.)
  This last point is much more debatable than the others; see
  #15142 comment:22

Because this is fiddly to check, there is a field in the DataDeclRn
structure (included in a DataDecl after the renamer) that stores whether
or not the declaration has a CUSK.
-}


{- *********************************************************************
*                                                                      *
                         TyClGroup
        Strongly connected components of
      type, class, instance, and role declarations
*                                                                      *
********************************************************************* -}

{- Note [TyClGroups and dependency analysis]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A TyClGroup represents a strongly connected component of
type/class/instance decls, together with the role annotations for the
type/class declarations.  The hs_tyclds :: [TyClGroup] field of a HsGroup is a
dependency-order sequence of strongly-connected components.

Invariants
 * The type and class declarations, group_tyclds, may depend on each
   other, or earlier TyClGroups, but not on later ones

 * The role annotations, group_roles, are role-annotations for some or
   all of the types and classes in group_tyclds (only).

 * The instance declarations, group_instds, may (and usually will)
   depend on group_tyclds, or on earlier TyClGroups, but not on later
   ones.

See Note [Dependency analysis of type, class, and instance decls]
in GHC.Rename.Module for more info.
-}

-- | Type or Class Group
data TyClGroup pass  -- See Note [TyClGroups and dependency analysis]
  = TyClGroup { group_ext    :: XCTyClGroup pass
              , group_tyclds :: [LTyClDecl pass]
              , group_roles  :: [LRoleAnnotDecl pass]
              , group_kisigs :: [LStandaloneKindSig pass]
              , group_instds :: [LInstDecl pass] }
  | XTyClGroup !(XXTyClGroup pass)

-- | All type/class declarations of the given groups, concatenated in order.
tyClGroupTyClDecls :: [TyClGroup pass] -> [LTyClDecl pass]
tyClGroupTyClDecls = Data.List.concatMap group_tyclds

-- | All instance declarations of the given groups, concatenated in order.
tyClGroupInstDecls :: [TyClGroup pass] -> [LInstDecl pass]
tyClGroupInstDecls = Data.List.concatMap group_instds

-- | All role annotations of the given groups, concatenated in order.
tyClGroupRoleDecls :: [TyClGroup pass] -> [LRoleAnnotDecl pass]
tyClGroupRoleDecls = Data.List.concatMap group_roles

-- | All standalone kind signatures of the given groups, concatenated in order.
tyClGroupKindSigs :: [TyClGroup pass] -> [LStandaloneKindSig pass]
tyClGroupKindSigs = Data.List.concatMap group_kisigs


{- *********************************************************************
*                                                                      *
               Data and type family declarations
*                                                                      *
********************************************************************* -}

{- Note [FamilyResultSig]
~~~~~~~~~~~~~~~~~~~~~~~~~
This data type represents the return signature of a type family.  Possible
values are:

 * NoSig - the user supplied no return signature:
      type family Id a where ...

 * KindSig - the user supplied the return kind:
      type family Id a :: * where ...

 * TyVarSig - user named the result with a type variable and possibly
   provided a kind signature for that variable:
      type family Id a = r where ...
      type family Id a = (r :: *) where ...

Naming result of a type family is required if we want to provide
injectivity annotation for a type family:
      type family Id a = r | r -> a where ...

See also: Note [Injectivity annotation]

Note [Injectivity annotation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A user can declare a type family to be injective:
      type family Id a = r | r -> a where ...

 * The part after the "|" is called "injectivity annotation".
* "r -> a" part is called "injectivity condition"; at the moment terms "injectivity annotation" and "injectivity condition" are synonymous because we only allow a single injectivity condition. * "r" is the "LHS of injectivity condition". LHS can only contain the variable naming the result of a type family. * "a" is the "RHS of injectivity condition". RHS contains space-separated type and kind variables representing the arguments of a type family. Variables can be omitted if a type family is not injective in these arguments. Example: type family Foo a b c = d | d -> a c where ... Note that: (a) naming of type family result is required to provide injectivity annotation (b) for associated types if the result was named then injectivity annotation is mandatory. Otherwise result type variable is indistinguishable from associated type default. It is possible that in the future this syntax will be extended to support more complicated injectivity annotations. For example we could declare that if we know the result of Plus and one of its arguments we can determine the other argument: type family Plus a b = (r :: Nat) | r a -> b, r b -> a where ... Here injectivity annotation would consist of two comma-separated injectivity conditions. 
See also Note [Injective type families] in GHC.Core.TyCon -} -- | Located type Family Result Signature type LFamilyResultSig pass = XRec pass (FamilyResultSig pass) -- | type Family Result Signature data FamilyResultSig pass = -- see Note [FamilyResultSig] NoSig (XNoSig pass) | KindSig (XCKindSig pass) (LHsKind pass) | TyVarSig (XTyVarSig pass) (LHsTyVarBndr () pass) | XFamilyResultSig !(XXFamilyResultSig pass) -- | Located type Family Declaration type LFamilyDecl pass = XRec pass (FamilyDecl pass) -- | type Family Declaration data FamilyDecl pass = FamilyDecl { fdExt :: XCFamilyDecl pass , fdInfo :: FamilyInfo pass -- type/data, closed/open , fdTopLevel :: TopLevelFlag -- used for printing only , fdLName :: LIdP pass -- type constructor , fdTyVars :: LHsQTyVars pass -- type variables -- See Note [TyVar binders for associated decls] , fdFixity :: LexicalFixity -- Fixity used in the declaration , fdResultSig :: LFamilyResultSig pass -- result signature , fdInjectivityAnn :: Maybe (LInjectivityAnn pass) -- optional injectivity ann } | XFamilyDecl !(XXFamilyDecl pass) -- | Located Injectivity Annotation type LInjectivityAnn pass = XRec pass (InjectivityAnn pass) -- | If the user supplied an injectivity annotation it is represented using -- InjectivityAnn. At the moment this is a single injectivity condition - see -- Note [Injectivity annotation]. `Located name` stores the LHS of injectivity -- condition. `[Located name]` stores the RHS of injectivity condition. Example: -- -- type family Foo a b c = r | r -> a c where ... -- -- This will be represented as "InjectivityAnn `r` [`a`, `c`]" data InjectivityAnn pass = InjectivityAnn (XCInjectivityAnn pass) (LIdP pass) [LIdP pass] | XInjectivityAnn !(XXInjectivityAnn pass) data FamilyInfo pass = DataFamily | OpenTypeFamily -- | 'Nothing' if we're in an hs-boot file and the user -- said "type family Foo x where .." 
| ClosedTypeFamily (Maybe [LTyFamInstEqn pass]) familyInfoTyConFlavour :: Maybe tc -- ^ Just cls <=> this is an associated family of class cls -> FamilyInfo pass -> TyConFlavour tc familyInfoTyConFlavour mb_parent_tycon info = case info of DataFamily -> OpenFamilyFlavour IAmData mb_parent_tycon OpenTypeFamily -> OpenFamilyFlavour IAmType mb_parent_tycon ClosedTypeFamily _ -> assert (isNothing mb_parent_tycon) -- See Note [Closed type family mb_parent_tycon] ClosedTypeFamilyFlavour {- Note [Closed type family mb_parent_tycon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There's no way to write a closed type family inside a class declaration: class C a where type family F a where -- error: parse error on input ‘where’ In fact, it is not clear what the meaning of such a declaration would be. Therefore, 'mb_parent_tycon' of any closed type family has to be Nothing. -} {- ********************************************************************* * * Data types and data constructors * * ********************************************************************* -} -- | Haskell Data type Definition data HsDataDefn pass -- The payload of a data type defn -- Used *both* for vanilla data declarations, -- *and* for data family instances = -- | Declares a data type or newtype, giving its constructors -- @ -- data/newtype T a = -- data/newtype instance T [a] = -- @ HsDataDefn { dd_ext :: XCHsDataDefn pass, dd_ctxt :: Maybe (LHsContext pass), -- ^ Context dd_cType :: Maybe (XRec pass CType), dd_kindSig:: Maybe (LHsKind pass), -- ^ Optional kind signature. -- -- @(Just k)@ for a GADT-style @data@, -- or @data instance@ decl, with explicit kind sig -- -- Always @Nothing@ for H98-syntax decls dd_cons :: DataDefnCons (LConDecl pass), -- ^ Data constructors -- -- For @data T a = T1 | T2 a@ -- the 'LConDecl's all have 'ConDeclH98'. -- For @data T a where { T1 :: T a }@ -- the 'LConDecls' all have 'ConDeclGADT'. 
dd_derivs :: HsDeriving pass -- ^ Optional 'deriving' clause -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation } | XHsDataDefn !(XXHsDataDefn pass) -- | Haskell Deriving clause type HsDeriving pass = [LHsDerivingClause pass] -- ^ The optional @deriving@ clauses of a data declaration. "Clauses" is -- plural because one can specify multiple deriving clauses using the -- @-XDerivingStrategies@ language extension. -- -- The list of 'LHsDerivingClause's corresponds to exactly what the user -- requested to derive, in order. If no deriving clauses were specified, -- the list is empty. type LHsDerivingClause pass = XRec pass (HsDerivingClause pass) -- | A single @deriving@ clause of a data declaration. data HsDerivingClause pass -- See Note [Deriving strategies] in GHC.Tc.Deriv = HsDerivingClause { deriv_clause_ext :: XCHsDerivingClause pass , deriv_clause_strategy :: Maybe (LDerivStrategy pass) -- ^ The user-specified strategy (if any) to use when deriving -- 'deriv_clause_tys'. , deriv_clause_tys :: LDerivClauseTys pass -- ^ The types to derive. } | XHsDerivingClause !(XXHsDerivingClause pass) type LDerivClauseTys pass = XRec pass (DerivClauseTys pass) -- | The types mentioned in a single @deriving@ clause. This can come in two -- forms, 'DctSingle' or 'DctMulti', depending on whether the types are -- surrounded by enclosing parentheses or not. These parentheses are -- semantically different than 'HsParTy'. For example, @deriving ()@ means -- \"derive zero classes\" rather than \"derive an instance of the 0-tuple\". -- -- 'DerivClauseTys' use 'LHsSigType' because @deriving@ clauses can mention -- type variables that aren't bound by the datatype, e.g. -- -- > data T b = ... deriving (C [a]) -- -- should produce a derived instance for @C [a] (T b)@. data DerivClauseTys pass = -- | A @deriving@ clause with a single type. Moreover, that type can only -- be a type constructor without any arguments. 
-- -- Example: @deriving Eq@ DctSingle (XDctSingle pass) (LHsSigType pass) -- | A @deriving@ clause with a comma-separated list of types, surrounded -- by enclosing parentheses. -- -- Example: @deriving (Eq, C a)@ | DctMulti (XDctMulti pass) [LHsSigType pass] | XDerivClauseTys !(XXDerivClauseTys pass) -- | Located Standalone Kind Signature type LStandaloneKindSig pass = XRec pass (StandaloneKindSig pass) data StandaloneKindSig pass = StandaloneKindSig (XStandaloneKindSig pass) (LIdP pass) -- Why a single binder? See #16754 (LHsSigType pass) -- Why not LHsSigWcType? See Note [Wildcards in standalone kind signatures] | XStandaloneKindSig !(XXStandaloneKindSig pass) {- Note [Wildcards in standalone kind signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Standalone kind signatures enable polymorphic recursion, and it is unclear how to reconcile this with partial type signatures, so we disallow wildcards in them. We reject wildcards in 'rnStandaloneKindSignature' by returning False for 'StandaloneKindSigCtx' in 'wildCardsAllowed'. The alternative design is to have special treatment for partial standalone kind signatures, much like we have special treatment for partial type signatures in terms. However, partial standalone kind signatures are not a proper replacement for CUSKs, so this would be a separate feature. -} -- | When we only care whether a data-type declaration is `data` or `newtype`, but not what constructors it has data NewOrData = NewType -- ^ @newtype Blah ...@ | DataType -- ^ @data Blah ...@ deriving ( Eq, Data ) -- Needed because Demand derives Eq -- | Whether a data-type declaration is @data@ or @newtype@, and its constructors. data DataDefnCons a = NewTypeCon -- @newtype N x = MkN blah@ a -- Info about the single data constructor @MkN@ | DataTypeCons Bool -- True <=> type data T x = ... -- See Note [Type data declarations] in GHC.Rename.Module -- False <=> data T x = ... 
[a] -- The (possibly empty) list of data constructors deriving ( Eq, Data, Foldable, Functor, Traversable ) -- Needed because Demand derives Eq dataDefnConsNewOrData :: DataDefnCons a -> NewOrData dataDefnConsNewOrData = \ case NewTypeCon _ -> NewType DataTypeCons _ _ -> DataType -- | Are the constructors within a @type data@ declaration? -- See Note [Type data declarations] in GHC.Rename.Module. isTypeDataDefnCons :: DataDefnCons a -> Bool isTypeDataDefnCons (NewTypeCon _) = False isTypeDataDefnCons (DataTypeCons is_type_data _) = is_type_data -- | Retrieve the first data constructor in a 'DataDefnCons' (if one exists). firstDataDefnCon :: DataDefnCons a -> Maybe a firstDataDefnCon (NewTypeCon con) = Just con firstDataDefnCon (DataTypeCons _ cons) = listToMaybe cons -- | Located data Constructor Declaration type LConDecl pass = XRec pass (ConDecl pass) -- | -- -- @ -- data T b = forall a. Eq a => MkT a b -- MkT :: forall b a. Eq a => MkT a b -- -- data T b where -- MkT1 :: Int -> T Int -- -- data T = Int `MkT` Int -- | MkT2 -- -- data T a where -- Int `MkT` Int :: T Int -- @ -- | data Constructor Declaration data ConDecl pass = ConDeclGADT { con_g_ext :: XConDeclGADT pass , con_names :: NonEmpty (LIdP pass) -- The following fields describe the type after the '::' -- See Note [GADT abstract syntax] , con_bndrs :: XRec pass (HsOuterSigTyVarBndrs pass) -- ^ The outermost type variable binders, be they explicit or -- implicit. The 'XRec' is used to anchor exact print -- annotations, AnnForall and AnnDot. , con_mb_cxt :: Maybe (LHsContext pass) -- ^ User-written context (if any) , con_g_args :: HsConDeclGADTDetails pass -- ^ Arguments; never infix , con_res_ty :: LHsType pass -- ^ Result type , con_doc :: Maybe (LHsDoc pass) -- ^ A possible Haddock -- comment. } | ConDeclH98 { con_ext :: XConDeclH98 pass , con_name :: LIdP pass , con_forall :: Bool -- ^ True <=> explicit user-written forall -- e.g. data T a = forall b. 
MkT b (b->a) -- con_ex_tvs = {b} -- False => con_ex_tvs is empty , con_ex_tvs :: [LHsTyVarBndr Specificity pass] -- ^ Existentials only , con_mb_cxt :: Maybe (LHsContext pass) -- ^ User-written context (if any) , con_args :: HsConDeclH98Details pass -- ^ Arguments; can be infix , con_doc :: Maybe (LHsDoc pass) -- ^ A possible Haddock comment. } | XConDecl !(XXConDecl pass) {- Note [GADT abstract syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The types of both forms of GADT constructors are very structured, as they must consist of the quantified type variables (if provided), followed by the context (if provided), followed by the argument types (if provided), followed by the result type. (See "Wrinkle: No nested foralls or contexts" below for more discussion on the restrictions imposed here.) As a result, instead of storing the type of a GADT constructor as a single LHsType, we split it up into its constituent components for easier access. There are two broad ways to classify GADT constructors: * Record-syntax constructors. For example: data T a where K :: forall a. Ord a => { x :: [a], ... } -> T a * Prefix constructors, which do not use record syntax. For example: data T a where K :: forall a. Ord a => [a] -> ... -> T a This distinction is recorded in the `con_args :: HsConDetails pass`, which tracks if we're dealing with a RecCon or PrefixCon. It is easy to distinguish the two in the AST since record GADT constructors use HsRecTy. This distinction is made in GHC.Parser.PostProcess.mkGadtDecl. It is worth elaborating a bit more on the process of splitting the argument types of a GADT constructor, since there are some non-obvious details involved. While splitting the argument types of a record GADT constructor is easy (they are stored in an HsRecTy), splitting the arguments of a prefix GADT constructor is trickier. The basic idea is that we must split along the outermost function arrows ((->) and (%1 ->)) in the type, which GHC.Hs.Type.splitHsFunType accomplishes. 
But what about type operators? Consider: C :: a :*: b -> a :*: b -> a :+: b This could parse in many different ways depending on the precedences of each type operator. In particular, if (:*:) were to have lower precedence than (->), then it could very well parse like this: a :*: ((b -> a) :*: ((b -> a) :+: b))) This would give the false impression that the whole type is part of one large return type, with no arguments. Note that we do not fully resolve the exact precedences of each user-defined type operator until the renamer, so this a more difficult task for the parser. Fortunately, there is no risk of the above happening. GHC's parser gives special treatment to function arrows, and as a result, they are always parsed with a lower precedence than any other type operator. As a result, the type above is actually parsed like this: (a :*: b) -> ((a :*: b) -> (a :+: b)) While we won't know the exact precedences of (:*:) and (:+:) until the renamer, all we are concerned about in the parser is identifying the overall shape of the argument and result types, which we can accomplish by piggybacking on the special treatment given to function arrows. In a future where function arrows aren't given special status in the parser, we will likely have to modify GHC.Parser.PostProcess.mkHsOpTyPV to preserve this trick. ----- -- Wrinkle: No nested foralls or contexts ----- GADT constructors provide some freedom to change the order of foralls in their types (see Note [DataCon user type variable binders] in GHC.Core.DataCon), but this freedom is still limited. GADTs still require that all quantification occurs "prenex". That is, any explicitly quantified type variables must occur at the front of the GADT type, followed by any contexts, followed by the body of the GADT type, in precisely that order. For instance: data T where MkT1 :: forall a b. (Eq a, Eq b) => a -> b -> T -- OK MkT2 :: forall a. Eq a => forall b. a -> b -> T -- Rejected, `forall b` is nested MkT3 :: forall a b. 
Eq a => Eq b => a -> b -> T -- Rejected, `Eq b` is nested MkT4 :: Int -> forall a. a -> T -- Rejected, `forall a` is nested MkT5 :: forall a. Int -> Eq a => a -> T -- Rejected, `Eq a` is nested MkT6 :: (forall a. a -> T) -- Rejected, `forall a` is nested due to the surrounding parentheses MkT7 :: (Eq a => a -> t) -- Rejected, `Eq a` is nested due to the surrounding parentheses For the full details, see the "Formal syntax for GADTs" section of the GHC User's Guide. GHC enforces that GADT constructors do not have nested `forall`s or contexts in two parts: 1. GHC, in the process of splitting apart a GADT's type, extracts out the leading `forall` and context (if they are provided). To accomplish this splitting, the renamer uses the GHC.Hs.Type.splitLHsGADTPrefixTy function, which is careful not to remove parentheses surrounding the leading `forall` or context (as these parentheses can be syntactically significant). If the third result returned by splitLHsGADTPrefixTy contains any `forall`s or contexts, then they must be nested, so they will be rejected. Note that this step applies to both prefix and record GADTs alike, as they both have syntax which permits `forall`s and contexts. The difference is where this step happens: * For prefix GADTs, this happens in the renamer (in rnConDecl), as we cannot split until after the type operator fixities have been resolved. * For record GADTs, this happens in the parser (in mkGadtDecl). 2. If the GADT type is prefix, the renamer (in the ConDeclGADTPrefixPs case of rnConDecl) will then check for nested `forall`s/contexts in the body of a prefix GADT type, after it has determined what all of the argument types are. This step is necessary to catch examples like MkT4 above, where the nested quantification occurs after a visible argument type. -} -- | The arguments in a Haskell98-style data constructor. 
type HsConDeclH98Details pass = HsConDetails Void (HsScaled pass (LBangType pass)) (XRec pass [LConDeclField pass]) -- The Void argument to HsConDetails here is a reflection of the fact that -- type applications are not allowed in data constructor declarations. -- | The arguments in a GADT constructor. Unlike Haskell98-style constructors, -- GADT constructors cannot be declared with infix syntax. As a result, we do -- not use 'HsConDetails' here, as 'InfixCon' would be an unrepresentable -- state. (There is a notion of infix GADT constructors for the purposes of -- derived Show instances—see Note [Infix GADT constructors] in -- GHC.Tc.TyCl—but that is an orthogonal concern.) data HsConDeclGADTDetails pass = PrefixConGADT !(XPrefixConGADT pass) [HsScaled pass (LBangType pass)] | RecConGADT !(XRecConGADT pass) (XRec pass [LConDeclField pass]) | XConDeclGADTDetails !(XXConDeclGADTDetails pass) type family XPrefixConGADT p type family XRecConGADT p type family XXConDeclGADTDetails p {- ************************************************************************ * * Instance declarations * * ************************************************************************ Note [Type family instance declarations in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The data type FamEqn represents one equation of a type family instance. Aside from the pass, it is also parameterised over another field, feqn_rhs. feqn_rhs is either an HsDataDefn (for data family instances) or an LHsType (for type family instances). Type family instances also include associated type family default equations. That is because a default for a type family looks like this: class C a where type family F a b :: Type type F c d = (c,d) -- Default instance The default declaration is really just a `type instance` declaration, but one with particularly simple patterns: they must all be distinct type variables. 
That's because we will instantiate it (in an instance declaration for `C`) if we don't give an explicit instance for `F`. Note that the names of the variables don't need to match those of the class: it really is like a free-standing `type instance` declaration. -} ----------------- Type synonym family instances ------------- -- | Located Type Family Instance Equation type LTyFamInstEqn pass = XRec pass (TyFamInstEqn pass) -- | HsFamEqnPats represents patterns on the left-hand side of a type instance, -- e.g. `type instance F @k (a :: k) = a` has patterns `@k` and `(a :: k)`. -- -- HsFamEqnPats used to be called HsTyPats but it was renamed to avoid confusion -- with a different notion of type patterns, see #23657. type HsFamEqnPats pass = [LHsTypeArg pass] {- Note [Family instance declaration binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The feqn_pats field of FamEqn (family instance equation) stores the LHS type (and kind) patterns. Any type (and kind) variables contained in these type patterns are bound in the feqn_bndrs field. Note that in particular: * The feqn_bndrs *include* any anonymous wildcards. For example type instance F a _ = a The feqn_bndrs will be HsOuterImplicit {a, _}. Remember that each separate wildcard '_' gets its own unique. In this context wildcards behave just like an ordinary type variable, only anonymous. * The feqn_bndrs *include* type variables that are already in scope Eg class C s t where type F t p :: * instance C w (a,b) where type F (a,b) x = x->a The feqn_bndrs of the F decl is HsOuterImplicit {a,b,x}, even though the F decl is nested inside the 'instance' decl. However after the renamer, the uniques will match up: instance C w7 (a8,b9) where type F (a8,b9) x10 = x10->a8 so that we can compare the type pattern in the 'instance' decl and in the associated 'type' decl c.f. 
Note [TyVar binders for associated decls] -} -- | Type Family Instance Equation type TyFamInstEqn pass = FamEqn pass (LHsType pass) -- Here, the @pats@ are type patterns (with kind and type bndrs). -- See Note [Family instance declaration binders] -- | Type family default declarations. -- A convenient synonym for 'TyFamInstDecl'. -- See @Note [Type family instance declarations in HsSyn]@. type TyFamDefltDecl = TyFamInstDecl -- | Located type family default declarations. type LTyFamDefltDecl pass = XRec pass (TyFamDefltDecl pass) -- | Located Type Family Instance Declaration type LTyFamInstDecl pass = XRec pass (TyFamInstDecl pass) -- | Type Family Instance Declaration data TyFamInstDecl pass = TyFamInstDecl { tfid_xtn :: XCTyFamInstDecl pass , tfid_eqn :: TyFamInstEqn pass } | XTyFamInstDecl !(XXTyFamInstDecl pass) ----------------- Data family instances ------------- -- | Located Data Family Instance Declaration type LDataFamInstDecl pass = XRec pass (DataFamInstDecl pass) -- | Data Family Instance Declaration newtype DataFamInstDecl pass = DataFamInstDecl { dfid_eqn :: FamEqn pass (HsDataDefn pass) } ----------------- Family instances (common types) ------------- -- | Family Equation -- -- One equation in a type family instance declaration, data family instance -- declaration, or type family default. 
-- See Note [Type family instance declarations in HsSyn] -- See Note [Family instance declaration binders] data FamEqn pass rhs = FamEqn { feqn_ext :: XCFamEqn pass rhs , feqn_tycon :: LIdP pass , feqn_bndrs :: HsOuterFamEqnTyVarBndrs pass -- ^ Optional quantified type vars , feqn_pats :: HsFamEqnPats pass , feqn_fixity :: LexicalFixity -- ^ Fixity used in the declaration , feqn_rhs :: rhs } | XFamEqn !(XXFamEqn pass rhs) ----------------- Class instances ------------- -- | Located Class Instance Declaration type LClsInstDecl pass = XRec pass (ClsInstDecl pass) -- | Class Instance Declaration data ClsInstDecl pass = ClsInstDecl { cid_ext :: XCClsInstDecl pass , cid_poly_ty :: LHsSigType pass -- Context => Class Instance-type -- Using a polytype means that the renamer conveniently -- figures out the quantified type variables for us. , cid_binds :: LHsBinds pass -- Class methods , cid_sigs :: [LSig pass] -- User-supplied pragmatic info , cid_tyfam_insts :: [LTyFamInstDecl pass] -- Type family instances , cid_datafam_insts :: [LDataFamInstDecl pass] -- Data family instances , cid_overlap_mode :: Maybe (XRec pass OverlapMode) } | XClsInstDecl !(XXClsInstDecl pass) ----------------- Instances of all kinds ------------- -- | Located Instance Declaration type LInstDecl pass = XRec pass (InstDecl pass) -- | Instance Declaration data InstDecl pass -- Both class and family instances = ClsInstD { cid_d_ext :: XClsInstD pass , cid_inst :: ClsInstDecl pass } | DataFamInstD -- data family instance { dfid_ext :: XDataFamInstD pass , dfid_inst :: DataFamInstDecl pass } | TyFamInstD -- type family instance { tfid_ext :: XTyFamInstD pass , tfid_inst :: TyFamInstDecl pass } | XInstDecl !(XXInstDecl pass) {- ************************************************************************ * * \subsection[DerivDecl]{A stand-alone instance deriving declaration} * * ************************************************************************ -} -- | Located stand-alone 'deriving instance' declaration 
type LDerivDecl pass = XRec pass (DerivDecl pass) -- | Stand-alone 'deriving instance' declaration data DerivDecl pass = DerivDecl { deriv_ext :: XCDerivDecl pass , deriv_type :: LHsSigWcType pass -- ^ The instance type to derive. -- -- It uses an 'LHsSigWcType' because the context is allowed to be a -- single wildcard: -- -- > deriving instance _ => Eq (Foo a) -- -- Which signifies that the context should be inferred. -- See Note [Inferring the instance context] in GHC.Tc.Deriv.Infer. , deriv_strategy :: Maybe (LDerivStrategy pass) , deriv_overlap_mode :: Maybe (XRec pass OverlapMode) } | XDerivDecl !(XXDerivDecl pass) {- ************************************************************************ * * Deriving strategies * * ************************************************************************ -} -- | A 'Located' 'DerivStrategy'. type LDerivStrategy pass = XRec pass (DerivStrategy pass) -- | Which technique the user explicitly requested when deriving an instance. data DerivStrategy pass -- See Note [Deriving strategies] in GHC.Tc.Deriv = StockStrategy (XStockStrategy pass) -- ^ GHC's \"standard\" strategy, which is to implement a -- custom instance for the data type. This only works -- for certain types that GHC knows about (e.g., 'Eq', -- 'Show', 'Functor' when @-XDeriveFunctor@ is enabled, -- etc.) 
| AnyclassStrategy (XAnyClassStrategy pass) -- ^ @-XDeriveAnyClass@ | NewtypeStrategy (XNewtypeStrategy pass) -- ^ @-XGeneralizedNewtypeDeriving@ | ViaStrategy (XViaStrategy pass) -- ^ @-XDerivingVia@ {- ************************************************************************ * * \subsection[DefaultDecl]{A @default@ declaration} * * ************************************************************************ -} -- | Located Default Declaration type LDefaultDecl pass = XRec pass (DefaultDecl pass) -- See Note [Named default declarations] in GHC.Tc.Gen.Default -- | Default Declaration data DefaultDecl pass = DefaultDecl { defd_ext :: XCDefaultDecl pass , defd_class :: Maybe (LIdP pass) -- Nothing in absence of NamedDefaults , defd_defaults :: [LHsType pass] } | XDefaultDecl !(XXDefaultDecl pass) {- ************************************************************************ * * \subsection{Foreign function interface declaration} * * ************************************************************************ -} -- foreign declarations are distinguished as to whether they define or use a -- Haskell name -- -- * the Boolean value indicates whether the pre-standard deprecated syntax -- has been used -- | Located Foreign Declaration type LForeignDecl pass = XRec pass (ForeignDecl pass) -- | Foreign Declaration data ForeignDecl pass = ForeignImport { fd_i_ext :: XForeignImport pass -- Post typechecker, rep_ty ~ sig_ty , fd_name :: LIdP pass -- defines this name , fd_sig_ty :: LHsSigType pass -- sig_ty , fd_fi :: ForeignImport pass } | ForeignExport { fd_e_ext :: XForeignExport pass -- Post typechecker, rep_ty ~ sig_ty , fd_name :: LIdP pass -- uses this name , fd_sig_ty :: LHsSigType pass -- sig_ty , fd_fe :: ForeignExport pass } | XForeignDecl !(XXForeignDecl pass) {- In both ForeignImport and ForeignExport: sig_ty is the type given in the Haskell code rep_ty is the representation for this type, i.e. with newtypes coerced away and type functions evaluated. 
Thus if the declaration is valid, then rep_ty will only use types such as Int and IO that we know how to make foreign calls with. -} -- Specification Of an imported external entity in dependence on the calling -- convention -- data ForeignImport pass = -- import of a C entity -- -- * the two strings specifying a header file or library -- may be empty, which indicates the absence of a -- header or object specification (both are not used -- in the case of `CWrapper' and when `CFunction' -- has a dynamic target) -- -- * the calling convention is irrelevant for code -- generation in the case of `CLabel', but is needed -- for pretty printing -- -- * `Safety' is irrelevant for `CLabel' and `CWrapper' -- CImport (XCImport pass) (XRec pass CCallConv) -- ccall (XRec pass Safety) -- interruptible, safe or unsafe (Maybe Header) -- name of C header CImportSpec -- details of the C entity | XForeignImport !(XXForeignImport pass) -- details of an external C entity -- data CImportSpec = CLabel CLabelString -- import address of a C label | CFunction CCallTarget -- static or dynamic function | CWrapper -- wrapper to expose closures -- (former f.e.d.) 
deriving Data -- specification of an externally exported entity in dependence on the calling -- convention -- data ForeignExport pass = CExport (XCExport pass) (XRec pass CExportSpec) -- contains the calling convention | XForeignExport !(XXForeignExport pass) {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ -} -- | Located Rule Declarations type LRuleDecls pass = XRec pass (RuleDecls pass) -- | Rule Declarations data RuleDecls pass = HsRules { rds_ext :: XCRuleDecls pass , rds_rules :: [LRuleDecl pass] } | XRuleDecls !(XXRuleDecls pass) -- | Located Rule Declaration type LRuleDecl pass = XRec pass (RuleDecl pass) -- | Rule Declaration data RuleDecl pass = HsRule -- Source rule { rd_ext :: XHsRule pass -- ^ After renamer, free-vars from the LHS and RHS , rd_name :: XRec pass RuleName -- ^ Note [Pragma source text] in "GHC.Types.SourceText" , rd_act :: Activation , rd_tyvs :: Maybe [LHsTyVarBndr () (NoGhcTc pass)] -- ^ Forall'd type vars , rd_tmvs :: [LRuleBndr pass] -- ^ Forall'd term vars, before typechecking; after typechecking -- this includes all forall'd vars , rd_lhs :: XRec pass (HsExpr pass) , rd_rhs :: XRec pass (HsExpr pass) } | XRuleDecl !(XXRuleDecl pass) -- | Located Rule Binder type LRuleBndr pass = XRec pass (RuleBndr pass) -- | Rule Binder data RuleBndr pass = RuleBndr (XCRuleBndr pass) (LIdP pass) | RuleBndrSig (XRuleBndrSig pass) (LIdP pass) (HsPatSigType pass) | XRuleBndr !(XXRuleBndr pass) collectRuleBndrSigTys :: [RuleBndr pass] -> [HsPatSigType pass] collectRuleBndrSigTys bndrs = [ty | RuleBndrSig _ _ ty <- bndrs] {- ************************************************************************ * * \subsection[DocDecl]{Document comments} * * ************************************************************************ -} -- | Located Documentation comment Declaration type LDocDecl pass = XRec pass (DocDecl pass) -- | 
Documentation comment Declaration data DocDecl pass = DocCommentNext (LHsDoc pass) | DocCommentPrev (LHsDoc pass) | DocCommentNamed String (LHsDoc pass) | DocGroup Int (LHsDoc pass) deriving instance (Data pass, Data (IdP pass)) => Data (DocDecl pass) docDeclDoc :: DocDecl pass -> LHsDoc pass docDeclDoc (DocCommentNext d) = d docDeclDoc (DocCommentPrev d) = d docDeclDoc (DocCommentNamed _ d) = d docDeclDoc (DocGroup _ d) = d {- ************************************************************************ * * \subsection[DeprecDecl]{Deprecations} * * ************************************************************************ We use exported entities for things to deprecate. -} -- | Located Warning Declarations type LWarnDecls pass = XRec pass (WarnDecls pass) -- | Warning pragma Declarations data WarnDecls pass = Warnings { wd_ext :: XWarnings pass , wd_warnings :: [LWarnDecl pass] } | XWarnDecls !(XXWarnDecls pass) -- | Located Warning pragma Declaration type LWarnDecl pass = XRec pass (WarnDecl pass) -- | Warning pragma Declaration data WarnDecl pass = Warning (XWarning pass) [LIdP pass] (WarningTxt pass) | XWarnDecl !(XXWarnDecl pass) {- ************************************************************************ * * \subsection[AnnDecl]{Annotations} * * ************************************************************************ -} -- | Located Annotation Declaration type LAnnDecl pass = XRec pass (AnnDecl pass) -- | Annotation Declaration data AnnDecl pass = HsAnnotation (XHsAnnotation pass) (AnnProvenance pass) (XRec pass (HsExpr pass)) | XAnnDecl !(XXAnnDecl pass) -- | Annotation Provenance data AnnProvenance pass = ValueAnnProvenance (LIdP pass) | TypeAnnProvenance (LIdP pass) | ModuleAnnProvenance -- deriving instance Functor AnnProvenance -- deriving instance Foldable AnnProvenance -- deriving instance Traversable AnnProvenance -- deriving instance (Data pass) => Data (AnnProvenance pass) annProvenanceName_maybe :: forall p. 
UnXRec p => AnnProvenance p -> Maybe (IdP p) annProvenanceName_maybe (ValueAnnProvenance (unXRec @p -> name)) = Just name annProvenanceName_maybe (TypeAnnProvenance (unXRec @p -> name)) = Just name annProvenanceName_maybe ModuleAnnProvenance = Nothing {- ************************************************************************ * * \subsection[RoleAnnot]{Role annotations} * * ************************************************************************ -} -- | Located Role Annotation Declaration type LRoleAnnotDecl pass = XRec pass (RoleAnnotDecl pass) -- See #8185 for more info about why role annotations are -- top-level declarations -- | Role Annotation Declaration data RoleAnnotDecl pass = RoleAnnotDecl (XCRoleAnnotDecl pass) (LIdP pass) -- type constructor [XRec pass (Maybe Role)] -- optional annotations | XRoleAnnotDecl !(XXRoleAnnotDecl pass) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Expr.hs0000644000000000000000000015651407346545000023167 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Abstract Haskell syntax for expressions. 
module Language.Haskell.Syntax.Expr where import Language.Haskell.Syntax.Basic import Language.Haskell.Syntax.Decls import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Module.Name (ModuleName) import Language.Haskell.Syntax.Type import Language.Haskell.Syntax.Binds -- others: import GHC.Types.SourceText (StringLiteral) import GHC.Data.FastString (FastString) -- libraries: import Data.Data hiding (Fixity(..)) import Data.Bool import Data.Eq import Data.Maybe import Data.List.NonEmpty ( NonEmpty ) import GHC.Types.Name.Reader {- Note [RecordDotSyntax field updates] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The extensions @OverloadedRecordDot@ @OverloadedRecordUpdate@ together enable record updates like @a{foo.bar.baz = 1}@. Introducing this syntax slightly complicates parsing. This note explains how it's done. In the event a record is being constructed or updated, it's this production that's in play: @ aexp1 -> aexp1 '{' fbinds '}' { ... mkHsRecordPV ... $1 (snd $3) } @ @fbinds@ is a list of field bindings. @mkHsRecordPV@ is a function of the @DisambECP b@ typeclass, see Note [Ambiguous syntactic categories]. The "normal" rules for an @fbind@ are: @ fbind : qvar '=' texp | qvar @ These rules compute values of @LHsRecField GhcPs (Located b)@. They apply in the context of record construction, record updates, record patterns and record expressions. That is, @b@ ranges over @HsExpr GhcPs@, @HsPat GhcPs@ and @HsCmd GhcPs@. When @OverloadedRecordDot@ and @OverloadedRecordUpdate@ are both enabled, two additional @fbind@ rules are admitted: @ | field TIGHT_INFIX_PROJ fieldToUpdate '=' texp | field TIGHT_INFIX_PROJ fieldToUpdate @ These rules only make sense when parsing record update expressions (that is, patterns and commands cannot be parsed by these rules and neither record constructions). 
The results of these new rules cannot be represented by @LHsRecField GhcPs (LHsExpr GhcPs)@ values as the type is defined today. We minimize modifying existing code by having these new rules calculate @LHsRecProj GhcPs (LHsExpr GhcPs)@ ("record projection") values instead: @ newtype FieldLabelStrings = FieldLabelStrings [XRec p (DotFieldOcc p)] type RecProj arg = HsFieldBind FieldLabelStrings arg type LHsRecProj p arg = XRec p (RecProj arg) @ The @fbind@ rule is then given the type @fbind :: { forall b. DisambECP b => PV (Fbind b) }@ accommodating both alternatives: @ type Fbind b = Either (LHsRecField GhcPs (LocatedA b)) ( LHsRecProj GhcPs (LocatedA b)) @ In @data HsExpr p@, the @RecordUpd@ constuctor indicates regular updates vs. projection updates by means of the @rupd_flds@ member type, an @Either@ instance: @ | RecordUpd { rupd_ext :: XRecordUpd p , rupd_expr :: LHsExpr p , rupd_flds :: Either [LHsRecUpdField p] [LHsRecUpdProj p] } @ Here, @ type RecUpdProj p = RecProj p (LHsExpr p) type LHsRecUpdProj p = XRec p (RecUpdProj p) @ and @Left@ values indicating regular record update, @Right@ values updates desugared to @setField@s. If @OverloadedRecordUpdate@ is enabled, any updates parsed as @LHsRecField GhcPs@ values are converted to @LHsRecUpdProj GhcPs@ values (see function @mkRdrRecordUpd@ in 'GHC.Parser.PostProcess'). -} -- | RecordDotSyntax field updates type LFieldLabelStrings p = XRec p (FieldLabelStrings p) newtype FieldLabelStrings p = FieldLabelStrings [XRec p (DotFieldOcc p)] -- Field projection updates (e.g. @foo.bar.baz = 1@). See Note -- [RecordDotSyntax field updates]. type RecProj p arg = HsFieldBind (LFieldLabelStrings p) arg -- The phantom type parameter @p@ is for symmetry with @LHsRecField p -- arg@ in the definition of @data Fbind@ (see GHC.Parser.Process). type LHsRecProj p arg = XRec p (RecProj p arg) -- These two synonyms are used in the definition of syntax @RecordUpd@ -- below. 
type RecUpdProj p = RecProj p (LHsExpr p) type LHsRecUpdProj p = XRec p (RecUpdProj p) -- | Haskell Record Update Fields. data LHsRecUpdFields p where -- | A regular (non-overloaded) record update. RegularRecUpdFields :: { xRecUpdFields :: XLHsRecUpdLabels p , recUpdFields :: [LHsRecUpdField p p] } -> LHsRecUpdFields p -- | An overloaded record update. OverloadedRecUpdFields :: { xOLRecUpdFields :: XLHsOLRecUpdLabels p , olRecUpdFields :: [LHsRecUpdProj p] } -> LHsRecUpdFields p {- ************************************************************************ * * \subsection{Expressions proper} * * ************************************************************************ -} -- * Expressions proper -- | Located Haskell Expression type LHsExpr p = XRec p (HsExpr p) ------------------------- {- Note [NoSyntaxExpr] ~~~~~~~~~~~~~~~~~~~~~~ Syntax expressions can be missing (NoSyntaxExprRn or NoSyntaxExprTc) for several reasons: 1. As described in Note [Rebindable if] 2. In order to suppress "not in scope: xyz" messages when a bit of rebindable syntax does not apply. For example, when using an irrefutable pattern in a BindStmt, we don't need a `fail` operator. 3. Rebindable syntax might just not make sense. For example, a BodyStmt contains the syntax for `guard`, but that's used only in monad comprehensions. If we had more of a whiz-bang type system, we might be able to rule this case out statically. -} -- | Syntax Expression -- -- SyntaxExpr is represents the function used in interpreting rebindable -- syntax. In the parser, we have no information to supply; in the renamer, -- we have the name of the function (but see -- Note [Monad fail : Rebindable syntax, overloaded strings] for a wrinkle) -- and in the type-checker we have a more elaborate structure 'SyntaxExprTc'. -- -- In some contexts, rebindable syntax is not implemented, and so we have -- constructors to represent that possibility in both the renamer and -- typechecker instantiations. -- -- E.g. 
@(>>=)@ is filled in before the renamer by the appropriate 'Name' for -- @(>>=)@, and then instantiated by the type checker with its type args -- etc type family SyntaxExpr p {- Note [Record selectors in the AST] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is how record selectors are expressed in GHC's AST: Example data type data T = MkT { size :: Int } Record selectors: | GhcPs | GhcRn | GhcTc | ----------------------------------------------------------------------------------| size (assuming one | HsVar | HsRecSel | HsRecSel | 'size' in scope) | | | | ----------------------|--------------|----------------------|---------------------| .size (assuming | HsProjection | getField @"size" | getField @"size" | OverloadedRecordDot) | | | | ----------------------|--------------|----------------------|---------------------| e.size (assuming | HsGetField | getField @"size" e | getField @"size" e | OverloadedRecordDot) | | | | NB 1: DuplicateRecordFields makes no difference to the first row of this table, except that if 'size' is a field of more than one data type, then a naked use of the record selector 'size' may well be ambiguous. You have to use a qualified name. And there is no way to do this if both data types are declared in the same module. NB 2: The notation getField @"size" e is short for HsApp (HsAppType (HsVar "getField") (HsWC (HsTyLit (HsStrTy "size")) [])) e. We track the original parsed syntax via ExpandedThingRn. -} {- Note [Non-overloaded record field selectors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT { x,y :: Int } f r x = x + y r This parses with HsVar for x, y, r on the RHS of f. Later, the renamer recognises that y in the RHS of f is really a record selector, and changes it to a HsRecSel. In contrast x is locally bound, shadowing the record selector, and stays as an HsVar. 
The renamer adds the Name of the record selector into the XCFieldOcc extension field, The typechecker keeps HsRecSel as HsRecSel, and transforms the record-selector Name to an Id. -} {- Note [Types in terms] ~~~~~~~~~~~~~~~~~~~~~~~~ Types-in-terms is a notion introduced by GHC Proposal #281. It refers to the extension of term syntax (HsExpr in the AST, infixexp2 in Parser.y) with constructs that previously could only occur at the type level: * Function arrows: a -> b * Multiplicity-polymorphic function arrows: a %m -> b (LinearTypes) * Constraint arrows: a => b * Universal quantification: forall a. b * Visible universal quantification: forall a -> b This syntax can't be used to construct a type at the term level because `Type` is not inhabited by any terms. Its use is limited to required type arguments: -- Error: t :: Type t = (Int -> String) -- Not supported by GHC, `tcExpr` emits `TcRnIllegalTypeExpr` -- OK: s :: String s = vfun (Int -> String) -- Valid use in a required type argument, -- see `expr_to_type` (GHC.Tc.Gen.App) where vfun :: forall t -> Typeable t => String vfun t = show (typeRep @t) In GHC, types-in-terms are implemented by the following additions to the AST of expressions and their grammar: -- Language/Haskell/Syntax/Expr.hs data HsExpr p = ... | HsForAll (XForAll p) (HsForAllTelescope p) (LHsExpr p) | HsQual (XQual p) (XRec p [LHsExpr p]) (LHsExpr p) | HsFunArr (XFunArr p) (HsArrowOf (LHsExpr p) p) (LHsExpr p) (LHsExpr p) -- GHC/Parser.y infixexp2 :: { ECP } : infixexp %shift { ... } | infixexp '->' infixexp2 { ... } | infixexp expmult '->' infixexp2 { ... } | infixexp '->.' infixexp2 { ... } | expcontext '=>' infixexp2 { ... } | forall_telescope infixexp2 { ... } These constructors and non-terminals mirror those found in HsType HsType | HsExpr -------------+----------- HsForAllTy | HsForAll HsFunTy | HsFunArr HsQualTy | HsQual The resulting code duplication can be removed if we unify HsExpr and HsType into one type (#25121). 
Per the proposal, the constituents of types-in-terms are parsed and renamed as terms, and forall-bound variables inhabit the term namespace. Example: h = \a -> g (forall a. Maybe a) a To ensure that the `a` in `Maybe a` refers to the innermost binding (i.e. to the forall-bound `a` and not to the lambda-bound `a`), we must consistently use the term namespace `varName` throughout the expression. We set the correct namespace using `setTelescopeBndrsNameSpace` in GHC.Parser.PostProcess and GHC.ThToHs. `exprCtOrigin` returns `Shouldn'tHappenOrigin` for types-in-terms because they either undergo the T2T translation `expr_to_type` in `tcVDQ` or result in `TcRnIllegalTypeExpr`. -} -- | A Haskell expression. data HsExpr p = HsVar (XVar p) (LIdP p) -- ^ Variable -- See Note [Located RdrNames] | HsUnboundVar (XUnboundVar p) RdrName -- ^ Unbound variable; also used for "holes" -- (_ or _x). -- Turned from HsVar to HsUnboundVar by the -- renamer, when it finds an out-of-scope -- variable or hole. -- The (XUnboundVar p) field becomes an HoleExprRef -- after typechecking; this is where the -- erroring expression will be written after -- solving. See Note [Holes] in GHC.Tc.Types.Constraint. 
| HsOverLabel (XOverLabel p) FastString -- ^ Overloaded label (Note [Overloaded labels] in GHC.OverloadedLabels) | HsIPVar (XIPVar p) HsIPName -- ^ Implicit parameter (not in use after typechecking) | HsOverLit (XOverLitE p) (HsOverLit p) -- ^ Overloaded literals | HsLit (XLitE p) (HsLit p) -- ^ Simple (non-overloaded) literals -- | Lambda, Lambda-case, and Lambda-cases | HsLam (XLam p) HsLamVariant -- ^ Tells whether this is for lambda, \case, or \cases (MatchGroup p (LHsExpr p)) -- ^ LamSingle: one match of arity >= 1 -- LamCase: many arity-1 matches -- LamCases: many matches of uniform arity >= 1 | HsApp (XApp p) (LHsExpr p) (LHsExpr p) -- ^ Application | HsAppType (XAppTypeE p) -- After typechecking: the type argument (LHsExpr p) (LHsWcType (NoGhcTc p)) -- ^ Visible type application -- -- Explicit type argument; e.g f @Int x y -- NB: Has wildcards, but no implicit quantification -- | Operator applications: -- NB Bracketed ops such as (+) come out as Vars. -- NB Sadly, we need an expr for the operator in an OpApp/Section since -- the renamer may turn a HsVar into HsRecSel or HsUnboundVar | OpApp (XOpApp p) (LHsExpr p) -- left operand (LHsExpr p) -- operator (LHsExpr p) -- right operand -- | Negation operator. 
Contains the negated expression and the name -- of 'negate' | NegApp (XNegApp p) (LHsExpr p) (SyntaxExpr p) | HsPar (XPar p) (LHsExpr p) -- ^ Parenthesised expr; see Note [Parens in HsSyn] | SectionL (XSectionL p) (LHsExpr p) -- operand; see Note [Sections in HsSyn] (LHsExpr p) -- operator | SectionR (XSectionR p) (LHsExpr p) -- operator; see Note [Sections in HsSyn] (LHsExpr p) -- operand -- | Used for explicit tuples and sections thereof -- Note [ExplicitTuple] | ExplicitTuple (XExplicitTuple p) [HsTupArg p] Boxity -- | Used for unboxed sum types | ExplicitSum (XExplicitSum p) ConTag -- Alternative (one-based) SumWidth -- Sum arity (LHsExpr p) | HsCase (XCase p) (LHsExpr p) (MatchGroup p (LHsExpr p)) | HsIf (XIf p) -- GhcPs: this is a Bool; False <=> do not use -- rebindable syntax (LHsExpr p) -- predicate (LHsExpr p) -- then part (LHsExpr p) -- else part -- | Multi-way if | HsMultiIf (XMultiIf p) [LGRHS p (LHsExpr p)] -- | let(rec) | HsLet (XLet p) (HsLocalBinds p) (LHsExpr p) | HsDo (XDo p) -- Type of the whole expression HsDoFlavour (XRec p [ExprLStmt p]) -- "do":one or more stmts -- | Syntactic list: [a,b,c,...] -- See Note [Empty lists] | ExplicitList (XExplicitList p) -- Gives type of components of list [LHsExpr p] -- | Record construction | RecordCon { rcon_ext :: XRecordCon p , rcon_con :: XRec p (ConLikeP p) -- The constructor , rcon_flds :: HsRecordBinds p } -- The fields -- | Record update | RecordUpd { rupd_ext :: XRecordUpd p , rupd_expr :: LHsExpr p , rupd_flds :: LHsRecUpdFields p } -- For a type family, the arg types are of the *instance* tycon, -- not the family tycon -- | Record field selection e.g @z.x@. -- This case only arises when the OverloadedRecordDot langauge -- extension is enabled. See Note [Record selectors in the AST]. | HsGetField { gf_ext :: XGetField p , gf_expr :: LHsExpr p , gf_field :: XRec p (DotFieldOcc p) } -- | Record field selector. e.g. 
@(.x)@ or @(.x.y)@ -- -- This case only arises when the OverloadedRecordDot langauge -- extensions is enabled. See Note [Record selectors in the AST]. | HsProjection { proj_ext :: XProjection p , proj_flds :: NonEmpty (DotFieldOcc p) } -- | Expression with an explicit type signature. @e :: type@ | ExprWithTySig (XExprWithTySig p) (LHsExpr p) (LHsSigWcType (NoGhcTc p)) -- | Arithmetic sequence | ArithSeq (XArithSeq p) (Maybe (SyntaxExpr p)) -- For OverloadedLists, the fromList witness (ArithSeqInfo p) -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation ----------------------------------------------------------- -- MetaHaskell Extensions | HsTypedBracket (XTypedBracket p) (LHsExpr p) | HsUntypedBracket (XUntypedBracket p) (HsQuote p) | HsTypedSplice (XTypedSplice p) (LHsExpr p) -- `$$z` or `$$(f 4)` | HsUntypedSplice (XUntypedSplice p) (HsUntypedSplice p) ----------------------------------------------------------- -- Arrow notation extension -- | @proc@ notation for Arrows | HsProc (XProc p) (LPat p) -- arrow abstraction, proc (LHsCmdTop p) -- body of the abstraction -- always has an empty stack --------------------------------------- -- static pointers extension | HsStatic (XStatic p) -- Free variables of the body, and type after typechecking (LHsExpr p) -- Body --------------------------------------- -- Expressions annotated with pragmas, written as {-# ... #-} | HsPragE (XPragE p) (HsPragE p) (LHsExpr p) -- Embed the syntax of types into expressions. -- Used with @RequiredTypeArguments@, e.g. @fn (type (Int -> Bool))@. | HsEmbTy (XEmbTy p) (LHsWcType (NoGhcTc p)) -- | Forall-types @forall tvs. t@ and @forall tvs -> t@. -- Used with @RequiredTypeArguments@, e.g. @fn (forall a. Proxy a)@. -- See Note [Types in terms] | HsForAll (XForAll p) (HsForAllTelescope p) (LHsExpr p) -- Constrained types @ctx => t@. -- Used with @RequiredTypeArguments@, e.g. @fn (Bounded a => a)@. 
-- See Note [Types in terms] | HsQual (XQual p) (XRec p [LHsExpr p]) (LHsExpr p) -- | Function types @a -> b@. -- Used with @RequiredTypeArguments@, e.g. @fn (Int -> Bool)@. -- See Note [Types in terms] | HsFunArr (XFunArr p) (HsArrowOf (LHsExpr p) p) (LHsExpr p) (LHsExpr p) | XExpr !(XXExpr p) -- Note [Trees That Grow] in Language.Haskell.Syntax.Extension for the -- general idea, and Note [Rebindable syntax and XXExprGhcRn] in GHC.Hs.Expr -- for an example of how we use it. -- --------------------------------------------------------------------- data DotFieldOcc p = DotFieldOcc { dfoExt :: XCDotFieldOcc p , dfoLabel :: XRec p FieldLabelString } | XDotFieldOcc !(XXDotFieldOcc p) -- --------------------------------------------------------------------- -- | A pragma, written as {-# ... #-}, that may appear within an expression. data HsPragE p = HsPragSCC (XSCC p) StringLiteral -- "set cost centre" SCC pragma | XHsPragE !(XXPragE p) -- | Located Haskell Tuple Argument -- -- 'HsTupArg' is used for tuple sections -- @(,a,)@ is represented by -- @ExplicitTuple [Missing ty1, Present a, Missing ty3]@ -- Which in turn stands for @(\x:ty1 \y:ty2. (x,a,y))@ type LHsTupArg id = XRec id (HsTupArg id) -- | Haskell Tuple Argument data HsTupArg id = Present (XPresent id) (LHsExpr id) -- ^ The argument | Missing (XMissing id) -- ^ The argument is missing, but this is its type | XTupArg !(XXTupArg id) -- ^ Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | Which kind of lambda case are we dealing with? data HsLamVariant = LamSingle -- ^ `\p -> e` | LamCase -- ^ `\case pi -> ei ` | LamCases -- ^ `\cases psi -> ei` deriving (Data, Eq) {- Note [Parens in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~ HsPar (and ParPat in patterns, HsParTy in types) is used as follows * HsPar is required; the pretty printer does not add parens. * HsPars are respected when rearranging operator fixities. 
So a * (b + c) means what it says (where the parens are an HsPar) * For ParPat and HsParTy the pretty printer does add parens but this should be a no-op for ParsedSource, based on the pretty printer round trip feature introduced in https://phabricator.haskell.org/rGHC499e43824bda967546ebf95ee33ec1f84a114a7c * ParPat and HsParTy are pretty printed as '( .. )' regardless of whether or not they are strictly necessary. This should be addressed when #13238 is completed, to be treated the same as HsPar. Note [Sections in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~ Sections should always appear wrapped in an HsPar, thus HsPar (SectionR ...) The parser parses sections in a wider variety of situations (See Note [Parsing sections]), but the renamer checks for those parens. This invariant makes pretty-printing easier; we don't need a special case for adding the parens round sections. Note [Rebindable if] ~~~~~~~~~~~~~~~~~~~~ The rebindable syntax for 'if' is a bit special, because when rebindable syntax is *off* we do not want to treat (if c then t else e) as if it was an application (ifThenElse c t e). Why not? Because we allow an 'if' to return *unboxed* results, thus if blah then 3# else 4# whereas that would not be possible using a all to a polymorphic function (because you can't call a polymorphic function at an unboxed type). So we use NoSyntaxExpr to mean "use the old built-in typing rule". A further complication is that, in the `deriving` code, we never want to use rebindable syntax. So, even in GhcPs, we want to denote whether to use rebindable syntax or not. This is done via the type instance for XIf GhcPs. Note [Record Update HsWrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is a wrapper in RecordUpd which is used for the *required* constraints for pattern synonyms. This wrapper is created in the typechecking and is then directly used in the desugaring without modification. 
For example, if we have the record pattern synonym P, pattern P :: (Show a) => a -> Maybe a pattern P{x} = Just x foo = (Just True) { x = False } then `foo` desugars to something like foo = case Just True of P x -> P False hence we need to provide the correct dictionaries to P's matcher on the RHS so that we can build the expression. Note [Located RdrNames] ~~~~~~~~~~~~~~~~~~~~~~~ A number of syntax elements have seemingly redundant locations attached to them. This is deliberate, to allow transformations making use of the exact print annotations to easily correlate a Located Name in the RenamedSource with a Located RdrName in the ParsedSource. There are unfortunately enough differences between the ParsedSource and the RenamedSource that the exact print annotations cannot be used directly with RenamedSource, so this allows a simple mapping to be used based on the location. Note [ExplicitTuple] ~~~~~~~~~~~~~~~~~~~~ An ExplicitTuple is never just a data constructor like (,,,). That is, the `[LHsTupArg p]` argument of `ExplicitTuple` has at least one `Present` member (and is thus never empty). A tuple data constructor like () or (,,,) is parsed as an `HsVar`, not an `ExplicitTuple`, and stays that way. This is important for two reasons: 1. We don't need -XTupleSections for (,,,) 2. The type variables in (,,,) can be instantiated with visible type application. That is, (,,) :: forall a b c. a -> b -> c -> (a,b,c) (True,,) :: forall {b} {c}. b -> c -> (Bool,b,c) Note that the tuple section has *inferred* arguments, while the data constructor has *specified* ones. (See Note [Required, Specified, and Inferred for types] in GHC.Tc.TyCl for background.) Sadly, the grammar for this is actually ambiguous, and it's only thanks to the preference of a shift in a shift/reduce conflict that the parser works as this Note details. Search for a reference to this Note in GHC.Parser for further explanation. 
Note [Empty lists] ~~~~~~~~~~~~~~~~~~ An empty list could be considered either a data constructor (stored with HsVar) or an ExplicitList. This Note describes how empty lists flow through the various phases and why. Parsing ------- An empty list is parsed by the sysdcon nonterminal. It thus comes to life via HsVar nilDataCon (defined in GHC.Builtin.Types). A freshly-parsed (HsExpr GhcPs) empty list is never a ExplicitList. Renaming -------- If -XOverloadedLists is enabled, we must type-check the empty list as if it were a call to fromListN. (This is true regardless of the setting of -XRebindableSyntax.) This is very easy if the empty list is an ExplicitList, but an annoying special case if it's an HsVar. So the renamer changes a HsVar nilDataCon to an ExplicitList [], but only if -XOverloadedLists is on. (Why not always? Read on, dear friend.) This happens in the HsVar case of rnExpr. Type-checking ------------- We want to accept an expression like [] @Int. To do this, we must infer that [] :: forall a. [a]. This is easy if [] is a HsVar with the right DataCon inside. However, the type-checking for explicit lists works differently: [x,y,z] is never polymorphic. Instead, we unify the types of x, y, and z together, and use the unified type as the argument to the cons and nil constructors. Thus, treating [] as an empty ExplicitList in the type-checker would prevent [] @Int from working. However, if -XOverloadedLists is on, then [] @Int really shouldn't be allowed: it's just like fromListN 0 [] @Int. Since fromListN :: forall list. IsList list => Int -> [Item list] -> list that expression really should be rejected. Thus, the renamer's behaviour is exactly what we want: treat [] as a datacon when -XNoOverloadedLists, and as an empty ExplicitList when -XOverloadedLists. See also #13680, which requested [] @Int to work. -} {- HsSyn records exactly where the user put parens, with HsPar. So generally speaking we print without adding any parens. 
However, some code is internally generated, and in some places parens are absolutely required; so for these places we use pprParendLExpr (but don't print double parens of course). For operator applications we don't add parens, because the operator fixities should do the job, except in debug mode (-dppr-debug) so we can see the structure of the parse tree. -} {- ************************************************************************ * * \subsection{Commands (in arrow abstractions)} * * ************************************************************************ We re-use HsExpr to represent these. -} -- | Located Haskell Command (for arrow syntax) type LHsCmd id = XRec id (HsCmd id) -- | Haskell Command (e.g. a "statement" in an Arrow proc block) data HsCmd id = HsCmdArrApp -- Arrow tail, or arrow application (f -< arg) (XCmdArrApp id) -- type of the arrow expressions f, -- of the form a t t', where arg :: t (LHsExpr id) -- arrow expression, f (LHsExpr id) -- input expression, arg HsArrAppType -- higher-order (-<<) or first-order (-<) Bool -- True => right-to-left (f -< arg) -- False => left-to-right (arg >- f) | HsCmdArrForm -- Command formation, (| e cmd1 .. cmdn |) (XCmdArrForm id) (LHsExpr id) -- The operator. -- After type-checking, a type abstraction to be -- applied to the type of the local environment tuple LexicalFixity -- Whether the operator appeared prefix or infix when -- parsed. 
[LHsCmdTop id] -- argument commands | HsCmdApp (XCmdApp id) (LHsCmd id) (LHsExpr id) -- | Lambda-case -- | HsCmdLam (XCmdLamCase id) HsLamVariant (MatchGroup id (LHsCmd id)) -- bodies are HsCmd's | HsCmdPar (XCmdPar id) (LHsCmd id) -- parenthesised command | HsCmdCase (XCmdCase id) (LHsExpr id) (MatchGroup id (LHsCmd id)) -- bodies are HsCmd's | HsCmdIf (XCmdIf id) (SyntaxExpr id) -- cond function (LHsExpr id) -- predicate (LHsCmd id) -- then part (LHsCmd id) -- else part | HsCmdLet (XCmdLet id) (HsLocalBinds id) -- let(rec) (LHsCmd id) | HsCmdDo (XCmdDo id) -- Type of the whole expression (XRec id [CmdLStmt id]) | XCmd !(XXCmd id) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | Haskell arrow application type. data HsArrAppType -- | First order arrow application '-<' = HsHigherOrderApp -- | Higher order arrow application '-<<' | HsFirstOrderApp deriving Data {- | Top-level command, introducing a new arrow. This may occur inside a proc (where the stack is empty) or as an argument of a command-forming operator. -} -- | Located Haskell Top-level Command type LHsCmdTop p = XRec p (HsCmdTop p) -- | Haskell Top-level Command data HsCmdTop p = HsCmdTop (XCmdTop p) (LHsCmd p) | XCmdTop !(XXCmdTop p) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension ----------------------- {- ************************************************************************ * * \subsection{Record binds} * * ************************************************************************ -} -- | Haskell Record Bindings type HsRecordBinds p = HsRecFields p (LHsExpr p) {- ************************************************************************ * * \subsection{@Match@, @GRHSs@, and @GRHS@ datatypes} * * ************************************************************************ @Match@es are sets of pattern bindings and right hand sides for functions, patterns or case branches. 
For example, if a function @g@ is defined as: \begin{verbatim} g (x,y) = y g ((x:ys),y) = y+1, \end{verbatim} then \tr{g} has two @Match@es: @(x,y) = y@ and @((x:ys),y) = y+1@. It is always the case that each element of an @[Match]@ list has the same number of @pats@s inside it. This corresponds to saying that a function defined by pattern matching must have the same number of patterns in each equation. -} data MatchGroup p body = MG { mg_ext :: XMG p body -- Post-typechecker, types of args and result, and origin , mg_alts :: XRec p [LMatch p body] } -- The alternatives -- The type is the type of the entire group -- t1 -> ... -> tn -> tr -- where there are n patterns | XMatchGroup !(XXMatchGroup p body) -- | Located Match type LMatch id body = XRec id (Match id body) data Match p body = Match { m_ext :: XCMatch p body, m_ctxt :: HsMatchContext (LIdP (NoGhcTc p)), -- See Note [m_ctxt in Match] m_pats :: XRec p [LPat p], -- The patterns m_grhss :: (GRHSs p body) } | XMatch !(XXMatch p body) {- Note [m_ctxt in Match] ~~~~~~~~~~~~~~~~~~~~~~ A Match can occur in a number of contexts, such as a FunBind, HsCase, HsLam and so on. In order to simplify tooling processing and pretty print output, the provenance is captured in an HsMatchContext. This is particularly important for the exact print annotations for a multi-equation FunBind. The parser initially creates a FunBind with a single Match in it for every function definition it sees. These are then grouped together by getMonoBind into a single FunBind, where all the Matches are combined. In the process, all the original FunBind fun_id's bar one are discarded, including the locations. This causes a problem for source to source conversions via exact print annotations, so the original fun_ids and infix flags are preserved in the Match, when it originates from a FunBind. 
Example infix function definition requiring individual exact print annotations (&&& ) [] [] = [] xs &&& [] = xs ( &&& ) [] ys = ys -} isInfixMatch :: Match id body -> Bool isInfixMatch match = case m_ctxt match of FunRhs {mc_fixity = Infix} -> True _ -> False -- | Guarded Right-Hand Sides -- -- GRHSs are used both for pattern bindings and for Matches data GRHSs p body = GRHSs { grhssExt :: XCGRHSs p body, grhssGRHSs :: [LGRHS p body], -- ^ Guarded RHSs grhssLocalBinds :: HsLocalBinds p -- ^ The where clause } | XGRHSs !(XXGRHSs p body) -- | Located Guarded Right-Hand Side type LGRHS id body = XRec id (GRHS id body) -- | Guarded Right Hand Side. data GRHS p body = GRHS (XCGRHS p body) [GuardLStmt p] -- Guards body -- Right hand side | XGRHS !(XXGRHS p body) -- We know the list must have at least one @Match@ in it. {- ************************************************************************ * * \subsection{Do stmts and list comprehensions} * * ************************************************************************ -} -- | Located @do@ block Statement type LStmt id body = XRec id (StmtLR id id body) -- | Located Statement with separate Left and Right id's type LStmtLR idL idR body = XRec idL (StmtLR idL idR body) -- | @do@ block Statement type Stmt id body = StmtLR id id body -- | Command Located Statement type CmdLStmt id = LStmt id (LHsCmd id) -- | Command Statement type CmdStmt id = Stmt id (LHsCmd id) -- | Expression Located Statement type ExprLStmt id = LStmt id (LHsExpr id) -- | Expression Statement type ExprStmt id = Stmt id (LHsExpr id) -- | Guard Located Statement type GuardLStmt id = LStmt id (LHsExpr id) -- | Guard Statement type GuardStmt id = Stmt id (LHsExpr id) -- | Ghci Located Statement type GhciLStmt id = LStmt id (LHsExpr id) -- | Ghci Statement type GhciStmt id = Stmt id (LHsExpr id) -- The SyntaxExprs in here are used *only* for do-notation and monad -- comprehensions, which have rebindable syntax. Otherwise they are unused. 
data StmtLR idL idR body -- body should always be (LHs**** idR) = LastStmt -- Always the last Stmt in ListComp, MonadComp, -- and (after the renamer, see GHC.Rename.Expr.checkLastStmt) DoExpr, MDoExpr -- Not used for GhciStmtCtxt, PatGuard, which scope over other stuff (XLastStmt idL idR body) body (Maybe Bool) -- Whether return was stripped -- Just True <=> return with a dollar was stripped by ApplicativeDo -- Just False <=> return without a dollar was stripped by ApplicativeDo -- Nothing <=> Nothing was stripped (SyntaxExpr idR) -- The return operator -- The return operator is used only for MonadComp -- For ListComp we use the baked-in 'return' -- For DoExpr, MDoExpr, we don't apply a 'return' at all -- See Note [Monad Comprehensions] | BindStmt (XBindStmt idL idR body) -- ^ Post renaming has optional fail and bind / (>>=) operator. -- Post typechecking, also has multiplicity of the argument -- and the result type of the function passed to bind; -- that is, (P, S) in (>>=) :: Q -> (R % P -> S) -> T -- See Note [The type of bind in Stmts] (LPat idL) body | BodyStmt (XBodyStmt idL idR body) -- Post typecheck, element type -- of the RHS (used for arrows) body -- See Note [BodyStmt] (SyntaxExpr idR) -- The (>>) operator (SyntaxExpr idR) -- The `guard` operator; used only in MonadComp -- See notes [Monad Comprehensions] | LetStmt (XLetStmt idL idR body) (HsLocalBindsLR idL idR) -- ParStmts only occur in a list/monad comprehension | ParStmt (XParStmt idL idR body) -- Post typecheck, -- S in (>>=) :: Q -> (R -> S) -> T [ParStmtBlock idL idR] (HsExpr idR) -- Polymorphic `mzip` for monad comprehensions (SyntaxExpr idR) -- The `>>=` operator -- See notes [Monad Comprehensions] -- After renaming, the ids are the binders -- bound by the stmts and used after them | TransStmt { trS_ext :: XTransStmt idL idR body, -- Post typecheck, -- R in (>>=) :: Q -> (R -> S) -> T trS_form :: TransForm, trS_stmts :: [ExprLStmt idL], -- Stmts to the *left* of the 'group' -- which generates 
the tuples to be grouped trS_bndrs :: [(IdP idR, IdP idR)], -- See Note [TransStmt binder map] trS_using :: LHsExpr idR, trS_by :: Maybe (LHsExpr idR), -- "by e" (optional) -- Invariant: if trS_form = GroupBy, then grp_by = Just e trS_ret :: SyntaxExpr idR, -- The monomorphic 'return' function for -- the inner monad comprehensions trS_bind :: SyntaxExpr idR, -- The '(>>=)' operator trS_fmap :: HsExpr idR -- The polymorphic 'fmap' function for desugaring -- Only for 'group' forms -- Just a simple HsExpr, because it's -- too polymorphic for tcSyntaxOp } -- See Note [Monad Comprehensions] -- Recursive statement (see Note [How RecStmt works] below) | RecStmt { recS_ext :: XRecStmt idL idR body , recS_stmts :: XRec idR [LStmtLR idL idR body] -- Assume XRec is the same for idL and idR, pick one arbitrarily -- The next two fields are only valid after renaming , recS_later_ids :: [IdP idR] -- The ids are a subset of the variables bound by the -- stmts that are used in stmts that follow the RecStmt , recS_rec_ids :: [IdP idR] -- Ditto, but these variables are the "recursive" ones, -- that are used before they are bound in the stmts of -- the RecStmt. 
-- An Id can be in both groups -- Both sets of Ids are (now) treated monomorphically -- See Note [How RecStmt works] for why they are separate -- Rebindable syntax , recS_bind_fn :: SyntaxExpr idR -- The bind function , recS_ret_fn :: SyntaxExpr idR -- The return function , recS_mfix_fn :: SyntaxExpr idR -- The mfix function } | XStmtLR !(XXStmtLR idL idR body) data TransForm -- The 'f' below is the 'using' function, 'e' is the by function = ThenForm -- then f or then f by e (depending on trS_by) | GroupForm -- then group using f or then group by e using f (depending on trS_by) deriving Data -- | Parenthesised Statement Block data ParStmtBlock idL idR = ParStmtBlock (XParStmtBlock idL idR) [ExprLStmt idL] [IdP idR] -- The variables to be returned (SyntaxExpr idR) -- The return operator | XParStmtBlock !(XXParStmtBlock idL idR) -- | The fail operator -- -- This is used for `.. <-` "bind statements" in do notation, including -- non-monadic "binds" in applicative. -- -- The fail operator is 'Just expr' if it potentially fail monadically. if the -- pattern match cannot fail, or shouldn't fail monadically (regular incomplete -- pattern exception), it is 'Nothing'. -- -- See Note [Monad fail : Rebindable syntax, overloaded strings] for the type of -- expression in the 'Just' case, and why it is so. -- -- See Note [Failing pattern matches in Stmts] for which contexts for -- '@BindStmt@'s should use the monadic fail and which shouldn't. type FailOperator id = Maybe (SyntaxExpr id) {- Note [The type of bind in Stmts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some Stmts, notably BindStmt, keep the (>>=) bind operator. We do NOT assume that it has type (>>=) :: m a -> (a -> m b) -> m b In some cases (see #303, #1537) it might have a more exotic type, such as (>>=) :: m i j a -> (a -> m j k b) -> m i k b So we must be careful not to make assumptions about the type. In particular, the monad may not be uniform throughout. 
Note [TransStmt binder map] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The [(idR,idR)] in a TransStmt behaves as follows: * Before renaming: [] * After renaming: [ (x27,x27), ..., (z35,z35) ] These are the variables bound by the stmts to the left of the 'group' and used either in the 'by' clause, or in the stmts following the 'group' Each item is a pair of identical variables. * After typechecking: [ (x27:Int, x27:[Int]), ..., (z35:Bool, z35:[Bool]) ] Each pair has the same unique, but different *types*. Note [BodyStmt] ~~~~~~~~~~~~~~~ BodyStmts are a bit tricky, because what they mean depends on the context. Consider the following contexts: A do expression of type (m res_ty) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E any_ty: do { ....; E; ... } E :: m any_ty Translation: E >> ... A list comprehensions of type [elt_ty] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E Bool: [ .. | .... E ] [ .. | ..., E, ... ] [ .. | .... | ..., E | ... ] E :: Bool Translation: if E then fail else ... A guard list, guarding a RHS of type rhs_ty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E BooParStmtBlockl: f x | ..., E, ... = ...rhs... E :: Bool Translation: if E then fail else ... A monad comprehension of type (m res_ty) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E Bool: [ .. | .... E ] E :: Bool Translation: guard E >> ... Array comprehensions are handled like list comprehensions. Note [How RecStmt works] ~~~~~~~~~~~~~~~~~~~~~~~~ Example: HsDo [ BindStmt x ex , RecStmt { recS_rec_ids = [a, c] , recS_stmts = [ BindStmt b (return (a,c)) , LetStmt a = ...b... , BindStmt c ec ] , recS_later_ids = [a, b] , return (a b) ] Here, the RecStmt binds a,b,c; but - Only a,b are used in the stmts *following* the RecStmt, - Only a,c are used in the stmts *inside* the RecStmt *before* their bindings Why do we need *both* rec_ids and later_ids? For monads they could be combined into a single set of variables, but not for arrows. 
That follows from the types of the respective feedback operators: mfix :: MonadFix m => (a -> m a) -> m a loop :: ArrowLoop a => a (b,d) (c,d) -> a b c * For mfix, the 'a' covers the union of the later_ids and the rec_ids * For 'loop', 'c' is the later_ids and 'd' is the rec_ids Note [Typing a RecStmt] ~~~~~~~~~~~~~~~~~~~~~~~ A (RecStmt stmts) types as if you had written (v1,..,vn, _, ..., _) <- mfix (\~(_, ..., _, r1, ..., rm) -> do { stmts ; return (v1,..vn, r1, ..., rm) }) where v1..vn are the later_ids r1..rm are the rec_ids Note [Monad Comprehensions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Monad comprehensions require separate functions like 'return' and '>>=' for desugaring. These functions are stored in the statements used in monad comprehensions. For example, the 'return' of the 'LastStmt' expression is used to lift the body of the monad comprehension: [ body | stmts ] => stmts >>= \bndrs -> return body In transform and grouping statements ('then ..' and 'then group ..') the 'return' function is required for nested monad comprehensions, for example: [ body | stmts, then f, rest ] => f [ env | stmts ] >>= \bndrs -> [ body | rest ] BodyStmts require the 'Control.Monad.guard' function for boolean expressions: [ body | exp, stmts ] => guard exp >> [ body | stmts ] Parallel statements require the 'Control.Monad.Zip.mzip' function: [ body | stmts1 | stmts2 | .. ] => mzip stmts1 (mzip stmts2 (..)) >>= \(bndrs1, (bndrs2, ..)) -> return body In any other context than 'MonadComp', the fields for most of these 'SyntaxExpr's stay bottom. Note [Applicative BodyStmt] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ (#12143) For the purposes of ApplicativeDo, we treat any BodyStmt as if it was a BindStmt with a wildcard pattern. 
For example, do x <- A B return x is transformed as if it were do x <- A _ <- B return x so it transforms to (\(x,_) -> x) <$> A <*> B But we have to remember when we treat a BodyStmt like a BindStmt, because in error messages we want to emit the original syntax the user wrote, not our internal representation. So ApplicativeArgOne has a Bool flag that is True when the original statement was a BodyStmt, so that we can pretty-print it correctly. -} {- ************************************************************************ * * Template Haskell quotation brackets * * ************************************************************************ -} {- Note [Quasi-quote overview] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The "quasi-quote" extension is described by Geoff Mainland's paper "Why it's nice to be quoted: quasiquoting for Haskell" (Haskell Workshop 2007). Briefly, one writes [p| stuff |] and the arbitrary string "stuff" gets parsed by the parser 'p', whose type should be Language.Haskell.TH.Quote.QuasiQuoter. 'p' must be defined in another module, because we are going to run it here. It's a bit like an /untyped/ TH splice where the parser is applied the "stuff" as a string, thus: $(p "stuff") Notice that it's an /untyped/ TH splice: it can occur in patterns and types, as well as in expressions; and it runs in the renamer. 
-} -- | Haskell Splice data HsUntypedSplice id = HsUntypedSpliceExpr -- $z or $(f 4) (XUntypedSpliceExpr id) (LHsExpr id) | HsQuasiQuote -- See Note [Quasi-quote overview] (XQuasiQuote id) (IdP id) -- The quoter (the bit between `[` and `|`) (XRec id FastString) -- The enclosed string | XUntypedSplice !(XXUntypedSplice id) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | Haskell (Untyped) Quote = Expr + Pat + Type + Var data HsQuote p = ExpBr (XExpBr p) (LHsExpr p) -- [| expr |] | PatBr (XPatBr p) (LPat p) -- [p| pat |] | DecBrL (XDecBrL p) [LHsDecl p] -- [d| decls |]; result of parser | DecBrG (XDecBrG p) (HsGroup p) -- [d| decls |]; result of renamer | TypBr (XTypBr p) (LHsType p) -- [t| type |] | VarBr (XVarBr p) Bool (LIdP p) -- True: 'x, False: ''T | XQuote !(XXQuote p) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension {- ************************************************************************ * * \subsection{Enumerations and list comprehensions} * * ************************************************************************ -} -- | Arithmetic Sequence Information data ArithSeqInfo id = From (LHsExpr id) | FromThen (LHsExpr id) (LHsExpr id) | FromTo (LHsExpr id) (LHsExpr id) | FromThenTo (LHsExpr id) (LHsExpr id) (LHsExpr id) -- AZ: Should ArithSeqInfo have a TTG extension? {- ************************************************************************ * * \subsection{HsMatchCtxt} * * ************************************************************************ -} -- | Haskell Match Context -- -- Context of a pattern match. This is more subtle than it would seem. See -- Note [FunBind vs PatBind]. 
data HsMatchContext fn = FunRhs -- ^ A pattern matching on an argument of a -- function binding { mc_fun :: fn -- ^ function binder of @f@ -- See Note [mc_fun field of FunRhs] -- See #20415 for a long discussion about this field , mc_fixity :: LexicalFixity -- ^ fixing of @f@ , mc_strictness :: SrcStrictness -- ^ was @f@ banged? -- See Note [FunBind vs PatBind] , mc_an :: XFunRhs } | CaseAlt -- ^Patterns and guards in a case alternative | LamAlt HsLamVariant -- ^Patterns and guards in @\@, @\case@ and @\cases@ | IfAlt -- ^Guards of a multi-way if alternative | ArrowMatchCtxt -- ^A pattern match inside arrow notation HsArrowMatchContext | PatBindRhs -- ^A pattern binding eg [y] <- e = e | PatBindGuards -- ^Guards of pattern bindings, e.g., -- (Just b) | Just _ <- x = e -- | otherwise = e' | RecUpd -- ^Record update [used only in GHC.HsToCore.Expr to -- tell matchWrapper what sort of -- runtime error message to generate] | StmtCtxt (HsStmtContext fn) -- ^Pattern of a do-stmt, list comprehension, -- pattern guard, etc | ThPatSplice -- ^A Template Haskell pattern splice | ThPatQuote -- ^A Template Haskell pattern quotation [p| (a,b) |] | PatSyn -- ^A pattern synonym declaration | LazyPatCtx -- ^An irrefutable pattern {- Note [mc_fun field of FunRhs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HsMatchContext is parameterised over `fn`, the function binder stored in `FunRhs`. This makes pretty printing easy. In the use of `HsMatchContext` in `Match`, it is parameterised thus: data Match p body = Match { m_ctxt :: HsMatchContext (LIdP (NoGhcTc p)), ... } So in a Match, the mc_fun field `FunRhs` will be a `RdrName` in pass `GhcPs`, a `Name` in `GhcRn`, and (importantly) still a `Name` in `GhcTc` -- not an `Id`. See Note [NoGhcTc] in GHC.Hs.Extension. * Why a `Name` in the typechecker phase? Because: * A `Name` is all we need, as it turns out. * Using an `Id` involves knot-tying in the monad, which led to #22695. * Why a /located/ name? 
Because we want to record the location of the Id on the LHS of /this/ match. See Note [m_ctxt in Match]. Example: (&&&) [] [] = [] xs &&& [] = xs The two occurrences of `&&&` have different locations. * Why parameterise `HsMatchContext` over `fn` rather than over the pass `p`? Because during typechecking (specifically GHC.Tc.Gen.Match.tcMatch) we need to convert HsMatchContext (LIdP (NoGhcTc GhcRn)) --> HsMatchContext (LIdP (NoGhcTc GhcTc)) With this parameterisation it's easy; if it was parametersed over `p` we'd need a recursive traversal of the HsMatchContext. See #20415 for a long discussion. -} -- | Haskell Statement Context. data HsStmtContext fn = HsDoStmt HsDoFlavour -- ^ Context for HsDo (do-notation and comprehensions) | PatGuard (HsMatchContext fn) -- ^ Pattern guard for specified thing | ParStmtCtxt (HsStmtContext fn) -- ^ A branch of a parallel stmt | TransStmtCtxt (HsStmtContext fn) -- ^ A branch of a transform stmt | ArrowExpr -- ^ do-notation in an arrow-command context -- | Haskell arrow match context. data HsArrowMatchContext = ProcExpr -- ^ A proc expression | ArrowCaseAlt -- ^ A case alternative inside arrow notation | ArrowLamAlt HsLamVariant -- ^ A \, \case or \cases alternative inside arrow notation data HsDoFlavour = DoExpr (Maybe ModuleName) -- ^[ModuleName.]do { ... } | MDoExpr (Maybe ModuleName) -- ^[ModuleName.]mdo { ... 
} ie recursive do-expression | GhciStmtCtxt -- ^A command-line Stmt in GHCi pat <- rhs | ListComp | MonadComp deriving (Eq, Data) qualifiedDoModuleName_maybe :: HsStmtContext fn -> Maybe ModuleName qualifiedDoModuleName_maybe ctxt = case ctxt of HsDoStmt (DoExpr m) -> m HsDoStmt (MDoExpr m) -> m _ -> Nothing isPatSynCtxt :: HsMatchContext fn -> Bool isPatSynCtxt ctxt = case ctxt of PatSyn -> True _ -> False isComprehensionContext :: HsStmtContext fn -> Bool -- Uses comprehension syntax [ e | quals ] isComprehensionContext (ParStmtCtxt c) = isComprehensionContext c isComprehensionContext (TransStmtCtxt c) = isComprehensionContext c isComprehensionContext ArrowExpr = False isComprehensionContext (PatGuard _) = False isComprehensionContext (HsDoStmt flavour) = isDoComprehensionContext flavour isDoComprehensionContext :: HsDoFlavour -> Bool isDoComprehensionContext GhciStmtCtxt = False isDoComprehensionContext (DoExpr _) = False isDoComprehensionContext (MDoExpr _) = False isDoComprehensionContext ListComp = True isDoComprehensionContext MonadComp = True -- | Is this a monadic context? 
isMonadStmtContext :: HsStmtContext fn -> Bool isMonadStmtContext (ParStmtCtxt ctxt) = isMonadStmtContext ctxt isMonadStmtContext (TransStmtCtxt ctxt) = isMonadStmtContext ctxt isMonadStmtContext (HsDoStmt flavour) = isMonadDoStmtContext flavour isMonadStmtContext (PatGuard _) = False isMonadStmtContext ArrowExpr = False isMonadDoStmtContext :: HsDoFlavour -> Bool isMonadDoStmtContext ListComp = False isMonadDoStmtContext MonadComp = True isMonadDoStmtContext DoExpr{} = True isMonadDoStmtContext MDoExpr{} = True isMonadDoStmtContext GhciStmtCtxt = True isMonadCompContext :: HsStmtContext fn -> Bool isMonadCompContext (HsDoStmt flavour) = isMonadDoCompContext flavour isMonadCompContext (ParStmtCtxt _) = False isMonadCompContext (TransStmtCtxt _) = False isMonadCompContext (PatGuard _) = False isMonadCompContext ArrowExpr = False isMonadDoCompContext :: HsDoFlavour -> Bool isMonadDoCompContext MonadComp = True isMonadDoCompContext ListComp = False isMonadDoCompContext GhciStmtCtxt = False isMonadDoCompContext (DoExpr _) = False isMonadDoCompContext (MDoExpr _) = False ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Expr.hs-boot0000644000000000000000000000140207346545000024111 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TypeFamilies #-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Expr where import Language.Haskell.Syntax.Extension ( XRec ) import Data.Kind ( Type ) import Prelude (Eq) import Data.Data (Data) type role HsExpr nominal type role MatchGroup nominal nominal type role GRHSs nominal nominal type role HsUntypedSplice nominal data HsExpr (i :: Type) data HsUntypedSplice (i :: Type) data MatchGroup (a :: Type) (body :: Type) data GRHSs (a :: Type) (body :: Type) type family SyntaxExpr (i :: Type) type LHsExpr a = XRec a (HsExpr a) data HsDoFlavour instance Eq HsDoFlavour instance Data 
HsDoFlavourghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Extension.hs0000644000000000000000000005347007346545000024222 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} -- for unXRec, etc. {-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE EmptyCase #-} {-# LANGUAGE EmptyDataDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Extension where -- This module captures the type families to precisely identify the extension -- points for GHC.Hs syntax #if MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) import Data.Type.Equality (type (~)) #endif import Data.Data hiding ( Fixity ) import Data.Kind (Type) import Data.Eq import Data.Ord {- Note [Trees That Grow] ~~~~~~~~~~~~~~~~~~~~~~ See https://gitlab.haskell.org/ghc/ghc/wikis/implementing-trees-that-grow The hsSyn AST is reused across multiple compiler passes. We also have the Template Haskell AST, and the haskell-src-exts one (outside of GHC) Supporting multiple passes means the AST has various warts on it to cope with the specifics for the phases, such as the 'ValBindsOut', 'ConPatOut', 'SigPatOut' etc. The growable AST will allow each of these variants to be captured explicitly, such that they only exist in the given compiler pass AST, as selected by the type parameter to the AST. In addition it will allow tool writers to define their own extensions to capture additional information for the tool, in a natural way. 
A further goal is to provide a means to harmonise the Template Haskell and haskell-src-exts ASTs as well. Wrinkle: In order to print out the AST, we need to know it is Outputable. We also sometimes need to branch on the particular pass that we're in (e.g. to print out type information once we know it). In order to allow both of these actions, we define OutputableBndrId, which gathers the necessary OutputableBndr and IsPass constraints. The use of this constraint in instances generally requires UndecidableInstances. See also Note [IsPass] and Note [NoGhcTc] in GHC.Hs.Extension. -} -- | A placeholder type for TTG extension points that are not currently -- used to represent any particular value. -- -- This should not be confused with 'DataConCantHappen', which are found in unused -- extension /constructors/ and therefore should never be inhabited. In -- contrast, 'NoExtField' is used in extension /points/ (e.g., as the field of -- some constructor), so it must have an inhabitant to construct AST passes -- that manipulate fields with that extension point as their type. data NoExtField = NoExtField deriving (Data,Eq,Ord) -- | Used when constructing a term with an unused extension point. noExtField :: NoExtField noExtField = NoExtField {- Note [Constructor cannot occur] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some data constructors can't occur in certain phases; e.g. the output of the type checker never has OverLabel. We signal this by * setting the extension field to DataConCantHappen * using dataConCantHappen in the cases that can't happen For example: type instance XOverLabel GhcTc = DataConCantHappen dsExpr :: HsExpr GhcTc -> blah dsExpr (HsOverLabel x _) = dataConCantHappen x The function dataConCantHappen is defined thus: dataConCantHappen :: DataConCantHappen -> a dataConCantHappen x = case x of {} (i.e. identically to Data.Void.absurd, but more helpfully named). Remember DataConCantHappen is a type whose only element is bottom. 
This should not be confused with 'NoExtField', which are found in unused extension /points/ (not /constructors/) and therefore can be inhabited. It would be better to omit the pattern match altogether, but we can only do that if the extension field was strict (#18764). See also [DataConCantHappen and strict fields]. -} data DataConCantHappen deriving (Data,Eq,Ord) -- | Eliminate a 'DataConCantHappen'. See Note [Constructor cannot occur]. dataConCantHappen :: DataConCantHappen -> a dataConCantHappen x = case x of {} -- | GHC's L prefixed variants wrap their vanilla variant in this type family, -- to add 'SrcLoc' info via 'Located'. Other passes than 'GhcPass' not -- interested in location information can define this as -- @type instance XRec NoLocated a = a@. -- See Note [XRec and SrcSpans in the AST] type family XRec p a = r | r -> a type family Anno a = b -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation {- Note [XRec and SrcSpans in the AST] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ XRec is meant to replace most of the uses of `Located` in the AST. It is another extension point meant to make it easier for non-GHC applications to reuse the AST for their own purposes, and not have to deal the hassle of (perhaps) useless SrcSpans everywhere. instead of `Located (HsExpr p)` or similar types, we will now have `XRec p (HsExpr p)` XRec allows annotating certain points in the AST with extra information. This maybe be source spans (for GHC), nothing (for TH), types (for HIE files), exact print annotations (for exactprint) or anything else. This should hopefully bring us one step closer to sharing the AST between GHC and TH. We use the `UnXRec`, `MapXRec` and `WrapXRec` type classes to aid us in writing pass-polymorphic code that deals with `XRec`s -} -- | We can strip off the XRec to access the underlying data. 
-- See Note [XRec and SrcSpans in the AST] class UnXRec p where unXRec :: XRec p a -> a -- | We can map over the underlying type contained in an @XRec@ while preserving -- the annotation as is. class MapXRec p where mapXRec :: (Anno a ~ Anno b) => (a -> b) -> XRec p a -> XRec p b -- See Note [XRec and SrcSpans in the AST] -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation -- AZ: Is there a way to not have Anno in this file, but still have MapXRec? -- Perhaps define XRec with an additional b parameter, only used in Hs as (Anno b)? -- | The trivial wrapper that carries no additional information -- See Note [XRec and SrcSpans in the AST] class WrapXRec p a where wrapXRec :: a -> XRec p a -- | Maps the "normal" id type for a given pass type family IdP p type LIdP p = XRec p (IdP p) -- ===================================================================== -- Type families for the HsBinds extension points -- HsLocalBindsLR type families type family XHsValBinds x x' type family XHsIPBinds x x' type family XEmptyLocalBinds x x' type family XXHsLocalBindsLR x x' -- HsValBindsLR type families type family XValBinds x x' type family XXValBindsLR x x' -- HsBindLR type families type family XFunBind x x' type family XPatBind x x' type family XVarBind x x' type family XPatSynBind x x' type family XXHsBindsLR x x' -- PatSynBind type families type family XPSB x x' type family XXPatSynBind x x' -- HsIPBinds type families type family XIPBinds x type family XXHsIPBinds x -- IPBind type families type family XCIPBind x type family XXIPBind x -- Sig type families type family XTypeSig x type family XPatSynSig x type family XClassOpSig x type family XIdSig x type family XFixSig x type family XInlineSig x type family XSpecSig x type family XSpecInstSig x type family XMinimalSig x type family XSCCFunSig x type family XCompleteMatchSig x type family XXSig x -- FixitySig type families type family XFixitySig x type family XXFixitySig x -- StandaloneKindSig type families type family 
XStandaloneKindSig x type family XXStandaloneKindSig x -- ===================================================================== -- Type families for the HsDecls extension points -- HsDecl type families type family XTyClD x type family XInstD x type family XDerivD x type family XValD x type family XSigD x type family XKindSigD x type family XDefD x type family XForD x type family XWarningD x type family XAnnD x type family XRuleD x type family XSpliceD x type family XDocD x type family XRoleAnnotD x type family XXHsDecl x -- ------------------------------------- -- HsGroup type families type family XCHsGroup x type family XXHsGroup x -- ------------------------------------- -- SpliceDecl type families type family XSpliceDecl x type family XXSpliceDecl x -- ------------------------------------- -- TyClDecl type families type family XFamDecl x type family XSynDecl x type family XDataDecl x type family XClassDecl x type family XXTyClDecl x -- ------------------------------------- -- FunDep type families type family XCFunDep x type family XXFunDep x -- ------------------------------------- -- TyClGroup type families type family XCTyClGroup x type family XXTyClGroup x -- ------------------------------------- -- FamilyResultSig type families type family XNoSig x type family XCKindSig x -- Clashes with XKindSig above type family XTyVarSig x type family XXFamilyResultSig x -- ------------------------------------- -- FamilyDecl type families type family XCFamilyDecl x type family XXFamilyDecl x -- ------------------------------------- -- HsDataDefn type families type family XCHsDataDefn x type family XXHsDataDefn x -- ------------------------------------- -- HsDerivingClause type families type family XCHsDerivingClause x type family XXHsDerivingClause x -- ------------------------------------- -- DerivClauseTys type families type family XDctSingle x type family XDctMulti x type family XXDerivClauseTys x -- ------------------------------------- -- ConDecl type families type 
family XConDeclGADT x type family XConDeclH98 x type family XXConDecl x -- ------------------------------------- -- FamEqn type families type family XCFamEqn x r type family XXFamEqn x r -- ------------------------------------- -- TyFamInstDecl type families type family XCTyFamInstDecl x type family XXTyFamInstDecl x -- ------------------------------------- -- ClsInstDecl type families type family XCClsInstDecl x type family XXClsInstDecl x -- ------------------------------------- -- InstDecl type families type family XClsInstD x type family XDataFamInstD x type family XTyFamInstD x type family XXInstDecl x -- ------------------------------------- -- DerivDecl type families type family XCDerivDecl x type family XXDerivDecl x -- ------------------------------------- -- DerivStrategy type family type family XStockStrategy x type family XAnyClassStrategy x type family XNewtypeStrategy x type family XViaStrategy x -- ------------------------------------- -- DefaultDecl type families type family XCDefaultDecl x type family XXDefaultDecl x -- ------------------------------------- -- ForeignDecl type families type family XForeignImport x type family XForeignExport x type family XXForeignDecl x type family XCImport x type family XXForeignImport x type family XCExport x type family XXForeignExport x -- ------------------------------------- -- RuleDecls type families type family XCRuleDecls x type family XXRuleDecls x -- ------------------------------------- -- RuleDecl type families type family XHsRule x type family XXRuleDecl x -- ------------------------------------- -- RuleBndr type families type family XCRuleBndr x type family XRuleBndrSig x type family XXRuleBndr x -- ------------------------------------- -- WarnDecls type families type family XWarnings x type family XXWarnDecls x -- ------------------------------------- -- WarnDecl type families type family XWarning x type family XXWarnDecl x -- ------------------------------------- -- AnnDecl type families type 
family XHsAnnotation x type family XXAnnDecl x -- ------------------------------------- -- RoleAnnotDecl type families type family XCRoleAnnotDecl x type family XXRoleAnnotDecl x -- ------------------------------------- -- InjectivityAnn type families type family XCInjectivityAnn x type family XXInjectivityAnn x -- ===================================================================== -- Type families for the HsModule extension points type family XCModule x type family XXModule x -- ===================================================================== -- Type families for the HsExpr extension points type family XVar x type family XUnboundVar x type family XRecSel x type family XOverLabel x type family XIPVar x type family XOverLitE x type family XLitE x type family XLam x type family XLamCase x type family XApp x type family XAppTypeE x type family XOpApp x type family XNegApp x type family XPar x type family XSectionL x type family XSectionR x type family XExplicitTuple x type family XExplicitSum x type family XCase x type family XIf x type family XMultiIf x type family XLet x type family XDo x type family XExplicitList x type family XRecordCon x type family XRecordUpd x type family XLHsRecUpdLabels x type family XLHsOLRecUpdLabels x type family XGetField x type family XProjection x type family XExprWithTySig x type family XArithSeq x type family XTypedBracket x type family XUntypedBracket x type family XTypedSplice x type family XUntypedSplice x type family XProc x type family XStatic x type family XTick x type family XBinTick x type family XPragE x type family XEmbTy x type family XForAll x type family XQual x type family XFunArr x type family XXExpr x -- ------------------------------------- -- HsMatchContext type families type family XFunRhs -- ------------------------------------- -- DotFieldOcc type families type family XCDotFieldOcc x type family XXDotFieldOcc x -- ------------------------------------- -- HsPragE type families type family XSCC x type family 
XXPragE x -- ------------------------------------- -- HsTupArg type families type family XPresent x type family XMissing x type family XXTupArg x -- ------------------------------------- -- HsUntypedSplice type families type family XUntypedSpliceExpr x type family XQuasiQuote x type family XXUntypedSplice x -- ------------------------------------- -- HsQuoteBracket type families type family XExpBr x type family XPatBr x type family XDecBrL x type family XDecBrG x type family XTypBr x type family XVarBr x type family XXQuote x -- ------------------------------------- -- HsCmdTop type families type family XCmdTop x type family XXCmdTop x -- ------------------------------------- -- MatchGroup type families type family XMG x b type family XXMatchGroup x b -- ------------------------------------- -- Match type families type family XCMatch x b type family XXMatch x b -- ------------------------------------- -- GRHSs type families type family XCGRHSs x b type family XXGRHSs x b -- ------------------------------------- -- GRHS type families type family XCGRHS x b type family XXGRHS x b -- ------------------------------------- -- StmtLR type families type family XLastStmt x x' b type family XBindStmt x x' b type family XBodyStmt x x' b type family XLetStmt x x' b type family XParStmt x x' b type family XTransStmt x x' b type family XRecStmt x x' b type family XXStmtLR x x' b -- ------------------------------------- -- HsCmd type families type family XCmdArrApp x type family XCmdArrForm x type family XCmdApp x type family XCmdLam x type family XCmdPar x type family XCmdCase x type family XCmdLamCase x type family XCmdIf x type family XCmdLet x type family XCmdDo x type family XCmdWrap x type family XXCmd x -- ------------------------------------- -- ParStmtBlock type families type family XParStmtBlock x x' type family XXParStmtBlock x x' -- ===================================================================== -- Type families for the HsLit extension points -- We define a 
type family for each extension point. This is based on prepending -- 'X' to the constructor name, for ease of reference. type family XHsChar x type family XHsCharPrim x type family XHsString x type family XHsMultilineString x type family XHsStringPrim x type family XHsInt x type family XHsIntPrim x type family XHsWordPrim x type family XHsInt8Prim x type family XHsInt16Prim x type family XHsInt32Prim x type family XHsInt64Prim x type family XHsWord8Prim x type family XHsWord16Prim x type family XHsWord32Prim x type family XHsWord64Prim x type family XHsInteger x type family XHsRat x type family XHsFloatPrim x type family XHsDoublePrim x type family XXLit x -- ------------------------------------- -- HsOverLit type families type family XOverLit x type family XXOverLit x -- ===================================================================== -- Type families for the HsPat extension points type family XWildPat x type family XVarPat x type family XLazyPat x type family XAsPat x type family XParPat x type family XBangPat x type family XListPat x type family XTuplePat x type family XSumPat x type family XOrPat x type family XConPat x type family XViewPat x type family XSplicePat x type family XLitPat x type family XNPat x type family XNPlusKPat x type family XSigPat x type family XEmbTyPat x type family XInvisPat x type family XCoPat x type family XXPat x type family XHsFieldBind x -- ===================================================================== -- Type families for the HsTypes type families -- ------------------------------------- -- LHsQTyVars type families type family XHsQTvs x type family XXLHsQTyVars x -- ------------------------------------- -- HsOuterTyVarBndrs type families type family XHsOuterImplicit x type family XHsOuterExplicit x flag type family XXHsOuterTyVarBndrs x -- ------------------------------------- -- HsSigType type families type family XHsSig x type family XXHsSigType x -- ------------------------------------- -- HsWildCardBndrs type 
families type family XHsWC x b type family XXHsWildCardBndrs x b -- ------------------------------------- -- HsPatSigType type families type family XHsPS x type family XXHsPatSigType x -- ------------------------------------- -- HsTyPat type families type family XHsTP x type family XXHsTyPat x -- ------------------------------------- -- HsType type families type family XForAllTy x type family XQualTy x type family XTyVar x type family XAppTy x type family XAppKindTy x type family XFunTy x type family XListTy x type family XTupleTy x type family XSumTy x type family XOpTy x type family XParTy x type family XIParamTy x type family XStarTy x type family XKindSig x type family XSpliceTy x type family XDocTy x type family XBangTy x type family XRecTy x type family XExplicitListTy x type family XExplicitTupleTy x type family XTyLit x type family XWildCardTy x type family XXType x -- --------------------------------------------------------------------- -- HsTyLit type families type family XNumTy x type family XStrTy x type family XCharTy x type family XXTyLit x -- --------------------------------------------------------------------- -- HsForAllTelescope type families type family XHsForAllVis x type family XHsForAllInvis x type family XXHsForAllTelescope x -- --------------------------------------------------------------------- -- HsTyVarBndr type families type family XTyVarBndr x type family XXTyVarBndr x -- --------------------------------------------------------------------- -- ConDeclField type families type family XConDeclField x type family XXConDeclField x -- --------------------------------------------------------------------- -- FieldOcc type families type family XCFieldOcc x type family XXFieldOcc x -- ===================================================================== -- Type families for the HsImpExp extension points -- ------------------------------------- -- ImportDecl type families type family XCImportDecl x type family XXImportDecl x type family 
ImportDeclPkgQual x -- stores the package qualifier in an import statement -- ------------------------------------- -- IE type families type family XIEVar x type family XIEThingAbs x type family XIEThingAll x type family XIEThingWith x type family XIEModuleContents x type family XIEGroup x type family XIEDoc x type family XIEDocNamed x type family XXIE x -- ------------------------------------- -- IEWrappedName type families type family XIEName p type family XIEDefault p type family XIEPattern p type family XIEType p type family XXIEWrappedName p -- ===================================================================== -- Misc -- | See Note [NoGhcTc] in GHC.Hs.Extension. It has to be in this -- module because it is used like an extension point (in the data definitions -- of types that should be parameter-agnostic. type family NoGhcTc (p :: Type) -- ===================================================================== -- End of Type family definitions -- ===================================================================== ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/ImpExp.hs0000644000000000000000000001526407346545000023447 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE DeriveDataTypeable #-} module Language.Haskell.Syntax.ImpExp where import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Module.Name import Data.Eq (Eq) import Data.Ord (Ord) import Text.Show (Show) import Data.Data (Data) import Data.Bool (Bool) import Data.Maybe (Maybe) import Data.String (String) import Data.Int (Int) import Control.DeepSeq import GHC.Hs.Doc -- ROMES:TODO Discuss in #21592 whether this is parsed AST or base AST {- ************************************************************************ * * Import and export declaration lists * * ************************************************************************ One per import declaration in a module. 
-} -- | Located Import Declaration type LImportDecl pass = XRec pass (ImportDecl pass) -- | If/how an import is 'qualified'. data ImportDeclQualifiedStyle = QualifiedPre -- ^ 'qualified' appears in prepositive position. | QualifiedPost -- ^ 'qualified' appears in postpositive position. | NotQualified -- ^ Not qualified. deriving (Eq, Data) -- | Indicates whether a module name is referring to a boot interface (hs-boot -- file) or regular module (hs file). We need to treat boot modules specially -- when building compilation graphs, since they break cycles. Regular source -- files and signature files are treated equivalently. data IsBootInterface = NotBoot | IsBoot deriving (Eq, Ord, Show, Data) instance NFData IsBootInterface where rnf = rwhnf -- | Import Declaration -- -- A single Haskell @import@ declaration. data ImportDecl pass = ImportDecl { ideclExt :: XCImportDecl pass, -- ^ Locations of keywords like @import@, @qualified@, etc. are captured here. ideclName :: XRec pass ModuleName, -- ^ Module name. ideclPkgQual :: ImportDeclPkgQual pass, -- ^ Package qualifier. ideclSource :: IsBootInterface, -- ^ IsBoot \<=> {-\# SOURCE \#-} import ideclSafe :: Bool, -- ^ True => safe import ideclQualified :: ImportDeclQualifiedStyle, -- ^ If/how the import is qualified. ideclAs :: Maybe (XRec pass ModuleName), -- ^ as Module ideclImportList :: Maybe (ImportListInterpretation, XRec pass [LIE pass]) -- ^ Explicit import list (EverythingBut => hiding, names) } | XImportDecl !(XXImportDecl pass) -- | Whether the import list is exactly what to import, or whether @hiding@ was -- used, and therefore everything but what was listed should be imported data ImportListInterpretation = Exactly | EverythingBut deriving (Eq, Data) instance NFData ImportListInterpretation where rnf = rwhnf -- | Located Import or Export type LIE pass = XRec pass (IE pass) -- ^ When in a list this may have -- | A docstring attached to an export list item. 
type ExportDoc pass = LHsDoc pass -- | Imported or exported entity. data IE pass = IEVar (XIEVar pass) (LIEWrappedName pass) (Maybe (ExportDoc pass)) -- ^ Imported or exported variable -- -- @ -- module Mod ( test ) -- import Mod ( test ) -- @ | IEThingAbs (XIEThingAbs pass) (LIEWrappedName pass) (Maybe (ExportDoc pass)) -- ^ Imported or exported Thing with absent subordinate list -- -- The thing is a Class\/Type (can't tell) -- -- @ -- module Mod ( Test ) -- import Mod ( Test ) -- @ -- See Note [Located RdrNames] in GHC.Hs.Expr | IEThingAll (XIEThingAll pass) (LIEWrappedName pass) (Maybe (ExportDoc pass)) -- ^ Imported or exported thing with wildcard subordinate list (e.g. @(..)@) -- -- The thing is a Class\/Type and the All refers to methods\/constructors -- -- @ -- module Mod ( Test(..) ) -- import Mod ( Test(..) ) -- @ -- See Note [Located RdrNames] in GHC.Hs.Expr | IEThingWith (XIEThingWith pass) (LIEWrappedName pass) IEWildcard [LIEWrappedName pass] (Maybe (ExportDoc pass)) -- ^ Imported or exported thing with explicit subordinate list. -- -- The thing is a Class\/Type (can't tell) and the imported or exported things are -- its children. -- -- @ -- module Mod ( Test(f, g) ) -- import Mod ( Test(f, g) ) -- @ | IEModuleContents (XIEModuleContents pass) (XRec pass ModuleName) -- ^ Export of entire module. Can only occur in export list. -- -- @ -- module Mod ( module Mod2 ) -- @ | IEGroup (XIEGroup pass) Int (LHsDoc pass) -- ^ A Haddock section in an export list. -- -- @ -- module Mod -- ( -- * Section heading -- ... -- ) -- @ | IEDoc (XIEDoc pass) (LHsDoc pass) -- ^ A bit of unnamed documentation. -- -- @ -- module Mod -- ( -- | Documentation -- ... -- ) -- @ | IEDocNamed (XIEDocNamed pass) String -- ^ A reference to a named documentation chunk. -- -- @ -- module Mod -- ( -- $chunkName -- ... -- ) -- @ | XIE !(XXIE pass) -- | Wildcard in an import or export sublist, like the @..@ in -- @import Mod ( T(Mk1, Mk2, ..) )@. 
data IEWildcard = NoIEWildcard -- ^ no wildcard in this list | IEWildcard Int -- ^ wildcard after the given \# of items in this list -- The @Int@ is in the range [0..n], where n is the length -- of the list. deriving (Eq, Data) -- | A name in an import or export specification which may have -- adornments. Used primarily for accurate pretty printing of -- ParsedSource, and API Annotation placement. data IEWrappedName p = IEName (XIEName p) (LIdP p) -- ^ unadorned name, e.g @myFun@ | IEDefault (XIEDefault p) (LIdP p) -- ^ @default X ()@, see Note [Named default declarations] in GHC.Tc.Gen.Default | IEPattern (XIEPattern p) (LIdP p) -- ^ @pattern X@ -- -- exactprint: the location of @pattern@ keyword is captured via 'GHC.Parser.Annotation.EpaLocation' | IEType (XIEType p) (LIdP p) -- ^ @type (:+:)@ -- -- exactprint: the location of @type@ keyword is captured via 'GHC.Parser.Annotation.EpaLocation' | XIEWrappedName !(XXIEWrappedName p) -- | Located name with possible adornment type LIEWrappedName p = XRec p (IEWrappedName p) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/ImpExp.hs-boot0000644000000000000000000000064307346545000024403 0ustar0000000000000000module Language.Haskell.Syntax.ImpExp where import Data.Eq import Data.Ord import Text.Show import Data.Data -- This boot file should be short lived: As soon as the dependency on -- `GHC.Hs.Doc` is gone we'll no longer have cycles and can get rid this file. 
data IsBootInterface = NotBoot | IsBoot instance Eq IsBootInterface instance Ord IsBootInterface instance Show IsBootInterface instance Data IsBootInterface ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Lit.hs0000644000000000000000000001270207346545000022767 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Source-language literals module Language.Haskell.Syntax.Lit where import Language.Haskell.Syntax.Extension import GHC.Types.SourceText (IntegralLit, FractionalLit, SourceText) import GHC.Core.Type (Type) import GHC.Data.FastString (FastString, lexicalCompareFS) import Data.ByteString (ByteString) import Data.Data hiding ( Fixity ) import Data.Bool import Data.Ord import Data.Eq import Data.Char import Prelude (Integer) {- ************************************************************************ * * \subsection[HsLit]{Literals} * * ************************************************************************ -} -- Note [Literal source text] in "GHC.Types.SourceText" for SourceText fields in -- the following -- Note [Trees That Grow] in "Language.Haskell.Syntax.Extension" for the Xxxxx -- fields in the following -- | Haskell Literal data HsLit x = HsChar (XHsChar x) {- SourceText -} Char -- ^ Character | HsCharPrim (XHsCharPrim x) {- SourceText -} Char -- ^ Unboxed character | HsString (XHsString x) {- SourceText -} FastString -- ^ String | HsMultilineString (XHsMultilineString x) {- SourceText -} FastString -- ^ String | HsStringPrim (XHsStringPrim x) {- SourceText -} !ByteString -- ^ Packed bytes | HsInt (XHsInt x) 
IntegralLit -- ^ Genuinely an Int; arises from -- "GHC.Tc.Deriv.Generate", and from TRANSLATION | HsIntPrim (XHsIntPrim x) {- SourceText -} Integer -- ^ literal @Int#@ | HsWordPrim (XHsWordPrim x) {- SourceText -} Integer -- ^ literal @Word#@ | HsInt8Prim (XHsInt8Prim x) {- SourceText -} Integer -- ^ literal @Int8#@ | HsInt16Prim (XHsInt16Prim x) {- SourceText -} Integer -- ^ literal @Int16#@ | HsInt32Prim (XHsInt32Prim x) {- SourceText -} Integer -- ^ literal @Int32#@ | HsInt64Prim (XHsInt64Prim x) {- SourceText -} Integer -- ^ literal @Int64#@ | HsWord8Prim (XHsWord8Prim x) {- SourceText -} Integer -- ^ literal @Word8#@ | HsWord16Prim (XHsWord16Prim x) {- SourceText -} Integer -- ^ literal @Word16#@ | HsWord32Prim (XHsWord32Prim x) {- SourceText -} Integer -- ^ literal @Word32#@ | HsWord64Prim (XHsWord64Prim x) {- SourceText -} Integer -- ^ literal @Word64#@ | HsInteger (XHsInteger x) {- SourceText -} Integer Type -- ^ Genuinely an integer; arises only -- from TRANSLATION (overloaded -- literals are done with HsOverLit) | HsRat (XHsRat x) FractionalLit Type -- ^ Genuinely a rational; arises only from -- TRANSLATION (overloaded literals are -- done with HsOverLit) | HsFloatPrim (XHsFloatPrim x) FractionalLit -- ^ Unboxed Float | HsDoublePrim (XHsDoublePrim x) FractionalLit -- ^ Unboxed Double | XLit !(XXLit x) instance Eq (HsLit x) where (HsChar _ x1) == (HsChar _ x2) = x1==x2 (HsCharPrim _ x1) == (HsCharPrim _ x2) = x1==x2 (HsString _ x1) == (HsString _ x2) = x1==x2 (HsStringPrim _ x1) == (HsStringPrim _ x2) = x1==x2 (HsInt _ x1) == (HsInt _ x2) = x1==x2 (HsIntPrim _ x1) == (HsIntPrim _ x2) = x1==x2 (HsWordPrim _ x1) == (HsWordPrim _ x2) = x1==x2 (HsInt64Prim _ x1) == (HsInt64Prim _ x2) = x1==x2 (HsWord64Prim _ x1) == (HsWord64Prim _ x2) = x1==x2 (HsInteger _ x1 _) == (HsInteger _ x2 _) = x1==x2 (HsRat _ x1 _) == (HsRat _ x2 _) = x1==x2 (HsFloatPrim _ x1) == (HsFloatPrim _ x2) = x1==x2 (HsDoublePrim _ x1) == (HsDoublePrim _ x2) = x1==x2 _ == _ = False -- | 
Haskell Overloaded Literal data HsOverLit p = OverLit { ol_ext :: (XOverLit p), ol_val :: OverLitVal} | XOverLit !(XXOverLit p) -- Note [Literal source text] in "GHC.Types.SourceText" for SourceText fields in -- the following -- | Overloaded Literal Value data OverLitVal = HsIntegral !IntegralLit -- ^ Integer-looking literals; | HsFractional !FractionalLit -- ^ Frac-looking literals | HsIsString !SourceText !FastString -- ^ String-looking literals deriving Data instance Eq OverLitVal where (HsIntegral i1) == (HsIntegral i2) = i1 == i2 (HsFractional f1) == (HsFractional f2) = f1 == f2 (HsIsString _ s1) == (HsIsString _ s2) = s1 == s2 _ == _ = False instance Ord OverLitVal where compare (HsIntegral i1) (HsIntegral i2) = i1 `compare` i2 compare (HsIntegral _) (HsFractional _) = LT compare (HsIntegral _) (HsIsString _ _) = LT compare (HsFractional f1) (HsFractional f2) = f1 `compare` f2 compare (HsFractional _) (HsIntegral _) = GT compare (HsFractional _) (HsIsString _ _) = LT compare (HsIsString _ s1) (HsIsString _ s2) = s1 `lexicalCompareFS` s2 compare (HsIsString _ _) (HsIntegral _) = GT compare (HsIsString _ _) (HsFractional _) = GT ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Module/0000755000000000000000000000000007346545000023126 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Module/Name.hs0000644000000000000000000000322307346545000024342 0ustar0000000000000000module Language.Haskell.Syntax.Module.Name where import Prelude import Data.Char (isAlphaNum) import Control.DeepSeq import qualified Text.ParserCombinators.ReadP as Parse import System.FilePath import GHC.Data.FastString -- | A ModuleName is essentially a simple string, e.g. @Data.List@. 
newtype ModuleName = ModuleName FastString deriving (Show, Eq) instance Ord ModuleName where nm1 `compare` nm2 = stableModuleNameCmp nm1 nm2 instance NFData ModuleName where rnf x = x `seq` () stableModuleNameCmp :: ModuleName -> ModuleName -> Ordering -- ^ Compares module names lexically, rather than by their 'Unique's stableModuleNameCmp n1 n2 = moduleNameFS n1 `lexicalCompareFS` moduleNameFS n2 moduleNameFS :: ModuleName -> FastString moduleNameFS (ModuleName mod) = mod moduleNameString :: ModuleName -> String moduleNameString (ModuleName mod) = unpackFS mod mkModuleName :: String -> ModuleName mkModuleName s = ModuleName (mkFastString s) mkModuleNameFS :: FastString -> ModuleName mkModuleNameFS s = ModuleName s -- |Returns the string version of the module name, with dots replaced by slashes. -- moduleNameSlashes :: ModuleName -> String moduleNameSlashes = dots_to_slashes . moduleNameString where dots_to_slashes = map (\c -> if c == '.' then pathSeparator else c) -- |Returns the string version of the module name, with dots replaced by colons. -- moduleNameColons :: ModuleName -> String moduleNameColons = dots_to_colons . moduleNameString where dots_to_colons = map (\c -> if c == '.' 
then ':' else c) parseModuleName :: Parse.ReadP ModuleName parseModuleName = fmap mkModuleName $ Parse.munch1 (\c -> isAlphaNum c || c `elem` "_.'") ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Pat.hs0000644000000000000000000003135007346545000022763 0ustar0000000000000000 {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE DataKinds #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PatSyntax]{Abstract Haskell syntax---patterns} -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Pat ( Pat(..), LPat, ConLikeP, isInvisArgPat, isVisArgPat, HsConPatDetails, hsConPatArgs, hsConPatTyArgs, HsConPatTyArg(..), XConPatTyArg, HsRecFields(..), XHsRecFields, HsFieldBind(..), LHsFieldBind, HsRecField, LHsRecField, HsRecUpdField, LHsRecUpdField, RecFieldsDotDot(..), hsRecFields, hsRecFieldSel, hsRecFieldsArgs, ) where import {-# SOURCE #-} Language.Haskell.Syntax.Expr (SyntaxExpr, LHsExpr, HsUntypedSplice) -- friends: import Language.Haskell.Syntax.Basic import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type -- libraries: import Data.Maybe import Data.Functor import Data.Foldable import Data.Traversable import Data.Bool import Data.Data import Data.Eq import Data.Ord import Data.Int import Data.Function import qualified Data.List import Data.List.NonEmpty (NonEmpty) type LPat p = XRec p (Pat p) -- | Pattern data Pat p = ------------ Simple patterns --------------- WildPat (XWildPat p) -- ^ Wildcard Pattern, i.e. 
@_@ | VarPat (XVarPat p) (LIdP p) -- ^ Variable Pattern, e.g. @x@ -- See Note [Located RdrNames] in GHC.Hs.Expr | LazyPat (XLazyPat p) (LPat p) -- ^ Lazy Pattern, e.g. @~x@ | AsPat (XAsPat p) (LIdP p) (LPat p) -- ^ As pattern, e.g. @x\@pat@ | ParPat (XParPat p) (LPat p) -- ^ Parenthesised pattern, e.g. @(x)@ -- See Note [Parens in HsSyn] in GHC.Hs.Expr | BangPat (XBangPat p) (LPat p) -- ^ Bang pattern, e.g. @!x@ ------------ Lists, tuples, arrays --------------- | ListPat (XListPat p) [LPat p] -- ^ Syntactic List, e.g. @[x]@ or @[x,y]@. -- Note that @[]@ and @(x:xs)@ patterns are both represented using 'ConPat'. | -- | Tuple pattern, e.g. @(x, y)@ (boxed tuples) or @(# x, y #)@ (requires @-XUnboxedTuples@) TuplePat (XTuplePat p) -- ^ After typechecking, holds the types of the tuple components [LPat p] -- ^ Tuple sub-patterns Boxity -- You might think that the post typechecking Type was redundant, -- because we can get the pattern type by getting the types of the -- sub-patterns. -- But it's essential -- data T a where -- T1 :: Int -> T Int -- f :: (T a, a) -> Int -- f (T1 x, z) = z -- When desugaring, we must generate -- f = /\a. \v::a. case v of (t::T a, w::a) -> -- case t of (T1 (x::Int)) -> -- Note the (w::a), NOT (w::Int), because we have not yet -- refined 'a' to Int. So we must know that the second component -- of the tuple is of type 'a' not Int. See selectMatchVar -- (June 14: I'm not sure this comment is right; the sub-patterns -- will be wrapped in CoPats, no?) | OrPat (XOrPat p) (NonEmpty (LPat p)) -- ^ Or Pattern, e.g. @(pat_1; ...; pat_n)@. Used by @-XOrPatterns@ -- -- @since 9.12.1 | SumPat (XSumPat p) -- after typechecker, types of the alternative (LPat p) -- Sum sub-pattern ConTag -- Alternative (one-based) SumWidth -- Arity (INVARIANT: ≥ 2) -- ^ Anonymous sum pattern, e.g. @(# x | #)@. 
Used by @-XUnboxedSums@ ------------ Constructor patterns --------------- | ConPat { pat_con_ext :: XConPat p, pat_con :: XRec p (ConLikeP p), pat_args :: HsConPatDetails p } -- ^ Constructor Pattern, e.g. @()@, @[]@ or @Nothing@ ------------ View patterns --------------- | ViewPat (XViewPat p) (LHsExpr p) (LPat p) -- ^ View Pattern, e.g. @someFun -> pat@. Used by @-XViewPatterns@ ------------ Pattern splices --------------- | SplicePat (XSplicePat p) (HsUntypedSplice p) -- ^ Splice Pattern, e.g. @$(pat)@ ------------ Literal and n+k patterns --------------- | LitPat (XLitPat p) (HsLit p) -- ^ Literal Pattern -- -- Used for __non-overloaded__ literal patterns: -- Int#, Char#, Int, Char, String, etc. | NPat (XNPat p) -- Overall type of pattern. Might be -- different than the literal's type -- if (==) or negate changes the type (XRec p (HsOverLit p)) -- ALWAYS positive (Maybe (SyntaxExpr p)) -- Just (Name of 'negate') for -- negative patterns, Nothing -- otherwise (SyntaxExpr p) -- Equality checker, of type t->t->Bool -- ^ Natural Pattern, used for all overloaded literals, including overloaded Strings -- with @-XOverloadedStrings@ | -- | n+k pattern, e.g. @n+1@, used by @-XNPlusKPatterns@ NPlusKPat (XNPlusKPat p) -- Type of overall pattern (LIdP p) -- n+k pattern (XRec p (HsOverLit p)) -- It'll always be an HsIntegral (HsOverLit p) -- See Note [NPlusK patterns] in GHC.Tc.Gen.Pat -- NB: This could be (PostTc ...), but that induced a -- a new hs-boot file. Not worth it. (SyntaxExpr p) -- (>=) function, of type t1->t2->Bool (SyntaxExpr p) -- Name of '-' (see GHC.Rename.Env.lookupSyntax) ------------ Pattern type signatures --------------- | SigPat (XSigPat p) -- After typechecker: Type (LPat p) -- Pattern with a type signature (HsPatSigType (NoGhcTc p)) -- Signature can bind both -- kind and type vars -- ^ Pattern with a type signature, e.g. @x :: Int@ | -- | Embed the syntax of types into patterns, e.g. @fn (type t) = rhs@. 
-- Enabled by @-XExplicitNamespaces@ in conjunction with @-XRequiredTypeArguments@. EmbTyPat (XEmbTyPat p) (HsTyPat (NoGhcTc p)) | InvisPat (XInvisPat p) (HsTyPat (NoGhcTc p)) -- ^ Type abstraction which brings into scope type variables associated with invisible forall. -- E.g. @fn \@t ... = rhs@. Used by @-XTypeAbstractions@. -- See Note [Invisible binders in functions] in GHC.Hs.Pat | -- | TTG Extension point; see Note [Trees That Grow] in Language.Haskell.Syntax.Extension XPat !(XXPat p) type family ConLikeP x -- --------------------------------------------------------------------- -- | Type argument in a data constructor pattern, -- e.g. the @\@a@ in @f (Just \@a x) = ...@. data HsConPatTyArg p = HsConPatTyArg !(XConPatTyArg p) (HsTyPat p) type family XConPatTyArg p isInvisArgPat :: Pat p -> Bool isInvisArgPat InvisPat{} = True isInvisArgPat _ = False isVisArgPat :: Pat p -> Bool isVisArgPat = not . isInvisArgPat -- | Haskell Constructor Pattern Details type HsConPatDetails p = HsConDetails (HsConPatTyArg (NoGhcTc p)) (LPat p) (HsRecFields p (LPat p)) hsConPatArgs :: forall p . (UnXRec p) => HsConPatDetails p -> [LPat p] hsConPatArgs (PrefixCon _ ps) = ps hsConPatArgs (RecCon fs) = Data.List.map (hfbRHS . unXRec @p) (rec_flds fs) hsConPatArgs (InfixCon p1 p2) = [p1,p2] hsConPatTyArgs :: forall p. HsConPatDetails p -> [HsConPatTyArg (NoGhcTc p)] hsConPatTyArgs (PrefixCon tyargs _) = tyargs hsConPatTyArgs (RecCon _) = [] hsConPatTyArgs (InfixCon _ _) = [] -- | Haskell Record Fields -- -- HsRecFields is used only for patterns and expressions (not data type -- declarations) data HsRecFields p arg -- A bunch of record fields -- { x = 3, y = True } -- Used for both expressions and patterns = HsRecFields { rec_ext :: !(XHsRecFields p), rec_flds :: [LHsRecField p arg], rec_dotdot :: Maybe (XRec p RecFieldsDotDot) } -- Note [DotDot fields] -- AZ:The XRec for LHsRecField makes the derivings fail. 
-- deriving (Functor, Foldable, Traversable) type family XHsRecFields p -- | Newtype to be able to have a specific XRec instance for the Int in `rec_dotdot` newtype RecFieldsDotDot = RecFieldsDotDot { unRecFieldsDotDot :: Int } deriving (Data, Eq, Ord) -- Note [DotDot fields] -- ~~~~~~~~~~~~~~~~~~~~ -- The rec_dotdot field means this: -- Nothing => the normal case -- Just n => the group uses ".." notation, -- -- In the latter case: -- -- *before* renamer: rec_flds are exactly the n user-written fields -- -- *after* renamer: rec_flds includes *all* fields, with -- the first 'n' being the user-written ones -- and the remainder being 'filled in' implicitly -- | Located Haskell Record Field type LHsFieldBind p id arg = XRec p (HsFieldBind id arg) -- | Located Haskell Record Field type LHsRecField p arg = XRec p (HsRecField p arg) -- | Haskell Record Field type HsRecField p arg = HsFieldBind (LFieldOcc p) arg -- | Located Haskell Record Update Field type LHsRecUpdField p q = XRec p (HsRecUpdField p q) -- | Haskell Record Update Field type HsRecUpdField p q = HsFieldBind (LFieldOcc p) (LHsExpr q) -- | Haskell Field Binding data HsFieldBind lhs rhs = HsFieldBind { hfbAnn :: XHsFieldBind lhs, hfbLHS :: lhs, hfbRHS :: rhs, -- ^ Filled in by renamer when punning hfbPun :: Bool -- ^ Note [Punning] } deriving (Functor, Foldable, Traversable) -- Note [Punning] -- ~~~~~~~~~~~~~~ -- If you write T { x, y = v+1 }, the HsRecFields will be -- HsRecField x x True ... -- HsRecField y (v+1) False ... -- That is, for "punned" field x is expanded (in the renamer) -- to x=x; but with a punning flag so we can detect it later -- (e.g. when pretty printing) -- -- If the original field was qualified, we un-qualify it, thus -- T { A.x } means T { A.x = x } -- Note [HsRecField and HsRecUpdField] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- A HsRecField (used for record construction and pattern matching) -- contains an unambiguous occurrence of a field (i.e. a FieldOcc). 
-- We can't just store the Name, because thanks to -- DuplicateRecordFields this may not correspond to the label the user -- wrote. -- -- A HsRecUpdField (used for record update) contains a potentially -- ambiguous occurrence of a field (an AmbiguousFieldOcc). The -- renamer will fill in the selector function if it can, but if the -- selector is ambiguous the renamer will defer to the typechecker. -- After the typechecker, a unique selector will have been determined. -- -- The renamer produces an Unambiguous result if it can, rather than -- just doing the lookup in the typechecker, so that completely -- unambiguous updates can be represented by 'GHC.HsToCore.Quote.repUpdFields'. -- -- For example, suppose we have: -- -- data S = MkS { x :: Int } -- data T = MkT { x :: Int } -- -- f z = (z { x = 3 }) :: S -- -- The parsed HsRecUpdField corresponding to the record update will have: -- -- hfbLHS = Unambiguous "x" noExtField :: AmbiguousFieldOcc RdrName -- -- After the renamer, this will become: -- -- hfbLHS = Ambiguous "x" noExtField :: AmbiguousFieldOcc Name -- -- (note that the Unambiguous constructor is not type-correct here). -- The typechecker will determine the particular selector: -- -- hfbLHS = Unambiguous "x" $sel:x:MkS :: AmbiguousFieldOcc Id -- -- See also Note [Disambiguating record updates] in GHC.Rename.Pat. hsRecFields :: forall p arg.UnXRec p => HsRecFields p arg -> [IdP p] hsRecFields rbinds = Data.List.map (hsRecFieldSel . unXRec @p) (rec_flds rbinds) hsRecFieldsArgs :: forall p arg. UnXRec p => HsRecFields p arg -> [arg] hsRecFieldsArgs rbinds = Data.List.map (hfbRHS . unXRec @p) (rec_flds rbinds) hsRecFieldSel :: forall p arg. UnXRec p => HsRecField p arg -> IdP p hsRecFieldSel = unXRec @p . foLabel . unXRec @p . 
hfbLHS ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Pat.hs-boot0000644000000000000000000000053507346545000023725 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TypeFamilies #-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Pat where import Language.Haskell.Syntax.Extension ( XRec ) import Data.Kind type role Pat nominal data Pat (i :: Type) type LPat i = XRec i (Pat i) ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Specificity.hs0000644000000000000000000000505607346545000024516 0ustar0000000000000000{-# LANGUAGE MultiWayIf, PatternSynonyms #-} -- TODO Everthing in this module should be moved to -- Language.Haskell.Syntax.Decls module Language.Haskell.Syntax.Specificity ( -- * ForAllTyFlags ForAllTyFlag(Invisible,Required,Specified,Inferred), Specificity(..), isVisibleForAllTyFlag, isInvisibleForAllTyFlag, isInferredForAllTyFlag, isSpecifiedForAllTyFlag, coreTyLamForAllTyFlag, ) where import Prelude import Data.Data -- | ForAllTyFlag -- -- Is something required to appear in source Haskell ('Required'), -- permitted by request ('Specified') (visible type application), or -- prohibited entirely from appearing in source Haskell ('Inferred')? -- See Note [VarBndrs, ForAllTyBinders, TyConBinders, and visibility] in "GHC.Core.TyCo.Rep" data ForAllTyFlag = Invisible !Specificity | Required deriving (Eq, Ord, Data) -- (<) on ForAllTyFlag means "is less visible than" -- | Whether an 'Invisible' argument may appear in source Haskell. data Specificity = InferredSpec -- ^ the argument may not appear in source Haskell, it is -- only inferred. | SpecifiedSpec -- ^ the argument may appear in source Haskell, but isn't -- required. 
deriving (Eq, Ord, Data) pattern Inferred, Specified :: ForAllTyFlag pattern Inferred = Invisible InferredSpec pattern Specified = Invisible SpecifiedSpec {-# COMPLETE Required, Specified, Inferred #-} -- | Does this 'ForAllTyFlag' classify an argument that is written in Haskell? isVisibleForAllTyFlag :: ForAllTyFlag -> Bool isVisibleForAllTyFlag af = not (isInvisibleForAllTyFlag af) -- | Does this 'ForAllTyFlag' classify an argument that is not written in Haskell? isInvisibleForAllTyFlag :: ForAllTyFlag -> Bool isInvisibleForAllTyFlag (Invisible {}) = True isInvisibleForAllTyFlag Required = False isInferredForAllTyFlag :: ForAllTyFlag -> Bool -- More restrictive than isInvisibleForAllTyFlag isInferredForAllTyFlag (Invisible InferredSpec) = True isInferredForAllTyFlag _ = False isSpecifiedForAllTyFlag :: ForAllTyFlag -> Bool -- More restrictive than isInvisibleForAllTyFlag isSpecifiedForAllTyFlag (Invisible SpecifiedSpec) = True isSpecifiedForAllTyFlag _ = False coreTyLamForAllTyFlag :: ForAllTyFlag -- ^ The ForAllTyFlag on a (Lam a e) term, where `a` is a type variable. -- If you want other ForAllTyFlag, use a cast. 
-- See Note [Required foralls in Core] in GHC.Core.TyCo.Rep coreTyLamForAllTyFlag = Specified ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Type.hs0000644000000000000000000014205507346545000023165 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] {-# LANGUAGE LambdaCase #-} -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.Type: Abstract syntax: user-defined types -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Type ( HsScaled(..), hsMult, hsScaledThing, HsArrow, HsArrowOf(..), XUnrestrictedArrow, XLinearArrow, XExplicitMult, XXArrow, HsType(..), LHsType, HsKind, LHsKind, HsBndrVis(..), XBndrRequired, XBndrInvisible, XXBndrVis, HsBndrVar(..), XBndrVar, XBndrWildCard, XXBndrVar, HsBndrKind(..), XBndrKind, XBndrNoKind, XXBndrKind, isHsBndrInvisible, isHsBndrWildCard, HsForAllTelescope(..), HsTyVarBndr(..), LHsTyVarBndr, LHsQTyVars(..), HsOuterTyVarBndrs(..), HsOuterFamEqnTyVarBndrs, HsOuterSigTyVarBndrs, HsWildCardBndrs(..), HsPatSigType(..), HsSigType(..), LHsSigType, LHsSigWcType, LHsWcType, HsTyPat(..), LHsTyPat, HsTupleSort(..), HsContext, LHsContext, HsTyLit(..), HsIPName(..), hsIPNameFS, HsArg(..), XValArg, XTypeArg, XArgPar, XXArg, LHsTypeArg, LBangType, BangType, HsBang(..), PromotionFlag(..), isPromoted, ConDeclField(..), LConDeclField, HsConDetails(..), noTypeArgs, FieldOcc(..), LFieldOcc, mapHsOuterImplicit, hsQTvExplicit, isHsKindedTyVar, hsPatSigType, ) where import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( HsUntypedSplice ) import Language.Haskell.Syntax.Basic ( 
HsBang(..) ) import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Specificity import GHC.Hs.Doc (LHsDoc) import GHC.Data.FastString (FastString) import Data.Data hiding ( Fixity, Prefix, Infix ) import Data.Void import Data.Maybe import Data.Eq import Data.Bool import Data.Char import Prelude (Integer) import Data.Ord (Ord) {- ************************************************************************ * * \subsection{Promotion flag} * * ************************************************************************ -} -- | Is a TyCon a promoted data constructor or just a normal type constructor? data PromotionFlag = NotPromoted | IsPromoted deriving ( Eq, Data, Ord ) isPromoted :: PromotionFlag -> Bool isPromoted IsPromoted = True isPromoted NotPromoted = False {- ************************************************************************ * * \subsection{Bang annotations} * * ************************************************************************ -} -- | Located Bang Type type LBangType pass = XRec pass (BangType pass) -- | Bang Type -- -- In the parser, strictness and packedness annotations bind more tightly -- than docstrings. This means that when consuming a 'BangType' (and looking -- for 'HsBangTy') we must be ready to peer behind a potential layer of -- 'HsDocTy'. See #15206 for motivation and 'getBangType' for an example. type BangType pass = HsType pass -- Bangs are in the HsType data type {- ************************************************************************ * * \subsection{Data types} * * ************************************************************************ This is the syntax for types as seen in type signatures. Note [HsBSig binder lists] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a binder (or pattern) decorated with a type or kind, \ (x :: a -> a). blah forall (a :: k -> *) (b :: k). 
blah Then we use a LHsBndrSig on the binder, so that the renamer can decorate it with the variables bound by the pattern ('a' in the first example, 'k' in the second), assuming that neither of them is in scope already See also Note [Kind and type-variable binders] in GHC.Rename.HsType Note [HsType binders] ~~~~~~~~~~~~~~~~~~~~~ The system for recording type and kind-variable binders in HsTypes is a bit complicated. Here's how it works. * In a HsType, HsForAllTy represents an /explicit, user-written/ 'forall' that is nested within another HsType e.g. forall a b. {...} or forall a b -> {...} Note that top-level 'forall's are represented with a different AST form. See the description of HsOuterTyVarBndrs below. HsQualTy represents an /explicit, user-written/ context e.g. (Eq a, Show a) => ... The context can be empty if that's what the user wrote These constructors represent what the user wrote, no more and no less. * The ForAllTelescope field of HsForAllTy represents whether a forall is invisible (e.g., forall a b. {...}, with a dot) or visible (e.g., forall a b -> {...}, with an arrow). * HsTyVarBndr describes a quantified type variable written by the user. For example f :: forall a (b :: *). blah here 'a' and '(b::*)' are each a HsTyVarBndr. A HsForAllTy has a list of LHsTyVarBndrs. * HsOuterTyVarBndrs is used to represent the outermost quantified type variables in a type that obeys the forall-or-nothing rule. An HsOuterTyVarBndrs can be one of the following: HsOuterImplicit (implicit quantification, added by renamer) f :: a -> a -- Desugars to f :: forall {a}. a -> a HsOuterExplicit (explicit user quantification): f :: forall a. a -> a See Note [forall-or-nothing rule]. * An HsSigType is an LHsType with an accompanying HsOuterTyVarBndrs that represents the presence (or absence) of its outermost 'forall'. See Note [Representing type signatures]. * HsWildCardBndrs is a wrapper that binds the wildcard variables of the wrapped thing. 
It is filled in by the renamer f :: _a -> _ The enclosing HsWildCardBndrs binds the wildcards _a and _. * HsSigPatType describes types that appear in pattern signatures and the signatures of term-level binders in RULES. Like HsWildCardBndrs/HsOuterTyVarBndrs, they track the names of wildcard variables and implicitly bound type variables. Unlike HsOuterTyVarBndrs, however, HsSigPatTypes do not obey the forall-or-nothing rule. See Note [Pattern signature binders and scoping]. * The explicit presence of these wrappers specifies, in the HsSyn, exactly where implicit quantification is allowed, and where wildcards are allowed. * LHsQTyVars is used in data/class declarations, where the user gives explicit *type* variable bindings, but we need to implicitly bind *kind* variables. For example class C (a :: k -> *) where ... The 'k' is implicitly bound in the hsq_tvs field of LHsQTyVars Note [The wildcard story for types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Types can have wildcards in them, to support partial type signatures, like f :: Int -> (_ , _a) -> _a A wildcard in a type can be * An anonymous wildcard, written '_' In HsType this is represented by HsWildCardTy. The renamer leaves it untouched, and it is later given a fresh meta tyvar in the typechecker. * A named wildcard, written '_a', '_foo', etc In HsType this is represented by (HsTyVar "_a") i.e. a perfectly ordinary type variable that happens to start with an underscore Note carefully: * When NamedWildCards is off, type variables that start with an underscore really /are/ ordinary type variables. And indeed, even when NamedWildCards is on you can bind _a explicitly as an ordinary type variable: data T _a _b = MkT _b _a Or even: f :: forall _a. _a -> _b Here _a is an ordinary forall'd binder, but (With NamedWildCards) _b is a named wildcard. 
(See the comments in #10982) * Named wildcards are bound by the HsWildCardBndrs (for types that obey the forall-or-nothing rule) and HsPatSigType (for type signatures in patterns and term-level binders in RULES), which wrap types that are allowed to have wildcards. Unnamed wildcards, however are left unchanged until typechecking, where we give them fresh wild tyvars and determine whether or not to emit hole constraints on each wildcard (we don't if it's a visible type/kind argument or a type family pattern). See related notes Note [Wildcards in visible kind application] and Note [Wildcards in visible type application] in GHC.Tc.Gen.HsType. * After type checking is done, we report what types the wildcards got unified with. Note [Ordering of implicit variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since the advent of -XTypeApplications, GHC makes promises about the ordering of implicit variable quantification. Specifically, we offer that implicitly quantified variables (such as those in const :: a -> b -> a, without a `forall`) will occur in left-to-right order of first occurrence. Here are a few examples: const :: a -> b -> a -- forall a b. ... f :: Eq a => b -> a -> a -- forall a b. ... contexts are included type a <-< b = b -> a g :: a <-< b -- forall a b. ... type synonyms matter class Functor f where fmap :: (a -> b) -> f a -> f b -- forall f a b. ... -- The f is quantified by the class, so only a and b are considered in fmap This simple story is complicated by the possibility of dependency: all variables must come after any variables mentioned in their kinds. typeRep :: Typeable a => TypeRep (a :: k) -- forall k a. ... The k comes first because a depends on k, even though the k appears later than the a in the code. Thus, GHC does a *stable topological sort* on the variables. By "stable", we mean that any two variables who do not depend on each other preserve their existing left-to-right ordering. 
Implicitly bound variables are collected by the extract- family of functions (extractHsTysRdrTyVars, extractHsTyVarBndrsKVs, etc.) in GHC.Rename.HsType. These functions thus promise to keep left-to-right ordering. Look for pointers to this note to see the places where the action happens. Note that we also maintain this ordering in kind signatures. Even though there's no visible kind application (yet), having implicit variables be quantified in left-to-right order in kind signatures is nice since: * It's consistent with the treatment for type signatures. * It can affect how types are displayed with -fprint-explicit-kinds (see #15568 for an example), which is a situation where knowing the order in which implicit variables are quantified can be useful. * In the event that visible kind application is implemented, the order in which we would expect implicit variables to be ordered in kinds will have already been established. -} -- | Located Haskell Context type LHsContext pass = XRec pass (HsContext pass) -- | Haskell Context type HsContext pass = [LHsType pass] -- | Located Haskell Type type LHsType pass = XRec pass (HsType pass) -- | Haskell Kind type HsKind pass = HsType pass -- | Located Haskell Kind type LHsKind pass = XRec pass (HsKind pass) -------------------------------------------------- -- LHsQTyVars -- The explicitly-quantified binders in a data/type declaration -- | The type variable binders in an 'HsForAllTy'. -- See also @Note [Variable Specificity and Forall Visibility]@ in -- "GHC.Tc.Gen.HsType". data HsForAllTelescope pass = HsForAllVis -- ^ A visible @forall@ (e.g., @forall a -> {...}@). -- These do not have any notion of specificity, so we use -- '()' as a placeholder value. { hsf_xvis :: XHsForAllVis pass , hsf_vis_bndrs :: [LHsTyVarBndr () pass] } | HsForAllInvis -- ^ An invisible @forall@ (e.g., @forall a {b} c. {...}@), -- where each binder has a 'Specificity'. 
{ hsf_xinvis :: XHsForAllInvis pass , hsf_invis_bndrs :: [LHsTyVarBndr Specificity pass] } | XHsForAllTelescope !(XXHsForAllTelescope pass) -- | Located Haskell Type Variable Binder type LHsTyVarBndr flag pass = XRec pass (HsTyVarBndr flag pass) -- See Note [HsType binders] -- | Located Haskell Quantified Type Variables data LHsQTyVars pass -- See Note [HsType binders] = HsQTvs { hsq_ext :: XHsQTvs pass , hsq_explicit :: [LHsTyVarBndr (HsBndrVis pass) pass] -- Explicit variables, written by the user } | XLHsQTyVars !(XXLHsQTyVars pass) hsQTvExplicit :: LHsQTyVars pass -> [LHsTyVarBndr (HsBndrVis pass) pass] hsQTvExplicit = hsq_explicit ------------------------------------------------ -- HsOuterTyVarBndrs -- Used to quantify the outermost type variable binders of a type that obeys -- the forall-or-nothing rule. These are used to represent the outermost -- quantification in: -- * Type signatures (LHsSigType/LHsSigWcType) -- * Patterns in a type/data family instance (HsFamEqnPats) -- -- We support two forms: -- HsOuterImplicit (implicit quantification, added by renamer) -- f :: a -> a -- Desugars to f :: forall {a}. a -> a -- type instance F (a,b) = a->b -- HsOuterExplicit (explicit user quantification): -- f :: forall a. a -> a -- type instance forall a b. F (a,b) = a->b -- -- In constrast, when the user writes /visible/ quanitification -- T :: forall k -> k -> Type -- we use use HsOuterImplicit, wrapped around a HsForAllTy -- for the visible quantification -- -- See Note [forall-or-nothing rule] -- | The outermost type variables in a type that obeys the @forall@-or-nothing -- rule. See @Note [forall-or-nothing rule]@. data HsOuterTyVarBndrs flag pass = HsOuterImplicit -- ^ Implicit forall, e.g., -- @f :: a -> b -> b@ { hso_ximplicit :: XHsOuterImplicit pass } | HsOuterExplicit -- ^ Explicit forall, e.g., -- @f :: forall a b. 
a -> b -> b@ { hso_xexplicit :: XHsOuterExplicit pass flag , hso_bndrs :: [LHsTyVarBndr flag (NoGhcTc pass)] } | XHsOuterTyVarBndrs !(XXHsOuterTyVarBndrs pass) -- | Used for signatures, e.g., -- -- @ -- f :: forall a {b}. blah -- @ -- -- We use 'Specificity' for the 'HsOuterTyVarBndrs' @flag@ to allow -- distinguishing between specified and inferred type variables. type HsOuterSigTyVarBndrs = HsOuterTyVarBndrs Specificity -- | Used for type-family instance equations, e.g., -- -- @ -- type instance forall a. F [a] = Tree a -- @ -- -- The notion of specificity is irrelevant in type family equations, so we use -- @()@ for the 'HsOuterTyVarBndrs' @flag@. type HsOuterFamEqnTyVarBndrs = HsOuterTyVarBndrs () -- | Haskell Wildcard Binders data HsWildCardBndrs pass thing -- See Note [HsType binders] -- See Note [The wildcard story for types] = HsWC { hswc_ext :: XHsWC pass thing -- after the renamer -- Wild cards, only named -- See Note [Wildcards in visible kind application] , hswc_body :: thing -- Main payload (type or list of types) -- If there is an extra-constraints wildcard, -- it's still there in the hsc_body. } | XHsWildCardBndrs !(XXHsWildCardBndrs pass thing) -- | Types that can appear in pattern signatures, as well as the signatures for -- term-level binders in RULES. -- See @Note [Pattern signature binders and scoping]@. -- -- This is very similar to 'HsSigWcType', but with -- slightly different semantics: see @Note [HsType binders]@. -- See also @Note [The wildcard story for types]@. 
data HsPatSigType pass = HsPS { hsps_ext :: XHsPS pass -- ^ After renamer: 'HsPSRn' , hsps_body :: LHsType pass -- ^ Main payload (the type itself) } | XHsPatSigType !(XXHsPatSigType pass) -- | Located Haskell Signature Type type LHsSigType pass = XRec pass (HsSigType pass) -- Implicit only -- | Located Haskell Wildcard Type type LHsWcType pass = HsWildCardBndrs pass (LHsType pass) -- Wildcard only -- | Located Haskell Signature Wildcard Type type LHsSigWcType pass = HsWildCardBndrs pass (LHsSigType pass) -- Both data HsTyPat pass = HsTP { hstp_ext :: XHsTP pass -- ^ After renamer: 'HsTyPatRn' , hstp_body :: LHsType pass -- ^ Main payload (the type itself) } | XHsTyPat !(XXHsTyPat pass) type LHsTyPat pass = XRec pass (HsTyPat pass) -- | A type signature that obeys the @forall@-or-nothing rule. In other -- words, an 'LHsType' that uses an 'HsOuterSigTyVarBndrs' to represent its -- outermost type variable quantification. -- See @Note [Representing type signatures]@. data HsSigType pass = HsSig { sig_ext :: XHsSig pass , sig_bndrs :: HsOuterSigTyVarBndrs pass , sig_body :: LHsType pass } | XHsSigType !(XXHsSigType pass) hsPatSigType :: HsPatSigType pass -> LHsType pass hsPatSigType = hsps_body {- Note [forall-or-nothing rule] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Free variables in signatures are usually bound in an implicit 'forall' at the beginning of user-written signatures. However, if the signature has an explicit, invisible forall at the beginning, this is disabled. This is referred to as the forall-or-nothing rule. The idea is nested foralls express something which is only expressible explicitly, while a top level forall could (usually) be replaced with an implicit binding. Top-level foralls alone ("forall.") are therefore an indication that the user is trying to be fastidious, so we don't implicitly bind any variables. Note that this rule only applies to outermost /in/visible 'forall's, and not outermost visible 'forall's. See #18660 for more on this point. 
Here are some concrete examples to demonstrate the forall-or-nothing rule in action: type F1 :: a -> b -> b -- Legal; a,b are implicitly quantified. -- Equivalently: forall a b. a -> b -> b type F2 :: forall a b. a -> b -> b -- Legal; explicitly quantified type F3 :: forall a. a -> b -> b -- Illegal; the forall-or-nothing rule says that -- if you quantify a, you must also quantify b type F4 :: forall a -> b -> b -- Legal; the top quantifier (forall a) is a /visible/ -- quantifier, so the "nothing" part of the forall-or-nothing -- rule applies, and b is therefore implicitly quantified. -- Equivalently: forall b. forall a -> b -> b type F5 :: forall b. forall a -> b -> c -- Illegal; the forall-or-nothing rule says that -- if you quantify b, you must also quantify c type F6 :: forall a -> forall b. b -> c -- Legal: just like F4. For a complete list of all places where the forall-or-nothing rule applies, see "The `forall`-or-nothing rule" section of the GHC User's Guide. Any type that obeys the forall-or-nothing rule is represented in the AST with an HsOuterTyVarBndrs: * If the type has an outermost, invisible 'forall', it uses HsOuterExplicit, which contains a list of the explicitly quantified type variable binders in `hso_bndrs`. After typechecking, HsOuterExplicit also stores a list of the explicitly quantified `InvisTVBinder`s in `hso_xexplicit :: XHsOuterExplicit GhcTc`. * Otherwise, it uses HsOuterImplicit. HsOuterImplicit is used for different things depending on the phase: * After parsing, it does not store anything in particular. * After renaming, it stores the implicitly bound type variable `Name`s in `hso_ximplicit :: XHsOuterImplicit GhcRn`. * After typechecking, it stores the implicitly bound `TyVar`s in `hso_ximplicit :: XHsOuterImplicit GhcTc`. NB: this implicit quantification is purely lexical: we bind any type or kind variables that are not in scope. The type checker may subsequently quantify over further kind variables. 
See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig. HsOuterTyVarBndrs GhcTc is used in the typechecker as an intermediate data type for storing the outermost TyVars/InvisTVBinders in a type. See GHC.Tc.Gen.HsType.bindOuterTKBndrsX for an example of this. Note [Representing type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HsSigType is used to represent an explicit user type signature. These are used in a variety of places. Some examples include: * Type signatures (e.g., f :: a -> a) * Standalone kind signatures (e.g., type G :: a -> a) * GADT constructor types (e.g., data T where MkT :: a -> T) A HsSigType is the combination of an HsOuterSigTyVarBndrs and an LHsType: * The HsOuterSigTyVarBndrs binds the /explicitly/ quantified type variables when the type signature has an outermost, user-written 'forall' (i.e, the HsOuterExplicit constructor is used). If there is no outermost 'forall', then it binds the /implicitly/ quantified type variables instead (i.e., the HsOuterImplicit constructor is used). * The LHsType represents the rest of the type. E.g. For a signature like f :: forall k (a::k). blah we get HsSig { sig_bndrs = HsOuterExplicit { hso_bndrs = [k, (a :: k)] } , sig_body = blah } Note [Pattern signature binders and scoping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the pattern signatures like those on `t` and `g` in: f = let h = \(t :: (b, b) -> \(g :: forall a. a -> b) -> ...(t :: (Int,Int))... in woggle * The `b` in t's pattern signature is implicitly bound and scopes over the signature and the body of the lambda. It stands for a type (any type); indeed we subsequently discover that b=Int. (See Note [TyVarTv] in GHC.Tc.Utils.TcMType for more on this point.) * The `b` in g's pattern signature is an /occurrence/ of the `b` bound by t's pattern signature. * The `a` in `forall a` scopes only over the type `a -> b`, not over the body of the lambda. 
* There is no forall-or-nothing rule for pattern signatures, which is why the type `forall a. a -> b` is permitted in `g`'s pattern signature, even though `b` is not explicitly bound. See Note [forall-or-nothing rule]. Similar scoping rules apply to term variable binders in RULES, like in the following example: {-# RULES "h" forall (t :: (b, b)) (g :: forall a. a -> b). h t g = ... #-} Just like in pattern signatures, the `b` in t's signature is implicitly bound and scopes over the remainder of the RULE. As a result, the `b` in g's signature is an occurrence. Moreover, the `a` in `forall a` scopes only over the type `a -> b`, and the forall-or-nothing rule does not apply. While quite similar, RULE term binder signatures behave slightly differently from pattern signatures in two ways: 1. Unlike in pattern signatures, where type variables can stand for any type, type variables in RULE term binder signatures are skolems. See Note [Typechecking pattern signature binders] in GHC.Tc.Gen.HsType for more on this point. In this sense, type variables in pattern signatures are quite similar to named wildcards, as both can refer to arbitrary types. The main difference lies in error reporting: if a named wildcard `_a` in a pattern signature stands for Int, then by default GHC will emit a warning stating as much. Changing `_a` to `a`, on the other hand, will cause it not to be reported. 2. In the `h` RULE above, only term variables are explicitly bound, so any free type variables in the term variables' signatures are implicitly bound. This is just like how the free type variables in pattern signatures are implicitly bound. If a RULE explicitly binds both term and type variables, however, then free type variables in term signatures are /not/ implicitly bound. For example, this RULE would be ill scoped: {-# RULES "h2" forall b. forall (t :: (b, c)) (g :: forall a. a -> b). h2 t g = ... 
#-} This is because `b` and `c` occur free in the signature for `t`, but only `b` was explicitly bound, leaving `c` out of scope. If the RULE had started with `forall b c.`, then it would have been accepted. The types in pattern signatures and RULE term binder signatures are represented in the AST by HsSigPatType. From the renamer onward, the hsps_ext field (of type HsPSRn) tracks the names of named wildcards and implicitly bound type variables so that they can be brought into scope during renaming and typechecking. Note [Lexically scoped type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ScopedTypeVariables extension does two things: * It allows the use of type signatures in patterns (e.g., `f (x :: a -> a) = ...`). See Note [Pattern signature binders and scoping] for more on this point. * It brings lexically scoped type variables into scope for certain type signatures with outermost invisible 'forall's. This Note concerns the latter bullet point. Per the "Lexically scoped type variables" section of the GHC User's Guide, the following forms of type signatures can have lexically scoped type variables: * In declarations with type signatures, e.g., f :: forall a. a -> a f x = e @a Here, the 'forall a' brings 'a' into scope over the body of 'f'. Note that ScopedTypeVariables does /not/ interact with standalone kind signatures, only type signatures. * In explicit type annotations in expressions, e.g., id @a :: forall a. a -> a * In instance declarations, e.g., instance forall a. C [a] where m = e @a Note that unlike the examples above, the use of an outermost 'forall' isn't required to bring 'a' into scope. That is, the following would also work: instance forall a. C [a] where m = e @a Note that all of the types above obey the forall-or-nothing rule. As a result, the places in the AST that can have lexically scoped type variables are a subset of the places that use HsOuterTyVarBndrs (See Note [forall-or-nothing rule].) 
Some other observations about lexically scoped type variables: * Only type variables bound by an /invisible/ forall can be lexically scoped. See Note [hsScopedTvs and visible foralls]. * The lexically scoped type variables may be a strict subset of the type variables brought into scope by a type signature. See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig. -} mapHsOuterImplicit :: (XHsOuterImplicit pass -> XHsOuterImplicit pass) -> HsOuterTyVarBndrs flag pass -> HsOuterTyVarBndrs flag pass mapHsOuterImplicit f (HsOuterImplicit{hso_ximplicit = imp}) = HsOuterImplicit{hso_ximplicit = f imp} mapHsOuterImplicit _ hso@(HsOuterExplicit{}) = hso mapHsOuterImplicit _ hso@(XHsOuterTyVarBndrs{}) = hso -------------------------------------------------- -- | These names are used early on to store the names of implicit -- parameters. They completely disappear after type-checking. newtype HsIPName = HsIPName FastString deriving( Eq, Data ) hsIPNameFS :: HsIPName -> FastString hsIPNameFS (HsIPName n) = n -------------------------------------------------- -- | Haskell Type Variable Binder -- See Note [Type variable binders] data HsTyVarBndr flag pass = HsTvb { tvb_ext :: XTyVarBndr pass , tvb_flag :: flag , tvb_var :: HsBndrVar pass , tvb_kind :: HsBndrKind pass } | XTyVarBndr !(XXTyVarBndr pass) data HsBndrVis pass = HsBndrRequired !(XBndrRequired pass) -- Binder for a visible (required) variable: -- type Dup a = (a, a) -- ^^^ | HsBndrInvisible !(XBndrInvisible pass) -- Binder for an invisible (specified) variable: -- type KindOf @k (a :: k) = k -- ^^^ | XBndrVis !(XXBndrVis pass) type family XBndrRequired p type family XBndrInvisible p type family XXBndrVis p isHsBndrInvisible :: HsBndrVis pass -> Bool isHsBndrInvisible HsBndrInvisible{} = True isHsBndrInvisible HsBndrRequired{} = False isHsBndrInvisible (XBndrVis _) = False data HsBndrVar pass = HsBndrVar !(XBndrVar pass) !(LIdP pass) | HsBndrWildCard !(XBndrWildCard pass) | XBndrVar !(XXBndrVar pass) type family 
XBndrVar p type family XBndrWildCard p type family XXBndrVar p isHsBndrWildCard :: HsBndrVar pass -> Bool isHsBndrWildCard HsBndrWildCard{} = True isHsBndrWildCard HsBndrVar{} = False isHsBndrWildCard (XBndrVar _) = False data HsBndrKind pass = HsBndrKind !(XBndrKind pass) (LHsKind pass) | HsBndrNoKind !(XBndrNoKind pass) | XBndrKind !(XXBndrKind pass) type family XBndrKind p type family XBndrNoKind p type family XXBndrKind p -- | Does this 'HsTyVarBndr' come with an explicit kind annotation? isHsKindedTyVar :: HsTyVarBndr flag pass -> Bool isHsKindedTyVar (HsTvb { tvb_kind = kind }) = case kind of HsBndrKind _ _ -> True HsBndrNoKind _ -> False XBndrKind _ -> False isHsKindedTyVar (XTyVarBndr {}) = False {- Note [Type variable binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Type variable binders, represented by the HsTyVarBndr type, can occur in the following contexts: 1. On the left-hand sides of type/class declarations (TyClDecl) data D a b = ... -- data types (DataDecl) newtype N a b = ... -- newtypes (DataDecl) type T a b = ... -- type synonyms (SynDecl) class C a b where ... -- classes (ClassDecl) type family TF a b -- type families (FamDecl) data family DF a b -- data families (FamDecl) The `a` and `b` in these examples are type variable binders. 2. In forall telescopes (HsForAllTy and HsOuterTyVarBndrs) 2-Invis. forall {a} b. ... -- invisible forall (HsForAllInvis) 2-Vis. forall a b -> ... -- visible forall (HsForAllVis) Again, `a` and `b` are type variable binders. 3. In type family result signatures (FamilyResultSig), which are part of the TypeFamilyDependencies extension type family F a = r | r -> a -- result sig (TyVarSig) The `r` immediately to the right of `=` is a type variable binder. 4. In constructor patterns, as long as the conditions outlined in Note [Type patterns: binders and unifiers] are satisfied fn (MkT @a @b x y) = ... 
-- type arguments (HsConPatTyArg) -- in constructor patterns (ConPat) Here, the `a` and `b` are type variable binders iff `GHC.Tc.Gen.HsType.tyPatToBndr` returns `Just`. A type variable binder has three parts: * flag (HsBndrVis, Specificity, or () -- depending on context) * variable (HsBndrVar) * kind (HsBndrKind) Details about each part: * The binder variable (HsBndrVar) is either a type variable name or a wildcard, i.e. `a` vs `_` (HsBndrVar vs HsBndrWildCard). * The binder kind (HsBndrKind) stores the optional kind annotation, i.e. `a` vs `a :: k` (HsBndrNoKind vs HsBndrKind). * The binder flag is instantiated to one of the following types, depending on the context where it occurs (contexts 1..4 are listed above) (a) flag=HsBndrVis records `a` vs `@a` (HsBndrRequired vs HsBndrInvisible) (used in contexts: 1) (b) flag=Specificity records `a` vs `{a}` (SpecifiedSpec vs InferredSpec) (used in contexts: 2-Invis) (c) flag=() is used when there is no distinction to record (used in contexts: 2-Vis, 3, 4) All in all, we have the following forms of type variable binders in the language a, (a :: k), @a, @(a :: k), {a}, {a :: k} _, (_ :: k), @_, @(_ :: k) The forms {_}, {_ :: k} are representable but never valid, see Note [Wildcard binders in disallowed contexts] in GHC.Hs.Type -} -- | Haskell Type data HsType pass = HsForAllTy -- See Note [HsType binders] { hst_xforall :: XForAllTy pass , hst_tele :: HsForAllTelescope pass -- Explicit, user-supplied 'forall a {b} c' , hst_body :: LHsType pass -- body type } | HsQualTy -- See Note [HsType binders] { hst_xqual :: XQualTy pass , hst_ctxt :: LHsContext pass -- Context C => blah , hst_body :: LHsType pass } | HsTyVar (XTyVar pass) PromotionFlag -- Whether explicitly promoted, -- for the pretty printer (LIdP pass) -- Type variable, type constructor, or data constructor -- see Note [Promotions (HsTyVar)] -- See Note [Located RdrNames] in GHC.Hs.Expr | HsAppTy (XAppTy pass) (LHsType pass) (LHsType pass) | HsAppKindTy (XAppKindTy 
pass) -- type level type app (LHsType pass) (LHsKind pass) | HsFunTy (XFunTy pass) (HsArrow pass) (LHsType pass) -- function type (LHsType pass) | HsListTy (XListTy pass) (LHsType pass) -- Element type | HsTupleTy (XTupleTy pass) HsTupleSort [LHsType pass] -- Element types (length gives arity) | HsSumTy (XSumTy pass) [LHsType pass] -- Element types (length gives arity) | HsOpTy (XOpTy pass) PromotionFlag -- Whether explicitly promoted, -- for the pretty printer (LHsType pass) (LIdP pass) (LHsType pass) | HsParTy (XParTy pass) (LHsType pass) -- See Note [Parens in HsSyn] in GHC.Hs.Expr -- Parenthesis preserved for the precedence re-arrangement in -- GHC.Rename.HsType -- It's important that a * (b + c) doesn't get rearranged to (a*b) + c! | HsIParamTy (XIParamTy pass) (XRec pass HsIPName) -- (?x :: ty) (LHsType pass) -- Implicit parameters as they occur in -- contexts -- ^ -- > (?x :: ty) | HsStarTy (XStarTy pass) Bool -- Is this the Unicode variant? -- Note [HsStarTy] | HsKindSig (XKindSig pass) (LHsType pass) -- (ty :: kind) (LHsKind pass) -- A type with a kind signature -- ^ -- > (ty :: kind) | HsSpliceTy (XSpliceTy pass) (HsUntypedSplice pass) -- Includes quasi-quotes | HsDocTy (XDocTy pass) (LHsType pass) (LHsDoc pass) -- A documented type | HsBangTy (XBangTy pass) -- Contains the SourceText in GHC passes. HsBang (LHsType pass) -- Bang-style type annotations | HsRecTy (XRecTy pass) [LConDeclField pass] -- Only in data type declarations | HsExplicitListTy -- A promoted explicit list (XExplicitListTy pass) PromotionFlag -- whether explicitly promoted, for pretty printer [LHsType pass] | HsExplicitTupleTy -- A promoted explicit tuple (XExplicitTupleTy pass) PromotionFlag -- whether explicitly promoted, for pretty printer [LHsType pass] | HsTyLit (XTyLit pass) (HsTyLit pass) -- A promoted numeric literal. 
| HsWildCardTy (XWildCardTy pass) -- A type wildcard -- See Note [The wildcard story for types] -- Extension point; see Note [Trees That Grow] in Language.Haskell.Syntax.Extension | XHsType !(XXType pass) -- | Haskell Type Literal data HsTyLit pass = HsNumTy (XNumTy pass) Integer | HsStrTy (XStrTy pass) FastString | HsCharTy (XCharTy pass) Char | XTyLit !(XXTyLit pass) type HsArrow pass = HsArrowOf (LHsType pass) pass -- | Denotes the type of arrows in the surface language data HsArrowOf mult pass = HsUnrestrictedArrow !(XUnrestrictedArrow mult pass) -- ^ a -> b or a → b | HsLinearArrow !(XLinearArrow mult pass) -- ^ a %1 -> b or a %1 → b, or a ⊸ b | HsExplicitMult !(XExplicitMult mult pass) !mult -- ^ a %m -> b or a %m → b (very much including `a %Many -> b`! -- This is how the programmer wrote it). It is stored as an -- `HsType` so as to preserve the syntax as written in the -- program. | XArrow !(XXArrow mult pass) type family XUnrestrictedArrow mult p type family XLinearArrow mult p type family XExplicitMult mult p type family XXArrow mult p -- | This is used in the syntax. In constructor declaration. It must keep the -- arrow representation. data HsScaled pass a = HsScaled (HsArrow pass) a hsMult :: HsScaled pass a -> HsArrow pass hsMult (HsScaled m _) = m hsScaledThing :: HsScaled pass a -> a hsScaledThing (HsScaled _ t) = t {- Note [Unit tuples] ~~~~~~~~~~~~~~~~~~ Consider the type type instance F Int = () We want to parse that "()" as HsTupleTy HsBoxedOrConstraintTuple [], NOT as HsTyVar unitTyCon Why? Because F might have kind (* -> Constraint), so we when parsing we don't know if that tuple is going to be a constraint tuple or an ordinary unit tuple. The HsTupleSort flag is specifically designed to deal with that, but it has to work for unit tuples too. Note [Promotions (HsTyVar)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ HsTyVar: A name in a type or kind. Here are the allowed namespaces for the name. 
In a type: Var: not allowed Data: promoted data constructor Tv: type variable TcCls before renamer: type constructor, class constructor, or promoted data constructor TcCls after renamer: type constructor or class constructor In a kind: Var, Data: not allowed Tv: kind variable TcCls: kind constructor or promoted type constructor The 'Promoted' field in an HsTyVar captures whether the type was promoted in the source code by prefixing an apostrophe. Note [HsStarTy] ~~~~~~~~~~~~~~~ When the StarIsType extension is enabled, we want to treat '*' and its Unicode variant identically to 'Data.Kind.Type'. Unfortunately, doing so in the parser would mean that when we pretty-print it back, we don't know whether the user wrote '*' or 'Type', and lose the parse/ppr roundtrip property. As a workaround, we parse '*' as HsStarTy (if it stands for 'Data.Kind.Type') and then desugar it to 'Data.Kind.Type' in the typechecker (see tcHsType). When '*' is a regular type operator (StarIsType is disabled), HsStarTy is not involved. Note [Promoted lists and tuples] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice the difference between HsListTy HsExplicitListTy HsTupleTy HsExplicitListTupleTy E.g. f :: [Int] HsListTy g3 :: T '[] All these use g2 :: T '[True] HsExplicitListTy g1 :: T '[True,False] g1a :: T [True,False] (can omit ' where unambiguous) kind of T :: [Bool] -> * This kind uses HsListTy! E.g. 
h :: (Int,Bool) HsTupleTy; f is a pair k :: S '(True,False) HsExplicitTypleTy; S is indexed by a type-level pair of booleans kind of S :: (Bool,Bool) -> * This kind uses HsExplicitTupleTy Note [Distinguishing tuple kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Apart from promotion, tuples can have one of three different kinds: x :: (Int, Bool) -- Regular boxed tuples f :: Int# -> (# Int#, Int# #) -- Unboxed tuples g :: (Eq a, Ord a) => a -- Constraint tuples For convenience, internally we use a single constructor for all of these, namely HsTupleTy, but keep track of the tuple kind (in the first argument to HsTupleTy, a HsTupleSort). We can tell if a tuple is unboxed while parsing, because of the #. However, with -XConstraintKinds we can only distinguish between constraint and boxed tuples during type checking, in general. Hence the two constructors of HsTupleSort: HsUnboxedTuple -> Produced by the parser HsBoxedOrConstraintTuple -> Could be a boxed or a constraint tuple. Produced by the parser only, disappears after type checking After typechecking, we use TupleSort (which clearly distinguishes between constraint tuples and boxed tuples) rather than HsTupleSort. -} -- | Haskell Tuple Sort data HsTupleSort = HsUnboxedTuple | HsBoxedOrConstraintTuple deriving Data -- | Located Constructor Declaration Field type LConDeclField pass = XRec pass (ConDeclField pass) -- | Constructor Declaration Field data ConDeclField pass -- Record fields have Haddock docs on them = ConDeclField { cd_fld_ext :: XConDeclField pass, cd_fld_names :: [LFieldOcc pass], -- ^ See Note [ConDeclField pass] cd_fld_type :: LBangType pass, cd_fld_doc :: Maybe (LHsDoc pass)} | XConDeclField !(XXConDeclField pass) -- | Describes the arguments to a data constructor. This is a common -- representation for several constructor-related concepts, including: -- -- * The arguments in a Haskell98-style constructor declaration -- (see 'HsConDeclH98Details' in "GHC.Hs.Decls"). 
-- -- * The arguments in constructor patterns in @case@/function definitions -- (see 'HsConPatDetails' in "GHC.Hs.Pat"). -- -- * The left-hand side arguments in a pattern synonym binding -- (see 'HsPatSynDetails' in "GHC.Hs.Binds"). -- -- One notable exception is the arguments in a GADT constructor, which uses -- a separate data type entirely (see 'HsConDeclGADTDetails' in -- "GHC.Hs.Decls"). This is because GADT constructors cannot be declared with -- infix syntax, unlike the concepts above (#18844). data HsConDetails tyarg arg rec = PrefixCon [tyarg] [arg] -- C @t1 @t2 p1 p2 p3 | RecCon rec -- C { x = p1, y = p2 } | InfixCon arg arg -- p1 `C` p2 deriving Data -- | An empty list that can be used to indicate that there are no -- type arguments allowed in cases where HsConDetails is applied to Void. noTypeArgs :: [Void] noTypeArgs = [] {- Note [ConDeclField pass] ~~~~~~~~~~~~~~~~~~~~~~~~~ A ConDeclField contains a list of field occurrences: these always include the field label as the user wrote it. After the renamer, it will additionally contain the identity of the selector function in the second component. Due to DuplicateRecordFields, the OccName of the selector function may have been mangled, which is why we keep the original field label separately. For example, when DuplicateRecordFields is enabled data T = MkT { x :: Int } gives ConDeclField { cd_fld_names = [L _ (FieldOcc "x" $sel:x:MkT)], ... }. -} ----------------------- -- A valid type must have a for-all at the top of the type, or of the fn arg -- types --------------------- {- Note [Scoping of named wildcards] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f :: _a -> _a f x = let g :: _a -> _a g = ... in ... Currently, for better or worse, the "_a" variables are all the same. So although there is no explicit forall, the "_a" scopes over the definition. I don't know if this is a good idea, but there it is. 
-} {- Note [hsScopedTvs and visible foralls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ScopedTypeVariables can be defined in terms of a desugaring to TypeAbstractions (GHC Proposals #155 and #448): fn :: forall a b c. tau(a,b,c) fn :: forall a b c. tau(a,b,c) fn = defn(a,b,c) ==> fn @x @y @z = defn(x,y,z) That is, for every type variable of the leading `forall` in the type signature, we add an invisible binder at the term level. This model does not extend to visible forall. (Visible forall is the one written with an arrow instead of a dot, i.e. `forall a ->`. See GHC Proposal #281 and the RequiredTypeArguments extension). Here is an example that demonstrates the issue: vfn :: forall a b -> tau(a, b) vfn = case of (p,q) -> \x y -> ... The `a` and `b` cannot scope over the equations of `vfn`. In particular, `a` and `b` cannot be in scope in because those type variables are bound by the `\x y ->`. Our solution is simple: ScopedTypeVariables has no effect on visible forall. It follows naturally from the fact that ScopedTypeVariables is already subject to several restrictions: 1. The type signature must be headed by an /explicit/ forall * `f :: forall a. a -> blah` brings `a` into scope in the body * `f :: a -> blah` does not 2. The forall is /not nested/ * `f :: forall a b. blah` brings `a` and `b` into scope in the body * `f :: forall a. forall b. blah` brings `a` but not `b` into scope in the body With the introduction of visible forall, we also introduce a third condition: 3. The forall has to be /invisible/ * `f :: forall a b. blah` brings `a` and `b` into scope in the body * `f :: forall a b -> blah` does not For example: f1 :: forall a. a -> a f1 x = (x::a) -- OK: `a` is in scope in the body f2 :: forall a b. a -> b -> (a, b) f2 x y = (x::a, y::b) -- OK: both `a` and `b` are in scope in the body f3 :: forall a. forall b. 
a -> b -> (a, b) f3 x y = (x::a, y::b) -- Wrong: the `forall b.` is not the outermost forall f4 :: forall a -> a -> a f4 t (x::t) = (x::a) -- Wrong: the `forall a ->` does not bring `a` into scope This design choice is reflected in the definition of HsOuterSigTyVarBndrs, which are used in every place where ScopedTypeVariables takes effect: data HsOuterTyVarBndrs flag pass = HsOuterImplicit { ... } | HsOuterExplicit { ..., hso_bndrs :: [LHsTyVarBndr flag pass] } | ... type HsOuterSigTyVarBndrs = HsOuterTyVarBndrs Specificity The HsOuterExplicit constructor is only used in type signatures with outermost, /invisible/ 'forall's. Any other type—including those with outermost, /visible/ 'forall's—will use HsOuterImplicit. Therefore, when we determine which type variables to bring into scope over the body of a function (in hsScopedTvs), we /only/ bring the type variables bound by the hso_bndrs in an HsOuterExplicit into scope. If we have an HsOuterImplicit instead, then we do not bring any type variables into scope over the body of a function at all. -} {- ************************************************************************ * * Decomposing HsTypes * * ************************************************************************ -} -- | Arguments in an expression/type after splitting data HsArg p tm ty = HsValArg !(XValArg p) tm -- Argument is an ordinary expression (f arg) | HsTypeArg !(XTypeArg p) ty -- Argument is a visible type application (f @ty) | HsArgPar !(XArgPar p) -- See Note [HsArgPar] | XArg !(XXArg p) type family XValArg p type family XTypeArg p type family XArgPar p type family XXArg p -- type level equivalent type LHsTypeArg p = HsArg p (LHsType p) (LHsKind p) {- Note [HsArgPar] ~~~~~~~~~~~~~~~ A HsArgPar indicates that everything to the left of this in the argument list is enclosed in parentheses together with the function itself. It is necessary so that we can recreate the parenthesis structure in the original source after typechecking the arguments. 
The SrcSpan is the span of the original HsPar ((f arg1) arg2 arg3) results in an input argument list of [HsValArg arg1, HsArgPar span1, HsValArg arg2, HsValArg arg3, HsArgPar span2] -} {- ************************************************************************ * * FieldOcc * * ************************************************************************ -} -- | Located Field Occurrence type LFieldOcc pass = XRec pass (FieldOcc pass) -- | Field Occurrence -- -- Represents an *occurrence* of a field. This may or may not be a -- binding occurrence (e.g. this type is used in 'ConDeclField' and -- 'RecordPatSynField' which bind their fields, but also in -- 'HsRecField' for record construction and patterns, which do not). -- -- We store both the 'RdrName' the user originally wrote, and after -- the renamer we use the extension field to store the selector -- function. -- -- There is a wrinkle in that update field occurances are sometimes -- ambiguous during the rename stage. See note -- [Ambiguous FieldOcc in record updates] to see how we currently -- handle this. data FieldOcc pass = FieldOcc { foExt :: XCFieldOcc pass , foLabel :: LIdP pass } | XFieldOcc !(XXFieldOcc pass) deriving instance ( Eq (LIdP pass) , Eq (XCFieldOcc pass) , Eq (XXFieldOcc pass) ) => Eq (FieldOcc pass) {- ************************************************************************ * * \subsection{Pretty printing} * * ************************************************************************ -} ghc-lib-parser-9.12.2.20250421/compiler/Language/Haskell/Syntax/Type.hs-boot0000644000000000000000000000117707346545000024125 0ustar0000000000000000module Language.Haskell.Syntax.Type where import Data.Bool import Data.Eq import Data.Ord {- ************************************************************************ * * \subsection{Promotion flag} * * ************************************************************************ -} -- | Is a TyCon a promoted data constructor or just a normal type constructor? 
data PromotionFlag = NotPromoted | IsPromoted instance Eq PromotionFlag instance Ord PromotionFlag isPromoted :: PromotionFlag -> Bool ghc-lib-parser-9.12.2.20250421/compiler/MachRegs.h0000644000000000000000000001405607346545000017155 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2014 * * Registers used in STG code. Might or might not correspond to * actual machine registers. * * Do not #include this file directly: #include "Rts.h" instead. * * To understand the structure of the RTS headers, see the wiki: * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/source-tree/includes * * ---------------------------------------------------------------------------*/ #pragma once /* This file is #included into Haskell code in the compiler: #defines * only in here please. */ /* * Undefine these as a precaution: some of them were found to be * defined by system headers on ARM/Linux. */ #undef REG_R1 #undef REG_R2 #undef REG_R3 #undef REG_R4 #undef REG_R5 #undef REG_R6 #undef REG_R7 #undef REG_R8 #undef REG_R9 #undef REG_R10 /* * Defining MACHREGS_NO_REGS to 1 causes no global registers to be used. * MACHREGS_NO_REGS is typically controlled by NO_REGS, which is * typically defined by GHC, via a command-line option passed to gcc, * when the -funregisterised flag is given. * * NB. When MACHREGS_NO_REGS to 1, calling & return conventions may be * different. For example, all function arguments will be passed on * the stack, and components of an unboxed tuple will be returned on * the stack rather than in registers. */ #if MACHREGS_NO_REGS == 1 /* Nothing */ #elif MACHREGS_NO_REGS == 0 /* ---------------------------------------------------------------------------- Note [Caller saves and callee-saves regs.] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Caller-saves regs have to be saved around C-calls made from STG land, so this file defines CALLER_SAVES_ for each that is designated caller-saves in that machine's C calling convention. NB: Caller-saved registers not mapped to a STG register don't require a CALLER_SAVES_ define. As it stands, the only registers that are ever marked caller saves are the RX, FX, DX, XMM and USER registers; as a result, if you decide to caller save a system register (e.g. SP, HP, etc), note that this code path is completely untested! -- EZY See Note [Register parameter passing] for details. -------------------------------------------------------------------------- */ /* Define STG <-> machine register mappings. */ #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) #include "MachRegs/x86.h" #elif defined(MACHREGS_powerpc) #include "MachRegs/ppc.h" #elif defined(MACHREGS_arm) #include "MachRegs/arm32.h" #elif defined(MACHREGS_aarch64) #include "MachRegs/arm64.h" #elif defined(MACHREGS_s390x) #include "MachRegs/s390x.h" #elif defined(MACHREGS_riscv64) #include "MachRegs/riscv64.h" #elif defined(MACHREGS_wasm32) #include "MachRegs/wasm32.h" #elif defined(MACHREGS_loongarch64) #include "MachRegs/loongarch64.h" #else #error Cannot find platform to give register info for #endif #else #error Bad MACHREGS_NO_REGS value #endif /* ----------------------------------------------------------------------------- * These constants define how many stg registers will be used for * passing arguments (and results, in the case of an unboxed-tuple * return). * * We usually set MAX_REAL_VANILLA_REG and co. to be the number of the * highest STG register to occupy a real machine register, otherwise * the calling conventions will needlessly shuffle data between the * stack and memory-resident STG registers. We might occasionally * set these macros to other values for testing, though. 
* * Registers above these values might still be used, for instance to * communicate with PrimOps and RTS functions. */ #if !defined(MAX_REAL_VANILLA_REG) # if defined(REG_R10) # define MAX_REAL_VANILLA_REG 10 # elif defined(REG_R9) # define MAX_REAL_VANILLA_REG 9 # elif defined(REG_R8) # define MAX_REAL_VANILLA_REG 8 # elif defined(REG_R7) # define MAX_REAL_VANILLA_REG 7 # elif defined(REG_R6) # define MAX_REAL_VANILLA_REG 6 # elif defined(REG_R5) # define MAX_REAL_VANILLA_REG 5 # elif defined(REG_R4) # define MAX_REAL_VANILLA_REG 4 # elif defined(REG_R3) # define MAX_REAL_VANILLA_REG 3 # elif defined(REG_R2) # define MAX_REAL_VANILLA_REG 2 # elif defined(REG_R1) # define MAX_REAL_VANILLA_REG 1 # else # define MAX_REAL_VANILLA_REG 0 # endif #endif #if !defined(MAX_REAL_FLOAT_REG) # if defined(REG_F7) # error Please manually define MAX_REAL_FLOAT_REG for this architecture # elif defined(REG_F6) # define MAX_REAL_FLOAT_REG 6 # elif defined(REG_F5) # define MAX_REAL_FLOAT_REG 5 # elif defined(REG_F4) # define MAX_REAL_FLOAT_REG 4 # elif defined(REG_F3) # define MAX_REAL_FLOAT_REG 3 # elif defined(REG_F2) # define MAX_REAL_FLOAT_REG 2 # elif defined(REG_F1) # define MAX_REAL_FLOAT_REG 1 # else # define MAX_REAL_FLOAT_REG 0 # endif #endif #if !defined(MAX_REAL_DOUBLE_REG) # if defined(REG_D7) # error Please manually define MAX_REAL_DOUBLE_REG for this architecture # elif defined(REG_D6) # define MAX_REAL_DOUBLE_REG 6 # elif defined(REG_D5) # define MAX_REAL_DOUBLE_REG 5 # elif defined(REG_D4) # define MAX_REAL_DOUBLE_REG 4 # elif defined(REG_D3) # define MAX_REAL_DOUBLE_REG 3 # elif defined(REG_D2) # define MAX_REAL_DOUBLE_REG 2 # elif defined(REG_D1) # define MAX_REAL_DOUBLE_REG 1 # else # define MAX_REAL_DOUBLE_REG 0 # endif #endif #if !defined(MAX_REAL_LONG_REG) # if defined(REG_L1) # define MAX_REAL_LONG_REG 1 # else # define MAX_REAL_LONG_REG 0 # endif #endif #if !defined(MAX_REAL_XMM_REG) # if defined(REG_XMM6) # define MAX_REAL_XMM_REG 6 # elif defined(REG_XMM5) 
# define MAX_REAL_XMM_REG 5 # elif defined(REG_XMM4) # define MAX_REAL_XMM_REG 4 # elif defined(REG_XMM3) # define MAX_REAL_XMM_REG 3 # elif defined(REG_XMM2) # define MAX_REAL_XMM_REG 2 # elif defined(REG_XMM1) # define MAX_REAL_XMM_REG 1 # else # define MAX_REAL_XMM_REG 0 # endif #endif /* define NO_ARG_REGS if we have no argument registers at all (we can * optimise certain code paths using this predicate). */ #if MAX_REAL_VANILLA_REG < 2 #define NO_ARG_REGS #else #undef NO_ARG_REGS #endif ghc-lib-parser-9.12.2.20250421/compiler/Unique.h0000644000000000000000000000024007346545000016720 0ustar0000000000000000/* unique has the following structure: * HsInt unique = * (unique_tag << (sizeof (HsInt) - UNIQUE_TAG_BITS)) | unique_number */ #define UNIQUE_TAG_BITS 8 ghc-lib-parser-9.12.2.20250421/compiler/cbits/0000755000000000000000000000000007346545000016411 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/compiler/cbits/cutils.c0000644000000000000000000000106507346545000020062 0ustar0000000000000000/* These utility routines are used various places in the GHC library. */ #include #include void ghc_lib_parser_enableTimingStats( void ) /* called from the driver */ { RtsFlags.GcFlags.giveStats = ONELINE_GC_STATS; } void ghc_lib_parser_setHeapSize( HsInt size ) { RtsFlags.GcFlags.heapSizeSuggestion = size / BLOCK_SIZE; if (RtsFlags.GcFlags.maxHeapSize != 0 && RtsFlags.GcFlags.heapSizeSuggestion > RtsFlags.GcFlags.maxHeapSize) { RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion; } } ghc-lib-parser-9.12.2.20250421/compiler/cbits/genSym.c0000644000000000000000000000123607346545000020021 0ustar0000000000000000#include #include #include "Unique.h" #include // These global variables have been moved into the RTS. It allows them to be // shared with plugins even if two different instances of the GHC library are // loaded at the same time (#19940) // // The CPP is thus about the RTS version GHC is linked against, and not the // version of the GHC being built. 
#if !MIN_VERSION_GLASGOW_HASKELL(9,6,7,0) HsWord64 ghc_unique_counter64 = 0; #elif MIN_VERSION_GLASGOW_HASKELL(9,8,0,0) && !MIN_VERSION_GLASGOW_HASKELL(9,8,4,0) HsWord64 ghc_unique_counter64 = 0; #endif #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) HsInt ghc_unique_inc = 1; #endif ghc-lib-parser-9.12.2.20250421/compiler/cbits/keepCAFsForGHCi.c0000644000000000000000000000250507346545000021342 0ustar0000000000000000#include #include // Note [ghc_lib_parser_keepCAFsForGHCi] // ~~~~~~~~~~~~~~~~~~~~~~ // This file is only included in the dynamic library. // It contains an __attribute__((constructor)) function (run prior to main()) // which sets the keepCAFs flag in the RTS, before any Haskell code is run. // This is required so that GHCi can use dynamic libraries instead of HSxyz.o // files. // // For static builds we have to guarantee that the linker loads this object file // to ensure the constructor gets run and not discarded. If the object is part of // an archive and not otherwise referenced the linker would ignore the object. // To avoid this: // * When initializing a GHC session in initGhcMonad we assert keeping cafs has been // enabled by calling ghc_lib_parser_keepCAFsForGHCi. // * This causes the GHC module from the ghc package to carry a reference to this object // file. // * Which in turn ensures the linker doesn't discard this object file, causing // the constructor to be run, allowing the assertion to succeed in the first place // as keepCAFs will have been set already during initialization of constructors. bool ghc_lib_parser_keepCAFsForGHCi(void) __attribute__((constructor)); bool ghc_lib_parser_keepCAFsForGHCi(void) { bool was_set = keepCAFs; setKeepCAFs(); return was_set; } ghc-lib-parser-9.12.2.20250421/compiler/ghc.cabal0000644000000000000000000007004207346545000017035 0ustar0000000000000000Cabal-Version: 2.2 -- WARNING: ghc.cabal is automatically generated from ghc.cabal.in by -- ./configure. Make sure you are editing ghc.cabal.in, not ghc.cabal. 
Name: ghc Version: 9.12.2 License: BSD-3-Clause License-File: LICENSE Author: The GHC Team Maintainer: glasgow-haskell-users@haskell.org Homepage: http://www.haskell.org/ghc/ Synopsis: The GHC API Description: GHC's functionality can be useful for more things than just compiling Haskell programs. Important use cases are programs that analyse (and perhaps transform) Haskell code. Others include loading Haskell code dynamically in a GHCi-like manner. For this reason, a lot of GHC's functionality is made available through this package. . See for more information. . __This package is not PVP-compliant.__ . This package directly exposes GHC internals, which can and do change with every release. Category: Development Build-Type: Custom extra-source-files: GHC/Builtin/primops.txt.pp Unique.h CodeGen.Platform.h -- Shared with rts via hard-link at configure time. This is safer -- for Windows, where symlinks don't work out of the box, so we -- can't just commit some in git. Bytecodes.h ClosureTypes.h FunTypes.h MachRegs.h MachRegs/arm32.h MachRegs/arm64.h MachRegs/loongarch64.h MachRegs/ppc.h MachRegs/riscv64.h MachRegs/s390x.h MachRegs/wasm32.h MachRegs/x86.h custom-setup setup-depends: base >= 3 && < 5, Cabal >= 1.6 && <3.10, directory, process, filepath, containers Flag internal-interpreter Description: Build with internal interpreter support. Default: False Manual: True Flag dynamic-system-linker Description: The system can load dynamic code. This is not the case for musl. Default: True Manual: True -- hadrian disables this flag because the user may be building from a source -- distribution where the parser has already been generated. 
Flag build-tool-depends Description: Use build-tool-depends Default: True Flag with-libzstd Default: False Manual: True Flag static-libzstd Default: False Manual: True -- While the boot compiler fixes ghc's unit-id to `ghc`, the stage0 compiler must still be compiled with `-this-unit-id ghc` Flag hadrian-stage0 Description: Enable if compiling the stage0 compiler with hadrian Default: False Manual: True Flag bootstrap Description: Enabled when building the stage1 compiler in order to vendor the in-tree `ghc-boot-th` library, and through that the in-tree TH AST defintions from `ghc-internal`. See Note [Bootstrapping Template Haskell] Default: False Manual: True Library Default-Language: GHC2021 Exposed: False Includes: Unique.h -- CodeGen.Platform.h -- invalid as C, skip -- shared with rts via symlink Bytecodes.h ClosureTypes.h FunTypes.h if flag(build-tool-depends) build-tool-depends: alex:alex >= 3.2.6, happy:happy >= 1.20.0, genprimopcode:genprimopcode, deriveConstants:deriveConstants if flag(with-libzstd) if flag(static-libzstd) if os(darwin) buildable: False else extra-libraries: :libzstd.a else extra-libraries: zstd CPP-Options: -DHAVE_LIBZSTD Build-Depends: base >= 4.11 && < 4.22, deepseq >= 1.4 && < 1.6, directory >= 1 && < 1.4, process >= 1 && < 1.7, bytestring >= 0.11 && < 0.13, binary == 0.8.*, time >= 1.4 && < 1.15, containers >= 0.6.2.1 && < 0.8, array >= 0.1 && < 0.6, filepath >= 1.5 && < 1.6, os-string >= 2.0.1 && < 2.1, hpc >= 0.6 && < 0.8, transformers >= 0.5 && < 0.7, exceptions == 0.10.*, semaphore-compat, stm, ghc-boot == 9.12.2, ghc-heap == 9.12.2, ghci == 9.12.2 if flag(bootstrap) Build-Depends: ghc-boot-th-next == 9.12.2 else Build-Depends: ghc-boot-th == 9.12.2 if os(windows) Build-Depends: Win32 >= 2.3 && < 2.15 else Build-Depends: unix >= 2.7 && < 2.9 GHC-Options: -Wall -Wno-name-shadowing -Wnoncanonical-monad-instances -Wnoncanonical-monoid-instances if flag(internal-interpreter) CPP-Options: -DHAVE_INTERNAL_INTERPRETER -- if no dynamic 
system linker is available, don't try DLLs. if flag(dynamic-system-linker) CPP-Options: -DCAN_LOAD_DLL Other-Extensions: CPP DataKinds DeriveDataTypeable DeriveFoldable DeriveFunctor DeriveTraversable DisambiguateRecordFields ExplicitForAll FlexibleContexts FlexibleInstances GADTs GeneralizedNewtypeDeriving MagicHash MultiParamTypeClasses NamedFieldPuns NondecreasingIndentation RankNTypes RecordWildCards StandaloneDeriving Trustworthy TupleSections TypeFamilies TypeSynonymInstances UnboxedTuples UndecidableInstances Include-Dirs: . if flag(hadrian-stage0) -- We need to set the unit id to ghc (without a version number) -- as it's magic. GHC-Options: -this-unit-id ghc if arch(javascript) js-sources: jsbits/genSym.js else c-sources: cbits/cutils.c cbits/genSym.c cbits/keepCAFsForGHCi.c hs-source-dirs: . -- we use an explicit Prelude Default-Extensions: NoImplicitPrelude ,MonoLocalBinds Exposed-Modules: GHC GHC.Builtin.Names GHC.Builtin.Names.TH GHC.Builtin.PrimOps GHC.Builtin.PrimOps.Casts GHC.Builtin.PrimOps.Ids GHC.Builtin.Types GHC.Builtin.Types.Literals GHC.Builtin.Types.Prim GHC.Builtin.Uniques GHC.Builtin.Utils GHC.ByteCode.Asm GHC.ByteCode.InfoTable GHC.ByteCode.Instr GHC.ByteCode.Linker GHC.ByteCode.Types GHC.Cmm GHC.Cmm.BlockId GHC.Cmm.CallConv GHC.Cmm.CLabel GHC.Cmm.CommonBlockElim GHC.Cmm.Config GHC.Cmm.ContFlowOpt GHC.Cmm.Dataflow GHC.Cmm.Dataflow.Block GHC.Cmm.Dataflow.Graph GHC.Cmm.Dataflow.Label GHC.Cmm.DebugBlock GHC.Cmm.Expr GHC.Cmm.GenericOpt GHC.Cmm.Graph GHC.Cmm.Info GHC.Cmm.Info.Build GHC.Cmm.InitFini GHC.Cmm.LayoutStack GHC.Cmm.Lexer GHC.Cmm.Lint GHC.Cmm.Liveness GHC.Cmm.MachOp GHC.Cmm.Node GHC.Cmm.Opt GHC.Cmm.Parser GHC.Cmm.Parser.Config GHC.Cmm.Parser.Monad GHC.Cmm.Pipeline GHC.Cmm.ProcPoint GHC.Cmm.Reg GHC.Cmm.Sink GHC.Cmm.Switch GHC.Cmm.Switch.Implement GHC.Cmm.ThreadSanitizer GHC.Cmm.UniqueRenamer GHC.CmmToAsm GHC.Cmm.LRegSet GHC.CmmToAsm.AArch64 GHC.CmmToAsm.AArch64.CodeGen GHC.CmmToAsm.AArch64.Cond GHC.CmmToAsm.AArch64.Instr 
GHC.CmmToAsm.AArch64.Ppr GHC.CmmToAsm.AArch64.RegInfo GHC.CmmToAsm.AArch64.Regs GHC.CmmToAsm.BlockLayout GHC.CmmToAsm.CFG GHC.CmmToAsm.CFG.Dominators GHC.CmmToAsm.CFG.Weight GHC.CmmToAsm.Config GHC.CmmToAsm.CPrim GHC.CmmToAsm.Dwarf GHC.CmmToAsm.Dwarf.Constants GHC.CmmToAsm.Dwarf.Types GHC.CmmToAsm.Format GHC.CmmToAsm.Instr GHC.CmmToAsm.Monad GHC.CmmToAsm.PIC GHC.CmmToAsm.PPC GHC.CmmToAsm.PPC.CodeGen GHC.CmmToAsm.PPC.Cond GHC.CmmToAsm.PPC.Instr GHC.CmmToAsm.PPC.Ppr GHC.CmmToAsm.PPC.RegInfo GHC.CmmToAsm.PPC.Regs GHC.CmmToAsm.Ppr GHC.CmmToAsm.Reg.Graph GHC.CmmToAsm.Reg.Graph.Base GHC.CmmToAsm.Reg.Graph.Coalesce GHC.CmmToAsm.Reg.Graph.Spill GHC.CmmToAsm.Reg.Graph.SpillClean GHC.CmmToAsm.Reg.Graph.SpillCost GHC.CmmToAsm.Reg.Graph.Stats GHC.CmmToAsm.Reg.Graph.TrivColorable GHC.CmmToAsm.Reg.Graph.X86 GHC.CmmToAsm.Reg.Linear GHC.CmmToAsm.Reg.Linear.AArch64 GHC.CmmToAsm.Reg.Linear.Base GHC.CmmToAsm.Reg.Linear.FreeRegs GHC.CmmToAsm.Reg.Linear.JoinToTargets GHC.CmmToAsm.Reg.Linear.PPC GHC.CmmToAsm.Reg.Linear.RV64 GHC.CmmToAsm.Reg.Linear.StackMap GHC.CmmToAsm.Reg.Linear.State GHC.CmmToAsm.Reg.Linear.Stats GHC.CmmToAsm.Reg.Linear.X86 GHC.CmmToAsm.Reg.Linear.X86_64 GHC.CmmToAsm.Reg.Liveness GHC.CmmToAsm.Reg.Target GHC.CmmToAsm.Reg.Utils GHC.CmmToAsm.RV64 GHC.CmmToAsm.RV64.CodeGen GHC.CmmToAsm.RV64.Cond GHC.CmmToAsm.RV64.Instr GHC.CmmToAsm.RV64.Ppr GHC.CmmToAsm.RV64.RegInfo GHC.CmmToAsm.RV64.Regs GHC.CmmToAsm.Types GHC.CmmToAsm.Utils GHC.CmmToAsm.X86 GHC.CmmToAsm.X86.CodeGen GHC.CmmToAsm.X86.Cond GHC.CmmToAsm.X86.Instr GHC.CmmToAsm.X86.Ppr GHC.CmmToAsm.X86.RegInfo GHC.CmmToAsm.X86.Regs GHC.CmmToC GHC.CmmToLlvm GHC.CmmToLlvm.Base GHC.CmmToLlvm.CodeGen GHC.CmmToLlvm.Config GHC.CmmToLlvm.Data GHC.CmmToLlvm.Mangler GHC.CmmToLlvm.Ppr GHC.CmmToLlvm.Regs GHC.CmmToLlvm.Version GHC.CmmToLlvm.Version.Bounds GHC.CmmToLlvm.Version.Type GHC.Cmm.Dominators GHC.Cmm.Reducibility GHC.Cmm.Type GHC.Cmm.Utils GHC.Core GHC.Core.Class GHC.Core.Coercion GHC.Core.Coercion.Axiom GHC.Core.Coercion.Opt 
GHC.Core.ConLike GHC.Core.DataCon GHC.Core.FamInstEnv GHC.Core.FVs GHC.Core.InstEnv GHC.Core.Lint GHC.Core.Lint.Interactive GHC.Core.LateCC GHC.Core.LateCC.Types GHC.Core.LateCC.TopLevelBinds GHC.Core.LateCC.Utils GHC.Core.LateCC.OverloadedCalls GHC.Core.Make GHC.Core.Map.Expr GHC.Core.Map.Type GHC.Core.Multiplicity GHC.Core.Opt.Arity GHC.Core.Opt.CallArity GHC.Core.Opt.CallerCC GHC.Core.Opt.CallerCC.Types GHC.Core.Opt.ConstantFold GHC.Core.Opt.CprAnal GHC.Core.Opt.CSE GHC.Core.Opt.DmdAnal GHC.Core.Opt.Exitify GHC.Core.Opt.FloatIn GHC.Core.Opt.FloatOut GHC.Core.Opt.LiberateCase GHC.Core.Opt.Monad GHC.Core.Opt.OccurAnal GHC.Core.Opt.Pipeline GHC.Core.Opt.Pipeline.Types GHC.Core.Opt.SetLevels GHC.Core.Opt.Simplify GHC.Core.Opt.Simplify.Env GHC.Core.Opt.Simplify.Inline GHC.Core.Opt.Simplify.Iteration GHC.Core.Opt.Simplify.Monad GHC.Core.Opt.Simplify.Utils GHC.Core.Opt.SpecConstr GHC.Core.Opt.Specialise GHC.Core.Opt.StaticArgs GHC.Core.Opt.Stats GHC.Core.Opt.WorkWrap GHC.Core.Opt.WorkWrap.Utils GHC.Core.PatSyn GHC.Core.Ppr GHC.Types.TyThing.Ppr GHC.Core.Predicate GHC.Core.Reduction GHC.Core.Rules GHC.Core.Rules.Config GHC.Core.Seq GHC.Core.SimpleOpt GHC.Core.Stats GHC.Core.Subst GHC.Core.Tidy GHC.CoreToIface GHC.CoreToStg GHC.CoreToStg.Prep GHC.Core.TyCo.FVs GHC.Core.TyCo.Compare GHC.Core.TyCon GHC.Core.TyCon.Env GHC.Core.TyCon.RecWalk GHC.Core.TyCon.Set GHC.Core.TyCo.Ppr GHC.Core.TyCo.Rep GHC.Core.TyCo.Subst GHC.Core.TyCo.Tidy GHC.Core.Type GHC.Core.RoughMap GHC.Core.Unfold GHC.Core.Unfold.Make GHC.Core.Unify GHC.Core.UsageEnv GHC.Core.Utils GHC.Data.Bag GHC.Data.Bitmap GHC.Data.Bool GHC.Data.BooleanFormula GHC.Data.EnumSet GHC.Data.FastMutInt GHC.Data.FastString GHC.Data.FastString.Env GHC.Data.FiniteMap GHC.Data.FlatBag GHC.Data.Graph.Base GHC.Data.Graph.Color GHC.Data.Graph.Collapse GHC.Data.Graph.Directed GHC.Data.Graph.Inductive.Graph GHC.Data.Graph.Inductive.PatriciaTree GHC.Data.Graph.Ops GHC.Data.Graph.Ppr GHC.Data.Graph.UnVar GHC.Data.IOEnv 
GHC.Data.List.Infinite GHC.Data.List.SetOps GHC.Data.Maybe GHC.Data.OrdList GHC.Data.OsPath GHC.Data.Pair GHC.Data.SmallArray GHC.Data.Stream GHC.Data.Strict GHC.Data.StringBuffer GHC.Data.TrieMap GHC.Data.Unboxed GHC.Data.UnionFind GHC.Data.Word64Set GHC.Data.Word64Set.Internal GHC.Data.Word64Map GHC.Data.Word64Map.Internal GHC.Data.Word64Map.Lazy GHC.Data.Word64Map.Strict GHC.Data.Word64Map.Strict.Internal GHC.Driver.Backend GHC.Driver.Backend.Internal GHC.Driver.Backpack GHC.Driver.Backpack.Syntax GHC.Driver.CmdLine GHC.Driver.CodeOutput GHC.Driver.Config GHC.Driver.Config.Cmm GHC.Driver.Config.Cmm.Parser GHC.Driver.Config.CmmToAsm GHC.Driver.Config.CmmToLlvm GHC.Driver.Config.Core.Lint GHC.Driver.Config.Core.Lint.Interactive GHC.Driver.Config.Core.Opt.Arity GHC.Driver.Config.Core.Opt.LiberateCase GHC.Driver.Config.Core.Opt.Simplify GHC.Driver.Config.Core.Opt.WorkWrap GHC.Driver.Config.Core.Rules GHC.Driver.Config.CoreToStg GHC.Driver.Config.CoreToStg.Prep GHC.Driver.Config.Diagnostic GHC.Driver.Config.Finder GHC.Driver.Config.HsToCore GHC.Driver.Config.HsToCore.Ticks GHC.Driver.Config.HsToCore.Usage GHC.Driver.Config.Linker GHC.Driver.Config.Logger GHC.Driver.Config.Parser GHC.Driver.Config.Stg.Debug GHC.Driver.Config.Stg.Lift GHC.Driver.Config.Stg.Pipeline GHC.Driver.Config.Stg.Ppr GHC.Driver.Config.StgToCmm GHC.Driver.Config.Tidy GHC.Driver.Config.StgToJS GHC.Driver.DynFlags GHC.Driver.Env GHC.Driver.Env.KnotVars GHC.Driver.Env.Types GHC.Driver.Errors GHC.Driver.Errors.Ppr GHC.Driver.Errors.Types GHC.Driver.Flags GHC.Driver.GenerateCgIPEStub GHC.Driver.Hooks GHC.Driver.LlvmConfigCache GHC.Driver.MakeSem GHC.Driver.Main GHC.Driver.Make GHC.Driver.MakeFile GHC.Driver.Monad GHC.Driver.Phases GHC.Driver.Pipeline GHC.Driver.Pipeline.Execute GHC.Driver.Pipeline.LogQueue GHC.Driver.Pipeline.Phases GHC.Driver.Pipeline.Monad GHC.Driver.Plugins GHC.Driver.Plugins.External GHC.Driver.Ppr GHC.Driver.Session GHC.Hs GHC.Hs.Basic GHC.Hs.Binds GHC.Hs.Decls GHC.Hs.Doc 
GHC.Hs.DocString GHC.Hs.Dump GHC.Hs.Expr GHC.Hs.Syn.Type GHC.Hs.Extension GHC.Hs.ImpExp GHC.Hs.Instances GHC.Hs.Lit GHC.Hs.Pat GHC.Hs.Specificity GHC.Hs.Stats GHC.HsToCore GHC.HsToCore.Arrows GHC.HsToCore.Binds GHC.HsToCore.Breakpoints GHC.HsToCore.Coverage GHC.HsToCore.Docs GHC.HsToCore.Errors.Ppr GHC.HsToCore.Errors.Types GHC.HsToCore.Expr GHC.HsToCore.Foreign.C GHC.HsToCore.Foreign.Call GHC.HsToCore.Foreign.Decl GHC.HsToCore.Foreign.JavaScript GHC.HsToCore.Foreign.Prim GHC.HsToCore.Foreign.Utils GHC.HsToCore.Foreign.Wasm GHC.HsToCore.GuardedRHSs GHC.HsToCore.ListComp GHC.HsToCore.Match GHC.HsToCore.Match.Constructor GHC.HsToCore.Match.Literal GHC.HsToCore.Monad GHC.HsToCore.Pmc GHC.HsToCore.Pmc.Check GHC.HsToCore.Pmc.Desugar GHC.HsToCore.Pmc.Ppr GHC.HsToCore.Pmc.Solver GHC.HsToCore.Pmc.Solver.Types GHC.HsToCore.Pmc.Types GHC.HsToCore.Pmc.Utils GHC.HsToCore.Quote GHC.HsToCore.Ticks GHC.HsToCore.Types GHC.HsToCore.Usage GHC.HsToCore.Utils GHC.Hs.Type GHC.Hs.Utils GHC.Iface.Binary GHC.Iface.Decl GHC.Iface.Env GHC.Iface.Errors GHC.Iface.Errors.Types GHC.Iface.Errors.Ppr GHC.Iface.Ext.Ast GHC.Iface.Ext.Binary GHC.Iface.Ext.Debug GHC.Iface.Ext.Fields GHC.Iface.Ext.Types GHC.Iface.Ext.Utils GHC.Iface.Load GHC.Iface.Make GHC.Iface.Recomp GHC.Iface.Recomp.Binary GHC.Iface.Recomp.Flags GHC.Iface.Rename GHC.Iface.Syntax GHC.Iface.Tidy GHC.Iface.Tidy.StaticPtrTable GHC.Iface.Warnings GHC.IfaceToCore GHC.Iface.Type GHC.JS.Ident GHC.JS.Make GHC.JS.Optimizer GHC.JS.Opt.Expr GHC.JS.Opt.Simple GHC.JS.Ppr GHC.JS.Syntax GHC.JS.JStg.Syntax GHC.JS.JStg.Monad GHC.JS.Transform GHC.Linker.Config GHC.Linker.Deps GHC.Linker.Dynamic GHC.Linker.External GHC.Linker.ExtraObj GHC.Linker.Loader GHC.Linker.MacOS GHC.Linker.Static GHC.Linker.Static.Utils GHC.Linker.Types GHC.Linker.Unit GHC.Linker.Windows GHC.Llvm GHC.Llvm.MetaData GHC.Llvm.Ppr GHC.Llvm.Syntax GHC.Llvm.Types GHC.Parser GHC.Parser.Annotation GHC.Parser.CharClass GHC.Parser.Errors.Basic GHC.Parser.Errors.Ppr 
GHC.Parser.Errors.Types GHC.Parser.Header GHC.Parser.Lexer GHC.Parser.HaddockLex GHC.Parser.PostProcess GHC.Parser.PostProcess.Haddock GHC.Parser.String GHC.Parser.Types GHC.Parser.Utils GHC.Platform GHC.Platform.ARM GHC.Platform.AArch64 GHC.Platform.Constants GHC.Platform.LoongArch64 GHC.Platform.NoRegs GHC.Platform.PPC GHC.Platform.Profile GHC.Platform.Reg GHC.Platform.Reg.Class GHC.Platform.Reg.Class.NoVectors GHC.Platform.Reg.Class.Separate GHC.Platform.Reg.Class.Unified GHC.Platform.Regs GHC.Platform.RISCV64 GHC.Platform.S390X GHC.Platform.Wasm32 GHC.Platform.Ways GHC.Platform.X86 GHC.Platform.X86_64 GHC.Plugins GHC.Prelude GHC.Prelude.Basic GHC.Rename.Bind GHC.Rename.Doc GHC.Rename.Env GHC.Rename.Expr GHC.Rename.Fixity GHC.Rename.HsType GHC.Rename.Module GHC.Rename.Names GHC.Rename.Pat GHC.Rename.Splice GHC.Rename.Unbound GHC.Rename.Utils GHC.Runtime.Context GHC.Runtime.Debugger GHC.Runtime.Eval GHC.Runtime.Eval.Types GHC.Runtime.Heap.Inspect GHC.Runtime.Heap.Layout GHC.Runtime.Interpreter GHC.Runtime.Interpreter.JS GHC.Runtime.Interpreter.Process GHC.Runtime.Interpreter.Types GHC.Runtime.Interpreter.Wasm GHC.Runtime.Loader GHC.Runtime.Utils GHC.Settings GHC.Settings.Config GHC.Settings.Constants GHC.Settings.IO GHC.Stg.BcPrep GHC.Stg.CSE GHC.Stg.Debug GHC.Stg.FVs GHC.Stg.Lift GHC.Stg.Lift.Analysis GHC.Stg.Lift.Config GHC.Stg.Lift.Monad GHC.Stg.Lift.Types GHC.Stg.Lint GHC.Stg.InferTags GHC.Stg.InferTags.Rewrite GHC.Stg.InferTags.TagSig GHC.Stg.InferTags.Types GHC.Stg.Make GHC.Stg.Pipeline GHC.Stg.Stats GHC.Stg.Subst GHC.Stg.Syntax GHC.Stg.Utils GHC.StgToByteCode GHC.StgToCmm GHC.StgToCmm.ArgRep GHC.StgToCmm.Bind GHC.StgToCmm.CgUtils GHC.StgToCmm.Closure GHC.StgToCmm.Config GHC.StgToCmm.DataCon GHC.StgToCmm.Env GHC.StgToCmm.Expr GHC.StgToCmm.ExtCode GHC.StgToCmm.Foreign GHC.StgToCmm.Heap GHC.StgToCmm.Hpc GHC.StgToCmm.InfoTableProv GHC.StgToCmm.Layout GHC.StgToCmm.Lit GHC.StgToCmm.Monad GHC.StgToCmm.Prim GHC.StgToCmm.Prof GHC.StgToCmm.Sequel 
GHC.StgToCmm.TagCheck GHC.StgToCmm.Ticky GHC.StgToCmm.Types GHC.StgToCmm.Utils GHC.StgToJS GHC.StgToJS.Apply GHC.StgToJS.Arg GHC.StgToJS.Closure GHC.StgToJS.CodeGen GHC.StgToJS.DataCon GHC.StgToJS.Deps GHC.StgToJS.Expr GHC.StgToJS.ExprCtx GHC.StgToJS.FFI GHC.StgToJS.Heap GHC.StgToJS.Ids GHC.StgToJS.Literal GHC.StgToJS.Monad GHC.StgToJS.Object GHC.StgToJS.Prim GHC.StgToJS.Profiling GHC.StgToJS.Regs GHC.StgToJS.Rts.Types GHC.StgToJS.Rts.Rts GHC.StgToJS.Sinker GHC.StgToJS.Stack GHC.StgToJS.StaticPtr GHC.StgToJS.Symbols GHC.StgToJS.Types GHC.StgToJS.Utils GHC.StgToJS.Linker.Linker GHC.StgToJS.Linker.Types GHC.StgToJS.Linker.Utils GHC.StgToJS.Linker.Opt GHC.Stg.Unarise GHC.SysTools GHC.SysTools.Ar GHC.SysTools.BaseDir GHC.SysTools.Cpp GHC.SysTools.Elf GHC.SysTools.Process GHC.SysTools.Tasks GHC.SysTools.Terminal GHC.Tc.Deriv GHC.Tc.Deriv.Functor GHC.Tc.Deriv.Generate GHC.Tc.Deriv.Generics GHC.Tc.Deriv.Infer GHC.Tc.Deriv.Utils GHC.Tc.Errors GHC.Tc.Errors.Hole GHC.Tc.Errors.Hole.FitTypes GHC.Tc.Errors.Hole.Plugin GHC.Tc.Errors.Ppr GHC.Tc.Errors.Types GHC.Tc.Errors.Types.PromotionErr GHC.Tc.Gen.Annotation GHC.Tc.Gen.App GHC.Tc.Gen.Arrow GHC.Tc.Gen.Bind GHC.Tc.Gen.Default GHC.Tc.Gen.Do GHC.Tc.Gen.Export GHC.Tc.Gen.Expr GHC.Tc.Gen.Foreign GHC.Tc.Gen.Head GHC.Tc.Gen.HsType GHC.Tc.Gen.Match GHC.Tc.Gen.Pat GHC.Tc.Gen.Rule GHC.Tc.Gen.Sig GHC.Tc.Gen.Splice GHC.Tc.Instance.Class GHC.Tc.Instance.Family GHC.Tc.Instance.FunDeps GHC.Tc.Instance.Typeable GHC.Tc.Module GHC.Tc.Plugin GHC.Tc.Solver GHC.Tc.Solver.Rewrite GHC.Tc.Solver.InertSet GHC.Tc.Solver.Solve GHC.Tc.Solver.Irred GHC.Tc.Solver.Equality GHC.Tc.Solver.Dict GHC.Tc.Solver.Monad GHC.Tc.Solver.Types GHC.Tc.TyCl GHC.Tc.TyCl.Build GHC.Tc.TyCl.Class GHC.Tc.TyCl.Instance GHC.Tc.TyCl.PatSyn GHC.Tc.TyCl.Utils GHC.Tc.Types GHC.Tc.Types.Constraint GHC.Tc.Types.Evidence GHC.Tc.Types.EvTerm GHC.Tc.Types.Origin GHC.Tc.Types.Rank GHC.Tc.Types.CtLoc GHC.Tc.Types.ErrCtxt GHC.Tc.Types.LclEnv GHC.Tc.Types.TH GHC.Tc.Types.TcRef 
GHC.Tc.Types.BasicTypes GHC.Tc.Utils.Backpack GHC.Tc.Utils.Concrete GHC.Tc.Utils.Env GHC.Tc.Utils.Instantiate GHC.Tc.Utils.Monad GHC.Tc.Utils.TcMType GHC.Tc.Utils.TcType GHC.Tc.Utils.Unify GHC.Tc.Validity GHC.Tc.Zonk.Env GHC.Tc.Zonk.Monad GHC.Tc.Zonk.TcType GHC.Tc.Zonk.Type GHC.ThToHs GHC.Types.Annotations GHC.Types.Avail GHC.Types.Basic GHC.Types.Breakpoint GHC.Types.CompleteMatch GHC.Types.CostCentre GHC.Types.CostCentre.State GHC.Types.Cpr GHC.Types.DefaultEnv GHC.Types.Demand GHC.Types.Error GHC.Types.Error.Codes GHC.Types.FieldLabel GHC.Types.Fixity GHC.Types.Fixity.Env GHC.Types.ForeignCall GHC.Types.ForeignStubs GHC.Types.GREInfo GHC.Types.Hint GHC.Types.Hint.Ppr GHC.Types.HpcInfo GHC.Types.Id GHC.Types.IPE GHC.Types.Id.Info GHC.Types.Id.Make GHC.Types.Literal GHC.Types.Meta GHC.Types.Name GHC.Types.Name.Cache GHC.Types.Name.Env GHC.Types.Name.Occurrence GHC.Types.Name.Reader GHC.Types.Name.Set GHC.Types.Name.Shape GHC.Types.Name.Ppr GHC.Types.PkgQual GHC.Types.ProfAuto GHC.Types.RepType GHC.Types.SafeHaskell GHC.Types.SaneDouble GHC.Types.SourceError GHC.Types.SourceFile GHC.Types.SourceText GHC.Types.SptEntry GHC.Types.SrcLoc GHC.Types.Target GHC.Types.Tickish GHC.Types.TypeEnv GHC.Types.TyThing GHC.Types.Unique GHC.Types.Unique.DFM GHC.Types.Unique.DSet GHC.Types.Unique.DSM GHC.Types.Unique.FM GHC.Types.Unique.Map GHC.Types.Unique.MemoFun GHC.Types.Unique.SDFM GHC.Types.Unique.Set GHC.Types.Unique.Supply GHC.Types.Var GHC.Types.Var.Env GHC.Types.Var.Set GHC.Unit GHC.Unit.Env GHC.Unit.External GHC.Unit.Finder GHC.Unit.Finder.Types GHC.Unit.Home GHC.Unit.Home.ModInfo GHC.Unit.Info GHC.Unit.Module GHC.Unit.Module.Deps GHC.Unit.Module.Env GHC.Unit.Module.Graph GHC.Unit.Module.Imported GHC.Unit.Module.Location GHC.Unit.Module.ModDetails GHC.Unit.Module.ModGuts GHC.Unit.Module.ModIface GHC.Unit.Module.WholeCoreBindings GHC.Unit.Module.ModSummary GHC.Unit.Module.Status GHC.Unit.Module.Warnings GHC.Unit.Parser GHC.Unit.Ppr GHC.Unit.State GHC.Unit.Types 
GHC.Utils.Asm GHC.Utils.Binary GHC.Utils.Binary.Typeable GHC.Utils.BufHandle GHC.Utils.CliOption GHC.Utils.Constants GHC.Utils.Containers.Internal.BitUtil GHC.Utils.Containers.Internal.StrictPair GHC.Utils.Error GHC.Utils.Exception GHC.Utils.Fingerprint GHC.Utils.FV GHC.Utils.GlobalVars GHC.Utils.IO.Unsafe GHC.Utils.Json GHC.Utils.Lexeme GHC.Utils.Logger GHC.Utils.Misc GHC.Utils.Monad GHC.Utils.Monad.Codensity GHC.Utils.Monad.State.Strict GHC.Utils.Outputable GHC.Utils.Panic GHC.Utils.Panic.Plain GHC.Utils.Ppr GHC.Utils.Ppr.Colour GHC.Utils.TmpFs GHC.Utils.Touch GHC.Utils.Trace GHC.Utils.Unique GHC.Utils.Word64 GHC.Wasm.ControlFlow GHC.Wasm.ControlFlow.FromCmm GHC.CmmToAsm.Wasm GHC.CmmToAsm.Wasm.Asm GHC.CmmToAsm.Wasm.FromCmm GHC.CmmToAsm.Wasm.Types GHC.CmmToAsm.Wasm.Utils Language.Haskell.Syntax Language.Haskell.Syntax.Basic Language.Haskell.Syntax.Binds Language.Haskell.Syntax.Decls Language.Haskell.Syntax.Expr Language.Haskell.Syntax.Extension Language.Haskell.Syntax.ImpExp Language.Haskell.Syntax.Lit Language.Haskell.Syntax.Module.Name Language.Haskell.Syntax.Pat Language.Haskell.Syntax.Specificity Language.Haskell.Syntax.Type autogen-modules: GHC.Platform.Constants GHC.Settings.Config reexported-modules: GHC.Platform.ArchOS , GHC.Platform.Host ghc-lib-parser-9.12.2.20250421/ghc-lib-parser.cabal0000644000000000000000000004364607346545000017273 0ustar0000000000000000cabal-version: 3.0 build-type: Simple name: ghc-lib-parser version: 9.12.2.20250421 license: BSD-3-Clause license-file: LICENSE category: Development author: The GHC Team and Digital Asset maintainer: Digital Asset synopsis: The GHC API, decoupled from GHC versions description: A package equivalent to the @ghc@ package, but which can be loaded on many compiler versions. 
homepage: https://github.com/digital-asset/ghc-lib bug-reports: https://github.com/digital-asset/ghc-lib/issues data-dir: ghc-lib/stage0/lib data-files: settings llvm-targets llvm-passes extra-source-files: ghc/ghc-bin.cabal libraries/template-haskell/template-haskell.cabal libraries/ghc-heap/ghc-heap.cabal libraries/ghc-boot-th/ghc-boot-th.cabal libraries/ghc-boot/ghc-boot.cabal libraries/ghci/ghci.cabal compiler/ghc.cabal libraries/ghc-platform/ghc-platform.cabal ghc-lib/stage0/rts/build/include/ghcautoconf.h ghc-lib/stage0/rts/build/include/ghcplatform.h ghc-lib/stage0/rts/build/include/GhclibDerivedConstants.h ghc-lib/stage0/compiler/build/primop-code-size.hs-incl ghc-lib/stage0/compiler/build/primop-commutable.hs-incl ghc-lib/stage0/compiler/build/primop-data-decl.hs-incl ghc-lib/stage0/compiler/build/primop-fixity.hs-incl ghc-lib/stage0/compiler/build/primop-effects.hs-incl ghc-lib/stage0/compiler/build/primop-list.hs-incl ghc-lib/stage0/compiler/build/primop-out-of-line.hs-incl ghc-lib/stage0/compiler/build/primop-primop-info.hs-incl ghc-lib/stage0/compiler/build/primop-strictness.hs-incl ghc-lib/stage0/compiler/build/primop-tag.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tycons.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tys-exports.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tys.hs-incl ghc-lib/stage0/compiler/build/primop-vector-uniques.hs-incl ghc-lib/stage0/compiler/build/primop-docs.hs-incl ghc-lib/stage0/compiler/build/primop-is-work-free.hs-incl ghc-lib/stage0/compiler/build/primop-is-cheap.hs-incl ghc-lib/stage0/compiler/build/primop-deprecations.hs-incl ghc-lib/stage0/compiler/build/GHC/Platform/Constants.hs ghc-lib/stage0/compiler/build/GHC/Settings/Config.hs ghc-lib/stage0/libraries/ghc-boot/build/GHC/Version.hs ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/Host.hs compiler/GHC/Parser.y compiler/GHC/Parser/Lexer.x compiler/GHC/Parser/HaddockLex.x compiler/GHC/Parser.hs-boot rts/include/stg/MachRegs/arm32.h 
rts/include/stg/MachRegs/arm64.h rts/include/stg/MachRegs/loongarch64.h rts/include/stg/MachRegs/ppc.h rts/include/stg/MachRegs/riscv64.h rts/include/stg/MachRegs/s390x.h rts/include/stg/MachRegs/wasm32.h rts/include/stg/MachRegs/x86.h libraries/containers/containers/include/containers.h rts/include/ghcconfig.h compiler/MachRegs.h compiler/CodeGen.Platform.h compiler/Bytecodes.h compiler/ClosureTypes.h compiler/FunTypes.h compiler/Unique.h source-repository head type: git location: git@github.com:digital-asset/ghc-lib.git flag threaded-rts default: True manual: True description: Pass -DTHREADED_RTS to the C toolchain library default-language: GHC2021 exposed: False include-dirs: rts/include rts/include/stg ghc-lib/stage0/lib ghc-lib/stage0/compiler/build compiler libraries/containers/containers/include if impl(ghc >= 8.8.1) ghc-options: -fno-safe-haskell if flag(threaded-rts) ghc-options: -fobject-code -package=ghc-boot-th -optc-DTHREADED_RTS cc-options: -DTHREADED_RTS cpp-options: -DTHREADED_RTS -DBOOTSTRAP_TH else ghc-options: -fobject-code -package=ghc-boot-th cpp-options: -DBOOTSTRAP_TH if !os(windows) build-depends: unix else build-depends: Win32 build-depends: base >= 4.19 && < 4.22, ghc-prim > 0.2 && < 0.14, containers >= 0.6.2.1 && < 0.8, bytestring >= 0.11.4 && < 0.13, time >= 1.4 && < 1.15, filepath >= 1.5 && < 1.6, os-string >= 2.0.1 && < 2.1, exceptions == 0.10.*, parsec, binary == 0.8.*, directory >= 1 && < 1.4, array >= 0.1 && < 0.6, deepseq >= 1.4 && < 1.6, pretty == 1.1.*, transformers >= 0.5 && < 0.7, process >= 1 && < 1.7 if impl(ghc >= 9.10) build-depends: ghc-internal build-tool-depends: alex:alex >= 3.1, happy:happy == 1.20.* || == 2.0.2 || >= 2.1.2 && < 2.2 other-extensions: BangPatterns CPP DataKinds DefaultSignatures DeriveDataTypeable DeriveFoldable DeriveFunctor DeriveGeneric DeriveTraversable DisambiguateRecordFields ExistentialQuantification ExplicitForAll FlexibleContexts FlexibleInstances GADTs GeneralizedNewtypeDeriving InstanceSigs 
MagicHash MultiParamTypeClasses NamedFieldPuns NondecreasingIndentation RankNTypes RecordWildCards RoleAnnotations ScopedTypeVariables StandaloneDeriving Trustworthy TupleSections TypeFamilies TypeSynonymInstances UnboxedTuples UndecidableInstances default-extensions: MonoLocalBinds NoImplicitPrelude if impl(ghc >= 9.2.2) cmm-sources: libraries/ghc-heap/cbits/HeapPrim.cmm else c-sources: libraries/ghc-heap/cbits/HeapPrim.cmm c-sources: compiler/cbits/genSym.c compiler/cbits/cutils.c compiler/cbits/keepCAFsForGHCi.c hs-source-dirs: ghc-lib/stage0/libraries/ghc-boot/build ghc-lib/stage0/compiler/build libraries/ghc-platform/src libraries/template-haskell libraries/ghc-platform libraries/ghc-boot-th libraries/ghc-boot libraries/ghc-heap libraries/ghci compiler autogen-modules: GHC.Parser.Lexer GHC.Parser exposed-modules: GHC.BaseDir GHC.Builtin.Names GHC.Builtin.PrimOps GHC.Builtin.PrimOps.Ids GHC.Builtin.Types GHC.Builtin.Types.Literals GHC.Builtin.Types.Prim GHC.Builtin.Uniques GHC.ByteCode.Types GHC.Cmm GHC.Cmm.BlockId GHC.Cmm.CLabel GHC.Cmm.Dataflow.Block GHC.Cmm.Dataflow.Graph GHC.Cmm.Dataflow.Label GHC.Cmm.Expr GHC.Cmm.MachOp GHC.Cmm.Node GHC.Cmm.Reg GHC.Cmm.Switch GHC.Cmm.Type GHC.Cmm.Utils GHC.CmmToAsm.CFG.Weight GHC.CmmToLlvm.Config GHC.CmmToLlvm.Version GHC.CmmToLlvm.Version.Bounds GHC.CmmToLlvm.Version.Type GHC.Core GHC.Core.Class GHC.Core.Coercion GHC.Core.Coercion.Axiom GHC.Core.Coercion.Opt GHC.Core.ConLike GHC.Core.DataCon GHC.Core.FVs GHC.Core.FamInstEnv GHC.Core.InstEnv GHC.Core.Lint GHC.Core.Lint.Interactive GHC.Core.Make GHC.Core.Map.Expr GHC.Core.Map.Type GHC.Core.Multiplicity GHC.Core.Opt.Arity GHC.Core.Opt.CallerCC GHC.Core.Opt.CallerCC.Types GHC.Core.Opt.ConstantFold GHC.Core.Opt.Monad GHC.Core.Opt.OccurAnal GHC.Core.Opt.Pipeline.Types GHC.Core.Opt.Simplify GHC.Core.Opt.Simplify.Env GHC.Core.Opt.Simplify.Inline GHC.Core.Opt.Simplify.Iteration GHC.Core.Opt.Simplify.Monad GHC.Core.Opt.Simplify.Utils GHC.Core.Opt.Stats GHC.Core.PatSyn GHC.Core.Ppr 
GHC.Core.Predicate GHC.Core.Reduction GHC.Core.RoughMap GHC.Core.Rules GHC.Core.Rules.Config GHC.Core.Seq GHC.Core.SimpleOpt GHC.Core.Stats GHC.Core.Subst GHC.Core.Tidy GHC.Core.TyCo.Compare GHC.Core.TyCo.FVs GHC.Core.TyCo.Ppr GHC.Core.TyCo.Rep GHC.Core.TyCo.Subst GHC.Core.TyCo.Tidy GHC.Core.TyCon GHC.Core.TyCon.Env GHC.Core.TyCon.RecWalk GHC.Core.Type GHC.Core.Unfold GHC.Core.Unfold.Make GHC.Core.Unify GHC.Core.UsageEnv GHC.Core.Utils GHC.CoreToIface GHC.Data.Bag GHC.Data.Bool GHC.Data.BooleanFormula GHC.Data.EnumSet GHC.Data.FastMutInt GHC.Data.FastString GHC.Data.FastString.Env GHC.Data.FiniteMap GHC.Data.FlatBag GHC.Data.Graph.Directed GHC.Data.Graph.UnVar GHC.Data.IOEnv GHC.Data.List.Infinite GHC.Data.List.SetOps GHC.Data.Maybe GHC.Data.OrdList GHC.Data.OsPath GHC.Data.Pair GHC.Data.ShortText GHC.Data.SizedSeq GHC.Data.SmallArray GHC.Data.Stream GHC.Data.Strict GHC.Data.StringBuffer GHC.Data.TrieMap GHC.Data.Unboxed GHC.Data.Word64Map GHC.Data.Word64Map.Internal GHC.Data.Word64Map.Lazy GHC.Data.Word64Map.Strict GHC.Data.Word64Map.Strict.Internal GHC.Data.Word64Set GHC.Data.Word64Set.Internal GHC.Driver.Backend GHC.Driver.Backend.Internal GHC.Driver.Backpack.Syntax GHC.Driver.CmdLine GHC.Driver.Config GHC.Driver.Config.Core.Lint GHC.Driver.Config.Diagnostic GHC.Driver.Config.Logger GHC.Driver.Config.Parser GHC.Driver.DynFlags GHC.Driver.Env GHC.Driver.Env.KnotVars GHC.Driver.Env.Types GHC.Driver.Errors GHC.Driver.Errors.Ppr GHC.Driver.Errors.Types GHC.Driver.Flags GHC.Driver.Hooks GHC.Driver.LlvmConfigCache GHC.Driver.Monad GHC.Driver.Phases GHC.Driver.Pipeline.Monad GHC.Driver.Pipeline.Phases GHC.Driver.Plugins GHC.Driver.Plugins.External GHC.Driver.Ppr GHC.Driver.Session GHC.Exts.Heap GHC.Exts.Heap.ClosureTypes GHC.Exts.Heap.Closures GHC.Exts.Heap.Constants GHC.Exts.Heap.FFIClosures GHC.Exts.Heap.FFIClosures_ProfilingDisabled GHC.Exts.Heap.FFIClosures_ProfilingEnabled GHC.Exts.Heap.InfoTable GHC.Exts.Heap.InfoTable.Types GHC.Exts.Heap.InfoTableProf 
GHC.Exts.Heap.ProfInfo.PeekProfInfo GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled GHC.Exts.Heap.ProfInfo.Types GHC.Exts.Heap.Utils GHC.ForeignSrcLang GHC.ForeignSrcLang.Type GHC.Hs GHC.Hs.Basic GHC.Hs.Binds GHC.Hs.Decls GHC.Hs.Doc GHC.Hs.DocString GHC.Hs.Dump GHC.Hs.Expr GHC.Hs.Extension GHC.Hs.ImpExp GHC.Hs.Instances GHC.Hs.Lit GHC.Hs.Pat GHC.Hs.Specificity GHC.Hs.Type GHC.Hs.Utils GHC.HsToCore.Errors.Ppr GHC.HsToCore.Errors.Types GHC.HsToCore.Pmc.Ppr GHC.HsToCore.Pmc.Solver.Types GHC.HsToCore.Pmc.Types GHC.Iface.Decl GHC.Iface.Errors.Ppr GHC.Iface.Errors.Types GHC.Iface.Ext.Fields GHC.Iface.Recomp.Binary GHC.Iface.Syntax GHC.Iface.Type GHC.JS.Ident GHC.JS.JStg.Monad GHC.JS.JStg.Syntax GHC.JS.Make GHC.JS.Ppr GHC.JS.Syntax GHC.JS.Transform GHC.LanguageExtensions GHC.LanguageExtensions.Type GHC.Lexeme GHC.Linker.Config GHC.Linker.Static.Utils GHC.Linker.Types GHC.Parser GHC.Parser.Annotation GHC.Parser.CharClass GHC.Parser.Errors.Basic GHC.Parser.Errors.Ppr GHC.Parser.Errors.Types GHC.Parser.HaddockLex GHC.Parser.Header GHC.Parser.Lexer GHC.Parser.PostProcess GHC.Parser.PostProcess.Haddock GHC.Parser.String GHC.Parser.Types GHC.Platform GHC.Platform.AArch64 GHC.Platform.ARM GHC.Platform.ArchOS GHC.Platform.Constants GHC.Platform.LoongArch64 GHC.Platform.NoRegs GHC.Platform.PPC GHC.Platform.Profile GHC.Platform.RISCV64 GHC.Platform.Reg GHC.Platform.Reg.Class GHC.Platform.Reg.Class.NoVectors GHC.Platform.Reg.Class.Separate GHC.Platform.Reg.Class.Unified GHC.Platform.Regs GHC.Platform.S390X GHC.Platform.Wasm32 GHC.Platform.Ways GHC.Platform.X86 GHC.Platform.X86_64 GHC.Prelude GHC.Prelude.Basic GHC.Runtime.Context GHC.Runtime.Eval.Types GHC.Runtime.Heap.Layout GHC.Runtime.Interpreter.Types GHC.Serialized GHC.Settings GHC.Settings.Config GHC.Settings.Constants GHC.Settings.Utils GHC.Stg.InferTags.TagSig GHC.Stg.Lift.Types GHC.Stg.Syntax GHC.StgToCmm.CgUtils GHC.StgToCmm.Config GHC.StgToCmm.Types 
GHC.StgToJS.Linker.Types GHC.StgToJS.Object GHC.StgToJS.Symbols GHC.StgToJS.Types GHC.SysTools.BaseDir GHC.SysTools.Terminal GHC.Tc.Errors.Hole.FitTypes GHC.Tc.Errors.Hole.Plugin GHC.Tc.Errors.Ppr GHC.Tc.Errors.Types GHC.Tc.Errors.Types.PromotionErr GHC.Tc.Solver.InertSet GHC.Tc.Solver.Types GHC.Tc.Types GHC.Tc.Types.BasicTypes GHC.Tc.Types.Constraint GHC.Tc.Types.CtLoc GHC.Tc.Types.ErrCtxt GHC.Tc.Types.Evidence GHC.Tc.Types.LclEnv GHC.Tc.Types.Origin GHC.Tc.Types.Rank GHC.Tc.Types.TH GHC.Tc.Types.TcRef GHC.Tc.Utils.TcType GHC.Tc.Zonk.Monad GHC.Types.Annotations GHC.Types.Avail GHC.Types.Basic GHC.Types.Breakpoint GHC.Types.CompleteMatch GHC.Types.CostCentre GHC.Types.CostCentre.State GHC.Types.Cpr GHC.Types.DefaultEnv GHC.Types.Demand GHC.Types.Error GHC.Types.Error.Codes GHC.Types.FieldLabel GHC.Types.Fixity GHC.Types.Fixity.Env GHC.Types.ForeignCall GHC.Types.ForeignStubs GHC.Types.GREInfo GHC.Types.Hint GHC.Types.Hint.Ppr GHC.Types.HpcInfo GHC.Types.IPE GHC.Types.Id GHC.Types.Id.Info GHC.Types.Id.Make GHC.Types.Literal GHC.Types.Meta GHC.Types.Name GHC.Types.Name.Cache GHC.Types.Name.Env GHC.Types.Name.Occurrence GHC.Types.Name.Ppr GHC.Types.Name.Reader GHC.Types.Name.Set GHC.Types.PkgQual GHC.Types.ProfAuto GHC.Types.RepType GHC.Types.SafeHaskell GHC.Types.SaneDouble GHC.Types.SourceError GHC.Types.SourceFile GHC.Types.SourceText GHC.Types.SptEntry GHC.Types.SrcLoc GHC.Types.Target GHC.Types.Tickish GHC.Types.TyThing GHC.Types.TyThing.Ppr GHC.Types.TypeEnv GHC.Types.Unique GHC.Types.Unique.DFM GHC.Types.Unique.DSM GHC.Types.Unique.DSet GHC.Types.Unique.FM GHC.Types.Unique.Map GHC.Types.Unique.SDFM GHC.Types.Unique.Set GHC.Types.Unique.Supply GHC.Types.Var GHC.Types.Var.Env GHC.Types.Var.Set GHC.UniqueSubdir GHC.Unit GHC.Unit.Database GHC.Unit.Env GHC.Unit.External GHC.Unit.Finder.Types GHC.Unit.Home GHC.Unit.Home.ModInfo GHC.Unit.Info GHC.Unit.Module GHC.Unit.Module.Deps GHC.Unit.Module.Env GHC.Unit.Module.Graph GHC.Unit.Module.Imported 
GHC.Unit.Module.Location GHC.Unit.Module.ModDetails GHC.Unit.Module.ModGuts GHC.Unit.Module.ModIface GHC.Unit.Module.ModSummary GHC.Unit.Module.Status GHC.Unit.Module.Warnings GHC.Unit.Module.WholeCoreBindings GHC.Unit.Parser GHC.Unit.Ppr GHC.Unit.State GHC.Unit.Types GHC.Utils.Binary GHC.Utils.Binary.Typeable GHC.Utils.BufHandle GHC.Utils.CliOption GHC.Utils.Constants GHC.Utils.Containers.Internal.BitUtil GHC.Utils.Containers.Internal.StrictPair GHC.Utils.Encoding GHC.Utils.Encoding.UTF8 GHC.Utils.Error GHC.Utils.Exception GHC.Utils.FV GHC.Utils.Fingerprint GHC.Utils.GlobalVars GHC.Utils.IO.Unsafe GHC.Utils.Json GHC.Utils.Lexeme GHC.Utils.Logger GHC.Utils.Misc GHC.Utils.Monad GHC.Utils.Monad.State.Strict GHC.Utils.Outputable GHC.Utils.Panic GHC.Utils.Panic.Plain GHC.Utils.Ppr GHC.Utils.Ppr.Colour GHC.Utils.TmpFs GHC.Utils.Trace GHC.Utils.Word64 GHC.Version GHCi.BreakArray GHCi.FFI GHCi.Message GHCi.RemoteTypes GHCi.ResolvedBCO GHCi.TH.Binary Language.Haskell.Syntax Language.Haskell.Syntax.Basic Language.Haskell.Syntax.Binds Language.Haskell.Syntax.Decls Language.Haskell.Syntax.Expr Language.Haskell.Syntax.Extension Language.Haskell.Syntax.ImpExp Language.Haskell.Syntax.Lit Language.Haskell.Syntax.Module.Name Language.Haskell.Syntax.Pat Language.Haskell.Syntax.Specificity Language.Haskell.Syntax.Type if impl(ghc < 9.12.1) hs-source-dirs: libraries/ghc-internal/src libraries/ghc-boot-th-internal exposed-modules: GHC.Internal.ForeignSrcLang GHC.Internal.LanguageExtensions GHC.Internal.Lexeme GHC.Internal.TH.Syntax GHC.Internal.TH.Ppr GHC.Internal.TH.PprLib GHC.Internal.TH.Lib.Map ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/GHC/Platform/0000755000000000000000000000000007346545000023261 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/GHC/Platform/Constants.hs0000644000000000000000000004051307346545000025574 0ustar0000000000000000module GHC.Platform.Constants where import Prelude import Data.Char data PlatformConstants 
= PlatformConstants { pc_CONTROL_GROUP_CONST_291 :: {-# UNPACK #-} !Int, pc_STD_HDR_SIZE :: {-# UNPACK #-} !Int, pc_PROF_HDR_SIZE :: {-# UNPACK #-} !Int, pc_BLOCK_SIZE :: {-# UNPACK #-} !Int, pc_BLOCKS_PER_MBLOCK :: {-# UNPACK #-} !Int, pc_TICKY_BIN_COUNT :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR7 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR8 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR9 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR10 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM6 :: {-# UNPACK #-} 
!Int, pc_OFFSET_StgRegTable_rZMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rL1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rSp :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rSpLim :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHp :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHpLim :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCCCS :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCurrentTSO :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCurrentNursery :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHpAlloc :: {-# UNPACK #-} !Int, pc_OFFSET_stgEagerBlackholeInfo :: {-# UNPACK #-} !Int, pc_OFFSET_stgGCEnter1 :: {-# UNPACK #-} !Int, pc_OFFSET_stgGCFun :: {-# UNPACK #-} !Int, pc_OFFSET_Capability_r :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_start :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_free :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_blocks :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_flags :: {-# UNPACK #-} !Int, pc_SIZEOF_CostCentreStack :: {-# UNPACK #-} !Int, pc_OFFSET_CostCentreStack_mem_alloc :: {-# UNPACK #-} !Int, pc_REP_CostCentreStack_mem_alloc :: {-# UNPACK #-} !Int, pc_OFFSET_CostCentreStack_scc_count :: {-# UNPACK #-} !Int, pc_REP_CostCentreStack_scc_count :: {-# UNPACK #-} !Int, pc_OFFSET_StgHeader_ccs :: {-# UNPACK #-} !Int, pc_OFFSET_StgHeader_ldvw :: {-# UNPACK #-} !Int, pc_SIZEOF_StgSMPThunkHeader :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_allocs :: {-# UNPACK #-} !Int, pc_REP_StgEntCounter_allocs :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_allocd :: {-# UNPACK #-} !Int, pc_REP_StgEntCounter_allocd :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_registeredp :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_link :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_entry_count :: {-# UNPACK #-} 
!Int, pc_SIZEOF_StgUpdateFrame_NoHdr :: {-# UNPACK #-} !Int, pc_SIZEOF_StgOrigThunkInfoFrame_NoHdr :: {-# UNPACK #-} !Int, pc_SIZEOF_StgMutArrPtrs_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgMutArrPtrs_ptrs :: {-# UNPACK #-} !Int, pc_OFFSET_StgMutArrPtrs_size :: {-# UNPACK #-} !Int, pc_SIZEOF_StgSmallMutArrPtrs_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgSmallMutArrPtrs_ptrs :: {-# UNPACK #-} !Int, pc_SIZEOF_StgArrBytes_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgArrBytes_bytes :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_alloc_limit :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_cccs :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_stackobj :: {-# UNPACK #-} !Int, pc_OFFSET_StgStack_sp :: {-# UNPACK #-} !Int, pc_OFFSET_StgStack_stack :: {-# UNPACK #-} !Int, pc_OFFSET_StgUpdateFrame_updatee :: {-# UNPACK #-} !Int, pc_OFFSET_StgOrigThunkInfoFrame_info_ptr :: {-# UNPACK #-} !Int, pc_OFFSET_StgFunInfoExtraFwd_arity :: {-# UNPACK #-} !Int, pc_REP_StgFunInfoExtraFwd_arity :: {-# UNPACK #-} !Int, pc_SIZEOF_StgFunInfoExtraRev :: {-# UNPACK #-} !Int, pc_OFFSET_StgFunInfoExtraRev_arity :: {-# UNPACK #-} !Int, pc_REP_StgFunInfoExtraRev_arity :: {-# UNPACK #-} !Int, pc_MAX_SPEC_SELECTEE_SIZE :: {-# UNPACK #-} !Int, pc_MAX_SPEC_AP_SIZE :: {-# UNPACK #-} !Int, pc_MIN_PAYLOAD_SIZE :: {-# UNPACK #-} !Int, pc_MIN_INTLIKE :: {-# UNPACK #-} !Int, pc_MAX_INTLIKE :: {-# UNPACK #-} !Int, pc_MIN_CHARLIKE :: {-# UNPACK #-} !Int, pc_MAX_CHARLIKE :: {-# UNPACK #-} !Int, pc_MUT_ARR_PTRS_CARD_BITS :: {-# UNPACK #-} !Int, pc_MAX_Vanilla_REG :: {-# UNPACK #-} !Int, pc_MAX_Float_REG :: {-# UNPACK #-} !Int, pc_MAX_Double_REG :: {-# UNPACK #-} !Int, pc_MAX_Long_REG :: {-# UNPACK #-} !Int, pc_MAX_XMM_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Vanilla_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Float_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Double_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_XMM_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Long_REG :: {-# UNPACK #-} !Int, pc_RESERVED_C_STACK_BYTES :: {-# UNPACK #-} !Int, 
pc_RESERVED_STACK_WORDS :: {-# UNPACK #-} !Int, pc_AP_STACK_SPLIM :: {-# UNPACK #-} !Int, pc_WORD_SIZE :: {-# UNPACK #-} !Int, pc_CINT_SIZE :: {-# UNPACK #-} !Int, pc_CLONG_SIZE :: {-# UNPACK #-} !Int, pc_CLONG_LONG_SIZE :: {-# UNPACK #-} !Int, pc_BITMAP_BITS_SHIFT :: {-# UNPACK #-} !Int, pc_TAG_BITS :: {-# UNPACK #-} !Int, pc_LDV_SHIFT :: {-# UNPACK #-} !Int, pc_ILDV_CREATE_MASK :: !Integer, pc_ILDV_STATE_CREATE :: !Integer, pc_ILDV_STATE_USE :: !Integer, pc_USE_INLINE_SRT_FIELD :: !Bool } deriving (Show, Read, Eq, Ord) parseConstantsHeader :: FilePath -> IO PlatformConstants parseConstantsHeader fp = do s <- readFile fp let def = "#define HS_CONSTANTS \"" find [] xs = xs find _ [] = error $ "GHC couldn't find the RTS constants ("++def++") in " ++ fp ++ ": the RTS package you are trying to use is perhaps for another GHC version" ++ "(e.g. you are using the wrong package database) or the package database is broken.\n" find (d:ds) (x:xs) | d == x = find ds xs | otherwise = find def xs readVal' :: Bool -> Integer -> String -> [Integer] readVal' n c (x:xs) = case x of '"' -> [if n then negate c else c] '-' -> readVal' True c xs ',' -> (if n then negate c else c) : readVal' False 0 xs _ -> readVal' n (c*10 + fromIntegral (ord x - ord '0')) xs readVal' n c [] = [if n then negate c else c] readVal = readVal' False 0 return $! 
case readVal (find def s) of [v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15 ,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29,v30,v31 ,v32,v33,v34,v35,v36,v37,v38,v39,v40,v41,v42,v43,v44,v45,v46,v47 ,v48,v49,v50,v51,v52,v53,v54,v55,v56,v57,v58,v59,v60,v61,v62,v63 ,v64,v65,v66,v67,v68,v69,v70,v71,v72,v73,v74,v75,v76,v77,v78,v79 ,v80,v81,v82,v83,v84,v85,v86,v87,v88,v89,v90,v91,v92,v93,v94,v95 ,v96,v97,v98,v99,v100,v101,v102,v103,v104,v105,v106,v107,v108,v109,v110,v111 ,v112,v113,v114,v115,v116,v117,v118,v119,v120,v121,v122,v123,v124,v125,v126,v127 ,v128,v129,v130 ] -> PlatformConstants { pc_CONTROL_GROUP_CONST_291 = fromIntegral v0 , pc_STD_HDR_SIZE = fromIntegral v1 , pc_PROF_HDR_SIZE = fromIntegral v2 , pc_BLOCK_SIZE = fromIntegral v3 , pc_BLOCKS_PER_MBLOCK = fromIntegral v4 , pc_TICKY_BIN_COUNT = fromIntegral v5 , pc_OFFSET_StgRegTable_rR1 = fromIntegral v6 , pc_OFFSET_StgRegTable_rR2 = fromIntegral v7 , pc_OFFSET_StgRegTable_rR3 = fromIntegral v8 , pc_OFFSET_StgRegTable_rR4 = fromIntegral v9 , pc_OFFSET_StgRegTable_rR5 = fromIntegral v10 , pc_OFFSET_StgRegTable_rR6 = fromIntegral v11 , pc_OFFSET_StgRegTable_rR7 = fromIntegral v12 , pc_OFFSET_StgRegTable_rR8 = fromIntegral v13 , pc_OFFSET_StgRegTable_rR9 = fromIntegral v14 , pc_OFFSET_StgRegTable_rR10 = fromIntegral v15 , pc_OFFSET_StgRegTable_rF1 = fromIntegral v16 , pc_OFFSET_StgRegTable_rF2 = fromIntegral v17 , pc_OFFSET_StgRegTable_rF3 = fromIntegral v18 , pc_OFFSET_StgRegTable_rF4 = fromIntegral v19 , pc_OFFSET_StgRegTable_rF5 = fromIntegral v20 , pc_OFFSET_StgRegTable_rF6 = fromIntegral v21 , pc_OFFSET_StgRegTable_rD1 = fromIntegral v22 , pc_OFFSET_StgRegTable_rD2 = fromIntegral v23 , pc_OFFSET_StgRegTable_rD3 = fromIntegral v24 , pc_OFFSET_StgRegTable_rD4 = fromIntegral v25 , pc_OFFSET_StgRegTable_rD5 = fromIntegral v26 , pc_OFFSET_StgRegTable_rD6 = fromIntegral v27 , pc_OFFSET_StgRegTable_rXMM1 = fromIntegral v28 , pc_OFFSET_StgRegTable_rXMM2 = fromIntegral v29 , pc_OFFSET_StgRegTable_rXMM3 
= fromIntegral v30 , pc_OFFSET_StgRegTable_rXMM4 = fromIntegral v31 , pc_OFFSET_StgRegTable_rXMM5 = fromIntegral v32 , pc_OFFSET_StgRegTable_rXMM6 = fromIntegral v33 , pc_OFFSET_StgRegTable_rYMM1 = fromIntegral v34 , pc_OFFSET_StgRegTable_rYMM2 = fromIntegral v35 , pc_OFFSET_StgRegTable_rYMM3 = fromIntegral v36 , pc_OFFSET_StgRegTable_rYMM4 = fromIntegral v37 , pc_OFFSET_StgRegTable_rYMM5 = fromIntegral v38 , pc_OFFSET_StgRegTable_rYMM6 = fromIntegral v39 , pc_OFFSET_StgRegTable_rZMM1 = fromIntegral v40 , pc_OFFSET_StgRegTable_rZMM2 = fromIntegral v41 , pc_OFFSET_StgRegTable_rZMM3 = fromIntegral v42 , pc_OFFSET_StgRegTable_rZMM4 = fromIntegral v43 , pc_OFFSET_StgRegTable_rZMM5 = fromIntegral v44 , pc_OFFSET_StgRegTable_rZMM6 = fromIntegral v45 , pc_OFFSET_StgRegTable_rL1 = fromIntegral v46 , pc_OFFSET_StgRegTable_rSp = fromIntegral v47 , pc_OFFSET_StgRegTable_rSpLim = fromIntegral v48 , pc_OFFSET_StgRegTable_rHp = fromIntegral v49 , pc_OFFSET_StgRegTable_rHpLim = fromIntegral v50 , pc_OFFSET_StgRegTable_rCCCS = fromIntegral v51 , pc_OFFSET_StgRegTable_rCurrentTSO = fromIntegral v52 , pc_OFFSET_StgRegTable_rCurrentNursery = fromIntegral v53 , pc_OFFSET_StgRegTable_rHpAlloc = fromIntegral v54 , pc_OFFSET_stgEagerBlackholeInfo = fromIntegral v55 , pc_OFFSET_stgGCEnter1 = fromIntegral v56 , pc_OFFSET_stgGCFun = fromIntegral v57 , pc_OFFSET_Capability_r = fromIntegral v58 , pc_OFFSET_bdescr_start = fromIntegral v59 , pc_OFFSET_bdescr_free = fromIntegral v60 , pc_OFFSET_bdescr_blocks = fromIntegral v61 , pc_OFFSET_bdescr_flags = fromIntegral v62 , pc_SIZEOF_CostCentreStack = fromIntegral v63 , pc_OFFSET_CostCentreStack_mem_alloc = fromIntegral v64 , pc_REP_CostCentreStack_mem_alloc = fromIntegral v65 , pc_OFFSET_CostCentreStack_scc_count = fromIntegral v66 , pc_REP_CostCentreStack_scc_count = fromIntegral v67 , pc_OFFSET_StgHeader_ccs = fromIntegral v68 , pc_OFFSET_StgHeader_ldvw = fromIntegral v69 , pc_SIZEOF_StgSMPThunkHeader = fromIntegral v70 , 
pc_OFFSET_StgEntCounter_allocs = fromIntegral v71 , pc_REP_StgEntCounter_allocs = fromIntegral v72 , pc_OFFSET_StgEntCounter_allocd = fromIntegral v73 , pc_REP_StgEntCounter_allocd = fromIntegral v74 , pc_OFFSET_StgEntCounter_registeredp = fromIntegral v75 , pc_OFFSET_StgEntCounter_link = fromIntegral v76 , pc_OFFSET_StgEntCounter_entry_count = fromIntegral v77 , pc_SIZEOF_StgUpdateFrame_NoHdr = fromIntegral v78 , pc_SIZEOF_StgOrigThunkInfoFrame_NoHdr = fromIntegral v79 , pc_SIZEOF_StgMutArrPtrs_NoHdr = fromIntegral v80 , pc_OFFSET_StgMutArrPtrs_ptrs = fromIntegral v81 , pc_OFFSET_StgMutArrPtrs_size = fromIntegral v82 , pc_SIZEOF_StgSmallMutArrPtrs_NoHdr = fromIntegral v83 , pc_OFFSET_StgSmallMutArrPtrs_ptrs = fromIntegral v84 , pc_SIZEOF_StgArrBytes_NoHdr = fromIntegral v85 , pc_OFFSET_StgArrBytes_bytes = fromIntegral v86 , pc_OFFSET_StgTSO_alloc_limit = fromIntegral v87 , pc_OFFSET_StgTSO_cccs = fromIntegral v88 , pc_OFFSET_StgTSO_stackobj = fromIntegral v89 , pc_OFFSET_StgStack_sp = fromIntegral v90 , pc_OFFSET_StgStack_stack = fromIntegral v91 , pc_OFFSET_StgUpdateFrame_updatee = fromIntegral v92 , pc_OFFSET_StgOrigThunkInfoFrame_info_ptr = fromIntegral v93 , pc_OFFSET_StgFunInfoExtraFwd_arity = fromIntegral v94 , pc_REP_StgFunInfoExtraFwd_arity = fromIntegral v95 , pc_SIZEOF_StgFunInfoExtraRev = fromIntegral v96 , pc_OFFSET_StgFunInfoExtraRev_arity = fromIntegral v97 , pc_REP_StgFunInfoExtraRev_arity = fromIntegral v98 , pc_MAX_SPEC_SELECTEE_SIZE = fromIntegral v99 , pc_MAX_SPEC_AP_SIZE = fromIntegral v100 , pc_MIN_PAYLOAD_SIZE = fromIntegral v101 , pc_MIN_INTLIKE = fromIntegral v102 , pc_MAX_INTLIKE = fromIntegral v103 , pc_MIN_CHARLIKE = fromIntegral v104 , pc_MAX_CHARLIKE = fromIntegral v105 , pc_MUT_ARR_PTRS_CARD_BITS = fromIntegral v106 , pc_MAX_Vanilla_REG = fromIntegral v107 , pc_MAX_Float_REG = fromIntegral v108 , pc_MAX_Double_REG = fromIntegral v109 , pc_MAX_Long_REG = fromIntegral v110 , pc_MAX_XMM_REG = fromIntegral v111 , pc_MAX_Real_Vanilla_REG = 
fromIntegral v112 , pc_MAX_Real_Float_REG = fromIntegral v113 , pc_MAX_Real_Double_REG = fromIntegral v114 , pc_MAX_Real_XMM_REG = fromIntegral v115 , pc_MAX_Real_Long_REG = fromIntegral v116 , pc_RESERVED_C_STACK_BYTES = fromIntegral v117 , pc_RESERVED_STACK_WORDS = fromIntegral v118 , pc_AP_STACK_SPLIM = fromIntegral v119 , pc_WORD_SIZE = fromIntegral v120 , pc_CINT_SIZE = fromIntegral v121 , pc_CLONG_SIZE = fromIntegral v122 , pc_CLONG_LONG_SIZE = fromIntegral v123 , pc_BITMAP_BITS_SHIFT = fromIntegral v124 , pc_TAG_BITS = fromIntegral v125 , pc_LDV_SHIFT = fromIntegral v126 , pc_ILDV_CREATE_MASK = v127 , pc_ILDV_STATE_CREATE = v128 , pc_ILDV_STATE_USE = v129 , pc_USE_INLINE_SRT_FIELD = 0 < v130 } _ -> error "Invalid platform constants" ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/GHC/Settings/0000755000000000000000000000000007346545000023275 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/GHC/Settings/Config.hs0000644000000000000000000000125307346545000025037 0ustar0000000000000000module GHC.Settings.Config ( module GHC.Version , cBuildPlatformString , cHostPlatformString , cProjectName , cBooterVersion , cStage , cProjectUnitId ) where import GHC.Prelude.Basic import GHC.Version cBuildPlatformString :: String cBuildPlatformString = "x86_64-apple-darwin" cHostPlatformString :: String cHostPlatformString = "x86_64-apple-darwin" cProjectName :: String cProjectName = "The Glorious Glasgow Haskell Compilation System" cBooterVersion :: String cBooterVersion = "9.12.1" cStage :: String cStage = show (1 :: Int) cProjectUnitId :: String cProjectUnitId = "ghc-9.12.2-inplace" ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/0000755000000000000000000000000007346545000021074 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-code-size.hs-incl0000644000000000000000000000760607346545000025552 0ustar0000000000000000primOpCodeSize OrdOp = 0 primOpCodeSize Int8ToWord8Op = 0 
primOpCodeSize Word8ToInt8Op = 0 primOpCodeSize Int16ToWord16Op = 0 primOpCodeSize Word16ToInt16Op = 0 primOpCodeSize Int32ToWord32Op = 0 primOpCodeSize Word32ToInt32Op = 0 primOpCodeSize Int64ToWord64Op = 0 primOpCodeSize Word64ToInt64Op = 0 primOpCodeSize IntAddCOp = 2 primOpCodeSize IntSubCOp = 2 primOpCodeSize ChrOp = 0 primOpCodeSize IntToWordOp = 0 primOpCodeSize WordAddCOp = 2 primOpCodeSize WordSubCOp = 2 primOpCodeSize WordAdd2Op = 2 primOpCodeSize WordToIntOp = 0 primOpCodeSize DoubleExpOp = primOpCodeSizeForeignCall primOpCodeSize DoubleExpM1Op = primOpCodeSizeForeignCall primOpCodeSize DoubleLogOp = primOpCodeSizeForeignCall primOpCodeSize DoubleLog1POp = primOpCodeSizeForeignCall primOpCodeSize DoubleSqrtOp = primOpCodeSizeForeignCall primOpCodeSize DoubleSinOp = primOpCodeSizeForeignCall primOpCodeSize DoubleCosOp = primOpCodeSizeForeignCall primOpCodeSize DoubleTanOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAsinOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAcosOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAtanOp = primOpCodeSizeForeignCall primOpCodeSize DoubleSinhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleCoshOp = primOpCodeSizeForeignCall primOpCodeSize DoubleTanhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAsinhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAcoshOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAtanhOp = primOpCodeSizeForeignCall primOpCodeSize DoublePowerOp = primOpCodeSizeForeignCall primOpCodeSize FloatExpOp = primOpCodeSizeForeignCall primOpCodeSize FloatExpM1Op = primOpCodeSizeForeignCall primOpCodeSize FloatLogOp = primOpCodeSizeForeignCall primOpCodeSize FloatLog1POp = primOpCodeSizeForeignCall primOpCodeSize FloatSqrtOp = primOpCodeSizeForeignCall primOpCodeSize FloatSinOp = primOpCodeSizeForeignCall primOpCodeSize FloatCosOp = primOpCodeSizeForeignCall primOpCodeSize FloatTanOp = primOpCodeSizeForeignCall primOpCodeSize FloatAsinOp = primOpCodeSizeForeignCall primOpCodeSize 
FloatAcosOp = primOpCodeSizeForeignCall primOpCodeSize FloatAtanOp = primOpCodeSizeForeignCall primOpCodeSize FloatSinhOp = primOpCodeSizeForeignCall primOpCodeSize FloatCoshOp = primOpCodeSizeForeignCall primOpCodeSize FloatTanhOp = primOpCodeSizeForeignCall primOpCodeSize FloatAsinhOp = primOpCodeSizeForeignCall primOpCodeSize FloatAcoshOp = primOpCodeSizeForeignCall primOpCodeSize FloatAtanhOp = primOpCodeSizeForeignCall primOpCodeSize FloatPowerOp = primOpCodeSizeForeignCall primOpCodeSize WriteArrayOp = 2 primOpCodeSize UnsafeFreezeByteArrayOp = 0 primOpCodeSize UnsafeThawByteArrayOp = 0 primOpCodeSize CopyByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyMutableByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyMutableByteArrayNonOverlappingOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyByteArrayToAddrOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyMutableByteArrayToAddrOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyAddrToByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyAddrToAddrOp = primOpCodeSizeForeignCall primOpCodeSize CopyAddrToAddrNonOverlappingOp = primOpCodeSizeForeignCall primOpCodeSize SetByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize SetAddrRangeOp = primOpCodeSizeForeignCall primOpCodeSize AddrToIntOp = 0 primOpCodeSize IntToAddrOp = 0 primOpCodeSize WriteMutVarOp = primOpCodeSizeForeignCall primOpCodeSize RaiseUnderflowOp = primOpCodeSizeForeignCall primOpCodeSize RaiseOverflowOp = primOpCodeSizeForeignCall primOpCodeSize RaiseDivZeroOp = primOpCodeSizeForeignCall primOpCodeSize TouchOp = 0 primOpCodeSize ParOp = primOpCodeSizeForeignCall primOpCodeSize SparkOp = primOpCodeSizeForeignCall primOpCodeSize AddrToAnyOp = 0 primOpCodeSize AnyToAddrOp = 0 primOpCodeSize _thisOp = primOpCodeSizeDefault ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-commutable.hs-incl0000644000000000000000000000360007346545000026006 0ustar0000000000000000commutableOp CharEqOp = 
True commutableOp CharNeOp = True commutableOp Int8AddOp = True commutableOp Int8MulOp = True commutableOp Word8AddOp = True commutableOp Word8MulOp = True commutableOp Word8AndOp = True commutableOp Word8OrOp = True commutableOp Word8XorOp = True commutableOp Int16AddOp = True commutableOp Int16MulOp = True commutableOp Word16AddOp = True commutableOp Word16MulOp = True commutableOp Word16AndOp = True commutableOp Word16OrOp = True commutableOp Word16XorOp = True commutableOp Int32AddOp = True commutableOp Int32MulOp = True commutableOp Word32AddOp = True commutableOp Word32MulOp = True commutableOp Word32AndOp = True commutableOp Word32OrOp = True commutableOp Word32XorOp = True commutableOp Int64AddOp = True commutableOp Int64MulOp = True commutableOp Word64AddOp = True commutableOp Word64MulOp = True commutableOp Word64AndOp = True commutableOp Word64OrOp = True commutableOp Word64XorOp = True commutableOp IntAddOp = True commutableOp IntMulOp = True commutableOp IntMulMayOfloOp = True commutableOp IntAndOp = True commutableOp IntOrOp = True commutableOp IntXorOp = True commutableOp IntAddCOp = True commutableOp IntEqOp = True commutableOp IntNeOp = True commutableOp WordAddOp = True commutableOp WordAddCOp = True commutableOp WordAdd2Op = True commutableOp WordMulOp = True commutableOp WordMul2Op = True commutableOp WordAndOp = True commutableOp WordOrOp = True commutableOp WordXorOp = True commutableOp DoubleEqOp = True commutableOp DoubleNeOp = True commutableOp DoubleMinOp = True commutableOp DoubleMaxOp = True commutableOp DoubleAddOp = True commutableOp DoubleMulOp = True commutableOp FloatEqOp = True commutableOp FloatNeOp = True commutableOp FloatMinOp = True commutableOp FloatMaxOp = True commutableOp FloatAddOp = True commutableOp FloatMulOp = True commutableOp (VecAddOp _ _ _) = True commutableOp (VecMulOp _ _ _) = True commutableOp _thisOp = False 
ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-data-decl.hs-incl0000644000000000000000000004145507346545000025506 0ustar0000000000000000data PrimOp = CharGtOp | CharGeOp | CharEqOp | CharNeOp | CharLtOp | CharLeOp | OrdOp | Int8ToIntOp | IntToInt8Op | Int8NegOp | Int8AddOp | Int8SubOp | Int8MulOp | Int8QuotOp | Int8RemOp | Int8QuotRemOp | Int8SllOp | Int8SraOp | Int8SrlOp | Int8ToWord8Op | Int8EqOp | Int8GeOp | Int8GtOp | Int8LeOp | Int8LtOp | Int8NeOp | Word8ToWordOp | WordToWord8Op | Word8AddOp | Word8SubOp | Word8MulOp | Word8QuotOp | Word8RemOp | Word8QuotRemOp | Word8AndOp | Word8OrOp | Word8XorOp | Word8NotOp | Word8SllOp | Word8SrlOp | Word8ToInt8Op | Word8EqOp | Word8GeOp | Word8GtOp | Word8LeOp | Word8LtOp | Word8NeOp | Int16ToIntOp | IntToInt16Op | Int16NegOp | Int16AddOp | Int16SubOp | Int16MulOp | Int16QuotOp | Int16RemOp | Int16QuotRemOp | Int16SllOp | Int16SraOp | Int16SrlOp | Int16ToWord16Op | Int16EqOp | Int16GeOp | Int16GtOp | Int16LeOp | Int16LtOp | Int16NeOp | Word16ToWordOp | WordToWord16Op | Word16AddOp | Word16SubOp | Word16MulOp | Word16QuotOp | Word16RemOp | Word16QuotRemOp | Word16AndOp | Word16OrOp | Word16XorOp | Word16NotOp | Word16SllOp | Word16SrlOp | Word16ToInt16Op | Word16EqOp | Word16GeOp | Word16GtOp | Word16LeOp | Word16LtOp | Word16NeOp | Int32ToIntOp | IntToInt32Op | Int32NegOp | Int32AddOp | Int32SubOp | Int32MulOp | Int32QuotOp | Int32RemOp | Int32QuotRemOp | Int32SllOp | Int32SraOp | Int32SrlOp | Int32ToWord32Op | Int32EqOp | Int32GeOp | Int32GtOp | Int32LeOp | Int32LtOp | Int32NeOp | Word32ToWordOp | WordToWord32Op | Word32AddOp | Word32SubOp | Word32MulOp | Word32QuotOp | Word32RemOp | Word32QuotRemOp | Word32AndOp | Word32OrOp | Word32XorOp | Word32NotOp | Word32SllOp | Word32SrlOp | Word32ToInt32Op | Word32EqOp | Word32GeOp | Word32GtOp | Word32LeOp | Word32LtOp | Word32NeOp | Int64ToIntOp | IntToInt64Op | Int64NegOp | Int64AddOp | Int64SubOp | Int64MulOp | Int64QuotOp | Int64RemOp | Int64SllOp | 
Int64SraOp | Int64SrlOp | Int64ToWord64Op | Int64EqOp | Int64GeOp | Int64GtOp | Int64LeOp | Int64LtOp | Int64NeOp | Word64ToWordOp | WordToWord64Op | Word64AddOp | Word64SubOp | Word64MulOp | Word64QuotOp | Word64RemOp | Word64AndOp | Word64OrOp | Word64XorOp | Word64NotOp | Word64SllOp | Word64SrlOp | Word64ToInt64Op | Word64EqOp | Word64GeOp | Word64GtOp | Word64LeOp | Word64LtOp | Word64NeOp | IntAddOp | IntSubOp | IntMulOp | IntMul2Op | IntMulMayOfloOp | IntQuotOp | IntRemOp | IntQuotRemOp | IntAndOp | IntOrOp | IntXorOp | IntNotOp | IntNegOp | IntAddCOp | IntSubCOp | IntGtOp | IntGeOp | IntEqOp | IntNeOp | IntLtOp | IntLeOp | ChrOp | IntToWordOp | IntToFloatOp | IntToDoubleOp | WordToFloatOp | WordToDoubleOp | IntSllOp | IntSraOp | IntSrlOp | WordAddOp | WordAddCOp | WordSubCOp | WordAdd2Op | WordSubOp | WordMulOp | WordMul2Op | WordQuotOp | WordRemOp | WordQuotRemOp | WordQuotRem2Op | WordAndOp | WordOrOp | WordXorOp | WordNotOp | WordSllOp | WordSrlOp | WordToIntOp | WordGtOp | WordGeOp | WordEqOp | WordNeOp | WordLtOp | WordLeOp | PopCnt8Op | PopCnt16Op | PopCnt32Op | PopCnt64Op | PopCntOp | Pdep8Op | Pdep16Op | Pdep32Op | Pdep64Op | PdepOp | Pext8Op | Pext16Op | Pext32Op | Pext64Op | PextOp | Clz8Op | Clz16Op | Clz32Op | Clz64Op | ClzOp | Ctz8Op | Ctz16Op | Ctz32Op | Ctz64Op | CtzOp | BSwap16Op | BSwap32Op | BSwap64Op | BSwapOp | BRev8Op | BRev16Op | BRev32Op | BRev64Op | BRevOp | Narrow8IntOp | Narrow16IntOp | Narrow32IntOp | Narrow8WordOp | Narrow16WordOp | Narrow32WordOp | DoubleGtOp | DoubleGeOp | DoubleEqOp | DoubleNeOp | DoubleLtOp | DoubleLeOp | DoubleMinOp | DoubleMaxOp | DoubleAddOp | DoubleSubOp | DoubleMulOp | DoubleDivOp | DoubleNegOp | DoubleFabsOp | DoubleToIntOp | DoubleToFloatOp | DoubleExpOp | DoubleExpM1Op | DoubleLogOp | DoubleLog1POp | DoubleSqrtOp | DoubleSinOp | DoubleCosOp | DoubleTanOp | DoubleAsinOp | DoubleAcosOp | DoubleAtanOp | DoubleSinhOp | DoubleCoshOp | DoubleTanhOp | DoubleAsinhOp | DoubleAcoshOp | DoubleAtanhOp | 
DoublePowerOp | DoubleDecode_2IntOp | DoubleDecode_Int64Op | CastDoubleToWord64Op | CastWord64ToDoubleOp | FloatGtOp | FloatGeOp | FloatEqOp | FloatNeOp | FloatLtOp | FloatLeOp | FloatMinOp | FloatMaxOp | FloatAddOp | FloatSubOp | FloatMulOp | FloatDivOp | FloatNegOp | FloatFabsOp | FloatToIntOp | FloatExpOp | FloatExpM1Op | FloatLogOp | FloatLog1POp | FloatSqrtOp | FloatSinOp | FloatCosOp | FloatTanOp | FloatAsinOp | FloatAcosOp | FloatAtanOp | FloatSinhOp | FloatCoshOp | FloatTanhOp | FloatAsinhOp | FloatAcoshOp | FloatAtanhOp | FloatPowerOp | FloatToDoubleOp | FloatDecode_IntOp | CastFloatToWord32Op | CastWord32ToFloatOp | FloatFMAdd | FloatFMSub | FloatFNMAdd | FloatFNMSub | DoubleFMAdd | DoubleFMSub | DoubleFNMAdd | DoubleFNMSub | NewArrayOp | ReadArrayOp | WriteArrayOp | SizeofArrayOp | SizeofMutableArrayOp | IndexArrayOp | UnsafeFreezeArrayOp | UnsafeThawArrayOp | CopyArrayOp | CopyMutableArrayOp | CloneArrayOp | CloneMutableArrayOp | FreezeArrayOp | ThawArrayOp | CasArrayOp | NewSmallArrayOp | ShrinkSmallMutableArrayOp_Char | ReadSmallArrayOp | WriteSmallArrayOp | SizeofSmallArrayOp | SizeofSmallMutableArrayOp | GetSizeofSmallMutableArrayOp | IndexSmallArrayOp | UnsafeFreezeSmallArrayOp | UnsafeThawSmallArrayOp | CopySmallArrayOp | CopySmallMutableArrayOp | CloneSmallArrayOp | CloneSmallMutableArrayOp | FreezeSmallArrayOp | ThawSmallArrayOp | CasSmallArrayOp | NewByteArrayOp_Char | NewPinnedByteArrayOp_Char | NewAlignedPinnedByteArrayOp_Char | MutableByteArrayIsPinnedOp | ByteArrayIsPinnedOp | ByteArrayIsWeaklyPinnedOp | MutableByteArrayIsWeaklyPinnedOp | ByteArrayContents_Char | MutableByteArrayContents_Char | ShrinkMutableByteArrayOp_Char | ResizeMutableByteArrayOp_Char | UnsafeFreezeByteArrayOp | UnsafeThawByteArrayOp | SizeofByteArrayOp | SizeofMutableByteArrayOp | GetSizeofMutableByteArrayOp | IndexByteArrayOp_Char | IndexByteArrayOp_WideChar | IndexByteArrayOp_Int | IndexByteArrayOp_Word | IndexByteArrayOp_Addr | IndexByteArrayOp_Float | 
IndexByteArrayOp_Double | IndexByteArrayOp_StablePtr | IndexByteArrayOp_Int8 | IndexByteArrayOp_Word8 | IndexByteArrayOp_Int16 | IndexByteArrayOp_Word16 | IndexByteArrayOp_Int32 | IndexByteArrayOp_Word32 | IndexByteArrayOp_Int64 | IndexByteArrayOp_Word64 | IndexByteArrayOp_Word8AsChar | IndexByteArrayOp_Word8AsWideChar | IndexByteArrayOp_Word8AsInt | IndexByteArrayOp_Word8AsWord | IndexByteArrayOp_Word8AsAddr | IndexByteArrayOp_Word8AsFloat | IndexByteArrayOp_Word8AsDouble | IndexByteArrayOp_Word8AsStablePtr | IndexByteArrayOp_Word8AsInt16 | IndexByteArrayOp_Word8AsWord16 | IndexByteArrayOp_Word8AsInt32 | IndexByteArrayOp_Word8AsWord32 | IndexByteArrayOp_Word8AsInt64 | IndexByteArrayOp_Word8AsWord64 | ReadByteArrayOp_Char | ReadByteArrayOp_WideChar | ReadByteArrayOp_Int | ReadByteArrayOp_Word | ReadByteArrayOp_Addr | ReadByteArrayOp_Float | ReadByteArrayOp_Double | ReadByteArrayOp_StablePtr | ReadByteArrayOp_Int8 | ReadByteArrayOp_Word8 | ReadByteArrayOp_Int16 | ReadByteArrayOp_Word16 | ReadByteArrayOp_Int32 | ReadByteArrayOp_Word32 | ReadByteArrayOp_Int64 | ReadByteArrayOp_Word64 | ReadByteArrayOp_Word8AsChar | ReadByteArrayOp_Word8AsWideChar | ReadByteArrayOp_Word8AsInt | ReadByteArrayOp_Word8AsWord | ReadByteArrayOp_Word8AsAddr | ReadByteArrayOp_Word8AsFloat | ReadByteArrayOp_Word8AsDouble | ReadByteArrayOp_Word8AsStablePtr | ReadByteArrayOp_Word8AsInt16 | ReadByteArrayOp_Word8AsWord16 | ReadByteArrayOp_Word8AsInt32 | ReadByteArrayOp_Word8AsWord32 | ReadByteArrayOp_Word8AsInt64 | ReadByteArrayOp_Word8AsWord64 | WriteByteArrayOp_Char | WriteByteArrayOp_WideChar | WriteByteArrayOp_Int | WriteByteArrayOp_Word | WriteByteArrayOp_Addr | WriteByteArrayOp_Float | WriteByteArrayOp_Double | WriteByteArrayOp_StablePtr | WriteByteArrayOp_Int8 | WriteByteArrayOp_Word8 | WriteByteArrayOp_Int16 | WriteByteArrayOp_Word16 | WriteByteArrayOp_Int32 | WriteByteArrayOp_Word32 | WriteByteArrayOp_Int64 | WriteByteArrayOp_Word64 | WriteByteArrayOp_Word8AsChar | 
WriteByteArrayOp_Word8AsWideChar | WriteByteArrayOp_Word8AsInt | WriteByteArrayOp_Word8AsWord | WriteByteArrayOp_Word8AsAddr | WriteByteArrayOp_Word8AsFloat | WriteByteArrayOp_Word8AsDouble | WriteByteArrayOp_Word8AsStablePtr | WriteByteArrayOp_Word8AsInt16 | WriteByteArrayOp_Word8AsWord16 | WriteByteArrayOp_Word8AsInt32 | WriteByteArrayOp_Word8AsWord32 | WriteByteArrayOp_Word8AsInt64 | WriteByteArrayOp_Word8AsWord64 | CompareByteArraysOp | CopyByteArrayOp | CopyMutableByteArrayOp | CopyMutableByteArrayNonOverlappingOp | CopyByteArrayToAddrOp | CopyMutableByteArrayToAddrOp | CopyAddrToByteArrayOp | CopyAddrToAddrOp | CopyAddrToAddrNonOverlappingOp | SetByteArrayOp | SetAddrRangeOp | AtomicReadByteArrayOp_Int | AtomicWriteByteArrayOp_Int | CasByteArrayOp_Int | CasByteArrayOp_Int8 | CasByteArrayOp_Int16 | CasByteArrayOp_Int32 | CasByteArrayOp_Int64 | FetchAddByteArrayOp_Int | FetchSubByteArrayOp_Int | FetchAndByteArrayOp_Int | FetchNandByteArrayOp_Int | FetchOrByteArrayOp_Int | FetchXorByteArrayOp_Int | AddrAddOp | AddrSubOp | AddrRemOp | AddrToIntOp | IntToAddrOp | AddrGtOp | AddrGeOp | AddrEqOp | AddrNeOp | AddrLtOp | AddrLeOp | IndexOffAddrOp_Char | IndexOffAddrOp_WideChar | IndexOffAddrOp_Int | IndexOffAddrOp_Word | IndexOffAddrOp_Addr | IndexOffAddrOp_Float | IndexOffAddrOp_Double | IndexOffAddrOp_StablePtr | IndexOffAddrOp_Int8 | IndexOffAddrOp_Word8 | IndexOffAddrOp_Int16 | IndexOffAddrOp_Word16 | IndexOffAddrOp_Int32 | IndexOffAddrOp_Word32 | IndexOffAddrOp_Int64 | IndexOffAddrOp_Word64 | IndexOffAddrOp_Word8AsChar | IndexOffAddrOp_Word8AsWideChar | IndexOffAddrOp_Word8AsInt | IndexOffAddrOp_Word8AsWord | IndexOffAddrOp_Word8AsAddr | IndexOffAddrOp_Word8AsFloat | IndexOffAddrOp_Word8AsDouble | IndexOffAddrOp_Word8AsStablePtr | IndexOffAddrOp_Word8AsInt16 | IndexOffAddrOp_Word8AsWord16 | IndexOffAddrOp_Word8AsInt32 | IndexOffAddrOp_Word8AsWord32 | IndexOffAddrOp_Word8AsInt64 | IndexOffAddrOp_Word8AsWord64 | ReadOffAddrOp_Char | ReadOffAddrOp_WideChar | 
ReadOffAddrOp_Int | ReadOffAddrOp_Word | ReadOffAddrOp_Addr | ReadOffAddrOp_Float | ReadOffAddrOp_Double | ReadOffAddrOp_StablePtr | ReadOffAddrOp_Int8 | ReadOffAddrOp_Word8 | ReadOffAddrOp_Int16 | ReadOffAddrOp_Word16 | ReadOffAddrOp_Int32 | ReadOffAddrOp_Word32 | ReadOffAddrOp_Int64 | ReadOffAddrOp_Word64 | ReadOffAddrOp_Word8AsChar | ReadOffAddrOp_Word8AsWideChar | ReadOffAddrOp_Word8AsInt | ReadOffAddrOp_Word8AsWord | ReadOffAddrOp_Word8AsAddr | ReadOffAddrOp_Word8AsFloat | ReadOffAddrOp_Word8AsDouble | ReadOffAddrOp_Word8AsStablePtr | ReadOffAddrOp_Word8AsInt16 | ReadOffAddrOp_Word8AsWord16 | ReadOffAddrOp_Word8AsInt32 | ReadOffAddrOp_Word8AsWord32 | ReadOffAddrOp_Word8AsInt64 | ReadOffAddrOp_Word8AsWord64 | WriteOffAddrOp_Char | WriteOffAddrOp_WideChar | WriteOffAddrOp_Int | WriteOffAddrOp_Word | WriteOffAddrOp_Addr | WriteOffAddrOp_Float | WriteOffAddrOp_Double | WriteOffAddrOp_StablePtr | WriteOffAddrOp_Int8 | WriteOffAddrOp_Word8 | WriteOffAddrOp_Int16 | WriteOffAddrOp_Word16 | WriteOffAddrOp_Int32 | WriteOffAddrOp_Word32 | WriteOffAddrOp_Int64 | WriteOffAddrOp_Word64 | WriteOffAddrOp_Word8AsChar | WriteOffAddrOp_Word8AsWideChar | WriteOffAddrOp_Word8AsInt | WriteOffAddrOp_Word8AsWord | WriteOffAddrOp_Word8AsAddr | WriteOffAddrOp_Word8AsFloat | WriteOffAddrOp_Word8AsDouble | WriteOffAddrOp_Word8AsStablePtr | WriteOffAddrOp_Word8AsInt16 | WriteOffAddrOp_Word8AsWord16 | WriteOffAddrOp_Word8AsInt32 | WriteOffAddrOp_Word8AsWord32 | WriteOffAddrOp_Word8AsInt64 | WriteOffAddrOp_Word8AsWord64 | InterlockedExchange_Addr | InterlockedExchange_Word | CasAddrOp_Addr | CasAddrOp_Word | CasAddrOp_Word8 | CasAddrOp_Word16 | CasAddrOp_Word32 | CasAddrOp_Word64 | FetchAddAddrOp_Word | FetchSubAddrOp_Word | FetchAndAddrOp_Word | FetchNandAddrOp_Word | FetchOrAddrOp_Word | FetchXorAddrOp_Word | AtomicReadAddrOp_Word | AtomicWriteAddrOp_Word | NewMutVarOp | ReadMutVarOp | WriteMutVarOp | AtomicSwapMutVarOp | AtomicModifyMutVar2Op | AtomicModifyMutVar_Op | CasMutVarOp | 
CatchOp | RaiseOp | RaiseUnderflowOp | RaiseOverflowOp | RaiseDivZeroOp | RaiseIOOp | MaskAsyncExceptionsOp | MaskUninterruptibleOp | UnmaskAsyncExceptionsOp | MaskStatus | NewPromptTagOp | PromptOp | Control0Op | AtomicallyOp | RetryOp | CatchRetryOp | CatchSTMOp | NewTVarOp | ReadTVarOp | ReadTVarIOOp | WriteTVarOp | NewMVarOp | TakeMVarOp | TryTakeMVarOp | PutMVarOp | TryPutMVarOp | ReadMVarOp | TryReadMVarOp | IsEmptyMVarOp | NewIOPortOp | ReadIOPortOp | WriteIOPortOp | DelayOp | WaitReadOp | WaitWriteOp | ForkOp | ForkOnOp | KillThreadOp | YieldOp | MyThreadIdOp | LabelThreadOp | IsCurrentThreadBoundOp | NoDuplicateOp | GetThreadLabelOp | ThreadStatusOp | ListThreadsOp | MkWeakOp | MkWeakNoFinalizerOp | AddCFinalizerToWeakOp | DeRefWeakOp | FinalizeWeakOp | TouchOp | MakeStablePtrOp | DeRefStablePtrOp | EqStablePtrOp | MakeStableNameOp | StableNameToIntOp | CompactNewOp | CompactResizeOp | CompactContainsOp | CompactContainsAnyOp | CompactGetFirstBlockOp | CompactGetNextBlockOp | CompactAllocateBlockOp | CompactFixupPointersOp | CompactAdd | CompactAddWithSharing | CompactSize | ReallyUnsafePtrEqualityOp | ParOp | SparkOp | GetSparkOp | NumSparks | KeepAliveOp | DataToTagSmallOp | DataToTagLargeOp | TagToEnumOp | AddrToAnyOp | AnyToAddrOp | MkApUpd0_Op | NewBCOOp | UnpackClosureOp | ClosureSizeOp | GetApStackValOp | GetCCSOfOp | GetCurrentCCSOp | ClearCCSOp | WhereFromOp | TraceEventOp | TraceEventBinaryOp | TraceMarkerOp | SetThreadAllocationCounter | VecBroadcastOp PrimOpVecCat Length Width | VecPackOp PrimOpVecCat Length Width | VecUnpackOp PrimOpVecCat Length Width | VecInsertOp PrimOpVecCat Length Width | VecAddOp PrimOpVecCat Length Width | VecSubOp PrimOpVecCat Length Width | VecMulOp PrimOpVecCat Length Width | VecDivOp PrimOpVecCat Length Width | VecQuotOp PrimOpVecCat Length Width | VecRemOp PrimOpVecCat Length Width | VecNegOp PrimOpVecCat Length Width | VecIndexByteArrayOp PrimOpVecCat Length Width | VecReadByteArrayOp PrimOpVecCat Length Width | 
VecWriteByteArrayOp PrimOpVecCat Length Width | VecIndexOffAddrOp PrimOpVecCat Length Width | VecReadOffAddrOp PrimOpVecCat Length Width | VecWriteOffAddrOp PrimOpVecCat Length Width | VecIndexScalarByteArrayOp PrimOpVecCat Length Width | VecReadScalarByteArrayOp PrimOpVecCat Length Width | VecWriteScalarByteArrayOp PrimOpVecCat Length Width | VecIndexScalarOffAddrOp PrimOpVecCat Length Width | VecReadScalarOffAddrOp PrimOpVecCat Length Width | VecWriteScalarOffAddrOp PrimOpVecCat Length Width | VecFMAdd PrimOpVecCat Length Width | VecFMSub PrimOpVecCat Length Width | VecFNMAdd PrimOpVecCat Length Width | VecFNMSub PrimOpVecCat Length Width | VecShuffleOp PrimOpVecCat Length Width | VecMinOp PrimOpVecCat Length Width | VecMaxOp PrimOpVecCat Length Width | PrefetchByteArrayOp3 | PrefetchMutableByteArrayOp3 | PrefetchAddrOp3 | PrefetchValueOp3 | PrefetchByteArrayOp2 | PrefetchMutableByteArrayOp2 | PrefetchAddrOp2 | PrefetchValueOp2 | PrefetchByteArrayOp1 | PrefetchMutableByteArrayOp1 | PrefetchAddrOp1 | PrefetchValueOp1 | PrefetchByteArrayOp0 | PrefetchMutableByteArrayOp0 | PrefetchAddrOp0 | PrefetchValueOp0 ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-deprecations.hs-incl0000644000000000000000000000071707346545000026344 0ustar0000000000000000primOpDeprecations = [ (mkVarOcc "sizeofSmallMutableArray#", fsLit " Use 'getSizeofSmallMutableArray#' instead ") , (mkVarOcc "sizeofMutableByteArray#", fsLit " Use 'getSizeofMutableByteArray#' instead ") , (mkVarOcc "dataToTagSmall#", fsLit " Use dataToTag# from \\\"GHC.Magic\\\" instead. ") , (mkVarOcc "dataToTagLarge#", fsLit " Use dataToTag# from \\\"GHC.Magic\\\" instead. 
") , (mkVarOcc "void#", fsLit " Use an unboxed unit tuple instead ") ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-docs.hs-incl0000644000000000000000000044103707346545000024620 0ustar0000000000000000primOpDocs = [ (fsLit "*#","Low word of signed integer multiply.") , (fsLit "timesInt2#","Return a triple (isHighNeeded,high,low) where high and low are respectively\n the high and low bits of the double-word result. isHighNeeded is a cheap way\n to test if the high word is a sign-extension of the low word (isHighNeeded =\n 0#) or not (isHighNeeded = 1#).") , (fsLit "mulIntMayOflo#","Return non-zero if there is any possibility that the upper word of a\n signed integer multiply might contain useful information. Return\n zero only if you are completely sure that no overflow can occur.\n On a 32-bit platform, the recommended implementation is to do a\n 32 x 32 -> 64 signed multiply, and subtract result[63:32] from\n (result[31] >>signed 31). If this is zero, meaning that the\n upper word is merely a sign extension of the lower one, no\n overflow can occur.\n\n On a 64-bit platform it is not always possible to\n acquire the top 64 bits of the result. Therefore, a recommended\n implementation is to take the absolute value of both operands, and\n return 0 iff bits[63:31] of them are zero, since that means that their\n magnitudes fit within 31 bits, so the magnitude of the product must fit\n into 62 bits.\n\n If in doubt, return non-zero, but do make an effort to create the\n correct answer for small args, since otherwise the performance of\n @(*) :: Integer -> Integer -> Integer@ will be poor.\n ") , (fsLit "quotInt#","Rounds towards zero. The behavior is undefined if the second argument is\n zero.\n ") , (fsLit "remInt#","Satisfies @('quotInt#' x y) '*#' y '+#' ('remInt#' x y) == x@. 
The\n behavior is undefined if the second argument is zero.\n ") , (fsLit "quotRemInt#","Rounds towards zero.") , (fsLit "andI#","Bitwise \"and\".") , (fsLit "orI#","Bitwise \"or\".") , (fsLit "xorI#","Bitwise \"xor\".") , (fsLit "notI#","Bitwise \"not\", also known as the binary complement.") , (fsLit "negateInt#","Unary negation.\n Since the negative 'Int#' range extends one further than the\n positive range, 'negateInt#' of the most negative number is an\n identity operation. This way, 'negateInt#' is always its own inverse.") , (fsLit "addIntC#","Add signed integers reporting overflow.\n First member of result is the sum truncated to an 'Int#';\n second member is zero if the true sum fits in an 'Int#',\n nonzero if overflow occurred (the sum is either too large\n or too small to fit in an 'Int#').") , (fsLit "subIntC#","Subtract signed integers reporting overflow.\n First member of result is the difference truncated to an 'Int#';\n second member is zero if the true difference fits in an 'Int#',\n nonzero if overflow occurred (the difference is either too large\n or too small to fit in an 'Int#').") , (fsLit "int2Float#","Convert an 'Int#' to the corresponding 'Float#' with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @'int2Float#' 1# == 1.0#@") , (fsLit "int2Double#","Convert an 'Int#' to the corresponding 'Double#' with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @'int2Double#' 1# == 1.0##@") , (fsLit "word2Float#","Convert an 'Word#' to the corresponding 'Float#' with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @'word2Float#' 1## == 1.0#@") , (fsLit "word2Double#","Convert an 'Word#' to the corresponding 'Double#' with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @'word2Double#' 1## == 1.0##@") , (fsLit "uncheckedIShiftL#","Shift left. 
Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , (fsLit "uncheckedIShiftRA#","Shift right arithmetic. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , (fsLit "uncheckedIShiftRL#","Shift right logical. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , (fsLit "addWordC#","Add unsigned integers reporting overflow.\n The first element of the pair is the result. The second element is\n the carry flag, which is nonzero on overflow. See also 'plusWord2#'.") , (fsLit "subWordC#","Subtract unsigned integers reporting overflow.\n The first element of the pair is the result. The second element is\n the carry flag, which is nonzero on overflow.") , (fsLit "plusWord2#","Add unsigned integers, with the high part (carry) in the first\n component of the returned pair and the low part in the second\n component of the pair. See also 'addWordC#'.") , (fsLit "quotRemWord2#"," Takes high word of dividend, then low word of dividend, then divisor.\n Requires that high word < divisor.") , (fsLit "uncheckedShiftL#","Shift left logical. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , (fsLit "uncheckedShiftRL#","Shift right logical. 
Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , (fsLit "popCnt8#","Count the number of set bits in the lower 8 bits of a word.") , (fsLit "popCnt16#","Count the number of set bits in the lower 16 bits of a word.") , (fsLit "popCnt32#","Count the number of set bits in the lower 32 bits of a word.") , (fsLit "popCnt64#","Count the number of set bits in a 64-bit word.") , (fsLit "popCnt#","Count the number of set bits in a word.") , (fsLit "pdep8#","Deposit bits to lower 8 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pdep16#","Deposit bits to lower 16 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pdep32#","Deposit bits to lower 32 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pdep64#","Deposit bits to a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pdep#","Deposit bits to a word at locations specified by a mask, aka\n [parallel bit deposit](https://en.wikipedia.org/wiki/Bit_Manipulation_Instruction_Sets#Parallel_bit_deposit_and_extract).\n\n Software emulation:\n\n > pdep :: Word -> Word -> Word\n > pdep src mask = go 0 src mask\n > where\n > go :: Word -> Word -> Word -> Word\n > go result _ 0 = result\n > go result src mask = go newResult newSrc newMask\n > where\n > maskCtz = countTrailingZeros mask\n > newResult = if testBit src 0 then setBit result maskCtz else result\n > newSrc = src `shiftR` 1\n > newMask = clearBit mask maskCtz\n\n @since 0.5.2.0") , (fsLit "pext8#","Extract bits from lower 8 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pext16#","Extract bits from lower 16 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pext32#","Extract bits from lower 32 bits of a word at locations specified by a mask.\n\n @since 0.5.2.0") , (fsLit "pext64#","Extract bits from a word at locations specified by a mask.\n\n @since 0.5.2.0") , 
(fsLit "pext#","Extract bits from a word at locations specified by a mask, aka\n [parallel bit extract](https://en.wikipedia.org/wiki/Bit_Manipulation_Instruction_Sets#Parallel_bit_deposit_and_extract).\n\n Software emulation:\n\n > pext :: Word -> Word -> Word\n > pext src mask = loop 0 0 0\n > where\n > loop i count result\n > | i >= finiteBitSize (0 :: Word)\n > = result\n > | testBit mask i\n > = loop (i + 1) (count + 1) (if testBit src i then setBit result count else result)\n > | otherwise\n > = loop (i + 1) count result\n\n @since 0.5.2.0") , (fsLit "clz8#","Count leading zeros in the lower 8 bits of a word.") , (fsLit "clz16#","Count leading zeros in the lower 16 bits of a word.") , (fsLit "clz32#","Count leading zeros in the lower 32 bits of a word.") , (fsLit "clz64#","Count leading zeros in a 64-bit word.") , (fsLit "clz#","Count leading zeros in a word.") , (fsLit "ctz8#","Count trailing zeros in the lower 8 bits of a word.") , (fsLit "ctz16#","Count trailing zeros in the lower 16 bits of a word.") , (fsLit "ctz32#","Count trailing zeros in the lower 32 bits of a word.") , (fsLit "ctz64#","Count trailing zeros in a 64-bit word.") , (fsLit "ctz#","Count trailing zeros in a word.") , (fsLit "byteSwap16#","Swap bytes in the lower 16 bits of a word. The higher bytes are undefined. ") , (fsLit "byteSwap32#","Swap bytes in the lower 32 bits of a word. The higher bytes are undefined. 
") , (fsLit "byteSwap64#","Swap bytes in a 64 bits of a word.") , (fsLit "byteSwap#","Swap bytes in a word.") , (fsLit "bitReverse8#","Reverse the order of the bits in a 8-bit word.") , (fsLit "bitReverse16#","Reverse the order of the bits in a 16-bit word.") , (fsLit "bitReverse32#","Reverse the order of the bits in a 32-bit word.") , (fsLit "bitReverse64#","Reverse the order of the bits in a 64-bit word.") , (fsLit "bitReverse#","Reverse the order of the bits in a word.") , (fsLit "double2Int#","Truncates a 'Double#' value to the nearest 'Int#'.\n Results are undefined if the truncation if truncation yields\n a value outside the range of 'Int#'.") , (fsLit "**##","Exponentiation.") , (fsLit "decodeDouble_2Int#","Convert to integer.\n First component of the result is -1 or 1, indicating the sign of the\n mantissa. The next two are the high and low 32 bits of the mantissa\n respectively, and the last is the exponent.") , (fsLit "decodeDouble_Int64#","Decode 'Double#' into mantissa and base-2 exponent.") , (fsLit "castDoubleToWord64#","Bitcast a 'Double#' into a 'Word64#'") , (fsLit "castWord64ToDouble#","Bitcast a 'Word64#' into a 'Double#'") , (fsLit "float2Int#","Truncates a 'Float#' value to the nearest 'Int#'.\n Results are undefined if the truncation if truncation yields\n a value outside the range of 'Int#'.") , (fsLit "decodeFloat_Int#","Convert to integers.\n First 'Int#' in result is the mantissa; second is the exponent.") , (fsLit "castFloatToWord32#","Bitcast a 'Float#' into a 'Word32#'") , (fsLit "castWord32ToFloat#","Bitcast a 'Word32#' into a 'Float#'") , (fsLit "fmaddFloat#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubFloat#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddFloat#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubFloat#","Fused negate-multiply-subtract operation @-x*y-z@. 
See \"GHC.Prim#fma\".") , (fsLit "fmaddDouble#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubDouble#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddDouble#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubDouble#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "newArray#","Create a new mutable array with the specified number of elements,\n in the specified state thread,\n with each element containing the specified initial value.") , (fsLit "readArray#","Read from specified index of mutable array. Result is not yet evaluated.") , (fsLit "writeArray#","Write to specified index of mutable array.") , (fsLit "sizeofArray#","Return the number of elements in the array.") , (fsLit "sizeofMutableArray#","Return the number of elements in the array.") , (fsLit "indexArray#","Read from the specified index of an immutable array. The result is packaged\n into an unboxed unary tuple; the result itself is not yet\n evaluated. Pattern matching on the tuple forces the indexing of the\n array to happen but does not evaluate the element itself. Evaluating\n the thunk prevents additional thunks from building up on the\n heap. Avoiding these thunks, in turn, reduces references to the\n argument array, allowing it to be garbage collected more promptly.") , (fsLit "unsafeFreezeArray#","Make a mutable array immutable, without copying.") , (fsLit "unsafeThawArray#","Make an immutable array mutable, without copying.") , (fsLit "copyArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. 
The two arrays must not\n be the same array in different states, but this is not checked\n either.") , (fsLit "copyMutableArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. In the case where\n the source and destination are the same array the source and\n destination regions may overlap.") , (fsLit "cloneArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "cloneMutableArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "freezeArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "thawArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "casArray#","Given an array, an offset, the expected old value, and\n the new value, perform an atomic compare and swap (i.e. write the new\n value if the current value and the old value are the same pointer).\n Returns 0 if the swap succeeds and 1 if it fails. Additionally, returns\n the element at the offset after the operation completes. 
This means that\n on a success the new value is returned, and on a failure the actual old\n value (not the expected one) is returned. Implies a full memory barrier.\n The use of a pointer equality on a boxed value makes this function harder\n to use correctly than 'casIntArray#'. All of the difficulties\n of using 'reallyUnsafePtrEquality#' correctly apply to\n 'casArray#' as well.\n ") , (fsLit "newSmallArray#","Create a new mutable array with the specified number of elements,\n in the specified state thread,\n with each element containing the specified initial value.") , (fsLit "shrinkSmallMutableArray#","Shrink mutable array to new specified size, in\n the specified state thread. The new size argument must be less than or\n equal to the current size as reported by 'getSizeofSmallMutableArray#'.\n\n Assuming the non-profiling RTS, for the copying garbage collector\n (default) this primitive compiles to an O(1) operation in C--, modifying\n the array in-place. For the non-moving garbage collector, however, the\n time is proportional to the number of elements shrinked out. Backends\n bypassing C-- representation (such as JavaScript) might behave\n differently.\n\n @since 0.6.1") , (fsLit "readSmallArray#","Read from specified index of mutable array. Result is not yet evaluated.") , (fsLit "writeSmallArray#","Write to specified index of mutable array.") , (fsLit "sizeofSmallArray#","Return the number of elements in the array.") , (fsLit "sizeofSmallMutableArray#","Return the number of elements in the array. __Deprecated__, it is\n unsafe in the presence of 'shrinkSmallMutableArray#' and @resizeSmallMutableArray#@\n operations on the same small mutable array.") , (fsLit "getSizeofSmallMutableArray#","Return the number of elements in the array, correctly accounting for\n the effect of 'shrinkSmallMutableArray#' and @resizeSmallMutableArray#@.\n\n @since 0.6.1") , (fsLit "indexSmallArray#","Read from specified index of immutable array. 
Result is packaged into\n an unboxed singleton; the result itself is not yet evaluated.") , (fsLit "unsafeFreezeSmallArray#","Make a mutable array immutable, without copying.") , (fsLit "unsafeThawSmallArray#","Make an immutable array mutable, without copying.") , (fsLit "copySmallArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. The two arrays must not\n be the same array in different states, but this is not checked\n either.") , (fsLit "copySmallMutableArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. The source and destination arrays can\n refer to the same array. Both arrays must fully contain the\n specified ranges, but this is not checked.\n The regions are allowed to overlap, although this is only possible when the same\n array is provided as both the source and the destination. ") , (fsLit "cloneSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "cloneSmallMutableArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "freezeSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. 
The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "thawSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , (fsLit "casSmallArray#","Unsafe, machine-level atomic compare and swap on an element within an array.\n See the documentation of 'casArray#'.") , (fsLit "ByteArray#","\n A boxed, unlifted datatype representing a region of raw memory in the garbage-collected heap,\n which is not scanned for pointers during garbage collection.\n\n It is created by freezing a 'MutableByteArray#' with 'unsafeFreezeByteArray#'.\n Freezing is essentially a no-op, as 'MutableByteArray#' and 'ByteArray#' share the same heap structure under the hood.\n\n The immutable and mutable variants are commonly used for scenarios requiring high-performance data structures,\n like @Text@, @Primitive Vector@, @Unboxed Array@, and @ShortByteString@.\n\n Another application of fundamental importance is 'Integer', which is backed by 'ByteArray#'.\n\n The representation on the heap of a Byte Array is:\n\n > +------------+-----------------+-----------------------+\n > | | | |\n > | HEADER | SIZE (in bytes) | PAYLOAD |\n > | | | |\n > +------------+-----------------+-----------------------+\n\n To obtain a pointer to actual payload (e.g., for FFI purposes) use 'byteArrayContents#' or 'mutableByteArrayContents#'.\n\n Alternatively, enabling the @UnliftedFFITypes@ extension\n allows to mention 'ByteArray#' and 'MutableByteArray#' in FFI type signatures directly.\n") , (fsLit "MutableByteArray#"," A mutable 'ByteAray#'. 
It can be created in three ways:\n\n * 'newByteArray#': Create an unpinned array.\n * 'newPinnedByteArray#': This will create a pinned array,\n * 'newAlignedPinnedByteArray#': This will create a pinned array, with a custom alignment.\n\n Unpinned arrays can be moved around during garbage collection, so you must not store or pass pointers to these values\n if there is a chance for the garbage collector to kick in. That said, even unpinned arrays can be passed to unsafe FFI calls,\n because no garbage collection happens during these unsafe calls\n (see [Guaranteed Call Safety](https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/ffi.html#guaranteed-call-safety)\n in the GHC Manual). For safe FFI calls, byte arrays must be not only pinned, but also kept alive by means of the keepAlive# function\n for the duration of a call (that's because garbage collection cannot move a pinned array, but is free to scrap it altogether).\n") , (fsLit "newByteArray#","Create a new mutable byte array of specified size (in bytes), in\n the specified state thread. The size of the memory underlying the\n array will be rounded up to the platform's word size.") , (fsLit "newPinnedByteArray#","Like 'newByteArray#' but GC guarantees not to move it.") , (fsLit "newAlignedPinnedByteArray#","Like 'newPinnedByteArray#' but allow specifying an arbitrary\n alignment, which must be a power of two.") , (fsLit "isMutableByteArrayPinned#","Determine whether a 'MutableByteArray#' is guaranteed not to move\n during GC.") , (fsLit "isByteArrayPinned#","Determine whether a 'ByteArray#' is guaranteed not to move.") , (fsLit "isByteArrayWeaklyPinned#","Similar to 'isByteArrayPinned#'. 
Weakly pinned byte arrays are allowed\n to be copied into compact regions by the user, potentially invalidating\n the results of earlier calls to 'byteArrayContents#'.\n\n See the section `Pinned Byte Arrays` in the user guide for more information.\n\n This function also returns true for regular pinned bytearrays.\n ") , (fsLit "isMutableByteArrayWeaklyPinned#"," 'isByteArrayWeaklyPinned#' but for mutable arrays.\n ") , (fsLit "byteArrayContents#","Intended for use with pinned arrays; otherwise very unsafe!") , (fsLit "mutableByteArrayContents#","Intended for use with pinned arrays; otherwise very unsafe!") , (fsLit "shrinkMutableByteArray#","Shrink mutable byte array to new specified size (in bytes), in\n the specified state thread. The new size argument must be less than or\n equal to the current size as reported by 'getSizeofMutableByteArray#'.\n\n Assuming the non-profiling RTS, this primitive compiles to an O(1)\n operation in C--, modifying the array in-place. Backends bypassing C--\n representation (such as JavaScript) might behave differently.\n\n @since 0.4.0.0") , (fsLit "resizeMutableByteArray#","Resize mutable byte array to new specified size (in bytes), shrinking or growing it.\n The returned 'MutableByteArray#' is either the original\n 'MutableByteArray#' resized in-place or, if not possible, a newly\n allocated (unpinned) 'MutableByteArray#' (with the original content\n copied over).\n\n To avoid undefined behaviour, the original 'MutableByteArray#' shall\n not be accessed anymore after a 'resizeMutableByteArray#' has been\n performed. 
Moreover, no reference to the old one should be kept in order\n to allow garbage collection of the original 'MutableByteArray#' in\n case a new 'MutableByteArray#' had to be allocated.\n\n @since 0.4.0.0") , (fsLit "unsafeFreezeByteArray#","Make a mutable byte array immutable, without copying.") , (fsLit "unsafeThawByteArray#","Make an immutable byte array mutable, without copying.\n\n @since 0.12.0.0") , (fsLit "sizeofByteArray#","Return the size of the array in bytes.") , (fsLit "sizeofMutableByteArray#","Return the size of the array in bytes. __Deprecated__, it is\n unsafe in the presence of 'shrinkMutableByteArray#' and 'resizeMutableByteArray#'\n operations on the same mutable byte\n array.") , (fsLit "getSizeofMutableByteArray#","Return the number of elements in the array, correctly accounting for\n the effect of 'shrinkMutableByteArray#' and 'resizeMutableByteArray#'.\n\n @since 0.5.0.0") , (fsLit "indexCharArray#","Read an 8-bit character from immutable array; offset in bytes.") , (fsLit "indexWideCharArray#","Read a 32-bit character from immutable array; offset in 4-byte words.") , (fsLit "indexIntArray#","Read a word-sized integer from immutable array; offset in machine words.") , (fsLit "indexWordArray#","Read a word-sized unsigned integer from immutable array; offset in machine words.") , (fsLit "indexAddrArray#","Read a machine address from immutable array; offset in machine words.") , (fsLit "indexFloatArray#","Read a single-precision floating-point value from immutable array; offset in 4-byte words.") , (fsLit "indexDoubleArray#","Read a double-precision floating-point value from immutable array; offset in 8-byte words.") , (fsLit "indexStablePtrArray#","Read a 'StablePtr#' value from immutable array; offset in machine words.") , (fsLit "indexInt8Array#","Read an 8-bit signed integer from immutable array; offset in bytes.") , (fsLit "indexWord8Array#","Read an 8-bit unsigned integer from immutable array; offset in bytes.") , (fsLit 
"indexInt16Array#","Read a 16-bit signed integer from immutable array; offset in 2-byte words.") , (fsLit "indexWord16Array#","Read a 16-bit unsigned integer from immutable array; offset in 2-byte words.") , (fsLit "indexInt32Array#","Read a 32-bit signed integer from immutable array; offset in 4-byte words.") , (fsLit "indexWord32Array#","Read a 32-bit unsigned integer from immutable array; offset in 4-byte words.") , (fsLit "indexInt64Array#","Read a 64-bit signed integer from immutable array; offset in 8-byte words.") , (fsLit "indexWord64Array#","Read a 64-bit unsigned integer from immutable array; offset in 8-byte words.") , (fsLit "indexWord8ArrayAsChar#","Read an 8-bit character from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsWideChar#","Read a 32-bit character from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsInt#","Read a word-sized integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsWord#","Read a word-sized unsigned integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsAddr#","Read a machine address from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsFloat#","Read a single-precision floating-point value from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsDouble#","Read a double-precision floating-point value from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsStablePtr#","Read a 'StablePtr#' value from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsInt16#","Read a 16-bit signed integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsWord16#","Read a 16-bit unsigned integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsInt32#","Read a 32-bit signed integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsWord32#","Read a 32-bit unsigned integer from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsInt64#","Read a 64-bit signed integer 
from immutable array; offset in bytes.") , (fsLit "indexWord8ArrayAsWord64#","Read a 64-bit unsigned integer from immutable array; offset in bytes.") , (fsLit "readCharArray#","Read an 8-bit character from mutable array; offset in bytes.") , (fsLit "readWideCharArray#","Read a 32-bit character from mutable array; offset in 4-byte words.") , (fsLit "readIntArray#","Read a word-sized integer from mutable array; offset in machine words.") , (fsLit "readWordArray#","Read a word-sized unsigned integer from mutable array; offset in machine words.") , (fsLit "readAddrArray#","Read a machine address from mutable array; offset in machine words.") , (fsLit "readFloatArray#","Read a single-precision floating-point value from mutable array; offset in 4-byte words.") , (fsLit "readDoubleArray#","Read a double-precision floating-point value from mutable array; offset in 8-byte words.") , (fsLit "readStablePtrArray#","Read a 'StablePtr#' value from mutable array; offset in machine words.") , (fsLit "readInt8Array#","Read an 8-bit signed integer from mutable array; offset in bytes.") , (fsLit "readWord8Array#","Read an 8-bit unsigned integer from mutable array; offset in bytes.") , (fsLit "readInt16Array#","Read a 16-bit signed integer from mutable array; offset in 2-byte words.") , (fsLit "readWord16Array#","Read a 16-bit unsigned integer from mutable array; offset in 2-byte words.") , (fsLit "readInt32Array#","Read a 32-bit signed integer from mutable array; offset in 4-byte words.") , (fsLit "readWord32Array#","Read a 32-bit unsigned integer from mutable array; offset in 4-byte words.") , (fsLit "readInt64Array#","Read a 64-bit signed integer from mutable array; offset in 8-byte words.") , (fsLit "readWord64Array#","Read a 64-bit unsigned integer from mutable array; offset in 8-byte words.") , (fsLit "readWord8ArrayAsChar#","Read an 8-bit character from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsWideChar#","Read a 32-bit character from mutable array; offset in 
bytes.") , (fsLit "readWord8ArrayAsInt#","Read a word-sized integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsWord#","Read a word-sized unsigned integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsAddr#","Read a machine address from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsFloat#","Read a single-precision floating-point value from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsDouble#","Read a double-precision floating-point value from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsStablePtr#","Read a 'StablePtr#' value from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsInt16#","Read a 16-bit signed integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsWord16#","Read a 16-bit unsigned integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsInt32#","Read a 32-bit signed integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsWord32#","Read a 32-bit unsigned integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsInt64#","Read a 64-bit signed integer from mutable array; offset in bytes.") , (fsLit "readWord8ArrayAsWord64#","Read a 64-bit unsigned integer from mutable array; offset in bytes.") , (fsLit "writeCharArray#","Write an 8-bit character to mutable array; offset in bytes.") , (fsLit "writeWideCharArray#","Write a 32-bit character to mutable array; offset in 4-byte words.") , (fsLit "writeIntArray#","Write a word-sized integer to mutable array; offset in machine words.") , (fsLit "writeWordArray#","Write a word-sized unsigned integer to mutable array; offset in machine words.") , (fsLit "writeAddrArray#","Write a machine address to mutable array; offset in machine words.") , (fsLit "writeFloatArray#","Write a single-precision floating-point value to mutable array; offset in 4-byte words.") , (fsLit "writeDoubleArray#","Write a double-precision floating-point value to mutable array; offset in 
8-byte words.") , (fsLit "writeStablePtrArray#","Write a 'StablePtr#' value to mutable array; offset in machine words.") , (fsLit "writeInt8Array#","Write an 8-bit signed integer to mutable array; offset in bytes.") , (fsLit "writeWord8Array#","Write an 8-bit unsigned integer to mutable array; offset in bytes.") , (fsLit "writeInt16Array#","Write a 16-bit signed integer to mutable array; offset in 2-byte words.") , (fsLit "writeWord16Array#","Write a 16-bit unsigned integer to mutable array; offset in 2-byte words.") , (fsLit "writeInt32Array#","Write a 32-bit signed integer to mutable array; offset in 4-byte words.") , (fsLit "writeWord32Array#","Write a 32-bit unsigned integer to mutable array; offset in 4-byte words.") , (fsLit "writeInt64Array#","Write a 64-bit signed integer to mutable array; offset in 8-byte words.") , (fsLit "writeWord64Array#","Write a 64-bit unsigned integer to mutable array; offset in 8-byte words.") , (fsLit "writeWord8ArrayAsChar#","Write an 8-bit character to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsWideChar#","Write a 32-bit character to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsInt#","Write a word-sized integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsWord#","Write a word-sized unsigned integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsAddr#","Write a machine address to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsFloat#","Write a single-precision floating-point value to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsDouble#","Write a double-precision floating-point value to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsStablePtr#","Write a 'StablePtr#' value to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsInt16#","Write a 16-bit signed integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsWord16#","Write a 16-bit unsigned integer to mutable array; offset in bytes.") , 
(fsLit "writeWord8ArrayAsInt32#","Write a 32-bit signed integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsWord32#","Write a 32-bit unsigned integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsInt64#","Write a 64-bit signed integer to mutable array; offset in bytes.") , (fsLit "writeWord8ArrayAsWord64#","Write a 64-bit unsigned integer to mutable array; offset in bytes.") , (fsLit "compareByteArrays#","@'compareByteArrays#' src1 src1_ofs src2 src2_ofs n@ compares\n @n@ bytes starting at offset @src1_ofs@ in the first\n 'ByteArray#' @src1@ to the range of @n@ bytes\n (i.e. same length) starting at offset @src2_ofs@ of the second\n 'ByteArray#' @src2@. Both arrays must fully contain the\n specified ranges, but this is not checked. Returns an 'Int#'\n less than, equal to, or greater than zero if the range is found,\n respectively, to be byte-wise lexicographically less than, to\n match, or be greater than the second range.\n\n @since 0.5.2.0") , (fsLit "copyByteArray#"," @'copyByteArray#' src src_ofs dst dst_ofs len@ copies the range\n starting at offset @src_ofs@ of length @len@ from the\n 'ByteArray#' @src@ to the 'MutableByteArray#' @dst@\n starting at offset @dst_ofs@. Both arrays must fully contain\n the specified ranges, but this is not checked. The two arrays must\n not be the same array in different states, but this is not checked\n either.\n ") , (fsLit "copyMutableByteArray#"," @'copyMutableByteArray#' src src_ofs dst dst_ofs len@ copies the\n range starting at offset @src_ofs@ of length @len@ from the\n 'MutableByteArray#' @src@ to the 'MutableByteArray#' @dst@\n starting at offset @dst_ofs@. Both arrays must fully contain the\n specified ranges, but this is not checked. 
The regions are\n allowed to overlap, although this is only possible when the same\n array is provided as both the source and the destination.\n ") , (fsLit "copyMutableByteArrayNonOverlapping#"," @'copyMutableByteArrayNonOverlapping#' src src_ofs dst dst_ofs len@\n copies the range starting at offset @src_ofs@ of length @len@ from\n the 'MutableByteArray#' @src@ to the 'MutableByteArray#' @dst@\n starting at offset @dst_ofs@. Both arrays must fully contain the\n specified ranges, but this is not checked. The regions are /not/\n allowed to overlap, but this is also not checked.\n\n @since 0.11.0\n ") , (fsLit "copyByteArrayToAddr#","Copy a range of the ByteArray\\# to the memory range starting at the Addr\\#.\n The ByteArray\\# and the memory region at Addr\\# must fully contain the\n specified ranges, but this is not checked. The Addr\\# must not point into the\n ByteArray\\# (e.g. if the ByteArray\\# were pinned), but this is not checked\n either.") , (fsLit "copyMutableByteArrayToAddr#","Copy a range of the MutableByteArray\\# to the memory range starting at the\n Addr\\#. The MutableByteArray\\# and the memory region at Addr\\# must fully\n contain the specified ranges, but this is not checked. The Addr\\# must not\n point into the MutableByteArray\\# (e.g. if the MutableByteArray\\# were\n pinned), but this is not checked either.") , (fsLit "copyAddrToByteArray#","Copy a memory range starting at the Addr\\# to the specified range in the\n MutableByteArray\\#. The memory region at Addr\\# and the ByteArray\\# must fully\n contain the specified ranges, but this is not checked. The Addr\\# must not\n point into the MutableByteArray\\# (e.g. if the MutableByteArray\\# were pinned),\n but this is not checked either.") , (fsLit "copyAddrToAddr#"," @'copyAddrToAddr#' src dest len@ copies @len@ bytes\n from @src@ to @dest@. 
These two memory ranges are allowed to overlap.\n\n Analogous to the standard C function @memmove@, but with a different\n argument order.\n\n @since 0.11.0\n ") , (fsLit "copyAddrToAddrNonOverlapping#"," @'copyAddrToAddrNonOverlapping#' src dest len@ copies @len@ bytes\n from @src@ to @dest@. As the name suggests, these two memory ranges\n /must not overlap/, although this pre-condition is not checked.\n\n Analogous to the standard C function @memcpy@, but with a different\n argument order.\n\n @since 0.11.0\n ") , (fsLit "setByteArray#","@'setByteArray#' ba off len c@ sets the byte range @[off, off+len)@ of\n the 'MutableByteArray#' to the byte @c@.") , (fsLit "setAddrRange#"," @'setAddrRange#' dest len c@ sets all of the bytes in\n @[dest, dest+len)@ to the value @c@.\n\n Analogous to the standard C function @memset@, but with a different\n argument order.\n\n @since 0.11.0\n ") , (fsLit "atomicReadIntArray#","Given an array and an offset in machine words, read an element. The\n index is assumed to be in bounds. Implies a full memory barrier.") , (fsLit "atomicWriteIntArray#","Given an array and an offset in machine words, write an element. The\n index is assumed to be in bounds. Implies a full memory barrier.") , (fsLit "casIntArray#","Given an array, an offset in machine words, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , (fsLit "casInt8Array#","Given an array, an offset in bytes, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. 
Implies a full memory\n barrier.") , (fsLit "casInt16Array#","Given an array, an offset in 16 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , (fsLit "casInt32Array#","Given an array, an offset in 32 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , (fsLit "casInt64Array#","Given an array, an offset in 64 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , (fsLit "fetchAddIntArray#","Given an array, and offset in machine words, and a value to add,\n atomically add the value to the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchSubIntArray#","Given an array, and offset in machine words, and a value to subtract,\n atomically subtract the value from the element. Returns the value of\n the element before the operation. Implies a full memory barrier.") , (fsLit "fetchAndIntArray#","Given an array, and offset in machine words, and a value to AND,\n atomically AND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchNandIntArray#","Given an array, and offset in machine words, and a value to NAND,\n atomically NAND the value into the element. Returns the value of the\n element before the operation. 
Implies a full memory barrier.") , (fsLit "fetchOrIntArray#","Given an array, and offset in machine words, and a value to OR,\n atomically OR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchXorIntArray#","Given an array, and offset in machine words, and a value to XOR,\n atomically XOR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "Addr#"," An arbitrary machine address assumed to point outside\n the garbage-collected heap. ") , (fsLit "nullAddr#"," The null address. ") , (fsLit "minusAddr#","Result is meaningless if two 'Addr#'s are so far apart that their\n difference doesn't fit in an 'Int#'.") , (fsLit "remAddr#","Return the remainder when the 'Addr#' arg, treated like an 'Int#',\n is divided by the 'Int#' arg.") , (fsLit "addr2Int#","Coerce directly from address to int. Users are discouraged from using\n this operation as it makes little sense on platforms with tagged pointers.") , (fsLit "int2Addr#","Coerce directly from int to address. 
Users are discouraged from using\n this operation as it makes little sense on platforms with tagged pointers.") , (fsLit "indexCharOffAddr#","Read an 8-bit character from immutable address; offset in bytes.\n\n") , (fsLit "indexWideCharOffAddr#","Read a 32-bit character from immutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexIntOffAddr#","Read a word-sized integer from immutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexWordOffAddr#","Read a word-sized unsigned integer from immutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexAddrOffAddr#","Read a machine address from immutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexFloatOffAddr#","Read a single-precision floating-point value from immutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexDoubleOffAddr#","Read a double-precision floating-point value from immutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexStablePtrOffAddr#","Read a 'StablePtr#' value from immutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexInt8OffAddr#","Read an 8-bit signed integer from immutable address; offset in bytes.\n\n") , (fsLit "indexWord8OffAddr#","Read an 8-bit unsigned integer from immutable address; offset in bytes.\n\n") , (fsLit "indexInt16OffAddr#","Read a 16-bit signed integer from immutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , 
(fsLit "indexWord16OffAddr#","Read a 16-bit unsigned integer from immutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexInt32OffAddr#","Read a 32-bit signed integer from immutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexWord32OffAddr#","Read a 32-bit unsigned integer from immutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexInt64OffAddr#","Read a 64-bit signed integer from immutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexWord64OffAddr#","Read a 64-bit unsigned integer from immutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "indexWord8OffAddrAsChar#","Read an 8-bit character from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsWideChar#","Read a 32-bit character from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsInt#","Read a word-sized integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsWord#","Read a word-sized unsigned integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsAddr#","Read a machine address from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsFloat#","Read a single-precision floating-point value from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsDouble#","Read a double-precision floating-point value from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsStablePtr#","Read a 'StablePtr#' value from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsInt16#","Read a 16-bit signed integer from immutable address; offset in bytes.") , (fsLit 
"indexWord8OffAddrAsWord16#","Read a 16-bit unsigned integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsInt32#","Read a 32-bit signed integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsWord32#","Read a 32-bit unsigned integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsInt64#","Read a 64-bit signed integer from immutable address; offset in bytes.") , (fsLit "indexWord8OffAddrAsWord64#","Read a 64-bit unsigned integer from immutable address; offset in bytes.") , (fsLit "readCharOffAddr#","Read an 8-bit character from mutable address; offset in bytes.\n\n") , (fsLit "readWideCharOffAddr#","Read a 32-bit character from mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readIntOffAddr#","Read a word-sized integer from mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readWordOffAddr#","Read a word-sized unsigned integer from mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readAddrOffAddr#","Read a machine address from mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readFloatOffAddr#","Read a single-precision floating-point value from mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readDoubleOffAddr#","Read a double-precision floating-point value from mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readStablePtrOffAddr#","Read a 'StablePtr#' value from mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") 
, (fsLit "readInt8OffAddr#","Read an 8-bit signed integer from mutable address; offset in bytes.\n\n") , (fsLit "readWord8OffAddr#","Read an 8-bit unsigned integer from mutable address; offset in bytes.\n\n") , (fsLit "readInt16OffAddr#","Read a 16-bit signed integer from mutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readWord16OffAddr#","Read a 16-bit unsigned integer from mutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readInt32OffAddr#","Read a 32-bit signed integer from mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readWord32OffAddr#","Read a 32-bit unsigned integer from mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readInt64OffAddr#","Read a 64-bit signed integer from mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readWord64OffAddr#","Read a 64-bit unsigned integer from mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "readWord8OffAddrAsChar#","Read an 8-bit character from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsWideChar#","Read a 32-bit character from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsInt#","Read a word-sized integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsWord#","Read a word-sized unsigned integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsAddr#","Read a machine address from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsFloat#","Read a single-precision floating-point value from mutable address; offset in bytes.") , 
(fsLit "readWord8OffAddrAsDouble#","Read a double-precision floating-point value from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsStablePtr#","Read a 'StablePtr#' value from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsInt16#","Read a 16-bit signed integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsWord16#","Read a 16-bit unsigned integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsInt32#","Read a 32-bit signed integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsWord32#","Read a 32-bit unsigned integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsInt64#","Read a 64-bit signed integer from mutable address; offset in bytes.") , (fsLit "readWord8OffAddrAsWord64#","Read a 64-bit unsigned integer from mutable address; offset in bytes.") , (fsLit "writeCharOffAddr#","Write an 8-bit character to mutable address; offset in bytes.\n\n") , (fsLit "writeWideCharOffAddr#","Write a 32-bit character to mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeIntOffAddr#","Write a word-sized integer to mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeWordOffAddr#","Write a word-sized unsigned integer to mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeAddrOffAddr#","Write a machine address to mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeFloatOffAddr#","Write a single-precision floating-point value to mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeDoubleOffAddr#","Write a double-precision 
floating-point value to mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeStablePtrOffAddr#","Write a 'StablePtr#' value to mutable address; offset in machine words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeInt8OffAddr#","Write an 8-bit signed integer to mutable address; offset in bytes.\n\n") , (fsLit "writeWord8OffAddr#","Write an 8-bit unsigned integer to mutable address; offset in bytes.\n\n") , (fsLit "writeInt16OffAddr#","Write a 16-bit signed integer to mutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeWord16OffAddr#","Write a 16-bit unsigned integer to mutable address; offset in 2-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeInt32OffAddr#","Write a 32-bit signed integer to mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeWord32OffAddr#","Write a 32-bit unsigned integer to mutable address; offset in 4-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeInt64OffAddr#","Write a 64-bit signed integer to mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeWord64OffAddr#","Write a 64-bit unsigned integer to mutable address; offset in 8-byte words.\n\nOn some platforms, the access may fail\nfor an insufficiently aligned @Addr#@.") , (fsLit "writeWord8OffAddrAsChar#","Write an 8-bit character to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsWideChar#","Write a 32-bit character to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsInt#","Write a word-sized integer to mutable address; offset in bytes.") 
, (fsLit "writeWord8OffAddrAsWord#","Write a word-sized unsigned integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsAddr#","Write a machine address to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsFloat#","Write a single-precision floating-point value to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsDouble#","Write a double-precision floating-point value to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsStablePtr#","Write a 'StablePtr#' value to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsInt16#","Write a 16-bit signed integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsWord16#","Write a 16-bit unsigned integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsInt32#","Write a 32-bit signed integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsWord32#","Write a 32-bit unsigned integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsInt64#","Write a 64-bit signed integer to mutable address; offset in bytes.") , (fsLit "writeWord8OffAddrAsWord64#","Write a 64-bit unsigned integer to mutable address; offset in bytes.") , (fsLit "atomicExchangeAddrAddr#","The atomic exchange operation. Atomically exchanges the value at the first address\n with the Addr# given as second argument. Implies a read barrier.") , (fsLit "atomicExchangeWordAddr#","The atomic exchange operation. Atomically exchanges the value at the address\n with the given value. Returns the old value. Implies a read barrier.") , (fsLit "atomicCasAddrAddr#"," Compare and swap on a word-sized memory location.\n\n Use as: \\s -> atomicCasAddrAddr# location expected desired s\n\n This version always returns the old value read. 
This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "atomicCasWordAddr#"," Compare and swap on a word-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "atomicCasWord8Addr#"," Compare and swap on a 8 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr8# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "atomicCasWord16Addr#"," Compare and swap on a 16 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr16# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "atomicCasWord32Addr#"," Compare and swap on a 32 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr32# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "atomicCasWord64Addr#"," Compare and swap on a 64 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr64# location expected desired s\n\n This version always returns the old value read. 
This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , (fsLit "fetchAddWordAddr#","Given an address, and a value to add,\n atomically add the value to the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchSubWordAddr#","Given an address, and a value to subtract,\n atomically subtract the value from the element. Returns the value of\n the element before the operation. Implies a full memory barrier.") , (fsLit "fetchAndWordAddr#","Given an address, and a value to AND,\n atomically AND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchNandWordAddr#","Given an address, and a value to NAND,\n atomically NAND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchOrWordAddr#","Given an address, and a value to OR,\n atomically OR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "fetchXorWordAddr#","Given an address, and a value to XOR,\n atomically XOR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , (fsLit "atomicReadWordAddr#","Given an address, read a machine word. Implies a full memory barrier.") , (fsLit "atomicWriteWordAddr#","Given an address, write a machine word. Implies a full memory barrier.") , (fsLit "MutVar#","A 'MutVar#' behaves like a single-element mutable array.") , (fsLit "newMutVar#","Create 'MutVar#' with specified initial value in specified state thread.") , (fsLit "readMutVar#","Read contents of 'MutVar#'. 
Result is not yet evaluated.") , (fsLit "writeMutVar#","Write contents of 'MutVar#'.") , (fsLit "atomicSwapMutVar#","Atomically exchange the value of a 'MutVar#'.") , (fsLit "atomicModifyMutVar2#"," Modify the contents of a 'MutVar#', returning the previous\n contents @x :: a@ and the result of applying the given function to the\n previous contents @f x :: c@.\n\n The @data@ type @c@ (not a @newtype@!) must be a record whose first field\n is of lifted type @a :: Type@ and is not unpacked. For example, product\n types @c ~ Solo a@ or @c ~ (a, b)@ work well. If the record type is both\n monomorphic and strict in its first field, it's recommended to mark the\n latter @{-# NOUNPACK #-}@ explicitly.\n\n Under the hood 'atomicModifyMutVar2#' atomically replaces a pointer to an\n old @x :: a@ with a pointer to a selector thunk @fst r@, where\n @fst@ is a selector for the first field of the record and @r@ is a\n function application thunk @r = f x@.\n\n @atomicModifyIORef2Native@ from @atomic-modify-general@ package makes an\n effort to reflect restrictions on @c@ faithfully, providing a\n well-typed high-level wrapper.") , (fsLit "atomicModifyMutVar_#"," Modify the contents of a 'MutVar#', returning the previous\n contents and the result of applying the given function to the\n previous contents. ") , (fsLit "casMutVar#"," Compare-and-swap: perform a pointer equality test between\n the first value passed to this function and the value\n stored inside the 'MutVar#'. 
If the pointers are equal,\n replace the stored value with the second value passed to this\n function, otherwise do nothing.\n Returns the final value stored inside the 'MutVar#'.\n The 'Int#' indicates whether a swap took place,\n with @1#@ meaning that we didn't swap, and @0#@\n that we did.\n Implies a full memory barrier.\n Because the comparison is done on the level of pointers,\n all of the difficulties of using\n 'reallyUnsafePtrEquality#' correctly apply to\n 'casMutVar#' as well.\n ") , (fsLit "catch#"," @'catch#' k handler s@ evaluates @k s@, invoking @handler@ on any exceptions\n thrown.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; see the section \\\"RuntimeRep polymorphism\n in continuation-style primops\\\" for details. ") , (fsLit "maskAsyncExceptions#"," @'maskAsyncExceptions#' k s@ evaluates @k s@ such that asynchronous\n exceptions are deferred until after evaluation has finished.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; see the section \\\"RuntimeRep polymorphism\n in continuation-style primops\\\" for details. ") , (fsLit "maskUninterruptible#"," @'maskUninterruptible#' k s@ evaluates @k s@ such that asynchronous\n exceptions are deferred until after evaluation has finished.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; see the section \\\"RuntimeRep polymorphism\n in continuation-style primops\\\" for details. ") , (fsLit "unmaskAsyncExceptions#"," @'unmaskAsyncUninterruptible#' k s@ evaluates @k s@ such that asynchronous\n exceptions are unmasked.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; see the section \\\"RuntimeRep polymorphism\n in continuation-style primops\\\" for details. ") , (fsLit "PromptTag#"," See \"GHC.Prim#continuations\". ") , (fsLit "newPromptTag#"," See \"GHC.Prim#continuations\". 
") , (fsLit "prompt#"," See \"GHC.Prim#continuations\". ") , (fsLit "control0#"," See \"GHC.Prim#continuations\". ") , (fsLit "newTVar#","Create a new 'TVar#' holding a specified initial value.") , (fsLit "readTVar#","Read contents of 'TVar#' inside an STM transaction,\n i.e. within a call to 'atomically#'.\n Does not force evaluation of the result.") , (fsLit "readTVarIO#","Read contents of 'TVar#' outside an STM transaction.\n Does not force evaluation of the result.") , (fsLit "writeTVar#","Write contents of 'TVar#'.") , (fsLit "MVar#"," A shared mutable variable (/not/ the same as a 'MutVar#'!).\n (Note: in a non-concurrent implementation, @('MVar#' a)@ can be\n represented by @('MutVar#' (Maybe a))@.) ") , (fsLit "newMVar#","Create new 'MVar#'; initially empty.") , (fsLit "takeMVar#","If 'MVar#' is empty, block until it becomes full.\n Then remove and return its contents, and set it empty.") , (fsLit "tryTakeMVar#","If 'MVar#' is empty, immediately return with integer 0 and value undefined.\n Otherwise, return with integer 1 and contents of 'MVar#', and set 'MVar#' empty.") , (fsLit "putMVar#","If 'MVar#' is full, block until it becomes empty.\n Then store value arg as its new contents.") , (fsLit "tryPutMVar#","If 'MVar#' is full, immediately return with integer 0.\n Otherwise, store value arg as 'MVar#''s new contents, and return with integer 1.") , (fsLit "readMVar#","If 'MVar#' is empty, block until it becomes full.\n Then read its contents without modifying the MVar, without possibility\n of intervention from other threads.") , (fsLit "tryReadMVar#","If 'MVar#' is empty, immediately return with integer 0 and value undefined.\n Otherwise, return with integer 1 and contents of 'MVar#'.") , (fsLit "isEmptyMVar#","Return 1 if 'MVar#' is empty; 0 otherwise.") , (fsLit "IOPort#"," A shared I/O port is almost the same as an 'MVar#'.\n The main difference is that IOPort has no deadlock detection or\n deadlock breaking code that forcibly releases the lock. 
") , (fsLit "newIOPort#","Create new 'IOPort#'; initially empty.") , (fsLit "readIOPort#","If 'IOPort#' is empty, block until it becomes full.\n Then remove and return its contents, and set it empty.\n Throws an 'IOPortException' if another thread is already\n waiting to read this 'IOPort#'.") , (fsLit "writeIOPort#","If 'IOPort#' is full, immediately return with integer 0,\n throwing an 'IOPortException'.\n Otherwise, store value arg as 'IOPort#''s new contents,\n and return with integer 1. ") , (fsLit "delay#","Sleep specified number of microseconds.") , (fsLit "waitRead#","Block until input is available on specified file descriptor.") , (fsLit "waitWrite#","Block until output is possible on specified file descriptor.") , (fsLit "State#"," 'State#' is the primitive, unlifted type of states. It has\n one type parameter, thus @'State#' 'RealWorld'@, or @'State#' s@,\n where s is a type variable. The only purpose of the type parameter\n is to keep different state threads separate. It is represented by\n nothing at all. ") , (fsLit "RealWorld"," 'RealWorld' is deeply magical. It is /primitive/, but it is not\n /unlifted/ (hence @ptrArg@). We never manipulate values of type\n 'RealWorld'; it's only used in the type system, to parameterise 'State#'. ") , (fsLit "ThreadId#","(In a non-concurrent implementation, this can be a singleton\n type, whose (unique) value is returned by 'myThreadId#'. The\n other operations can be omitted.)") , (fsLit "labelThread#","Set the label of the given thread. The @ByteArray#@ should contain\n a UTF-8-encoded string.") , (fsLit "threadLabel#","Get the label of the given thread.\n Morally of type @ThreadId# -> IO (Maybe ByteArray#)@, with a @1#@ tag\n denoting @Just@.\n\n @since 0.10") , (fsLit "threadStatus#","Get the status of the given thread. 
Result is\n @(ThreadStatus, Capability, Locked)@ where\n @ThreadStatus@ is one of the status constants defined in\n @rts/Constants.h@, @Capability@ is the number of\n the capability which currently owns the thread, and\n @Locked@ is a boolean indicating whether the\n thread is bound to that capability.\n\n @since 0.9") , (fsLit "listThreads#"," Returns an array of the threads started by the program. Note that this\n threads which have finished execution may or may not be present in this\n list, depending upon whether they have been collected by the garbage collector.\n\n @since 0.10") , (fsLit "mkWeak#"," @'mkWeak#' k v finalizer s@ creates a weak reference to value @k@,\n with an associated reference to some value @v@. If @k@ is still\n alive then @v@ can be retrieved using 'deRefWeak#'. Note that\n the type of @k@ must be represented by a pointer (i.e. of kind\n @'TYPE' ''LiftedRep' or @'TYPE' ''UnliftedRep'@). ") , (fsLit "addCFinalizerToWeak#"," @'addCFinalizerToWeak#' fptr ptr flag eptr w@ attaches a C\n function pointer @fptr@ to a weak pointer @w@ as a finalizer. If\n @flag@ is zero, @fptr@ will be called with one argument,\n @ptr@. Otherwise, it will be called with two arguments,\n @eptr@ and @ptr@. 'addCFinalizerToWeak#' returns\n 1 on success, or 0 if @w@ is already dead. ") , (fsLit "finalizeWeak#"," Finalize a weak pointer. The return value is an unboxed tuple\n containing the new state of the world and an \"unboxed Maybe\",\n represented by an 'Int#' and a (possibly invalid) finalization\n action. An 'Int#' of @1@ indicates that the finalizer is valid. The\n return value @b@ from the finalizer should be ignored. ") , (fsLit "compactNew#"," Create a new CNF with a single compact block. The argument is\n the capacity of the compact block (in bytes, not words).\n The capacity is rounded up to a multiple of the allocator block size\n and is capped to one mega block. ") , (fsLit "compactResize#"," Set the new allocation size of the CNF. 
This value (in bytes)\n determines the capacity of each compact block in the CNF. It\n does not retroactively affect existing compact blocks in the CNF. ") , (fsLit "compactContains#"," Returns 1\\# if the object is contained in the CNF, 0\\# otherwise. ") , (fsLit "compactContainsAny#"," Returns 1\\# if the object is in any CNF at all, 0\\# otherwise. ") , (fsLit "compactGetFirstBlock#"," Returns the address and the utilized size (in bytes) of the\n first compact block of a CNF.") , (fsLit "compactGetNextBlock#"," Given a CNF and the address of one its compact blocks, returns the\n next compact block and its utilized size, or 'nullAddr#' if the\n argument was the last compact block in the CNF. ") , (fsLit "compactAllocateBlock#"," Attempt to allocate a compact block with the capacity (in\n bytes) given by the first argument. The 'Addr#' is a pointer\n to previous compact block of the CNF or 'nullAddr#' to create a\n new CNF with a single compact block.\n\n The resulting block is not known to the GC until\n 'compactFixupPointers#' is called on it, and care must be taken\n so that the address does not escape or memory will be leaked.\n ") , (fsLit "compactFixupPointers#"," Given the pointer to the first block of a CNF and the\n address of the root object in the old address space, fix up\n the internal pointers inside the CNF to account for\n a different position in memory than when it was serialized.\n This method must be called exactly once after importing\n a serialized CNF. It returns the new CNF and the new adjusted\n root address. ") , (fsLit "compactAdd#"," Recursively add a closure and its transitive closure to a\n 'Compact#' (a CNF), evaluating any unevaluated components\n at the same time. Note: 'compactAdd#' is not thread-safe, so\n only one thread may call 'compactAdd#' with a particular\n 'Compact#' at any given time. The primop does not\n enforce any mutual exclusion; the caller is expected to\n arrange this. 
") , (fsLit "compactAddWithSharing#"," Like 'compactAdd#', but retains sharing and cycles\n during compaction. ") , (fsLit "compactSize#"," Return the total capacity (in bytes) of all the compact blocks\n in the CNF. ") , (fsLit "reallyUnsafePtrEquality#"," Returns @1#@ if the given pointers are equal and @0#@ otherwise. ") , (fsLit "par#","Create a new spark evaluating the given argument.\n The return value should always be 1.\n Users are encouraged to use spark# instead.") , (fsLit "numSparks#"," Returns the number of sparks in the local spark pool. ") , (fsLit "keepAlive#"," @'keepAlive#' x s k@ keeps the value @x@ alive during the execution\n of the computation @k@.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; see the section \\\"RuntimeRep polymorphism\n in continuation-style primops\\\" for details. ") , (fsLit "dataToTagSmall#"," Used internally to implement @dataToTag#@: Use that function instead!\n This one normally offers /no advantage/ and comes with no stability\n guarantees: it may change its type, its name, or its behavior\n with /no warning/ between compiler releases.\n\n It is expected that this function will be un-exposed in a future\n release of ghc.\n\n For more details, look at @Note [DataToTag overview]@\n in GHC.Tc.Instance.Class in the source code for\n /the specific compiler version you are using./\n ") , (fsLit "dataToTagLarge#"," Used internally to implement @dataToTag#@: Use that function instead!\n This one offers /no advantage/ and comes with no stability\n guarantees: it may change its type, its name, or its behavior\n with /no warning/ between compiler releases.\n\n It is expected that this function will be un-exposed in a future\n release of ghc.\n\n For more details, look at @Note [DataToTag overview]@\n in GHC.Tc.Instance.Class in the source code for\n /the specific compiler version you are using./\n ") , (fsLit "BCO"," Primitive bytecode type. 
") , (fsLit "addrToAny#"," Convert an 'Addr#' to a followable Any type. ") , (fsLit "anyToAddr#"," Retrieve the address of any Haskell value. This is\n essentially an 'unsafeCoerce#', but if implemented as such\n the core lint pass complains and fails to compile.\n As a primop, it is opaque to core/stg, and only appears\n in cmm (where the copy propagation pass will get rid of it).\n Note that \"a\" must be a value, not a thunk! It's too late\n for strictness analysis to enforce this, so you're on your\n own to guarantee this. Also note that 'Addr#' is not a GC\n pointer - up to you to guarantee that it does not become\n a dangling pointer immediately after you get it.") , (fsLit "mkApUpd0#"," Wrap a BCO in a @AP_UPD@ thunk which will be updated with the value of\n the BCO when evaluated. ") , (fsLit "newBCO#"," @'newBCO#' instrs lits ptrs arity bitmap@ creates a new bytecode object. The\n resulting object encodes a function of the given arity with the instructions\n encoded in @instrs@, and a static reference table usage bitmap given by\n @bitmap@. ") , (fsLit "unpackClosure#"," @'unpackClosure#' closure@ copies the closure and pointers in the\n payload of the given closure into two new arrays, and returns a pointer to\n the first word of the closure's info table, a non-pointer array for the raw\n bytes of the closure, and a pointer array for the pointers in the payload. ") , (fsLit "closureSize#"," @'closureSize#' closure@ returns the size of the given closure in\n machine words. ") , (fsLit "getCurrentCCS#"," Returns the current 'CostCentreStack' (value is @NULL@ if\n not profiling). Takes a dummy argument which can be used to\n avoid the call to 'getCurrentCCS#' being floated out by the\n simplifier, which would result in an uninformative stack\n (\"CAF\"). ") , (fsLit "clearCCS#"," Run the supplied IO action with an empty CCS. 
For example, this\n is used by the interpreter to run an interpreted computation\n without the call stack showing that it was invoked from GHC. ") , (fsLit "whereFrom#"," Fills the given buffer with the @InfoProvEnt@ for the info table of the\n given object. Returns @1#@ on success and @0#@ otherwise.") , (fsLit "FUN","The builtin function type, written in infix form as @a % m -> b@.\n Values of this type are functions taking inputs of type @a@ and\n producing outputs of type @b@. The multiplicity of the input is\n @m@.\n\n Note that @'FUN' m a b@ permits representation polymorphism in both\n @a@ and @b@, so that types like @'Int#' -> 'Int#'@ can still be\n well-kinded.\n ") , (fsLit "realWorld#"," The token used in the implementation of the IO monad as a state monad.\n It does not pass any information at runtime.\n See also 'GHC.Magic.runRW#'. ") , (fsLit "void#"," This is an alias for the unboxed unit tuple constructor.\n In earlier versions of GHC, 'void#' was a value\n of the primitive type 'Void#', which is now defined to be @(# #)@.\n ") , (fsLit "Proxy#"," The type constructor 'Proxy#' is used to bear witness to some\n type variable. It's used when you want to pass around proxy values\n for doing things like modelling type applications. A 'Proxy#'\n is not only unboxed, it also has a polymorphic kind, and has no\n runtime representation, being totally free. ") , (fsLit "proxy#"," Witness for an unboxed 'Proxy#' value, which has no runtime\n representation. ") , (fsLit "seq"," The value of @'seq' a b@ is bottom if @a@ is bottom, and\n otherwise equal to @b@. In other words, it evaluates the first\n argument @a@ to weak head normal form (WHNF). 
'seq' is usually\n introduced to improve performance by avoiding unneeded laziness.\n\n A note on evaluation order: the expression @'seq' a b@ does\n /not/ guarantee that @a@ will be evaluated before @b@.\n The only guarantee given by 'seq' is that the both @a@\n and @b@ will be evaluated before 'seq' returns a value.\n In particular, this means that @b@ may be evaluated before\n @a@. If you need to guarantee a specific order of evaluation,\n you must use the function 'pseq' from the \"parallel\" package. ") , (fsLit "traceEvent#"," Emits an event via the RTS tracing framework. The contents\n of the event is the zero-terminated byte string passed as the first\n argument. The event will be emitted either to the @.eventlog@ file,\n or to stderr, depending on the runtime RTS flags. ") , (fsLit "traceBinaryEvent#"," Emits an event via the RTS tracing framework. The contents\n of the event is the binary object passed as the first argument with\n the given length passed as the second argument. The event will be\n emitted to the @.eventlog@ file. ") , (fsLit "traceMarker#"," Emits a marker event via the RTS tracing framework. The contents\n of the event is the zero-terminated byte string passed as the first\n argument. The event will be emitted either to the @.eventlog@ file,\n or to stderr, depending on the runtime RTS flags. ") , (fsLit "setThreadAllocationCounter#"," Sets the allocation counter for the current thread to the given value. ") , (fsLit "StackSnapshot#"," Haskell representation of a @StgStack*@ that was created (cloned)\n with a function in \"GHC.Stack.CloneStack\". Please check the\n documentation in that module for more detailed explanations. ") , (fsLit "coerce"," The function 'coerce' allows you to safely convert between values of\n types that have the same representation with no run-time overhead. In the\n simplest case you can use it instead of a newtype constructor, to go from\n the newtype's concrete type to the abstract type. 
But it also works in\n more complicated settings, e.g. converting a list of newtypes to a list of\n concrete types.\n\n When used in conversions involving a newtype wrapper,\n make sure the newtype constructor is in scope.\n\n This function is representation-polymorphic, but the\n 'RuntimeRep' type argument is marked as 'Inferred', meaning\n that it is not available for visible type application. This means\n the typechecker will accept @'coerce' \\@'Int' \\@Age 42@.\n\n === __Examples__\n\n >>> newtype TTL = TTL Int deriving (Eq, Ord, Show)\n >>> newtype Age = Age Int deriving (Eq, Ord, Show)\n >>> coerce (Age 42) :: TTL\n TTL 42\n >>> coerce (+ (1 :: Int)) (Age 42) :: TTL\n TTL 43\n >>> coerce (map (+ (1 :: Int))) [Age 42, Age 24] :: [TTL]\n [TTL 43,TTL 25]\n\n ") , (fsLit "broadcastInt8X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt16X8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt32X4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt64X2#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt8X32#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt16X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt32X8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt64X4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt8X64#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt16X32#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt32X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastInt64X8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord8X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord16X8#"," Broadcast a scalar to all elements of a vector. 
") , (fsLit "broadcastWord32X4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord64X2#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord8X32#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord16X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord32X8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord64X4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord8X64#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord16X32#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord32X16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastWord64X8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastFloatX4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastDoubleX2#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastFloatX8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastDoubleX4#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastFloatX16#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "broadcastDoubleX8#"," Broadcast a scalar to all elements of a vector. ") , (fsLit "packInt8X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt16X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt32X4#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt64X2#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt8X32#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt16X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt32X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt64X4#"," Pack the elements of an unboxed tuple into a vector. 
") , (fsLit "packInt8X64#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt16X32#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt32X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packInt64X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord8X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord16X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord32X4#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord64X2#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord8X32#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord16X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord32X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord64X4#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord8X64#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord16X32#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord32X16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packWord64X8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packFloatX4#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packDoubleX2#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packFloatX8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packDoubleX4#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packFloatX16#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "packDoubleX8#"," Pack the elements of an unboxed tuple into a vector. ") , (fsLit "unpackInt8X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt16X8#"," Unpack the elements of a vector into an unboxed tuple. 
#") , (fsLit "unpackInt32X4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt64X2#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt8X32#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt16X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt32X8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt64X4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt8X64#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt16X32#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt32X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackInt64X8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord8X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord16X8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord32X4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord64X2#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord8X32#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord16X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord32X8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord64X4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord8X64#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord16X32#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord32X16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackWord64X8#"," Unpack the elements of a vector into an unboxed tuple. 
#") , (fsLit "unpackFloatX4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackDoubleX2#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackFloatX8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackDoubleX4#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackFloatX16#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "unpackDoubleX8#"," Unpack the elements of a vector into an unboxed tuple. #") , (fsLit "insertInt8X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt16X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt32X4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt64X2#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt8X32#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt16X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt32X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt64X4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt8X64#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt16X32#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt32X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertInt64X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord8X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord16X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord32X4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord64X2#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord8X32#"," Insert a scalar at the given position in a vector. 
") , (fsLit "insertWord16X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord32X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord64X4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord8X64#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord16X32#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord32X16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertWord64X8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertFloatX4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertDoubleX2#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertFloatX8#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertDoubleX4#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertFloatX16#"," Insert a scalar at the given position in a vector. ") , (fsLit "insertDoubleX8#"," Insert a scalar at the given position in a vector. ") , (fsLit "plusInt8X16#"," Add two vectors element-wise. ") , (fsLit "plusInt16X8#"," Add two vectors element-wise. ") , (fsLit "plusInt32X4#"," Add two vectors element-wise. ") , (fsLit "plusInt64X2#"," Add two vectors element-wise. ") , (fsLit "plusInt8X32#"," Add two vectors element-wise. ") , (fsLit "plusInt16X16#"," Add two vectors element-wise. ") , (fsLit "plusInt32X8#"," Add two vectors element-wise. ") , (fsLit "plusInt64X4#"," Add two vectors element-wise. ") , (fsLit "plusInt8X64#"," Add two vectors element-wise. ") , (fsLit "plusInt16X32#"," Add two vectors element-wise. ") , (fsLit "plusInt32X16#"," Add two vectors element-wise. ") , (fsLit "plusInt64X8#"," Add two vectors element-wise. ") , (fsLit "plusWord8X16#"," Add two vectors element-wise. ") , (fsLit "plusWord16X8#"," Add two vectors element-wise. ") , (fsLit "plusWord32X4#"," Add two vectors element-wise. 
") , (fsLit "plusWord64X2#"," Add two vectors element-wise. ") , (fsLit "plusWord8X32#"," Add two vectors element-wise. ") , (fsLit "plusWord16X16#"," Add two vectors element-wise. ") , (fsLit "plusWord32X8#"," Add two vectors element-wise. ") , (fsLit "plusWord64X4#"," Add two vectors element-wise. ") , (fsLit "plusWord8X64#"," Add two vectors element-wise. ") , (fsLit "plusWord16X32#"," Add two vectors element-wise. ") , (fsLit "plusWord32X16#"," Add two vectors element-wise. ") , (fsLit "plusWord64X8#"," Add two vectors element-wise. ") , (fsLit "plusFloatX4#"," Add two vectors element-wise. ") , (fsLit "plusDoubleX2#"," Add two vectors element-wise. ") , (fsLit "plusFloatX8#"," Add two vectors element-wise. ") , (fsLit "plusDoubleX4#"," Add two vectors element-wise. ") , (fsLit "plusFloatX16#"," Add two vectors element-wise. ") , (fsLit "plusDoubleX8#"," Add two vectors element-wise. ") , (fsLit "minusInt8X16#"," Subtract two vectors element-wise. ") , (fsLit "minusInt16X8#"," Subtract two vectors element-wise. ") , (fsLit "minusInt32X4#"," Subtract two vectors element-wise. ") , (fsLit "minusInt64X2#"," Subtract two vectors element-wise. ") , (fsLit "minusInt8X32#"," Subtract two vectors element-wise. ") , (fsLit "minusInt16X16#"," Subtract two vectors element-wise. ") , (fsLit "minusInt32X8#"," Subtract two vectors element-wise. ") , (fsLit "minusInt64X4#"," Subtract two vectors element-wise. ") , (fsLit "minusInt8X64#"," Subtract two vectors element-wise. ") , (fsLit "minusInt16X32#"," Subtract two vectors element-wise. ") , (fsLit "minusInt32X16#"," Subtract two vectors element-wise. ") , (fsLit "minusInt64X8#"," Subtract two vectors element-wise. ") , (fsLit "minusWord8X16#"," Subtract two vectors element-wise. ") , (fsLit "minusWord16X8#"," Subtract two vectors element-wise. ") , (fsLit "minusWord32X4#"," Subtract two vectors element-wise. ") , (fsLit "minusWord64X2#"," Subtract two vectors element-wise. 
") , (fsLit "minusWord8X32#"," Subtract two vectors element-wise. ") , (fsLit "minusWord16X16#"," Subtract two vectors element-wise. ") , (fsLit "minusWord32X8#"," Subtract two vectors element-wise. ") , (fsLit "minusWord64X4#"," Subtract two vectors element-wise. ") , (fsLit "minusWord8X64#"," Subtract two vectors element-wise. ") , (fsLit "minusWord16X32#"," Subtract two vectors element-wise. ") , (fsLit "minusWord32X16#"," Subtract two vectors element-wise. ") , (fsLit "minusWord64X8#"," Subtract two vectors element-wise. ") , (fsLit "minusFloatX4#"," Subtract two vectors element-wise. ") , (fsLit "minusDoubleX2#"," Subtract two vectors element-wise. ") , (fsLit "minusFloatX8#"," Subtract two vectors element-wise. ") , (fsLit "minusDoubleX4#"," Subtract two vectors element-wise. ") , (fsLit "minusFloatX16#"," Subtract two vectors element-wise. ") , (fsLit "minusDoubleX8#"," Subtract two vectors element-wise. ") , (fsLit "timesInt8X16#"," Multiply two vectors element-wise. ") , (fsLit "timesInt16X8#"," Multiply two vectors element-wise. ") , (fsLit "timesInt32X4#"," Multiply two vectors element-wise. ") , (fsLit "timesInt64X2#"," Multiply two vectors element-wise. ") , (fsLit "timesInt8X32#"," Multiply two vectors element-wise. ") , (fsLit "timesInt16X16#"," Multiply two vectors element-wise. ") , (fsLit "timesInt32X8#"," Multiply two vectors element-wise. ") , (fsLit "timesInt64X4#"," Multiply two vectors element-wise. ") , (fsLit "timesInt8X64#"," Multiply two vectors element-wise. ") , (fsLit "timesInt16X32#"," Multiply two vectors element-wise. ") , (fsLit "timesInt32X16#"," Multiply two vectors element-wise. ") , (fsLit "timesInt64X8#"," Multiply two vectors element-wise. ") , (fsLit "timesWord8X16#"," Multiply two vectors element-wise. ") , (fsLit "timesWord16X8#"," Multiply two vectors element-wise. ") , (fsLit "timesWord32X4#"," Multiply two vectors element-wise. ") , (fsLit "timesWord64X2#"," Multiply two vectors element-wise. 
") , (fsLit "timesWord8X32#"," Multiply two vectors element-wise. ") , (fsLit "timesWord16X16#"," Multiply two vectors element-wise. ") , (fsLit "timesWord32X8#"," Multiply two vectors element-wise. ") , (fsLit "timesWord64X4#"," Multiply two vectors element-wise. ") , (fsLit "timesWord8X64#"," Multiply two vectors element-wise. ") , (fsLit "timesWord16X32#"," Multiply two vectors element-wise. ") , (fsLit "timesWord32X16#"," Multiply two vectors element-wise. ") , (fsLit "timesWord64X8#"," Multiply two vectors element-wise. ") , (fsLit "timesFloatX4#"," Multiply two vectors element-wise. ") , (fsLit "timesDoubleX2#"," Multiply two vectors element-wise. ") , (fsLit "timesFloatX8#"," Multiply two vectors element-wise. ") , (fsLit "timesDoubleX4#"," Multiply two vectors element-wise. ") , (fsLit "timesFloatX16#"," Multiply two vectors element-wise. ") , (fsLit "timesDoubleX8#"," Multiply two vectors element-wise. ") , (fsLit "divideFloatX4#"," Divide two vectors element-wise. ") , (fsLit "divideDoubleX2#"," Divide two vectors element-wise. ") , (fsLit "divideFloatX8#"," Divide two vectors element-wise. ") , (fsLit "divideDoubleX4#"," Divide two vectors element-wise. ") , (fsLit "divideFloatX16#"," Divide two vectors element-wise. ") , (fsLit "divideDoubleX8#"," Divide two vectors element-wise. ") , (fsLit "quotInt8X16#"," Rounds towards zero element-wise. ") , (fsLit "quotInt16X8#"," Rounds towards zero element-wise. ") , (fsLit "quotInt32X4#"," Rounds towards zero element-wise. ") , (fsLit "quotInt64X2#"," Rounds towards zero element-wise. ") , (fsLit "quotInt8X32#"," Rounds towards zero element-wise. ") , (fsLit "quotInt16X16#"," Rounds towards zero element-wise. ") , (fsLit "quotInt32X8#"," Rounds towards zero element-wise. ") , (fsLit "quotInt64X4#"," Rounds towards zero element-wise. ") , (fsLit "quotInt8X64#"," Rounds towards zero element-wise. ") , (fsLit "quotInt16X32#"," Rounds towards zero element-wise. 
") , (fsLit "quotInt32X16#"," Rounds towards zero element-wise. ") , (fsLit "quotInt64X8#"," Rounds towards zero element-wise. ") , (fsLit "quotWord8X16#"," Rounds towards zero element-wise. ") , (fsLit "quotWord16X8#"," Rounds towards zero element-wise. ") , (fsLit "quotWord32X4#"," Rounds towards zero element-wise. ") , (fsLit "quotWord64X2#"," Rounds towards zero element-wise. ") , (fsLit "quotWord8X32#"," Rounds towards zero element-wise. ") , (fsLit "quotWord16X16#"," Rounds towards zero element-wise. ") , (fsLit "quotWord32X8#"," Rounds towards zero element-wise. ") , (fsLit "quotWord64X4#"," Rounds towards zero element-wise. ") , (fsLit "quotWord8X64#"," Rounds towards zero element-wise. ") , (fsLit "quotWord16X32#"," Rounds towards zero element-wise. ") , (fsLit "quotWord32X16#"," Rounds towards zero element-wise. ") , (fsLit "quotWord64X8#"," Rounds towards zero element-wise. ") , (fsLit "remInt8X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt16X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt32X4#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt64X2#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt8X32#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt16X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt32X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt64X4#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt8X64#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt16X32#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remInt32X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. 
") , (fsLit "remInt64X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord8X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord16X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord32X4#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord64X2#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord8X32#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord16X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord32X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord64X4#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord8X64#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord16X32#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord32X16#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "remWord64X8#"," Satisfies @('quot#' x y) 'times#' y 'plus#' ('rem#' x y) == x@. ") , (fsLit "negateInt8X16#"," Negate element-wise. ") , (fsLit "negateInt16X8#"," Negate element-wise. ") , (fsLit "negateInt32X4#"," Negate element-wise. ") , (fsLit "negateInt64X2#"," Negate element-wise. ") , (fsLit "negateInt8X32#"," Negate element-wise. ") , (fsLit "negateInt16X16#"," Negate element-wise. ") , (fsLit "negateInt32X8#"," Negate element-wise. ") , (fsLit "negateInt64X4#"," Negate element-wise. ") , (fsLit "negateInt8X64#"," Negate element-wise. ") , (fsLit "negateInt16X32#"," Negate element-wise. ") , (fsLit "negateInt32X16#"," Negate element-wise. ") , (fsLit "negateInt64X8#"," Negate element-wise. ") , (fsLit "negateFloatX4#"," Negate element-wise. ") , (fsLit "negateDoubleX2#"," Negate element-wise. ") , (fsLit "negateFloatX8#"," Negate element-wise. 
") , (fsLit "negateDoubleX4#"," Negate element-wise. ") , (fsLit "negateFloatX16#"," Negate element-wise. ") , (fsLit "negateDoubleX8#"," Negate element-wise. ") , (fsLit "indexInt8X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt16X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt32X4Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt64X2Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt8X32Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt16X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt32X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt64X4Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt8X64Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt16X32Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt32X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexInt64X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord8X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord16X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord32X4Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord64X2Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord8X32Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord16X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord32X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord64X4Array#"," Read a vector from specified index of immutable array. 
") , (fsLit "indexWord8X64Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord16X32Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord32X16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexWord64X8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexFloatX4Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexDoubleX2Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexFloatX8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexDoubleX4Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexFloatX16Array#"," Read a vector from specified index of immutable array. ") , (fsLit "indexDoubleX8Array#"," Read a vector from specified index of immutable array. ") , (fsLit "readInt8X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt16X8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt32X4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt64X2Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt8X32Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt16X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt32X8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt64X4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt8X64Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt16X32Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt32X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readInt64X8Array#"," Read a vector from specified index of mutable array. 
") , (fsLit "readWord8X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord16X8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord32X4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord64X2Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord8X32Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord16X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord32X8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord64X4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord8X64Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord16X32Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord32X16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readWord64X8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readFloatX4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readDoubleX2Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readFloatX8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readDoubleX4Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readFloatX16Array#"," Read a vector from specified index of mutable array. ") , (fsLit "readDoubleX8Array#"," Read a vector from specified index of mutable array. ") , (fsLit "writeInt8X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt16X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt32X4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt64X2Array#"," Write a vector to specified index of mutable array. 
") , (fsLit "writeInt8X32Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt16X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt32X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt64X4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt8X64Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt16X32Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt32X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeInt64X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord8X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord16X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord32X4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord64X2Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord8X32Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord16X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord32X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord64X4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord8X64Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord16X32Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord32X16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeWord64X8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeFloatX4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeDoubleX2Array#"," Write a vector to specified index of mutable array. 
") , (fsLit "writeFloatX8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeDoubleX4Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeFloatX16Array#"," Write a vector to specified index of mutable array. ") , (fsLit "writeDoubleX8Array#"," Write a vector to specified index of mutable array. ") , (fsLit "indexInt8X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt16X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt32X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt64X2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt8X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt16X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt32X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt64X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt8X64OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt16X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt32X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexInt64X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord8X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord16X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord32X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord64X2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord8X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord16X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord32X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord64X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord8X64OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord16X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexWord32X16OffAddr#"," Reads vector; offset in bytes. 
") , (fsLit "indexWord64X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexFloatX4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexDoubleX2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexFloatX8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexDoubleX4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexFloatX16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "indexDoubleX8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt8X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt16X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt32X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt64X2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt8X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt16X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt32X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt64X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt8X64OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt16X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt32X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readInt64X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord8X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord16X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord32X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord64X2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord8X32OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord16X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord32X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord64X4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord8X64OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord16X32OffAddr#"," Reads vector; offset in bytes. 
") , (fsLit "readWord32X16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readWord64X8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readFloatX4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readDoubleX2OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readFloatX8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readDoubleX4OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readFloatX16OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "readDoubleX8OffAddr#"," Reads vector; offset in bytes. ") , (fsLit "writeInt8X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt16X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt32X4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt64X2OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt8X32OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt16X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt32X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt64X4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt8X64OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt16X32OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt32X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeInt64X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord8X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord16X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord32X4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord64X2OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord8X32OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord16X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord32X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord64X4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord8X64OffAddr#"," Write vector; offset in bytes. 
") , (fsLit "writeWord16X32OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord32X16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeWord64X8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeFloatX4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeDoubleX2OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeFloatX8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeDoubleX4OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeFloatX16OffAddr#"," Write vector; offset in bytes. ") , (fsLit "writeDoubleX8OffAddr#"," Write vector; offset in bytes. ") , (fsLit "indexInt8ArrayAsInt8X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt16ArrayAsInt16X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt32ArrayAsInt32X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt64ArrayAsInt64X2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt8ArrayAsInt8X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt16ArrayAsInt16X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt32ArrayAsInt32X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt64ArrayAsInt64X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt8ArrayAsInt8X64#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt16ArrayAsInt16X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. 
") , (fsLit "indexInt32ArrayAsInt32X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt64ArrayAsInt64X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord8ArrayAsWord8X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord16ArrayAsWord16X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord32ArrayAsWord32X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord64ArrayAsWord64X2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord8ArrayAsWord8X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord16ArrayAsWord16X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord32ArrayAsWord32X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord64ArrayAsWord64X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord8ArrayAsWord8X64#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord16ArrayAsWord16X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord32ArrayAsWord32X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexWord64ArrayAsWord64X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. 
") , (fsLit "indexFloatArrayAsFloatX4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexDoubleArrayAsDoubleX2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexFloatArrayAsFloatX8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexDoubleArrayAsDoubleX4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexFloatArrayAsFloatX16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "indexDoubleArrayAsDoubleX8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt8ArrayAsInt8X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt16ArrayAsInt16X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt32ArrayAsInt32X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt64ArrayAsInt64X2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt8ArrayAsInt8X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt16ArrayAsInt16X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt32ArrayAsInt32X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt64ArrayAsInt64X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. 
") , (fsLit "readInt8ArrayAsInt8X64#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt16ArrayAsInt16X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt32ArrayAsInt32X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readInt64ArrayAsInt64X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord8ArrayAsWord8X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord16ArrayAsWord16X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord32ArrayAsWord32X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord64ArrayAsWord64X2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord8ArrayAsWord8X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord16ArrayAsWord16X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord32ArrayAsWord32X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord64ArrayAsWord64X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord8ArrayAsWord8X64#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord16ArrayAsWord16X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. 
") , (fsLit "readWord32ArrayAsWord32X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readWord64ArrayAsWord64X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readFloatArrayAsFloatX4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readDoubleArrayAsDoubleX2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readFloatArrayAsFloatX8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readDoubleArrayAsDoubleX4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readFloatArrayAsFloatX16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "readDoubleArrayAsDoubleX8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt8ArrayAsInt8X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt16ArrayAsInt16X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt32ArrayAsInt32X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt64ArrayAsInt64X2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt8ArrayAsInt8X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt16ArrayAsInt16X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. 
") , (fsLit "writeInt32ArrayAsInt32X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt64ArrayAsInt64X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt8ArrayAsInt8X64#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt16ArrayAsInt16X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt32ArrayAsInt32X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeInt64ArrayAsInt64X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord8ArrayAsWord8X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord16ArrayAsWord16X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord32ArrayAsWord32X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord64ArrayAsWord64X2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord8ArrayAsWord8X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord16ArrayAsWord16X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord32ArrayAsWord32X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord64ArrayAsWord64X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. 
") , (fsLit "writeWord8ArrayAsWord8X64#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord16ArrayAsWord16X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord32ArrayAsWord32X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeWord64ArrayAsWord64X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeFloatArrayAsFloatX4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeDoubleArrayAsDoubleX2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeFloatArrayAsFloatX8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeDoubleArrayAsDoubleX4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeFloatArrayAsFloatX16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "writeDoubleArrayAsDoubleX8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , (fsLit "indexInt8OffAddrAsInt8X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt16OffAddrAsInt16X8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt32OffAddrAsInt32X4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt64OffAddrAsInt64X2#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt8OffAddrAsInt8X32#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt16OffAddrAsInt16X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt32OffAddrAsInt32X8#"," Reads vector; offset in scalar elements. 
") , (fsLit "indexInt64OffAddrAsInt64X4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt8OffAddrAsInt8X64#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt16OffAddrAsInt16X32#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt32OffAddrAsInt32X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexInt64OffAddrAsInt64X8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord8OffAddrAsWord8X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord16OffAddrAsWord16X8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord32OffAddrAsWord32X4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord64OffAddrAsWord64X2#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord8OffAddrAsWord8X32#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord16OffAddrAsWord16X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord32OffAddrAsWord32X8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord64OffAddrAsWord64X4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord8OffAddrAsWord8X64#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord16OffAddrAsWord16X32#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord32OffAddrAsWord32X16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexWord64OffAddrAsWord64X8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexFloatOffAddrAsFloatX4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexDoubleOffAddrAsDoubleX2#"," Reads vector; offset in scalar elements. ") , (fsLit "indexFloatOffAddrAsFloatX8#"," Reads vector; offset in scalar elements. ") , (fsLit "indexDoubleOffAddrAsDoubleX4#"," Reads vector; offset in scalar elements. ") , (fsLit "indexFloatOffAddrAsFloatX16#"," Reads vector; offset in scalar elements. ") , (fsLit "indexDoubleOffAddrAsDoubleX8#"," Reads vector; offset in scalar elements. 
") , (fsLit "readInt8OffAddrAsInt8X16#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt16OffAddrAsInt16X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt32OffAddrAsInt32X4#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt64OffAddrAsInt64X2#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt8OffAddrAsInt8X32#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt16OffAddrAsInt16X16#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt32OffAddrAsInt32X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt64OffAddrAsInt64X4#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt8OffAddrAsInt8X64#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt16OffAddrAsInt16X32#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt32OffAddrAsInt32X16#"," Reads vector; offset in scalar elements. ") , (fsLit "readInt64OffAddrAsInt64X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord8OffAddrAsWord8X16#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord16OffAddrAsWord16X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord32OffAddrAsWord32X4#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord64OffAddrAsWord64X2#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord8OffAddrAsWord8X32#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord16OffAddrAsWord16X16#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord32OffAddrAsWord32X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord64OffAddrAsWord64X4#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord8OffAddrAsWord8X64#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord16OffAddrAsWord16X32#"," Reads vector; offset in scalar elements. ") , (fsLit "readWord32OffAddrAsWord32X16#"," Reads vector; offset in scalar elements. 
") , (fsLit "readWord64OffAddrAsWord64X8#"," Reads vector; offset in scalar elements. ") , (fsLit "readFloatOffAddrAsFloatX4#"," Reads vector; offset in scalar elements. ") , (fsLit "readDoubleOffAddrAsDoubleX2#"," Reads vector; offset in scalar elements. ") , (fsLit "readFloatOffAddrAsFloatX8#"," Reads vector; offset in scalar elements. ") , (fsLit "readDoubleOffAddrAsDoubleX4#"," Reads vector; offset in scalar elements. ") , (fsLit "readFloatOffAddrAsFloatX16#"," Reads vector; offset in scalar elements. ") , (fsLit "readDoubleOffAddrAsDoubleX8#"," Reads vector; offset in scalar elements. ") , (fsLit "writeInt8OffAddrAsInt8X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt16OffAddrAsInt16X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt32OffAddrAsInt32X4#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt64OffAddrAsInt64X2#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt8OffAddrAsInt8X32#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt16OffAddrAsInt16X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt32OffAddrAsInt32X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt64OffAddrAsInt64X4#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt8OffAddrAsInt8X64#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt16OffAddrAsInt16X32#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt32OffAddrAsInt32X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeInt64OffAddrAsInt64X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord8OffAddrAsWord8X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord16OffAddrAsWord16X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord32OffAddrAsWord32X4#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord64OffAddrAsWord64X2#"," Write vector; offset in scalar elements. 
") , (fsLit "writeWord8OffAddrAsWord8X32#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord16OffAddrAsWord16X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord32OffAddrAsWord32X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord64OffAddrAsWord64X4#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord8OffAddrAsWord8X64#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord16OffAddrAsWord16X32#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord32OffAddrAsWord32X16#"," Write vector; offset in scalar elements. ") , (fsLit "writeWord64OffAddrAsWord64X8#"," Write vector; offset in scalar elements. ") , (fsLit "writeFloatOffAddrAsFloatX4#"," Write vector; offset in scalar elements. ") , (fsLit "writeDoubleOffAddrAsDoubleX2#"," Write vector; offset in scalar elements. ") , (fsLit "writeFloatOffAddrAsFloatX8#"," Write vector; offset in scalar elements. ") , (fsLit "writeDoubleOffAddrAsDoubleX4#"," Write vector; offset in scalar elements. ") , (fsLit "writeFloatOffAddrAsFloatX16#"," Write vector; offset in scalar elements. ") , (fsLit "writeDoubleOffAddrAsDoubleX8#"," Write vector; offset in scalar elements. ") , (fsLit "fmaddFloatX4#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmaddDoubleX2#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmaddFloatX8#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmaddDoubleX4#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmaddFloatX16#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmaddDoubleX8#","Fused multiply-add operation @x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubFloatX4#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubDoubleX2#","Fused multiply-subtract operation @x*y-z@. 
See \"GHC.Prim#fma\".") , (fsLit "fmsubFloatX8#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubDoubleX4#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubFloatX16#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fmsubDoubleX8#","Fused multiply-subtract operation @x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddFloatX4#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddDoubleX2#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddFloatX8#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddDoubleX4#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddFloatX16#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmaddDoubleX8#","Fused negate-multiply-add operation @-x*y+z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubFloatX4#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubDoubleX2#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubFloatX8#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubDoubleX4#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubFloatX16#","Fused negate-multiply-subtract operation @-x*y-z@. See \"GHC.Prim#fma\".") , (fsLit "fnmsubDoubleX8#","Fused negate-multiply-subtract operation @-x*y-z@. 
See \"GHC.Prim#fma\".") , (fsLit "shuffleInt8X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt16X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt32X4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt64X2#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt8X32#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt16X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt32X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt64X4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt8X64#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt16X32#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt32X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleInt64X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord8X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord16X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord32X4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord64X2#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord8X32#","Shuffle elements of the 
concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord16X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord32X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord64X4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord8X64#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord16X32#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord32X16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleWord64X8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleFloatX4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleDoubleX2#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleFloatX8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleDoubleX4#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleFloatX16#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "shuffleDoubleX8#","Shuffle elements of the concatenation of the input two vectors\n into the result vector.") , (fsLit "minInt8X16#","Component-wise minimum of two vectors.") , (fsLit "minInt16X8#","Component-wise minimum of two vectors.") , (fsLit "minInt32X4#","Component-wise minimum of two vectors.") , (fsLit "minInt64X2#","Component-wise minimum of two vectors.") , (fsLit "minInt8X32#","Component-wise minimum of two vectors.") , (fsLit "minInt16X16#","Component-wise 
minimum of two vectors.") , (fsLit "minInt32X8#","Component-wise minimum of two vectors.") , (fsLit "minInt64X4#","Component-wise minimum of two vectors.") , (fsLit "minInt8X64#","Component-wise minimum of two vectors.") , (fsLit "minInt16X32#","Component-wise minimum of two vectors.") , (fsLit "minInt32X16#","Component-wise minimum of two vectors.") , (fsLit "minInt64X8#","Component-wise minimum of two vectors.") , (fsLit "minWord8X16#","Component-wise minimum of two vectors.") , (fsLit "minWord16X8#","Component-wise minimum of two vectors.") , (fsLit "minWord32X4#","Component-wise minimum of two vectors.") , (fsLit "minWord64X2#","Component-wise minimum of two vectors.") , (fsLit "minWord8X32#","Component-wise minimum of two vectors.") , (fsLit "minWord16X16#","Component-wise minimum of two vectors.") , (fsLit "minWord32X8#","Component-wise minimum of two vectors.") , (fsLit "minWord64X4#","Component-wise minimum of two vectors.") , (fsLit "minWord8X64#","Component-wise minimum of two vectors.") , (fsLit "minWord16X32#","Component-wise minimum of two vectors.") , (fsLit "minWord32X16#","Component-wise minimum of two vectors.") , (fsLit "minWord64X8#","Component-wise minimum of two vectors.") , (fsLit "minFloatX4#","Component-wise minimum of two vectors.") , (fsLit "minDoubleX2#","Component-wise minimum of two vectors.") , (fsLit "minFloatX8#","Component-wise minimum of two vectors.") , (fsLit "minDoubleX4#","Component-wise minimum of two vectors.") , (fsLit "minFloatX16#","Component-wise minimum of two vectors.") , (fsLit "minDoubleX8#","Component-wise minimum of two vectors.") , (fsLit "maxInt8X16#","Component-wise maximum of two vectors.") , (fsLit "maxInt16X8#","Component-wise maximum of two vectors.") , (fsLit "maxInt32X4#","Component-wise maximum of two vectors.") , (fsLit "maxInt64X2#","Component-wise maximum of two vectors.") , (fsLit "maxInt8X32#","Component-wise maximum of two vectors.") , (fsLit "maxInt16X16#","Component-wise maximum of two vectors.") , 
(fsLit "maxInt32X8#","Component-wise maximum of two vectors.") , (fsLit "maxInt64X4#","Component-wise maximum of two vectors.") , (fsLit "maxInt8X64#","Component-wise maximum of two vectors.") , (fsLit "maxInt16X32#","Component-wise maximum of two vectors.") , (fsLit "maxInt32X16#","Component-wise maximum of two vectors.") , (fsLit "maxInt64X8#","Component-wise maximum of two vectors.") , (fsLit "maxWord8X16#","Component-wise maximum of two vectors.") , (fsLit "maxWord16X8#","Component-wise maximum of two vectors.") , (fsLit "maxWord32X4#","Component-wise maximum of two vectors.") , (fsLit "maxWord64X2#","Component-wise maximum of two vectors.") , (fsLit "maxWord8X32#","Component-wise maximum of two vectors.") , (fsLit "maxWord16X16#","Component-wise maximum of two vectors.") , (fsLit "maxWord32X8#","Component-wise maximum of two vectors.") , (fsLit "maxWord64X4#","Component-wise maximum of two vectors.") , (fsLit "maxWord8X64#","Component-wise maximum of two vectors.") , (fsLit "maxWord16X32#","Component-wise maximum of two vectors.") , (fsLit "maxWord32X16#","Component-wise maximum of two vectors.") , (fsLit "maxWord64X8#","Component-wise maximum of two vectors.") , (fsLit "maxFloatX4#","Component-wise maximum of two vectors.") , (fsLit "maxDoubleX2#","Component-wise maximum of two vectors.") , (fsLit "maxFloatX8#","Component-wise maximum of two vectors.") , (fsLit "maxDoubleX4#","Component-wise maximum of two vectors.") , (fsLit "maxFloatX16#","Component-wise maximum of two vectors.") , (fsLit "maxDoubleX8#","Component-wise maximum of two vectors.") ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-effects.hs-incl0000644000000000000000000004742007346545000025305 0ustar0000000000000000primOpEffect Int8QuotOp = CanFail primOpEffect Int8RemOp = CanFail primOpEffect Int8QuotRemOp = CanFail primOpEffect Word8QuotOp = CanFail primOpEffect Word8RemOp = CanFail primOpEffect Word8QuotRemOp = CanFail primOpEffect Int16QuotOp = CanFail primOpEffect 
Int16RemOp = CanFail primOpEffect Int16QuotRemOp = CanFail primOpEffect Word16QuotOp = CanFail primOpEffect Word16RemOp = CanFail primOpEffect Word16QuotRemOp = CanFail primOpEffect Int32QuotOp = CanFail primOpEffect Int32RemOp = CanFail primOpEffect Int32QuotRemOp = CanFail primOpEffect Word32QuotOp = CanFail primOpEffect Word32RemOp = CanFail primOpEffect Word32QuotRemOp = CanFail primOpEffect Int64QuotOp = CanFail primOpEffect Int64RemOp = CanFail primOpEffect Word64QuotOp = CanFail primOpEffect Word64RemOp = CanFail primOpEffect IntQuotOp = CanFail primOpEffect IntRemOp = CanFail primOpEffect IntQuotRemOp = CanFail primOpEffect WordQuotOp = CanFail primOpEffect WordRemOp = CanFail primOpEffect WordQuotRemOp = CanFail primOpEffect WordQuotRem2Op = CanFail primOpEffect DoubleDivOp = CanFail primOpEffect DoubleLogOp = CanFail primOpEffect DoubleLog1POp = CanFail primOpEffect DoubleAsinOp = CanFail primOpEffect DoubleAcosOp = CanFail primOpEffect FloatDivOp = CanFail primOpEffect FloatLogOp = CanFail primOpEffect FloatLog1POp = CanFail primOpEffect FloatAsinOp = CanFail primOpEffect FloatAcosOp = CanFail primOpEffect NewArrayOp = ReadWriteEffect primOpEffect ReadArrayOp = ReadWriteEffect primOpEffect WriteArrayOp = ReadWriteEffect primOpEffect IndexArrayOp = CanFail primOpEffect UnsafeFreezeArrayOp = ReadWriteEffect primOpEffect UnsafeThawArrayOp = ReadWriteEffect primOpEffect CopyArrayOp = ReadWriteEffect primOpEffect CopyMutableArrayOp = ReadWriteEffect primOpEffect CloneArrayOp = ReadWriteEffect primOpEffect CloneMutableArrayOp = ReadWriteEffect primOpEffect FreezeArrayOp = ReadWriteEffect primOpEffect ThawArrayOp = ReadWriteEffect primOpEffect CasArrayOp = ReadWriteEffect primOpEffect NewSmallArrayOp = ReadWriteEffect primOpEffect ShrinkSmallMutableArrayOp_Char = ReadWriteEffect primOpEffect ReadSmallArrayOp = ReadWriteEffect primOpEffect WriteSmallArrayOp = ReadWriteEffect primOpEffect IndexSmallArrayOp = CanFail primOpEffect UnsafeFreezeSmallArrayOp = 
ReadWriteEffect primOpEffect UnsafeThawSmallArrayOp = ReadWriteEffect primOpEffect CopySmallArrayOp = ReadWriteEffect primOpEffect CopySmallMutableArrayOp = ReadWriteEffect primOpEffect CloneSmallArrayOp = ReadWriteEffect primOpEffect CloneSmallMutableArrayOp = ReadWriteEffect primOpEffect FreezeSmallArrayOp = ReadWriteEffect primOpEffect ThawSmallArrayOp = ReadWriteEffect primOpEffect CasSmallArrayOp = ReadWriteEffect primOpEffect NewByteArrayOp_Char = ReadWriteEffect primOpEffect NewPinnedByteArrayOp_Char = ReadWriteEffect primOpEffect NewAlignedPinnedByteArrayOp_Char = ReadWriteEffect primOpEffect ShrinkMutableByteArrayOp_Char = ReadWriteEffect primOpEffect ResizeMutableByteArrayOp_Char = ReadWriteEffect primOpEffect UnsafeFreezeByteArrayOp = NoEffect primOpEffect UnsafeThawByteArrayOp = NoEffect primOpEffect IndexByteArrayOp_Char = CanFail primOpEffect IndexByteArrayOp_WideChar = CanFail primOpEffect IndexByteArrayOp_Int = CanFail primOpEffect IndexByteArrayOp_Word = CanFail primOpEffect IndexByteArrayOp_Addr = CanFail primOpEffect IndexByteArrayOp_Float = CanFail primOpEffect IndexByteArrayOp_Double = CanFail primOpEffect IndexByteArrayOp_StablePtr = CanFail primOpEffect IndexByteArrayOp_Int8 = CanFail primOpEffect IndexByteArrayOp_Word8 = CanFail primOpEffect IndexByteArrayOp_Int16 = CanFail primOpEffect IndexByteArrayOp_Word16 = CanFail primOpEffect IndexByteArrayOp_Int32 = CanFail primOpEffect IndexByteArrayOp_Word32 = CanFail primOpEffect IndexByteArrayOp_Int64 = CanFail primOpEffect IndexByteArrayOp_Word64 = CanFail primOpEffect IndexByteArrayOp_Word8AsChar = CanFail primOpEffect IndexByteArrayOp_Word8AsWideChar = CanFail primOpEffect IndexByteArrayOp_Word8AsInt = CanFail primOpEffect IndexByteArrayOp_Word8AsWord = CanFail primOpEffect IndexByteArrayOp_Word8AsAddr = CanFail primOpEffect IndexByteArrayOp_Word8AsFloat = CanFail primOpEffect IndexByteArrayOp_Word8AsDouble = CanFail primOpEffect IndexByteArrayOp_Word8AsStablePtr = CanFail primOpEffect 
IndexByteArrayOp_Word8AsInt16 = CanFail primOpEffect IndexByteArrayOp_Word8AsWord16 = CanFail primOpEffect IndexByteArrayOp_Word8AsInt32 = CanFail primOpEffect IndexByteArrayOp_Word8AsWord32 = CanFail primOpEffect IndexByteArrayOp_Word8AsInt64 = CanFail primOpEffect IndexByteArrayOp_Word8AsWord64 = CanFail primOpEffect ReadByteArrayOp_Char = ReadWriteEffect primOpEffect ReadByteArrayOp_WideChar = ReadWriteEffect primOpEffect ReadByteArrayOp_Int = ReadWriteEffect primOpEffect ReadByteArrayOp_Word = ReadWriteEffect primOpEffect ReadByteArrayOp_Addr = ReadWriteEffect primOpEffect ReadByteArrayOp_Float = ReadWriteEffect primOpEffect ReadByteArrayOp_Double = ReadWriteEffect primOpEffect ReadByteArrayOp_StablePtr = ReadWriteEffect primOpEffect ReadByteArrayOp_Int8 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8 = ReadWriteEffect primOpEffect ReadByteArrayOp_Int16 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word16 = ReadWriteEffect primOpEffect ReadByteArrayOp_Int32 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word32 = ReadWriteEffect primOpEffect ReadByteArrayOp_Int64 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word64 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsChar = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsWideChar = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsInt = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsWord = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsAddr = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsFloat = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsDouble = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsStablePtr = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsInt16 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsWord16 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsInt32 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsWord32 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsInt64 = ReadWriteEffect primOpEffect ReadByteArrayOp_Word8AsWord64 = 
ReadWriteEffect primOpEffect WriteByteArrayOp_Char = ReadWriteEffect primOpEffect WriteByteArrayOp_WideChar = ReadWriteEffect primOpEffect WriteByteArrayOp_Int = ReadWriteEffect primOpEffect WriteByteArrayOp_Word = ReadWriteEffect primOpEffect WriteByteArrayOp_Addr = ReadWriteEffect primOpEffect WriteByteArrayOp_Float = ReadWriteEffect primOpEffect WriteByteArrayOp_Double = ReadWriteEffect primOpEffect WriteByteArrayOp_StablePtr = ReadWriteEffect primOpEffect WriteByteArrayOp_Int8 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8 = ReadWriteEffect primOpEffect WriteByteArrayOp_Int16 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word16 = ReadWriteEffect primOpEffect WriteByteArrayOp_Int32 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word32 = ReadWriteEffect primOpEffect WriteByteArrayOp_Int64 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word64 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsChar = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsWideChar = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsInt = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsWord = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsAddr = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsFloat = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsDouble = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsStablePtr = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsInt16 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsWord16 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsInt32 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsWord32 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsInt64 = ReadWriteEffect primOpEffect WriteByteArrayOp_Word8AsWord64 = ReadWriteEffect primOpEffect CompareByteArraysOp = CanFail primOpEffect CopyByteArrayOp = ReadWriteEffect primOpEffect CopyMutableByteArrayOp = ReadWriteEffect primOpEffect CopyMutableByteArrayNonOverlappingOp = ReadWriteEffect primOpEffect CopyByteArrayToAddrOp = 
ReadWriteEffect primOpEffect CopyMutableByteArrayToAddrOp = ReadWriteEffect primOpEffect CopyAddrToByteArrayOp = ReadWriteEffect primOpEffect CopyAddrToAddrOp = ReadWriteEffect primOpEffect CopyAddrToAddrNonOverlappingOp = ReadWriteEffect primOpEffect SetByteArrayOp = ReadWriteEffect primOpEffect SetAddrRangeOp = ReadWriteEffect primOpEffect AtomicReadByteArrayOp_Int = ReadWriteEffect primOpEffect AtomicWriteByteArrayOp_Int = ReadWriteEffect primOpEffect CasByteArrayOp_Int = ReadWriteEffect primOpEffect CasByteArrayOp_Int8 = ReadWriteEffect primOpEffect CasByteArrayOp_Int16 = ReadWriteEffect primOpEffect CasByteArrayOp_Int32 = ReadWriteEffect primOpEffect CasByteArrayOp_Int64 = ReadWriteEffect primOpEffect FetchAddByteArrayOp_Int = ReadWriteEffect primOpEffect FetchSubByteArrayOp_Int = ReadWriteEffect primOpEffect FetchAndByteArrayOp_Int = ReadWriteEffect primOpEffect FetchNandByteArrayOp_Int = ReadWriteEffect primOpEffect FetchOrByteArrayOp_Int = ReadWriteEffect primOpEffect FetchXorByteArrayOp_Int = ReadWriteEffect primOpEffect IndexOffAddrOp_Char = CanFail primOpEffect IndexOffAddrOp_WideChar = CanFail primOpEffect IndexOffAddrOp_Int = CanFail primOpEffect IndexOffAddrOp_Word = CanFail primOpEffect IndexOffAddrOp_Addr = CanFail primOpEffect IndexOffAddrOp_Float = CanFail primOpEffect IndexOffAddrOp_Double = CanFail primOpEffect IndexOffAddrOp_StablePtr = CanFail primOpEffect IndexOffAddrOp_Int8 = CanFail primOpEffect IndexOffAddrOp_Word8 = CanFail primOpEffect IndexOffAddrOp_Int16 = CanFail primOpEffect IndexOffAddrOp_Word16 = CanFail primOpEffect IndexOffAddrOp_Int32 = CanFail primOpEffect IndexOffAddrOp_Word32 = CanFail primOpEffect IndexOffAddrOp_Int64 = CanFail primOpEffect IndexOffAddrOp_Word64 = CanFail primOpEffect IndexOffAddrOp_Word8AsChar = CanFail primOpEffect IndexOffAddrOp_Word8AsWideChar = CanFail primOpEffect IndexOffAddrOp_Word8AsInt = CanFail primOpEffect IndexOffAddrOp_Word8AsWord = CanFail primOpEffect IndexOffAddrOp_Word8AsAddr = CanFail 
primOpEffect IndexOffAddrOp_Word8AsFloat = CanFail primOpEffect IndexOffAddrOp_Word8AsDouble = CanFail primOpEffect IndexOffAddrOp_Word8AsStablePtr = CanFail primOpEffect IndexOffAddrOp_Word8AsInt16 = CanFail primOpEffect IndexOffAddrOp_Word8AsWord16 = CanFail primOpEffect IndexOffAddrOp_Word8AsInt32 = CanFail primOpEffect IndexOffAddrOp_Word8AsWord32 = CanFail primOpEffect IndexOffAddrOp_Word8AsInt64 = CanFail primOpEffect IndexOffAddrOp_Word8AsWord64 = CanFail primOpEffect ReadOffAddrOp_Char = ReadWriteEffect primOpEffect ReadOffAddrOp_WideChar = ReadWriteEffect primOpEffect ReadOffAddrOp_Int = ReadWriteEffect primOpEffect ReadOffAddrOp_Word = ReadWriteEffect primOpEffect ReadOffAddrOp_Addr = ReadWriteEffect primOpEffect ReadOffAddrOp_Float = ReadWriteEffect primOpEffect ReadOffAddrOp_Double = ReadWriteEffect primOpEffect ReadOffAddrOp_StablePtr = ReadWriteEffect primOpEffect ReadOffAddrOp_Int8 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8 = ReadWriteEffect primOpEffect ReadOffAddrOp_Int16 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word16 = ReadWriteEffect primOpEffect ReadOffAddrOp_Int32 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word32 = ReadWriteEffect primOpEffect ReadOffAddrOp_Int64 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word64 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsChar = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsWideChar = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsInt = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsWord = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsAddr = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsFloat = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsDouble = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsStablePtr = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsInt16 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsWord16 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsInt32 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsWord32 = ReadWriteEffect 
primOpEffect ReadOffAddrOp_Word8AsInt64 = ReadWriteEffect primOpEffect ReadOffAddrOp_Word8AsWord64 = ReadWriteEffect primOpEffect WriteOffAddrOp_Char = ReadWriteEffect primOpEffect WriteOffAddrOp_WideChar = ReadWriteEffect primOpEffect WriteOffAddrOp_Int = ReadWriteEffect primOpEffect WriteOffAddrOp_Word = ReadWriteEffect primOpEffect WriteOffAddrOp_Addr = ReadWriteEffect primOpEffect WriteOffAddrOp_Float = ReadWriteEffect primOpEffect WriteOffAddrOp_Double = ReadWriteEffect primOpEffect WriteOffAddrOp_StablePtr = ReadWriteEffect primOpEffect WriteOffAddrOp_Int8 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8 = ReadWriteEffect primOpEffect WriteOffAddrOp_Int16 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word16 = ReadWriteEffect primOpEffect WriteOffAddrOp_Int32 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word32 = ReadWriteEffect primOpEffect WriteOffAddrOp_Int64 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word64 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsChar = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsWideChar = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsInt = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsWord = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsAddr = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsFloat = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsDouble = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsStablePtr = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsInt16 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsWord16 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsInt32 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsWord32 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsInt64 = ReadWriteEffect primOpEffect WriteOffAddrOp_Word8AsWord64 = ReadWriteEffect primOpEffect InterlockedExchange_Addr = ReadWriteEffect primOpEffect InterlockedExchange_Word = ReadWriteEffect primOpEffect CasAddrOp_Addr = ReadWriteEffect primOpEffect CasAddrOp_Word = ReadWriteEffect 
primOpEffect CasAddrOp_Word8 = ReadWriteEffect primOpEffect CasAddrOp_Word16 = ReadWriteEffect primOpEffect CasAddrOp_Word32 = ReadWriteEffect primOpEffect CasAddrOp_Word64 = ReadWriteEffect primOpEffect FetchAddAddrOp_Word = ReadWriteEffect primOpEffect FetchSubAddrOp_Word = ReadWriteEffect primOpEffect FetchAndAddrOp_Word = ReadWriteEffect primOpEffect FetchNandAddrOp_Word = ReadWriteEffect primOpEffect FetchOrAddrOp_Word = ReadWriteEffect primOpEffect FetchXorAddrOp_Word = ReadWriteEffect primOpEffect AtomicReadAddrOp_Word = ReadWriteEffect primOpEffect AtomicWriteAddrOp_Word = ReadWriteEffect primOpEffect NewMutVarOp = ReadWriteEffect primOpEffect ReadMutVarOp = ReadWriteEffect primOpEffect WriteMutVarOp = ReadWriteEffect primOpEffect AtomicSwapMutVarOp = ReadWriteEffect primOpEffect AtomicModifyMutVar2Op = ReadWriteEffect primOpEffect AtomicModifyMutVar_Op = ReadWriteEffect primOpEffect CasMutVarOp = ReadWriteEffect primOpEffect CatchOp = ReadWriteEffect primOpEffect RaiseOp = ThrowsException primOpEffect RaiseUnderflowOp = ThrowsException primOpEffect RaiseOverflowOp = ThrowsException primOpEffect RaiseDivZeroOp = ThrowsException primOpEffect RaiseIOOp = ThrowsException primOpEffect MaskAsyncExceptionsOp = ReadWriteEffect primOpEffect MaskUninterruptibleOp = ReadWriteEffect primOpEffect UnmaskAsyncExceptionsOp = ReadWriteEffect primOpEffect MaskStatus = ReadWriteEffect primOpEffect NewPromptTagOp = ReadWriteEffect primOpEffect PromptOp = ReadWriteEffect primOpEffect Control0Op = ReadWriteEffect primOpEffect AtomicallyOp = ReadWriteEffect primOpEffect RetryOp = ReadWriteEffect primOpEffect CatchRetryOp = ReadWriteEffect primOpEffect CatchSTMOp = ReadWriteEffect primOpEffect NewTVarOp = ReadWriteEffect primOpEffect ReadTVarOp = ReadWriteEffect primOpEffect ReadTVarIOOp = ReadWriteEffect primOpEffect WriteTVarOp = ReadWriteEffect primOpEffect NewMVarOp = ReadWriteEffect primOpEffect TakeMVarOp = ReadWriteEffect primOpEffect TryTakeMVarOp = ReadWriteEffect 
primOpEffect PutMVarOp = ReadWriteEffect primOpEffect TryPutMVarOp = ReadWriteEffect primOpEffect ReadMVarOp = ReadWriteEffect primOpEffect TryReadMVarOp = ReadWriteEffect primOpEffect IsEmptyMVarOp = ReadWriteEffect primOpEffect NewIOPortOp = ReadWriteEffect primOpEffect ReadIOPortOp = ReadWriteEffect primOpEffect WriteIOPortOp = ReadWriteEffect primOpEffect DelayOp = ReadWriteEffect primOpEffect WaitReadOp = ReadWriteEffect primOpEffect WaitWriteOp = ReadWriteEffect primOpEffect ForkOp = ReadWriteEffect primOpEffect ForkOnOp = ReadWriteEffect primOpEffect KillThreadOp = ReadWriteEffect primOpEffect YieldOp = ReadWriteEffect primOpEffect MyThreadIdOp = ReadWriteEffect primOpEffect LabelThreadOp = ReadWriteEffect primOpEffect IsCurrentThreadBoundOp = ReadWriteEffect primOpEffect NoDuplicateOp = ReadWriteEffect primOpEffect ThreadStatusOp = ReadWriteEffect primOpEffect ListThreadsOp = ReadWriteEffect primOpEffect MkWeakOp = ReadWriteEffect primOpEffect MkWeakNoFinalizerOp = ReadWriteEffect primOpEffect AddCFinalizerToWeakOp = ReadWriteEffect primOpEffect DeRefWeakOp = ReadWriteEffect primOpEffect FinalizeWeakOp = ReadWriteEffect primOpEffect TouchOp = ReadWriteEffect primOpEffect MakeStablePtrOp = ReadWriteEffect primOpEffect DeRefStablePtrOp = ReadWriteEffect primOpEffect EqStablePtrOp = ReadWriteEffect primOpEffect MakeStableNameOp = ReadWriteEffect primOpEffect CompactNewOp = ReadWriteEffect primOpEffect CompactResizeOp = ReadWriteEffect primOpEffect CompactAllocateBlockOp = ReadWriteEffect primOpEffect CompactFixupPointersOp = ReadWriteEffect primOpEffect CompactAdd = ReadWriteEffect primOpEffect CompactAddWithSharing = ReadWriteEffect primOpEffect CompactSize = ReadWriteEffect primOpEffect ReallyUnsafePtrEqualityOp = CanFail primOpEffect ParOp = ReadWriteEffect primOpEffect SparkOp = ReadWriteEffect primOpEffect GetSparkOp = ReadWriteEffect primOpEffect NumSparks = ReadWriteEffect primOpEffect KeepAliveOp = ReadWriteEffect primOpEffect DataToTagSmallOp = 
ThrowsException primOpEffect DataToTagLargeOp = ThrowsException primOpEffect TagToEnumOp = CanFail primOpEffect NewBCOOp = ReadWriteEffect primOpEffect TraceEventOp = ReadWriteEffect primOpEffect TraceEventBinaryOp = ReadWriteEffect primOpEffect TraceMarkerOp = ReadWriteEffect primOpEffect SetThreadAllocationCounter = ReadWriteEffect primOpEffect (VecInsertOp _ _ _) = CanFail primOpEffect (VecDivOp _ _ _) = CanFail primOpEffect (VecQuotOp _ _ _) = CanFail primOpEffect (VecRemOp _ _ _) = CanFail primOpEffect (VecIndexByteArrayOp _ _ _) = CanFail primOpEffect (VecReadByteArrayOp _ _ _) = ReadWriteEffect primOpEffect (VecWriteByteArrayOp _ _ _) = ReadWriteEffect primOpEffect (VecIndexOffAddrOp _ _ _) = CanFail primOpEffect (VecReadOffAddrOp _ _ _) = ReadWriteEffect primOpEffect (VecWriteOffAddrOp _ _ _) = ReadWriteEffect primOpEffect (VecIndexScalarByteArrayOp _ _ _) = CanFail primOpEffect (VecReadScalarByteArrayOp _ _ _) = ReadWriteEffect primOpEffect (VecWriteScalarByteArrayOp _ _ _) = ReadWriteEffect primOpEffect (VecIndexScalarOffAddrOp _ _ _) = CanFail primOpEffect (VecReadScalarOffAddrOp _ _ _) = ReadWriteEffect primOpEffect (VecWriteScalarOffAddrOp _ _ _) = ReadWriteEffect primOpEffect PrefetchByteArrayOp3 = ReadWriteEffect primOpEffect PrefetchMutableByteArrayOp3 = ReadWriteEffect primOpEffect PrefetchAddrOp3 = ReadWriteEffect primOpEffect PrefetchValueOp3 = ReadWriteEffect primOpEffect PrefetchByteArrayOp2 = ReadWriteEffect primOpEffect PrefetchMutableByteArrayOp2 = ReadWriteEffect primOpEffect PrefetchAddrOp2 = ReadWriteEffect primOpEffect PrefetchValueOp2 = ReadWriteEffect primOpEffect PrefetchByteArrayOp1 = ReadWriteEffect primOpEffect PrefetchMutableByteArrayOp1 = ReadWriteEffect primOpEffect PrefetchAddrOp1 = ReadWriteEffect primOpEffect PrefetchValueOp1 = ReadWriteEffect primOpEffect PrefetchByteArrayOp0 = ReadWriteEffect primOpEffect PrefetchMutableByteArrayOp0 = ReadWriteEffect primOpEffect PrefetchAddrOp0 = ReadWriteEffect primOpEffect 
PrefetchValueOp0 = ReadWriteEffect primOpEffect _thisOp = NoEffect ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-fixity.hs-incl0000644000000000000000000000165607346545000025203 0ustar0000000000000000primOpFixity IntAddOp = Just (Fixity 6 InfixL) primOpFixity IntSubOp = Just (Fixity 6 InfixL) primOpFixity IntMulOp = Just (Fixity 7 InfixL) primOpFixity IntGtOp = Just (Fixity 4 InfixN) primOpFixity IntGeOp = Just (Fixity 4 InfixN) primOpFixity IntEqOp = Just (Fixity 4 InfixN) primOpFixity IntNeOp = Just (Fixity 4 InfixN) primOpFixity IntLtOp = Just (Fixity 4 InfixN) primOpFixity IntLeOp = Just (Fixity 4 InfixN) primOpFixity DoubleGtOp = Just (Fixity 4 InfixN) primOpFixity DoubleGeOp = Just (Fixity 4 InfixN) primOpFixity DoubleEqOp = Just (Fixity 4 InfixN) primOpFixity DoubleNeOp = Just (Fixity 4 InfixN) primOpFixity DoubleLtOp = Just (Fixity 4 InfixN) primOpFixity DoubleLeOp = Just (Fixity 4 InfixN) primOpFixity DoubleAddOp = Just (Fixity 6 InfixL) primOpFixity DoubleSubOp = Just (Fixity 6 InfixL) primOpFixity DoubleMulOp = Just (Fixity 7 InfixL) primOpFixity DoubleDivOp = Just (Fixity 7 InfixL) primOpFixity _thisOp = Nothing ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-is-cheap.hs-incl0000644000000000000000000000020507346545000025345 0ustar0000000000000000primOpIsCheap DataToTagSmallOp = True primOpIsCheap DataToTagLargeOp = True primOpIsCheap _thisOp = primOpOkForSpeculation _thisOp ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-is-work-free.hs-incl0000644000000000000000000000042407346545000026171 0ustar0000000000000000primOpIsWorkFree RaiseOp = True primOpIsWorkFree RaiseUnderflowOp = True primOpIsWorkFree RaiseOverflowOp = True primOpIsWorkFree RaiseDivZeroOp = True primOpIsWorkFree RaiseIOOp = True primOpIsWorkFree TouchOp = False primOpIsWorkFree _thisOp = primOpCodeSize _thisOp == 0 
ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-list.hs-incl0000644000000000000000000012517607346545000024646 0ustar0000000000000000 [CharGtOp , CharGeOp , CharEqOp , CharNeOp , CharLtOp , CharLeOp , OrdOp , Int8ToIntOp , IntToInt8Op , Int8NegOp , Int8AddOp , Int8SubOp , Int8MulOp , Int8QuotOp , Int8RemOp , Int8QuotRemOp , Int8SllOp , Int8SraOp , Int8SrlOp , Int8ToWord8Op , Int8EqOp , Int8GeOp , Int8GtOp , Int8LeOp , Int8LtOp , Int8NeOp , Word8ToWordOp , WordToWord8Op , Word8AddOp , Word8SubOp , Word8MulOp , Word8QuotOp , Word8RemOp , Word8QuotRemOp , Word8AndOp , Word8OrOp , Word8XorOp , Word8NotOp , Word8SllOp , Word8SrlOp , Word8ToInt8Op , Word8EqOp , Word8GeOp , Word8GtOp , Word8LeOp , Word8LtOp , Word8NeOp , Int16ToIntOp , IntToInt16Op , Int16NegOp , Int16AddOp , Int16SubOp , Int16MulOp , Int16QuotOp , Int16RemOp , Int16QuotRemOp , Int16SllOp , Int16SraOp , Int16SrlOp , Int16ToWord16Op , Int16EqOp , Int16GeOp , Int16GtOp , Int16LeOp , Int16LtOp , Int16NeOp , Word16ToWordOp , WordToWord16Op , Word16AddOp , Word16SubOp , Word16MulOp , Word16QuotOp , Word16RemOp , Word16QuotRemOp , Word16AndOp , Word16OrOp , Word16XorOp , Word16NotOp , Word16SllOp , Word16SrlOp , Word16ToInt16Op , Word16EqOp , Word16GeOp , Word16GtOp , Word16LeOp , Word16LtOp , Word16NeOp , Int32ToIntOp , IntToInt32Op , Int32NegOp , Int32AddOp , Int32SubOp , Int32MulOp , Int32QuotOp , Int32RemOp , Int32QuotRemOp , Int32SllOp , Int32SraOp , Int32SrlOp , Int32ToWord32Op , Int32EqOp , Int32GeOp , Int32GtOp , Int32LeOp , Int32LtOp , Int32NeOp , Word32ToWordOp , WordToWord32Op , Word32AddOp , Word32SubOp , Word32MulOp , Word32QuotOp , Word32RemOp , Word32QuotRemOp , Word32AndOp , Word32OrOp , Word32XorOp , Word32NotOp , Word32SllOp , Word32SrlOp , Word32ToInt32Op , Word32EqOp , Word32GeOp , Word32GtOp , Word32LeOp , Word32LtOp , Word32NeOp , Int64ToIntOp , IntToInt64Op , Int64NegOp , Int64AddOp , Int64SubOp , Int64MulOp , Int64QuotOp , Int64RemOp , Int64SllOp , Int64SraOp , 
Int64SrlOp , Int64ToWord64Op , Int64EqOp , Int64GeOp , Int64GtOp , Int64LeOp , Int64LtOp , Int64NeOp , Word64ToWordOp , WordToWord64Op , Word64AddOp , Word64SubOp , Word64MulOp , Word64QuotOp , Word64RemOp , Word64AndOp , Word64OrOp , Word64XorOp , Word64NotOp , Word64SllOp , Word64SrlOp , Word64ToInt64Op , Word64EqOp , Word64GeOp , Word64GtOp , Word64LeOp , Word64LtOp , Word64NeOp , IntAddOp , IntSubOp , IntMulOp , IntMul2Op , IntMulMayOfloOp , IntQuotOp , IntRemOp , IntQuotRemOp , IntAndOp , IntOrOp , IntXorOp , IntNotOp , IntNegOp , IntAddCOp , IntSubCOp , IntGtOp , IntGeOp , IntEqOp , IntNeOp , IntLtOp , IntLeOp , ChrOp , IntToWordOp , IntToFloatOp , IntToDoubleOp , WordToFloatOp , WordToDoubleOp , IntSllOp , IntSraOp , IntSrlOp , WordAddOp , WordAddCOp , WordSubCOp , WordAdd2Op , WordSubOp , WordMulOp , WordMul2Op , WordQuotOp , WordRemOp , WordQuotRemOp , WordQuotRem2Op , WordAndOp , WordOrOp , WordXorOp , WordNotOp , WordSllOp , WordSrlOp , WordToIntOp , WordGtOp , WordGeOp , WordEqOp , WordNeOp , WordLtOp , WordLeOp , PopCnt8Op , PopCnt16Op , PopCnt32Op , PopCnt64Op , PopCntOp , Pdep8Op , Pdep16Op , Pdep32Op , Pdep64Op , PdepOp , Pext8Op , Pext16Op , Pext32Op , Pext64Op , PextOp , Clz8Op , Clz16Op , Clz32Op , Clz64Op , ClzOp , Ctz8Op , Ctz16Op , Ctz32Op , Ctz64Op , CtzOp , BSwap16Op , BSwap32Op , BSwap64Op , BSwapOp , BRev8Op , BRev16Op , BRev32Op , BRev64Op , BRevOp , Narrow8IntOp , Narrow16IntOp , Narrow32IntOp , Narrow8WordOp , Narrow16WordOp , Narrow32WordOp , DoubleGtOp , DoubleGeOp , DoubleEqOp , DoubleNeOp , DoubleLtOp , DoubleLeOp , DoubleMinOp , DoubleMaxOp , DoubleAddOp , DoubleSubOp , DoubleMulOp , DoubleDivOp , DoubleNegOp , DoubleFabsOp , DoubleToIntOp , DoubleToFloatOp , DoubleExpOp , DoubleExpM1Op , DoubleLogOp , DoubleLog1POp , DoubleSqrtOp , DoubleSinOp , DoubleCosOp , DoubleTanOp , DoubleAsinOp , DoubleAcosOp , DoubleAtanOp , DoubleSinhOp , DoubleCoshOp , DoubleTanhOp , DoubleAsinhOp , DoubleAcoshOp , DoubleAtanhOp , DoublePowerOp , 
DoubleDecode_2IntOp , DoubleDecode_Int64Op , CastDoubleToWord64Op , CastWord64ToDoubleOp , FloatGtOp , FloatGeOp , FloatEqOp , FloatNeOp , FloatLtOp , FloatLeOp , FloatMinOp , FloatMaxOp , FloatAddOp , FloatSubOp , FloatMulOp , FloatDivOp , FloatNegOp , FloatFabsOp , FloatToIntOp , FloatExpOp , FloatExpM1Op , FloatLogOp , FloatLog1POp , FloatSqrtOp , FloatSinOp , FloatCosOp , FloatTanOp , FloatAsinOp , FloatAcosOp , FloatAtanOp , FloatSinhOp , FloatCoshOp , FloatTanhOp , FloatAsinhOp , FloatAcoshOp , FloatAtanhOp , FloatPowerOp , FloatToDoubleOp , FloatDecode_IntOp , CastFloatToWord32Op , CastWord32ToFloatOp , FloatFMAdd , FloatFMSub , FloatFNMAdd , FloatFNMSub , DoubleFMAdd , DoubleFMSub , DoubleFNMAdd , DoubleFNMSub , NewArrayOp , ReadArrayOp , WriteArrayOp , SizeofArrayOp , SizeofMutableArrayOp , IndexArrayOp , UnsafeFreezeArrayOp , UnsafeThawArrayOp , CopyArrayOp , CopyMutableArrayOp , CloneArrayOp , CloneMutableArrayOp , FreezeArrayOp , ThawArrayOp , CasArrayOp , NewSmallArrayOp , ShrinkSmallMutableArrayOp_Char , ReadSmallArrayOp , WriteSmallArrayOp , SizeofSmallArrayOp , SizeofSmallMutableArrayOp , GetSizeofSmallMutableArrayOp , IndexSmallArrayOp , UnsafeFreezeSmallArrayOp , UnsafeThawSmallArrayOp , CopySmallArrayOp , CopySmallMutableArrayOp , CloneSmallArrayOp , CloneSmallMutableArrayOp , FreezeSmallArrayOp , ThawSmallArrayOp , CasSmallArrayOp , NewByteArrayOp_Char , NewPinnedByteArrayOp_Char , NewAlignedPinnedByteArrayOp_Char , MutableByteArrayIsPinnedOp , ByteArrayIsPinnedOp , ByteArrayIsWeaklyPinnedOp , MutableByteArrayIsWeaklyPinnedOp , ByteArrayContents_Char , MutableByteArrayContents_Char , ShrinkMutableByteArrayOp_Char , ResizeMutableByteArrayOp_Char , UnsafeFreezeByteArrayOp , UnsafeThawByteArrayOp , SizeofByteArrayOp , SizeofMutableByteArrayOp , GetSizeofMutableByteArrayOp , IndexByteArrayOp_Char , IndexByteArrayOp_WideChar , IndexByteArrayOp_Int , IndexByteArrayOp_Word , IndexByteArrayOp_Addr , IndexByteArrayOp_Float , IndexByteArrayOp_Double , 
IndexByteArrayOp_StablePtr , IndexByteArrayOp_Int8 , IndexByteArrayOp_Word8 , IndexByteArrayOp_Int16 , IndexByteArrayOp_Word16 , IndexByteArrayOp_Int32 , IndexByteArrayOp_Word32 , IndexByteArrayOp_Int64 , IndexByteArrayOp_Word64 , IndexByteArrayOp_Word8AsChar , IndexByteArrayOp_Word8AsWideChar , IndexByteArrayOp_Word8AsInt , IndexByteArrayOp_Word8AsWord , IndexByteArrayOp_Word8AsAddr , IndexByteArrayOp_Word8AsFloat , IndexByteArrayOp_Word8AsDouble , IndexByteArrayOp_Word8AsStablePtr , IndexByteArrayOp_Word8AsInt16 , IndexByteArrayOp_Word8AsWord16 , IndexByteArrayOp_Word8AsInt32 , IndexByteArrayOp_Word8AsWord32 , IndexByteArrayOp_Word8AsInt64 , IndexByteArrayOp_Word8AsWord64 , ReadByteArrayOp_Char , ReadByteArrayOp_WideChar , ReadByteArrayOp_Int , ReadByteArrayOp_Word , ReadByteArrayOp_Addr , ReadByteArrayOp_Float , ReadByteArrayOp_Double , ReadByteArrayOp_StablePtr , ReadByteArrayOp_Int8 , ReadByteArrayOp_Word8 , ReadByteArrayOp_Int16 , ReadByteArrayOp_Word16 , ReadByteArrayOp_Int32 , ReadByteArrayOp_Word32 , ReadByteArrayOp_Int64 , ReadByteArrayOp_Word64 , ReadByteArrayOp_Word8AsChar , ReadByteArrayOp_Word8AsWideChar , ReadByteArrayOp_Word8AsInt , ReadByteArrayOp_Word8AsWord , ReadByteArrayOp_Word8AsAddr , ReadByteArrayOp_Word8AsFloat , ReadByteArrayOp_Word8AsDouble , ReadByteArrayOp_Word8AsStablePtr , ReadByteArrayOp_Word8AsInt16 , ReadByteArrayOp_Word8AsWord16 , ReadByteArrayOp_Word8AsInt32 , ReadByteArrayOp_Word8AsWord32 , ReadByteArrayOp_Word8AsInt64 , ReadByteArrayOp_Word8AsWord64 , WriteByteArrayOp_Char , WriteByteArrayOp_WideChar , WriteByteArrayOp_Int , WriteByteArrayOp_Word , WriteByteArrayOp_Addr , WriteByteArrayOp_Float , WriteByteArrayOp_Double , WriteByteArrayOp_StablePtr , WriteByteArrayOp_Int8 , WriteByteArrayOp_Word8 , WriteByteArrayOp_Int16 , WriteByteArrayOp_Word16 , WriteByteArrayOp_Int32 , WriteByteArrayOp_Word32 , WriteByteArrayOp_Int64 , WriteByteArrayOp_Word64 , WriteByteArrayOp_Word8AsChar , WriteByteArrayOp_Word8AsWideChar , 
WriteByteArrayOp_Word8AsInt , WriteByteArrayOp_Word8AsWord , WriteByteArrayOp_Word8AsAddr , WriteByteArrayOp_Word8AsFloat , WriteByteArrayOp_Word8AsDouble , WriteByteArrayOp_Word8AsStablePtr , WriteByteArrayOp_Word8AsInt16 , WriteByteArrayOp_Word8AsWord16 , WriteByteArrayOp_Word8AsInt32 , WriteByteArrayOp_Word8AsWord32 , WriteByteArrayOp_Word8AsInt64 , WriteByteArrayOp_Word8AsWord64 , CompareByteArraysOp , CopyByteArrayOp , CopyMutableByteArrayOp , CopyMutableByteArrayNonOverlappingOp , CopyByteArrayToAddrOp , CopyMutableByteArrayToAddrOp , CopyAddrToByteArrayOp , CopyAddrToAddrOp , CopyAddrToAddrNonOverlappingOp , SetByteArrayOp , SetAddrRangeOp , AtomicReadByteArrayOp_Int , AtomicWriteByteArrayOp_Int , CasByteArrayOp_Int , CasByteArrayOp_Int8 , CasByteArrayOp_Int16 , CasByteArrayOp_Int32 , CasByteArrayOp_Int64 , FetchAddByteArrayOp_Int , FetchSubByteArrayOp_Int , FetchAndByteArrayOp_Int , FetchNandByteArrayOp_Int , FetchOrByteArrayOp_Int , FetchXorByteArrayOp_Int , AddrAddOp , AddrSubOp , AddrRemOp , AddrToIntOp , IntToAddrOp , AddrGtOp , AddrGeOp , AddrEqOp , AddrNeOp , AddrLtOp , AddrLeOp , IndexOffAddrOp_Char , IndexOffAddrOp_WideChar , IndexOffAddrOp_Int , IndexOffAddrOp_Word , IndexOffAddrOp_Addr , IndexOffAddrOp_Float , IndexOffAddrOp_Double , IndexOffAddrOp_StablePtr , IndexOffAddrOp_Int8 , IndexOffAddrOp_Word8 , IndexOffAddrOp_Int16 , IndexOffAddrOp_Word16 , IndexOffAddrOp_Int32 , IndexOffAddrOp_Word32 , IndexOffAddrOp_Int64 , IndexOffAddrOp_Word64 , IndexOffAddrOp_Word8AsChar , IndexOffAddrOp_Word8AsWideChar , IndexOffAddrOp_Word8AsInt , IndexOffAddrOp_Word8AsWord , IndexOffAddrOp_Word8AsAddr , IndexOffAddrOp_Word8AsFloat , IndexOffAddrOp_Word8AsDouble , IndexOffAddrOp_Word8AsStablePtr , IndexOffAddrOp_Word8AsInt16 , IndexOffAddrOp_Word8AsWord16 , IndexOffAddrOp_Word8AsInt32 , IndexOffAddrOp_Word8AsWord32 , IndexOffAddrOp_Word8AsInt64 , IndexOffAddrOp_Word8AsWord64 , ReadOffAddrOp_Char , ReadOffAddrOp_WideChar , ReadOffAddrOp_Int , ReadOffAddrOp_Word , 
ReadOffAddrOp_Addr , ReadOffAddrOp_Float , ReadOffAddrOp_Double , ReadOffAddrOp_StablePtr , ReadOffAddrOp_Int8 , ReadOffAddrOp_Word8 , ReadOffAddrOp_Int16 , ReadOffAddrOp_Word16 , ReadOffAddrOp_Int32 , ReadOffAddrOp_Word32 , ReadOffAddrOp_Int64 , ReadOffAddrOp_Word64 , ReadOffAddrOp_Word8AsChar , ReadOffAddrOp_Word8AsWideChar , ReadOffAddrOp_Word8AsInt , ReadOffAddrOp_Word8AsWord , ReadOffAddrOp_Word8AsAddr , ReadOffAddrOp_Word8AsFloat , ReadOffAddrOp_Word8AsDouble , ReadOffAddrOp_Word8AsStablePtr , ReadOffAddrOp_Word8AsInt16 , ReadOffAddrOp_Word8AsWord16 , ReadOffAddrOp_Word8AsInt32 , ReadOffAddrOp_Word8AsWord32 , ReadOffAddrOp_Word8AsInt64 , ReadOffAddrOp_Word8AsWord64 , WriteOffAddrOp_Char , WriteOffAddrOp_WideChar , WriteOffAddrOp_Int , WriteOffAddrOp_Word , WriteOffAddrOp_Addr , WriteOffAddrOp_Float , WriteOffAddrOp_Double , WriteOffAddrOp_StablePtr , WriteOffAddrOp_Int8 , WriteOffAddrOp_Word8 , WriteOffAddrOp_Int16 , WriteOffAddrOp_Word16 , WriteOffAddrOp_Int32 , WriteOffAddrOp_Word32 , WriteOffAddrOp_Int64 , WriteOffAddrOp_Word64 , WriteOffAddrOp_Word8AsChar , WriteOffAddrOp_Word8AsWideChar , WriteOffAddrOp_Word8AsInt , WriteOffAddrOp_Word8AsWord , WriteOffAddrOp_Word8AsAddr , WriteOffAddrOp_Word8AsFloat , WriteOffAddrOp_Word8AsDouble , WriteOffAddrOp_Word8AsStablePtr , WriteOffAddrOp_Word8AsInt16 , WriteOffAddrOp_Word8AsWord16 , WriteOffAddrOp_Word8AsInt32 , WriteOffAddrOp_Word8AsWord32 , WriteOffAddrOp_Word8AsInt64 , WriteOffAddrOp_Word8AsWord64 , InterlockedExchange_Addr , InterlockedExchange_Word , CasAddrOp_Addr , CasAddrOp_Word , CasAddrOp_Word8 , CasAddrOp_Word16 , CasAddrOp_Word32 , CasAddrOp_Word64 , FetchAddAddrOp_Word , FetchSubAddrOp_Word , FetchAndAddrOp_Word , FetchNandAddrOp_Word , FetchOrAddrOp_Word , FetchXorAddrOp_Word , AtomicReadAddrOp_Word , AtomicWriteAddrOp_Word , NewMutVarOp , ReadMutVarOp , WriteMutVarOp , AtomicSwapMutVarOp , AtomicModifyMutVar2Op , AtomicModifyMutVar_Op , CasMutVarOp , CatchOp , RaiseOp , RaiseUnderflowOp , 
RaiseOverflowOp , RaiseDivZeroOp , RaiseIOOp , MaskAsyncExceptionsOp , MaskUninterruptibleOp , UnmaskAsyncExceptionsOp , MaskStatus , NewPromptTagOp , PromptOp , Control0Op , AtomicallyOp , RetryOp , CatchRetryOp , CatchSTMOp , NewTVarOp , ReadTVarOp , ReadTVarIOOp , WriteTVarOp , NewMVarOp , TakeMVarOp , TryTakeMVarOp , PutMVarOp , TryPutMVarOp , ReadMVarOp , TryReadMVarOp , IsEmptyMVarOp , NewIOPortOp , ReadIOPortOp , WriteIOPortOp , DelayOp , WaitReadOp , WaitWriteOp , ForkOp , ForkOnOp , KillThreadOp , YieldOp , MyThreadIdOp , LabelThreadOp , IsCurrentThreadBoundOp , NoDuplicateOp , GetThreadLabelOp , ThreadStatusOp , ListThreadsOp , MkWeakOp , MkWeakNoFinalizerOp , AddCFinalizerToWeakOp , DeRefWeakOp , FinalizeWeakOp , TouchOp , MakeStablePtrOp , DeRefStablePtrOp , EqStablePtrOp , MakeStableNameOp , StableNameToIntOp , CompactNewOp , CompactResizeOp , CompactContainsOp , CompactContainsAnyOp , CompactGetFirstBlockOp , CompactGetNextBlockOp , CompactAllocateBlockOp , CompactFixupPointersOp , CompactAdd , CompactAddWithSharing , CompactSize , ReallyUnsafePtrEqualityOp , ParOp , SparkOp , GetSparkOp , NumSparks , KeepAliveOp , DataToTagSmallOp , DataToTagLargeOp , TagToEnumOp , AddrToAnyOp , AnyToAddrOp , MkApUpd0_Op , NewBCOOp , UnpackClosureOp , ClosureSizeOp , GetApStackValOp , GetCCSOfOp , GetCurrentCCSOp , ClearCCSOp , WhereFromOp , TraceEventOp , TraceEventBinaryOp , TraceMarkerOp , SetThreadAllocationCounter , (VecBroadcastOp IntVec 16 W8) , (VecBroadcastOp IntVec 8 W16) , (VecBroadcastOp IntVec 4 W32) , (VecBroadcastOp IntVec 2 W64) , (VecBroadcastOp IntVec 32 W8) , (VecBroadcastOp IntVec 16 W16) , (VecBroadcastOp IntVec 8 W32) , (VecBroadcastOp IntVec 4 W64) , (VecBroadcastOp IntVec 64 W8) , (VecBroadcastOp IntVec 32 W16) , (VecBroadcastOp IntVec 16 W32) , (VecBroadcastOp IntVec 8 W64) , (VecBroadcastOp WordVec 16 W8) , (VecBroadcastOp WordVec 8 W16) , (VecBroadcastOp WordVec 4 W32) , (VecBroadcastOp WordVec 2 W64) , (VecBroadcastOp WordVec 32 W8) , 
(VecBroadcastOp WordVec 16 W16) , (VecBroadcastOp WordVec 8 W32) , (VecBroadcastOp WordVec 4 W64) , (VecBroadcastOp WordVec 64 W8) , (VecBroadcastOp WordVec 32 W16) , (VecBroadcastOp WordVec 16 W32) , (VecBroadcastOp WordVec 8 W64) , (VecBroadcastOp FloatVec 4 W32) , (VecBroadcastOp FloatVec 2 W64) , (VecBroadcastOp FloatVec 8 W32) , (VecBroadcastOp FloatVec 4 W64) , (VecBroadcastOp FloatVec 16 W32) , (VecBroadcastOp FloatVec 8 W64) , (VecPackOp IntVec 16 W8) , (VecPackOp IntVec 8 W16) , (VecPackOp IntVec 4 W32) , (VecPackOp IntVec 2 W64) , (VecPackOp IntVec 32 W8) , (VecPackOp IntVec 16 W16) , (VecPackOp IntVec 8 W32) , (VecPackOp IntVec 4 W64) , (VecPackOp IntVec 64 W8) , (VecPackOp IntVec 32 W16) , (VecPackOp IntVec 16 W32) , (VecPackOp IntVec 8 W64) , (VecPackOp WordVec 16 W8) , (VecPackOp WordVec 8 W16) , (VecPackOp WordVec 4 W32) , (VecPackOp WordVec 2 W64) , (VecPackOp WordVec 32 W8) , (VecPackOp WordVec 16 W16) , (VecPackOp WordVec 8 W32) , (VecPackOp WordVec 4 W64) , (VecPackOp WordVec 64 W8) , (VecPackOp WordVec 32 W16) , (VecPackOp WordVec 16 W32) , (VecPackOp WordVec 8 W64) , (VecPackOp FloatVec 4 W32) , (VecPackOp FloatVec 2 W64) , (VecPackOp FloatVec 8 W32) , (VecPackOp FloatVec 4 W64) , (VecPackOp FloatVec 16 W32) , (VecPackOp FloatVec 8 W64) , (VecUnpackOp IntVec 16 W8) , (VecUnpackOp IntVec 8 W16) , (VecUnpackOp IntVec 4 W32) , (VecUnpackOp IntVec 2 W64) , (VecUnpackOp IntVec 32 W8) , (VecUnpackOp IntVec 16 W16) , (VecUnpackOp IntVec 8 W32) , (VecUnpackOp IntVec 4 W64) , (VecUnpackOp IntVec 64 W8) , (VecUnpackOp IntVec 32 W16) , (VecUnpackOp IntVec 16 W32) , (VecUnpackOp IntVec 8 W64) , (VecUnpackOp WordVec 16 W8) , (VecUnpackOp WordVec 8 W16) , (VecUnpackOp WordVec 4 W32) , (VecUnpackOp WordVec 2 W64) , (VecUnpackOp WordVec 32 W8) , (VecUnpackOp WordVec 16 W16) , (VecUnpackOp WordVec 8 W32) , (VecUnpackOp WordVec 4 W64) , (VecUnpackOp WordVec 64 W8) , (VecUnpackOp WordVec 32 W16) , (VecUnpackOp WordVec 16 W32) , (VecUnpackOp WordVec 8 W64) , 
(VecUnpackOp FloatVec 4 W32) , (VecUnpackOp FloatVec 2 W64) , (VecUnpackOp FloatVec 8 W32) , (VecUnpackOp FloatVec 4 W64) , (VecUnpackOp FloatVec 16 W32) , (VecUnpackOp FloatVec 8 W64) , (VecInsertOp IntVec 16 W8) , (VecInsertOp IntVec 8 W16) , (VecInsertOp IntVec 4 W32) , (VecInsertOp IntVec 2 W64) , (VecInsertOp IntVec 32 W8) , (VecInsertOp IntVec 16 W16) , (VecInsertOp IntVec 8 W32) , (VecInsertOp IntVec 4 W64) , (VecInsertOp IntVec 64 W8) , (VecInsertOp IntVec 32 W16) , (VecInsertOp IntVec 16 W32) , (VecInsertOp IntVec 8 W64) , (VecInsertOp WordVec 16 W8) , (VecInsertOp WordVec 8 W16) , (VecInsertOp WordVec 4 W32) , (VecInsertOp WordVec 2 W64) , (VecInsertOp WordVec 32 W8) , (VecInsertOp WordVec 16 W16) , (VecInsertOp WordVec 8 W32) , (VecInsertOp WordVec 4 W64) , (VecInsertOp WordVec 64 W8) , (VecInsertOp WordVec 32 W16) , (VecInsertOp WordVec 16 W32) , (VecInsertOp WordVec 8 W64) , (VecInsertOp FloatVec 4 W32) , (VecInsertOp FloatVec 2 W64) , (VecInsertOp FloatVec 8 W32) , (VecInsertOp FloatVec 4 W64) , (VecInsertOp FloatVec 16 W32) , (VecInsertOp FloatVec 8 W64) , (VecAddOp IntVec 16 W8) , (VecAddOp IntVec 8 W16) , (VecAddOp IntVec 4 W32) , (VecAddOp IntVec 2 W64) , (VecAddOp IntVec 32 W8) , (VecAddOp IntVec 16 W16) , (VecAddOp IntVec 8 W32) , (VecAddOp IntVec 4 W64) , (VecAddOp IntVec 64 W8) , (VecAddOp IntVec 32 W16) , (VecAddOp IntVec 16 W32) , (VecAddOp IntVec 8 W64) , (VecAddOp WordVec 16 W8) , (VecAddOp WordVec 8 W16) , (VecAddOp WordVec 4 W32) , (VecAddOp WordVec 2 W64) , (VecAddOp WordVec 32 W8) , (VecAddOp WordVec 16 W16) , (VecAddOp WordVec 8 W32) , (VecAddOp WordVec 4 W64) , (VecAddOp WordVec 64 W8) , (VecAddOp WordVec 32 W16) , (VecAddOp WordVec 16 W32) , (VecAddOp WordVec 8 W64) , (VecAddOp FloatVec 4 W32) , (VecAddOp FloatVec 2 W64) , (VecAddOp FloatVec 8 W32) , (VecAddOp FloatVec 4 W64) , (VecAddOp FloatVec 16 W32) , (VecAddOp FloatVec 8 W64) , (VecSubOp IntVec 16 W8) , (VecSubOp IntVec 8 W16) , (VecSubOp IntVec 4 W32) , (VecSubOp IntVec 2 
W64) , (VecSubOp IntVec 32 W8) , (VecSubOp IntVec 16 W16) , (VecSubOp IntVec 8 W32) , (VecSubOp IntVec 4 W64) , (VecSubOp IntVec 64 W8) , (VecSubOp IntVec 32 W16) , (VecSubOp IntVec 16 W32) , (VecSubOp IntVec 8 W64) , (VecSubOp WordVec 16 W8) , (VecSubOp WordVec 8 W16) , (VecSubOp WordVec 4 W32) , (VecSubOp WordVec 2 W64) , (VecSubOp WordVec 32 W8) , (VecSubOp WordVec 16 W16) , (VecSubOp WordVec 8 W32) , (VecSubOp WordVec 4 W64) , (VecSubOp WordVec 64 W8) , (VecSubOp WordVec 32 W16) , (VecSubOp WordVec 16 W32) , (VecSubOp WordVec 8 W64) , (VecSubOp FloatVec 4 W32) , (VecSubOp FloatVec 2 W64) , (VecSubOp FloatVec 8 W32) , (VecSubOp FloatVec 4 W64) , (VecSubOp FloatVec 16 W32) , (VecSubOp FloatVec 8 W64) , (VecMulOp IntVec 16 W8) , (VecMulOp IntVec 8 W16) , (VecMulOp IntVec 4 W32) , (VecMulOp IntVec 2 W64) , (VecMulOp IntVec 32 W8) , (VecMulOp IntVec 16 W16) , (VecMulOp IntVec 8 W32) , (VecMulOp IntVec 4 W64) , (VecMulOp IntVec 64 W8) , (VecMulOp IntVec 32 W16) , (VecMulOp IntVec 16 W32) , (VecMulOp IntVec 8 W64) , (VecMulOp WordVec 16 W8) , (VecMulOp WordVec 8 W16) , (VecMulOp WordVec 4 W32) , (VecMulOp WordVec 2 W64) , (VecMulOp WordVec 32 W8) , (VecMulOp WordVec 16 W16) , (VecMulOp WordVec 8 W32) , (VecMulOp WordVec 4 W64) , (VecMulOp WordVec 64 W8) , (VecMulOp WordVec 32 W16) , (VecMulOp WordVec 16 W32) , (VecMulOp WordVec 8 W64) , (VecMulOp FloatVec 4 W32) , (VecMulOp FloatVec 2 W64) , (VecMulOp FloatVec 8 W32) , (VecMulOp FloatVec 4 W64) , (VecMulOp FloatVec 16 W32) , (VecMulOp FloatVec 8 W64) , (VecDivOp FloatVec 4 W32) , (VecDivOp FloatVec 2 W64) , (VecDivOp FloatVec 8 W32) , (VecDivOp FloatVec 4 W64) , (VecDivOp FloatVec 16 W32) , (VecDivOp FloatVec 8 W64) , (VecQuotOp IntVec 16 W8) , (VecQuotOp IntVec 8 W16) , (VecQuotOp IntVec 4 W32) , (VecQuotOp IntVec 2 W64) , (VecQuotOp IntVec 32 W8) , (VecQuotOp IntVec 16 W16) , (VecQuotOp IntVec 8 W32) , (VecQuotOp IntVec 4 W64) , (VecQuotOp IntVec 64 W8) , (VecQuotOp IntVec 32 W16) , (VecQuotOp IntVec 16 W32) , 
(VecQuotOp IntVec 8 W64) , (VecQuotOp WordVec 16 W8) , (VecQuotOp WordVec 8 W16) , (VecQuotOp WordVec 4 W32) , (VecQuotOp WordVec 2 W64) , (VecQuotOp WordVec 32 W8) , (VecQuotOp WordVec 16 W16) , (VecQuotOp WordVec 8 W32) , (VecQuotOp WordVec 4 W64) , (VecQuotOp WordVec 64 W8) , (VecQuotOp WordVec 32 W16) , (VecQuotOp WordVec 16 W32) , (VecQuotOp WordVec 8 W64) , (VecRemOp IntVec 16 W8) , (VecRemOp IntVec 8 W16) , (VecRemOp IntVec 4 W32) , (VecRemOp IntVec 2 W64) , (VecRemOp IntVec 32 W8) , (VecRemOp IntVec 16 W16) , (VecRemOp IntVec 8 W32) , (VecRemOp IntVec 4 W64) , (VecRemOp IntVec 64 W8) , (VecRemOp IntVec 32 W16) , (VecRemOp IntVec 16 W32) , (VecRemOp IntVec 8 W64) , (VecRemOp WordVec 16 W8) , (VecRemOp WordVec 8 W16) , (VecRemOp WordVec 4 W32) , (VecRemOp WordVec 2 W64) , (VecRemOp WordVec 32 W8) , (VecRemOp WordVec 16 W16) , (VecRemOp WordVec 8 W32) , (VecRemOp WordVec 4 W64) , (VecRemOp WordVec 64 W8) , (VecRemOp WordVec 32 W16) , (VecRemOp WordVec 16 W32) , (VecRemOp WordVec 8 W64) , (VecNegOp IntVec 16 W8) , (VecNegOp IntVec 8 W16) , (VecNegOp IntVec 4 W32) , (VecNegOp IntVec 2 W64) , (VecNegOp IntVec 32 W8) , (VecNegOp IntVec 16 W16) , (VecNegOp IntVec 8 W32) , (VecNegOp IntVec 4 W64) , (VecNegOp IntVec 64 W8) , (VecNegOp IntVec 32 W16) , (VecNegOp IntVec 16 W32) , (VecNegOp IntVec 8 W64) , (VecNegOp FloatVec 4 W32) , (VecNegOp FloatVec 2 W64) , (VecNegOp FloatVec 8 W32) , (VecNegOp FloatVec 4 W64) , (VecNegOp FloatVec 16 W32) , (VecNegOp FloatVec 8 W64) , (VecIndexByteArrayOp IntVec 16 W8) , (VecIndexByteArrayOp IntVec 8 W16) , (VecIndexByteArrayOp IntVec 4 W32) , (VecIndexByteArrayOp IntVec 2 W64) , (VecIndexByteArrayOp IntVec 32 W8) , (VecIndexByteArrayOp IntVec 16 W16) , (VecIndexByteArrayOp IntVec 8 W32) , (VecIndexByteArrayOp IntVec 4 W64) , (VecIndexByteArrayOp IntVec 64 W8) , (VecIndexByteArrayOp IntVec 32 W16) , (VecIndexByteArrayOp IntVec 16 W32) , (VecIndexByteArrayOp IntVec 8 W64) , (VecIndexByteArrayOp WordVec 16 W8) , (VecIndexByteArrayOp 
WordVec 8 W16) , (VecIndexByteArrayOp WordVec 4 W32) , (VecIndexByteArrayOp WordVec 2 W64) , (VecIndexByteArrayOp WordVec 32 W8) , (VecIndexByteArrayOp WordVec 16 W16) , (VecIndexByteArrayOp WordVec 8 W32) , (VecIndexByteArrayOp WordVec 4 W64) , (VecIndexByteArrayOp WordVec 64 W8) , (VecIndexByteArrayOp WordVec 32 W16) , (VecIndexByteArrayOp WordVec 16 W32) , (VecIndexByteArrayOp WordVec 8 W64) , (VecIndexByteArrayOp FloatVec 4 W32) , (VecIndexByteArrayOp FloatVec 2 W64) , (VecIndexByteArrayOp FloatVec 8 W32) , (VecIndexByteArrayOp FloatVec 4 W64) , (VecIndexByteArrayOp FloatVec 16 W32) , (VecIndexByteArrayOp FloatVec 8 W64) , (VecReadByteArrayOp IntVec 16 W8) , (VecReadByteArrayOp IntVec 8 W16) , (VecReadByteArrayOp IntVec 4 W32) , (VecReadByteArrayOp IntVec 2 W64) , (VecReadByteArrayOp IntVec 32 W8) , (VecReadByteArrayOp IntVec 16 W16) , (VecReadByteArrayOp IntVec 8 W32) , (VecReadByteArrayOp IntVec 4 W64) , (VecReadByteArrayOp IntVec 64 W8) , (VecReadByteArrayOp IntVec 32 W16) , (VecReadByteArrayOp IntVec 16 W32) , (VecReadByteArrayOp IntVec 8 W64) , (VecReadByteArrayOp WordVec 16 W8) , (VecReadByteArrayOp WordVec 8 W16) , (VecReadByteArrayOp WordVec 4 W32) , (VecReadByteArrayOp WordVec 2 W64) , (VecReadByteArrayOp WordVec 32 W8) , (VecReadByteArrayOp WordVec 16 W16) , (VecReadByteArrayOp WordVec 8 W32) , (VecReadByteArrayOp WordVec 4 W64) , (VecReadByteArrayOp WordVec 64 W8) , (VecReadByteArrayOp WordVec 32 W16) , (VecReadByteArrayOp WordVec 16 W32) , (VecReadByteArrayOp WordVec 8 W64) , (VecReadByteArrayOp FloatVec 4 W32) , (VecReadByteArrayOp FloatVec 2 W64) , (VecReadByteArrayOp FloatVec 8 W32) , (VecReadByteArrayOp FloatVec 4 W64) , (VecReadByteArrayOp FloatVec 16 W32) , (VecReadByteArrayOp FloatVec 8 W64) , (VecWriteByteArrayOp IntVec 16 W8) , (VecWriteByteArrayOp IntVec 8 W16) , (VecWriteByteArrayOp IntVec 4 W32) , (VecWriteByteArrayOp IntVec 2 W64) , (VecWriteByteArrayOp IntVec 32 W8) , (VecWriteByteArrayOp IntVec 16 W16) , (VecWriteByteArrayOp IntVec 8 
W32) , (VecWriteByteArrayOp IntVec 4 W64) , (VecWriteByteArrayOp IntVec 64 W8) , (VecWriteByteArrayOp IntVec 32 W16) , (VecWriteByteArrayOp IntVec 16 W32) , (VecWriteByteArrayOp IntVec 8 W64) , (VecWriteByteArrayOp WordVec 16 W8) , (VecWriteByteArrayOp WordVec 8 W16) , (VecWriteByteArrayOp WordVec 4 W32) , (VecWriteByteArrayOp WordVec 2 W64) , (VecWriteByteArrayOp WordVec 32 W8) , (VecWriteByteArrayOp WordVec 16 W16) , (VecWriteByteArrayOp WordVec 8 W32) , (VecWriteByteArrayOp WordVec 4 W64) , (VecWriteByteArrayOp WordVec 64 W8) , (VecWriteByteArrayOp WordVec 32 W16) , (VecWriteByteArrayOp WordVec 16 W32) , (VecWriteByteArrayOp WordVec 8 W64) , (VecWriteByteArrayOp FloatVec 4 W32) , (VecWriteByteArrayOp FloatVec 2 W64) , (VecWriteByteArrayOp FloatVec 8 W32) , (VecWriteByteArrayOp FloatVec 4 W64) , (VecWriteByteArrayOp FloatVec 16 W32) , (VecWriteByteArrayOp FloatVec 8 W64) , (VecIndexOffAddrOp IntVec 16 W8) , (VecIndexOffAddrOp IntVec 8 W16) , (VecIndexOffAddrOp IntVec 4 W32) , (VecIndexOffAddrOp IntVec 2 W64) , (VecIndexOffAddrOp IntVec 32 W8) , (VecIndexOffAddrOp IntVec 16 W16) , (VecIndexOffAddrOp IntVec 8 W32) , (VecIndexOffAddrOp IntVec 4 W64) , (VecIndexOffAddrOp IntVec 64 W8) , (VecIndexOffAddrOp IntVec 32 W16) , (VecIndexOffAddrOp IntVec 16 W32) , (VecIndexOffAddrOp IntVec 8 W64) , (VecIndexOffAddrOp WordVec 16 W8) , (VecIndexOffAddrOp WordVec 8 W16) , (VecIndexOffAddrOp WordVec 4 W32) , (VecIndexOffAddrOp WordVec 2 W64) , (VecIndexOffAddrOp WordVec 32 W8) , (VecIndexOffAddrOp WordVec 16 W16) , (VecIndexOffAddrOp WordVec 8 W32) , (VecIndexOffAddrOp WordVec 4 W64) , (VecIndexOffAddrOp WordVec 64 W8) , (VecIndexOffAddrOp WordVec 32 W16) , (VecIndexOffAddrOp WordVec 16 W32) , (VecIndexOffAddrOp WordVec 8 W64) , (VecIndexOffAddrOp FloatVec 4 W32) , (VecIndexOffAddrOp FloatVec 2 W64) , (VecIndexOffAddrOp FloatVec 8 W32) , (VecIndexOffAddrOp FloatVec 4 W64) , (VecIndexOffAddrOp FloatVec 16 W32) , (VecIndexOffAddrOp FloatVec 8 W64) , (VecReadOffAddrOp IntVec 16 
W8) , (VecReadOffAddrOp IntVec 8 W16) , (VecReadOffAddrOp IntVec 4 W32) , (VecReadOffAddrOp IntVec 2 W64) , (VecReadOffAddrOp IntVec 32 W8) , (VecReadOffAddrOp IntVec 16 W16) , (VecReadOffAddrOp IntVec 8 W32) , (VecReadOffAddrOp IntVec 4 W64) , (VecReadOffAddrOp IntVec 64 W8) , (VecReadOffAddrOp IntVec 32 W16) , (VecReadOffAddrOp IntVec 16 W32) , (VecReadOffAddrOp IntVec 8 W64) , (VecReadOffAddrOp WordVec 16 W8) , (VecReadOffAddrOp WordVec 8 W16) , (VecReadOffAddrOp WordVec 4 W32) , (VecReadOffAddrOp WordVec 2 W64) , (VecReadOffAddrOp WordVec 32 W8) , (VecReadOffAddrOp WordVec 16 W16) , (VecReadOffAddrOp WordVec 8 W32) , (VecReadOffAddrOp WordVec 4 W64) , (VecReadOffAddrOp WordVec 64 W8) , (VecReadOffAddrOp WordVec 32 W16) , (VecReadOffAddrOp WordVec 16 W32) , (VecReadOffAddrOp WordVec 8 W64) , (VecReadOffAddrOp FloatVec 4 W32) , (VecReadOffAddrOp FloatVec 2 W64) , (VecReadOffAddrOp FloatVec 8 W32) , (VecReadOffAddrOp FloatVec 4 W64) , (VecReadOffAddrOp FloatVec 16 W32) , (VecReadOffAddrOp FloatVec 8 W64) , (VecWriteOffAddrOp IntVec 16 W8) , (VecWriteOffAddrOp IntVec 8 W16) , (VecWriteOffAddrOp IntVec 4 W32) , (VecWriteOffAddrOp IntVec 2 W64) , (VecWriteOffAddrOp IntVec 32 W8) , (VecWriteOffAddrOp IntVec 16 W16) , (VecWriteOffAddrOp IntVec 8 W32) , (VecWriteOffAddrOp IntVec 4 W64) , (VecWriteOffAddrOp IntVec 64 W8) , (VecWriteOffAddrOp IntVec 32 W16) , (VecWriteOffAddrOp IntVec 16 W32) , (VecWriteOffAddrOp IntVec 8 W64) , (VecWriteOffAddrOp WordVec 16 W8) , (VecWriteOffAddrOp WordVec 8 W16) , (VecWriteOffAddrOp WordVec 4 W32) , (VecWriteOffAddrOp WordVec 2 W64) , (VecWriteOffAddrOp WordVec 32 W8) , (VecWriteOffAddrOp WordVec 16 W16) , (VecWriteOffAddrOp WordVec 8 W32) , (VecWriteOffAddrOp WordVec 4 W64) , (VecWriteOffAddrOp WordVec 64 W8) , (VecWriteOffAddrOp WordVec 32 W16) , (VecWriteOffAddrOp WordVec 16 W32) , (VecWriteOffAddrOp WordVec 8 W64) , (VecWriteOffAddrOp FloatVec 4 W32) , (VecWriteOffAddrOp FloatVec 2 W64) , (VecWriteOffAddrOp FloatVec 8 W32) , 
(VecWriteOffAddrOp FloatVec 4 W64) , (VecWriteOffAddrOp FloatVec 16 W32) , (VecWriteOffAddrOp FloatVec 8 W64) , (VecIndexScalarByteArrayOp IntVec 16 W8) , (VecIndexScalarByteArrayOp IntVec 8 W16) , (VecIndexScalarByteArrayOp IntVec 4 W32) , (VecIndexScalarByteArrayOp IntVec 2 W64) , (VecIndexScalarByteArrayOp IntVec 32 W8) , (VecIndexScalarByteArrayOp IntVec 16 W16) , (VecIndexScalarByteArrayOp IntVec 8 W32) , (VecIndexScalarByteArrayOp IntVec 4 W64) , (VecIndexScalarByteArrayOp IntVec 64 W8) , (VecIndexScalarByteArrayOp IntVec 32 W16) , (VecIndexScalarByteArrayOp IntVec 16 W32) , (VecIndexScalarByteArrayOp IntVec 8 W64) , (VecIndexScalarByteArrayOp WordVec 16 W8) , (VecIndexScalarByteArrayOp WordVec 8 W16) , (VecIndexScalarByteArrayOp WordVec 4 W32) , (VecIndexScalarByteArrayOp WordVec 2 W64) , (VecIndexScalarByteArrayOp WordVec 32 W8) , (VecIndexScalarByteArrayOp WordVec 16 W16) , (VecIndexScalarByteArrayOp WordVec 8 W32) , (VecIndexScalarByteArrayOp WordVec 4 W64) , (VecIndexScalarByteArrayOp WordVec 64 W8) , (VecIndexScalarByteArrayOp WordVec 32 W16) , (VecIndexScalarByteArrayOp WordVec 16 W32) , (VecIndexScalarByteArrayOp WordVec 8 W64) , (VecIndexScalarByteArrayOp FloatVec 4 W32) , (VecIndexScalarByteArrayOp FloatVec 2 W64) , (VecIndexScalarByteArrayOp FloatVec 8 W32) , (VecIndexScalarByteArrayOp FloatVec 4 W64) , (VecIndexScalarByteArrayOp FloatVec 16 W32) , (VecIndexScalarByteArrayOp FloatVec 8 W64) , (VecReadScalarByteArrayOp IntVec 16 W8) , (VecReadScalarByteArrayOp IntVec 8 W16) , (VecReadScalarByteArrayOp IntVec 4 W32) , (VecReadScalarByteArrayOp IntVec 2 W64) , (VecReadScalarByteArrayOp IntVec 32 W8) , (VecReadScalarByteArrayOp IntVec 16 W16) , (VecReadScalarByteArrayOp IntVec 8 W32) , (VecReadScalarByteArrayOp IntVec 4 W64) , (VecReadScalarByteArrayOp IntVec 64 W8) , (VecReadScalarByteArrayOp IntVec 32 W16) , (VecReadScalarByteArrayOp IntVec 16 W32) , (VecReadScalarByteArrayOp IntVec 8 W64) , (VecReadScalarByteArrayOp WordVec 16 W8) , 
(VecReadScalarByteArrayOp WordVec 8 W16) , (VecReadScalarByteArrayOp WordVec 4 W32) , (VecReadScalarByteArrayOp WordVec 2 W64) , (VecReadScalarByteArrayOp WordVec 32 W8) , (VecReadScalarByteArrayOp WordVec 16 W16) , (VecReadScalarByteArrayOp WordVec 8 W32) , (VecReadScalarByteArrayOp WordVec 4 W64) , (VecReadScalarByteArrayOp WordVec 64 W8) , (VecReadScalarByteArrayOp WordVec 32 W16) , (VecReadScalarByteArrayOp WordVec 16 W32) , (VecReadScalarByteArrayOp WordVec 8 W64) , (VecReadScalarByteArrayOp FloatVec 4 W32) , (VecReadScalarByteArrayOp FloatVec 2 W64) , (VecReadScalarByteArrayOp FloatVec 8 W32) , (VecReadScalarByteArrayOp FloatVec 4 W64) , (VecReadScalarByteArrayOp FloatVec 16 W32) , (VecReadScalarByteArrayOp FloatVec 8 W64) , (VecWriteScalarByteArrayOp IntVec 16 W8) , (VecWriteScalarByteArrayOp IntVec 8 W16) , (VecWriteScalarByteArrayOp IntVec 4 W32) , (VecWriteScalarByteArrayOp IntVec 2 W64) , (VecWriteScalarByteArrayOp IntVec 32 W8) , (VecWriteScalarByteArrayOp IntVec 16 W16) , (VecWriteScalarByteArrayOp IntVec 8 W32) , (VecWriteScalarByteArrayOp IntVec 4 W64) , (VecWriteScalarByteArrayOp IntVec 64 W8) , (VecWriteScalarByteArrayOp IntVec 32 W16) , (VecWriteScalarByteArrayOp IntVec 16 W32) , (VecWriteScalarByteArrayOp IntVec 8 W64) , (VecWriteScalarByteArrayOp WordVec 16 W8) , (VecWriteScalarByteArrayOp WordVec 8 W16) , (VecWriteScalarByteArrayOp WordVec 4 W32) , (VecWriteScalarByteArrayOp WordVec 2 W64) , (VecWriteScalarByteArrayOp WordVec 32 W8) , (VecWriteScalarByteArrayOp WordVec 16 W16) , (VecWriteScalarByteArrayOp WordVec 8 W32) , (VecWriteScalarByteArrayOp WordVec 4 W64) , (VecWriteScalarByteArrayOp WordVec 64 W8) , (VecWriteScalarByteArrayOp WordVec 32 W16) , (VecWriteScalarByteArrayOp WordVec 16 W32) , (VecWriteScalarByteArrayOp WordVec 8 W64) , (VecWriteScalarByteArrayOp FloatVec 4 W32) , (VecWriteScalarByteArrayOp FloatVec 2 W64) , (VecWriteScalarByteArrayOp FloatVec 8 W32) , (VecWriteScalarByteArrayOp FloatVec 4 W64) , (VecWriteScalarByteArrayOp 
FloatVec 16 W32) , (VecWriteScalarByteArrayOp FloatVec 8 W64) , (VecIndexScalarOffAddrOp IntVec 16 W8) , (VecIndexScalarOffAddrOp IntVec 8 W16) , (VecIndexScalarOffAddrOp IntVec 4 W32) , (VecIndexScalarOffAddrOp IntVec 2 W64) , (VecIndexScalarOffAddrOp IntVec 32 W8) , (VecIndexScalarOffAddrOp IntVec 16 W16) , (VecIndexScalarOffAddrOp IntVec 8 W32) , (VecIndexScalarOffAddrOp IntVec 4 W64) , (VecIndexScalarOffAddrOp IntVec 64 W8) , (VecIndexScalarOffAddrOp IntVec 32 W16) , (VecIndexScalarOffAddrOp IntVec 16 W32) , (VecIndexScalarOffAddrOp IntVec 8 W64) , (VecIndexScalarOffAddrOp WordVec 16 W8) , (VecIndexScalarOffAddrOp WordVec 8 W16) , (VecIndexScalarOffAddrOp WordVec 4 W32) , (VecIndexScalarOffAddrOp WordVec 2 W64) , (VecIndexScalarOffAddrOp WordVec 32 W8) , (VecIndexScalarOffAddrOp WordVec 16 W16) , (VecIndexScalarOffAddrOp WordVec 8 W32) , (VecIndexScalarOffAddrOp WordVec 4 W64) , (VecIndexScalarOffAddrOp WordVec 64 W8) , (VecIndexScalarOffAddrOp WordVec 32 W16) , (VecIndexScalarOffAddrOp WordVec 16 W32) , (VecIndexScalarOffAddrOp WordVec 8 W64) , (VecIndexScalarOffAddrOp FloatVec 4 W32) , (VecIndexScalarOffAddrOp FloatVec 2 W64) , (VecIndexScalarOffAddrOp FloatVec 8 W32) , (VecIndexScalarOffAddrOp FloatVec 4 W64) , (VecIndexScalarOffAddrOp FloatVec 16 W32) , (VecIndexScalarOffAddrOp FloatVec 8 W64) , (VecReadScalarOffAddrOp IntVec 16 W8) , (VecReadScalarOffAddrOp IntVec 8 W16) , (VecReadScalarOffAddrOp IntVec 4 W32) , (VecReadScalarOffAddrOp IntVec 2 W64) , (VecReadScalarOffAddrOp IntVec 32 W8) , (VecReadScalarOffAddrOp IntVec 16 W16) , (VecReadScalarOffAddrOp IntVec 8 W32) , (VecReadScalarOffAddrOp IntVec 4 W64) , (VecReadScalarOffAddrOp IntVec 64 W8) , (VecReadScalarOffAddrOp IntVec 32 W16) , (VecReadScalarOffAddrOp IntVec 16 W32) , (VecReadScalarOffAddrOp IntVec 8 W64) , (VecReadScalarOffAddrOp WordVec 16 W8) , (VecReadScalarOffAddrOp WordVec 8 W16) , (VecReadScalarOffAddrOp WordVec 4 W32) , (VecReadScalarOffAddrOp WordVec 2 W64) , (VecReadScalarOffAddrOp 
WordVec 32 W8) , (VecReadScalarOffAddrOp WordVec 16 W16) , (VecReadScalarOffAddrOp WordVec 8 W32) , (VecReadScalarOffAddrOp WordVec 4 W64) , (VecReadScalarOffAddrOp WordVec 64 W8) , (VecReadScalarOffAddrOp WordVec 32 W16) , (VecReadScalarOffAddrOp WordVec 16 W32) , (VecReadScalarOffAddrOp WordVec 8 W64) , (VecReadScalarOffAddrOp FloatVec 4 W32) , (VecReadScalarOffAddrOp FloatVec 2 W64) , (VecReadScalarOffAddrOp FloatVec 8 W32) , (VecReadScalarOffAddrOp FloatVec 4 W64) , (VecReadScalarOffAddrOp FloatVec 16 W32) , (VecReadScalarOffAddrOp FloatVec 8 W64) , (VecWriteScalarOffAddrOp IntVec 16 W8) , (VecWriteScalarOffAddrOp IntVec 8 W16) , (VecWriteScalarOffAddrOp IntVec 4 W32) , (VecWriteScalarOffAddrOp IntVec 2 W64) , (VecWriteScalarOffAddrOp IntVec 32 W8) , (VecWriteScalarOffAddrOp IntVec 16 W16) , (VecWriteScalarOffAddrOp IntVec 8 W32) , (VecWriteScalarOffAddrOp IntVec 4 W64) , (VecWriteScalarOffAddrOp IntVec 64 W8) , (VecWriteScalarOffAddrOp IntVec 32 W16) , (VecWriteScalarOffAddrOp IntVec 16 W32) , (VecWriteScalarOffAddrOp IntVec 8 W64) , (VecWriteScalarOffAddrOp WordVec 16 W8) , (VecWriteScalarOffAddrOp WordVec 8 W16) , (VecWriteScalarOffAddrOp WordVec 4 W32) , (VecWriteScalarOffAddrOp WordVec 2 W64) , (VecWriteScalarOffAddrOp WordVec 32 W8) , (VecWriteScalarOffAddrOp WordVec 16 W16) , (VecWriteScalarOffAddrOp WordVec 8 W32) , (VecWriteScalarOffAddrOp WordVec 4 W64) , (VecWriteScalarOffAddrOp WordVec 64 W8) , (VecWriteScalarOffAddrOp WordVec 32 W16) , (VecWriteScalarOffAddrOp WordVec 16 W32) , (VecWriteScalarOffAddrOp WordVec 8 W64) , (VecWriteScalarOffAddrOp FloatVec 4 W32) , (VecWriteScalarOffAddrOp FloatVec 2 W64) , (VecWriteScalarOffAddrOp FloatVec 8 W32) , (VecWriteScalarOffAddrOp FloatVec 4 W64) , (VecWriteScalarOffAddrOp FloatVec 16 W32) , (VecWriteScalarOffAddrOp FloatVec 8 W64) , (VecFMAdd FloatVec 4 W32) , (VecFMAdd FloatVec 2 W64) , (VecFMAdd FloatVec 8 W32) , (VecFMAdd FloatVec 4 W64) , (VecFMAdd FloatVec 16 W32) , (VecFMAdd FloatVec 8 W64) , (VecFMSub 
FloatVec 4 W32) , (VecFMSub FloatVec 2 W64) , (VecFMSub FloatVec 8 W32) , (VecFMSub FloatVec 4 W64) , (VecFMSub FloatVec 16 W32) , (VecFMSub FloatVec 8 W64) , (VecFNMAdd FloatVec 4 W32) , (VecFNMAdd FloatVec 2 W64) , (VecFNMAdd FloatVec 8 W32) , (VecFNMAdd FloatVec 4 W64) , (VecFNMAdd FloatVec 16 W32) , (VecFNMAdd FloatVec 8 W64) , (VecFNMSub FloatVec 4 W32) , (VecFNMSub FloatVec 2 W64) , (VecFNMSub FloatVec 8 W32) , (VecFNMSub FloatVec 4 W64) , (VecFNMSub FloatVec 16 W32) , (VecFNMSub FloatVec 8 W64) , (VecShuffleOp IntVec 16 W8) , (VecShuffleOp IntVec 8 W16) , (VecShuffleOp IntVec 4 W32) , (VecShuffleOp IntVec 2 W64) , (VecShuffleOp IntVec 32 W8) , (VecShuffleOp IntVec 16 W16) , (VecShuffleOp IntVec 8 W32) , (VecShuffleOp IntVec 4 W64) , (VecShuffleOp IntVec 64 W8) , (VecShuffleOp IntVec 32 W16) , (VecShuffleOp IntVec 16 W32) , (VecShuffleOp IntVec 8 W64) , (VecShuffleOp WordVec 16 W8) , (VecShuffleOp WordVec 8 W16) , (VecShuffleOp WordVec 4 W32) , (VecShuffleOp WordVec 2 W64) , (VecShuffleOp WordVec 32 W8) , (VecShuffleOp WordVec 16 W16) , (VecShuffleOp WordVec 8 W32) , (VecShuffleOp WordVec 4 W64) , (VecShuffleOp WordVec 64 W8) , (VecShuffleOp WordVec 32 W16) , (VecShuffleOp WordVec 16 W32) , (VecShuffleOp WordVec 8 W64) , (VecShuffleOp FloatVec 4 W32) , (VecShuffleOp FloatVec 2 W64) , (VecShuffleOp FloatVec 8 W32) , (VecShuffleOp FloatVec 4 W64) , (VecShuffleOp FloatVec 16 W32) , (VecShuffleOp FloatVec 8 W64) , (VecMinOp IntVec 16 W8) , (VecMinOp IntVec 8 W16) , (VecMinOp IntVec 4 W32) , (VecMinOp IntVec 2 W64) , (VecMinOp IntVec 32 W8) , (VecMinOp IntVec 16 W16) , (VecMinOp IntVec 8 W32) , (VecMinOp IntVec 4 W64) , (VecMinOp IntVec 64 W8) , (VecMinOp IntVec 32 W16) , (VecMinOp IntVec 16 W32) , (VecMinOp IntVec 8 W64) , (VecMinOp WordVec 16 W8) , (VecMinOp WordVec 8 W16) , (VecMinOp WordVec 4 W32) , (VecMinOp WordVec 2 W64) , (VecMinOp WordVec 32 W8) , (VecMinOp WordVec 16 W16) , (VecMinOp WordVec 8 W32) , (VecMinOp WordVec 4 W64) , (VecMinOp WordVec 64 W8) , 
(VecMinOp WordVec 32 W16) , (VecMinOp WordVec 16 W32) , (VecMinOp WordVec 8 W64) , (VecMinOp FloatVec 4 W32) , (VecMinOp FloatVec 2 W64) , (VecMinOp FloatVec 8 W32) , (VecMinOp FloatVec 4 W64) , (VecMinOp FloatVec 16 W32) , (VecMinOp FloatVec 8 W64) , (VecMaxOp IntVec 16 W8) , (VecMaxOp IntVec 8 W16) , (VecMaxOp IntVec 4 W32) , (VecMaxOp IntVec 2 W64) , (VecMaxOp IntVec 32 W8) , (VecMaxOp IntVec 16 W16) , (VecMaxOp IntVec 8 W32) , (VecMaxOp IntVec 4 W64) , (VecMaxOp IntVec 64 W8) , (VecMaxOp IntVec 32 W16) , (VecMaxOp IntVec 16 W32) , (VecMaxOp IntVec 8 W64) , (VecMaxOp WordVec 16 W8) , (VecMaxOp WordVec 8 W16) , (VecMaxOp WordVec 4 W32) , (VecMaxOp WordVec 2 W64) , (VecMaxOp WordVec 32 W8) , (VecMaxOp WordVec 16 W16) , (VecMaxOp WordVec 8 W32) , (VecMaxOp WordVec 4 W64) , (VecMaxOp WordVec 64 W8) , (VecMaxOp WordVec 32 W16) , (VecMaxOp WordVec 16 W32) , (VecMaxOp WordVec 8 W64) , (VecMaxOp FloatVec 4 W32) , (VecMaxOp FloatVec 2 W64) , (VecMaxOp FloatVec 8 W32) , (VecMaxOp FloatVec 4 W64) , (VecMaxOp FloatVec 16 W32) , (VecMaxOp FloatVec 8 W64) , PrefetchByteArrayOp3 , PrefetchMutableByteArrayOp3 , PrefetchAddrOp3 , PrefetchValueOp3 , PrefetchByteArrayOp2 , PrefetchMutableByteArrayOp2 , PrefetchAddrOp2 , PrefetchValueOp2 , PrefetchByteArrayOp1 , PrefetchMutableByteArrayOp1 , PrefetchAddrOp1 , PrefetchValueOp1 , PrefetchByteArrayOp0 , PrefetchMutableByteArrayOp0 , PrefetchAddrOp0 , PrefetchValueOp0 ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-out-of-line.hs-incl0000644000000000000000000001053707346545000026023 0ustar0000000000000000primOpOutOfLine DoubleDecode_2IntOp = True primOpOutOfLine DoubleDecode_Int64Op = True primOpOutOfLine FloatDecode_IntOp = True primOpOutOfLine NewArrayOp = True primOpOutOfLine UnsafeThawArrayOp = True primOpOutOfLine CopyArrayOp = True primOpOutOfLine CopyMutableArrayOp = True primOpOutOfLine CloneArrayOp = True primOpOutOfLine CloneMutableArrayOp = True primOpOutOfLine FreezeArrayOp = True primOpOutOfLine 
ThawArrayOp = True primOpOutOfLine CasArrayOp = True primOpOutOfLine NewSmallArrayOp = True primOpOutOfLine ShrinkSmallMutableArrayOp_Char = True primOpOutOfLine UnsafeThawSmallArrayOp = True primOpOutOfLine CopySmallArrayOp = True primOpOutOfLine CopySmallMutableArrayOp = True primOpOutOfLine CloneSmallArrayOp = True primOpOutOfLine CloneSmallMutableArrayOp = True primOpOutOfLine FreezeSmallArrayOp = True primOpOutOfLine ThawSmallArrayOp = True primOpOutOfLine CasSmallArrayOp = True primOpOutOfLine NewByteArrayOp_Char = True primOpOutOfLine NewPinnedByteArrayOp_Char = True primOpOutOfLine NewAlignedPinnedByteArrayOp_Char = True primOpOutOfLine MutableByteArrayIsPinnedOp = True primOpOutOfLine ByteArrayIsPinnedOp = True primOpOutOfLine ByteArrayIsWeaklyPinnedOp = True primOpOutOfLine MutableByteArrayIsWeaklyPinnedOp = True primOpOutOfLine ShrinkMutableByteArrayOp_Char = True primOpOutOfLine ResizeMutableByteArrayOp_Char = True primOpOutOfLine NewMutVarOp = True primOpOutOfLine AtomicModifyMutVar2Op = True primOpOutOfLine AtomicModifyMutVar_Op = True primOpOutOfLine CasMutVarOp = True primOpOutOfLine CatchOp = True primOpOutOfLine RaiseOp = True primOpOutOfLine RaiseUnderflowOp = True primOpOutOfLine RaiseOverflowOp = True primOpOutOfLine RaiseDivZeroOp = True primOpOutOfLine RaiseIOOp = True primOpOutOfLine MaskAsyncExceptionsOp = True primOpOutOfLine MaskUninterruptibleOp = True primOpOutOfLine UnmaskAsyncExceptionsOp = True primOpOutOfLine MaskStatus = True primOpOutOfLine NewPromptTagOp = True primOpOutOfLine PromptOp = True primOpOutOfLine Control0Op = True primOpOutOfLine AtomicallyOp = True primOpOutOfLine RetryOp = True primOpOutOfLine CatchRetryOp = True primOpOutOfLine CatchSTMOp = True primOpOutOfLine NewTVarOp = True primOpOutOfLine ReadTVarOp = True primOpOutOfLine ReadTVarIOOp = True primOpOutOfLine WriteTVarOp = True primOpOutOfLine NewMVarOp = True primOpOutOfLine TakeMVarOp = True primOpOutOfLine TryTakeMVarOp = True primOpOutOfLine PutMVarOp = True 
primOpOutOfLine TryPutMVarOp = True primOpOutOfLine ReadMVarOp = True primOpOutOfLine TryReadMVarOp = True primOpOutOfLine IsEmptyMVarOp = True primOpOutOfLine NewIOPortOp = True primOpOutOfLine ReadIOPortOp = True primOpOutOfLine WriteIOPortOp = True primOpOutOfLine DelayOp = True primOpOutOfLine WaitReadOp = True primOpOutOfLine WaitWriteOp = True primOpOutOfLine ForkOp = True primOpOutOfLine ForkOnOp = True primOpOutOfLine KillThreadOp = True primOpOutOfLine YieldOp = True primOpOutOfLine LabelThreadOp = True primOpOutOfLine IsCurrentThreadBoundOp = True primOpOutOfLine NoDuplicateOp = True primOpOutOfLine GetThreadLabelOp = True primOpOutOfLine ThreadStatusOp = True primOpOutOfLine ListThreadsOp = True primOpOutOfLine MkWeakOp = True primOpOutOfLine MkWeakNoFinalizerOp = True primOpOutOfLine AddCFinalizerToWeakOp = True primOpOutOfLine DeRefWeakOp = True primOpOutOfLine FinalizeWeakOp = True primOpOutOfLine MakeStablePtrOp = True primOpOutOfLine DeRefStablePtrOp = True primOpOutOfLine MakeStableNameOp = True primOpOutOfLine CompactNewOp = True primOpOutOfLine CompactResizeOp = True primOpOutOfLine CompactContainsOp = True primOpOutOfLine CompactContainsAnyOp = True primOpOutOfLine CompactGetFirstBlockOp = True primOpOutOfLine CompactGetNextBlockOp = True primOpOutOfLine CompactAllocateBlockOp = True primOpOutOfLine CompactFixupPointersOp = True primOpOutOfLine CompactAdd = True primOpOutOfLine CompactAddWithSharing = True primOpOutOfLine CompactSize = True primOpOutOfLine GetSparkOp = True primOpOutOfLine NumSparks = True primOpOutOfLine KeepAliveOp = True primOpOutOfLine MkApUpd0_Op = True primOpOutOfLine NewBCOOp = True primOpOutOfLine UnpackClosureOp = True primOpOutOfLine ClosureSizeOp = True primOpOutOfLine GetApStackValOp = True primOpOutOfLine ClearCCSOp = True primOpOutOfLine WhereFromOp = True primOpOutOfLine TraceEventOp = True primOpOutOfLine TraceEventBinaryOp = True primOpOutOfLine TraceMarkerOp = True primOpOutOfLine SetThreadAllocationCounter = 
True primOpOutOfLine _thisOp = False ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-primop-info.hs-incl0000644000000000000000000074407707346545000026141 0ustar0000000000000000primOpInfo CharGtOp = mkCompare (fsLit "gtChar#") charPrimTy primOpInfo CharGeOp = mkCompare (fsLit "geChar#") charPrimTy primOpInfo CharEqOp = mkCompare (fsLit "eqChar#") charPrimTy primOpInfo CharNeOp = mkCompare (fsLit "neChar#") charPrimTy primOpInfo CharLtOp = mkCompare (fsLit "ltChar#") charPrimTy primOpInfo CharLeOp = mkCompare (fsLit "leChar#") charPrimTy primOpInfo OrdOp = mkGenPrimOp (fsLit "ord#") [] [charPrimTy] (intPrimTy) primOpInfo Int8ToIntOp = mkGenPrimOp (fsLit "int8ToInt#") [] [int8PrimTy] (intPrimTy) primOpInfo IntToInt8Op = mkGenPrimOp (fsLit "intToInt8#") [] [intPrimTy] (int8PrimTy) primOpInfo Int8NegOp = mkGenPrimOp (fsLit "negateInt8#") [] [int8PrimTy] (int8PrimTy) primOpInfo Int8AddOp = mkGenPrimOp (fsLit "plusInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8SubOp = mkGenPrimOp (fsLit "subInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8MulOp = mkGenPrimOp (fsLit "timesInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8QuotOp = mkGenPrimOp (fsLit "quotInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8RemOp = mkGenPrimOp (fsLit "remInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8QuotRemOp = mkGenPrimOp (fsLit "quotRemInt8#") [] [int8PrimTy, int8PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy])) primOpInfo Int8SllOp = mkGenPrimOp (fsLit "uncheckedShiftLInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8ToWord8Op = mkGenPrimOp (fsLit "int8ToWord8#") [] [int8PrimTy] (word8PrimTy) primOpInfo Int8EqOp = mkCompare (fsLit "eqInt8#") int8PrimTy primOpInfo 
Int8GeOp = mkCompare (fsLit "geInt8#") int8PrimTy primOpInfo Int8GtOp = mkCompare (fsLit "gtInt8#") int8PrimTy primOpInfo Int8LeOp = mkCompare (fsLit "leInt8#") int8PrimTy primOpInfo Int8LtOp = mkCompare (fsLit "ltInt8#") int8PrimTy primOpInfo Int8NeOp = mkCompare (fsLit "neInt8#") int8PrimTy primOpInfo Word8ToWordOp = mkGenPrimOp (fsLit "word8ToWord#") [] [word8PrimTy] (wordPrimTy) primOpInfo WordToWord8Op = mkGenPrimOp (fsLit "wordToWord8#") [] [wordPrimTy] (word8PrimTy) primOpInfo Word8AddOp = mkGenPrimOp (fsLit "plusWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8SubOp = mkGenPrimOp (fsLit "subWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8MulOp = mkGenPrimOp (fsLit "timesWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8QuotOp = mkGenPrimOp (fsLit "quotWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8RemOp = mkGenPrimOp (fsLit "remWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8QuotRemOp = mkGenPrimOp (fsLit "quotRemWord8#") [] [word8PrimTy, word8PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy])) primOpInfo Word8AndOp = mkGenPrimOp (fsLit "andWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8OrOp = mkGenPrimOp (fsLit "orWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8XorOp = mkGenPrimOp (fsLit "xorWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8NotOp = mkGenPrimOp (fsLit "notWord8#") [] [word8PrimTy] (word8PrimTy) primOpInfo Word8SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord8#") [] [word8PrimTy, intPrimTy] (word8PrimTy) primOpInfo Word8SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLWord8#") [] [word8PrimTy, intPrimTy] (word8PrimTy) primOpInfo Word8ToInt8Op = mkGenPrimOp (fsLit "word8ToInt8#") [] [word8PrimTy] (int8PrimTy) primOpInfo Word8EqOp = mkCompare (fsLit "eqWord8#") word8PrimTy primOpInfo Word8GeOp = mkCompare (fsLit "geWord8#") word8PrimTy primOpInfo Word8GtOp = mkCompare (fsLit 
"gtWord8#") word8PrimTy primOpInfo Word8LeOp = mkCompare (fsLit "leWord8#") word8PrimTy primOpInfo Word8LtOp = mkCompare (fsLit "ltWord8#") word8PrimTy primOpInfo Word8NeOp = mkCompare (fsLit "neWord8#") word8PrimTy primOpInfo Int16ToIntOp = mkGenPrimOp (fsLit "int16ToInt#") [] [int16PrimTy] (intPrimTy) primOpInfo IntToInt16Op = mkGenPrimOp (fsLit "intToInt16#") [] [intPrimTy] (int16PrimTy) primOpInfo Int16NegOp = mkGenPrimOp (fsLit "negateInt16#") [] [int16PrimTy] (int16PrimTy) primOpInfo Int16AddOp = mkGenPrimOp (fsLit "plusInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16SubOp = mkGenPrimOp (fsLit "subInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16MulOp = mkGenPrimOp (fsLit "timesInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16QuotOp = mkGenPrimOp (fsLit "quotInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16RemOp = mkGenPrimOp (fsLit "remInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16QuotRemOp = mkGenPrimOp (fsLit "quotRemInt16#") [] [int16PrimTy, int16PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy])) primOpInfo Int16SllOp = mkGenPrimOp (fsLit "uncheckedShiftLInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16ToWord16Op = mkGenPrimOp (fsLit "int16ToWord16#") [] [int16PrimTy] (word16PrimTy) primOpInfo Int16EqOp = mkCompare (fsLit "eqInt16#") int16PrimTy primOpInfo Int16GeOp = mkCompare (fsLit "geInt16#") int16PrimTy primOpInfo Int16GtOp = mkCompare (fsLit "gtInt16#") int16PrimTy primOpInfo Int16LeOp = mkCompare (fsLit "leInt16#") int16PrimTy primOpInfo Int16LtOp = mkCompare (fsLit "ltInt16#") int16PrimTy primOpInfo Int16NeOp = mkCompare (fsLit "neInt16#") int16PrimTy primOpInfo Word16ToWordOp = mkGenPrimOp (fsLit 
"word16ToWord#") [] [word16PrimTy] (wordPrimTy) primOpInfo WordToWord16Op = mkGenPrimOp (fsLit "wordToWord16#") [] [wordPrimTy] (word16PrimTy) primOpInfo Word16AddOp = mkGenPrimOp (fsLit "plusWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16SubOp = mkGenPrimOp (fsLit "subWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16MulOp = mkGenPrimOp (fsLit "timesWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16QuotOp = mkGenPrimOp (fsLit "quotWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16RemOp = mkGenPrimOp (fsLit "remWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16QuotRemOp = mkGenPrimOp (fsLit "quotRemWord16#") [] [word16PrimTy, word16PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy])) primOpInfo Word16AndOp = mkGenPrimOp (fsLit "andWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16OrOp = mkGenPrimOp (fsLit "orWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16XorOp = mkGenPrimOp (fsLit "xorWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16NotOp = mkGenPrimOp (fsLit "notWord16#") [] [word16PrimTy] (word16PrimTy) primOpInfo Word16SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord16#") [] [word16PrimTy, intPrimTy] (word16PrimTy) primOpInfo Word16SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLWord16#") [] [word16PrimTy, intPrimTy] (word16PrimTy) primOpInfo Word16ToInt16Op = mkGenPrimOp (fsLit "word16ToInt16#") [] [word16PrimTy] (int16PrimTy) primOpInfo Word16EqOp = mkCompare (fsLit "eqWord16#") word16PrimTy primOpInfo Word16GeOp = mkCompare (fsLit "geWord16#") word16PrimTy primOpInfo Word16GtOp = mkCompare (fsLit "gtWord16#") word16PrimTy primOpInfo Word16LeOp = mkCompare (fsLit "leWord16#") word16PrimTy primOpInfo Word16LtOp = mkCompare (fsLit "ltWord16#") word16PrimTy primOpInfo Word16NeOp = mkCompare (fsLit "neWord16#") word16PrimTy primOpInfo Int32ToIntOp = mkGenPrimOp (fsLit 
"int32ToInt#") [] [int32PrimTy] (intPrimTy) primOpInfo IntToInt32Op = mkGenPrimOp (fsLit "intToInt32#") [] [intPrimTy] (int32PrimTy) primOpInfo Int32NegOp = mkGenPrimOp (fsLit "negateInt32#") [] [int32PrimTy] (int32PrimTy) primOpInfo Int32AddOp = mkGenPrimOp (fsLit "plusInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32SubOp = mkGenPrimOp (fsLit "subInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32MulOp = mkGenPrimOp (fsLit "timesInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32QuotOp = mkGenPrimOp (fsLit "quotInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32RemOp = mkGenPrimOp (fsLit "remInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32QuotRemOp = mkGenPrimOp (fsLit "quotRemInt32#") [] [int32PrimTy, int32PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy])) primOpInfo Int32SllOp = mkGenPrimOp (fsLit "uncheckedShiftLInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32ToWord32Op = mkGenPrimOp (fsLit "int32ToWord32#") [] [int32PrimTy] (word32PrimTy) primOpInfo Int32EqOp = mkCompare (fsLit "eqInt32#") int32PrimTy primOpInfo Int32GeOp = mkCompare (fsLit "geInt32#") int32PrimTy primOpInfo Int32GtOp = mkCompare (fsLit "gtInt32#") int32PrimTy primOpInfo Int32LeOp = mkCompare (fsLit "leInt32#") int32PrimTy primOpInfo Int32LtOp = mkCompare (fsLit "ltInt32#") int32PrimTy primOpInfo Int32NeOp = mkCompare (fsLit "neInt32#") int32PrimTy primOpInfo Word32ToWordOp = mkGenPrimOp (fsLit "word32ToWord#") [] [word32PrimTy] (wordPrimTy) primOpInfo WordToWord32Op = mkGenPrimOp (fsLit "wordToWord32#") [] [wordPrimTy] (word32PrimTy) primOpInfo Word32AddOp = mkGenPrimOp (fsLit "plusWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo 
Word32SubOp = mkGenPrimOp (fsLit "subWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32MulOp = mkGenPrimOp (fsLit "timesWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32QuotOp = mkGenPrimOp (fsLit "quotWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32RemOp = mkGenPrimOp (fsLit "remWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32QuotRemOp = mkGenPrimOp (fsLit "quotRemWord32#") [] [word32PrimTy, word32PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy])) primOpInfo Word32AndOp = mkGenPrimOp (fsLit "andWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32OrOp = mkGenPrimOp (fsLit "orWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32XorOp = mkGenPrimOp (fsLit "xorWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32NotOp = mkGenPrimOp (fsLit "notWord32#") [] [word32PrimTy] (word32PrimTy) primOpInfo Word32SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord32#") [] [word32PrimTy, intPrimTy] (word32PrimTy) primOpInfo Word32SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLWord32#") [] [word32PrimTy, intPrimTy] (word32PrimTy) primOpInfo Word32ToInt32Op = mkGenPrimOp (fsLit "word32ToInt32#") [] [word32PrimTy] (int32PrimTy) primOpInfo Word32EqOp = mkCompare (fsLit "eqWord32#") word32PrimTy primOpInfo Word32GeOp = mkCompare (fsLit "geWord32#") word32PrimTy primOpInfo Word32GtOp = mkCompare (fsLit "gtWord32#") word32PrimTy primOpInfo Word32LeOp = mkCompare (fsLit "leWord32#") word32PrimTy primOpInfo Word32LtOp = mkCompare (fsLit "ltWord32#") word32PrimTy primOpInfo Word32NeOp = mkCompare (fsLit "neWord32#") word32PrimTy primOpInfo Int64ToIntOp = mkGenPrimOp (fsLit "int64ToInt#") [] [int64PrimTy] (intPrimTy) primOpInfo IntToInt64Op = mkGenPrimOp (fsLit "intToInt64#") [] [intPrimTy] (int64PrimTy) primOpInfo Int64NegOp = mkGenPrimOp (fsLit "negateInt64#") [] [int64PrimTy] (int64PrimTy) primOpInfo Int64AddOp = mkGenPrimOp 
(fsLit "plusInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64SubOp = mkGenPrimOp (fsLit "subInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64MulOp = mkGenPrimOp (fsLit "timesInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64QuotOp = mkGenPrimOp (fsLit "quotInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64RemOp = mkGenPrimOp (fsLit "remInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64SllOp = mkGenPrimOp (fsLit "uncheckedIShiftL64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64SraOp = mkGenPrimOp (fsLit "uncheckedIShiftRA64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64SrlOp = mkGenPrimOp (fsLit "uncheckedIShiftRL64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64ToWord64Op = mkGenPrimOp (fsLit "int64ToWord64#") [] [int64PrimTy] (word64PrimTy) primOpInfo Int64EqOp = mkCompare (fsLit "eqInt64#") int64PrimTy primOpInfo Int64GeOp = mkCompare (fsLit "geInt64#") int64PrimTy primOpInfo Int64GtOp = mkCompare (fsLit "gtInt64#") int64PrimTy primOpInfo Int64LeOp = mkCompare (fsLit "leInt64#") int64PrimTy primOpInfo Int64LtOp = mkCompare (fsLit "ltInt64#") int64PrimTy primOpInfo Int64NeOp = mkCompare (fsLit "neInt64#") int64PrimTy primOpInfo Word64ToWordOp = mkGenPrimOp (fsLit "word64ToWord#") [] [word64PrimTy] (wordPrimTy) primOpInfo WordToWord64Op = mkGenPrimOp (fsLit "wordToWord64#") [] [wordPrimTy] (word64PrimTy) primOpInfo Word64AddOp = mkGenPrimOp (fsLit "plusWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64SubOp = mkGenPrimOp (fsLit "subWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64MulOp = mkGenPrimOp (fsLit "timesWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64QuotOp = mkGenPrimOp (fsLit "quotWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64RemOp = mkGenPrimOp (fsLit "remWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) 
primOpInfo Word64AndOp = mkGenPrimOp (fsLit "and64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64OrOp = mkGenPrimOp (fsLit "or64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64XorOp = mkGenPrimOp (fsLit "xor64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64NotOp = mkGenPrimOp (fsLit "not64#") [] [word64PrimTy] (word64PrimTy) primOpInfo Word64SllOp = mkGenPrimOp (fsLit "uncheckedShiftL64#") [] [word64PrimTy, intPrimTy] (word64PrimTy) primOpInfo Word64SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRL64#") [] [word64PrimTy, intPrimTy] (word64PrimTy) primOpInfo Word64ToInt64Op = mkGenPrimOp (fsLit "word64ToInt64#") [] [word64PrimTy] (int64PrimTy) primOpInfo Word64EqOp = mkCompare (fsLit "eqWord64#") word64PrimTy primOpInfo Word64GeOp = mkCompare (fsLit "geWord64#") word64PrimTy primOpInfo Word64GtOp = mkCompare (fsLit "gtWord64#") word64PrimTy primOpInfo Word64LeOp = mkCompare (fsLit "leWord64#") word64PrimTy primOpInfo Word64LtOp = mkCompare (fsLit "ltWord64#") word64PrimTy primOpInfo Word64NeOp = mkCompare (fsLit "neWord64#") word64PrimTy primOpInfo IntAddOp = mkGenPrimOp (fsLit "+#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSubOp = mkGenPrimOp (fsLit "-#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntMulOp = mkGenPrimOp (fsLit "*#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntMul2Op = mkGenPrimOp (fsLit "timesInt2#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy])) primOpInfo IntMulMayOfloOp = mkGenPrimOp (fsLit "mulIntMayOflo#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntQuotOp = mkGenPrimOp (fsLit "quotInt#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntRemOp = mkGenPrimOp (fsLit "remInt#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntQuotRemOp = mkGenPrimOp (fsLit "quotRemInt#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntAndOp = mkGenPrimOp (fsLit "andI#") [] [intPrimTy, intPrimTy] 
(intPrimTy) primOpInfo IntOrOp = mkGenPrimOp (fsLit "orI#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntXorOp = mkGenPrimOp (fsLit "xorI#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntNotOp = mkGenPrimOp (fsLit "notI#") [] [intPrimTy] (intPrimTy) primOpInfo IntNegOp = mkGenPrimOp (fsLit "negateInt#") [] [intPrimTy] (intPrimTy) primOpInfo IntAddCOp = mkGenPrimOp (fsLit "addIntC#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntSubCOp = mkGenPrimOp (fsLit "subIntC#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntGtOp = mkCompare (fsLit ">#") intPrimTy primOpInfo IntGeOp = mkCompare (fsLit ">=#") intPrimTy primOpInfo IntEqOp = mkCompare (fsLit "==#") intPrimTy primOpInfo IntNeOp = mkCompare (fsLit "/=#") intPrimTy primOpInfo IntLtOp = mkCompare (fsLit "<#") intPrimTy primOpInfo IntLeOp = mkCompare (fsLit "<=#") intPrimTy primOpInfo ChrOp = mkGenPrimOp (fsLit "chr#") [] [intPrimTy] (charPrimTy) primOpInfo IntToWordOp = mkGenPrimOp (fsLit "int2Word#") [] [intPrimTy] (wordPrimTy) primOpInfo IntToFloatOp = mkGenPrimOp (fsLit "int2Float#") [] [intPrimTy] (floatPrimTy) primOpInfo IntToDoubleOp = mkGenPrimOp (fsLit "int2Double#") [] [intPrimTy] (doublePrimTy) primOpInfo WordToFloatOp = mkGenPrimOp (fsLit "word2Float#") [] [wordPrimTy] (floatPrimTy) primOpInfo WordToDoubleOp = mkGenPrimOp (fsLit "word2Double#") [] [wordPrimTy] (doublePrimTy) primOpInfo IntSllOp = mkGenPrimOp (fsLit "uncheckedIShiftL#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSraOp = mkGenPrimOp (fsLit "uncheckedIShiftRA#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSrlOp = mkGenPrimOp (fsLit "uncheckedIShiftRL#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo WordAddOp = mkGenPrimOp (fsLit "plusWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordAddCOp = mkGenPrimOp (fsLit "addWordC#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, intPrimTy])) primOpInfo WordSubCOp = 
mkGenPrimOp (fsLit "subWordC#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, intPrimTy])) primOpInfo WordAdd2Op = mkGenPrimOp (fsLit "plusWord2#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordSubOp = mkGenPrimOp (fsLit "minusWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordMulOp = mkGenPrimOp (fsLit "timesWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordMul2Op = mkGenPrimOp (fsLit "timesWord2#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordQuotOp = mkGenPrimOp (fsLit "quotWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordRemOp = mkGenPrimOp (fsLit "remWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordQuotRemOp = mkGenPrimOp (fsLit "quotRemWord#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordQuotRem2Op = mkGenPrimOp (fsLit "quotRemWord2#") [] [wordPrimTy, wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordAndOp = mkGenPrimOp (fsLit "and#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordOrOp = mkGenPrimOp (fsLit "or#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordXorOp = mkGenPrimOp (fsLit "xor#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordNotOp = mkGenPrimOp (fsLit "not#") [] [wordPrimTy] (wordPrimTy) primOpInfo WordSllOp = mkGenPrimOp (fsLit "uncheckedShiftL#") [] [wordPrimTy, intPrimTy] (wordPrimTy) primOpInfo WordSrlOp = mkGenPrimOp (fsLit "uncheckedShiftRL#") [] [wordPrimTy, intPrimTy] (wordPrimTy) primOpInfo WordToIntOp = mkGenPrimOp (fsLit "word2Int#") [] [wordPrimTy] (intPrimTy) primOpInfo WordGtOp = mkCompare (fsLit "gtWord#") wordPrimTy primOpInfo WordGeOp = mkCompare (fsLit "geWord#") wordPrimTy primOpInfo WordEqOp = mkCompare (fsLit "eqWord#") wordPrimTy primOpInfo WordNeOp = mkCompare (fsLit "neWord#") wordPrimTy primOpInfo WordLtOp = mkCompare (fsLit "ltWord#") wordPrimTy 
primOpInfo WordLeOp = mkCompare (fsLit "leWord#") wordPrimTy primOpInfo PopCnt8Op = mkGenPrimOp (fsLit "popCnt8#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt16Op = mkGenPrimOp (fsLit "popCnt16#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt32Op = mkGenPrimOp (fsLit "popCnt32#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt64Op = mkGenPrimOp (fsLit "popCnt64#") [] [word64PrimTy] (wordPrimTy) primOpInfo PopCntOp = mkGenPrimOp (fsLit "popCnt#") [] [wordPrimTy] (wordPrimTy) primOpInfo Pdep8Op = mkGenPrimOp (fsLit "pdep8#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep16Op = mkGenPrimOp (fsLit "pdep16#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep32Op = mkGenPrimOp (fsLit "pdep32#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep64Op = mkGenPrimOp (fsLit "pdep64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo PdepOp = mkGenPrimOp (fsLit "pdep#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext8Op = mkGenPrimOp (fsLit "pext8#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext16Op = mkGenPrimOp (fsLit "pext16#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext32Op = mkGenPrimOp (fsLit "pext32#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext64Op = mkGenPrimOp (fsLit "pext64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo PextOp = mkGenPrimOp (fsLit "pext#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Clz8Op = mkGenPrimOp (fsLit "clz8#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz16Op = mkGenPrimOp (fsLit "clz16#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz32Op = mkGenPrimOp (fsLit "clz32#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz64Op = mkGenPrimOp (fsLit "clz64#") [] [word64PrimTy] (wordPrimTy) primOpInfo ClzOp = mkGenPrimOp (fsLit "clz#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz8Op = mkGenPrimOp (fsLit "ctz8#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz16Op = mkGenPrimOp (fsLit "ctz16#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz32Op = mkGenPrimOp (fsLit 
"ctz32#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz64Op = mkGenPrimOp (fsLit "ctz64#") [] [word64PrimTy] (wordPrimTy) primOpInfo CtzOp = mkGenPrimOp (fsLit "ctz#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap16Op = mkGenPrimOp (fsLit "byteSwap16#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap32Op = mkGenPrimOp (fsLit "byteSwap32#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap64Op = mkGenPrimOp (fsLit "byteSwap64#") [] [word64PrimTy] (word64PrimTy) primOpInfo BSwapOp = mkGenPrimOp (fsLit "byteSwap#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev8Op = mkGenPrimOp (fsLit "bitReverse8#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev16Op = mkGenPrimOp (fsLit "bitReverse16#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev32Op = mkGenPrimOp (fsLit "bitReverse32#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev64Op = mkGenPrimOp (fsLit "bitReverse64#") [] [word64PrimTy] (word64PrimTy) primOpInfo BRevOp = mkGenPrimOp (fsLit "bitReverse#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow8IntOp = mkGenPrimOp (fsLit "narrow8Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow16IntOp = mkGenPrimOp (fsLit "narrow16Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow32IntOp = mkGenPrimOp (fsLit "narrow32Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow8WordOp = mkGenPrimOp (fsLit "narrow8Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow16WordOp = mkGenPrimOp (fsLit "narrow16Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow32WordOp = mkGenPrimOp (fsLit "narrow32Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo DoubleGtOp = mkCompare (fsLit ">##") doublePrimTy primOpInfo DoubleGeOp = mkCompare (fsLit ">=##") doublePrimTy primOpInfo DoubleEqOp = mkCompare (fsLit "==##") doublePrimTy primOpInfo DoubleNeOp = mkCompare (fsLit "/=##") doublePrimTy primOpInfo DoubleLtOp = mkCompare (fsLit "<##") doublePrimTy primOpInfo DoubleLeOp = mkCompare (fsLit "<=##") doublePrimTy primOpInfo DoubleMinOp = mkGenPrimOp (fsLit "minDouble#") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo 
DoubleMaxOp = mkGenPrimOp (fsLit "maxDouble#") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleAddOp = mkGenPrimOp (fsLit "+##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleSubOp = mkGenPrimOp (fsLit "-##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleMulOp = mkGenPrimOp (fsLit "*##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleDivOp = mkGenPrimOp (fsLit "/##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleNegOp = mkGenPrimOp (fsLit "negateDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleFabsOp = mkGenPrimOp (fsLit "fabsDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleToIntOp = mkGenPrimOp (fsLit "double2Int#") [] [doublePrimTy] (intPrimTy) primOpInfo DoubleToFloatOp = mkGenPrimOp (fsLit "double2Float#") [] [doublePrimTy] (floatPrimTy) primOpInfo DoubleExpOp = mkGenPrimOp (fsLit "expDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleExpM1Op = mkGenPrimOp (fsLit "expm1Double#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleLogOp = mkGenPrimOp (fsLit "logDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleLog1POp = mkGenPrimOp (fsLit "log1pDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleSqrtOp = mkGenPrimOp (fsLit "sqrtDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleSinOp = mkGenPrimOp (fsLit "sinDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleCosOp = mkGenPrimOp (fsLit "cosDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleTanOp = mkGenPrimOp (fsLit "tanDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAsinOp = mkGenPrimOp (fsLit "asinDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAcosOp = mkGenPrimOp (fsLit "acosDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAtanOp = mkGenPrimOp (fsLit "atanDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleSinhOp = mkGenPrimOp (fsLit "sinhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleCoshOp = 
mkGenPrimOp (fsLit "coshDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleTanhOp = mkGenPrimOp (fsLit "tanhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAsinhOp = mkGenPrimOp (fsLit "asinhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAcoshOp = mkGenPrimOp (fsLit "acoshDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAtanhOp = mkGenPrimOp (fsLit "atanhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoublePowerOp = mkGenPrimOp (fsLit "**##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleDecode_2IntOp = mkGenPrimOp (fsLit "decodeDouble_2Int#") [] [doublePrimTy] ((mkTupleTy Unboxed [intPrimTy, wordPrimTy, wordPrimTy, intPrimTy])) primOpInfo DoubleDecode_Int64Op = mkGenPrimOp (fsLit "decodeDouble_Int64#") [] [doublePrimTy] ((mkTupleTy Unboxed [int64PrimTy, intPrimTy])) primOpInfo CastDoubleToWord64Op = mkGenPrimOp (fsLit "castDoubleToWord64#") [] [doublePrimTy] (word64PrimTy) primOpInfo CastWord64ToDoubleOp = mkGenPrimOp (fsLit "castWord64ToDouble#") [] [word64PrimTy] (doublePrimTy) primOpInfo FloatGtOp = mkCompare (fsLit "gtFloat#") floatPrimTy primOpInfo FloatGeOp = mkCompare (fsLit "geFloat#") floatPrimTy primOpInfo FloatEqOp = mkCompare (fsLit "eqFloat#") floatPrimTy primOpInfo FloatNeOp = mkCompare (fsLit "neFloat#") floatPrimTy primOpInfo FloatLtOp = mkCompare (fsLit "ltFloat#") floatPrimTy primOpInfo FloatLeOp = mkCompare (fsLit "leFloat#") floatPrimTy primOpInfo FloatMinOp = mkGenPrimOp (fsLit "minFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatMaxOp = mkGenPrimOp (fsLit "maxFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatAddOp = mkGenPrimOp (fsLit "plusFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatSubOp = mkGenPrimOp (fsLit "minusFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatMulOp = mkGenPrimOp (fsLit "timesFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatDivOp = mkGenPrimOp (fsLit 
"divideFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatNegOp = mkGenPrimOp (fsLit "negateFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatFabsOp = mkGenPrimOp (fsLit "fabsFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatToIntOp = mkGenPrimOp (fsLit "float2Int#") [] [floatPrimTy] (intPrimTy) primOpInfo FloatExpOp = mkGenPrimOp (fsLit "expFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatExpM1Op = mkGenPrimOp (fsLit "expm1Float#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatLogOp = mkGenPrimOp (fsLit "logFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatLog1POp = mkGenPrimOp (fsLit "log1pFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSqrtOp = mkGenPrimOp (fsLit "sqrtFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSinOp = mkGenPrimOp (fsLit "sinFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatCosOp = mkGenPrimOp (fsLit "cosFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatTanOp = mkGenPrimOp (fsLit "tanFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAsinOp = mkGenPrimOp (fsLit "asinFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAcosOp = mkGenPrimOp (fsLit "acosFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAtanOp = mkGenPrimOp (fsLit "atanFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSinhOp = mkGenPrimOp (fsLit "sinhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatCoshOp = mkGenPrimOp (fsLit "coshFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatTanhOp = mkGenPrimOp (fsLit "tanhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAsinhOp = mkGenPrimOp (fsLit "asinhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAcoshOp = mkGenPrimOp (fsLit "acoshFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAtanhOp = mkGenPrimOp (fsLit "atanhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatPowerOp = mkGenPrimOp (fsLit "powerFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatToDoubleOp = mkGenPrimOp (fsLit 
"float2Double#") [] [floatPrimTy] (doublePrimTy) primOpInfo FloatDecode_IntOp = mkGenPrimOp (fsLit "decodeFloat_Int#") [] [floatPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo CastFloatToWord32Op = mkGenPrimOp (fsLit "castFloatToWord32#") [] [floatPrimTy] (word32PrimTy) primOpInfo CastWord32ToFloatOp = mkGenPrimOp (fsLit "castWord32ToFloat#") [] [word32PrimTy] (floatPrimTy) primOpInfo FloatFMAdd = mkGenPrimOp (fsLit "fmaddFloat#") [] [floatPrimTy, floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatFMSub = mkGenPrimOp (fsLit "fmsubFloat#") [] [floatPrimTy, floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatFNMAdd = mkGenPrimOp (fsLit "fnmaddFloat#") [] [floatPrimTy, floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatFNMSub = mkGenPrimOp (fsLit "fnmsubFloat#") [] [floatPrimTy, floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo DoubleFMAdd = mkGenPrimOp (fsLit "fmaddDouble#") [] [doublePrimTy, doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleFMSub = mkGenPrimOp (fsLit "fmsubDouble#") [] [doublePrimTy, doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleFNMAdd = mkGenPrimOp (fsLit "fnmaddDouble#") [] [doublePrimTy, doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleFNMSub = mkGenPrimOp (fsLit "fnmsubDouble#") [] [doublePrimTy, doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo NewArrayOp = mkGenPrimOp (fsLit "newArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadArrayOp = mkGenPrimOp (fsLit "readArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteArrayOp = mkGenPrimOp (fsLit "writeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy 
deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SizeofArrayOp = mkGenPrimOp (fsLit "sizeofArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo SizeofMutableArrayOp = mkGenPrimOp (fsLit "sizeofMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy] (intPrimTy) primOpInfo IndexArrayOp = mkGenPrimOp (fsLit "indexArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy] ((mkTupleTy Unboxed [levPolyAlphaTy])) primOpInfo UnsafeFreezeArrayOp = mkGenPrimOp (fsLit "unsafeFreezeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkArrayPrimTy levPolyAlphaTy])) primOpInfo UnsafeThawArrayOp = mkGenPrimOp (fsLit "unsafeThawArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CopyArrayOp = mkGenPrimOp (fsLit "copyArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableArrayOp = mkGenPrimOp (fsLit "copyMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CloneArrayOp = mkGenPrimOp (fsLit "cloneArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy] (mkArrayPrimTy levPolyAlphaTy) primOpInfo CloneMutableArrayOp = mkGenPrimOp (fsLit "cloneMutableArray#") 
[levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo FreezeArrayOp = mkGenPrimOp (fsLit "freezeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkArrayPrimTy levPolyAlphaTy])) primOpInfo ThawArrayOp = mkGenPrimOp (fsLit "thawArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CasArrayOp = mkGenPrimOp (fsLit "casArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo NewSmallArrayOp = mkGenPrimOp (fsLit "newSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo ShrinkSmallMutableArrayOp_Char = mkGenPrimOp (fsLit "shrinkSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ReadSmallArrayOp = mkGenPrimOp (fsLit "readSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteSmallArrayOp = mkGenPrimOp (fsLit "writeSmallArray#") [levity1TyVarInf, 
deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SizeofSmallArrayOp = mkGenPrimOp (fsLit "sizeofSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo SizeofSmallMutableArrayOp = mkGenPrimOp (fsLit "sizeofSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy] (intPrimTy) primOpInfo GetSizeofSmallMutableArrayOp = mkGenPrimOp (fsLit "getSizeofSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo IndexSmallArrayOp = mkGenPrimOp (fsLit "indexSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy] ((mkTupleTy Unboxed [levPolyAlphaTy])) primOpInfo UnsafeFreezeSmallArrayOp = mkGenPrimOp (fsLit "unsafeFreezeSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallArrayPrimTy levPolyAlphaTy])) primOpInfo UnsafeThawSmallArrayOp = mkGenPrimOp (fsLit "unsafeThawSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CopySmallArrayOp = mkGenPrimOp (fsLit "copySmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopySmallMutableArrayOp = mkGenPrimOp (fsLit "copySmallMutableArray#") [levity1TyVarInf, 
deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CloneSmallArrayOp = mkGenPrimOp (fsLit "cloneSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy] (mkSmallArrayPrimTy levPolyAlphaTy) primOpInfo CloneSmallMutableArrayOp = mkGenPrimOp (fsLit "cloneSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo FreezeSmallArrayOp = mkGenPrimOp (fsLit "freezeSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallArrayPrimTy levPolyAlphaTy])) primOpInfo ThawSmallArrayOp = mkGenPrimOp (fsLit "thawSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CasSmallArrayOp = mkGenPrimOp (fsLit "casSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo NewByteArrayOp_Char = mkGenPrimOp (fsLit "newByteArray#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo NewPinnedByteArrayOp_Char = mkGenPrimOp (fsLit "newPinnedByteArray#") [deltaTyVarSpec] [intPrimTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo NewAlignedPinnedByteArrayOp_Char = mkGenPrimOp (fsLit "newAlignedPinnedByteArray#") [deltaTyVarSpec] [intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo MutableByteArrayIsPinnedOp = mkGenPrimOp (fsLit "isMutableByteArrayPinned#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (intPrimTy) primOpInfo ByteArrayIsPinnedOp = mkGenPrimOp (fsLit "isByteArrayPinned#") [] [byteArrayPrimTy] (intPrimTy) primOpInfo ByteArrayIsWeaklyPinnedOp = mkGenPrimOp (fsLit "isByteArrayWeaklyPinned#") [] [byteArrayPrimTy] (intPrimTy) primOpInfo MutableByteArrayIsWeaklyPinnedOp = mkGenPrimOp (fsLit "isMutableByteArrayWeaklyPinned#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (intPrimTy) primOpInfo ByteArrayContents_Char = mkGenPrimOp (fsLit "byteArrayContents#") [] [byteArrayPrimTy] (addrPrimTy) primOpInfo MutableByteArrayContents_Char = mkGenPrimOp (fsLit "mutableByteArrayContents#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (addrPrimTy) primOpInfo ShrinkMutableByteArrayOp_Char = mkGenPrimOp (fsLit "shrinkMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ResizeMutableByteArrayOp_Char = mkGenPrimOp (fsLit "resizeMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo UnsafeFreezeByteArrayOp = mkGenPrimOp (fsLit "unsafeFreezeByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, byteArrayPrimTy])) primOpInfo UnsafeThawByteArrayOp = mkGenPrimOp (fsLit "unsafeThawByteArray#") [deltaTyVarSpec] [byteArrayPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy 
deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo SizeofByteArrayOp = mkGenPrimOp (fsLit "sizeofByteArray#") [] [byteArrayPrimTy] (intPrimTy) primOpInfo SizeofMutableByteArrayOp = mkGenPrimOp (fsLit "sizeofMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (intPrimTy) primOpInfo GetSizeofMutableByteArrayOp = mkGenPrimOp (fsLit "getSizeofMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo IndexByteArrayOp_Char = mkGenPrimOp (fsLit "indexCharArray#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_WideChar = mkGenPrimOp (fsLit "indexWideCharArray#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Int = mkGenPrimOp (fsLit "indexIntArray#") [] [byteArrayPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexByteArrayOp_Word = mkGenPrimOp (fsLit "indexWordArray#") [] [byteArrayPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexByteArrayOp_Addr = mkGenPrimOp (fsLit "indexAddrArray#") [] [byteArrayPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexByteArrayOp_Float = mkGenPrimOp (fsLit "indexFloatArray#") [] [byteArrayPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexByteArrayOp_Double = mkGenPrimOp (fsLit "indexDoubleArray#") [] [byteArrayPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexByteArrayOp_StablePtr = mkGenPrimOp (fsLit "indexStablePtrArray#") [alphaTyVarSpec] [byteArrayPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexByteArrayOp_Int8 = mkGenPrimOp (fsLit "indexInt8Array#") [] [byteArrayPrimTy, intPrimTy] (int8PrimTy) primOpInfo IndexByteArrayOp_Word8 = mkGenPrimOp (fsLit "indexWord8Array#") [] [byteArrayPrimTy, intPrimTy] (word8PrimTy) primOpInfo IndexByteArrayOp_Int16 = mkGenPrimOp (fsLit "indexInt16Array#") [] [byteArrayPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexByteArrayOp_Word16 = mkGenPrimOp (fsLit "indexWord16Array#") [] [byteArrayPrimTy, intPrimTy] (word16PrimTy) 
primOpInfo IndexByteArrayOp_Int32 = mkGenPrimOp (fsLit "indexInt32Array#") [] [byteArrayPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexByteArrayOp_Word32 = mkGenPrimOp (fsLit "indexWord32Array#") [] [byteArrayPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexByteArrayOp_Int64 = mkGenPrimOp (fsLit "indexInt64Array#") [] [byteArrayPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexByteArrayOp_Word64 = mkGenPrimOp (fsLit "indexWord64Array#") [] [byteArrayPrimTy, intPrimTy] (word64PrimTy) primOpInfo IndexByteArrayOp_Word8AsChar = mkGenPrimOp (fsLit "indexWord8ArrayAsChar#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "indexWord8ArrayAsWideChar#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "indexWord8ArrayAsInt#") [] [byteArrayPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "indexWord8ArrayAsWord#") [] [byteArrayPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "indexWord8ArrayAsAddr#") [] [byteArrayPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "indexWord8ArrayAsFloat#") [] [byteArrayPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit "indexWord8ArrayAsDouble#") [] [byteArrayPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexByteArrayOp_Word8AsStablePtr = mkGenPrimOp (fsLit "indexWord8ArrayAsStablePtr#") [alphaTyVarSpec] [byteArrayPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt16#") [] [byteArrayPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord16#") [] [byteArrayPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt32#") [] [byteArrayPrimTy, intPrimTy] 
(int32PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord32#") [] [byteArrayPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt64#") [] [byteArrayPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord64 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord64#") [] [byteArrayPrimTy, intPrimTy] (word64PrimTy) primOpInfo ReadByteArrayOp_Char = mkGenPrimOp (fsLit "readCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_WideChar = mkGenPrimOp (fsLit "readWideCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Int = mkGenPrimOp (fsLit "readIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadByteArrayOp_Word = mkGenPrimOp (fsLit "readWordArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadByteArrayOp_Addr = mkGenPrimOp (fsLit "readAddrArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadByteArrayOp_Float = mkGenPrimOp (fsLit "readFloatArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadByteArrayOp_Double = mkGenPrimOp (fsLit "readDoubleArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadByteArrayOp_StablePtr = mkGenPrimOp 
(fsLit "readStablePtrArray#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadByteArrayOp_Int8 = mkGenPrimOp (fsLit "readInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo ReadByteArrayOp_Word8 = mkGenPrimOp (fsLit "readWord8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo ReadByteArrayOp_Int16 = mkGenPrimOp (fsLit "readInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadByteArrayOp_Word16 = mkGenPrimOp (fsLit "readWord16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadByteArrayOp_Int32 = mkGenPrimOp (fsLit "readInt32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadByteArrayOp_Word32 = mkGenPrimOp (fsLit "readWord32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadByteArrayOp_Int64 = mkGenPrimOp (fsLit "readInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadByteArrayOp_Word64 = mkGenPrimOp (fsLit "readWord64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo ReadByteArrayOp_Word8AsChar = 
mkGenPrimOp (fsLit "readWord8ArrayAsChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "readWord8ArrayAsWideChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "readWord8ArrayAsInt#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "readWord8ArrayAsWord#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "readWord8ArrayAsAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "readWord8ArrayAsFloat#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit "readWord8ArrayAsDouble#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadByteArrayOp_Word8AsStablePtr = mkGenPrimOp (fsLit "readWord8ArrayAsStablePtr#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "readWord8ArrayAsInt16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "readWord8ArrayAsWord16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "readWord8ArrayAsInt32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "readWord8ArrayAsWord32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "readWord8ArrayAsInt64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord64 = mkGenPrimOp (fsLit "readWord8ArrayAsWord64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo WriteByteArrayOp_Char = mkGenPrimOp (fsLit "writeCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_WideChar = mkGenPrimOp (fsLit "writeWideCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int = mkGenPrimOp (fsLit "writeIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word = mkGenPrimOp (fsLit "writeWordArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, wordPrimTy, 
mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Addr = mkGenPrimOp (fsLit "writeAddrArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Float = mkGenPrimOp (fsLit "writeFloatArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Double = mkGenPrimOp (fsLit "writeDoubleArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_StablePtr = mkGenPrimOp (fsLit "writeStablePtrArray#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int8 = mkGenPrimOp (fsLit "writeInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8 = mkGenPrimOp (fsLit "writeWord8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int16 = mkGenPrimOp (fsLit "writeInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word16 = mkGenPrimOp (fsLit "writeWord16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int32 = mkGenPrimOp (fsLit "writeInt32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word32 = mkGenPrimOp (fsLit "writeWord32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, 
intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int64 = mkGenPrimOp (fsLit "writeInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word64 = mkGenPrimOp (fsLit "writeWord64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsChar = mkGenPrimOp (fsLit "writeWord8ArrayAsChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "writeWord8ArrayAsWideChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "writeWord8ArrayAsInt#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "writeWord8ArrayAsWord#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "writeWord8ArrayAsAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "writeWord8ArrayAsFloat#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit "writeWord8ArrayAsDouble#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsStablePtr = 
mkGenPrimOp (fsLit "writeWord8ArrayAsStablePtr#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "writeWord8ArrayAsWord16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "writeWord8ArrayAsWord32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord64 = mkGenPrimOp (fsLit "writeWord8ArrayAsWord64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CompareByteArraysOp = mkGenPrimOp (fsLit "compareByteArrays#") [] [byteArrayPrimTy, intPrimTy, byteArrayPrimTy, intPrimTy, intPrimTy] (intPrimTy) primOpInfo CopyByteArrayOp = mkGenPrimOp (fsLit "copyByteArray#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableByteArrayOp = mkGenPrimOp (fsLit "copyMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, 
mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableByteArrayNonOverlappingOp = mkGenPrimOp (fsLit "copyMutableByteArrayNonOverlapping#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyByteArrayToAddrOp = mkGenPrimOp (fsLit "copyByteArrayToAddr#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableByteArrayToAddrOp = mkGenPrimOp (fsLit "copyMutableByteArrayToAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyAddrToByteArrayOp = mkGenPrimOp (fsLit "copyAddrToByteArray#") [deltaTyVarSpec] [addrPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyAddrToAddrOp = mkGenPrimOp (fsLit "copyAddrToAddr#") [] [addrPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo CopyAddrToAddrNonOverlappingOp = mkGenPrimOp (fsLit "copyAddrToAddrNonOverlapping#") [] [addrPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo SetByteArrayOp = mkGenPrimOp (fsLit "setByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SetAddrRangeOp = mkGenPrimOp (fsLit "setAddrRange#") [] [addrPrimTy, intPrimTy, intPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo AtomicReadByteArrayOp_Int = mkGenPrimOp (fsLit "atomicReadIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo AtomicWriteByteArrayOp_Int = mkGenPrimOp (fsLit 
"atomicWriteIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CasByteArrayOp_Int = mkGenPrimOp (fsLit "casIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo CasByteArrayOp_Int8 = mkGenPrimOp (fsLit "casInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8PrimTy, int8PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo CasByteArrayOp_Int16 = mkGenPrimOp (fsLit "casInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, int16PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo CasByteArrayOp_Int32 = mkGenPrimOp (fsLit "casInt32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, int32PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo CasByteArrayOp_Int64 = mkGenPrimOp (fsLit "casInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, int64PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo FetchAddByteArrayOp_Int = mkGenPrimOp (fsLit "fetchAddIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchSubByteArrayOp_Int = mkGenPrimOp (fsLit "fetchSubIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchAndByteArrayOp_Int = mkGenPrimOp (fsLit "fetchAndIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed 
[mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchNandByteArrayOp_Int = mkGenPrimOp (fsLit "fetchNandIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchOrByteArrayOp_Int = mkGenPrimOp (fsLit "fetchOrIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchXorByteArrayOp_Int = mkGenPrimOp (fsLit "fetchXorIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo AddrAddOp = mkGenPrimOp (fsLit "plusAddr#") [] [addrPrimTy, intPrimTy] (addrPrimTy) primOpInfo AddrSubOp = mkGenPrimOp (fsLit "minusAddr#") [] [addrPrimTy, addrPrimTy] (intPrimTy) primOpInfo AddrRemOp = mkGenPrimOp (fsLit "remAddr#") [] [addrPrimTy, intPrimTy] (intPrimTy) primOpInfo AddrToIntOp = mkGenPrimOp (fsLit "addr2Int#") [] [addrPrimTy] (intPrimTy) primOpInfo IntToAddrOp = mkGenPrimOp (fsLit "int2Addr#") [] [intPrimTy] (addrPrimTy) primOpInfo AddrGtOp = mkCompare (fsLit "gtAddr#") addrPrimTy primOpInfo AddrGeOp = mkCompare (fsLit "geAddr#") addrPrimTy primOpInfo AddrEqOp = mkCompare (fsLit "eqAddr#") addrPrimTy primOpInfo AddrNeOp = mkCompare (fsLit "neAddr#") addrPrimTy primOpInfo AddrLtOp = mkCompare (fsLit "ltAddr#") addrPrimTy primOpInfo AddrLeOp = mkCompare (fsLit "leAddr#") addrPrimTy primOpInfo IndexOffAddrOp_Char = mkGenPrimOp (fsLit "indexCharOffAddr#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_WideChar = mkGenPrimOp (fsLit "indexWideCharOffAddr#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_Int = mkGenPrimOp (fsLit "indexIntOffAddr#") [] [addrPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexOffAddrOp_Word = mkGenPrimOp (fsLit "indexWordOffAddr#") [] [addrPrimTy, intPrimTy] 
(wordPrimTy) primOpInfo IndexOffAddrOp_Addr = mkGenPrimOp (fsLit "indexAddrOffAddr#") [] [addrPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexOffAddrOp_Float = mkGenPrimOp (fsLit "indexFloatOffAddr#") [] [addrPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexOffAddrOp_Double = mkGenPrimOp (fsLit "indexDoubleOffAddr#") [] [addrPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexOffAddrOp_StablePtr = mkGenPrimOp (fsLit "indexStablePtrOffAddr#") [alphaTyVarSpec] [addrPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexOffAddrOp_Int8 = mkGenPrimOp (fsLit "indexInt8OffAddr#") [] [addrPrimTy, intPrimTy] (int8PrimTy) primOpInfo IndexOffAddrOp_Word8 = mkGenPrimOp (fsLit "indexWord8OffAddr#") [] [addrPrimTy, intPrimTy] (word8PrimTy) primOpInfo IndexOffAddrOp_Int16 = mkGenPrimOp (fsLit "indexInt16OffAddr#") [] [addrPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexOffAddrOp_Word16 = mkGenPrimOp (fsLit "indexWord16OffAddr#") [] [addrPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexOffAddrOp_Int32 = mkGenPrimOp (fsLit "indexInt32OffAddr#") [] [addrPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexOffAddrOp_Word32 = mkGenPrimOp (fsLit "indexWord32OffAddr#") [] [addrPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexOffAddrOp_Int64 = mkGenPrimOp (fsLit "indexInt64OffAddr#") [] [addrPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexOffAddrOp_Word64 = mkGenPrimOp (fsLit "indexWord64OffAddr#") [] [addrPrimTy, intPrimTy] (word64PrimTy) primOpInfo IndexOffAddrOp_Word8AsChar = mkGenPrimOp (fsLit "indexWord8OffAddrAsChar#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_Word8AsWideChar = mkGenPrimOp (fsLit "indexWord8OffAddrAsWideChar#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_Word8AsInt = mkGenPrimOp (fsLit "indexWord8OffAddrAsInt#") [] [addrPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexOffAddrOp_Word8AsWord = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord#") [] [addrPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexOffAddrOp_Word8AsAddr = 
mkGenPrimOp (fsLit "indexWord8OffAddrAsAddr#") [] [addrPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexOffAddrOp_Word8AsFloat = mkGenPrimOp (fsLit "indexWord8OffAddrAsFloat#") [] [addrPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexOffAddrOp_Word8AsDouble = mkGenPrimOp (fsLit "indexWord8OffAddrAsDouble#") [] [addrPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexOffAddrOp_Word8AsStablePtr = mkGenPrimOp (fsLit "indexWord8OffAddrAsStablePtr#") [alphaTyVarSpec] [addrPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexOffAddrOp_Word8AsInt16 = mkGenPrimOp (fsLit "indexWord8OffAddrAsInt16#") [] [addrPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexOffAddrOp_Word8AsWord16 = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord16#") [] [addrPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexOffAddrOp_Word8AsInt32 = mkGenPrimOp (fsLit "indexWord8OffAddrAsInt32#") [] [addrPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexOffAddrOp_Word8AsWord32 = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord32#") [] [addrPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexOffAddrOp_Word8AsInt64 = mkGenPrimOp (fsLit "indexWord8OffAddrAsInt64#") [] [addrPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexOffAddrOp_Word8AsWord64 = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord64#") [] [addrPrimTy, intPrimTy] (word64PrimTy) primOpInfo ReadOffAddrOp_Char = mkGenPrimOp (fsLit "readCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_WideChar = mkGenPrimOp (fsLit "readWideCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_Int = mkGenPrimOp (fsLit "readIntOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadOffAddrOp_Word = mkGenPrimOp (fsLit "readWordOffAddr#") [deltaTyVarSpec] [addrPrimTy, 
intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadOffAddrOp_Addr = mkGenPrimOp (fsLit "readAddrOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadOffAddrOp_Float = mkGenPrimOp (fsLit "readFloatOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadOffAddrOp_Double = mkGenPrimOp (fsLit "readDoubleOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadOffAddrOp_StablePtr = mkGenPrimOp (fsLit "readStablePtrOffAddr#") [deltaTyVarSpec, alphaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadOffAddrOp_Int8 = mkGenPrimOp (fsLit "readInt8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo ReadOffAddrOp_Word8 = mkGenPrimOp (fsLit "readWord8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo ReadOffAddrOp_Int16 = mkGenPrimOp (fsLit "readInt16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadOffAddrOp_Word16 = mkGenPrimOp (fsLit "readWord16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadOffAddrOp_Int32 = mkGenPrimOp (fsLit "readInt32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadOffAddrOp_Word32 = mkGenPrimOp (fsLit "readWord32OffAddr#") [deltaTyVarSpec] [addrPrimTy, 
intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadOffAddrOp_Int64 = mkGenPrimOp (fsLit "readInt64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadOffAddrOp_Word64 = mkGenPrimOp (fsLit "readWord64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo ReadOffAddrOp_Word8AsChar = mkGenPrimOp (fsLit "readWord8OffAddrAsChar#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_Word8AsWideChar = mkGenPrimOp (fsLit "readWord8OffAddrAsWideChar#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_Word8AsInt = mkGenPrimOp (fsLit "readWord8OffAddrAsInt#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadOffAddrOp_Word8AsWord = mkGenPrimOp (fsLit "readWord8OffAddrAsWord#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadOffAddrOp_Word8AsAddr = mkGenPrimOp (fsLit "readWord8OffAddrAsAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadOffAddrOp_Word8AsFloat = mkGenPrimOp (fsLit "readWord8OffAddrAsFloat#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadOffAddrOp_Word8AsDouble = mkGenPrimOp (fsLit "readWord8OffAddrAsDouble#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadOffAddrOp_Word8AsStablePtr = 
mkGenPrimOp (fsLit "readWord8OffAddrAsStablePtr#") [deltaTyVarSpec, alphaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadOffAddrOp_Word8AsInt16 = mkGenPrimOp (fsLit "readWord8OffAddrAsInt16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadOffAddrOp_Word8AsWord16 = mkGenPrimOp (fsLit "readWord8OffAddrAsWord16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadOffAddrOp_Word8AsInt32 = mkGenPrimOp (fsLit "readWord8OffAddrAsInt32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadOffAddrOp_Word8AsWord32 = mkGenPrimOp (fsLit "readWord8OffAddrAsWord32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadOffAddrOp_Word8AsInt64 = mkGenPrimOp (fsLit "readWord8OffAddrAsInt64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadOffAddrOp_Word8AsWord64 = mkGenPrimOp (fsLit "readWord8OffAddrAsWord64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo WriteOffAddrOp_Char = mkGenPrimOp (fsLit "writeCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_WideChar = mkGenPrimOp (fsLit "writeWideCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int = mkGenPrimOp (fsLit "writeIntOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) 
primOpInfo WriteOffAddrOp_Word = mkGenPrimOp (fsLit "writeWordOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Addr = mkGenPrimOp (fsLit "writeAddrOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Float = mkGenPrimOp (fsLit "writeFloatOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Double = mkGenPrimOp (fsLit "writeDoubleOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_StablePtr = mkGenPrimOp (fsLit "writeStablePtrOffAddr#") [alphaTyVarSpec, deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int8 = mkGenPrimOp (fsLit "writeInt8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8 = mkGenPrimOp (fsLit "writeWord8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int16 = mkGenPrimOp (fsLit "writeInt16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word16 = mkGenPrimOp (fsLit "writeWord16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int32 = mkGenPrimOp (fsLit "writeInt32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word32 = mkGenPrimOp (fsLit "writeWord32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
WriteOffAddrOp_Int64 = mkGenPrimOp (fsLit "writeInt64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word64 = mkGenPrimOp (fsLit "writeWord64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsChar = mkGenPrimOp (fsLit "writeWord8OffAddrAsChar#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsWideChar = mkGenPrimOp (fsLit "writeWord8OffAddrAsWideChar#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsInt = mkGenPrimOp (fsLit "writeWord8OffAddrAsInt#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsWord = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsAddr = mkGenPrimOp (fsLit "writeWord8OffAddrAsAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsFloat = mkGenPrimOp (fsLit "writeWord8OffAddrAsFloat#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsDouble = mkGenPrimOp (fsLit "writeWord8OffAddrAsDouble#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsStablePtr = mkGenPrimOp (fsLit "writeWord8OffAddrAsStablePtr#") [alphaTyVarSpec, deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsInt16 = mkGenPrimOp (fsLit "writeWord8OffAddrAsInt16#") 
[deltaTyVarSpec] [addrPrimTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsWord16 = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsInt32 = mkGenPrimOp (fsLit "writeWord8OffAddrAsInt32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsWord32 = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsInt64 = mkGenPrimOp (fsLit "writeWord8OffAddrAsInt64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8AsWord64 = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo InterlockedExchange_Addr = mkGenPrimOp (fsLit "atomicExchangeAddrAddr#") [deltaTyVarSpec] [addrPrimTy, addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo InterlockedExchange_Word = mkGenPrimOp (fsLit "atomicExchangeWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo CasAddrOp_Addr = mkGenPrimOp (fsLit "atomicCasAddrAddr#") [deltaTyVarSpec] [addrPrimTy, addrPrimTy, addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo CasAddrOp_Word = mkGenPrimOp (fsLit "atomicCasWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo CasAddrOp_Word8 = mkGenPrimOp (fsLit "atomicCasWord8Addr#") [deltaTyVarSpec] [addrPrimTy, word8PrimTy, 
word8PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo CasAddrOp_Word16 = mkGenPrimOp (fsLit "atomicCasWord16Addr#") [deltaTyVarSpec] [addrPrimTy, word16PrimTy, word16PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo CasAddrOp_Word32 = mkGenPrimOp (fsLit "atomicCasWord32Addr#") [deltaTyVarSpec] [addrPrimTy, word32PrimTy, word32PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo CasAddrOp_Word64 = mkGenPrimOp (fsLit "atomicCasWord64Addr#") [deltaTyVarSpec] [addrPrimTy, word64PrimTy, word64PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo FetchAddAddrOp_Word = mkGenPrimOp (fsLit "fetchAddWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchSubAddrOp_Word = mkGenPrimOp (fsLit "fetchSubWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchAndAddrOp_Word = mkGenPrimOp (fsLit "fetchAndWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchNandAddrOp_Word = mkGenPrimOp (fsLit "fetchNandWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchOrAddrOp_Word = mkGenPrimOp (fsLit "fetchOrWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchXorAddrOp_Word = mkGenPrimOp (fsLit "fetchXorWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo AtomicReadAddrOp_Word = mkGenPrimOp (fsLit "atomicReadWordAddr#") 
[deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo AtomicWriteAddrOp_Word = mkGenPrimOp (fsLit "atomicWriteWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo NewMutVarOp = mkGenPrimOp (fsLit "newMutVar#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadMutVarOp = mkGenPrimOp (fsLit "readMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteMutVarOp = mkGenPrimOp (fsLit "writeMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo AtomicSwapMutVarOp = mkGenPrimOp (fsLit "atomicSwapMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo AtomicModifyMutVar2Op = mkGenPrimOp (fsLit "atomicModifyMutVar2#") [deltaTyVarSpec, alphaTyVarSpec, gammaTyVarSpec] [mkMutVarPrimTy deltaTy alphaTy, (mkVisFunTyMany (alphaTy) (gammaTy)), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy, gammaTy])) primOpInfo AtomicModifyMutVar_Op = mkGenPrimOp (fsLit "atomicModifyMutVar_#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutVarPrimTy deltaTy alphaTy, (mkVisFunTyMany (alphaTy) (alphaTy)), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy, alphaTy])) primOpInfo CasMutVarOp = mkGenPrimOp (fsLit "casMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, levPolyAlphaTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo CatchOp = mkGenPrimOp (fsLit "catch#") [runtimeRep1TyVarInf, levity2TyVarInf, openAlphaTyVarSpec, levPolyBetaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), (mkVisFunTyMany (levPolyBetaTy) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo RaiseOp = mkGenPrimOp (fsLit "raise#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy] (openBetaTy) primOpInfo RaiseUnderflowOp = mkGenPrimOp (fsLit "raiseUnderflow#") [runtimeRep2TyVarInf, openBetaTyVarSpec] [(mkTupleTy Unboxed [])] (openBetaTy) primOpInfo RaiseOverflowOp = mkGenPrimOp (fsLit "raiseOverflow#") [runtimeRep2TyVarInf, openBetaTyVarSpec] [(mkTupleTy Unboxed [])] (openBetaTy) primOpInfo RaiseDivZeroOp = mkGenPrimOp (fsLit "raiseDivZero#") [runtimeRep2TyVarInf, openBetaTyVarSpec] [(mkTupleTy Unboxed [])] (openBetaTy) primOpInfo RaiseIOOp = mkGenPrimOp (fsLit "raiseIO#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openBetaTy])) primOpInfo MaskAsyncExceptionsOp = mkGenPrimOp (fsLit "maskAsyncExceptions#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo MaskUninterruptibleOp = mkGenPrimOp (fsLit "maskUninterruptible#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy 
Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo UnmaskAsyncExceptionsOp = mkGenPrimOp (fsLit "unmaskAsyncExceptions#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo MaskStatus = mkGenPrimOp (fsLit "getMaskingState#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo NewPromptTagOp = mkGenPrimOp (fsLit "newPromptTag#") [alphaTyVarSpec] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkPromptTagPrimTy alphaTy])) primOpInfo PromptOp = mkGenPrimOp (fsLit "prompt#") [alphaTyVarSpec] [mkPromptTagPrimTy alphaTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])) primOpInfo Control0Op = mkGenPrimOp (fsLit "control0#") [runtimeRep2TyVarInf, alphaTyVarSpec, openBetaTyVarSpec] [mkPromptTagPrimTy alphaTy, (mkVisFunTyMany ((mkVisFunTyMany ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openBetaTy])))) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])))))) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy]))))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openBetaTy])) primOpInfo AtomicallyOp = mkGenPrimOp (fsLit "atomically#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo RetryOp = mkGenPrimOp (fsLit "retry#") [levity1TyVarInf, levPolyAlphaTyVarSpec] 
[mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo CatchRetryOp = mkGenPrimOp (fsLit "catchRetry#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo CatchSTMOp = mkGenPrimOp (fsLit "catchSTM#") [levity1TyVarInf, levPolyAlphaTyVarSpec, betaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), (mkVisFunTyMany (betaTy) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo NewTVarOp = mkGenPrimOp (fsLit "newTVar#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkTVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadTVarOp = mkGenPrimOp (fsLit "readTVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo ReadTVarIOOp = mkGenPrimOp (fsLit "readTVarIO#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteTVarOp = mkGenPrimOp (fsLit "writeTVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo NewMVarOp = mkGenPrimOp (fsLit "newMVar#") [levity1TyVarInf, deltaTyVarSpec, 
levPolyAlphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo TakeMVarOp = mkGenPrimOp (fsLit "takeMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo TryTakeMVarOp = mkGenPrimOp (fsLit "tryTakeMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo PutMVarOp = mkGenPrimOp (fsLit "putMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TryPutMVarOp = mkGenPrimOp (fsLit "tryPutMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadMVarOp = mkGenPrimOp (fsLit "readMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo TryReadMVarOp = mkGenPrimOp (fsLit "tryReadMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo IsEmptyMVarOp = mkGenPrimOp (fsLit "isEmptyMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo NewIOPortOp = mkGenPrimOp (fsLit "newIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, 
mkIOPortPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadIOPortOp = mkGenPrimOp (fsLit "readIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkIOPortPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteIOPortOp = mkGenPrimOp (fsLit "writeIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkIOPortPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo DelayOp = mkGenPrimOp (fsLit "delay#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WaitReadOp = mkGenPrimOp (fsLit "waitRead#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WaitWriteOp = mkGenPrimOp (fsLit "waitWrite#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ForkOp = mkGenPrimOp (fsLit "fork#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo ForkOnOp = mkGenPrimOp (fsLit "forkOn#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [intPrimTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo KillThreadOp = mkGenPrimOp (fsLit "killThread#") [alphaTyVarSpec] [threadIdPrimTy, alphaTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo YieldOp = mkGenPrimOp (fsLit "yield#") [] [mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo MyThreadIdOp = mkGenPrimOp (fsLit "myThreadId#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo LabelThreadOp = mkGenPrimOp 
(fsLit "labelThread#") [] [threadIdPrimTy, byteArrayPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo IsCurrentThreadBoundOp = mkGenPrimOp (fsLit "isCurrentThreadBound#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo NoDuplicateOp = mkGenPrimOp (fsLit "noDuplicate#") [deltaTyVarSpec] [mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo GetThreadLabelOp = mkGenPrimOp (fsLit "threadLabel#") [] [threadIdPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, byteArrayPrimTy])) primOpInfo ThreadStatusOp = mkGenPrimOp (fsLit "threadStatus#") [] [threadIdPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, intPrimTy, intPrimTy])) primOpInfo ListThreadsOp = mkGenPrimOp (fsLit "listThreads#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkArrayPrimTy threadIdPrimTy])) primOpInfo MkWeakOp = mkGenPrimOp (fsLit "mkWeak#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec, gammaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, gammaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkWeakPrimTy levPolyBetaTy])) primOpInfo MkWeakNoFinalizerOp = mkGenPrimOp (fsLit "mkWeakNoFinalizer#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkWeakPrimTy levPolyBetaTy])) primOpInfo AddCFinalizerToWeakOp = mkGenPrimOp (fsLit "addCFinalizerToWeak#") [levity2TyVarInf, levPolyBetaTyVarSpec] [addrPrimTy, addrPrimTy, intPrimTy, addrPrimTy, mkWeakPrimTy levPolyBetaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo DeRefWeakOp = mkGenPrimOp (fsLit "deRefWeak#") 
[levity1TyVarInf, levPolyAlphaTyVarSpec] [mkWeakPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, levPolyAlphaTy])) primOpInfo FinalizeWeakOp = mkGenPrimOp (fsLit "finalizeWeak#") [levity1TyVarInf, levPolyAlphaTyVarSpec, betaTyVarSpec] [mkWeakPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, betaTy])))])) primOpInfo TouchOp = mkGenPrimOp (fsLit "touch#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo MakeStablePtrOp = mkGenPrimOp (fsLit "makeStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkStablePtrPrimTy levPolyAlphaTy])) primOpInfo DeRefStablePtrOp = mkGenPrimOp (fsLit "deRefStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStablePtrPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo EqStablePtrOp = mkGenPrimOp (fsLit "eqStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStablePtrPrimTy levPolyAlphaTy, mkStablePtrPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo MakeStableNameOp = mkGenPrimOp (fsLit "makeStableName#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkStableNamePrimTy levPolyAlphaTy])) primOpInfo StableNameToIntOp = mkGenPrimOp (fsLit "stableNameToInt#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStableNamePrimTy levPolyAlphaTy] (intPrimTy) primOpInfo CompactNewOp = mkGenPrimOp (fsLit "compactNew#") [] [wordPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, compactPrimTy])) primOpInfo CompactResizeOp = mkGenPrimOp (fsLit "compactResize#") [] [compactPrimTy, 
wordPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo CompactContainsOp = mkGenPrimOp (fsLit "compactContains#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo CompactContainsAnyOp = mkGenPrimOp (fsLit "compactContainsAny#") [alphaTyVarSpec] [alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo CompactGetFirstBlockOp = mkGenPrimOp (fsLit "compactGetFirstBlock#") [] [compactPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy, wordPrimTy])) primOpInfo CompactGetNextBlockOp = mkGenPrimOp (fsLit "compactGetNextBlock#") [] [compactPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy, wordPrimTy])) primOpInfo CompactAllocateBlockOp = mkGenPrimOp (fsLit "compactAllocateBlock#") [] [wordPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy])) primOpInfo CompactFixupPointersOp = mkGenPrimOp (fsLit "compactFixupPointers#") [] [addrPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, compactPrimTy, addrPrimTy])) primOpInfo CompactAdd = mkGenPrimOp (fsLit "compactAdd#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])) primOpInfo CompactAddWithSharing = mkGenPrimOp (fsLit "compactAddWithSharing#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])) primOpInfo CompactSize = mkGenPrimOp (fsLit "compactSize#") [] [compactPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, wordPrimTy])) primOpInfo ReallyUnsafePtrEqualityOp = mkGenPrimOp (fsLit "reallyUnsafePtrEquality#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, 
levPolyBetaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy] (intPrimTy) primOpInfo ParOp = mkGenPrimOp (fsLit "par#") [alphaTyVarSpec] [alphaTy] (intPrimTy) primOpInfo SparkOp = mkGenPrimOp (fsLit "spark#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy])) primOpInfo GetSparkOp = mkGenPrimOp (fsLit "getSpark#") [deltaTyVarSpec, alphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, alphaTy])) primOpInfo NumSparks = mkGenPrimOp (fsLit "numSparks#") [deltaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo KeepAliveOp = mkGenPrimOp (fsLit "keepAlive#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy, (mkVisFunTyMany (mkStatePrimTy deltaTy) (openBetaTy))] (openBetaTy) primOpInfo DataToTagSmallOp = mkGenPrimOp (fsLit "dataToTagSmall#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy] (intPrimTy) primOpInfo DataToTagLargeOp = mkGenPrimOp (fsLit "dataToTagLarge#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy] (intPrimTy) primOpInfo TagToEnumOp = mkGenPrimOp (fsLit "tagToEnum#") [alphaTyVarSpec] [intPrimTy] (alphaTy) primOpInfo AddrToAnyOp = mkGenPrimOp (fsLit "addrToAny#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [addrPrimTy] ((mkTupleTy Unboxed [levPolyAlphaTy])) primOpInfo AnyToAddrOp = mkGenPrimOp (fsLit "anyToAddr#") [alphaTyVarSpec] [alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy])) primOpInfo MkApUpd0_Op = mkGenPrimOp (fsLit "mkApUpd0#") [alphaTyVarSpec] [bcoPrimTy] ((mkTupleTy Unboxed [alphaTy])) primOpInfo NewBCOOp = mkGenPrimOp (fsLit "newBCO#") [alphaTyVarSpec, deltaTyVarSpec] [byteArrayPrimTy, byteArrayPrimTy, mkArrayPrimTy alphaTy, intPrimTy, byteArrayPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, bcoPrimTy])) primOpInfo 
UnpackClosureOp = mkGenPrimOp (fsLit "unpackClosure#") [alphaTyVarSpec, betaTyVarSpec] [alphaTy] ((mkTupleTy Unboxed [addrPrimTy, byteArrayPrimTy, mkArrayPrimTy betaTy])) primOpInfo ClosureSizeOp = mkGenPrimOp (fsLit "closureSize#") [alphaTyVarSpec] [alphaTy] (intPrimTy) primOpInfo GetApStackValOp = mkGenPrimOp (fsLit "getApStackVal#") [alphaTyVarSpec, betaTyVarSpec] [alphaTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, betaTy])) primOpInfo GetCCSOfOp = mkGenPrimOp (fsLit "getCCSOf#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo GetCurrentCCSOp = mkGenPrimOp (fsLit "getCurrentCCS#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ClearCCSOp = mkGenPrimOp (fsLit "clearCCS#") [deltaTyVarSpec, alphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy deltaTy) ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy]))), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy])) primOpInfo WhereFromOp = mkGenPrimOp (fsLit "whereFrom#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo TraceEventOp = mkGenPrimOp (fsLit "traceEvent#") [deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TraceEventBinaryOp = mkGenPrimOp (fsLit "traceBinaryEvent#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TraceMarkerOp = mkGenPrimOp (fsLit "traceMarker#") [deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SetThreadAllocationCounter = mkGenPrimOp (fsLit "setThreadAllocationCounter#") [] [int64PrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo (VecBroadcastOp IntVec 16 W8) = mkGenPrimOp (fsLit "broadcastInt8X16#") [] [int8PrimTy] (int8X16PrimTy) primOpInfo 
(VecBroadcastOp IntVec 8 W16) = mkGenPrimOp (fsLit "broadcastInt16X8#") [] [int16PrimTy] (int16X8PrimTy) primOpInfo (VecBroadcastOp IntVec 4 W32) = mkGenPrimOp (fsLit "broadcastInt32X4#") [] [int32PrimTy] (int32X4PrimTy) primOpInfo (VecBroadcastOp IntVec 2 W64) = mkGenPrimOp (fsLit "broadcastInt64X2#") [] [int64PrimTy] (int64X2PrimTy) primOpInfo (VecBroadcastOp IntVec 32 W8) = mkGenPrimOp (fsLit "broadcastInt8X32#") [] [int8PrimTy] (int8X32PrimTy) primOpInfo (VecBroadcastOp IntVec 16 W16) = mkGenPrimOp (fsLit "broadcastInt16X16#") [] [int16PrimTy] (int16X16PrimTy) primOpInfo (VecBroadcastOp IntVec 8 W32) = mkGenPrimOp (fsLit "broadcastInt32X8#") [] [int32PrimTy] (int32X8PrimTy) primOpInfo (VecBroadcastOp IntVec 4 W64) = mkGenPrimOp (fsLit "broadcastInt64X4#") [] [int64PrimTy] (int64X4PrimTy) primOpInfo (VecBroadcastOp IntVec 64 W8) = mkGenPrimOp (fsLit "broadcastInt8X64#") [] [int8PrimTy] (int8X64PrimTy) primOpInfo (VecBroadcastOp IntVec 32 W16) = mkGenPrimOp (fsLit "broadcastInt16X32#") [] [int16PrimTy] (int16X32PrimTy) primOpInfo (VecBroadcastOp IntVec 16 W32) = mkGenPrimOp (fsLit "broadcastInt32X16#") [] [int32PrimTy] (int32X16PrimTy) primOpInfo (VecBroadcastOp IntVec 8 W64) = mkGenPrimOp (fsLit "broadcastInt64X8#") [] [int64PrimTy] (int64X8PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W8) = mkGenPrimOp (fsLit "broadcastWord8X16#") [] [word8PrimTy] (word8X16PrimTy) primOpInfo (VecBroadcastOp WordVec 8 W16) = mkGenPrimOp (fsLit "broadcastWord16X8#") [] [word16PrimTy] (word16X8PrimTy) primOpInfo (VecBroadcastOp WordVec 4 W32) = mkGenPrimOp (fsLit "broadcastWord32X4#") [] [word32PrimTy] (word32X4PrimTy) primOpInfo (VecBroadcastOp WordVec 2 W64) = mkGenPrimOp (fsLit "broadcastWord64X2#") [] [word64PrimTy] (word64X2PrimTy) primOpInfo (VecBroadcastOp WordVec 32 W8) = mkGenPrimOp (fsLit "broadcastWord8X32#") [] [word8PrimTy] (word8X32PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W16) = mkGenPrimOp (fsLit "broadcastWord16X16#") [] [word16PrimTy] (word16X16PrimTy) 
primOpInfo (VecBroadcastOp WordVec 8 W32) = mkGenPrimOp (fsLit "broadcastWord32X8#") [] [word32PrimTy] (word32X8PrimTy) primOpInfo (VecBroadcastOp WordVec 4 W64) = mkGenPrimOp (fsLit "broadcastWord64X4#") [] [word64PrimTy] (word64X4PrimTy) primOpInfo (VecBroadcastOp WordVec 64 W8) = mkGenPrimOp (fsLit "broadcastWord8X64#") [] [word8PrimTy] (word8X64PrimTy) primOpInfo (VecBroadcastOp WordVec 32 W16) = mkGenPrimOp (fsLit "broadcastWord16X32#") [] [word16PrimTy] (word16X32PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W32) = mkGenPrimOp (fsLit "broadcastWord32X16#") [] [word32PrimTy] (word32X16PrimTy) primOpInfo (VecBroadcastOp WordVec 8 W64) = mkGenPrimOp (fsLit "broadcastWord64X8#") [] [word64PrimTy] (word64X8PrimTy) primOpInfo (VecBroadcastOp FloatVec 4 W32) = mkGenPrimOp (fsLit "broadcastFloatX4#") [] [floatPrimTy] (floatX4PrimTy) primOpInfo (VecBroadcastOp FloatVec 2 W64) = mkGenPrimOp (fsLit "broadcastDoubleX2#") [] [doublePrimTy] (doubleX2PrimTy) primOpInfo (VecBroadcastOp FloatVec 8 W32) = mkGenPrimOp (fsLit "broadcastFloatX8#") [] [floatPrimTy] (floatX8PrimTy) primOpInfo (VecBroadcastOp FloatVec 4 W64) = mkGenPrimOp (fsLit "broadcastDoubleX4#") [] [doublePrimTy] (doubleX4PrimTy) primOpInfo (VecBroadcastOp FloatVec 16 W32) = mkGenPrimOp (fsLit "broadcastFloatX16#") [] [floatPrimTy] (floatX16PrimTy) primOpInfo (VecBroadcastOp FloatVec 8 W64) = mkGenPrimOp (fsLit "broadcastDoubleX8#") [] [doublePrimTy] (doubleX8PrimTy) primOpInfo (VecPackOp IntVec 16 W8) = mkGenPrimOp (fsLit "packInt8X16#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X16PrimTy) primOpInfo (VecPackOp IntVec 8 W16) = mkGenPrimOp (fsLit "packInt16X8#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])] (int16X8PrimTy) primOpInfo (VecPackOp IntVec 
4 W32) = mkGenPrimOp (fsLit "packInt32X4#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X4PrimTy) primOpInfo (VecPackOp IntVec 2 W64) = mkGenPrimOp (fsLit "packInt64X2#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy])] (int64X2PrimTy) primOpInfo (VecPackOp IntVec 32 W8) = mkGenPrimOp (fsLit "packInt8X32#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X32PrimTy) primOpInfo (VecPackOp IntVec 16 W16) = mkGenPrimOp (fsLit "packInt16X16#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])] (int16X16PrimTy) primOpInfo (VecPackOp IntVec 8 W32) = mkGenPrimOp (fsLit "packInt32X8#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X8PrimTy) primOpInfo (VecPackOp IntVec 4 W64) = mkGenPrimOp (fsLit "packInt64X4#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])] (int64X4PrimTy) primOpInfo (VecPackOp IntVec 64 W8) = mkGenPrimOp (fsLit "packInt8X64#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, 
int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X64PrimTy) primOpInfo (VecPackOp IntVec 32 W16) = mkGenPrimOp (fsLit "packInt16X32#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])] (int16X32PrimTy) primOpInfo (VecPackOp IntVec 16 W32) = mkGenPrimOp (fsLit "packInt32X16#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X16PrimTy) primOpInfo (VecPackOp IntVec 8 W64) = mkGenPrimOp (fsLit "packInt64X8#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])] (int64X8PrimTy) primOpInfo (VecPackOp WordVec 16 W8) = mkGenPrimOp (fsLit "packWord8X16#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X16PrimTy) primOpInfo (VecPackOp WordVec 8 W16) = mkGenPrimOp (fsLit "packWord16X8#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] 
(word16X8PrimTy) primOpInfo (VecPackOp WordVec 4 W32) = mkGenPrimOp (fsLit "packWord32X4#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X4PrimTy) primOpInfo (VecPackOp WordVec 2 W64) = mkGenPrimOp (fsLit "packWord64X2#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy])] (word64X2PrimTy) primOpInfo (VecPackOp WordVec 32 W8) = mkGenPrimOp (fsLit "packWord8X32#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X32PrimTy) primOpInfo (VecPackOp WordVec 16 W16) = mkGenPrimOp (fsLit "packWord16X16#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] (word16X16PrimTy) primOpInfo (VecPackOp WordVec 8 W32) = mkGenPrimOp (fsLit "packWord32X8#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X8PrimTy) primOpInfo (VecPackOp WordVec 4 W64) = mkGenPrimOp (fsLit "packWord64X4#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])] (word64X4PrimTy) primOpInfo (VecPackOp WordVec 64 W8) = mkGenPrimOp (fsLit "packWord8X64#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, 
word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X64PrimTy) primOpInfo (VecPackOp WordVec 32 W16) = mkGenPrimOp (fsLit "packWord16X32#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] (word16X32PrimTy) primOpInfo (VecPackOp WordVec 16 W32) = mkGenPrimOp (fsLit "packWord32X16#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X16PrimTy) primOpInfo (VecPackOp WordVec 8 W64) = mkGenPrimOp (fsLit "packWord64X8#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])] (word64X8PrimTy) primOpInfo (VecPackOp FloatVec 4 W32) = mkGenPrimOp (fsLit "packFloatX4#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX4PrimTy) primOpInfo (VecPackOp FloatVec 2 W64) = mkGenPrimOp (fsLit "packDoubleX2#") [] [(mkTupleTy Unboxed 
[doublePrimTy, doublePrimTy])] (doubleX2PrimTy) primOpInfo (VecPackOp FloatVec 8 W32) = mkGenPrimOp (fsLit "packFloatX8#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX8PrimTy) primOpInfo (VecPackOp FloatVec 4 W64) = mkGenPrimOp (fsLit "packDoubleX4#") [] [(mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])] (doubleX4PrimTy) primOpInfo (VecPackOp FloatVec 16 W32) = mkGenPrimOp (fsLit "packFloatX16#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX16PrimTy) primOpInfo (VecPackOp FloatVec 8 W64) = mkGenPrimOp (fsLit "packDoubleX8#") [] [(mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])] (doubleX8PrimTy) primOpInfo (VecUnpackOp IntVec 16 W8) = mkGenPrimOp (fsLit "unpackInt8X16#") [] [int8X16PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W16) = mkGenPrimOp (fsLit "unpackInt16X8#") [] [int16X8PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 4 W32) = mkGenPrimOp (fsLit "unpackInt32X4#") [] [int32X4PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 2 W64) = mkGenPrimOp (fsLit "unpackInt64X2#") [] [int64X2PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp IntVec 32 W8) = mkGenPrimOp (fsLit "unpackInt8X32#") [] [int8X32PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, 
int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp IntVec 16 W16) = mkGenPrimOp (fsLit "unpackInt16X16#") [] [int16X16PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W32) = mkGenPrimOp (fsLit "unpackInt32X8#") [] [int32X8PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 4 W64) = mkGenPrimOp (fsLit "unpackInt64X4#") [] [int64X4PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp IntVec 64 W8) = mkGenPrimOp (fsLit "unpackInt8X64#") [] [int8X64PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp 
IntVec 32 W16) = mkGenPrimOp (fsLit "unpackInt16X32#") [] [int16X32PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 16 W32) = mkGenPrimOp (fsLit "unpackInt32X16#") [] [int32X16PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W64) = mkGenPrimOp (fsLit "unpackInt64X8#") [] [int64X8PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W8) = mkGenPrimOp (fsLit "unpackWord8X16#") [] [word8X16PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W16) = mkGenPrimOp (fsLit "unpackWord16X8#") [] [word16X8PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 4 W32) = mkGenPrimOp (fsLit "unpackWord32X4#") [] [word32X4PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 2 W64) = mkGenPrimOp (fsLit "unpackWord64X2#") [] [word64X2PrimTy] ((mkTupleTy Unboxed [word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp WordVec 32 W8) = 
mkGenPrimOp (fsLit "unpackWord8X32#") [] [word8X32PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W16) = mkGenPrimOp (fsLit "unpackWord16X16#") [] [word16X16PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W32) = mkGenPrimOp (fsLit "unpackWord32X8#") [] [word32X8PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 4 W64) = mkGenPrimOp (fsLit "unpackWord64X4#") [] [word64X4PrimTy] ((mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp WordVec 64 W8) = mkGenPrimOp (fsLit "unpackWord8X64#") [] [word8X64PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, 
word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 32 W16) = mkGenPrimOp (fsLit "unpackWord16X32#") [] [word16X32PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W32) = mkGenPrimOp (fsLit "unpackWord32X16#") [] [word32X16PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W64) = mkGenPrimOp (fsLit "unpackWord64X8#") [] [word64X8PrimTy] ((mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp FloatVec 4 W32) = mkGenPrimOp (fsLit "unpackFloatX4#") [] [floatX4PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) primOpInfo (VecUnpackOp FloatVec 2 W64) = mkGenPrimOp (fsLit "unpackDoubleX2#") [] [doubleX2PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy])) primOpInfo (VecUnpackOp FloatVec 8 W32) = mkGenPrimOp (fsLit "unpackFloatX8#") [] [floatX8PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) primOpInfo (VecUnpackOp FloatVec 4 W64) = mkGenPrimOp (fsLit 
"unpackDoubleX4#") [] [doubleX4PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])) primOpInfo (VecUnpackOp FloatVec 16 W32) = mkGenPrimOp (fsLit "unpackFloatX16#") [] [floatX16PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) primOpInfo (VecUnpackOp FloatVec 8 W64) = mkGenPrimOp (fsLit "unpackDoubleX8#") [] [doubleX8PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])) primOpInfo (VecInsertOp IntVec 16 W8) = mkGenPrimOp (fsLit "insertInt8X16#") [] [int8X16PrimTy, int8PrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W16) = mkGenPrimOp (fsLit "insertInt16X8#") [] [int16X8PrimTy, int16PrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecInsertOp IntVec 4 W32) = mkGenPrimOp (fsLit "insertInt32X4#") [] [int32X4PrimTy, int32PrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecInsertOp IntVec 2 W64) = mkGenPrimOp (fsLit "insertInt64X2#") [] [int64X2PrimTy, int64PrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecInsertOp IntVec 32 W8) = mkGenPrimOp (fsLit "insertInt8X32#") [] [int8X32PrimTy, int8PrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecInsertOp IntVec 16 W16) = mkGenPrimOp (fsLit "insertInt16X16#") [] [int16X16PrimTy, int16PrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W32) = mkGenPrimOp (fsLit "insertInt32X8#") [] [int32X8PrimTy, int32PrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecInsertOp IntVec 4 W64) = mkGenPrimOp (fsLit "insertInt64X4#") [] [int64X4PrimTy, int64PrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecInsertOp IntVec 64 W8) = mkGenPrimOp (fsLit "insertInt8X64#") [] [int8X64PrimTy, int8PrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecInsertOp IntVec 32 W16) = mkGenPrimOp (fsLit "insertInt16X32#") [] 
[int16X32PrimTy, int16PrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecInsertOp IntVec 16 W32) = mkGenPrimOp (fsLit "insertInt32X16#") [] [int32X16PrimTy, int32PrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W64) = mkGenPrimOp (fsLit "insertInt64X8#") [] [int64X8PrimTy, int64PrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecInsertOp WordVec 16 W8) = mkGenPrimOp (fsLit "insertWord8X16#") [] [word8X16PrimTy, word8PrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W16) = mkGenPrimOp (fsLit "insertWord16X8#") [] [word16X8PrimTy, word16PrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecInsertOp WordVec 4 W32) = mkGenPrimOp (fsLit "insertWord32X4#") [] [word32X4PrimTy, word32PrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecInsertOp WordVec 2 W64) = mkGenPrimOp (fsLit "insertWord64X2#") [] [word64X2PrimTy, word64PrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecInsertOp WordVec 32 W8) = mkGenPrimOp (fsLit "insertWord8X32#") [] [word8X32PrimTy, word8PrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecInsertOp WordVec 16 W16) = mkGenPrimOp (fsLit "insertWord16X16#") [] [word16X16PrimTy, word16PrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W32) = mkGenPrimOp (fsLit "insertWord32X8#") [] [word32X8PrimTy, word32PrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecInsertOp WordVec 4 W64) = mkGenPrimOp (fsLit "insertWord64X4#") [] [word64X4PrimTy, word64PrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecInsertOp WordVec 64 W8) = mkGenPrimOp (fsLit "insertWord8X64#") [] [word8X64PrimTy, word8PrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecInsertOp WordVec 32 W16) = mkGenPrimOp (fsLit "insertWord16X32#") [] [word16X32PrimTy, word16PrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecInsertOp WordVec 16 W32) = mkGenPrimOp (fsLit "insertWord32X16#") [] [word32X16PrimTy, word32PrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W64) = mkGenPrimOp (fsLit "insertWord64X8#") [] [word64X8PrimTy, 
word64PrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecInsertOp FloatVec 4 W32) = mkGenPrimOp (fsLit "insertFloatX4#") [] [floatX4PrimTy, floatPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecInsertOp FloatVec 2 W64) = mkGenPrimOp (fsLit "insertDoubleX2#") [] [doubleX2PrimTy, doublePrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecInsertOp FloatVec 8 W32) = mkGenPrimOp (fsLit "insertFloatX8#") [] [floatX8PrimTy, floatPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecInsertOp FloatVec 4 W64) = mkGenPrimOp (fsLit "insertDoubleX4#") [] [doubleX4PrimTy, doublePrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecInsertOp FloatVec 16 W32) = mkGenPrimOp (fsLit "insertFloatX16#") [] [floatX16PrimTy, floatPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecInsertOp FloatVec 8 W64) = mkGenPrimOp (fsLit "insertDoubleX8#") [] [doubleX8PrimTy, doublePrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecAddOp IntVec 16 W8) = mkGenPrimOp (fsLit "plusInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecAddOp IntVec 8 W16) = mkGenPrimOp (fsLit "plusInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecAddOp IntVec 4 W32) = mkGenPrimOp (fsLit "plusInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecAddOp IntVec 2 W64) = mkGenPrimOp (fsLit "plusInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecAddOp IntVec 32 W8) = mkGenPrimOp (fsLit "plusInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecAddOp IntVec 16 W16) = mkGenPrimOp (fsLit "plusInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecAddOp IntVec 8 W32) = mkGenPrimOp (fsLit "plusInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecAddOp IntVec 4 W64) = mkGenPrimOp (fsLit "plusInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecAddOp IntVec 64 W8) = mkGenPrimOp (fsLit "plusInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo 
(VecAddOp IntVec 32 W16) = mkGenPrimOp (fsLit "plusInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecAddOp IntVec 16 W32) = mkGenPrimOp (fsLit "plusInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecAddOp IntVec 8 W64) = mkGenPrimOp (fsLit "plusInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecAddOp WordVec 16 W8) = mkGenPrimOp (fsLit "plusWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecAddOp WordVec 8 W16) = mkGenPrimOp (fsLit "plusWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecAddOp WordVec 4 W32) = mkGenPrimOp (fsLit "plusWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecAddOp WordVec 2 W64) = mkGenPrimOp (fsLit "plusWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecAddOp WordVec 32 W8) = mkGenPrimOp (fsLit "plusWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecAddOp WordVec 16 W16) = mkGenPrimOp (fsLit "plusWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecAddOp WordVec 8 W32) = mkGenPrimOp (fsLit "plusWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecAddOp WordVec 4 W64) = mkGenPrimOp (fsLit "plusWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecAddOp WordVec 64 W8) = mkGenPrimOp (fsLit "plusWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecAddOp WordVec 32 W16) = mkGenPrimOp (fsLit "plusWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecAddOp WordVec 16 W32) = mkGenPrimOp (fsLit "plusWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecAddOp WordVec 8 W64) = mkGenPrimOp (fsLit "plusWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecAddOp FloatVec 4 W32) = mkGenPrimOp (fsLit "plusFloatX4#") [] [floatX4PrimTy, 
floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecAddOp FloatVec 2 W64) = mkGenPrimOp (fsLit "plusDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecAddOp FloatVec 8 W32) = mkGenPrimOp (fsLit "plusFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecAddOp FloatVec 4 W64) = mkGenPrimOp (fsLit "plusDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecAddOp FloatVec 16 W32) = mkGenPrimOp (fsLit "plusFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecAddOp FloatVec 8 W64) = mkGenPrimOp (fsLit "plusDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecSubOp IntVec 16 W8) = mkGenPrimOp (fsLit "minusInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecSubOp IntVec 8 W16) = mkGenPrimOp (fsLit "minusInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecSubOp IntVec 4 W32) = mkGenPrimOp (fsLit "minusInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecSubOp IntVec 2 W64) = mkGenPrimOp (fsLit "minusInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecSubOp IntVec 32 W8) = mkGenPrimOp (fsLit "minusInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecSubOp IntVec 16 W16) = mkGenPrimOp (fsLit "minusInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecSubOp IntVec 8 W32) = mkGenPrimOp (fsLit "minusInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecSubOp IntVec 4 W64) = mkGenPrimOp (fsLit "minusInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecSubOp IntVec 64 W8) = mkGenPrimOp (fsLit "minusInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecSubOp IntVec 32 W16) = mkGenPrimOp (fsLit "minusInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecSubOp IntVec 16 W32) = mkGenPrimOp (fsLit "minusInt32X16#") [] 
[int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecSubOp IntVec 8 W64) = mkGenPrimOp (fsLit "minusInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecSubOp WordVec 16 W8) = mkGenPrimOp (fsLit "minusWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecSubOp WordVec 8 W16) = mkGenPrimOp (fsLit "minusWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecSubOp WordVec 4 W32) = mkGenPrimOp (fsLit "minusWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecSubOp WordVec 2 W64) = mkGenPrimOp (fsLit "minusWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecSubOp WordVec 32 W8) = mkGenPrimOp (fsLit "minusWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecSubOp WordVec 16 W16) = mkGenPrimOp (fsLit "minusWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecSubOp WordVec 8 W32) = mkGenPrimOp (fsLit "minusWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecSubOp WordVec 4 W64) = mkGenPrimOp (fsLit "minusWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecSubOp WordVec 64 W8) = mkGenPrimOp (fsLit "minusWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecSubOp WordVec 32 W16) = mkGenPrimOp (fsLit "minusWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecSubOp WordVec 16 W32) = mkGenPrimOp (fsLit "minusWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecSubOp WordVec 8 W64) = mkGenPrimOp (fsLit "minusWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecSubOp FloatVec 4 W32) = mkGenPrimOp (fsLit "minusFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecSubOp FloatVec 2 W64) = mkGenPrimOp (fsLit "minusDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecSubOp 
FloatVec 8 W32) = mkGenPrimOp (fsLit "minusFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecSubOp FloatVec 4 W64) = mkGenPrimOp (fsLit "minusDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecSubOp FloatVec 16 W32) = mkGenPrimOp (fsLit "minusFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecSubOp FloatVec 8 W64) = mkGenPrimOp (fsLit "minusDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecMulOp IntVec 16 W8) = mkGenPrimOp (fsLit "timesInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecMulOp IntVec 8 W16) = mkGenPrimOp (fsLit "timesInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecMulOp IntVec 4 W32) = mkGenPrimOp (fsLit "timesInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecMulOp IntVec 2 W64) = mkGenPrimOp (fsLit "timesInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecMulOp IntVec 32 W8) = mkGenPrimOp (fsLit "timesInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecMulOp IntVec 16 W16) = mkGenPrimOp (fsLit "timesInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecMulOp IntVec 8 W32) = mkGenPrimOp (fsLit "timesInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecMulOp IntVec 4 W64) = mkGenPrimOp (fsLit "timesInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecMulOp IntVec 64 W8) = mkGenPrimOp (fsLit "timesInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecMulOp IntVec 32 W16) = mkGenPrimOp (fsLit "timesInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecMulOp IntVec 16 W32) = mkGenPrimOp (fsLit "timesInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecMulOp IntVec 8 W64) = mkGenPrimOp (fsLit "timesInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo 
(VecMulOp WordVec 16 W8) = mkGenPrimOp (fsLit "timesWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecMulOp WordVec 8 W16) = mkGenPrimOp (fsLit "timesWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecMulOp WordVec 4 W32) = mkGenPrimOp (fsLit "timesWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecMulOp WordVec 2 W64) = mkGenPrimOp (fsLit "timesWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecMulOp WordVec 32 W8) = mkGenPrimOp (fsLit "timesWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecMulOp WordVec 16 W16) = mkGenPrimOp (fsLit "timesWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecMulOp WordVec 8 W32) = mkGenPrimOp (fsLit "timesWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecMulOp WordVec 4 W64) = mkGenPrimOp (fsLit "timesWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecMulOp WordVec 64 W8) = mkGenPrimOp (fsLit "timesWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecMulOp WordVec 32 W16) = mkGenPrimOp (fsLit "timesWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecMulOp WordVec 16 W32) = mkGenPrimOp (fsLit "timesWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecMulOp WordVec 8 W64) = mkGenPrimOp (fsLit "timesWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecMulOp FloatVec 4 W32) = mkGenPrimOp (fsLit "timesFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecMulOp FloatVec 2 W64) = mkGenPrimOp (fsLit "timesDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecMulOp FloatVec 8 W32) = mkGenPrimOp (fsLit "timesFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecMulOp FloatVec 4 W64) = mkGenPrimOp (fsLit "timesDoubleX4#") [] 
[doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecMulOp FloatVec 16 W32) = mkGenPrimOp (fsLit "timesFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecMulOp FloatVec 8 W64) = mkGenPrimOp (fsLit "timesDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecDivOp FloatVec 4 W32) = mkGenPrimOp (fsLit "divideFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecDivOp FloatVec 2 W64) = mkGenPrimOp (fsLit "divideDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecDivOp FloatVec 8 W32) = mkGenPrimOp (fsLit "divideFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecDivOp FloatVec 4 W64) = mkGenPrimOp (fsLit "divideDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecDivOp FloatVec 16 W32) = mkGenPrimOp (fsLit "divideFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecDivOp FloatVec 8 W64) = mkGenPrimOp (fsLit "divideDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecQuotOp IntVec 16 W8) = mkGenPrimOp (fsLit "quotInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W16) = mkGenPrimOp (fsLit "quotInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecQuotOp IntVec 4 W32) = mkGenPrimOp (fsLit "quotInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecQuotOp IntVec 2 W64) = mkGenPrimOp (fsLit "quotInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecQuotOp IntVec 32 W8) = mkGenPrimOp (fsLit "quotInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecQuotOp IntVec 16 W16) = mkGenPrimOp (fsLit "quotInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W32) = mkGenPrimOp (fsLit "quotInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecQuotOp IntVec 4 W64) = mkGenPrimOp 
(fsLit "quotInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecQuotOp IntVec 64 W8) = mkGenPrimOp (fsLit "quotInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecQuotOp IntVec 32 W16) = mkGenPrimOp (fsLit "quotInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecQuotOp IntVec 16 W32) = mkGenPrimOp (fsLit "quotInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W64) = mkGenPrimOp (fsLit "quotInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecQuotOp WordVec 16 W8) = mkGenPrimOp (fsLit "quotWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecQuotOp WordVec 8 W16) = mkGenPrimOp (fsLit "quotWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecQuotOp WordVec 4 W32) = mkGenPrimOp (fsLit "quotWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecQuotOp WordVec 2 W64) = mkGenPrimOp (fsLit "quotWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecQuotOp WordVec 32 W8) = mkGenPrimOp (fsLit "quotWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecQuotOp WordVec 16 W16) = mkGenPrimOp (fsLit "quotWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecQuotOp WordVec 8 W32) = mkGenPrimOp (fsLit "quotWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecQuotOp WordVec 4 W64) = mkGenPrimOp (fsLit "quotWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecQuotOp WordVec 64 W8) = mkGenPrimOp (fsLit "quotWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecQuotOp WordVec 32 W16) = mkGenPrimOp (fsLit "quotWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecQuotOp WordVec 16 W32) = mkGenPrimOp (fsLit "quotWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) 
primOpInfo (VecQuotOp WordVec 8 W64) = mkGenPrimOp (fsLit "quotWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecRemOp IntVec 16 W8) = mkGenPrimOp (fsLit "remInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecRemOp IntVec 8 W16) = mkGenPrimOp (fsLit "remInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecRemOp IntVec 4 W32) = mkGenPrimOp (fsLit "remInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecRemOp IntVec 2 W64) = mkGenPrimOp (fsLit "remInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecRemOp IntVec 32 W8) = mkGenPrimOp (fsLit "remInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecRemOp IntVec 16 W16) = mkGenPrimOp (fsLit "remInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecRemOp IntVec 8 W32) = mkGenPrimOp (fsLit "remInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecRemOp IntVec 4 W64) = mkGenPrimOp (fsLit "remInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecRemOp IntVec 64 W8) = mkGenPrimOp (fsLit "remInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecRemOp IntVec 32 W16) = mkGenPrimOp (fsLit "remInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecRemOp IntVec 16 W32) = mkGenPrimOp (fsLit "remInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecRemOp IntVec 8 W64) = mkGenPrimOp (fsLit "remInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecRemOp WordVec 16 W8) = mkGenPrimOp (fsLit "remWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecRemOp WordVec 8 W16) = mkGenPrimOp (fsLit "remWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecRemOp WordVec 4 W32) = mkGenPrimOp (fsLit "remWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecRemOp 
WordVec 2 W64) = mkGenPrimOp (fsLit "remWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecRemOp WordVec 32 W8) = mkGenPrimOp (fsLit "remWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecRemOp WordVec 16 W16) = mkGenPrimOp (fsLit "remWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecRemOp WordVec 8 W32) = mkGenPrimOp (fsLit "remWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecRemOp WordVec 4 W64) = mkGenPrimOp (fsLit "remWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecRemOp WordVec 64 W8) = mkGenPrimOp (fsLit "remWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecRemOp WordVec 32 W16) = mkGenPrimOp (fsLit "remWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecRemOp WordVec 16 W32) = mkGenPrimOp (fsLit "remWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecRemOp WordVec 8 W64) = mkGenPrimOp (fsLit "remWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecNegOp IntVec 16 W8) = mkGenPrimOp (fsLit "negateInt8X16#") [] [int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecNegOp IntVec 8 W16) = mkGenPrimOp (fsLit "negateInt16X8#") [] [int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecNegOp IntVec 4 W32) = mkGenPrimOp (fsLit "negateInt32X4#") [] [int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecNegOp IntVec 2 W64) = mkGenPrimOp (fsLit "negateInt64X2#") [] [int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecNegOp IntVec 32 W8) = mkGenPrimOp (fsLit "negateInt8X32#") [] [int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecNegOp IntVec 16 W16) = mkGenPrimOp (fsLit "negateInt16X16#") [] [int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecNegOp IntVec 8 W32) = mkGenPrimOp (fsLit "negateInt32X8#") [] [int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecNegOp IntVec 4 W64) = mkGenPrimOp (fsLit "negateInt64X4#") [] [int64X4PrimTy] 
(int64X4PrimTy) primOpInfo (VecNegOp IntVec 64 W8) = mkGenPrimOp (fsLit "negateInt8X64#") [] [int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecNegOp IntVec 32 W16) = mkGenPrimOp (fsLit "negateInt16X32#") [] [int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecNegOp IntVec 16 W32) = mkGenPrimOp (fsLit "negateInt32X16#") [] [int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecNegOp IntVec 8 W64) = mkGenPrimOp (fsLit "negateInt64X8#") [] [int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecNegOp FloatVec 4 W32) = mkGenPrimOp (fsLit "negateFloatX4#") [] [floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecNegOp FloatVec 2 W64) = mkGenPrimOp (fsLit "negateDoubleX2#") [] [doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecNegOp FloatVec 8 W32) = mkGenPrimOp (fsLit "negateFloatX8#") [] [floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecNegOp FloatVec 4 W64) = mkGenPrimOp (fsLit "negateDoubleX4#") [] [doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecNegOp FloatVec 16 W32) = mkGenPrimOp (fsLit "negateFloatX16#") [] [floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecNegOp FloatVec 8 W64) = mkGenPrimOp (fsLit "negateDoubleX8#") [] [doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8X16Array#") [] [byteArrayPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16X8Array#") [] [byteArrayPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32X4Array#") [] [byteArrayPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64X2Array#") [] [byteArrayPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8X32Array#") [] [byteArrayPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16X16Array#") [] [byteArrayPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexByteArrayOp 
IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32X8Array#") [] [byteArrayPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64X4Array#") [] [byteArrayPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8X64Array#") [] [byteArrayPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16X32Array#") [] [byteArrayPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32X16Array#") [] [byteArrayPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64X8Array#") [] [byteArrayPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8X16Array#") [] [byteArrayPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16X8Array#") [] [byteArrayPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32X4Array#") [] [byteArrayPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64X2Array#") [] [byteArrayPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8X32Array#") [] [byteArrayPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16X16Array#") [] [byteArrayPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32X8Array#") [] [byteArrayPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64X4Array#") [] [byteArrayPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit 
"indexWord8X64Array#") [] [byteArrayPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16X32Array#") [] [byteArrayPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32X16Array#") [] [byteArrayPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64X8Array#") [] [byteArrayPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatX4Array#") [] [byteArrayPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleX2Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatX8Array#") [] [byteArrayPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleX4Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatX16Array#") [] [byteArrayPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleX8Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy 
Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit 
"readInt64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy 
deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleX2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo 
(VecReadByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatX16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp 
IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit 
"writeWord8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleX2Array#") 
[deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatX16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8X16OffAddr#") [] [addrPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16X8OffAddr#") [] [addrPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32X4OffAddr#") [] [addrPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64X2OffAddr#") [] [addrPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8X32OffAddr#") [] [addrPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16X16OffAddr#") [] [addrPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32X8OffAddr#") [] [addrPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit 
"indexInt64X4OffAddr#") [] [addrPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8X64OffAddr#") [] [addrPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16X32OffAddr#") [] [addrPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32X16OffAddr#") [] [addrPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64X8OffAddr#") [] [addrPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8X16OffAddr#") [] [addrPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16X8OffAddr#") [] [addrPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32X4OffAddr#") [] [addrPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64X2OffAddr#") [] [addrPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8X32OffAddr#") [] [addrPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16X16OffAddr#") [] [addrPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32X8OffAddr#") [] [addrPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64X4OffAddr#") [] [addrPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8X64OffAddr#") [] [addrPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16X32OffAddr#") [] [addrPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 16 
W32) = mkGenPrimOp (fsLit "indexWord32X16OffAddr#") [] [addrPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64X8OffAddr#") [] [addrPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatX4OffAddr#") [] [addrPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleX2OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatX8OffAddr#") [] [addrPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleX4OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatX16OffAddr#") [] [addrPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleX8OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8X32OffAddr#") [deltaTyVarSpec] 
[addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed 
[mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo 
(VecReadOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleX2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatX16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, 
intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy 
deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp 
WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleX2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatX16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X16#") [] [byteArrayPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X8#") [] [byteArrayPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X4#") [] [byteArrayPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X2#") [] [byteArrayPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 32 W8) = 
mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X32#") [] [byteArrayPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X16#") [] [byteArrayPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X8#") [] [byteArrayPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X4#") [] [byteArrayPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X64#") [] [byteArrayPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X32#") [] [byteArrayPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X16#") [] [byteArrayPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X8#") [] [byteArrayPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X16#") [] [byteArrayPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16ArrayAsWord16X8#") [] [byteArrayPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X4#") [] [byteArrayPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X2#") [] [byteArrayPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X32#") [] [byteArrayPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit 
"indexWord16ArrayAsWord16X16#") [] [byteArrayPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X8#") [] [byteArrayPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X4#") [] [byteArrayPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X64#") [] [byteArrayPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16ArrayAsWord16X32#") [] [byteArrayPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X16#") [] [byteArrayPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X8#") [] [byteArrayPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX4#") [] [byteArrayPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX2#") [] [byteArrayPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX8#") [] [byteArrayPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX4#") [] [byteArrayPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX16#") [] [byteArrayPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX8#") [] [byteArrayPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp 
(fsLit "readInt8ArrayAsInt8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8ArrayAsInt8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit 
"readInt8ArrayAsInt8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8ArrayAsWord8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit 
"readWord8ArrayAsWord8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8ArrayAsWord8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 4 W32) = 
mkGenPrimOp (fsLit "readFloatArrayAsFloatX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatArrayAsFloatX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatArrayAsFloatX16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit 
"writeInt32ArrayAsInt32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32ArrayAsInt32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32ArrayAsInt32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
(VecWriteScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, 
intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatArrayAsFloatX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatArrayAsFloatX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp 
(fsLit "writeFloatArrayAsFloatX16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X16#") [] [addrPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X8#") [] [addrPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32OffAddrAsInt32X4#") [] [addrPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X2#") [] [addrPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X32#") [] [addrPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X16#") [] [addrPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32OffAddrAsInt32X8#") [] [addrPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X4#") [] [addrPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X64#") [] [addrPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X32#") [] [addrPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32OffAddrAsInt32X16#") [] [addrPrimTy, intPrimTy] (int32X16PrimTy) 
primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X8#") [] [addrPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X16#") [] [addrPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X8#") [] [addrPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X4#") [] [addrPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X2#") [] [addrPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X32#") [] [addrPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X16#") [] [addrPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X8#") [] [addrPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X4#") [] [addrPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X64#") [] [addrPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X32#") [] [addrPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X16#") [] [addrPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X8#") [] [addrPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 4 W32) = 
mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX4#") [] [addrPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX2#") [] [addrPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX8#") [] [addrPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX4#") [] [addrPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX16#") [] [addrPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX8#") [] [addrPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp 
IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, 
word16X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32OffAddrAsWord32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32OffAddrAsWord32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32OffAddrAsWord32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X8#") 
[deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 8 
W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X64PrimTy, 
mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecFMAdd FloatVec 4 W32) = mkGenPrimOp (fsLit 
"fmaddFloatX4#") [] [floatX4PrimTy, floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecFMAdd FloatVec 2 W64) = mkGenPrimOp (fsLit "fmaddDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecFMAdd FloatVec 8 W32) = mkGenPrimOp (fsLit "fmaddFloatX8#") [] [floatX8PrimTy, floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecFMAdd FloatVec 4 W64) = mkGenPrimOp (fsLit "fmaddDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecFMAdd FloatVec 16 W32) = mkGenPrimOp (fsLit "fmaddFloatX16#") [] [floatX16PrimTy, floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecFMAdd FloatVec 8 W64) = mkGenPrimOp (fsLit "fmaddDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecFMSub FloatVec 4 W32) = mkGenPrimOp (fsLit "fmsubFloatX4#") [] [floatX4PrimTy, floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecFMSub FloatVec 2 W64) = mkGenPrimOp (fsLit "fmsubDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecFMSub FloatVec 8 W32) = mkGenPrimOp (fsLit "fmsubFloatX8#") [] [floatX8PrimTy, floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecFMSub FloatVec 4 W64) = mkGenPrimOp (fsLit "fmsubDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecFMSub FloatVec 16 W32) = mkGenPrimOp (fsLit "fmsubFloatX16#") [] [floatX16PrimTy, floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecFMSub FloatVec 8 W64) = mkGenPrimOp (fsLit "fmsubDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecFNMAdd FloatVec 4 W32) = mkGenPrimOp (fsLit "fnmaddFloatX4#") [] [floatX4PrimTy, floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecFNMAdd FloatVec 2 W64) = mkGenPrimOp (fsLit "fnmaddDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecFNMAdd FloatVec 8 W32) = 
mkGenPrimOp (fsLit "fnmaddFloatX8#") [] [floatX8PrimTy, floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecFNMAdd FloatVec 4 W64) = mkGenPrimOp (fsLit "fnmaddDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecFNMAdd FloatVec 16 W32) = mkGenPrimOp (fsLit "fnmaddFloatX16#") [] [floatX16PrimTy, floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecFNMAdd FloatVec 8 W64) = mkGenPrimOp (fsLit "fnmaddDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecFNMSub FloatVec 4 W32) = mkGenPrimOp (fsLit "fnmsubFloatX4#") [] [floatX4PrimTy, floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecFNMSub FloatVec 2 W64) = mkGenPrimOp (fsLit "fnmsubDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecFNMSub FloatVec 8 W32) = mkGenPrimOp (fsLit "fnmsubFloatX8#") [] [floatX8PrimTy, floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecFNMSub FloatVec 4 W64) = mkGenPrimOp (fsLit "fnmsubDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecFNMSub FloatVec 16 W32) = mkGenPrimOp (fsLit "fnmsubFloatX16#") [] [floatX16PrimTy, floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecFNMSub FloatVec 8 W64) = mkGenPrimOp (fsLit "fnmsubDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecShuffleOp IntVec 16 W8) = mkGenPrimOp (fsLit "shuffleInt8X16#") [] [int8X16PrimTy, int8X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int8X16PrimTy) primOpInfo (VecShuffleOp IntVec 8 W16) = mkGenPrimOp (fsLit "shuffleInt16X8#") [] [int16X8PrimTy, int16X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int16X8PrimTy) primOpInfo 
(VecShuffleOp IntVec 4 W32) = mkGenPrimOp (fsLit "shuffleInt32X4#") [] [int32X4PrimTy, int32X4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int32X4PrimTy) primOpInfo (VecShuffleOp IntVec 2 W64) = mkGenPrimOp (fsLit "shuffleInt64X2#") [] [int64X2PrimTy, int64X2PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy])] (int64X2PrimTy) primOpInfo (VecShuffleOp IntVec 32 W8) = mkGenPrimOp (fsLit "shuffleInt8X32#") [] [int8X32PrimTy, int8X32PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int8X32PrimTy) primOpInfo (VecShuffleOp IntVec 16 W16) = mkGenPrimOp (fsLit "shuffleInt16X16#") [] [int16X16PrimTy, int16X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int16X16PrimTy) primOpInfo (VecShuffleOp IntVec 8 W32) = mkGenPrimOp (fsLit "shuffleInt32X8#") [] [int32X8PrimTy, int32X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int32X8PrimTy) primOpInfo (VecShuffleOp IntVec 4 W64) = mkGenPrimOp (fsLit "shuffleInt64X4#") [] [int64X4PrimTy, int64X4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int64X4PrimTy) primOpInfo (VecShuffleOp IntVec 64 W8) = mkGenPrimOp (fsLit "shuffleInt8X64#") [] [int8X64PrimTy, int8X64PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, 
intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int8X64PrimTy) primOpInfo (VecShuffleOp IntVec 32 W16) = mkGenPrimOp (fsLit "shuffleInt16X32#") [] [int16X32PrimTy, int16X32PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int16X32PrimTy) primOpInfo (VecShuffleOp IntVec 16 W32) = mkGenPrimOp (fsLit "shuffleInt32X16#") [] [int32X16PrimTy, int32X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int32X16PrimTy) primOpInfo (VecShuffleOp IntVec 8 W64) = mkGenPrimOp (fsLit "shuffleInt64X8#") [] [int64X8PrimTy, int64X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (int64X8PrimTy) primOpInfo (VecShuffleOp WordVec 16 W8) = mkGenPrimOp (fsLit "shuffleWord8X16#") [] [word8X16PrimTy, word8X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word8X16PrimTy) primOpInfo (VecShuffleOp WordVec 8 W16) = mkGenPrimOp (fsLit "shuffleWord16X8#") [] 
[word16X8PrimTy, word16X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word16X8PrimTy) primOpInfo (VecShuffleOp WordVec 4 W32) = mkGenPrimOp (fsLit "shuffleWord32X4#") [] [word32X4PrimTy, word32X4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word32X4PrimTy) primOpInfo (VecShuffleOp WordVec 2 W64) = mkGenPrimOp (fsLit "shuffleWord64X2#") [] [word64X2PrimTy, word64X2PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy])] (word64X2PrimTy) primOpInfo (VecShuffleOp WordVec 32 W8) = mkGenPrimOp (fsLit "shuffleWord8X32#") [] [word8X32PrimTy, word8X32PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word8X32PrimTy) primOpInfo (VecShuffleOp WordVec 16 W16) = mkGenPrimOp (fsLit "shuffleWord16X16#") [] [word16X16PrimTy, word16X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word16X16PrimTy) primOpInfo (VecShuffleOp WordVec 8 W32) = mkGenPrimOp (fsLit "shuffleWord32X8#") [] [word32X8PrimTy, word32X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word32X8PrimTy) primOpInfo (VecShuffleOp WordVec 4 W64) = mkGenPrimOp (fsLit "shuffleWord64X4#") [] [word64X4PrimTy, word64X4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word64X4PrimTy) primOpInfo (VecShuffleOp WordVec 64 W8) = mkGenPrimOp (fsLit "shuffleWord8X64#") [] [word8X64PrimTy, word8X64PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, 
intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word8X64PrimTy) primOpInfo (VecShuffleOp WordVec 32 W16) = mkGenPrimOp (fsLit "shuffleWord16X32#") [] [word16X32PrimTy, word16X32PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word16X32PrimTy) primOpInfo (VecShuffleOp WordVec 16 W32) = mkGenPrimOp (fsLit "shuffleWord32X16#") [] [word32X16PrimTy, word32X16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word32X16PrimTy) primOpInfo (VecShuffleOp WordVec 8 W64) = mkGenPrimOp (fsLit "shuffleWord64X8#") [] [word64X8PrimTy, word64X8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (word64X8PrimTy) primOpInfo (VecShuffleOp FloatVec 4 W32) = mkGenPrimOp (fsLit "shuffleFloatX4#") [] [floatX4PrimTy, floatX4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (floatX4PrimTy) primOpInfo 
(VecShuffleOp FloatVec 2 W64) = mkGenPrimOp (fsLit "shuffleDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy])] (doubleX2PrimTy) primOpInfo (VecShuffleOp FloatVec 8 W32) = mkGenPrimOp (fsLit "shuffleFloatX8#") [] [floatX8PrimTy, floatX8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (floatX8PrimTy) primOpInfo (VecShuffleOp FloatVec 4 W64) = mkGenPrimOp (fsLit "shuffleDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (doubleX4PrimTy) primOpInfo (VecShuffleOp FloatVec 16 W32) = mkGenPrimOp (fsLit "shuffleFloatX16#") [] [floatX16PrimTy, floatX16PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (floatX16PrimTy) primOpInfo (VecShuffleOp FloatVec 8 W64) = mkGenPrimOp (fsLit "shuffleDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy, (mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy, intPrimTy])] (doubleX8PrimTy) primOpInfo (VecMinOp IntVec 16 W8) = mkGenPrimOp (fsLit "minInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecMinOp IntVec 8 W16) = mkGenPrimOp (fsLit "minInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecMinOp IntVec 4 W32) = mkGenPrimOp (fsLit "minInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecMinOp IntVec 2 W64) = mkGenPrimOp (fsLit "minInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecMinOp IntVec 32 W8) = mkGenPrimOp (fsLit "minInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecMinOp IntVec 16 W16) = mkGenPrimOp (fsLit "minInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecMinOp IntVec 8 W32) = mkGenPrimOp (fsLit "minInt32X8#") [] 
[int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecMinOp IntVec 4 W64) = mkGenPrimOp (fsLit "minInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecMinOp IntVec 64 W8) = mkGenPrimOp (fsLit "minInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecMinOp IntVec 32 W16) = mkGenPrimOp (fsLit "minInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecMinOp IntVec 16 W32) = mkGenPrimOp (fsLit "minInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecMinOp IntVec 8 W64) = mkGenPrimOp (fsLit "minInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecMinOp WordVec 16 W8) = mkGenPrimOp (fsLit "minWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecMinOp WordVec 8 W16) = mkGenPrimOp (fsLit "minWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecMinOp WordVec 4 W32) = mkGenPrimOp (fsLit "minWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecMinOp WordVec 2 W64) = mkGenPrimOp (fsLit "minWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecMinOp WordVec 32 W8) = mkGenPrimOp (fsLit "minWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecMinOp WordVec 16 W16) = mkGenPrimOp (fsLit "minWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecMinOp WordVec 8 W32) = mkGenPrimOp (fsLit "minWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecMinOp WordVec 4 W64) = mkGenPrimOp (fsLit "minWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecMinOp WordVec 64 W8) = mkGenPrimOp (fsLit "minWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecMinOp WordVec 32 W16) = mkGenPrimOp (fsLit "minWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecMinOp WordVec 16 W32) = mkGenPrimOp (fsLit 
"minWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecMinOp WordVec 8 W64) = mkGenPrimOp (fsLit "minWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecMinOp FloatVec 4 W32) = mkGenPrimOp (fsLit "minFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecMinOp FloatVec 2 W64) = mkGenPrimOp (fsLit "minDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecMinOp FloatVec 8 W32) = mkGenPrimOp (fsLit "minFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecMinOp FloatVec 4 W64) = mkGenPrimOp (fsLit "minDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecMinOp FloatVec 16 W32) = mkGenPrimOp (fsLit "minFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecMinOp FloatVec 8 W64) = mkGenPrimOp (fsLit "minDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecMaxOp IntVec 16 W8) = mkGenPrimOp (fsLit "maxInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecMaxOp IntVec 8 W16) = mkGenPrimOp (fsLit "maxInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecMaxOp IntVec 4 W32) = mkGenPrimOp (fsLit "maxInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecMaxOp IntVec 2 W64) = mkGenPrimOp (fsLit "maxInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecMaxOp IntVec 32 W8) = mkGenPrimOp (fsLit "maxInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecMaxOp IntVec 16 W16) = mkGenPrimOp (fsLit "maxInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecMaxOp IntVec 8 W32) = mkGenPrimOp (fsLit "maxInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecMaxOp IntVec 4 W64) = mkGenPrimOp (fsLit "maxInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecMaxOp IntVec 64 W8) = mkGenPrimOp (fsLit "maxInt8X64#") 
[] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecMaxOp IntVec 32 W16) = mkGenPrimOp (fsLit "maxInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecMaxOp IntVec 16 W32) = mkGenPrimOp (fsLit "maxInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecMaxOp IntVec 8 W64) = mkGenPrimOp (fsLit "maxInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecMaxOp WordVec 16 W8) = mkGenPrimOp (fsLit "maxWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecMaxOp WordVec 8 W16) = mkGenPrimOp (fsLit "maxWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecMaxOp WordVec 4 W32) = mkGenPrimOp (fsLit "maxWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecMaxOp WordVec 2 W64) = mkGenPrimOp (fsLit "maxWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecMaxOp WordVec 32 W8) = mkGenPrimOp (fsLit "maxWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecMaxOp WordVec 16 W16) = mkGenPrimOp (fsLit "maxWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecMaxOp WordVec 8 W32) = mkGenPrimOp (fsLit "maxWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecMaxOp WordVec 4 W64) = mkGenPrimOp (fsLit "maxWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecMaxOp WordVec 64 W8) = mkGenPrimOp (fsLit "maxWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecMaxOp WordVec 32 W16) = mkGenPrimOp (fsLit "maxWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecMaxOp WordVec 16 W32) = mkGenPrimOp (fsLit "maxWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecMaxOp WordVec 8 W64) = mkGenPrimOp (fsLit "maxWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecMaxOp FloatVec 4 W32) = 
mkGenPrimOp (fsLit "maxFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecMaxOp FloatVec 2 W64) = mkGenPrimOp (fsLit "maxDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecMaxOp FloatVec 8 W32) = mkGenPrimOp (fsLit "maxFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecMaxOp FloatVec 4 W64) = mkGenPrimOp (fsLit "maxDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecMaxOp FloatVec 16 W32) = mkGenPrimOp (fsLit "maxFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecMaxOp FloatVec 8 W64) = mkGenPrimOp (fsLit "maxDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo PrefetchByteArrayOp3 = mkGenPrimOp (fsLit "prefetchByteArray3#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp3 = mkGenPrimOp (fsLit "prefetchMutableByteArray3#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp3 = mkGenPrimOp (fsLit "prefetchAddr3#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp3 = mkGenPrimOp (fsLit "prefetchValue3#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp2 = mkGenPrimOp (fsLit "prefetchByteArray2#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp2 = mkGenPrimOp (fsLit "prefetchMutableByteArray2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp2 = mkGenPrimOp (fsLit "prefetchAddr2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp2 = mkGenPrimOp (fsLit "prefetchValue2#") 
[alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp1 = mkGenPrimOp (fsLit "prefetchByteArray1#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp1 = mkGenPrimOp (fsLit "prefetchMutableByteArray1#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp1 = mkGenPrimOp (fsLit "prefetchAddr1#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp1 = mkGenPrimOp (fsLit "prefetchValue1#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp0 = mkGenPrimOp (fsLit "prefetchByteArray0#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp0 = mkGenPrimOp (fsLit "prefetchMutableByteArray0#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp0 = mkGenPrimOp (fsLit "prefetchAddr0#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp0 = mkGenPrimOp (fsLit "prefetchValue0#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-strictness.hs-incl0000644000000000000000000000505107346545000026061 0ustar0000000000000000primOpStrictness AtomicModifyMutVar2Op = \ _arity -> mkClosedDmdSig [ topDmd, lazyApply1Dmd, topDmd ] topDiv primOpStrictness AtomicModifyMutVar_Op = \ _arity -> mkClosedDmdSig [ topDmd, lazyApply1Dmd, topDmd ] topDiv primOpStrictness CatchOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply2Dmd , topDmd] topDiv primOpStrictness RaiseOp = \ _arity -> mkClosedDmdSig [topDmd] 
botDiv primOpStrictness RaiseUnderflowOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness RaiseOverflowOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness RaiseDivZeroOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness RaiseIOOp = \ _arity -> mkClosedDmdSig [topDmd, topDmd] exnDiv primOpStrictness MaskAsyncExceptionsOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness MaskUninterruptibleOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness UnmaskAsyncExceptionsOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness PromptOp = \ _arity -> mkClosedDmdSig [topDmd, lazyApply1Dmd, topDmd] topDiv primOpStrictness Control0Op = \ _arity -> mkClosedDmdSig [topDmd, lazyApply2Dmd, topDmd] topDiv primOpStrictness AtomicallyOp = \ _arity -> mkClosedDmdSig [strictManyApply1Dmd,topDmd] topDiv primOpStrictness RetryOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness CatchRetryOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply1Dmd , topDmd ] topDiv primOpStrictness CatchSTMOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply2Dmd , topDmd ] topDiv primOpStrictness ForkOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , topDmd ] topDiv primOpStrictness ForkOnOp = \ _arity -> mkClosedDmdSig [ topDmd , lazyApply1Dmd , topDmd ] topDiv primOpStrictness KeepAliveOp = \ _arity -> mkClosedDmdSig [topDmd, topDmd, strictOnceApply1Dmd] topDiv primOpStrictness DataToTagSmallOp = \ _arity -> mkClosedDmdSig [evalDmd] topDiv primOpStrictness DataToTagLargeOp = \ _arity -> mkClosedDmdSig [evalDmd] topDiv primOpStrictness _thisOp = \ arity -> mkClosedDmdSig (replicate arity topDmd) topDiv ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-tag.hs-incl0000644000000000000000000016612707346545000024447 0ustar0000000000000000maxPrimOpTag :: Int maxPrimOpTag = 1491 primOpTag :: PrimOp -> Int primOpTag CharGtOp = 0 primOpTag CharGeOp 
= 1 primOpTag CharEqOp = 2 primOpTag CharNeOp = 3 primOpTag CharLtOp = 4 primOpTag CharLeOp = 5 primOpTag OrdOp = 6 primOpTag Int8ToIntOp = 7 primOpTag IntToInt8Op = 8 primOpTag Int8NegOp = 9 primOpTag Int8AddOp = 10 primOpTag Int8SubOp = 11 primOpTag Int8MulOp = 12 primOpTag Int8QuotOp = 13 primOpTag Int8RemOp = 14 primOpTag Int8QuotRemOp = 15 primOpTag Int8SllOp = 16 primOpTag Int8SraOp = 17 primOpTag Int8SrlOp = 18 primOpTag Int8ToWord8Op = 19 primOpTag Int8EqOp = 20 primOpTag Int8GeOp = 21 primOpTag Int8GtOp = 22 primOpTag Int8LeOp = 23 primOpTag Int8LtOp = 24 primOpTag Int8NeOp = 25 primOpTag Word8ToWordOp = 26 primOpTag WordToWord8Op = 27 primOpTag Word8AddOp = 28 primOpTag Word8SubOp = 29 primOpTag Word8MulOp = 30 primOpTag Word8QuotOp = 31 primOpTag Word8RemOp = 32 primOpTag Word8QuotRemOp = 33 primOpTag Word8AndOp = 34 primOpTag Word8OrOp = 35 primOpTag Word8XorOp = 36 primOpTag Word8NotOp = 37 primOpTag Word8SllOp = 38 primOpTag Word8SrlOp = 39 primOpTag Word8ToInt8Op = 40 primOpTag Word8EqOp = 41 primOpTag Word8GeOp = 42 primOpTag Word8GtOp = 43 primOpTag Word8LeOp = 44 primOpTag Word8LtOp = 45 primOpTag Word8NeOp = 46 primOpTag Int16ToIntOp = 47 primOpTag IntToInt16Op = 48 primOpTag Int16NegOp = 49 primOpTag Int16AddOp = 50 primOpTag Int16SubOp = 51 primOpTag Int16MulOp = 52 primOpTag Int16QuotOp = 53 primOpTag Int16RemOp = 54 primOpTag Int16QuotRemOp = 55 primOpTag Int16SllOp = 56 primOpTag Int16SraOp = 57 primOpTag Int16SrlOp = 58 primOpTag Int16ToWord16Op = 59 primOpTag Int16EqOp = 60 primOpTag Int16GeOp = 61 primOpTag Int16GtOp = 62 primOpTag Int16LeOp = 63 primOpTag Int16LtOp = 64 primOpTag Int16NeOp = 65 primOpTag Word16ToWordOp = 66 primOpTag WordToWord16Op = 67 primOpTag Word16AddOp = 68 primOpTag Word16SubOp = 69 primOpTag Word16MulOp = 70 primOpTag Word16QuotOp = 71 primOpTag Word16RemOp = 72 primOpTag Word16QuotRemOp = 73 primOpTag Word16AndOp = 74 primOpTag Word16OrOp = 75 primOpTag Word16XorOp = 76 primOpTag Word16NotOp = 77 primOpTag 
Word16SllOp = 78 primOpTag Word16SrlOp = 79 primOpTag Word16ToInt16Op = 80 primOpTag Word16EqOp = 81 primOpTag Word16GeOp = 82 primOpTag Word16GtOp = 83 primOpTag Word16LeOp = 84 primOpTag Word16LtOp = 85 primOpTag Word16NeOp = 86 primOpTag Int32ToIntOp = 87 primOpTag IntToInt32Op = 88 primOpTag Int32NegOp = 89 primOpTag Int32AddOp = 90 primOpTag Int32SubOp = 91 primOpTag Int32MulOp = 92 primOpTag Int32QuotOp = 93 primOpTag Int32RemOp = 94 primOpTag Int32QuotRemOp = 95 primOpTag Int32SllOp = 96 primOpTag Int32SraOp = 97 primOpTag Int32SrlOp = 98 primOpTag Int32ToWord32Op = 99 primOpTag Int32EqOp = 100 primOpTag Int32GeOp = 101 primOpTag Int32GtOp = 102 primOpTag Int32LeOp = 103 primOpTag Int32LtOp = 104 primOpTag Int32NeOp = 105 primOpTag Word32ToWordOp = 106 primOpTag WordToWord32Op = 107 primOpTag Word32AddOp = 108 primOpTag Word32SubOp = 109 primOpTag Word32MulOp = 110 primOpTag Word32QuotOp = 111 primOpTag Word32RemOp = 112 primOpTag Word32QuotRemOp = 113 primOpTag Word32AndOp = 114 primOpTag Word32OrOp = 115 primOpTag Word32XorOp = 116 primOpTag Word32NotOp = 117 primOpTag Word32SllOp = 118 primOpTag Word32SrlOp = 119 primOpTag Word32ToInt32Op = 120 primOpTag Word32EqOp = 121 primOpTag Word32GeOp = 122 primOpTag Word32GtOp = 123 primOpTag Word32LeOp = 124 primOpTag Word32LtOp = 125 primOpTag Word32NeOp = 126 primOpTag Int64ToIntOp = 127 primOpTag IntToInt64Op = 128 primOpTag Int64NegOp = 129 primOpTag Int64AddOp = 130 primOpTag Int64SubOp = 131 primOpTag Int64MulOp = 132 primOpTag Int64QuotOp = 133 primOpTag Int64RemOp = 134 primOpTag Int64SllOp = 135 primOpTag Int64SraOp = 136 primOpTag Int64SrlOp = 137 primOpTag Int64ToWord64Op = 138 primOpTag Int64EqOp = 139 primOpTag Int64GeOp = 140 primOpTag Int64GtOp = 141 primOpTag Int64LeOp = 142 primOpTag Int64LtOp = 143 primOpTag Int64NeOp = 144 primOpTag Word64ToWordOp = 145 primOpTag WordToWord64Op = 146 primOpTag Word64AddOp = 147 primOpTag Word64SubOp = 148 primOpTag Word64MulOp = 149 primOpTag Word64QuotOp = 150 
primOpTag Word64RemOp = 151 primOpTag Word64AndOp = 152 primOpTag Word64OrOp = 153 primOpTag Word64XorOp = 154 primOpTag Word64NotOp = 155 primOpTag Word64SllOp = 156 primOpTag Word64SrlOp = 157 primOpTag Word64ToInt64Op = 158 primOpTag Word64EqOp = 159 primOpTag Word64GeOp = 160 primOpTag Word64GtOp = 161 primOpTag Word64LeOp = 162 primOpTag Word64LtOp = 163 primOpTag Word64NeOp = 164 primOpTag IntAddOp = 165 primOpTag IntSubOp = 166 primOpTag IntMulOp = 167 primOpTag IntMul2Op = 168 primOpTag IntMulMayOfloOp = 169 primOpTag IntQuotOp = 170 primOpTag IntRemOp = 171 primOpTag IntQuotRemOp = 172 primOpTag IntAndOp = 173 primOpTag IntOrOp = 174 primOpTag IntXorOp = 175 primOpTag IntNotOp = 176 primOpTag IntNegOp = 177 primOpTag IntAddCOp = 178 primOpTag IntSubCOp = 179 primOpTag IntGtOp = 180 primOpTag IntGeOp = 181 primOpTag IntEqOp = 182 primOpTag IntNeOp = 183 primOpTag IntLtOp = 184 primOpTag IntLeOp = 185 primOpTag ChrOp = 186 primOpTag IntToWordOp = 187 primOpTag IntToFloatOp = 188 primOpTag IntToDoubleOp = 189 primOpTag WordToFloatOp = 190 primOpTag WordToDoubleOp = 191 primOpTag IntSllOp = 192 primOpTag IntSraOp = 193 primOpTag IntSrlOp = 194 primOpTag WordAddOp = 195 primOpTag WordAddCOp = 196 primOpTag WordSubCOp = 197 primOpTag WordAdd2Op = 198 primOpTag WordSubOp = 199 primOpTag WordMulOp = 200 primOpTag WordMul2Op = 201 primOpTag WordQuotOp = 202 primOpTag WordRemOp = 203 primOpTag WordQuotRemOp = 204 primOpTag WordQuotRem2Op = 205 primOpTag WordAndOp = 206 primOpTag WordOrOp = 207 primOpTag WordXorOp = 208 primOpTag WordNotOp = 209 primOpTag WordSllOp = 210 primOpTag WordSrlOp = 211 primOpTag WordToIntOp = 212 primOpTag WordGtOp = 213 primOpTag WordGeOp = 214 primOpTag WordEqOp = 215 primOpTag WordNeOp = 216 primOpTag WordLtOp = 217 primOpTag WordLeOp = 218 primOpTag PopCnt8Op = 219 primOpTag PopCnt16Op = 220 primOpTag PopCnt32Op = 221 primOpTag PopCnt64Op = 222 primOpTag PopCntOp = 223 primOpTag Pdep8Op = 224 primOpTag Pdep16Op = 225 primOpTag Pdep32Op 
= 226 primOpTag Pdep64Op = 227 primOpTag PdepOp = 228 primOpTag Pext8Op = 229 primOpTag Pext16Op = 230 primOpTag Pext32Op = 231 primOpTag Pext64Op = 232 primOpTag PextOp = 233 primOpTag Clz8Op = 234 primOpTag Clz16Op = 235 primOpTag Clz32Op = 236 primOpTag Clz64Op = 237 primOpTag ClzOp = 238 primOpTag Ctz8Op = 239 primOpTag Ctz16Op = 240 primOpTag Ctz32Op = 241 primOpTag Ctz64Op = 242 primOpTag CtzOp = 243 primOpTag BSwap16Op = 244 primOpTag BSwap32Op = 245 primOpTag BSwap64Op = 246 primOpTag BSwapOp = 247 primOpTag BRev8Op = 248 primOpTag BRev16Op = 249 primOpTag BRev32Op = 250 primOpTag BRev64Op = 251 primOpTag BRevOp = 252 primOpTag Narrow8IntOp = 253 primOpTag Narrow16IntOp = 254 primOpTag Narrow32IntOp = 255 primOpTag Narrow8WordOp = 256 primOpTag Narrow16WordOp = 257 primOpTag Narrow32WordOp = 258 primOpTag DoubleGtOp = 259 primOpTag DoubleGeOp = 260 primOpTag DoubleEqOp = 261 primOpTag DoubleNeOp = 262 primOpTag DoubleLtOp = 263 primOpTag DoubleLeOp = 264 primOpTag DoubleMinOp = 265 primOpTag DoubleMaxOp = 266 primOpTag DoubleAddOp = 267 primOpTag DoubleSubOp = 268 primOpTag DoubleMulOp = 269 primOpTag DoubleDivOp = 270 primOpTag DoubleNegOp = 271 primOpTag DoubleFabsOp = 272 primOpTag DoubleToIntOp = 273 primOpTag DoubleToFloatOp = 274 primOpTag DoubleExpOp = 275 primOpTag DoubleExpM1Op = 276 primOpTag DoubleLogOp = 277 primOpTag DoubleLog1POp = 278 primOpTag DoubleSqrtOp = 279 primOpTag DoubleSinOp = 280 primOpTag DoubleCosOp = 281 primOpTag DoubleTanOp = 282 primOpTag DoubleAsinOp = 283 primOpTag DoubleAcosOp = 284 primOpTag DoubleAtanOp = 285 primOpTag DoubleSinhOp = 286 primOpTag DoubleCoshOp = 287 primOpTag DoubleTanhOp = 288 primOpTag DoubleAsinhOp = 289 primOpTag DoubleAcoshOp = 290 primOpTag DoubleAtanhOp = 291 primOpTag DoublePowerOp = 292 primOpTag DoubleDecode_2IntOp = 293 primOpTag DoubleDecode_Int64Op = 294 primOpTag CastDoubleToWord64Op = 295 primOpTag CastWord64ToDoubleOp = 296 primOpTag FloatGtOp = 297 primOpTag FloatGeOp = 298 primOpTag 
FloatEqOp = 299 primOpTag FloatNeOp = 300 primOpTag FloatLtOp = 301 primOpTag FloatLeOp = 302 primOpTag FloatMinOp = 303 primOpTag FloatMaxOp = 304 primOpTag FloatAddOp = 305 primOpTag FloatSubOp = 306 primOpTag FloatMulOp = 307 primOpTag FloatDivOp = 308 primOpTag FloatNegOp = 309 primOpTag FloatFabsOp = 310 primOpTag FloatToIntOp = 311 primOpTag FloatExpOp = 312 primOpTag FloatExpM1Op = 313 primOpTag FloatLogOp = 314 primOpTag FloatLog1POp = 315 primOpTag FloatSqrtOp = 316 primOpTag FloatSinOp = 317 primOpTag FloatCosOp = 318 primOpTag FloatTanOp = 319 primOpTag FloatAsinOp = 320 primOpTag FloatAcosOp = 321 primOpTag FloatAtanOp = 322 primOpTag FloatSinhOp = 323 primOpTag FloatCoshOp = 324 primOpTag FloatTanhOp = 325 primOpTag FloatAsinhOp = 326 primOpTag FloatAcoshOp = 327 primOpTag FloatAtanhOp = 328 primOpTag FloatPowerOp = 329 primOpTag FloatToDoubleOp = 330 primOpTag FloatDecode_IntOp = 331 primOpTag CastFloatToWord32Op = 332 primOpTag CastWord32ToFloatOp = 333 primOpTag FloatFMAdd = 334 primOpTag FloatFMSub = 335 primOpTag FloatFNMAdd = 336 primOpTag FloatFNMSub = 337 primOpTag DoubleFMAdd = 338 primOpTag DoubleFMSub = 339 primOpTag DoubleFNMAdd = 340 primOpTag DoubleFNMSub = 341 primOpTag NewArrayOp = 342 primOpTag ReadArrayOp = 343 primOpTag WriteArrayOp = 344 primOpTag SizeofArrayOp = 345 primOpTag SizeofMutableArrayOp = 346 primOpTag IndexArrayOp = 347 primOpTag UnsafeFreezeArrayOp = 348 primOpTag UnsafeThawArrayOp = 349 primOpTag CopyArrayOp = 350 primOpTag CopyMutableArrayOp = 351 primOpTag CloneArrayOp = 352 primOpTag CloneMutableArrayOp = 353 primOpTag FreezeArrayOp = 354 primOpTag ThawArrayOp = 355 primOpTag CasArrayOp = 356 primOpTag NewSmallArrayOp = 357 primOpTag ShrinkSmallMutableArrayOp_Char = 358 primOpTag ReadSmallArrayOp = 359 primOpTag WriteSmallArrayOp = 360 primOpTag SizeofSmallArrayOp = 361 primOpTag SizeofSmallMutableArrayOp = 362 primOpTag GetSizeofSmallMutableArrayOp = 363 primOpTag IndexSmallArrayOp = 364 primOpTag 
UnsafeFreezeSmallArrayOp = 365 primOpTag UnsafeThawSmallArrayOp = 366 primOpTag CopySmallArrayOp = 367 primOpTag CopySmallMutableArrayOp = 368 primOpTag CloneSmallArrayOp = 369 primOpTag CloneSmallMutableArrayOp = 370 primOpTag FreezeSmallArrayOp = 371 primOpTag ThawSmallArrayOp = 372 primOpTag CasSmallArrayOp = 373 primOpTag NewByteArrayOp_Char = 374 primOpTag NewPinnedByteArrayOp_Char = 375 primOpTag NewAlignedPinnedByteArrayOp_Char = 376 primOpTag MutableByteArrayIsPinnedOp = 377 primOpTag ByteArrayIsPinnedOp = 378 primOpTag ByteArrayIsWeaklyPinnedOp = 379 primOpTag MutableByteArrayIsWeaklyPinnedOp = 380 primOpTag ByteArrayContents_Char = 381 primOpTag MutableByteArrayContents_Char = 382 primOpTag ShrinkMutableByteArrayOp_Char = 383 primOpTag ResizeMutableByteArrayOp_Char = 384 primOpTag UnsafeFreezeByteArrayOp = 385 primOpTag UnsafeThawByteArrayOp = 386 primOpTag SizeofByteArrayOp = 387 primOpTag SizeofMutableByteArrayOp = 388 primOpTag GetSizeofMutableByteArrayOp = 389 primOpTag IndexByteArrayOp_Char = 390 primOpTag IndexByteArrayOp_WideChar = 391 primOpTag IndexByteArrayOp_Int = 392 primOpTag IndexByteArrayOp_Word = 393 primOpTag IndexByteArrayOp_Addr = 394 primOpTag IndexByteArrayOp_Float = 395 primOpTag IndexByteArrayOp_Double = 396 primOpTag IndexByteArrayOp_StablePtr = 397 primOpTag IndexByteArrayOp_Int8 = 398 primOpTag IndexByteArrayOp_Word8 = 399 primOpTag IndexByteArrayOp_Int16 = 400 primOpTag IndexByteArrayOp_Word16 = 401 primOpTag IndexByteArrayOp_Int32 = 402 primOpTag IndexByteArrayOp_Word32 = 403 primOpTag IndexByteArrayOp_Int64 = 404 primOpTag IndexByteArrayOp_Word64 = 405 primOpTag IndexByteArrayOp_Word8AsChar = 406 primOpTag IndexByteArrayOp_Word8AsWideChar = 407 primOpTag IndexByteArrayOp_Word8AsInt = 408 primOpTag IndexByteArrayOp_Word8AsWord = 409 primOpTag IndexByteArrayOp_Word8AsAddr = 410 primOpTag IndexByteArrayOp_Word8AsFloat = 411 primOpTag IndexByteArrayOp_Word8AsDouble = 412 primOpTag IndexByteArrayOp_Word8AsStablePtr = 413 primOpTag 
IndexByteArrayOp_Word8AsInt16 = 414 primOpTag IndexByteArrayOp_Word8AsWord16 = 415 primOpTag IndexByteArrayOp_Word8AsInt32 = 416 primOpTag IndexByteArrayOp_Word8AsWord32 = 417 primOpTag IndexByteArrayOp_Word8AsInt64 = 418 primOpTag IndexByteArrayOp_Word8AsWord64 = 419 primOpTag ReadByteArrayOp_Char = 420 primOpTag ReadByteArrayOp_WideChar = 421 primOpTag ReadByteArrayOp_Int = 422 primOpTag ReadByteArrayOp_Word = 423 primOpTag ReadByteArrayOp_Addr = 424 primOpTag ReadByteArrayOp_Float = 425 primOpTag ReadByteArrayOp_Double = 426 primOpTag ReadByteArrayOp_StablePtr = 427 primOpTag ReadByteArrayOp_Int8 = 428 primOpTag ReadByteArrayOp_Word8 = 429 primOpTag ReadByteArrayOp_Int16 = 430 primOpTag ReadByteArrayOp_Word16 = 431 primOpTag ReadByteArrayOp_Int32 = 432 primOpTag ReadByteArrayOp_Word32 = 433 primOpTag ReadByteArrayOp_Int64 = 434 primOpTag ReadByteArrayOp_Word64 = 435 primOpTag ReadByteArrayOp_Word8AsChar = 436 primOpTag ReadByteArrayOp_Word8AsWideChar = 437 primOpTag ReadByteArrayOp_Word8AsInt = 438 primOpTag ReadByteArrayOp_Word8AsWord = 439 primOpTag ReadByteArrayOp_Word8AsAddr = 440 primOpTag ReadByteArrayOp_Word8AsFloat = 441 primOpTag ReadByteArrayOp_Word8AsDouble = 442 primOpTag ReadByteArrayOp_Word8AsStablePtr = 443 primOpTag ReadByteArrayOp_Word8AsInt16 = 444 primOpTag ReadByteArrayOp_Word8AsWord16 = 445 primOpTag ReadByteArrayOp_Word8AsInt32 = 446 primOpTag ReadByteArrayOp_Word8AsWord32 = 447 primOpTag ReadByteArrayOp_Word8AsInt64 = 448 primOpTag ReadByteArrayOp_Word8AsWord64 = 449 primOpTag WriteByteArrayOp_Char = 450 primOpTag WriteByteArrayOp_WideChar = 451 primOpTag WriteByteArrayOp_Int = 452 primOpTag WriteByteArrayOp_Word = 453 primOpTag WriteByteArrayOp_Addr = 454 primOpTag WriteByteArrayOp_Float = 455 primOpTag WriteByteArrayOp_Double = 456 primOpTag WriteByteArrayOp_StablePtr = 457 primOpTag WriteByteArrayOp_Int8 = 458 primOpTag WriteByteArrayOp_Word8 = 459 primOpTag WriteByteArrayOp_Int16 = 460 primOpTag WriteByteArrayOp_Word16 = 461 primOpTag 
WriteByteArrayOp_Int32 = 462 primOpTag WriteByteArrayOp_Word32 = 463 primOpTag WriteByteArrayOp_Int64 = 464 primOpTag WriteByteArrayOp_Word64 = 465 primOpTag WriteByteArrayOp_Word8AsChar = 466 primOpTag WriteByteArrayOp_Word8AsWideChar = 467 primOpTag WriteByteArrayOp_Word8AsInt = 468 primOpTag WriteByteArrayOp_Word8AsWord = 469 primOpTag WriteByteArrayOp_Word8AsAddr = 470 primOpTag WriteByteArrayOp_Word8AsFloat = 471 primOpTag WriteByteArrayOp_Word8AsDouble = 472 primOpTag WriteByteArrayOp_Word8AsStablePtr = 473 primOpTag WriteByteArrayOp_Word8AsInt16 = 474 primOpTag WriteByteArrayOp_Word8AsWord16 = 475 primOpTag WriteByteArrayOp_Word8AsInt32 = 476 primOpTag WriteByteArrayOp_Word8AsWord32 = 477 primOpTag WriteByteArrayOp_Word8AsInt64 = 478 primOpTag WriteByteArrayOp_Word8AsWord64 = 479 primOpTag CompareByteArraysOp = 480 primOpTag CopyByteArrayOp = 481 primOpTag CopyMutableByteArrayOp = 482 primOpTag CopyMutableByteArrayNonOverlappingOp = 483 primOpTag CopyByteArrayToAddrOp = 484 primOpTag CopyMutableByteArrayToAddrOp = 485 primOpTag CopyAddrToByteArrayOp = 486 primOpTag CopyAddrToAddrOp = 487 primOpTag CopyAddrToAddrNonOverlappingOp = 488 primOpTag SetByteArrayOp = 489 primOpTag SetAddrRangeOp = 490 primOpTag AtomicReadByteArrayOp_Int = 491 primOpTag AtomicWriteByteArrayOp_Int = 492 primOpTag CasByteArrayOp_Int = 493 primOpTag CasByteArrayOp_Int8 = 494 primOpTag CasByteArrayOp_Int16 = 495 primOpTag CasByteArrayOp_Int32 = 496 primOpTag CasByteArrayOp_Int64 = 497 primOpTag FetchAddByteArrayOp_Int = 498 primOpTag FetchSubByteArrayOp_Int = 499 primOpTag FetchAndByteArrayOp_Int = 500 primOpTag FetchNandByteArrayOp_Int = 501 primOpTag FetchOrByteArrayOp_Int = 502 primOpTag FetchXorByteArrayOp_Int = 503 primOpTag AddrAddOp = 504 primOpTag AddrSubOp = 505 primOpTag AddrRemOp = 506 primOpTag AddrToIntOp = 507 primOpTag IntToAddrOp = 508 primOpTag AddrGtOp = 509 primOpTag AddrGeOp = 510 primOpTag AddrEqOp = 511 primOpTag AddrNeOp = 512 primOpTag AddrLtOp = 513 primOpTag 
AddrLeOp = 514 primOpTag IndexOffAddrOp_Char = 515 primOpTag IndexOffAddrOp_WideChar = 516 primOpTag IndexOffAddrOp_Int = 517 primOpTag IndexOffAddrOp_Word = 518 primOpTag IndexOffAddrOp_Addr = 519 primOpTag IndexOffAddrOp_Float = 520 primOpTag IndexOffAddrOp_Double = 521 primOpTag IndexOffAddrOp_StablePtr = 522 primOpTag IndexOffAddrOp_Int8 = 523 primOpTag IndexOffAddrOp_Word8 = 524 primOpTag IndexOffAddrOp_Int16 = 525 primOpTag IndexOffAddrOp_Word16 = 526 primOpTag IndexOffAddrOp_Int32 = 527 primOpTag IndexOffAddrOp_Word32 = 528 primOpTag IndexOffAddrOp_Int64 = 529 primOpTag IndexOffAddrOp_Word64 = 530 primOpTag IndexOffAddrOp_Word8AsChar = 531 primOpTag IndexOffAddrOp_Word8AsWideChar = 532 primOpTag IndexOffAddrOp_Word8AsInt = 533 primOpTag IndexOffAddrOp_Word8AsWord = 534 primOpTag IndexOffAddrOp_Word8AsAddr = 535 primOpTag IndexOffAddrOp_Word8AsFloat = 536 primOpTag IndexOffAddrOp_Word8AsDouble = 537 primOpTag IndexOffAddrOp_Word8AsStablePtr = 538 primOpTag IndexOffAddrOp_Word8AsInt16 = 539 primOpTag IndexOffAddrOp_Word8AsWord16 = 540 primOpTag IndexOffAddrOp_Word8AsInt32 = 541 primOpTag IndexOffAddrOp_Word8AsWord32 = 542 primOpTag IndexOffAddrOp_Word8AsInt64 = 543 primOpTag IndexOffAddrOp_Word8AsWord64 = 544 primOpTag ReadOffAddrOp_Char = 545 primOpTag ReadOffAddrOp_WideChar = 546 primOpTag ReadOffAddrOp_Int = 547 primOpTag ReadOffAddrOp_Word = 548 primOpTag ReadOffAddrOp_Addr = 549 primOpTag ReadOffAddrOp_Float = 550 primOpTag ReadOffAddrOp_Double = 551 primOpTag ReadOffAddrOp_StablePtr = 552 primOpTag ReadOffAddrOp_Int8 = 553 primOpTag ReadOffAddrOp_Word8 = 554 primOpTag ReadOffAddrOp_Int16 = 555 primOpTag ReadOffAddrOp_Word16 = 556 primOpTag ReadOffAddrOp_Int32 = 557 primOpTag ReadOffAddrOp_Word32 = 558 primOpTag ReadOffAddrOp_Int64 = 559 primOpTag ReadOffAddrOp_Word64 = 560 primOpTag ReadOffAddrOp_Word8AsChar = 561 primOpTag ReadOffAddrOp_Word8AsWideChar = 562 primOpTag ReadOffAddrOp_Word8AsInt = 563 primOpTag ReadOffAddrOp_Word8AsWord = 564 primOpTag 
ReadOffAddrOp_Word8AsAddr = 565 primOpTag ReadOffAddrOp_Word8AsFloat = 566 primOpTag ReadOffAddrOp_Word8AsDouble = 567 primOpTag ReadOffAddrOp_Word8AsStablePtr = 568 primOpTag ReadOffAddrOp_Word8AsInt16 = 569 primOpTag ReadOffAddrOp_Word8AsWord16 = 570 primOpTag ReadOffAddrOp_Word8AsInt32 = 571 primOpTag ReadOffAddrOp_Word8AsWord32 = 572 primOpTag ReadOffAddrOp_Word8AsInt64 = 573 primOpTag ReadOffAddrOp_Word8AsWord64 = 574 primOpTag WriteOffAddrOp_Char = 575 primOpTag WriteOffAddrOp_WideChar = 576 primOpTag WriteOffAddrOp_Int = 577 primOpTag WriteOffAddrOp_Word = 578 primOpTag WriteOffAddrOp_Addr = 579 primOpTag WriteOffAddrOp_Float = 580 primOpTag WriteOffAddrOp_Double = 581 primOpTag WriteOffAddrOp_StablePtr = 582 primOpTag WriteOffAddrOp_Int8 = 583 primOpTag WriteOffAddrOp_Word8 = 584 primOpTag WriteOffAddrOp_Int16 = 585 primOpTag WriteOffAddrOp_Word16 = 586 primOpTag WriteOffAddrOp_Int32 = 587 primOpTag WriteOffAddrOp_Word32 = 588 primOpTag WriteOffAddrOp_Int64 = 589 primOpTag WriteOffAddrOp_Word64 = 590 primOpTag WriteOffAddrOp_Word8AsChar = 591 primOpTag WriteOffAddrOp_Word8AsWideChar = 592 primOpTag WriteOffAddrOp_Word8AsInt = 593 primOpTag WriteOffAddrOp_Word8AsWord = 594 primOpTag WriteOffAddrOp_Word8AsAddr = 595 primOpTag WriteOffAddrOp_Word8AsFloat = 596 primOpTag WriteOffAddrOp_Word8AsDouble = 597 primOpTag WriteOffAddrOp_Word8AsStablePtr = 598 primOpTag WriteOffAddrOp_Word8AsInt16 = 599 primOpTag WriteOffAddrOp_Word8AsWord16 = 600 primOpTag WriteOffAddrOp_Word8AsInt32 = 601 primOpTag WriteOffAddrOp_Word8AsWord32 = 602 primOpTag WriteOffAddrOp_Word8AsInt64 = 603 primOpTag WriteOffAddrOp_Word8AsWord64 = 604 primOpTag InterlockedExchange_Addr = 605 primOpTag InterlockedExchange_Word = 606 primOpTag CasAddrOp_Addr = 607 primOpTag CasAddrOp_Word = 608 primOpTag CasAddrOp_Word8 = 609 primOpTag CasAddrOp_Word16 = 610 primOpTag CasAddrOp_Word32 = 611 primOpTag CasAddrOp_Word64 = 612 primOpTag FetchAddAddrOp_Word = 613 primOpTag FetchSubAddrOp_Word = 614 
primOpTag FetchAndAddrOp_Word = 615 primOpTag FetchNandAddrOp_Word = 616 primOpTag FetchOrAddrOp_Word = 617 primOpTag FetchXorAddrOp_Word = 618 primOpTag AtomicReadAddrOp_Word = 619 primOpTag AtomicWriteAddrOp_Word = 620 primOpTag NewMutVarOp = 621 primOpTag ReadMutVarOp = 622 primOpTag WriteMutVarOp = 623 primOpTag AtomicSwapMutVarOp = 624 primOpTag AtomicModifyMutVar2Op = 625 primOpTag AtomicModifyMutVar_Op = 626 primOpTag CasMutVarOp = 627 primOpTag CatchOp = 628 primOpTag RaiseOp = 629 primOpTag RaiseUnderflowOp = 630 primOpTag RaiseOverflowOp = 631 primOpTag RaiseDivZeroOp = 632 primOpTag RaiseIOOp = 633 primOpTag MaskAsyncExceptionsOp = 634 primOpTag MaskUninterruptibleOp = 635 primOpTag UnmaskAsyncExceptionsOp = 636 primOpTag MaskStatus = 637 primOpTag NewPromptTagOp = 638 primOpTag PromptOp = 639 primOpTag Control0Op = 640 primOpTag AtomicallyOp = 641 primOpTag RetryOp = 642 primOpTag CatchRetryOp = 643 primOpTag CatchSTMOp = 644 primOpTag NewTVarOp = 645 primOpTag ReadTVarOp = 646 primOpTag ReadTVarIOOp = 647 primOpTag WriteTVarOp = 648 primOpTag NewMVarOp = 649 primOpTag TakeMVarOp = 650 primOpTag TryTakeMVarOp = 651 primOpTag PutMVarOp = 652 primOpTag TryPutMVarOp = 653 primOpTag ReadMVarOp = 654 primOpTag TryReadMVarOp = 655 primOpTag IsEmptyMVarOp = 656 primOpTag NewIOPortOp = 657 primOpTag ReadIOPortOp = 658 primOpTag WriteIOPortOp = 659 primOpTag DelayOp = 660 primOpTag WaitReadOp = 661 primOpTag WaitWriteOp = 662 primOpTag ForkOp = 663 primOpTag ForkOnOp = 664 primOpTag KillThreadOp = 665 primOpTag YieldOp = 666 primOpTag MyThreadIdOp = 667 primOpTag LabelThreadOp = 668 primOpTag IsCurrentThreadBoundOp = 669 primOpTag NoDuplicateOp = 670 primOpTag GetThreadLabelOp = 671 primOpTag ThreadStatusOp = 672 primOpTag ListThreadsOp = 673 primOpTag MkWeakOp = 674 primOpTag MkWeakNoFinalizerOp = 675 primOpTag AddCFinalizerToWeakOp = 676 primOpTag DeRefWeakOp = 677 primOpTag FinalizeWeakOp = 678 primOpTag TouchOp = 679 primOpTag MakeStablePtrOp = 680 primOpTag 
DeRefStablePtrOp = 681 primOpTag EqStablePtrOp = 682 primOpTag MakeStableNameOp = 683 primOpTag StableNameToIntOp = 684 primOpTag CompactNewOp = 685 primOpTag CompactResizeOp = 686 primOpTag CompactContainsOp = 687 primOpTag CompactContainsAnyOp = 688 primOpTag CompactGetFirstBlockOp = 689 primOpTag CompactGetNextBlockOp = 690 primOpTag CompactAllocateBlockOp = 691 primOpTag CompactFixupPointersOp = 692 primOpTag CompactAdd = 693 primOpTag CompactAddWithSharing = 694 primOpTag CompactSize = 695 primOpTag ReallyUnsafePtrEqualityOp = 696 primOpTag ParOp = 697 primOpTag SparkOp = 698 primOpTag GetSparkOp = 699 primOpTag NumSparks = 700 primOpTag KeepAliveOp = 701 primOpTag DataToTagSmallOp = 702 primOpTag DataToTagLargeOp = 703 primOpTag TagToEnumOp = 704 primOpTag AddrToAnyOp = 705 primOpTag AnyToAddrOp = 706 primOpTag MkApUpd0_Op = 707 primOpTag NewBCOOp = 708 primOpTag UnpackClosureOp = 709 primOpTag ClosureSizeOp = 710 primOpTag GetApStackValOp = 711 primOpTag GetCCSOfOp = 712 primOpTag GetCurrentCCSOp = 713 primOpTag ClearCCSOp = 714 primOpTag WhereFromOp = 715 primOpTag TraceEventOp = 716 primOpTag TraceEventBinaryOp = 717 primOpTag TraceMarkerOp = 718 primOpTag SetThreadAllocationCounter = 719 primOpTag (VecBroadcastOp IntVec 16 W8) = 720 primOpTag (VecBroadcastOp IntVec 8 W16) = 721 primOpTag (VecBroadcastOp IntVec 4 W32) = 722 primOpTag (VecBroadcastOp IntVec 2 W64) = 723 primOpTag (VecBroadcastOp IntVec 32 W8) = 724 primOpTag (VecBroadcastOp IntVec 16 W16) = 725 primOpTag (VecBroadcastOp IntVec 8 W32) = 726 primOpTag (VecBroadcastOp IntVec 4 W64) = 727 primOpTag (VecBroadcastOp IntVec 64 W8) = 728 primOpTag (VecBroadcastOp IntVec 32 W16) = 729 primOpTag (VecBroadcastOp IntVec 16 W32) = 730 primOpTag (VecBroadcastOp IntVec 8 W64) = 731 primOpTag (VecBroadcastOp WordVec 16 W8) = 732 primOpTag (VecBroadcastOp WordVec 8 W16) = 733 primOpTag (VecBroadcastOp WordVec 4 W32) = 734 primOpTag (VecBroadcastOp WordVec 2 W64) = 735 primOpTag (VecBroadcastOp WordVec 32 
W8) = 736 primOpTag (VecBroadcastOp WordVec 16 W16) = 737 primOpTag (VecBroadcastOp WordVec 8 W32) = 738 primOpTag (VecBroadcastOp WordVec 4 W64) = 739 primOpTag (VecBroadcastOp WordVec 64 W8) = 740 primOpTag (VecBroadcastOp WordVec 32 W16) = 741 primOpTag (VecBroadcastOp WordVec 16 W32) = 742 primOpTag (VecBroadcastOp WordVec 8 W64) = 743 primOpTag (VecBroadcastOp FloatVec 4 W32) = 744 primOpTag (VecBroadcastOp FloatVec 2 W64) = 745 primOpTag (VecBroadcastOp FloatVec 8 W32) = 746 primOpTag (VecBroadcastOp FloatVec 4 W64) = 747 primOpTag (VecBroadcastOp FloatVec 16 W32) = 748 primOpTag (VecBroadcastOp FloatVec 8 W64) = 749 primOpTag (VecPackOp IntVec 16 W8) = 750 primOpTag (VecPackOp IntVec 8 W16) = 751 primOpTag (VecPackOp IntVec 4 W32) = 752 primOpTag (VecPackOp IntVec 2 W64) = 753 primOpTag (VecPackOp IntVec 32 W8) = 754 primOpTag (VecPackOp IntVec 16 W16) = 755 primOpTag (VecPackOp IntVec 8 W32) = 756 primOpTag (VecPackOp IntVec 4 W64) = 757 primOpTag (VecPackOp IntVec 64 W8) = 758 primOpTag (VecPackOp IntVec 32 W16) = 759 primOpTag (VecPackOp IntVec 16 W32) = 760 primOpTag (VecPackOp IntVec 8 W64) = 761 primOpTag (VecPackOp WordVec 16 W8) = 762 primOpTag (VecPackOp WordVec 8 W16) = 763 primOpTag (VecPackOp WordVec 4 W32) = 764 primOpTag (VecPackOp WordVec 2 W64) = 765 primOpTag (VecPackOp WordVec 32 W8) = 766 primOpTag (VecPackOp WordVec 16 W16) = 767 primOpTag (VecPackOp WordVec 8 W32) = 768 primOpTag (VecPackOp WordVec 4 W64) = 769 primOpTag (VecPackOp WordVec 64 W8) = 770 primOpTag (VecPackOp WordVec 32 W16) = 771 primOpTag (VecPackOp WordVec 16 W32) = 772 primOpTag (VecPackOp WordVec 8 W64) = 773 primOpTag (VecPackOp FloatVec 4 W32) = 774 primOpTag (VecPackOp FloatVec 2 W64) = 775 primOpTag (VecPackOp FloatVec 8 W32) = 776 primOpTag (VecPackOp FloatVec 4 W64) = 777 primOpTag (VecPackOp FloatVec 16 W32) = 778 primOpTag (VecPackOp FloatVec 8 W64) = 779 primOpTag (VecUnpackOp IntVec 16 W8) = 780 primOpTag (VecUnpackOp IntVec 8 W16) = 781 primOpTag 
(VecUnpackOp IntVec 4 W32) = 782 primOpTag (VecUnpackOp IntVec 2 W64) = 783 primOpTag (VecUnpackOp IntVec 32 W8) = 784 primOpTag (VecUnpackOp IntVec 16 W16) = 785 primOpTag (VecUnpackOp IntVec 8 W32) = 786 primOpTag (VecUnpackOp IntVec 4 W64) = 787 primOpTag (VecUnpackOp IntVec 64 W8) = 788 primOpTag (VecUnpackOp IntVec 32 W16) = 789 primOpTag (VecUnpackOp IntVec 16 W32) = 790 primOpTag (VecUnpackOp IntVec 8 W64) = 791 primOpTag (VecUnpackOp WordVec 16 W8) = 792 primOpTag (VecUnpackOp WordVec 8 W16) = 793 primOpTag (VecUnpackOp WordVec 4 W32) = 794 primOpTag (VecUnpackOp WordVec 2 W64) = 795 primOpTag (VecUnpackOp WordVec 32 W8) = 796 primOpTag (VecUnpackOp WordVec 16 W16) = 797 primOpTag (VecUnpackOp WordVec 8 W32) = 798 primOpTag (VecUnpackOp WordVec 4 W64) = 799 primOpTag (VecUnpackOp WordVec 64 W8) = 800 primOpTag (VecUnpackOp WordVec 32 W16) = 801 primOpTag (VecUnpackOp WordVec 16 W32) = 802 primOpTag (VecUnpackOp WordVec 8 W64) = 803 primOpTag (VecUnpackOp FloatVec 4 W32) = 804 primOpTag (VecUnpackOp FloatVec 2 W64) = 805 primOpTag (VecUnpackOp FloatVec 8 W32) = 806 primOpTag (VecUnpackOp FloatVec 4 W64) = 807 primOpTag (VecUnpackOp FloatVec 16 W32) = 808 primOpTag (VecUnpackOp FloatVec 8 W64) = 809 primOpTag (VecInsertOp IntVec 16 W8) = 810 primOpTag (VecInsertOp IntVec 8 W16) = 811 primOpTag (VecInsertOp IntVec 4 W32) = 812 primOpTag (VecInsertOp IntVec 2 W64) = 813 primOpTag (VecInsertOp IntVec 32 W8) = 814 primOpTag (VecInsertOp IntVec 16 W16) = 815 primOpTag (VecInsertOp IntVec 8 W32) = 816 primOpTag (VecInsertOp IntVec 4 W64) = 817 primOpTag (VecInsertOp IntVec 64 W8) = 818 primOpTag (VecInsertOp IntVec 32 W16) = 819 primOpTag (VecInsertOp IntVec 16 W32) = 820 primOpTag (VecInsertOp IntVec 8 W64) = 821 primOpTag (VecInsertOp WordVec 16 W8) = 822 primOpTag (VecInsertOp WordVec 8 W16) = 823 primOpTag (VecInsertOp WordVec 4 W32) = 824 primOpTag (VecInsertOp WordVec 2 W64) = 825 primOpTag (VecInsertOp WordVec 32 W8) = 826 primOpTag (VecInsertOp WordVec 16 
W16) = 827 primOpTag (VecInsertOp WordVec 8 W32) = 828 primOpTag (VecInsertOp WordVec 4 W64) = 829 primOpTag (VecInsertOp WordVec 64 W8) = 830 primOpTag (VecInsertOp WordVec 32 W16) = 831 primOpTag (VecInsertOp WordVec 16 W32) = 832 primOpTag (VecInsertOp WordVec 8 W64) = 833 primOpTag (VecInsertOp FloatVec 4 W32) = 834 primOpTag (VecInsertOp FloatVec 2 W64) = 835 primOpTag (VecInsertOp FloatVec 8 W32) = 836 primOpTag (VecInsertOp FloatVec 4 W64) = 837 primOpTag (VecInsertOp FloatVec 16 W32) = 838 primOpTag (VecInsertOp FloatVec 8 W64) = 839 primOpTag (VecAddOp IntVec 16 W8) = 840 primOpTag (VecAddOp IntVec 8 W16) = 841 primOpTag (VecAddOp IntVec 4 W32) = 842 primOpTag (VecAddOp IntVec 2 W64) = 843 primOpTag (VecAddOp IntVec 32 W8) = 844 primOpTag (VecAddOp IntVec 16 W16) = 845 primOpTag (VecAddOp IntVec 8 W32) = 846 primOpTag (VecAddOp IntVec 4 W64) = 847 primOpTag (VecAddOp IntVec 64 W8) = 848 primOpTag (VecAddOp IntVec 32 W16) = 849 primOpTag (VecAddOp IntVec 16 W32) = 850 primOpTag (VecAddOp IntVec 8 W64) = 851 primOpTag (VecAddOp WordVec 16 W8) = 852 primOpTag (VecAddOp WordVec 8 W16) = 853 primOpTag (VecAddOp WordVec 4 W32) = 854 primOpTag (VecAddOp WordVec 2 W64) = 855 primOpTag (VecAddOp WordVec 32 W8) = 856 primOpTag (VecAddOp WordVec 16 W16) = 857 primOpTag (VecAddOp WordVec 8 W32) = 858 primOpTag (VecAddOp WordVec 4 W64) = 859 primOpTag (VecAddOp WordVec 64 W8) = 860 primOpTag (VecAddOp WordVec 32 W16) = 861 primOpTag (VecAddOp WordVec 16 W32) = 862 primOpTag (VecAddOp WordVec 8 W64) = 863 primOpTag (VecAddOp FloatVec 4 W32) = 864 primOpTag (VecAddOp FloatVec 2 W64) = 865 primOpTag (VecAddOp FloatVec 8 W32) = 866 primOpTag (VecAddOp FloatVec 4 W64) = 867 primOpTag (VecAddOp FloatVec 16 W32) = 868 primOpTag (VecAddOp FloatVec 8 W64) = 869 primOpTag (VecSubOp IntVec 16 W8) = 870 primOpTag (VecSubOp IntVec 8 W16) = 871 primOpTag (VecSubOp IntVec 4 W32) = 872 primOpTag (VecSubOp IntVec 2 W64) = 873 primOpTag (VecSubOp IntVec 32 W8) = 874 primOpTag (VecSubOp 
IntVec 16 W16) = 875 primOpTag (VecSubOp IntVec 8 W32) = 876 primOpTag (VecSubOp IntVec 4 W64) = 877 primOpTag (VecSubOp IntVec 64 W8) = 878 primOpTag (VecSubOp IntVec 32 W16) = 879 primOpTag (VecSubOp IntVec 16 W32) = 880 primOpTag (VecSubOp IntVec 8 W64) = 881 primOpTag (VecSubOp WordVec 16 W8) = 882 primOpTag (VecSubOp WordVec 8 W16) = 883 primOpTag (VecSubOp WordVec 4 W32) = 884 primOpTag (VecSubOp WordVec 2 W64) = 885 primOpTag (VecSubOp WordVec 32 W8) = 886 primOpTag (VecSubOp WordVec 16 W16) = 887 primOpTag (VecSubOp WordVec 8 W32) = 888 primOpTag (VecSubOp WordVec 4 W64) = 889 primOpTag (VecSubOp WordVec 64 W8) = 890 primOpTag (VecSubOp WordVec 32 W16) = 891 primOpTag (VecSubOp WordVec 16 W32) = 892 primOpTag (VecSubOp WordVec 8 W64) = 893 primOpTag (VecSubOp FloatVec 4 W32) = 894 primOpTag (VecSubOp FloatVec 2 W64) = 895 primOpTag (VecSubOp FloatVec 8 W32) = 896 primOpTag (VecSubOp FloatVec 4 W64) = 897 primOpTag (VecSubOp FloatVec 16 W32) = 898 primOpTag (VecSubOp FloatVec 8 W64) = 899 primOpTag (VecMulOp IntVec 16 W8) = 900 primOpTag (VecMulOp IntVec 8 W16) = 901 primOpTag (VecMulOp IntVec 4 W32) = 902 primOpTag (VecMulOp IntVec 2 W64) = 903 primOpTag (VecMulOp IntVec 32 W8) = 904 primOpTag (VecMulOp IntVec 16 W16) = 905 primOpTag (VecMulOp IntVec 8 W32) = 906 primOpTag (VecMulOp IntVec 4 W64) = 907 primOpTag (VecMulOp IntVec 64 W8) = 908 primOpTag (VecMulOp IntVec 32 W16) = 909 primOpTag (VecMulOp IntVec 16 W32) = 910 primOpTag (VecMulOp IntVec 8 W64) = 911 primOpTag (VecMulOp WordVec 16 W8) = 912 primOpTag (VecMulOp WordVec 8 W16) = 913 primOpTag (VecMulOp WordVec 4 W32) = 914 primOpTag (VecMulOp WordVec 2 W64) = 915 primOpTag (VecMulOp WordVec 32 W8) = 916 primOpTag (VecMulOp WordVec 16 W16) = 917 primOpTag (VecMulOp WordVec 8 W32) = 918 primOpTag (VecMulOp WordVec 4 W64) = 919 primOpTag (VecMulOp WordVec 64 W8) = 920 primOpTag (VecMulOp WordVec 32 W16) = 921 primOpTag (VecMulOp WordVec 16 W32) = 922 primOpTag (VecMulOp WordVec 8 W64) = 923 primOpTag 
(VecMulOp FloatVec 4 W32) = 924 primOpTag (VecMulOp FloatVec 2 W64) = 925 primOpTag (VecMulOp FloatVec 8 W32) = 926 primOpTag (VecMulOp FloatVec 4 W64) = 927 primOpTag (VecMulOp FloatVec 16 W32) = 928 primOpTag (VecMulOp FloatVec 8 W64) = 929 primOpTag (VecDivOp FloatVec 4 W32) = 930 primOpTag (VecDivOp FloatVec 2 W64) = 931 primOpTag (VecDivOp FloatVec 8 W32) = 932 primOpTag (VecDivOp FloatVec 4 W64) = 933 primOpTag (VecDivOp FloatVec 16 W32) = 934 primOpTag (VecDivOp FloatVec 8 W64) = 935 primOpTag (VecQuotOp IntVec 16 W8) = 936 primOpTag (VecQuotOp IntVec 8 W16) = 937 primOpTag (VecQuotOp IntVec 4 W32) = 938 primOpTag (VecQuotOp IntVec 2 W64) = 939 primOpTag (VecQuotOp IntVec 32 W8) = 940 primOpTag (VecQuotOp IntVec 16 W16) = 941 primOpTag (VecQuotOp IntVec 8 W32) = 942 primOpTag (VecQuotOp IntVec 4 W64) = 943 primOpTag (VecQuotOp IntVec 64 W8) = 944 primOpTag (VecQuotOp IntVec 32 W16) = 945 primOpTag (VecQuotOp IntVec 16 W32) = 946 primOpTag (VecQuotOp IntVec 8 W64) = 947 primOpTag (VecQuotOp WordVec 16 W8) = 948 primOpTag (VecQuotOp WordVec 8 W16) = 949 primOpTag (VecQuotOp WordVec 4 W32) = 950 primOpTag (VecQuotOp WordVec 2 W64) = 951 primOpTag (VecQuotOp WordVec 32 W8) = 952 primOpTag (VecQuotOp WordVec 16 W16) = 953 primOpTag (VecQuotOp WordVec 8 W32) = 954 primOpTag (VecQuotOp WordVec 4 W64) = 955 primOpTag (VecQuotOp WordVec 64 W8) = 956 primOpTag (VecQuotOp WordVec 32 W16) = 957 primOpTag (VecQuotOp WordVec 16 W32) = 958 primOpTag (VecQuotOp WordVec 8 W64) = 959 primOpTag (VecRemOp IntVec 16 W8) = 960 primOpTag (VecRemOp IntVec 8 W16) = 961 primOpTag (VecRemOp IntVec 4 W32) = 962 primOpTag (VecRemOp IntVec 2 W64) = 963 primOpTag (VecRemOp IntVec 32 W8) = 964 primOpTag (VecRemOp IntVec 16 W16) = 965 primOpTag (VecRemOp IntVec 8 W32) = 966 primOpTag (VecRemOp IntVec 4 W64) = 967 primOpTag (VecRemOp IntVec 64 W8) = 968 primOpTag (VecRemOp IntVec 32 W16) = 969 primOpTag (VecRemOp IntVec 16 W32) = 970 primOpTag (VecRemOp IntVec 8 W64) = 971 primOpTag 
(VecRemOp WordVec 16 W8) = 972 primOpTag (VecRemOp WordVec 8 W16) = 973 primOpTag (VecRemOp WordVec 4 W32) = 974 primOpTag (VecRemOp WordVec 2 W64) = 975 primOpTag (VecRemOp WordVec 32 W8) = 976 primOpTag (VecRemOp WordVec 16 W16) = 977 primOpTag (VecRemOp WordVec 8 W32) = 978 primOpTag (VecRemOp WordVec 4 W64) = 979 primOpTag (VecRemOp WordVec 64 W8) = 980 primOpTag (VecRemOp WordVec 32 W16) = 981 primOpTag (VecRemOp WordVec 16 W32) = 982 primOpTag (VecRemOp WordVec 8 W64) = 983 primOpTag (VecNegOp IntVec 16 W8) = 984 primOpTag (VecNegOp IntVec 8 W16) = 985 primOpTag (VecNegOp IntVec 4 W32) = 986 primOpTag (VecNegOp IntVec 2 W64) = 987 primOpTag (VecNegOp IntVec 32 W8) = 988 primOpTag (VecNegOp IntVec 16 W16) = 989 primOpTag (VecNegOp IntVec 8 W32) = 990 primOpTag (VecNegOp IntVec 4 W64) = 991 primOpTag (VecNegOp IntVec 64 W8) = 992 primOpTag (VecNegOp IntVec 32 W16) = 993 primOpTag (VecNegOp IntVec 16 W32) = 994 primOpTag (VecNegOp IntVec 8 W64) = 995 primOpTag (VecNegOp FloatVec 4 W32) = 996 primOpTag (VecNegOp FloatVec 2 W64) = 997 primOpTag (VecNegOp FloatVec 8 W32) = 998 primOpTag (VecNegOp FloatVec 4 W64) = 999 primOpTag (VecNegOp FloatVec 16 W32) = 1000 primOpTag (VecNegOp FloatVec 8 W64) = 1001 primOpTag (VecIndexByteArrayOp IntVec 16 W8) = 1002 primOpTag (VecIndexByteArrayOp IntVec 8 W16) = 1003 primOpTag (VecIndexByteArrayOp IntVec 4 W32) = 1004 primOpTag (VecIndexByteArrayOp IntVec 2 W64) = 1005 primOpTag (VecIndexByteArrayOp IntVec 32 W8) = 1006 primOpTag (VecIndexByteArrayOp IntVec 16 W16) = 1007 primOpTag (VecIndexByteArrayOp IntVec 8 W32) = 1008 primOpTag (VecIndexByteArrayOp IntVec 4 W64) = 1009 primOpTag (VecIndexByteArrayOp IntVec 64 W8) = 1010 primOpTag (VecIndexByteArrayOp IntVec 32 W16) = 1011 primOpTag (VecIndexByteArrayOp IntVec 16 W32) = 1012 primOpTag (VecIndexByteArrayOp IntVec 8 W64) = 1013 primOpTag (VecIndexByteArrayOp WordVec 16 W8) = 1014 primOpTag (VecIndexByteArrayOp WordVec 8 W16) = 1015 primOpTag (VecIndexByteArrayOp WordVec 4 
W32) = 1016 primOpTag (VecIndexByteArrayOp WordVec 2 W64) = 1017 primOpTag (VecIndexByteArrayOp WordVec 32 W8) = 1018 primOpTag (VecIndexByteArrayOp WordVec 16 W16) = 1019 primOpTag (VecIndexByteArrayOp WordVec 8 W32) = 1020 primOpTag (VecIndexByteArrayOp WordVec 4 W64) = 1021 primOpTag (VecIndexByteArrayOp WordVec 64 W8) = 1022 primOpTag (VecIndexByteArrayOp WordVec 32 W16) = 1023 primOpTag (VecIndexByteArrayOp WordVec 16 W32) = 1024 primOpTag (VecIndexByteArrayOp WordVec 8 W64) = 1025 primOpTag (VecIndexByteArrayOp FloatVec 4 W32) = 1026 primOpTag (VecIndexByteArrayOp FloatVec 2 W64) = 1027 primOpTag (VecIndexByteArrayOp FloatVec 8 W32) = 1028 primOpTag (VecIndexByteArrayOp FloatVec 4 W64) = 1029 primOpTag (VecIndexByteArrayOp FloatVec 16 W32) = 1030 primOpTag (VecIndexByteArrayOp FloatVec 8 W64) = 1031 primOpTag (VecReadByteArrayOp IntVec 16 W8) = 1032 primOpTag (VecReadByteArrayOp IntVec 8 W16) = 1033 primOpTag (VecReadByteArrayOp IntVec 4 W32) = 1034 primOpTag (VecReadByteArrayOp IntVec 2 W64) = 1035 primOpTag (VecReadByteArrayOp IntVec 32 W8) = 1036 primOpTag (VecReadByteArrayOp IntVec 16 W16) = 1037 primOpTag (VecReadByteArrayOp IntVec 8 W32) = 1038 primOpTag (VecReadByteArrayOp IntVec 4 W64) = 1039 primOpTag (VecReadByteArrayOp IntVec 64 W8) = 1040 primOpTag (VecReadByteArrayOp IntVec 32 W16) = 1041 primOpTag (VecReadByteArrayOp IntVec 16 W32) = 1042 primOpTag (VecReadByteArrayOp IntVec 8 W64) = 1043 primOpTag (VecReadByteArrayOp WordVec 16 W8) = 1044 primOpTag (VecReadByteArrayOp WordVec 8 W16) = 1045 primOpTag (VecReadByteArrayOp WordVec 4 W32) = 1046 primOpTag (VecReadByteArrayOp WordVec 2 W64) = 1047 primOpTag (VecReadByteArrayOp WordVec 32 W8) = 1048 primOpTag (VecReadByteArrayOp WordVec 16 W16) = 1049 primOpTag (VecReadByteArrayOp WordVec 8 W32) = 1050 primOpTag (VecReadByteArrayOp WordVec 4 W64) = 1051 primOpTag (VecReadByteArrayOp WordVec 64 W8) = 1052 primOpTag (VecReadByteArrayOp WordVec 32 W16) = 1053 primOpTag (VecReadByteArrayOp WordVec 16 W32) 
= 1054 primOpTag (VecReadByteArrayOp WordVec 8 W64) = 1055 primOpTag (VecReadByteArrayOp FloatVec 4 W32) = 1056 primOpTag (VecReadByteArrayOp FloatVec 2 W64) = 1057 primOpTag (VecReadByteArrayOp FloatVec 8 W32) = 1058 primOpTag (VecReadByteArrayOp FloatVec 4 W64) = 1059 primOpTag (VecReadByteArrayOp FloatVec 16 W32) = 1060 primOpTag (VecReadByteArrayOp FloatVec 8 W64) = 1061 primOpTag (VecWriteByteArrayOp IntVec 16 W8) = 1062 primOpTag (VecWriteByteArrayOp IntVec 8 W16) = 1063 primOpTag (VecWriteByteArrayOp IntVec 4 W32) = 1064 primOpTag (VecWriteByteArrayOp IntVec 2 W64) = 1065 primOpTag (VecWriteByteArrayOp IntVec 32 W8) = 1066 primOpTag (VecWriteByteArrayOp IntVec 16 W16) = 1067 primOpTag (VecWriteByteArrayOp IntVec 8 W32) = 1068 primOpTag (VecWriteByteArrayOp IntVec 4 W64) = 1069 primOpTag (VecWriteByteArrayOp IntVec 64 W8) = 1070 primOpTag (VecWriteByteArrayOp IntVec 32 W16) = 1071 primOpTag (VecWriteByteArrayOp IntVec 16 W32) = 1072 primOpTag (VecWriteByteArrayOp IntVec 8 W64) = 1073 primOpTag (VecWriteByteArrayOp WordVec 16 W8) = 1074 primOpTag (VecWriteByteArrayOp WordVec 8 W16) = 1075 primOpTag (VecWriteByteArrayOp WordVec 4 W32) = 1076 primOpTag (VecWriteByteArrayOp WordVec 2 W64) = 1077 primOpTag (VecWriteByteArrayOp WordVec 32 W8) = 1078 primOpTag (VecWriteByteArrayOp WordVec 16 W16) = 1079 primOpTag (VecWriteByteArrayOp WordVec 8 W32) = 1080 primOpTag (VecWriteByteArrayOp WordVec 4 W64) = 1081 primOpTag (VecWriteByteArrayOp WordVec 64 W8) = 1082 primOpTag (VecWriteByteArrayOp WordVec 32 W16) = 1083 primOpTag (VecWriteByteArrayOp WordVec 16 W32) = 1084 primOpTag (VecWriteByteArrayOp WordVec 8 W64) = 1085 primOpTag (VecWriteByteArrayOp FloatVec 4 W32) = 1086 primOpTag (VecWriteByteArrayOp FloatVec 2 W64) = 1087 primOpTag (VecWriteByteArrayOp FloatVec 8 W32) = 1088 primOpTag (VecWriteByteArrayOp FloatVec 4 W64) = 1089 primOpTag (VecWriteByteArrayOp FloatVec 16 W32) = 1090 primOpTag (VecWriteByteArrayOp FloatVec 8 W64) = 1091 primOpTag (VecIndexOffAddrOp 
IntVec 16 W8) = 1092 primOpTag (VecIndexOffAddrOp IntVec 8 W16) = 1093 primOpTag (VecIndexOffAddrOp IntVec 4 W32) = 1094 primOpTag (VecIndexOffAddrOp IntVec 2 W64) = 1095 primOpTag (VecIndexOffAddrOp IntVec 32 W8) = 1096 primOpTag (VecIndexOffAddrOp IntVec 16 W16) = 1097 primOpTag (VecIndexOffAddrOp IntVec 8 W32) = 1098 primOpTag (VecIndexOffAddrOp IntVec 4 W64) = 1099 primOpTag (VecIndexOffAddrOp IntVec 64 W8) = 1100 primOpTag (VecIndexOffAddrOp IntVec 32 W16) = 1101 primOpTag (VecIndexOffAddrOp IntVec 16 W32) = 1102 primOpTag (VecIndexOffAddrOp IntVec 8 W64) = 1103 primOpTag (VecIndexOffAddrOp WordVec 16 W8) = 1104 primOpTag (VecIndexOffAddrOp WordVec 8 W16) = 1105 primOpTag (VecIndexOffAddrOp WordVec 4 W32) = 1106 primOpTag (VecIndexOffAddrOp WordVec 2 W64) = 1107 primOpTag (VecIndexOffAddrOp WordVec 32 W8) = 1108 primOpTag (VecIndexOffAddrOp WordVec 16 W16) = 1109 primOpTag (VecIndexOffAddrOp WordVec 8 W32) = 1110 primOpTag (VecIndexOffAddrOp WordVec 4 W64) = 1111 primOpTag (VecIndexOffAddrOp WordVec 64 W8) = 1112 primOpTag (VecIndexOffAddrOp WordVec 32 W16) = 1113 primOpTag (VecIndexOffAddrOp WordVec 16 W32) = 1114 primOpTag (VecIndexOffAddrOp WordVec 8 W64) = 1115 primOpTag (VecIndexOffAddrOp FloatVec 4 W32) = 1116 primOpTag (VecIndexOffAddrOp FloatVec 2 W64) = 1117 primOpTag (VecIndexOffAddrOp FloatVec 8 W32) = 1118 primOpTag (VecIndexOffAddrOp FloatVec 4 W64) = 1119 primOpTag (VecIndexOffAddrOp FloatVec 16 W32) = 1120 primOpTag (VecIndexOffAddrOp FloatVec 8 W64) = 1121 primOpTag (VecReadOffAddrOp IntVec 16 W8) = 1122 primOpTag (VecReadOffAddrOp IntVec 8 W16) = 1123 primOpTag (VecReadOffAddrOp IntVec 4 W32) = 1124 primOpTag (VecReadOffAddrOp IntVec 2 W64) = 1125 primOpTag (VecReadOffAddrOp IntVec 32 W8) = 1126 primOpTag (VecReadOffAddrOp IntVec 16 W16) = 1127 primOpTag (VecReadOffAddrOp IntVec 8 W32) = 1128 primOpTag (VecReadOffAddrOp IntVec 4 W64) = 1129 primOpTag (VecReadOffAddrOp IntVec 64 W8) = 1130 primOpTag (VecReadOffAddrOp IntVec 32 W16) = 1131 
primOpTag (VecReadOffAddrOp IntVec 16 W32) = 1132 primOpTag (VecReadOffAddrOp IntVec 8 W64) = 1133 primOpTag (VecReadOffAddrOp WordVec 16 W8) = 1134 primOpTag (VecReadOffAddrOp WordVec 8 W16) = 1135 primOpTag (VecReadOffAddrOp WordVec 4 W32) = 1136 primOpTag (VecReadOffAddrOp WordVec 2 W64) = 1137 primOpTag (VecReadOffAddrOp WordVec 32 W8) = 1138 primOpTag (VecReadOffAddrOp WordVec 16 W16) = 1139 primOpTag (VecReadOffAddrOp WordVec 8 W32) = 1140 primOpTag (VecReadOffAddrOp WordVec 4 W64) = 1141 primOpTag (VecReadOffAddrOp WordVec 64 W8) = 1142 primOpTag (VecReadOffAddrOp WordVec 32 W16) = 1143 primOpTag (VecReadOffAddrOp WordVec 16 W32) = 1144 primOpTag (VecReadOffAddrOp WordVec 8 W64) = 1145 primOpTag (VecReadOffAddrOp FloatVec 4 W32) = 1146 primOpTag (VecReadOffAddrOp FloatVec 2 W64) = 1147 primOpTag (VecReadOffAddrOp FloatVec 8 W32) = 1148 primOpTag (VecReadOffAddrOp FloatVec 4 W64) = 1149 primOpTag (VecReadOffAddrOp FloatVec 16 W32) = 1150 primOpTag (VecReadOffAddrOp FloatVec 8 W64) = 1151 primOpTag (VecWriteOffAddrOp IntVec 16 W8) = 1152 primOpTag (VecWriteOffAddrOp IntVec 8 W16) = 1153 primOpTag (VecWriteOffAddrOp IntVec 4 W32) = 1154 primOpTag (VecWriteOffAddrOp IntVec 2 W64) = 1155 primOpTag (VecWriteOffAddrOp IntVec 32 W8) = 1156 primOpTag (VecWriteOffAddrOp IntVec 16 W16) = 1157 primOpTag (VecWriteOffAddrOp IntVec 8 W32) = 1158 primOpTag (VecWriteOffAddrOp IntVec 4 W64) = 1159 primOpTag (VecWriteOffAddrOp IntVec 64 W8) = 1160 primOpTag (VecWriteOffAddrOp IntVec 32 W16) = 1161 primOpTag (VecWriteOffAddrOp IntVec 16 W32) = 1162 primOpTag (VecWriteOffAddrOp IntVec 8 W64) = 1163 primOpTag (VecWriteOffAddrOp WordVec 16 W8) = 1164 primOpTag (VecWriteOffAddrOp WordVec 8 W16) = 1165 primOpTag (VecWriteOffAddrOp WordVec 4 W32) = 1166 primOpTag (VecWriteOffAddrOp WordVec 2 W64) = 1167 primOpTag (VecWriteOffAddrOp WordVec 32 W8) = 1168 primOpTag (VecWriteOffAddrOp WordVec 16 W16) = 1169 primOpTag (VecWriteOffAddrOp WordVec 8 W32) = 1170 primOpTag (VecWriteOffAddrOp 
WordVec 4 W64) = 1171 primOpTag (VecWriteOffAddrOp WordVec 64 W8) = 1172 primOpTag (VecWriteOffAddrOp WordVec 32 W16) = 1173 primOpTag (VecWriteOffAddrOp WordVec 16 W32) = 1174 primOpTag (VecWriteOffAddrOp WordVec 8 W64) = 1175 primOpTag (VecWriteOffAddrOp FloatVec 4 W32) = 1176 primOpTag (VecWriteOffAddrOp FloatVec 2 W64) = 1177 primOpTag (VecWriteOffAddrOp FloatVec 8 W32) = 1178 primOpTag (VecWriteOffAddrOp FloatVec 4 W64) = 1179 primOpTag (VecWriteOffAddrOp FloatVec 16 W32) = 1180 primOpTag (VecWriteOffAddrOp FloatVec 8 W64) = 1181 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W8) = 1182 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W16) = 1183 primOpTag (VecIndexScalarByteArrayOp IntVec 4 W32) = 1184 primOpTag (VecIndexScalarByteArrayOp IntVec 2 W64) = 1185 primOpTag (VecIndexScalarByteArrayOp IntVec 32 W8) = 1186 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W16) = 1187 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W32) = 1188 primOpTag (VecIndexScalarByteArrayOp IntVec 4 W64) = 1189 primOpTag (VecIndexScalarByteArrayOp IntVec 64 W8) = 1190 primOpTag (VecIndexScalarByteArrayOp IntVec 32 W16) = 1191 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W32) = 1192 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W64) = 1193 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W8) = 1194 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W16) = 1195 primOpTag (VecIndexScalarByteArrayOp WordVec 4 W32) = 1196 primOpTag (VecIndexScalarByteArrayOp WordVec 2 W64) = 1197 primOpTag (VecIndexScalarByteArrayOp WordVec 32 W8) = 1198 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W16) = 1199 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W32) = 1200 primOpTag (VecIndexScalarByteArrayOp WordVec 4 W64) = 1201 primOpTag (VecIndexScalarByteArrayOp WordVec 64 W8) = 1202 primOpTag (VecIndexScalarByteArrayOp WordVec 32 W16) = 1203 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W32) = 1204 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W64) = 1205 primOpTag (VecIndexScalarByteArrayOp FloatVec 4 
W32) = 1206 primOpTag (VecIndexScalarByteArrayOp FloatVec 2 W64) = 1207 primOpTag (VecIndexScalarByteArrayOp FloatVec 8 W32) = 1208 primOpTag (VecIndexScalarByteArrayOp FloatVec 4 W64) = 1209 primOpTag (VecIndexScalarByteArrayOp FloatVec 16 W32) = 1210 primOpTag (VecIndexScalarByteArrayOp FloatVec 8 W64) = 1211 primOpTag (VecReadScalarByteArrayOp IntVec 16 W8) = 1212 primOpTag (VecReadScalarByteArrayOp IntVec 8 W16) = 1213 primOpTag (VecReadScalarByteArrayOp IntVec 4 W32) = 1214 primOpTag (VecReadScalarByteArrayOp IntVec 2 W64) = 1215 primOpTag (VecReadScalarByteArrayOp IntVec 32 W8) = 1216 primOpTag (VecReadScalarByteArrayOp IntVec 16 W16) = 1217 primOpTag (VecReadScalarByteArrayOp IntVec 8 W32) = 1218 primOpTag (VecReadScalarByteArrayOp IntVec 4 W64) = 1219 primOpTag (VecReadScalarByteArrayOp IntVec 64 W8) = 1220 primOpTag (VecReadScalarByteArrayOp IntVec 32 W16) = 1221 primOpTag (VecReadScalarByteArrayOp IntVec 16 W32) = 1222 primOpTag (VecReadScalarByteArrayOp IntVec 8 W64) = 1223 primOpTag (VecReadScalarByteArrayOp WordVec 16 W8) = 1224 primOpTag (VecReadScalarByteArrayOp WordVec 8 W16) = 1225 primOpTag (VecReadScalarByteArrayOp WordVec 4 W32) = 1226 primOpTag (VecReadScalarByteArrayOp WordVec 2 W64) = 1227 primOpTag (VecReadScalarByteArrayOp WordVec 32 W8) = 1228 primOpTag (VecReadScalarByteArrayOp WordVec 16 W16) = 1229 primOpTag (VecReadScalarByteArrayOp WordVec 8 W32) = 1230 primOpTag (VecReadScalarByteArrayOp WordVec 4 W64) = 1231 primOpTag (VecReadScalarByteArrayOp WordVec 64 W8) = 1232 primOpTag (VecReadScalarByteArrayOp WordVec 32 W16) = 1233 primOpTag (VecReadScalarByteArrayOp WordVec 16 W32) = 1234 primOpTag (VecReadScalarByteArrayOp WordVec 8 W64) = 1235 primOpTag (VecReadScalarByteArrayOp FloatVec 4 W32) = 1236 primOpTag (VecReadScalarByteArrayOp FloatVec 2 W64) = 1237 primOpTag (VecReadScalarByteArrayOp FloatVec 8 W32) = 1238 primOpTag (VecReadScalarByteArrayOp FloatVec 4 W64) = 1239 primOpTag (VecReadScalarByteArrayOp FloatVec 16 W32) = 1240 
primOpTag (VecReadScalarByteArrayOp FloatVec 8 W64) = 1241 primOpTag (VecWriteScalarByteArrayOp IntVec 16 W8) = 1242 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W16) = 1243 primOpTag (VecWriteScalarByteArrayOp IntVec 4 W32) = 1244 primOpTag (VecWriteScalarByteArrayOp IntVec 2 W64) = 1245 primOpTag (VecWriteScalarByteArrayOp IntVec 32 W8) = 1246 primOpTag (VecWriteScalarByteArrayOp IntVec 16 W16) = 1247 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W32) = 1248 primOpTag (VecWriteScalarByteArrayOp IntVec 4 W64) = 1249 primOpTag (VecWriteScalarByteArrayOp IntVec 64 W8) = 1250 primOpTag (VecWriteScalarByteArrayOp IntVec 32 W16) = 1251 primOpTag (VecWriteScalarByteArrayOp IntVec 16 W32) = 1252 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W64) = 1253 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W8) = 1254 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W16) = 1255 primOpTag (VecWriteScalarByteArrayOp WordVec 4 W32) = 1256 primOpTag (VecWriteScalarByteArrayOp WordVec 2 W64) = 1257 primOpTag (VecWriteScalarByteArrayOp WordVec 32 W8) = 1258 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W16) = 1259 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W32) = 1260 primOpTag (VecWriteScalarByteArrayOp WordVec 4 W64) = 1261 primOpTag (VecWriteScalarByteArrayOp WordVec 64 W8) = 1262 primOpTag (VecWriteScalarByteArrayOp WordVec 32 W16) = 1263 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W32) = 1264 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W64) = 1265 primOpTag (VecWriteScalarByteArrayOp FloatVec 4 W32) = 1266 primOpTag (VecWriteScalarByteArrayOp FloatVec 2 W64) = 1267 primOpTag (VecWriteScalarByteArrayOp FloatVec 8 W32) = 1268 primOpTag (VecWriteScalarByteArrayOp FloatVec 4 W64) = 1269 primOpTag (VecWriteScalarByteArrayOp FloatVec 16 W32) = 1270 primOpTag (VecWriteScalarByteArrayOp FloatVec 8 W64) = 1271 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W8) = 1272 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W16) = 1273 primOpTag (VecIndexScalarOffAddrOp IntVec 4 W32) = 1274 
primOpTag (VecIndexScalarOffAddrOp IntVec 2 W64) = 1275 primOpTag (VecIndexScalarOffAddrOp IntVec 32 W8) = 1276 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W16) = 1277 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W32) = 1278 primOpTag (VecIndexScalarOffAddrOp IntVec 4 W64) = 1279 primOpTag (VecIndexScalarOffAddrOp IntVec 64 W8) = 1280 primOpTag (VecIndexScalarOffAddrOp IntVec 32 W16) = 1281 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W32) = 1282 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W64) = 1283 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W8) = 1284 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W16) = 1285 primOpTag (VecIndexScalarOffAddrOp WordVec 4 W32) = 1286 primOpTag (VecIndexScalarOffAddrOp WordVec 2 W64) = 1287 primOpTag (VecIndexScalarOffAddrOp WordVec 32 W8) = 1288 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W16) = 1289 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W32) = 1290 primOpTag (VecIndexScalarOffAddrOp WordVec 4 W64) = 1291 primOpTag (VecIndexScalarOffAddrOp WordVec 64 W8) = 1292 primOpTag (VecIndexScalarOffAddrOp WordVec 32 W16) = 1293 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W32) = 1294 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W64) = 1295 primOpTag (VecIndexScalarOffAddrOp FloatVec 4 W32) = 1296 primOpTag (VecIndexScalarOffAddrOp FloatVec 2 W64) = 1297 primOpTag (VecIndexScalarOffAddrOp FloatVec 8 W32) = 1298 primOpTag (VecIndexScalarOffAddrOp FloatVec 4 W64) = 1299 primOpTag (VecIndexScalarOffAddrOp FloatVec 16 W32) = 1300 primOpTag (VecIndexScalarOffAddrOp FloatVec 8 W64) = 1301 primOpTag (VecReadScalarOffAddrOp IntVec 16 W8) = 1302 primOpTag (VecReadScalarOffAddrOp IntVec 8 W16) = 1303 primOpTag (VecReadScalarOffAddrOp IntVec 4 W32) = 1304 primOpTag (VecReadScalarOffAddrOp IntVec 2 W64) = 1305 primOpTag (VecReadScalarOffAddrOp IntVec 32 W8) = 1306 primOpTag (VecReadScalarOffAddrOp IntVec 16 W16) = 1307 primOpTag (VecReadScalarOffAddrOp IntVec 8 W32) = 1308 primOpTag (VecReadScalarOffAddrOp IntVec 4 W64) = 1309 primOpTag 
(VecReadScalarOffAddrOp IntVec 64 W8) = 1310 primOpTag (VecReadScalarOffAddrOp IntVec 32 W16) = 1311 primOpTag (VecReadScalarOffAddrOp IntVec 16 W32) = 1312 primOpTag (VecReadScalarOffAddrOp IntVec 8 W64) = 1313 primOpTag (VecReadScalarOffAddrOp WordVec 16 W8) = 1314 primOpTag (VecReadScalarOffAddrOp WordVec 8 W16) = 1315 primOpTag (VecReadScalarOffAddrOp WordVec 4 W32) = 1316 primOpTag (VecReadScalarOffAddrOp WordVec 2 W64) = 1317 primOpTag (VecReadScalarOffAddrOp WordVec 32 W8) = 1318 primOpTag (VecReadScalarOffAddrOp WordVec 16 W16) = 1319 primOpTag (VecReadScalarOffAddrOp WordVec 8 W32) = 1320 primOpTag (VecReadScalarOffAddrOp WordVec 4 W64) = 1321 primOpTag (VecReadScalarOffAddrOp WordVec 64 W8) = 1322 primOpTag (VecReadScalarOffAddrOp WordVec 32 W16) = 1323 primOpTag (VecReadScalarOffAddrOp WordVec 16 W32) = 1324 primOpTag (VecReadScalarOffAddrOp WordVec 8 W64) = 1325 primOpTag (VecReadScalarOffAddrOp FloatVec 4 W32) = 1326 primOpTag (VecReadScalarOffAddrOp FloatVec 2 W64) = 1327 primOpTag (VecReadScalarOffAddrOp FloatVec 8 W32) = 1328 primOpTag (VecReadScalarOffAddrOp FloatVec 4 W64) = 1329 primOpTag (VecReadScalarOffAddrOp FloatVec 16 W32) = 1330 primOpTag (VecReadScalarOffAddrOp FloatVec 8 W64) = 1331 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W8) = 1332 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W16) = 1333 primOpTag (VecWriteScalarOffAddrOp IntVec 4 W32) = 1334 primOpTag (VecWriteScalarOffAddrOp IntVec 2 W64) = 1335 primOpTag (VecWriteScalarOffAddrOp IntVec 32 W8) = 1336 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W16) = 1337 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W32) = 1338 primOpTag (VecWriteScalarOffAddrOp IntVec 4 W64) = 1339 primOpTag (VecWriteScalarOffAddrOp IntVec 64 W8) = 1340 primOpTag (VecWriteScalarOffAddrOp IntVec 32 W16) = 1341 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W32) = 1342 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W64) = 1343 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W8) = 1344 primOpTag (VecWriteScalarOffAddrOp 
WordVec 8 W16) = 1345 primOpTag (VecWriteScalarOffAddrOp WordVec 4 W32) = 1346 primOpTag (VecWriteScalarOffAddrOp WordVec 2 W64) = 1347 primOpTag (VecWriteScalarOffAddrOp WordVec 32 W8) = 1348 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W16) = 1349 primOpTag (VecWriteScalarOffAddrOp WordVec 8 W32) = 1350 primOpTag (VecWriteScalarOffAddrOp WordVec 4 W64) = 1351 primOpTag (VecWriteScalarOffAddrOp WordVec 64 W8) = 1352 primOpTag (VecWriteScalarOffAddrOp WordVec 32 W16) = 1353 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W32) = 1354 primOpTag (VecWriteScalarOffAddrOp WordVec 8 W64) = 1355 primOpTag (VecWriteScalarOffAddrOp FloatVec 4 W32) = 1356 primOpTag (VecWriteScalarOffAddrOp FloatVec 2 W64) = 1357 primOpTag (VecWriteScalarOffAddrOp FloatVec 8 W32) = 1358 primOpTag (VecWriteScalarOffAddrOp FloatVec 4 W64) = 1359 primOpTag (VecWriteScalarOffAddrOp FloatVec 16 W32) = 1360 primOpTag (VecWriteScalarOffAddrOp FloatVec 8 W64) = 1361 primOpTag (VecFMAdd FloatVec 4 W32) = 1362 primOpTag (VecFMAdd FloatVec 2 W64) = 1363 primOpTag (VecFMAdd FloatVec 8 W32) = 1364 primOpTag (VecFMAdd FloatVec 4 W64) = 1365 primOpTag (VecFMAdd FloatVec 16 W32) = 1366 primOpTag (VecFMAdd FloatVec 8 W64) = 1367 primOpTag (VecFMSub FloatVec 4 W32) = 1368 primOpTag (VecFMSub FloatVec 2 W64) = 1369 primOpTag (VecFMSub FloatVec 8 W32) = 1370 primOpTag (VecFMSub FloatVec 4 W64) = 1371 primOpTag (VecFMSub FloatVec 16 W32) = 1372 primOpTag (VecFMSub FloatVec 8 W64) = 1373 primOpTag (VecFNMAdd FloatVec 4 W32) = 1374 primOpTag (VecFNMAdd FloatVec 2 W64) = 1375 primOpTag (VecFNMAdd FloatVec 8 W32) = 1376 primOpTag (VecFNMAdd FloatVec 4 W64) = 1377 primOpTag (VecFNMAdd FloatVec 16 W32) = 1378 primOpTag (VecFNMAdd FloatVec 8 W64) = 1379 primOpTag (VecFNMSub FloatVec 4 W32) = 1380 primOpTag (VecFNMSub FloatVec 2 W64) = 1381 primOpTag (VecFNMSub FloatVec 8 W32) = 1382 primOpTag (VecFNMSub FloatVec 4 W64) = 1383 primOpTag (VecFNMSub FloatVec 16 W32) = 1384 primOpTag (VecFNMSub FloatVec 8 W64) = 1385 
primOpTag (VecShuffleOp IntVec 16 W8) = 1386 primOpTag (VecShuffleOp IntVec 8 W16) = 1387 primOpTag (VecShuffleOp IntVec 4 W32) = 1388 primOpTag (VecShuffleOp IntVec 2 W64) = 1389 primOpTag (VecShuffleOp IntVec 32 W8) = 1390 primOpTag (VecShuffleOp IntVec 16 W16) = 1391 primOpTag (VecShuffleOp IntVec 8 W32) = 1392 primOpTag (VecShuffleOp IntVec 4 W64) = 1393 primOpTag (VecShuffleOp IntVec 64 W8) = 1394 primOpTag (VecShuffleOp IntVec 32 W16) = 1395 primOpTag (VecShuffleOp IntVec 16 W32) = 1396 primOpTag (VecShuffleOp IntVec 8 W64) = 1397 primOpTag (VecShuffleOp WordVec 16 W8) = 1398 primOpTag (VecShuffleOp WordVec 8 W16) = 1399 primOpTag (VecShuffleOp WordVec 4 W32) = 1400 primOpTag (VecShuffleOp WordVec 2 W64) = 1401 primOpTag (VecShuffleOp WordVec 32 W8) = 1402 primOpTag (VecShuffleOp WordVec 16 W16) = 1403 primOpTag (VecShuffleOp WordVec 8 W32) = 1404 primOpTag (VecShuffleOp WordVec 4 W64) = 1405 primOpTag (VecShuffleOp WordVec 64 W8) = 1406 primOpTag (VecShuffleOp WordVec 32 W16) = 1407 primOpTag (VecShuffleOp WordVec 16 W32) = 1408 primOpTag (VecShuffleOp WordVec 8 W64) = 1409 primOpTag (VecShuffleOp FloatVec 4 W32) = 1410 primOpTag (VecShuffleOp FloatVec 2 W64) = 1411 primOpTag (VecShuffleOp FloatVec 8 W32) = 1412 primOpTag (VecShuffleOp FloatVec 4 W64) = 1413 primOpTag (VecShuffleOp FloatVec 16 W32) = 1414 primOpTag (VecShuffleOp FloatVec 8 W64) = 1415 primOpTag (VecMinOp IntVec 16 W8) = 1416 primOpTag (VecMinOp IntVec 8 W16) = 1417 primOpTag (VecMinOp IntVec 4 W32) = 1418 primOpTag (VecMinOp IntVec 2 W64) = 1419 primOpTag (VecMinOp IntVec 32 W8) = 1420 primOpTag (VecMinOp IntVec 16 W16) = 1421 primOpTag (VecMinOp IntVec 8 W32) = 1422 primOpTag (VecMinOp IntVec 4 W64) = 1423 primOpTag (VecMinOp IntVec 64 W8) = 1424 primOpTag (VecMinOp IntVec 32 W16) = 1425 primOpTag (VecMinOp IntVec 16 W32) = 1426 primOpTag (VecMinOp IntVec 8 W64) = 1427 primOpTag (VecMinOp WordVec 16 W8) = 1428 primOpTag (VecMinOp WordVec 8 W16) = 1429 primOpTag (VecMinOp WordVec 4 W32) = 
1430 primOpTag (VecMinOp WordVec 2 W64) = 1431 primOpTag (VecMinOp WordVec 32 W8) = 1432 primOpTag (VecMinOp WordVec 16 W16) = 1433 primOpTag (VecMinOp WordVec 8 W32) = 1434 primOpTag (VecMinOp WordVec 4 W64) = 1435 primOpTag (VecMinOp WordVec 64 W8) = 1436 primOpTag (VecMinOp WordVec 32 W16) = 1437 primOpTag (VecMinOp WordVec 16 W32) = 1438 primOpTag (VecMinOp WordVec 8 W64) = 1439 primOpTag (VecMinOp FloatVec 4 W32) = 1440 primOpTag (VecMinOp FloatVec 2 W64) = 1441 primOpTag (VecMinOp FloatVec 8 W32) = 1442 primOpTag (VecMinOp FloatVec 4 W64) = 1443 primOpTag (VecMinOp FloatVec 16 W32) = 1444 primOpTag (VecMinOp FloatVec 8 W64) = 1445 primOpTag (VecMaxOp IntVec 16 W8) = 1446 primOpTag (VecMaxOp IntVec 8 W16) = 1447 primOpTag (VecMaxOp IntVec 4 W32) = 1448 primOpTag (VecMaxOp IntVec 2 W64) = 1449 primOpTag (VecMaxOp IntVec 32 W8) = 1450 primOpTag (VecMaxOp IntVec 16 W16) = 1451 primOpTag (VecMaxOp IntVec 8 W32) = 1452 primOpTag (VecMaxOp IntVec 4 W64) = 1453 primOpTag (VecMaxOp IntVec 64 W8) = 1454 primOpTag (VecMaxOp IntVec 32 W16) = 1455 primOpTag (VecMaxOp IntVec 16 W32) = 1456 primOpTag (VecMaxOp IntVec 8 W64) = 1457 primOpTag (VecMaxOp WordVec 16 W8) = 1458 primOpTag (VecMaxOp WordVec 8 W16) = 1459 primOpTag (VecMaxOp WordVec 4 W32) = 1460 primOpTag (VecMaxOp WordVec 2 W64) = 1461 primOpTag (VecMaxOp WordVec 32 W8) = 1462 primOpTag (VecMaxOp WordVec 16 W16) = 1463 primOpTag (VecMaxOp WordVec 8 W32) = 1464 primOpTag (VecMaxOp WordVec 4 W64) = 1465 primOpTag (VecMaxOp WordVec 64 W8) = 1466 primOpTag (VecMaxOp WordVec 32 W16) = 1467 primOpTag (VecMaxOp WordVec 16 W32) = 1468 primOpTag (VecMaxOp WordVec 8 W64) = 1469 primOpTag (VecMaxOp FloatVec 4 W32) = 1470 primOpTag (VecMaxOp FloatVec 2 W64) = 1471 primOpTag (VecMaxOp FloatVec 8 W32) = 1472 primOpTag (VecMaxOp FloatVec 4 W64) = 1473 primOpTag (VecMaxOp FloatVec 16 W32) = 1474 primOpTag (VecMaxOp FloatVec 8 W64) = 1475 primOpTag PrefetchByteArrayOp3 = 1476 primOpTag PrefetchMutableByteArrayOp3 = 1477 primOpTag 
PrefetchAddrOp3 = 1478 primOpTag PrefetchValueOp3 = 1479 primOpTag PrefetchByteArrayOp2 = 1480 primOpTag PrefetchMutableByteArrayOp2 = 1481 primOpTag PrefetchAddrOp2 = 1482 primOpTag PrefetchValueOp2 = 1483 primOpTag PrefetchByteArrayOp1 = 1484 primOpTag PrefetchMutableByteArrayOp1 = 1485 primOpTag PrefetchAddrOp1 = 1486 primOpTag PrefetchValueOp1 = 1487 primOpTag PrefetchByteArrayOp0 = 1488 primOpTag PrefetchMutableByteArrayOp0 = 1489 primOpTag PrefetchAddrOp0 = 1490 primOpTag PrefetchValueOp0 = 1491 ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-vector-tycons.hs-incl0000644000000000000000000000131007346545000026471 0ustar0000000000000000 , int8X16PrimTyCon , int16X8PrimTyCon , int32X4PrimTyCon , int64X2PrimTyCon , int8X32PrimTyCon , int16X16PrimTyCon , int32X8PrimTyCon , int64X4PrimTyCon , int8X64PrimTyCon , int16X32PrimTyCon , int32X16PrimTyCon , int64X8PrimTyCon , word8X16PrimTyCon , word16X8PrimTyCon , word32X4PrimTyCon , word64X2PrimTyCon , word8X32PrimTyCon , word16X16PrimTyCon , word32X8PrimTyCon , word64X4PrimTyCon , word8X64PrimTyCon , word16X32PrimTyCon , word32X16PrimTyCon , word64X8PrimTyCon , floatX4PrimTyCon , doubleX2PrimTyCon , floatX8PrimTyCon , doubleX4PrimTyCon , floatX16PrimTyCon , doubleX8PrimTyCon ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-vector-tys-exports.hs-incl0000644000000000000000000000237207346545000027504 0ustar0000000000000000 int8X16PrimTy, int8X16PrimTyCon, int16X8PrimTy, int16X8PrimTyCon, int32X4PrimTy, int32X4PrimTyCon, int64X2PrimTy, int64X2PrimTyCon, int8X32PrimTy, int8X32PrimTyCon, int16X16PrimTy, int16X16PrimTyCon, int32X8PrimTy, int32X8PrimTyCon, int64X4PrimTy, int64X4PrimTyCon, int8X64PrimTy, int8X64PrimTyCon, int16X32PrimTy, int16X32PrimTyCon, int32X16PrimTy, int32X16PrimTyCon, int64X8PrimTy, int64X8PrimTyCon, word8X16PrimTy, word8X16PrimTyCon, word16X8PrimTy, word16X8PrimTyCon, word32X4PrimTy, word32X4PrimTyCon, word64X2PrimTy, word64X2PrimTyCon, word8X32PrimTy, 
word8X32PrimTyCon, word16X16PrimTy, word16X16PrimTyCon, word32X8PrimTy, word32X8PrimTyCon, word64X4PrimTy, word64X4PrimTyCon, word8X64PrimTy, word8X64PrimTyCon, word16X32PrimTy, word16X32PrimTyCon, word32X16PrimTy, word32X16PrimTyCon, word64X8PrimTy, word64X8PrimTyCon, floatX4PrimTy, floatX4PrimTyCon, doubleX2PrimTy, doubleX2PrimTyCon, floatX8PrimTy, floatX8PrimTyCon, doubleX4PrimTy, doubleX4PrimTyCon, floatX16PrimTy, floatX16PrimTyCon, doubleX8PrimTy, doubleX8PrimTyCon, ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-vector-tys.hs-incl0000644000000000000000000002366407346545000026011 0ustar0000000000000000int8X16PrimTyConName :: Name int8X16PrimTyConName = mkPrimTc (fsLit "Int8X16#") int8X16PrimTyConKey int8X16PrimTyCon int8X16PrimTy :: Type int8X16PrimTy = mkTyConTy int8X16PrimTyCon int8X16PrimTyCon :: TyCon int8X16PrimTyCon = pcPrimTyCon0 int8X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int8ElemRepDataConTy]) int16X8PrimTyConName :: Name int16X8PrimTyConName = mkPrimTc (fsLit "Int16X8#") int16X8PrimTyConKey int16X8PrimTyCon int16X8PrimTy :: Type int16X8PrimTy = mkTyConTy int16X8PrimTyCon int16X8PrimTyCon :: TyCon int16X8PrimTyCon = pcPrimTyCon0 int16X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, int16ElemRepDataConTy]) int32X4PrimTyConName :: Name int32X4PrimTyConName = mkPrimTc (fsLit "Int32X4#") int32X4PrimTyConKey int32X4PrimTyCon int32X4PrimTy :: Type int32X4PrimTy = mkTyConTy int32X4PrimTyCon int32X4PrimTyCon :: TyCon int32X4PrimTyCon = pcPrimTyCon0 int32X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, int32ElemRepDataConTy]) int64X2PrimTyConName :: Name int64X2PrimTyConName = mkPrimTc (fsLit "Int64X2#") int64X2PrimTyConKey int64X2PrimTyCon int64X2PrimTy :: Type int64X2PrimTy = mkTyConTy int64X2PrimTyCon int64X2PrimTyCon :: TyCon int64X2PrimTyCon = pcPrimTyCon0 int64X2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, int64ElemRepDataConTy]) int8X32PrimTyConName :: Name 
int8X32PrimTyConName = mkPrimTc (fsLit "Int8X32#") int8X32PrimTyConKey int8X32PrimTyCon int8X32PrimTy :: Type int8X32PrimTy = mkTyConTy int8X32PrimTyCon int8X32PrimTyCon :: TyCon int8X32PrimTyCon = pcPrimTyCon0 int8X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, int8ElemRepDataConTy]) int16X16PrimTyConName :: Name int16X16PrimTyConName = mkPrimTc (fsLit "Int16X16#") int16X16PrimTyConKey int16X16PrimTyCon int16X16PrimTy :: Type int16X16PrimTy = mkTyConTy int16X16PrimTyCon int16X16PrimTyCon :: TyCon int16X16PrimTyCon = pcPrimTyCon0 int16X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int16ElemRepDataConTy]) int32X8PrimTyConName :: Name int32X8PrimTyConName = mkPrimTc (fsLit "Int32X8#") int32X8PrimTyConKey int32X8PrimTyCon int32X8PrimTy :: Type int32X8PrimTy = mkTyConTy int32X8PrimTyCon int32X8PrimTyCon :: TyCon int32X8PrimTyCon = pcPrimTyCon0 int32X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, int32ElemRepDataConTy]) int64X4PrimTyConName :: Name int64X4PrimTyConName = mkPrimTc (fsLit "Int64X4#") int64X4PrimTyConKey int64X4PrimTyCon int64X4PrimTy :: Type int64X4PrimTy = mkTyConTy int64X4PrimTyCon int64X4PrimTyCon :: TyCon int64X4PrimTyCon = pcPrimTyCon0 int64X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, int64ElemRepDataConTy]) int8X64PrimTyConName :: Name int8X64PrimTyConName = mkPrimTc (fsLit "Int8X64#") int8X64PrimTyConKey int8X64PrimTyCon int8X64PrimTy :: Type int8X64PrimTy = mkTyConTy int8X64PrimTyCon int8X64PrimTyCon :: TyCon int8X64PrimTyCon = pcPrimTyCon0 int8X64PrimTyConName (TyConApp vecRepDataConTyCon [vec64DataConTy, int8ElemRepDataConTy]) int16X32PrimTyConName :: Name int16X32PrimTyConName = mkPrimTc (fsLit "Int16X32#") int16X32PrimTyConKey int16X32PrimTyCon int16X32PrimTy :: Type int16X32PrimTy = mkTyConTy int16X32PrimTyCon int16X32PrimTyCon :: TyCon int16X32PrimTyCon = pcPrimTyCon0 int16X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, int16ElemRepDataConTy]) int32X16PrimTyConName :: 
Name int32X16PrimTyConName = mkPrimTc (fsLit "Int32X16#") int32X16PrimTyConKey int32X16PrimTyCon int32X16PrimTy :: Type int32X16PrimTy = mkTyConTy int32X16PrimTyCon int32X16PrimTyCon :: TyCon int32X16PrimTyCon = pcPrimTyCon0 int32X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int32ElemRepDataConTy]) int64X8PrimTyConName :: Name int64X8PrimTyConName = mkPrimTc (fsLit "Int64X8#") int64X8PrimTyConKey int64X8PrimTyCon int64X8PrimTy :: Type int64X8PrimTy = mkTyConTy int64X8PrimTyCon int64X8PrimTyCon :: TyCon int64X8PrimTyCon = pcPrimTyCon0 int64X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, int64ElemRepDataConTy]) word8X16PrimTyConName :: Name word8X16PrimTyConName = mkPrimTc (fsLit "Word8X16#") word8X16PrimTyConKey word8X16PrimTyCon word8X16PrimTy :: Type word8X16PrimTy = mkTyConTy word8X16PrimTyCon word8X16PrimTyCon :: TyCon word8X16PrimTyCon = pcPrimTyCon0 word8X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word8ElemRepDataConTy]) word16X8PrimTyConName :: Name word16X8PrimTyConName = mkPrimTc (fsLit "Word16X8#") word16X8PrimTyConKey word16X8PrimTyCon word16X8PrimTy :: Type word16X8PrimTy = mkTyConTy word16X8PrimTyCon word16X8PrimTyCon :: TyCon word16X8PrimTyCon = pcPrimTyCon0 word16X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, word16ElemRepDataConTy]) word32X4PrimTyConName :: Name word32X4PrimTyConName = mkPrimTc (fsLit "Word32X4#") word32X4PrimTyConKey word32X4PrimTyCon word32X4PrimTy :: Type word32X4PrimTy = mkTyConTy word32X4PrimTyCon word32X4PrimTyCon :: TyCon word32X4PrimTyCon = pcPrimTyCon0 word32X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, word32ElemRepDataConTy]) word64X2PrimTyConName :: Name word64X2PrimTyConName = mkPrimTc (fsLit "Word64X2#") word64X2PrimTyConKey word64X2PrimTyCon word64X2PrimTy :: Type word64X2PrimTy = mkTyConTy word64X2PrimTyCon word64X2PrimTyCon :: TyCon word64X2PrimTyCon = pcPrimTyCon0 word64X2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, 
word64ElemRepDataConTy]) word8X32PrimTyConName :: Name word8X32PrimTyConName = mkPrimTc (fsLit "Word8X32#") word8X32PrimTyConKey word8X32PrimTyCon word8X32PrimTy :: Type word8X32PrimTy = mkTyConTy word8X32PrimTyCon word8X32PrimTyCon :: TyCon word8X32PrimTyCon = pcPrimTyCon0 word8X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, word8ElemRepDataConTy]) word16X16PrimTyConName :: Name word16X16PrimTyConName = mkPrimTc (fsLit "Word16X16#") word16X16PrimTyConKey word16X16PrimTyCon word16X16PrimTy :: Type word16X16PrimTy = mkTyConTy word16X16PrimTyCon word16X16PrimTyCon :: TyCon word16X16PrimTyCon = pcPrimTyCon0 word16X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word16ElemRepDataConTy]) word32X8PrimTyConName :: Name word32X8PrimTyConName = mkPrimTc (fsLit "Word32X8#") word32X8PrimTyConKey word32X8PrimTyCon word32X8PrimTy :: Type word32X8PrimTy = mkTyConTy word32X8PrimTyCon word32X8PrimTyCon :: TyCon word32X8PrimTyCon = pcPrimTyCon0 word32X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, word32ElemRepDataConTy]) word64X4PrimTyConName :: Name word64X4PrimTyConName = mkPrimTc (fsLit "Word64X4#") word64X4PrimTyConKey word64X4PrimTyCon word64X4PrimTy :: Type word64X4PrimTy = mkTyConTy word64X4PrimTyCon word64X4PrimTyCon :: TyCon word64X4PrimTyCon = pcPrimTyCon0 word64X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, word64ElemRepDataConTy]) word8X64PrimTyConName :: Name word8X64PrimTyConName = mkPrimTc (fsLit "Word8X64#") word8X64PrimTyConKey word8X64PrimTyCon word8X64PrimTy :: Type word8X64PrimTy = mkTyConTy word8X64PrimTyCon word8X64PrimTyCon :: TyCon word8X64PrimTyCon = pcPrimTyCon0 word8X64PrimTyConName (TyConApp vecRepDataConTyCon [vec64DataConTy, word8ElemRepDataConTy]) word16X32PrimTyConName :: Name word16X32PrimTyConName = mkPrimTc (fsLit "Word16X32#") word16X32PrimTyConKey word16X32PrimTyCon word16X32PrimTy :: Type word16X32PrimTy = mkTyConTy word16X32PrimTyCon word16X32PrimTyCon :: TyCon word16X32PrimTyCon = 
pcPrimTyCon0 word16X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, word16ElemRepDataConTy]) word32X16PrimTyConName :: Name word32X16PrimTyConName = mkPrimTc (fsLit "Word32X16#") word32X16PrimTyConKey word32X16PrimTyCon word32X16PrimTy :: Type word32X16PrimTy = mkTyConTy word32X16PrimTyCon word32X16PrimTyCon :: TyCon word32X16PrimTyCon = pcPrimTyCon0 word32X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word32ElemRepDataConTy]) word64X8PrimTyConName :: Name word64X8PrimTyConName = mkPrimTc (fsLit "Word64X8#") word64X8PrimTyConKey word64X8PrimTyCon word64X8PrimTy :: Type word64X8PrimTy = mkTyConTy word64X8PrimTyCon word64X8PrimTyCon :: TyCon word64X8PrimTyCon = pcPrimTyCon0 word64X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, word64ElemRepDataConTy]) floatX4PrimTyConName :: Name floatX4PrimTyConName = mkPrimTc (fsLit "FloatX4#") floatX4PrimTyConKey floatX4PrimTyCon floatX4PrimTy :: Type floatX4PrimTy = mkTyConTy floatX4PrimTyCon floatX4PrimTyCon :: TyCon floatX4PrimTyCon = pcPrimTyCon0 floatX4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, floatElemRepDataConTy]) doubleX2PrimTyConName :: Name doubleX2PrimTyConName = mkPrimTc (fsLit "DoubleX2#") doubleX2PrimTyConKey doubleX2PrimTyCon doubleX2PrimTy :: Type doubleX2PrimTy = mkTyConTy doubleX2PrimTyCon doubleX2PrimTyCon :: TyCon doubleX2PrimTyCon = pcPrimTyCon0 doubleX2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, doubleElemRepDataConTy]) floatX8PrimTyConName :: Name floatX8PrimTyConName = mkPrimTc (fsLit "FloatX8#") floatX8PrimTyConKey floatX8PrimTyCon floatX8PrimTy :: Type floatX8PrimTy = mkTyConTy floatX8PrimTyCon floatX8PrimTyCon :: TyCon floatX8PrimTyCon = pcPrimTyCon0 floatX8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, floatElemRepDataConTy]) doubleX4PrimTyConName :: Name doubleX4PrimTyConName = mkPrimTc (fsLit "DoubleX4#") doubleX4PrimTyConKey doubleX4PrimTyCon doubleX4PrimTy :: Type doubleX4PrimTy = mkTyConTy doubleX4PrimTyCon 
doubleX4PrimTyCon :: TyCon doubleX4PrimTyCon = pcPrimTyCon0 doubleX4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, doubleElemRepDataConTy]) floatX16PrimTyConName :: Name floatX16PrimTyConName = mkPrimTc (fsLit "FloatX16#") floatX16PrimTyConKey floatX16PrimTyCon floatX16PrimTy :: Type floatX16PrimTy = mkTyConTy floatX16PrimTyCon floatX16PrimTyCon :: TyCon floatX16PrimTyCon = pcPrimTyCon0 floatX16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, floatElemRepDataConTy]) doubleX8PrimTyConName :: Name doubleX8PrimTyConName = mkPrimTc (fsLit "DoubleX8#") doubleX8PrimTyConKey doubleX8PrimTyCon doubleX8PrimTy :: Type doubleX8PrimTy = mkTyConTy doubleX8PrimTyCon doubleX8PrimTyCon :: TyCon doubleX8PrimTyCon = pcPrimTyCon0 doubleX8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, doubleElemRepDataConTy]) ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/compiler/build/primop-vector-uniques.hs-incl0000644000000000000000000000446207346545000026656 0ustar0000000000000000int8X16PrimTyConKey :: Unique int8X16PrimTyConKey = mkPreludeTyConUnique 300 int16X8PrimTyConKey :: Unique int16X8PrimTyConKey = mkPreludeTyConUnique 301 int32X4PrimTyConKey :: Unique int32X4PrimTyConKey = mkPreludeTyConUnique 302 int64X2PrimTyConKey :: Unique int64X2PrimTyConKey = mkPreludeTyConUnique 303 int8X32PrimTyConKey :: Unique int8X32PrimTyConKey = mkPreludeTyConUnique 304 int16X16PrimTyConKey :: Unique int16X16PrimTyConKey = mkPreludeTyConUnique 305 int32X8PrimTyConKey :: Unique int32X8PrimTyConKey = mkPreludeTyConUnique 306 int64X4PrimTyConKey :: Unique int64X4PrimTyConKey = mkPreludeTyConUnique 307 int8X64PrimTyConKey :: Unique int8X64PrimTyConKey = mkPreludeTyConUnique 308 int16X32PrimTyConKey :: Unique int16X32PrimTyConKey = mkPreludeTyConUnique 309 int32X16PrimTyConKey :: Unique int32X16PrimTyConKey = mkPreludeTyConUnique 310 int64X8PrimTyConKey :: Unique int64X8PrimTyConKey = mkPreludeTyConUnique 311 word8X16PrimTyConKey :: Unique word8X16PrimTyConKey = 
mkPreludeTyConUnique 312 word16X8PrimTyConKey :: Unique word16X8PrimTyConKey = mkPreludeTyConUnique 313 word32X4PrimTyConKey :: Unique word32X4PrimTyConKey = mkPreludeTyConUnique 314 word64X2PrimTyConKey :: Unique word64X2PrimTyConKey = mkPreludeTyConUnique 315 word8X32PrimTyConKey :: Unique word8X32PrimTyConKey = mkPreludeTyConUnique 316 word16X16PrimTyConKey :: Unique word16X16PrimTyConKey = mkPreludeTyConUnique 317 word32X8PrimTyConKey :: Unique word32X8PrimTyConKey = mkPreludeTyConUnique 318 word64X4PrimTyConKey :: Unique word64X4PrimTyConKey = mkPreludeTyConUnique 319 word8X64PrimTyConKey :: Unique word8X64PrimTyConKey = mkPreludeTyConUnique 320 word16X32PrimTyConKey :: Unique word16X32PrimTyConKey = mkPreludeTyConUnique 321 word32X16PrimTyConKey :: Unique word32X16PrimTyConKey = mkPreludeTyConUnique 322 word64X8PrimTyConKey :: Unique word64X8PrimTyConKey = mkPreludeTyConUnique 323 floatX4PrimTyConKey :: Unique floatX4PrimTyConKey = mkPreludeTyConUnique 324 doubleX2PrimTyConKey :: Unique doubleX2PrimTyConKey = mkPreludeTyConUnique 325 floatX8PrimTyConKey :: Unique floatX8PrimTyConKey = mkPreludeTyConUnique 326 doubleX4PrimTyConKey :: Unique doubleX4PrimTyConKey = mkPreludeTyConUnique 327 floatX16PrimTyConKey :: Unique floatX16PrimTyConKey = mkPreludeTyConUnique 328 doubleX8PrimTyConKey :: Unique doubleX8PrimTyConKey = mkPreludeTyConUnique 329 ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/lib/0000755000000000000000000000000007346545000016731 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/lib/llvm-passes0000644000000000000000000000022607346545000021122 0ustar0000000000000000[ (0, "-passes=function(require),function(mem2reg),globalopt,function(lower-expect)"), (1, "-passes=default"), (2, "-passes=default") ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/lib/llvm-targets0000644000000000000000000001534407346545000021304 0ustar0000000000000000[("x86_64-unknown-windows-gnu", 
("e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("arm-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm7tdmi", "+strict-align")) ,("arm-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+strict-align")) ,("arm-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+strict-align")) ,("armv6-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1136jf-s", "+strict-align")) ,("armv6-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1136jf-s", "+strict-align")) ,("armv6l-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+strict-align")) ,("armv6l-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+strict-align")) ,("armv7-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7a-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7a-unknown-linux-musleabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7a-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7a-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7l-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7l-unknown-linux-musleabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7l-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("armv7l-unknown-linux-musleabihf", 
("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "")) ,("aarch64-unknown-linux-gnu", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64-unknown-linux-musl", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64-unknown-linux", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64_be-unknown-linux-gnu", ("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64_be-unknown-linux-musl", ("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64_be-unknown-linux", ("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("i386-unknown-linux-gnu", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i386-unknown-linux-musl", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i386-unknown-linux", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux-gnu", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux-musl", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("x86_64-unknown-linux-gnu", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux-musl", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux-android", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "+sse4.2 +popcnt +cx16")) 
,("armv7-unknown-linux-androideabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+fpregs +vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -crypto -fp16fml")) ,("aarch64-unknown-linux-android", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("armv7a-unknown-linux-androideabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+fpregs +vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -crypto -fp16fml")) ,("powerpc64le-unknown-linux-gnu", ("e-m:e-i64:64-n32:64", "ppc64le", "")) ,("powerpc64le-unknown-linux-musl", ("e-m:e-i64:64-n32:64", "ppc64le", "+secure-plt")) ,("powerpc64le-unknown-linux", ("e-m:e-i64:64-n32:64", "ppc64le", "")) ,("s390x-ibm-linux", ("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64", "z10", "")) ,("riscv64-unknown-linux-gnu", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+m +a +f +d +c +relax")) ,("riscv64-unknown-linux", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+m +a +f +d +c +relax")) ,("loongarch64-unknown-linux-gnu", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+f +d")) ,("loongarch64-unknown-linux", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+f +d")) ,("x86_64-apple-darwin", ("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "penryn", "")) ,("arm64-apple-darwin", ("e-m:o-i64:64-i128:128-n32:64-S128", "generic", "+v8.3a +fp-armv8 +neon +crc +crypto +fullfp16 +ras +lse +rdm +rcpc +zcm +zcz +sha2 +aes")) ,("aarch64-apple-ios", ("e-m:o-i64:64-i128:128-n32:64-S128", "apple-a7", "+fp-armv8 +neon +crypto +zcm +zcz +sha2 +aes")) ,("x86_64-apple-ios", ("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "core2", "")) ,("amd64-portbld-freebsd", 
("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-freebsd", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("aarch64-unknown-freebsd", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("armv6-unknown-freebsd-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+strict-align")) ,("armv7-unknown-freebsd-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+strict-align")) ,("aarch64-unknown-netbsd", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("x86_64-unknown-openbsd", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "+retpoline-indirect-calls +retpoline-indirect-branches")) ,("arm-unknown-nto-qnx-eabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm7tdmi", "+strict-align")) ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/lib/settings0000644000000000000000000000427007346545000020517 0ustar0000000000000000[("C compiler command", "gcc") ,("C compiler flags", "--target=x86_64-apple-darwin -Qunused-arguments") ,("C++ compiler command", "g++") ,("C++ compiler flags", "--target=x86_64-apple-darwin") ,("C compiler link flags", "--target=x86_64-apple-darwin -Wl,-no_fixup_chains -Wl,-no_warn_duplicate_libraries") ,("C compiler supports -no-pie", "NO") ,("CPP command", "gcc") ,("CPP flags", "-E") ,("Haskell CPP command", "gcc") ,("Haskell CPP flags", "-E -undef -traditional -Wno-invalid-pp-token -Wno-unicode -Wno-trigraphs") ,("JavaScript CPP command", "gcc") ,("JavaScript CPP flags", "-E -CC -Wno-unicode -nostdinc") ,("C-- CPP command", "gcc") ,("C-- CPP flags", "-E") ,("C-- CPP supports -g0", "YES") ,("ld supports compact unwind", "YES") ,("ld supports filelist", "YES") ,("ld supports single module", "NO") ,("ld is GNU ld", "NO") ,("Merge objects command", "ld") ,("Merge objects flags", "-r") ,("Merge 
objects supports response files", "YES") ,("ar command", "ar") ,("ar flags", "qcls") ,("ar supports at file", "NO") ,("ar supports -L", "NO") ,("ranlib command", "ranlib") ,("otool command", "otool") ,("install_name_tool command", "install_name_tool") ,("windres command", "/bin/false") ,("unlit command", "$topdir/../bin/unlit") ,("cross compiling", "NO") ,("target platform string", "x86_64-apple-darwin") ,("target os", "OSDarwin") ,("target arch", "ArchX86_64") ,("target word size", "8") ,("target word big endian", "NO") ,("target has GNU nonexec stack", "NO") ,("target has .ident directive", "YES") ,("target has subsections via symbols", "YES") ,("target has libm", "YES") ,("Unregisterised", "NO") ,("LLVM target", "x86_64-apple-darwin") ,("LLVM llc command", "llc") ,("LLVM opt command", "opt") ,("LLVM llvm-as command", "clang-19") ,("Use inplace MinGW toolchain", "NO") ,("target RTS linker only supports shared libraries", "NO") ,("Use interpreter", "YES") ,("Support SMP", "YES") ,("RTS ways", "v thr thr_debug thr_debug_p thr_debug_p_dyn thr_debug_dyn thr_p thr_p_dyn thr_dyn debug debug_p debug_p_dyn debug_dyn p p_dyn dyn") ,("Tables next to code", "YES") ,("Leading underscore", "YES") ,("Use LibFFI", "NO") ,("RTS expects libdw", "NO") ,("Relative Global Package DB", "../../stage1/lib/package.conf.d") ] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/0000755000000000000000000000000007346545000025125 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/Host.hs0000644000000000000000000000040107346545000026371 0ustar0000000000000000module GHC.Platform.Host where import GHC.Platform.ArchOS hostPlatformArch :: Arch hostPlatformArch = ArchX86_64 hostPlatformOS :: OS hostPlatformOS = OSDarwin hostPlatformArchOS :: ArchOS hostPlatformArchOS = ArchOS hostPlatformArch hostPlatformOS 
ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/libraries/ghc-boot/build/GHC/0000755000000000000000000000000007346545000023341 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Version.hs0000644000000000000000000000076207346545000025327 0ustar0000000000000000module GHC.Version where import Prelude -- See Note [Why do we import Prelude here?] cProjectGitCommitId :: String cProjectGitCommitId = "383be28ffdddf65b57b7b111bfc89808b4229ebc" cProjectVersion :: String cProjectVersion = "9.12.2" cProjectVersionInt :: String cProjectVersionInt = "912" cProjectPatchLevel :: String cProjectPatchLevel = "2" cProjectPatchLevel1 :: String cProjectPatchLevel1 = "2" cProjectPatchLevel2 :: String cProjectPatchLevel2 = "0" ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/rts/build/include/0000755000000000000000000000000007346545000021515 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/rts/build/include/GhclibDerivedConstants.h0000644000000000000000000010312707346545000026262 0ustar0000000000000000/* This file is created automatically. 
Do not edit by hand.*/ // MAX_Real_Vanilla_REG 6 // MAX_Real_Float_REG 6 // MAX_Real_Double_REG 6 // MAX_Real_XMM_REG 6 // MAX_Real_Long_REG 0 // WORD_SIZE 8 // BITMAP_BITS_SHIFT 6 // TAG_BITS 3 #define HS_CONSTANTS "291,1,2,4096,252,9,0,8,16,24,32,40,48,56,64,72,80,84,88,92,96,100,104,112,120,128,136,144,152,168,184,200,216,232,248,280,312,344,376,408,440,504,568,632,696,760,824,832,840,848,856,864,872,888,904,-24,-16,-8,24,0,8,48,46,96,72,8,48,8,8,16,8,64,8,16,8,0,72,56,8,8,16,0,8,8,0,8,0,112,128,16,8,16,0,0,4,4,24,20,4,15,7,1,-16,255,0,255,7,10,6,6,1,6,6,6,6,6,0,16384,21,1024,8,4,8,8,6,3,30,1152921503533105152,0,1152921504606846976,1" #define CONTROL_GROUP_CONST_291 291 #define STD_HDR_SIZE 1 #define PROF_HDR_SIZE 2 #define STACK_DIRTY 1 #define BLOCK_SIZE 4096 #define MBLOCK_SIZE 1048576 #define BLOCKS_PER_MBLOCK 252 #define TICKY_BIN_COUNT 9 #define OFFSET_StgRegTable_rR1 0 #define OFFSET_StgRegTable_rR2 8 #define OFFSET_StgRegTable_rR3 16 #define OFFSET_StgRegTable_rR4 24 #define OFFSET_StgRegTable_rR5 32 #define OFFSET_StgRegTable_rR6 40 #define OFFSET_StgRegTable_rR7 48 #define OFFSET_StgRegTable_rR8 56 #define OFFSET_StgRegTable_rR9 64 #define OFFSET_StgRegTable_rR10 72 #define OFFSET_StgRegTable_rF1 80 #define OFFSET_StgRegTable_rF2 84 #define OFFSET_StgRegTable_rF3 88 #define OFFSET_StgRegTable_rF4 92 #define OFFSET_StgRegTable_rF5 96 #define OFFSET_StgRegTable_rF6 100 #define OFFSET_StgRegTable_rD1 104 #define OFFSET_StgRegTable_rD2 112 #define OFFSET_StgRegTable_rD3 120 #define OFFSET_StgRegTable_rD4 128 #define OFFSET_StgRegTable_rD5 136 #define OFFSET_StgRegTable_rD6 144 #define OFFSET_StgRegTable_rXMM1 152 #define OFFSET_StgRegTable_rXMM2 168 #define OFFSET_StgRegTable_rXMM3 184 #define OFFSET_StgRegTable_rXMM4 200 #define OFFSET_StgRegTable_rXMM5 216 #define OFFSET_StgRegTable_rXMM6 232 #define OFFSET_StgRegTable_rYMM1 248 #define OFFSET_StgRegTable_rYMM2 280 #define OFFSET_StgRegTable_rYMM3 312 #define OFFSET_StgRegTable_rYMM4 344 #define 
OFFSET_StgRegTable_rYMM5 376 #define OFFSET_StgRegTable_rYMM6 408 #define OFFSET_StgRegTable_rZMM1 440 #define OFFSET_StgRegTable_rZMM2 504 #define OFFSET_StgRegTable_rZMM3 568 #define OFFSET_StgRegTable_rZMM4 632 #define OFFSET_StgRegTable_rZMM5 696 #define OFFSET_StgRegTable_rZMM6 760 #define OFFSET_StgRegTable_rL1 824 #define OFFSET_StgRegTable_rSp 832 #define OFFSET_StgRegTable_rSpLim 840 #define OFFSET_StgRegTable_rHp 848 #define OFFSET_StgRegTable_rHpLim 856 #define OFFSET_StgRegTable_rCCCS 864 #define OFFSET_StgRegTable_rCurrentTSO 872 #define OFFSET_StgRegTable_rCurrentNursery 888 #define OFFSET_StgRegTable_rHpAlloc 904 #define OFFSET_StgRegTable_rRet 912 #define REP_StgRegTable_rRet b64 #define StgRegTable_rRet(__ptr__) REP_StgRegTable_rRet[__ptr__+OFFSET_StgRegTable_rRet] #define OFFSET_StgRegTable_rNursery 880 #define REP_StgRegTable_rNursery b64 #define StgRegTable_rNursery(__ptr__) REP_StgRegTable_rNursery[__ptr__+OFFSET_StgRegTable_rNursery] #define OFFSET_stgEagerBlackholeInfo -24 #define OFFSET_stgGCEnter1 -16 #define OFFSET_stgGCFun -8 #define OFFSET_Capability_r 24 #define OFFSET_Capability_lock 1224 #define OFFSET_Capability_no 944 #define REP_Capability_no b32 #define Capability_no(__ptr__) REP_Capability_no[__ptr__+OFFSET_Capability_no] #define OFFSET_Capability_mut_lists 1016 #define REP_Capability_mut_lists b64 #define Capability_mut_lists(__ptr__) REP_Capability_mut_lists[__ptr__+OFFSET_Capability_mut_lists] #define OFFSET_Capability_context_switch 1192 #define REP_Capability_context_switch b32 #define Capability_context_switch(__ptr__) REP_Capability_context_switch[__ptr__+OFFSET_Capability_context_switch] #define OFFSET_Capability_interrupt 1196 #define REP_Capability_interrupt b32 #define Capability_interrupt(__ptr__) REP_Capability_interrupt[__ptr__+OFFSET_Capability_interrupt] #define OFFSET_Capability_sparks 1328 #define REP_Capability_sparks b64 #define Capability_sparks(__ptr__) REP_Capability_sparks[__ptr__+OFFSET_Capability_sparks] 
#define OFFSET_Capability_total_allocated 1200 #define REP_Capability_total_allocated b64 #define Capability_total_allocated(__ptr__) REP_Capability_total_allocated[__ptr__+OFFSET_Capability_total_allocated] #define OFFSET_Capability_weak_ptr_list_hd 1176 #define REP_Capability_weak_ptr_list_hd b64 #define Capability_weak_ptr_list_hd(__ptr__) REP_Capability_weak_ptr_list_hd[__ptr__+OFFSET_Capability_weak_ptr_list_hd] #define OFFSET_Capability_weak_ptr_list_tl 1184 #define REP_Capability_weak_ptr_list_tl b64 #define Capability_weak_ptr_list_tl(__ptr__) REP_Capability_weak_ptr_list_tl[__ptr__+OFFSET_Capability_weak_ptr_list_tl] #define OFFSET_Capability_n_run_queue 992 #define REP_Capability_n_run_queue b32 #define Capability_n_run_queue(__ptr__) REP_Capability_n_run_queue[__ptr__+OFFSET_Capability_n_run_queue] #define OFFSET_bdescr_start 0 #define REP_bdescr_start b64 #define bdescr_start(__ptr__) REP_bdescr_start[__ptr__+OFFSET_bdescr_start] #define OFFSET_bdescr_free 8 #define REP_bdescr_free b64 #define bdescr_free(__ptr__) REP_bdescr_free[__ptr__+OFFSET_bdescr_free] #define OFFSET_bdescr_blocks 48 #define REP_bdescr_blocks b32 #define bdescr_blocks(__ptr__) REP_bdescr_blocks[__ptr__+OFFSET_bdescr_blocks] #define OFFSET_bdescr_gen_no 40 #define REP_bdescr_gen_no b16 #define bdescr_gen_no(__ptr__) REP_bdescr_gen_no[__ptr__+OFFSET_bdescr_gen_no] #define OFFSET_bdescr_link 16 #define REP_bdescr_link b64 #define bdescr_link(__ptr__) REP_bdescr_link[__ptr__+OFFSET_bdescr_link] #define OFFSET_bdescr_flags 46 #define REP_bdescr_flags b16 #define bdescr_flags(__ptr__) REP_bdescr_flags[__ptr__+OFFSET_bdescr_flags] #define SIZEOF_generation 368 #define OFFSET_generation_n_new_large_words 56 #define REP_generation_n_new_large_words b64 #define generation_n_new_large_words(__ptr__) REP_generation_n_new_large_words[__ptr__+OFFSET_generation_n_new_large_words] #define OFFSET_generation_weak_ptr_list 112 #define REP_generation_weak_ptr_list b64 #define 
generation_weak_ptr_list(__ptr__) REP_generation_weak_ptr_list[__ptr__+OFFSET_generation_weak_ptr_list] #define SIZEOF_CostCentreStack 96 #define OFFSET_CostCentreStack_ccsID 0 #define REP_CostCentreStack_ccsID b64 #define CostCentreStack_ccsID(__ptr__) REP_CostCentreStack_ccsID[__ptr__+OFFSET_CostCentreStack_ccsID] #define OFFSET_CostCentreStack_mem_alloc 72 #define REP_CostCentreStack_mem_alloc b64 #define CostCentreStack_mem_alloc(__ptr__) REP_CostCentreStack_mem_alloc[__ptr__+OFFSET_CostCentreStack_mem_alloc] #define OFFSET_CostCentreStack_scc_count 48 #define REP_CostCentreStack_scc_count b64 #define CostCentreStack_scc_count(__ptr__) REP_CostCentreStack_scc_count[__ptr__+OFFSET_CostCentreStack_scc_count] #define OFFSET_CostCentreStack_prevStack 16 #define REP_CostCentreStack_prevStack b64 #define CostCentreStack_prevStack(__ptr__) REP_CostCentreStack_prevStack[__ptr__+OFFSET_CostCentreStack_prevStack] #define OFFSET_CostCentre_ccID 0 #define REP_CostCentre_ccID b64 #define CostCentre_ccID(__ptr__) REP_CostCentre_ccID[__ptr__+OFFSET_CostCentre_ccID] #define OFFSET_CostCentre_link 56 #define REP_CostCentre_link b64 #define CostCentre_link(__ptr__) REP_CostCentre_link[__ptr__+OFFSET_CostCentre_link] #define OFFSET_StgHeader_info 0 #define REP_StgHeader_info b64 #define StgHeader_info(__ptr__) REP_StgHeader_info[__ptr__+OFFSET_StgHeader_info] #define OFFSET_StgHeader_ccs 8 #define REP_StgHeader_ccs b64 #define StgHeader_ccs(__ptr__) REP_StgHeader_ccs[__ptr__+OFFSET_StgHeader_ccs] #define OFFSET_StgHeader_ldvw 16 #define REP_StgHeader_ldvw b64 #define StgHeader_ldvw(__ptr__) REP_StgHeader_ldvw[__ptr__+OFFSET_StgHeader_ldvw] #define SIZEOF_StgSMPThunkHeader 8 #define OFFSET_StgClosure_payload 0 #define StgClosure_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgClosure_payload + WDS(__ix__)] #define OFFSET_StgEntCounter_allocs 64 #define REP_StgEntCounter_allocs b64 #define StgEntCounter_allocs(__ptr__) 
REP_StgEntCounter_allocs[__ptr__+OFFSET_StgEntCounter_allocs] #define OFFSET_StgEntCounter_allocd 16 #define REP_StgEntCounter_allocd b64 #define StgEntCounter_allocd(__ptr__) REP_StgEntCounter_allocd[__ptr__+OFFSET_StgEntCounter_allocd] #define OFFSET_StgEntCounter_registeredp 0 #define REP_StgEntCounter_registeredp b64 #define StgEntCounter_registeredp(__ptr__) REP_StgEntCounter_registeredp[__ptr__+OFFSET_StgEntCounter_registeredp] #define OFFSET_StgEntCounter_link 72 #define REP_StgEntCounter_link b64 #define StgEntCounter_link(__ptr__) REP_StgEntCounter_link[__ptr__+OFFSET_StgEntCounter_link] #define OFFSET_StgEntCounter_entry_count 56 #define REP_StgEntCounter_entry_count b64 #define StgEntCounter_entry_count(__ptr__) REP_StgEntCounter_entry_count[__ptr__+OFFSET_StgEntCounter_entry_count] #define SIZEOF_StgUpdateFrame_NoHdr 8 #define SIZEOF_StgUpdateFrame (SIZEOF_StgHeader+8) #define SIZEOF_StgOrigThunkInfoFrame_NoHdr 8 #define SIZEOF_StgOrigThunkInfoFrame (SIZEOF_StgHeader+8) #define SIZEOF_StgCatchFrame_NoHdr 8 #define SIZEOF_StgCatchFrame (SIZEOF_StgHeader+8) #define SIZEOF_StgStopFrame_NoHdr 0 #define SIZEOF_StgStopFrame (SIZEOF_StgHeader+0) #define SIZEOF_StgDeadThreadFrame_NoHdr 8 #define SIZEOF_StgDeadThreadFrame (SIZEOF_StgHeader+8) #define OFFSET_StgDeadThreadFrame_result 0 #define REP_StgDeadThreadFrame_result b64 #define StgDeadThreadFrame_result(__ptr__) REP_StgDeadThreadFrame_result[__ptr__+SIZEOF_StgHeader+OFFSET_StgDeadThreadFrame_result] #define SIZEOF_StgMutArrPtrs_NoHdr 16 #define SIZEOF_StgMutArrPtrs (SIZEOF_StgHeader+16) #define OFFSET_StgMutArrPtrs_ptrs 0 #define REP_StgMutArrPtrs_ptrs b64 #define StgMutArrPtrs_ptrs(__ptr__) REP_StgMutArrPtrs_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutArrPtrs_ptrs] #define OFFSET_StgMutArrPtrs_size 8 #define REP_StgMutArrPtrs_size b64 #define StgMutArrPtrs_size(__ptr__) REP_StgMutArrPtrs_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutArrPtrs_size] #define SIZEOF_StgSmallMutArrPtrs_NoHdr 8 #define 
SIZEOF_StgSmallMutArrPtrs (SIZEOF_StgHeader+8) #define OFFSET_StgSmallMutArrPtrs_ptrs 0 #define REP_StgSmallMutArrPtrs_ptrs b64 #define StgSmallMutArrPtrs_ptrs(__ptr__) REP_StgSmallMutArrPtrs_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgSmallMutArrPtrs_ptrs] #define SIZEOF_StgArrBytes_NoHdr 8 #define SIZEOF_StgArrBytes (SIZEOF_StgHeader+8) #define OFFSET_StgArrBytes_bytes 0 #define REP_StgArrBytes_bytes b64 #define StgArrBytes_bytes(__ptr__) REP_StgArrBytes_bytes[__ptr__+SIZEOF_StgHeader+OFFSET_StgArrBytes_bytes] #define OFFSET_StgArrBytes_payload 8 #define StgArrBytes_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgArrBytes_payload + WDS(__ix__)] #define OFFSET_StgTSO__link 0 #define REP_StgTSO__link b64 #define StgTSO__link(__ptr__) REP_StgTSO__link[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO__link] #define OFFSET_StgTSO_global_link 8 #define REP_StgTSO_global_link b64 #define StgTSO_global_link(__ptr__) REP_StgTSO_global_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_global_link] #define OFFSET_StgTSO_what_next 24 #define REP_StgTSO_what_next b16 #define StgTSO_what_next(__ptr__) REP_StgTSO_what_next[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_what_next] #define OFFSET_StgTSO_why_blocked 32 #define REP_StgTSO_why_blocked b32 #define StgTSO_why_blocked(__ptr__) REP_StgTSO_why_blocked[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_why_blocked] #define OFFSET_StgTSO_block_info 40 #define REP_StgTSO_block_info b64 #define StgTSO_block_info(__ptr__) REP_StgTSO_block_info[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_block_info] #define OFFSET_StgTSO_blocked_exceptions 96 #define REP_StgTSO_blocked_exceptions b64 #define StgTSO_blocked_exceptions(__ptr__) REP_StgTSO_blocked_exceptions[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_blocked_exceptions] #define OFFSET_StgTSO_id 48 #define REP_StgTSO_id b64 #define StgTSO_id(__ptr__) REP_StgTSO_id[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_id] #define OFFSET_StgTSO_cap 72 #define REP_StgTSO_cap b64 #define StgTSO_cap(__ptr__) 
REP_StgTSO_cap[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_cap] #define OFFSET_StgTSO_saved_errno 56 #define REP_StgTSO_saved_errno b32 #define StgTSO_saved_errno(__ptr__) REP_StgTSO_saved_errno[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_saved_errno] #define OFFSET_StgTSO_trec 80 #define REP_StgTSO_trec b64 #define StgTSO_trec(__ptr__) REP_StgTSO_trec[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_trec] #define OFFSET_StgTSO_flags 28 #define REP_StgTSO_flags b32 #define StgTSO_flags(__ptr__) REP_StgTSO_flags[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_flags] #define OFFSET_StgTSO_dirty 60 #define REP_StgTSO_dirty b32 #define StgTSO_dirty(__ptr__) REP_StgTSO_dirty[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_dirty] #define OFFSET_StgTSO_bq 104 #define REP_StgTSO_bq b64 #define StgTSO_bq(__ptr__) REP_StgTSO_bq[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_bq] #define OFFSET_StgTSO_label 88 #define REP_StgTSO_label b64 #define StgTSO_label(__ptr__) REP_StgTSO_label[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_label] #define OFFSET_StgTSO_bound 64 #define REP_StgTSO_bound b64 #define StgTSO_bound(__ptr__) REP_StgTSO_bound[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_bound] #define OFFSET_StgTSO_alloc_limit 112 #define REP_StgTSO_alloc_limit b64 #define StgTSO_alloc_limit(__ptr__) REP_StgTSO_alloc_limit[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_alloc_limit] #define OFFSET_StgTSO_cccs 128 #define REP_StgTSO_cccs b64 #define StgTSO_cccs(__ptr__) REP_StgTSO_cccs[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_cccs] #define OFFSET_StgTSO_stackobj 16 #define REP_StgTSO_stackobj b64 #define StgTSO_stackobj(__ptr__) REP_StgTSO_stackobj[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_stackobj] #define OFFSET_StgStack_sp 8 #define REP_StgStack_sp b64 #define StgStack_sp(__ptr__) REP_StgStack_sp[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_sp] #define OFFSET_StgStack_stack 16 #define OFFSET_StgStack_stack_size 0 #define REP_StgStack_stack_size b32 #define StgStack_stack_size(__ptr__) 
REP_StgStack_stack_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_stack_size] #define OFFSET_StgStack_dirty 4 #define REP_StgStack_dirty b8 #define StgStack_dirty(__ptr__) REP_StgStack_dirty[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_dirty] #define OFFSET_StgStack_marking 5 #define REP_StgStack_marking b8 #define StgStack_marking(__ptr__) REP_StgStack_marking[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_marking] #define SIZEOF_StgTSOProfInfo 8 #define OFFSET_StgUpdateFrame_updatee 0 #define REP_StgUpdateFrame_updatee b64 #define StgUpdateFrame_updatee(__ptr__) REP_StgUpdateFrame_updatee[__ptr__+SIZEOF_StgHeader+OFFSET_StgUpdateFrame_updatee] #define OFFSET_StgOrigThunkInfoFrame_info_ptr 0 #define REP_StgOrigThunkInfoFrame_info_ptr b64 #define StgOrigThunkInfoFrame_info_ptr(__ptr__) REP_StgOrigThunkInfoFrame_info_ptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgOrigThunkInfoFrame_info_ptr] #define OFFSET_StgCatchFrame_handler 0 #define REP_StgCatchFrame_handler b64 #define StgCatchFrame_handler(__ptr__) REP_StgCatchFrame_handler[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchFrame_handler] #define SIZEOF_StgRetFun 24 #define OFFSET_StgRetFun_size 8 #define OFFSET_StgRetFun_fun 16 #define OFFSET_StgRetFun_payload 24 #define SIZEOF_StgPAP_NoHdr 16 #define SIZEOF_StgPAP (SIZEOF_StgHeader+16) #define OFFSET_StgPAP_n_args 4 #define REP_StgPAP_n_args b32 #define StgPAP_n_args(__ptr__) REP_StgPAP_n_args[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_n_args] #define OFFSET_StgPAP_fun 8 #define REP_StgPAP_fun gcptr #define StgPAP_fun(__ptr__) REP_StgPAP_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_fun] #define OFFSET_StgPAP_arity 0 #define REP_StgPAP_arity b32 #define StgPAP_arity(__ptr__) REP_StgPAP_arity[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_arity] #define OFFSET_StgPAP_payload 16 #define StgPAP_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_payload + WDS(__ix__)] #define SIZEOF_StgAP_NoThunkHdr 16 #define SIZEOF_StgAP_NoHdr 24 #define SIZEOF_StgAP (SIZEOF_StgHeader+24) #define 
OFFSET_StgAP_n_args 12 #define REP_StgAP_n_args b32 #define StgAP_n_args(__ptr__) REP_StgAP_n_args[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_n_args] #define OFFSET_StgAP_fun 16 #define REP_StgAP_fun gcptr #define StgAP_fun(__ptr__) REP_StgAP_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_fun] #define OFFSET_StgAP_payload 24 #define StgAP_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_payload + WDS(__ix__)] #define SIZEOF_StgAP_STACK_NoThunkHdr 16 #define SIZEOF_StgAP_STACK_NoHdr 24 #define SIZEOF_StgAP_STACK (SIZEOF_StgHeader+24) #define OFFSET_StgAP_STACK_size 8 #define REP_StgAP_STACK_size b64 #define StgAP_STACK_size(__ptr__) REP_StgAP_STACK_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_size] #define OFFSET_StgAP_STACK_fun 16 #define REP_StgAP_STACK_fun gcptr #define StgAP_STACK_fun(__ptr__) REP_StgAP_STACK_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_fun] #define OFFSET_StgAP_STACK_payload 24 #define StgAP_STACK_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_payload + WDS(__ix__)] #define SIZEOF_StgContinuation_NoHdr 24 #define SIZEOF_StgContinuation (SIZEOF_StgHeader+24) #define OFFSET_StgContinuation_apply_mask_frame 0 #define REP_StgContinuation_apply_mask_frame b64 #define StgContinuation_apply_mask_frame(__ptr__) REP_StgContinuation_apply_mask_frame[__ptr__+SIZEOF_StgHeader+OFFSET_StgContinuation_apply_mask_frame] #define OFFSET_StgContinuation_mask_frame_offset 8 #define REP_StgContinuation_mask_frame_offset b64 #define StgContinuation_mask_frame_offset(__ptr__) REP_StgContinuation_mask_frame_offset[__ptr__+SIZEOF_StgHeader+OFFSET_StgContinuation_mask_frame_offset] #define OFFSET_StgContinuation_stack_size 16 #define REP_StgContinuation_stack_size b64 #define StgContinuation_stack_size(__ptr__) REP_StgContinuation_stack_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgContinuation_stack_size] #define OFFSET_StgContinuation_stack 24 #define StgContinuation_stack(__ptr__,__ix__) 
W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgContinuation_stack + WDS(__ix__)] #define SIZEOF_StgSelector_NoThunkHdr 8 #define SIZEOF_StgSelector_NoHdr 16 #define SIZEOF_StgSelector (SIZEOF_StgHeader+16) #define OFFSET_StgInd_indirectee 0 #define REP_StgInd_indirectee gcptr #define StgInd_indirectee(__ptr__) REP_StgInd_indirectee[__ptr__+SIZEOF_StgHeader+OFFSET_StgInd_indirectee] #define SIZEOF_StgMutVar_NoHdr 8 #define SIZEOF_StgMutVar (SIZEOF_StgHeader+8) #define OFFSET_StgMutVar_var 0 #define REP_StgMutVar_var b64 #define StgMutVar_var(__ptr__) REP_StgMutVar_var[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutVar_var] #define SIZEOF_StgAtomicallyFrame_NoHdr 16 #define SIZEOF_StgAtomicallyFrame (SIZEOF_StgHeader+16) #define OFFSET_StgAtomicallyFrame_code 0 #define REP_StgAtomicallyFrame_code b64 #define StgAtomicallyFrame_code(__ptr__) REP_StgAtomicallyFrame_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgAtomicallyFrame_code] #define OFFSET_StgAtomicallyFrame_result 8 #define REP_StgAtomicallyFrame_result b64 #define StgAtomicallyFrame_result(__ptr__) REP_StgAtomicallyFrame_result[__ptr__+SIZEOF_StgHeader+OFFSET_StgAtomicallyFrame_result] #define OFFSET_StgTRecHeader_enclosing_trec 0 #define REP_StgTRecHeader_enclosing_trec b64 #define StgTRecHeader_enclosing_trec(__ptr__) REP_StgTRecHeader_enclosing_trec[__ptr__+SIZEOF_StgHeader+OFFSET_StgTRecHeader_enclosing_trec] #define SIZEOF_StgCatchSTMFrame_NoHdr 16 #define SIZEOF_StgCatchSTMFrame (SIZEOF_StgHeader+16) #define OFFSET_StgCatchSTMFrame_handler 8 #define REP_StgCatchSTMFrame_handler b64 #define StgCatchSTMFrame_handler(__ptr__) REP_StgCatchSTMFrame_handler[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchSTMFrame_handler] #define OFFSET_StgCatchSTMFrame_code 0 #define REP_StgCatchSTMFrame_code b64 #define StgCatchSTMFrame_code(__ptr__) REP_StgCatchSTMFrame_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchSTMFrame_code] #define SIZEOF_StgCatchRetryFrame_NoHdr 24 #define SIZEOF_StgCatchRetryFrame (SIZEOF_StgHeader+24) #define 
OFFSET_StgCatchRetryFrame_running_alt_code 0 #define REP_StgCatchRetryFrame_running_alt_code b64 #define StgCatchRetryFrame_running_alt_code(__ptr__) REP_StgCatchRetryFrame_running_alt_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_running_alt_code] #define OFFSET_StgCatchRetryFrame_first_code 8 #define REP_StgCatchRetryFrame_first_code b64 #define StgCatchRetryFrame_first_code(__ptr__) REP_StgCatchRetryFrame_first_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_first_code] #define OFFSET_StgCatchRetryFrame_alt_code 16 #define REP_StgCatchRetryFrame_alt_code b64 #define StgCatchRetryFrame_alt_code(__ptr__) REP_StgCatchRetryFrame_alt_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_alt_code] #define OFFSET_StgTVarWatchQueue_closure 0 #define REP_StgTVarWatchQueue_closure b64 #define StgTVarWatchQueue_closure(__ptr__) REP_StgTVarWatchQueue_closure[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_closure] #define OFFSET_StgTVarWatchQueue_next_queue_entry 8 #define REP_StgTVarWatchQueue_next_queue_entry b64 #define StgTVarWatchQueue_next_queue_entry(__ptr__) REP_StgTVarWatchQueue_next_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_next_queue_entry] #define OFFSET_StgTVarWatchQueue_prev_queue_entry 16 #define REP_StgTVarWatchQueue_prev_queue_entry b64 #define StgTVarWatchQueue_prev_queue_entry(__ptr__) REP_StgTVarWatchQueue_prev_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_prev_queue_entry] #define SIZEOF_StgTVar_NoHdr 24 #define SIZEOF_StgTVar (SIZEOF_StgHeader+24) #define OFFSET_StgTVar_current_value 0 #define REP_StgTVar_current_value b64 #define StgTVar_current_value(__ptr__) REP_StgTVar_current_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_current_value] #define OFFSET_StgTVar_first_watch_queue_entry 8 #define REP_StgTVar_first_watch_queue_entry b64 #define StgTVar_first_watch_queue_entry(__ptr__) REP_StgTVar_first_watch_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_first_watch_queue_entry] #define 
OFFSET_StgTVar_num_updates 16 #define REP_StgTVar_num_updates b64 #define StgTVar_num_updates(__ptr__) REP_StgTVar_num_updates[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_num_updates] #define SIZEOF_StgWeak_NoHdr 40 #define SIZEOF_StgWeak (SIZEOF_StgHeader+40) #define OFFSET_StgWeak_link 32 #define REP_StgWeak_link b64 #define StgWeak_link(__ptr__) REP_StgWeak_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_link] #define OFFSET_StgWeak_key 8 #define REP_StgWeak_key b64 #define StgWeak_key(__ptr__) REP_StgWeak_key[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_key] #define OFFSET_StgWeak_value 16 #define REP_StgWeak_value b64 #define StgWeak_value(__ptr__) REP_StgWeak_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_value] #define OFFSET_StgWeak_finalizer 24 #define REP_StgWeak_finalizer b64 #define StgWeak_finalizer(__ptr__) REP_StgWeak_finalizer[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_finalizer] #define OFFSET_StgWeak_cfinalizers 0 #define REP_StgWeak_cfinalizers b64 #define StgWeak_cfinalizers(__ptr__) REP_StgWeak_cfinalizers[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_cfinalizers] #define SIZEOF_StgCFinalizerList_NoHdr 40 #define SIZEOF_StgCFinalizerList (SIZEOF_StgHeader+40) #define OFFSET_StgCFinalizerList_link 0 #define REP_StgCFinalizerList_link b64 #define StgCFinalizerList_link(__ptr__) REP_StgCFinalizerList_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_link] #define OFFSET_StgCFinalizerList_fptr 8 #define REP_StgCFinalizerList_fptr b64 #define StgCFinalizerList_fptr(__ptr__) REP_StgCFinalizerList_fptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_fptr] #define OFFSET_StgCFinalizerList_ptr 16 #define REP_StgCFinalizerList_ptr b64 #define StgCFinalizerList_ptr(__ptr__) REP_StgCFinalizerList_ptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_ptr] #define OFFSET_StgCFinalizerList_eptr 24 #define REP_StgCFinalizerList_eptr b64 #define StgCFinalizerList_eptr(__ptr__) REP_StgCFinalizerList_eptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_eptr] #define 
OFFSET_StgCFinalizerList_flag 32 #define REP_StgCFinalizerList_flag b64 #define StgCFinalizerList_flag(__ptr__) REP_StgCFinalizerList_flag[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_flag] #define SIZEOF_StgMVar_NoHdr 24 #define SIZEOF_StgMVar (SIZEOF_StgHeader+24) #define OFFSET_StgMVar_head 0 #define REP_StgMVar_head b64 #define StgMVar_head(__ptr__) REP_StgMVar_head[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_head] #define OFFSET_StgMVar_tail 8 #define REP_StgMVar_tail b64 #define StgMVar_tail(__ptr__) REP_StgMVar_tail[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_tail] #define OFFSET_StgMVar_value 16 #define REP_StgMVar_value b64 #define StgMVar_value(__ptr__) REP_StgMVar_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_value] #define SIZEOF_StgMVarTSOQueue_NoHdr 16 #define SIZEOF_StgMVarTSOQueue (SIZEOF_StgHeader+16) #define OFFSET_StgMVarTSOQueue_link 0 #define REP_StgMVarTSOQueue_link b64 #define StgMVarTSOQueue_link(__ptr__) REP_StgMVarTSOQueue_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVarTSOQueue_link] #define OFFSET_StgMVarTSOQueue_tso 8 #define REP_StgMVarTSOQueue_tso b64 #define StgMVarTSOQueue_tso(__ptr__) REP_StgMVarTSOQueue_tso[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVarTSOQueue_tso] #define SIZEOF_StgBCO_NoHdr 32 #define SIZEOF_StgBCO (SIZEOF_StgHeader+32) #define OFFSET_StgBCO_instrs 0 #define REP_StgBCO_instrs b64 #define StgBCO_instrs(__ptr__) REP_StgBCO_instrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_instrs] #define OFFSET_StgBCO_literals 8 #define REP_StgBCO_literals b64 #define StgBCO_literals(__ptr__) REP_StgBCO_literals[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_literals] #define OFFSET_StgBCO_ptrs 16 #define REP_StgBCO_ptrs b64 #define StgBCO_ptrs(__ptr__) REP_StgBCO_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_ptrs] #define OFFSET_StgBCO_arity 24 #define REP_StgBCO_arity b32 #define StgBCO_arity(__ptr__) REP_StgBCO_arity[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_arity] #define OFFSET_StgBCO_size 28 #define REP_StgBCO_size b32 #define StgBCO_size(__ptr__) 
REP_StgBCO_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_size] #define OFFSET_StgBCO_bitmap 32 #define StgBCO_bitmap(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_bitmap + WDS(__ix__)] #define SIZEOF_StgStableName_NoHdr 8 #define SIZEOF_StgStableName (SIZEOF_StgHeader+8) #define OFFSET_StgStableName_sn 0 #define REP_StgStableName_sn b64 #define StgStableName_sn(__ptr__) REP_StgStableName_sn[__ptr__+SIZEOF_StgHeader+OFFSET_StgStableName_sn] #define SIZEOF_StgBlockingQueue_NoHdr 32 #define SIZEOF_StgBlockingQueue (SIZEOF_StgHeader+32) #define OFFSET_StgBlockingQueue_bh 8 #define REP_StgBlockingQueue_bh b64 #define StgBlockingQueue_bh(__ptr__) REP_StgBlockingQueue_bh[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_bh] #define OFFSET_StgBlockingQueue_owner 16 #define REP_StgBlockingQueue_owner b64 #define StgBlockingQueue_owner(__ptr__) REP_StgBlockingQueue_owner[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_owner] #define OFFSET_StgBlockingQueue_queue 24 #define REP_StgBlockingQueue_queue b64 #define StgBlockingQueue_queue(__ptr__) REP_StgBlockingQueue_queue[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_queue] #define OFFSET_StgBlockingQueue_link 0 #define REP_StgBlockingQueue_link b64 #define StgBlockingQueue_link(__ptr__) REP_StgBlockingQueue_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_link] #define SIZEOF_MessageBlackHole_NoHdr 24 #define SIZEOF_MessageBlackHole (SIZEOF_StgHeader+24) #define OFFSET_MessageBlackHole_link 0 #define REP_MessageBlackHole_link b64 #define MessageBlackHole_link(__ptr__) REP_MessageBlackHole_link[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_link] #define OFFSET_MessageBlackHole_tso 8 #define REP_MessageBlackHole_tso b64 #define MessageBlackHole_tso(__ptr__) REP_MessageBlackHole_tso[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_tso] #define OFFSET_MessageBlackHole_bh 16 #define REP_MessageBlackHole_bh b64 #define MessageBlackHole_bh(__ptr__) 
REP_MessageBlackHole_bh[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_bh] #define SIZEOF_StgCompactNFData_NoHdr 72 #define SIZEOF_StgCompactNFData (SIZEOF_StgHeader+72) #define OFFSET_StgCompactNFData_totalW 0 #define REP_StgCompactNFData_totalW b64 #define StgCompactNFData_totalW(__ptr__) REP_StgCompactNFData_totalW[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_totalW] #define OFFSET_StgCompactNFData_autoBlockW 8 #define REP_StgCompactNFData_autoBlockW b64 #define StgCompactNFData_autoBlockW(__ptr__) REP_StgCompactNFData_autoBlockW[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_autoBlockW] #define OFFSET_StgCompactNFData_nursery 32 #define REP_StgCompactNFData_nursery b64 #define StgCompactNFData_nursery(__ptr__) REP_StgCompactNFData_nursery[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_nursery] #define OFFSET_StgCompactNFData_last 40 #define REP_StgCompactNFData_last b64 #define StgCompactNFData_last(__ptr__) REP_StgCompactNFData_last[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_last] #define OFFSET_StgCompactNFData_hp 16 #define REP_StgCompactNFData_hp b64 #define StgCompactNFData_hp(__ptr__) REP_StgCompactNFData_hp[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hp] #define OFFSET_StgCompactNFData_hpLim 24 #define REP_StgCompactNFData_hpLim b64 #define StgCompactNFData_hpLim(__ptr__) REP_StgCompactNFData_hpLim[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hpLim] #define OFFSET_StgCompactNFData_hash 48 #define REP_StgCompactNFData_hash b64 #define StgCompactNFData_hash(__ptr__) REP_StgCompactNFData_hash[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hash] #define OFFSET_StgCompactNFData_result 56 #define REP_StgCompactNFData_result b64 #define StgCompactNFData_result(__ptr__) REP_StgCompactNFData_result[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_result] #define SIZEOF_StgCompactNFDataBlock 24 #define OFFSET_StgCompactNFDataBlock_self 0 #define REP_StgCompactNFDataBlock_self b64 #define StgCompactNFDataBlock_self(__ptr__) 
REP_StgCompactNFDataBlock_self[__ptr__+OFFSET_StgCompactNFDataBlock_self] #define OFFSET_StgCompactNFDataBlock_owner 8 #define REP_StgCompactNFDataBlock_owner b64 #define StgCompactNFDataBlock_owner(__ptr__) REP_StgCompactNFDataBlock_owner[__ptr__+OFFSET_StgCompactNFDataBlock_owner] #define OFFSET_StgCompactNFDataBlock_next 16 #define REP_StgCompactNFDataBlock_next b64 #define StgCompactNFDataBlock_next(__ptr__) REP_StgCompactNFDataBlock_next[__ptr__+OFFSET_StgCompactNFDataBlock_next] #define OFFSET_RtsFlags_ProfFlags_doHeapProfile 288 #define REP_RtsFlags_ProfFlags_doHeapProfile b32 #define RtsFlags_ProfFlags_doHeapProfile(__ptr__) REP_RtsFlags_ProfFlags_doHeapProfile[__ptr__+OFFSET_RtsFlags_ProfFlags_doHeapProfile] #define OFFSET_RtsFlags_ProfFlags_showCCSOnException 311 #define REP_RtsFlags_ProfFlags_showCCSOnException b8 #define RtsFlags_ProfFlags_showCCSOnException(__ptr__) REP_RtsFlags_ProfFlags_showCCSOnException[__ptr__+OFFSET_RtsFlags_ProfFlags_showCCSOnException] #define OFFSET_RtsFlags_DebugFlags_apply 253 #define REP_RtsFlags_DebugFlags_apply b8 #define RtsFlags_DebugFlags_apply(__ptr__) REP_RtsFlags_DebugFlags_apply[__ptr__+OFFSET_RtsFlags_DebugFlags_apply] #define OFFSET_RtsFlags_DebugFlags_sanity 247 #define REP_RtsFlags_DebugFlags_sanity b8 #define RtsFlags_DebugFlags_sanity(__ptr__) REP_RtsFlags_DebugFlags_sanity[__ptr__+OFFSET_RtsFlags_DebugFlags_sanity] #define OFFSET_RtsFlags_DebugFlags_weak 242 #define REP_RtsFlags_DebugFlags_weak b8 #define RtsFlags_DebugFlags_weak(__ptr__) REP_RtsFlags_DebugFlags_weak[__ptr__+OFFSET_RtsFlags_DebugFlags_weak] #define OFFSET_RtsFlags_GcFlags_initialStkSize 16 #define REP_RtsFlags_GcFlags_initialStkSize b32 #define RtsFlags_GcFlags_initialStkSize(__ptr__) REP_RtsFlags_GcFlags_initialStkSize[__ptr__+OFFSET_RtsFlags_GcFlags_initialStkSize] #define OFFSET_RtsFlags_MiscFlags_tickInterval 208 #define REP_RtsFlags_MiscFlags_tickInterval b64 #define RtsFlags_MiscFlags_tickInterval(__ptr__) 
REP_RtsFlags_MiscFlags_tickInterval[__ptr__+OFFSET_RtsFlags_MiscFlags_tickInterval] #define SIZEOF_StgFunInfoExtraFwd 32 #define OFFSET_StgFunInfoExtraFwd_slow_apply 24 #define REP_StgFunInfoExtraFwd_slow_apply b64 #define StgFunInfoExtraFwd_slow_apply(__ptr__) REP_StgFunInfoExtraFwd_slow_apply[__ptr__+OFFSET_StgFunInfoExtraFwd_slow_apply] #define OFFSET_StgFunInfoExtraFwd_fun_type 0 #define REP_StgFunInfoExtraFwd_fun_type b32 #define StgFunInfoExtraFwd_fun_type(__ptr__) REP_StgFunInfoExtraFwd_fun_type[__ptr__+OFFSET_StgFunInfoExtraFwd_fun_type] #define OFFSET_StgFunInfoExtraFwd_arity 4 #define REP_StgFunInfoExtraFwd_arity b32 #define StgFunInfoExtraFwd_arity(__ptr__) REP_StgFunInfoExtraFwd_arity[__ptr__+OFFSET_StgFunInfoExtraFwd_arity] #define OFFSET_StgFunInfoExtraFwd_bitmap 16 #define REP_StgFunInfoExtraFwd_bitmap b64 #define StgFunInfoExtraFwd_bitmap(__ptr__) REP_StgFunInfoExtraFwd_bitmap[__ptr__+OFFSET_StgFunInfoExtraFwd_bitmap] #define SIZEOF_StgFunInfoExtraRev 24 #define OFFSET_StgFunInfoExtraRev_slow_apply_offset 0 #define REP_StgFunInfoExtraRev_slow_apply_offset b32 #define StgFunInfoExtraRev_slow_apply_offset(__ptr__) REP_StgFunInfoExtraRev_slow_apply_offset[__ptr__+OFFSET_StgFunInfoExtraRev_slow_apply_offset] #define OFFSET_StgFunInfoExtraRev_fun_type 16 #define REP_StgFunInfoExtraRev_fun_type b32 #define StgFunInfoExtraRev_fun_type(__ptr__) REP_StgFunInfoExtraRev_fun_type[__ptr__+OFFSET_StgFunInfoExtraRev_fun_type] #define OFFSET_StgFunInfoExtraRev_arity 20 #define REP_StgFunInfoExtraRev_arity b32 #define StgFunInfoExtraRev_arity(__ptr__) REP_StgFunInfoExtraRev_arity[__ptr__+OFFSET_StgFunInfoExtraRev_arity] #define OFFSET_StgFunInfoExtraRev_bitmap 8 #define REP_StgFunInfoExtraRev_bitmap b64 #define StgFunInfoExtraRev_bitmap(__ptr__) REP_StgFunInfoExtraRev_bitmap[__ptr__+OFFSET_StgFunInfoExtraRev_bitmap] #define OFFSET_StgFunInfoExtraRev_bitmap_offset 8 #define REP_StgFunInfoExtraRev_bitmap_offset b32 #define StgFunInfoExtraRev_bitmap_offset(__ptr__) 
REP_StgFunInfoExtraRev_bitmap_offset[__ptr__+OFFSET_StgFunInfoExtraRev_bitmap_offset] #define OFFSET_StgLargeBitmap_size 0 #define REP_StgLargeBitmap_size b64 #define StgLargeBitmap_size(__ptr__) REP_StgLargeBitmap_size[__ptr__+OFFSET_StgLargeBitmap_size] #define OFFSET_StgLargeBitmap_bitmap 8 #define SIZEOF_snEntry 24 #define OFFSET_snEntry_sn_obj 16 #define REP_snEntry_sn_obj b64 #define snEntry_sn_obj(__ptr__) REP_snEntry_sn_obj[__ptr__+OFFSET_snEntry_sn_obj] #define OFFSET_snEntry_addr 0 #define REP_snEntry_addr b64 #define snEntry_addr(__ptr__) REP_snEntry_addr[__ptr__+OFFSET_snEntry_addr] #define SIZEOF_spEntry 8 #define OFFSET_spEntry_addr 0 #define REP_spEntry_addr b64 #define spEntry_addr(__ptr__) REP_spEntry_addr[__ptr__+OFFSET_spEntry_addr] ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/rts/build/include/ghcautoconf.h0000644000000000000000000004566607346545000024207 0ustar0000000000000000#if !defined(__GHCAUTOCONF_H__) #define __GHCAUTOCONF_H__ /* ghcautoconf.h.autoconf. Generated from ghcautoconf.h.autoconf.in by configure. */ /* ghcautoconf.h.autoconf.in. Generated from configure.ac by autoheader. */ /* Define if building universal (internal helper macro) */ /* #undef AC_APPLE_UNIVERSAL_BUILD */ /* The alignment of a `char'. */ #define ALIGNMENT_CHAR 1 /* The alignment of a `double'. */ #define ALIGNMENT_DOUBLE 8 /* The alignment of a `float'. */ #define ALIGNMENT_FLOAT 4 /* The alignment of a `int'. */ #define ALIGNMENT_INT 4 /* The alignment of a `int16_t'. */ #define ALIGNMENT_INT16_T 2 /* The alignment of a `int32_t'. */ #define ALIGNMENT_INT32_T 4 /* The alignment of a `int64_t'. */ #define ALIGNMENT_INT64_T 8 /* The alignment of a `int8_t'. */ #define ALIGNMENT_INT8_T 1 /* The alignment of a `long'. */ #define ALIGNMENT_LONG 8 /* The alignment of a `long long'. */ #define ALIGNMENT_LONG_LONG 8 /* The alignment of a `short'. */ #define ALIGNMENT_SHORT 2 /* The alignment of a `uint16_t'. 
*/ #define ALIGNMENT_UINT16_T 2 /* The alignment of a `uint32_t'. */ #define ALIGNMENT_UINT32_T 4 /* The alignment of a `uint64_t'. */ #define ALIGNMENT_UINT64_T 8 /* The alignment of a `uint8_t'. */ #define ALIGNMENT_UINT8_T 1 /* The alignment of a `unsigned char'. */ #define ALIGNMENT_UNSIGNED_CHAR 1 /* The alignment of a `unsigned int'. */ #define ALIGNMENT_UNSIGNED_INT 4 /* The alignment of a `unsigned long'. */ #define ALIGNMENT_UNSIGNED_LONG 8 /* The alignment of a `unsigned long long'. */ #define ALIGNMENT_UNSIGNED_LONG_LONG 8 /* The alignment of a `unsigned short'. */ #define ALIGNMENT_UNSIGNED_SHORT 2 /* The alignment of a `void *'. */ #define ALIGNMENT_VOID_P 8 /* Define (to 1) if C compiler has an LLVM back end */ #define CC_LLVM_BACKEND 1 /* Define to 1 if __thread is supported */ #define CC_SUPPORTS_TLS 1 /* Define to 1 if using 'alloca.c'. */ /* #undef C_ALLOCA */ /* Define to 1 if your processor stores words of floats with the most significant byte first */ /* #undef FLOAT_WORDS_BIGENDIAN */ /* Has musttail */ #define HAS_MUSTTAIL 1 /* Has visibility hidden */ #define HAS_VISIBILITY_HIDDEN 1 /* Define to 1 if you have 'alloca', as a function or macro. */ #define HAVE_ALLOCA 1 /* Define to 1 if works. */ #define HAVE_ALLOCA_H 1 /* Does the toolchain use ARMv8 outline atomics */ /* #undef HAVE_ARM_OUTLINE_ATOMICS */ /* Define to 1 if you have the header file. */ /* #undef HAVE_BFD_H */ /* Define to 1 if you have the 'clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define to 1 if you have the 'ctime_r' function. */ #define HAVE_CTIME_R 1 /* Define to 1 if you have the header file. */ #define HAVE_CTYPE_H 1 /* Define to 1 if you have the declaration of 'ctime_r', and to 0 if you don't. */ #define HAVE_DECL_CTIME_R 1 /* Define to 1 if you have the declaration of 'environ', and to 0 if you don't. */ #define HAVE_DECL_ENVIRON 0 /* Define to 1 if you have the declaration of 'MADV_DONTNEED', and to 0 if you don't. 
*/ #define HAVE_DECL_MADV_DONTNEED 1 /* Define to 1 if you have the declaration of 'MADV_FREE', and to 0 if you don't. */ #define HAVE_DECL_MADV_FREE 1 /* Define to 1 if you have the declaration of 'MAP_NORESERVE', and to 0 if you don't. */ #define HAVE_DECL_MAP_NORESERVE 1 /* Define to 1 if you have the declaration of 'program_invocation_short_name', and to 0 if you don't. */ #define HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME 0 /* Define to 1 if you have the header file. */ #define HAVE_DIRENT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the 'dlinfo' function. */ /* #undef HAVE_DLINFO */ /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the 'eventfd' function. */ /* #undef HAVE_EVENTFD */ /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the 'fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the 'getclock' function. */ /* #undef HAVE_GETCLOCK */ /* Define to 1 if you have the `GetModuleFileName' function. */ /* #undef HAVE_GETMODULEFILENAME */ /* Define to 1 if you have the 'getpid' function. */ #define HAVE_GETPID 1 /* Define to 1 if you have the 'getrusage' function. */ #define HAVE_GETRUSAGE 1 /* Define to 1 if you have the 'gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the 'getuid' function. */ #define HAVE_GETUID 1 /* Define to 1 if you have the header file. */ #define HAVE_GRP_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the 'bfd' library (-lbfd). */ /* #undef HAVE_LIBBFD */ /* Define to 1 if you have the 'dl' library (-ldl). */ #define HAVE_LIBDL 1 /* Define to 1 if you have the 'iberty' library (-liberty). 
*/ /* #undef HAVE_LIBIBERTY */ /* Define to 1 if you need to link with libm */ #define HAVE_LIBM 1 /* Define to 1 if you have libnuma */ #define HAVE_LIBNUMA 0 /* Define to 1 if you have the 'pthread' library (-lpthread). */ #define HAVE_LIBPTHREAD 1 /* Define to 1 if you wish to compress IPE data in compiler results (requires libzstd) */ #define HAVE_LIBZSTD 0 /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LOCALE_H 1 /* Define to 1 if the system has the type 'long long'. */ #define HAVE_LONG_LONG 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_MINIX_CONFIG_H */ /* Define to 1 if you have the header file. */ #define HAVE_NLIST_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_NUMAIF_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_NUMA_H */ /* Define to 1 if we have printf$LDBLStub (Apple Mac OS >= 10.4, PPC). */ #define HAVE_PRINTF_LDBLSTUB 0 /* Define to 1 if you have the 'pthread_condattr_setclock' function. */ /* #undef HAVE_PTHREAD_CONDATTR_SETCLOCK */ /* Define to 1 if you have the header file. */ #define HAVE_PTHREAD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_PTHREAD_NP_H */ /* Define to 1 if you have the glibc version of pthread_setname_np */ /* #undef HAVE_PTHREAD_SETNAME_NP */ /* Define to 1 if you have the Darwin version of pthread_setname_np */ #define HAVE_PTHREAD_SETNAME_NP_DARWIN 1 /* Define to 1 if you have the NetBSD version of pthread_setname_np */ /* #undef HAVE_PTHREAD_SETNAME_NP_NETBSD */ /* Define to 1 if you have pthread_set_name_np */ /* #undef HAVE_PTHREAD_SET_NAME_NP */ /* Define to 1 if you have the header file. */ #define HAVE_PWD_H 1 /* Define to 1 if you have the 'raise' function. */ #define HAVE_RAISE 1 /* Define to 1 if you have the 'sched_getaffinity' function. */ /* #undef HAVE_SCHED_GETAFFINITY */ /* Define to 1 if you have the header file. 
*/ #define HAVE_SCHED_H 1 /* Define to 1 if you have the 'sched_setaffinity' function. */ /* #undef HAVE_SCHED_SETAFFINITY */ /* Define to 1 if you have the 'setitimer' function. */ #define HAVE_SETITIMER 1 /* Define to 1 if you have the 'setlocale' function. */ #define HAVE_SETLOCALE 1 /* Define to 1 if you have the 'siginterrupt' function. */ #define HAVE_SIGINTERRUPT 1 /* Define to 1 if you have the header file. */ #define HAVE_SIGNAL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if Apple-style dead-stripping is supported. */ #define HAVE_SUBSECTIONS_VIA_SYMBOLS 1 /* Define to 1 if you have the 'sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_CPUSET_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_EVENTFD_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_PARAM_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_RESOURCE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_SELECT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMEB_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_TIMERFD_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_TIMERS_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UTSNAME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_WAIT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TERMIOS_H 1 /* Define to 1 if you have the 'timer_settime' function. */ /* #undef HAVE_TIMER_SETTIME */ /* Define to 1 if you have the 'times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the 'uselocale' function. */ #define HAVE_USELOCALE 1 /* Define to 1 if you have the header file. */ #define HAVE_UTIME_H 1 /* Define to 1 if you have the 'vfork' function. */ #define HAVE_VFORK 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_VFORK_H */ /* Define to 1 if you have the header file. */ #define HAVE_WCHAR_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if you have the `WinExec' function. */ /* #undef HAVE_WINEXEC */ /* Define to 1 if you have the header file. */ /* #undef HAVE_WINSOCK_H */ /* Define to 1 if 'fork' works. */ #define HAVE_WORKING_FORK 1 /* Define to 1 if 'vfork' works. 
*/ #define HAVE_WORKING_VFORK 1 /* Define to 1 if the mio I/O manager should be built */ #define IOMGR_BUILD_MIO 1 /* Define to 1 if the select I/O manager should be built */ #define IOMGR_BUILD_SELECT 1 /* Define to 1 if the win32-legacy I/O manager should be built */ /* #undef IOMGR_BUILD_WIN32_LEGACY */ /* Define to 1 if the winio I/O manager should be built */ /* #undef IOMGR_BUILD_WINIO */ /* Define to 1 if the select I/O manager is the default for the non-threaded RTS */ #define IOMGR_DEFAULT_NON_THREADED_SELECT 1 /* Define to 1 if the win32-legacy I/O manager is the default for the non-threaded RTS */ /* #undef IOMGR_DEFAULT_NON_THREADED_WIN32_LEGACY */ /* Define to 1 if the winio I/O manager is the default for the non-threaded RTS */ /* #undef IOMGR_DEFAULT_NON_THREADED_WINIO */ /* Define to 1 if the mio I/O manager is the default for the threaded RTS */ #define IOMGR_DEFAULT_THREADED_MIO 1 /* Define to 1 if the winio I/O manager is the default for the threaded RTS */ /* #undef IOMGR_DEFAULT_THREADED_WINIO */ /* Define to 1 if C symbols have a leading underscore added by the compiler. */ #define LEADING_UNDERSCORE 1 /* Define to 1 if we need -latomic for sub-word atomic operations. */ #define NEED_ATOMIC_LIB 0 /* Define to the address where bug reports for this package should be sent. */ /* #undef PACKAGE_BUGREPORT */ /* Define to the full name of this package. */ /* #undef PACKAGE_NAME */ /* Define to the full name and version of this package. */ /* #undef PACKAGE_STRING */ /* Define to the one symbol short name of this package. */ /* #undef PACKAGE_TARNAME */ /* Define to the home page for this package. */ /* #undef PACKAGE_URL */ /* Define to the version of this package. */ /* #undef PACKAGE_VERSION */ /* Use mmap in the runtime linker */ #define RTS_LINKER_USE_MMAP 1 /* The size of 'char', as computed by sizeof. */ #define SIZEOF_CHAR 1 /* The size of 'double', as computed by sizeof. 
*/ #define SIZEOF_DOUBLE 8 /* The size of 'float', as computed by sizeof. */ #define SIZEOF_FLOAT 4 /* The size of 'int', as computed by sizeof. */ #define SIZEOF_INT 4 /* The size of 'int16_t', as computed by sizeof. */ #define SIZEOF_INT16_T 2 /* The size of 'int32_t', as computed by sizeof. */ #define SIZEOF_INT32_T 4 /* The size of 'int64_t', as computed by sizeof. */ #define SIZEOF_INT64_T 8 /* The size of 'int8_t', as computed by sizeof. */ #define SIZEOF_INT8_T 1 /* The size of 'long', as computed by sizeof. */ #define SIZEOF_LONG 8 /* The size of 'long long', as computed by sizeof. */ #define SIZEOF_LONG_LONG 8 /* The size of 'short', as computed by sizeof. */ #define SIZEOF_SHORT 2 /* The size of 'uint16_t', as computed by sizeof. */ #define SIZEOF_UINT16_T 2 /* The size of 'uint32_t', as computed by sizeof. */ #define SIZEOF_UINT32_T 4 /* The size of 'uint64_t', as computed by sizeof. */ #define SIZEOF_UINT64_T 8 /* The size of 'uint8_t', as computed by sizeof. */ #define SIZEOF_UINT8_T 1 /* The size of 'unsigned char', as computed by sizeof. */ #define SIZEOF_UNSIGNED_CHAR 1 /* The size of 'unsigned int', as computed by sizeof. */ #define SIZEOF_UNSIGNED_INT 4 /* The size of 'unsigned long', as computed by sizeof. */ #define SIZEOF_UNSIGNED_LONG 8 /* The size of 'unsigned long long', as computed by sizeof. */ #define SIZEOF_UNSIGNED_LONG_LONG 8 /* The size of 'unsigned short', as computed by sizeof. */ #define SIZEOF_UNSIGNED_SHORT 2 /* The size of 'void *', as computed by sizeof. */ #define SIZEOF_VOID_P 8 /* If using the C implementation of alloca, define if you know the direction of stack growth for your system; otherwise it will be automatically deduced at runtime. 
STACK_DIRECTION > 0 => grows toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses STACK_DIRECTION = 0 => direction of growth unknown */ /* #undef STACK_DIRECTION */ /* Define to 1 if you wish to statically link the libzstd compression library in the compiler (requires libzstd) */ #define STATIC_LIBZSTD 0 /* Define to 1 if all of the C89 standard headers exist (not just the ones required in a freestanding environment). This macro is provided for backward compatibility; new code need not use it. */ #define STDC_HEADERS 1 /* Define to 1 if info tables are laid out next to code */ #define TABLES_NEXT_TO_CODE 1 /* Compile-in ASSERTs in all ways. */ /* #undef USE_ASSERTS_ALL_WAYS */ /* Enable single heap address space support */ #define USE_LARGE_ADDRESS_SPACE 1 /* Set to 1 to use libdw */ #define USE_LIBDW 0 /* Enable extensions on AIX, Interix, z/OS. */ #ifndef _ALL_SOURCE # define _ALL_SOURCE 1 #endif /* Enable general extensions on macOS. */ #ifndef _DARWIN_C_SOURCE # define _DARWIN_C_SOURCE 1 #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # define __EXTENSIONS__ 1 #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif /* Enable X/Open compliant socket functions that do not require linking with -lxnet on HP-UX 11.11. */ #ifndef _HPUX_ALT_XOPEN_SOCKET_API # define _HPUX_ALT_XOPEN_SOCKET_API 1 #endif /* Identify the host operating system as Minix. This macro does not affect the system headers' behavior. A future release of Autoconf may stop defining this macro. */ #ifndef _MINIX /* # undef _MINIX */ #endif /* Enable general extensions on NetBSD. Enable NetBSD compatibility extensions on Minix. */ #ifndef _NETBSD_SOURCE # define _NETBSD_SOURCE 1 #endif /* Enable OpenBSD compatibility extensions on NetBSD. Oddly enough, this does nothing on OpenBSD. */ #ifndef _OPENBSD_SOURCE # define _OPENBSD_SOURCE 1 #endif /* Define to 1 if needed for POSIX-compatible behavior. 
*/ #ifndef _POSIX_SOURCE /* # undef _POSIX_SOURCE */ #endif /* Define to 2 if needed for POSIX-compatible behavior. */ #ifndef _POSIX_1_SOURCE /* # undef _POSIX_1_SOURCE */ #endif /* Enable POSIX-compatible threading on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # define _POSIX_PTHREAD_SEMANTICS 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-5:2014. */ #ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ # define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-1:2014. */ #ifndef __STDC_WANT_IEC_60559_BFP_EXT__ # define __STDC_WANT_IEC_60559_BFP_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-2:2015. */ #ifndef __STDC_WANT_IEC_60559_DFP_EXT__ # define __STDC_WANT_IEC_60559_DFP_EXT__ 1 #endif /* Enable extensions specified by C23 Annex F. */ #ifndef __STDC_WANT_IEC_60559_EXT__ # define __STDC_WANT_IEC_60559_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-4:2015. */ #ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__ # define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1 #endif /* Enable extensions specified by C23 Annex H and ISO/IEC TS 18661-3:2015. */ #ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ # define __STDC_WANT_IEC_60559_TYPES_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TR 24731-2:2010. */ #ifndef __STDC_WANT_LIB_EXT2__ # define __STDC_WANT_LIB_EXT2__ 1 #endif /* Enable extensions specified by ISO/IEC 24747:2009. */ #ifndef __STDC_WANT_MATH_SPEC_FUNCS__ # define __STDC_WANT_MATH_SPEC_FUNCS__ 1 #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # define _TANDEM_SOURCE 1 #endif /* Enable X/Open extensions. Define to 500 only if necessary to make mbstate_t available. */ #ifndef _XOPEN_SOURCE /* # undef _XOPEN_SOURCE */ #endif /* Define to 1 if we can use timer_create(CLOCK_REALTIME,...) */ /* #undef USE_TIMER_CREATE */ /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). 
*/ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* Number of bits in a file offset, on hosts where this is settable. */ /* #undef _FILE_OFFSET_BITS */ /* Define to 1 on platforms where this makes off_t a 64-bit type. */ /* #undef _LARGE_FILES */ /* Number of bits in time_t, on hosts where this is settable. */ /* #undef _TIME_BITS */ /* Define to 1 on platforms where this makes time_t a 64-bit type. */ /* #undef __MINGW_USE_VC2005_COMPAT */ /* ARM pre v6 */ /* #undef arm_HOST_ARCH_PRE_ARMv6 */ /* ARM pre v7 */ /* #undef arm_HOST_ARCH_PRE_ARMv7 */ /* Define to empty if 'const' does not conform to ANSI C. */ /* #undef const */ /* Define as a signed integer type capable of holding a process identifier. */ /* #undef pid_t */ /* Define as 'unsigned int' if doesn't define. */ /* #undef size_t */ /* Define as 'fork' if 'vfork' does not work. */ /* #undef vfork */ #endif /* __GHCAUTOCONF_H__ */ ghc-lib-parser-9.12.2.20250421/ghc-lib/stage0/rts/build/include/ghcplatform.h0000644000000000000000000000113207346545000024171 0ustar0000000000000000#if !defined(__GHCPLATFORM_H__) #define __GHCPLATFORM_H__ #define BuildPlatform_TYPE x86_64_apple_darwin #define HostPlatform_TYPE x86_64_apple_darwin #define x86_64_apple_darwin_BUILD 1 #define x86_64_apple_darwin_HOST 1 #define x86_64_BUILD_ARCH 1 #define x86_64_HOST_ARCH 1 #define BUILD_ARCH "x86_64" #define HOST_ARCH "x86_64" #define darwin_BUILD_OS 1 #define darwin_HOST_OS 1 #define BUILD_OS "darwin" #define HOST_OS "darwin" #define apple_BUILD_VENDOR 1 #define apple_HOST_VENDOR 1 #define BUILD_VENDOR "apple" #define HOST_VENDOR "apple" #endif /* __GHCPLATFORM_H__ */ ghc-lib-parser-9.12.2.20250421/ghc/0000755000000000000000000000000007346545000014234 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/ghc/ghc-bin.cabal0000644000000000000000000000554307346545000016536 0ustar0000000000000000-- WARNING: 
ghc-bin.cabal is automatically generated from ghc-bin.cabal.in by -- ./configure. Make sure you are editing ghc-bin.cabal.in, not ghc-bin.cabal. Name: ghc-bin Version: 9.12.2 Copyright: XXX -- License: XXX -- License-File: XXX Author: XXX Maintainer: glasgow-haskell-users@haskell.org Homepage: http://www.haskell.org/ghc/ Synopsis: The Glorious Glasgow Haskell Compiler. Description: This package contains the @ghc@ executable, the user facing front-end to the Glasgow Haskell Compiler. Category: Development Build-Type: Simple Cabal-Version: >=1.10 Flag internal-interpreter Description: Build with internal interpreter support. Default: False Manual: True Flag threaded Description: Link the ghc executable against the threaded RTS Default: True Manual: True Executable ghc Default-Language: GHC2021 Main-Is: Main.hs Build-Depends: base >= 4 && < 5, array >= 0.1 && < 0.6, bytestring >= 0.9 && < 0.13, directory >= 1 && < 1.4, process >= 1 && < 1.7, filepath >= 1.5 && < 1.6, containers >= 0.5 && < 0.8, transformers >= 0.5 && < 0.7, ghc-boot == 9.12.2, ghc == 9.12.2 if os(windows) Build-Depends: Win32 >= 2.3 && < 2.15 else Build-Depends: unix >= 2.7 && < 2.9 GHC-Options: -Wall -Wnoncanonical-monad-instances -Wnoncanonical-monoid-instances -rtsopts=all "-with-rtsopts=-K512M -H -I5 -T" if flag(internal-interpreter) -- NB: this is never built by the bootstrapping GHC+libraries Build-depends: deepseq >= 1.4 && < 1.6, ghc-prim >= 0.5.0 && < 0.14, ghci == 9.12.2, haskeline == 0.8.*, exceptions == 0.10.*, time >= 1.8 && < 1.15 CPP-Options: -DHAVE_INTERNAL_INTERPRETER Other-Modules: GHCi.Leak GHCi.UI GHCi.UI.Info GHCi.UI.Monad GHCi.UI.Exception GHCi.Util Other-Extensions: FlexibleInstances LambdaCase MagicHash MultiWayIf OverloadedStrings RankNTypes RecordWildCards ScopedTypeVariables UnboxedTuples ViewPatterns if flag(threaded) ghc-options: -threaded Other-Extensions: CPP NondecreasingIndentation TupleSections -- This should match the default-extensions used in 'ghc.cabal'. 
This way, -- GHCi can be used to load it all at once. Default-Extensions: NoImplicitPrelude , ScopedTypeVariables , BangPatterns ghc-lib-parser-9.12.2.20250421/libraries/containers/containers/include/0000755000000000000000000000000007346545000023404 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/containers/containers/include/containers.h0000644000000000000000000000103707346545000025723 0ustar0000000000000000/* * Common macros for containers */ #ifndef HASKELL_CONTAINERS_H #define HASKELL_CONTAINERS_H /* * On GHC, include MachDeps.h to get WORD_SIZE_IN_BITS macro. */ #ifdef __GLASGOW_HASKELL__ #include "MachDeps.h" #endif #ifdef __GLASGOW_HASKELL__ #define DEFINE_PATTERN_SYNONYMS 1 #endif #ifdef __GLASGOW_HASKELL__ # define USE_ST_MONAD 1 #ifndef WORDS_BIGENDIAN /* * Unboxed arrays are broken on big-endian architectures. * See https://gitlab.haskell.org/ghc/ghc/-/issues/16998 */ # define USE_UNBOXED_ARRAYS 1 #endif #endif #endif ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th-internal/GHC/Internal/TH/Lib/0000755000000000000000000000000007346545000024572 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th-internal/GHC/Internal/TH/Lib/Map.hs0000644000000000000000000000764507346545000025657 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE Safe #-} -- This is a non-exposed internal module -- -- The code in this module has been ripped from containers-0.5.5.1:Data.Map.Base [1] almost -- verbatimely to avoid a dependency of 'template-haskell' on the containers package. -- -- [1] see https://hackage.haskell.org/package/containers-0.5.5.1 -- -- The original code is BSD-licensed and copyrighted by Daan Leijen, Andriy Palamarchuk, et al. 
module GHC.Internal.TH.Lib.Map ( Map , empty , insert , GHC.Internal.TH.Lib.Map.lookup ) where import Prelude data Map k a = Bin {-# UNPACK #-} !Size !k a !(Map k a) !(Map k a) | Tip type Size = Int empty :: Map k a empty = Tip {-# INLINE empty #-} singleton :: k -> a -> Map k a singleton k x = Bin 1 k x Tip Tip {-# INLINE singleton #-} size :: Map k a -> Int size Tip = 0 size (Bin sz _ _ _ _) = sz {-# INLINE size #-} lookup :: Ord k => k -> Map k a -> Maybe a lookup = go where go _ Tip = Nothing go !k (Bin _ kx x l r) = case compare k kx of LT -> go k l GT -> go k r EQ -> Just x {-# INLINABLE lookup #-} insert :: Ord k => k -> a -> Map k a -> Map k a insert = go where go :: Ord k => k -> a -> Map k a -> Map k a go !kx x Tip = singleton kx x go !kx x (Bin sz ky y l r) = case compare kx ky of LT -> balanceL ky y (go kx x l) r GT -> balanceR ky y l (go kx x r) EQ -> Bin sz kx x l r {-# INLINABLE insert #-} balanceL :: k -> a -> Map k a -> Map k a -> Map k a balanceL k x l r = case r of Tip -> case l of Tip -> Bin 1 k x Tip Tip (Bin _ _ _ Tip Tip) -> Bin 2 k x l Tip (Bin _ lk lx Tip (Bin _ lrk lrx _ _)) -> Bin 3 lrk lrx (Bin 1 lk lx Tip Tip) (Bin 1 k x Tip Tip) (Bin _ lk lx ll@(Bin _ _ _ _ _) Tip) -> Bin 3 lk lx ll (Bin 1 k x Tip Tip) (Bin ls lk lx ll@(Bin lls _ _ _ _) lr@(Bin lrs lrk lrx lrl lrr)) | lrs < ratio*lls -> Bin (1+ls) lk lx ll (Bin (1+lrs) k x lr Tip) | otherwise -> Bin (1+ls) lrk lrx (Bin (1+lls+size lrl) lk lx ll lrl) (Bin (1+size lrr) k x lrr Tip) (Bin rs _ _ _ _) -> case l of Tip -> Bin (1+rs) k x Tip r (Bin ls lk lx ll lr) | ls > delta*rs -> case (ll, lr) of (Bin lls _ _ _ _, Bin lrs lrk lrx lrl lrr) | lrs < ratio*lls -> Bin (1+ls+rs) lk lx ll (Bin (1+rs+lrs) k x lr r) | otherwise -> Bin (1+ls+rs) lrk lrx (Bin (1+lls+size lrl) lk lx ll lrl) (Bin (1+rs+size lrr) k x lrr r) (_, _) -> error "Failure in Data.Map.balanceL" | otherwise -> Bin (1+ls+rs) k x l r {-# NOINLINE balanceL #-} balanceR :: k -> a -> Map k a -> Map k a -> Map k a balanceR k x l r = 
case l of Tip -> case r of Tip -> Bin 1 k x Tip Tip (Bin _ _ _ Tip Tip) -> Bin 2 k x Tip r (Bin _ rk rx Tip rr@(Bin _ _ _ _ _)) -> Bin 3 rk rx (Bin 1 k x Tip Tip) rr (Bin _ rk rx (Bin _ rlk rlx _ _) Tip) -> Bin 3 rlk rlx (Bin 1 k x Tip Tip) (Bin 1 rk rx Tip Tip) (Bin rs rk rx rl@(Bin rls rlk rlx rll rlr) rr@(Bin rrs _ _ _ _)) | rls < ratio*rrs -> Bin (1+rs) rk rx (Bin (1+rls) k x Tip rl) rr | otherwise -> Bin (1+rs) rlk rlx (Bin (1+size rll) k x Tip rll) (Bin (1+rrs+size rlr) rk rx rlr rr) (Bin ls _ _ _ _) -> case r of Tip -> Bin (1+ls) k x l Tip (Bin rs rk rx rl rr) | rs > delta*ls -> case (rl, rr) of (Bin rls rlk rlx rll rlr, Bin rrs _ _ _ _) | rls < ratio*rrs -> Bin (1+ls+rs) rk rx (Bin (1+ls+rls) k x l rl) rr | otherwise -> Bin (1+ls+rs) rlk rlx (Bin (1+ls+size rll) k x l rll) (Bin (1+rrs+size rlr) rk rx rlr rr) (_, _) -> error "Failure in Data.Map.balanceR" | otherwise -> Bin (1+ls+rs) k x l r {-# NOINLINE balanceR #-} delta,ratio :: Int delta = 3 ratio = 2 ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th-internal/GHC/Internal/TH/0000755000000000000000000000000007346545000024064 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th-internal/GHC/Internal/TH/Ppr.hs0000644000000000000000000012322507346545000025166 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} {-# LANGUAGE LambdaCase #-} -- | contains a prettyprinter for the -- Template Haskell datatypes module GHC.Internal.TH.Ppr where -- All of the exports from this module should -- be "public" functions. The main module TH -- re-exports them all. 
import Text.PrettyPrint (render) import GHC.Internal.TH.PprLib import GHC.Internal.TH.Syntax import Data.Word ( Word8 ) import Data.Char ( toLower, chr ) import Data.List ( intersperse ) import GHC.Show ( showMultiLineString ) import GHC.Lexeme( isVarSymChar ) import Data.Ratio ( numerator, denominator ) import Data.Foldable ( toList ) import qualified Data.List.NonEmpty as NE import Prelude hiding ((<>)) nestDepth :: Int nestDepth = 4 type Precedence = Int appPrec, opPrec, unopPrec, funPrec, qualPrec, sigPrec, noPrec :: Precedence appPrec = 6 -- Argument of a function or type application opPrec = 5 -- Argument of an infix operator unopPrec = 4 -- Argument of an unresolved infix operator funPrec = 3 -- Argument of a function arrow qualPrec = 2 -- Forall-qualified type or result of a function arrow sigPrec = 1 -- Argument of an explicit type signature noPrec = 0 -- Others parensIf :: Bool -> Doc -> Doc parensIf True d = parens d parensIf False d = d ------------------------------ pprint :: Ppr a => a -> String pprint x = render $ to_HPJ_Doc $ ppr x class Ppr a where ppr :: a -> Doc ppr_list :: [a] -> Doc ppr_list = vcat . 
map ppr instance Ppr a => Ppr [a] where ppr x = ppr_list x ------------------------------ instance Ppr Name where ppr v = pprName v ------------------------------ instance Ppr Info where ppr (TyConI d) = ppr d ppr (ClassI d is) = ppr d $$ vcat (map ppr is) ppr (FamilyI d is) = ppr d $$ vcat (map ppr is) ppr (PrimTyConI name arity is_unlifted) = text "Primitive" <+> (if is_unlifted then text "unlifted" else empty) <+> text "type constructor" <+> quotes (ppr name) <+> parens (text "arity" <+> int arity) ppr (ClassOpI v ty cls) = text "Class op from" <+> ppr cls <> colon <+> ppr_sig v ty ppr (DataConI v ty tc) = text "Constructor from" <+> ppr tc <> colon <+> ppr_sig v ty ppr (PatSynI nm ty) = pprPatSynSig nm ty ppr (TyVarI v ty) = text "Type variable" <+> ppr v <+> equals <+> ppr ty ppr (VarI v ty mb_d) = vcat [ppr_sig v ty, case mb_d of { Nothing -> empty; Just d -> ppr d }] ppr_sig :: Name -> Type -> Doc ppr_sig v ty = pprName' Applied v <+> dcolon <+> ppr ty pprFixity :: Name -> Fixity -> NamespaceSpecifier -> Doc pprFixity _ f _ | f == defaultFixity = empty pprFixity v (Fixity i d) ns_spec = ppr_fix d <+> int i <+> pprNamespaceSpecifier ns_spec <+> pprName' Infix v where ppr_fix InfixR = text "infixr" ppr_fix InfixL = text "infixl" ppr_fix InfixN = text "infix" pprNamespaceSpecifier :: NamespaceSpecifier -> Doc pprNamespaceSpecifier NoNamespaceSpecifier = empty pprNamespaceSpecifier TypeNamespaceSpecifier = text "type" pprNamespaceSpecifier DataNamespaceSpecifier = text "data" -- | Pretty prints a pattern synonym type signature pprPatSynSig :: Name -> PatSynType -> Doc pprPatSynSig nm ty = text "pattern" <+> pprPrefixOcc nm <+> dcolon <+> pprPatSynType ty -- | Pretty prints a pattern synonym's type; follows the usual -- conventions to print a pattern synonym type compactly, yet -- unambiguously. See the note on 'PatSynType' and the section on -- pattern synonyms in the GHC user's guide for more information. 
pprPatSynType :: PatSynType -> Doc pprPatSynType ty@(ForallT uniTys reqs ty'@(ForallT exTys provs ty'')) | null exTys, null provs = ppr (ForallT uniTys reqs ty'') | null uniTys, null reqs = noreqs <+> ppr ty' | null reqs = pprForallBndrs uniTys <+> noreqs <+> ppr ty' | otherwise = ppr ty where noreqs = text "() =>" pprForallBndrs tvs = text "forall" <+> hsep (map ppr tvs) <+> text "." pprPatSynType ty = ppr ty ------------------------------ instance Ppr Module where ppr (Module pkg m) = text (pkgString pkg) <+> text (modString m) instance Ppr ModuleInfo where ppr (ModuleInfo imps) = text "Module" <+> vcat (map ppr imps) ------------------------------ instance Ppr Exp where ppr = pprExp noPrec pprPrefixOcc :: Name -> Doc -- Print operators with parens around them pprPrefixOcc n = parensIf (isSymOcc n) (ppr n) isSymOcc :: Name -> Bool isSymOcc n = case nameBase n of [] -> True -- Empty name; weird (c:_) -> isVarSymChar c -- c.f. isVarSymChar in GHC itself pprInfixExp :: Exp -> Doc pprInfixExp (VarE v) = pprName' Infix v pprInfixExp (ConE v) = pprName' Infix v pprInfixExp (UnboundVarE v) = pprName' Infix v -- This case will only ever be reached in exceptional circumstances. -- For example, when printing an error message in case of a malformed expression. 
pprInfixExp e = text "`" <> ppr e <> text "`" pprExp :: Precedence -> Exp -> Doc pprExp _ (VarE v) = pprName' Applied v pprExp _ (ConE c) = pprName' Applied c pprExp i (LitE l) = pprLit i l pprExp i (AppE e1 e2) = parensIf (i >= appPrec) $ pprExp opPrec e1 <+> pprExp appPrec e2 pprExp i (AppTypeE e t) = parensIf (i >= appPrec) $ pprExp opPrec e <+> char '@' <> pprParendType t pprExp _ (ParensE e) = parens (pprExp noPrec e) pprExp i (UInfixE e1 op e2) = parensIf (i > unopPrec) $ pprExp unopPrec e1 <+> pprInfixExp op <+> pprExp unopPrec e2 pprExp i (InfixE (Just e1) op (Just e2)) = parensIf (i >= opPrec) $ pprExp opPrec e1 <+> pprInfixExp op <+> pprExp opPrec e2 pprExp _ (InfixE me1 op me2) = parens $ pprMaybeExp noPrec me1 <+> pprInfixExp op <+> pprMaybeExp noPrec me2 pprExp i (LamE [] e) = pprExp i e -- #13856 pprExp i (LamE ps e) = parensIf (i > noPrec) $ char '\\' <> hsep (map (pprPat appPrec) ps) <+> text "->" <+> ppr e pprExp i (LamCaseE ms) = parensIf (i > noPrec) $ text "\\case" $$ braces (semiSep ms) pprExp i (LamCasesE ms) = parensIf (i > noPrec) $ text "\\cases" $$ braces (semi_sep ms) where semi_sep = sep . punctuate semi . map (pprClause False) pprExp i (TupE es) | [Just e] <- es = pprExp i (ConE (tupleDataName 1) `AppE` e) | otherwise = parens (commaSepWith (pprMaybeExp noPrec) es) pprExp _ (UnboxedTupE es) = hashParens (commaSepWith (pprMaybeExp noPrec) es) pprExp _ (UnboxedSumE e alt arity) = unboxedSumBars (ppr e) alt arity -- Nesting in Cond is to avoid potential problems in do statements pprExp i (CondE guard true false) = parensIf (i > noPrec) $ sep [text "if" <+> ppr guard, nest 1 $ text "then" <+> ppr true, nest 1 $ text "else" <+> ppr false] pprExp i (MultiIfE alts) = parensIf (i > noPrec) $ vcat $ case alts of [] -> [text "if {}"] (alt : alts') -> text "if" <+> pprGuarded arrow alt : map (nest 3 . 
pprGuarded arrow) alts' pprExp i (LetE ds_ e) = parensIf (i > noPrec) $ text "let" <+> pprDecs ds_ $$ text " in" <+> ppr e where pprDecs [] = empty pprDecs [d] = ppr d pprDecs ds = braces (semiSep ds) pprExp i (CaseE e ms) = parensIf (i > noPrec) $ text "case" <+> ppr e <+> text "of" $$ braces (semiSep ms) pprExp i (DoE m ss_) = parensIf (i > noPrec) $ pprQualifier m <> text "do" <+> pprStms ss_ where pprQualifier Nothing = empty pprQualifier (Just modName) = text (modString modName) <> char '.' pprStms [] = empty pprStms [s] = ppr s pprStms ss = braces (semiSep ss) pprExp i (MDoE m ss_) = parensIf (i > noPrec) $ pprQualifier m <> text "mdo" <+> pprStms ss_ where pprQualifier Nothing = empty pprQualifier (Just modName) = text (modString modName) <> char '.' pprStms [] = empty pprStms [s] = ppr s pprStms ss = braces (semiSep ss) pprExp _ (CompE []) = text "<>" -- This will probably break with fixity declarations - would need a ';' pprExp _ (CompE ss) = if null ss' -- If there are no statements in a list comprehension besides the last -- one, we simply treat it like a normal list. then text "[" <> ppr s <> text "]" else text "[" <> ppr s <+> bar <+> commaSep ss' <> text "]" where s = last ss ss' = init ss pprExp _ (ArithSeqE d) = ppr d pprExp _ (ListE es) = brackets (commaSep es) pprExp i (SigE e t) = parensIf (i > noPrec) $ pprExp sigPrec e <+> dcolon <+> pprType sigPrec t pprExp _ (RecConE nm fs) = pprName' Applied nm <> braces (pprFields fs) pprExp _ (RecUpdE e fs) = pprExp appPrec e <> braces (pprFields fs) pprExp i (StaticE e) = parensIf (i >= appPrec) $ text "static"<+> pprExp appPrec e pprExp _ (UnboundVarE v) = pprName' Applied v pprExp _ (LabelE s) = text "#" <> text s pprExp _ (ImplicitParamVarE n) = text ('?' : n) pprExp _ (GetFieldE e f) = pprExp appPrec e <> text ('.': f) pprExp _ (ProjectionE xs) = parens $ hcat $ map ((char '.'<>) . 
text) $ toList xs pprExp _ (TypedBracketE e) = text "[||" <> ppr e <> text "||]" pprExp _ (TypedSpliceE e) = text "$$" <> pprExp appPrec e pprExp i (TypeE t) = parensIf (i > noPrec) $ text "type" <+> ppr t pprExp i (ForallVisE tvars body) = parensIf (i >= funPrec) $ sep [pprForallVis tvars [], pprExp qualPrec body] pprExp i (ForallE tvars body) = parensIf (i >= funPrec) $ sep [pprForall tvars [], pprExp qualPrec body] pprExp i (ConstrainedE ctx body) = parensIf (i >= funPrec) $ sep [pprCtxWith pprExp ctx, pprExp qualPrec body] pprFields :: [(Name,Exp)] -> Doc pprFields = sep . punctuate comma . map (\(s,e) -> pprName' Applied s <+> equals <+> ppr e) pprMaybeExp :: Precedence -> Maybe Exp -> Doc pprMaybeExp _ Nothing = empty pprMaybeExp i (Just e) = pprExp i e ------------------------------ instance Ppr Stmt where ppr (BindS p e) = ppr p <+> text "<-" <+> ppr e ppr (LetS ds) = text "let" <+> (braces (semiSep ds)) ppr (NoBindS e) = ppr e ppr (ParS sss) = sep $ punctuate bar $ map commaSep sss ppr (RecS ss) = text "rec" <+> (braces (semiSep ss)) ------------------------------ instance Ppr Match where ppr (Match p rhs ds) = pprMatchPat p <+> pprBody False rhs $$ where_clause ds pprMatchPat :: Pat -> Doc -- Everything except pattern signatures bind more tightly than (->) pprMatchPat p@(SigP {}) = parens (ppr p) pprMatchPat p = ppr p ------------------------------ pprGuarded :: Doc -> (Guard, Exp) -> Doc pprGuarded eqDoc (guard, expr) = case guard of NormalG guardExpr -> bar <+> ppr guardExpr <+> eqDoc <+> ppr expr PatG stmts -> bar <+> vcat (punctuate comma $ map ppr stmts) $$ nest nestDepth (eqDoc <+> ppr expr) ------------------------------ pprBody :: Bool -> Body -> Doc pprBody eq body = case body of GuardedB xs -> nest nestDepth $ vcat $ map (pprGuarded eqDoc) xs NormalB e -> eqDoc <+> ppr e where eqDoc | eq = equals | otherwise = arrow ------------------------------ pprClause :: Bool -> Clause -> Doc pprClause eqDoc (Clause ps rhs ds) = hsep (map (pprPat appPrec) 
ps) <+> pprBody eqDoc rhs $$ where_clause ds ------------------------------ instance Ppr Lit where ppr = pprLit noPrec pprLit :: Precedence -> Lit -> Doc pprLit i (IntPrimL x) = parensIf (i > noPrec && x < 0) (integer x <> char '#') pprLit _ (WordPrimL x) = integer x <> text "##" pprLit i (FloatPrimL x) = parensIf (i > noPrec && x < 0) (float (fromRational x) <> char '#') pprLit i (DoublePrimL x) = parensIf (i > noPrec && x < 0) (double (fromRational x) <> text "##") pprLit i (IntegerL x) = parensIf (i > noPrec && x < 0) (integer x) pprLit _ (CharL c) = text (show c) pprLit _ (CharPrimL c) = text (show c) <> char '#' pprLit _ (StringL s) = pprString s pprLit _ (StringPrimL s) = pprString (bytesToString s) <> char '#' pprLit _ (BytesPrimL {}) = pprString "" pprLit i (RationalL rat) | withoutFactor 2 (withoutFactor 5 $ denominator rat) /= 1 -- if the denominator has prime factors other than 2 and 5 -- or can't be represented as Double, show as fraction = parensIf (i > noPrec) $ integer (numerator rat) <+> char '/' <+> integer (denominator rat) | rat /= 0 && (zeroes < -2 || zeroes > 6), let (n, d) = properFraction (rat / magnitude) -- if < 0.01 or >= 100_000_000, use scientific notation = parensIf (i > noPrec && rat < 0) (integer n <> (if d == 0 then empty else char '.' <> decimals (abs d)) <> char 'e' <> integer zeroes) | let (n, d) = properFraction rat = parensIf (i > noPrec && rat < 0) (integer n <> char '.' 
<> if d == 0 then char '0' else decimals (abs d)) where zeroes :: Integer zeroes = log10 (abs rat) log10 :: Rational -> Integer log10 x | x >= 10 = 1 + log10 (x / 10) | x < 1 = -1 + log10 (x * 10) | otherwise = 0 magnitude :: Rational magnitude = 10 ^^ zeroes withoutFactor :: Integer -> Integer -> Integer withoutFactor _ 0 = 0 withoutFactor p n | (n', 0) <- divMod n p = withoutFactor p n' | otherwise = n -- | Expects the argument 0 <= x < 1 decimals :: Rational -> Doc decimals x | x == 0 = empty | otherwise = integer n <> decimals d where (n, d) = properFraction (x * 10) bytesToString :: [Word8] -> String bytesToString = map (chr . fromIntegral) pprString :: String -> Doc -- Print newlines as newlines with Haskell string escape notation, -- not as '\n'. For other non-printables use regular escape notation. pprString s = vcat (map text (showMultiLineString s)) ------------------------------ instance Ppr Pat where ppr = pprPat noPrec pprPat :: Precedence -> Pat -> Doc pprPat i (LitP l) = pprLit i l pprPat _ (VarP v) = pprName' Applied v pprPat i (TupP ps) | [_] <- ps = pprPat i (ConP (tupleDataName 1) [] ps) | otherwise = parens (commaSep ps) pprPat _ (UnboxedTupP ps) = hashParens (commaSep ps) pprPat _ (UnboxedSumP p alt arity) = unboxedSumBars (ppr p) alt arity pprPat i (ConP s ts ps) = parensIf (i >= appPrec) $ pprName' Applied s <+> sep (map (\t -> char '@' <> pprParendType t) ts) <+> sep (map (pprPat appPrec) ps) pprPat _ (ParensP p) = parens $ pprPat noPrec p pprPat i (UInfixP p1 n p2) = parensIf (i > unopPrec) (pprPat unopPrec p1 <+> pprName' Infix n <+> pprPat unopPrec p2) pprPat i (InfixP p1 n p2) = parensIf (i >= opPrec) (pprPat opPrec p1 <+> pprName' Infix n <+> pprPat opPrec p2) pprPat i (TildeP p) = parensIf (i > noPrec) $ char '~' <> pprPat appPrec p pprPat i (BangP p) = parensIf (i > noPrec) $ char '!' 
<> pprPat appPrec p pprPat i (AsP v p) = parensIf (i > noPrec) $ ppr v <> text "@" <> pprPat appPrec p pprPat _ WildP = text "_" pprPat _ (RecP nm fs) = parens $ pprName' Applied nm <+> braces (sep $ punctuate comma $ map (\(s,p) -> pprName' Applied s <+> equals <+> ppr p) fs) pprPat _ (ListP ps) = brackets (commaSep ps) pprPat i (SigP p t) = parensIf (i > noPrec) $ ppr p <+> dcolon <+> ppr t pprPat _ (ViewP e p) = parens $ pprExp noPrec e <+> text "->" <+> pprPat noPrec p pprPat _ (TypeP t) = parens $ text "type" <+> ppr t pprPat _ (InvisP t) = parens $ text "@" <+> ppr t pprPat _ (OrP t) = parens $ semiSep (NE.toList t) ------------------------------ instance Ppr Dec where ppr = ppr_dec True ppr_dec :: Bool -- ^ declaration on the toplevel? -> Dec -> Doc ppr_dec isTop (FunD f cs) = layout $ map (\c -> pprPrefixOcc f <+> ppr c) cs where layout :: [Doc] -> Doc layout = if isTop then vcat else semiSepWith id ppr_dec _ (ValD p r ds) = ppr p <+> pprBody True r $$ where_clause ds ppr_dec _ (TySynD t xs rhs) = ppr_tySyn empty (Just t) (hsep (map ppr xs)) rhs ppr_dec isTop (DataD ctxt t xs ksig cs decs) = ppr_data isTop empty ctxt (Just t) (hsep (map ppr xs)) ksig cs decs ppr_dec isTop (NewtypeD ctxt t xs ksig c decs) = ppr_newtype isTop empty ctxt (Just t) (sep (map ppr xs)) ksig c decs ppr_dec isTop (TypeDataD t xs ksig cs) = ppr_type_data isTop empty [] (Just t) (hsep (map ppr xs)) ksig cs [] ppr_dec _ (ClassD ctxt c xs fds ds) = text "class" <+> pprCxt ctxt <+> pprName' Applied c <+> hsep (map ppr xs) <+> ppr fds $$ where_clause ds ppr_dec _ (InstanceD o ctxt i ds) = text "instance" <+> maybe empty ppr_overlap o <+> pprCxt ctxt <+> ppr i $$ where_clause ds ppr_dec _ (SigD f t) = pprPrefixOcc f <+> dcolon <+> ppr t ppr_dec _ (KiSigD f k) = text "type" <+> pprPrefixOcc f <+> dcolon <+> ppr k ppr_dec _ (ForeignD f) = ppr f ppr_dec _ (InfixD fx ns_spec n) = pprFixity n fx ns_spec ppr_dec _ (DefaultD tys) = text "default" <+> parens (sep $ punctuate comma $ map ppr tys) 
ppr_dec _ (PragmaD p) = ppr p ppr_dec isTop (DataFamilyD tc tvs kind) = text "data" <+> maybeFamily <+> pprName' Applied tc <+> hsep (map ppr tvs) <+> maybeKind where maybeFamily | isTop = text "family" | otherwise = empty maybeKind | (Just k') <- kind = dcolon <+> ppr k' | otherwise = empty ppr_dec isTop (DataInstD ctxt bndrs ty ksig cs decs) = ppr_data isTop (maybeInst <+> ppr_bndrs bndrs) ctxt Nothing (ppr ty) ksig cs decs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (NewtypeInstD ctxt bndrs ty ksig c decs) = ppr_newtype isTop (maybeInst <+> ppr_bndrs bndrs) ctxt Nothing (ppr ty) ksig c decs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (TySynInstD (TySynEqn mb_bndrs ty rhs)) = ppr_tySyn (maybeInst <+> ppr_bndrs mb_bndrs) Nothing (ppr ty) rhs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (OpenTypeFamilyD tfhead) = text "type" <+> maybeFamily <+> ppr_tf_head tfhead where maybeFamily | isTop = text "family" | otherwise = empty ppr_dec _ (ClosedTypeFamilyD tfhead eqns) = hang (text "type family" <+> ppr_tf_head tfhead <+> text "where") nestDepth (vcat (map ppr_eqn eqns)) where ppr_eqn (TySynEqn mb_bndrs lhs rhs) = ppr_bndrs mb_bndrs <+> ppr lhs <+> text "=" <+> ppr rhs ppr_dec _ (RoleAnnotD name roles) = hsep [ text "type role", pprName' Applied name ] <+> hsep (map ppr roles) ppr_dec _ (StandaloneDerivD ds cxt ty) = hsep [ text "deriving" , maybe empty ppr_deriv_strategy ds , text "instance" , pprCxt cxt , ppr ty ] ppr_dec _ (DefaultSigD n ty) = hsep [ text "default", pprPrefixOcc n, dcolon, ppr ty ] ppr_dec _ (PatSynD name args dir pat) = text "pattern" <+> pprNameArgs <+> ppr dir <+> pprPatRHS where pprNameArgs | InfixPatSyn a1 a2 <- args = ppr a1 <+> pprName' Infix name <+> ppr a2 | otherwise = pprName' Applied name <+> ppr args pprPatRHS | ExplBidir cls <- dir = hang (ppr pat <+> text "where") nestDepth (vcat $ (pprName' Applied name <+>) . 
ppr <$> cls) | otherwise = ppr pat ppr_dec _ (PatSynSigD name ty) = pprPatSynSig name ty ppr_dec _ (ImplicitParamBindD n e) = hsep [text ('?' : n), text "=", ppr e] ppr_deriv_strategy :: DerivStrategy -> Doc ppr_deriv_strategy ds = case ds of StockStrategy -> text "stock" AnyclassStrategy -> text "anyclass" NewtypeStrategy -> text "newtype" ViaStrategy ty -> text "via" <+> pprParendType ty ppr_overlap :: Overlap -> Doc ppr_overlap o = text $ case o of Overlaps -> "{-# OVERLAPS #-}" Overlappable -> "{-# OVERLAPPABLE #-}" Overlapping -> "{-# OVERLAPPING #-}" Incoherent -> "{-# INCOHERENT #-}" ppr_data :: Bool -- ^ declaration on the toplevel? -> Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> [Con] -> [DerivClause] -> Doc ppr_data = ppr_typedef "data" ppr_newtype :: Bool -- ^ declaration on the toplevel? -> Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> Con -> [DerivClause] -> Doc ppr_newtype isTop maybeInst ctxt t argsDoc ksig c decs = ppr_typedef "newtype" isTop maybeInst ctxt t argsDoc ksig [c] decs ppr_type_data :: Bool -- ^ declaration on the toplevel? -> Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> [Con] -> [DerivClause] -> Doc ppr_type_data = ppr_typedef "type data" ppr_typedef :: String -> Bool -> Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> [Con] -> [DerivClause] -> Doc ppr_typedef data_or_newtype isTop maybeInst ctxt t argsDoc ksig cs decs = sep [text data_or_newtype <+> maybeInst <+> pprCxt ctxt <+> case t of Just n -> pprName' Applied n <+> argsDoc Nothing -> argsDoc <+> ksigDoc <+> maybeWhere, nest nestDepth (layout (pref $ map ppr cs)), if null decs then empty else nest nestDepth $ vcat $ map ppr_deriv_clause decs] where pref :: [Doc] -> [Doc] pref xs | isGadtDecl = xs pref [] = [] -- No constructors; can't happen in H98 pref (d:ds) = (char '=' <+> d):map (bar <+>) ds layout :: [Doc] -> Doc layout | isGadtDecl && not isTop = braces . 
semiSepWith id | otherwise = vcat maybeWhere :: Doc maybeWhere | isGadtDecl = text "where" | otherwise = empty isGadtDecl :: Bool isGadtDecl = not (null cs) && all isGadtCon cs where isGadtCon (GadtC _ _ _ ) = True isGadtCon (RecGadtC _ _ _) = True isGadtCon (ForallC _ _ x ) = isGadtCon x isGadtCon _ = False ksigDoc = case ksig of Nothing -> empty Just k -> dcolon <+> ppr k ppr_deriv_clause :: DerivClause -> Doc ppr_deriv_clause (DerivClause ds ctxt) = text "deriving" <+> pp_strat_before <+> ppr_cxt_preds appPrec ctxt <+> pp_strat_after where -- @via@ is unique in that in comes /after/ the class being derived, -- so we must special-case it. (pp_strat_before, pp_strat_after) = case ds of Just (via@ViaStrategy{}) -> (empty, ppr_deriv_strategy via) _ -> (maybe empty ppr_deriv_strategy ds, empty) ppr_tySyn :: Doc -> Maybe Name -> Doc -> Type -> Doc ppr_tySyn maybeInst t argsDoc rhs = text "type" <+> maybeInst <+> case t of Just n -> pprName' Applied n <+> argsDoc Nothing -> argsDoc <+> text "=" <+> ppr rhs ppr_tf_head :: TypeFamilyHead -> Doc ppr_tf_head (TypeFamilyHead tc tvs res inj) = pprName' Applied tc <+> hsep (map ppr tvs) <+> ppr res <+> maybeInj where maybeInj | (Just inj') <- inj = ppr inj' | otherwise = empty ppr_bndrs :: PprFlag flag => Maybe [TyVarBndr flag] -> Doc ppr_bndrs (Just bndrs) = text "forall" <+> sep (map ppr bndrs) <> text "." 
ppr_bndrs Nothing = empty ------------------------------ instance Ppr FunDep where ppr (FunDep xs ys) = hsep (map ppr xs) <+> text "->" <+> hsep (map ppr ys) ppr_list [] = empty ppr_list xs = bar <+> commaSep xs ------------------------------ instance Ppr FamilyResultSig where ppr NoSig = empty ppr (KindSig k) = dcolon <+> ppr k ppr (TyVarSig bndr) = text "=" <+> ppr bndr ------------------------------ instance Ppr InjectivityAnn where ppr (InjectivityAnn lhs rhs) = bar <+> ppr lhs <+> text "->" <+> hsep (map ppr rhs) ------------------------------ instance Ppr Foreign where ppr (ImportF callconv safety impent as typ) = text "foreign import" <+> showtextl callconv <+> showtextl safety <+> text (show impent) <+> pprName' Applied as <+> dcolon <+> ppr typ ppr (ExportF callconv expent as typ) = text "foreign export" <+> showtextl callconv <+> text (show expent) <+> pprName' Applied as <+> dcolon <+> ppr typ ------------------------------ instance Ppr Pragma where ppr (InlineP n inline rm phases) = text "{-#" <+> ppr inline <+> ppr rm <+> ppr phases <+> pprName' Applied n <+> text "#-}" ppr (OpaqueP n) = text "{-# OPAQUE" <+> pprName' Applied n <+> text "#-}" ppr (SpecialiseP n ty inline phases) = text "{-# SPECIALISE" <+> maybe empty ppr inline <+> ppr phases <+> sep [ pprName' Applied n <+> dcolon , nest 2 $ ppr ty ] <+> text "#-}" ppr (SpecialiseInstP inst) = text "{-# SPECIALISE instance" <+> ppr inst <+> text "#-}" ppr (RuleP n ty_bndrs tm_bndrs lhs rhs phases) = sep [ text "{-# RULES" <+> pprString n <+> ppr phases , nest 4 $ ppr_ty_forall ty_bndrs <+> ppr_tm_forall ty_bndrs <+> ppr lhs , nest 4 $ char '=' <+> ppr rhs <+> text "#-}" ] where ppr_ty_forall Nothing = empty ppr_ty_forall (Just bndrs) = text "forall" <+> fsep (map ppr bndrs) <+> char '.' ppr_tm_forall Nothing | null tm_bndrs = empty ppr_tm_forall _ = text "forall" <+> fsep (map ppr tm_bndrs) <+> char '.' 
ppr (AnnP tgt expr) = text "{-# ANN" <+> target1 tgt <+> ppr expr <+> text "#-}" where target1 ModuleAnnotation = text "module" target1 (TypeAnnotation t) = text "type" <+> pprName' Applied t target1 (ValueAnnotation v) = pprName' Applied v ppr (LineP line file) = text "{-# LINE" <+> int line <+> text (show file) <+> text "#-}" ppr (CompleteP cls mty) = text "{-# COMPLETE" <+> (fsep $ punctuate comma $ map (pprName' Applied) cls) <+> maybe empty (\ty -> dcolon <+> pprName' Applied ty) mty <+> text "#-}" ppr (SCCP nm str) = text "{-# SCC" <+> pprName' Applied nm <+> maybe empty pprString str <+> text "#-}" ------------------------------ instance Ppr Inline where ppr NoInline = text "NOINLINE" ppr Inline = text "INLINE" ppr Inlinable = text "INLINABLE" ------------------------------ instance Ppr RuleMatch where ppr ConLike = text "CONLIKE" ppr FunLike = empty ------------------------------ instance Ppr Phases where ppr AllPhases = empty ppr (FromPhase i) = brackets $ int i ppr (BeforePhase i) = brackets $ char '~' <> int i ------------------------------ instance Ppr RuleBndr where ppr (RuleVar n) = ppr n ppr (TypedRuleVar n ty) = parens $ ppr n <+> dcolon <+> ppr ty ------------------------------ instance Ppr Clause where ppr = pprClause True ------------------------------ instance Ppr Con where ppr (NormalC c sts) = pprName' Applied c <+> sep (map pprBangType sts) ppr (RecC c vsts) = pprName' Applied c <+> braces (sep (punctuate comma $ map pprVarBangType vsts)) ppr (InfixC st1 c st2) = pprBangType st1 <+> pprName' Infix c <+> pprBangType st2 ppr (ForallC ns ctxt (GadtC cs sts ty)) = commaSepApplied cs <+> dcolon <+> pprForall ns ctxt <+> pprGadtRHS sts ty ppr (ForallC ns ctxt (RecGadtC cs vsts ty)) = commaSepApplied cs <+> dcolon <+> pprForall ns ctxt <+> pprRecFields vsts ty ppr (ForallC ns ctxt con) = pprForall ns ctxt <+> ppr con ppr (GadtC cs sts ty) = commaSepApplied cs <+> dcolon <+> pprGadtRHS sts ty ppr (RecGadtC cs vsts ty) = commaSepApplied cs <+> dcolon 
<+> pprRecFields vsts ty instance Ppr PatSynDir where ppr Unidir = text "<-" ppr ImplBidir = text "=" ppr (ExplBidir _) = text "<-" -- the ExplBidir's clauses are pretty printed together with the -- entire pattern synonym; so only print the direction here. instance Ppr PatSynArgs where ppr (PrefixPatSyn args) = sep $ map ppr args ppr (InfixPatSyn a1 a2) = ppr a1 <+> ppr a2 ppr (RecordPatSyn sels) = braces $ sep (punctuate comma (map (pprName' Applied) sels)) commaSepApplied :: [Name] -> Doc commaSepApplied = commaSepWith (pprName' Applied) pprForall :: [TyVarBndr Specificity] -> Cxt -> Doc pprForall = pprForall' ForallInvis pprForallVis :: [TyVarBndr ()] -> Cxt -> Doc pprForallVis = pprForall' ForallVis pprForall' :: PprFlag flag => ForallVisFlag -> [TyVarBndr flag] -> Cxt -> Doc pprForall' fvf tvs cxt -- even in the case without any tvs, there could be a non-empty -- context cxt (e.g., in the case of pattern synonyms, where there -- are multiple forall binders and contexts). | [] <- tvs = pprCxt cxt | otherwise = text "forall" <+> hsep (map ppr tvs) <+> separator <+> pprCxt cxt where separator = case fvf of ForallVis -> text "->" ForallInvis -> char '.' pprRecFields :: [(Name, Strict, Type)] -> Type -> Doc pprRecFields vsts ty = braces (sep (punctuate comma $ map pprVarBangType vsts)) <+> arrow <+> ppr ty pprGadtRHS :: [(Strict, Type)] -> Type -> Doc pprGadtRHS [] ty = ppr ty pprGadtRHS sts ty = sep (punctuate (space <> arrow) (map pprBangType sts)) <+> arrow <+> ppr ty ------------------------------ pprVarBangType :: VarBangType -> Doc -- Slight infelicity: with print non-atomic type with parens pprVarBangType (v, bang, t) = pprName' Applied v <+> dcolon <+> pprBangType (bang, t) ------------------------------ pprBangType :: BangType -> Doc -- Make sure we print -- -- Con {-# UNPACK #-} a -- -- rather than -- -- Con {-# UNPACK #-}a -- -- when there's no strictness annotation. 
If there is a strictness annotation, -- it's okay to not put a space between it and the type. pprBangType (bt@(Bang _ NoSourceStrictness), t) = ppr bt <+> pprParendType t pprBangType (bt, t) = ppr bt <> pprParendType t ------------------------------ instance Ppr Bang where ppr (Bang su ss) = ppr su <+> ppr ss ------------------------------ instance Ppr SourceUnpackedness where ppr NoSourceUnpackedness = empty ppr SourceNoUnpack = text "{-# NOUNPACK #-}" ppr SourceUnpack = text "{-# UNPACK #-}" ------------------------------ instance Ppr SourceStrictness where ppr NoSourceStrictness = empty ppr SourceLazy = char '~' ppr SourceStrict = char '!' ------------------------------ instance Ppr DecidedStrictness where ppr DecidedLazy = empty ppr DecidedStrict = char '!' ppr DecidedUnpack = text "{-# UNPACK #-} !" ------------------------------ {-# DEPRECATED pprVarStrictType "As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by 'VarBangType'. Please use 'pprVarBangType' instead." #-} pprVarStrictType :: (Name, Strict, Type) -> Doc pprVarStrictType = pprVarBangType ------------------------------ {-# DEPRECATED pprStrictType "As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by 'BangType'. Please use 'pprBangType' instead." 
#-} pprStrictType :: (Strict, Type) -> Doc pprStrictType = pprBangType ------------------------------ pprType :: Precedence -> Type -> Doc pprType _ (VarT v) = pprName' Applied v -- `Applied` is used here instead of `ppr` because of infix names (#13887) pprType _ (ConT c) = pprName' Applied c pprType _ (TupleT 0) = text "()" pprType p (TupleT 1) = pprType p (ConT (tupleTypeName 1)) pprType _ (TupleT n) = parens (hcat (replicate (n-1) comma)) pprType _ (UnboxedTupleT n) = hashParens $ hcat $ replicate (n-1) comma pprType _ (UnboxedSumT arity) = hashParens $ hsep $ replicate (arity-1) bar pprType _ ArrowT = parens (text "->") pprType _ MulArrowT = text "FUN" pprType _ ListT = text "[]" pprType _ (LitT l) = pprTyLit l pprType _ (PromotedT c) = text "'" <> pprName' Applied c pprType _ (PromotedTupleT 0) = text "'()" pprType p (PromotedTupleT 1) = pprType p (PromotedT (tupleDataName 1)) pprType _ (PromotedTupleT n) = quoteParens (hcat (replicate (n-1) comma)) pprType _ PromotedNilT = text "'[]" pprType _ PromotedConsT = text "'(:)" pprType _ StarT = char '*' pprType _ ConstraintT = text "Constraint" pprType _ (SigT ty k) = parens (ppr ty <+> text "::" <+> ppr k) pprType _ WildCardT = char '_' pprType p t@(InfixT {}) = pprInfixT p t pprType p t@(UInfixT {}) = pprInfixT p t pprType p t@(PromotedInfixT {}) = pprInfixT p t pprType p t@(PromotedUInfixT {}) = pprInfixT p t pprType _ (ParensT t) = parens (pprType noPrec t) pprType p (ImplicitParamT n ty) = parensIf (p >= sigPrec) $ text ('?':n) <+> text "::" <+> pprType sigPrec ty pprType _ EqualityT = text "(~)" pprType p (ForallT tvars ctxt ty) = parensIf (p >= funPrec) $ sep [pprForall tvars ctxt, pprType qualPrec ty] pprType p (ForallVisT tvars ty) = parensIf (p >= funPrec) $ sep [pprForallVis tvars [], pprType qualPrec ty] pprType p t@AppT{} = pprTyApp p (split t) pprType p t@AppKindT{} = pprTyApp p (split t) ------------------------------ pprParendType :: Type -> Doc pprParendType = pprType appPrec pprInfixT :: 
Precedence -> Type -> Doc pprInfixT p = \case InfixT x n y -> with x n y "" opPrec UInfixT x n y -> with x n y "" unopPrec PromotedInfixT x n y -> with x n y "'" opPrec PromotedUInfixT x n y -> with x n y "'" unopPrec t -> pprParendType t where with x n y prefix p' = parensIf (p >= p') (pprType opPrec x <+> text prefix <> pprName' Infix n <+> pprType opPrec y) instance Ppr Type where ppr = pprType noPrec instance Ppr TypeArg where ppr (TANormal ty) = ppr ty ppr (TyArg ki) = char '@' <> parensIf (isStarT ki) (ppr ki) pprParendTypeArg :: TypeArg -> Doc pprParendTypeArg (TANormal ty) = pprParendType ty pprParendTypeArg (TyArg ki) = char '@' <> parensIf (isStarT ki) (pprParendType ki) isStarT :: Type -> Bool isStarT StarT = True isStarT _ = False {- Note [Pretty-printing kind signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC's parser only recognises a kind signature in a type when there are parens around it. E.g. the parens are required here: f :: (Int :: *) type instance F Int = (Bool :: *) So we always print a SigT with parens (see #10050). 
-} pprTyApp :: Precedence -> (Type, [TypeArg]) -> Doc pprTyApp p app@(MulArrowT, [TANormal (PromotedT c), TANormal arg1, TANormal arg2]) | p >= funPrec = parens (pprTyApp noPrec app) | c == oneName = sep [pprFunArgType arg1 <+> text "%1 ->", pprType qualPrec arg2] | c == manyName = sep [pprFunArgType arg1 <+> text "->", pprType qualPrec arg2] pprTyApp p (MulArrowT, [TANormal argm, TANormal arg1, TANormal arg2]) = parensIf (p >= funPrec) $ sep [pprFunArgType arg1 <+> text "%" <> pprType appPrec argm <+> text "->", pprType qualPrec arg2] pprTyApp p (ArrowT, [TANormal arg1, TANormal arg2]) = parensIf (p >= funPrec) $ sep [pprFunArgType arg1 <+> text "->", pprType qualPrec arg2] pprTyApp p (EqualityT, [TANormal arg1, TANormal arg2]) = parensIf (p >= opPrec) $ sep [pprType opPrec arg1 <+> text "~", pprType opPrec arg2] pprTyApp _ (ListT, [TANormal arg]) = brackets (pprType noPrec arg) pprTyApp p (TupleT 1, args) = pprTyApp p (ConT (tupleTypeName 1), args) pprTyApp _ (TupleT n, args) | length args == n, Just args' <- traverse fromTANormal args = parens (commaSep args') pprTyApp p (PromotedTupleT 1, args) = pprTyApp p (PromotedT (tupleDataName 1), args) pprTyApp _ (PromotedTupleT n, args) | length args == n, Just args' <- traverse fromTANormal args = quoteParens (commaSep args') pprTyApp _ (UnboxedTupleT n, args) | length args == n, Just args' <- traverse fromTANormal args = hashParens (commaSep args') pprTyApp _ (UnboxedSumT n, args) | length args == n, Just args' <- traverse fromTANormal args = hashParens (sep $ intersperse bar $ map ppr args') pprTyApp p (fun, args) = parensIf (p >= appPrec) $ pprParendType fun <+> sep (map pprParendTypeArg args) fromTANormal :: TypeArg -> Maybe Type fromTANormal (TANormal arg) = Just arg fromTANormal (TyArg _) = Nothing -- Print the type to the left of @->@. Everything except forall and (->) binds more tightly than (->). 
pprFunArgType :: Type -> Doc pprFunArgType = pprType funPrec data ForallVisFlag = ForallVis -- forall a -> {...} | ForallInvis -- forall a. {...} deriving Show data TypeArg = TANormal Type | TyArg Kind split :: Type -> (Type, [TypeArg]) -- Split into function and args split t = go t [] where go (AppT t1 t2) args = go t1 (TANormal t2:args) go (AppKindT ty ki) args = go ty (TyArg ki:args) go ty args = (ty, args) pprTyLit :: TyLit -> Doc pprTyLit (NumTyLit n) = integer n pprTyLit (StrTyLit s) = text (show s) pprTyLit (CharTyLit c) = text (show c) instance Ppr TyLit where ppr = pprTyLit ------------------------------ class PprFlag flag where pprTyVarBndr :: (TyVarBndr flag) -> Doc instance PprFlag () where pprTyVarBndr (PlainTV nm ()) = ppr nm pprTyVarBndr (KindedTV nm () k) = parens (ppr nm <+> dcolon <+> ppr k) instance PprFlag Specificity where pprTyVarBndr (PlainTV nm SpecifiedSpec) = ppr nm pprTyVarBndr (PlainTV nm InferredSpec) = braces (ppr nm) pprTyVarBndr (KindedTV nm SpecifiedSpec k) = parens (ppr nm <+> dcolon <+> ppr k) pprTyVarBndr (KindedTV nm InferredSpec k) = braces (ppr nm <+> dcolon <+> ppr k) instance PprFlag BndrVis where pprTyVarBndr (PlainTV nm vis) = pprBndrVis vis (ppr nm) pprTyVarBndr (KindedTV nm vis k) = pprBndrVis vis (parens (ppr nm <+> dcolon <+> ppr k)) pprBndrVis :: BndrVis -> Doc -> Doc pprBndrVis BndrReq d = d pprBndrVis BndrInvis d = char '@' <> d instance PprFlag flag => Ppr (TyVarBndr flag) where ppr bndr = pprTyVarBndr bndr instance Ppr Role where ppr NominalR = text "nominal" ppr RepresentationalR = text "representational" ppr PhantomR = text "phantom" ppr InferR = text "_" ------------------------------ pprCtxWith :: Ppr a => (Precedence -> a -> Doc) -> [a] -> Doc pprCtxWith _ [] = empty pprCtxWith ppr_fun ts = ppr_ctx_preds_with ppr_fun funPrec ts <+> text "=>" pprCxt :: Cxt -> Doc pprCxt = pprCtxWith pprType ppr_ctx_preds_with :: Ppr a => (Precedence -> a -> Doc) -> Precedence -> [a] -> Doc ppr_ctx_preds_with _ _ [] = text "()" 
ppr_ctx_preds_with f p [t] = f p t ppr_ctx_preds_with _ _ ts = parens (commaSep ts) ppr_cxt_preds :: Precedence -> Cxt -> Doc ppr_cxt_preds = ppr_ctx_preds_with pprType ------------------------------ instance Ppr Range where ppr = brackets . pprRange where pprRange :: Range -> Doc pprRange (FromR e) = ppr e <+> text ".." pprRange (FromThenR e1 e2) = ppr e1 <> text "," <+> ppr e2 <+> text ".." pprRange (FromToR e1 e2) = ppr e1 <+> text ".." <+> ppr e2 pprRange (FromThenToR e1 e2 e3) = ppr e1 <> text "," <+> ppr e2 <+> text ".." <+> ppr e3 ------------------------------ where_clause :: [Dec] -> Doc where_clause [] = empty where_clause ds = nest nestDepth $ text "where" <+> braces (semiSepWith (ppr_dec False) ds) showtextl :: Show a => a -> Doc showtextl = text . map toLower . show hashParens :: Doc -> Doc hashParens d = text "(# " <> d <> text " #)" quoteParens :: Doc -> Doc quoteParens d = text "'(" <> d <> text ")" ----------------------------- instance Ppr Loc where ppr (Loc { loc_module = md , loc_package = pkg , loc_start = (start_ln, start_col) , loc_end = (end_ln, end_col) }) = hcat [ text pkg, colon, text md, colon , parens $ int start_ln <> comma <> int start_col , text "-" , parens $ int end_ln <> comma <> int end_col ] -- Takes a separator and a pretty-printing function and prints a list of things -- separated by the separator followed by space. sepWith :: Doc -> (a -> Doc) -> [a] -> Doc sepWith sepDoc pprFun = sep . punctuate sepDoc . map pprFun -- Takes a list of printable things and prints them separated by commas followed -- by space. commaSep :: Ppr a => [a] -> Doc commaSep = commaSepWith ppr -- Takes a list of things and prints them with the given pretty-printing -- function, separated by commas followed by space. commaSepWith :: (a -> Doc) -> [a] -> Doc commaSepWith pprFun = sepWith comma pprFun -- Takes a list of printable things and prints them separated by semicolons -- followed by space. semiSep :: Ppr a => [a] -> Doc semiSep = sep . 
punctuate semi . map ppr -- Takes a list of things and prints them with the given pretty-printing -- function, separated by semicolons followed by space. semiSepWith :: (a -> Doc) -> [a] -> Doc semiSepWith pprFun = sepWith semi pprFun -- Prints out the series of vertical bars that wraps an expression or pattern -- used in an unboxed sum. unboxedSumBars :: Doc -> SumAlt -> SumArity -> Doc unboxedSumBars d alt arity = hashParens $ bars (alt-1) <> d <> bars (arity - alt) where bars i = hsep (replicate i bar) -- Text containing the vertical bar character. bar :: Doc bar = char '|' ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th-internal/GHC/Internal/TH/PprLib.hs0000644000000000000000000001502507346545000025613 0ustar0000000000000000{-# LANGUAGE FlexibleInstances, Safe #-} -- | Monadic front-end to Text.PrettyPrint module GHC.Internal.TH.PprLib ( -- * The document type Doc, -- Abstract, instance of Show PprM, -- * Primitive Documents empty, semi, comma, colon, dcolon, space, equals, arrow, lparen, rparen, lbrack, rbrack, lbrace, rbrace, -- * Converting values into documents text, char, ptext, int, integer, float, double, rational, -- * Wrapping documents in delimiters parens, brackets, braces, quotes, doubleQuotes, -- * Combining documents (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat, nest, hang, punctuate, -- * Predicates on documents isEmpty, to_HPJ_Doc, pprName, pprName' ) where import GHC.Internal.TH.Syntax (Uniq, Name(..), showName', NameFlavour(..), NameIs(..)) import qualified Text.PrettyPrint as HPJ import Control.Monad (liftM, liftM2, ap) import GHC.Internal.TH.Lib.Map ( Map ) import qualified GHC.Internal.TH.Lib.Map as Map ( lookup, insert, empty ) import Prelude hiding ((<>)) infixl 6 <> infixl 6 <+> infixl 5 $$, $+$ -- --------------------------------------------------------------------------- -- The interface -- The primitive Doc values instance Show Doc where show d = HPJ.render (to_HPJ_Doc d) isEmpty :: Doc -> PprM Bool; -- ^ 
Returns 'True' if the document is empty empty :: Doc; -- ^ An empty document semi :: Doc; -- ^ A ';' character comma :: Doc; -- ^ A ',' character colon :: Doc; -- ^ A ':' character dcolon :: Doc; -- ^ A "::" string space :: Doc; -- ^ A space character equals :: Doc; -- ^ A '=' character arrow :: Doc; -- ^ A "->" string lparen :: Doc; -- ^ A '(' character rparen :: Doc; -- ^ A ')' character lbrack :: Doc; -- ^ A '[' character rbrack :: Doc; -- ^ A ']' character lbrace :: Doc; -- ^ A '{' character rbrace :: Doc; -- ^ A '}' character text :: String -> Doc ptext :: String -> Doc char :: Char -> Doc int :: Int -> Doc integer :: Integer -> Doc float :: Float -> Doc double :: Double -> Doc rational :: Rational -> Doc parens :: Doc -> Doc; -- ^ Wrap document in @(...)@ brackets :: Doc -> Doc; -- ^ Wrap document in @[...]@ braces :: Doc -> Doc; -- ^ Wrap document in @{...}@ quotes :: Doc -> Doc; -- ^ Wrap document in @\'...\'@ doubleQuotes :: Doc -> Doc; -- ^ Wrap document in @\"...\"@ -- Combining @Doc@ values (<>) :: Doc -> Doc -> Doc; -- ^Beside hcat :: [Doc] -> Doc; -- ^List version of '<>' (<+>) :: Doc -> Doc -> Doc; -- ^Beside, separated by space hsep :: [Doc] -> Doc; -- ^List version of '<+>' ($$) :: Doc -> Doc -> Doc; -- ^Above; if there is no -- overlap it \"dovetails\" the two ($+$) :: Doc -> Doc -> Doc; -- ^Above, without dovetailing. vcat :: [Doc] -> Doc; -- ^List version of '$$' cat :: [Doc] -> Doc; -- ^ Either hcat or vcat sep :: [Doc] -> Doc; -- ^ Either hsep or vcat fcat :: [Doc] -> Doc; -- ^ \"Paragraph fill\" version of cat fsep :: [Doc] -> Doc; -- ^ \"Paragraph fill\" version of sep nest :: Int -> Doc -> Doc; -- ^ Nested -- GHC-specific ones. hang :: Doc -> Int -> Doc -> Doc; -- ^ @hang d1 n d2 = sep [d1, nest n d2]@ punctuate :: Doc -> [Doc] -> [Doc] -- ^ @punctuate p [d1, ... dn] = [d1 \<> p, d2 \<> p, ... 
dn-1 \<> p, dn]@ -- --------------------------------------------------------------------------- -- The "implementation" type State = (Map Name Name, Uniq) data PprM a = PprM { runPprM :: State -> (a, State) } pprName :: Name -> Doc pprName = pprName' Alone pprName' :: NameIs -> Name -> Doc pprName' ni n@(Name o (NameU _)) = PprM $ \s@(fm, i) -> let (n', s') = case Map.lookup n fm of Just d -> (d, s) Nothing -> let n'' = Name o (NameU i) in (n'', (Map.insert n n'' fm, i + 1)) in (HPJ.text $ showName' ni n', s') pprName' ni n = text $ showName' ni n {- instance Show Name where show (Name occ (NameU u)) = occString occ ++ "_" ++ show (I# u) show (Name occ NameS) = occString occ show (Name occ (NameG ns m)) = modString m ++ "." ++ occString occ data Name = Name OccName NameFlavour data NameFlavour | NameU Int# -- A unique local name -} to_HPJ_Doc :: Doc -> HPJ.Doc to_HPJ_Doc d = fst $ runPprM d (Map.empty, 0) instance Functor PprM where fmap = liftM instance Applicative PprM where pure x = PprM $ \s -> (x, s) (<*>) = ap instance Monad PprM where m >>= k = PprM $ \s -> let (x, s') = runPprM m s in runPprM (k x) s' type Doc = PprM HPJ.Doc -- The primitive Doc values isEmpty = liftM HPJ.isEmpty empty = return HPJ.empty semi = return HPJ.semi comma = return HPJ.comma colon = return HPJ.colon dcolon = return $ HPJ.text "::" space = return HPJ.space equals = return HPJ.equals arrow = return $ HPJ.text "->" lparen = return HPJ.lparen rparen = return HPJ.rparen lbrack = return HPJ.lbrack rbrack = return HPJ.rbrack lbrace = return HPJ.lbrace rbrace = return HPJ.rbrace text = return . HPJ.text ptext = return . HPJ.ptext char = return . HPJ.char int = return . HPJ.int integer = return . HPJ.integer float = return . HPJ.float double = return . HPJ.double rational = return . 
HPJ.rational parens = liftM HPJ.parens brackets = liftM HPJ.brackets braces = liftM HPJ.braces quotes = liftM HPJ.quotes doubleQuotes = liftM HPJ.doubleQuotes -- Combining @Doc@ values (<>) = liftM2 (HPJ.<>) hcat = liftM HPJ.hcat . sequence (<+>) = liftM2 (HPJ.<+>) hsep = liftM HPJ.hsep . sequence ($$) = liftM2 (HPJ.$$) ($+$) = liftM2 (HPJ.$+$) vcat = liftM HPJ.vcat . sequence cat = liftM HPJ.cat . sequence sep = liftM HPJ.sep . sequence fcat = liftM HPJ.fcat . sequence fsep = liftM HPJ.fsep . sequence nest n = liftM (HPJ.nest n) hang d1 n d2 = do d1' <- d1 d2' <- d2 return (HPJ.hang d1' n d2') -- punctuate uses the same definition as Text.PrettyPrint punctuate _ [] = [] punctuate p (d:ds) = go d ds where go d' [] = [d'] go d' (e:es) = (d' <> p) : go e es ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/ForeignSrcLang/0000755000000000000000000000000007346545000023026 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/ForeignSrcLang/Type.hs0000644000000000000000000000014407346545000024302 0ustar0000000000000000module GHC.ForeignSrcLang.Type ( ForeignSrcLang(..) ) where import GHC.Internal.ForeignSrcLang ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/LanguageExtensions/0000755000000000000000000000000007346545000023766 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/LanguageExtensions/Type.hs0000644000000000000000000000065207346545000025246 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Module : GHC.LanguageExtensions.Type -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- A data type defining the language extensions supported by GHC. -- {-# LANGUAGE Safe #-} module GHC.LanguageExtensions.Type ( Extension(..) 
) where import GHC.Internal.LanguageExtensions ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/0000755000000000000000000000000007346545000020163 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/GHC/Lexeme.hs0000644000000000000000000000103607346545000021736 0ustar0000000000000000{-# LANGUAGE CPP #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Lexeme -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- Functions to evaluate whether or not a string is a valid identifier. -- module GHC.Lexeme ( -- * Lexical characteristics of Haskell names startsVarSym, startsVarId, startsConSym, startsConId, startsVarSymASCII, isVarSymChar, okSymChar ) where import GHC.Internal.Lexeme ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/0000755000000000000000000000000007346545000017562 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot-th/ghc-boot-th.cabal0000644000000000000000000000455707346545000022674 0ustar0000000000000000-- WARNING: ghc-boot-th.cabal is automatically generated from -- ghc-boot-th.cabal.in by ../../configure. Make sure you are editing -- ghc-boot-th.cabal.in, not ghc-boot-th.cabal. name: ghc-boot-th version: 9.12.2 license: BSD3 license-file: LICENSE category: GHC maintainer: ghc-devs@haskell.org bug-reports: https://gitlab.haskell.org/ghc/ghc/issues/new synopsis: Shared functionality between GHC and the @template-haskell@ library description: This library contains various bits shared between the @ghc@ and @template-haskell@ libraries. . This package exists to ensure that @template-haskell@ has a minimal set of transitive dependencies, since it is intended to be depended upon by user code. 
cabal-version: >=1.10 build-type: Simple extra-source-files: changelog.md source-repository head type: git location: https://gitlab.haskell.org/ghc/ghc.git subdir: libraries/ghc-boot-th Flag bootstrap Description: Enabled when building the stage1 compiler in order to vendor the in-tree `ghc-boot-th` library, and through that the in-tree TH AST defintions from `ghc-internal`. See Note [Bootstrapping Template Haskell] Default: False Manual: True Library default-language: Haskell2010 other-extensions: DeriveGeneric default-extensions: NoImplicitPrelude exposed-modules: GHC.ForeignSrcLang.Type GHC.Internal.TH.Lib.Map GHC.Internal.TH.Ppr GHC.Internal.TH.PprLib GHC.LanguageExtensions.Type GHC.Lexeme build-depends: base >= 4.7 && < 4.22, ghc-prim, pretty == 1.1.* if flag(bootstrap) cpp-options: -DBOOTSTRAP_TH hs-source-dirs: . ../ghc-internal/src exposed-modules: GHC.Internal.TH.Lib GHC.Internal.TH.Syntax other-modules: GHC.Internal.ForeignSrcLang GHC.Internal.LanguageExtensions GHC.Internal.Lexeme else hs-source-dirs: . build-depends: ghc-internal reexported-modules: GHC.Internal.TH.Lib, GHC.Internal.TH.Lift, GHC.Internal.TH.Quote, GHC.Internal.TH.Syntax ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/0000755000000000000000000000000007346545000017552 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/BaseDir.hs0000644000000000000000000000646107346545000021426 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Note [Base Dir] -- ~~~~~~~~~~~~~~~~~ -- GHC's base directory or top directory containers miscellaneous settings and -- the package database. The main compiler of course needs this directory to -- read those settings and read and write packages. ghc-pkg uses it to find the -- global package database too. -- -- In the interest of making GHC builds more relocatable, many settings also -- will expand `${top_dir}` inside strings so GHC doesn't need to know it's on -- installation location at build time. 
ghc-pkg also can expand those variables -- and so needs the top dir location to do that too. module GHC.BaseDir ( expandTopDir , expandPathVar , getBaseDir ) where import Prelude -- See Note [Why do we import Prelude here?] import Data.List (stripPrefix) import Data.Maybe (listToMaybe) import System.FilePath #if MIN_VERSION_base(4,17,0) && !defined(openbsd_HOST_OS) import System.Environment (executablePath) #else import System.Environment (getExecutablePath) #endif -- | Expand occurrences of the @$topdir@ interpolation in a string. expandTopDir :: FilePath -> String -> String expandTopDir = expandPathVar "topdir" -- | @expandPathVar var value str@ -- -- replaces occurrences of variable @$var@ with @value@ in str. expandPathVar :: String -> FilePath -> String -> String expandPathVar var value str | Just str' <- stripPrefix ('$':var) str , maybe True isPathSeparator (listToMaybe str') = value ++ expandPathVar var value str' expandPathVar var value (x:xs) = x : expandPathVar var value xs expandPathVar _ _ [] = [] #if !MIN_VERSION_base(4,17,0) || defined(openbsd_HOST_OS) -- Polyfill for base-4.17 executablePath and OpenBSD which doesn't -- have executablePath. The best it can do is use argv[0] which is -- good enough for most uses of getBaseDir. executablePath :: Maybe (IO (Maybe FilePath)) executablePath = Just (Just <$> getExecutablePath) #elif !MIN_VERSION_base(4,18,0) && defined(js_HOST_ARCH) -- executablePath is missing from base < 4.18.0 on js_HOST_ARCH executablePath :: Maybe (IO (Maybe FilePath)) executablePath = Nothing #endif -- | Calculate the location of the base dir getBaseDir :: IO (Maybe String) #if defined(mingw32_HOST_OS) getBaseDir = maybe (pure Nothing) (((( "lib") . rootDir) <$>) <$>) executablePath where -- locate the "base dir" when given the path -- to the real ghc executable (as opposed to symlink) -- that is running this function. rootDir :: FilePath -> FilePath rootDir = takeDirectory . takeDirectory . 
normalise #else -- on unix, this is a bit more confusing. -- The layout right now is something like -- -- /bin/ghc-X.Y.Z <- wrapper script (1) -- /bin/ghc <- symlink to wrapper script (2) -- /lib/ghc-X.Y.Z/bin/ghc <- ghc executable (3) -- /lib/ghc-X.Y.Z <- $topdir (4) -- -- As such, we first need to find the absolute location to the -- binary. -- -- executablePath will return (3). One takeDirectory will -- give use /lib/ghc-X.Y.Z/bin, and another will give us (4). -- -- This of course only works due to the current layout. If -- the layout is changed, such that we have ghc-X.Y.Z/{bin,lib} -- this would need to be changed accordingly. -- getBaseDir = maybe (pure Nothing) (((( "lib") . rootDir) <$>) <$>) executablePath where rootDir :: FilePath -> FilePath rootDir = takeDirectory . takeDirectory #endif ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Data/0000755000000000000000000000000007346545000020423 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Data/ShortText.hs0000644000000000000000000001353607346545000022733 0ustar0000000000000000{-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, GeneralizedNewtypeDeriving, DerivingStrategies, CPP #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} -- gross hack: we maneuvered ourselves into a position where we can't boot GHC with a LLVM based GHC anymore. -- LLVM based GHC's fail to compile memcmp ffi calls. These end up as memcmp$def in the llvm ir, however we -- don't have any prototypes and subsequently the llvm toolchain chokes on them. Since 7fdcce6d, we use -- ShortText for the package database. This however introduces this very module; which through inlining ends -- up bringing memcmp_ByteArray from bytestring:Data.ByteString.Short.Internal into scope, which results in -- the memcmp call we choke on. -- -- The solution thusly is to force late binding via the linker instead of inlining when comping with the -- bootstrap compiler. 
This will produce a slower (slightly less optimised) stage1 compiler only. -- -- See issue 18857. hsyl20 deserves credit for coming up with the idea for the solution. -- -- This can be removed when we exit the boot compiler window. Thus once we drop GHC-9.2 as boot compiler, -- we can drop this code as well. #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) {-# OPTIONS_GHC -fignore-interface-pragmas #-} #endif -- | -- An Unicode string for internal GHC use. Meant to replace String -- in places where being a lazy linked is not very useful and a more -- memory efficient data structure is desirable. -- Very similar to FastString, but not hash-consed and with some extra instances and -- functions for serialisation and I/O. Should be imported qualified. -- -- /Note:/ This string is stored in Modified UTF8 format, -- thus it's not byte-compatible with @ShortText@ type in @text-short@ -- package. module GHC.Data.ShortText ( -- * ShortText ShortText(..), -- ** Conversion to and from String singleton, pack, unpack, -- ** Operations codepointLength, byteLength, GHC.Data.ShortText.null, splitFilePath, GHC.Data.ShortText.head, stripPrefix ) where import Prelude import Control.Monad (guard) import Control.DeepSeq as DeepSeq import Data.Binary import qualified Data.ByteString.Char8 as B8 import qualified Data.ByteString.Short.Internal as SBS import GHC.Exts import GHC.IO import GHC.Utils.Encoding import System.FilePath (isPathSeparator) {-| A 'ShortText' is a modified UTF-8 encoded string meant for short strings like file paths, module descriptions, etc. -} newtype ShortText = ShortText { contents :: SBS.ShortByteString } deriving stock (Show) deriving newtype (Eq, Ord, Binary, Semigroup, Monoid, NFData) -- We don't want to derive this one from ShortByteString since that one won't handle -- UTF-8 characters correctly. instance IsString ShortText where fromString = pack -- | /O(n)/ Returns the length of the 'ShortText' in characters. 
codepointLength :: ShortText -> Int codepointLength st = utf8CountCharsShortByteString (contents st) -- | /O(1)/ Returns the length of the 'ShortText' in bytes. byteLength :: ShortText -> Int byteLength st = SBS.length $ contents st -- | /O(n)/ Convert a 'String' into a 'ShortText'. pack :: String -> ShortText pack s = ShortText $ utf8EncodeShortByteString s -- | Create a singleton singleton :: Char -> ShortText singleton s = pack [s] -- | /O(n)/ Convert a 'ShortText' into a 'String'. unpack :: ShortText -> String unpack st = utf8DecodeShortByteString $ contents st -- | /O(1)/ Test whether the 'ShortText' is the empty string. null :: ShortText -> Bool null st = SBS.null $ contents st -- | /O(n)/ Split a 'ShortText' representing a file path into its components by separating -- on the file separator characters for this platform. splitFilePath :: ShortText -> [ShortText] -- This seems dangerous, but since the path separators are in the ASCII set they map down -- to a single byte when encoded in UTF-8 and so this should work even when casting to ByteString. -- We DeepSeq.force the resulting list so that we can be sure that no references to the -- bytestring in `st'` remain in unevaluated thunks, which might prevent `st'` from being -- collected by the GC. splitFilePath st = DeepSeq.force $ map (ShortText . SBS.toShort) $ B8.splitWith isPathSeparator st' where st' = SBS.fromShort $ contents st -- | /O(1)/ Returns the first UTF-8 codepoint in the 'ShortText'. Depending on the string in -- question, this may or may not be the actual first character in the string due to Unicode -- non-printable characters. head :: ShortText -> Char head st | hd:_ <- unpack st = hd | otherwise = error "head: Empty ShortText" -- | /O(n)/ The 'stripPrefix' function takes two 'ShortText's and returns 'Just' the remainder of -- the second iff the first is its prefix, and otherwise Nothing. 
stripPrefix :: ShortText -> ShortText -> Maybe ShortText stripPrefix prefix st = do let !(SBS.SBS prefixBA) = contents prefix let !(SBS.SBS stBA) = contents st let prefixLength = sizeofByteArray# prefixBA let stLength = sizeofByteArray# stBA -- If the length of 'st' is not >= than the length of 'prefix', it is impossible for 'prefix' -- to be the prefix of `st`. guard $ (I# stLength) >= (I# prefixLength) -- 'prefix' is a prefix of 'st' if the first bytes of 'st' -- are equal to 'prefix' guard $ I# (compareByteArrays# prefixBA 0# stBA 0# prefixLength) == 0 -- Allocate a new ByteArray# and copy the remainder of the 'st' into it unsafeDupablePerformIO $ do let newBAsize = (stLength -# prefixLength) newSBS <- IO $ \s0 -> let !(# s1, ba #) = newByteArray# newBAsize s0 s2 = copyByteArray# stBA prefixLength ba 0# newBAsize s1 !(# s3, fba #) = unsafeFreezeByteArray# ba s2 in (# s3, SBS.SBS fba #) return . Just . ShortText $ newSBS ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Data/SizedSeq.hs0000644000000000000000000000226107346545000022507 0ustar0000000000000000{-# LANGUAGE StandaloneDeriving, DeriveGeneric #-} module GHC.Data.SizedSeq ( SizedSeq(..) , emptySS , addToSS , addListToSS , ssElts , sizeSS ) where import Prelude -- See note [Why do we import Prelude here?] import Control.DeepSeq import Data.Binary import Data.List (genericLength) import GHC.Generics data SizedSeq a = SizedSeq {-# UNPACK #-} !Word [a] deriving (Generic, Show) instance Functor SizedSeq where fmap f (SizedSeq sz l) = SizedSeq sz (fmap f l) instance Foldable SizedSeq where foldr f c ss = foldr f c (ssElts ss) instance Traversable SizedSeq where traverse f (SizedSeq sz l) = SizedSeq sz . 
reverse <$> traverse f (reverse l) instance Binary a => Binary (SizedSeq a) instance NFData a => NFData (SizedSeq a) where rnf (SizedSeq _ xs) = rnf xs emptySS :: SizedSeq a emptySS = SizedSeq 0 [] addToSS :: SizedSeq a -> a -> SizedSeq a addToSS (SizedSeq n r_xs) x = SizedSeq (n+1) (x:r_xs) addListToSS :: SizedSeq a -> [a] -> SizedSeq a addListToSS (SizedSeq n r_xs) xs = SizedSeq (n + genericLength xs) (reverse xs ++ r_xs) ssElts :: SizedSeq a -> [a] ssElts (SizedSeq _ r_xs) = reverse r_xs sizeSS :: SizedSeq a -> Word sizeSS (SizedSeq n _) = n ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/ForeignSrcLang.hs0000644000000000000000000000041707346545000022753 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | See @GHC.LanguageExtensions@ for an explanation -- on why this is needed module GHC.ForeignSrcLang ( module GHC.ForeignSrcLang.Type ) where import Data.Binary import GHC.ForeignSrcLang.Type instance Binary ForeignSrcLang ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/LanguageExtensions.hs0000644000000000000000000000120407346545000023706 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | This module re-exports the 'Extension' type along with an orphan 'Binary' -- instance for it. -- -- Note that the @ghc-boot@ package has a large set of dependencies; for this -- reason the 'Extension' type itself is defined in the -- "GHC.LanguageExtensions.Type" module provided by the @ghc-boot-th@ package, -- which has no dependencies outside of @base@. For this reason -- @template-haskell@ depends upon @ghc-boot-th@, not @ghc-boot@. 
-- module GHC.LanguageExtensions ( module GHC.LanguageExtensions.Type ) where import Data.Binary import GHC.LanguageExtensions.Type instance Binary Extension ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Serialized.hs0000644000000000000000000001477107346545000022213 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE RankNTypes, ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-name-shadowing #-} -- -- (c) The University of Glasgow 2002-2006 -- -- Serialized values module GHC.Serialized ( -- * Main Serialized data type Serialized(..), -- * Going into and out of 'Serialized' toSerialized, fromSerialized, -- * Handy serialization functions serializeWithData, deserializeWithData, ) where import Prelude -- See note [Why do we import Prelude here?] import Data.Bits import Data.Word ( Word8 ) import Data.Data -- | Represents a serialized value of a particular type. Attempts can be made to deserialize it at certain types data Serialized = Serialized TypeRep [Word8] -- | Put a Typeable value that we are able to actually turn into bytes into a 'Serialized' value ready for deserialization later toSerialized :: forall a. Typeable a => (a -> [Word8]) -> a -> Serialized toSerialized serialize what = Serialized (typeOf what) (serialize what) -- | If the 'Serialized' value contains something of the given type, then use the specified deserializer to return @Just@ that. -- Otherwise return @Nothing@. fromSerialized :: forall a. Typeable a => ([Word8] -> a) -> Serialized -> Maybe a fromSerialized deserialize (Serialized the_type bytes) | the_type == rep = Just (deserialize bytes) | otherwise = Nothing where rep = typeRep (Proxy :: Proxy a) -- | Use a 'Data' instance to implement a serialization scheme dual to that of 'deserializeWithData' serializeWithData :: Data a => a -> [Word8] serializeWithData what = serializeWithData' what [] serializeWithData' :: Data a => a -> [Word8] -> [Word8] serializeWithData' what = fst $ gfoldl (\(before, a_to_b) a -> (before . 
serializeWithData' a, a_to_b a)) (\x -> (serializeConstr (constrRep (toConstr what)), x)) what -- | Use a 'Data' instance to implement a deserialization scheme dual to that of 'serializeWithData' deserializeWithData :: Data a => [Word8] -> a deserializeWithData = snd . deserializeWithData' deserializeWithData' :: forall a. Data a => [Word8] -> ([Word8], a) deserializeWithData' bytes = deserializeConstr bytes $ \constr_rep bytes -> gunfold (\(bytes, b_to_r) -> let (bytes', b) = deserializeWithData' bytes in (bytes', b_to_r b)) (\x -> (bytes, x)) (repConstr (dataTypeOf (undefined :: a)) constr_rep) serializeConstr :: ConstrRep -> [Word8] -> [Word8] serializeConstr (AlgConstr ix) = serializeWord8 1 . serializeInt ix serializeConstr (IntConstr i) = serializeWord8 2 . serializeInteger i serializeConstr (FloatConstr r) = serializeWord8 3 . serializeRational r serializeConstr (CharConstr c) = serializeWord8 4 . serializeChar c deserializeConstr :: [Word8] -> (ConstrRep -> [Word8] -> a) -> a deserializeConstr bytes k = deserializeWord8 bytes $ \constr_ix bytes -> case constr_ix of 1 -> deserializeInt bytes $ \ix -> k (AlgConstr ix) 2 -> deserializeInteger bytes $ \i -> k (IntConstr i) 3 -> deserializeRational bytes $ \r -> k (FloatConstr r) 4 -> deserializeChar bytes $ \c -> k (CharConstr c) x -> error $ "deserializeConstr: unrecognised serialized constructor type " ++ show x ++ " in context " ++ show bytes serializeFixedWidthNum :: forall a. (Integral a, FiniteBits a) => a -> [Word8] -> [Word8] serializeFixedWidthNum what = go (finiteBitSize what) what where go :: Int -> a -> [Word8] -> [Word8] go size current rest | size <= 0 = rest | otherwise = fromIntegral (current .&. 255) : go (size - 8) (current `shiftR` 8) rest deserializeFixedWidthNum :: forall a b. 
(Integral a, FiniteBits a) => [Word8] -> (a -> [Word8] -> b) -> b deserializeFixedWidthNum bytes k = go (finiteBitSize (undefined :: a)) bytes k where go :: Int -> [Word8] -> (a -> [Word8] -> b) -> b go size bytes k | size <= 0 = k 0 bytes | otherwise = case bytes of (byte:bytes) -> go (size - 8) bytes (\x -> k ((x `shiftL` 8) .|. fromIntegral byte)) [] -> error "deserializeFixedWidthNum: unexpected end of stream" serializeEnum :: (Enum a) => a -> [Word8] -> [Word8] serializeEnum = serializeInt . fromEnum deserializeEnum :: Enum a => [Word8] -> (a -> [Word8] -> b) -> b deserializeEnum bytes k = deserializeInt bytes (k . toEnum) serializeWord8 :: Word8 -> [Word8] -> [Word8] serializeWord8 x = (x:) deserializeWord8 :: [Word8] -> (Word8 -> [Word8] -> a) -> a deserializeWord8 (byte:bytes) k = k byte bytes deserializeWord8 [] _ = error "deserializeWord8: unexpected end of stream" serializeInt :: Int -> [Word8] -> [Word8] serializeInt = serializeFixedWidthNum deserializeInt :: [Word8] -> (Int -> [Word8] -> a) -> a deserializeInt = deserializeFixedWidthNum serializeRational :: (Real a) => a -> [Word8] -> [Word8] serializeRational = serializeString . show . toRational deserializeRational :: (Fractional a) => [Word8] -> (a -> [Word8] -> b) -> b deserializeRational bytes k = deserializeString bytes (k . fromRational . read) serializeInteger :: Integer -> [Word8] -> [Word8] serializeInteger = serializeString . show deserializeInteger :: [Word8] -> (Integer -> [Word8] -> a) -> a deserializeInteger bytes k = deserializeString bytes (k . read) serializeChar :: Char -> [Word8] -> [Word8] serializeChar = serializeString . show deserializeChar :: [Word8] -> (Char -> [Word8] -> a) -> a deserializeChar bytes k = deserializeString bytes (k . 
read) serializeString :: String -> [Word8] -> [Word8] serializeString = serializeList serializeEnum deserializeString :: [Word8] -> (String -> [Word8] -> a) -> a deserializeString = deserializeList deserializeEnum serializeList :: (a -> [Word8] -> [Word8]) -> [a] -> [Word8] -> [Word8] serializeList serialize_element xs = serializeInt (length xs) . foldr (.) id (map serialize_element xs) deserializeList :: forall a b. (forall c. [Word8] -> (a -> [Word8] -> c) -> c) -> [Word8] -> ([a] -> [Word8] -> b) -> b deserializeList deserialize_element bytes k = deserializeInt bytes $ \len bytes -> go len bytes k where go :: Int -> [Word8] -> ([a] -> [Word8] -> b) -> b go len bytes k | len <= 0 = k [] bytes | otherwise = deserialize_element bytes (\elt bytes -> go (len - 1) bytes (k . (elt:))) ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Settings/0000755000000000000000000000000007346545000021352 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Settings/Utils.hs0000644000000000000000000000536407346545000023016 0ustar0000000000000000module GHC.Settings.Utils where import Prelude -- See Note [Why do we import Prelude here?] import Data.Char (isSpace) import Data.Map (Map) import qualified Data.Map as Map import GHC.BaseDir import GHC.Platform.ArchOS import System.FilePath maybeRead :: Read a => String -> Maybe a maybeRead str = case reads str of [(x, "")] -> Just x _ -> Nothing maybeReadFuzzy :: Read a => String -> Maybe a maybeReadFuzzy str = case reads str of [(x, s)] | all isSpace s -> Just x _ -> Nothing -- Note [Settings file] -- ~~~~~~~~~~~~~~~~~~~~ -- -- GHC has a file, `${top_dir}/settings`, which is the main source of run-time -- configuration. ghc-pkg needs just a little bit of it: the target platform CPU -- arch and OS. It uses that to figure out what subdirectory of `~/.ghc` is -- associated with the current version/target platform. 
-- -- This module has just enough code to read key value pairs from the settings -- file, and read the target platform from those pairs. type RawSettings = Map String String -- | Read target Arch/OS from the settings getTargetArchOS :: FilePath -- ^ Settings filepath (for error messages) -> RawSettings -- ^ Raw settings file contents -> Either String ArchOS getTargetArchOS settingsFile settings = ArchOS <$> readRawSetting settingsFile settings "target arch" <*> readRawSetting settingsFile settings "target os" getGlobalPackageDb :: FilePath -> RawSettings -> Either String FilePath getGlobalPackageDb settingsFile settings = do rel_db <- getRawSetting settingsFile settings "Relative Global Package DB" return (dropFileName settingsFile rel_db) getRawSetting :: FilePath -> RawSettings -> String -> Either String String getRawSetting settingsFile settings key = case Map.lookup key settings of Just xs -> Right xs Nothing -> Left $ "No entry for " ++ show key ++ " in " ++ show settingsFile getRawFilePathSetting :: FilePath -> FilePath -> RawSettings -> String -> Either String String getRawFilePathSetting top_dir settingsFile settings key = expandTopDir top_dir <$> getRawSetting settingsFile settings key getRawBooleanSetting :: FilePath -> RawSettings -> String -> Either String Bool getRawBooleanSetting settingsFile settings key = do rawValue <- getRawSetting settingsFile settings key case rawValue of "YES" -> Right True "NO" -> Right False xs -> Left $ "Bad value for " ++ show key ++ ": " ++ show xs readRawSetting :: (Show a, Read a) => FilePath -> RawSettings -> String -> Either String a readRawSetting settingsFile settings key = case Map.lookup key settings of Just xs -> case maybeRead xs of Just v -> Right v Nothing -> Left $ "Failed to read " ++ show key ++ " value " ++ show xs Nothing -> Left $ "No entry for " ++ show key ++ " in " ++ show settingsFile ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/UniqueSubdir.hs0000644000000000000000000000125007346545000022523 
0ustar0000000000000000module GHC.UniqueSubdir ( uniqueSubdir ) where import Prelude -- See Note [Why do we import Prelude here?] import Data.List (intercalate) import GHC.Platform.ArchOS import GHC.Version (cProjectVersion) -- | A filepath like @x86_64-linux-7.6.3@ with the platform string to use when -- constructing platform-version-dependent files that need to co-exist. uniqueSubdir :: ArchOS -> FilePath uniqueSubdir (ArchOS arch os) = intercalate "-" [ stringEncodeArch arch , stringEncodeOS os , cProjectVersion ] -- NB: This functionality is reimplemented in Cabal, so if you -- change it, be sure to update Cabal. -- TODO make Cabal use this now that it is in ghc-boot. ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Unit/0000755000000000000000000000000007346545000020471 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Unit/Database.hs0000644000000000000000000006502407346545000022540 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-name-shadowing #-} {-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE OverloadedStrings #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Unit.Database -- Copyright : (c) The University of Glasgow 2009, Duncan Coutts 2014 -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- This module provides the view of GHC's database of registered packages that -- is shared between GHC the compiler\/library, and the ghc-pkg program. It -- defines the database format that is shared between GHC and ghc-pkg. 
-- -- The database format, and this library are constructed so that GHC does not -- have to depend on the Cabal library. The ghc-pkg program acts as the -- gateway between the external package format (which is defined by Cabal) and -- the internal package format which is specialised just for GHC. -- -- GHC the compiler only needs some of the information which is kept about -- registered packages, such as module names, various paths etc. On the other -- hand ghc-pkg has to keep all the information from Cabal packages and be able -- to regurgitate it for users and other tools. -- -- The first trick is that we duplicate some of the information in the package -- database. We essentially keep two versions of the database in one file, one -- version used only by ghc-pkg which keeps the full information (using the -- serialised form of the 'InstalledPackageInfo' type defined by the Cabal -- library); and a second version written by ghc-pkg and read by GHC which has -- just the subset of information that GHC needs. -- -- The second trick is that this module only defines in detail the format of -- the second version -- the bit GHC uses -- and the part managed by ghc-pkg -- is kept in the file but here we treat it as an opaque blob of data. That way -- this library avoids depending on Cabal. -- module GHC.Unit.Database ( GenericUnitInfo(..) , type DbUnitInfo , DbModule (..) , DbInstUnitId (..) , mapGenericUnitInfo -- * Read and write , DbMode(..) , DbOpenMode(..) , isDbOpenReadMode , readPackageDbForGhc , readPackageDbForGhcPkg , writePackageDb -- * Locking , PackageDbLock , lockPackageDb , unlockPackageDb -- * Misc , mkMungePathUrl , mungeUnitInfoPaths ) where import Prelude -- See note [Why do we import Prelude here?] 
import Data.Version (Version(..)) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BS.Char8 import qualified Data.ByteString.Lazy as BS.Lazy import qualified Data.ByteString.Lazy.Internal as BS.Lazy (defaultChunkSize) import qualified Data.Foldable as F import qualified Data.Traversable as F import Data.Bifunctor import Data.Binary as Bin import Data.Binary.Put as Bin import Data.Binary.Get as Bin import Data.List (intersperse) import Control.Exception as Exception import Control.Monad (when) import System.FilePath as FilePath #if !defined(mingw32_HOST_OS) import Data.Bits ((.|.)) import System.Posix.Files import System.Posix.Types (FileMode) #endif import System.IO import System.IO.Error import GHC.IO.Exception (IOErrorType(InappropriateType)) import qualified GHC.Data.ShortText as ST import GHC.IO.Handle.Lock import System.Directory -- | @ghc-boot@'s UnitInfo, serialized to the database. type DbUnitInfo = GenericUnitInfo BS.ByteString BS.ByteString BS.ByteString BS.ByteString DbModule -- | Information about an unit (a unit is an installed module library). -- -- This is a subset of Cabal's 'InstalledPackageInfo', with just the bits -- that GHC is interested in. -- -- Some types are left as parameters to be instantiated differently in ghc-pkg -- and in ghc itself. -- data GenericUnitInfo srcpkgid srcpkgname uid modulename mod = GenericUnitInfo { unitId :: uid -- ^ Unique unit identifier that is used during compilation (e.g. to -- generate symbols). , unitInstanceOf :: uid -- ^ Identifier of an indefinite unit (i.e. with module holes) that this -- unit is an instance of. -- -- For non instantiated units, unitInstanceOf=unitId , unitInstantiations :: [(modulename, mod)] -- ^ How this unit instantiates some of its module holes. Map hole module -- names to actual module , unitPackageId :: srcpkgid -- ^ Source package identifier. 
-- -- Cabal instantiates this with Distribution.Types.PackageId.PackageId -- type which only contains the source package name and version. Notice -- that it doesn't contain the Hackage revision, nor any kind of hash. , unitPackageName :: srcpkgname -- ^ Source package name , unitPackageVersion :: Version -- ^ Source package version , unitComponentName :: Maybe srcpkgname -- ^ Name of the component. -- -- Cabal supports more than one components (libraries, executables, -- testsuites) in the same package. Each component has a name except the -- default one (that can only be a library component) for which we use -- "Nothing". -- -- GHC only deals with "library" components as they are the only kind of -- components that can be registered in a database and used by other -- modules. , unitAbiHash :: ST.ShortText -- ^ ABI hash used to avoid mixing up units compiled with different -- dependencies, compiler, options, etc. , unitDepends :: [uid] -- ^ Identifiers of the units this one depends on , unitAbiDepends :: [(uid, ST.ShortText)] -- ^ Like 'unitDepends', but each dependency is annotated with the ABI hash -- we expect the dependency to respect. , unitImportDirs :: [FilePathST] -- ^ Directories containing module interfaces , unitLibraries :: [ST.ShortText] -- ^ Names of the Haskell libraries provided by this unit , unitExtDepLibsSys :: [ST.ShortText] -- ^ Names of the external system libraries that this unit depends on. See -- also `unitExtDepLibsGhc` field. , unitExtDepLibsGhc :: [ST.ShortText] -- ^ Because of slight differences between the GHC dynamic linker (in -- GHC.Runtime.Linker) and the -- native system linker, some packages have to link with a different list -- of libraries when using GHC's. Examples include: libs that are actually -- gnu ld scripts, and the possibility that the .a libs do not exactly -- match the .so/.dll equivalents. -- -- If this field is set, then we use that instead of the -- `unitExtDepLibsSys` field. 
, unitLibraryDirs :: [FilePathST] -- ^ Directories containing libraries provided by this unit. See also -- `unitLibraryDynDirs`. -- -- It seems to be used to store paths to external library dependencies -- too. , unitLibraryDynDirs :: [FilePathST] -- ^ Directories containing the dynamic libraries provided by this unit. -- See also `unitLibraryDirs`. -- -- It seems to be used to store paths to external dynamic library -- dependencies too. , unitExtDepFrameworks :: [ST.ShortText] -- ^ Names of the external MacOS frameworks that this unit depends on. , unitExtDepFrameworkDirs :: [FilePathST] -- ^ Directories containing MacOS frameworks that this unit depends -- on. , unitLinkerOptions :: [ST.ShortText] -- ^ Linker (e.g. ld) command line options , unitCcOptions :: [ST.ShortText] -- ^ C compiler options that needs to be passed to the C compiler when we -- compile some C code against this unit. , unitIncludes :: [ST.ShortText] -- ^ C header files that are required by this unit (provided by this unit -- or external) , unitIncludeDirs :: [FilePathST] -- ^ Directories containing C header files that this unit depends -- on. , unitHaddockInterfaces :: [FilePathST] -- ^ Paths to Haddock interface files for this unit , unitHaddockHTMLs :: [FilePathST] -- ^ Paths to Haddock directories containing HTML files , unitExposedModules :: [(modulename, Maybe mod)] -- ^ Modules exposed by the unit. -- -- A module can be re-exported from another package. In this case, we -- indicate the module origin in the second parameter. , unitHiddenModules :: [modulename] -- ^ Hidden modules. -- -- These are useful for error reporting (e.g. if a hidden module is -- imported) , unitIsIndefinite :: Bool -- ^ True if this unit has some module holes that need to be instantiated -- with real modules to make the unit usable (a.k.a. Backpack). , unitIsExposed :: Bool -- ^ True if the unit is exposed. A unit could be installed in a database -- by "disabled" by not being exposed. 
, unitIsTrusted :: Bool -- ^ True if the unit is trusted (cf Safe Haskell) } deriving (Eq, Show) type FilePathST = ST.ShortText -- | Convert between GenericUnitInfo instances mapGenericUnitInfo :: (uid1 -> uid2) -> (srcpkg1 -> srcpkg2) -> (srcpkgname1 -> srcpkgname2) -> (modname1 -> modname2) -> (mod1 -> mod2) -> (GenericUnitInfo srcpkg1 srcpkgname1 uid1 modname1 mod1 -> GenericUnitInfo srcpkg2 srcpkgname2 uid2 modname2 mod2) mapGenericUnitInfo fuid fsrcpkg fsrcpkgname fmodname fmod g@(GenericUnitInfo {..}) = g { unitId = fuid unitId , unitInstanceOf = fuid unitInstanceOf , unitInstantiations = fmap (bimap fmodname fmod) unitInstantiations , unitPackageId = fsrcpkg unitPackageId , unitPackageName = fsrcpkgname unitPackageName , unitComponentName = fmap fsrcpkgname unitComponentName , unitDepends = fmap fuid unitDepends , unitAbiDepends = fmap (first fuid) unitAbiDepends , unitExposedModules = fmap (bimap fmodname (fmap fmod)) unitExposedModules , unitHiddenModules = fmap fmodname unitHiddenModules } -- | @ghc-boot@'s 'Module', serialized to the database. data DbModule = DbModule { dbModuleUnitId :: DbInstUnitId , dbModuleName :: BS.ByteString } | DbModuleVar { dbModuleVarName :: BS.ByteString } deriving (Eq, Show) -- | @ghc-boot@'s instantiated unit id, serialized to the database. data DbInstUnitId -- | Instantiated unit = DbInstUnitId BS.ByteString -- component id [(BS.ByteString, DbModule)] -- instantiations: [(modulename,module)] -- | Uninstantiated unit | DbUnitId BS.ByteString -- unit id deriving (Eq, Show) -- | Represents a lock of a package db. newtype PackageDbLock = PackageDbLock Handle -- | Acquire an exclusive lock related to package DB under given location. lockPackageDb :: FilePath -> IO PackageDbLock -- | Release the lock related to package DB. unlockPackageDb :: PackageDbLock -> IO () -- | Acquire a lock of given type related to package DB under given location. 
lockPackageDbWith :: LockMode -> FilePath -> IO PackageDbLock lockPackageDbWith mode file = do -- We are trying to open the lock file and then lock it. Thus the lock file -- needs to either exist or we need to be able to create it. Ideally we -- would not assume that the lock file always exists in advance. When we are -- dealing with a package DB where we have write access then if the lock -- file does not exist then we can create it by opening the file in -- read/write mode. On the other hand if we are dealing with a package DB -- where we do not have write access (e.g. a global DB) then we can only -- open in read mode, and the lock file had better exist already or we're in -- trouble. So for global read-only DBs on platforms where we must lock the -- DB for reading then we will require that the installer/packaging has -- included the lock file. -- -- Thus the logic here is to first try opening in read-write mode -- and if that fails we try read-only (to handle global read-only DBs). -- If either succeed then lock the file. IO exceptions (other than the first -- open attempt failing due to the file not existing) simply propagate. -- -- Note that there is a complexity here which was discovered in #13945: some -- filesystems (e.g. NFS) will only allow exclusive locking if the fd was -- opened for write access. We would previously try opening the lockfile for -- read-only access first, however this failed when run on such filesystems. -- Consequently, we now try read-write access first, falling back to read-only -- if we are denied permission (e.g. in the case of a global database). catchJust (\e -> if isPermissionError e then Just () else Nothing) (lockFileOpenIn ReadWriteMode) (const $ lockFileOpenIn ReadMode) where lock = file <.> "lock" lockFileOpenIn io_mode = bracketOnError (openBinaryFile lock io_mode) hClose -- If file locking support is not available, ignore the error and proceed -- normally. 
Without it the only thing we lose on non-Windows platforms is -- the ability to safely issue concurrent updates to the same package db. $ \hnd -> do hLock hnd mode `catch` \FileLockingNotSupported -> return () return $ PackageDbLock hnd lockPackageDb = lockPackageDbWith ExclusiveLock unlockPackageDb (PackageDbLock hnd) = do hUnlock hnd hClose hnd -- | Mode to open a package db in. data DbMode = DbReadOnly | DbReadWrite -- | 'DbOpenMode' holds a value of type @t@ but only in 'DbReadWrite' mode. So -- it is like 'Maybe' but with a type argument for the mode to enforce that the -- mode is used consistently. data DbOpenMode (mode :: DbMode) t where DbOpenReadOnly :: DbOpenMode 'DbReadOnly t DbOpenReadWrite :: t -> DbOpenMode 'DbReadWrite t deriving instance Functor (DbOpenMode mode) deriving instance F.Foldable (DbOpenMode mode) deriving instance F.Traversable (DbOpenMode mode) isDbOpenReadMode :: DbOpenMode mode t -> Bool isDbOpenReadMode = \case DbOpenReadOnly -> True DbOpenReadWrite{} -> False -- | Read the part of the package DB that GHC is interested in. -- readPackageDbForGhc :: FilePath -> IO [DbUnitInfo] readPackageDbForGhc file = decodeFromFile file DbOpenReadOnly getDbForGhc >>= \case (pkgs, DbOpenReadOnly) -> return pkgs where getDbForGhc = do _version <- getHeader _ghcPartLen <- get :: Get Word32 ghcPart <- get -- the next part is for ghc-pkg, but we stop here. return ghcPart -- | Read the part of the package DB that ghc-pkg is interested in -- -- Note that the Binary instance for ghc-pkg's representation of packages -- is not defined in this package. This is because ghc-pkg uses Cabal types -- (and Binary instances for these) which this package does not depend on. -- -- If we open the package db in read only mode, we get its contents. Otherwise -- we additionally receive a PackageDbLock that represents a lock on the -- database, so that we can safely update it later. 
-- readPackageDbForGhcPkg :: Binary pkgs => FilePath -> DbOpenMode mode t -> IO (pkgs, DbOpenMode mode PackageDbLock) readPackageDbForGhcPkg file mode = decodeFromFile file mode getDbForGhcPkg where getDbForGhcPkg = do _version <- getHeader -- skip over the ghc part ghcPartLen <- get :: Get Word32 _ghcPart <- skip (fromIntegral ghcPartLen) -- the next part is for ghc-pkg ghcPkgPart <- get return ghcPkgPart -- | Write the whole of the package DB, both parts. -- writePackageDb :: Binary pkgs => FilePath -> [DbUnitInfo] -> pkgs -> IO () writePackageDb file ghcPkgs ghcPkgPart = do writeFileAtomic file (runPut putDbForGhcPkg) #if !defined(mingw32_HOST_OS) addFileMode file 0o444 -- ^ In case the current umask is too restrictive force all read bits to -- allow access. #endif return () where putDbForGhcPkg = do putHeader put ghcPartLen putLazyByteString ghcPart put ghcPkgPart where ghcPartLen :: Word32 ghcPartLen = fromIntegral (BS.Lazy.length ghcPart) ghcPart = encode ghcPkgs #if !defined(mingw32_HOST_OS) addFileMode :: FilePath -> FileMode -> IO () addFileMode file m = do o <- fileMode <$> getFileStatus file setFileMode file (m .|. o) #endif getHeader :: Get (Word32, Word32) getHeader = do magic <- getByteString (BS.length headerMagic) when (magic /= headerMagic) $ fail "not a ghc-pkg db file, wrong file magic number" majorVersion <- get :: Get Word32 -- The major version is for incompatible changes minorVersion <- get :: Get Word32 -- The minor version is for compatible extensions when (majorVersion /= 1) $ fail "unsupported ghc-pkg db format version" -- If we ever support multiple major versions then we'll have to change -- this code -- The header can be extended without incrementing the major version, -- we ignore fields we don't know about (currently all). 
headerExtraLen <- get :: Get Word32 skip (fromIntegral headerExtraLen) return (majorVersion, minorVersion) putHeader :: Put putHeader = do putByteString headerMagic put majorVersion put minorVersion put headerExtraLen where majorVersion = 1 :: Word32 minorVersion = 0 :: Word32 headerExtraLen = 0 :: Word32 headerMagic :: BS.ByteString headerMagic = BS.Char8.pack "\0ghcpkg\0" -- TODO: we may be able to replace the following with utils from the binary -- package in future. -- | Feed a 'Get' decoder with data chunks from a file. -- decodeFromFile :: FilePath -> DbOpenMode mode t -> Get pkgs -> IO (pkgs, DbOpenMode mode PackageDbLock) decodeFromFile file mode decoder = case mode of DbOpenReadOnly -> do -- Note [Locking package database on Windows] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- When we open the package db in read only mode, there is no need to acquire -- shared lock on non-Windows platform because we update the database with an -- atomic rename, so readers will always see the database in a consistent -- state. #if defined(mingw32_HOST_OS) bracket (lockPackageDbWith SharedLock file) unlockPackageDb $ \_ -> do #endif (, DbOpenReadOnly) <$> decodeFileContents DbOpenReadWrite{} -> do -- When we open the package db in read/write mode, acquire an exclusive lock -- on the database and return it so we can keep it for the duration of the -- update. bracketOnError (lockPackageDb file) unlockPackageDb $ \lock -> do (, DbOpenReadWrite lock) <$> decodeFileContents where decodeFileContents = withBinaryFile file ReadMode $ \hnd -> feed hnd (runGetIncremental decoder) feed hnd (Partial k) = do chunk <- BS.hGet hnd BS.Lazy.defaultChunkSize if BS.null chunk then feed hnd (k Nothing) else feed hnd (k (Just chunk)) feed _ (Done _ _ res) = return res feed _ (Fail _ _ msg) = ioError err where err = mkIOError InappropriateType loc Nothing (Just file) `ioeSetErrorString` msg loc = "GHC.Unit.Database.readPackageDb" -- Copied from Cabal's Distribution.Simple.Utils. 
writeFileAtomic :: FilePath -> BS.Lazy.ByteString -> IO () writeFileAtomic targetPath content = do let (targetDir, targetFile) = splitFileName targetPath Exception.bracketOnError (openBinaryTempFileWithDefaultPermissions targetDir $ targetFile <.> "tmp") (\(tmpPath, handle) -> hClose handle >> removeFile tmpPath) (\(tmpPath, handle) -> do BS.Lazy.hPut handle content hClose handle renameFile tmpPath targetPath) instance Binary DbUnitInfo where put (GenericUnitInfo unitId unitInstanceOf unitInstantiations unitPackageId unitPackageName unitPackageVersion unitComponentName unitAbiHash unitDepends unitAbiDepends unitImportDirs unitLibraries unitExtDepLibsSys unitExtDepLibsGhc unitLibraryDirs unitLibraryDynDirs unitExtDepFrameworks unitExtDepFrameworkDirs unitLinkerOptions unitCcOptions unitIncludes unitIncludeDirs unitHaddockInterfaces unitHaddockHTMLs unitExposedModules unitHiddenModules unitIsIndefinite unitIsExposed unitIsTrusted) = do put unitPackageId put unitPackageName put unitPackageVersion put unitComponentName put unitId put unitInstanceOf put unitInstantiations put unitAbiHash put unitDepends put unitAbiDepends put unitImportDirs put unitLibraries put unitExtDepLibsSys put unitExtDepLibsGhc put unitLibraryDirs put unitLibraryDynDirs put unitExtDepFrameworks put unitExtDepFrameworkDirs put unitLinkerOptions put unitCcOptions put unitIncludes put unitIncludeDirs put unitHaddockInterfaces put unitHaddockHTMLs put unitExposedModules put unitHiddenModules put unitIsIndefinite put unitIsExposed put unitIsTrusted get = do unitPackageId <- get unitPackageName <- get unitPackageVersion <- get unitComponentName <- get unitId <- get unitInstanceOf <- get unitInstantiations <- get unitAbiHash <- get unitDepends <- get unitAbiDepends <- get unitImportDirs <- get unitLibraries <- get unitExtDepLibsSys <- get unitExtDepLibsGhc <- get libraryDirs <- get libraryDynDirs <- get frameworks <- get frameworkDirs <- get unitLinkerOptions <- get unitCcOptions <- get unitIncludes <- 
get unitIncludeDirs <- get unitHaddockInterfaces <- get unitHaddockHTMLs <- get unitExposedModules <- get unitHiddenModules <- get unitIsIndefinite <- get unitIsExposed <- get unitIsTrusted <- get return (GenericUnitInfo unitId unitInstanceOf unitInstantiations unitPackageId unitPackageName unitPackageVersion unitComponentName unitAbiHash unitDepends unitAbiDepends unitImportDirs unitLibraries unitExtDepLibsSys unitExtDepLibsGhc libraryDirs libraryDynDirs frameworks frameworkDirs unitLinkerOptions unitCcOptions unitIncludes unitIncludeDirs unitHaddockInterfaces unitHaddockHTMLs unitExposedModules unitHiddenModules unitIsIndefinite unitIsExposed unitIsTrusted) instance Binary DbModule where put (DbModule dbModuleUnitId dbModuleName) = do putWord8 0 put dbModuleUnitId put dbModuleName put (DbModuleVar dbModuleVarName) = do putWord8 1 put dbModuleVarName get = do b <- getWord8 case b of 0 -> DbModule <$> get <*> get _ -> DbModuleVar <$> get instance Binary DbInstUnitId where put (DbUnitId uid) = do putWord8 0 put uid put (DbInstUnitId dbUnitIdComponentId dbUnitIdInsts) = do putWord8 1 put dbUnitIdComponentId put dbUnitIdInsts get = do b <- getWord8 case b of 0 -> DbUnitId <$> get _ -> DbInstUnitId <$> get <*> get -- | Return functions to perform path/URL variable substitution as per the Cabal -- ${pkgroot} spec -- (http://www.haskell.org/pipermail/libraries/2009-May/011772.html) -- -- Paths/URLs can be relative to ${pkgroot} or ${pkgrooturl}. -- The "pkgroot" is the directory containing the package database. -- -- Also perform a similar substitution for the older GHC-specific -- "$topdir" variable. The "topdir" is the location of the ghc -- installation (obtained from the -B option). 
mkMungePathUrl :: FilePathST -> FilePathST -> (FilePathST -> FilePathST, FilePathST -> FilePathST)
mkMungePathUrl top_dir pkgroot = (munge_path, munge_url)
   where
    -- Rewrite a filesystem path: replace a leading "${pkgroot}" or "$topdir"
    -- variable with the corresponding directory. Paths without a recognised
    -- prefix are returned unchanged.
    munge_path p
      | Just p' <- stripVarPrefix "${pkgroot}" p = mappend pkgroot p'
      | Just p' <- stripVarPrefix "$topdir"    p = mappend top_dir p'
      | otherwise                                = p

    -- Rewrite a URL: same idea as munge_path, but the substituted prefix is
    -- turned into a "file:///" URL with posix-style separators.
    munge_url p
      | Just p' <- stripVarPrefix "${pkgrooturl}" p = toUrlPath pkgroot p'
      | Just p' <- stripVarPrefix "$httptopdir"   p = toUrlPath top_dir p'
      | otherwise                                   = p

    toUrlPath r p = mconcat $ "file:///" : (intersperse "/" (r : (splitDirectories p)))
                    -- URLs always use posix style '/' separators

    -- We need to drop a leading "/" or "\\" if there is one:
    splitDirectories :: FilePathST -> [FilePathST]
    splitDirectories p = filter (not . ST.null) $ ST.splitFilePath p

    -- We could drop the separator here, and then use </> above. However,
    -- by leaving it in and using ++ we keep the same path separator
    -- rather than letting FilePath change it to use \ as the separator

    -- Strip the variable prefix 'var' from 'path'. Only matches when the
    -- variable is followed by a path separator (or ends the path), so that
    -- e.g. "$topdirx" is not rewritten.
    stripVarPrefix var path = case ST.stripPrefix var path of
      Just "" -> Just ""
      Just cs | isPathSeparator (ST.head cs) -> Just cs
      _ -> Nothing

-- | Perform path/URL variable substitution as per the Cabal ${pkgroot} spec
-- (http://www.haskell.org/pipermail/libraries/2009-May/011772.html)
-- Paths/URLs can be relative to ${pkgroot} or ${pkgrooturl}.
-- The "pkgroot" is the directory containing the package database.
--
-- Also perform a similar substitution for the older GHC-specific
-- "$topdir" variable. The "topdir" is the location of the ghc
-- installation (obtained from the -B option).
mungeUnitInfoPaths :: FilePathST -> FilePathST -> GenericUnitInfo a b c d e -> GenericUnitInfo a b c d e mungeUnitInfoPaths top_dir pkgroot pkg = -- TODO: similar code is duplicated in utils/ghc-pkg/Main.hs pkg { unitImportDirs = munge_paths (unitImportDirs pkg) , unitIncludeDirs = munge_paths (unitIncludeDirs pkg) , unitLibraryDirs = munge_paths (unitLibraryDirs pkg) , unitLibraryDynDirs = munge_paths (unitLibraryDynDirs pkg) , unitExtDepFrameworkDirs = munge_paths (unitExtDepFrameworkDirs pkg) , unitHaddockInterfaces = munge_paths (unitHaddockInterfaces pkg) -- haddock-html is allowed to be either a URL or a file , unitHaddockHTMLs = munge_paths (munge_urls (unitHaddockHTMLs pkg)) } where munge_paths = map munge_path munge_urls = map munge_url (munge_path,munge_url) = mkMungePathUrl top_dir pkgroot ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Utils/0000755000000000000000000000000007346545000020652 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Utils/Encoding.hs0000644000000000000000000002212007346545000022731 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, MultiWayIf #-} {-# OPTIONS_GHC -O2 -fno-warn-name-shadowing #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected. This module used to live in the `ghc` -- package but has been moved to `ghc-boot` because the definition -- of the package database (needed in both ghc and in ghc-pkg) lives in -- `ghc-boot` and uses ShortText, which in turn depends on this module. 
-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow, 1997-2006
--
-- Character encodings
--
-- -----------------------------------------------------------------------------

module GHC.Utils.Encoding
  ( -- * UTF-8
    module GHC.Utils.Encoding.UTF8

    -- * Z-encoding
  , UserString
  , EncodedString
  , zEncodeString
  , zDecodeString

    -- * Base62-encoding
  , toBase62
  , toBase62Padded
  ) where

import Prelude

import Foreign
import Data.Char
import qualified Data.Char as Char
import Numeric
import GHC.Utils.Encoding.UTF8

-- -----------------------------------------------------------------------------
-- Note [Z-Encoding]
-- ~~~~~~~~~~~~~~~~~

{- This is the main name-encoding and decoding function. It encodes any
string into a string that is acceptable as a C name. This is done
right before we emit a symbol name into the compiled C or asm code.
Z-encoding of strings is cached in the FastString interface, so we
never encode the same string more than once.

The basic encoding scheme is this.

* Tuples (,,,) are coded as Z3T

* Alphabetic characters (upper and lower) and digits
        all translate to themselves;
        except 'Z', which translates to 'ZZ'
        and 'z', which translates to 'zz'
  We need both so that we can preserve the variable/tycon distinction

* Most other printable characters translate to 'zx' or 'Zx' for some
        alphabetic character x

* The others translate as 'znnnU' where 'nnn' is the decimal number
        of the character

        Before          After
        --------------------------
        Trak            Trak
        foo_wib         foozuwib
        >               zg
        >1              zg1
        foo#            foozh
        foo##           foozhzh
        foo##1          foozhzh1
        fooZ            fooZZ
        :+              ZCzp
        ()              Z0T     0-tuple
        (,,,,)          Z5T     5-tuple
        (##)            Z0H     unboxed 0-tuple
        (#,,,,#)        Z5H     unboxed 5-tuple
-}

type UserString    = String     -- As the user typed it
type EncodedString = String     -- Encoded form

-- | Z-encode a string so that it is a valid C identifier.
-- See Note [Z-Encoding] above for the scheme.
zEncodeString :: UserString -> EncodedString
zEncodeString cs = case maybe_tuple cs of
                Just n  -> n            -- Tuples go to Z2T etc
                Nothing -> go cs
          where
                -- The first character uses encode_digit_ch (a leading digit
                -- must be escaped); the rest use the ordinary encoder.
                go []      = []
                go (c:cs)  = encode_digit_ch c ++ go' cs
                go' []     = []
                go' (c:cs) = encode_ch c ++ go' cs

unencodedChar :: Char -> Bool   -- True for chars that don't need encoding
unencodedChar 'Z' = False
unencodedChar 'z' = False
unencodedChar c   =  c >= 'a' && c <= 'z'
                  || c >= 'A' && c <= 'Z'
                  || c >= '0' && c <= '9'

-- If a digit is at the start of a symbol then we need to encode it.
-- Otherwise package names like 9pH-0.1 give linker errors.
encode_digit_ch :: Char -> EncodedString
encode_digit_ch c | c >= '0' && c <= '9' = encode_as_unicode_char c
encode_digit_ch c | otherwise            = encode_ch c

-- | Encode one character (anywhere but the first position).
encode_ch :: Char -> EncodedString
encode_ch c | unencodedChar c = [c]     -- Common case first

-- Constructors
encode_ch '('  = "ZL"   -- Needed for things like (,), and (->)
encode_ch ')'  = "ZR"   -- For symmetry with (
encode_ch '['  = "ZM"
encode_ch ']'  = "ZN"
encode_ch ':'  = "ZC"
encode_ch 'Z'  = "ZZ"

-- Variables
encode_ch 'z'  = "zz"
encode_ch '&'  = "za"
encode_ch '|'  = "zb"
encode_ch '^'  = "zc"
encode_ch '$'  = "zd"
encode_ch '='  = "ze"
encode_ch '>'  = "zg"
encode_ch '#'  = "zh"
encode_ch '.'  = "zi"
encode_ch '<'  = "zl"
encode_ch '-'  = "zm"
encode_ch '!'  = "zn"
encode_ch '+'  = "zp"
encode_ch '\'' = "zq"
encode_ch '\\' = "zr"
encode_ch '/'  = "zs"
encode_ch '*'  = "zt"
encode_ch '_'  = "zu"
encode_ch '%'  = "zv"
encode_ch c    = encode_as_unicode_char c

-- | Fallback encoding: "z<hex code>U". A leading '0' is inserted if the
-- hex string would otherwise begin with a letter, so the decoder can tell
-- a numeric escape apart from the mnemonic escapes above.
encode_as_unicode_char :: Char -> EncodedString
encode_as_unicode_char c
  = 'z' : case hex_str of
            hd : _ | isDigit hd -> hex_str
            _                   -> '0' : hex_str
  where hex_str = showHex (ord c) "U"

-- ToDo: we could improve the encoding here in various ways.
-- eg. strings of unicode characters come out as 'z1234Uz5678U', we
-- could remove the 'U' in the middle (the 'z' works as a separator).

-- | Inverse of 'zEncodeString' (on well-formed encoded strings).
zDecodeString :: EncodedString -> UserString
zDecodeString [] = []
zDecodeString ('Z' : d : rest)
  | isDigit d = decode_tuple   d rest
  | otherwise = decode_upper   d : zDecodeString rest
zDecodeString ('z' : d : rest)
  | isDigit d = decode_num_esc d rest
  | otherwise = decode_lower   d : zDecodeString rest
zDecodeString (c   : rest) = c : zDecodeString rest

decode_upper, decode_lower :: Char -> Char

decode_upper 'L' = '('
decode_upper 'R' = ')'
decode_upper 'M' = '['
decode_upper 'N' = ']'
decode_upper 'C' = ':'
decode_upper 'Z' = 'Z'
decode_upper ch  = {-pprTrace "decode_upper" (char ch)-} ch

decode_lower 'z' = 'z'
decode_lower 'a' = '&'
decode_lower 'b' = '|'
decode_lower 'c' = '^'
decode_lower 'd' = '$'
decode_lower 'e' = '='
decode_lower 'g' = '>'
decode_lower 'h' = '#'
decode_lower 'i' = '.'
decode_lower 'l' = '<'
decode_lower 'm' = '-'
decode_lower 'n' = '!'
decode_lower 'p' = '+'
decode_lower 'q' = '\''
decode_lower 'r' = '\\'
decode_lower 's' = '/'
decode_lower 't' = '*'
decode_lower 'u' = '_'
decode_lower 'v' = '%'
decode_lower ch  = {-pprTrace "decode_lower" (char ch)-} ch

-- Characters not having a specific code are coded as z224U (in hex)
decode_num_esc :: Char -> EncodedString -> UserString
decode_num_esc d rest
  = go (digitToInt d) rest
  where
    -- Accumulate hex digits until the terminating 'U', then resume
    -- ordinary decoding on the remainder.
    go n (c : rest) | isHexDigit c = go (16*n + digitToInt c) rest
    go n ('U' : rest)              = chr n : zDecodeString rest
    go n other = error ("decode_num_esc: " ++ show n ++ ' ':other)

decode_tuple :: Char -> EncodedString -> UserString
decode_tuple d rest
  = go (digitToInt d) rest
  where
        -- NB. recurse back to zDecodeString after decoding the tuple, because
        -- the tuple might be embedded in a longer name.
    go n (c : rest) | isDigit c = go (10*n + digitToInt c) rest
    go 0 ('T':rest)     = "()" ++ zDecodeString rest
    go n ('T':rest)     = '(' : replicate (n-1) ',' ++ ")" ++ zDecodeString rest
    go n ('H':rest)     = '(' : '#' : replicate (n-1) ',' ++ "#)" ++ zDecodeString rest
    go n other = error ("decode_tuple: " ++ show n ++ ' ':other)

{- Tuples are encoded as
        Z3T or Z3H
for 3-tuples or unboxed 3-tuples respectively.  No other encoding starts
        Z<digit>

* "(##)" is the tycon for an unboxed 0-tuple

* "()" is the tycon for a boxed 0-tuple
-}

-- | If the string is the name of a (boxed or unboxed) tuple, return its
-- Z-encoding directly; otherwise Nothing (see the comment block above).
maybe_tuple :: UserString -> Maybe EncodedString

maybe_tuple "(##)" = Just("Z0H")
maybe_tuple ('(' : '#' : cs) = case count_commas (0::Int) cs of
                                 (n, '#' : ')' : _) -> Just ('Z' : shows (n+1) "H")
                                 _                  -> Nothing
maybe_tuple "()" = Just("Z0T")
maybe_tuple ('(' : cs)       = case count_commas (0::Int) cs of
                                 (n, ')' : _) -> Just ('Z' : shows (n+1) "T")
                                 _            -> Nothing
maybe_tuple _                = Nothing

-- Count leading commas, returning the count and the rest of the string.
count_commas :: Int -> String -> (Int, String)
count_commas n (',' : cs) = count_commas (n+1) cs
count_commas n cs         = (n,cs)

{-
************************************************************************
*                                                                      *
                        Base 62
*                                                                      *
************************************************************************

Note [Base 62 encoding 128-bit integers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instead of base-62 encoding a single 128-bit integer
(ceil(21.49) characters), we'll base-62 a pair of 64-bit integers
(2 * ceil(10.75) characters).  Luckily for us, it's the same number of
characters!
-}

--------------------------------------------------------------------------
-- Base 62

-- The base-62 code is based off of 'locators'
-- ((c) Operational Dynamics Consulting, BSD3 licensed)

-- | Size of a 64-bit word when written as a base-62 string
word64Base62Len :: Int
word64Base62Len = 11

-- | Converts a 64-bit word into a base-62 string, left-padded with '0'
-- to a fixed width of 'word64Base62Len' characters.
toBase62Padded :: Word64 -> String
toBase62Padded w = pad ++ str
  where
    pad = replicate len '0'
    len = word64Base62Len - length str  -- 11 == ceil(64 / lg 62)
    str = toBase62 w

-- | Converts a 64-bit word into an (unpadded) base-62 string using the
-- digit alphabet 0-9, A-Z, a-z.
toBase62 :: Word64 -> String
toBase62 w = showIntAtBase 62 represent w ""
  where
    represent :: Int -> Char
    represent x
        | x < 10    = Char.chr (48 + x)        -- '0'..'9'
        | x < 36    = Char.chr (65 + x - 10)   -- 'A'..'Z'
        | x < 62    = Char.chr (97 + x - 36)   -- 'a'..'z'
        | otherwise = error "represent (base 62): impossible!"
ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Utils/Encoding/0000755000000000000000000000000007346545000022400 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/GHC/Utils/Encoding/UTF8.hs0000644000000000000000000003300707346545000023465 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, MultiWayIf #-} {-# OPTIONS_GHC -O2 -fno-warn-name-shadowing #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected. This module used to live in the `ghc` -- package but has been moved to `ghc-boot` because the definition -- of the package database (needed in both ghc and in ghc-pkg) lives in -- `ghc-boot` and uses ShortText, which in turn depends on this module. -- | Simple, non-streaming Modified UTF-8 codecs. -- -- This is one of several UTF-8 implementations provided by GHC; see Note -- [GHC's many UTF-8 implementations] in "GHC.Encoding.UTF8" for an -- overview. -- module GHC.Utils.Encoding.UTF8 ( -- * Decoding single characters utf8DecodeCharAddr# , utf8DecodeCharPtr , utf8DecodeCharByteArray# , utf8PrevChar , utf8CharStart , utf8UnconsByteString -- * Decoding strings , utf8DecodeByteString , utf8DecodeShortByteString , utf8DecodeForeignPtr , utf8DecodeByteArray# -- * Counting characters , utf8CountCharsShortByteString , utf8CountCharsByteArray# -- * Comparison , utf8CompareByteArray# , utf8CompareShortByteString -- * Encoding strings , utf8EncodeByteArray# , utf8EncodePtr , utf8EncodeByteString , utf8EncodeShortByteString , utf8EncodedLength ) where import Prelude import Foreign import GHC.IO #if MIN_VERSION_base(4,18,0) import GHC.Encoding.UTF8 #else import Foreign.ForeignPtr.Unsafe (unsafeForeignPtrToPtr) import Data.Char import GHC.Exts import GHC.ST #endif import Data.ByteString (ByteString) import qualified Data.ByteString.Internal as BS import Data.ByteString.Short.Internal (ShortByteString(..)) -- | Find the start of the codepoint preceding the 
codepoint at the given -- 'Ptr'. This is undefined if there is no previous valid codepoint. utf8PrevChar :: Ptr Word8 -> IO (Ptr Word8) utf8PrevChar p = utf8CharStart (p `plusPtr` (-1)) -- | Find the start of the codepoint at the given 'Ptr'. This is undefined if -- there is no previous valid codepoint. utf8CharStart :: Ptr Word8 -> IO (Ptr Word8) utf8CharStart p = go p where go p = do w <- peek p if w >= 0x80 && w < 0xC0 then go (p `plusPtr` (-1)) else return p utf8CountCharsShortByteString :: ShortByteString -> Int utf8CountCharsShortByteString (SBS ba) = utf8CountCharsByteArray# ba utf8DecodeShortByteString :: ShortByteString -> [Char] utf8DecodeShortByteString (SBS ba#) = utf8DecodeByteArray# ba# -- | Decode a 'ByteString' containing a UTF-8 string. utf8DecodeByteString :: ByteString -> [Char] utf8DecodeByteString (BS.PS fptr offset len) = utf8DecodeForeignPtr fptr offset len utf8EncodeShortByteString :: String -> ShortByteString utf8EncodeShortByteString str = SBS (utf8EncodeByteArray# str) -- | Encode a 'String' into a 'ByteString'. utf8EncodeByteString :: String -> ByteString utf8EncodeByteString s = unsafePerformIO $ do let len = utf8EncodedLength s buf <- mallocForeignPtrBytes len withForeignPtr buf $ \ptr -> do utf8EncodePtr ptr s pure (BS.fromForeignPtr buf 0 len) utf8UnconsByteString :: ByteString -> Maybe (Char, ByteString) utf8UnconsByteString (BS.PS _ _ 0) = Nothing utf8UnconsByteString (BS.PS fptr offset len) = unsafeDupablePerformIO $ withForeignPtr fptr $ \ptr -> do let (c,n) = utf8DecodeCharPtr (ptr `plusPtr` offset) return $ Just (c, BS.PS fptr (offset + n) (len - n)) utf8CompareShortByteString :: ShortByteString -> ShortByteString -> Ordering utf8CompareShortByteString (SBS a1) (SBS a2) = utf8CompareByteArray# a1 a2 --------------------------------------------------------- -- Everything below was moved into base in GHC 9.6 -- -- These can be dropped in GHC 9.6 + 2 major releases. 
--------------------------------------------------------- #if !MIN_VERSION_base(4,18,0) -- We can't write the decoder as efficiently as we'd like without -- resorting to unboxed extensions, unfortunately. I tried to write -- an IO version of this function, but GHC can't eliminate boxed -- results from an IO-returning function. -- -- We assume we can ignore overflow when parsing a multibyte character here. -- To make this safe, we add extra sentinel bytes to unparsed UTF-8 sequences -- before decoding them (see "GHC.Data.StringBuffer"). {-# INLINE utf8DecodeChar# #-} -- | Decode a single codepoint from a byte buffer indexed by the given indexing -- function. utf8DecodeChar# :: (Int# -> Word#) -> (# Char#, Int# #) utf8DecodeChar# indexWord8# = let !ch0 = word2Int# (indexWord8# 0#) in case () of _ | isTrue# (ch0 <=# 0x7F#) -> (# chr# ch0, 1# #) | isTrue# ((ch0 >=# 0xC0#) `andI#` (ch0 <=# 0xDF#)) -> let !ch1 = word2Int# (indexWord8# 1#) in if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else (# chr# (((ch0 -# 0xC0#) `uncheckedIShiftL#` 6#) +# (ch1 -# 0x80#)), 2# #) | isTrue# ((ch0 >=# 0xE0#) `andI#` (ch0 <=# 0xEF#)) -> let !ch1 = word2Int# (indexWord8# 1#) in if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else let !ch2 = word2Int# (indexWord8# 2#) in if isTrue# ((ch2 <# 0x80#) `orI#` (ch2 >=# 0xC0#)) then fail 2# else (# chr# (((ch0 -# 0xE0#) `uncheckedIShiftL#` 12#) +# ((ch1 -# 0x80#) `uncheckedIShiftL#` 6#) +# (ch2 -# 0x80#)), 3# #) | isTrue# ((ch0 >=# 0xF0#) `andI#` (ch0 <=# 0xF8#)) -> let !ch1 = word2Int# (indexWord8# 1#) in if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else let !ch2 = word2Int# (indexWord8# 2#) in if isTrue# ((ch2 <# 0x80#) `orI#` (ch2 >=# 0xC0#)) then fail 2# else let !ch3 = word2Int# (indexWord8# 3#) in if isTrue# ((ch3 <# 0x80#) `orI#` (ch3 >=# 0xC0#)) then fail 3# else (# chr# (((ch0 -# 0xF0#) `uncheckedIShiftL#` 18#) +# ((ch1 -# 0x80#) `uncheckedIShiftL#` 12#) +# ((ch2 -# 0x80#) 
`uncheckedIShiftL#` 6#) +# (ch3 -# 0x80#)), 4# #) | otherwise -> fail 1# where -- all invalid sequences end up here: fail :: Int# -> (# Char#, Int# #) fail nBytes# = (# '\0'#, nBytes# #) -- '\xFFFD' would be the usual replacement character, but -- that's a valid symbol in Haskell, so will result in a -- confusing parse error later on. Instead we use '\0' which -- will signal a lexer error immediately. -- | Decode a single character at the given 'Addr#'. utf8DecodeCharAddr# :: Addr# -> Int# -> (# Char#, Int# #) utf8DecodeCharAddr# a# off# = #if !MIN_VERSION_base(4,16,0) utf8DecodeChar# (\i# -> indexWord8OffAddr# a# (i# +# off#)) #else utf8DecodeChar# (\i# -> word8ToWord# (indexWord8OffAddr# a# (i# +# off#))) #endif -- | Decode a single codepoint starting at the given 'Ptr'. utf8DecodeCharPtr :: Ptr Word8 -> (Char, Int) utf8DecodeCharPtr !(Ptr a#) = case utf8DecodeCharAddr# a# 0# of (# c#, nBytes# #) -> ( C# c#, I# nBytes# ) -- | Decode a single codepoint starting at the given byte offset into a -- 'ByteArray#'. 
utf8DecodeCharByteArray# :: ByteArray# -> Int# -> (# Char#, Int# #) utf8DecodeCharByteArray# ba# off# = #if !MIN_VERSION_base(4,16,0) utf8DecodeChar# (\i# -> indexWord8Array# ba# (i# +# off#)) #else utf8DecodeChar# (\i# -> word8ToWord# (indexWord8Array# ba# (i# +# off#))) #endif {-# INLINE utf8Decode# #-} utf8Decode# :: (IO ()) -> (Int# -> (# Char#, Int# #)) -> Int# -> IO [Char] utf8Decode# retain decodeChar# len# = unpack 0# where unpack i# | isTrue# (i# >=# len#) = retain >> return [] | otherwise = case decodeChar# i# of (# c#, nBytes# #) -> do rest <- unsafeDupableInterleaveIO $ unpack (i# +# nBytes#) return (C# c# : rest) utf8DecodeForeignPtr :: ForeignPtr Word8 -> Int -> Int -> [Char] utf8DecodeForeignPtr fp offset (I# len#) = unsafeDupablePerformIO $ do let !(Ptr a#) = unsafeForeignPtrToPtr fp `plusPtr` offset utf8Decode# (touchForeignPtr fp) (utf8DecodeCharAddr# a#) len# -- Note that since utf8Decode# returns a thunk the lifetime of the -- ForeignPtr actually needs to be longer than the lexical lifetime -- withForeignPtr would provide here. That's why we use touchForeignPtr to -- keep the fp alive until the last character has actually been decoded. utf8DecodeByteArray# :: ByteArray# -> [Char] utf8DecodeByteArray# ba# = unsafeDupablePerformIO $ let len# = sizeofByteArray# ba# in utf8Decode# (return ()) (utf8DecodeCharByteArray# ba#) len# utf8CompareByteArray# :: ByteArray# -> ByteArray# -> Ordering utf8CompareByteArray# a1 a2 = go 0# 0# -- UTF-8 has the property that sorting by bytes values also sorts by -- code-points. -- BUT we use "Modified UTF-8" which encodes \0 as 0xC080 so this property -- doesn't hold and we must explicitly check this case here. -- Note that decoding every code point would also work but it would be much -- more costly. 
where !sz1 = sizeofByteArray# a1 !sz2 = sizeofByteArray# a2 go off1 off2 | isTrue# ((off1 >=# sz1) `andI#` (off2 >=# sz2)) = EQ | isTrue# (off1 >=# sz1) = LT | isTrue# (off2 >=# sz2) = GT | otherwise = #if !MIN_VERSION_base(4,16,0) let !b1_1 = indexWord8Array# a1 off1 !b2_1 = indexWord8Array# a2 off2 #else let !b1_1 = word8ToWord# (indexWord8Array# a1 off1) !b2_1 = word8ToWord# (indexWord8Array# a2 off2) #endif in case b1_1 of 0xC0## -> case b2_1 of 0xC0## -> go (off1 +# 1#) (off2 +# 1#) #if !MIN_VERSION_base(4,16,0) _ -> case indexWord8Array# a1 (off1 +# 1#) of #else _ -> case word8ToWord# (indexWord8Array# a1 (off1 +# 1#)) of #endif 0x80## -> LT _ -> go (off1 +# 1#) (off2 +# 1#) _ -> case b2_1 of #if !MIN_VERSION_base(4,16,0) 0xC0## -> case indexWord8Array# a2 (off2 +# 1#) of #else 0xC0## -> case word8ToWord# (indexWord8Array# a2 (off2 +# 1#)) of #endif 0x80## -> GT _ -> go (off1 +# 1#) (off2 +# 1#) _ | isTrue# (b1_1 `gtWord#` b2_1) -> GT | isTrue# (b1_1 `ltWord#` b2_1) -> LT | otherwise -> go (off1 +# 1#) (off2 +# 1#) utf8CountCharsByteArray# :: ByteArray# -> Int utf8CountCharsByteArray# ba = go 0# 0# where len# = sizeofByteArray# ba go i# n# | isTrue# (i# >=# len#) = I# n# | otherwise = case utf8DecodeCharByteArray# ba i# of (# _, nBytes# #) -> go (i# +# nBytes#) (n# +# 1#) {-# INLINE utf8EncodeChar #-} utf8EncodeChar :: (Int# -> Word8# -> State# s -> State# s) -> Char -> ST s Int utf8EncodeChar write# c = let x = fromIntegral (ord c) in case () of _ | x > 0 && x <= 0x007f -> do write 0 x return 1 -- NB. '\0' is encoded as '\xC0\x80', not '\0'. This is so that we -- can have 0-terminated UTF-8 strings (see GHC.Base.unpackCStringUtf8). | x <= 0x07ff -> do write 0 (0xC0 .|. ((x `shiftR` 6) .&. 0x1F)) write 1 (0x80 .|. (x .&. 0x3F)) return 2 | x <= 0xffff -> do write 0 (0xE0 .|. (x `shiftR` 12) .&. 0x0F) write 1 (0x80 .|. (x `shiftR` 6) .&. 0x3F) write 2 (0x80 .|. (x .&. 0x3F)) return 3 | otherwise -> do write 0 (0xF0 .|. (x `shiftR` 18)) write 1 (0x80 .|. 
((x `shiftR` 12) .&. 0x3F)) write 2 (0x80 .|. ((x `shiftR` 6) .&. 0x3F)) write 3 (0x80 .|. (x .&. 0x3F)) return 4 where {-# INLINE write #-} write (I# off#) (W# c#) = ST $ \s -> #if !MIN_VERSION_base(4,16,0) case write# off# (narrowWord8# c#) s of #else case write# off# (wordToWord8# c#) s of #endif s -> (# s, () #) utf8EncodePtr :: Ptr Word8 -> String -> IO () utf8EncodePtr (Ptr a#) str = go a# str where go !_ [] = return () go a# (c:cs) = do #if !MIN_VERSION_base(4,16,0) -- writeWord8OffAddr# was taking a Word# I# off# <- stToIO $ utf8EncodeChar (\i w -> writeWord8OffAddr# a# i (extendWord8# w)) c #else I# off# <- stToIO $ utf8EncodeChar (writeWord8OffAddr# a#) c #endif go (a# `plusAddr#` off#) cs utf8EncodeByteArray# :: String -> ByteArray# utf8EncodeByteArray# str = runRW# $ \s -> case utf8EncodedLength str of { I# len# -> case newByteArray# len# s of { (# s, mba# #) -> case go mba# 0# str of { ST f_go -> case f_go s of { (# s, () #) -> case unsafeFreezeByteArray# mba# s of { (# _, ba# #) -> ba# }}}}} where go _ _ [] = return () go mba# i# (c:cs) = do #if !MIN_VERSION_base(4,16,0) -- writeWord8Array# was taking a Word# I# off# <- utf8EncodeChar (\j# w -> writeWord8Array# mba# (i# +# j#) (extendWord8# w)) c #else I# off# <- utf8EncodeChar (\j# -> writeWord8Array# mba# (i# +# j#)) c #endif go mba# (i# +# off#) cs utf8EncodedLength :: String -> Int utf8EncodedLength str = go 0 str where go !n [] = n go n (c:cs) | ord c > 0 && ord c <= 0x007f = go (n+1) cs | ord c <= 0x07ff = go (n+2) cs | ord c <= 0xffff = go (n+3) cs | otherwise = go (n+4) cs #endif /* MIN_VERSION_base(4,18,0) */ ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/0000755000000000000000000000000007346545000017151 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-boot/ghc-boot.cabal0000644000000000000000000000703407346545000021643 0ustar0000000000000000cabal-version: 3.0 -- WARNING: ghc-boot.cabal is automatically generated from ghc-boot.cabal.in by -- ../../configure. 
Make sure you are editing ghc-boot.cabal.in, not -- ghc-boot.cabal. name: ghc-boot version: 9.12.2 license: BSD-3-Clause license-file: LICENSE category: GHC maintainer: ghc-devs@haskell.org bug-reports: https://gitlab.haskell.org/ghc/ghc/issues/new synopsis: Shared functionality between GHC and its boot libraries description: This library is shared between GHC, ghc-pkg, and other boot libraries. . A note about "GHC.Unit.Database": it only deals with the subset of the package database that the compiler cares about: modules paths etc and not package metadata like description, authors etc. It is thus not a library interface to ghc-pkg and is *not* suitable for modifying GHC package databases. . The package database format and this library are constructed in such a way that while ghc-pkg depends on Cabal, the GHC library and program do not have to depend on Cabal. build-type: Custom extra-source-files: changelog.md custom-setup setup-depends: base >= 3 && < 5, Cabal >= 1.6 && <3.14, directory, filepath source-repository head type: git location: https://gitlab.haskell.org/ghc/ghc.git subdir: libraries/ghc-boot Flag bootstrap Description: Enabled when building the stage1 compiler in order to vendor the in-tree `template-haskell` library (including its dependency `ghc-boot-th`), while allowing dependencies to depend on the boot `template-haskell` library. 
See Note [Bootstrapping Template Haskell] Default: False Manual: True Library default-language: Haskell2010 other-extensions: DeriveGeneric, RankNTypes, ScopedTypeVariables default-extensions: NoImplicitPrelude exposed-modules: GHC.BaseDir GHC.Data.ShortText GHC.Data.SizedSeq GHC.Utils.Encoding GHC.Utils.Encoding.UTF8 GHC.LanguageExtensions GHC.Unit.Database GHC.Serialized GHC.ForeignSrcLang GHC.HandleEncoding GHC.Platform.Host GHC.Settings.Utils GHC.UniqueSubdir GHC.Version -- reexport platform modules from ghc-platform reexported-modules: GHC.Platform.ArchOS -- but done by Hadrian autogen-modules: GHC.Version GHC.Platform.Host build-depends: base >= 4.7 && < 4.22, binary == 0.8.*, bytestring >= 0.10 && < 0.13, containers >= 0.5 && < 0.8, directory >= 1.2 && < 1.4, filepath >= 1.3 && < 1.6, deepseq >= 1.4 && < 1.6, ghc-platform >= 0.1, -- reexport modules from ghc-boot-th so that packages -- don't have to import all of ghc-boot and ghc-boot-th. -- It makes the dependency graph easier to understand. 
reexported-modules: GHC.LanguageExtensions.Type , GHC.ForeignSrcLang.Type , GHC.Lexeme if flag(bootstrap) build-depends: ghc-boot-th-next == 9.12.2 else build-depends: ghc-boot-th == 9.12.2 if !os(windows) build-depends: unix >= 2.7 && < 2.9 ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/0000755000000000000000000000000007346545000020447 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap.hs0000644000000000000000000003634407346545000021672 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE UnliftedFFITypes #-} {-| Module : GHC.Exts.Heap Copyright : (c) 2012 Joachim Breitner License : BSD3 Maintainer : Joachim Breitner With this module, you can investigate the heap representation of Haskell values, i.e. to investigate sharing and lazy evaluation. -} module GHC.Exts.Heap ( -- * Closure types Closure , GenClosure(..) , ClosureType(..) , PrimType(..) , WhatNext(..) , WhyBlocked(..) , TsoFlags(..) , HasHeapRep(getClosureData) , getClosureDataFromHeapRep , getClosureDataFromHeapRepPrim -- * Info Table types , StgInfoTable(..) , EntryFunPtr , HalfWord , ItblCodes , itblSize , peekItbl , pokeItbl -- * Cost Centre (profiling) types , StgTSOProfInfo(..) , IndexTable(..) , CostCentre(..) , CostCentreStack(..) -- * Closure inspection , getBoxedClosureData , allClosures -- * Boxes , Box(..) 
, asBox , areBoxesEqual ) where import Prelude import GHC.Exts.Heap.Closures import GHC.Exts.Heap.ClosureTypes import GHC.Exts.Heap.Constants import GHC.Exts.Heap.ProfInfo.Types #if defined(PROFILING) import GHC.Exts.Heap.InfoTableProf #else import GHC.Exts.Heap.InfoTable #endif import GHC.Exts.Heap.Utils import qualified GHC.Exts.Heap.FFIClosures as FFIClosures import qualified GHC.Exts.Heap.ProfInfo.PeekProfInfo as PPI import Data.Bits import Foreign import GHC.Exts import GHC.Int import GHC.Word #include "ghcconfig.h" class HasHeapRep (a :: TYPE rep) where -- | Decode a closure to it's heap representation ('GenClosure'). getClosureData :: a -- ^ Closure to decode. -> IO Closure -- ^ Heap representation of the closure. #if __GLASGOW_HASKELL__ >= 901 instance HasHeapRep (a :: TYPE ('BoxedRep 'Lifted)) where #else instance HasHeapRep (a :: TYPE 'LiftedRep) where #endif getClosureData = getClosureDataFromHeapObject #if __GLASGOW_HASKELL__ >= 901 instance HasHeapRep (a :: TYPE ('BoxedRep 'Unlifted)) where #else instance HasHeapRep (a :: TYPE 'UnliftedRep) where #endif getClosureData x = getClosureDataFromHeapObject (unsafeCoerce# x) instance Int# ~ a => HasHeapRep (a :: TYPE 'IntRep) where getClosureData x = return $ IntClosure { ptipe = PInt, intVal = I# x } instance Word# ~ a => HasHeapRep (a :: TYPE 'WordRep) where getClosureData x = return $ WordClosure { ptipe = PWord, wordVal = W# x } instance Int64# ~ a => HasHeapRep (a :: TYPE 'Int64Rep) where getClosureData x = return $ Int64Closure { ptipe = PInt64, int64Val = I64# (unsafeCoerce# x) } instance Word64# ~ a => HasHeapRep (a :: TYPE 'Word64Rep) where getClosureData x = return $ Word64Closure { ptipe = PWord64, word64Val = W64# (unsafeCoerce# x) } instance Addr# ~ a => HasHeapRep (a :: TYPE 'AddrRep) where getClosureData x = return $ AddrClosure { ptipe = PAddr, addrVal = Ptr x } instance Float# ~ a => HasHeapRep (a :: TYPE 'FloatRep) where getClosureData x = return $ FloatClosure { ptipe = PFloat, floatVal = 
F# x } instance Double# ~ a => HasHeapRep (a :: TYPE 'DoubleRep) where getClosureData x = return $ DoubleClosure { ptipe = PDouble, doubleVal = D# x } -- | Get the heap representation of a closure _at this moment_, even if it is -- unevaluated or an indirection or other exotic stuff. Beware when passing -- something to this function, the same caveats as for -- 'GHC.Exts.Heap.Closures.asBox' apply. -- -- For most use cases 'getClosureData' is an easier to use alternative. -- -- Currently TSO and STACK objects will return `UnsupportedClosure`. This is -- because it is not memory safe to extract TSO and STACK objects (done via -- `unpackClosure#`). Other threads may be mutating those objects and interleave -- with reads in `unpackClosure#`. This is particularly problematic with STACKs -- where pointer values may be overwritten by non-pointer values as the -- corresponding haskell thread runs. getClosureDataFromHeapObject :: a -- ^ Heap object to decode. -> IO Closure -- ^ Heap representation of the closure. getClosureDataFromHeapObject x = do case unpackClosure# x of (# infoTableAddr, heapRep, pointersArray #) -> do let infoTablePtr = Ptr infoTableAddr ptrList = [case indexArray# pointersArray i of (# ptr #) -> Box ptr | I# i <- [0..I# (sizeofArray# pointersArray) - 1] ] infoTable <- peekItbl infoTablePtr case tipe infoTable of TSO -> pure $ UnsupportedClosure infoTable STACK -> pure $ UnsupportedClosure infoTable _ -> getClosureDataFromHeapRep heapRep infoTablePtr ptrList -- | Convert an unpacked heap object, to a `GenClosure b`. The inputs to this -- function can be generated from a heap object using `unpackClosure#`. 
getClosureDataFromHeapRep :: ByteArray# -> Ptr StgInfoTable -> [b] -> IO (GenClosure b) getClosureDataFromHeapRep heapRep infoTablePtr pts = do itbl <- peekItbl infoTablePtr getClosureDataFromHeapRepPrim (dataConNames infoTablePtr) PPI.peekTopCCS itbl heapRep pts getClosureDataFromHeapRepPrim :: IO (String, String, String) -- ^ A continuation used to decode the constructor description field, -- in ghc-debug this code can lead to segfaults because dataConNames -- will dereference a random part of memory. -> (Ptr a -> IO (Maybe CostCentreStack)) -- ^ A continuation which is used to decode a cost centre stack -- In ghc-debug, this code will need to call back into the debuggee to -- fetch the representation of the CCS before decoding it. Using -- `peekTopCCS` for this argument can lead to segfaults in ghc-debug as -- the CCS argument will point outside the copied closure. -> StgInfoTable -- ^ The `StgInfoTable` of the closure, extracted from the heap -- representation. -> ByteArray# -- ^ Heap representation of the closure as returned by `unpackClosure#`. -- This includes all of the object including the header, info table -- pointer, pointer data, and non-pointer data. The ByteArray# may be -- pinned or unpinned. -> [b] -- ^ Pointers in the payload of the closure, extracted from the heap -- representation as returned by `collect_pointers()` in `Heap.c`. The type -- `b` is some representation of a pointer e.g. `Any` or `Ptr Any`. -> IO (GenClosure b) -- ^ Heap representation of the closure. getClosureDataFromHeapRepPrim getConDesc decodeCCS itbl heapRep pts = do let -- heapRep as a list of words. rawHeapWords :: [Word] rawHeapWords = [W# (indexWordArray# heapRep i) | I# i <- [0.. end] ] where nelems = I# (sizeofByteArray# heapRep) `div` wORD_SIZE end = fromIntegral nelems - 1 -- Just the payload of rawHeapWords (no header). payloadWords :: [Word] payloadWords = drop (closureTypeHeaderSize (tipe itbl)) rawHeapWords -- The non-pointer words in the payload. 
Only valid for closures with a -- "pointers first" layout. Not valid for bit field layout. npts :: [Word] npts = drop (closureTypeHeaderSize (tipe itbl) + length pts) rawHeapWords case tipe itbl of t | t >= CONSTR && t <= CONSTR_NOCAF -> do (p, m, n) <- getConDesc pure $ ConstrClosure itbl pts npts p m n t | t >= THUNK && t <= THUNK_STATIC -> do pure $ ThunkClosure itbl pts npts THUNK_SELECTOR -> case pts of [] -> fail "Expected at least 1 ptr argument to THUNK_SELECTOR" hd : _ -> pure $ SelectorClosure itbl hd t | t >= FUN && t <= FUN_STATIC -> do pure $ FunClosure itbl pts npts AP -> case pts of [] -> fail "Expected at least 1 ptr argument to AP" hd : tl -> case payloadWords of -- We expect at least the arity, n_args, and fun fields splitWord : _ : _ -> pure $ APClosure itbl #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif hd tl _ -> fail "Expected at least 2 raw words to AP" PAP -> case pts of [] -> fail "Expected at least 1 ptr argument to PAP" hd : tl -> case payloadWords of -- We expect at least the arity, n_args, and fun fields splitWord : _ : _ -> pure $ PAPClosure itbl #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif hd tl _ -> fail "Expected at least 2 raw words to PAP" AP_STACK -> case pts of [] -> fail "Expected at least 1 ptr argument to AP_STACK" hd : tl -> pure $ APStackClosure itbl hd tl IND -> case pts of [] -> fail "Expected at least 1 ptr argument to IND" hd : _ -> pure $ IndClosure itbl hd IND_STATIC -> case pts of [] -> fail "Expected at least 1 ptr argument to IND_STATIC" hd : _ -> pure $ IndClosure itbl hd BLACKHOLE -> case pts of [] -> fail "Expected at least 1 ptr argument to BLACKHOLE" hd : _ -> pure $ 
BlackholeClosure itbl hd BCO -> case pts of pts0 : pts1 : pts2 : _ -> case payloadWords of _ : _ : _ : splitWord : payloadRest -> pure $ BCOClosure itbl pts0 pts1 pts2 #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif payloadRest _ -> fail $ "Expected at least 4 words to BCO, found " ++ show (length payloadWords) _ -> fail $ "Expected at least 3 ptr argument to BCO, found " ++ show (length pts) ARR_WORDS -> case payloadWords of [] -> fail $ "Expected at least 1 words to ARR_WORDS, found " ++ show (length payloadWords) hd : tl -> pure $ ArrWordsClosure itbl hd tl t | t >= MUT_ARR_PTRS_CLEAN && t <= MUT_ARR_PTRS_FROZEN_CLEAN -> case payloadWords of p0 : p1 : _ -> pure $ MutArrClosure itbl p0 p1 pts _ -> fail $ "Expected at least 2 words to MUT_ARR_PTRS_* " ++ "found " ++ show (length payloadWords) t | t >= SMALL_MUT_ARR_PTRS_CLEAN && t <= SMALL_MUT_ARR_PTRS_FROZEN_CLEAN -> case payloadWords of [] -> fail $ "Expected at least 1 word to SMALL_MUT_ARR_PTRS_* " ++ "found " ++ show (length payloadWords) hd : _ -> pure $ SmallMutArrClosure itbl hd pts t | t == MUT_VAR_CLEAN || t == MUT_VAR_DIRTY -> case pts of [] -> fail $ "Expected at least 1 words to MUT_VAR, found " ++ show (length pts) hd : _ -> pure $ MutVarClosure itbl hd t | t == MVAR_CLEAN || t == MVAR_DIRTY -> case pts of pts0 : pts1 : pts2 : _ -> pure $ MVarClosure itbl pts0 pts1 pts2 _ -> fail $ "Expected at least 3 ptrs to MVAR, found " ++ show (length pts) BLOCKING_QUEUE | [_link, bh, _owner, msg] <- pts -> pure $ BlockingQueueClosure itbl _link bh _owner msg WEAK -> case pts of pts0 : pts1 : pts2 : pts3 : rest -> pure $ WeakClosure { info = itbl , cfinalizers = pts0 , key = pts1 , value = pts2 , finalizer = pts3 , weakLink = case rest of [] -> Nothing [p] -> Just p _ -> error $ "Expected 4 or 5 words in WEAK, but found more: " ++ show (length 
pts) } _ -> error $ "Expected 4 or 5 words in WEAK, but found less: " ++ show (length pts) TSO | ( u_lnk : u_gbl_lnk : tso_stack : u_trec : u_blk_ex : u_bq : other) <- pts -> withArray rawHeapWords (\ptr -> do fields <- FFIClosures.peekTSOFields decodeCCS ptr pure $ TSOClosure { info = itbl , link = u_lnk , global_link = u_gbl_lnk , tsoStack = tso_stack , trec = u_trec , blocked_exceptions = u_blk_ex , bq = u_bq , thread_label = case other of [tl] -> Just tl [] -> Nothing _ -> error $ "thead_label:Expected 0 or 1 extra arguments" , what_next = FFIClosures.tso_what_next fields , why_blocked = FFIClosures.tso_why_blocked fields , flags = FFIClosures.tso_flags fields , threadId = FFIClosures.tso_threadId fields , saved_errno = FFIClosures.tso_saved_errno fields , tso_dirty = FFIClosures.tso_dirty fields , alloc_limit = FFIClosures.tso_alloc_limit fields , tot_stack_size = FFIClosures.tso_tot_stack_size fields , prof = FFIClosures.tso_prof fields }) | otherwise -> fail $ "Expected at least 6 ptr arguments to TSO, found " ++ show (length pts) STACK | [] <- pts -> withArray rawHeapWords (\ptr -> do fields <- FFIClosures.peekStackFields ptr pure $ StackClosure { info = itbl , stack_size = FFIClosures.stack_size fields , stack_dirty = FFIClosures.stack_dirty fields #if __GLASGOW_HASKELL__ >= 811 , stack_marking = FFIClosures.stack_marking fields #endif }) | otherwise -> fail $ "Expected 0 ptr argument to STACK, found " ++ show (length pts) _ -> pure $ UnsupportedClosure itbl -- | Like 'getClosureData', but taking a 'Box', so it is easier to work with. 
getBoxedClosureData :: Box -> IO Closure getBoxedClosureData (Box a) = getClosureData a ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/0000755000000000000000000000000007346545000021324 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/ClosureTypes.hs0000644000000000000000000000426707346545000024332 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.ClosureTypes ( ClosureType(..) , closureTypeHeaderSize ) where import Prelude -- See note [Why do we import Prelude here?] #if __GLASGOW_HASKELL__ >= 909 import GHC.Internal.ClosureTypes #else import GHC.Generics {- --------------------------------------------- -- Enum representing closure types -- This is a mirror of: -- rts/include/rts/storage/ClosureTypes.h -- ---------------------------------------------} data ClosureType = INVALID_OBJECT | CONSTR | CONSTR_1_0 | CONSTR_0_1 | CONSTR_2_0 | CONSTR_1_1 | CONSTR_0_2 | CONSTR_NOCAF | FUN | FUN_1_0 | FUN_0_1 | FUN_2_0 | FUN_1_1 | FUN_0_2 | FUN_STATIC | THUNK | THUNK_1_0 | THUNK_0_1 | THUNK_2_0 | THUNK_1_1 | THUNK_0_2 | THUNK_STATIC | THUNK_SELECTOR | BCO | AP | PAP | AP_STACK | IND | IND_STATIC | RET_BCO | RET_SMALL | RET_BIG | RET_FUN | UPDATE_FRAME | CATCH_FRAME | UNDERFLOW_FRAME | STOP_FRAME | BLOCKING_QUEUE | BLACKHOLE | MVAR_CLEAN | MVAR_DIRTY | TVAR | ARR_WORDS | MUT_ARR_PTRS_CLEAN | MUT_ARR_PTRS_DIRTY | MUT_ARR_PTRS_FROZEN_DIRTY | MUT_ARR_PTRS_FROZEN_CLEAN | MUT_VAR_CLEAN | MUT_VAR_DIRTY | WEAK | PRIM | MUT_PRIM | TSO | STACK | TREC_CHUNK | ATOMICALLY_FRAME | CATCH_RETRY_FRAME | CATCH_STM_FRAME | WHITEHOLE | SMALL_MUT_ARR_PTRS_CLEAN | SMALL_MUT_ARR_PTRS_DIRTY | SMALL_MUT_ARR_PTRS_FROZEN_DIRTY | SMALL_MUT_ARR_PTRS_FROZEN_CLEAN | COMPACT_NFDATA | CONTINUATION | N_CLOSURE_TYPES deriving (Enum, Eq, Ord, Show, Generic) #endif -- | Return the size of the closures header in words closureTypeHeaderSize :: ClosureType -> Int closureTypeHeaderSize closType = case closType of ct | 
THUNK <= ct && ct <= THUNK_0_2 -> thunkHeader ct | ct == THUNK_SELECTOR -> thunkHeader ct | ct == AP -> thunkHeader ct | ct == AP_STACK -> thunkHeader _ -> header where header = 1 + prof thunkHeader = 2 + prof #if defined(PROFILING) prof = 2 #else prof = 0 #endif ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/Closures.hs0000644000000000000000000004343407346545000023467 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ForeignFunctionInterface #-} {-# LANGUAGE GHCForeignImportPrim #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE UnliftedFFITypes #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveTraversable #-} -- Late cost centres introduce a thunk in the asBox function, which leads to -- an additional wrapper being added to any value placed inside a box. {-# OPTIONS_GHC -fno-prof-late #-} module GHC.Exts.Heap.Closures ( -- * Closures Closure , GenClosure(..) , PrimType(..) , WhatNext(..) , WhyBlocked(..) , TsoFlags(..) , allClosures , closureSize -- * Stack , StgStackClosure , GenStgStackClosure(..) , StackFrame , GenStackFrame(..) , StackField , GenStackField(..) -- * Boxes , Box(..) , areBoxesEqual , asBox ) where import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.Constants #if defined(PROFILING) import GHC.Exts.Heap.InfoTable () -- see Note [No way-dependent imports] import GHC.Exts.Heap.InfoTableProf #else import GHC.Exts.Heap.InfoTable import GHC.Exts.Heap.InfoTableProf () -- see Note [No way-dependent imports] {- Note [No way-dependent imports] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `ghc -M` currently assumes that the imports for a module are the same in every way. This is arguably a bug, but breaking this assumption by importing different things in different ways can cause trouble. For example, this module in the profiling way imports and uses GHC.Exts.Heap.InfoTableProf. 
When it was not also imported in the vanilla way, there were intermittent build failures due to this module being compiled in the profiling way before GHC.Exts.Heap.InfoTableProf in the profiling way. (#15197) -} #endif import GHC.Exts.Heap.ProfInfo.Types import Data.Bits import Data.Foldable (toList) import Data.Int import Data.Word import GHC.Exts import GHC.Generics import Numeric ------------------------------------------------------------------------ -- Boxes foreign import prim "Ghclib_aToWordzh" aToWord# :: Any -> Word# foreign import prim "Ghclib_reallyUnsafePtrEqualityUpToTag" reallyUnsafePtrEqualityUpToTag# :: Any -> Any -> Int# -- | An arbitrary Haskell value in a safe Box. The point is that even -- unevaluated thunks can safely be moved around inside the Box, and when -- required, e.g. in 'getBoxedClosureData', the function knows how far it has -- to evaluate the argument. data Box = Box Any instance Show Box where -- From libraries/base/GHC/Ptr.lhs showsPrec _ (Box a) rs = -- unsafePerformIO (print "↓" >> pClosure a) `seq` pad_out (showHex addr "") ++ (if tag>0 then "/" ++ show tag else "") ++ rs where ptr = W# (aToWord# a) tag = ptr .&. fromIntegral tAG_MASK -- ((1 `shiftL` TAG_BITS) -1) addr = ptr - tag pad_out ls = '0':'x':ls -- |This takes an arbitrary value and puts it into a box. -- Note that calls like -- -- > asBox (head list) -- -- will put the thunk \"head list\" into the box, /not/ the element at the head -- of the list. For that, use careful case expressions: -- -- > case list of x:_ -> asBox x asBox :: a -> Box asBox x = Box (unsafeCoerce# x) -- | Boxes can be compared, but this is not pure, as different heap objects can, -- after garbage collection, become the same object. 
areBoxesEqual :: Box -> Box -> IO Bool areBoxesEqual (Box a) (Box b) = case reallyUnsafePtrEqualityUpToTag# a b of 0# -> pure False _ -> pure True ------------------------------------------------------------------------ -- Closures type Closure = GenClosure Box -- | This is the representation of a Haskell value on the heap. It reflects -- -- -- The data type is parametrized by `b`: the type to store references in. -- Usually this is a 'Box' with the type synonym 'Closure'. -- -- All Heap objects have the same basic layout. A header containing a pointer to -- the info table and a payload with various fields. The @info@ field below -- always refers to the info table pointed to by the header. The remaining -- fields are the payload. -- -- See -- -- for more information. data GenClosure b = -- | A data constructor ConstrClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments , pkg :: !String -- ^ Package name , modl :: !String -- ^ Module name , name :: !String -- ^ Constructor name } -- | A function | FunClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments } -- | A thunk, an expression not obviously in head normal form | ThunkClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments } -- | A thunk which performs a simple selection operation | SelectorClosure { info :: !StgInfoTable , selectee :: !b -- ^ Pointer to the object being -- selected from } -- | An unsaturated function application | PAPClosure { info :: !StgInfoTable , arity :: !HalfWord -- ^ Arity of the partial application , n_args :: !HalfWord -- ^ Size of the payload in words , fun :: !b -- ^ Pointer to a 'FunClosure' , payload :: ![b] -- ^ Sequence of already applied -- arguments } -- In GHCi, if Linker.h would allow a reverse lookup, we could for exported -- functions fun actually find the name here. 
-- At least the other direction works via "lookupSymbol -- base_GHCziBase_zpzp_closure" and yields the same address (up to tags) -- | A function application | APClosure { info :: !StgInfoTable , arity :: !HalfWord -- ^ Always 0 , n_args :: !HalfWord -- ^ Size of payload in words , fun :: !b -- ^ Pointer to a 'FunClosure' , payload :: ![b] -- ^ Sequence of already applied -- arguments } -- | A suspended thunk evaluation | APStackClosure { info :: !StgInfoTable , fun :: !b -- ^ Function closure , payload :: ![b] -- ^ Stack right before suspension } -- | A pointer to another closure, introduced when a thunk is updated -- to point at its value | IndClosure { info :: !StgInfoTable , indirectee :: !b -- ^ Target closure } -- | A byte-code object (BCO) which can be interpreted by GHC's byte-code -- interpreter (e.g. as used by GHCi) | BCOClosure { info :: !StgInfoTable , instrs :: !b -- ^ A pointer to an ArrWords -- of instructions , literals :: !b -- ^ A pointer to an ArrWords -- of literals , bcoptrs :: !b -- ^ A pointer to an ArrWords -- of byte code objects , arity :: !HalfWord -- ^ The arity of this BCO , size :: !HalfWord -- ^ The size of this BCO in words , bitmap :: ![Word] -- ^ An StgLargeBitmap describing the -- pointerhood of its args/free vars } -- | A thunk under evaluation by another thread | BlackholeClosure { info :: !StgInfoTable , indirectee :: !b -- ^ The target closure } -- | A @ByteArray#@ | ArrWordsClosure { info :: !StgInfoTable , bytes :: !Word -- ^ Size of array in bytes , arrWords :: ![Word] -- ^ Array payload } -- | A @MutableByteArray#@ | MutArrClosure { info :: !StgInfoTable , mccPtrs :: !Word -- ^ Number of pointers , mccSize :: !Word -- ^ ?? 
Closures.h vs ClosureMacros.h , mccPayload :: ![b] -- ^ Array payload -- Card table ignored } -- | A @SmallMutableArray#@ -- -- @since 8.10.1 | SmallMutArrClosure { info :: !StgInfoTable , mccPtrs :: !Word -- ^ Number of pointers , mccPayload :: ![b] -- ^ Array payload } -- | An @MVar#@, with a queue of thread state objects blocking on them | MVarClosure { info :: !StgInfoTable , queueHead :: !b -- ^ Pointer to head of queue , queueTail :: !b -- ^ Pointer to tail of queue , value :: !b -- ^ Pointer to closure } -- | An @IOPort#@, with a queue of thread state objects blocking on them | IOPortClosure { info :: !StgInfoTable , queueHead :: !b -- ^ Pointer to head of queue , queueTail :: !b -- ^ Pointer to tail of queue , value :: !b -- ^ Pointer to closure } -- | A @MutVar#@ | MutVarClosure { info :: !StgInfoTable , var :: !b -- ^ Pointer to contents } -- | An STM blocking queue. | BlockingQueueClosure { info :: !StgInfoTable , link :: !b -- ^ ?? Here so it looks like an IND , blackHole :: !b -- ^ The blackhole closure , owner :: !b -- ^ The owning thread state object , queue :: !b -- ^ ?? } | WeakClosure { info :: !StgInfoTable , cfinalizers :: !b , key :: !b , value :: !b , finalizer :: !b , weakLink :: !(Maybe b) -- ^ next weak pointer for the capability } -- | Representation of StgTSO: A Thread State Object. The values for -- 'what_next', 'why_blocked' and 'flags' are defined in @Constants.h@. | TSOClosure { info :: !StgInfoTable -- pointers , link :: !b , global_link :: !b , tsoStack :: !b -- ^ stackobj from StgTSO , trec :: !b , blocked_exceptions :: !b , bq :: !b , thread_label :: !(Maybe b) -- values , what_next :: !WhatNext , why_blocked :: !WhyBlocked , flags :: ![TsoFlags] , threadId :: !Word64 , saved_errno :: !Word32 , tso_dirty :: !Word32 -- ^ non-zero => dirty , alloc_limit :: !Int64 , tot_stack_size :: !Word32 , prof :: !(Maybe StgTSOProfInfo) } -- | Representation of StgStack: The 'tsoStack ' of a 'TSOClosure'. 
| StackClosure { info :: !StgInfoTable , stack_size :: !Word32 -- ^ stack size in *words* , stack_dirty :: !Word8 -- ^ non-zero => dirty #if __GLASGOW_HASKELL__ >= 811 , stack_marking :: !Word8 #endif } ------------------------------------------------------------ -- Unboxed unlifted closures -- | Primitive Int | IntClosure { ptipe :: PrimType , intVal :: !Int } -- | Primitive Word | WordClosure { ptipe :: PrimType , wordVal :: !Word } -- | Primitive Int64 | Int64Closure { ptipe :: PrimType , int64Val :: !Int64 } -- | Primitive Word64 | Word64Closure { ptipe :: PrimType , word64Val :: !Word64 } -- | Primitive Addr | AddrClosure { ptipe :: PrimType , addrVal :: !(Ptr ()) } -- | Primitive Float | FloatClosure { ptipe :: PrimType , floatVal :: !Float } -- | Primitive Double | DoubleClosure { ptipe :: PrimType , doubleVal :: !Double } ----------------------------------------------------------- -- Anything else -- | Another kind of closure | OtherClosure { info :: !StgInfoTable , hvalues :: ![b] , rawWords :: ![Word] } | UnsupportedClosure { info :: !StgInfoTable } -- | A primitive word from a bitmap encoded stack frame payload -- -- The type itself cannot be restored (i.e. it might represent a Word8# -- or an Int#). | UnknownTypeWordSizedPrimitive { wordVal :: !Word } deriving (Show, Generic, Functor, Foldable, Traversable) type StgStackClosure = GenStgStackClosure Box -- | A decoded @StgStack@ with `StackFrame`s -- -- Stack related data structures (`GenStgStackClosure`, `GenStackField`, -- `GenStackFrame`) are defined separately from `GenClosure` as their related -- functions are very different. Though, both are closures in the sense of RTS -- structures, their decoding logic differs: While it's safe to keep a reference -- to a heap closure, the garbage collector does not update references to stack -- located closures. -- -- Additionally, stack frames don't appear outside of the stack. 
Thus, keeping -- `GenStackFrame` and `GenClosure` separated, makes these types more precise -- (in the sense what values to expect.) data GenStgStackClosure b = GenStgStackClosure { ssc_info :: !StgInfoTable , ssc_stack_size :: !Word32 -- ^ stack size in *words* , ssc_stack :: ![GenStackFrame b] } deriving (Foldable, Functor, Generic, Show, Traversable) type StackField = GenStackField Box -- | Bitmap-encoded payload on the stack data GenStackField b -- | A non-pointer field = StackWord !Word -- | A pointer field | StackBox !b deriving (Foldable, Functor, Generic, Show, Traversable) type StackFrame = GenStackFrame Box -- | A single stack frame data GenStackFrame b = UpdateFrame { info_tbl :: !StgInfoTable , updatee :: !b } | CatchFrame { info_tbl :: !StgInfoTable , handler :: !b } | CatchStmFrame { info_tbl :: !StgInfoTable , catchFrameCode :: !b , handler :: !b } | CatchRetryFrame { info_tbl :: !StgInfoTable , running_alt_code :: !Word , first_code :: !b , alt_code :: !b } | AtomicallyFrame { info_tbl :: !StgInfoTable , atomicallyFrameCode :: !b , result :: !b } | UnderflowFrame { info_tbl :: !StgInfoTable , nextChunk :: !(GenStgStackClosure b) } | StopFrame { info_tbl :: !StgInfoTable } | RetSmall { info_tbl :: !StgInfoTable , stack_payload :: ![GenStackField b] } | RetBig { info_tbl :: !StgInfoTable , stack_payload :: ![GenStackField b] } | RetFun { info_tbl :: !StgInfoTable , retFunSize :: !Word , retFunFun :: !b , retFunPayload :: ![GenStackField b] } | RetBCO { info_tbl :: !StgInfoTable , bco :: !b -- ^ always a BCOClosure , bcoArgs :: ![GenStackField b] } deriving (Foldable, Functor, Generic, Show, Traversable) data PrimType = PInt | PWord | PInt64 | PWord64 | PAddr | PFloat | PDouble deriving (Eq, Show, Generic, Ord) data WhatNext = ThreadRunGHC | ThreadInterpret | ThreadKilled | ThreadComplete | WhatNextUnknownValue Word16 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) data WhyBlocked = NotBlocked | BlockedOnMVar | BlockedOnMVarRead | 
BlockedOnBlackHole | BlockedOnRead | BlockedOnWrite | BlockedOnDelay | BlockedOnSTM | BlockedOnDoProc | BlockedOnCCall | BlockedOnCCall_Interruptible | BlockedOnMsgThrowTo | ThreadMigrating | WhyBlockedUnknownValue Word16 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) data TsoFlags = TsoLocked | TsoBlockx | TsoInterruptible | TsoStoppedOnBreakpoint | TsoMarked | TsoSqueezed | TsoAllocLimit | TsoFlagsUnknownValue Word32 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) -- | For generic code, this function returns all referenced closures. allClosures :: GenClosure b -> [b] allClosures (ConstrClosure {..}) = ptrArgs allClosures (ThunkClosure {..}) = ptrArgs allClosures (SelectorClosure {..}) = [selectee] allClosures (IndClosure {..}) = [indirectee] allClosures (BlackholeClosure {..}) = [indirectee] allClosures (APClosure {..}) = fun:payload allClosures (PAPClosure {..}) = fun:payload allClosures (APStackClosure {..}) = fun:payload allClosures (BCOClosure {..}) = [instrs,literals,bcoptrs] allClosures (ArrWordsClosure {}) = [] allClosures (MutArrClosure {..}) = mccPayload allClosures (SmallMutArrClosure {..}) = mccPayload allClosures (MutVarClosure {..}) = [var] allClosures (MVarClosure {..}) = [queueHead,queueTail,value] allClosures (IOPortClosure {..}) = [queueHead,queueTail,value] allClosures (FunClosure {..}) = ptrArgs allClosures (BlockingQueueClosure {..}) = [link, blackHole, owner, queue] allClosures (WeakClosure {..}) = [cfinalizers, key, value, finalizer] ++ Data.Foldable.toList weakLink allClosures (OtherClosure {..}) = hvalues allClosures _ = [] -- | Get the size of the top-level closure in words. -- Includes header and payload. Does not follow pointers. 
-- -- @since 8.10.1 closureSize :: Box -> Int closureSize (Box x) = I# (closureSize# x) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/Constants.hsc0000644000000000000000000000060507346545000024000 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.Constants ( wORD_SIZE , tAG_MASK , wORD_SIZE_IN_BITS ) where #include "MachDeps.h" import Prelude -- See note [Why do we import Prelude here?] import Data.Bits wORD_SIZE, tAG_MASK, wORD_SIZE_IN_BITS :: Int wORD_SIZE = #const SIZEOF_HSWORD wORD_SIZE_IN_BITS = #const WORD_SIZE_IN_BITS tAG_MASK = (1 `shift` #const TAG_BITS) - 1 ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures.hs0000644000000000000000000000420407346545000024004 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.FFIClosures (module Reexport) where -- NOTE [hsc and CPP workaround] -- -- # Problem -- -- Often, .hsc files are used to get the correct offsets of C struct fields. -- Those structs may be affected by CPP directives e.g. profiled vs not profiled -- closure headers is affected by the PROFILED cpp define. Since we are building -- multiple variants of the RTS, we must support all possible offsets e.g. by -- running hsc2hs with cpp defines corresponding to each RTS flavour. The -- problem is that GHC's build system runs hsc2hs *only once* per .hsc file -- without properly setting cpp defines. This results in the same (probably -- incorrect) offsets into our C structs. -- -- -- # Workaround -- -- To work around this issue, we create multiple .hsc files each manually -- defining thir cpp defines (see e.g. FFIClosures_ProfilingDisabled.hsc and -- FFIClosures_ProfilingEnabled.hsc). Then we rely on cpp defines working -- correctly in .hs files and use CPP to switch on which .hsc module to -- re-export (see below). In each case we import the desired .hsc module as -- `Reexport` and we import `()` (i.e. 
nothing) from all other .hsc variants -- just so that the build system sees all .hsc file as dependencies. -- -- -- # Future Work -- -- - Duplication of the code in the .hsc files could be reduced simply by -- placing the code in a single .hsc.in file and `#include`ing it from each -- .hsc file. The .hsc files would only be responsible for setting the correct -- cpp defines. This currently doesn't work as hadrian doesn't know to copy -- the .hsc.in file to the build directory. -- - The correct solution would be for the build system to run `hsc2hs` with the -- correct cpp defines once per RTS flavour. -- #if defined(PROFILING) import GHC.Exts.Heap.FFIClosures_ProfilingEnabled as Reexport import GHC.Exts.Heap.FFIClosures_ProfilingDisabled () -- See Note [No way-dependent imports] in GHC.Exts.Heap.Closures #else import GHC.Exts.Heap.FFIClosures_ProfilingDisabled as Reexport import GHC.Exts.Heap.FFIClosures_ProfilingEnabled () -- See Note [No way-dependent imports] in GHC.Exts.Heap.Closures #endif ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures_ProfilingDisabled.hsc0000644000000000000000000001171107346545000027611 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.FFIClosures_ProfilingDisabled where -- See [hsc and CPP workaround] #undef PROFILING #include "Rts.h" import Prelude import Foreign import GHC.Exts import GHC.Exts.Heap.ProfInfo.PeekProfInfo import GHC.Exts.Heap.ProfInfo.Types import GHC.Exts.Heap.Closures(WhatNext(..), WhyBlocked(..), TsoFlags(..)) data TSOFields = TSOFields { tso_what_next :: WhatNext, tso_why_blocked :: WhyBlocked, tso_flags :: [TsoFlags], -- Unfortunately block_info is a union without clear discriminator. 
-- block_info :: TDB, tso_threadId :: Word64, tso_saved_errno :: Word32, tso_dirty:: Word32, tso_alloc_limit :: Int64, tso_tot_stack_size :: Word32, tso_prof :: Maybe StgTSOProfInfo } -- | Get non-pointer fields from @StgTSO_@ (@TSO.h@) peekTSOFields :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO TSOFields peekTSOFields decodeCCS ptr = do what_next' <- (#peek struct StgTSO_, what_next) ptr why_blocked' <- (#peek struct StgTSO_, why_blocked) ptr flags' <- (#peek struct StgTSO_, flags) ptr threadId' <- (#peek struct StgTSO_, id) ptr saved_errno' <- (#peek struct StgTSO_, saved_errno) ptr dirty' <- (#peek struct StgTSO_, dirty) ptr alloc_limit' <- (#peek struct StgTSO_, alloc_limit) ptr tot_stack_size' <- (#peek struct StgTSO_, tot_stack_size) ptr tso_prof' <- peekStgTSOProfInfo decodeCCS ptr return TSOFields { tso_what_next = parseWhatNext what_next', tso_why_blocked = parseWhyBlocked why_blocked', tso_flags = parseTsoFlags flags', tso_threadId = threadId', tso_saved_errno = saved_errno', tso_dirty = dirty', tso_alloc_limit = alloc_limit', tso_tot_stack_size = tot_stack_size', tso_prof = tso_prof' } parseWhatNext :: Word16 -> WhatNext parseWhatNext w = case w of (#const ThreadRunGHC) -> ThreadRunGHC (#const ThreadInterpret) -> ThreadInterpret (#const ThreadKilled) -> ThreadKilled (#const ThreadComplete) -> ThreadComplete _ -> WhatNextUnknownValue w parseWhyBlocked :: Word16 -> WhyBlocked parseWhyBlocked w = case w of (#const NotBlocked) -> NotBlocked (#const BlockedOnMVar) -> BlockedOnMVar (#const BlockedOnMVarRead) -> BlockedOnMVarRead (#const BlockedOnBlackHole) -> BlockedOnBlackHole (#const BlockedOnRead) -> BlockedOnRead (#const BlockedOnWrite) -> BlockedOnWrite (#const BlockedOnDelay) -> BlockedOnDelay (#const BlockedOnSTM) -> BlockedOnSTM (#const BlockedOnDoProc) -> BlockedOnDoProc (#const BlockedOnCCall) -> BlockedOnCCall (#const BlockedOnCCall_Interruptible) -> BlockedOnCCall_Interruptible (#const BlockedOnMsgThrowTo) -> BlockedOnMsgThrowTo 
(#const ThreadMigrating) -> ThreadMigrating _ -> WhyBlockedUnknownValue w parseTsoFlags :: Word32 -> [TsoFlags] parseTsoFlags w | isSet (#const TSO_LOCKED) w = TsoLocked : parseTsoFlags (unset (#const TSO_LOCKED) w) | isSet (#const TSO_BLOCKEX) w = TsoBlockx : parseTsoFlags (unset (#const TSO_BLOCKEX) w) | isSet (#const TSO_INTERRUPTIBLE) w = TsoInterruptible : parseTsoFlags (unset (#const TSO_INTERRUPTIBLE) w) | isSet (#const TSO_STOPPED_ON_BREAKPOINT) w = TsoStoppedOnBreakpoint : parseTsoFlags (unset (#const TSO_STOPPED_ON_BREAKPOINT) w) | isSet (#const TSO_MARKED) w = TsoMarked : parseTsoFlags (unset (#const TSO_MARKED) w) | isSet (#const TSO_SQUEEZED) w = TsoSqueezed : parseTsoFlags (unset (#const TSO_SQUEEZED) w) | isSet (#const TSO_ALLOC_LIMIT) w = TsoAllocLimit : parseTsoFlags (unset (#const TSO_ALLOC_LIMIT) w) parseTsoFlags 0 = [] parseTsoFlags w = [TsoFlagsUnknownValue w] isSet :: Word32 -> Word32 -> Bool isSet bitMask w = w .&. bitMask /= 0 unset :: Word32 -> Word32 -> Word32 unset bitMask w = w `xor` bitMask data StackFields = StackFields { stack_size :: Word32, stack_dirty :: Word8, #if __GLASGOW_HASKELL__ >= 811 stack_marking :: Word8, #endif stack_sp :: Addr## } -- | Get non-closure fields from @StgStack_@ (@TSO.h@) peekStackFields :: Ptr a -> IO StackFields peekStackFields ptr = do stack_size' <- (#peek struct StgStack_, stack_size) ptr ::IO Word32 dirty' <- (#peek struct StgStack_, dirty) ptr #if __GLASGOW_HASKELL__ >= 811 marking' <- (#peek struct StgStack_, marking) ptr #endif Ptr sp' <- (#peek struct StgStack_, sp) ptr -- TODO decode the stack. 
return StackFields { stack_size = stack_size', stack_dirty = dirty', #if __GLASGOW_HASKELL__ >= 811 stack_marking = marking', #endif stack_sp = sp' } ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures_ProfilingEnabled.hsc0000644000000000000000000001171007346545000027433 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.FFIClosures_ProfilingEnabled where -- See [hsc and CPP workaround] #define PROFILING #include "Rts.h" import Prelude import Foreign import GHC.Exts import GHC.Exts.Heap.ProfInfo.PeekProfInfo import GHC.Exts.Heap.ProfInfo.Types import GHC.Exts.Heap.Closures(WhatNext(..), WhyBlocked(..), TsoFlags(..)) data TSOFields = TSOFields { tso_what_next :: WhatNext, tso_why_blocked :: WhyBlocked, tso_flags :: [TsoFlags], -- Unfortunately block_info is a union without clear discriminator. -- block_info :: TDB, tso_threadId :: Word64, tso_saved_errno :: Word32, tso_dirty:: Word32, tso_alloc_limit :: Int64, tso_tot_stack_size :: Word32, tso_prof :: Maybe StgTSOProfInfo } -- | Get non-pointer fields from @StgTSO_@ (@TSO.h@) peekTSOFields :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO TSOFields peekTSOFields decodeCCS ptr = do what_next' <- (#peek struct StgTSO_, what_next) ptr why_blocked' <- (#peek struct StgTSO_, why_blocked) ptr flags' <- (#peek struct StgTSO_, flags) ptr threadId' <- (#peek struct StgTSO_, id) ptr saved_errno' <- (#peek struct StgTSO_, saved_errno) ptr dirty' <- (#peek struct StgTSO_, dirty) ptr alloc_limit' <- (#peek struct StgTSO_, alloc_limit) ptr tot_stack_size' <- (#peek struct StgTSO_, tot_stack_size) ptr tso_prof' <- peekStgTSOProfInfo decodeCCS ptr return TSOFields { tso_what_next = parseWhatNext what_next', tso_why_blocked = parseWhyBlocked why_blocked', tso_flags = parseTsoFlags flags', tso_threadId = threadId', tso_saved_errno = saved_errno', tso_dirty = dirty', tso_alloc_limit = alloc_limit', tso_tot_stack_size = tot_stack_size', tso_prof = tso_prof' } 
parseWhatNext :: Word16 -> WhatNext parseWhatNext w = case w of (#const ThreadRunGHC) -> ThreadRunGHC (#const ThreadInterpret) -> ThreadInterpret (#const ThreadKilled) -> ThreadKilled (#const ThreadComplete) -> ThreadComplete _ -> WhatNextUnknownValue w parseWhyBlocked :: Word16 -> WhyBlocked parseWhyBlocked w = case w of (#const NotBlocked) -> NotBlocked (#const BlockedOnMVar) -> BlockedOnMVar (#const BlockedOnMVarRead) -> BlockedOnMVarRead (#const BlockedOnBlackHole) -> BlockedOnBlackHole (#const BlockedOnRead) -> BlockedOnRead (#const BlockedOnWrite) -> BlockedOnWrite (#const BlockedOnDelay) -> BlockedOnDelay (#const BlockedOnSTM) -> BlockedOnSTM (#const BlockedOnDoProc) -> BlockedOnDoProc (#const BlockedOnCCall) -> BlockedOnCCall (#const BlockedOnCCall_Interruptible) -> BlockedOnCCall_Interruptible (#const BlockedOnMsgThrowTo) -> BlockedOnMsgThrowTo (#const ThreadMigrating) -> ThreadMigrating _ -> WhyBlockedUnknownValue w parseTsoFlags :: Word32 -> [TsoFlags] parseTsoFlags w | isSet (#const TSO_LOCKED) w = TsoLocked : parseTsoFlags (unset (#const TSO_LOCKED) w) | isSet (#const TSO_BLOCKEX) w = TsoBlockx : parseTsoFlags (unset (#const TSO_BLOCKEX) w) | isSet (#const TSO_INTERRUPTIBLE) w = TsoInterruptible : parseTsoFlags (unset (#const TSO_INTERRUPTIBLE) w) | isSet (#const TSO_STOPPED_ON_BREAKPOINT) w = TsoStoppedOnBreakpoint : parseTsoFlags (unset (#const TSO_STOPPED_ON_BREAKPOINT) w) | isSet (#const TSO_MARKED) w = TsoMarked : parseTsoFlags (unset (#const TSO_MARKED) w) | isSet (#const TSO_SQUEEZED) w = TsoSqueezed : parseTsoFlags (unset (#const TSO_SQUEEZED) w) | isSet (#const TSO_ALLOC_LIMIT) w = TsoAllocLimit : parseTsoFlags (unset (#const TSO_ALLOC_LIMIT) w) parseTsoFlags 0 = [] parseTsoFlags w = [TsoFlagsUnknownValue w] isSet :: Word32 -> Word32 -> Bool isSet bitMask w = w .&. 
bitMask /= 0 unset :: Word32 -> Word32 -> Word32 unset bitMask w = w `xor` bitMask data StackFields = StackFields { stack_size :: Word32, stack_dirty :: Word8, #if __GLASGOW_HASKELL__ >= 811 stack_marking :: Word8, #endif stack_sp :: Addr## } -- | Get non-closure fields from @StgStack_@ (@TSO.h@) peekStackFields :: Ptr a -> IO StackFields peekStackFields ptr = do stack_size' <- (#peek struct StgStack_, stack_size) ptr ::IO Word32 dirty' <- (#peek struct StgStack_, dirty) ptr #if __GLASGOW_HASKELL__ >= 811 marking' <- (#peek struct StgStack_, marking) ptr #endif Ptr sp' <- (#peek struct StgStack_, sp) ptr -- TODO decode the stack. return StackFields { stack_size = stack_size', stack_dirty = dirty', #if __GLASGOW_HASKELL__ >= 811 stack_marking = marking', #endif stack_sp = sp' } ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/InfoTable.hsc0000644000000000000000000000465507346545000023700 0ustar0000000000000000{-# LANGUAGE NoMonoLocalBinds #-} module GHC.Exts.Heap.InfoTable ( module GHC.Exts.Heap.InfoTable.Types , itblSize , peekItbl , pokeItbl ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.InfoTable.Types #if !defined(TABLES_NEXT_TO_CODE) import GHC.Exts.Heap.Constants import Data.Maybe #endif import Foreign ------------------------------------------------------------------------- -- Profiling specific code -- -- The functions that follow all rely on PROFILING. They are duplicated in -- ghc-heap/GHC/Exts/Heap/InfoTableProf.hsc where PROFILING is defined. This -- allows hsc2hs to generate values for both profiling and non-profiling builds. -- | Read an InfoTable from the heap into a haskell type. -- WARNING: This code assumes it is passed a pointer to a "standard" info -- table. If tables_next_to_code is enabled, it will look 1 byte before the -- start for the entry field. 
peekItbl :: Ptr StgInfoTable -> IO StgInfoTable peekItbl a0 = do #if !defined(TABLES_NEXT_TO_CODE) let ptr = a0 `plusPtr` (negate wORD_SIZE) entry' <- Just <$> (#peek struct StgInfoTable_, entry) ptr #else let ptr = a0 entry' = Nothing #endif ptrs' <- (#peek struct StgInfoTable_, layout.payload.ptrs) ptr nptrs' <- (#peek struct StgInfoTable_, layout.payload.nptrs) ptr tipe' <- (#peek struct StgInfoTable_, type) ptr srtlen' <- (#peek struct StgInfoTable_, srt) a0 return StgInfoTable { entry = entry' , ptrs = ptrs' , nptrs = nptrs' , tipe = toEnum (fromIntegral (tipe' :: HalfWord)) , srtlen = srtlen' , code = Nothing } pokeItbl :: Ptr StgInfoTable -> StgInfoTable -> IO () pokeItbl a0 itbl = do #if !defined(TABLES_NEXT_TO_CODE) (#poke StgInfoTable, entry) a0 (fromJust (entry itbl)) #endif (#poke StgInfoTable, layout.payload.ptrs) a0 (ptrs itbl) (#poke StgInfoTable, layout.payload.nptrs) a0 (nptrs itbl) (#poke StgInfoTable, type) a0 (toHalfWord (fromEnum (tipe itbl))) (#poke StgInfoTable, srt) a0 (srtlen itbl) #if defined(TABLES_NEXT_TO_CODE) let code_offset = a0 `plusPtr` (#offset StgInfoTable, code) case code itbl of Nothing -> return () Just (Left xs) -> pokeArray code_offset xs Just (Right xs) -> pokeArray code_offset xs #endif where toHalfWord :: Int -> HalfWord toHalfWord i = fromIntegral i -- | Size in bytes of a standard InfoTable itblSize :: Int itblSize = (#size struct StgInfoTable_) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/InfoTable/0000755000000000000000000000000007346545000023167 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/InfoTable/Types.hsc0000644000000000000000000000212207346545000024767 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.InfoTable.Types ( StgInfoTable(..) , EntryFunPtr , HalfWord , ItblCodes ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] 
import GHC.Generics import GHC.Exts.Heap.ClosureTypes import Foreign type ItblCodes = Either [Word8] [Word32] #include "ghcautoconf.h" -- Ultra-minimalist version specially for constructors #if SIZEOF_VOID_P == 8 type HalfWord = Word32 #elif SIZEOF_VOID_P == 4 type HalfWord = Word16 #else #error Unknown SIZEOF_VOID_P #endif type EntryFunPtr = FunPtr (Ptr () -> IO (Ptr ())) -- | This is a somewhat faithful representation of an info table. See -- -- for more details on this data structure. data StgInfoTable = StgInfoTable { entry :: Maybe EntryFunPtr, -- Just <=> not TABLES_NEXT_TO_CODE ptrs :: HalfWord, nptrs :: HalfWord, tipe :: ClosureType, srtlen :: HalfWord, code :: Maybe ItblCodes -- Just <=> TABLES_NEXT_TO_CODE } deriving (Eq, Show, Generic) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/InfoTableProf.hsc0000644000000000000000000000427607346545000024526 0ustar0000000000000000{-# LANGUAGE NoMonoLocalBinds #-} module GHC.Exts.Heap.InfoTableProf ( module GHC.Exts.Heap.InfoTable.Types , itblSize , peekItbl , pokeItbl ) where -- This file overrides InfoTable.hsc's implementation of peekItbl and pokeItbl. -- Manually defining PROFILING gives the #peek and #poke macros an accurate -- representation of StgInfoTable_ when hsc2hs runs. #define PROFILING #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.InfoTable.Types #if !defined(TABLES_NEXT_TO_CODE) import GHC.Exts.Heap.Constants import Data.Maybe #endif import Foreign -- | Read an InfoTable from the heap into a haskell type. -- WARNING: This code assumes it is passed a pointer to a "standard" info -- table. If tables_next_to_code is enabled, it will look 1 byte before the -- start for the entry field. 
peekItbl :: Ptr StgInfoTable -> IO StgInfoTable peekItbl a0 = do #if !defined(TABLES_NEXT_TO_CODE) let ptr = a0 `plusPtr` (negate wORD_SIZE) entry' <- Just <$> (#peek struct StgInfoTable_, entry) ptr #else let ptr = a0 entry' = Nothing #endif ptrs' <- (#peek struct StgInfoTable_, layout.payload.ptrs) ptr nptrs' <- (#peek struct StgInfoTable_, layout.payload.nptrs) ptr tipe' <- (#peek struct StgInfoTable_, type) ptr srtlen' <- (#peek struct StgInfoTable_, srt) a0 return StgInfoTable { entry = entry' , ptrs = ptrs' , nptrs = nptrs' , tipe = toEnum (fromIntegral (tipe' :: HalfWord)) , srtlen = srtlen' , code = Nothing } pokeItbl :: Ptr StgInfoTable -> StgInfoTable -> IO () pokeItbl a0 itbl = do #if !defined(TABLES_NEXT_TO_CODE) (#poke StgInfoTable, entry) a0 (fromJust (entry itbl)) #endif (#poke StgInfoTable, layout.payload.ptrs) a0 (ptrs itbl) (#poke StgInfoTable, layout.payload.nptrs) a0 (nptrs itbl) (#poke StgInfoTable, type) a0 (fromEnum (tipe itbl)) (#poke StgInfoTable, srt) a0 (srtlen itbl) #if defined(TABLES_NEXT_TO_CODE) let code_offset = a0 `plusPtr` (#offset StgInfoTable, code) case code itbl of Nothing -> return () Just (Left xs) -> pokeArray code_offset xs Just (Right xs) -> pokeArray code_offset xs #endif itblSize :: Int itblSize = (#size struct StgInfoTable_) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/0000755000000000000000000000000007346545000023046 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo.hs0000644000000000000000000000106707346545000025735 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.ProfInfo.PeekProfInfo (module Reexport) where -- See [hsc and CPP workaround] #if defined(PROFILING) import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled as Reexport import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled () -- See Note [No way-dependent imports] in GHC.Exts.Heap.Closures #else import 
GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled as Reexport import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled () -- See Note [No way-dependent imports] in GHC.Exts.Heap.Closures #endif libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo_ProfilingDisabled.hsc0000644000000000000000000000103407346545000031454 0ustar0000000000000000ghc-lib-parser-9.12.2.20250421module GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled( peekStgTSOProfInfo , peekTopCCS ) where import Prelude import Foreign import GHC.Exts.Heap.ProfInfo.Types -- | This implementation is used when PROFILING is undefined. -- It always returns 'Nothing', because there is no profiling info available. peekStgTSOProfInfo :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo _ _ = return Nothing peekTopCCS :: Ptr a -> IO (Maybe CostCentreStack) peekTopCCS _ = return Nothing libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo_ProfilingEnabled.hsc0000644000000000000000000001560207346545000031305 0ustar0000000000000000ghc-lib-parser-9.12.2.20250421{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled( peekStgTSOProfInfo , peekTopCCS ) where #if __GLASGOW_HASKELL__ >= 811 -- See [hsc and CPP workaround] #define PROFILING #include "Rts.h" #undef BLOCK_SIZE #undef MBLOCK_SIZE #undef BLOCKS_PER_MBLOCK #include "DerivedConstants.h" import Data.IntSet (IntSet) import qualified Data.IntSet as IntSet import Data.IntMap.Strict (IntMap) import qualified Data.IntMap.Strict as IntMap import Data.IORef (IORef, newIORef, readIORef, writeIORef) import Foreign import Foreign.C.String import GHC.Exts import GHC.Exts.Heap.ProfInfo.Types import Prelude -- Use Int based containers for pointers (addresses) for better performance. -- These will be queried a lot! 
type AddressSet = IntSet type AddressMap = IntMap peekStgTSOProfInfo :: (Ptr b -> IO (Maybe CostCentreStack)) -> Ptr a -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo decodeCCS tsoPtr = do cccs_ptr <- peekByteOff tsoPtr cccsOffset cccs' <- decodeCCS cccs_ptr return $ Just StgTSOProfInfo { cccs = cccs' } peekTopCCS :: Ptr b -> IO (Maybe CostCentreStack) peekTopCCS cccs_ptr = do costCenterCacheRef <- newIORef IntMap.empty peekCostCentreStack IntSet.empty costCenterCacheRef cccs_ptr cccsOffset :: Int cccsOffset = (#const OFFSET_StgTSO_cccs) + (#size StgHeader) peekCostCentreStack :: AddressSet -> IORef (AddressMap CostCentre) -> Ptr costCentreStack -> IO (Maybe CostCentreStack) peekCostCentreStack _ _ ptr | ptr == nullPtr = return Nothing peekCostCentreStack loopBreakers _ ptr | IntSet.member (ptrToInt ptr) loopBreakers = return Nothing peekCostCentreStack loopBreakers costCenterCacheRef ptr = do ccs_ccsID' <- (#peek struct CostCentreStack_, ccsID) ptr ccs_cc_ptr <- (#peek struct CostCentreStack_, cc) ptr ccs_cc' <- peekCostCentre costCenterCacheRef ccs_cc_ptr ccs_prevStack_ptr <- (#peek struct CostCentreStack_, prevStack) ptr let loopBreakers' = (IntSet.insert ptrAsInt loopBreakers) ccs_prevStack' <- peekCostCentreStack loopBreakers' costCenterCacheRef ccs_prevStack_ptr ccs_indexTable_ptr <- (#peek struct CostCentreStack_, indexTable) ptr ccs_indexTable' <- peekIndexTable loopBreakers' costCenterCacheRef ccs_indexTable_ptr ccs_root_ptr <- (#peek struct CostCentreStack_, root) ptr ccs_root' <- peekCostCentreStack loopBreakers' costCenterCacheRef ccs_root_ptr ccs_depth' <- (#peek struct CostCentreStack_, depth) ptr ccs_scc_count' <- (#peek struct CostCentreStack_, scc_count) ptr ccs_selected' <- (#peek struct CostCentreStack_, selected) ptr ccs_time_ticks' <- (#peek struct CostCentreStack_, time_ticks) ptr ccs_mem_alloc' <- (#peek struct CostCentreStack_, mem_alloc) ptr ccs_inherited_alloc' <- (#peek struct CostCentreStack_, inherited_alloc) ptr ccs_inherited_ticks' <- 
(#peek struct CostCentreStack_, inherited_ticks) ptr return $ Just CostCentreStack { ccs_ccsID = ccs_ccsID', ccs_cc = ccs_cc', ccs_prevStack = ccs_prevStack', ccs_indexTable = ccs_indexTable', ccs_root = ccs_root', ccs_depth = ccs_depth', ccs_scc_count = ccs_scc_count', ccs_selected = ccs_selected', ccs_time_ticks = ccs_time_ticks', ccs_mem_alloc = ccs_mem_alloc', ccs_inherited_alloc = ccs_inherited_alloc', ccs_inherited_ticks = ccs_inherited_ticks' } where ptrAsInt = ptrToInt ptr peekCostCentre :: IORef (AddressMap CostCentre) -> Ptr costCentre -> IO CostCentre peekCostCentre costCenterCacheRef ptr = do costCenterCache <- readIORef costCenterCacheRef case IntMap.lookup ptrAsInt costCenterCache of (Just a) -> return a Nothing -> do cc_ccID' <- (#peek struct CostCentre_, ccID) ptr cc_label_ptr <- (#peek struct CostCentre_, label) ptr cc_label' <- peekCString cc_label_ptr cc_module_ptr <- (#peek struct CostCentre_, module) ptr cc_module' <- peekCString cc_module_ptr cc_srcloc_ptr <- (#peek struct CostCentre_, srcloc) ptr cc_srcloc' <- do if cc_srcloc_ptr == nullPtr then return Nothing else fmap Just (peekCString cc_srcloc_ptr) cc_mem_alloc' <- (#peek struct CostCentre_, mem_alloc) ptr cc_time_ticks' <- (#peek struct CostCentre_, time_ticks) ptr cc_is_caf' <- (#peek struct CostCentre_, is_caf) ptr cc_link_ptr <- (#peek struct CostCentre_, link) ptr cc_link' <- if cc_link_ptr == nullPtr then return Nothing else fmap Just (peekCostCentre costCenterCacheRef cc_link_ptr) let result = CostCentre { cc_ccID = cc_ccID', cc_label = cc_label', cc_module = cc_module', cc_srcloc = cc_srcloc', cc_mem_alloc = cc_mem_alloc', cc_time_ticks = cc_time_ticks', cc_is_caf = cc_is_caf', cc_link = cc_link' } writeIORef costCenterCacheRef (IntMap.insert ptrAsInt result costCenterCache) return result where ptrAsInt = ptrToInt ptr peekIndexTable :: AddressSet -> IORef (AddressMap CostCentre) -> Ptr indexTable -> IO (Maybe IndexTable) peekIndexTable _ _ ptr | ptr == nullPtr = return Nothing 
peekIndexTable loopBreakers costCenterCacheRef ptr = do it_cc_ptr <- (#peek struct IndexTable_, cc) ptr it_cc' <- peekCostCentre costCenterCacheRef it_cc_ptr it_ccs_ptr <- (#peek struct IndexTable_, ccs) ptr it_ccs' <- peekCostCentreStack loopBreakers costCenterCacheRef it_ccs_ptr it_next_ptr <- (#peek struct IndexTable_, next) ptr it_next' <- peekIndexTable loopBreakers costCenterCacheRef it_next_ptr it_back_edge' <- (#peek struct IndexTable_, back_edge) ptr return $ Just IndexTable { it_cc = it_cc', it_ccs = it_ccs', it_next = it_next', it_back_edge = it_back_edge' } -- | casts a @Ptr@ to an @Int@ ptrToInt :: Ptr a -> Int ptrToInt (Ptr a##) = I## (addr2Int## a##) #else import Prelude import Foreign import GHC.Exts.Heap.ProfInfo.Types peekStgTSOProfInfo :: (Ptr b -> IO (Maybe CostCentreStack)) -> Ptr a -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo _ _ = return Nothing peekTopCCS :: Ptr a -> IO (Maybe CostCentreStack) peekTopCCS _ = return Nothing #endif ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/Types.hs0000644000000000000000000000361107346545000024507 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.ProfInfo.Types where import Prelude import Data.Word import GHC.Generics -- | This is a somewhat faithful representation of StgTSOProfInfo. See -- -- for more details on this data structure. newtype StgTSOProfInfo = StgTSOProfInfo { cccs :: Maybe CostCentreStack } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of CostCentreStack. See -- -- for more details on this data structure. 
data CostCentreStack = CostCentreStack { ccs_ccsID :: Int, ccs_cc :: CostCentre, ccs_prevStack :: Maybe CostCentreStack, ccs_indexTable :: Maybe IndexTable, ccs_root :: Maybe CostCentreStack, ccs_depth :: Word, ccs_scc_count :: Word64, ccs_selected :: Word, ccs_time_ticks :: Word, ccs_mem_alloc :: Word64, ccs_inherited_alloc :: Word64, ccs_inherited_ticks :: Word } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of CostCentre. See -- -- for more details on this data structure. data CostCentre = CostCentre { cc_ccID :: Int, cc_label :: String, cc_module :: String, cc_srcloc :: Maybe String, cc_mem_alloc :: Word64, cc_time_ticks :: Word, cc_is_caf :: Bool, cc_link :: Maybe CostCentre } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of IndexTable. See -- -- for more details on this data structure. data IndexTable = IndexTable { it_cc :: CostCentre, it_ccs :: Maybe CostCentreStack, it_next :: Maybe IndexTable, it_back_edge :: Bool } deriving (Show, Generic, Eq, Ord) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/GHC/Exts/Heap/Utils.hsc0000644000000000000000000001105107346545000023121 0ustar0000000000000000{-# LANGUAGE CPP, MagicHash #-} module GHC.Exts.Heap.Utils ( dataConNames ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.Constants import GHC.Exts.Heap.InfoTable import Data.Char import Data.List (intercalate) import Foreign import GHC.CString import GHC.Exts {- To find the string in the constructor's info table we need to consider the layout of info tables relative to the entry code for a closure. An info table can be next to the entry code for the closure, or it can be separate. The former (faster) is used in registerised versions of ghc, and the latter (portable) is for non-registerised versions. The diagrams below show where the string is to be found relative to the normal info table of the closure. 
1) Tables next to code: -------------- | | <- pointer to the start of the string -------------- | | <- the (start of the) info table structure | | | | -------------- | entry code | | .... | In this case the pointer to the start of the string can be found in the memory location _one word before_ the first entry in the normal info table. 2) Tables NOT next to code: -------------- info table structure -> | *------------------> -------------- | | | entry code | | | | .... | -------------- ptr to start of str -> | | -------------- In this case the pointer to the start of the string can be found in the memory location: info_table_ptr + info_table_size -} -- Given a ptr to an 'StgInfoTable' for a data constructor -- return (Package, Module, Name) dataConNames :: Ptr StgInfoTable -> IO (String, String, String) dataConNames ptr = do conDescAddress <- getConDescAddress pure $ parse conDescAddress where -- Retrieve the con_desc field address pointing to -- 'Package:Module.Name' string getConDescAddress :: IO (Ptr Word8) getConDescAddress #if defined(TABLES_NEXT_TO_CODE) = do offsetToString <- peek (ptr `plusPtr` negate wORD_SIZE) pure $ (ptr `plusPtr` stdInfoTableSizeB) `plusPtr` fromIntegral (offsetToString :: Int32) #else = peek $ intPtrToPtr $ ptrToIntPtr ptr + fromIntegral stdInfoTableSizeB #endif stdInfoTableSizeW :: Int -- The size of a standard info table varies with profiling/ticky etc, -- so we can't get it from Constants -- It must vary in sync with mkStdInfoTable stdInfoTableSizeW = size_fixed + size_prof where size_fixed = 2 -- layout, type ##if defined(PROFILING) size_prof = 2 ##else size_prof = 0 ##endif stdInfoTableSizeB :: Int stdInfoTableSizeB = stdInfoTableSizeW * wORD_SIZE -- parsing names is a little bit fiddly because we have a string in the form: -- pkg:A.B.C.foo, and we want to split it into three parts: ("pkg", "A.B.C", "foo"). -- Thus we split at the leftmost colon and the rightmost occurrence of the dot. 
-- It would be easier if the string was in the form pkg:A.B.C:foo, but alas -- this is not the conventional way of writing Haskell names. We stick with -- convention, even though it makes the parsing code more troublesome. -- Warning: this code assumes that the string is well formed. parse :: Ptr Word8 -> (String, String, String) parse (Ptr addr) = if not . all (>0) . fmap length $ [p,m,occ] then ([], [], input) else (p, m, occ) where input = unpackCStringUtf8## addr (p, rest1) = break (== ':') input (m, occ) = (intercalate "." $ reverse modWords, occWord) where (modWords, occWord) = parseModOcc [] (drop 1 rest1) -- We only look for dots if str could start with a module name, -- i.e. if it starts with an upper case character. -- Otherwise we might think that "X.:->" is the module name in -- "X.:->.+", whereas actually "X" is the module name and -- ":->.+" is a constructor name. parseModOcc :: [String] -> String -> ([String], String) parseModOcc acc str@(c : _) | isUpper c = case break (== '.') str of (top, []) -> (acc, top) (top, _:bot) -> parseModOcc (top : acc) bot parseModOcc acc str = (acc, str) ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/cbits/0000755000000000000000000000000007346545000020227 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/cbits/HeapPrim.cmm0000644000000000000000000000032607346545000022433 0ustar0000000000000000#include "Cmm.h" Ghclib_aToWordzh (P_ clos) { return (clos); } Ghclib_reallyUnsafePtrEqualityUpToTag (W_ clos1, W_ clos2) { clos1 = UNTAG(clos1); clos2 = UNTAG(clos2); return (clos1 == clos2); } ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/0000755000000000000000000000000007346545000017123 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-heap/ghc-heap.cabal0000644000000000000000000000400107346545000021556 0ustar0000000000000000cabal-version: 3.0 name: ghc-heap version: 9.12.2 license: BSD-3-Clause license-file: LICENSE maintainer: libraries@haskell.org bug-reports: 
https://gitlab.haskell.org/ghc/ghc/issues/new synopsis: Functions for walking GHC's heap category: GHC description: This package provides functions for walking the GHC heap data structures and retrieving information about those data structures. build-type: Simple tested-with: GHC==7.11 source-repository head type: git location: https://gitlab.haskell.org/ghc/ghc.git subdir: libraries/ghc-heap library default-language: Haskell2010 build-depends: base >= 4.9.0 && < 5.0 , ghc-prim > 0.2 && < 0.14 , rts == 1.0.* , containers >= 0.6.2.1 && < 0.8 if impl(ghc >= 9.9) build-depends: ghc-internal >= 9.900 && < 9.1202.99999 ghc-options: -Wall if !os(ghcjs) cmm-sources: cbits/HeapPrim.cmm cbits/Stack.cmm c-sources: cbits/Stack_c.c default-extensions: NoImplicitPrelude exposed-modules: GHC.Exts.Heap GHC.Exts.Heap.Closures GHC.Exts.Heap.ClosureTypes GHC.Exts.Heap.Constants GHC.Exts.Heap.InfoTable GHC.Exts.Heap.InfoTable.Types GHC.Exts.Heap.InfoTableProf GHC.Exts.Heap.Utils GHC.Exts.Heap.FFIClosures GHC.Exts.Heap.FFIClosures_ProfilingDisabled GHC.Exts.Heap.FFIClosures_ProfilingEnabled GHC.Exts.Heap.ProfInfo.Types GHC.Exts.Heap.ProfInfo.PeekProfInfo GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled GHC.Exts.Stack.Constants GHC.Exts.Stack GHC.Exts.Stack.Decode ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/0000755000000000000000000000000007346545000022766 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/ForeignSrcLang.hs0000644000000000000000000000117607346545000026172 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DeriveGeneric #-} module GHC.Internal.ForeignSrcLang ( ForeignSrcLang(..) ) where #ifdef BOOTSTRAP_TH import Prelude -- See note [Why do we import Prelude here?] 
import GHC.Generics (Generic) #else import GHC.Internal.Base import GHC.Internal.Show import GHC.Internal.Generics #endif -- | Foreign formats supported by GHC via TH data ForeignSrcLang = LangC -- ^ C | LangCxx -- ^ C++ | LangObjc -- ^ Objective C | LangObjcxx -- ^ Objective C++ | LangAsm -- ^ Assembly language (.s) | LangJs -- ^ JavaScript | RawObject -- ^ Object (.o) deriving (Eq, Show, Generic) ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/LanguageExtensions.hs0000644000000000000000000001150407346545000027126 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Module : GHC.Internal.LanguageExtensions -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- A data type defining the language extensions supported by GHC. -- {-# LANGUAGE DeriveGeneric, CPP, Trustworthy #-} module GHC.Internal.LanguageExtensions ( Extension(..) ) where #ifdef BOOTSTRAP_TH import Prelude -- See note [Why do we import Prelude here?] import GHC.Generics (Generic) #else import GHC.Internal.Base import GHC.Internal.Show import GHC.Internal.Generics import GHC.Internal.Enum #endif -- | The language extensions known to GHC. -- -- Note that there is an orphan 'Binary' instance for this type supplied by -- the "GHC.LanguageExtensions" module provided by @ghc-boot@. We can't provide -- here as this would require adding transitive dependencies to the -- @template-haskell@ package, which must have a minimal dependency set. 
data Extension -- See Note [Updating flag description in the User's Guide] in -- GHC.Driver.Session = Cpp | OverlappingInstances | UndecidableInstances | IncoherentInstances | UndecidableSuperClasses | MonomorphismRestriction | MonoLocalBinds | DeepSubsumption | RelaxedPolyRec -- Deprecated | ExtendedDefaultRules -- Use GHC's extended rules for defaulting | NamedDefaults | ForeignFunctionInterface | UnliftedFFITypes | InterruptibleFFI | CApiFFI | GHCForeignImportPrim | JavaScriptFFI | ParallelArrays -- Syntactic support for parallel arrays | Arrows -- Arrow-notation syntax | TemplateHaskell | TemplateHaskellQuotes -- subset of TH supported by stage1, no splice | QualifiedDo | QuasiQuotes | ImplicitParams | ImplicitPrelude | ScopedTypeVariables | AllowAmbiguousTypes | UnboxedTuples | UnboxedSums | UnliftedNewtypes | UnliftedDatatypes | BangPatterns | TypeFamilies | TypeFamilyDependencies | TypeInType -- Deprecated | OverloadedStrings | OverloadedLists | NumDecimals | DisambiguateRecordFields | RecordWildCards | NamedFieldPuns | ViewPatterns | OrPatterns | GADTs | GADTSyntax | NPlusKPatterns | DoAndIfThenElse | BlockArguments | RebindableSyntax | ConstraintKinds | PolyKinds -- Kind polymorphism | DataKinds -- Datatype promotion | TypeData -- allow @type data@ definitions | InstanceSigs | ApplicativeDo | LinearTypes | RequiredTypeArguments -- Visible forall (VDQ) in types of terms | StandaloneDeriving | DeriveDataTypeable | AutoDeriveTypeable -- Automatic derivation of Typeable | DeriveFunctor | DeriveTraversable | DeriveFoldable | DeriveGeneric -- Allow deriving Generic/1 | DefaultSignatures -- Allow extra signatures for defmeths | DeriveAnyClass -- Allow deriving any class | DeriveLift -- Allow deriving Lift | DerivingStrategies | DerivingVia -- Derive through equal representation | TypeSynonymInstances | FlexibleContexts | FlexibleInstances | ConstrainedClassMethods | MultiParamTypeClasses | NullaryTypeClasses | FunctionalDependencies | UnicodeSyntax | 
ExistentialQuantification | MagicHash | EmptyDataDecls | KindSignatures | RoleAnnotations | ParallelListComp | TransformListComp | MonadComprehensions | GeneralizedNewtypeDeriving | RecursiveDo | PostfixOperators | TupleSections | PatternGuards | LiberalTypeSynonyms | RankNTypes | ImpredicativeTypes | TypeOperators | ExplicitNamespaces | PackageImports | ExplicitForAll | AlternativeLayoutRule | AlternativeLayoutRuleTransitional | DatatypeContexts | NondecreasingIndentation | RelaxedLayout | TraditionalRecordSyntax | LambdaCase | MultiWayIf | BinaryLiterals | NegativeLiterals | HexFloatLiterals | DuplicateRecordFields | OverloadedLabels | EmptyCase | PatternSynonyms | PartialTypeSignatures | NamedWildCards | StaticPointers | TypeApplications | Strict | StrictData | EmptyDataDeriving | NumericUnderscores | QuantifiedConstraints | StarIsType | ImportQualifiedPost | CUSKs | StandaloneKindSignatures | LexicalNegation | FieldSelectors | OverloadedRecordDot | OverloadedRecordUpdate | TypeAbstractions | ExtendedLiterals | ListTuplePuns | MultilineStrings deriving (Eq, Enum, Show, Generic, Bounded) -- 'Ord' and 'Bounded' are provided for GHC API users (see discussions -- in https://gitlab.haskell.org/ghc/ghc/merge_requests/2707 and -- https://gitlab.haskell.org/ghc/ghc/merge_requests/826). instance Ord Extension where compare a b = compare (fromEnum a) (fromEnum b) ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/Lexeme.hs0000644000000000000000000000347607346545000024553 0ustar0000000000000000{-# LANGUAGE CPP #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Internal.Lexeme -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- Functions to evaluate whether or not a string is a valid identifier. 
-- module GHC.Internal.Lexeme ( -- * Lexical characteristics of Haskell names startsVarSym, startsVarId, startsConSym, startsConId, startsVarSymASCII, isVarSymChar, okSymChar ) where #ifdef BOOTSTRAP_TH import Prelude -- See note [Why do we import Prelude here?] import Data.Char #else import GHC.Internal.Base import GHC.Internal.Unicode import GHC.Internal.List (elem) #endif -- | Is this character acceptable in a symbol (after the first char)? -- See alexGetByte in GHC.Parser.Lexer okSymChar :: Char -> Bool okSymChar c | c `elem` "(),;[]`{}_\"'" = False | otherwise = case generalCategory c of ConnectorPunctuation -> True DashPunctuation -> True OtherPunctuation -> True MathSymbol -> True CurrencySymbol -> True ModifierSymbol -> True OtherSymbol -> True _ -> False startsVarSym, startsVarId, startsConSym, startsConId :: Char -> Bool startsVarSym c = okSymChar c && c /= ':' -- Infix Ids startsConSym c = c == ':' -- Infix data constructors startsVarId c = c == '_' || case generalCategory c of -- Ordinary Ids LowercaseLetter -> True OtherLetter -> True -- See #1103 _ -> False startsConId c = isUpper c || c == '(' -- Ordinary type constructors and data constructors startsVarSymASCII :: Char -> Bool startsVarSymASCII c = c `elem` "!#$%&*+./<=>?@\\^|~-" isVarSymChar :: Char -> Bool isVarSymChar c = c == ':' || startsVarSym c ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/TH/0000755000000000000000000000000007346545000023301 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-internal/src/GHC/Internal/TH/Syntax.hs0000644000000000000000000027373107346545000025140 0ustar0000000000000000{-# OPTIONS_HADDOCK not-home #-} -- we want users to import Language.Haskell.TH.Syntax instead {-# LANGUAGE CPP, DeriveDataTypeable, DeriveGeneric, FlexibleInstances, DefaultSignatures, RankNTypes, RoleAnnotations, ScopedTypeVariables, MagicHash, KindSignatures, PolyKinds, TypeApplications, DataKinds, GADTs, UnboxedTuples, UnboxedSums, TypeOperators, 
Trustworthy, DeriveFunctor, DeriveTraversable, BangPatterns, RecordWildCards, ImplicitParams #-} {-# LANGUAGE TemplateHaskellQuotes #-} {-# LANGUAGE StandaloneKindSignatures #-} -- | This module is used internally in GHC's integration with Template Haskell -- and defines the abstract syntax of Template Haskell. -- -- This is not a part of the public API, and as such, there are no API -- guarantees for this module from version to version. -- -- Import "Language.Haskell.TH" or "Language.Haskell.TH.Syntax" instead! module GHC.Internal.TH.Syntax ( module GHC.Internal.TH.Syntax -- * Language extensions , module GHC.Internal.LanguageExtensions , ForeignSrcLang(..) -- * Notes -- ** Unresolved Infix -- $infix ) where #ifdef BOOTSTRAP_TH import Prelude import Data.Data hiding (Fixity(..)) import Data.IORef import System.IO.Unsafe ( unsafePerformIO ) import GHC.IO.Unsafe ( unsafeDupableInterleaveIO ) import Control.Monad.IO.Class (MonadIO (..)) import Control.Monad.Fix (MonadFix (..)) import Control.Exception (BlockedIndefinitelyOnMVar (..), catch, throwIO) import Control.Exception.Base (FixIOException (..)) import Control.Concurrent.MVar (newEmptyMVar, readMVar, putMVar) import System.IO ( hPutStrLn, stderr ) import Data.Char ( isAlpha, isAlphaNum, isUpper ) import Data.List.NonEmpty ( NonEmpty(..) 
) import Data.Word import GHC.Generics ( Generic ) import qualified Data.Kind as Kind (Type) import GHC.Ptr ( Ptr, plusPtr ) import Foreign.ForeignPtr import Foreign.C.String import Foreign.C.Types import GHC.Types (TYPE, RuntimeRep(..)) #else import GHC.Internal.Base hiding (Type, Module, sequence) import GHC.Internal.Data.Data hiding (Fixity(..)) import GHC.Internal.Data.Traversable import GHC.Internal.Word import GHC.Internal.Generics (Generic) import GHC.Internal.IORef import GHC.Internal.System.IO import GHC.Internal.Show import GHC.Internal.Integer import GHC.Internal.Real import GHC.Internal.Data.Foldable import GHC.Internal.Foreign.Ptr import GHC.Internal.ForeignPtr import GHC.Internal.Data.Typeable import GHC.Internal.Control.Monad.IO.Class import GHC.Internal.Foreign.C.Types import GHC.Internal.Foreign.C.String import GHC.Internal.Control.Monad.Fail import GHC.Internal.Control.Monad.Fix import GHC.Internal.Control.Exception import GHC.Internal.Num import GHC.Internal.IO.Unsafe import GHC.Internal.List (dropWhile, break, replicate, reverse, last) import GHC.Internal.MVar import GHC.Internal.IO.Exception import GHC.Internal.Unicode import qualified GHC.Types as Kind (Type) #endif import GHC.Internal.ForeignSrcLang import GHC.Internal.LanguageExtensions ----------------------------------------------------- -- -- The Quasi class -- ----------------------------------------------------- class (MonadIO m, MonadFail m) => Quasi m where -- | Fresh names. See 'newName'. qNewName :: String -> m Name ------- Error reporting and recovery ------- -- | Report an error (True) or warning (False) -- ...but carry on; use 'fail' to stop. See 'report'. qReport :: Bool -> String -> m () -- | See 'recover'. qRecover :: m a -- ^ the error handler -> m a -- ^ action which may fail -> m a -- ^ Recover from the monadic 'fail' ------- Inspect the type-checker's environment ------- -- | True <=> type namespace, False <=> value namespace. See 'lookupName'. 
qLookupName :: Bool -> String -> m (Maybe Name) -- | See 'reify'. qReify :: Name -> m Info -- | See 'reifyFixity'. qReifyFixity :: Name -> m (Maybe Fixity) -- | See 'reifyType'. qReifyType :: Name -> m Type -- | Is (n tys) an instance? Returns list of matching instance Decs (with -- empty sub-Decs) Works for classes and type functions. See 'reifyInstances'. qReifyInstances :: Name -> [Type] -> m [Dec] -- | See 'reifyRoles'. qReifyRoles :: Name -> m [Role] -- | See 'reifyAnnotations'. qReifyAnnotations :: Data a => AnnLookup -> m [a] -- | See 'reifyModule'. qReifyModule :: Module -> m ModuleInfo -- | See 'reifyConStrictness'. qReifyConStrictness :: Name -> m [DecidedStrictness] -- | See 'location'. qLocation :: m Loc -- | Input/output (dangerous). See 'runIO'. qRunIO :: IO a -> m a qRunIO = liftIO -- | See 'getPackageRoot'. qGetPackageRoot :: m FilePath -- | See 'addDependentFile'. qAddDependentFile :: FilePath -> m () -- | See 'addTempFile'. qAddTempFile :: String -> m FilePath -- | See 'addTopDecls'. qAddTopDecls :: [Dec] -> m () -- | See 'addForeignFilePath'. qAddForeignFilePath :: ForeignSrcLang -> String -> m () -- | See 'addModFinalizer'. qAddModFinalizer :: Q () -> m () -- | See 'addCorePlugin'. qAddCorePlugin :: String -> m () -- | See 'getQ'. qGetQ :: Typeable a => m (Maybe a) -- | See 'putQ'. qPutQ :: Typeable a => a -> m () -- | See 'isExtEnabled'. qIsExtEnabled :: Extension -> m Bool -- | See 'extsEnabled'. qExtsEnabled :: m [Extension] -- | See 'putDoc'. qPutDoc :: DocLoc -> String -> m () -- | See 'getDoc'. qGetDoc :: DocLoc -> m (Maybe String) ----------------------------------------------------- -- The IO instance of Quasi ----------------------------------------------------- -- | This instance is used only when running a Q -- computation in the IO monad, usually just to -- print the result. There is no interesting -- type environment, so reification isn't going to -- work. 
instance Quasi IO where qNewName = newNameIO qReport True msg = hPutStrLn stderr ("Template Haskell error: " ++ msg) qReport False msg = hPutStrLn stderr ("Template Haskell error: " ++ msg) qLookupName _ _ = badIO "lookupName" qReify _ = badIO "reify" qReifyFixity _ = badIO "reifyFixity" qReifyType _ = badIO "reifyFixity" qReifyInstances _ _ = badIO "reifyInstances" qReifyRoles _ = badIO "reifyRoles" qReifyAnnotations _ = badIO "reifyAnnotations" qReifyModule _ = badIO "reifyModule" qReifyConStrictness _ = badIO "reifyConStrictness" qLocation = badIO "currentLocation" qRecover _ _ = badIO "recover" -- Maybe we could fix this? qGetPackageRoot = badIO "getProjectRoot" qAddDependentFile _ = badIO "addDependentFile" qAddTempFile _ = badIO "addTempFile" qAddTopDecls _ = badIO "addTopDecls" qAddForeignFilePath _ _ = badIO "addForeignFilePath" qAddModFinalizer _ = badIO "addModFinalizer" qAddCorePlugin _ = badIO "addCorePlugin" qGetQ = badIO "getQ" qPutQ _ = badIO "putQ" qIsExtEnabled _ = badIO "isExtEnabled" qExtsEnabled = badIO "extsEnabled" qPutDoc _ _ = badIO "putDoc" qGetDoc _ = badIO "getDoc" instance Quote IO where newName = newNameIO newNameIO :: String -> IO Name newNameIO s = do { n <- atomicModifyIORef' counter (\x -> (x + 1, x)) ; pure (mkNameU s n) } badIO :: String -> IO a badIO op = do { qReport True ("Can't do `" ++ op ++ "' in the IO monad") ; fail "Template Haskell failure" } -- Global variable to generate unique symbols counter :: IORef Uniq {-# NOINLINE counter #-} counter = unsafePerformIO (newIORef 0) ----------------------------------------------------- -- -- The Q monad -- ----------------------------------------------------- -- | In short, 'Q' provides the 'Quasi' operations in one neat monad for the -- user. -- -- The longer story, is that 'Q' wraps an arbitrary 'Quasi'-able monad. 
-- The perceptive reader notices that 'Quasi' has only two instances, 'Q' -- itself and 'IO', neither of which have concrete implementations.'Q' plays -- the trick of [dependency -- inversion](https://en.wikipedia.org/wiki/Dependency_inversion_principle), -- providing an abstract interface for the user which is later concretely -- fufilled by an concrete 'Quasi' instance, internal to GHC. newtype Q a = Q { unQ :: forall m. Quasi m => m a } -- | \"Runs\" the 'Q' monad. Normal users of Template Haskell -- should not need this function, as the splice brackets @$( ... )@ -- are the usual way of running a 'Q' computation. -- -- This function is primarily used in GHC internals, and for debugging -- splices by running them in 'IO'. -- -- Note that many functions in 'Q', such as 'reify' and other compiler -- queries, are not supported when running 'Q' in 'IO'; these operations -- simply fail at runtime. Indeed, the only operations guaranteed to succeed -- are 'newName', 'runIO', 'reportError' and 'reportWarning'. runQ :: Quasi m => Q a -> m a runQ (Q m) = m instance Monad Q where Q m >>= k = Q (m >>= \x -> unQ (k x)) (>>) = (*>) instance MonadFail Q where fail s = report True s >> Q (fail "Q monad failure") instance Functor Q where fmap f (Q x) = Q (fmap f x) instance Applicative Q where pure x = Q (pure x) Q f <*> Q x = Q (f <*> x) Q m *> Q n = Q (m *> n) -- | @since 2.17.0.0 instance Semigroup a => Semigroup (Q a) where (<>) = liftA2 (<>) -- | @since 2.17.0.0 instance Monoid a => Monoid (Q a) where mempty = pure mempty -- | If the function passed to 'mfix' inspects its argument, -- the resulting action will throw a 'FixIOException'. -- -- @since 2.17.0.0 instance MonadFix Q where -- We use the same blackholing approach as in fixIO. -- See Note [Blackholing in fixIO] in System.IO in base. 
mfix k = do m <- runIO newEmptyMVar ans <- runIO (unsafeDupableInterleaveIO (readMVar m `catch` \BlockedIndefinitelyOnMVar -> throwIO FixIOException)) result <- k ans runIO (putMVar m result) return result ----------------------------------------------------- -- -- The Quote class -- ----------------------------------------------------- -- | The 'Quote' class implements the minimal interface which is necessary for -- desugaring quotations. -- -- * The @Monad m@ superclass is needed to stitch together the different -- AST fragments. -- * 'newName' is used when desugaring binding structures such as lambdas -- to generate fresh names. -- -- Therefore the type of an untyped quotation in GHC is `Quote m => m Exp` -- -- For many years the type of a quotation was fixed to be `Q Exp` but by -- more precisely specifying the minimal interface it enables the `Exp` to -- be extracted purely from the quotation without interacting with `Q`. class Monad m => Quote m where {- | Generate a fresh name, which cannot be captured. For example, this: @f = $(do nm1 <- newName \"x\" let nm2 = 'mkName' \"x\" return ('LamE' ['VarP' nm1] (LamE [VarP nm2] ('VarE' nm1))) )@ will produce the splice >f = \x0 -> \x -> x0 In particular, the occurrence @VarE nm1@ refers to the binding @VarP nm1@, and is not captured by the binding @VarP nm2@. Although names generated by @newName@ cannot /be captured/, they can /capture/ other names. For example, this: >g = $(do > nm1 <- newName "x" > let nm2 = mkName "x" > return (LamE [VarP nm2] (LamE [VarP nm1] (VarE nm2))) > ) will produce the splice >g = \x -> \x0 -> x0 since the occurrence @VarE nm2@ is captured by the innermost binding of @x@, namely @VarP nm1@. 
-} newName :: String -> m Name instance Quote Q where newName s = Q (qNewName s) ----------------------------------------------------- -- -- The TExp type -- ----------------------------------------------------- type TExp :: TYPE r -> Kind.Type type role TExp nominal -- See Note [Role of TExp] newtype TExp a = TExp { unType :: Exp -- ^ Underlying untyped Template Haskell expression } -- ^ Typed wrapper around an 'Exp'. -- -- This is the typed representation of terms produced by typed quotes. -- -- Representation-polymorphic since /template-haskell-2.16.0.0/. -- | Discard the type annotation and produce a plain Template Haskell -- expression -- -- Representation-polymorphic since /template-haskell-2.16.0.0/. unTypeQ :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m => m (TExp a) -> m Exp unTypeQ m = do { TExp e <- m ; return e } -- | Annotate the Template Haskell expression with a type -- -- This is unsafe because GHC cannot check for you that the expression -- really does have the type you claim it has. -- -- Representation-polymorphic since /template-haskell-2.16.0.0/. unsafeTExpCoerce :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m => m Exp -> m (TExp a) unsafeTExpCoerce m = do { e <- m ; return (TExp e) } {- Note [Role of TExp] ~~~~~~~~~~~~~~~~~~~~~~ TExp's argument must have a nominal role, not phantom as would be inferred (#8459). Consider e :: Code Q Age e = [|| MkAge 3 ||] foo = $(coerce e) + 4::Int The splice will evaluate to (MkAge 3) and you can't add that to 4::Int. So you can't coerce a (Code Q Age) to a (Code Q Int). -} -- Code constructor #if __GLASGOW_HASKELL__ >= 909 type Code :: (Kind.Type -> Kind.Type) -> forall r. 
TYPE r -> Kind.Type -- See Note [Foralls to the right in Code] #else type Code :: (Kind.Type -> Kind.Type) -> TYPE r -> Kind.Type #endif type role Code representational nominal -- See Note [Role of TExp] newtype Code m a = Code { examineCode :: m (TExp a) -- ^ Underlying monadic value } -- ^ Represents an expression which has type @a@, built in monadic context @m@. Built on top of 'TExp', typed -- expressions allow for type-safe splicing via: -- -- - typed quotes, written as @[|| ... ||]@ where @...@ is an expression; if -- that expression has type @a@, then the quotation has type -- @Quote m => Code m a@ -- -- - typed splices inside of typed quotes, written as @$$(...)@ where @...@ -- is an arbitrary expression of type @Quote m => Code m a@ -- -- Traditional expression quotes and splices let us construct ill-typed -- expressions: -- -- >>> fmap ppr $ runQ (unTypeCode [| True == $( [| "foo" |] ) |]) -- GHC.Types.True GHC.Classes.== "foo" -- >>> GHC.Types.True GHC.Classes.== "foo" -- error: -- • Couldn't match expected type ‘Bool’ with actual type ‘[Char]’ -- • In the second argument of ‘(==)’, namely ‘"foo"’ -- In the expression: True == "foo" -- In an equation for ‘it’: it = True == "foo" -- -- With typed expressions, the type error occurs when /constructing/ the -- Template Haskell expression: -- -- >>> fmap ppr $ runQ (unTypeCode [|| True == $$( [|| "foo" ||] ) ||]) -- error: -- • Couldn't match type ‘[Char]’ with ‘Bool’ -- Expected type: Code Q Bool -- Actual type: Code Q [Char] -- • In the Template Haskell quotation [|| "foo" ||] -- In the expression: [|| "foo" ||] -- In the Template Haskell splice $$([|| "foo" ||]) {- Note [Foralls to the right in Code] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code has the following type signature: type Code :: (Kind.Type -> Kind.Type) -> forall r. TYPE r -> Kind.Type This allows us to write data T (f :: forall r . 
(TYPE r) -> Type) = MkT (f Int) (f Int#) tcodeq :: T (Code Q) tcodeq = MkT [||5||] [||5#||] If we used the slightly more straightforward signature type Code :: foral r. (Kind.Type -> Kind.Type) -> TYPE r -> Kind.Type then the example above would become ill-typed. (See #23592 for some discussion.) -} -- | Unsafely convert an untyped code representation into a typed code -- representation. unsafeCodeCoerce :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m => m Exp -> Code m a unsafeCodeCoerce m = Code (unsafeTExpCoerce m) -- | Lift a monadic action producing code into the typed 'Code' -- representation liftCode :: forall (r :: RuntimeRep) (a :: TYPE r) m . m (TExp a) -> Code m a liftCode = Code -- | Extract the untyped representation from the typed representation unTypeCode :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m => Code m a -> m Exp unTypeCode = unTypeQ . examineCode -- | Modify the ambient monad used during code generation. For example, you -- can use `hoistCode` to handle a state effect: -- @ -- handleState :: Code (StateT Int Q) a -> Code Q a -- handleState = hoistCode (flip runState 0) -- @ hoistCode :: forall m n (r :: RuntimeRep) (a :: TYPE r) . Monad m => (forall x . m x -> n x) -> Code m a -> Code n a hoistCode f (Code a) = Code (f a) -- | Variant of '(>>=)' which allows effectful computations to be injected -- into code generation. bindCode :: forall m a (r :: RuntimeRep) (b :: TYPE r) . Monad m => m a -> (a -> Code m b) -> Code m b bindCode q k = liftCode (q >>= examineCode . k) -- | Variant of '(>>)' which allows effectful computations to be injected -- into code generation. bindCode_ :: forall m a (r :: RuntimeRep) (b :: TYPE r) . Monad m => m a -> Code m b -> Code m b bindCode_ q c = liftCode ( q >> examineCode c) -- | A useful combinator for embedding monadic actions into 'Code' -- @ -- myCode :: ... 
=> Code m a -- myCode = joinCode $ do -- x <- someSideEffect -- return (makeCodeWith x) -- @ joinCode :: forall m (r :: RuntimeRep) (a :: TYPE r) . Monad m => m (Code m a) -> Code m a joinCode = flip bindCode id ---------------------------------------------------- -- Packaged versions for the programmer, hiding the Quasi-ness -- | Report an error (True) or warning (False), -- but carry on; use 'fail' to stop. report :: Bool -> String -> Q () report b s = Q (qReport b s) {-# DEPRECATED report "Use reportError or reportWarning instead" #-} -- deprecated in 7.6 -- | Report an error to the user, but allow the current splice's computation to carry on. To abort the computation, use 'fail'. reportError :: String -> Q () reportError = report True -- | Report a warning to the user, and carry on. reportWarning :: String -> Q () reportWarning = report False -- | Recover from errors raised by 'reportError' or 'fail'. recover :: Q a -- ^ handler to invoke on failure -> Q a -- ^ computation to run -> Q a recover (Q r) (Q m) = Q (qRecover r m) -- We don't export lookupName; the Bool isn't a great API -- Instead we export lookupTypeName, lookupValueName lookupName :: Bool -> String -> Q (Maybe Name) lookupName ns s = Q (qLookupName ns s) -- | Look up the given name in the (type namespace of the) current splice's scope. See "Language.Haskell.TH.Syntax#namelookup" for more details. lookupTypeName :: String -> Q (Maybe Name) lookupTypeName s = Q (qLookupName True s) -- | Look up the given name in the (value namespace of the) current splice's scope. See "Language.Haskell.TH.Syntax#namelookup" for more details. lookupValueName :: String -> Q (Maybe Name) lookupValueName s = Q (qLookupName False s) {- Note [Name lookup] ~~~~~~~~~~~~~~~~~~ -} {- $namelookup #namelookup# The functions 'lookupTypeName' and 'lookupValueName' provide a way to query the current splice's context for what names are in scope. 
The function 'lookupTypeName' queries the type namespace, whereas 'lookupValueName' queries the value namespace, but the functions are otherwise identical. A call @lookupValueName s@ will check if there is a value with name @s@ in scope at the current splice's location. If there is, the @Name@ of this value is returned; if not, then @Nothing@ is returned. The returned name cannot be \"captured\". For example: > f = "global" > g = $( do > Just nm <- lookupValueName "f" > [| let f = "local" in $( varE nm ) |] In this case, @g = \"global\"@; the call to @lookupValueName@ returned the global @f@, and this name was /not/ captured by the local definition of @f@. The lookup is performed in the context of the /top-level/ splice being run. For example: > f = "global" > g = $( [| let f = "local" in > $(do > Just nm <- lookupValueName "f" > varE nm > ) |] ) Again in this example, @g = \"global\"@, because the call to @lookupValueName@ queries the context of the outer-most @$(...)@. Operators should be queried without any surrounding parentheses, like so: > lookupValueName "+" Qualified names are also supported, like so: > lookupValueName "Prelude.+" > lookupValueName "Prelude.map" -} {- | 'reify' looks up information about the 'Name'. It will fail with a compile error if the 'Name' is not visible. A 'Name' is visible if it is imported or defined in a prior top-level declaration group. See the documentation for 'newDeclarationGroup' for more details. It is sometimes useful to construct the argument name using 'lookupTypeName' or 'lookupValueName' to ensure that we are reifying from the right namespace. For instance, in this context: > data D = D which @D@ does @reify (mkName \"D\")@ return information about? (Answer: @D@-the-type, but don't rely on it.) To ensure we get information about @D@-the-value, use 'lookupValueName': > do > Just nm <- lookupValueName "D" > reify nm and to get information about @D@-the-type, use 'lookupTypeName'. 
-} reify :: Name -> Q Info reify v = Q (qReify v) {- | @reifyFixity nm@ attempts to find a fixity declaration for @nm@. For example, if the function @foo@ has the fixity declaration @infixr 7 foo@, then @reifyFixity 'foo@ would return @'Just' ('Fixity' 7 'InfixR')@. If the function @bar@ does not have a fixity declaration, then @reifyFixity 'bar@ returns 'Nothing', so you may assume @bar@ has 'defaultFixity'. -} reifyFixity :: Name -> Q (Maybe Fixity) reifyFixity nm = Q (qReifyFixity nm) {- | @reifyType nm@ attempts to find the type or kind of @nm@. For example, @reifyType 'not@ returns @Bool -> Bool@, and @reifyType ''Bool@ returns @Type@. This works even if there's no explicit signature and the type or kind is inferred. -} reifyType :: Name -> Q Type reifyType nm = Q (qReifyType nm) {- | Template Haskell is capable of reifying information about types and terms defined in previous declaration groups. Top-level declaration splices break up declaration groups. For an example, consider this code block. We define a datatype @X@ and then try to call 'reify' on the datatype. @ module Check where data X = X deriving Eq $(do info <- reify ''X runIO $ print info ) @ This code fails to compile, noting that @X@ is not available for reification at the site of 'reify'. We can fix this by creating a new declaration group using an empty top-level splice: @ data X = X deriving Eq $(pure []) $(do info <- reify ''X runIO $ print info ) @ We provide 'newDeclarationGroup' as a means of documenting this behavior and providing a name for the pattern. Since top level splices infer the presence of the @$( ... )@ brackets, we can also write: @ data X = X deriving Eq newDeclarationGroup $(do info <- reify ''X runIO $ print info ) @ -} newDeclarationGroup :: Q [Dec] newDeclarationGroup = pure [] {- | @reifyInstances nm tys@ returns a list of all visible instances (see below for "visible") of @nm tys@. 
That is, if @nm@ is the name of a type class, then all instances of this class at the types @tys@ are returned. Alternatively, if @nm@ is the name of a data family or type family, all instances of this family at the types @tys@ are returned. Note that this is a \"shallow\" test; the declarations returned merely have instance heads which unify with @nm tys@, they need not actually be satisfiable. - @reifyInstances ''Eq [ 'TupleT' 2 \``AppT`\` 'ConT' ''A \``AppT`\` 'ConT' ''B ]@ contains the @instance (Eq a, Eq b) => Eq (a, b)@ regardless of whether @A@ and @B@ themselves implement 'Eq' - @reifyInstances ''Show [ 'VarT' ('mkName' "a") ]@ produces every available instance of 'Show' There is one edge case: @reifyInstances ''Typeable tys@ currently always produces an empty list (no matter what @tys@ are given). In principle, the *visible* instances are * all instances defined in a prior top-level declaration group (see docs on @newDeclarationGroup@), or * all instances defined in any module transitively imported by the module being compiled However, actually searching all modules transitively below the one being compiled is unreasonably expensive, so @reifyInstances@ will report only the instance for modules that GHC has had some cause to visit during this compilation. This is a shortcoming: @reifyInstances@ might fail to report instances for a type that is otherwise unusued, or instances defined in a different component. You can work around this shortcoming by explicitly importing the modules whose instances you want to be visible. GHC issue has some discussion around this. -} reifyInstances :: Name -> [Type] -> Q [InstanceDec] reifyInstances cls tys = Q (qReifyInstances cls tys) {- | @reifyRoles nm@ returns the list of roles associated with the parameters (both visible and invisible) of the tycon @nm@. Fails if @nm@ cannot be found or is not a tycon. The returned list should never contain 'InferR'. An invisible parameter to a tycon is often a kind parameter. 
For example, if we have @ type Proxy :: forall k. k -> Type data Proxy a = MkProxy @ and @reifyRoles Proxy@, we will get @['NominalR', 'PhantomR']@. The 'NominalR' is the role of the invisible @k@ parameter. Kind parameters are always nominal. -} reifyRoles :: Name -> Q [Role] reifyRoles nm = Q (qReifyRoles nm) -- | @reifyAnnotations target@ returns the list of annotations -- associated with @target@. Only the annotations that are -- appropriately typed is returned. So if you have @Int@ and @String@ -- annotations for the same target, you have to call this function twice. reifyAnnotations :: Data a => AnnLookup -> Q [a] reifyAnnotations an = Q (qReifyAnnotations an) -- | @reifyModule mod@ looks up information about module @mod@. To -- look up the current module, call this function with the return -- value of 'Language.Haskell.TH.Lib.thisModule'. reifyModule :: Module -> Q ModuleInfo reifyModule m = Q (qReifyModule m) -- | @reifyConStrictness nm@ looks up the strictness information for the fields -- of the constructor with the name @nm@. Note that the strictness information -- that 'reifyConStrictness' returns may not correspond to what is written in -- the source code. For example, in the following data declaration: -- -- @ -- data Pair a = Pair a a -- @ -- -- 'reifyConStrictness' would return @['DecidedLazy', DecidedLazy]@ under most -- circumstances, but it would return @['DecidedStrict', DecidedStrict]@ if the -- @-XStrictData@ language extension was enabled. reifyConStrictness :: Name -> Q [DecidedStrictness] reifyConStrictness n = Q (qReifyConStrictness n) -- | Is the list of instances returned by 'reifyInstances' nonempty? -- -- If you're confused by an instance not being visible despite being -- defined in the same module and above the splice in question, see the -- docs for 'newDeclarationGroup' for a possible explanation. 
isInstance :: Name -> [Type] -> Q Bool isInstance nm tys = do { decs <- reifyInstances nm tys ; return (not (null decs)) } -- | The location at which this computation is spliced. location :: Q Loc location = Q qLocation -- |The 'runIO' function lets you run an I\/O computation in the 'Q' monad. -- Take care: you are guaranteed the ordering of calls to 'runIO' within -- a single 'Q' computation, but not about the order in which splices are run. -- -- Note: for various murky reasons, stdout and stderr handles are not -- necessarily flushed when the compiler finishes running, so you should -- flush them yourself. runIO :: IO a -> Q a runIO m = Q (qRunIO m) -- | Get the package root for the current package which is being compiled. -- This can be set explicitly with the -package-root flag but is normally -- just the current working directory. -- -- The motivation for this flag is to provide a principled means to remove the -- assumption from splices that they will be executed in the directory where the -- cabal file resides. Projects such as haskell-language-server can't and don't -- change directory when compiling files but instead set the -package-root flag -- appropriately. getPackageRoot :: Q FilePath getPackageRoot = Q qGetPackageRoot -- | Record external files that runIO is using (dependent upon). -- The compiler can then recognize that it should re-compile the Haskell file -- when an external file changes. -- -- Expects an absolute file path. -- -- Notes: -- -- * ghc -M does not know about these dependencies - it does not execute TH. -- -- * The dependency is based on file content, not a modification time addDependentFile :: FilePath -> Q () addDependentFile fp = Q (qAddDependentFile fp) -- | Obtain a temporary file path with the given suffix. The compiler will -- delete this file after compilation. addTempFile :: String -> Q FilePath addTempFile suffix = Q (qAddTempFile suffix) -- | Add additional top-level declarations. 
The added declarations will be type -- checked along with the current declaration group. addTopDecls :: [Dec] -> Q () addTopDecls ds = Q (qAddTopDecls ds) -- | addForeignFile :: ForeignSrcLang -> String -> Q () addForeignFile = addForeignSource {-# DEPRECATED addForeignFile "Use 'Language.Haskell.TH.Syntax.addForeignSource' instead" #-} -- deprecated in 8.6 -- | Emit a foreign file which will be compiled and linked to the object for -- the current module. Currently only languages that can be compiled with -- the C compiler are supported, and the flags passed as part of -optc will -- be also applied to the C compiler invocation that will compile them. -- -- Note that for non-C languages (for example C++) @extern "C"@ directives -- must be used to get symbols that we can access from Haskell. -- -- To get better errors, it is recommended to use #line pragmas when -- emitting C files, e.g. -- -- > {-# LANGUAGE CPP #-} -- > ... -- > addForeignSource LangC $ unlines -- > [ "#line " ++ show (__LINE__ + 1) ++ " " ++ show __FILE__ -- > , ... -- > ] addForeignSource :: ForeignSrcLang -> String -> Q () addForeignSource lang src = do let suffix = case lang of LangC -> "c" LangCxx -> "cpp" LangObjc -> "m" LangObjcxx -> "mm" LangAsm -> "s" LangJs -> "js" RawObject -> "a" path <- addTempFile suffix runIO $ writeFile path src addForeignFilePath lang path -- | Same as 'addForeignSource', but expects to receive a path pointing to the -- foreign file instead of a 'String' of its contents. Consider using this in -- conjunction with 'addTempFile'. -- -- This is a good alternative to 'addForeignSource' when you are trying to -- directly link in an object file. addForeignFilePath :: ForeignSrcLang -> FilePath -> Q () addForeignFilePath lang fp = Q (qAddForeignFilePath lang fp) -- | Add a finalizer that will run in the Q monad after the current module has -- been type checked. This only makes sense when run within a top-level splice. 
-- -- The finalizer is given the local type environment at the splice point. Thus -- 'reify' is able to find the local definitions when executed inside the -- finalizer. addModFinalizer :: Q () -> Q () addModFinalizer act = Q (qAddModFinalizer (unQ act)) -- | Adds a core plugin to the compilation pipeline. -- -- @addCorePlugin m@ has almost the same effect as passing @-fplugin=m@ to ghc -- in the command line. The major difference is that the plugin module @m@ -- must not belong to the current package. When TH executes, it is too late -- to tell the compiler that we needed to compile first a plugin module in the -- current package. addCorePlugin :: String -> Q () addCorePlugin plugin = Q (qAddCorePlugin plugin) -- | Get state from the 'Q' monad. Note that the state is local to the -- Haskell module in which the Template Haskell expression is executed. getQ :: Typeable a => Q (Maybe a) getQ = Q qGetQ -- | Replace the state in the 'Q' monad. Note that the state is local to the -- Haskell module in which the Template Haskell expression is executed. putQ :: Typeable a => a -> Q () putQ x = Q (qPutQ x) -- | Determine whether the given language extension is enabled in the 'Q' monad. isExtEnabled :: Extension -> Q Bool isExtEnabled ext = Q (qIsExtEnabled ext) -- | List all enabled language extensions. extsEnabled :: Q [Extension] extsEnabled = Q qExtsEnabled -- | Add Haddock documentation to the specified location. This will overwrite -- any documentation at the location if it already exists. This will reify the -- specified name, so it must be in scope when you call it. If you want to add -- documentation to something that you are currently splicing, you can use -- 'addModFinalizer' e.g. -- -- > do -- > let nm = mkName "x" -- > addModFinalizer $ putDoc (DeclDoc nm) "Hello" -- > [d| $(varP nm) = 42 |] -- -- The helper functions 'withDecDoc' and 'withDecsDoc' will do this for you, as -- will the 'funD_doc' and other @_doc@ combinators. 
-- You most likely want to have the @-haddock@ flag turned on when using this. -- Adding documentation to anything outside of the current module will cause an -- error. putDoc :: DocLoc -> String -> Q () putDoc t s = Q (qPutDoc t s) -- | Retrieves the Haddock documentation at the specified location, if one -- exists. -- It can be used to read documentation on things defined outside of the current -- module, provided that those modules were compiled with the @-haddock@ flag. getDoc :: DocLoc -> Q (Maybe String) getDoc n = Q (qGetDoc n) instance MonadIO Q where liftIO = runIO instance Quasi Q where qNewName = newName qReport = report qRecover = recover qReify = reify qReifyFixity = reifyFixity qReifyType = reifyType qReifyInstances = reifyInstances qReifyRoles = reifyRoles qReifyAnnotations = reifyAnnotations qReifyModule = reifyModule qReifyConStrictness = reifyConStrictness qLookupName = lookupName qLocation = location qGetPackageRoot = getPackageRoot qAddDependentFile = addDependentFile qAddTempFile = addTempFile qAddTopDecls = addTopDecls qAddForeignFilePath = addForeignFilePath qAddModFinalizer = addModFinalizer qAddCorePlugin = addCorePlugin qGetQ = getQ qPutQ = putQ qIsExtEnabled = isExtEnabled qExtsEnabled = extsEnabled qPutDoc = putDoc qGetDoc = getDoc ---------------------------------------------------- -- The following operations are used solely in GHC.HsToCore.Quote when -- desugaring brackets. They are not necessary for the user, who can use -- ordinary return and (>>=) etc -- | This function is only used in 'GHC.HsToCore.Quote' when desugaring -- brackets. This is not necessary for the user, who can use the ordinary -- 'return' and '(>>=)' operations. sequenceQ :: forall m . Monad m => forall a . [m a] -> m [a] sequenceQ = sequence oneName, manyName :: Name -- | Synonym for @''GHC.Types.One'@, from @ghc-prim@. oneName = mkNameG DataName "ghc-prim" "GHC.Types" "One" -- | Synonym for @''GHC.Types.Many'@, from @ghc-prim@. 
manyName = mkNameG DataName "ghc-prim" "GHC.Types" "Many" ----------------------------------------------------- -- Names and uniques ----------------------------------------------------- -- | The name of a module. newtype ModName = ModName String -- Module name deriving (Show,Eq,Ord,Data,Generic) -- | The name of a package. newtype PkgName = PkgName String -- package name deriving (Show,Eq,Ord,Data,Generic) -- | Obtained from 'reifyModule' and 'Language.Haskell.TH.Lib.thisModule'. data Module = Module PkgName ModName -- package qualified module name deriving (Show,Eq,Ord,Data,Generic) -- | An "Occurence Name". newtype OccName = OccName String deriving (Show,Eq,Ord,Data,Generic) -- | Smart constructor for 'ModName' mkModName :: String -> ModName mkModName s = ModName s -- | Accessor for 'ModName' modString :: ModName -> String modString (ModName m) = m -- | Smart constructor for 'PkgName' mkPkgName :: String -> PkgName mkPkgName s = PkgName s -- | Accessor for 'PkgName' pkgString :: PkgName -> String pkgString (PkgName m) = m ----------------------------------------------------- -- OccName ----------------------------------------------------- -- | Smart constructor for 'OccName' mkOccName :: String -> OccName mkOccName s = OccName s -- | Accessor for 'OccName' occString :: OccName -> String occString (OccName occ) = occ ----------------------------------------------------- -- Names ----------------------------------------------------- -- -- For "global" names ('NameG') we need a totally unique name, -- so we must include the name-space of the thing -- -- For unique-numbered things ('NameU'), we've got a unique reference -- anyway, so no need for name space -- -- For dynamically bound thing ('NameS') we probably want them to -- in a context-dependent way, so again we don't want the name -- space. 
For example: -- -- > let v = mkName "T" in [| data $v = $v |] -- -- Here we use the same Name for both type constructor and data constructor -- -- -- NameL and NameG are bound *outside* the TH syntax tree -- either globally (NameG) or locally (NameL). Ex: -- -- > f x = $(h [| (map, x) |]) -- -- The 'map' will be a NameG, and 'x' wil be a NameL -- -- These Names should never appear in a binding position in a TH syntax tree {- $namecapture #namecapture# Much of 'Name' API is concerned with the problem of /name capture/, which can be seen in the following example. > f expr = [| let x = 0 in $expr |] > ... > g x = $( f [| x |] ) > h y = $( f [| y |] ) A naive desugaring of this would yield: > g x = let x = 0 in x > h y = let x = 0 in y All of a sudden, @g@ and @h@ have different meanings! In this case, we say that the @x@ in the RHS of @g@ has been /captured/ by the binding of @x@ in @f@. What we actually want is for the @x@ in @f@ to be distinct from the @x@ in @g@, so we get the following desugaring: > g x = let x' = 0 in x > h y = let x' = 0 in y which avoids name capture as desired. In the general case, we say that a @Name@ can be captured if the thing it refers to can be changed by adding new declarations. -} {- | An abstract type representing names in the syntax tree. 'Name's can be constructed in several ways, which come with different name-capture guarantees (see "Language.Haskell.TH.Syntax#namecapture" for an explanation of name capture): * the built-in syntax @'f@ and @''T@ can be used to construct names, The expression @'f@ gives a @Name@ which refers to the value @f@ currently in scope, and @''T@ gives a @Name@ which refers to the type @T@ currently in scope. These names can never be captured. * 'lookupValueName' and 'lookupTypeName' are similar to @'f@ and @''T@ respectively, but the @Name@s are looked up at the point where the current splice is being run. These names can never be captured. 
* 'newName' monadically generates a new name, which can never be captured. * 'mkName' generates a capturable name. Names constructed using @newName@ and @mkName@ may be used in bindings (such as @let x = ...@ or @\x -> ...@), but names constructed using @lookupValueName@, @lookupTypeName@, @'f@, @''T@ may not. -} data Name = Name OccName NameFlavour deriving (Data, Eq, Generic) instance Ord Name where -- check if unique is different before looking at strings (Name o1 f1) `compare` (Name o2 f2) = (f1 `compare` f2) `thenCmp` (o1 `compare` o2) data NameFlavour = NameS -- ^ An unqualified name; dynamically bound | NameQ ModName -- ^ A qualified name; dynamically bound | NameU !Uniq -- ^ A unique local name | NameL !Uniq -- ^ Local name bound outside of the TH AST | NameG NameSpace PkgName ModName -- ^ Global name bound outside of the TH AST: -- An original name (occurrences only, not binders) -- Need the namespace too to be sure which -- thing we are naming deriving ( Data, Eq, Ord, Show, Generic ) data NameSpace = VarName -- ^ Variables | DataName -- ^ Data constructors | TcClsName -- ^ Type constructors and classes; Haskell has them -- in the same name space for now. | FldName { fldParent :: !String -- ^ The textual name of the parent of the field. -- -- - For a field of a datatype, this is the name of the first constructor -- of the datatype (regardless of whether this constructor has this field). -- - For a field of a pattern synonym, this is the name of the pattern synonym. } deriving( Eq, Ord, Show, Data, Generic ) -- | @Uniq@ is used by GHC to distinguish names from each other. type Uniq = Integer -- | The name without its module prefix. -- -- ==== __Examples__ -- -- >>> nameBase ''Data.Either.Either -- "Either" -- >>> nameBase (mkName "foo") -- "foo" -- >>> nameBase (mkName "Module.foo") -- "foo" nameBase :: Name -> String nameBase (Name occ _) = occString occ -- | Module prefix of a name, if it exists. 
-- -- ==== __Examples__ -- -- >>> nameModule ''Data.Either.Either -- Just "Data.Either" -- >>> nameModule (mkName "foo") -- Nothing -- >>> nameModule (mkName "Module.foo") -- Just "Module" nameModule :: Name -> Maybe String nameModule (Name _ (NameQ m)) = Just (modString m) nameModule (Name _ (NameG _ _ m)) = Just (modString m) nameModule _ = Nothing -- | A name's package, if it exists. -- -- ==== __Examples__ -- -- >>> namePackage ''Data.Either.Either -- Just "base" -- >>> namePackage (mkName "foo") -- Nothing -- >>> namePackage (mkName "Module.foo") -- Nothing namePackage :: Name -> Maybe String namePackage (Name _ (NameG _ p _)) = Just (pkgString p) namePackage _ = Nothing -- | Returns whether a name represents an occurrence of a top-level variable -- ('VarName'), data constructor ('DataName'), type constructor, or type class -- ('TcClsName'). If we can't be sure, it returns 'Nothing'. -- -- ==== __Examples__ -- -- >>> nameSpace 'Prelude.id -- Just VarName -- >>> nameSpace (mkName "id") -- Nothing -- only works for top-level variable names -- >>> nameSpace 'Data.Maybe.Just -- Just DataName -- >>> nameSpace ''Data.Maybe.Maybe -- Just TcClsName -- >>> nameSpace ''Data.Ord.Ord -- Just TcClsName nameSpace :: Name -> Maybe NameSpace nameSpace (Name _ (NameG ns _ _)) = Just ns nameSpace _ = Nothing {- | Generate a capturable name. Occurrences of such names will be resolved according to the Haskell scoping rules at the occurrence site. For example: > f = [| pi + $(varE (mkName "pi")) |] > ... > g = let pi = 3 in $f In this case, @g@ is desugared to > g = Prelude.pi + 3 Note that @mkName@ may be used with qualified names: > mkName "Prelude.pi" See also 'Language.Haskell.TH.Lib.dyn' for a useful combinator. 
The above example could be rewritten using 'Language.Haskell.TH.Lib.dyn' as > f = [| pi + $(dyn "pi") |] -} mkName :: String -> Name -- The string can have a '.', thus "Foo.baz", -- giving a dynamically-bound qualified name, -- in which case we want to generate a NameQ -- -- Parse the string to see if it has a "." in it -- so we know whether to generate a qualified or unqualified name -- It's a bit tricky because we need to parse -- -- > Foo.Baz.x as Qual Foo.Baz x -- -- So we parse it from back to front mkName str = split [] (reverse str) where split occ [] = Name (mkOccName occ) NameS split occ ('.':rev) | not (null occ) , is_rev_mod_name rev = Name (mkOccName occ) (NameQ (mkModName (reverse rev))) -- The 'not (null occ)' guard ensures that -- mkName "&." = Name "&." NameS -- The 'is_rev_mod' guards ensure that -- mkName ".&" = Name ".&" NameS -- mkName "^.." = Name "^.." NameS -- #8633 -- mkName "Data.Bits..&" = Name ".&" (NameQ "Data.Bits") -- This rather bizarre case actually happened; (.&.) is in Data.Bits split occ (c:rev) = split (c:occ) rev -- Recognises a reversed module name xA.yB.C, -- with at least one component, -- and each component looks like a module name -- (i.e. 
non-empty, starts with capital, all alpha) is_rev_mod_name rev_mod_str | (compt, rest) <- break (== '.') rev_mod_str , not (null compt), isUpper (last compt), all is_mod_char compt = case rest of [] -> True (_dot : rest') -> is_rev_mod_name rest' | otherwise = False is_mod_char c = isAlphaNum c || c == '_' || c == '\'' -- | Only used internally mkNameU :: String -> Uniq -> Name mkNameU s u = Name (mkOccName s) (NameU u) -- | Only used internally mkNameL :: String -> Uniq -> Name mkNameL s u = Name (mkOccName s) (NameL u) -- | Only used internally mkNameQ :: String -> String -> Name mkNameQ mn occ = Name (mkOccName occ) (NameQ (mkModName mn)) -- | Used for 'x etc, but not available to the programmer mkNameG :: NameSpace -> String -> String -> String -> Name mkNameG ns pkg modu occ = Name (mkOccName occ) (NameG ns (mkPkgName pkg) (mkModName modu)) mkNameS :: String -> Name mkNameS n = Name (mkOccName n) NameS mkNameG_v, mkNameG_tc, mkNameG_d :: String -> String -> String -> Name mkNameG_v = mkNameG VarName mkNameG_tc = mkNameG TcClsName mkNameG_d = mkNameG DataName mkNameG_fld :: String -- ^ package -> String -- ^ module -> String -- ^ parent (first constructor of parent type) -> String -- ^ field name -> Name mkNameG_fld pkg modu con occ = mkNameG (FldName con) pkg modu occ data NameIs = Alone -- ^ @name@ | Applied -- ^ @(name)@ | Infix -- ^ @\`name\`@ showName :: Name -> String showName = showName' Alone showName' :: NameIs -> Name -> String showName' ni nm = case ni of Alone -> nms Applied | pnam -> nms | otherwise -> "(" ++ nms ++ ")" Infix | pnam -> "`" ++ nms ++ "`" | otherwise -> nms where -- For now, we make the NameQ and NameG print the same, even though -- NameQ is a qualified name (so what it means depends on what the -- current scope is), and NameG is an original name (so its meaning -- should be independent of what's in scope. -- We may well want to distinguish them in the end. 
-- Ditto NameU and NameL nms = case nm of Name occ NameS -> occString occ Name occ (NameQ m) -> modString m ++ "." ++ occString occ Name occ (NameG _ _ m) -> modString m ++ "." ++ occString occ Name occ (NameU u) -> occString occ ++ "_" ++ show u Name occ (NameL u) -> occString occ ++ "_" ++ show u pnam = classify nms -- True if we are function style, e.g. f, [], (,) -- False if we are operator style, e.g. +, :+ classify "" = False -- shouldn't happen; . operator is handled below classify (x:xs) | isAlpha x || (x `elem` "_[]()") = case dropWhile (/='.') xs of (_:xs') -> classify xs' [] -> True | otherwise = False instance Show Name where show = showName -- Tuple data and type constructors -- | Tuple data constructor tupleDataName :: Int -> Name -- | Tuple type constructor tupleTypeName :: Int -> Name tupleDataName n = mk_tup_name n DataName True tupleTypeName n = mk_tup_name n TcClsName True -- Unboxed tuple data and type constructors -- | Unboxed tuple data constructor unboxedTupleDataName :: Int -> Name -- | Unboxed tuple type constructor unboxedTupleTypeName :: Int -> Name unboxedTupleDataName n = mk_tup_name n DataName False unboxedTupleTypeName n = mk_tup_name n TcClsName False mk_tup_name :: Int -> NameSpace -> Bool -> Name mk_tup_name n space boxed = Name (mkOccName tup_occ) (NameG space (mkPkgName "ghc-prim") tup_mod) where withParens thing | boxed = "(" ++ thing ++ ")" | otherwise = "(#" ++ thing ++ "#)" tup_occ | n == 0, space == TcClsName = if boxed then "Unit" else "Unit#" | n == 1 = if boxed then solo else unboxed_solo | space == TcClsName = "Tuple" ++ show n ++ if boxed then "" else "#" | otherwise = withParens (replicate n_commas ',') n_commas = n - 1 tup_mod = mkModName (if boxed then "GHC.Tuple" else "GHC.Types") solo | space == DataName = "MkSolo" | otherwise = "Solo" unboxed_solo = solo ++ "#" -- Unboxed sum data and type constructors -- | Unboxed sum data constructor unboxedSumDataName :: SumAlt -> SumArity -> Name -- | Unboxed sum type 
constructor unboxedSumTypeName :: SumArity -> Name unboxedSumDataName alt arity | alt > arity = error $ prefix ++ "Index out of bounds." ++ debug_info | alt <= 0 = error $ prefix ++ "Alt must be > 0." ++ debug_info | arity < 2 = error $ prefix ++ "Arity must be >= 2." ++ debug_info | otherwise = Name (mkOccName sum_occ) (NameG DataName (mkPkgName "ghc-prim") (mkModName "GHC.Types")) where prefix = "unboxedSumDataName: " debug_info = " (alt: " ++ show alt ++ ", arity: " ++ show arity ++ ")" -- Synced with the definition of mkSumDataConOcc in GHC.Builtin.Types sum_occ = '(' : '#' : bars nbars_before ++ '_' : bars nbars_after ++ "#)" bars i = replicate i '|' nbars_before = alt - 1 nbars_after = arity - alt unboxedSumTypeName arity | arity < 2 = error $ "unboxedSumTypeName: Arity must be >= 2." ++ " (arity: " ++ show arity ++ ")" | otherwise = Name (mkOccName sum_occ) (NameG TcClsName (mkPkgName "ghc-prim") (mkModName "GHC.Types")) where -- Synced with the definition of mkSumTyConOcc in GHC.Builtin.Types sum_occ = "Sum" ++ show arity ++ "#" ----------------------------------------------------- -- Locations ----------------------------------------------------- -- | A location within a source file. data Loc = Loc { loc_filename :: String , loc_package :: String , loc_module :: String , loc_start :: CharPos , loc_end :: CharPos } deriving( Show, Eq, Ord, Data, Generic ) type CharPos = (Int, Int) -- ^ Line and character position ----------------------------------------------------- -- -- The Info returned by reification -- ----------------------------------------------------- -- | Obtained from 'reify' in the 'Q' Monad. data Info = -- | A class, with a list of its visible instances ClassI Dec [InstanceDec] -- | A class method | ClassOpI Name Type ParentName -- | A \"plain\" type constructor. \"Fancier\" type constructors are returned -- using 'PrimTyConI' or 'FamilyI' as appropriate. 
At present, this reified -- declaration will never have derived instances attached to it (if you wish -- to check for an instance, see 'reifyInstances'). | TyConI Dec -- | A type or data family, with a list of its visible instances. A closed -- type family is returned with 0 instances. | FamilyI Dec [InstanceDec] -- | A \"primitive\" type constructor, which can't be expressed with a 'Dec'. -- Examples: @(->)@, @Int#@. | PrimTyConI Name Arity Unlifted -- | A data constructor | DataConI Name Type ParentName -- | A pattern synonym | PatSynI Name PatSynType {- | A \"value\" variable (as opposed to a type variable, see 'TyVarI'). The @Maybe Dec@ field contains @Just@ the declaration which defined the variable - including the RHS of the declaration - or else @Nothing@, in the case where the RHS is unavailable to the compiler. At present, this value is /always/ @Nothing@: returning the RHS has not yet been implemented and is tracked by [GHC #14474](https://gitlab.haskell.org/ghc/ghc/-/issues/14474). -} | VarI Name Type (Maybe Dec) {- | A type variable. The @Type@ field contains the type which underlies the variable. At present, this is always @'VarT' theName@, but future changes may permit refinement of this. -} | TyVarI -- Scoped type variable Name Type -- What it is bound to deriving( Show, Eq, Ord, Data, Generic ) -- | Obtained from 'reifyModule' in the 'Q' Monad. data ModuleInfo = -- | Contains the import list of the module. ModuleInfo [Module] deriving( Show, Eq, Ord, Data, Generic ) {- | In 'ClassOpI' and 'DataConI', name of the parent class or type -} type ParentName = Name -- | In 'UnboxedSumE' and 'UnboxedSumP', the number associated with a -- particular data constructor. 'SumAlt's are one-indexed and should never -- exceed the value of its corresponding 'SumArity'. 
For example: -- -- * @(\#_|\#)@ has 'SumAlt' 1 (out of a total 'SumArity' of 2) -- -- * @(\#|_\#)@ has 'SumAlt' 2 (out of a total 'SumArity' of 2) type SumAlt = Int -- | In 'UnboxedSumE', 'UnboxedSumT', and 'UnboxedSumP', the total number of -- 'SumAlt's. For example, @(\#|\#)@ has a 'SumArity' of 2. type SumArity = Int -- | In 'PrimTyConI', arity of the type constructor type Arity = Int -- | In 'PrimTyConI', is the type constructor unlifted? type Unlifted = Bool -- | 'InstanceDec' describes a single instance of a class or type function. -- It is just a 'Dec', but guaranteed to be one of the following: -- -- * 'InstanceD' (with empty @['Dec']@) -- -- * 'DataInstD' or 'NewtypeInstD' (with empty derived @['Name']@) -- -- * 'TySynInstD' type InstanceDec = Dec -- | Fixity, as specified in a @infix[lr] n@ declaration. data Fixity = Fixity Int FixityDirection deriving( Eq, Ord, Show, Data, Generic ) -- | The associativity of an operator, as in an @infix@ declaration. data FixityDirection = InfixL | InfixR | InfixN deriving( Eq, Ord, Show, Data, Generic ) -- | Highest allowed operator precedence for 'Fixity' constructor (answer: 9) maxPrecedence :: Int maxPrecedence = (9::Int) -- | Default fixity: @infixl 9@ defaultFixity :: Fixity defaultFixity = Fixity maxPrecedence InfixL {- Note [Unresolved infix] ~~~~~~~~~~~~~~~~~~~~~~~ -} {- $infix #infix# When implementing antiquotation for quasiquoters, one often wants to parse strings into expressions: > parse :: String -> Maybe Exp But how should we parse @a + b * c@? If we don't know the fixities of @+@ and @*@, we don't know whether to parse it as @a + (b * c)@ or @(a + b) * c@. In cases like this, use 'UInfixE', 'UInfixP', 'UInfixT', or 'PromotedUInfixT', which stand for \"unresolved infix expression / pattern / type / promoted constructor\", respectively. 
When the compiler is given a splice containing a tree of @UInfixE@ applications such as > UInfixE > (UInfixE e1 op1 e2) > op2 > (UInfixE e3 op3 e4) it will look up and the fixities of the relevant operators and reassociate the tree as necessary. * trees will not be reassociated across 'ParensE', 'ParensP', or 'ParensT', which are of use for parsing expressions like > (a + b * c) + d * e * 'InfixE', 'InfixP', 'InfixT', and 'PromotedInfixT' expressions are never reassociated. * The 'UInfixE' constructor doesn't support sections. Sections such as @(a *)@ have no ambiguity, so 'InfixE' suffices. For longer sections such as @(a + b * c -)@, use an 'InfixE' constructor for the outer-most section, and use 'UInfixE' constructors for all other operators: > InfixE > Just (UInfixE ...a + b * c...) > op > Nothing Sections such as @(a + b +)@ and @((a + b) +)@ should be rendered into 'Exp's differently: > (+ a + b) ---> InfixE Nothing + (Just $ UInfixE a + b) > -- will result in a fixity error if (+) is left-infix > (+ (a + b)) ---> InfixE Nothing + (Just $ ParensE $ UInfixE a + b) > -- no fixity errors * Quoted expressions such as > [| a * b + c |] :: Q Exp > [p| a : b : c |] :: Q Pat > [t| T + T |] :: Q Type will never contain 'UInfixE', 'UInfixP', 'UInfixT', 'PromotedUInfixT', 'InfixT', 'PromotedInfixT, 'ParensE', 'ParensP', or 'ParensT' constructors. -} ----------------------------------------------------- -- -- The main syntax data types -- ----------------------------------------------------- -- | A Haskell literal. Note that the numeric types are all in terms of either -- 'Integer' or 'Rational', regardless of the type they represent. The extra -- precision reflects the textual representation in source code. data Lit = CharL Char -- ^ @\'c\'@ | StringL String -- ^ @"string"@ | IntegerL Integer -- ^ @123@. Used for overloaded and non-overloaded -- literals. We don't have a good way to -- represent non-overloaded literals at -- the moment. Maybe that doesn't matter? 
| RationalL Rational -- ^ @1.23@. See above comment on 'IntegerL'. | IntPrimL Integer -- ^ @123#@ | WordPrimL Integer -- ^ @123##@ | FloatPrimL Rational -- ^ @1.23#@ | DoublePrimL Rational -- ^ @1.23##@ | StringPrimL [Word8] -- ^ @"string"#@. A primitive C-style string, type 'Addr#' | BytesPrimL Bytes -- ^ Some raw bytes, type 'Addr#': | CharPrimL Char -- ^ @\'c\'#@ deriving( Show, Eq, Ord, Data, Generic ) -- We could add Int, Float, Double etc, as we do in HsLit, -- but that could complicate the -- supposedly-simple TH.Syntax literal type -- | Raw bytes embedded into the binary. -- -- Avoid using Bytes constructor directly as it is likely to change in the -- future. Use helpers such as `mkBytes` in Language.Haskell.TH.Lib instead. data Bytes = Bytes { bytesPtr :: ForeignPtr Word8 -- ^ Pointer to the data , bytesOffset :: Word -- ^ Offset from the pointer , bytesSize :: Word -- ^ Number of bytes -- Maybe someday: -- , bytesAlignement :: Word -- ^ Alignement constraint -- , bytesReadOnly :: Bool -- ^ Shall we embed into a read-only -- -- section or not -- , bytesInitialized :: Bool -- ^ False: only use `bytesSize` to allocate -- -- an uninitialized region } deriving (Data,Generic) -- We can't derive Show instance for Bytes because we don't want to show the -- pointer value but the actual bytes (similarly to what ByteString does). See -- #16457. instance Show Bytes where show b = unsafePerformIO $ withForeignPtr (bytesPtr b) $ \ptr -> peekCStringLen ( ptr `plusPtr` fromIntegral (bytesOffset b) , fromIntegral (bytesSize b) ) -- We can't derive Eq and Ord instances for Bytes because we don't want to -- compare pointer values but the actual bytes (similarly to what ByteString -- does). 
See #16457 instance Eq Bytes where (==) = eqBytes instance Ord Bytes where compare = compareBytes eqBytes :: Bytes -> Bytes -> Bool eqBytes a@(Bytes fp off len) b@(Bytes fp' off' len') | len /= len' = False -- short cut on length | fp == fp' && off == off' = True -- short cut for the same bytes | otherwise = compareBytes a b == EQ compareBytes :: Bytes -> Bytes -> Ordering compareBytes (Bytes _ _ 0) (Bytes _ _ 0) = EQ -- short cut for empty Bytes compareBytes (Bytes fp1 off1 len1) (Bytes fp2 off2 len2) = unsafePerformIO $ withForeignPtr fp1 $ \p1 -> withForeignPtr fp2 $ \p2 -> do i <- memcmp (p1 `plusPtr` fromIntegral off1) (p2 `plusPtr` fromIntegral off2) (fromIntegral (min len1 len2)) return $! (i `compare` 0) <> (len1 `compare` len2) foreign import ccall unsafe "memcmp" memcmp :: Ptr a -> Ptr b -> CSize -> IO CInt -- | Pattern in Haskell given in @{}@ data Pat = LitP Lit -- ^ @{ 5 or \'c\' }@ | VarP Name -- ^ @{ x }@ | TupP [Pat] -- ^ @{ (p1,p2) }@ | UnboxedTupP [Pat] -- ^ @{ (\# p1,p2 \#) }@ | UnboxedSumP Pat SumAlt SumArity -- ^ @{ (\#|p|\#) }@ | ConP Name [Type] [Pat] -- ^ @data T1 = C1 t1 t2; {C1 \@ty1 p1 p2} = e@ | InfixP Pat Name Pat -- ^ @foo ({x :+ y}) = e@ | UInfixP Pat Name Pat -- ^ @foo ({x :+ y}) = e@ -- -- See "Language.Haskell.TH.Syntax#infix" | ParensP Pat -- ^ @{(p)}@ -- -- See "Language.Haskell.TH.Syntax#infix" | TildeP Pat -- ^ @{ ~p }@ | BangP Pat -- ^ @{ !p }@ | AsP Name Pat -- ^ @{ x \@ p }@ | WildP -- ^ @{ _ }@ | RecP Name [FieldPat] -- ^ @f (Pt { pointx = x }) = g x@ | ListP [ Pat ] -- ^ @{ [1,2,3] }@ | SigP Pat Type -- ^ @{ p :: t }@ | ViewP Exp Pat -- ^ @{ e -> p }@ | TypeP Type -- ^ @{ type p }@ | InvisP Type -- ^ @{ @p }@ | OrP (NonEmpty Pat) -- ^ @{ p1; p2 }@ deriving( Show, Eq, Ord, Data, Generic ) -- | A (field name, pattern) pair. See 'RecP'. 
type FieldPat = (Name,Pat) -- | A @case@-alternative data Match = Match Pat Body [Dec] -- ^ @case e of { pat -> body where decs }@ deriving( Show, Eq, Ord, Data, Generic ) -- | A clause consists of patterns, guards, a body expression, and a list of -- declarations under a @where@. Clauses are seen in equations for function -- definitions, @case@-experssions, explicitly-bidirectional pattern synonyms, -- etc. data Clause = Clause [Pat] Body [Dec] -- ^ @f { p1 p2 = body where decs }@ deriving( Show, Eq, Ord, Data, Generic ) -- | A Haskell expression. data Exp = VarE Name -- ^ @{ x }@ | ConE Name -- ^ @data T1 = C1 t1 t2; p = {C1} e1 e2 @ | LitE Lit -- ^ @{ 5 or \'c\'}@ | AppE Exp Exp -- ^ @{ f x }@ | AppTypeE Exp Type -- ^ @{ f \@Int }@ | InfixE (Maybe Exp) Exp (Maybe Exp) -- ^ @{x + y} or {(x+)} or {(+ x)} or {(+)}@ -- It's a bit gruesome to use an Exp as the operator when a Name -- would suffice. Historically, Exp was used to make it easier to -- distinguish between infix constructors and non-constructors. -- This is a bit overkill, since one could just as well call -- `startsConId` or `startsConSym` (from `GHC.Lexeme`) on a Name. -- Unfortunately, changing this design now would involve lots of -- code churn for consumers of the TH API, so we continue to use -- an Exp as the operator and perform an extra check during conversion -- to ensure that the Exp is a constructor or a variable (#16895). | UInfixE Exp Exp Exp -- ^ @{x + y}@ -- -- See "Language.Haskell.TH.Syntax#infix" | ParensE Exp -- ^ @{ (e) }@ -- -- See "Language.Haskell.TH.Syntax#infix" | LamE [Pat] Exp -- ^ @{ \\ p1 p2 -> e }@ | LamCaseE [Match] -- ^ @{ \\case m1; m2 }@ | LamCasesE [Clause] -- ^ @{ \\cases m1; m2 }@ | TupE [Maybe Exp] -- ^ @{ (e1,e2) } @ -- -- The 'Maybe' is necessary for handling -- tuple sections. 
-- -- > (1,) -- -- translates to -- -- > TupE [Just (LitE (IntegerL 1)),Nothing] | UnboxedTupE [Maybe Exp] -- ^ @{ (\# e1,e2 \#) } @ -- -- The 'Maybe' is necessary for handling -- tuple sections. -- -- > (# 'c', #) -- -- translates to -- -- > UnboxedTupE [Just (LitE (CharL 'c')),Nothing] | UnboxedSumE Exp SumAlt SumArity -- ^ @{ (\#|e|\#) }@ | CondE Exp Exp Exp -- ^ @{ if e1 then e2 else e3 }@ | MultiIfE [(Guard, Exp)] -- ^ @{ if | g1 -> e1 | g2 -> e2 }@ | LetE [Dec] Exp -- ^ @{ let { x=e1; y=e2 } in e3 }@ | CaseE Exp [Match] -- ^ @{ case e of m1; m2 }@ | DoE (Maybe ModName) [Stmt] -- ^ @{ do { p <- e1; e2 } }@ or a qualified do if -- the module name is present | MDoE (Maybe ModName) [Stmt] -- ^ @{ mdo { x <- e1 y; y <- e2 x; } }@ or a qualified -- mdo if the module name is present | CompE [Stmt] -- ^ @{ [ (x,y) | x <- xs, y <- ys ] }@ -- -- The result expression of the comprehension is -- the /last/ of the @'Stmt'@s, and should be a 'NoBindS'. -- -- E.g. translation: -- -- > [ f x | x <- xs ] -- -- > CompE [BindS (VarP x) (VarE xs), NoBindS (AppE (VarE f) (VarE x))] | ArithSeqE Range -- ^ @{ [ 1 ,2 .. 10 ] }@ | ListE [ Exp ] -- ^ @{ [1,2,3] }@ | SigE Exp Type -- ^ @{ e :: t }@ | RecConE Name [FieldExp] -- ^ @{ T { x = y, z = w } }@ | RecUpdE Exp [FieldExp] -- ^ @{ (f x) { z = w } }@ | StaticE Exp -- ^ @{ static e }@ | UnboundVarE Name -- ^ @{ _x }@ -- -- This is used for holes or unresolved -- identifiers in AST quotes. Note that -- it could either have a variable name -- or constructor name. | LabelE String -- ^ @{ #x }@ ( Overloaded label ) | ImplicitParamVarE String -- ^ @{ ?x }@ ( Implicit parameter ) | GetFieldE Exp String -- ^ @{ exp.field }@ ( Overloaded Record Dot ) | ProjectionE (NonEmpty String) -- ^ @(.x)@ or @(.x.y)@ (Record projections) | TypedBracketE Exp -- ^ @[|| e ||]@ | TypedSpliceE Exp -- ^ @$$e@ | TypeE Type -- ^ @{ type t }@ | ForallE [TyVarBndr Specificity] Exp -- ^ @forall \. 
\@ | ForallVisE [TyVarBndr ()] Exp -- ^ @forall \ -> \@ | ConstrainedE [Exp] Exp -- ^ @\ => \@ deriving( Show, Eq, Ord, Data, Generic ) -- | A (field name, expression) pair. See 'RecConE' and 'RecUpdE'. type FieldExp = (Name,Exp) -- Omitted: implicit parameters -- | A potentially guarded expression, as in function definitions or case -- alternatives. data Body = GuardedB [(Guard,Exp)] -- ^ @f p { | e1 = e2 -- | e3 = e4 } -- where ds@ | NormalB Exp -- ^ @f p { = e } where ds@ deriving( Show, Eq, Ord, Data, Generic ) -- | A single guard. data Guard = NormalG Exp -- ^ @f x { | odd x } = x@ | PatG [Stmt] -- ^ @f x { | Just y <- x, Just z <- y } = z@ deriving( Show, Eq, Ord, Data, Generic ) -- | A single statement, as in @do@-notation. data Stmt = BindS Pat Exp -- ^ @p <- e@ | LetS [ Dec ] -- ^ @{ let { x=e1; y=e2 } }@ | NoBindS Exp -- ^ @e@ | ParS [[Stmt]] -- ^ @x <- e1 | s2, s3 | s4@ (in 'CompE') | RecS [Stmt] -- ^ @rec { s1; s2 }@ deriving( Show, Eq, Ord, Data, Generic ) -- | A list/enum range expression. data Range = FromR Exp -- ^ @[n ..]@ | FromThenR Exp Exp -- ^ @[n, m ..]@ | FromToR Exp Exp -- ^ @[n .. m]@ | FromThenToR Exp Exp Exp -- ^ @[n, m .. k]@ deriving( Show, Eq, Ord, Data, Generic ) -- | A single declaration. 
data Dec = FunD Name [Clause] -- ^ @{ f p1 p2 = b where decs }@ | ValD Pat Body [Dec] -- ^ @{ p = b where decs }@ | DataD Cxt Name [TyVarBndr BndrVis] (Maybe Kind) -- Kind signature (allowed only for GADTs) [Con] [DerivClause] -- ^ @{ data Cxt x => T x = A x | B (T x) -- deriving (Z,W) -- deriving stock Eq }@ | NewtypeD Cxt Name [TyVarBndr BndrVis] (Maybe Kind) -- Kind signature Con [DerivClause] -- ^ @{ newtype Cxt x => T x = A (B x) -- deriving (Z,W Q) -- deriving stock Eq }@ | TypeDataD Name [TyVarBndr BndrVis] (Maybe Kind) -- Kind signature (allowed only for GADTs) [Con] -- ^ @{ type data T x = A x | B (T x) }@ | TySynD Name [TyVarBndr BndrVis] Type -- ^ @{ type T x = (x,x) }@ | ClassD Cxt Name [TyVarBndr BndrVis] [FunDep] [Dec] -- ^ @{ class Eq a => Ord a where ds }@ | InstanceD (Maybe Overlap) Cxt Type [Dec] -- ^ @{ instance {\-\# OVERLAPS \#-\} -- Show w => Show [w] where ds }@ | SigD Name Type -- ^ @{ length :: [a] -> Int }@ | KiSigD Name Kind -- ^ @{ type TypeRep :: k -> Type }@ | ForeignD Foreign -- ^ @{ foreign import ... } --{ foreign export ... }@ | InfixD Fixity NamespaceSpecifier Name -- ^ @{ infix 3 data foo }@ | DefaultD [Type] -- ^ @{ default (Integer, Double) }@ -- | pragmas | PragmaD Pragma -- ^ @{ {\-\# INLINE [1] foo \#-\} }@ -- | data families (may also appear in [Dec] of 'ClassD' and 'InstanceD') | DataFamilyD Name [TyVarBndr BndrVis] (Maybe Kind) -- ^ @{ data family T a b c :: * }@ | DataInstD Cxt (Maybe [TyVarBndr ()]) Type (Maybe Kind) -- Kind signature [Con] [DerivClause] -- ^ @{ data instance Cxt x => T [x] -- = A x | B (T x) -- deriving (Z,W) -- deriving stock Eq }@ | NewtypeInstD Cxt (Maybe [TyVarBndr ()]) Type -- Quantified type vars (Maybe Kind) -- Kind signature Con [DerivClause] -- ^ @{ newtype instance Cxt x => T [x] -- = A (B x) -- deriving (Z,W) -- deriving stock Eq }@ | TySynInstD TySynEqn -- ^ @{ type instance ... 
}@ -- | open type families (may also appear in [Dec] of 'ClassD' and 'InstanceD') | OpenTypeFamilyD TypeFamilyHead -- ^ @{ type family T a b c = (r :: *) | r -> a b }@ | ClosedTypeFamilyD TypeFamilyHead [TySynEqn] -- ^ @{ type family F a b = (r :: *) | r -> a where ... }@ | RoleAnnotD Name [Role] -- ^ @{ type role T nominal representational }@ | StandaloneDerivD (Maybe DerivStrategy) Cxt Type -- ^ @{ deriving stock instance Ord a => Ord (Foo a) }@ | DefaultSigD Name Type -- ^ @{ default size :: Data a => a -> Int }@ -- | Pattern Synonyms | PatSynD Name PatSynArgs PatSynDir Pat -- ^ @{ pattern P v1 v2 .. vn <- p }@ unidirectional or -- @{ pattern P v1 v2 .. vn = p }@ implicit bidirectional or -- @{ pattern P v1 v2 .. vn <- p -- where P v1 v2 .. vn = e }@ explicit bidirectional -- -- also, besides prefix pattern synonyms, both infix and record -- pattern synonyms are supported. See 'PatSynArgs' for details | PatSynSigD Name PatSynType -- ^ A pattern synonym's type signature. | ImplicitParamBindD String Exp -- ^ @{ ?x = expr }@ -- -- Implicit parameter binding declaration. Can only be used in let -- and where clauses which consist entirely of implicit bindings. deriving( Show, Eq, Ord, Data, Generic ) -- | A way to specify a namespace to look in when GHC needs to find -- a name's source data NamespaceSpecifier = NoNamespaceSpecifier -- ^ Name may be everything; If there are two -- names in different namespaces, then consider both | TypeNamespaceSpecifier -- ^ Name should be a type-level entity, such as a -- data type, type alias, type family, type class, -- or type variable | DataNamespaceSpecifier -- ^ Name should be a term-level entity, such as a -- function, data constructor, or pattern synonym deriving( Show, Eq, Ord, Data, Generic ) -- | Varieties of allowed instance overlap. 
data Overlap = Overlappable -- ^ May be overlapped by more specific instances | Overlapping -- ^ May overlap a more general instance | Overlaps -- ^ Both 'Overlapping' and 'Overlappable' | Incoherent -- ^ Both 'Overlapping' and 'Overlappable', and -- pick an arbitrary one if multiple choices are -- available. deriving( Show, Eq, Ord, Data, Generic ) -- | A single @deriving@ clause at the end of a datatype declaration. data DerivClause = DerivClause (Maybe DerivStrategy) Cxt -- ^ @{ deriving stock (Eq, Ord) }@ deriving( Show, Eq, Ord, Data, Generic ) -- | What the user explicitly requests when deriving an instance with -- @-XDerivingStrategies@. data DerivStrategy = StockStrategy -- ^ @deriving {stock} C@ | AnyclassStrategy -- ^ @deriving {anyclass} C@, @-XDeriveAnyClass@ | NewtypeStrategy -- ^ @deriving {newtype} C@, @-XGeneralizedNewtypeDeriving@ | ViaStrategy Type -- ^ @deriving C {via T}@, @-XDerivingVia@ deriving( Show, Eq, Ord, Data, Generic ) -- | A pattern synonym's type. Note that a pattern synonym's /fully/ -- specified type has a peculiar shape coming with two forall -- quantifiers and two constraint contexts. For example, consider the -- pattern synonym -- -- > pattern P x1 x2 ... xn = -- -- P's complete type is of the following form -- -- > pattern P :: forall universals. required constraints -- > => forall existentials. provided constraints -- > => t1 -> t2 -> ... -> tn -> t -- -- consisting of four parts: -- -- 1. the (possibly empty lists of) universally quantified type -- variables and required constraints on them. -- 2. the (possibly empty lists of) existentially quantified -- type variables and the provided constraints on them. -- 3. the types @t1@, @t2@, .., @tn@ of @x1@, @x2@, .., @xn@, respectively -- 4. the type @t@ of @\@, mentioning only universals. 
-- -- Pattern synonym types interact with TH when (a) reifying a pattern -- synonym, (b) pretty printing, or (c) specifying a pattern synonym's -- type signature explicitly: -- -- * Reification always returns a pattern synonym's /fully/ specified -- type in abstract syntax. -- -- * Pretty printing via 'Language.Haskell.TH.Ppr.pprPatSynType' abbreviates -- a pattern synonym's type unambiguously in concrete syntax: The rule of -- thumb is to print initial empty universals and the required -- context as @() =>@, if existentials and a provided context -- follow. If only universals and their required context, but no -- existentials are specified, only the universals and their -- required context are printed. If both or none are specified, so -- both (or none) are printed. -- -- * When specifying a pattern synonym's type explicitly with -- 'PatSynSigD' either one of the universals, the existentials, or -- their contexts may be left empty. -- -- See the GHC user's guide for more information on pattern synonyms -- and their types: -- . type PatSynType = Type -- | Common elements of 'OpenTypeFamilyD' and 'ClosedTypeFamilyD'. By -- analogy with "head" for type classes and type class instances as -- defined in /Type classes: an exploration of the design space/, the -- @TypeFamilyHead@ is defined to be the elements of the declaration -- between @type family@ and @where@. data TypeFamilyHead = TypeFamilyHead Name [TyVarBndr BndrVis] FamilyResultSig (Maybe InjectivityAnn) deriving( Show, Eq, Ord, Data, Generic ) -- | One equation of a type family instance or closed type family. The -- arguments are the left-hand-side type and the right-hand-side result. -- -- For instance, if you had the following type family: -- -- @ -- type family Foo (a :: k) :: k where -- forall k (a :: k). 
Foo \@k a = a -- @ -- -- The @Foo \@k a = a@ equation would be represented as follows: -- -- @ -- 'TySynEqn' ('Just' ['PlainTV' k, 'KindedTV' a ('VarT' k)]) -- ('AppT' ('AppKindT' ('ConT' ''Foo) ('VarT' k)) ('VarT' a)) -- ('VarT' a) -- @ data TySynEqn = TySynEqn (Maybe [TyVarBndr ()]) Type Type deriving( Show, Eq, Ord, Data, Generic ) -- | [Functional dependency](https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/functional_dependencies.html) -- syntax, as in a class declaration. data FunDep = FunDep [Name] [Name] -- ^ @class C a b {| a -> b}@ deriving( Show, Eq, Ord, Data, Generic ) -- | A @foreign@ declaration. data Foreign = ImportF Callconv Safety String Name Type -- ^ @foreign import callconv safety "foreign_name" haskellName :: type@ | ExportF Callconv String Name Type -- ^ @foreign export callconv "foreign_name" haskellName :: type@ deriving( Show, Eq, Ord, Data, Generic ) -- keep Callconv in sync with module ForeignCall in ghc/compiler/GHC/Types/ForeignCall.hs -- | A calling convention identifier, as in a 'Foreign' declaration. data Callconv = CCall | StdCall | CApi | Prim | JavaScript deriving( Show, Eq, Ord, Data, Generic ) -- | A safety level, as in a 'Foreign' declaration. data Safety = Unsafe | Safe | Interruptible deriving( Show, Eq, Ord, Data, Generic ) data Pragma = InlineP Name Inline RuleMatch Phases -- ^ @{ {\-\# [inline] [rule match] [phases] [phases] name #-} }@. See -- 'Inline' and 'RuleMatch'. | OpaqueP Name -- ^ @{ {\-\# OPAQUE T #-} }@ | SpecialiseP Name Type (Maybe Inline) Phases -- ^ @{ {\-\# SPECIALISE [INLINE] [phases] T #-} }@ | SpecialiseInstP Type -- ^ @{ {\-\# SPECIALISE instance I #-} }@ | RuleP String (Maybe [TyVarBndr ()]) [RuleBndr] Exp Exp Phases -- ^ @{ {\-\# RULES "name" [phases] [forall t_1 ... t_i]. [forall b_1 ... b_j] rules... 
e_1 = e_2 #-} }@ | AnnP AnnTarget Exp -- ^ @{ {\-\# ANN target exp #-} }@ | LineP Int String -- ^ @{ {\-\# LINE n "file name" #-} }@ | CompleteP [Name] (Maybe Name) -- ^ @{ {\-\# COMPLETE C_1, ..., C_i [ :: T ] \#-} }@ | SCCP Name (Maybe String) -- ^ @{ {\-\# SCC fun "optional_name" \#-} }@ deriving( Show, Eq, Ord, Data, Generic ) -- | An inline pragma. data Inline = NoInline -- ^ @{ {\-\# NOINLINE ... #-} }@ | Inline -- ^ @{ {\-\# INLINE ... #-} }@ | Inlinable -- ^ @{ {\-\# INLINABLE ... #-} }@ deriving (Show, Eq, Ord, Data, Generic) -- | A @CONLIKE@ modifier, as in one of the various inline pragmas, or lack -- thereof ('FunLike'). data RuleMatch = ConLike -- ^ @{ {\-\# CONLIKE [inline] ... #-} }@ | FunLike -- ^ @{ {\-\# [inline] ... #-} }@ deriving (Show, Eq, Ord, Data, Generic) -- | Phase control syntax. data Phases = AllPhases -- ^ The default when unspecified | FromPhase Int -- ^ @[n]@ | BeforePhase Int -- ^ @[~n]@ deriving (Show, Eq, Ord, Data, Generic) -- | A binder found in the @forall@ of a @RULES@ pragma. data RuleBndr = RuleVar Name -- ^ @forall {a} ... .@ | TypedRuleVar Name Type -- ^ @forall {(a :: t)} ... .@ deriving (Show, Eq, Ord, Data, Generic) -- | The target of an @ANN@ pragma data AnnTarget = ModuleAnnotation -- ^ @{\-\# ANN {module} ... #-}@ | TypeAnnotation Name -- ^ @{\-\# ANN type {name} ... #-}@ | ValueAnnotation Name -- ^ @{\-\# ANN {name} ... #-}@ deriving (Show, Eq, Ord, Data, Generic) -- | A context, as found on the left side of a @=>@ in a type. type Cxt = [Pred] -- ^ @(Eq a, Ord b)@ -- | Since the advent of @ConstraintKinds@, constraints are really just types. -- Equality constraints use the 'EqualityT' constructor. Constraints may also -- be tuples of other constraints. type Pred = Type -- | 'SourceUnpackedness' corresponds to unpack annotations found in the source code. -- -- This may not agree with the annotations returned by 'reifyConStrictness'. -- See 'reifyConStrictness' for more information. 
data SourceUnpackedness = NoSourceUnpackedness -- ^ @C a@ | SourceNoUnpack -- ^ @C { {\-\# NOUNPACK \#-\} } a@ | SourceUnpack -- ^ @C { {\-\# UNPACK \#-\} } a@ deriving (Show, Eq, Ord, Data, Generic) -- | 'SourceStrictness' corresponds to strictness annotations found in the source code. -- -- This may not agree with the annotations returned by 'reifyConStrictness'. -- See 'reifyConStrictness' for more information. data SourceStrictness = NoSourceStrictness -- ^ @C a@ | SourceLazy -- ^ @C {~}a@ | SourceStrict -- ^ @C {!}a@ deriving (Show, Eq, Ord, Data, Generic) -- | Unlike 'SourceStrictness' and 'SourceUnpackedness', 'DecidedStrictness' -- refers to the strictness annotations that the compiler chooses for a data constructor -- field, which may be different from what is written in source code. -- -- Note that non-unpacked strict fields are assigned 'DecidedLazy' when a bang would be inappropriate, -- such as the field of a newtype constructor and fields that have an unlifted type. -- -- See 'reifyConStrictness' for more information. data DecidedStrictness = DecidedLazy -- ^ Field inferred to not have a bang. | DecidedStrict -- ^ Field inferred to have a bang. | DecidedUnpack -- ^ Field inferred to be unpacked. deriving (Show, Eq, Ord, Data, Generic) -- | A data constructor. -- -- The constructors for 'Con' can roughly be divided up into two categories: -- those for constructors with \"vanilla\" syntax ('NormalC', 'RecC', and -- 'InfixC'), and those for constructors with GADT syntax ('GadtC' and -- 'RecGadtC'). The 'ForallC' constructor, which quantifies additional type -- variables and class contexts, can surround either variety of constructor. -- However, the type variables that it quantifies are different depending -- on what constructor syntax is used: -- -- * If a 'ForallC' surrounds a constructor with vanilla syntax, then the -- 'ForallC' will only quantify /existential/ type variables. For example: -- -- @ -- data Foo a = forall b. 
MkFoo a b -- @ -- -- In @MkFoo@, 'ForallC' will quantify @b@, but not @a@. -- -- * If a 'ForallC' surrounds a constructor with GADT syntax, then the -- 'ForallC' will quantify /all/ type variables used in the constructor. -- For example: -- -- @ -- data Bar a b where -- MkBar :: (a ~ b) => c -> MkBar a b -- @ -- -- In @MkBar@, 'ForallC' will quantify @a@, @b@, and @c@. -- -- Multiplicity annotations for data types are currently not supported -- in Template Haskell (i.e. all fields represented by Template Haskell -- will be linear). data Con = -- | @C Int a@ NormalC Name [BangType] -- | @C { v :: Int, w :: a }@ | RecC Name [VarBangType] -- | @Int :+ a@ | InfixC BangType Name BangType -- | @forall a. Eq a => C [a]@ | ForallC [TyVarBndr Specificity] Cxt Con -- @C :: a -> b -> T b Int@ | GadtC [Name] -- ^ The list of constructors, corresponding to the GADT constructor -- syntax @C1, C2 :: a -> T b@. -- -- Invariant: the list must be non-empty. [BangType] -- ^ The constructor arguments Type -- ^ See Note [GADT return type] -- | @C :: { v :: Int } -> T b Int@ | RecGadtC [Name] -- ^ The list of constructors, corresponding to the GADT record -- constructor syntax @C1, C2 :: { fld :: a } -> T b@. -- -- Invariant: the list must be non-empty. [VarBangType] -- ^ The constructor arguments Type -- ^ See Note [GADT return type] deriving (Show, Eq, Ord, Data, Generic) -- Note [GADT return type] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- The return type of a GADT constructor does not necessarily match the name of -- the data type: -- -- type S = T -- -- data T a where -- MkT :: S Int -- -- -- type S a = T -- -- data T a where -- MkT :: S Char Int -- -- -- type Id a = a -- type S a = T -- -- data T a where -- MkT :: Id (S Char Int) -- -- -- That is why we allow the return type stored by a constructor to be an -- arbitrary type. See also #11341 -- | Strictness information in a data constructor's argument. 
data Bang = Bang SourceUnpackedness SourceStrictness -- ^ @C { {\-\# UNPACK \#-\} !}a@ deriving (Show, Eq, Ord, Data, Generic) -- | A type with a strictness annotation, as in data constructors. See 'Con'. type BangType = (Bang, Type) -- | 'BangType', but for record fields. See 'Con'. type VarBangType = (Name, Bang, Type) -- | As of @template-haskell-2.11.0.0@, 'Strict' has been replaced by 'Bang'. type Strict = Bang -- | As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by -- 'BangType'. type StrictType = BangType -- | As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by -- 'VarBangType'. type VarStrictType = VarBangType -- | A pattern synonym's directionality. data PatSynDir = Unidir -- ^ @pattern P x {<-} p@ | ImplBidir -- ^ @pattern P x {=} p@ | ExplBidir [Clause] -- ^ @pattern P x {<-} p where P x = e@ deriving( Show, Eq, Ord, Data, Generic ) -- | A pattern synonym's argument type. data PatSynArgs = PrefixPatSyn [Name] -- ^ @pattern P {x y z} = p@ | InfixPatSyn Name Name -- ^ @pattern {x P y} = p@ | RecordPatSyn [Name] -- ^ @pattern P { {x,y,z} } = p@ deriving( Show, Eq, Ord, Data, Generic ) -- | A Haskell type. data Type = ForallT [TyVarBndr Specificity] Cxt Type -- ^ @forall \. \ => \@ | ForallVisT [TyVarBndr ()] Type -- ^ @forall \ -> \@ | AppT Type Type -- ^ @T a b@ | AppKindT Type Kind -- ^ @T \@k t@ | SigT Type Kind -- ^ @t :: k@ | VarT Name -- ^ @a@ | ConT Name -- ^ @T@ | PromotedT Name -- ^ @'T@ | InfixT Type Name Type -- ^ @T + T@ | UInfixT Type Name Type -- ^ @T + T@ -- -- See "Language.Haskell.TH.Syntax#infix" | PromotedInfixT Type Name Type -- ^ @T :+: T@ | PromotedUInfixT Type Name Type -- ^ @T :+: T@ -- -- See "Language.Haskell.TH.Syntax#infix" | ParensT Type -- ^ @(T)@ -- See Note [Representing concrete syntax in types] | TupleT Int -- ^ @(,)@, @(,,)@, etc. | UnboxedTupleT Int -- ^ @(\#,\#)@, @(\#,,\#)@, etc. | UnboxedSumT SumArity -- ^ @(\#|\#)@, @(\#||\#)@, etc. 
| ArrowT -- ^ @->@ | MulArrowT -- ^ @%n ->@ -- -- Generalised arrow type with multiplicity argument | EqualityT -- ^ @~@ | ListT -- ^ @[]@ | PromotedTupleT Int -- ^ @'()@, @'(,)@, @'(,,)@, etc. | PromotedNilT -- ^ @'[]@ | PromotedConsT -- ^ @'(:)@ | StarT -- ^ @*@ | ConstraintT -- ^ @Constraint@ | LitT TyLit -- ^ @0@, @1@, @2@, etc. | WildCardT -- ^ @_@ | ImplicitParamT String Type -- ^ @?x :: t@ deriving( Show, Eq, Ord, Data, Generic ) -- | The specificity of a type variable in a @forall ...@. data Specificity = SpecifiedSpec -- ^ @a@ | InferredSpec -- ^ @{a}@ deriving( Show, Eq, Ord, Data, Generic ) -- | The @flag@ type parameter is instantiated to one of the following types: -- -- * 'Specificity' (examples: 'ForallC', 'ForallT') -- * 'BndrVis' (examples: 'DataD', 'ClassD', etc.) -- * '()', a catch-all type for other forms of binders, including 'ForallVisT', 'DataInstD', 'RuleP', and 'TyVarSig' -- data TyVarBndr flag = PlainTV Name flag -- ^ @a@ | KindedTV Name flag Kind -- ^ @(a :: k)@ deriving( Show, Eq, Ord, Data, Generic, Functor, Foldable, Traversable ) -- | Visibility of a type variable. See [Inferred vs. specified type variables](https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/type_applications.html#inferred-vs-specified-type-variables). data BndrVis = BndrReq -- ^ @a@ | BndrInvis -- ^ @\@a@ deriving( Show, Eq, Ord, Data, Generic ) -- | Type family result signature data FamilyResultSig = NoSig -- ^ no signature | KindSig Kind -- ^ @k@ | TyVarSig (TyVarBndr ()) -- ^ @= r, = (r :: k)@ deriving( Show, Eq, Ord, Data, Generic ) -- | Injectivity annotation as in an [injective type family](https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/type_families.html) data InjectivityAnn = InjectivityAnn Name [Name] deriving ( Show, Eq, Ord, Data, Generic ) -- | Type-level literals. 
data TyLit = NumTyLit Integer -- ^ @2@ | StrTyLit String -- ^ @\"Hello\"@ | CharTyLit Char -- ^ @\'C\'@, @since 4.16.0.0 deriving ( Show, Eq, Ord, Data, Generic ) -- | Role annotations data Role = NominalR -- ^ @nominal@ | RepresentationalR -- ^ @representational@ | PhantomR -- ^ @phantom@ | InferR -- ^ @_@ deriving( Show, Eq, Ord, Data, Generic ) -- | Annotation target for reifyAnnotations data AnnLookup = AnnLookupModule Module | AnnLookupName Name deriving( Show, Eq, Ord, Data, Generic ) -- | To avoid duplication between kinds and types, they -- are defined to be the same. Naturally, you would never -- have a type be 'StarT' and you would never have a kind -- be 'SigT', but many of the other constructors are shared. -- Note that the kind @Bool@ is denoted with 'ConT', not -- 'PromotedT'. Similarly, tuple kinds are made with 'TupleT', -- not 'PromotedTupleT'. type Kind = Type {- Note [Representing concrete syntax in types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Haskell has a rich concrete syntax for types, including t1 -> t2, (t1,t2), [t], and so on In TH we represent all of this using AppT, with a distinguished type constructor at the head. So, Type TH representation ----------------------------------------------- t1 -> t2 ArrowT `AppT` t2 `AppT` t2 [t] ListT `AppT` t (t1,t2) TupleT 2 `AppT` t1 `AppT` t2 '(t1,t2) PromotedTupleT 2 `AppT` t1 `AppT` t2 But if the original HsSyn used prefix application, we won't use these special TH constructors. For example [] t ConT "[]" `AppT` t (->) t ConT "->" `AppT` t In this way we can faithfully represent in TH whether the original HsType used concrete syntax or not. The one case that doesn't fit this pattern is that of promoted lists '[ Maybe, IO ] PromotedListT 2 `AppT` t1 `AppT` t2 but it's very smelly because there really is no type constructor corresponding to PromotedListT. 
So we encode HsExplicitListTy with PromotedConsT and PromotedNilT (which *do* have underlying type constructors): '[ Maybe, IO ] PromotedConsT `AppT` Maybe `AppT` (PromotedConsT `AppT` IO `AppT` PromotedNilT) -} -- | A location at which to attach Haddock documentation. -- Note that adding documentation to a 'Name' defined oustide of the current -- module will cause an error. data DocLoc = ModuleDoc -- ^ At the current module's header. | DeclDoc Name -- ^ At a declaration, not necessarily top level. | ArgDoc Name Int -- ^ At a specific argument of a function, indexed by its -- position. | InstDoc Type -- ^ At a class or family instance. deriving ( Show, Eq, Ord, Data, Generic ) ----------------------------------------------------- -- Internal helper functions ----------------------------------------------------- -- | Internal helper function. cmpEq :: Ordering -> Bool cmpEq EQ = True cmpEq _ = False -- | Internal helper function. thenCmp :: Ordering -> Ordering -> Ordering thenCmp EQ o2 = o2 thenCmp o1 _ = o1 -- | Internal helper function. 
get_cons_names :: Con -> [Name] get_cons_names (NormalC n _) = [n] get_cons_names (RecC n _) = [n] get_cons_names (InfixC _ n _) = [n] get_cons_names (ForallC _ _ con) = get_cons_names con -- GadtC can have multiple names, e.g -- > data Bar a where -- > MkBar1, MkBar2 :: a -> Bar a -- Will have one GadtC with [MkBar1, MkBar2] as names get_cons_names (GadtC ns _ _) = ns get_cons_names (RecGadtC ns _ _) = ns ghc-lib-parser-9.12.2.20250421/libraries/ghc-platform/0000755000000000000000000000000007346545000020032 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-platform/ghc-platform.cabal0000644000000000000000000000107707346545000023406 0ustar0000000000000000cabal-version: 3.0 name: ghc-platform version: 0.1.0.0 synopsis: Platform information used by GHC and friends license: BSD-3-Clause license-file: LICENSE author: Rodrigo Mesquita maintainer: ghc-devs@haskell.org build-type: Simple extra-doc-files: CHANGELOG.md common warnings ghc-options: -Wall library import: warnings exposed-modules: GHC.Platform.ArchOS build-depends: base >=4.15.0.0 && <5 hs-source-dirs: src default-language: Haskell2010 ghc-lib-parser-9.12.2.20250421/libraries/ghc-platform/src/GHC/Platform/0000755000000000000000000000000007346545000023006 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghc-platform/src/GHC/Platform/ArchOS.hs0000644000000000000000000001150507346545000024463 0ustar0000000000000000{-# LANGUAGE LambdaCase, ScopedTypeVariables #-} -- | Platform architecture and OS module GHC.Platform.ArchOS ( ArchOS(..) -- * Architectures , Arch(..) , ArmISA(..) , ArmISAExt(..) , ArmABI(..) , PPC_64ABI(..) , isARM, isPPC , stringEncodeArch -- * Operating systems , OS(..) , osElfTarget , osMachOTarget , stringEncodeOS ) where import Prelude -- See Note [Why do we import Prelude here?] -- | Platform architecture and OS. 
data ArchOS = ArchOS { archOS_arch :: Arch , archOS_OS :: OS } deriving (Read, Show, Eq, Ord) -- | Architectures data Arch = ArchUnknown | ArchX86 | ArchX86_64 | ArchPPC | ArchPPC_64 PPC_64ABI | ArchS390X | ArchARM ArmISA [ArmISAExt] ArmABI | ArchAArch64 | ArchAlpha | ArchMipseb | ArchMipsel | ArchRISCV64 | ArchLoongArch64 | ArchJavaScript | ArchWasm32 deriving (Read, Show, Eq, Ord) -- | ARM Instruction Set Architecture data ArmISA = ARMv5 | ARMv6 | ARMv7 deriving (Read, Show, Eq, Ord) -- | ARM extensions data ArmISAExt = VFPv2 | VFPv3 | VFPv3D16 | NEON | IWMMX2 deriving (Read, Show, Eq, Ord) -- | ARM ABI data ArmABI = SOFT | SOFTFP | HARD deriving (Read, Show, Eq, Ord) -- | PowerPC 64-bit ABI data PPC_64ABI = ELF_V1 -- ^ PowerPC64 | ELF_V2 -- ^ PowerPC64 LE deriving (Read, Show, Eq, Ord) -- | Operating systems. -- -- Using OSUnknown to generate code should produce a sensible default, but no -- promises. data OS = OSUnknown | OSLinux | OSDarwin | OSSolaris2 | OSMinGW32 | OSFreeBSD | OSDragonFly | OSOpenBSD | OSNetBSD | OSKFreeBSD | OSHaiku | OSQNXNTO | OSAIX | OSHurd | OSWasi | OSGhcjs deriving (Read, Show, Eq, Ord) -- Note [Platform Syntax] -- ~~~~~~~~~~~~~~~~~~~~~~ -- -- There is a very loose encoding of platforms shared by many tools we are -- encoding to here. GNU Config (http://git.savannah.gnu.org/cgit/config.git), -- and LLVM's http://llvm.org/doxygen/classllvm_1_1Triple.html are perhaps the -- most definitional parsers. The basic syntax is a list of '-'-separated -- components. The Unix 'uname' command syntax is related but briefer. -- -- Those two parsers are quite forgiving, and even the 'config.sub' -- normalization is forgiving too. The "best" way to encode a platform is -- therefore somewhat a matter of taste. -- -- The 'stringEncode*' functions here convert each part of GHC's structured -- notion of a platform into one dash-separated component. -- | See Note [Platform Syntax]. 
stringEncodeArch :: Arch -> String stringEncodeArch = \case ArchUnknown -> "unknown" ArchX86 -> "i386" ArchX86_64 -> "x86_64" ArchPPC -> "powerpc" ArchPPC_64 _ -> "powerpc64" ArchS390X -> "s390x" ArchARM ARMv5 _ _ -> "armv5" ArchARM ARMv6 _ _ -> "armv6" ArchARM ARMv7 _ _ -> "armv7" ArchAArch64 -> "aarch64" ArchAlpha -> "alpha" ArchMipseb -> "mipseb" ArchMipsel -> "mipsel" ArchRISCV64 -> "riscv64" ArchLoongArch64 -> "loongarch64" ArchJavaScript -> "javascript" ArchWasm32 -> "wasm32" -- | See Note [Platform Syntax]. stringEncodeOS :: OS -> String stringEncodeOS = \case OSUnknown -> "unknown" OSLinux -> "linux" OSDarwin -> "darwin" OSSolaris2 -> "solaris2" OSMinGW32 -> "mingw32" OSFreeBSD -> "freebsd" OSDragonFly -> "dragonfly" OSOpenBSD -> "openbsd" OSNetBSD -> "netbsd" OSKFreeBSD -> "kfreebsdgnu" OSHaiku -> "haiku" OSQNXNTO -> "nto-qnx" OSAIX -> "aix" OSHurd -> "hurd" OSWasi -> "wasi" OSGhcjs -> "ghcjs" -- | This predicate tells us whether the OS uses the ELF as its primary object format. osElfTarget :: OS -> Bool osElfTarget OSLinux = True osElfTarget OSFreeBSD = True osElfTarget OSDragonFly = True osElfTarget OSOpenBSD = True osElfTarget OSNetBSD = True osElfTarget OSSolaris2 = True osElfTarget OSDarwin = False osElfTarget OSMinGW32 = False osElfTarget OSKFreeBSD = True osElfTarget OSHaiku = True osElfTarget OSQNXNTO = False osElfTarget OSAIX = False osElfTarget OSHurd = True osElfTarget OSWasi = False osElfTarget OSGhcjs = False osElfTarget OSUnknown = False -- Defaulting to False is safe; it means don't rely on any -- ELF-specific functionality. It is important to have a default for -- portability, otherwise we have to answer this question for every -- new platform we compile on (even unreg). isARM :: Arch -> Bool isARM (ArchARM {}) = True isARM ArchAArch64 = True isARM _ = False isPPC :: Arch -> Bool isPPC (ArchPPC_64 _) = True isPPC ArchPPC = True isPPC _ = False -- | This predicate tells us whether the OS support Mach-O shared libraries. 
osMachOTarget :: OS -> Bool osMachOTarget OSDarwin = True osMachOTarget _ = False ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/0000755000000000000000000000000007346545000017133 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/BreakArray.hs0000644000000000000000000000712307346545000021515 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-name-shadowing #-} {-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} ------------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2007 -- -- | Break Arrays -- -- An array of words, indexed by a breakpoint number (breakpointId in Tickish) -- containing the ignore count for every breakpoint. -- There is one of these arrays per module. -- -- For each word with value n: -- n > 1 : the corresponding breakpoint is enabled. Next time the bp is hit, -- GHCi will decrement the ignore count and continue processing. -- n == 0 : The breakpoint is enabled, GHCi will stop next time it hits -- this breakpoint. -- n == -1: This breakpoint is disabled. -- n < -1 : Not used. -- ------------------------------------------------------------------------------- module GHCi.BreakArray ( BreakArray (BA) -- constructor is exported only for GHC.StgToByteCode , newBreakArray , getBreak , setupBreakpoint , breakOn , breakOff , showBreakArray ) where import Prelude -- See note [Why do we import Prelude here?] import Control.Monad import GHC.Exts import GHC.IO ( IO(..) ) import System.IO.Unsafe ( unsafeDupablePerformIO ) #include "MachDeps.h" data BreakArray = BA (MutableByteArray# RealWorld) breakOff, breakOn :: Int breakOn = 0 breakOff = -1 showBreakArray :: BreakArray -> IO () showBreakArray array = do forM_ [0 .. 
(size array - 1)] $ \i -> do val <- readBreakArray array i putStr $ ' ' : show val putStr "\n" setupBreakpoint :: BreakArray -> Int -> Int -> IO Bool setupBreakpoint breakArray ind val | safeIndex breakArray ind = do writeBreakArray breakArray ind val return True | otherwise = return False getBreak :: BreakArray -> Int -> IO (Maybe Int) getBreak array index | safeIndex array index = do val <- readBreakArray array index return $ Just val | otherwise = return Nothing safeIndex :: BreakArray -> Int -> Bool safeIndex array index = index < size array && index >= 0 size :: BreakArray -> Int size (BA array) = size `div` SIZEOF_HSWORD where -- We want to keep this operation pure. The mutable byte array -- is never resized so this is safe. size = unsafeDupablePerformIO $ sizeofMutableByteArray array sizeofMutableByteArray :: MutableByteArray# RealWorld -> IO Int sizeofMutableByteArray arr = IO $ \s -> case getSizeofMutableByteArray# arr s of (# s', n# #) -> (# s', I# n# #) allocBA :: Int# -> IO BreakArray allocBA sz# = IO $ \s1 -> case newByteArray# sz# s1 of { (# s2, array #) -> (# s2, BA array #) } -- create a new break array and initialise all elements to breakOff. 
newBreakArray :: Int -> IO BreakArray newBreakArray (I# sz#) = do BA array <- allocBA (sz# *# SIZEOF_HSWORD#) case breakOff of I# off -> do let loop n | isTrue# (n >=# sz#) = return () | otherwise = do writeBA# array n off; loop (n +# 1#) loop 0# return $ BA array writeBA# :: MutableByteArray# RealWorld -> Int# -> Int# -> IO () writeBA# array ind val = IO $ \s -> case writeIntArray# array ind val s of { s -> (# s, () #) } writeBreakArray :: BreakArray -> Int -> Int -> IO () writeBreakArray (BA array) (I# i) (I# val) = writeBA# array i val readBA# :: MutableByteArray# RealWorld -> Int# -> IO Int readBA# array i = IO $ \s -> case readIntArray# array i s of { (# s, c #) -> (# s, I# c #) } readBreakArray :: BreakArray -> Int -> IO Int readBreakArray (BA array) (I# ind# ) = readBA# array ind# ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/FFI.hsc0000644000000000000000000001353707346545000020247 0ustar0000000000000000----------------------------------------------------------------------------- -- -- libffi bindings -- -- (c) The University of Glasgow 2008 -- ----------------------------------------------------------------------------- {- Note [FFI for the JS-Backend] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The JS-backend does not use GHC's native rts, as such you might think that it doesn't require ghci. However, that is not true, because we need ghci in order to interoperate with iserv even if we do not use any of the FFI stuff in this file. So obviously we do not require libffi, but we still need to be able to build ghci in order for the JS-Backend to supply its own iserv interop solution. Thus we bite the bullet and wrap all the unneeded bits in a CPP conditional compilation blocks that detect the JS-backend. A necessary evil to be sure; notice that the only symbols remaining the JS_HOST_ARCH case are those that are explicitly exported by this module and set to error if they are every used. 
-} #if !defined(javascript_HOST_ARCH) -- See Note [FFI_GO_CLOSURES workaround] in ghc_ffi.h -- We can't include ghc_ffi.h here as we must build with stage0 #if defined(darwin_HOST_OS) #if !defined(FFI_GO_CLOSURES) #define FFI_GO_CLOSURES 0 #endif #endif #include #endif {-# LANGUAGE CPP, DeriveGeneric, DeriveAnyClass #-} module GHCi.FFI ( FFIType(..) , C_ffi_cif , prepForeignCall , freeForeignCallInfo ) where import Prelude -- See note [Why do we import Prelude here?] #if !defined(javascript_HOST_ARCH) import Control.Exception import Foreign.C #endif import Data.Binary import GHC.Generics import Foreign data FFIType = FFIVoid | FFIPointer | FFIFloat | FFIDouble | FFISInt8 | FFISInt16 | FFISInt32 | FFISInt64 | FFIUInt8 | FFIUInt16 | FFIUInt32 | FFIUInt64 deriving (Show, Generic, Binary) prepForeignCall :: [FFIType] -- arg types -> FFIType -- result type -> IO (Ptr C_ffi_cif) -- token for making calls (must be freed by caller) #if !defined(javascript_HOST_ARCH) prepForeignCall arg_types result_type = do let n_args = length arg_types arg_arr <- mallocArray n_args pokeArray arg_arr (map ffiType arg_types) cif <- mallocBytes (#const sizeof(ffi_cif)) r <- ffi_prep_cif cif fFI_DEFAULT_ABI (fromIntegral n_args) (ffiType result_type) arg_arr if r /= fFI_OK then throwIO $ ErrorCall $ concat [ "prepForeignCallFailed: ", strError r, "(arg tys: ", show arg_types, " res ty: ", show result_type, ")" ] else return (castPtr cif) #else prepForeignCall _ _ = error "GHCi.FFI.prepForeignCall: Called with JS_HOST_ARCH! Perhaps you need to run configure?" #endif freeForeignCallInfo :: Ptr C_ffi_cif -> IO () #if !defined(javascript_HOST_ARCH) freeForeignCallInfo p = do free ((#ptr ffi_cif, arg_types) p) free p #else freeForeignCallInfo _ = error "GHCi.FFI.freeForeignCallInfo: Called with JS_HOST_ARCH! Perhaps you need to run configure?" 
#endif data C_ffi_cif #if !defined(javascript_HOST_ARCH) data C_ffi_type strError :: C_ffi_status -> String strError r | r == fFI_BAD_ABI = "invalid ABI (FFI_BAD_ABI)" | r == fFI_BAD_TYPEDEF = "invalid type description (FFI_BAD_TYPEDEF)" | otherwise = "unknown error: " ++ show r ffiType :: FFIType -> Ptr C_ffi_type ffiType FFIVoid = ffi_type_void ffiType FFIPointer = ffi_type_pointer ffiType FFIFloat = ffi_type_float ffiType FFIDouble = ffi_type_double ffiType FFISInt8 = ffi_type_sint8 ffiType FFISInt16 = ffi_type_sint16 ffiType FFISInt32 = ffi_type_sint32 ffiType FFISInt64 = ffi_type_sint64 ffiType FFIUInt8 = ffi_type_uint8 ffiType FFIUInt16 = ffi_type_uint16 ffiType FFIUInt32 = ffi_type_uint32 ffiType FFIUInt64 = ffi_type_uint64 type C_ffi_status = (#type ffi_status) type C_ffi_abi = (#type ffi_abi) foreign import ccall "&ffi_type_void" ffi_type_void :: Ptr C_ffi_type foreign import ccall "&ffi_type_uint8" ffi_type_uint8 :: Ptr C_ffi_type foreign import ccall "&ffi_type_sint8" ffi_type_sint8 :: Ptr C_ffi_type foreign import ccall "&ffi_type_uint16" ffi_type_uint16 :: Ptr C_ffi_type foreign import ccall "&ffi_type_sint16" ffi_type_sint16 :: Ptr C_ffi_type foreign import ccall "&ffi_type_uint32" ffi_type_uint32 :: Ptr C_ffi_type foreign import ccall "&ffi_type_sint32" ffi_type_sint32 :: Ptr C_ffi_type foreign import ccall "&ffi_type_uint64" ffi_type_uint64 :: Ptr C_ffi_type foreign import ccall "&ffi_type_sint64" ffi_type_sint64 :: Ptr C_ffi_type foreign import ccall "&ffi_type_float" ffi_type_float :: Ptr C_ffi_type foreign import ccall "&ffi_type_double" ffi_type_double :: Ptr C_ffi_type foreign import ccall "&ffi_type_pointer"ffi_type_pointer :: Ptr C_ffi_type fFI_OK, fFI_BAD_ABI, fFI_BAD_TYPEDEF :: C_ffi_status fFI_OK = (#const FFI_OK) fFI_BAD_ABI = (#const FFI_BAD_ABI) fFI_BAD_TYPEDEF = (#const FFI_BAD_TYPEDEF) fFI_DEFAULT_ABI :: C_ffi_abi fFI_DEFAULT_ABI = (#const FFI_DEFAULT_ABI) -- ffi_status ffi_prep_cif(ffi_cif *cif, -- ffi_abi abi, -- unsigned int nargs, 
-- ffi_type *rtype, -- ffi_type **atypes); foreign import ccall "ffi_prep_cif" ffi_prep_cif :: Ptr C_ffi_cif -- cif -> C_ffi_abi -- abi -> CUInt -- nargs -> Ptr C_ffi_type -- result type -> Ptr (Ptr C_ffi_type) -- arg types -> IO C_ffi_status -- Currently unused: -- void ffi_call(ffi_cif *cif, -- void (*fn)(), -- void *rvalue, -- void **avalue); -- foreign import ccall "ffi_call" -- ffi_call :: Ptr C_ffi_cif -- cif -- -> FunPtr (IO ()) -- function to call -- -> Ptr () -- put result here -- -> Ptr (Ptr ()) -- arg values -- -> IO () #endif ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/Message.hs0000644000000000000000000006201107346545000021053 0ustar0000000000000000{-# LANGUAGE GADTs, DeriveGeneric, StandaloneDeriving, ScopedTypeVariables, GeneralizedNewtypeDeriving, ExistentialQuantification, RecordWildCards, CPP #-} {-# OPTIONS_GHC -fno-warn-name-shadowing -fno-warn-orphans #-} -- | -- Remote GHCi message types and serialization. -- -- For details on Remote GHCi, see Note [Remote GHCi] in -- compiler/GHC/Runtime/Interpreter.hs. -- module GHCi.Message ( Message(..), Msg(..) , THMessage(..), THMsg(..) , QResult(..) , EvalStatus_(..), EvalStatus, EvalResult(..), EvalOpts(..), EvalExpr(..) , EvalBreakpoint (..) , SerializableException(..) , toSerializableException, fromSerializableException , THResult(..), THResultType(..) , ResumeContext(..) , QState(..) , getMessage, putMessage, getTHMessage, putTHMessage , Pipe(..), remoteCall, remoteTHCall, readPipe, writePipe , BreakModule , LoadedDLL ) where import Prelude -- See note [Why do we import Prelude here?] 
import GHCi.RemoteTypes import GHCi.FFI import GHCi.TH.Binary () -- For Binary instances import GHCi.BreakArray import GHCi.ResolvedBCO import GHC.LanguageExtensions import qualified GHC.Exts.Heap as Heap import GHC.ForeignSrcLang import GHC.Fingerprint import GHC.Conc (pseq, par) import Control.Concurrent import Control.Exception #if MIN_VERSION_base(4,20,0) import Control.Exception.Context #endif import Data.Binary import Data.Binary.Get import Data.Binary.Put import Data.ByteString (ByteString) import qualified Data.ByteString as B import qualified Data.ByteString.Lazy as LB import Data.Dynamic import Data.Typeable (TypeRep) import Data.IORef import Data.Map (Map) import Foreign import GHC.Generics import GHC.Stack.CCS import qualified GHC.Internal.TH.Syntax as TH import System.Exit import System.IO import System.IO.Error -- ----------------------------------------------------------------------------- -- The RPC protocol between GHC and the interactive server -- | A @Message a@ is a message that returns a value of type @a@. -- These are requests sent from GHC to the server. data Message a where -- | Exit the iserv process Shutdown :: Message () RtsRevertCAFs :: Message () -- RTS Linker ------------------------------------------- -- These all invoke the corresponding functions in the RTS Linker API. InitLinker :: Message () LookupSymbol :: String -> Message (Maybe (RemotePtr ())) LookupSymbolInDLL :: RemotePtr LoadedDLL -> String -> Message (Maybe (RemotePtr ())) LookupClosure :: String -> Message (Maybe HValueRef) LoadDLL :: String -> Message (Either String (RemotePtr LoadedDLL)) LoadArchive :: String -> Message () -- error? LoadObj :: String -> Message () -- error? UnloadObj :: String -> Message () -- error? 
AddLibrarySearchPath :: String -> Message (RemotePtr ()) RemoveLibrarySearchPath :: RemotePtr () -> Message Bool ResolveObjs :: Message Bool FindSystemLibrary :: String -> Message (Maybe String) -- Interpreter ------------------------------------------- -- | Create a set of BCO objects, and return HValueRefs to them -- See @createBCOs@ in compiler/GHC/Runtime/Interpreter.hs. -- NB: this has a custom Binary behavior, -- see Note [Parallelize CreateBCOs serialization] CreateBCOs :: [ResolvedBCO] -> Message [HValueRef] -- | Release 'HValueRef's FreeHValueRefs :: [HValueRef] -> Message () -- | Add entries to the Static Pointer Table AddSptEntry :: Fingerprint -> HValueRef -> Message () -- | Malloc some data and return a 'RemotePtr' to it MallocData :: ByteString -> Message (RemotePtr ()) MallocStrings :: [ByteString] -> Message [RemotePtr ()] -- | Calls 'GHCi.FFI.prepareForeignCall' PrepFFI :: [FFIType] -> FFIType -> Message (RemotePtr C_ffi_cif) -- | Free data previously created by 'PrepFFI' FreeFFI :: RemotePtr C_ffi_cif -> Message () -- | Create an info table for a constructor MkConInfoTable :: Bool -- TABLES_NEXT_TO_CODE -> Int -- ptr words -> Int -- non-ptr words -> Int -- constr tag -> Int -- pointer tag -> ByteString -- constructor desccription -> Message (RemotePtr Heap.StgInfoTable) -- | Evaluate a statement EvalStmt :: EvalOpts -> EvalExpr HValueRef {- IO [a] -} -> Message (EvalStatus [HValueRef]) {- [a] -} -- | Resume evaluation of a statement after a breakpoint ResumeStmt :: EvalOpts -> RemoteRef (ResumeContext [HValueRef]) -> Message (EvalStatus [HValueRef]) -- | Abandon evaluation of a statement after a breakpoint AbandonStmt :: RemoteRef (ResumeContext [HValueRef]) -> Message () -- | Evaluate something of type @IO String@ EvalString :: HValueRef {- IO String -} -> Message (EvalResult String) -- | Evaluate something of type @String -> IO String@ EvalStringToString :: HValueRef {- String -> IO String -} -> String -> Message (EvalResult String) -- | 
Evaluate something of type @IO ()@ EvalIO :: HValueRef {- IO a -} -> Message (EvalResult ()) -- | Create a set of CostCentres with the same module name MkCostCentres :: String -- module, RemotePtr so it can be shared -> [(String,String)] -- (name, SrcSpan) -> Message [RemotePtr CostCentre] -- | Show a 'CostCentreStack' as a @[String]@ CostCentreStackInfo :: RemotePtr CostCentreStack -> Message [String] -- | Create a new array of breakpoint flags NewBreakArray :: Int -- size -> Message (RemoteRef BreakArray) -- | Set how many times a breakpoint should be ignored -- also used for enable/disable SetupBreakpoint :: RemoteRef BreakArray -> Int -- breakpoint index -> Int -- ignore count to be stored in the BreakArray -- -1 disable; 0 enable; >= 1 enable, ignore count. -> Message () -- | Query the status of a breakpoint (True <=> enabled) BreakpointStatus :: RemoteRef BreakArray -> Int -- index -> Message Bool -- True <=> enabled -- | Get a reference to a free variable at a breakpoint GetBreakpointVar :: HValueRef -- the AP_STACK from EvalBreak -> Int -> Message (Maybe HValueRef) -- Template Haskell ------------------------------------------- -- For more details on how TH works with Remote GHCi, see -- Note [Remote Template Haskell] in libraries/ghci/GHCi/TH.hs. -- | Start a new TH module, return a state token that should be StartTH :: Message (RemoteRef (IORef QState)) -- | Evaluate a TH computation. -- -- Returns a ByteString, because we have to force the result -- before returning it to ensure there are no errors lurking -- in it. The TH types don't have NFData instances, and even if -- they did, we have to serialize the value anyway, so we might -- as well serialize it to force it. RunTH :: RemoteRef (IORef QState) -> HValueRef {- e.g. TH.Q TH.Exp -} -> THResultType -> Maybe TH.Loc -> Message (QResult ByteString) -- | Run the given mod finalizers. 
RunModFinalizers :: RemoteRef (IORef QState) -> [RemoteRef (TH.Q ())] -> Message (QResult ()) -- | Remote interface to GHC.Exts.Heap.getClosureData. This is used by -- the GHCi debugger to inspect values in the heap for :print and -- type reconstruction. GetClosure :: HValueRef -> Message (Heap.GenClosure HValueRef) -- | Evaluate something. This is used to support :force in GHCi. Seq :: HValueRef -> Message (EvalStatus ()) -- | Resume forcing a free variable in a breakpoint (#2950) ResumeSeq :: RemoteRef (ResumeContext ()) -> Message (EvalStatus ()) -- | Allocate a string for a breakpoint module name. -- This uses an empty dummy type because @ModuleName@ isn't available here. NewBreakModule :: String -> Message (RemotePtr BreakModule) deriving instance Show (Message a) -- | Template Haskell return values data QResult a = QDone a -- ^ RunTH finished successfully; return value follows | QException String -- ^ RunTH threw an exception | QFail String -- ^ RunTH called 'fail' deriving (Generic, Show) instance Binary a => Binary (QResult a) -- | Messages sent back to GHC from GHCi.TH, to implement the methods -- of 'Quasi'. For an overview of how TH works with Remote GHCi, see -- Note [Remote Template Haskell] in GHCi.TH. 
data THMessage a where NewName :: String -> THMessage (THResult TH.Name) Report :: Bool -> String -> THMessage (THResult ()) LookupName :: Bool -> String -> THMessage (THResult (Maybe TH.Name)) Reify :: TH.Name -> THMessage (THResult TH.Info) ReifyFixity :: TH.Name -> THMessage (THResult (Maybe TH.Fixity)) ReifyType :: TH.Name -> THMessage (THResult TH.Type) ReifyInstances :: TH.Name -> [TH.Type] -> THMessage (THResult [TH.Dec]) ReifyRoles :: TH.Name -> THMessage (THResult [TH.Role]) ReifyAnnotations :: TH.AnnLookup -> TypeRep -> THMessage (THResult [ByteString]) ReifyModule :: TH.Module -> THMessage (THResult TH.ModuleInfo) ReifyConStrictness :: TH.Name -> THMessage (THResult [TH.DecidedStrictness]) GetPackageRoot :: THMessage (THResult FilePath) AddDependentFile :: FilePath -> THMessage (THResult ()) AddTempFile :: String -> THMessage (THResult FilePath) AddModFinalizer :: RemoteRef (TH.Q ()) -> THMessage (THResult ()) AddCorePlugin :: String -> THMessage (THResult ()) AddTopDecls :: [TH.Dec] -> THMessage (THResult ()) AddForeignFilePath :: ForeignSrcLang -> FilePath -> THMessage (THResult ()) IsExtEnabled :: Extension -> THMessage (THResult Bool) ExtsEnabled :: THMessage (THResult [Extension]) PutDoc :: TH.DocLoc -> String -> THMessage (THResult ()) GetDoc :: TH.DocLoc -> THMessage (THResult (Maybe String)) StartRecover :: THMessage () EndRecover :: Bool -> THMessage () FailIfErrs :: THMessage (THResult ()) -- | Indicates that this RunTH is finished, and the next message -- will be the result of RunTH (a QResult). RunTHDone :: THMessage () deriving instance Show (THMessage a) data THMsg = forall a . 
(Binary a, Show a) => THMsg (THMessage a) getTHMessage :: Get THMsg getTHMessage = do b <- getWord8 case b of 0 -> THMsg <$> NewName <$> get 1 -> THMsg <$> (Report <$> get <*> get) 2 -> THMsg <$> (LookupName <$> get <*> get) 3 -> THMsg <$> Reify <$> get 4 -> THMsg <$> ReifyFixity <$> get 5 -> THMsg <$> (ReifyInstances <$> get <*> get) 6 -> THMsg <$> ReifyRoles <$> get 7 -> THMsg <$> (ReifyAnnotations <$> get <*> get) 8 -> THMsg <$> ReifyModule <$> get 9 -> THMsg <$> ReifyConStrictness <$> get 10 -> THMsg <$> AddDependentFile <$> get 11 -> THMsg <$> AddTempFile <$> get 12 -> THMsg <$> AddTopDecls <$> get 13 -> THMsg <$> (IsExtEnabled <$> get) 14 -> THMsg <$> return ExtsEnabled 15 -> THMsg <$> return StartRecover 16 -> THMsg <$> EndRecover <$> get 17 -> THMsg <$> return FailIfErrs 18 -> return (THMsg RunTHDone) 19 -> THMsg <$> AddModFinalizer <$> get 20 -> THMsg <$> (AddForeignFilePath <$> get <*> get) 21 -> THMsg <$> AddCorePlugin <$> get 22 -> THMsg <$> ReifyType <$> get 23 -> THMsg <$> (PutDoc <$> get <*> get) 24 -> THMsg <$> GetDoc <$> get 25 -> THMsg <$> return GetPackageRoot n -> error ("getTHMessage: unknown message " ++ show n) putTHMessage :: THMessage a -> Put putTHMessage m = case m of NewName a -> putWord8 0 >> put a Report a b -> putWord8 1 >> put a >> put b LookupName a b -> putWord8 2 >> put a >> put b Reify a -> putWord8 3 >> put a ReifyFixity a -> putWord8 4 >> put a ReifyInstances a b -> putWord8 5 >> put a >> put b ReifyRoles a -> putWord8 6 >> put a ReifyAnnotations a b -> putWord8 7 >> put a >> put b ReifyModule a -> putWord8 8 >> put a ReifyConStrictness a -> putWord8 9 >> put a AddDependentFile a -> putWord8 10 >> put a AddTempFile a -> putWord8 11 >> put a AddTopDecls a -> putWord8 12 >> put a IsExtEnabled a -> putWord8 13 >> put a ExtsEnabled -> putWord8 14 StartRecover -> putWord8 15 EndRecover a -> putWord8 16 >> put a FailIfErrs -> putWord8 17 RunTHDone -> putWord8 18 AddModFinalizer a -> putWord8 19 >> put a AddForeignFilePath lang a -> 
putWord8 20 >> put lang >> put a AddCorePlugin a -> putWord8 21 >> put a ReifyType a -> putWord8 22 >> put a PutDoc l s -> putWord8 23 >> put l >> put s GetDoc l -> putWord8 24 >> put l GetPackageRoot -> putWord8 25 data EvalOpts = EvalOpts { useSandboxThread :: Bool , singleStep :: Bool , breakOnException :: Bool , breakOnError :: Bool } deriving (Generic, Show) instance Binary EvalOpts data ResumeContext a = ResumeContext { resumeBreakMVar :: MVar () , resumeStatusMVar :: MVar (EvalStatus a) , resumeThreadId :: ThreadId } -- | We can pass simple expressions to EvalStmt, consisting of values -- and application. This allows us to wrap the statement to be -- executed in another function, which is used by GHCi to implement -- :set args and :set prog. It might be worthwhile to extend this -- little language in the future. data EvalExpr a = EvalThis a | EvalApp (EvalExpr a) (EvalExpr a) deriving (Generic, Show) instance Binary a => Binary (EvalExpr a) type EvalStatus a = EvalStatus_ a a data EvalStatus_ a b = EvalComplete Word64 (EvalResult a) | EvalBreak HValueRef{- AP_STACK -} (Maybe EvalBreakpoint) (RemoteRef (ResumeContext b)) (RemotePtr CostCentreStack) -- Cost centre stack deriving (Generic, Show) instance Binary a => Binary (EvalStatus_ a b) data EvalBreakpoint = EvalBreakpoint { eb_tick_mod :: String -- ^ Breakpoint tick module , eb_tick_index :: Int -- ^ Breakpoint tick index , eb_info_mod :: String -- ^ Breakpoint info module , eb_info_index :: Int -- ^ Breakpoint info index } deriving (Generic, Show) instance Binary EvalBreakpoint data EvalResult a = EvalException SerializableException | EvalSuccess a deriving (Generic, Show) instance Binary a => Binary (EvalResult a) -- | A dummy type that tags the pointer to a breakpoint's @ModuleName@, because -- that type isn't available here. data BreakModule -- | A dummy type that tags pointers returned by 'LoadDLL'. data LoadedDLL -- SomeException can't be serialized because it contains dynamic -- types. 
However, we do very limited things with the exceptions that -- are thrown by interpreted computations: -- -- * We print them, e.g. "*** Exception: " -- * UserInterrupt has a special meaning -- * In ghc -e, exitWith should exit with the appropriate exit code -- -- So all we need to do is distinguish UserInterrupt and ExitCode, and -- all other exceptions can be represented by their 'show' string. -- data SerializableException = EUserInterrupt | EExitCode ExitCode | EOtherException String deriving (Generic, Show) toSerializableException :: SomeException -> SerializableException toSerializableException ex | Just UserInterrupt <- fromException ex = EUserInterrupt | Just (ec::ExitCode) <- fromException ex = (EExitCode ec) | otherwise = EOtherException $ #if MIN_VERSION_base(4,20,0) -- Exception plus backtrace as seen in `displayExceptionWithInfo` case displayExceptionContext (someExceptionContext ex) of "" -> displayException (ex :: SomeException) cx -> displayException (ex :: SomeException) ++ "\n\n" ++ cx #else show (ex :: SomeException) #endif fromSerializableException :: SerializableException -> SomeException fromSerializableException EUserInterrupt = toException UserInterrupt fromSerializableException (EExitCode c) = toException c fromSerializableException (EOtherException str) = toException (ErrorCall str) instance Binary ExitCode instance Binary SerializableException data THResult a = THException String | THComplete a deriving (Generic, Show) instance Binary a => Binary (THResult a) data THResultType = THExp | THPat | THType | THDec | THAnnWrapper deriving (Enum, Show, Generic) instance Binary THResultType -- | The server-side Template Haskell state. This is created by the -- StartTH message. A new one is created per module that GHC -- typechecks. 
data QState = QState { qsMap :: Map TypeRep Dynamic -- ^ persistent data between splices in a module , qsLocation :: Maybe TH.Loc -- ^ location for current splice, if any , qsPipe :: Pipe -- ^ pipe to communicate with GHC } instance Show QState where show _ = "" -- Orphan instances of Binary for Ptr / FunPtr by conversion to Word64. -- This is to support Binary StgInfoTable which includes these. instance Binary (Ptr a) where put p = put (fromIntegral (ptrToWordPtr p) :: Word64) get = (wordPtrToPtr . fromIntegral) <$> (get :: Get Word64) instance Binary (FunPtr a) where put = put . castFunPtrToPtr get = castPtrToFunPtr <$> get -- Binary instances to support the GetClosure message #ifndef MIN_VERSION_ghc_heap #define MIN_VERSION_ghc_heap(major1,major2,minor) (\ (major1) < 9 || \ (major1) == 9 && (major2) < 12 || \ (major1) == 9 && (major2) == 12 && (minor) <= 2) #endif /* MIN_VERSION_ghc_heap */ #if MIN_VERSION_ghc_heap(8,11,0) instance Binary Heap.StgTSOProfInfo instance Binary Heap.CostCentreStack instance Binary Heap.CostCentre instance Binary Heap.IndexTable instance Binary Heap.WhatNext instance Binary Heap.WhyBlocked instance Binary Heap.TsoFlags #endif instance Binary Heap.StgInfoTable instance Binary Heap.ClosureType instance Binary Heap.PrimType instance Binary a => Binary (Heap.GenClosure a) data Msg = forall a . (Binary a, Show a) => Msg (Message a) getMessage :: Get Msg getMessage = do b <- getWord8 case b of 0 -> Msg <$> return Shutdown 1 -> Msg <$> return InitLinker 2 -> Msg <$> LookupSymbol <$> get 3 -> Msg <$> LookupClosure <$> get 4 -> Msg <$> LoadDLL <$> get 5 -> Msg <$> LoadArchive <$> get 6 -> Msg <$> LoadObj <$> get 7 -> Msg <$> UnloadObj <$> get 8 -> Msg <$> AddLibrarySearchPath <$> get 9 -> Msg <$> RemoveLibrarySearchPath <$> get 10 -> Msg <$> return ResolveObjs 11 -> Msg <$> FindSystemLibrary <$> get 12 -> Msg <$> (CreateBCOs . 
concatMap (runGet get)) <$> (get :: Get [LB.ByteString]) -- See Note [Parallelize CreateBCOs serialization] 13 -> Msg <$> FreeHValueRefs <$> get 14 -> Msg <$> MallocData <$> get 15 -> Msg <$> MallocStrings <$> get 16 -> Msg <$> (PrepFFI <$> get <*> get) 17 -> Msg <$> FreeFFI <$> get 18 -> Msg <$> (MkConInfoTable <$> get <*> get <*> get <*> get <*> get <*> get) 19 -> Msg <$> (EvalStmt <$> get <*> get) 20 -> Msg <$> (ResumeStmt <$> get <*> get) 21 -> Msg <$> (AbandonStmt <$> get) 22 -> Msg <$> (EvalString <$> get) 23 -> Msg <$> (EvalStringToString <$> get <*> get) 24 -> Msg <$> (EvalIO <$> get) 25 -> Msg <$> (MkCostCentres <$> get <*> get) 26 -> Msg <$> (CostCentreStackInfo <$> get) 27 -> Msg <$> (NewBreakArray <$> get) 28 -> Msg <$> (SetupBreakpoint <$> get <*> get <*> get) 29 -> Msg <$> (BreakpointStatus <$> get <*> get) 30 -> Msg <$> (GetBreakpointVar <$> get <*> get) 31 -> Msg <$> return StartTH 32 -> Msg <$> (RunModFinalizers <$> get <*> get) 33 -> Msg <$> (AddSptEntry <$> get <*> get) 34 -> Msg <$> (RunTH <$> get <*> get <*> get <*> get) 35 -> Msg <$> (GetClosure <$> get) 36 -> Msg <$> (Seq <$> get) 37 -> Msg <$> return RtsRevertCAFs 38 -> Msg <$> (ResumeSeq <$> get) 39 -> Msg <$> (NewBreakModule <$> get) 40 -> Msg <$> (LookupSymbolInDLL <$> get <*> get) _ -> error $ "Unknown Message code " ++ (show b) putMessage :: Message a -> Put putMessage m = case m of Shutdown -> putWord8 0 InitLinker -> putWord8 1 LookupSymbol str -> putWord8 2 >> put str LookupClosure str -> putWord8 3 >> put str LoadDLL str -> putWord8 4 >> put str LoadArchive str -> putWord8 5 >> put str LoadObj str -> putWord8 6 >> put str UnloadObj str -> putWord8 7 >> put str AddLibrarySearchPath str -> putWord8 8 >> put str RemoveLibrarySearchPath ptr -> putWord8 9 >> put ptr ResolveObjs -> putWord8 10 FindSystemLibrary str -> putWord8 11 >> put str CreateBCOs bco -> putWord8 12 >> put (serializeBCOs bco) -- See Note [Parallelize CreateBCOs serialization] FreeHValueRefs val -> putWord8 13 >> put 
val MallocData bs -> putWord8 14 >> put bs MallocStrings bss -> putWord8 15 >> put bss PrepFFI args res -> putWord8 16 >> put args >> put res FreeFFI p -> putWord8 17 >> put p MkConInfoTable tc p n t pt d -> putWord8 18 >> put tc >> put p >> put n >> put t >> put pt >> put d EvalStmt opts val -> putWord8 19 >> put opts >> put val ResumeStmt opts val -> putWord8 20 >> put opts >> put val AbandonStmt val -> putWord8 21 >> put val EvalString val -> putWord8 22 >> put val EvalStringToString str val -> putWord8 23 >> put str >> put val EvalIO val -> putWord8 24 >> put val MkCostCentres mod ccs -> putWord8 25 >> put mod >> put ccs CostCentreStackInfo ptr -> putWord8 26 >> put ptr NewBreakArray sz -> putWord8 27 >> put sz SetupBreakpoint arr ix cnt -> putWord8 28 >> put arr >> put ix >> put cnt BreakpointStatus arr ix -> putWord8 29 >> put arr >> put ix GetBreakpointVar a b -> putWord8 30 >> put a >> put b StartTH -> putWord8 31 RunModFinalizers a b -> putWord8 32 >> put a >> put b AddSptEntry a b -> putWord8 33 >> put a >> put b RunTH st q loc ty -> putWord8 34 >> put st >> put q >> put loc >> put ty GetClosure a -> putWord8 35 >> put a Seq a -> putWord8 36 >> put a RtsRevertCAFs -> putWord8 37 ResumeSeq a -> putWord8 38 >> put a NewBreakModule name -> putWord8 39 >> put name LookupSymbolInDLL dll str -> putWord8 40 >> put dll >> put str {- Note [Parallelize CreateBCOs serialization] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Serializing ResolvedBCO is expensive, so we do it in parallel. We split the list [ResolvedBCO] into chunks of length <= 100, and serialize every chunk in parallel, getting a [LB.ByteString] where every bytestring corresponds to a single chunk (multiple ResolvedBCOs). Previously, we stored [LB.ByteString] in the Message object, but that incurs unneccessary serialization with the internal interpreter (#23919). 
-} serializeBCOs :: [ResolvedBCO] -> [LB.ByteString] serializeBCOs rbcos = parMap doChunk (chunkList 100 rbcos) where -- make sure we force the whole lazy ByteString doChunk c = pseq (LB.length bs) bs where bs = runPut (put c) -- We don't have the parallel package, so roll our own simple parMap parMap _ [] = [] parMap f (x:xs) = fx `par` (fxs `pseq` (fx : fxs)) where fx = f x; fxs = parMap f xs chunkList :: Int -> [a] -> [[a]] chunkList _ [] = [] chunkList n xs = as : chunkList n bs where (as,bs) = splitAt n xs -- ----------------------------------------------------------------------------- -- Reading/writing messages data Pipe = Pipe { pipeRead :: Handle , pipeWrite :: Handle , pipeLeftovers :: IORef (Maybe ByteString) } remoteCall :: Binary a => Pipe -> Message a -> IO a remoteCall pipe msg = do writePipe pipe (putMessage msg) readPipe pipe get remoteTHCall :: Binary a => Pipe -> THMessage a -> IO a remoteTHCall pipe msg = do writePipe pipe (putTHMessage msg) readPipe pipe get writePipe :: Pipe -> Put -> IO () writePipe Pipe{..} put | LB.null bs = return () | otherwise = do LB.hPut pipeWrite bs hFlush pipeWrite where bs = runPut put readPipe :: Pipe -> Get a -> IO a readPipe Pipe{..} get = do leftovers <- readIORef pipeLeftovers m <- getBin pipeRead get leftovers case m of Nothing -> throw $ mkIOError eofErrorType "GHCi.Message.remoteCall" (Just pipeRead) Nothing Just (result, new_leftovers) -> do writeIORef pipeLeftovers new_leftovers return result getBin :: Handle -> Get a -> Maybe ByteString -> IO (Maybe (a, Maybe ByteString)) getBin h get leftover = go leftover (runGetIncremental get) where go Nothing (Done leftover _ msg) = return (Just (msg, if B.null leftover then Nothing else Just leftover)) go _ Done{} = throwIO (ErrorCall "getBin: Done with leftovers") go (Just leftover) (Partial fun) = do go Nothing (fun (Just leftover)) go Nothing (Partial fun) = do -- putStrLn "before hGetSome" b <- B.hGetSome h (32*1024) -- putStrLn $ "hGetSome: " ++ show (B.length 
b) if B.null b then return Nothing else go Nothing (fun (Just b)) go _lft (Fail _rest _off str) = throwIO (ErrorCall ("getBin: " ++ str)) ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/RemoteTypes.hs0000644000000000000000000001062507346545000021753 0ustar0000000000000000{-# LANGUAGE CPP, StandaloneDeriving, GeneralizedNewtypeDeriving #-} -- | -- Types for referring to remote objects in Remote GHCi. For more -- details, see Note [External GHCi pointers] in compiler/GHC/Runtime/Interpreter.hs -- -- For details on Remote GHCi, see Note [Remote GHCi] in -- compiler/GHC/Runtime/Interpreter.hs. -- module GHCi.RemoteTypes ( -- * Remote pointer RemotePtr(..) , toRemotePtr , fromRemotePtr , castRemotePtr -- * RemoteRef: reference to some heap object (potentially remote) , RemoteRef (..) , mkRemoteRef , localRef , freeRemoteRef , castRemoteRef -- * ForeignRef: RemoteRef with a finalizer , ForeignRef , mkForeignRef , withForeignRef , finalizeForeignRef , castForeignRef , unsafeForeignRefToRemoteRef -- * HValue , HValue(..) , HValueRef , ForeignHValue ) where import Prelude -- See note [Why do we import Prelude here?] import Control.DeepSeq import Data.Word import Foreign hiding (newForeignPtr) import Foreign.Concurrent import Data.Binary import GHC.Exts import GHC.ForeignPtr -- ----------------------------------------------------------------------------- -- RemotePtr -- Static pointers only; don't use this for heap-resident pointers. -- Instead use HValueRef. We will fix the remote pointer to be 64 bits. This -- should cover 64 and 32bit systems, and permits the exchange of remote ptrs -- between machines of different word size. For example, when connecting to -- an iserv instance on a different architecture with different word size via -- -fexternal-interpreter. 
newtype RemotePtr a = RemotePtr Word64 toRemotePtr :: Ptr a -> RemotePtr a toRemotePtr p = RemotePtr (fromIntegral (ptrToWordPtr p)) fromRemotePtr :: RemotePtr a -> Ptr a fromRemotePtr (RemotePtr p) = wordPtrToPtr (fromIntegral p) castRemotePtr :: RemotePtr a -> RemotePtr b castRemotePtr (RemotePtr a) = RemotePtr a deriving instance Show (RemotePtr a) deriving instance Binary (RemotePtr a) deriving instance NFData (RemotePtr a) -- ----------------------------------------------------------------------------- -- HValue: alias for Any newtype HValue = HValue Any instance Show HValue where show _ = "" -- For convenience type HValueRef = RemoteRef HValue type ForeignHValue = ForeignRef HValue -- ----------------------------------------------------------------------------- -- RemoteRef: pointer to a Heap object -- | A reference to a heap object. Potentially in a remote heap! -- These are allocated and freed explicitly. newtype RemoteRef a = RemoteRef (RemotePtr ()) deriving (Show, Binary) -- We can discard type information if we want castRemoteRef :: RemoteRef a -> RemoteRef b castRemoteRef = coerce -- | Make a reference to a local value that we can send remotely. -- This reference will keep the value that it refers to alive until -- 'freeRemoteRef' is called. mkRemoteRef :: a -> IO (RemoteRef a) mkRemoteRef a = do sp <- newStablePtr a return $! RemoteRef (toRemotePtr (castStablePtrToPtr sp)) -- | Convert a RemoteRef to its carried type. Should only be used if the -- RemoteRef originated in this process. 
localRef :: RemoteRef a -> IO a localRef (RemoteRef w) = deRefStablePtr (castPtrToStablePtr (fromRemotePtr w)) -- | Release a RemoteRef that originated in this process freeRemoteRef :: RemoteRef a -> IO () freeRemoteRef (RemoteRef w) = freeStablePtr (castPtrToStablePtr (fromRemotePtr w)) -- | An RemoteRef with a finalizer newtype ForeignRef a = ForeignRef (ForeignPtr ()) instance NFData (ForeignRef a) where rnf x = x `seq` () -- | Create a 'ForeignRef' from a 'RemoteRef'. The finalizer -- should arrange to call 'freeRemoteRef' on the 'RemoteRef'. (since -- this function needs to be called in the process that created the -- 'RemoteRef', it cannot be called directly from the finalizer). mkForeignRef :: RemoteRef a -> IO () -> IO (ForeignRef a) mkForeignRef (RemoteRef hvref) finalizer = ForeignRef <$> newForeignPtr (fromRemotePtr hvref) finalizer -- | Use a 'ForeignRef' withForeignRef :: ForeignRef a -> (RemoteRef a -> IO b) -> IO b withForeignRef (ForeignRef fp) f = withForeignPtr fp (f . RemoteRef . toRemotePtr) unsafeForeignRefToRemoteRef :: ForeignRef a -> RemoteRef a unsafeForeignRefToRemoteRef (ForeignRef fp) = RemoteRef (toRemotePtr (unsafeForeignPtrToPtr fp)) finalizeForeignRef :: ForeignRef a -> IO () finalizeForeignRef (ForeignRef fp) = finalizeForeignPtr fp castForeignRef :: ForeignRef a -> ForeignRef b castForeignRef = coerce ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/ResolvedBCO.hs0000644000000000000000000001140507346545000021577 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveGeneric, GeneralizedNewtypeDeriving, BangPatterns, CPP, MagicHash, FlexibleInstances, FlexibleContexts, TypeApplications, ScopedTypeVariables, UnboxedTuples, UndecidableInstances #-} module GHCi.ResolvedBCO ( ResolvedBCO(..) , ResolvedBCOPtr(..) , isLittleEndian , BCOByteArray(..) , mkBCOByteArray ) where import Prelude -- See note [Why do we import Prelude here?] 
import GHC.Data.SizedSeq import GHCi.RemoteTypes import GHCi.BreakArray import Data.Binary import GHC.Generics import Foreign.Storable import GHC.Exts import Data.Array.Base (IArray, UArray(..)) #include "MachDeps.h" isLittleEndian :: Bool #if defined(WORDS_BIGENDIAN) isLittleEndian = False #else isLittleEndian = True #endif -- ----------------------------------------------------------------------------- -- ResolvedBCO -- | A 'ResolvedBCO' is one in which all the 'Name' references have been -- resolved to actual addresses or 'RemoteHValues'. -- data ResolvedBCO = ResolvedBCO { resolvedBCOIsLE :: Bool, resolvedBCOArity :: {-# UNPACK #-} !Int, resolvedBCOInstrs :: BCOByteArray Word16, -- insns resolvedBCOBitmap :: BCOByteArray Word, -- bitmap resolvedBCOLits :: BCOByteArray Word, -- non-ptrs resolvedBCOPtrs :: (SizedSeq ResolvedBCOPtr) -- ptrs } deriving (Generic, Show) -- | Wrapper for a 'ByteArray#'. -- The phantom type tells what elements are stored in the 'ByteArray#'. -- Creating a 'ByteArray#' can be achieved using 'UArray''s API, -- where the underlying 'ByteArray#' can be unpacked. data BCOByteArray a = BCOByteArray { getBCOByteArray :: !ByteArray# } fromBCOByteArray :: forall a . Storable a => BCOByteArray a -> UArray Int a fromBCOByteArray (BCOByteArray ba#) = UArray 0 (n - 1) n ba# where len# = sizeofByteArray# ba# n = (I# len#) `div` sizeOf (undefined :: a) mkBCOByteArray :: UArray Int a -> BCOByteArray a mkBCOByteArray (UArray _ _ _ arr) = BCOByteArray arr instance Show (BCOByteArray Word16) where showsPrec _ _ = showString "BCOByteArray Word16" instance Show (BCOByteArray Word) where showsPrec _ _ = showString "BCOByteArray Word" -- | The Binary instance for ResolvedBCOs. -- -- Note, that we do encode the endianness, however there is no support for mixed -- endianness setups. This is primarily to ensure that ghc and iserv share the -- same endianness. 
instance Binary ResolvedBCO where put ResolvedBCO{..} = do put resolvedBCOIsLE put resolvedBCOArity put resolvedBCOInstrs put resolvedBCOBitmap put resolvedBCOLits put resolvedBCOPtrs get = ResolvedBCO <$> get <*> get <*> get <*> get <*> get <*> get -- See Note [BCOByteArray serialization] instance (Binary a, Storable a, IArray UArray a) => Binary (BCOByteArray a) where put = put . fromBCOByteArray get = mkBCOByteArray <$> get data ResolvedBCOPtr = ResolvedBCORef {-# UNPACK #-} !Int -- ^ reference to the Nth BCO in the current set | ResolvedBCOPtr {-# UNPACK #-} !(RemoteRef HValue) -- ^ reference to a previously created BCO | ResolvedBCOStaticPtr {-# UNPACK #-} !(RemotePtr ()) -- ^ reference to a static ptr | ResolvedBCOPtrBCO ResolvedBCO -- ^ a nested BCO | ResolvedBCOPtrBreakArray {-# UNPACK #-} !(RemoteRef BreakArray) -- ^ Resolves to the MutableArray# inside the BreakArray deriving (Generic, Show) instance Binary ResolvedBCOPtr -- Note [BCOByteArray serialization] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- !12142 changed some BCO blob types from UArray to -- BCOByteArray(ByteArray#) to save a little space. Unfortunately, a -- nasty serialization bug has surfaced since then. It happens when we -- need to pass BCOByteArray between host/target with mismatching word -- sizes. When 32-bit iserv receives a `BCOByteArray Word` from 64-bit -- host GHC, it would parse the buffer assuming each Word=Word32, even -- if host GHC assumes each Word=Word64, and of course it's horribly -- wrong! -- -- The root issue here is the usage of platform sized integer types in -- BCO (and any messages we pass between ghc/iserv really), we should -- do what we already do for RemotePtr: always use Word64 instead of -- Word. 
But that takes much more work, and there's an easier -- mitigation: keep BCOByteArray as ByteArray#, but serialize it as -- UArray, given the Binary instances are independent of platform word -- size and endianness, so each Word/Int is always serialized as -- 64-bit big-endian Word64/Int64, and the entire UArray is serialized -- as a list (length+elements). -- -- Since we erase the metadata in UArray, we need to find a way to -- calculate the item count by dividing the ByteArray# length with -- element size. The element size comes from Storable's sizeOf method, -- thus the addition of Storable constraint. ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/TH/0000755000000000000000000000000007346545000017446 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghci/GHCi/TH/Binary.hs0000644000000000000000000000475507346545000021241 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} -- This module is full of orphans, unfortunately module GHCi.TH.Binary () where import Prelude -- See note [Why do we import Prelude here?] 
import Data.Binary import qualified Data.ByteString as B import qualified Data.ByteString.Internal as B import GHC.Serialized import qualified GHC.Internal.TH.Syntax as TH -- Put these in a separate module because they take ages to compile instance Binary TH.Loc instance Binary TH.Name instance Binary TH.ModName instance Binary TH.NameFlavour instance Binary TH.PkgName instance Binary TH.NameSpace instance Binary TH.Module instance Binary TH.Info instance Binary TH.Type instance Binary TH.TyLit instance Binary TH.Specificity instance Binary TH.BndrVis instance Binary flag => Binary (TH.TyVarBndr flag) instance Binary TH.Role instance Binary TH.Lit instance Binary TH.Range instance Binary TH.Stmt instance Binary TH.Pat instance Binary TH.Exp instance Binary TH.Dec instance Binary TH.NamespaceSpecifier instance Binary TH.Overlap instance Binary TH.DerivClause instance Binary TH.DerivStrategy instance Binary TH.Guard instance Binary TH.Body instance Binary TH.Match instance Binary TH.Fixity instance Binary TH.TySynEqn instance Binary TH.FunDep instance Binary TH.AnnTarget instance Binary TH.RuleBndr instance Binary TH.Phases instance Binary TH.RuleMatch instance Binary TH.Inline instance Binary TH.Pragma instance Binary TH.Safety instance Binary TH.Callconv instance Binary TH.Foreign instance Binary TH.Bang instance Binary TH.SourceUnpackedness instance Binary TH.SourceStrictness instance Binary TH.DecidedStrictness instance Binary TH.FixityDirection instance Binary TH.OccName instance Binary TH.Con instance Binary TH.AnnLookup instance Binary TH.ModuleInfo instance Binary TH.Clause instance Binary TH.InjectivityAnn instance Binary TH.FamilyResultSig instance Binary TH.TypeFamilyHead instance Binary TH.PatSynDir instance Binary TH.PatSynArgs instance Binary TH.DocLoc -- We need Binary TypeRep for serializing annotations instance Binary Serialized where put (Serialized tyrep wds) = put tyrep >> put (B.pack wds) get = Serialized <$> get <*> (B.unpack <$> get) instance 
Binary TH.Bytes where put (TH.Bytes ptr off sz) = put bs where bs = B.PS ptr (fromIntegral off) (fromIntegral sz) get = do B.PS ptr off sz <- get return (TH.Bytes ptr (fromIntegral off) (fromIntegral sz)) ghc-lib-parser-9.12.2.20250421/libraries/ghci/0000755000000000000000000000000007346545000016361 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/ghci/ghci.cabal0000644000000000000000000000644007346545000020263 0ustar0000000000000000-- WARNING: ghci.cabal is automatically generated from ghci.cabal.in by -- ../../configure. Make sure you are editing ghci.cabal.in, not ghci.cabal. name: ghci version: 9.12.2 license: BSD3 license-file: LICENSE category: GHC maintainer: ghc-devs@haskell.org bug-reports: https://gitlab.haskell.org/ghc/ghc/issues/new synopsis: The library supporting GHC's interactive interpreter description: This library offers interfaces which mediate interactions between the @ghci@ interactive shell and @iserv@, GHC's out-of-process interpreter backend. cabal-version: >=1.10 build-type: Simple extra-source-files: changelog.md Flag internal-interpreter Description: Build with internal interpreter support. Default: False Manual: True Flag bootstrap Description: Enabled when building the stage1 compiler in order to vendor the in-tree `ghc-boot-th` library, and through that the in-tree TH AST defintions from `ghc-internal`. 
See Note [Bootstrapping Template Haskell] Default: False Manual: True source-repository head type: git location: https://gitlab.haskell.org/ghc/ghc.git subdir: libraries/ghci library default-language: Haskell2010 default-extensions: NoImplicitPrelude other-extensions: BangPatterns CPP DeriveGeneric ExistentialQuantification FlexibleInstances GADTs GeneralizedNewtypeDeriving InstanceSigs MagicHash MultiParamTypeClasses RecordWildCards ScopedTypeVariables StandaloneDeriving TupleSections UnboxedTuples if flag(internal-interpreter) CPP-Options: -DHAVE_INTERNAL_INTERPRETER exposed-modules: GHCi.Run GHCi.CreateBCO GHCi.ObjLink GHCi.Signals GHCi.StaticPtrTable GHCi.TH GHCi.Server if !arch(javascript) exposed-modules: GHCi.InfoTable exposed-modules: GHCi.BreakArray GHCi.BinaryArray GHCi.Message GHCi.ResolvedBCO GHCi.RemoteTypes GHCi.FFI GHCi.TH.Binary GHCi.Utils Build-Depends: rts, array == 0.5.*, base >= 4.8 && < 4.22, -- ghc-internal == 9.1202.* -- TODO: Use GHC.Internal.Desugar and GHC.Internal.Base from -- ghc-internal instead of ignoring the deprecation warning in GHCi.TH -- and GHCi.CreateBCO when we require ghc-internal of the bootstrap -- compiler ghc-prim >= 0.5.0 && < 0.14, binary == 0.8.*, bytestring >= 0.10 && < 0.13, containers >= 0.5 && < 0.8, deepseq >= 1.4 && < 1.6, filepath >= 1.4 && < 1.6, ghc-boot == 9.12.2, ghc-heap == 9.12.2, transformers >= 0.5 && < 0.7 if flag(bootstrap) build-depends: ghc-boot-th-next == 9.12.2 else build-depends: ghc-boot-th == 9.12.2 if !os(windows) Build-Depends: unix >= 2.7 && < 2.9 if arch(wasm32) build-depends: ghc-experimental == 9.1202.0 ghc-lib-parser-9.12.2.20250421/libraries/template-haskell/0000755000000000000000000000000007346545000020703 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/libraries/template-haskell/template-haskell.cabal0000644000000000000000000000334407346545000025127 0ustar0000000000000000-- WARNING: template-haskell.cabal is automatically generated from template-haskell.cabal.in by -- 
../../configure. Make sure you are editing template-haskell.cabal.in, not -- template-haskell.cabal. name: template-haskell version: 2.23.0.0 -- NOTE: Don't forget to update ./changelog.md license: BSD3 license-file: LICENSE category: Template Haskell maintainer: libraries@haskell.org bug-reports: https://gitlab.haskell.org/ghc/ghc/issues/new synopsis: Support library for Template Haskell build-type: Simple Cabal-Version: >= 1.10 description: This package provides modules containing facilities for manipulating Haskell source code using Template Haskell. . See for more information. extra-source-files: changelog.md source-repository head type: git location: https://gitlab.haskell.org/ghc/ghc.git subdir: libraries/template-haskell Library default-language: Haskell2010 other-extensions: BangPatterns CPP DefaultSignatures DeriveDataTypeable DeriveGeneric FlexibleInstances RankNTypes RoleAnnotations ScopedTypeVariables exposed-modules: Language.Haskell.TH Language.Haskell.TH.Lib Language.Haskell.TH.Lib.Internal Language.Haskell.TH.Ppr Language.Haskell.TH.PprLib Language.Haskell.TH.Quote Language.Haskell.TH.Syntax Language.Haskell.TH.LanguageExtensions Language.Haskell.TH.CodeDo build-depends: base >= 4.11 && < 4.22, ghc-boot-th == 9.12.2 build-depends: filepath hs-source-dirs: . 
ghc-options: -Wall ghc-lib-parser-9.12.2.20250421/rts/include/0000755000000000000000000000000007346545000015726 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/rts/include/ghcconfig.h0000644000000000000000000000010007346545000020015 0ustar0000000000000000#pragma once #include "ghcautoconf.h" #include "ghcplatform.h" ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/0000755000000000000000000000000007346545000020214 5ustar0000000000000000ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/arm32.h0000644000000000000000000000373007346545000021314 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The ARM EABI register mapping Here we consider ARM mode (i.e. 32bit isns) and also CPU with full VFPv3 implementation ARM registers (see Chapter 5.1 in ARM IHI 0042D and Section 9.2.2 in ARM Software Development Toolkit Reference Guide) r15 PC The Program Counter. r14 LR The Link Register. r13 SP The Stack Pointer. r12 IP The Intra-Procedure-call scratch register. r11 v8/fp Variable-register 8. r10 v7/sl Variable-register 7. r9 v6/SB/TR Platform register. The meaning of this register is defined by the platform standard. r8 v5 Variable-register 5. r7 v4 Variable register 4. r6 v3 Variable register 3. r5 v2 Variable register 2. r4 v1 Variable register 1. r3 a4 Argument / scratch register 4. r2 a3 Argument / scratch register 3. r1 a2 Argument / result / scratch register 2. r0 a1 Argument / result / scratch register 1. 
VFPv2/VFPv3/NEON registers s0-s15/d0-d7/q0-q3 Argument / result/ scratch registers s16-s31/d8-d15/q4-q7 callee-saved registers (must be preserved across subroutine calls) VFPv3/NEON registers (added to the VFPv2 registers set) d16-d31/q8-q15 Argument / result/ scratch registers ----------------------------------------------------------------------------- */ #define REG(x) __asm__(#x) #define REG_Base r4 #define REG_Sp r5 #define REG_Hp r6 #define REG_R1 r7 #define REG_R2 r8 #define REG_R3 r9 #define REG_R4 r10 #define REG_SpLim r11 #if !defined(arm_HOST_ARCH_PRE_ARMv6) /* d8 */ #define REG_F1 s16 #define REG_F2 s17 /* d9 */ #define REG_F3 s18 #define REG_F4 s19 #define REG_D1 d10 #define REG_D2 d11 #endifghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/arm64.h0000644000000000000000000000373507346545000021326 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The ARMv8/AArch64 ABI register mapping The AArch64 provides 31 64-bit general purpose registers and 32 128-bit SIMD/floating point registers. 
General purpose registers (see Chapter 5.1.1 in ARM IHI 0055B) Register | Special | Role in the procedure call standard ---------+---------+------------------------------------ SP | | The Stack Pointer r30 | LR | The Link Register r29 | FP | The Frame Pointer r19-r28 | | Callee-saved registers r18 | | The Platform Register, if needed; | | or temporary register r17 | IP1 | The second intra-procedure-call temporary register r16 | IP0 | The first intra-procedure-call scratch register r9-r15 | | Temporary registers r8 | | Indirect result location register r0-r7 | | Parameter/result registers FPU/SIMD registers s/d/q/v0-v7 Argument / result/ scratch registers s/d/q/v8-v15 callee-saved registers (must be preserved across subroutine calls, but only bottom 64-bit value needs to be preserved) s/d/q/v16-v31 temporary registers ----------------------------------------------------------------------------- */ #define REG(x) __asm__(#x) #define REG_Base r19 #define REG_Sp r20 #define REG_Hp r21 #define REG_R1 r22 #define REG_R2 r23 #define REG_R3 r24 #define REG_R4 r25 #define REG_R5 r26 #define REG_R6 r27 #define REG_SpLim r28 #define REG_F1 s8 #define REG_F2 s9 #define REG_F3 s10 #define REG_F4 s11 #define REG_D1 d12 #define REG_D2 d13 #define REG_D3 d14 #define REG_D4 d15 #define REG_XMM1 q4 #define REG_XMM2 q5 #define CALLER_SAVES_XMM1 #define CALLER_SAVES_XMM2 ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/loongarch64.h0000644000000000000000000000377207346545000022524 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The loongarch64 register mapping Register | Role(s) | Call effect ------------+-----------------------------------------+------------- zero | Hard-wired zero | - ra | Return address | caller-saved tp | Thread pointer | - sp | Stack pointer | callee-saved a0,a1 | Arguments / return values | caller-saved a2..a7 | Arguments | caller-saved t0..t8 | - | caller-saved u0 | Reserve | - fp | 
Frame pointer | callee-saved s0..s8 | - | callee-saved fa0,fa1 | Arguments / return values | caller-saved fa2..fa7 | Arguments | caller-saved ft0..ft15 | - | caller-saved fs0..fs7 | - | callee-saved Each general purpose register as well as each floating-point register is 64 bits wide, also, the u0 register is called r21 in some cases. -------------------------------------------------------------------------- */ #define REG(x) __asm__("$" #x) #define REG_Base s0 #define REG_Sp s1 #define REG_Hp s2 #define REG_R1 s3 #define REG_R2 s4 #define REG_R3 s5 #define REG_R4 s6 #define REG_R5 s7 #define REG_SpLim s8 #define REG_F1 fs0 #define REG_F2 fs1 #define REG_F3 fs2 #define REG_F4 fs3 #define REG_D1 fs4 #define REG_D2 fs5 #define REG_D3 fs6 #define REG_D4 fs7 #define MAX_REAL_FLOAT_REG 4 #define MAX_REAL_DOUBLE_REG 4 ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/ppc.h0000644000000000000000000000403007346545000021144 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The PowerPC register mapping 0 system glue? (caller-save, volatile) 1 SP (callee-save, non-volatile) 2 AIX, powerpc64-linux: RTOC (a strange special case) powerpc32-linux: reserved for use by system 3-10 args/return (caller-save, volatile) 11,12 system glue? (caller-save, volatile) 13 on 64-bit: reserved for thread state pointer on 32-bit: (callee-save, non-volatile) 14-31 (callee-save, non-volatile) f0 (caller-save, volatile) f1-f13 args/return (caller-save, volatile) f14-f31 (callee-save, non-volatile) \tr{14}--\tr{31} are wonderful callee-save registers on all ppc OSes. \tr{0}--\tr{12} are caller-save registers. \tr{%f14}--\tr{%f31} are callee-save floating-point registers. We can do the Whole Business with callee-save registers only! 
-------------------------------------------------------------------------- */ #define REG(x) __asm__(#x) #define REG_R1 r14 #define REG_R2 r15 #define REG_R3 r16 #define REG_R4 r17 #define REG_R5 r18 #define REG_R6 r19 #define REG_R7 r20 #define REG_R8 r21 #define REG_R9 r22 #define REG_R10 r23 #define REG_F1 fr14 #define REG_F2 fr15 #define REG_F3 fr16 #define REG_F4 fr17 #define REG_F5 fr18 #define REG_F6 fr19 #define REG_D1 fr20 #define REG_D2 fr21 #define REG_D3 fr22 #define REG_D4 fr23 #define REG_D5 fr24 #define REG_D6 fr25 #define REG_Sp r24 #define REG_SpLim r25 #define REG_Hp r26 #define REG_Base r27 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/riscv64.h0000644000000000000000000000463607346545000021676 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The riscv64 register mapping Register | Role(s) | Call effect ------------+-----------------------------------------+------------- zero | Hard-wired zero | - ra | Return address | caller-saved sp | Stack pointer | callee-saved gp | Global pointer | callee-saved tp | Thread pointer | callee-saved t0,t1,t2 | - | caller-saved s0 | Frame pointer | callee-saved s1 | - | callee-saved a0,a1 | Arguments / return values | caller-saved a2..a7 | Arguments | caller-saved s2..s11 | - | callee-saved t3..t6 | - | caller-saved ft0..ft7 | - | caller-saved fs0,fs1 | - | callee-saved fa0,fa1 | Arguments / return values | caller-saved fa2..fa7 | Arguments | caller-saved fs2..fs11 | - | callee-saved ft8..ft11 | - | caller-saved Each general purpose register as well as each floating-point register is 64 bits wide. 
-------------------------------------------------------------------------- */ #define REG(x) __asm__(#x) #define REG_Base s1 #define REG_Sp s2 #define REG_Hp s3 #define REG_R1 s4 #define REG_R2 s5 #define REG_R3 s6 #define REG_R4 s7 #define REG_R5 s8 #define REG_R6 s9 #define REG_R7 s10 #define REG_SpLim s11 #define REG_F1 fs0 #define REG_F2 fs1 #define REG_F3 fs2 #define REG_F4 fs3 #define REG_F5 fs4 #define REG_F6 fs5 #define REG_D1 fs6 #define REG_D2 fs7 #define REG_D3 fs8 #define REG_D4 fs9 #define REG_D5 fs10 #define REG_D6 fs11 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/s390x.h0000644000000000000000000000500307346545000021251 0ustar0000000000000000#pragma once /* ----------------------------------------------------------------------------- The s390x register mapping Register | Role(s) | Call effect ------------+-------------------------------------+----------------- r0,r1 | - | caller-saved r2 | Argument / return value | caller-saved r3,r4,r5 | Arguments | caller-saved r6 | Argument | callee-saved r7...r11 | - | callee-saved r12 | (Commonly used as GOT pointer) | callee-saved r13 | (Commonly used as literal pool pointer) | callee-saved r14 | Return address | caller-saved r15 | Stack pointer | callee-saved f0 | Argument / return value | caller-saved f2,f4,f6 | Arguments | caller-saved f1,f3,f5,f7 | - | caller-saved f8...f15 | - | callee-saved v0...v31 | - | caller-saved Each general purpose register r0 through r15 as well as each floating-point register f0 through f15 is 64 bits wide. Each vector register v0 through v31 is 128 bits wide. Note, the vector registers v0 through v15 overlap with the floating-point registers f0 through f15. 
-------------------------------------------------------------------------- */ #define REG(x) __asm__("%" #x) #define REG_Base r7 #define REG_Sp r8 #define REG_Hp r10 #define REG_R1 r11 #define REG_R2 r12 #define REG_R3 r13 #define REG_R4 r6 #define REG_R5 r2 #define REG_R6 r3 #define REG_R7 r4 #define REG_R8 r5 #define REG_SpLim r9 #define REG_MachSp r15 #define REG_F1 f8 #define REG_F2 f9 #define REG_F3 f10 #define REG_F4 f11 #define REG_F5 f0 #define REG_F6 f1 #define REG_D1 f12 #define REG_D2 f13 #define REG_D3 f14 #define REG_D4 f15 #define REG_D5 f2 #define REG_D6 f3 #define CALLER_SAVES_R5 #define CALLER_SAVES_R6 #define CALLER_SAVES_R7 #define CALLER_SAVES_R8 #define CALLER_SAVES_F5 #define CALLER_SAVES_F6 #define CALLER_SAVES_D5 #define CALLER_SAVES_D6ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/wasm32.h0000644000000000000000000000152107346545000021500 0ustar0000000000000000#pragma once #define REG_Base 0 #define REG_R1 1 #define REG_R2 2 #define REG_R3 3 #define REG_R4 4 #define REG_R5 5 #define REG_R6 6 #define REG_R7 7 #define REG_R8 8 #define REG_R9 9 #define REG_R10 10 #define REG_F1 11 #define REG_F2 12 #define REG_F3 13 #define REG_F4 14 #define REG_F5 15 #define REG_F6 16 #define REG_D1 17 #define REG_D2 18 #define REG_D3 19 #define REG_D4 20 #define REG_D5 21 #define REG_D6 22 #define REG_L1 23 #define REG_Sp 24 #define REG_SpLim 25 #define REG_Hp 26 #define REG_HpLim 27 ghc-lib-parser-9.12.2.20250421/rts/include/stg/MachRegs/x86.h0000644000000000000000000001235607346545000021021 0ustar0000000000000000/* ----------------------------------------------------------------------------- The x86 register mapping Ok, we've only got 6 general purpose registers, a frame pointer and a stack pointer. \tr{%eax} and \tr{%edx} are return values from C functions, hence they get trashed across ccalls and are caller saves. \tr{%ebx}, \tr{%esi}, \tr{%edi}, \tr{%ebp} are all callee-saves. 
Reg STG-Reg --------------- ebx Base ebp Sp esi R1 edi Hp Leaving SpLim out of the picture. -------------------------------------------------------------------------- */ #if defined(MACHREGS_i386) #define REG(x) __asm__("%" #x) #if !defined(not_doing_dynamic_linking) #define REG_Base ebx #endif #define REG_Sp ebp #if !defined(STOLEN_X86_REGS) #define STOLEN_X86_REGS 4 #endif #if STOLEN_X86_REGS >= 3 # define REG_R1 esi #endif #if STOLEN_X86_REGS >= 4 # define REG_Hp edi #endif #define REG_MachSp esp #define REG_XMM1 xmm0 #define REG_XMM2 xmm1 #define REG_XMM3 xmm2 #define REG_XMM4 xmm3 #define REG_YMM1 ymm0 #define REG_YMM2 ymm1 #define REG_YMM3 ymm2 #define REG_YMM4 ymm3 #define REG_ZMM1 zmm0 #define REG_ZMM2 zmm1 #define REG_ZMM3 zmm2 #define REG_ZMM4 zmm3 #define MAX_REAL_VANILLA_REG 1 /* always, since it defines the entry conv */ #define MAX_REAL_FLOAT_REG 0 #define MAX_REAL_DOUBLE_REG 0 #define MAX_REAL_LONG_REG 0 #define MAX_REAL_XMM_REG 4 #define MAX_REAL_YMM_REG 4 #define MAX_REAL_ZMM_REG 4 /* ----------------------------------------------------------------------------- The x86-64 register mapping %rax caller-saves, don't steal this one %rbx YES %rcx arg reg, caller-saves %rdx arg reg, caller-saves %rsi arg reg, caller-saves %rdi arg reg, caller-saves %rbp YES (our *prime* register) %rsp (unavailable - stack pointer) %r8 arg reg, caller-saves %r9 arg reg, caller-saves %r10 caller-saves %r11 caller-saves %r12 YES %r13 YES %r14 YES %r15 YES %xmm0-7 arg regs, caller-saves %xmm8-15 caller-saves Use the caller-saves regs for Rn, because we don't always have to save those (as opposed to Sp/Hp/SpLim etc. which always have to be saved). 
--------------------------------------------------------------------------- */ #elif defined(MACHREGS_x86_64) #define REG(x) __asm__("%" #x) #define REG_Base r13 #define REG_Sp rbp #define REG_Hp r12 #define REG_R1 rbx #define REG_R2 r14 #define REG_R3 rsi #define REG_R4 rdi #define REG_R5 r8 #define REG_R6 r9 #define REG_SpLim r15 #define REG_MachSp rsp /* Map Fn, Dn and XMMn to register xmmn. This unfortunately conflicts with the C calling convention, where the first argument and destination registers is xmm0, but the GHC calling convention in LLVM starts with xmm1 instead (and we can't easily change that). The aliasing allows us to pass a function any combination of up to six Float#, Double# or vector arguments without touching the stack (when using the System V calling convention). See Note [Overlapping global registers] for implications. */ #define REG_F1 xmm1 #define REG_F2 xmm2 #define REG_F3 xmm3 #define REG_F4 xmm4 #define REG_F5 xmm5 #define REG_F6 xmm6 #define REG_D1 xmm1 #define REG_D2 xmm2 #define REG_D3 xmm3 #define REG_D4 xmm4 #define REG_D5 xmm5 #define REG_D6 xmm6 #define REG_XMM1 xmm1 #define REG_XMM2 xmm2 #define REG_XMM3 xmm3 #define REG_XMM4 xmm4 #define REG_XMM5 xmm5 #define REG_XMM6 xmm6 #define REG_YMM1 ymm1 #define REG_YMM2 ymm2 #define REG_YMM3 ymm3 #define REG_YMM4 ymm4 #define REG_YMM5 ymm5 #define REG_YMM6 ymm6 #define REG_ZMM1 zmm1 #define REG_ZMM2 zmm2 #define REG_ZMM3 zmm3 #define REG_ZMM4 zmm4 #define REG_ZMM5 zmm5 #define REG_ZMM6 zmm6 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_R3 #define CALLER_SAVES_R4 #endif #define CALLER_SAVES_R5 #define CALLER_SAVES_R6 #define CALLER_SAVES_F1 #define CALLER_SAVES_F2 #define CALLER_SAVES_F3 #define CALLER_SAVES_F4 #define CALLER_SAVES_F5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_F6 #endif #define CALLER_SAVES_D1 #define CALLER_SAVES_D2 #define CALLER_SAVES_D3 #define CALLER_SAVES_D4 #define CALLER_SAVES_D5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_D6 #endif #define 
CALLER_SAVES_XMM1 #define CALLER_SAVES_XMM2 #define CALLER_SAVES_XMM3 #define CALLER_SAVES_XMM4 #define CALLER_SAVES_XMM5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_XMM6 #endif #define CALLER_SAVES_YMM1 #define CALLER_SAVES_YMM2 #define CALLER_SAVES_YMM3 #define CALLER_SAVES_YMM4 #define CALLER_SAVES_YMM5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_YMM6 #endif #define CALLER_SAVES_ZMM1 #define CALLER_SAVES_ZMM2 #define CALLER_SAVES_ZMM3 #define CALLER_SAVES_ZMM4 #define CALLER_SAVES_ZMM5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_ZMM6 #endif #define MAX_REAL_VANILLA_REG 6 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6 #define MAX_REAL_LONG_REG 0 #define MAX_REAL_XMM_REG 6 #define MAX_REAL_YMM_REG 6 #define MAX_REAL_ZMM_REG 6 #endif /* MACHREGS_i386 || MACHREGS_x86_64 */